file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---
action.filter.ts | import {Component, Input, OnInit} from '@angular/core';
import {Observable, Subject} from 'rxjs';
import {App, ApplicationType} from '../../shared/model/app.model';
import {RecordService} from '../../shared/api/record.service';
import {RecordActionType} from '../../shared/model/record.model';
@Component({
selector: 'app-clr-datagrid-action-filter',
template: ` <div>
<clr-radio-wrapper>
<input type="radio" clrRadio (change)="change()" [(ngModel)]="val" value="all" name="options" />
<label>All actions</label>
</clr-radio-wrapper>
<clr-radio-wrapper *ngFor="let action of actionTypes">
<input type="radio" clrRadio (change)="change()" [(ngModel)]="val" value="{{ action.key }}" name="options" />
<label>{{ action.name }}</label>
</clr-radio-wrapper>
</div>`
})
export class ActionFilterComponent implements OnInit {
private pchanges = new Subject<any>();
property = 'actionType';
@Input() value = null;
val = 'all';
actionTypes: RecordActionType[];
constructor(private recordService: RecordService) {}
| (): void {
this.recordService.getActionTypes().subscribe((actionTypes: RecordActionType[]) => {
this.actionTypes = actionTypes;
if (this.value === 'all' || this.value === '' || !this.value) {
this.value = null;
} else {
this.val = this.value;
this.pchanges.next(true);
}
});
}
public get changes(): Observable<any> {
return this.pchanges.asObservable();
}
change(): void {
if (this.val === 'all') {
this.value = null;
} else {
this.value = this.val as any as RecordActionType;
}
this.pchanges.next(true);
}
accepts(application: App): boolean {
return true;
}
isActive(): boolean {
return this.value !== null && this.value !== 'all' && this.value !== '';
}
}
| ngOnInit | identifier_name |
action.filter.ts | import {Component, Input, OnInit} from '@angular/core';
import {Observable, Subject} from 'rxjs';
import {App, ApplicationType} from '../../shared/model/app.model';
import {RecordService} from '../../shared/api/record.service';
import {RecordActionType} from '../../shared/model/record.model';
@Component({
selector: 'app-clr-datagrid-action-filter',
template: ` <div>
<clr-radio-wrapper>
<input type="radio" clrRadio (change)="change()" [(ngModel)]="val" value="all" name="options" />
<label>All actions</label>
</clr-radio-wrapper>
<clr-radio-wrapper *ngFor="let action of actionTypes">
<input type="radio" clrRadio (change)="change()" [(ngModel)]="val" value="{{ action.key }}" name="options" />
<label>{{ action.name }}</label>
</clr-radio-wrapper>
</div>`
})
export class ActionFilterComponent implements OnInit {
private pchanges = new Subject<any>();
property = 'actionType';
@Input() value = null;
val = 'all';
actionTypes: RecordActionType[];
constructor(private recordService: RecordService) {}
ngOnInit(): void {
this.recordService.getActionTypes().subscribe((actionTypes: RecordActionType[]) => {
this.actionTypes = actionTypes;
if (this.value === 'all' || this.value === '' || !this.value) {
this.value = null;
} else {
this.val = this.value;
this.pchanges.next(true);
}
});
}
public get changes(): Observable<any> {
return this.pchanges.asObservable();
}
change(): void {
if (this.val === 'all') | else {
this.value = this.val as any as RecordActionType;
}
this.pchanges.next(true);
}
accepts(application: App): boolean {
return true;
}
isActive(): boolean {
return this.value !== null && this.value !== 'all' && this.value !== '';
}
}
| {
this.value = null;
} | conditional_block |
action.filter.ts | import {Component, Input, OnInit} from '@angular/core';
import {Observable, Subject} from 'rxjs'; | import {RecordActionType} from '../../shared/model/record.model';
@Component({
selector: 'app-clr-datagrid-action-filter',
template: ` <div>
<clr-radio-wrapper>
<input type="radio" clrRadio (change)="change()" [(ngModel)]="val" value="all" name="options" />
<label>All actions</label>
</clr-radio-wrapper>
<clr-radio-wrapper *ngFor="let action of actionTypes">
<input type="radio" clrRadio (change)="change()" [(ngModel)]="val" value="{{ action.key }}" name="options" />
<label>{{ action.name }}</label>
</clr-radio-wrapper>
</div>`
})
export class ActionFilterComponent implements OnInit {
private pchanges = new Subject<any>();
property = 'actionType';
@Input() value = null;
val = 'all';
actionTypes: RecordActionType[];
constructor(private recordService: RecordService) {}
ngOnInit(): void {
this.recordService.getActionTypes().subscribe((actionTypes: RecordActionType[]) => {
this.actionTypes = actionTypes;
if (this.value === 'all' || this.value === '' || !this.value) {
this.value = null;
} else {
this.val = this.value;
this.pchanges.next(true);
}
});
}
public get changes(): Observable<any> {
return this.pchanges.asObservable();
}
change(): void {
if (this.val === 'all') {
this.value = null;
} else {
this.value = this.val as any as RecordActionType;
}
this.pchanges.next(true);
}
accepts(application: App): boolean {
return true;
}
isActive(): boolean {
return this.value !== null && this.value !== 'all' && this.value !== '';
}
} | import {App, ApplicationType} from '../../shared/model/app.model';
import {RecordService} from '../../shared/api/record.service'; | random_line_split |
action.filter.ts | import {Component, Input, OnInit} from '@angular/core';
import {Observable, Subject} from 'rxjs';
import {App, ApplicationType} from '../../shared/model/app.model';
import {RecordService} from '../../shared/api/record.service';
import {RecordActionType} from '../../shared/model/record.model';
@Component({
selector: 'app-clr-datagrid-action-filter',
template: ` <div>
<clr-radio-wrapper>
<input type="radio" clrRadio (change)="change()" [(ngModel)]="val" value="all" name="options" />
<label>All actions</label>
</clr-radio-wrapper>
<clr-radio-wrapper *ngFor="let action of actionTypes">
<input type="radio" clrRadio (change)="change()" [(ngModel)]="val" value="{{ action.key }}" name="options" />
<label>{{ action.name }}</label>
</clr-radio-wrapper>
</div>`
})
export class ActionFilterComponent implements OnInit {
private pchanges = new Subject<any>();
property = 'actionType';
@Input() value = null;
val = 'all';
actionTypes: RecordActionType[];
constructor(private recordService: RecordService) {}
ngOnInit(): void |
public get changes(): Observable<any> {
return this.pchanges.asObservable();
}
change(): void {
if (this.val === 'all') {
this.value = null;
} else {
this.value = this.val as any as RecordActionType;
}
this.pchanges.next(true);
}
accepts(application: App): boolean {
return true;
}
isActive(): boolean {
return this.value !== null && this.value !== 'all' && this.value !== '';
}
}
| {
this.recordService.getActionTypes().subscribe((actionTypes: RecordActionType[]) => {
this.actionTypes = actionTypes;
if (this.value === 'all' || this.value === '' || !this.value) {
this.value = null;
} else {
this.val = this.value;
this.pchanges.next(true);
}
});
} | identifier_body |
symbol_test.js | /*
* Copyright 2016 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Tests for user-defined Symbols.
*/
goog.require('goog.testing.jsunit');
const s1 = Symbol('example');
const s2 = Symbol('example');
/** @unrestricted */
const SymbolProps = class {
[s1]() { return 's1'; }
[s2]() { return 's2'; }
}
function testSymbols() {
const sp = new SymbolProps();
assertEquals('s1', sp[s1]());
assertEquals('s2', sp[s2]());
}
function testArrayIterator() {
// Note: this test cannot pass in IE8 since we can't polyfill
// Array.prototype methods and maintain correct for-in behavior.
if (typeof Object.defineProperties !== 'function') return;
const iter = [2, 4, 6][Symbol.iterator]();
assertObjectEquals({value: 2, done: false}, iter.next());
assertObjectEquals({value: 4, done: false}, iter.next());
assertObjectEquals({value: 6, done: false}, iter.next());
assertTrue(iter.next().done);
} | * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, | random_line_split |
symbol_test.js | /*
* Copyright 2016 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Tests for user-defined Symbols.
*/
goog.require('goog.testing.jsunit');
const s1 = Symbol('example');
const s2 = Symbol('example');
/** @unrestricted */
const SymbolProps = class {
[s1]() { return 's1'; }
[s2]() |
}
function testSymbols() {
const sp = new SymbolProps();
assertEquals('s1', sp[s1]());
assertEquals('s2', sp[s2]());
}
function testArrayIterator() {
// Note: this test cannot pass in IE8 since we can't polyfill
// Array.prototype methods and maintain correct for-in behavior.
if (typeof Object.defineProperties !== 'function') return;
const iter = [2, 4, 6][Symbol.iterator]();
assertObjectEquals({value: 2, done: false}, iter.next());
assertObjectEquals({value: 4, done: false}, iter.next());
assertObjectEquals({value: 6, done: false}, iter.next());
assertTrue(iter.next().done);
}
| { return 's2'; } | identifier_body |
symbol_test.js | /*
* Copyright 2016 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Tests for user-defined Symbols.
*/
goog.require('goog.testing.jsunit');
const s1 = Symbol('example');
const s2 = Symbol('example');
/** @unrestricted */
const SymbolProps = class {
[s1]() { return 's1'; }
[s2]() { return 's2'; }
}
function testSymbols() {
const sp = new SymbolProps();
assertEquals('s1', sp[s1]());
assertEquals('s2', sp[s2]());
}
function | () {
// Note: this test cannot pass in IE8 since we can't polyfill
// Array.prototype methods and maintain correct for-in behavior.
if (typeof Object.defineProperties !== 'function') return;
const iter = [2, 4, 6][Symbol.iterator]();
assertObjectEquals({value: 2, done: false}, iter.next());
assertObjectEquals({value: 4, done: false}, iter.next());
assertObjectEquals({value: 6, done: false}, iter.next());
assertTrue(iter.next().done);
}
| testArrayIterator | identifier_name |
BaseControl.tsx | // Imports
import * as React from "react";
import ValidationManager from "../Validation/ValidationManager"
import ErrorDisplay from "../Validation/ErrorDisplay"
// Importation des règles CSS de bases -> à transformer en styled-components
import "../../../../Common/theming/base.css"
import UpTooltip, { Tooltip } from '../../../Display/Tooltip'
import TypeNullControl from "../Validation/TypeNullControl"
import { ThemedProps } from '../../../../Common/theming/types'
import { isString } from '../../../../Common/utils'
// Exports
const ONCHANGE_MUST_BE_SPECIFIED = "La méthode onChange doit être spécifié dans le cas où la valeur du composant est défini dans les props";
export interface BaseControlProps<_BaseType> extends ThemedProps {
onChange?: (arg: _BaseType, event: any, error: boolean) => void;
value?: _BaseType;
defaultValue?: _BaseType;
disabled?: boolean;
readonly?: boolean;
tooltip?: string | Tooltip;
isRequired?: boolean;
showError?: boolean;
}
export interface BaseControlState<_BaseType> {
error?: string;
value?: _BaseType;
extra?: any;
}
export abstract class BaseControlComponent<_Props, _BaseType> extends React.Component<BaseControlProps<_BaseType> & _Props, BaseControlState<_BaseType>> {
_validationManager: ValidationManager;
constructor(props?: BaseControlProps<_BaseType> & _Props, context?) {
super(props, context);
this.state = {
error: null,
value: this.props.value !== undefined ? this.props.value as any :
this.props.defaultValue !== undefined ? this.props.defaultValue as any
: null
};
this.initWithProps();
this.registerValidations();
}
private initWithProps() {
if (this.props.value !== undefined)
this.state = { value: this.props.value as any };
}
protected registerValidations() {
this._validationManager = new ValidationManager();
if (this.props.isRequired) {
this._validationManager.addControl(new TypeNullControl());
}
}
abstract getValue(args: any): _BaseType;
protected setValue = (receiveValue: any): _BaseType => {
return receiveValue ;
}
abstract renderControl(): JSX.Element;
private checkAndDispatch = (value?: any) => { | var hasError = this.checkData(cleanData);
this.setState({ value: cleanData }, () => { this.dispatchOnChange(this.state.value, null, hasError) });
} else {
this.setState({ value: cleanData }, () => { this.dispatchOnChange(this.state.value, null, null); });
}
}
private equal = (v1, v2) => {
if (v1 === v2) {
return v1 !== 0 || 1 / v1 === 1 / v2;
} else {
return v1 !== v1 && v2 !== v2;
}
}
validateProps(nextProps) {
if (nextProps.value !== undefined && nextProps.onChange === undefined) {
throw new Error(ONCHANGE_MUST_BE_SPECIFIED);
}
}
public componentWillReceiveProps(nextProps) {
var newValue = nextProps.value;
var oldValue = this.state.value;
if (newValue !== undefined && !this.equal(newValue, oldValue)) {
// Handle specific conversion between the value receive from props and the inner state
var value = this.setValue(nextProps.value);
// Reset the error : if one it will be set in the checkData
this.setState({ value: value, error: null }, this.checkAndDispatch);
}
}
public handleChangeEvent = (event) => {
this.checkAndDispatch(event);
}
private checkData = (value?: any) => {
var result = this._validationManager.isValidValue(value || this.state.value);
if (result.hasError) {
this.setState({ error: result.errorMessage });
} else {
this.setState({ error: null });
}
return result.hasError;
}
public hasError = (): boolean => {
return this.state.error != null;
}
public render() {
var _tooltip: Tooltip = null;
if (this.props.tooltip) {
if (isString(this.props.tooltip)) {
_tooltip = {
content: this.props.tooltip as string
}
} else {
_tooltip = this.props.tooltip as Tooltip;
}
}
return (<ErrorDisplay showError={this.props.showError} hasError={this.hasError()} error={this.state.error}>
{_tooltip === null ?
this.renderControl()
:
<UpTooltip {..._tooltip}>
{this.renderControl()}
</UpTooltip>}
</ErrorDisplay>
);
}
componentDidMount() {
this.checkAndDispatch();
}
public dispatchOnChange = (data: _BaseType, event, error: boolean) => {
if (this.props.onChange !== undefined) {
this.props.onChange(data, event, error);
}
}
} | var _value = (value !== undefined) ? value : this.state.value;
var cleanData: _BaseType = this.getValue(_value);
if (this._validationManager !== undefined) { | random_line_split |
BaseControl.tsx | // Imports
import * as React from "react";
import ValidationManager from "../Validation/ValidationManager"
import ErrorDisplay from "../Validation/ErrorDisplay"
// Importation des règles CSS de bases -> à transformer en styled-components
import "../../../../Common/theming/base.css"
import UpTooltip, { Tooltip } from '../../../Display/Tooltip'
import TypeNullControl from "../Validation/TypeNullControl"
import { ThemedProps } from '../../../../Common/theming/types'
import { isString } from '../../../../Common/utils'
// Exports
const ONCHANGE_MUST_BE_SPECIFIED = "La méthode onChange doit être spécifié dans le cas où la valeur du composant est défini dans les props";
export interface BaseControlProps<_BaseType> extends ThemedProps {
onChange?: (arg: _BaseType, event: any, error: boolean) => void;
value?: _BaseType;
defaultValue?: _BaseType;
disabled?: boolean;
readonly?: boolean;
tooltip?: string | Tooltip;
isRequired?: boolean;
showError?: boolean;
}
export interface BaseControlState<_BaseType> {
error?: string;
value?: _BaseType;
extra?: any;
}
export abstract class BaseControlComponent<_Props, _BaseType> extends React.Component<BaseControlProps<_BaseType> & _Props, BaseControlState<_BaseType>> {
_validationManager: ValidationManager;
constructor(props?: BaseControlProps<_BaseType> & _Props, context?) {
super(props, context);
this.state = {
error: null,
value: this.props.value !== undefined ? this.props.value as any :
this.props.defaultValue !== undefined ? this.props.defaultValue as any
: null
};
this.initWithProps();
this.registerValidations();
}
private initWithProps() {
if (this.props.value !== undefined)
this.state = { value: this.props.value as any };
}
protected registerValidations() {
this._validationManager = new ValidationManager();
if (this.props.isRequired) {
this._validationManager.addControl(new TypeNullControl());
}
}
abstract getValue(args: any): _BaseType;
protected setValue = (receiveValue: any): _BaseType => {
return receiveValue ;
}
abstract renderControl(): JSX.Element;
private checkAndDispatch = (value?: any) => {
var _value = (value !== undefined) ? value : this.state.value;
var cleanData: _BaseType = this.getValue(_value);
if (this._validationManager !== undefined) {
var hasError = this.checkData(cleanData);
this.setState({ value: cleanData }, () => { this.dispatchOnChange(this.state.value, null, hasError) });
} else {
this.setState({ value: cleanData }, () => { this.dispatchOnChange(this.state.value, null, null); });
}
}
private equal = (v1, v2) => {
if (v1 === v2) {
return v1 !== 0 || 1 / v1 === 1 / v2;
} else {
return v1 !== v1 && v2 !== v2;
}
}
validate | ps) {
if (nextProps.value !== undefined && nextProps.onChange === undefined) {
throw new Error(ONCHANGE_MUST_BE_SPECIFIED);
}
}
public componentWillReceiveProps(nextProps) {
var newValue = nextProps.value;
var oldValue = this.state.value;
if (newValue !== undefined && !this.equal(newValue, oldValue)) {
// Handle specific conversion between the value receive from props and the inner state
var value = this.setValue(nextProps.value);
// Reset the error : if one it will be set in the checkData
this.setState({ value: value, error: null }, this.checkAndDispatch);
}
}
public handleChangeEvent = (event) => {
this.checkAndDispatch(event);
}
private checkData = (value?: any) => {
var result = this._validationManager.isValidValue(value || this.state.value);
if (result.hasError) {
this.setState({ error: result.errorMessage });
} else {
this.setState({ error: null });
}
return result.hasError;
}
public hasError = (): boolean => {
return this.state.error != null;
}
public render() {
var _tooltip: Tooltip = null;
if (this.props.tooltip) {
if (isString(this.props.tooltip)) {
_tooltip = {
content: this.props.tooltip as string
}
} else {
_tooltip = this.props.tooltip as Tooltip;
}
}
return (<ErrorDisplay showError={this.props.showError} hasError={this.hasError()} error={this.state.error}>
{_tooltip === null ?
this.renderControl()
:
<UpTooltip {..._tooltip}>
{this.renderControl()}
</UpTooltip>}
</ErrorDisplay>
);
}
componentDidMount() {
this.checkAndDispatch();
}
public dispatchOnChange = (data: _BaseType, event, error: boolean) => {
if (this.props.onChange !== undefined) {
this.props.onChange(data, event, error);
}
}
} | Props(nextPro | identifier_name |
BaseControl.tsx | // Imports
import * as React from "react";
import ValidationManager from "../Validation/ValidationManager"
import ErrorDisplay from "../Validation/ErrorDisplay"
// Importation des règles CSS de bases -> à transformer en styled-components
import "../../../../Common/theming/base.css"
import UpTooltip, { Tooltip } from '../../../Display/Tooltip'
import TypeNullControl from "../Validation/TypeNullControl"
import { ThemedProps } from '../../../../Common/theming/types'
import { isString } from '../../../../Common/utils'
// Exports
const ONCHANGE_MUST_BE_SPECIFIED = "La méthode onChange doit être spécifié dans le cas où la valeur du composant est défini dans les props";
export interface BaseControlProps<_BaseType> extends ThemedProps {
onChange?: (arg: _BaseType, event: any, error: boolean) => void;
value?: _BaseType;
defaultValue?: _BaseType;
disabled?: boolean;
readonly?: boolean;
tooltip?: string | Tooltip;
isRequired?: boolean;
showError?: boolean;
}
export interface BaseControlState<_BaseType> {
error?: string;
value?: _BaseType;
extra?: any;
}
export abstract class BaseControlComponent<_Props, _BaseType> extends React.Component<BaseControlProps<_BaseType> & _Props, BaseControlState<_BaseType>> {
_validationManager: ValidationManager;
constructor(props?: BaseControlProps<_BaseType> & _Props, context?) {
| ivate initWithProps() {
if (this.props.value !== undefined)
this.state = { value: this.props.value as any };
}
protected registerValidations() {
this._validationManager = new ValidationManager();
if (this.props.isRequired) {
this._validationManager.addControl(new TypeNullControl());
}
}
abstract getValue(args: any): _BaseType;
protected setValue = (receiveValue: any): _BaseType => {
return receiveValue ;
}
abstract renderControl(): JSX.Element;
private checkAndDispatch = (value?: any) => {
var _value = (value !== undefined) ? value : this.state.value;
var cleanData: _BaseType = this.getValue(_value);
if (this._validationManager !== undefined) {
var hasError = this.checkData(cleanData);
this.setState({ value: cleanData }, () => { this.dispatchOnChange(this.state.value, null, hasError) });
} else {
this.setState({ value: cleanData }, () => { this.dispatchOnChange(this.state.value, null, null); });
}
}
private equal = (v1, v2) => {
if (v1 === v2) {
return v1 !== 0 || 1 / v1 === 1 / v2;
} else {
return v1 !== v1 && v2 !== v2;
}
}
validateProps(nextProps) {
if (nextProps.value !== undefined && nextProps.onChange === undefined) {
throw new Error(ONCHANGE_MUST_BE_SPECIFIED);
}
}
public componentWillReceiveProps(nextProps) {
var newValue = nextProps.value;
var oldValue = this.state.value;
if (newValue !== undefined && !this.equal(newValue, oldValue)) {
// Handle specific conversion between the value receive from props and the inner state
var value = this.setValue(nextProps.value);
// Reset the error : if one it will be set in the checkData
this.setState({ value: value, error: null }, this.checkAndDispatch);
}
}
public handleChangeEvent = (event) => {
this.checkAndDispatch(event);
}
private checkData = (value?: any) => {
var result = this._validationManager.isValidValue(value || this.state.value);
if (result.hasError) {
this.setState({ error: result.errorMessage });
} else {
this.setState({ error: null });
}
return result.hasError;
}
public hasError = (): boolean => {
return this.state.error != null;
}
public render() {
var _tooltip: Tooltip = null;
if (this.props.tooltip) {
if (isString(this.props.tooltip)) {
_tooltip = {
content: this.props.tooltip as string
}
} else {
_tooltip = this.props.tooltip as Tooltip;
}
}
return (<ErrorDisplay showError={this.props.showError} hasError={this.hasError()} error={this.state.error}>
{_tooltip === null ?
this.renderControl()
:
<UpTooltip {..._tooltip}>
{this.renderControl()}
</UpTooltip>}
</ErrorDisplay>
);
}
componentDidMount() {
this.checkAndDispatch();
}
public dispatchOnChange = (data: _BaseType, event, error: boolean) => {
if (this.props.onChange !== undefined) {
this.props.onChange(data, event, error);
}
}
} | super(props, context);
this.state = {
error: null,
value: this.props.value !== undefined ? this.props.value as any :
this.props.defaultValue !== undefined ? this.props.defaultValue as any
: null
};
this.initWithProps();
this.registerValidations();
}
pr | identifier_body |
EditRecord.js | /*
添加用户的教育经历
*/
import React, { PropTypes } from 'react';
import { Toast, Msg } from 'react-weui';
import RecordHistory from '../detail/RecordHistory';
import AddRecord from './AddRecord';
import RemoveRecord from './RemoveRecord';
import { connect } from 'react-redux';
import * as actions from '../../../actions/acceptors/record';
export class EditRecordComponent extends React.Component {
static propTypes = {
data: React.PropTypes.array,
add: PropTypes.func,
remove: PropTypes.func,
init: PropTypes.func,
err: PropTypes.object,
fields: PropTypes.object,
toast: PropTypes.object,
acceptorId: PropTypes.string.isRequired,
};
static contextTypes = {
setTitle: PropTypes.func.isRequired,
}; | }
render() {
const { err, data, toast } = this.props;
return err ? <Msg type="warn" title="发生错误" description={JSON.stringify(err.msg)} /> : (
<div>
<RecordHistory data={data} />
<AddRecord {...this.props} />
<RemoveRecord {...this.props} />
<Toast icon="loading" show={toast.show} >加载中</Toast>
</div>
);
}
}
const mapStateToProps = state => ({
...state.acceptors.records,
});
const mapDispatchToProps = dispatch => ({
add: (id, record) => dispatch(actions.addRecord(id, record)),
init: id => dispatch(actions.initRecords(id)),
remove: (id, recordId) => dispatch(actions.deleteRecord(id, recordId)),
});
export default connect(mapStateToProps, mapDispatchToProps)(EditRecordComponent); | componentDidMount() {
this.props.init(this.props.acceptorId);
this.context.setTitle('修改受赠记录'); | random_line_split |
EditRecord.js | /*
添加用户的教育经历
*/
import React, { PropTypes } from 'react';
import { Toast, Msg } from 'react-weui';
import RecordHistory from '../detail/RecordHistory';
import AddRecord from './AddRecord';
import RemoveRecord from './RemoveRecord';
import { connect } from 'react-redux';
import * as actions from '../../../actions/acceptors/record';
export class EditRecordComponent extends React.Component {
static propTypes = {
data: React.PropTypes.array,
add: PropTypes.func,
remove: PropTypes.func,
init: PropTypes.func,
err: PropTypes.object,
fields: PropTypes.object,
toast: PropTypes.object,
acceptorId: PropTypes.string.isRequired,
};
static contextTypes = {
setTitle: PropTypes.func.isRequired,
};
componentDidMount( | s.init(this.props.acceptorId);
this.context.setTitle('修改受赠记录');
}
render() {
const { err, data, toast } = this.props;
return err ? <Msg type="warn" title="发生错误" description={JSON.stringify(err.msg)} /> : (
<div>
<RecordHistory data={data} />
<AddRecord {...this.props} />
<RemoveRecord {...this.props} />
<Toast icon="loading" show={toast.show} >加载中</Toast>
</div>
);
}
}
const mapStateToProps = state => ({
...state.acceptors.records,
});
const mapDispatchToProps = dispatch => ({
add: (id, record) => dispatch(actions.addRecord(id, record)),
init: id => dispatch(actions.initRecords(id)),
remove: (id, recordId) => dispatch(actions.deleteRecord(id, recordId)),
});
export default connect(mapStateToProps, mapDispatchToProps)(EditRecordComponent);
| ) {
this.prop | identifier_name |
new-thread.controller.js | (function(){
function newThreadCtrl($location, $state, MessageEditorService){
var vm = this;
vm.getConstants = function(){
return MessageEditorService.pmConstants;
};
vm.newThread = MessageEditorService.getThreadTemplate($location.search().boardId);
vm.returnToBoard = function(){
var boardId = $location.search().boardId;
$state.go('board',{boardId: boardId});
};
vm.previewThread = function(){
vm.previewThread = MessageEditorService.getThreadPreview(vm.newThread);
vm.previewThread.$promise.then(function(data){
vm.preview = true;
});
};
vm.postThread = function(){
MessageEditorService.saveThread(vm.newThread).$promise.then(function(data){
});
} |
vm.isOpen = false;
}
angular.module('zfgc.forum')
.controller('newThreadCtrl', ['$location', '$state', 'MessageEditorService', newThreadCtrl]);
})(); | random_line_split |
|
new-thread.controller.js | (function(){
function newThreadCtrl($location, $state, MessageEditorService) |
angular.module('zfgc.forum')
.controller('newThreadCtrl', ['$location', '$state', 'MessageEditorService', newThreadCtrl]);
})(); | {
var vm = this;
vm.getConstants = function(){
return MessageEditorService.pmConstants;
};
vm.newThread = MessageEditorService.getThreadTemplate($location.search().boardId);
vm.returnToBoard = function(){
var boardId = $location.search().boardId;
$state.go('board',{boardId: boardId});
};
vm.previewThread = function(){
vm.previewThread = MessageEditorService.getThreadPreview(vm.newThread);
vm.previewThread.$promise.then(function(data){
vm.preview = true;
});
};
vm.postThread = function(){
MessageEditorService.saveThread(vm.newThread).$promise.then(function(data){
});
}
vm.isOpen = false;
} | identifier_body |
new-thread.controller.js | (function(){
function | ($location, $state, MessageEditorService){
var vm = this;
vm.getConstants = function(){
return MessageEditorService.pmConstants;
};
vm.newThread = MessageEditorService.getThreadTemplate($location.search().boardId);
vm.returnToBoard = function(){
var boardId = $location.search().boardId;
$state.go('board',{boardId: boardId});
};
vm.previewThread = function(){
vm.previewThread = MessageEditorService.getThreadPreview(vm.newThread);
vm.previewThread.$promise.then(function(data){
vm.preview = true;
});
};
vm.postThread = function(){
MessageEditorService.saveThread(vm.newThread).$promise.then(function(data){
});
}
vm.isOpen = false;
}
angular.module('zfgc.forum')
.controller('newThreadCtrl', ['$location', '$state', 'MessageEditorService', newThreadCtrl]);
})(); | newThreadCtrl | identifier_name |
DensifyGeometriesInterval.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
DensifyGeometriesInterval.py by Anita Graser, Dec 2012
based on DensifyGeometries.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import range
__author__ = 'Anita Graser'
__date__ = 'Dec 2012'
__copyright__ = '(C) 2012, Anita Graser'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingParameterNumber)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class DensifyGeometriesInterval(QgisFeatureBasedAlgorithm):
INTERVAL = 'INTERVAL'
def group(self):
return self.tr('Vector geometry')
def __init__(self):
super().__init__()
self.interval = None
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterNumber(self.INTERVAL,
self.tr('Interval between vertices to add'), QgsProcessingParameterNumber.Double,
1, False, 0, 10000000))
def name(self):
return 'densifygeometriesgivenaninterval'
def displayName(self):
return self.tr('Densify geometries given an interval')
def outputName(self):
|
def prepareAlgorithm(self, parameters, context, feedback):
interval = self.parameterAsDouble(parameters, self.INTERVAL, context)
return True
def processFeature(self, feature, feedback):
if feature.hasGeometry():
new_geometry = feature.geometry().densifyByDistance(float(interval))
feature.setGeometry(new_geometry)
return feature
| return self.tr('Densified') | identifier_body |
DensifyGeometriesInterval.py | # -*- coding: utf-8 -*- | DensifyGeometriesInterval.py by Anita Graser, Dec 2012
based on DensifyGeometries.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import range
__author__ = 'Anita Graser'
__date__ = 'Dec 2012'
__copyright__ = '(C) 2012, Anita Graser'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingParameterNumber)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class DensifyGeometriesInterval(QgisFeatureBasedAlgorithm):
INTERVAL = 'INTERVAL'
def group(self):
return self.tr('Vector geometry')
def __init__(self):
super().__init__()
self.interval = None
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterNumber(self.INTERVAL,
self.tr('Interval between vertices to add'), QgsProcessingParameterNumber.Double,
1, False, 0, 10000000))
def name(self):
return 'densifygeometriesgivenaninterval'
def displayName(self):
return self.tr('Densify geometries given an interval')
def outputName(self):
return self.tr('Densified')
def prepareAlgorithm(self, parameters, context, feedback):
interval = self.parameterAsDouble(parameters, self.INTERVAL, context)
return True
def processFeature(self, feature, feedback):
if feature.hasGeometry():
new_geometry = feature.geometry().densifyByDistance(float(interval))
feature.setGeometry(new_geometry)
return feature |
"""
*************************************************************************** | random_line_split |
DensifyGeometriesInterval.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
DensifyGeometriesInterval.py by Anita Graser, Dec 2012
based on DensifyGeometries.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import range
__author__ = 'Anita Graser'
__date__ = 'Dec 2012'
__copyright__ = '(C) 2012, Anita Graser'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingParameterNumber)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class DensifyGeometriesInterval(QgisFeatureBasedAlgorithm):
INTERVAL = 'INTERVAL'
def group(self):
return self.tr('Vector geometry')
def __init__(self):
super().__init__()
self.interval = None
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterNumber(self.INTERVAL,
self.tr('Interval between vertices to add'), QgsProcessingParameterNumber.Double,
1, False, 0, 10000000))
def name(self):
return 'densifygeometriesgivenaninterval'
def displayName(self):
return self.tr('Densify geometries given an interval')
def outputName(self):
return self.tr('Densified')
def prepareAlgorithm(self, parameters, context, feedback):
interval = self.parameterAsDouble(parameters, self.INTERVAL, context)
return True
def processFeature(self, feature, feedback):
if feature.hasGeometry():
|
return feature
| new_geometry = feature.geometry().densifyByDistance(float(interval))
feature.setGeometry(new_geometry) | conditional_block |
DensifyGeometriesInterval.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
DensifyGeometriesInterval.py by Anita Graser, Dec 2012
based on DensifyGeometries.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import range
__author__ = 'Anita Graser'
__date__ = 'Dec 2012'
__copyright__ = '(C) 2012, Anita Graser'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import (QgsProcessingParameterNumber)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class DensifyGeometriesInterval(QgisFeatureBasedAlgorithm):
INTERVAL = 'INTERVAL'
def group(self):
return self.tr('Vector geometry')
def __init__(self):
super().__init__()
self.interval = None
def initParameters(self, config=None):
self.addParameter(QgsProcessingParameterNumber(self.INTERVAL,
self.tr('Interval between vertices to add'), QgsProcessingParameterNumber.Double,
1, False, 0, 10000000))
def | (self):
return 'densifygeometriesgivenaninterval'
def displayName(self):
return self.tr('Densify geometries given an interval')
def outputName(self):
return self.tr('Densified')
def prepareAlgorithm(self, parameters, context, feedback):
interval = self.parameterAsDouble(parameters, self.INTERVAL, context)
return True
def processFeature(self, feature, feedback):
if feature.hasGeometry():
new_geometry = feature.geometry().densifyByDistance(float(interval))
feature.setGeometry(new_geometry)
return feature
| name | identifier_name |
AbstractMaskSystem.ts | import { System } from '../System';
import type { MaskData } from './MaskData';
import type { Renderer } from '../Renderer';
/**
* System plugin to the renderer to manage masks of certain type
*
* @class
* @extends PIXI.System
* @memberof PIXI.systems
*/
export class AbstractMaskSystem extends System
{
protected maskStack: Array<MaskData>;
protected glConst: number;
/**
* @param {PIXI.Renderer} renderer - The renderer this System works for.
*/
constructor(renderer: Renderer)
{
super(renderer);
/**
* The mask stack
* @member {PIXI.MaskData[]}
*/
this.maskStack = [];
/**
* Constant for gl.enable
* @member {number}
* @private
*/
this.glConst = 0;
}
/**
* gets count of masks of certain type
* @returns {number}
*/
| (): number
{
return this.maskStack.length;
}
/**
* Changes the mask stack that is used by this System.
*
* @param {PIXI.MaskData[]} maskStack - The mask stack
*/
setMaskStack(maskStack: Array<MaskData>): void
{
const { gl } = this.renderer;
const curStackLen = this.getStackLength();
this.maskStack = maskStack;
const newStackLen = this.getStackLength();
if (newStackLen !== curStackLen)
{
if (newStackLen === 0)
{
gl.disable(this.glConst);
}
else
{
gl.enable(this.glConst);
this._useCurrent();
}
}
}
/**
* Setup renderer to use the current mask data.
* @private
*/
protected _useCurrent(): void
{
// OVERWRITE;
}
/**
* Destroys the mask stack.
*
*/
destroy(): void
{
super.destroy();
this.maskStack = null;
}
}
| getStackLength | identifier_name |
AbstractMaskSystem.ts | import { System } from '../System';
import type { MaskData } from './MaskData';
import type { Renderer } from '../Renderer';
/**
* System plugin to the renderer to manage masks of certain type
*
* @class
* @extends PIXI.System
* @memberof PIXI.systems
*/
export class AbstractMaskSystem extends System
{
protected maskStack: Array<MaskData>;
protected glConst: number;
/**
* @param {PIXI.Renderer} renderer - The renderer this System works for.
*/
constructor(renderer: Renderer)
{
super(renderer);
/**
* The mask stack
* @member {PIXI.MaskData[]}
*/
this.maskStack = [];
/**
* Constant for gl.enable
* @member {number}
* @private
*/
this.glConst = 0;
}
/**
* gets count of masks of certain type
* @returns {number}
*/
getStackLength(): number
{
return this.maskStack.length;
}
/**
* Changes the mask stack that is used by this System.
*
* @param {PIXI.MaskData[]} maskStack - The mask stack
*/
setMaskStack(maskStack: Array<MaskData>): void
{
const { gl } = this.renderer;
const curStackLen = this.getStackLength();
this.maskStack = maskStack;
const newStackLen = this.getStackLength();
if (newStackLen !== curStackLen)
{
if (newStackLen === 0)
|
else
{
gl.enable(this.glConst);
this._useCurrent();
}
}
}
/**
* Setup renderer to use the current mask data.
* @private
*/
protected _useCurrent(): void
{
// OVERWRITE;
}
/**
* Destroys the mask stack.
*
*/
destroy(): void
{
super.destroy();
this.maskStack = null;
}
}
| {
gl.disable(this.glConst);
} | conditional_block |
AbstractMaskSystem.ts | import { System } from '../System';
import type { MaskData } from './MaskData';
import type { Renderer } from '../Renderer';
/**
* System plugin to the renderer to manage masks of certain type
*
* @class
* @extends PIXI.System
* @memberof PIXI.systems
*/
export class AbstractMaskSystem extends System
{
protected maskStack: Array<MaskData>;
protected glConst: number;
/**
* @param {PIXI.Renderer} renderer - The renderer this System works for.
*/
constructor(renderer: Renderer)
{
super(renderer);
/**
* The mask stack
* @member {PIXI.MaskData[]}
*/
this.maskStack = [];
/**
* Constant for gl.enable
* @member {number}
* @private
*/
this.glConst = 0;
}
/**
* gets count of masks of certain type
* @returns {number}
*/
getStackLength(): number
{
return this.maskStack.length;
}
/**
* Changes the mask stack that is used by this System.
*
* @param {PIXI.MaskData[]} maskStack - The mask stack
*/
setMaskStack(maskStack: Array<MaskData>): void
{
const { gl } = this.renderer;
const curStackLen = this.getStackLength();
this.maskStack = maskStack;
const newStackLen = this.getStackLength();
if (newStackLen !== curStackLen)
{
if (newStackLen === 0)
{
gl.disable(this.glConst);
}
else
{
gl.enable(this.glConst);
this._useCurrent();
}
}
}
/**
* Setup renderer to use the current mask data.
* @private
*/
protected _useCurrent(): void
|
/**
* Destroys the mask stack.
*
*/
destroy(): void
{
super.destroy();
this.maskStack = null;
}
}
| {
// OVERWRITE;
} | identifier_body |
AbstractMaskSystem.ts | import { System } from '../System';
import type { MaskData } from './MaskData';
import type { Renderer } from '../Renderer';
/**
* System plugin to the renderer to manage masks of certain type
*
* @class
* @extends PIXI.System
* @memberof PIXI.systems
*/
export class AbstractMaskSystem extends System
{
protected maskStack: Array<MaskData>;
protected glConst: number;
/**
* @param {PIXI.Renderer} renderer - The renderer this System works for. | */
constructor(renderer: Renderer)
{
super(renderer);
/**
* The mask stack
* @member {PIXI.MaskData[]}
*/
this.maskStack = [];
/**
* Constant for gl.enable
* @member {number}
* @private
*/
this.glConst = 0;
}
/**
* gets count of masks of certain type
* @returns {number}
*/
getStackLength(): number
{
return this.maskStack.length;
}
/**
* Changes the mask stack that is used by this System.
*
* @param {PIXI.MaskData[]} maskStack - The mask stack
*/
setMaskStack(maskStack: Array<MaskData>): void
{
const { gl } = this.renderer;
const curStackLen = this.getStackLength();
this.maskStack = maskStack;
const newStackLen = this.getStackLength();
if (newStackLen !== curStackLen)
{
if (newStackLen === 0)
{
gl.disable(this.glConst);
}
else
{
gl.enable(this.glConst);
this._useCurrent();
}
}
}
/**
* Setup renderer to use the current mask data.
* @private
*/
protected _useCurrent(): void
{
// OVERWRITE;
}
/**
* Destroys the mask stack.
*
*/
destroy(): void
{
super.destroy();
this.maskStack = null;
}
} | random_line_split |
|
mod.rs | mod env;
mod model;
mod uploader;
use async_trait::async_trait;
use http::Uri;
use model::endpoint::Endpoint;
use opentelemetry::sdk::resource::ResourceDetector;
use opentelemetry::sdk::resource::SdkProvidedResourceDetector;
use opentelemetry::sdk::trace::Config;
use opentelemetry::sdk::Resource;
use opentelemetry::{
global, sdk,
sdk::export::{trace, ExportError},
sdk::trace::TraceRuntime,
trace::{TraceError, TracerProvider},
KeyValue,
};
use opentelemetry_http::HttpClient;
use opentelemetry_semantic_conventions as semcov;
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
/// Zipkin span exporter
#[derive(Debug)]
pub struct Exporter {
local_endpoint: Endpoint,
uploader: uploader::Uploader,
}
impl Exporter {
fn new(local_endpoint: Endpoint, client: Box<dyn HttpClient>, collector_endpoint: Uri) -> Self {
Exporter {
local_endpoint,
uploader: uploader::Uploader::new(client, collector_endpoint),
}
}
}
/// Create a new Zipkin exporter pipeline builder.
pub fn new_pipeline() -> ZipkinPipelineBuilder {
ZipkinPipelineBuilder::default()
}
/// Builder for `ExporterConfig` struct.
#[derive(Debug)]
pub struct ZipkinPipelineBuilder {
service_name: Option<String>,
service_addr: Option<SocketAddr>,
collector_endpoint: String,
trace_config: Option<sdk::trace::Config>,
client: Option<Box<dyn HttpClient>>,
}
impl Default for ZipkinPipelineBuilder {
fn default() -> Self {
let timeout = env::get_timeout();
ZipkinPipelineBuilder {
#[cfg(feature = "reqwest-blocking-client")]
client: Some(Box::new(
reqwest::blocking::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::blocking::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-blocking-client"),
not(feature = "surf-client"),
feature = "reqwest-client"
))]
client: Some(Box::new(
reqwest::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
client: Some(Box::new(
surf::Client::try_from(surf::Config::new().set_timeout(Some(timeout)))
.unwrap_or_else(|_| surf::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "surf-client"),
not(feature = "reqwest-blocking-client")
))]
client: None,
service_name: None,
service_addr: None,
collector_endpoint: env::get_endpoint(),
trace_config: None,
}
}
}
impl ZipkinPipelineBuilder {
/// Initial a Zipkin span exporter.
///
/// Returns error if the endpoint is not valid or if no http client is provided.
pub fn init_exporter(mut self) -> Result<Exporter, TraceError> {
let (_, endpoint) = self.init_config_and_endpoint();
self.init_exporter_with_endpoint(endpoint)
}
fn init_config_and_endpoint(&mut self) -> (Config, Endpoint) {
let service_name = self.service_name.take();
if let Some(service_name) = service_name {
let config = if let Some(mut cfg) = self.trace_config.take() {
cfg.resource = cfg.resource.map(|r| {
let without_service_name = r
.iter()
.filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
.map(|(k, v)| KeyValue::new(k.clone(), v.clone()))
.collect::<Vec<KeyValue>>();
Arc::new(Resource::new(without_service_name))
});
cfg
} else {
Config {
resource: Some(Arc::new(Resource::empty())),
..Default::default()
}
};
(config, Endpoint::new(service_name, self.service_addr))
} else {
let service_name = SdkProvidedResourceDetector
.detect(Duration::from_secs(0))
.get(semcov::resource::SERVICE_NAME)
.unwrap()
.to_string();
(
Config {
// use a empty resource to prevent TracerProvider to assign a service name.
resource: Some(Arc::new(Resource::empty())),
..Default::default()
},
Endpoint::new(service_name, self.service_addr),
)
}
}
fn init_exporter_with_endpoint(self, endpoint: Endpoint) -> Result<Exporter, TraceError> {
if let Some(client) = self.client {
let exporter = Exporter::new(
endpoint,
client,
self.collector_endpoint
.parse()
.map_err::<Error, _>(Into::into)?,
);
Ok(exporter)
} else {
Err(Error::NoHttpClient.into())
}
}
/// Install the Zipkin trace exporter pipeline with a simple span processor.
pub fn install_simple(mut self) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_simple_exporter(exporter);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Install the Zipkin trace exporter pipeline with a batch span processor using the specified
/// runtime.
pub fn install_batch<R: TraceRuntime>(
mut self,
runtime: R,
) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_batch_exporter(exporter, runtime);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
|
/// Assign client implementation
pub fn with_http_client<T: HttpClient + 'static>(mut self, client: T) -> Self {
self.client = Some(Box::new(client));
self
}
/// Assign the service name under which to group traces.
pub fn with_service_address(mut self, addr: SocketAddr) -> Self {
self.service_addr = Some(addr);
self
}
/// Assign the Zipkin collector endpoint
pub fn with_collector_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
self.collector_endpoint = endpoint.into();
self
}
/// Assign the SDK trace configuration.
pub fn with_trace_config(mut self, config: sdk::trace::Config) -> Self {
self.trace_config = Some(config);
self
}
}
#[async_trait]
impl trace::SpanExporter for Exporter {
/// Export spans to Zipkin collector.
async fn export(&mut self, batch: Vec<trace::SpanData>) -> trace::ExportResult {
let zipkin_spans = batch
.into_iter()
.map(|span| model::into_zipkin_span(self.local_endpoint.clone(), span))
.collect();
self.uploader.upload(zipkin_spans).await
}
}
/// Wrap type for errors from opentelemetry zipkin
#[derive(thiserror::Error, Debug)]
#[non_exhaustive]
pub enum Error {
/// No http client implementation found. User should provide one or enable features.
#[error("http client must be set, users can enable reqwest or surf feature to use http client implementation within create")]
NoHttpClient,
/// Http requests failed
#[error("http request failed with {0}")]
RequestFailed(#[from] http::Error),
/// The uri provided is invalid
#[error("invalid uri")]
InvalidUri(#[from] http::uri::InvalidUri),
/// Other errors
#[error("export error: {0}")]
Other(String),
}
impl ExportError for Error {
fn exporter_name(&self) -> &'static str {
"zipkin"
}
} | /// Assign the service name under which to group traces.
pub fn with_service_name<T: Into<String>>(mut self, name: T) -> Self {
self.service_name = Some(name.into());
self
} | random_line_split |
mod.rs | mod env;
mod model;
mod uploader;
use async_trait::async_trait;
use http::Uri;
use model::endpoint::Endpoint;
use opentelemetry::sdk::resource::ResourceDetector;
use opentelemetry::sdk::resource::SdkProvidedResourceDetector;
use opentelemetry::sdk::trace::Config;
use opentelemetry::sdk::Resource;
use opentelemetry::{
global, sdk,
sdk::export::{trace, ExportError},
sdk::trace::TraceRuntime,
trace::{TraceError, TracerProvider},
KeyValue,
};
use opentelemetry_http::HttpClient;
use opentelemetry_semantic_conventions as semcov;
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
/// Zipkin span exporter
#[derive(Debug)]
pub struct Exporter {
local_endpoint: Endpoint,
uploader: uploader::Uploader,
}
impl Exporter {
fn | (local_endpoint: Endpoint, client: Box<dyn HttpClient>, collector_endpoint: Uri) -> Self {
Exporter {
local_endpoint,
uploader: uploader::Uploader::new(client, collector_endpoint),
}
}
}
/// Create a new Zipkin exporter pipeline builder.
pub fn new_pipeline() -> ZipkinPipelineBuilder {
ZipkinPipelineBuilder::default()
}
/// Builder for `ExporterConfig` struct.
#[derive(Debug)]
pub struct ZipkinPipelineBuilder {
service_name: Option<String>,
service_addr: Option<SocketAddr>,
collector_endpoint: String,
trace_config: Option<sdk::trace::Config>,
client: Option<Box<dyn HttpClient>>,
}
impl Default for ZipkinPipelineBuilder {
fn default() -> Self {
let timeout = env::get_timeout();
ZipkinPipelineBuilder {
#[cfg(feature = "reqwest-blocking-client")]
client: Some(Box::new(
reqwest::blocking::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::blocking::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-blocking-client"),
not(feature = "surf-client"),
feature = "reqwest-client"
))]
client: Some(Box::new(
reqwest::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
client: Some(Box::new(
surf::Client::try_from(surf::Config::new().set_timeout(Some(timeout)))
.unwrap_or_else(|_| surf::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "surf-client"),
not(feature = "reqwest-blocking-client")
))]
client: None,
service_name: None,
service_addr: None,
collector_endpoint: env::get_endpoint(),
trace_config: None,
}
}
}
impl ZipkinPipelineBuilder {
/// Initial a Zipkin span exporter.
///
/// Returns error if the endpoint is not valid or if no http client is provided.
pub fn init_exporter(mut self) -> Result<Exporter, TraceError> {
let (_, endpoint) = self.init_config_and_endpoint();
self.init_exporter_with_endpoint(endpoint)
}
fn init_config_and_endpoint(&mut self) -> (Config, Endpoint) {
let service_name = self.service_name.take();
if let Some(service_name) = service_name {
let config = if let Some(mut cfg) = self.trace_config.take() {
cfg.resource = cfg.resource.map(|r| {
let without_service_name = r
.iter()
.filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
.map(|(k, v)| KeyValue::new(k.clone(), v.clone()))
.collect::<Vec<KeyValue>>();
Arc::new(Resource::new(without_service_name))
});
cfg
} else {
Config {
resource: Some(Arc::new(Resource::empty())),
..Default::default()
}
};
(config, Endpoint::new(service_name, self.service_addr))
} else {
let service_name = SdkProvidedResourceDetector
.detect(Duration::from_secs(0))
.get(semcov::resource::SERVICE_NAME)
.unwrap()
.to_string();
(
Config {
// use a empty resource to prevent TracerProvider to assign a service name.
resource: Some(Arc::new(Resource::empty())),
..Default::default()
},
Endpoint::new(service_name, self.service_addr),
)
}
}
fn init_exporter_with_endpoint(self, endpoint: Endpoint) -> Result<Exporter, TraceError> {
if let Some(client) = self.client {
let exporter = Exporter::new(
endpoint,
client,
self.collector_endpoint
.parse()
.map_err::<Error, _>(Into::into)?,
);
Ok(exporter)
} else {
Err(Error::NoHttpClient.into())
}
}
/// Install the Zipkin trace exporter pipeline with a simple span processor.
pub fn install_simple(mut self) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_simple_exporter(exporter);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Install the Zipkin trace exporter pipeline with a batch span processor using the specified
/// runtime.
pub fn install_batch<R: TraceRuntime>(
mut self,
runtime: R,
) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_batch_exporter(exporter, runtime);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Assign the service name under which to group traces.
pub fn with_service_name<T: Into<String>>(mut self, name: T) -> Self {
self.service_name = Some(name.into());
self
}
/// Assign the HTTP client implementation.
pub fn with_http_client<T: HttpClient + 'static>(mut self, client: T) -> Self {
self.client = Some(Box::new(client));
self
}
/// Assign the local service address reported to the Zipkin collector.
pub fn with_service_address(mut self, addr: SocketAddr) -> Self {
self.service_addr = Some(addr);
self
}
/// Assign the Zipkin collector endpoint
pub fn with_collector_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
self.collector_endpoint = endpoint.into();
self
}
/// Assign the SDK trace configuration.
pub fn with_trace_config(mut self, config: sdk::trace::Config) -> Self {
self.trace_config = Some(config);
self
}
}
#[async_trait]
impl trace::SpanExporter for Exporter {
/// Export spans to Zipkin collector.
async fn export(&mut self, batch: Vec<trace::SpanData>) -> trace::ExportResult {
let zipkin_spans = batch
.into_iter()
.map(|span| model::into_zipkin_span(self.local_endpoint.clone(), span))
.collect();
self.uploader.upload(zipkin_spans).await
}
}
/// Wrapper type for errors from opentelemetry zipkin
#[derive(thiserror::Error, Debug)]
#[non_exhaustive]
pub enum Error {
/// No http client implementation found. User should provide one or enable features.
#[error("http client must be set, users can enable reqwest or surf feature to use http client implementation within create")]
NoHttpClient,
/// Http requests failed
#[error("http request failed with {0}")]
RequestFailed(#[from] http::Error),
/// The uri provided is invalid
#[error("invalid uri")]
InvalidUri(#[from] http::uri::InvalidUri),
/// Other errors
#[error("export error: {0}")]
Other(String),
}
impl ExportError for Error {
fn exporter_name(&self) -> &'static str {
"zipkin"
}
}
| new | identifier_name |
mod.rs | mod env;
mod model;
mod uploader;
use async_trait::async_trait;
use http::Uri;
use model::endpoint::Endpoint;
use opentelemetry::sdk::resource::ResourceDetector;
use opentelemetry::sdk::resource::SdkProvidedResourceDetector;
use opentelemetry::sdk::trace::Config;
use opentelemetry::sdk::Resource;
use opentelemetry::{
global, sdk,
sdk::export::{trace, ExportError},
sdk::trace::TraceRuntime,
trace::{TraceError, TracerProvider},
KeyValue,
};
use opentelemetry_http::HttpClient;
use opentelemetry_semantic_conventions as semcov;
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
/// Zipkin span exporter
#[derive(Debug)]
pub struct Exporter {
local_endpoint: Endpoint,
uploader: uploader::Uploader,
}
impl Exporter {
fn new(local_endpoint: Endpoint, client: Box<dyn HttpClient>, collector_endpoint: Uri) -> Self {
Exporter {
local_endpoint,
uploader: uploader::Uploader::new(client, collector_endpoint),
}
}
}
/// Create a new Zipkin exporter pipeline builder.
pub fn new_pipeline() -> ZipkinPipelineBuilder {
ZipkinPipelineBuilder::default()
}
/// Builder for the Zipkin exporter pipeline.
#[derive(Debug)]
pub struct ZipkinPipelineBuilder {
service_name: Option<String>,
service_addr: Option<SocketAddr>,
collector_endpoint: String,
trace_config: Option<sdk::trace::Config>,
client: Option<Box<dyn HttpClient>>,
}
impl Default for ZipkinPipelineBuilder {
fn default() -> Self {
let timeout = env::get_timeout();
ZipkinPipelineBuilder {
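// The default HTTP client is chosen from the enabled client features:
// `reqwest-blocking-client` wins over the async clients, and when no
// client feature is enabled the field stays `None` until
// `with_http_client` supplies one.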
#[cfg(feature = "reqwest-blocking-client")]
client: Some(Box::new(
reqwest::blocking::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::blocking::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-blocking-client"),
not(feature = "surf-client"),
feature = "reqwest-client"
))]
client: Some(Box::new(
reqwest::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
client: Some(Box::new(
surf::Client::try_from(surf::Config::new().set_timeout(Some(timeout)))
.unwrap_or_else(|_| surf::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "surf-client"),
not(feature = "reqwest-blocking-client")
))]
client: None,
service_name: None,
service_addr: None,
collector_endpoint: env::get_endpoint(),
trace_config: None,
}
}
}
impl ZipkinPipelineBuilder {
/// Initialize a Zipkin span exporter.
///
/// Returns an error if the endpoint is not valid or if no http client is provided.
pub fn init_exporter(mut self) -> Result<Exporter, TraceError> {
let (_, endpoint) = self.init_config_and_endpoint();
self.init_exporter_with_endpoint(endpoint)
}
fn init_config_and_endpoint(&mut self) -> (Config, Endpoint) {
let service_name = self.service_name.take();
if let Some(service_name) = service_name | else {
let service_name = SdkProvidedResourceDetector
.detect(Duration::from_secs(0))
.get(semcov::resource::SERVICE_NAME)
.unwrap()
.to_string();
(
Config {
// use an empty resource to prevent the TracerProvider from assigning a service name.
resource: Some(Arc::new(Resource::empty())),
..Default::default()
},
Endpoint::new(service_name, self.service_addr),
)
}
}
fn init_exporter_with_endpoint(self, endpoint: Endpoint) -> Result<Exporter, TraceError> {
if let Some(client) = self.client {
let exporter = Exporter::new(
endpoint,
client,
self.collector_endpoint
.parse()
.map_err::<Error, _>(Into::into)?,
);
Ok(exporter)
} else {
Err(Error::NoHttpClient.into())
}
}
/// Install the Zipkin trace exporter pipeline with a simple span processor.
pub fn install_simple(mut self) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_simple_exporter(exporter);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Install the Zipkin trace exporter pipeline with a batch span processor using the specified
/// runtime.
pub fn install_batch<R: TraceRuntime>(
mut self,
runtime: R,
) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_batch_exporter(exporter, runtime);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Assign the service name under which to group traces.
pub fn with_service_name<T: Into<String>>(mut self, name: T) -> Self {
self.service_name = Some(name.into());
self
}
/// Assign the HTTP client implementation.
pub fn with_http_client<T: HttpClient + 'static>(mut self, client: T) -> Self {
self.client = Some(Box::new(client));
self
}
/// Assign the local service address reported to the Zipkin collector.
pub fn with_service_address(mut self, addr: SocketAddr) -> Self {
self.service_addr = Some(addr);
self
}
/// Assign the Zipkin collector endpoint
pub fn with_collector_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
self.collector_endpoint = endpoint.into();
self
}
/// Assign the SDK trace configuration.
pub fn with_trace_config(mut self, config: sdk::trace::Config) -> Self {
self.trace_config = Some(config);
self
}
}
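// Hedged usage sketch for the builder above; the service name and collector
// endpoint are illustrative assumptions, not values defined in this module.
#[allow(dead_code)]
fn zipkin_usage_sketch() -> Result<sdk::trace::Tracer, TraceError> {
    // Builds a tracer with a simple (synchronous) span processor; swap in
    // `install_batch(runtime)` for batching, or call `init_exporter()` to get
    // just the `Exporter` and assemble the pipeline manually.
    new_pipeline()
        .with_service_name("my-service")
        .with_collector_endpoint("http://localhost:9411/api/v2/spans")
        .install_simple()
}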
#[async_trait]
impl trace::SpanExporter for Exporter {
/// Export spans to Zipkin collector.
async fn export(&mut self, batch: Vec<trace::SpanData>) -> trace::ExportResult {
let zipkin_spans = batch
.into_iter()
.map(|span| model::into_zipkin_span(self.local_endpoint.clone(), span))
.collect();
self.uploader.upload(zipkin_spans).await
}
}
/// Wrapper type for errors from opentelemetry zipkin
#[derive(thiserror::Error, Debug)]
#[non_exhaustive]
pub enum Error {
/// No http client implementation found. User should provide one or enable features.
#[error("http client must be set, users can enable reqwest or surf feature to use http client implementation within create")]
NoHttpClient,
/// Http requests failed
#[error("http request failed with {0}")]
RequestFailed(#[from] http::Error),
/// The uri provided is invalid
#[error("invalid uri")]
InvalidUri(#[from] http::uri::InvalidUri),
/// Other errors
#[error("export error: {0}")]
Other(String),
}
impl ExportError for Error {
fn exporter_name(&self) -> &'static str {
"zipkin"
}
}
| {
let config = if let Some(mut cfg) = self.trace_config.take() {
cfg.resource = cfg.resource.map(|r| {
let without_service_name = r
.iter()
.filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
.map(|(k, v)| KeyValue::new(k.clone(), v.clone()))
.collect::<Vec<KeyValue>>();
Arc::new(Resource::new(without_service_name))
});
cfg
} else {
Config {
resource: Some(Arc::new(Resource::empty())),
..Default::default()
}
};
(config, Endpoint::new(service_name, self.service_addr))
} | conditional_block |
mod.rs | mod env;
mod model;
mod uploader;
use async_trait::async_trait;
use http::Uri;
use model::endpoint::Endpoint;
use opentelemetry::sdk::resource::ResourceDetector;
use opentelemetry::sdk::resource::SdkProvidedResourceDetector;
use opentelemetry::sdk::trace::Config;
use opentelemetry::sdk::Resource;
use opentelemetry::{
global, sdk,
sdk::export::{trace, ExportError},
sdk::trace::TraceRuntime,
trace::{TraceError, TracerProvider},
KeyValue,
};
use opentelemetry_http::HttpClient;
use opentelemetry_semantic_conventions as semcov;
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
use std::convert::TryFrom;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
/// Zipkin span exporter
#[derive(Debug)]
pub struct Exporter {
local_endpoint: Endpoint,
uploader: uploader::Uploader,
}
impl Exporter {
fn new(local_endpoint: Endpoint, client: Box<dyn HttpClient>, collector_endpoint: Uri) -> Self |
}
/// Create a new Zipkin exporter pipeline builder.
pub fn new_pipeline() -> ZipkinPipelineBuilder {
ZipkinPipelineBuilder::default()
}
/// Builder for the Zipkin exporter pipeline.
#[derive(Debug)]
pub struct ZipkinPipelineBuilder {
service_name: Option<String>,
service_addr: Option<SocketAddr>,
collector_endpoint: String,
trace_config: Option<sdk::trace::Config>,
client: Option<Box<dyn HttpClient>>,
}
impl Default for ZipkinPipelineBuilder {
fn default() -> Self {
let timeout = env::get_timeout();
ZipkinPipelineBuilder {
#[cfg(feature = "reqwest-blocking-client")]
client: Some(Box::new(
reqwest::blocking::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::blocking::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-blocking-client"),
not(feature = "surf-client"),
feature = "reqwest-client"
))]
client: Some(Box::new(
reqwest::Client::builder()
.timeout(timeout)
.build()
.unwrap_or_else(|_| reqwest::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "reqwest-blocking-client"),
feature = "surf-client"
))]
client: Some(Box::new(
surf::Client::try_from(surf::Config::new().set_timeout(Some(timeout)))
.unwrap_or_else(|_| surf::Client::new()),
)),
#[cfg(all(
not(feature = "reqwest-client"),
not(feature = "surf-client"),
not(feature = "reqwest-blocking-client")
))]
client: None,
service_name: None,
service_addr: None,
collector_endpoint: env::get_endpoint(),
trace_config: None,
}
}
}
impl ZipkinPipelineBuilder {
/// Initialize a Zipkin span exporter.
///
/// Returns an error if the endpoint is not valid or if no http client is provided.
pub fn init_exporter(mut self) -> Result<Exporter, TraceError> {
let (_, endpoint) = self.init_config_and_endpoint();
self.init_exporter_with_endpoint(endpoint)
}
fn init_config_and_endpoint(&mut self) -> (Config, Endpoint) {
let service_name = self.service_name.take();
if let Some(service_name) = service_name {
let config = if let Some(mut cfg) = self.trace_config.take() {
cfg.resource = cfg.resource.map(|r| {
let without_service_name = r
.iter()
.filter(|(k, _v)| **k != semcov::resource::SERVICE_NAME)
.map(|(k, v)| KeyValue::new(k.clone(), v.clone()))
.collect::<Vec<KeyValue>>();
Arc::new(Resource::new(without_service_name))
});
cfg
} else {
Config {
resource: Some(Arc::new(Resource::empty())),
..Default::default()
}
};
(config, Endpoint::new(service_name, self.service_addr))
} else {
let service_name = SdkProvidedResourceDetector
.detect(Duration::from_secs(0))
.get(semcov::resource::SERVICE_NAME)
.unwrap()
.to_string();
(
Config {
// use an empty resource to prevent the TracerProvider from assigning a service name.
resource: Some(Arc::new(Resource::empty())),
..Default::default()
},
Endpoint::new(service_name, self.service_addr),
)
}
}
fn init_exporter_with_endpoint(self, endpoint: Endpoint) -> Result<Exporter, TraceError> {
if let Some(client) = self.client {
let exporter = Exporter::new(
endpoint,
client,
self.collector_endpoint
.parse()
.map_err::<Error, _>(Into::into)?,
);
Ok(exporter)
} else {
Err(Error::NoHttpClient.into())
}
}
/// Install the Zipkin trace exporter pipeline with a simple span processor.
pub fn install_simple(mut self) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_simple_exporter(exporter);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Install the Zipkin trace exporter pipeline with a batch span processor using the specified
/// runtime.
pub fn install_batch<R: TraceRuntime>(
mut self,
runtime: R,
) -> Result<sdk::trace::Tracer, TraceError> {
let (config, endpoint) = self.init_config_and_endpoint();
let exporter = self.init_exporter_with_endpoint(endpoint)?;
let mut provider_builder =
sdk::trace::TracerProvider::builder().with_batch_exporter(exporter, runtime);
provider_builder = provider_builder.with_config(config);
let provider = provider_builder.build();
let tracer = provider.versioned_tracer(
"opentelemetry-zipkin",
Some(env!("CARGO_PKG_VERSION")),
None,
);
let _ = global::set_tracer_provider(provider);
Ok(tracer)
}
/// Assign the service name under which to group traces.
pub fn with_service_name<T: Into<String>>(mut self, name: T) -> Self {
self.service_name = Some(name.into());
self
}
/// Assign the HTTP client implementation.
pub fn with_http_client<T: HttpClient + 'static>(mut self, client: T) -> Self {
self.client = Some(Box::new(client));
self
}
/// Assign the local service address reported to the Zipkin collector.
pub fn with_service_address(mut self, addr: SocketAddr) -> Self {
self.service_addr = Some(addr);
self
}
/// Assign the Zipkin collector endpoint
pub fn with_collector_endpoint<T: Into<String>>(mut self, endpoint: T) -> Self {
self.collector_endpoint = endpoint.into();
self
}
/// Assign the SDK trace configuration.
pub fn with_trace_config(mut self, config: sdk::trace::Config) -> Self {
self.trace_config = Some(config);
self
}
}
#[async_trait]
impl trace::SpanExporter for Exporter {
/// Export spans to Zipkin collector.
async fn export(&mut self, batch: Vec<trace::SpanData>) -> trace::ExportResult {
let zipkin_spans = batch
.into_iter()
.map(|span| model::into_zipkin_span(self.local_endpoint.clone(), span))
.collect();
self.uploader.upload(zipkin_spans).await
}
}
/// Wrapper type for errors from opentelemetry zipkin
#[derive(thiserror::Error, Debug)]
#[non_exhaustive]
pub enum Error {
/// No http client implementation found. User should provide one or enable features.
#[error("http client must be set, users can enable reqwest or surf feature to use http client implementation within create")]
NoHttpClient,
/// Http requests failed
#[error("http request failed with {0}")]
RequestFailed(#[from] http::Error),
/// The uri provided is invalid
#[error("invalid uri")]
InvalidUri(#[from] http::uri::InvalidUri),
/// Other errors
#[error("export error: {0}")]
Other(String),
}
impl ExportError for Error {
fn exporter_name(&self) -> &'static str {
"zipkin"
}
}
| {
Exporter {
local_endpoint,
uploader: uploader::Uploader::new(client, collector_endpoint),
}
} | identifier_body |
instance_metadata.rs | //! The Credentials Provider for an AWS Resource's IAM Role.
use async_trait::async_trait;
use hyper::Uri;
use std::time::Duration;
use crate::request::HttpClient;
use crate::{
parse_credentials_from_aws_service, AwsCredentials, CredentialsError, ProvideAwsCredentials,
};
const AWS_CREDENTIALS_PROVIDER_IP: &str = "169.254.169.254";
const AWS_CREDENTIALS_PROVIDER_PATH: &str = "latest/meta-data/iam/security-credentials";
/// Provides AWS credentials from a resource's IAM role.
///
/// The provider has a default timeout of 30 seconds. While it should work well for most setups,
/// you can change the timeout using the `set_timeout` method.
///
/// # Examples
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default timeout like this:
/// provider.set_timeout(Duration::from_secs(60));
/// ```
///
/// The source location can be changed from the default of 169.254.169.254:
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default endpoint like this:
/// provider.set_ip_addr_with_port("127.0.0.1", "8080");
/// ```
#[derive(Clone, Debug)]
pub struct InstanceMetadataProvider {
client: HttpClient,
timeout: Duration,
metadata_ip_addr: String,
}
impl InstanceMetadataProvider {
/// Create a new provider with the default timeout and metadata address.
pub fn new() -> Self {
InstanceMetadataProvider {
client: HttpClient::new(),
timeout: Duration::from_secs(30),
metadata_ip_addr: AWS_CREDENTIALS_PROVIDER_IP.to_string(),
}
}
/// Set the timeout on the provider to the specified duration.
pub fn set_timeout(&mut self, timeout: Duration) {
self.timeout = timeout;
}
/// Allow overriding host and port of instance metadata service.
pub fn set_ip_addr_with_port(&mut self, ip: &str, port: &str) {
self.metadata_ip_addr = format!("{}:{}", ip, port);
}
}
impl Default for InstanceMetadataProvider {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl ProvideAwsCredentials for InstanceMetadataProvider {
async fn | (&self) -> Result<AwsCredentials, CredentialsError> {
let role_name = get_role_name(&self.client, self.timeout, &self.metadata_ip_addr)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
let cred_str = get_credentials_from_role(
&self.client,
self.timeout,
&role_name,
&self.metadata_ip_addr,
)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
parse_credentials_from_aws_service(&cred_str)
}
}
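// Hedged usage sketch for the provider above; the five-second timeout and the
// surrounding async runtime are assumptions made for illustration.
#[allow(dead_code)]
async fn instance_metadata_usage_sketch() -> Result<AwsCredentials, CredentialsError> {
    let mut provider = InstanceMetadataProvider::new();
    provider.set_timeout(Duration::from_secs(5));
    // Performs the two metadata requests defined below (role name, then that
    // role's temporary credentials) and parses the JSON response.
    provider.credentials().await
}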
/// Gets the role name to get credentials for using the IAM Metadata Service (169.254.169.254).
async fn get_role_name(
client: &HttpClient,
timeout: Duration,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let role_name_address = format!("http://{}/{}/", ip_addr, AWS_CREDENTIALS_PROVIDER_PATH);
let uri = match role_name_address.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
}
/// Gets the credentials for an EC2 instance's IAM role.
async fn get_credentials_from_role(
client: &HttpClient,
timeout: Duration,
role_name: &str,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let credentials_provider_url = format!(
"http://{}/{}/{}",
ip_addr, AWS_CREDENTIALS_PROVIDER_PATH, role_name
);
let uri = match credentials_provider_url.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
}
| credentials | identifier_name |
instance_metadata.rs | //! The Credentials Provider for an AWS Resource's IAM Role.
use async_trait::async_trait;
use hyper::Uri;
use std::time::Duration;
use crate::request::HttpClient;
use crate::{
parse_credentials_from_aws_service, AwsCredentials, CredentialsError, ProvideAwsCredentials,
};
const AWS_CREDENTIALS_PROVIDER_IP: &str = "169.254.169.254";
const AWS_CREDENTIALS_PROVIDER_PATH: &str = "latest/meta-data/iam/security-credentials";
/// Provides AWS credentials from a resource's IAM role.
///
/// The provider has a default timeout of 30 seconds. While it should work well for most setups,
/// you can change the timeout using the `set_timeout` method.
///
/// # Examples
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default timeout like this:
/// provider.set_timeout(Duration::from_secs(60));
/// ```
///
/// The source location can be changed from the default of 169.254.169.254:
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default endpoint like this:
/// provider.set_ip_addr_with_port("127.0.0.1", "8080");
/// ```
#[derive(Clone, Debug)]
pub struct InstanceMetadataProvider {
client: HttpClient,
timeout: Duration,
metadata_ip_addr: String,
}
impl InstanceMetadataProvider {
/// Create a new provider with the default timeout and metadata address.
pub fn new() -> Self {
InstanceMetadataProvider {
client: HttpClient::new(),
timeout: Duration::from_secs(30),
metadata_ip_addr: AWS_CREDENTIALS_PROVIDER_IP.to_string(),
}
}
/// Set the timeout on the provider to the specified duration.
pub fn set_timeout(&mut self, timeout: Duration) {
self.timeout = timeout;
}
/// Allow overriding host and port of instance metadata service.
pub fn set_ip_addr_with_port(&mut self, ip: &str, port: &str) {
self.metadata_ip_addr = format!("{}:{}", ip, port);
}
}
impl Default for InstanceMetadataProvider {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl ProvideAwsCredentials for InstanceMetadataProvider {
async fn credentials(&self) -> Result<AwsCredentials, CredentialsError> |
}
/// Gets the role name to get credentials for using the IAM Metadata Service (169.254.169.254).
async fn get_role_name(
client: &HttpClient,
timeout: Duration,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let role_name_address = format!("http://{}/{}/", ip_addr, AWS_CREDENTIALS_PROVIDER_PATH);
let uri = match role_name_address.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
}
/// Gets the credentials for an EC2 instance's IAM role.
async fn get_credentials_from_role(
client: &HttpClient,
timeout: Duration,
role_name: &str,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let credentials_provider_url = format!(
"http://{}/{}/{}",
ip_addr, AWS_CREDENTIALS_PROVIDER_PATH, role_name
);
let uri = match credentials_provider_url.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
}
| {
let role_name = get_role_name(&self.client, self.timeout, &self.metadata_ip_addr)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
let cred_str = get_credentials_from_role(
&self.client,
self.timeout,
&role_name,
&self.metadata_ip_addr,
)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
parse_credentials_from_aws_service(&cred_str)
} | identifier_body |
instance_metadata.rs | //! The Credentials Provider for an AWS Resource's IAM Role.
use async_trait::async_trait;
use hyper::Uri;
use std::time::Duration;
use crate::request::HttpClient;
use crate::{
parse_credentials_from_aws_service, AwsCredentials, CredentialsError, ProvideAwsCredentials,
};
const AWS_CREDENTIALS_PROVIDER_IP: &str = "169.254.169.254";
const AWS_CREDENTIALS_PROVIDER_PATH: &str = "latest/meta-data/iam/security-credentials";
/// Provides AWS credentials from a resource's IAM role.
///
/// The provider has a default timeout of 30 seconds. While it should work well for most setups,
/// you can change the timeout using the `set_timeout` method.
///
/// # Examples
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default timeout like this:
/// provider.set_timeout(Duration::from_secs(60));
/// ```
///
/// The source location can be changed from the default of 169.254.169.254:
///
/// ```rust
/// use std::time::Duration;
///
/// use rusoto_credential::InstanceMetadataProvider;
///
/// let mut provider = InstanceMetadataProvider::new();
/// // you can overwrite the default endpoint like this:
/// provider.set_ip_addr_with_port("127.0.0.1", "8080");
/// ```
#[derive(Clone, Debug)]
pub struct InstanceMetadataProvider {
client: HttpClient,
timeout: Duration,
metadata_ip_addr: String,
}
impl InstanceMetadataProvider {
/// Create a new provider with the default timeout and metadata address.
pub fn new() -> Self {
InstanceMetadataProvider {
client: HttpClient::new(),
timeout: Duration::from_secs(30),
metadata_ip_addr: AWS_CREDENTIALS_PROVIDER_IP.to_string(),
}
}
/// Set the timeout on the provider to the specified duration.
pub fn set_timeout(&mut self, timeout: Duration) { | self.metadata_ip_addr = format!("{}:{}", ip, port);
}
}
impl Default for InstanceMetadataProvider {
fn default() -> Self {
Self::new()
}
}
#[async_trait]
impl ProvideAwsCredentials for InstanceMetadataProvider {
async fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let role_name = get_role_name(&self.client, self.timeout, &self.metadata_ip_addr)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
let cred_str = get_credentials_from_role(
&self.client,
self.timeout,
&role_name,
&self.metadata_ip_addr,
)
.await
.map_err(|err| CredentialsError {
message: format!("Could not get credentials from iam: {}", err.to_string()),
})?;
parse_credentials_from_aws_service(&cred_str)
}
}
/// Gets the role name to get credentials for using the IAM Metadata Service (169.254.169.254).
async fn get_role_name(
client: &HttpClient,
timeout: Duration,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let role_name_address = format!("http://{}/{}/", ip_addr, AWS_CREDENTIALS_PROVIDER_PATH);
let uri = match role_name_address.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
}
/// Gets the credentials for an EC2 instance's IAM role.
async fn get_credentials_from_role(
client: &HttpClient,
timeout: Duration,
role_name: &str,
ip_addr: &str,
) -> Result<String, CredentialsError> {
let credentials_provider_url = format!(
"http://{}/{}/{}",
ip_addr, AWS_CREDENTIALS_PROVIDER_PATH, role_name
);
let uri = match credentials_provider_url.parse::<Uri>() {
Ok(u) => u,
Err(e) => return Err(CredentialsError::new(e)),
};
Ok(client.get(uri, timeout).await?)
} | self.timeout = timeout;
}
/// Allow overriding host and port of instance metadata service.
pub fn set_ip_addr_with_port(&mut self, ip: &str, port: &str) { | random_line_split |
test_cargo_profiles.rs | use std::env;
use std::path::MAIN_SEPARATOR as SEP;
use support::{project, execs};
use support::{COMPILING, RUNNING};
use hamcrest::assert_that;
fn setup() |
test!(profile_overrides {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.dev]
opt-level = 1
debug = false
rpath = true
"#)
.file("src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(0).with_stdout(&format!("\
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \
-C debug-assertions=on \
-C metadata=[..] \
-C extra-filename=-[..] \
-C rpath \
--out-dir {dir}{sep}target{sep}debug \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}debug \
-L dependency={dir}{sep}target{sep}debug{sep}deps`
",
running = RUNNING, compiling = COMPILING, sep = SEP,
dir = p.root().display(),
url = p.url(),
)));
});
test!(top_level_overrides_deps {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 1
debug = true
[dependencies.foo]
path = "foo"
"#)
.file("src/lib.rs", "")
.file("foo/Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 0
debug = false
[lib]
name = "foo"
crate_type = ["dylib", "rlib"]
"#)
.file("foo/src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v").arg("--release"),
execs().with_status(0).with_stdout(&format!("\
{compiling} foo v0.0.0 ({url})
{running} `rustc foo{sep}src{sep}lib.rs --crate-name foo \
--crate-type dylib --crate-type rlib -C prefer-dynamic \
-C opt-level=1 \
-g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release{sep}deps \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release{sep}deps \
-L dependency={dir}{sep}target{sep}release{sep}deps`
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \
-g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release \
-L dependency={dir}{sep}target{sep}release{sep}deps \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}\
{prefix}foo-[..]{suffix} \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}libfoo-[..].rlib`
",
running = RUNNING,
compiling = COMPILING,
dir = p.root().display(),
url = p.url(),
sep = SEP,
prefix = env::consts::DLL_PREFIX,
suffix = env::consts::DLL_SUFFIX)));
});
| {
} | identifier_body |
test_cargo_profiles.rs | use std::env;
use std::path::MAIN_SEPARATOR as SEP;
use support::{project, execs};
use support::{COMPILING, RUNNING};
use hamcrest::assert_that;
fn | () {
}
test!(profile_overrides {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.dev]
opt-level = 1
debug = false
rpath = true
"#)
.file("src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(0).with_stdout(&format!("\
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \
-C debug-assertions=on \
-C metadata=[..] \
-C extra-filename=-[..] \
-C rpath \
--out-dir {dir}{sep}target{sep}debug \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}debug \
-L dependency={dir}{sep}target{sep}debug{sep}deps`
",
running = RUNNING, compiling = COMPILING, sep = SEP,
dir = p.root().display(),
url = p.url(),
)));
});
test!(top_level_overrides_deps {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 1
debug = true
[dependencies.foo]
path = "foo"
"#)
.file("src/lib.rs", "")
.file("foo/Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 0
debug = false
[lib]
name = "foo"
crate_type = ["dylib", "rlib"]
"#)
.file("foo/src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v").arg("--release"),
execs().with_status(0).with_stdout(&format!("\
{compiling} foo v0.0.0 ({url})
{running} `rustc foo{sep}src{sep}lib.rs --crate-name foo \
--crate-type dylib --crate-type rlib -C prefer-dynamic \
-C opt-level=1 \
-g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release{sep}deps \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release{sep}deps \
-L dependency={dir}{sep}target{sep}release{sep}deps`
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \
-g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release \
-L dependency={dir}{sep}target{sep}release{sep}deps \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}\
{prefix}foo-[..]{suffix} \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}libfoo-[..].rlib`
",
running = RUNNING,
compiling = COMPILING,
dir = p.root().display(),
url = p.url(),
sep = SEP,
prefix = env::consts::DLL_PREFIX,
suffix = env::consts::DLL_SUFFIX)));
});
| setup | identifier_name |
test_cargo_profiles.rs | use std::env;
use std::path::MAIN_SEPARATOR as SEP;
use support::{project, execs};
use support::{COMPILING, RUNNING};
use hamcrest::assert_that;
fn setup() {
}
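// The test below checks that [profile.dev] settings in Cargo.toml are
// translated into the expected rustc flags (opt-level, debug-assertions, rpath).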
test!(profile_overrides {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.dev]
opt-level = 1
debug = false
rpath = true
"#)
.file("src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(0).with_stdout(&format!("\
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \
-C debug-assertions=on \
-C metadata=[..] \
-C extra-filename=-[..] \
-C rpath \
--out-dir {dir}{sep}target{sep}debug \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}debug \
-L dependency={dir}{sep}target{sep}debug{sep}deps`
",
running = RUNNING, compiling = COMPILING, sep = SEP,
dir = p.root().display(),
url = p.url(),
)));
});
test!(top_level_overrides_deps {
let mut p = project("foo");
p = p
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 1
debug = true
[dependencies.foo]
path = "foo"
"#)
.file("src/lib.rs", "")
.file("foo/Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
[profile.release]
opt-level = 0
debug = false
[lib]
name = "foo"
crate_type = ["dylib", "rlib"]
"#)
.file("foo/src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v").arg("--release"),
execs().with_status(0).with_stdout(&format!("\
{compiling} foo v0.0.0 ({url})
{running} `rustc foo{sep}src{sep}lib.rs --crate-name foo \
--crate-type dylib --crate-type rlib -C prefer-dynamic \
-C opt-level=1 \
-g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release{sep}deps \
--emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release{sep}deps \
-L dependency={dir}{sep}target{sep}release{sep}deps`
{compiling} test v0.0.0 ({url})
{running} `rustc src{sep}lib.rs --crate-name test --crate-type lib \
-C opt-level=1 \ | --emit=dep-info,link \
-L dependency={dir}{sep}target{sep}release \
-L dependency={dir}{sep}target{sep}release{sep}deps \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}\
{prefix}foo-[..]{suffix} \
--extern foo={dir}{sep}target{sep}release{sep}deps{sep}libfoo-[..].rlib`
",
running = RUNNING,
compiling = COMPILING,
dir = p.root().display(),
url = p.url(),
sep = SEP,
prefix = env::consts::DLL_PREFIX,
suffix = env::consts::DLL_SUFFIX)));
}); | -g \
-C metadata=[..] \
-C extra-filename=-[..] \
--out-dir {dir}{sep}target{sep}release \ | random_line_split |
base.py | # -*- encoding: utf-8 -*-
"""
sleekxmpp.plugins.base
~~~~~~~~~~~~~~~~~~~~~~
This module provides the plugin framework and the base plugin
class used by SleekXMPP plugins.
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2012 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
import sys
import copy
import logging
import threading
if sys.version_info >= (3, 0):
unicode = str
log = logging.getLogger(__name__)
#: Associate short string names of plugins with implementations. The
#: plugin names are based on the spec used by the plugin, such as
#: `'xep_0030'` for a plugin that implements XEP-0030.
PLUGIN_REGISTRY = {}
#: In order to do cascading plugin disabling, reverse dependencies
#: must be tracked.
PLUGIN_DEPENDENTS = {}
#: Only allow one thread to manipulate the plugin registry at a time.
REGISTRY_LOCK = threading.RLock()
class PluginNotFound(Exception):
"""Raised if an unknown plugin is accessed."""
def register_plugin(impl, name=None):
"""Add a new plugin implementation to the registry.
:param class impl: The plugin class.
The implementation class must provide a :attr:`~BasePlugin.name`
value that will be used as a short name for enabling and disabling
the plugin. The name should be based on the specification used by
the plugin. For example, a plugin implementing XEP-0030 would be
named `'xep_0030'`.
"""
if name is None:
name = impl.name
with REGISTRY_LOCK:
PLUGIN_REGISTRY[name] = impl
if name not in PLUGIN_DEPENDENTS:
|
for dep in impl.dependencies:
if dep not in PLUGIN_DEPENDENTS:
PLUGIN_DEPENDENTS[dep] = set()
PLUGIN_DEPENDENTS[dep].add(name)
def load_plugin(name, module=None):
"""Find and import a plugin module so that it can be registered.
This function is called to import plugins that have been selected for
enabling, but no matching registered plugin has been found.
:param str name: The name of the plugin. It is expected that
plugins are in packages matching their name,
even though the plugin class name does not
have to match.
:param str module: The name of the base module to search
for the plugin.
"""
try:
if not module:
try:
module = 'sleekxmpp.plugins.%s' % name
__import__(module)
mod = sys.modules[module]
except ImportError:
module = 'sleekxmpp.features.%s' % name
__import__(module)
mod = sys.modules[module]
elif isinstance(module, (str, unicode)):
__import__(module)
mod = sys.modules[module]
else:
mod = module
# Add older style plugins to the registry.
if hasattr(mod, name):
plugin = getattr(mod, name)
if hasattr(plugin, 'xep') or hasattr(plugin, 'rfc'):
plugin.name = name
# Mark the plugin as an older style plugin so
# we can work around dependency issues.
plugin.old_style = True
register_plugin(plugin, name)
except ImportError:
log.exception("Unable to load plugin: %s", name)
class PluginManager(object):
def __init__(self, xmpp, config=None):
#: We will track all enabled plugins in a set so that we
#: can enable plugins in batches and pull in dependencies
#: without problems.
self._enabled = set()
#: Maintain references to active plugins.
self._plugins = {}
self._plugin_lock = threading.RLock()
#: Globally set default plugin configuration. This will
#: be used for plugins that are auto-enabled through
#: dependency loading.
self.config = config if config else {}
self.xmpp = xmpp
def register(self, plugin, enable=True):
"""Register a new plugin, and optionally enable it.
:param class plugin: The implementation class of the plugin
to register.
:param bool enable: If ``True``, immediately enable the
plugin after registration.
"""
register_plugin(plugin)
if enable:
self.enable(plugin.name)
def enable(self, name, config=None, enabled=None):
"""Enable a plugin, including any dependencies.
:param string name: The short name of the plugin.
:param dict config: Optional settings dictionary for
configuring plugin behaviour.
"""
top_level = False
if enabled is None:
enabled = set()
with self._plugin_lock:
if name not in self._enabled:
enabled.add(name)
self._enabled.add(name)
if not self.registered(name):
load_plugin(name)
plugin_class = PLUGIN_REGISTRY.get(name, None)
if not plugin_class:
raise PluginNotFound(name)
if config is None:
config = self.config.get(name, None)
plugin = plugin_class(self.xmpp, config)
self._plugins[name] = plugin
for dep in plugin.dependencies:
self.enable(dep, enabled=enabled)
plugin._init()
if top_level:
for name in enabled:
if hasattr(self.plugins[name], 'old_style'):
# Older style plugins require post_init()
# to run just before stream processing begins,
# so we don't call it here.
pass
self.plugins[name].post_init()
def enable_all(self, names=None, config=None):
"""Enable all registered plugins.
:param list names: A list of plugin names to enable. If
none are provided, all registered plugins
will be enabled.
:param dict config: A dictionary mapping plugin names to
configuration dictionaries, as used by
:meth:`~PluginManager.enable`.
"""
names = names if names else PLUGIN_REGISTRY.keys()
if config is None:
config = {}
for name in names:
self.enable(name, config.get(name, {}))
def enabled(self, name):
"""Check if a plugin has been enabled.
:param string name: The name of the plugin to check.
:return: boolean
"""
return name in self._enabled
def registered(self, name):
"""Check if a plugin has been registered.
:param string name: The name of the plugin to check.
:return: boolean
"""
return name in PLUGIN_REGISTRY
def disable(self, name, _disabled=None):
"""Disable a plugin, including any dependent upon it.
:param string name: The name of the plugin to disable.
:param set _disabled: Private set used to track the
disabled status of plugins during
the cascading process.
"""
if _disabled is None:
_disabled = set()
with self._plugin_lock:
if name not in _disabled and name in self._enabled:
_disabled.add(name)
plugin = self._plugins.get(name, None)
if plugin is None:
raise PluginNotFound(name)
for dep in PLUGIN_DEPENDENTS[name]:
self.disable(dep, _disabled)
plugin._end()
if name in self._enabled:
self._enabled.remove(name)
del self._plugins[name]
def __keys__(self):
"""Return the set of enabled plugins."""
return self._plugins.keys()
def __getitem__(self, name):
"""
Allow plugins to be accessed through the manager as if
it were a dictionary.
"""
plugin = self._plugins.get(name, None)
if plugin is None:
raise PluginNotFound(name)
return plugin
def __iter__(self):
"""Return an iterator over the set of enabled plugins."""
return self._plugins.__iter__()
def __len__(self):
"""Return the number of enabled plugins."""
return len(self._plugins)
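# Hedged usage sketch for PluginManager; it assumes the named plugins ship with
# the sleekxmpp.plugins package so load_plugin() can import them on demand.
def _plugin_manager_usage_sketch(xmpp):
    manager = PluginManager(xmpp)
    manager.enable('xep_0030')
    manager.enable_all(['xep_0030', 'xep_0199'],
                       config={'xep_0199': {'keepalive': True}})
    disco = manager['xep_0030']          # dictionary-style access
    if manager.enabled('xep_0199'):
        manager.disable('xep_0199')      # also disables plugins that depend on it
    return disco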
class BasePlugin(object):
#: A short name for the plugin based on the implemented specification.
#: For example, a plugin for XEP-0030 would use `'xep_0030'`.
name = ''
#: A longer name for the plugin, describing its purpose. For example,
#: a plugin for XEP-0030 would use `'Service Discovery'` as its
#: description value.
description = ''
#: Some plugins may depend on others in order to function properly.
#: Any plugin names included in :attr:`~BasePlugin.dependencies` will
#: be initialized as needed if this plugin is enabled.
dependencies = set()
#: The basic, standard configuration for the plugin, which may
#: be overridden when initializing the plugin. The configuration
#: fields included here may be accessed directly as attributes of
#: the plugin. For example, including the configuration field 'foo'
#: would mean accessing `plugin.foo` returns the current value of
#: `plugin.config['foo']`.
default_config = {}
def __init__(self, xmpp, config=None):
self.xmpp = xmpp
if self.xmpp:
self.api = self.xmpp.api.wrap(self.name)
#: A plugin's behaviour may be configurable, in which case those
#: configuration settings will be provided as a dictionary.
self.config = copy.copy(self.default_config)
if config:
self.config.update(config)
def __getattr__(self, key):
"""Provide direct access to configuration fields.
If the standard configuration includes the option `'foo'`, then
accessing `self.foo` should be the same as `self.config['foo']`.
"""
if key in self.default_config:
return self.config.get(key, None)
else:
return object.__getattribute__(self, key)
def __setattr__(self, key, value):
"""Provide direct assignment to configuration fields.
If the standard configuration includes the option `'foo'`, then
assigning to `self.foo` should be the same as assigning to
`self.config['foo']`.
"""
if key in self.default_config:
self.config[key] = value
else:
super(BasePlugin, self).__setattr__(key, value)
def _init(self):
"""Initialize plugin state, such as registering event handlers.
Also sets up required event handlers.
"""
if self.xmpp is not None:
self.xmpp.add_event_handler('session_bind', self.session_bind)
if self.xmpp.session_bind_event.is_set():
self.session_bind(self.xmpp.boundjid.full)
self.plugin_init()
log.debug('Loaded Plugin: %s', self.description)
def _end(self):
"""Cleanup plugin state, and prepare for plugin removal.
Also removes required event handlers.
"""
if self.xmpp is not None:
self.xmpp.del_event_handler('session_bind', self.session_bind)
self.plugin_end()
log.debug('Disabled Plugin: %s' % self.description)
def plugin_init(self):
"""Initialize plugin state, such as registering event handlers."""
pass
def plugin_end(self):
"""Cleanup plugin state, and prepare for plugin removal."""
pass
def session_bind(self, jid):
"""Initialize plugin state based on the bound JID."""
pass
def post_init(self):
"""Initialize any cross-plugin state.
Only needed if the plugin has circular dependencies.
"""
pass
base_plugin = BasePlugin
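# Hedged sketch of a custom plugin built on BasePlugin; the plugin name and
# config field below are made up for illustration.
class _ExamplePlugin(BasePlugin):
    name = 'example_plugin'
    description = 'Example Plugin: illustrates the BasePlugin API'
    default_config = {'greeting': 'hello'}

    def plugin_init(self):
        # self.greeting proxies self.config['greeting'] via __getattr__.
        log.debug('greeting is %s', self.greeting)

# Calling register_plugin(_ExamplePlugin) would let PluginManager.enable('example_plugin') find it.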
| PLUGIN_DEPENDENTS[name] = set() | conditional_block |
base.py | # -*- encoding: utf-8 -*-
"""
sleekxmpp.plugins.base
~~~~~~~~~~~~~~~~~~~~~~
This module provides the plugin framework and the base plugin
class used by SleekXMPP plugins.
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2012 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
import sys
import copy
import logging
import threading
if sys.version_info >= (3, 0):
unicode = str
log = logging.getLogger(__name__)
#: Associate short string names of plugins with implementations. The
#: plugin names are based on the spec used by the plugin, such as
#: `'xep_0030'` for a plugin that implements XEP-0030.
PLUGIN_REGISTRY = {}
#: In order to do cascading plugin disabling, reverse dependencies
#: must be tracked.
PLUGIN_DEPENDENTS = {}
#: Only allow one thread to manipulate the plugin registry at a time.
REGISTRY_LOCK = threading.RLock()
class PluginNotFound(Exception):
"""Raised if an unknown plugin is accessed."""
def register_plugin(impl, name=None):
"""Add a new plugin implementation to the registry.
:param class impl: The plugin class.
The implementation class must provide a :attr:`~BasePlugin.name`
value that will be used as a short name for enabling and disabling
the plugin. The name should be based on the specification used by
the plugin. For example, a plugin implementing XEP-0030 would be
named `'xep_0030'`.
"""
if name is None:
name = impl.name
with REGISTRY_LOCK:
PLUGIN_REGISTRY[name] = impl
if name not in PLUGIN_DEPENDENTS:
PLUGIN_DEPENDENTS[name] = set()
for dep in impl.dependencies:
if dep not in PLUGIN_DEPENDENTS:
PLUGIN_DEPENDENTS[dep] = set()
PLUGIN_DEPENDENTS[dep].add(name)
def load_plugin(name, module=None):
"""Find and import a plugin module so that it can be registered.
This function is called to import plugins that have been selected for
enabling, but no matching registered plugin has been found.
:param str name: The name of the plugin. It is expected that
plugins are in packages matching their name,
even though the plugin class name does not
have to match.
:param str module: The name of the base module to search
for the plugin.
"""
try:
if not module:
try:
module = 'sleekxmpp.plugins.%s' % name
__import__(module)
mod = sys.modules[module]
except ImportError:
module = 'sleekxmpp.features.%s' % name
__import__(module)
mod = sys.modules[module]
elif isinstance(module, (str, unicode)):
__import__(module)
mod = sys.modules[module]
else:
mod = module
# Add older style plugins to the registry.
if hasattr(mod, name):
plugin = getattr(mod, name)
if hasattr(plugin, 'xep') or hasattr(plugin, 'rfc'):
plugin.name = name
# Mark the plugin as an older style plugin so
# we can work around dependency issues.
plugin.old_style = True
register_plugin(plugin, name)
except ImportError:
log.exception("Unable to load plugin: %s", name)
class PluginManager(object):
def __init__(self, xmpp, config=None):
#: We will track all enabled plugins in a set so that we
#: can enable plugins in batches and pull in dependencies
#: without problems.
self._enabled = set()
#: Maintain references to active plugins.
self._plugins = {}
self._plugin_lock = threading.RLock()
#: Globally set default plugin configuration. This will
#: be used for plugins that are auto-enabled through
#: dependency loading.
self.config = config if config else {}
self.xmpp = xmpp
def register(self, plugin, enable=True):
"""Register a new plugin, and optionally enable it.
:param class plugin: The implementation class of the plugin
to register.
:param bool enable: If ``True``, immediately enable the
plugin after registration.
"""
register_plugin(plugin)
if enable:
self.enable(plugin.name)
def enable(self, name, config=None, enabled=None):
"""Enable a plugin, including any dependencies.
:param string name: The short name of the plugin.
:param dict config: Optional settings dictionary for
configuring plugin behaviour.
"""
top_level = False
if enabled is None:
enabled = set()
with self._plugin_lock:
if name not in self._enabled:
enabled.add(name)
self._enabled.add(name)
if not self.registered(name):
load_plugin(name)
plugin_class = PLUGIN_REGISTRY.get(name, None)
if not plugin_class:
raise PluginNotFound(name)
if config is None:
config = self.config.get(name, None)
plugin = plugin_class(self.xmpp, config)
self._plugins[name] = plugin
for dep in plugin.dependencies:
self.enable(dep, enabled=enabled)
plugin._init()
if top_level:
for name in enabled:
if hasattr(self.plugins[name], 'old_style'):
# Older style plugins require post_init()
# to run just before stream processing begins,
# so we don't call it here.
pass
self.plugins[name].post_init()
def | (self, names=None, config=None):
"""Enable all registered plugins.
:param list names: A list of plugin names to enable. If
none are provided, all registered plugins
will be enabled.
:param dict config: A dictionary mapping plugin names to
configuration dictionaries, as used by
:meth:`~PluginManager.enable`.
"""
names = names if names else PLUGIN_REGISTRY.keys()
if config is None:
config = {}
for name in names:
self.enable(name, config.get(name, {}))
def enabled(self, name):
"""Check if a plugin has been enabled.
:param string name: The name of the plugin to check.
:return: boolean
"""
return name in self._enabled
def registered(self, name):
"""Check if a plugin has been registered.
:param string name: The name of the plugin to check.
:return: boolean
"""
return name in PLUGIN_REGISTRY
def disable(self, name, _disabled=None):
"""Disable a plugin, including any dependent upon it.
:param string name: The name of the plugin to disable.
:param set _disabled: Private set used to track the
disabled status of plugins during
the cascading process.
"""
if _disabled is None:
_disabled = set()
with self._plugin_lock:
if name not in _disabled and name in self._enabled:
_disabled.add(name)
plugin = self._plugins.get(name, None)
if plugin is None:
raise PluginNotFound(name)
for dep in PLUGIN_DEPENDENTS[name]:
self.disable(dep, _disabled)
plugin._end()
if name in self._enabled:
self._enabled.remove(name)
del self._plugins[name]
def __keys__(self):
"""Return the set of enabled plugins."""
return self._plugins.keys()
def __getitem__(self, name):
"""
Allow plugins to be accessed through the manager as if
it were a dictionary.
"""
plugin = self._plugins.get(name, None)
if plugin is None:
raise PluginNotFound(name)
return plugin
def __iter__(self):
"""Return an iterator over the set of enabled plugins."""
return self._plugins.__iter__()
def __len__(self):
"""Return the number of enabled plugins."""
return len(self._plugins)
class BasePlugin(object):
#: A short name for the plugin based on the implemented specification.
#: For example, a plugin for XEP-0030 would use `'xep_0030'`.
name = ''
#: A longer name for the plugin, describing its purpose. For example,
#: a plugin for XEP-0030 would use `'Service Discovery'` as its
#: description value.
description = ''
#: Some plugins may depend on others in order to function properly.
#: Any plugin names included in :attr:`~BasePlugin.dependencies` will
#: be initialized as needed if this plugin is enabled.
dependencies = set()
#: The basic, standard configuration for the plugin, which may
#: be overridden when initializing the plugin. The configuration
#: fields included here may be accessed directly as attributes of
#: the plugin. For example, including the configuration field 'foo'
#: would mean accessing `plugin.foo` returns the current value of
#: `plugin.config['foo']`.
default_config = {}
def __init__(self, xmpp, config=None):
self.xmpp = xmpp
if self.xmpp:
self.api = self.xmpp.api.wrap(self.name)
#: A plugin's behaviour may be configurable, in which case those
#: configuration settings will be provided as a dictionary.
self.config = copy.copy(self.default_config)
if config:
self.config.update(config)
def __getattr__(self, key):
"""Provide direct access to configuration fields.
If the standard configuration includes the option `'foo'`, then
accessing `self.foo` should be the same as `self.config['foo']`.
"""
if key in self.default_config:
return self.config.get(key, None)
else:
return object.__getattribute__(self, key)
def __setattr__(self, key, value):
"""Provide direct assignment to configuration fields.
If the standard configuration includes the option `'foo'`, then
assigning to `self.foo` should be the same as assigning to
`self.config['foo']`.
"""
if key in self.default_config:
self.config[key] = value
else:
super(BasePlugin, self).__setattr__(key, value)
def _init(self):
"""Initialize plugin state, such as registering event handlers.
Also sets up required event handlers.
"""
if self.xmpp is not None:
self.xmpp.add_event_handler('session_bind', self.session_bind)
if self.xmpp.session_bind_event.is_set():
self.session_bind(self.xmpp.boundjid.full)
self.plugin_init()
log.debug('Loaded Plugin: %s', self.description)
def _end(self):
"""Cleanup plugin state, and prepare for plugin removal.
Also removes required event handlers.
"""
if self.xmpp is not None:
self.xmpp.del_event_handler('session_bind', self.session_bind)
self.plugin_end()
log.debug('Disabled Plugin: %s' % self.description)
def plugin_init(self):
"""Initialize plugin state, such as registering event handlers."""
pass
def plugin_end(self):
"""Cleanup plugin state, and prepare for plugin removal."""
pass
def session_bind(self, jid):
"""Initialize plugin state based on the bound JID."""
pass
def post_init(self):
"""Initialize any cross-plugin state.
Only needed if the plugin has circular dependencies.
"""
pass
base_plugin = BasePlugin
| enable_all | identifier_name |
base.py | # -*- encoding: utf-8 -*-
"""
sleekxmpp.plugins.base
~~~~~~~~~~~~~~~~~~~~~~
This module provides the plugin framework and the base plugin
class used by SleekXMPP plugins.
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2012 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
import sys
import copy
import logging
import threading
if sys.version_info >= (3, 0):
unicode = str
log = logging.getLogger(__name__)
#: Associate short string names of plugins with implementations. The
#: plugin names are based on the spec used by the plugin, such as
#: `'xep_0030'` for a plugin that implements XEP-0030.
PLUGIN_REGISTRY = {}
#: In order to do cascading plugin disabling, reverse dependencies
#: must be tracked.
PLUGIN_DEPENDENTS = {}
#: Only allow one thread to manipulate the plugin registry at a time.
REGISTRY_LOCK = threading.RLock()
class PluginNotFound(Exception):
"""Raised if an unknown plugin is accessed."""
def register_plugin(impl, name=None):
"""Add a new plugin implementation to the registry.
:param class impl: The plugin class.
The implementation class must provide a :attr:`~BasePlugin.name`
value that will be used as a short name for enabling and disabling
the plugin. The name should be based on the specification used by
the plugin. For example, a plugin implementing XEP-0030 would be
named `'xep_0030'`.
"""
if name is None:
name = impl.name
with REGISTRY_LOCK:
PLUGIN_REGISTRY[name] = impl
if name not in PLUGIN_DEPENDENTS:
PLUGIN_DEPENDENTS[name] = set()
for dep in impl.dependencies:
if dep not in PLUGIN_DEPENDENTS:
PLUGIN_DEPENDENTS[dep] = set()
PLUGIN_DEPENDENTS[dep].add(name)
def load_plugin(name, module=None):
"""Find and import a plugin module so that it can be registered.
This function is called to import plugins that have been selected for
enabling, but no matching registered plugin has been found.
:param str name: The name of the plugin. It is expected that
plugins are in packages matching their name,
even though the plugin class name does not
have to match.
:param str module: The name of the base module to search
for the plugin.
"""
try:
if not module:
try:
module = 'sleekxmpp.plugins.%s' % name
__import__(module)
mod = sys.modules[module]
except ImportError:
module = 'sleekxmpp.features.%s' % name
__import__(module)
mod = sys.modules[module]
elif isinstance(module, (str, unicode)):
__import__(module)
mod = sys.modules[module]
else:
mod = module
# Add older style plugins to the registry.
if hasattr(mod, name):
plugin = getattr(mod, name)
if hasattr(plugin, 'xep') or hasattr(plugin, 'rfc'):
plugin.name = name
# Mark the plugin as an older style plugin so
# we can work around dependency issues.
plugin.old_style = True
register_plugin(plugin, name)
except ImportError:
log.exception("Unable to load plugin: %s", name)
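# Illustrative note on the search order implemented above (the plugin and
# module names are examples): load_plugin('xep_9999') first tries to import
# sleekxmpp.plugins.xep_9999 and, if that fails, sleekxmpp.features.xep_9999;
# passing module='myapp.plugins.xep_9999' skips the search and imports that
# module directly.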
class PluginManager(object):
def __init__(self, xmpp, config=None):
#: We will track all enabled plugins in a set so that we
#: can enable plugins in batches and pull in dependencies
#: without problems.
self._enabled = set()
#: Maintain references to active plugins.
self._plugins = {}
self._plugin_lock = threading.RLock()
#: Globally set default plugin configuration. This will
#: be used for plugins that are auto-enabled through
#: dependency loading.
self.config = config if config else {}
self.xmpp = xmpp
def register(self, plugin, enable=True):
"""Register a new plugin, and optionally enable it.
:param class plugin: The implementation class of the plugin
to register.
:param bool enable: If ``True``, immediately enable the
plugin after registration.
"""
register_plugin(plugin)
if enable:
self.enable(plugin.name)
def enable(self, name, config=None, enabled=None):
"""Enable a plugin, including any dependencies.
:param string name: The short name of the plugin.
:param dict config: Optional settings dictionary for
configuring plugin behaviour.
"""
top_level = False
if enabled is None:
enabled = set()
with self._plugin_lock:
if name not in self._enabled:
enabled.add(name)
self._enabled.add(name)
if not self.registered(name):
load_plugin(name)
plugin_class = PLUGIN_REGISTRY.get(name, None)
if not plugin_class:
raise PluginNotFound(name)
if config is None:
config = self.config.get(name, None)
plugin = plugin_class(self.xmpp, config)
self._plugins[name] = plugin
for dep in plugin.dependencies:
self.enable(dep, enabled=enabled)
plugin._init()
if top_level:
for name in enabled:
if hasattr(self.plugins[name], 'old_style'):
# Older style plugins require post_init()
# to run just before stream processing begins,
# so we don't call it here.
pass
self.plugins[name].post_init()
def enable_all(self, names=None, config=None):
"""Enable all registered plugins.
:param list names: A list of plugin names to enable. If
none are provided, all registered plugins
will be enabled.
:param dict config: A dictionary mapping plugin names to
configuration dictionaries, as used by
:meth:`~PluginManager.enable`.
"""
names = names if names else PLUGIN_REGISTRY.keys()
if config is None:
config = {}
for name in names:
self.enable(name, config.get(name, {}))
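# Illustrative sketch (plugin names and config keys are examples only):
#
#     manager = PluginManager(xmpp)
#     manager.enable('xep_9999', config={'timeout': 30})   # also pulls in 'xep_0030'
#     manager.enable_all(names=['xep_0030', 'xep_9999'])
#
# enable() recursively enables everything listed in a plugin's dependencies
# before calling that plugin's _init().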
def enabled(self, name):
"""Check if a plugin has been enabled.
:param string name: The name of the plugin to check.
:return: boolean
"""
return name in self._enabled
def registered(self, name):
|
def disable(self, name, _disabled=None):
"""Disable a plugin, including any dependent upon it.
:param string name: The name of the plugin to disable.
:param set _disabled: Private set used to track the
disabled status of plugins during
the cascading process.
"""
if _disabled is None:
_disabled = set()
with self._plugin_lock:
if name not in _disabled and name in self._enabled:
_disabled.add(name)
plugin = self._plugins.get(name, None)
if plugin is None:
raise PluginNotFound(name)
for dep in PLUGIN_DEPENDENTS[name]:
self.disable(dep, _disabled)
plugin._end()
if name in self._enabled:
self._enabled.remove(name)
del self._plugins[name]
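# Illustrative sketch of the cascading disable above (names are examples only):
# PLUGIN_DEPENDENTS maps a plugin to the plugins that list it as a dependency,
# so disabling 'xep_0030' first disables 'xep_9999' from the earlier example
# before 'xep_0030' itself is torn down with _end().
#
#     manager.disable('xep_0030')   # cascades to 'xep_9999' first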
def __keys__(self):
"""Return the set of enabled plugins."""
return self._plugins.keys()
def __getitem__(self, name):
"""
Allow plugins to be accessed through the manager as if
it were a dictionary.
"""
plugin = self._plugins.get(name, None)
if plugin is None:
raise PluginNotFound(name)
return plugin
def __iter__(self):
"""Return an iterator over the set of enabled plugins."""
return self._plugins.__iter__()
def __len__(self):
"""Return the number of enabled plugins."""
return len(self._plugins)
class BasePlugin(object):
#: A short name for the plugin based on the implemented specification.
#: For example, a plugin for XEP-0030 would use `'xep_0030'`.
name = ''
#: A longer name for the plugin, describing its purpose. For example,
#: a plugin for XEP-0030 would use `'Service Discovery'` as its
#: description value.
description = ''
#: Some plugins may depend on others in order to function properly.
#: Any plugin names included in :attr:`~BasePlugin.dependencies` will
#: be initialized as needed if this plugin is enabled.
dependencies = set()
#: The basic, standard configuration for the plugin, which may
#: be overridden when initializing the plugin. The configuration
#: fields included here may be accessed directly as attributes of
#: the plugin. For example, including the configuration field 'foo'
#: would mean accessing `plugin.foo` returns the current value of
#: `plugin.config['foo']`.
default_config = {}
def __init__(self, xmpp, config=None):
self.xmpp = xmpp
if self.xmpp:
self.api = self.xmpp.api.wrap(self.name)
#: A plugin's behaviour may be configurable, in which case those
#: configuration settings will be provided as a dictionary.
self.config = copy.copy(self.default_config)
if config:
self.config.update(config)
def __getattr__(self, key):
"""Provide direct access to configuration fields.
If the standard configuration includes the option `'foo'`, then
accessing `self.foo` should be the same as `self.config['foo']`.
"""
if key in self.default_config:
return self.config.get(key, None)
else:
return object.__getattribute__(self, key)
def __setattr__(self, key, value):
"""Provide direct assignment to configuration fields.
If the standard configuration includes the option `'foo'`, then
assigning to `self.foo` should be the same as assigning to
`self.config['foo']`.
"""
if key in self.default_config:
self.config[key] = value
else:
super(BasePlugin, self).__setattr__(key, value)
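# Illustrative sketch of the default_config attribute mapping implemented by
# __getattr__/__setattr__ above (plugin and field names are examples only):
#
#     class ExamplePlugin(BasePlugin):
#         name = 'example'
#         default_config = {'timeout': 30}
#
#     plugin = ExamplePlugin(xmpp)
#     plugin.timeout          # -> 30, read from plugin.config['timeout']
#     plugin.timeout = 60     # stored in plugin.config['timeout']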
def _init(self):
"""Initialize plugin state, such as registering event handlers.
Also sets up required event handlers.
"""
if self.xmpp is not None:
self.xmpp.add_event_handler('session_bind', self.session_bind)
if self.xmpp.session_bind_event.is_set():
self.session_bind(self.xmpp.boundjid.full)
self.plugin_init()
log.debug('Loaded Plugin: %s', self.description)
def _end(self):
"""Cleanup plugin state, and prepare for plugin removal.
Also removes required event handlers.
"""
if self.xmpp is not None:
self.xmpp.del_event_handler('session_bind', self.session_bind)
self.plugin_end()
log.debug('Disabled Plugin: %s', self.description)
def plugin_init(self):
"""Initialize plugin state, such as registering event handlers."""
pass
def plugin_end(self):
"""Cleanup plugin state, and prepare for plugin removal."""
pass
def session_bind(self, jid):
"""Initialize plugin state based on the bound JID."""
pass
def post_init(self):
"""Initialize any cross-plugin state.
Only needed if the plugin has circular dependencies.
"""
pass
base_plugin = BasePlugin
| """Check if a plugin has been registered.
:param string name: The name of the plugin to check.
:return: boolean
"""
return name in PLUGIN_REGISTRY | identifier_body |
base.py | # -*- encoding: utf-8 -*-
"""
sleekxmpp.plugins.base
~~~~~~~~~~~~~~~~~~~~~~
This module provides the base plugin framework and
plugin manager used by SleekXMPP plugins.
Part of SleekXMPP: The Sleek XMPP Library
:copyright: (c) 2012 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
import sys
import copy
import logging
import threading
if sys.version_info >= (3, 0):
unicode = str
log = logging.getLogger(__name__)
#: Associate short string names of plugins with implementations. The
#: plugin names are based on the spec used by the plugin, such as
#: `'xep_0030'` for a plugin that implements XEP-0030.
PLUGIN_REGISTRY = {}
#: In order to do cascading plugin disabling, reverse dependencies
#: must be tracked.
PLUGIN_DEPENDENTS = {}
#: Only allow one thread to manipulate the plugin registry at a time.
REGISTRY_LOCK = threading.RLock()
class PluginNotFound(Exception):
"""Raised if an unknown plugin is accessed."""
def register_plugin(impl, name=None):
"""Add a new plugin implementation to the registry.
:param class impl: The plugin class.
The implementation class must provide a :attr:`~BasePlugin.name`
value that will be used as a short name for enabling and disabling
the plugin. The name should be based on the specification used by
the plugin. For example, a plugin implementing XEP-0030 would be
named `'xep_0030'`.
"""
if name is None:
name = impl.name
with REGISTRY_LOCK:
PLUGIN_REGISTRY[name] = impl
if name not in PLUGIN_DEPENDENTS:
PLUGIN_DEPENDENTS[name] = set()
for dep in impl.dependencies:
if dep not in PLUGIN_DEPENDENTS:
PLUGIN_DEPENDENTS[dep] = set()
PLUGIN_DEPENDENTS[dep].add(name)
def load_plugin(name, module=None):
"""Find and import a plugin module so that it can be registered.
This function is called to import plugins that have been selected for
enabling, but no matching registered plugin has been found.
:param str name: The name of the plugin. It is expected that
plugins are in packages matching their name,
even though the plugin class name does not
have to match.
:param str module: The name of the base module to search
for the plugin.
"""
try:
if not module:
try:
module = 'sleekxmpp.plugins.%s' % name
__import__(module)
mod = sys.modules[module]
except ImportError:
module = 'sleekxmpp.features.%s' % name
__import__(module)
mod = sys.modules[module]
elif isinstance(module, (str, unicode)):
__import__(module)
mod = sys.modules[module]
else:
mod = module
# Add older style plugins to the registry.
if hasattr(mod, name):
plugin = getattr(mod, name)
if hasattr(plugin, 'xep') or hasattr(plugin, 'rfc'):
plugin.name = name
# Mark the plugin as an older style plugin so
# we can work around dependency issues.
plugin.old_style = True
register_plugin(plugin, name)
except ImportError:
log.exception("Unable to load plugin: %s", name)
class PluginManager(object):
def __init__(self, xmpp, config=None):
#: We will track all enabled plugins in a set so that we
#: can enable plugins in batches and pull in dependencies
#: without problems.
self._enabled = set()
#: Maintain references to active plugins.
self._plugins = {}
self._plugin_lock = threading.RLock()
#: Globally set default plugin configuration. This will
#: be used for plugins that are auto-enabled through
#: dependency loading.
self.config = config if config else {}
self.xmpp = xmpp
def register(self, plugin, enable=True):
"""Register a new plugin, and optionally enable it.
:param class plugin: The implementation class of the plugin
to register.
:param bool enable: If ``True``, immediately enable the
plugin after registration.
"""
register_plugin(plugin)
if enable:
self.enable(plugin.name)
def enable(self, name, config=None, enabled=None):
"""Enable a plugin, including any dependencies.
:param string name: The short name of the plugin.
:param dict config: Optional settings dictionary for
configuring plugin behaviour.
"""
top_level = False
if enabled is None:
enabled = set()
with self._plugin_lock:
if name not in self._enabled:
enabled.add(name)
self._enabled.add(name)
if not self.registered(name):
load_plugin(name)
plugin_class = PLUGIN_REGISTRY.get(name, None)
if not plugin_class:
raise PluginNotFound(name)
if config is None:
config = self.config.get(name, None)
plugin = plugin_class(self.xmpp, config)
self._plugins[name] = plugin
for dep in plugin.dependencies:
self.enable(dep, enabled=enabled)
plugin._init()
if top_level:
for name in enabled:
if hasattr(self.plugins[name], 'old_style'):
# Older style plugins require post_init()
# to run just before stream processing begins,
# so we don't call it here.
pass
self.plugins[name].post_init()
def enable_all(self, names=None, config=None):
"""Enable all registered plugins.
:param list names: A list of plugin names to enable. If
none are provided, all registered plugins
will be enabled.
:param dict config: A dictionary mapping plugin names to
configuration dictionaries, as used by
:meth:`~PluginManager.enable`.
"""
names = names if names else PLUGIN_REGISTRY.keys()
if config is None:
config = {}
for name in names:
self.enable(name, config.get(name, {}))
def enabled(self, name):
"""Check if a plugin has been enabled.
:param string name: The name of the plugin to check.
:return: boolean
"""
return name in self._enabled
def registered(self, name):
"""Check if a plugin has been registered.
:param string name: The name of the plugin to check.
:return: boolean
"""
return name in PLUGIN_REGISTRY
def disable(self, name, _disabled=None):
"""Disable a plugin, including any dependent upon it.
:param string name: The name of the plugin to disable.
:param set _disabled: Private set used to track the | """
if _disabled is None:
_disabled = set()
with self._plugin_lock:
if name not in _disabled and name in self._enabled:
_disabled.add(name)
plugin = self._plugins.get(name, None)
if plugin is None:
raise PluginNotFound(name)
for dep in PLUGIN_DEPENDENTS[name]:
self.disable(dep, _disabled)
plugin._end()
if name in self._enabled:
self._enabled.remove(name)
del self._plugins[name]
def __keys__(self):
"""Return the set of enabled plugins."""
return self._plugins.keys()
def __getitem__(self, name):
"""
Allow plugins to be accessed through the manager as if
it were a dictionary.
"""
plugin = self._plugins.get(name, None)
if plugin is None:
raise PluginNotFound(name)
return plugin
def __iter__(self):
"""Return an iterator over the set of enabled plugins."""
return self._plugins.__iter__()
def __len__(self):
"""Return the number of enabled plugins."""
return len(self._plugins)
class BasePlugin(object):
#: A short name for the plugin based on the implemented specification.
#: For example, a plugin for XEP-0030 would use `'xep_0030'`.
name = ''
#: A longer name for the plugin, describing its purpose. For example,
#: a plugin for XEP-0030 would use `'Service Discovery'` as its
#: description value.
description = ''
#: Some plugins may depend on others in order to function properly.
#: Any plugin names included in :attr:`~BasePlugin.dependencies` will
#: be initialized as needed if this plugin is enabled.
dependencies = set()
#: The basic, standard configuration for the plugin, which may
#: be overridden when initializing the plugin. The configuration
#: fields included here may be accessed directly as attributes of
#: the plugin. For example, including the configuration field 'foo'
#: would mean accessing `plugin.foo` returns the current value of
#: `plugin.config['foo']`.
default_config = {}
def __init__(self, xmpp, config=None):
self.xmpp = xmpp
if self.xmpp:
self.api = self.xmpp.api.wrap(self.name)
#: A plugin's behaviour may be configurable, in which case those
#: configuration settings will be provided as a dictionary.
self.config = copy.copy(self.default_config)
if config:
self.config.update(config)
def __getattr__(self, key):
"""Provide direct access to configuration fields.
If the standard configuration includes the option `'foo'`, then
accessing `self.foo` should be the same as `self.config['foo']`.
"""
if key in self.default_config:
return self.config.get(key, None)
else:
return object.__getattribute__(self, key)
def __setattr__(self, key, value):
"""Provide direct assignment to configuration fields.
If the standard configuration includes the option `'foo'`, then
assigning to `self.foo` should be the same as assigning to
`self.config['foo']`.
"""
if key in self.default_config:
self.config[key] = value
else:
super(BasePlugin, self).__setattr__(key, value)
def _init(self):
"""Initialize plugin state, such as registering event handlers.
Also sets up required event handlers.
"""
if self.xmpp is not None:
self.xmpp.add_event_handler('session_bind', self.session_bind)
if self.xmpp.session_bind_event.is_set():
self.session_bind(self.xmpp.boundjid.full)
self.plugin_init()
log.debug('Loaded Plugin: %s', self.description)
def _end(self):
"""Cleanup plugin state, and prepare for plugin removal.
Also removes required event handlers.
"""
if self.xmpp is not None:
self.xmpp.del_event_handler('session_bind', self.session_bind)
self.plugin_end()
log.debug('Disabled Plugin: %s', self.description)
def plugin_init(self):
"""Initialize plugin state, such as registering event handlers."""
pass
def plugin_end(self):
"""Cleanup plugin state, and prepare for plugin removal."""
pass
def session_bind(self, jid):
"""Initialize plugin state based on the bound JID."""
pass
def post_init(self):
"""Initialize any cross-plugin state.
Only needed if the plugin has circular dependencies.
"""
pass
base_plugin = BasePlugin | disabled status of plugins during
the cascading process. | random_line_split |
font.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Implementation of Quartz (CoreGraphics) fonts.
extern crate core_foundation;
extern crate core_graphics;
extern crate core_text;
use app_units::Au;
use byteorder::{BigEndian, ByteOrder};
use core_foundation::base::CFIndex;
use core_foundation::data::CFData;
use core_foundation::string::UniChar;
use core_graphics::font::CGGlyph;
use core_graphics::geometry::CGRect;
use core_text::font::CTFont;
use core_text::font_descriptor::{SymbolicTraitAccessors, TraitAccessors};
use core_text::font_descriptor::kCTFontDefaultOrientation;
use font::{FontHandleMethods, FontMetrics, FontTableMethods, FontTableTag, FractionalPixel};
use font::{GPOS, GSUB, KERN};
use platform::font_template::FontTemplateData;
use platform::macos::font_context::FontContextHandle;
use std::{fmt, ptr};
use std::ops::Range;
use std::sync::Arc;
use style::computed_values::{font_stretch, font_weight};
use text::glyph::GlyphId;
const KERN_PAIR_LEN: usize = 6;
pub struct FontTable {
data: CFData,
}
// assumes 72 points per inch, and 96 px per inch
fn px_to_pt(px: f64) -> f64 {
px / 96. * 72.
}
// assumes 72 points per inch, and 96 px per inch
fn pt_to_px(pt: f64) -> f64 |
fn au_from_pt(pt: f64) -> Au {
Au::from_f64_px(pt_to_px(pt))
}
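// Worked example of the 72 pt per 96 px assumption above: px_to_pt(16.0)
// yields 16.0 / 96.0 * 72.0 = 12.0, and pt_to_px(12.0) maps back to 16.0.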
impl FontTable {
pub fn wrap(data: CFData) -> FontTable {
FontTable { data: data }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
self.data.bytes()
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
ctfont: CTFont,
h_kern_subtable: Option<CachedKernTable>,
can_do_fast_shaping: bool,
}
impl FontHandle {
/// Cache all the data needed for basic horizontal kerning. This is used only as a fallback or
/// fast path (when the GPOS table is missing or unnecessary) so it needn't handle every case.
fn find_h_kern_subtable(&self) -> Option<CachedKernTable> {
let font_table = match self.table_for_tag(KERN) {
Some(table) => table,
None => return None
};
let mut result = CachedKernTable {
font_table: font_table,
pair_data_range: 0..0,
px_per_font_unit: 0.0,
};
// Look for a subtable with horizontal kerning in format 0.
// https://www.microsoft.com/typography/otspec/kern.htm
const KERN_COVERAGE_HORIZONTAL_FORMAT_0: u16 = 1;
const SUBTABLE_HEADER_LEN: usize = 6;
const FORMAT_0_HEADER_LEN: usize = 8;
{
let table = result.font_table.buffer();
let version = BigEndian::read_u16(table);
if version != 0 {
return None;
}
let num_subtables = BigEndian::read_u16(&table[2..]);
let mut start = 4;
for _ in 0..num_subtables {
// TODO: Check the subtable version number?
let len = BigEndian::read_u16(&table[start + 2..]) as usize;
let cov = BigEndian::read_u16(&table[start + 4..]);
let end = start + len;
if cov == KERN_COVERAGE_HORIZONTAL_FORMAT_0 {
// Found a matching subtable.
if result.pair_data_range.len() > 0 {
debug!("Found multiple horizontal kern tables. Disable fast path.");
return None;
}
// Read the subtable header.
let subtable_start = start + SUBTABLE_HEADER_LEN;
let n_pairs = BigEndian::read_u16(&table[subtable_start..]) as usize;
let pair_data_start = subtable_start + FORMAT_0_HEADER_LEN;
result.pair_data_range = pair_data_start..end;
if result.pair_data_range.len() != n_pairs * KERN_PAIR_LEN {
debug!("Bad data in kern header. Disable fast path.");
return None;
}
let pt_per_font_unit = self.ctfont.pt_size() as f64 /
self.ctfont.units_per_em() as f64;
result.px_per_font_unit = pt_to_px(pt_per_font_unit);
}
start = end;
}
}
if result.pair_data_range.len() > 0 {
Some(result)
} else {
None
}
}
}
struct CachedKernTable {
font_table: FontTable,
pair_data_range: Range<usize>,
px_per_font_unit: f64,
}
impl CachedKernTable {
/// Search for a glyph pair in the kern table and return the corresponding value.
fn binary_search(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> Option<i16> {
let pairs = &self.font_table.buffer()[self.pair_data_range.clone()];
let query = first_glyph << 16 | second_glyph;
let (mut start, mut end) = (0, pairs.len() / KERN_PAIR_LEN);
while start < end {
let i = (start + end) / 2;
let key = BigEndian::read_u32(&pairs[i * KERN_PAIR_LEN..]);
if key > query {
end = i;
} else if key < query {
start = i + 1;
} else {
return Some(BigEndian::read_i16(&pairs[i * KERN_PAIR_LEN + 4..]));
}
}
None
}
}
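// Illustrative note on the lookup above (glyph ids are examples only): each
// 6-byte entry in a format 0 kern subtable is laid out big-endian as
// [left glyph: u16][right glyph: u16][adjustment: i16], so the packed u32
// search key (first_glyph << 16 | second_glyph) compares directly against the
// first four bytes of an entry. For glyphs 0x0024 and 0x0056 the key is
// 0x00240056.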
impl fmt::Debug for CachedKernTable {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "CachedKernTable")
}
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>)
-> Result<FontHandle, ()> {
let size = match pt_size {
Some(s) => s.to_f64_px(),
None => 0.0
};
match template.ctfont(size) {
Some(ref ctfont) => {
let mut handle = FontHandle {
font_data: template.clone(),
ctfont: ctfont.clone_with_font_size(size),
h_kern_subtable: None,
can_do_fast_shaping: false,
};
handle.h_kern_subtable = handle.find_h_kern_subtable();
// TODO (#11310): Implement basic support for GPOS and GSUB.
handle.can_do_fast_shaping = handle.h_kern_subtable.is_some() &&
handle.table_for_tag(GPOS).is_none() &&
handle.table_for_tag(GSUB).is_none();
Ok(handle)
}
None => {
Err(())
}
}
}
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
}
fn family_name(&self) -> String {
self.ctfont.family_name()
}
fn face_name(&self) -> String {
self.ctfont.face_name()
}
fn is_italic(&self) -> bool {
self.ctfont.symbolic_traits().is_italic()
}
fn boldness(&self) -> font_weight::T {
let normalized = self.ctfont.all_traits().normalized_weight(); // [-1.0, 1.0]
let normalized = if normalized <= 0.0 {
4.0 + normalized * 3.0 // [1.0, 4.0]
} else {
4.0 + normalized * 5.0 // [4.0, 9.0]
}; // [1.0, 9.0], centered on 4.0
match normalized.round() as u32 {
1 => font_weight::T::Weight100,
2 => font_weight::T::Weight200,
3 => font_weight::T::Weight300,
4 => font_weight::T::Weight400,
5 => font_weight::T::Weight500,
6 => font_weight::T::Weight600,
7 => font_weight::T::Weight700,
8 => font_weight::T::Weight800,
_ => font_weight::T::Weight900,
}
}
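// Worked examples of the weight mapping above: a normalized weight of -1.0
// gives 4.0 - 3.0 = 1.0 -> Weight100, 0.0 gives 4.0 -> Weight400, and 1.0
// gives 4.0 + 5.0 = 9.0 -> Weight900.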
fn stretchiness(&self) -> font_stretch::T {
let normalized = self.ctfont.all_traits().normalized_width(); // [-1.0, 1.0]
let normalized = (normalized + 1.0) / 2.0 * 9.0; // [0.0, 9.0]
match normalized {
v if v < 1.0 => font_stretch::T::ultra_condensed,
v if v < 2.0 => font_stretch::T::extra_condensed,
v if v < 3.0 => font_stretch::T::condensed,
v if v < 4.0 => font_stretch::T::semi_condensed,
v if v < 5.0 => font_stretch::T::normal,
v if v < 6.0 => font_stretch::T::semi_expanded,
v if v < 7.0 => font_stretch::T::expanded,
v if v < 8.0 => font_stretch::T::extra_expanded,
_ => font_stretch::T::ultra_expanded,
}
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let characters: [UniChar; 1] = [codepoint as UniChar];
let mut glyphs: [CGGlyph; 1] = [0 as CGGlyph];
let count: CFIndex = 1;
let result = self.ctfont.get_glyphs_for_characters(&characters[0],
&mut glyphs[0],
count);
if !result {
// No glyph for this character
return None;
}
assert!(glyphs[0] != 0); // FIXME: error handling
return Some(glyphs[0] as GlyphId);
}
fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
if let Some(ref table) = self.h_kern_subtable {
if let Some(font_units) = table.binary_search(first_glyph, second_glyph) {
return font_units as f64 * table.px_per_font_unit;
}
}
0.0
}
fn can_do_fast_shaping(&self) -> bool {
self.can_do_fast_shaping
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
let glyphs = [glyph as CGGlyph];
let advance = self.ctfont.get_advances_for_glyphs(kCTFontDefaultOrientation,
&glyphs[0],
ptr::null_mut(),
1);
Some(advance as FractionalPixel)
}
fn metrics(&self) -> FontMetrics {
let bounding_rect: CGRect = self.ctfont.bounding_box();
let ascent = self.ctfont.ascent() as f64;
let descent = self.ctfont.descent() as f64;
let em_size = Au::from_f64_px(self.ctfont.pt_size() as f64);
let leading = self.ctfont.leading() as f64;
let scale = px_to_pt(self.ctfont.pt_size() as f64) / (ascent + descent);
let line_gap = (ascent + descent + leading + 0.5).floor();
let max_advance_width = au_from_pt(bounding_rect.size.width as f64);
let average_advance = self.glyph_index('0')
.and_then(|idx| self.glyph_h_advance(idx))
.map(Au::from_f64_px)
.unwrap_or(max_advance_width);
let metrics = FontMetrics {
underline_size: au_from_pt(self.ctfont.underline_thickness() as f64),
// TODO(Issue #201): underline metrics are not reliable. Have to pull out of font table
// directly.
//
// see also: https://bugs.webkit.org/show_bug.cgi?id=16768
// see also: https://bugreports.qt-project.org/browse/QTBUG-13364
underline_offset: au_from_pt(self.ctfont.underline_position() as f64),
strikeout_size: Au(0), // FIXME(Issue #942)
strikeout_offset: Au(0), // FIXME(Issue #942)
leading: au_from_pt(leading),
x_height: au_from_pt((self.ctfont.x_height() as f64) * scale),
em_size: em_size,
ascent: au_from_pt(ascent * scale),
descent: au_from_pt(descent * scale),
max_advance: max_advance_width,
average_advance: average_advance,
line_gap: Au::from_f64_px(line_gap),
};
debug!("Font metrics (@{} pt): {:?}", self.ctfont.pt_size() as f64, metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result: Option<CFData> = self.ctfont.get_font_table(tag);
result.and_then(|data| {
Some(FontTable::wrap(data))
})
}
}
| {
pt / 72. * 96.
} | identifier_body |
font.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Implementation of Quartz (CoreGraphics) fonts.
extern crate core_foundation;
extern crate core_graphics;
extern crate core_text;
use app_units::Au;
use byteorder::{BigEndian, ByteOrder};
use core_foundation::base::CFIndex;
use core_foundation::data::CFData;
use core_foundation::string::UniChar;
use core_graphics::font::CGGlyph;
use core_graphics::geometry::CGRect;
use core_text::font::CTFont;
use core_text::font_descriptor::{SymbolicTraitAccessors, TraitAccessors};
use core_text::font_descriptor::kCTFontDefaultOrientation;
use font::{FontHandleMethods, FontMetrics, FontTableMethods, FontTableTag, FractionalPixel};
use font::{GPOS, GSUB, KERN};
use platform::font_template::FontTemplateData;
use platform::macos::font_context::FontContextHandle;
use std::{fmt, ptr};
use std::ops::Range;
use std::sync::Arc;
use style::computed_values::{font_stretch, font_weight};
use text::glyph::GlyphId;
const KERN_PAIR_LEN: usize = 6;
pub struct FontTable {
data: CFData,
}
// assumes 72 points per inch, and 96 px per inch
fn px_to_pt(px: f64) -> f64 {
px / 96. * 72.
}
// assumes 72 points per inch, and 96 px per inch
fn pt_to_px(pt: f64) -> f64 {
pt / 72. * 96.
}
fn au_from_pt(pt: f64) -> Au {
Au::from_f64_px(pt_to_px(pt))
}
impl FontTable {
pub fn wrap(data: CFData) -> FontTable {
FontTable { data: data }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
self.data.bytes()
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
ctfont: CTFont,
h_kern_subtable: Option<CachedKernTable>,
can_do_fast_shaping: bool,
}
impl FontHandle {
/// Cache all the data needed for basic horizontal kerning. This is used only as a fallback or
/// fast path (when the GPOS table is missing or unnecessary) so it needn't handle every case.
fn find_h_kern_subtable(&self) -> Option<CachedKernTable> {
let font_table = match self.table_for_tag(KERN) {
Some(table) => table,
None => return None
};
let mut result = CachedKernTable {
font_table: font_table,
pair_data_range: 0..0,
px_per_font_unit: 0.0,
};
// Look for a subtable with horizontal kerning in format 0.
// https://www.microsoft.com/typography/otspec/kern.htm
const KERN_COVERAGE_HORIZONTAL_FORMAT_0: u16 = 1;
const SUBTABLE_HEADER_LEN: usize = 6;
const FORMAT_0_HEADER_LEN: usize = 8;
{
let table = result.font_table.buffer();
let version = BigEndian::read_u16(table);
if version != 0 {
return None;
}
let num_subtables = BigEndian::read_u16(&table[2..]);
let mut start = 4;
for _ in 0..num_subtables {
// TODO: Check the subtable version number?
let len = BigEndian::read_u16(&table[start + 2..]) as usize;
let cov = BigEndian::read_u16(&table[start + 4..]);
let end = start + len;
if cov == KERN_COVERAGE_HORIZONTAL_FORMAT_0 {
// Found a matching subtable.
if result.pair_data_range.len() > 0 {
debug!("Found multiple horizontal kern tables. Disable fast path.");
return None;
}
// Read the subtable header.
let subtable_start = start + SUBTABLE_HEADER_LEN;
let n_pairs = BigEndian::read_u16(&table[subtable_start..]) as usize;
let pair_data_start = subtable_start + FORMAT_0_HEADER_LEN;
result.pair_data_range = pair_data_start..end;
if result.pair_data_range.len() != n_pairs * KERN_PAIR_LEN {
debug!("Bad data in kern header. Disable fast path.");
return None;
}
let pt_per_font_unit = self.ctfont.pt_size() as f64 /
self.ctfont.units_per_em() as f64;
result.px_per_font_unit = pt_to_px(pt_per_font_unit);
}
start = end;
}
}
if result.pair_data_range.len() > 0 {
Some(result)
} else {
None
}
}
}
struct CachedKernTable {
font_table: FontTable,
pair_data_range: Range<usize>,
px_per_font_unit: f64,
}
impl CachedKernTable {
/// Search for a glyph pair in the kern table and return the corresponding value.
fn binary_search(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> Option<i16> {
let pairs = &self.font_table.buffer()[self.pair_data_range.clone()];
let query = first_glyph << 16 | second_glyph;
let (mut start, mut end) = (0, pairs.len() / KERN_PAIR_LEN);
while start < end {
let i = (start + end) / 2;
let key = BigEndian::read_u32(&pairs[i * KERN_PAIR_LEN..]);
if key > query {
end = i;
} else if key < query {
start = i + 1;
} else {
return Some(BigEndian::read_i16(&pairs[i * KERN_PAIR_LEN + 4..]));
}
}
None
}
}
impl fmt::Debug for CachedKernTable {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "CachedKernTable")
}
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>)
-> Result<FontHandle, ()> {
let size = match pt_size {
Some(s) => s.to_f64_px(),
None => 0.0
};
match template.ctfont(size) {
Some(ref ctfont) => {
let mut handle = FontHandle {
font_data: template.clone(),
ctfont: ctfont.clone_with_font_size(size),
h_kern_subtable: None,
can_do_fast_shaping: false,
};
handle.h_kern_subtable = handle.find_h_kern_subtable();
// TODO (#11310): Implement basic support for GPOS and GSUB.
handle.can_do_fast_shaping = handle.h_kern_subtable.is_some() &&
handle.table_for_tag(GPOS).is_none() &&
handle.table_for_tag(GSUB).is_none();
Ok(handle)
}
None => {
Err(())
}
}
}
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
}
fn | (&self) -> String {
self.ctfont.family_name()
}
fn face_name(&self) -> String {
self.ctfont.face_name()
}
fn is_italic(&self) -> bool {
self.ctfont.symbolic_traits().is_italic()
}
fn boldness(&self) -> font_weight::T {
let normalized = self.ctfont.all_traits().normalized_weight(); // [-1.0, 1.0]
let normalized = if normalized <= 0.0 {
4.0 + normalized * 3.0 // [1.0, 4.0]
} else {
4.0 + normalized * 5.0 // [4.0, 9.0]
}; // [1.0, 9.0], centered on 4.0
match normalized.round() as u32 {
1 => font_weight::T::Weight100,
2 => font_weight::T::Weight200,
3 => font_weight::T::Weight300,
4 => font_weight::T::Weight400,
5 => font_weight::T::Weight500,
6 => font_weight::T::Weight600,
7 => font_weight::T::Weight700,
8 => font_weight::T::Weight800,
_ => font_weight::T::Weight900,
}
}
fn stretchiness(&self) -> font_stretch::T {
let normalized = self.ctfont.all_traits().normalized_width(); // [-1.0, 1.0]
let normalized = (normalized + 1.0) / 2.0 * 9.0; // [0.0, 9.0]
match normalized {
v if v < 1.0 => font_stretch::T::ultra_condensed,
v if v < 2.0 => font_stretch::T::extra_condensed,
v if v < 3.0 => font_stretch::T::condensed,
v if v < 4.0 => font_stretch::T::semi_condensed,
v if v < 5.0 => font_stretch::T::normal,
v if v < 6.0 => font_stretch::T::semi_expanded,
v if v < 7.0 => font_stretch::T::expanded,
v if v < 8.0 => font_stretch::T::extra_expanded,
_ => font_stretch::T::ultra_expanded,
}
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let characters: [UniChar; 1] = [codepoint as UniChar];
let mut glyphs: [CGGlyph; 1] = [0 as CGGlyph];
let count: CFIndex = 1;
let result = self.ctfont.get_glyphs_for_characters(&characters[0],
&mut glyphs[0],
count);
if !result {
// No glyph for this character
return None;
}
assert!(glyphs[0] != 0); // FIXME: error handling
return Some(glyphs[0] as GlyphId);
}
fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
if let Some(ref table) = self.h_kern_subtable {
if let Some(font_units) = table.binary_search(first_glyph, second_glyph) {
return font_units as f64 * table.px_per_font_unit;
}
}
0.0
}
fn can_do_fast_shaping(&self) -> bool {
self.can_do_fast_shaping
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
let glyphs = [glyph as CGGlyph];
let advance = self.ctfont.get_advances_for_glyphs(kCTFontDefaultOrientation,
&glyphs[0],
ptr::null_mut(),
1);
Some(advance as FractionalPixel)
}
fn metrics(&self) -> FontMetrics {
let bounding_rect: CGRect = self.ctfont.bounding_box();
let ascent = self.ctfont.ascent() as f64;
let descent = self.ctfont.descent() as f64;
let em_size = Au::from_f64_px(self.ctfont.pt_size() as f64);
let leading = self.ctfont.leading() as f64;
let scale = px_to_pt(self.ctfont.pt_size() as f64) / (ascent + descent);
let line_gap = (ascent + descent + leading + 0.5).floor();
let max_advance_width = au_from_pt(bounding_rect.size.width as f64);
let average_advance = self.glyph_index('0')
.and_then(|idx| self.glyph_h_advance(idx))
.map(Au::from_f64_px)
.unwrap_or(max_advance_width);
let metrics = FontMetrics {
underline_size: au_from_pt(self.ctfont.underline_thickness() as f64),
// TODO(Issue #201): underline metrics are not reliable. Have to pull out of font table
// directly.
//
// see also: https://bugs.webkit.org/show_bug.cgi?id=16768
// see also: https://bugreports.qt-project.org/browse/QTBUG-13364
underline_offset: au_from_pt(self.ctfont.underline_position() as f64),
strikeout_size: Au(0), // FIXME(Issue #942)
strikeout_offset: Au(0), // FIXME(Issue #942)
leading: au_from_pt(leading),
x_height: au_from_pt((self.ctfont.x_height() as f64) * scale),
em_size: em_size,
ascent: au_from_pt(ascent * scale),
descent: au_from_pt(descent * scale),
max_advance: max_advance_width,
average_advance: average_advance,
line_gap: Au::from_f64_px(line_gap),
};
debug!("Font metrics (@{} pt): {:?}", self.ctfont.pt_size() as f64, metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result: Option<CFData> = self.ctfont.get_font_table(tag);
result.and_then(|data| {
Some(FontTable::wrap(data))
})
}
}
| family_name | identifier_name |
font.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Implementation of Quartz (CoreGraphics) fonts.
extern crate core_foundation;
extern crate core_graphics;
extern crate core_text;
use app_units::Au;
use byteorder::{BigEndian, ByteOrder};
use core_foundation::base::CFIndex;
use core_foundation::data::CFData;
use core_foundation::string::UniChar;
use core_graphics::font::CGGlyph;
use core_graphics::geometry::CGRect;
use core_text::font::CTFont;
use core_text::font_descriptor::{SymbolicTraitAccessors, TraitAccessors};
use core_text::font_descriptor::kCTFontDefaultOrientation;
use font::{FontHandleMethods, FontMetrics, FontTableMethods, FontTableTag, FractionalPixel};
use font::{GPOS, GSUB, KERN};
use platform::font_template::FontTemplateData;
use platform::macos::font_context::FontContextHandle;
use std::{fmt, ptr};
use std::ops::Range;
use std::sync::Arc;
use style::computed_values::{font_stretch, font_weight};
use text::glyph::GlyphId;
const KERN_PAIR_LEN: usize = 6;
pub struct FontTable {
data: CFData,
}
// assumes 72 points per inch, and 96 px per inch
fn px_to_pt(px: f64) -> f64 {
px / 96. * 72.
}
// assumes 72 points per inch, and 96 px per inch
fn pt_to_px(pt: f64) -> f64 {
pt / 72. * 96.
}
fn au_from_pt(pt: f64) -> Au {
Au::from_f64_px(pt_to_px(pt))
}
impl FontTable {
pub fn wrap(data: CFData) -> FontTable {
FontTable { data: data }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
self.data.bytes()
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
ctfont: CTFont,
h_kern_subtable: Option<CachedKernTable>,
can_do_fast_shaping: bool,
}
impl FontHandle {
/// Cache all the data needed for basic horizontal kerning. This is used only as a fallback or
/// fast path (when the GPOS table is missing or unnecessary) so it needn't handle every case.
fn find_h_kern_subtable(&self) -> Option<CachedKernTable> {
let font_table = match self.table_for_tag(KERN) {
Some(table) => table,
None => return None
};
let mut result = CachedKernTable {
font_table: font_table,
pair_data_range: 0..0,
px_per_font_unit: 0.0,
};
// Look for a subtable with horizontal kerning in format 0.
// https://www.microsoft.com/typography/otspec/kern.htm
const KERN_COVERAGE_HORIZONTAL_FORMAT_0: u16 = 1;
const SUBTABLE_HEADER_LEN: usize = 6;
const FORMAT_0_HEADER_LEN: usize = 8;
{
let table = result.font_table.buffer();
let version = BigEndian::read_u16(table);
if version != 0 {
return None;
}
let num_subtables = BigEndian::read_u16(&table[2..]);
let mut start = 4;
for _ in 0..num_subtables {
// TODO: Check the subtable version number?
let len = BigEndian::read_u16(&table[start + 2..]) as usize;
let cov = BigEndian::read_u16(&table[start + 4..]);
let end = start + len;
if cov == KERN_COVERAGE_HORIZONTAL_FORMAT_0 {
// Found a matching subtable.
if result.pair_data_range.len() > 0 {
debug!("Found multiple horizontal kern tables. Disable fast path.");
return None;
}
// Read the subtable header.
let subtable_start = start + SUBTABLE_HEADER_LEN;
let n_pairs = BigEndian::read_u16(&table[subtable_start..]) as usize;
let pair_data_start = subtable_start + FORMAT_0_HEADER_LEN;
result.pair_data_range = pair_data_start..end;
if result.pair_data_range.len() != n_pairs * KERN_PAIR_LEN {
debug!("Bad data in kern header. Disable fast path.");
return None;
}
let pt_per_font_unit = self.ctfont.pt_size() as f64 /
self.ctfont.units_per_em() as f64;
result.px_per_font_unit = pt_to_px(pt_per_font_unit);
}
start = end;
}
}
if result.pair_data_range.len() > 0 {
Some(result)
} else {
None
}
}
}
struct CachedKernTable {
font_table: FontTable,
pair_data_range: Range<usize>,
px_per_font_unit: f64,
}
impl CachedKernTable {
/// Search for a glyph pair in the kern table and return the corresponding value.
fn binary_search(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> Option<i16> {
let pairs = &self.font_table.buffer()[self.pair_data_range.clone()];
let query = first_glyph << 16 | second_glyph;
let (mut start, mut end) = (0, pairs.len() / KERN_PAIR_LEN);
while start < end {
let i = (start + end) / 2;
let key = BigEndian::read_u32(&pairs[i * KERN_PAIR_LEN..]);
if key > query {
end = i;
} else if key < query {
start = i + 1;
} else {
return Some(BigEndian::read_i16(&pairs[i * KERN_PAIR_LEN + 4..]));
}
}
None
}
}
impl fmt::Debug for CachedKernTable {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "CachedKernTable")
}
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>)
-> Result<FontHandle, ()> {
let size = match pt_size {
Some(s) => s.to_f64_px(),
None => 0.0
};
match template.ctfont(size) {
Some(ref ctfont) => {
let mut handle = FontHandle {
font_data: template.clone(),
ctfont: ctfont.clone_with_font_size(size),
h_kern_subtable: None,
can_do_fast_shaping: false,
};
handle.h_kern_subtable = handle.find_h_kern_subtable();
// TODO (#11310): Implement basic support for GPOS and GSUB.
handle.can_do_fast_shaping = handle.h_kern_subtable.is_some() &&
handle.table_for_tag(GPOS).is_none() &&
handle.table_for_tag(GSUB).is_none();
Ok(handle)
}
None => {
Err(())
}
}
} |
fn family_name(&self) -> String {
self.ctfont.family_name()
}
fn face_name(&self) -> String {
self.ctfont.face_name()
}
fn is_italic(&self) -> bool {
self.ctfont.symbolic_traits().is_italic()
}
fn boldness(&self) -> font_weight::T {
let normalized = self.ctfont.all_traits().normalized_weight(); // [-1.0, 1.0]
let normalized = if normalized <= 0.0 {
4.0 + normalized * 3.0 // [1.0, 4.0]
} else {
4.0 + normalized * 5.0 // [4.0, 9.0]
}; // [1.0, 9.0], centered on 4.0
match normalized.round() as u32 {
1 => font_weight::T::Weight100,
2 => font_weight::T::Weight200,
3 => font_weight::T::Weight300,
4 => font_weight::T::Weight400,
5 => font_weight::T::Weight500,
6 => font_weight::T::Weight600,
7 => font_weight::T::Weight700,
8 => font_weight::T::Weight800,
_ => font_weight::T::Weight900,
}
}
fn stretchiness(&self) -> font_stretch::T {
let normalized = self.ctfont.all_traits().normalized_width(); // [-1.0, 1.0]
let normalized = (normalized + 1.0) / 2.0 * 9.0; // [0.0, 9.0]
match normalized {
v if v < 1.0 => font_stretch::T::ultra_condensed,
v if v < 2.0 => font_stretch::T::extra_condensed,
v if v < 3.0 => font_stretch::T::condensed,
v if v < 4.0 => font_stretch::T::semi_condensed,
v if v < 5.0 => font_stretch::T::normal,
v if v < 6.0 => font_stretch::T::semi_expanded,
v if v < 7.0 => font_stretch::T::expanded,
v if v < 8.0 => font_stretch::T::extra_expanded,
_ => font_stretch::T::ultra_expanded,
}
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let characters: [UniChar; 1] = [codepoint as UniChar];
let mut glyphs: [CGGlyph; 1] = [0 as CGGlyph];
let count: CFIndex = 1;
let result = self.ctfont.get_glyphs_for_characters(&characters[0],
&mut glyphs[0],
count);
if !result {
// No glyph for this character
return None;
}
assert!(glyphs[0] != 0); // FIXME: error handling
return Some(glyphs[0] as GlyphId);
}
fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
if let Some(ref table) = self.h_kern_subtable {
if let Some(font_units) = table.binary_search(first_glyph, second_glyph) {
return font_units as f64 * table.px_per_font_unit;
}
}
0.0
}
fn can_do_fast_shaping(&self) -> bool {
self.can_do_fast_shaping
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
let glyphs = [glyph as CGGlyph];
let advance = self.ctfont.get_advances_for_glyphs(kCTFontDefaultOrientation,
&glyphs[0],
ptr::null_mut(),
1);
Some(advance as FractionalPixel)
}
fn metrics(&self) -> FontMetrics {
let bounding_rect: CGRect = self.ctfont.bounding_box();
let ascent = self.ctfont.ascent() as f64;
let descent = self.ctfont.descent() as f64;
let em_size = Au::from_f64_px(self.ctfont.pt_size() as f64);
let leading = self.ctfont.leading() as f64;
let scale = px_to_pt(self.ctfont.pt_size() as f64) / (ascent + descent);
let line_gap = (ascent + descent + leading + 0.5).floor();
let max_advance_width = au_from_pt(bounding_rect.size.width as f64);
let average_advance = self.glyph_index('0')
.and_then(|idx| self.glyph_h_advance(idx))
.map(Au::from_f64_px)
.unwrap_or(max_advance_width);
let metrics = FontMetrics {
underline_size: au_from_pt(self.ctfont.underline_thickness() as f64),
// TODO(Issue #201): underline metrics are not reliable. Have to pull out of font table
// directly.
//
// see also: https://bugs.webkit.org/show_bug.cgi?id=16768
// see also: https://bugreports.qt-project.org/browse/QTBUG-13364
underline_offset: au_from_pt(self.ctfont.underline_position() as f64),
strikeout_size: Au(0), // FIXME(Issue #942)
strikeout_offset: Au(0), // FIXME(Issue #942)
leading: au_from_pt(leading),
x_height: au_from_pt((self.ctfont.x_height() as f64) * scale),
em_size: em_size,
ascent: au_from_pt(ascent * scale),
descent: au_from_pt(descent * scale),
max_advance: max_advance_width,
average_advance: average_advance,
line_gap: Au::from_f64_px(line_gap),
};
debug!("Font metrics (@{} pt): {:?}", self.ctfont.pt_size() as f64, metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result: Option<CFData> = self.ctfont.get_font_table(tag);
result.and_then(|data| {
Some(FontTable::wrap(data))
})
}
} |
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
} | random_line_split |
conf.py | # -*- coding: utf-8 -*-
#
# partpy documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 16 18:56:06 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the | sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'partpy'
copyright = u'2013, Taylor "Nekroze" Lawson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'partpydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'partpy.tex', u'partpy Documentation',
u'Taylor "Nekroze" Lawson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'partpy', u'partpy Documentation',
[u'Taylor "Nekroze" Lawson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'partpy', u'partpy Documentation',
u'Taylor "Nekroze" Lawson', 'partpy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'partpy'
epub_author = u'Taylor "Nekroze" Lawson'
epub_publisher = u'Taylor "Nekroze" Lawson'
epub_copyright = u'2013, Taylor "Nekroze" Lawson'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True | # documentation root, use os.path.abspath to make it absolute, like shown here. | random_line_split |
logger.js | "use strict";
var levels = require('./levels')
, util = require('util')
, events = require('events')
, DEFAULT_CATEGORY = '[default]';
var logWritesEnabled = true;
/**
* Models a logging event.
* @constructor
* @param {String} categoryName name of category
* @param {Log4js.Level} level level of message
* @param {Array} data objects to log
* @param {Log4js.Logger} logger the associated logger
* @author Seth Chisamore
*/
function LoggingEvent (categoryName, level, data, logger) {
this.startTime = new Date();
this.categoryName = categoryName;
this.data = data;
this.level = level;
this.logger = logger;
}
/**
* Logger to log messages.
* use {@see Log4js#getLogger(String)} to get an instance.
* @constructor
* @param name name of category to log to
* @author Stephan Strittmatter
*/
function Logger (name, level) {
this.category = name || DEFAULT_CATEGORY;
if (level) {
this.setLevel(level);
}
}
util.inherits(Logger, events.EventEmitter);
Logger.DEFAULT_CATEGORY = DEFAULT_CATEGORY;
Logger.prototype.level = levels.TRACE;
Logger.prototype.setLevel = function(level) {
this.level = levels.toLevel(level, this.level || levels.TRACE);
};
Logger.prototype.removeLevel = function() {
delete this.level;
};
Logger.prototype.log = function() {
var logLevel = levels.toLevel(arguments[0], levels.INFO);
if (!this.isLevelEnabled(logLevel)) {
return;
}
var numArgs = arguments.length - 1;
var args = new Array(numArgs);
for (var i = 0; i < numArgs; i++) {
args[i] = arguments[i + 1];
}
this._log(logLevel, args);
};
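// Illustrative usage of the generic log() entry point (message values are
// examples): the first argument names the level and the remaining arguments
// become the event's data array.
//
//     logger.log('ERROR', 'request failed', { status: 500 });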
Logger.prototype.isLevelEnabled = function(otherLevel) {
return this.level.isLessThanOrEqualTo(otherLevel);
}; | );
function addLevelMethods(level) {
level = levels.toLevel(level);
var levelStrLower = level.toString().toLowerCase();
var levelMethod = levelStrLower.replace(/_([a-z])/g, function(g) { return g[1].toUpperCase(); } );
var isLevelMethod = levelMethod[0].toUpperCase() + levelMethod.slice(1);
Logger.prototype['is'+isLevelMethod+'Enabled'] = function() {
return this.isLevelEnabled(level.toString());
};
Logger.prototype[levelMethod] = function () {
if (logWritesEnabled && this.isLevelEnabled(level)) {
var numArgs = arguments.length;
var args = new Array(numArgs);
for (var i = 0; i < numArgs; i++) {
args[i] = arguments[i];
}
this._log(level, args);
}
};
}
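// Illustrative sketch of the methods generated above (the category name is an
// example): for the built-in level 'INFO' this defines Logger.prototype.info
// and Logger.prototype.isInfoEnabled, so:
//
//     var logger = new Logger('app');
//     logger.setLevel('WARN');
//     logger.isInfoEnabled();   // false: INFO is below the WARN threshold
//     logger.info('skipped');   // not emitted
//     logger.warn('written');   // emits a 'log' event with a LoggingEvent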
Logger.prototype._log = function(level, data) {
var loggingEvent = new LoggingEvent(this.category, level, data, this);
this.emit('log', loggingEvent);
};
/**
* Disable all log writes.
* @returns {void}
*/
function disableAllLogWrites() {
logWritesEnabled = false;
}
/**
* Enable log writes.
* @returns {void}
*/
function enableAllLogWrites() {
logWritesEnabled = true;
}
exports.LoggingEvent = LoggingEvent;
exports.Logger = Logger;
exports.disableAllLogWrites = disableAllLogWrites;
exports.enableAllLogWrites = enableAllLogWrites;
exports.addLevelMethods = addLevelMethods; |
['Trace','Debug','Info','Warn','Error','Fatal', 'Mark'].forEach(
function(levelString) {
addLevelMethods(levelString);
} | random_line_split |
logger.js | "use strict";
var levels = require('./levels')
, util = require('util')
, events = require('events')
, DEFAULT_CATEGORY = '[default]';
var logWritesEnabled = true;
/**
* Models a logging event.
* @constructor
* @param {String} categoryName name of category
* @param {Log4js.Level} level level of message
* @param {Array} data objects to log
* @param {Log4js.Logger} logger the associated logger
* @author Seth Chisamore
*/
function LoggingEvent (categoryName, level, data, logger) {
this.startTime = new Date();
this.categoryName = categoryName;
this.data = data;
this.level = level;
this.logger = logger;
}
/**
* Logger to log messages.
* use {@see Log4js#getLogger(String)} to get an instance.
* @constructor
* @param name name of category to log to
* @author Stephan Strittmatter
*/
function Logger (name, level) {
this.category = name || DEFAULT_CATEGORY;
if (level) {
this.setLevel(level);
}
}
util.inherits(Logger, events.EventEmitter);
Logger.DEFAULT_CATEGORY = DEFAULT_CATEGORY;
Logger.prototype.level = levels.TRACE;
Logger.prototype.setLevel = function(level) {
this.level = levels.toLevel(level, this.level || levels.TRACE);
};
Logger.prototype.removeLevel = function() {
delete this.level;
};
Logger.prototype.log = function() {
var logLevel = levels.toLevel(arguments[0], levels.INFO);
if (!this.isLevelEnabled(logLevel)) {
return;
}
var numArgs = arguments.length - 1;
var args = new Array(numArgs);
for (var i = 0; i < numArgs; i++) {
args[i] = arguments[i + 1];
}
this._log(logLevel, args);
};
Logger.prototype.isLevelEnabled = function(otherLevel) {
return this.level.isLessThanOrEqualTo(otherLevel);
};
['Trace','Debug','Info','Warn','Error','Fatal', 'Mark'].forEach(
function(levelString) {
addLevelMethods(levelString);
}
);
function | (level) {
level = levels.toLevel(level);
var levelStrLower = level.toString().toLowerCase();
var levelMethod = levelStrLower.replace(/_([a-z])/g, function(g) { return g[1].toUpperCase(); } );
var isLevelMethod = levelMethod[0].toUpperCase() + levelMethod.slice(1);
Logger.prototype['is'+isLevelMethod+'Enabled'] = function() {
return this.isLevelEnabled(level.toString());
};
Logger.prototype[levelMethod] = function () {
if (logWritesEnabled && this.isLevelEnabled(level)) {
var numArgs = arguments.length;
var args = new Array(numArgs);
for (var i = 0; i < numArgs; i++) {
args[i] = arguments[i];
}
this._log(level, args);
}
};
}
Logger.prototype._log = function(level, data) {
var loggingEvent = new LoggingEvent(this.category, level, data, this);
this.emit('log', loggingEvent);
};
/**
* Disable all log writes.
* @returns {void}
*/
function disableAllLogWrites() {
logWritesEnabled = false;
}
/**
* Enable log writes.
* @returns {void}
*/
function enableAllLogWrites() {
logWritesEnabled = true;
}
exports.LoggingEvent = LoggingEvent;
exports.Logger = Logger;
exports.disableAllLogWrites = disableAllLogWrites;
exports.enableAllLogWrites = enableAllLogWrites;
exports.addLevelMethods = addLevelMethods; | addLevelMethods | identifier_name |
logger.js | "use strict";
var levels = require('./levels')
, util = require('util')
, events = require('events')
, DEFAULT_CATEGORY = '[default]';
var logWritesEnabled = true;
/**
* Models a logging event.
* @constructor
* @param {String} categoryName name of category
* @param {Log4js.Level} level level of message
* @param {Array} data objects to log
* @param {Log4js.Logger} logger the associated logger
* @author Seth Chisamore
*/
function LoggingEvent (categoryName, level, data, logger) {
this.startTime = new Date();
this.categoryName = categoryName;
this.data = data;
this.level = level;
this.logger = logger;
}
/**
* Logger to log messages.
* use {@see Log4js#getLogger(String)} to get an instance.
* @constructor
* @param name name of category to log to
* @author Stephan Strittmatter
*/
function Logger (name, level) {
this.category = name || DEFAULT_CATEGORY;
if (level) {
this.setLevel(level);
}
}
util.inherits(Logger, events.EventEmitter);
Logger.DEFAULT_CATEGORY = DEFAULT_CATEGORY;
Logger.prototype.level = levels.TRACE;
Logger.prototype.setLevel = function(level) {
this.level = levels.toLevel(level, this.level || levels.TRACE);
};
Logger.prototype.removeLevel = function() {
delete this.level;
};
Logger.prototype.log = function() {
var logLevel = levels.toLevel(arguments[0], levels.INFO);
if (!this.isLevelEnabled(logLevel)) {
return;
}
var numArgs = arguments.length - 1;
var args = new Array(numArgs);
for (var i = 0; i < numArgs; i++) {
args[i] = arguments[i + 1];
}
this._log(logLevel, args);
};
Logger.prototype.isLevelEnabled = function(otherLevel) {
return this.level.isLessThanOrEqualTo(otherLevel);
};
['Trace','Debug','Info','Warn','Error','Fatal', 'Mark'].forEach(
function(levelString) {
addLevelMethods(levelString);
}
);
function addLevelMethods(level) {
level = levels.toLevel(level);
var levelStrLower = level.toString().toLowerCase();
var levelMethod = levelStrLower.replace(/_([a-z])/g, function(g) { return g[1].toUpperCase(); } );
var isLevelMethod = levelMethod[0].toUpperCase() + levelMethod.slice(1);
Logger.prototype['is'+isLevelMethod+'Enabled'] = function() {
return this.isLevelEnabled(level.toString());
};
Logger.prototype[levelMethod] = function () {
if (logWritesEnabled && this.isLevelEnabled(level)) {
var numArgs = arguments.length;
var args = new Array(numArgs);
for (var i = 0; i < numArgs; i++) |
this._log(level, args);
}
};
}
Logger.prototype._log = function(level, data) {
var loggingEvent = new LoggingEvent(this.category, level, data, this);
this.emit('log', loggingEvent);
};
/**
* Disable all log writes.
* @returns {void}
*/
function disableAllLogWrites() {
logWritesEnabled = false;
}
/**
* Enable log writes.
* @returns {void}
*/
function enableAllLogWrites() {
logWritesEnabled = true;
}
exports.LoggingEvent = LoggingEvent;
exports.Logger = Logger;
exports.disableAllLogWrites = disableAllLogWrites;
exports.enableAllLogWrites = enableAllLogWrites;
exports.addLevelMethods = addLevelMethods; | {
args[i] = arguments[i];
} | conditional_block |
logger.js | "use strict";
var levels = require('./levels')
, util = require('util')
, events = require('events')
, DEFAULT_CATEGORY = '[default]';
var logWritesEnabled = true;
/**
* Models a logging event.
* @constructor
* @param {String} categoryName name of category
* @param {Log4js.Level} level level of message
* @param {Array} data objects to log
* @param {Log4js.Logger} logger the associated logger
* @author Seth Chisamore
*/
function LoggingEvent (categoryName, level, data, logger) {
this.startTime = new Date();
this.categoryName = categoryName;
this.data = data;
this.level = level;
this.logger = logger;
}
/**
* Logger to log messages.
* use {@see Log4js#getLogger(String)} to get an instance.
* @constructor
* @param name name of category to log to
* @author Stephan Strittmatter
*/
function Logger (name, level) {
this.category = name || DEFAULT_CATEGORY;
if (level) {
this.setLevel(level);
}
}
util.inherits(Logger, events.EventEmitter);
Logger.DEFAULT_CATEGORY = DEFAULT_CATEGORY;
Logger.prototype.level = levels.TRACE;
Logger.prototype.setLevel = function(level) {
this.level = levels.toLevel(level, this.level || levels.TRACE);
};
Logger.prototype.removeLevel = function() {
delete this.level;
};
Logger.prototype.log = function() {
var logLevel = levels.toLevel(arguments[0], levels.INFO);
if (!this.isLevelEnabled(logLevel)) {
return;
}
var numArgs = arguments.length - 1;
var args = new Array(numArgs);
for (var i = 0; i < numArgs; i++) {
args[i] = arguments[i + 1];
}
this._log(logLevel, args);
};
Logger.prototype.isLevelEnabled = function(otherLevel) {
return this.level.isLessThanOrEqualTo(otherLevel);
};
['Trace','Debug','Info','Warn','Error','Fatal', 'Mark'].forEach(
function(levelString) {
addLevelMethods(levelString);
}
);
function addLevelMethods(level) |
Logger.prototype._log = function(level, data) {
var loggingEvent = new LoggingEvent(this.category, level, data, this);
this.emit('log', loggingEvent);
};
/**
* Disable all log writes.
* @returns {void}
*/
function disableAllLogWrites() {
logWritesEnabled = false;
}
/**
* Enable log writes.
* @returns {void}
*/
function enableAllLogWrites() {
logWritesEnabled = true;
}
exports.LoggingEvent = LoggingEvent;
exports.Logger = Logger;
exports.disableAllLogWrites = disableAllLogWrites;
exports.enableAllLogWrites = enableAllLogWrites;
exports.addLevelMethods = addLevelMethods; | {
level = levels.toLevel(level);
var levelStrLower = level.toString().toLowerCase();
var levelMethod = levelStrLower.replace(/_([a-z])/g, function(g) { return g[1].toUpperCase(); } );
var isLevelMethod = levelMethod[0].toUpperCase() + levelMethod.slice(1);
Logger.prototype['is'+isLevelMethod+'Enabled'] = function() {
return this.isLevelEnabled(level.toString());
};
Logger.prototype[levelMethod] = function () {
if (logWritesEnabled && this.isLevelEnabled(level)) {
var numArgs = arguments.length;
var args = new Array(numArgs);
for (var i = 0; i < numArgs; i++) {
args[i] = arguments[i];
}
this._log(level, args);
}
};
} | identifier_body |
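// Illustrative usage of the Logger module above (not part of the original
// file); the require path and the console "appender" are assumptions made
// for this sketch.
// var logging = require('./logger');
// var logger = new logging.Logger('app', 'INFO');
// logger.on('log', function (event) {
//   // minimal appender: print every LoggingEvent to stdout
//   console.log(event.startTime.toISOString(), String(event.level),
//     event.categoryName, event.data.join(' '));
// });
// logger.info('listening on port', 8080); // emitted: INFO is enabled for this logger
// logging.disableAllLogWrites();          // after this, logger.info() becomes a no-op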
merge_java_srcs.py | #!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import re
import sys
import shutil
def DoCopy(path, target_path):
if os.path.isfile(path):
package = ''
package_re = re.compile(
'^package (?P<package>([a-zA-Z0-9_]+.)*[a-zA-Z0-9_]+);$')
for line in open(path).readlines():
match = package_re.match(line)
if match:
package = match.group('package')
break
sub_path = os.path.sep.join(package.split('.'))
shutil.copy(path, os.path.join(target_path, sub_path))
return
for dirpath, _, files in os.walk(path):
if not files:
continue
sub_path = os.path.relpath(dirpath, path)
target_dirpath = os.path.join(target_path, sub_path)
if not os.path.isdir(target_dirpath):
os.makedirs(target_dirpath)
for f in files:
fpath = os.path.join(dirpath, f)
# "interface type;" is invalid for normal android project,
# It's only for chromium's build system, ignore these aidl files.
if f.endswith('.aidl'):
invalid_lines = []
for line in open(fpath).readlines():
if re.match('^interface .*;$', line):
invalid_lines.append(line)
if invalid_lines:
continue
elif not f.endswith('.java'):
continue
shutil.copy(fpath, target_dirpath)
def main():
parser = optparse.OptionParser()
info = ('The java source dirs to merge.')
parser.add_option('--dirs', help=info)
info = ('The target to place all the sources.')
parser.add_option('--target-path', help=info)
options, _ = parser.parse_args()
if os.path.isdir(options.target_path):
shutil.rmtree(options.target_path)
os.makedirs(options.target_path)
for path in options.dirs.split(' '):
if path.startswith('"') and path.endswith('"'):
path = eval(path)
DoCopy(path, options.target_path)
|
if __name__ == '__main__':
sys.exit(main()) | random_line_split |
|
merge_java_srcs.py | #!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import re
import sys
import shutil
def | (path, target_path):
if os.path.isfile(path):
package = ''
package_re = re.compile(
'^package (?P<package>([a-zA-Z0-9_]+.)*[a-zA-Z0-9_]+);$')
for line in open(path).readlines():
match = package_re.match(line)
if match:
package = match.group('package')
break
sub_path = os.path.sep.join(package.split('.'))
shutil.copy(path, os.path.join(target_path, sub_path))
return
for dirpath, _, files in os.walk(path):
if not files:
continue
sub_path = os.path.relpath(dirpath, path)
target_dirpath = os.path.join(target_path, sub_path)
if not os.path.isdir(target_dirpath):
os.makedirs(target_dirpath)
for f in files:
fpath = os.path.join(dirpath, f)
# "interface type;" is invalid for normal android project,
# It's only for chromium's build system, ignore these aidl files.
if f.endswith('.aidl'):
invalid_lines = []
for line in open(fpath).readlines():
if re.match('^interface .*;$', line):
invalid_lines.append(line)
if invalid_lines:
continue
elif not f.endswith('.java'):
continue
shutil.copy(fpath, target_dirpath)
def main():
parser = optparse.OptionParser()
info = ('The java source dirs to merge.')
parser.add_option('--dirs', help=info)
info = ('The target to place all the sources.')
parser.add_option('--target-path', help=info)
options, _ = parser.parse_args()
if os.path.isdir(options.target_path):
shutil.rmtree(options.target_path)
os.makedirs(options.target_path)
for path in options.dirs.split(' '):
if path.startswith('"') and path.endswith('"'):
path = eval(path)
DoCopy(path, options.target_path)
if __name__ == '__main__':
sys.exit(main())
| DoCopy | identifier_name |
merge_java_srcs.py | #!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import re
import sys
import shutil
def DoCopy(path, target_path):
if os.path.isfile(path):
package = ''
package_re = re.compile(
'^package (?P<package>([a-zA-Z0-9_]+.)*[a-zA-Z0-9_]+);$')
for line in open(path).readlines():
match = package_re.match(line)
if match:
package = match.group('package')
break
sub_path = os.path.sep.join(package.split('.'))
shutil.copy(path, os.path.join(target_path, sub_path))
return
for dirpath, _, files in os.walk(path):
if not files:
continue
sub_path = os.path.relpath(dirpath, path)
target_dirpath = os.path.join(target_path, sub_path)
if not os.path.isdir(target_dirpath):
os.makedirs(target_dirpath)
for f in files:
fpath = os.path.join(dirpath, f)
# "interface type;" is invalid for normal android project,
# It's only for chromium's build system, ignore these aidl files.
if f.endswith('.aidl'):
invalid_lines = []
for line in open(fpath).readlines():
if re.match('^interface .*;$', line):
|
if invalid_lines:
continue
elif not f.endswith('.java'):
continue
shutil.copy(fpath, target_dirpath)
def main():
parser = optparse.OptionParser()
info = ('The java source dirs to merge.')
parser.add_option('--dirs', help=info)
info = ('The target to place all the sources.')
parser.add_option('--target-path', help=info)
options, _ = parser.parse_args()
if os.path.isdir(options.target_path):
shutil.rmtree(options.target_path)
os.makedirs(options.target_path)
for path in options.dirs.split(' '):
if path.startswith('"') and path.endswith('"'):
path = eval(path)
DoCopy(path, options.target_path)
if __name__ == '__main__':
sys.exit(main())
| invalid_lines.append(line) | conditional_block |
merge_java_srcs.py | #!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import re
import sys
import shutil
def DoCopy(path, target_path):
if os.path.isfile(path):
package = ''
package_re = re.compile(
'^package (?P<package>([a-zA-Z0-9_]+.)*[a-zA-Z0-9_]+);$')
for line in open(path).readlines():
match = package_re.match(line)
if match:
package = match.group('package')
break
sub_path = os.path.sep.join(package.split('.'))
shutil.copy(path, os.path.join(target_path, sub_path))
return
for dirpath, _, files in os.walk(path):
if not files:
continue
sub_path = os.path.relpath(dirpath, path)
target_dirpath = os.path.join(target_path, sub_path)
if not os.path.isdir(target_dirpath):
os.makedirs(target_dirpath)
for f in files:
fpath = os.path.join(dirpath, f)
# "interface type;" is invalid for normal android project,
# It's only for chromium's build system, ignore these aidl files.
if f.endswith('.aidl'):
invalid_lines = []
for line in open(fpath).readlines():
if re.match('^interface .*;$', line):
invalid_lines.append(line)
if invalid_lines:
continue
elif not f.endswith('.java'):
continue
shutil.copy(fpath, target_dirpath)
def main():
|
if __name__ == '__main__':
sys.exit(main())
| parser = optparse.OptionParser()
info = ('The java source dirs to merge.')
parser.add_option('--dirs', help=info)
info = ('The target to place all the sources.')
parser.add_option('--target-path', help=info)
options, _ = parser.parse_args()
if os.path.isdir(options.target_path):
shutil.rmtree(options.target_path)
os.makedirs(options.target_path)
for path in options.dirs.split(' '):
if path.startswith('"') and path.endswith('"'):
path = eval(path)
DoCopy(path, options.target_path) | identifier_body |
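# Illustrative invocation of this script (not part of the original file);
# the paths below are hypothetical placeholders normally supplied by the
# build system:
#
#   python merge_java_srcs.py \
#       --dirs "src/android/java gen/java_sources" \
#       --target-path out/Release/merged_java_srcs
#
# Each listed directory is walked and its .java files (plus .aidl files
# without bare "interface ...;" declarations) are copied into the same
# relative layout under --target-path.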
shifter.js | /*
* Copyright (c) 2013, Yahoo! Inc. All rights reserved.
* Copyrights licensed under the New BSD License.
* See the accompanying LICENSE file for terms.
*/
/*jslint node: true, nomen: true */
/**
The `express-yui.shifter` extension exposes a set of utilities to build yui modules
from *.js or build.json files.
@module yui
@submodule shifter
**/
'use strict';
var libfs = require('fs'),
libmkdirp = require('mkdirp'),
libpath = require('path'),
vm = require('vm'),
shifter = require('shifter'),
contextForRunInContext = vm.createContext({
require: null,
module: null,
console: null,
window: null,
document: null
}),
utils = require('./utils'),
debug = require('debug')('locator:yui:shifter');
/**
The `express-yui.shifter` extension exposes a locator plugin to build yui modules
from *.js or build.json files.
Here is an example:
var plugin = app.yui.locatorShifter({});
You can also specify a custom yui build directory, by doing:
var plugin = app.yui.locatorShifter({
yuiBuildDirectory: '/path/to/folder'
});
@class shifter
@static
@uses *path, *fs, *module, *vm, win-spawn, shifter, loader
@extensionfor yui
*/
module.exports = {
/**
Shift yui modules using shifter cli.
@method shiftFiles
@public
@param {array} files filesystem paths for all files to be shifted
@param {object} options configuration
@param {string} options.buildDir custom path for the output of the shifter
@param {boolean} options.cache whether or not we should apply cache to speed up
the shifting process. If true, it will create the folder `.cache` and generate
some hash to prevent shifting the same *.js files if there is no change in
the source.
@param {array} options.args shifter cli build arguments, it defaults to `[]`
@param {function} callback the callback method to signal the end of the operation
**/
shiftFiles: function (files, options, callback) {
var self = this,
queue = [].concat(files);
if (utils.productionMode) {
debug('skipping shifter in production environments.');
if (callback) { callback(null); }
return;
}
options = options || {};
function next() |
next(); // kick off the queue process
},
/**
Analyze a build.json file to extract all the important metadata associated with it.
@method _checkBuildFile
@protected
@param {string} file The filesystem path for the build.json file to be analyzed
@return {object} The parsed and augmented content of the build.json file
**/
_checkBuildFile: function (file) {
var mod,
entry,
metas = libpath.join(libpath.dirname(file), 'meta'),
files,
i,
j,
f;
try {
mod = JSON.parse(libfs.readFileSync(file, 'utf8'));
} catch (e1) {
console.error('Failed to parse build file: ' + file);
console.error(e1);
return;
}
if (!mod.builds) {
console.error('Invalid meta file: ' + file);
return;
}
mod.buildfile = file;
if (libfs.existsSync(metas)) {
files = libfs.readdirSync(metas);
for (i = 0; i < files.length; i += 1) {
f = files[i];
if (libpath.extname(f) === '.json') {
try {
entry = JSON.parse(libfs.readFileSync(libpath.join(metas, f), 'utf8'));
} catch (e2) {
console.error('Failed to parse meta file: ' + f);
console.error(e2);
return;
}
for (j in entry) {
if (entry.hasOwnProperty(j)) {
mod.builds[j] = mod.builds[j] || {};
mod.builds[j].config = entry[j];
// setting the proper filename for test if needed
if (entry[j] && entry[j].condition && entry[j].condition.test &&
libpath.extname(entry[j].condition.test) === '.js') {
entry[j].condition.test = libpath.join(metas, entry[j].condition.test);
}
}
}
}
}
}
return mod;
},
/**
Analyze a javascript file, if it is a yui module, it extracts all the important metadata
associated with it.
@method _checkYUIModule
@protected
@param {string} file The filesystem path for the yui module to be analyzed
@return {object} The parsed and augmented metadata from the yui module
**/
_checkYUIModule: function (file) {
var mod;
contextForRunInContext.YUI = {
add: function (name, fn, version, config) {
if (!mod) {
mod = {
name: name,
buildfile: file,
builds: {}
};
}
mod.builds[name] = {
name: name,
config: config || {}
};
// detecting affinity from the filename
if (file.indexOf('.server.js') === file.length - 10) {
mod.builds[name].config.affinity = 'server';
}
if (file.indexOf('.client.js') === file.length - 10) {
mod.builds[name].config.affinity = 'client';
}
}
};
try {
vm.runInContext(libfs.readFileSync(file, 'utf8'), contextForRunInContext, file);
} catch (e) {
return;
}
return mod;
},
/**
Verifies if a source file was already processed by analyzing its content against an
internal cache mechanism. JSON files (*.json) are an exception, and they will not be
cached since they might includes other files that might change and affects the result
of the build so we can't rely on the source file alone. If the file is not in cache,
it will be included automatically.
Why? This method is just an artifact to avoid spawning a process to execute shifter, which
is very expensive. It is also the main artifact to avoid shifting files when in production,
if the build process includes the build folder, especially because manhattan does not
support spawn. Finally, it is just a noop artifact to avoid calling shifter, it does not
need to cache the response of the shifter process, just opt out for the next call to shift
the same file with the same content.
@method _isCached
@protected
@param {string} file The filesystem path for the file to be cached
@param {string} buildDir The filesystem path for the build folder
@return {boolean} `true` if the file and its content matches the internal cache, otherwise `false`.
**/
_isCached: function (file, buildDir) {
var fileHash,
data;
if (libpath.extname(file) !== '.json') {
fileHash = libpath.join(buildDir, '.cache', utils.md5(file));
data = libfs.readFileSync(file, 'utf8');
if (libfs.existsSync(fileHash) && (libfs.readFileSync(fileHash, 'utf8') === data)) {
return true;
}
libmkdirp.sync(libpath.join(buildDir, '.cache'));
libfs.writeFileSync(fileHash, data, 'utf8');
}
return false;
},
/**
Removes the cache entry for a particular file.
Why? This method is just an artifact to invalidate the cache entry created by
`_isCached` when a shifter error is detected because the cache entry is prematurely
created before spawning to shifter.
@method _clearCached
@protected
@param {string} file The filesystem path for the file to be cached
@param {string} buildDir The filesystem path for the build folder
**/
_clearCached: function (file, buildDir) {
var fileHash;
fileHash = libpath.join(buildDir, '.cache', utils.md5(file));
if (libfs.existsSync(fileHash)) {
libfs.unlinkSync(fileHash, 'utf8');
}
}
};
| {
var file = queue.shift(),
opts = utils.extend({}, options.opts);
if (file) {
debug('shifting ' + file);
if (options.symlink && self._isLinked(file, options.buildDir)) {
next();
return;
}
if (options.cache && self._isCached(file, options.buildDir)) {
next();
return;
}
opts.cwd = libpath.dirname(file);
opts["build-dir"] = options.buildDir;
if (libpath.extname(file) === '.js') {
opts['yui-module'] = file;
} else {
opts.config = file;
}
shifter.add(opts, function (err) {
if (err) {
if (options.cache) {
// invalidating the cache entry
self._clearCached(file, options.buildDir);
}
callback(new Error(file + ": shifter compiler error: " + err));
return;
}
next(); // next item in queue to be processed
});
} else {
if (callback) {
callback(null);
}
}
} | identifier_body |
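// Illustrative call of shiftFiles (not part of the original module); the
// file list, build directory and options are assumptions for this sketch.
// var shifter = require('./shifter');
// shifter.shiftFiles(
//     ['/app/node_modules/foo/build.json', '/app/src/bar.js'],
//     { buildDir: '/app/build', cache: true, opts: {} },
//     function (err) {
//         if (err) { console.error('shifting failed:', err); }
//     }
// );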
shifter.js | /*
* Copyright (c) 2013, Yahoo! Inc. All rights reserved.
* Copyrights licensed under the New BSD License.
* See the accompanying LICENSE file for terms.
*/
/*jslint node: true, nomen: true */
/**
The `express-yui.shifter` extension exposes a set of utilities to build yui modules
from *.js or build.json files.
@module yui
@submodule shifter
**/
'use strict';
var libfs = require('fs'),
libmkdirp = require('mkdirp'),
libpath = require('path'),
vm = require('vm'),
shifter = require('shifter'),
contextForRunInContext = vm.createContext({
require: null,
module: null,
console: null,
window: null,
document: null
}),
utils = require('./utils'),
debug = require('debug')('locator:yui:shifter');
/**
The `express-yui.shifter` extension exposes a locator plugin to build yui modules
from *.js or build.json files.
Here is an example:
var plugin = app.yui.locatorShifter({});
You can also specify a custom yui build directory, by doing:
var plugin = app.yui.locatorShifter({
yuiBuildDirectory: '/path/to/folder'
});
@class shifter
@static
@uses *path, *fs, *module, *vm, win-spawn, shifter, loader
@extensionfor yui
*/
module.exports = {
/**
Shift yui modules using shifter cli.
@method shiftFiles
@public
@param {array} files filesystem paths for all files to be shifted
@param {object} options configuration
@param {string} options.buildDir custom path for the output of the shifter
@param {boolean} options.cache whether or not we should apply cache to speed up
the shifting process. If true, it will create the folder `.cache` and generate
some hash to prevent shifting the same *.js files if there is no change in
the source.
@param {array} options.args shifter cli build arguments, it defaults to `[]`
@param {function} callback the callback method to signal the end of the operation
**/
shiftFiles: function (files, options, callback) {
var self = this,
queue = [].concat(files);
if (utils.productionMode) {
debug('skipping shifter in production environments.');
if (callback) { callback(null); }
return;
}
options = options || {};
function next() {
var file = queue.shift(),
opts = utils.extend({}, options.opts);
if (file) {
debug('shifting ' + file);
if (options.symlink && self._isLinked(file, options.buildDir)) {
next();
return;
}
if (options.cache && self._isCached(file, options.buildDir)) {
next();
return;
}
opts.cwd = libpath.dirname(file);
opts["build-dir"] = options.buildDir;
if (libpath.extname(file) === '.js') {
opts['yui-module'] = file;
} else {
opts.config = file;
}
shifter.add(opts, function (err) {
if (err) {
if (options.cache) { | self._clearCached(file, options.buildDir);
}
callback(new Error(file + ": shifter compiler error: " + err));
return;
}
next(); // next item in queue to be processed
});
} else {
if (callback) {
callback(null);
}
}
}
next(); // kick off the queue process
},
/**
Analyze a build.json file to extract all the important metadata associated with it.
@method _checkBuildFile
@protected
@param {string} file The filesystem path for the build.json file to be analyzed
@return {object} The parsed and augmented content of the build.json file
**/
_checkBuildFile: function (file) {
var mod,
entry,
metas = libpath.join(libpath.dirname(file), 'meta'),
files,
i,
j,
f;
try {
mod = JSON.parse(libfs.readFileSync(file, 'utf8'));
} catch (e1) {
console.error('Failed to parse build file: ' + file);
console.error(e1);
return;
}
if (!mod.builds) {
console.error('Invalid meta file: ' + file);
return;
}
mod.buildfile = file;
if (libfs.existsSync(metas)) {
files = libfs.readdirSync(metas);
for (i = 0; i < files.length; i += 1) {
f = files[i];
if (libpath.extname(f) === '.json') {
try {
entry = JSON.parse(libfs.readFileSync(libpath.join(metas, f), 'utf8'));
} catch (e2) {
console.error('Failed to parse meta file: ' + f);
console.error(e2);
return;
}
for (j in entry) {
if (entry.hasOwnProperty(j)) {
mod.builds[j] = mod.builds[j] || {};
mod.builds[j].config = entry[j];
// setting the proper filename for test if needed
if (entry[j] && entry[j].condition && entry[j].condition.test &&
libpath.extname(entry[j].condition.test) === '.js') {
entry[j].condition.test = libpath.join(metas, entry[j].condition.test);
}
}
}
}
}
}
return mod;
},
/**
Analyze a javascript file, if it is a yui module, it extracts all the important metadata
associated with it.
@method _checkYUIModule
@protected
@param {string} file The filesystem path for the yui module to be analyzed
@return {object} The parsed and augmented metadata from the yui module
**/
_checkYUIModule: function (file) {
var mod;
contextForRunInContext.YUI = {
add: function (name, fn, version, config) {
if (!mod) {
mod = {
name: name,
buildfile: file,
builds: {}
};
}
mod.builds[name] = {
name: name,
config: config || {}
};
// detecting affinity from the filename
if (file.indexOf('.server.js') === file.length - 10) {
mod.builds[name].config.affinity = 'server';
}
if (file.indexOf('.client.js') === file.length - 10) {
mod.builds[name].config.affinity = 'client';
}
}
};
try {
vm.runInContext(libfs.readFileSync(file, 'utf8'), contextForRunInContext, file);
} catch (e) {
return;
}
return mod;
},
/**
Verifies if a source file was already processed by analyzing its content against an
internal cache mechanism. JSON files (*.json) are an exception, and they will not be
cached since they might includes other files that might change and affects the result
of the build so we can't rely on the source file alone. If the file is not in cache,
it will be included automatically.
Why? This method is just an artifact to avoid spawning a process to execute shifter, which
is very expensive. It is also the main artifact to avoid shifting files when in production,
if the build process includes the build folder, especially because manhattan does not
support spawn. Finally, it is just a noop artifact to avoid calling shifter, it does not
need to cache the response of the shifter process, just opt out for the next call to shift
the same file with the same content.
@method _isCached
@protected
@param {string} file The filesystem path for the file to be cached
@param {string} buildDir The filesystem path for the build folder
@return {boolean} `true` if the file and its content matches the internal cache, otherwise `false`.
**/
_isCached: function (file, buildDir) {
var fileHash,
data;
if (libpath.extname(file) !== '.json') {
fileHash = libpath.join(buildDir, '.cache', utils.md5(file));
data = libfs.readFileSync(file, 'utf8');
if (libfs.existsSync(fileHash) && (libfs.readFileSync(fileHash, 'utf8') === data)) {
return true;
}
libmkdirp.sync(libpath.join(buildDir, '.cache'));
libfs.writeFileSync(fileHash, data, 'utf8');
}
return false;
},
/**
Removes the cache entry for a particular file.
Why? This method is just an artifact to invalidate the cache entry created by
`_isCached` when a shifter error is detected because the cache entry is prematurely
created before spawning to shifter.
@method _clearCached
@protected
@param {string} file The filesystem path for the file to be cached
@param {string} buildDir The filesystem path for the build folder
**/
_clearCached: function (file, buildDir) {
var fileHash;
fileHash = libpath.join(buildDir, '.cache', utils.md5(file));
if (libfs.existsSync(fileHash)) {
libfs.unlinkSync(fileHash, 'utf8');
}
}
}; | // invalidating the cache entry | random_line_split |
shifter.js | /*
* Copyright (c) 2013, Yahoo! Inc. All rights reserved.
* Copyrights licensed under the New BSD License.
* See the accompanying LICENSE file for terms.
*/
/*jslint node: true, nomen: true */
/**
The `express-yui.shifter` extension exposes a set of utilities to build yui modules
from *.js or build.json files.
@module yui
@submodule shifter
**/
'use strict';
var libfs = require('fs'),
libmkdirp = require('mkdirp'),
libpath = require('path'),
vm = require('vm'),
shifter = require('shifter'),
contextForRunInContext = vm.createContext({
require: null,
module: null,
console: null,
window: null,
document: null
}),
utils = require('./utils'),
debug = require('debug')('locator:yui:shifter');
/**
The `express-yui.shifter` extension exposes a locator plugin to build yui modules
from *.js or build.json files.
Here is an example:
var plugin = app.yui.locatorShifter({});
You can also specify a custom yui build directory, by doing:
var plugin = app.yui.locatorShifter({
yuiBuildDirectory: '/path/to/folder'
});
@class shifter
@static
@uses *path, *fs, *module, *vm, win-spawn, shifter, loader
@extensionfor yui
*/
module.exports = {
/**
Shift yui modules using shifter cli.
@method shiftFiles
@public
@param {array} files filesystem paths for all files to be shifted
@param {object} options configuration
@param {string} options.buildDir custom path for the output of the shifter
@param {boolean} options.cache whether or not we should apply cache to speed up
the shifting process. If true, it will create the folder `.cache` and generate
some hash to prevent shifting the same *.js files if there is no change in
the source.
@param {array} options.args shifter cli build arguments, it defaults to `[]`
@param {function} callback the callback method to signal the end of the operation
**/
shiftFiles: function (files, options, callback) {
var self = this,
queue = [].concat(files);
if (utils.productionMode) {
debug('skipping shifter in production environments.');
if (callback) { callback(null); }
return;
}
options = options || {};
function next() {
var file = queue.shift(),
opts = utils.extend({}, options.opts);
if (file) {
debug('shifting ' + file);
if (options.symlink && self._isLinked(file, options.buildDir)) {
next();
return;
}
if (options.cache && self._isCached(file, options.buildDir)) {
next();
return;
}
opts.cwd = libpath.dirname(file);
opts["build-dir"] = options.buildDir;
if (libpath.extname(file) === '.js') {
opts['yui-module'] = file;
} else {
opts.config = file;
}
shifter.add(opts, function (err) {
if (err) {
if (options.cache) {
// invalidating the cache entry
self._clearCached(file, options.buildDir);
}
callback(new Error(file + ": shifter compiler error: " + err));
return;
}
next(); // next item in queue to be processed
});
} else {
if (callback) {
callback(null);
}
}
}
next(); // kick off the queue process
},
/**
Analyze a build.json file to extract all the important metadata associated with it.
@method _checkBuildFile
@protected
@param {string} file The filesystem path for the build.json file to be analyzed
@return {object} The parsed and augmented content of the build.json file
**/
_checkBuildFile: function (file) {
var mod,
entry,
metas = libpath.join(libpath.dirname(file), 'meta'),
files,
i,
j,
f;
try {
mod = JSON.parse(libfs.readFileSync(file, 'utf8'));
} catch (e1) {
console.error('Failed to parse build file: ' + file);
console.error(e1);
return;
}
if (!mod.builds) {
console.error('Invalid meta file: ' + file);
return;
}
mod.buildfile = file;
if (libfs.existsSync(metas)) {
files = libfs.readdirSync(metas);
for (i = 0; i < files.length; i += 1) {
f = files[i];
if (libpath.extname(f) === '.json') {
try {
entry = JSON.parse(libfs.readFileSync(libpath.join(metas, f), 'utf8'));
} catch (e2) {
console.error('Failed to parse meta file: ' + f);
console.error(e2);
return;
}
for (j in entry) {
if (entry.hasOwnProperty(j)) {
mod.builds[j] = mod.builds[j] || {};
mod.builds[j].config = entry[j];
// setting the proper filename for test if needed
if (entry[j] && entry[j].condition && entry[j].condition.test &&
libpath.extname(entry[j].condition.test) === '.js') |
}
}
}
}
}
return mod;
},
/**
Analyze a javascript file, if it is a yui module, it extracts all the important metadata
associated with it.
@method _checkYUIModule
@protected
@param {string} file The filesystem path for the yui module to be analyzed
@return {object} The parsed and augmented metadata from the yui module
**/
_checkYUIModule: function (file) {
var mod;
contextForRunInContext.YUI = {
add: function (name, fn, version, config) {
if (!mod) {
mod = {
name: name,
buildfile: file,
builds: {}
};
}
mod.builds[name] = {
name: name,
config: config || {}
};
// detecting affinity from the filename
if (file.indexOf('.server.js') === file.length - 10) {
mod.builds[name].config.affinity = 'server';
}
if (file.indexOf('.client.js') === file.length - 10) {
mod.builds[name].config.affinity = 'client';
}
}
};
try {
vm.runInContext(libfs.readFileSync(file, 'utf8'), contextForRunInContext, file);
} catch (e) {
return;
}
return mod;
},
/**
Verifies if a source file was already processed by analyzing its content against an
internal cache mechanism. JSON files (*.json) are an exception, and they will not be
cached since they might include other files that might change and affect the result
of the build so we can't rely on the source file alone. If the file is not in cache,
it will be included automatically.
Why? This method is just an artifact to avoid spawning a process to execute shifter, which
is very expensive. It is also the main artifact to avoid shifting files when in production,
if the build process includes the build folder, especially because manhattan does not
support spawn. Finally, it is just a noop artifact to avoid calling shifter, it does not
need to cache the response of the shifter process, just opt out for the next call to shift
the same file with the same content.
@method _isCached
@protected
@param {string} file The filesystem path for the file to be cached
@param {string} buildDir The filesystem path for the build folder
@return {boolean} `true` if the file and its content matches the internal cache, otherwise `false`.
**/
_isCached: function (file, buildDir) {
var fileHash,
data;
if (libpath.extname(file) !== '.json') {
fileHash = libpath.join(buildDir, '.cache', utils.md5(file));
data = libfs.readFileSync(file, 'utf8');
if (libfs.existsSync(fileHash) && (libfs.readFileSync(fileHash, 'utf8') === data)) {
return true;
}
libmkdirp.sync(libpath.join(buildDir, '.cache'));
libfs.writeFileSync(fileHash, data, 'utf8');
}
return false;
},
/**
Removes the cache entry for a particular file.
Why? This method is just an artifact to invalidate the cache entry created by
`_isCached` when a shifter error is detected because the cache entry is prematurely
created before spawning to shifter.
@method _clearCached
@protected
@param {string} file The filesystem path for the file to be cached
@param {string} buildDir The filesystem path for the build folder
**/
_clearCached: function (file, buildDir) {
var fileHash;
fileHash = libpath.join(buildDir, '.cache', utils.md5(file));
if (libfs.existsSync(fileHash)) {
libfs.unlinkSync(fileHash, 'utf8');
}
}
};
| {
entry[j].condition.test = libpath.join(metas, entry[j].condition.test);
} | conditional_block |
shifter.js | /*
* Copyright (c) 2013, Yahoo! Inc. All rights reserved.
* Copyrights licensed under the New BSD License.
* See the accompanying LICENSE file for terms.
*/
/*jslint node: true, nomen: true */
/**
The `express-yui.shifter` extension exposes a set of utilities to build yui modules
from *.js or build.json files.
@module yui
@submodule shifter
**/
'use strict';
var libfs = require('fs'),
libmkdirp = require('mkdirp'),
libpath = require('path'),
vm = require('vm'),
shifter = require('shifter'),
contextForRunInContext = vm.createContext({
require: null,
module: null,
console: null,
window: null,
document: null
}),
utils = require('./utils'),
debug = require('debug')('locator:yui:shifter');
/**
The `express-yui.shifter` extension exposes a locator plugin to build yui modules
from *.js or build.json files.
Here is an example:
var plugin = app.yui.locatorShifter({});
You can also specify a custom yui build directory, by doing:
var plugin = app.yui.locatorShifter({
yuiBuildDirectory: '/path/to/folder'
});
@class shifter
@static
@uses *path, *fs, *module, *vm, win-spawn, shifter, loader
@extensionfor yui
*/
module.exports = {
/**
Shift yui modules using shifter cli.
@method shiftFiles
@public
@param {array} files filesystem paths for all files to be shifted
@param {object} options configuration
@param {string} options.buildDir custom path for the output of the shifter
@param {boolean} options.cache whether or not we should apply cache to speed up
the shifting process. If true, it will create the folder `.cache` and generate
some hash to prevent shifting the same *.js files if there is no change in
the source.
@param {array} options.args shifter cli build arguments, it defaults to `[]`
@param {function} callback the callback method to signal the end of the operation
**/
shiftFiles: function (files, options, callback) {
var self = this,
queue = [].concat(files);
if (utils.productionMode) {
debug('skipping shifter in production environments.');
if (callback) { callback(null); }
return;
}
options = options || {};
function | () {
var file = queue.shift(),
opts = utils.extend({}, options.opts);
if (file) {
debug('shifting ' + file);
if (options.symlink && self._isLinked(file, options.buildDir)) {
next();
return;
}
if (options.cache && self._isCached(file, options.buildDir)) {
next();
return;
}
opts.cwd = libpath.dirname(file);
opts["build-dir"] = options.buildDir;
if (libpath.extname(file) === '.js') {
opts['yui-module'] = file;
} else {
opts.config = file;
}
shifter.add(opts, function (err) {
if (err) {
if (options.cache) {
// invalidating the cache entry
self._clearCached(file, options.buildDir);
}
callback(new Error(file + ": shifter compiler error: " + err));
return;
}
next(); // next item in queue to be processed
});
} else {
if (callback) {
callback(null);
}
}
}
next(); // kick off the queue process
},
/**
Analyze a build.json file to extract all the important metadata associated with it.
@method _checkBuildFile
@protected
@param {string} file The filesystem path for the build.json file to be analyzed
@return {object} The parsed and augmented content of the build.json file
**/
_checkBuildFile: function (file) {
var mod,
entry,
metas = libpath.join(libpath.dirname(file), 'meta'),
files,
i,
j,
f;
try {
mod = JSON.parse(libfs.readFileSync(file, 'utf8'));
} catch (e1) {
console.error('Failed to parse build file: ' + file);
console.error(e1);
return;
}
if (!mod.builds) {
console.error('Invalid meta file: ' + file);
return;
}
mod.buildfile = file;
if (libfs.existsSync(metas)) {
files = libfs.readdirSync(metas);
for (i = 0; i < files.length; i += 1) {
f = files[i];
if (libpath.extname(f) === '.json') {
try {
entry = JSON.parse(libfs.readFileSync(libpath.join(metas, f), 'utf8'));
} catch (e2) {
console.error('Failed to parse meta file: ' + f);
console.error(e2);
return;
}
for (j in entry) {
if (entry.hasOwnProperty(j)) {
mod.builds[j] = mod.builds[j] || {};
mod.builds[j].config = entry[j];
// setting the proper filename for test if needed
if (entry[j] && entry[j].condition && entry[j].condition.test &&
libpath.extname(entry[j].condition.test) === '.js') {
entry[j].condition.test = libpath.join(metas, entry[j].condition.test);
}
}
}
}
}
}
return mod;
},
/**
Analyze a javascript file, if it is a yui module, it extracts all the important metadata
associated with it.
@method _checkYUIModule
@protected
@param {string} file The filesystem path for the yui module to be analyzed
@return {object} The parsed and augmented metadata from the yui module
**/
_checkYUIModule: function (file) {
var mod;
contextForRunInContext.YUI = {
add: function (name, fn, version, config) {
if (!mod) {
mod = {
name: name,
buildfile: file,
builds: {}
};
}
mod.builds[name] = {
name: name,
config: config || {}
};
// detecting affinity from the filename
if (file.indexOf('.server.js') === file.length - 10) {
mod.builds[name].config.affinity = 'server';
}
if (file.indexOf('.client.js') === file.length - 10) {
mod.builds[name].config.affinity = 'client';
}
}
};
try {
vm.runInContext(libfs.readFileSync(file, 'utf8'), contextForRunInContext, file);
} catch (e) {
return;
}
return mod;
},
/**
Verifies if a source file was already processed by analyzing its content against an
internal cache mechanism. JSON files (*.json) are an exception, and they will not be
cached since they might include other files that might change and affect the result
of the build so we can't rely on the source file alone. If the file is not in cache,
it will be included automatically.
Why? This method is just an artifact to avoid spawning a process to execute shifter, which
is very expensive. It is also the main artifact to avoid shifting files when in production,
if the build process includes the build folder, especially because manhattan does not
support spawn. Finally, it is just a noop artifact to avoid calling shifter, it does not
need to cache the response of the shifter process, just opt out for the next call to shift
the same file with the same content.
@method _isCached
@protected
@param {string} file The filesystem path for the file to be cached
@param {string} buildDir The filesystem path for the build folder
@return {boolean} `true` if the file and its content matches the internal cache, otherwise `false`.
**/
_isCached: function (file, buildDir) {
var fileHash,
data;
if (libpath.extname(file) !== '.json') {
fileHash = libpath.join(buildDir, '.cache', utils.md5(file));
data = libfs.readFileSync(file, 'utf8');
if (libfs.existsSync(fileHash) && (libfs.readFileSync(fileHash, 'utf8') === data)) {
return true;
}
libmkdirp.sync(libpath.join(buildDir, '.cache'));
libfs.writeFileSync(fileHash, data, 'utf8');
}
return false;
},
/**
Removes the cache entry for a particular file.
Why? This method is just an artifact to invalidate the cache entry created by
`_isCached` when a shifter error is detected because the cache entry is prematurely
created before spawning to shifter.
@method _clearCached
@protected
@param {string} file The filesystem path for the file to be cached
@param {string} buildDir The filesystem path for the build folder
**/
_clearCached: function (file, buildDir) {
var fileHash;
fileHash = libpath.join(buildDir, '.cache', utils.md5(file));
if (libfs.existsSync(fileHash)) {
libfs.unlinkSync(fileHash, 'utf8');
}
}
};
| next | identifier_name |
profile.rs | use crate::{Error, Result};
use chrono::{DateTime, Utc};
use colored::Colorize;
use serde::Deserialize;
use std::fs::File;
use std::io::{self, Read};
use std::path::{Path, PathBuf};
use std::time::SystemTime;
/// Represents a file with a provisioning profile info.
#[derive(Debug, Clone)]
pub struct Profile {
pub path: PathBuf,
pub info: Info,
}
impl Profile {
/// Returns instance of the `Profile` parsed from a file.
pub fn from_file(path: &Path) -> Result<Self> {
let mut buf = Vec::new();
File::open(path)?.read_to_end(&mut buf)?;
let info =
Info::from_xml_data(&buf).ok_or_else(|| Error::Own("Couldn't parse file.".into()))?;
Ok(Self {
path: path.to_owned(),
info,
})
}
}
/// Represents provisioning profile info.
#[derive(Debug, PartialEq, Clone)]
pub struct Info {
pub uuid: String,
pub name: String,
pub app_identifier: String,
pub creation_date: SystemTime,
pub expiration_date: SystemTime,
}
#[derive(Debug, Deserialize)]
struct InfoDef {
#[serde(rename = "UUID")]
pub uuid: String,
#[serde(rename = "Name")]
pub name: String,
#[serde(rename = "Entitlements")]
pub entitlements: Entitlements,
#[serde(rename = "CreationDate")]
pub creation_date: plist::Date,
#[serde(rename = "ExpirationDate")]
pub expiration_date: plist::Date,
}
#[derive(Debug, Deserialize)]
struct Entitlements {
#[serde(rename = "application-identifier")]
pub app_identifier: String,
}
impl Info {
/// Returns instance of the `Profile` parsed from a `data`.
pub fn from_xml_data(data: &[u8]) -> Option<Self> {
crate::plist_extractor::find(data).and_then(|xml| {
plist::from_reader_xml(io::Cursor::new(xml))
.ok()
.map(|info: InfoDef| Self {
uuid: info.uuid,
name: info.name,
app_identifier: info.entitlements.app_identifier,
creation_date: info.creation_date.into(),
expiration_date: info.expiration_date.into(),
})
})
}
/// Returns an empty profile info.
pub fn empty() -> Self {
Self {
uuid: "".into(),
name: "".into(),
app_identifier: "".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
}
}
/// Returns `true` if one or more fields of the profile contain `string`.
pub fn contains(&self, string: &str) -> bool {
let s = string.to_lowercase();
let items = &[&self.name, &self.app_identifier, &self.uuid];
for item in items {
if item.to_lowercase().contains(&s) {
return true;
}
}
false
}
/// Returns a bundle id of a profile.
pub fn bundle_id(&self) -> Option<&str> {
self.app_identifier
.find(|ch| ch == '.')
.map(|i| &self.app_identifier[(i + 1)..])
}
/// Returns profile in a text form.
pub fn description(&self, oneline: bool) -> String {
if oneline {
return format!(
"{} {} {} {}",
self.uuid.yellow(),
DateTime::<Utc>::from(self.expiration_date)
.format("%Y-%m-%d")
.to_string()
.blue(),
self.app_identifier.green(),
self.name
);
} else {
let dates = format!(
"{} - {}",
DateTime::<Utc>::from(self.creation_date),
DateTime::<Utc>::from(self.expiration_date)
)
.blue();
return format!(
"{}\n{}\n{}\n{}",
self.uuid.yellow(),
self.app_identifier.green(),
self.name,
dates
);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use expectest::expect;
use expectest::prelude::*;
#[test]
fn contains() {
let profile = Info {
uuid: "123".into(),
name: "name".into(),
app_identifier: "id".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
};
expect!(profile.contains("12")).to(be_true());
expect!(profile.contains("me")).to(be_true());
expect!(profile.contains("id")).to(be_true());
}
#[test]
fn | () {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.com.exmaple.app".to_owned();
expect!(profile.bundle_id()).to(be_some().value("com.exmaple.app"));
}
#[test]
fn incorrect_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE".to_owned();
expect!(profile.bundle_id()).to(be_none());
}
#[test]
fn wildcard_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.*".to_owned();
expect!(profile.bundle_id()).to(be_some().value("*"));
}
}
| correct_bundle_id | identifier_name |
profile.rs | use crate::{Error, Result};
use chrono::{DateTime, Utc};
use colored::Colorize;
use serde::Deserialize;
use std::fs::File;
use std::io::{self, Read};
use std::path::{Path, PathBuf};
use std::time::SystemTime;
/// Represents a file with a provisioning profile info.
#[derive(Debug, Clone)]
pub struct Profile {
pub path: PathBuf,
pub info: Info,
}
impl Profile {
/// Returns instance of the `Profile` parsed from a file.
pub fn from_file(path: &Path) -> Result<Self> {
let mut buf = Vec::new();
File::open(path)?.read_to_end(&mut buf)?;
let info =
Info::from_xml_data(&buf).ok_or_else(|| Error::Own("Couldn't parse file.".into()))?;
Ok(Self {
path: path.to_owned(),
info,
})
}
}
/// Represents provisioning profile info.
#[derive(Debug, PartialEq, Clone)]
pub struct Info {
pub uuid: String,
pub name: String,
pub app_identifier: String,
pub creation_date: SystemTime,
pub expiration_date: SystemTime,
}
#[derive(Debug, Deserialize)]
struct InfoDef {
#[serde(rename = "UUID")]
pub uuid: String,
#[serde(rename = "Name")]
pub name: String,
#[serde(rename = "Entitlements")]
pub entitlements: Entitlements,
#[serde(rename = "CreationDate")]
pub creation_date: plist::Date,
#[serde(rename = "ExpirationDate")]
pub expiration_date: plist::Date,
}
#[derive(Debug, Deserialize)]
struct Entitlements {
#[serde(rename = "application-identifier")]
pub app_identifier: String,
}
impl Info {
/// Returns instance of the `Profile` parsed from a `data`.
pub fn from_xml_data(data: &[u8]) -> Option<Self> {
crate::plist_extractor::find(data).and_then(|xml| {
plist::from_reader_xml(io::Cursor::new(xml))
.ok()
.map(|info: InfoDef| Self {
uuid: info.uuid,
name: info.name,
app_identifier: info.entitlements.app_identifier,
creation_date: info.creation_date.into(),
expiration_date: info.expiration_date.into(),
})
})
}
/// Returns an empty profile info.
pub fn empty() -> Self {
Self {
uuid: "".into(),
name: "".into(),
app_identifier: "".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
}
}
/// Returns `true` if one or more fields of the profile contain `string`.
pub fn contains(&self, string: &str) -> bool {
let s = string.to_lowercase();
let items = &[&self.name, &self.app_identifier, &self.uuid];
for item in items {
if item.to_lowercase().contains(&s) {
return true;
}
}
false
}
/// Returns a bundle id of a profile.
pub fn bundle_id(&self) -> Option<&str> {
self.app_identifier
.find(|ch| ch == '.')
.map(|i| &self.app_identifier[(i + 1)..])
}
/// Returns profile in a text form.
pub fn description(&self, oneline: bool) -> String {
if oneline {
return format!(
"{} {} {} {}",
self.uuid.yellow(),
DateTime::<Utc>::from(self.expiration_date)
.format("%Y-%m-%d")
.to_string()
.blue(),
self.app_identifier.green(),
self.name
);
} else {
let dates = format!(
"{} - {}",
DateTime::<Utc>::from(self.creation_date),
DateTime::<Utc>::from(self.expiration_date)
)
.blue();
return format!(
"{}\n{}\n{}\n{}",
self.uuid.yellow(),
self.app_identifier.green(),
self.name,
dates
);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use expectest::expect;
use expectest::prelude::*;
#[test]
fn contains() {
let profile = Info {
uuid: "123".into(),
name: "name".into(),
app_identifier: "id".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
};
expect!(profile.contains("12")).to(be_true());
expect!(profile.contains("me")).to(be_true());
expect!(profile.contains("id")).to(be_true());
}
#[test]
fn correct_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.com.exmaple.app".to_owned();
expect!(profile.bundle_id()).to(be_some().value("com.exmaple.app"));
}
#[test]
fn incorrect_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE".to_owned();
expect!(profile.bundle_id()).to(be_none());
}
| fn wildcard_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.*".to_owned();
expect!(profile.bundle_id()).to(be_some().value("*"));
}
} | #[test] | random_line_split |
profile.rs | use crate::{Error, Result};
use chrono::{DateTime, Utc};
use colored::Colorize;
use serde::Deserialize;
use std::fs::File;
use std::io::{self, Read};
use std::path::{Path, PathBuf};
use std::time::SystemTime;
/// Represents a file with a provisioning profile info.
#[derive(Debug, Clone)]
pub struct Profile {
pub path: PathBuf,
pub info: Info,
}
impl Profile {
/// Returns instance of the `Profile` parsed from a file.
pub fn from_file(path: &Path) -> Result<Self> {
let mut buf = Vec::new();
File::open(path)?.read_to_end(&mut buf)?;
let info =
Info::from_xml_data(&buf).ok_or_else(|| Error::Own("Couldn't parse file.".into()))?;
Ok(Self {
path: path.to_owned(),
info,
})
}
}
/// Represents provisioning profile info.
#[derive(Debug, PartialEq, Clone)]
pub struct Info {
pub uuid: String,
pub name: String,
pub app_identifier: String,
pub creation_date: SystemTime,
pub expiration_date: SystemTime,
}
#[derive(Debug, Deserialize)]
struct InfoDef {
#[serde(rename = "UUID")]
pub uuid: String,
#[serde(rename = "Name")]
pub name: String,
#[serde(rename = "Entitlements")]
pub entitlements: Entitlements,
#[serde(rename = "CreationDate")]
pub creation_date: plist::Date,
#[serde(rename = "ExpirationDate")]
pub expiration_date: plist::Date,
}
#[derive(Debug, Deserialize)]
struct Entitlements {
#[serde(rename = "application-identifier")]
pub app_identifier: String,
}
impl Info {
/// Returns instance of the `Profile` parsed from a `data`.
pub fn from_xml_data(data: &[u8]) -> Option<Self> {
crate::plist_extractor::find(data).and_then(|xml| {
plist::from_reader_xml(io::Cursor::new(xml))
.ok()
.map(|info: InfoDef| Self {
uuid: info.uuid,
name: info.name,
app_identifier: info.entitlements.app_identifier,
creation_date: info.creation_date.into(),
expiration_date: info.expiration_date.into(),
})
})
}
/// Returns an empty profile info.
pub fn empty() -> Self {
Self {
uuid: "".into(),
name: "".into(),
app_identifier: "".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
}
}
/// Returns `true` if one or more fields of the profile contain `string`.
pub fn contains(&self, string: &str) -> bool {
let s = string.to_lowercase();
let items = &[&self.name, &self.app_identifier, &self.uuid];
for item in items {
if item.to_lowercase().contains(&s) |
}
false
}
/// Returns a bundle id of a profile.
pub fn bundle_id(&self) -> Option<&str> {
self.app_identifier
.find(|ch| ch == '.')
.map(|i| &self.app_identifier[(i + 1)..])
}
/// Returns profile in a text form.
pub fn description(&self, oneline: bool) -> String {
if oneline {
return format!(
"{} {} {} {}",
self.uuid.yellow(),
DateTime::<Utc>::from(self.expiration_date)
.format("%Y-%m-%d")
.to_string()
.blue(),
self.app_identifier.green(),
self.name
);
} else {
let dates = format!(
"{} - {}",
DateTime::<Utc>::from(self.creation_date),
DateTime::<Utc>::from(self.expiration_date)
)
.blue();
return format!(
"{}\n{}\n{}\n{}",
self.uuid.yellow(),
self.app_identifier.green(),
self.name,
dates
);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use expectest::expect;
use expectest::prelude::*;
#[test]
fn contains() {
let profile = Info {
uuid: "123".into(),
name: "name".into(),
app_identifier: "id".into(),
creation_date: SystemTime::UNIX_EPOCH,
expiration_date: SystemTime::UNIX_EPOCH,
};
expect!(profile.contains("12")).to(be_true());
expect!(profile.contains("me")).to(be_true());
expect!(profile.contains("id")).to(be_true());
}
#[test]
fn correct_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.com.exmaple.app".to_owned();
expect!(profile.bundle_id()).to(be_some().value("com.exmaple.app"));
}
#[test]
fn incorrect_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE".to_owned();
expect!(profile.bundle_id()).to(be_none());
}
#[test]
fn wildcard_bundle_id() {
let mut profile = Info::empty();
profile.app_identifier = "12345ABCDE.*".to_owned();
expect!(profile.bundle_id()).to(be_some().value("*"));
}
}
| {
return true;
} | conditional_block |
navigator.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::NavigatorBinding;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::window::Window;
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct Navigator {
pub reflector_: Reflector //XXXjdm cycle: window->navigator->window
}
impl Navigator {
pub fn new_inherited() -> Navigator {
Navigator {
reflector_: Reflector::new()
}
}
pub fn new(window: &JSRef<Window>) -> Temporary<Navigator> {
reflect_dom_object(box Navigator::new_inherited(),
window,
NavigatorBinding::Wrap)
}
}
pub trait NavigatorMethods {
fn Product(&self) -> DOMString;
fn TaintEnabled(&self) -> bool;
fn AppName(&self) -> DOMString;
fn AppCodeName(&self) -> DOMString;
fn Platform(&self) -> DOMString;
}
impl<'a> NavigatorMethods for JSRef<'a, Navigator> {
fn Product(&self) -> DOMString {
"Gecko".to_string()
}
fn | (&self) -> bool {
false
}
fn AppName(&self) -> DOMString {
"Netscape".to_string() // Like Gecko/Webkit
}
fn AppCodeName(&self) -> DOMString {
"Mozilla".to_string()
}
fn Platform(&self) -> DOMString {
"".to_string()
}
}
impl Reflectable for Navigator {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
}
| TaintEnabled | identifier_name |
navigator.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::NavigatorBinding;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::window::Window;
use servo_util::str::DOMString;
#[deriving(Encodable)]
pub struct Navigator {
pub reflector_: Reflector //XXXjdm cycle: window->navigator->window
}
impl Navigator {
pub fn new_inherited() -> Navigator {
Navigator {
reflector_: Reflector::new()
}
}
pub fn new(window: &JSRef<Window>) -> Temporary<Navigator> {
reflect_dom_object(box Navigator::new_inherited(),
window,
NavigatorBinding::Wrap)
}
}
pub trait NavigatorMethods {
fn Product(&self) -> DOMString;
fn TaintEnabled(&self) -> bool;
fn AppName(&self) -> DOMString;
fn AppCodeName(&self) -> DOMString;
fn Platform(&self) -> DOMString;
}
impl<'a> NavigatorMethods for JSRef<'a, Navigator> {
fn Product(&self) -> DOMString {
"Gecko".to_string()
}
fn TaintEnabled(&self) -> bool {
false
}
fn AppName(&self) -> DOMString {
"Netscape".to_string() // Like Gecko/Webkit
}
fn AppCodeName(&self) -> DOMString { |
fn Platform(&self) -> DOMString {
"".to_string()
}
}
impl Reflectable for Navigator {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
} | "Mozilla".to_string()
} | random_line_split |
chrome_cache.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Chrome Cache files parser."""
import unittest
from plaso.parsers import chrome_cache
from tests.parsers import test_lib
class ChromeCacheParserTest(test_lib.ParserTestCase):
"""Tests for the Chrome Cache files parser."""
def testParse(self):
"""Tests the Parse function."""
parser = chrome_cache.ChromeCacheParser()
storage_writer = self._ParseFile(['chrome_cache', 'index'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 217)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers( | 'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'chrome:cache:entry',
'date_time': '2014-04-30 16:44:36.226091',
'original_url': (
'https://s.ytimg.com/yts/imgbin/player-common-vfliLfqPT.webp')}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main() | 'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers( | random_line_split |
chrome_cache.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Chrome Cache files parser."""
import unittest
from plaso.parsers import chrome_cache
from tests.parsers import test_lib
class ChromeCacheParserTest(test_lib.ParserTestCase):
"""Tests for the Chrome Cache files parser."""
def testParse(self):
|
if __name__ == '__main__':
unittest.main()
| """Tests the Parse function."""
parser = chrome_cache.ChromeCacheParser()
storage_writer = self._ParseFile(['chrome_cache', 'index'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 217)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'chrome:cache:entry',
'date_time': '2014-04-30 16:44:36.226091',
'original_url': (
'https://s.ytimg.com/yts/imgbin/player-common-vfliLfqPT.webp')}
self.CheckEventValues(storage_writer, events[0], expected_event_values) | identifier_body |
chrome_cache.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Chrome Cache files parser."""
import unittest
from plaso.parsers import chrome_cache
from tests.parsers import test_lib
class ChromeCacheParserTest(test_lib.ParserTestCase):
"""Tests for the Chrome Cache files parser."""
def testParse(self):
"""Tests the Parse function."""
parser = chrome_cache.ChromeCacheParser()
storage_writer = self._ParseFile(['chrome_cache', 'index'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 217)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'chrome:cache:entry',
'date_time': '2014-04-30 16:44:36.226091',
'original_url': (
'https://s.ytimg.com/yts/imgbin/player-common-vfliLfqPT.webp')}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
| unittest.main() | conditional_block |
|
chrome_cache.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Chrome Cache files parser."""
import unittest
from plaso.parsers import chrome_cache
from tests.parsers import test_lib
class ChromeCacheParserTest(test_lib.ParserTestCase):
"""Tests for the Chrome Cache files parser."""
def | (self):
"""Tests the Parse function."""
parser = chrome_cache.ChromeCacheParser()
storage_writer = self._ParseFile(['chrome_cache', 'index'], parser)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 217)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'data_type': 'chrome:cache:entry',
'date_time': '2014-04-30 16:44:36.226091',
'original_url': (
'https://s.ytimg.com/yts/imgbin/player-common-vfliLfqPT.webp')}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
if __name__ == '__main__':
unittest.main()
| testParse | identifier_name |
issue-352.ts | import "reflect-metadata";
import {closeTestingConnections, createTestingConnections, reloadTestingDatabases} from "../../utils/test-utils";
import {Connection} from "../../../src/connection/Connection";
import {expect} from "chai";
import {Post} from "./entity/Post";
import {MssqlParameter} from "../../../src/driver/sqlserver/MssqlParameter";
describe("github issues > #352 double precision round to int in mssql", () => {
let connections: Connection[];
before(async () => connections = await createTestingConnections({
entities: [__dirname + "/entity/*{.js,.ts}"],
enabledDrivers: ["mssql"]
}));
beforeEach(() => reloadTestingDatabases(connections));
after(() => closeTestingConnections(connections));
it("real number should be successfully stored and loaded from db including value in parameters", () => Promise.all(connections.map(async connection => {
const posts: Post[] = [];
for (let i = 1; i <= 25; i++) |
await connection.manager.save(posts);
const loadedPost = await connection.manager
.createQueryBuilder(Post, "post")
.where("post.id = :id", { id: new MssqlParameter(1.234567789, "float") })
.getOne();
expect(loadedPost).to.exist;
expect(loadedPost!.id).to.be.equal(1.234567789);
})));
});
| {
const post = new Post();
post.id = i + 0.234567789;
post.title = "hello post";
posts.push(post);
} | conditional_block |
issue-352.ts | import "reflect-metadata";
import {closeTestingConnections, createTestingConnections, reloadTestingDatabases} from "../../utils/test-utils";
import {Connection} from "../../../src/connection/Connection";
import {expect} from "chai";
import {Post} from "./entity/Post";
import {MssqlParameter} from "../../../src/driver/sqlserver/MssqlParameter";
describe("github issues > #352 double precision round to int in mssql", () => {
let connections: Connection[];
before(async () => connections = await createTestingConnections({
entities: [__dirname + "/entity/*{.js,.ts}"],
enabledDrivers: ["mssql"]
}));
beforeEach(() => reloadTestingDatabases(connections));
after(() => closeTestingConnections(connections));
it("real number should be successfully stored and loaded from db including value in parameters", () => Promise.all(connections.map(async connection => {
const posts: Post[] = [];
for (let i = 1; i <= 25; i++) {
const post = new Post();
post.id = i + 0.234567789;
post.title = "hello post";
posts.push(post);
}
await connection.manager.save(posts);
| .getOne();
expect(loadedPost).to.exist;
expect(loadedPost!.id).to.be.equal(1.234567789);
})));
}); | const loadedPost = await connection.manager
.createQueryBuilder(Post, "post")
.where("post.id = :id", { id: new MssqlParameter(1.234567789, "float") }) | random_line_split |
listeners.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::Session;
use crate::Error;
use log::{debug, error, info, trace, warn};
use qp2p::IncomingMessages;
use sn_data_types::PublicKey;
use sn_messaging::{
client::{ClientMsg, Event, ProcessMsg},
section_info::{Error as SectionInfoError, GetSectionResponse, SectionInfoMsg},
MessageId, MessageType, SectionAuthorityProvider, WireMsg,
};
use std::{
collections::{BTreeMap, BTreeSet},
net::SocketAddr,
};
impl Session {
/// Remove a pending transfer sender from the listener map
pub async fn remove_pending_transfer_sender(&self, msg_id: &MessageId) -> Result<(), Error> {
let pending_transfers = self.pending_transfers.clone();
let mut listeners = pending_transfers.write().await;
debug!("Pending transfers at this point: {:?}", listeners);
let _ = listeners
.remove(msg_id)
.ok_or(Error::NoTransferValidationListener)?;
Ok(())
}
// Listen for incoming messages on a connection
pub(crate) async fn spawn_message_listener_thread(
&self,
mut incoming_messages: IncomingMessages,
client_pk: PublicKey,
) {
debug!("Listening for incoming messages");
let mut session = self.clone();
let _ = tokio::spawn(async move {
loop {
match session
.process_incoming_message(&mut incoming_messages, client_pk)
.await
{
Ok(true) => (),
Ok(false) => {
info!("IncomingMessages listener has closed.");
break;
}
Err(err) => {
error!("Error while processing incoming message: {:?}. Listening for next message...", err);
}
}
}
});
}
pub(crate) async fn process_incoming_message(
&mut self,
incoming_messages: &mut IncomingMessages,
client_pk: PublicKey,
) -> Result<bool, Error> {
if let Some((src, message)) = incoming_messages.next().await {
let message_type = WireMsg::deserialize(message)?;
trace!("Incoming message from {:?}", &src);
match message_type {
MessageType::SectionInfo { msg, .. } => {
if let Err(error) = self.handle_section_info_msg(msg, src, client_pk).await {
error!("Error handling network info message: {:?}", error);
}
}
MessageType::Client { msg, .. } => {
match msg {
ClientMsg::Process(msg) => self.handle_client_msg(msg, src).await,
ClientMsg::ProcessingError(error) => {
warn!("Processing error received. {:?}", error);
// TODO: Handle lazy message errors
}
msg => warn!("SupportingInfo received: {:?}", msg),
}
}
msg_type => {
warn!("Unexpected message type received: {:?}", msg_type);
}
}
Ok(true)
} else {
Ok(false)
}
}
// Private helpers
// Handle received network info messages
async fn handle_section_info_msg(
&mut self,
msg: SectionInfoMsg,
src: SocketAddr,
client_pk: PublicKey,
) -> Result<(), Error> {
trace!("Handling network info message {:?}", msg);
match &msg {
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Success(info)) => {
debug!("GetSectionResponse::Success!");
self.update_session_info(info).await
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::InvalidBootstrap(err),
)) => {
warn!(
"Message was interrupted due to {:?}. Attempting to connect to elders again.",
err
);
self.connect_to_elders().await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::TargetSectionInfoOutdated(sap),
)) => {
debug!("Updated section info received: {:?}", sap);
self.update_session_info(sap).await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Redirect(sap)) => |
SectionInfoMsg::SectionInfoUpdate(update) => {
let correlation_id = update.correlation_id;
error!("MessageId {:?} was interrupted due to infrastructure updates. This will most likely need to be sent again. Update was : {:?}", correlation_id, update);
if let SectionInfoError::TargetSectionInfoOutdated(sap) = update.clone().error {
trace!("Updated network info: ({:?})", sap);
self.update_session_info(&sap).await?;
}
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(_))
| SectionInfoMsg::GetSectionQuery { .. } => {
Err(Error::UnexpectedMessageOnJoin(format!(
"bootstrapping failed since an invalid response ({:?}) was received",
msg
)))
}
}
}
// Apply updated info to a network session, and trigger connections
async fn update_session_info(&mut self, sap: &SectionAuthorityProvider) -> Result<(), Error> {
let original_known_elders = self.all_known_elders.read().await.clone();
// Change this once sn_messaging is updated
let received_elders = sap
.elders
.iter()
.map(|(name, addr)| (*addr, *name))
.collect::<BTreeMap<_, _>>();
// Obtain the addresses of the Elders
trace!(
"Updating session info! Received elders: ({:?})",
received_elders
);
{
// Update session key set
let mut keyset = self.section_key_set.write().await;
if *keyset == Some(sap.public_key_set.clone()) {
trace!("We have previously received the key set already.");
return Ok(());
}
*keyset = Some(sap.public_key_set.clone());
}
{
// update section prefix
let mut prefix = self.section_prefix.write().await;
*prefix = Some(sap.prefix);
}
{
// Update session elders
let mut session_elders = self.all_known_elders.write().await;
*session_elders = received_elders.clone();
}
if original_known_elders != received_elders {
debug!("Connecting to new set of Elders: {:?}", received_elders);
let new_elder_addresses = received_elders.keys().cloned().collect::<BTreeSet<_>>();
let updated_contacts = new_elder_addresses.iter().cloned().collect::<Vec<_>>();
let old_elders = original_known_elders
.iter()
.filter_map(|(peer_addr, _)| {
if !new_elder_addresses.contains(peer_addr) {
Some(*peer_addr)
} else {
None
}
})
.collect::<Vec<_>>();
self.disconnect_from_peers(old_elders).await?;
self.qp2p.update_bootstrap_contacts(&updated_contacts);
self.connect_to_elders().await
} else {
Ok(())
}
}
// Handle messages intended for client consumption (re: queries + commands)
async fn handle_client_msg(&self, msg: ProcessMsg, src: SocketAddr) {
debug!(
"===> ClientMsg with id {:?} received from {:?}",
msg.id(),
src
);
let queries = self.pending_queries.clone();
let transfers = self.pending_transfers.clone();
let error_sender = self.incoming_err_sender.clone();
let _ = tokio::spawn(async move {
debug!("Thread spawned to handle this client message");
match msg {
ProcessMsg::QueryResponse {
response,
correlation_id,
..
} => {
debug!("Query response (relating to msgid: {})", correlation_id);
trace!("The received query response is {:?}", response);
// Note that this doesn't remove the sender from here since multiple
// responses corresponding to the same message ID might arrive.
// Once we are satisfied with the response, this channel is discarded in
// ConnectionManager::send_query
if let Some(sender) = &queries.read().await.get(&correlation_id) {
trace!(
"Sending response for query w/{} via channel.",
correlation_id
);
let _ = sender.send(response).await;
} else {
trace!("No channel found for {:?}", correlation_id);
}
}
ProcessMsg::Event {
event,
correlation_id,
..
} => {
debug!("Event received to be processed: {:?}", correlation_id);
trace!("Event received is: {:?}", event);
if let Event::TransferValidated { event, .. } = event {
let transfers = transfers.read().await;
let sender = transfers.get(&correlation_id);
if let Some(sender) = sender {
let _ = sender.send(Ok(event)).await;
} else {
warn!(
"No transfer validation listener found for elder {:?} and message {:?}",
src, correlation_id
);
warn!("It may be that this transfer is complete and the listener cleaned up already.");
trace!("Event received was {:?}", event);
}
}
}
ProcessMsg::CmdError {
error,
correlation_id,
..
} => {
debug!(
"Cmd Error was received for Message w/ID: {:?}, sending on error channel",
correlation_id
);
trace!("Error received is: {:?}", error);
let _ = error_sender.send(error).await;
}
msg => {
warn!("Ignoring unexpected message type received: {:?}", msg);
}
};
});
}
}
| {
trace!("GetSectionResponse::Redirect, reboostrapping with provided peers");
// Disconnect from peer that sent us the redirect, connect to the new elders provided and
// request the section info again.
self.disconnect_from_peers(vec![src]).await?;
let endpoint = self.endpoint()?.clone();
let new_elders_addrs: Vec<SocketAddr> =
sap.elders.iter().map(|(_, addr)| *addr).collect();
self.qp2p
.update_bootstrap_contacts(new_elders_addrs.as_slice());
let boostrapped_peer = self
.qp2p
.rebootstrap(&endpoint, new_elders_addrs.as_slice())
.await?;
self.send_get_section_query(client_pk, &boostrapped_peer)
.await?;
Ok(())
} | conditional_block |
listeners.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::Session;
use crate::Error;
use log::{debug, error, info, trace, warn};
use qp2p::IncomingMessages;
use sn_data_types::PublicKey;
use sn_messaging::{
client::{ClientMsg, Event, ProcessMsg},
section_info::{Error as SectionInfoError, GetSectionResponse, SectionInfoMsg},
MessageId, MessageType, SectionAuthorityProvider, WireMsg,
};
use std::{
collections::{BTreeMap, BTreeSet},
net::SocketAddr,
};
impl Session {
/// Remove a pending transfer sender from the listener map
pub async fn remove_pending_transfer_sender(&self, msg_id: &MessageId) -> Result<(), Error> {
let pending_transfers = self.pending_transfers.clone();
let mut listeners = pending_transfers.write().await;
debug!("Pending transfers at this point: {:?}", listeners);
let _ = listeners
.remove(msg_id)
.ok_or(Error::NoTransferValidationListener)?;
Ok(())
}
// Listen for incoming messages on a connection
pub(crate) async fn spawn_message_listener_thread(
&self,
mut incoming_messages: IncomingMessages,
client_pk: PublicKey,
) {
debug!("Listening for incoming messages");
let mut session = self.clone();
let _ = tokio::spawn(async move {
loop {
match session
.process_incoming_message(&mut incoming_messages, client_pk)
.await
{
Ok(true) => (),
Ok(false) => {
info!("IncomingMessages listener has closed.");
break;
}
Err(err) => {
error!("Error while processing incoming message: {:?}. Listening for next message...", err);
}
}
}
});
}
pub(crate) async fn process_incoming_message(
&mut self,
incoming_messages: &mut IncomingMessages,
client_pk: PublicKey,
) -> Result<bool, Error> {
if let Some((src, message)) = incoming_messages.next().await {
let message_type = WireMsg::deserialize(message)?;
trace!("Incoming message from {:?}", &src);
match message_type {
MessageType::SectionInfo { msg, .. } => {
if let Err(error) = self.handle_section_info_msg(msg, src, client_pk).await { | ClientMsg::Process(msg) => self.handle_client_msg(msg, src).await,
ClientMsg::ProcessingError(error) => {
warn!("Processing error received. {:?}", error);
// TODO: Handle lazy message errors
}
msg => warn!("SupportingInfo received: {:?}", msg),
}
}
msg_type => {
warn!("Unexpected message type received: {:?}", msg_type);
}
}
Ok(true)
} else {
Ok(false)
}
}
// Private helpers
// Handle received network info messages
async fn handle_section_info_msg(
&mut self,
msg: SectionInfoMsg,
src: SocketAddr,
client_pk: PublicKey,
) -> Result<(), Error> {
trace!("Handling network info message {:?}", msg);
match &msg {
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Success(info)) => {
debug!("GetSectionResponse::Success!");
self.update_session_info(info).await
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::InvalidBootstrap(err),
)) => {
warn!(
"Message was interrupted due to {:?}. Attempting to connect to elders again.",
err
);
self.connect_to_elders().await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::TargetSectionInfoOutdated(sap),
)) => {
debug!("Updated section info received: {:?}", sap);
self.update_session_info(sap).await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Redirect(sap)) => {
trace!("GetSectionResponse::Redirect, reboostrapping with provided peers");
// Disconnect from peer that sent us the redirect, connect to the new elders provided and
// request the section info again.
self.disconnect_from_peers(vec![src]).await?;
let endpoint = self.endpoint()?.clone();
let new_elders_addrs: Vec<SocketAddr> =
sap.elders.iter().map(|(_, addr)| *addr).collect();
self.qp2p
.update_bootstrap_contacts(new_elders_addrs.as_slice());
let boostrapped_peer = self
.qp2p
.rebootstrap(&endpoint, new_elders_addrs.as_slice())
.await?;
self.send_get_section_query(client_pk, &boostrapped_peer)
.await?;
Ok(())
}
SectionInfoMsg::SectionInfoUpdate(update) => {
let correlation_id = update.correlation_id;
error!("MessageId {:?} was interrupted due to infrastructure updates. This will most likely need to be sent again. Update was : {:?}", correlation_id, update);
if let SectionInfoError::TargetSectionInfoOutdated(sap) = update.clone().error {
trace!("Updated network info: ({:?})", sap);
self.update_session_info(&sap).await?;
}
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(_))
| SectionInfoMsg::GetSectionQuery { .. } => {
Err(Error::UnexpectedMessageOnJoin(format!(
"bootstrapping failed since an invalid response ({:?}) was received",
msg
)))
}
}
}
// Apply updated info to a network session, and trigger connections
async fn update_session_info(&mut self, sap: &SectionAuthorityProvider) -> Result<(), Error> {
let original_known_elders = self.all_known_elders.read().await.clone();
// Change this once sn_messaging is updated
let received_elders = sap
.elders
.iter()
.map(|(name, addr)| (*addr, *name))
.collect::<BTreeMap<_, _>>();
// Obtain the addresses of the Elders
trace!(
"Updating session info! Received elders: ({:?})",
received_elders
);
{
// Update session key set
let mut keyset = self.section_key_set.write().await;
if *keyset == Some(sap.public_key_set.clone()) {
trace!("We have previously received the key set already.");
return Ok(());
}
*keyset = Some(sap.public_key_set.clone());
}
{
// update section prefix
let mut prefix = self.section_prefix.write().await;
*prefix = Some(sap.prefix);
}
{
// Update session elders
let mut session_elders = self.all_known_elders.write().await;
*session_elders = received_elders.clone();
}
if original_known_elders != received_elders {
debug!("Connecting to new set of Elders: {:?}", received_elders);
let new_elder_addresses = received_elders.keys().cloned().collect::<BTreeSet<_>>();
let updated_contacts = new_elder_addresses.iter().cloned().collect::<Vec<_>>();
let old_elders = original_known_elders
.iter()
.filter_map(|(peer_addr, _)| {
if !new_elder_addresses.contains(peer_addr) {
Some(*peer_addr)
} else {
None
}
})
.collect::<Vec<_>>();
self.disconnect_from_peers(old_elders).await?;
self.qp2p.update_bootstrap_contacts(&updated_contacts);
self.connect_to_elders().await
} else {
Ok(())
}
}
// Handle messages intended for client consumption (re: queries + commands)
async fn handle_client_msg(&self, msg: ProcessMsg, src: SocketAddr) {
debug!(
"===> ClientMsg with id {:?} received from {:?}",
msg.id(),
src
);
let queries = self.pending_queries.clone();
let transfers = self.pending_transfers.clone();
let error_sender = self.incoming_err_sender.clone();
let _ = tokio::spawn(async move {
debug!("Thread spawned to handle this client message");
match msg {
ProcessMsg::QueryResponse {
response,
correlation_id,
..
} => {
debug!("Query response (relating to msgid: {})", correlation_id);
trace!("The received query response is {:?}", response);
// Note that this doesn't remove the sender from here since multiple
// responses corresponding to the same message ID might arrive.
// Once we are satisfied with the response this is channel is discarded in
// ConnectionManager::send_query
if let Some(sender) = &queries.read().await.get(&correlation_id) {
trace!(
"Sending response for query w/{} via channel.",
correlation_id
);
let _ = sender.send(response).await;
} else {
trace!("No channel found for {:?}", correlation_id);
}
}
ProcessMsg::Event {
event,
correlation_id,
..
} => {
debug!("Event received to be processed: {:?}", correlation_id);
trace!("Event received is: {:?}", event);
if let Event::TransferValidated { event, .. } = event {
let transfers = transfers.read().await;
let sender = transfers.get(&correlation_id);
if let Some(sender) = sender {
let _ = sender.send(Ok(event)).await;
} else {
warn!(
"No transfer validation listener found for elder {:?} and message {:?}",
src, correlation_id
);
warn!("It may be that this transfer is complete and the listener cleaned up already.");
trace!("Event received was {:?}", event);
}
}
}
ProcessMsg::CmdError {
error,
correlation_id,
..
} => {
debug!(
"Cmd Error was received for Message w/ID: {:?}, sending on error channel",
correlation_id
);
trace!("Error received is: {:?}", error);
let _ = error_sender.send(error).await;
}
msg => {
warn!("Ignoring unexpected message type received: {:?}", msg);
}
};
});
}
} | error!("Error handling network info message: {:?}", error);
}
}
MessageType::Client { msg, .. } => {
match msg { | random_line_split |
listeners.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::Session;
use crate::Error;
use log::{debug, error, info, trace, warn};
use qp2p::IncomingMessages;
use sn_data_types::PublicKey;
use sn_messaging::{
client::{ClientMsg, Event, ProcessMsg},
section_info::{Error as SectionInfoError, GetSectionResponse, SectionInfoMsg},
MessageId, MessageType, SectionAuthorityProvider, WireMsg,
};
use std::{
collections::{BTreeMap, BTreeSet},
net::SocketAddr,
};
impl Session {
/// Remove a pending transfer sender from the listener map
pub async fn remove_pending_transfer_sender(&self, msg_id: &MessageId) -> Result<(), Error> {
let pending_transfers = self.pending_transfers.clone();
let mut listeners = pending_transfers.write().await;
debug!("Pending transfers at this point: {:?}", listeners);
let _ = listeners
.remove(msg_id)
.ok_or(Error::NoTransferValidationListener)?;
Ok(())
}
// Listen for incoming messages on a connection
pub(crate) async fn spawn_message_listener_thread(
&self,
mut incoming_messages: IncomingMessages,
client_pk: PublicKey,
) {
debug!("Listening for incoming messages");
let mut session = self.clone();
let _ = tokio::spawn(async move {
loop {
match session
.process_incoming_message(&mut incoming_messages, client_pk)
.await
{
Ok(true) => (),
Ok(false) => {
info!("IncomingMessages listener has closed.");
break;
}
Err(err) => {
error!("Error while processing incoming message: {:?}. Listening for next message...", err);
}
}
}
});
}
pub(crate) async fn process_incoming_message(
&mut self,
incoming_messages: &mut IncomingMessages,
client_pk: PublicKey,
) -> Result<bool, Error> {
if let Some((src, message)) = incoming_messages.next().await {
let message_type = WireMsg::deserialize(message)?;
trace!("Incoming message from {:?}", &src);
match message_type {
MessageType::SectionInfo { msg, .. } => {
if let Err(error) = self.handle_section_info_msg(msg, src, client_pk).await {
error!("Error handling network info message: {:?}", error);
}
}
MessageType::Client { msg, .. } => {
match msg {
ClientMsg::Process(msg) => self.handle_client_msg(msg, src).await,
ClientMsg::ProcessingError(error) => {
warn!("Processing error received. {:?}", error);
// TODO: Handle lazy message errors
}
msg => warn!("SupportingInfo received: {:?}", msg),
}
}
msg_type => {
warn!("Unexpected message type received: {:?}", msg_type);
}
}
Ok(true)
} else {
Ok(false)
}
}
// Private helpers
// Handle received network info messages
async fn handle_section_info_msg(
&mut self,
msg: SectionInfoMsg,
src: SocketAddr,
client_pk: PublicKey,
) -> Result<(), Error> {
trace!("Handling network info message {:?}", msg);
match &msg {
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Success(info)) => {
debug!("GetSectionResponse::Success!");
self.update_session_info(info).await
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::InvalidBootstrap(err),
)) => {
warn!(
"Message was interrupted due to {:?}. Attempting to connect to elders again.",
err
);
self.connect_to_elders().await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::TargetSectionInfoOutdated(sap),
)) => {
debug!("Updated section info received: {:?}", sap);
self.update_session_info(sap).await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Redirect(sap)) => {
trace!("GetSectionResponse::Redirect, reboostrapping with provided peers");
// Disconnect from peer that sent us the redirect, connect to the new elders provided and
// request the section info again.
self.disconnect_from_peers(vec![src]).await?;
let endpoint = self.endpoint()?.clone();
let new_elders_addrs: Vec<SocketAddr> =
sap.elders.iter().map(|(_, addr)| *addr).collect();
self.qp2p
.update_bootstrap_contacts(new_elders_addrs.as_slice());
let boostrapped_peer = self
.qp2p
.rebootstrap(&endpoint, new_elders_addrs.as_slice())
.await?;
self.send_get_section_query(client_pk, &boostrapped_peer)
.await?;
Ok(())
}
SectionInfoMsg::SectionInfoUpdate(update) => {
let correlation_id = update.correlation_id;
error!("MessageId {:?} was interrupted due to infrastructure updates. This will most likely need to be sent again. Update was : {:?}", correlation_id, update);
if let SectionInfoError::TargetSectionInfoOutdated(sap) = update.clone().error {
trace!("Updated network info: ({:?})", sap);
self.update_session_info(&sap).await?;
}
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(_))
| SectionInfoMsg::GetSectionQuery { .. } => {
Err(Error::UnexpectedMessageOnJoin(format!(
"bootstrapping failed since an invalid response ({:?}) was received",
msg
)))
}
}
}
// Apply updated info to a network session, and trigger connections
async fn update_session_info(&mut self, sap: &SectionAuthorityProvider) -> Result<(), Error> {
let original_known_elders = self.all_known_elders.read().await.clone();
// Change this once sn_messaging is updated
let received_elders = sap
.elders
.iter()
.map(|(name, addr)| (*addr, *name))
.collect::<BTreeMap<_, _>>();
// Obtain the addresses of the Elders
trace!(
"Updating session info! Received elders: ({:?})",
received_elders
);
{
// Update session key set
let mut keyset = self.section_key_set.write().await;
if *keyset == Some(sap.public_key_set.clone()) {
trace!("We have previously received the key set already.");
return Ok(());
}
*keyset = Some(sap.public_key_set.clone());
}
{
// update section prefix
let mut prefix = self.section_prefix.write().await;
*prefix = Some(sap.prefix);
}
{
// Update session elders
let mut session_elders = self.all_known_elders.write().await;
*session_elders = received_elders.clone();
}
if original_known_elders != received_elders {
debug!("Connecting to new set of Elders: {:?}", received_elders);
let new_elder_addresses = received_elders.keys().cloned().collect::<BTreeSet<_>>();
let updated_contacts = new_elder_addresses.iter().cloned().collect::<Vec<_>>();
let old_elders = original_known_elders
.iter()
.filter_map(|(peer_addr, _)| {
if !new_elder_addresses.contains(peer_addr) {
Some(*peer_addr)
} else {
None
}
})
.collect::<Vec<_>>();
self.disconnect_from_peers(old_elders).await?;
self.qp2p.update_bootstrap_contacts(&updated_contacts);
self.connect_to_elders().await
} else {
Ok(())
}
}
// Handle messages intended for client consumption (re: queries + commands)
async fn handle_client_msg(&self, msg: ProcessMsg, src: SocketAddr) |
}
| {
debug!(
"===> ClientMsg with id {:?} received from {:?}",
msg.id(),
src
);
let queries = self.pending_queries.clone();
let transfers = self.pending_transfers.clone();
let error_sender = self.incoming_err_sender.clone();
let _ = tokio::spawn(async move {
debug!("Thread spawned to handle this client message");
match msg {
ProcessMsg::QueryResponse {
response,
correlation_id,
..
} => {
debug!("Query response (relating to msgid: {})", correlation_id);
trace!("The received query response is {:?}", response);
// Note that this doesn't remove the sender from here since multiple
// responses corresponding to the same message ID might arrive.
// Once we are satisfied with the response this is channel is discarded in
// ConnectionManager::send_query
if let Some(sender) = &queries.read().await.get(&correlation_id) {
trace!(
"Sending response for query w/{} via channel.",
correlation_id
);
let _ = sender.send(response).await;
} else {
trace!("No channel found for {:?}", correlation_id);
}
}
ProcessMsg::Event {
event,
correlation_id,
..
} => {
debug!("Event received to be processed: {:?}", correlation_id);
trace!("Event received is: {:?}", event);
if let Event::TransferValidated { event, .. } = event {
let transfers = transfers.read().await;
let sender = transfers.get(&correlation_id);
if let Some(sender) = sender {
let _ = sender.send(Ok(event)).await;
} else {
warn!(
"No transfer validation listener found for elder {:?} and message {:?}",
src, correlation_id
);
warn!("It may be that this transfer is complete and the listener cleaned up already.");
trace!("Event received was {:?}", event);
}
}
}
ProcessMsg::CmdError {
error,
correlation_id,
..
} => {
debug!(
"Cmd Error was received for Message w/ID: {:?}, sending on error channel",
correlation_id
);
trace!("Error received is: {:?}", error);
let _ = error_sender.send(error).await;
}
msg => {
warn!("Ignoring unexpected message type received: {:?}", msg);
}
};
});
} | identifier_body |
listeners.rs | // Copyright 2021 MaidSafe.net limited.
//
// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3.
// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed
// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. Please review the Licences for the specific language governing
// permissions and limitations relating to use of the SAFE Network Software.
use super::Session;
use crate::Error;
use log::{debug, error, info, trace, warn};
use qp2p::IncomingMessages;
use sn_data_types::PublicKey;
use sn_messaging::{
client::{ClientMsg, Event, ProcessMsg},
section_info::{Error as SectionInfoError, GetSectionResponse, SectionInfoMsg},
MessageId, MessageType, SectionAuthorityProvider, WireMsg,
};
use std::{
collections::{BTreeMap, BTreeSet},
net::SocketAddr,
};
impl Session {
/// Remove a pending transfer sender from the listener map
pub async fn | (&self, msg_id: &MessageId) -> Result<(), Error> {
let pending_transfers = self.pending_transfers.clone();
let mut listeners = pending_transfers.write().await;
debug!("Pending transfers at this point: {:?}", listeners);
let _ = listeners
.remove(msg_id)
.ok_or(Error::NoTransferValidationListener)?;
Ok(())
}
// Listen for incoming messages on a connection
pub(crate) async fn spawn_message_listener_thread(
&self,
mut incoming_messages: IncomingMessages,
client_pk: PublicKey,
) {
debug!("Listening for incoming messages");
let mut session = self.clone();
let _ = tokio::spawn(async move {
loop {
match session
.process_incoming_message(&mut incoming_messages, client_pk)
.await
{
Ok(true) => (),
Ok(false) => {
info!("IncomingMessages listener has closed.");
break;
}
Err(err) => {
error!("Error while processing incoming message: {:?}. Listening for next message...", err);
}
}
}
});
}
pub(crate) async fn process_incoming_message(
&mut self,
incoming_messages: &mut IncomingMessages,
client_pk: PublicKey,
) -> Result<bool, Error> {
if let Some((src, message)) = incoming_messages.next().await {
let message_type = WireMsg::deserialize(message)?;
trace!("Incoming message from {:?}", &src);
match message_type {
MessageType::SectionInfo { msg, .. } => {
if let Err(error) = self.handle_section_info_msg(msg, src, client_pk).await {
error!("Error handling network info message: {:?}", error);
}
}
MessageType::Client { msg, .. } => {
match msg {
ClientMsg::Process(msg) => self.handle_client_msg(msg, src).await,
ClientMsg::ProcessingError(error) => {
warn!("Processing error received. {:?}", error);
// TODO: Handle lazy message errors
}
msg => warn!("SupportingInfo received: {:?}", msg),
}
}
msg_type => {
warn!("Unexpected message type received: {:?}", msg_type);
}
}
Ok(true)
} else {
Ok(false)
}
}
// Private helpers
// Handle received network info messages
async fn handle_section_info_msg(
&mut self,
msg: SectionInfoMsg,
src: SocketAddr,
client_pk: PublicKey,
) -> Result<(), Error> {
trace!("Handling network info message {:?}", msg);
match &msg {
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Success(info)) => {
debug!("GetSectionResponse::Success!");
self.update_session_info(info).await
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::InvalidBootstrap(err),
)) => {
warn!(
"Message was interrupted due to {:?}. Attempting to connect to elders again.",
err
);
self.connect_to_elders().await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(
SectionInfoError::TargetSectionInfoOutdated(sap),
)) => {
debug!("Updated section info received: {:?}", sap);
self.update_session_info(sap).await?;
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::Redirect(sap)) => {
trace!("GetSectionResponse::Redirect, reboostrapping with provided peers");
// Disconnect from peer that sent us the redirect, connect to the new elders provided and
// request the section info again.
self.disconnect_from_peers(vec![src]).await?;
let endpoint = self.endpoint()?.clone();
let new_elders_addrs: Vec<SocketAddr> =
sap.elders.iter().map(|(_, addr)| *addr).collect();
self.qp2p
.update_bootstrap_contacts(new_elders_addrs.as_slice());
let boostrapped_peer = self
.qp2p
.rebootstrap(&endpoint, new_elders_addrs.as_slice())
.await?;
self.send_get_section_query(client_pk, &boostrapped_peer)
.await?;
Ok(())
}
SectionInfoMsg::SectionInfoUpdate(update) => {
let correlation_id = update.correlation_id;
error!("MessageId {:?} was interrupted due to infrastructure updates. This will most likely need to be sent again. Update was : {:?}", correlation_id, update);
if let SectionInfoError::TargetSectionInfoOutdated(sap) = update.clone().error {
trace!("Updated network info: ({:?})", sap);
self.update_session_info(&sap).await?;
}
Ok(())
}
SectionInfoMsg::GetSectionResponse(GetSectionResponse::SectionInfoUpdate(_))
| SectionInfoMsg::GetSectionQuery { .. } => {
Err(Error::UnexpectedMessageOnJoin(format!(
"bootstrapping failed since an invalid response ({:?}) was received",
msg
)))
}
}
}
// Apply updated info to a network session, and trigger connections
async fn update_session_info(&mut self, sap: &SectionAuthorityProvider) -> Result<(), Error> {
let original_known_elders = self.all_known_elders.read().await.clone();
// Change this once sn_messaging is updated
let received_elders = sap
.elders
.iter()
.map(|(name, addr)| (*addr, *name))
.collect::<BTreeMap<_, _>>();
// Obtain the addresses of the Elders
trace!(
"Updating session info! Received elders: ({:?})",
received_elders
);
{
// Update session key set
let mut keyset = self.section_key_set.write().await;
if *keyset == Some(sap.public_key_set.clone()) {
trace!("We have previously received the key set already.");
return Ok(());
}
*keyset = Some(sap.public_key_set.clone());
}
{
// update section prefix
let mut prefix = self.section_prefix.write().await;
*prefix = Some(sap.prefix);
}
{
// Update session elders
let mut session_elders = self.all_known_elders.write().await;
*session_elders = received_elders.clone();
}
if original_known_elders != received_elders {
debug!("Connecting to new set of Elders: {:?}", received_elders);
let new_elder_addresses = received_elders.keys().cloned().collect::<BTreeSet<_>>();
let updated_contacts = new_elder_addresses.iter().cloned().collect::<Vec<_>>();
let old_elders = original_known_elders
.iter()
.filter_map(|(peer_addr, _)| {
if !new_elder_addresses.contains(peer_addr) {
Some(*peer_addr)
} else {
None
}
})
.collect::<Vec<_>>();
self.disconnect_from_peers(old_elders).await?;
self.qp2p.update_bootstrap_contacts(&updated_contacts);
self.connect_to_elders().await
} else {
Ok(())
}
}
// Handle messages intended for client consumption (re: queries + commands)
async fn handle_client_msg(&self, msg: ProcessMsg, src: SocketAddr) {
debug!(
"===> ClientMsg with id {:?} received from {:?}",
msg.id(),
src
);
let queries = self.pending_queries.clone();
let transfers = self.pending_transfers.clone();
let error_sender = self.incoming_err_sender.clone();
let _ = tokio::spawn(async move {
debug!("Thread spawned to handle this client message");
match msg {
ProcessMsg::QueryResponse {
response,
correlation_id,
..
} => {
debug!("Query response (relating to msgid: {})", correlation_id);
trace!("The received query response is {:?}", response);
// Note that this doesn't remove the sender from here since multiple
// responses corresponding to the same message ID might arrive.
// Once we are satisfied with the response, this channel is discarded in
// ConnectionManager::send_query
if let Some(sender) = &queries.read().await.get(&correlation_id) {
trace!(
"Sending response for query w/{} via channel.",
correlation_id
);
let _ = sender.send(response).await;
} else {
trace!("No channel found for {:?}", correlation_id);
}
}
ProcessMsg::Event {
event,
correlation_id,
..
} => {
debug!("Event received to be processed: {:?}", correlation_id);
trace!("Event received is: {:?}", event);
if let Event::TransferValidated { event, .. } = event {
let transfers = transfers.read().await;
let sender = transfers.get(&correlation_id);
if let Some(sender) = sender {
let _ = sender.send(Ok(event)).await;
} else {
warn!(
"No transfer validation listener found for elder {:?} and message {:?}",
src, correlation_id
);
warn!("It may be that this transfer is complete and the listener cleaned up already.");
trace!("Event received was {:?}", event);
}
}
}
ProcessMsg::CmdError {
error,
correlation_id,
..
} => {
debug!(
"Cmd Error was received for Message w/ID: {:?}, sending on error channel",
correlation_id
);
trace!("Error received is: {:?}", error);
let _ = error_sender.send(error).await;
}
msg => {
warn!("Ignoring unexpected message type received: {:?}", msg);
}
};
});
}
}
| remove_pending_transfer_sender | identifier_name |
viewletService.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { TPromise, ValueCallback } from 'vs/base/common/winjs.base';
import { IViewlet } from 'vs/workbench/common/viewlet';
import { IViewletService } from 'vs/workbench/services/viewlet/browser/viewlet';
import Event from 'vs/base/common/event';
import { ISidebar } from 'vs/workbench/browser/parts/sidebar/sidebarPart';
import { Registry } from 'vs/platform/platform';
import { ViewletDescriptor, ViewletRegistry, Extensions as ViewletExtensions } from 'vs/workbench/browser/viewlet';
import { IExtensionService } from 'vs/platform/extensions/common/extensions';
export class ViewletService implements IViewletService {
public _serviceBrand: any;
private sidebarPart: ISidebar;
private viewletRegistry: ViewletRegistry;
private extensionViewlets: ViewletDescriptor[];
private extensionViewletsLoaded: TPromise<void>;
private extensionViewletsLoadedPromiseComplete: ValueCallback;
public get onDidViewletOpen(): Event<IViewlet> { return this.sidebarPart.onDidViewletOpen; };
public get onDidViewletClose(): Event<IViewlet> { return this.sidebarPart.onDidViewletClose; };
constructor(
sidebarPart: ISidebar,
@IExtensionService private extensionService: IExtensionService
) {
this.sidebarPart = sidebarPart;
this.viewletRegistry = Registry.as<ViewletRegistry>(ViewletExtensions.Viewlets);
this.loadExtensionViewlets();
}
private loadExtensionViewlets(): void {
this.extensionViewlets = [];
this.extensionViewletsLoaded = new TPromise<void>(c => {
this.extensionViewletsLoadedPromiseComplete = c;
});
this.extensionService.onReady().then(() => {
const viewlets = this.viewletRegistry.getViewlets();
viewlets.forEach(v => {
if (!!v.extensionId) {
this.extensionViewlets.push(v);
}
});
this.extensionViewletsLoadedPromiseComplete(void 0);
});
}
public openViewlet(id: string, focus?: boolean): TPromise<IViewlet> {
// Built in viewlets do not need to wait for extensions to be loaded
const builtInViewletIds = this.getBuiltInViewlets().map(v => v.id);
const isBuiltInViewlet = builtInViewletIds.indexOf(id) !== -1;
if (isBuiltInViewlet) {
return this.sidebarPart.openViewlet(id, focus); | return this.extensionViewletsLoaded.then(() => {
if (this.viewletRegistry.getViewlet(id)) {
return this.sidebarPart.openViewlet(id, focus);
}
// Fallback to default viewlet if extension viewlet is still not found (e.g. uninstalled)
return this.sidebarPart.openViewlet(this.getDefaultViewletId(), focus);
});
}
public getActiveViewlet(): IViewlet {
return this.sidebarPart.getActiveViewlet();
}
public getViewlets(): ViewletDescriptor[] {
const builtInViewlets = this.getBuiltInViewlets();
return builtInViewlets.concat(this.extensionViewlets);
}
private getBuiltInViewlets(): ViewletDescriptor[] {
return this.viewletRegistry.getViewlets()
.filter(viewlet => !viewlet.extensionId)
.sort((v1, v2) => v1.order - v2.order);
}
public getDefaultViewletId(): string {
return this.viewletRegistry.getDefaultViewletId();
}
public getViewlet(id: string): ViewletDescriptor {
return this.getViewlets().filter(viewlet => viewlet.id === id)[0];
}
} | }
// Extension viewlets need to be loaded first which can take time | random_line_split |
viewletService.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { TPromise, ValueCallback } from 'vs/base/common/winjs.base';
import { IViewlet } from 'vs/workbench/common/viewlet';
import { IViewletService } from 'vs/workbench/services/viewlet/browser/viewlet';
import Event from 'vs/base/common/event';
import { ISidebar } from 'vs/workbench/browser/parts/sidebar/sidebarPart';
import { Registry } from 'vs/platform/platform';
import { ViewletDescriptor, ViewletRegistry, Extensions as ViewletExtensions } from 'vs/workbench/browser/viewlet';
import { IExtensionService } from 'vs/platform/extensions/common/extensions';
export class ViewletService implements IViewletService {
public _serviceBrand: any;
private sidebarPart: ISidebar;
private viewletRegistry: ViewletRegistry;
private extensionViewlets: ViewletDescriptor[];
private extensionViewletsLoaded: TPromise<void>;
private extensionViewletsLoadedPromiseComplete: ValueCallback;
public get onDidViewletOpen(): Event<IViewlet> { return this.sidebarPart.onDidViewletOpen; };
public get onDidViewletClose(): Event<IViewlet> { return this.sidebarPart.onDidViewletClose; };
constructor(
sidebarPart: ISidebar,
@IExtensionService private extensionService: IExtensionService
) {
this.sidebarPart = sidebarPart;
this.viewletRegistry = Registry.as<ViewletRegistry>(ViewletExtensions.Viewlets);
this.loadExtensionViewlets();
}
private loadExtensionViewlets(): void {
this.extensionViewlets = [];
this.extensionViewletsLoaded = new TPromise<void>(c => {
this.extensionViewletsLoadedPromiseComplete = c;
});
this.extensionService.onReady().then(() => {
const viewlets = this.viewletRegistry.getViewlets();
viewlets.forEach(v => {
if (!!v.extensionId) {
this.extensionViewlets.push(v);
}
});
this.extensionViewletsLoadedPromiseComplete(void 0);
});
}
public openViewlet(id: string, focus?: boolean): TPromise<IViewlet> {
// Built in viewlets do not need to wait for extensions to be loaded
const builtInViewletIds = this.getBuiltInViewlets().map(v => v.id);
const isBuiltInViewlet = builtInViewletIds.indexOf(id) !== -1;
if (isBuiltInViewlet) {
return this.sidebarPart.openViewlet(id, focus);
}
// Extension viewlets need to be loaded first which can take time
return this.extensionViewletsLoaded.then(() => {
if (this.viewletRegistry.getViewlet(id)) |
// Fallback to default viewlet if extension viewlet is still not found (e.g. uninstalled)
return this.sidebarPart.openViewlet(this.getDefaultViewletId(), focus);
});
}
public getActiveViewlet(): IViewlet {
return this.sidebarPart.getActiveViewlet();
}
public getViewlets(): ViewletDescriptor[] {
const builtInViewlets = this.getBuiltInViewlets();
return builtInViewlets.concat(this.extensionViewlets);
}
private getBuiltInViewlets(): ViewletDescriptor[] {
return this.viewletRegistry.getViewlets()
.filter(viewlet => !viewlet.extensionId)
.sort((v1, v2) => v1.order - v2.order);
}
public getDefaultViewletId(): string {
return this.viewletRegistry.getDefaultViewletId();
}
public getViewlet(id: string): ViewletDescriptor {
return this.getViewlets().filter(viewlet => viewlet.id === id)[0];
}
} | {
return this.sidebarPart.openViewlet(id, focus);
} | conditional_block |
viewletService.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { TPromise, ValueCallback } from 'vs/base/common/winjs.base';
import { IViewlet } from 'vs/workbench/common/viewlet';
import { IViewletService } from 'vs/workbench/services/viewlet/browser/viewlet';
import Event from 'vs/base/common/event';
import { ISidebar } from 'vs/workbench/browser/parts/sidebar/sidebarPart';
import { Registry } from 'vs/platform/platform';
import { ViewletDescriptor, ViewletRegistry, Extensions as ViewletExtensions } from 'vs/workbench/browser/viewlet';
import { IExtensionService } from 'vs/platform/extensions/common/extensions';
export class ViewletService implements IViewletService {
public _serviceBrand: any;
private sidebarPart: ISidebar;
private viewletRegistry: ViewletRegistry;
private extensionViewlets: ViewletDescriptor[];
private extensionViewletsLoaded: TPromise<void>;
private extensionViewletsLoadedPromiseComplete: ValueCallback;
public get onDidViewletOpen(): Event<IViewlet> { return this.sidebarPart.onDidViewletOpen; };
public get onDidViewletClose(): Event<IViewlet> { return this.sidebarPart.onDidViewletClose; };
constructor(
sidebarPart: ISidebar,
@IExtensionService private extensionService: IExtensionService
) {
this.sidebarPart = sidebarPart;
this.viewletRegistry = Registry.as<ViewletRegistry>(ViewletExtensions.Viewlets);
this.loadExtensionViewlets();
}
private loadExtensionViewlets(): void {
this.extensionViewlets = [];
this.extensionViewletsLoaded = new TPromise<void>(c => {
this.extensionViewletsLoadedPromiseComplete = c;
});
this.extensionService.onReady().then(() => {
const viewlets = this.viewletRegistry.getViewlets();
viewlets.forEach(v => {
if (!!v.extensionId) {
this.extensionViewlets.push(v);
}
});
this.extensionViewletsLoadedPromiseComplete(void 0);
});
}
public openViewlet(id: string, focus?: boolean): TPromise<IViewlet> {
// Built in viewlets do not need to wait for extensions to be loaded
const builtInViewletIds = this.getBuiltInViewlets().map(v => v.id);
const isBuiltInViewlet = builtInViewletIds.indexOf(id) !== -1;
if (isBuiltInViewlet) {
return this.sidebarPart.openViewlet(id, focus);
}
// Extension viewlets need to be loaded first which can take time
return this.extensionViewletsLoaded.then(() => {
if (this.viewletRegistry.getViewlet(id)) {
return this.sidebarPart.openViewlet(id, focus);
}
// Fallback to default viewlet if extension viewlet is still not found (e.g. uninstalled)
return this.sidebarPart.openViewlet(this.getDefaultViewletId(), focus);
});
}
public getActiveViewlet(): IViewlet {
return this.sidebarPart.getActiveViewlet();
}
public getViewlets(): ViewletDescriptor[] {
const builtInViewlets = this.getBuiltInViewlets();
return builtInViewlets.concat(this.extensionViewlets);
}
private getBuiltInViewlets(): ViewletDescriptor[] |
public getDefaultViewletId(): string {
return this.viewletRegistry.getDefaultViewletId();
}
public getViewlet(id: string): ViewletDescriptor {
return this.getViewlets().filter(viewlet => viewlet.id === id)[0];
}
} | {
return this.viewletRegistry.getViewlets()
.filter(viewlet => !viewlet.extensionId)
.sort((v1, v2) => v1.order - v2.order);
} | identifier_body |
viewletService.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { TPromise, ValueCallback } from 'vs/base/common/winjs.base';
import { IViewlet } from 'vs/workbench/common/viewlet';
import { IViewletService } from 'vs/workbench/services/viewlet/browser/viewlet';
import Event from 'vs/base/common/event';
import { ISidebar } from 'vs/workbench/browser/parts/sidebar/sidebarPart';
import { Registry } from 'vs/platform/platform';
import { ViewletDescriptor, ViewletRegistry, Extensions as ViewletExtensions } from 'vs/workbench/browser/viewlet';
import { IExtensionService } from 'vs/platform/extensions/common/extensions';
export class ViewletService implements IViewletService {
public _serviceBrand: any;
private sidebarPart: ISidebar;
private viewletRegistry: ViewletRegistry;
private extensionViewlets: ViewletDescriptor[];
private extensionViewletsLoaded: TPromise<void>;
private extensionViewletsLoadedPromiseComplete: ValueCallback;
public get onDidViewletOpen(): Event<IViewlet> { return this.sidebarPart.onDidViewletOpen; };
public get onDidViewletClose(): Event<IViewlet> { return this.sidebarPart.onDidViewletClose; };
constructor(
sidebarPart: ISidebar,
@IExtensionService private extensionService: IExtensionService
) {
this.sidebarPart = sidebarPart;
this.viewletRegistry = Registry.as<ViewletRegistry>(ViewletExtensions.Viewlets);
this.loadExtensionViewlets();
}
private loadExtensionViewlets(): void {
this.extensionViewlets = [];
this.extensionViewletsLoaded = new TPromise<void>(c => {
this.extensionViewletsLoadedPromiseComplete = c;
});
this.extensionService.onReady().then(() => {
const viewlets = this.viewletRegistry.getViewlets();
viewlets.forEach(v => {
if (!!v.extensionId) {
this.extensionViewlets.push(v);
}
});
this.extensionViewletsLoadedPromiseComplete(void 0);
});
}
public | (id: string, focus?: boolean): TPromise<IViewlet> {
// Built in viewlets do not need to wait for extensions to be loaded
const builtInViewletIds = this.getBuiltInViewlets().map(v => v.id);
const isBuiltInViewlet = builtInViewletIds.indexOf(id) !== -1;
if (isBuiltInViewlet) {
return this.sidebarPart.openViewlet(id, focus);
}
// Extension viewlets need to be loaded first which can take time
return this.extensionViewletsLoaded.then(() => {
if (this.viewletRegistry.getViewlet(id)) {
return this.sidebarPart.openViewlet(id, focus);
}
// Fallback to default viewlet if extension viewlet is still not found (e.g. uninstalled)
return this.sidebarPart.openViewlet(this.getDefaultViewletId(), focus);
});
}
public getActiveViewlet(): IViewlet {
return this.sidebarPart.getActiveViewlet();
}
public getViewlets(): ViewletDescriptor[] {
const builtInViewlets = this.getBuiltInViewlets();
return builtInViewlets.concat(this.extensionViewlets);
}
private getBuiltInViewlets(): ViewletDescriptor[] {
return this.viewletRegistry.getViewlets()
.filter(viewlet => !viewlet.extensionId)
.sort((v1, v2) => v1.order - v2.order);
}
public getDefaultViewletId(): string {
return this.viewletRegistry.getDefaultViewletId();
}
public getViewlet(id: string): ViewletDescriptor {
return this.getViewlets().filter(viewlet => viewlet.id === id)[0];
}
} | openViewlet | identifier_name |
automaton.py | # This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
The _TLSAutomaton class provides methods common to both TLS client and server.
"""
import struct
from scapy.automaton import Automaton
from scapy.config import conf
from scapy.error import log_interactive
from scapy.packet import Raw
from scapy.layers.tls.basefields import _tls_type
from scapy.layers.tls.cert import Cert, PrivKey
from scapy.layers.tls.record import TLS
from scapy.layers.tls.record_sslv2 import SSLv2
from scapy.layers.tls.record_tls13 import TLS13
class _TLSAutomaton(Automaton):
"""
SSLv3 and TLS 1.0-1.2 typically need a 2-RTT handshake:
Client Server
| --------->>> | C1 - ClientHello
| <<<--------- | S1 - ServerHello
| <<<--------- | S1 - Certificate
| <<<--------- | S1 - ServerKeyExchange
| <<<--------- | S1 - ServerHelloDone
| --------->>> | C2 - ClientKeyExchange
| --------->>> | C2 - ChangeCipherSpec
| --------->>> | C2 - Finished [encrypted]
| <<<--------- | S2 - ChangeCipherSpec
| <<<--------- | S2 - Finished [encrypted]
We call these successive groups of messages:
ClientFlight1, ServerFlight1, ClientFlight2 and ServerFlight2.
We want to send our messages from the same flight all at once through the
socket. This is achieved by managing a list of records in 'buffer_out'.
We may put several messages (i.e. what RFC 5246 calls the record fragments)
in the same record when possible, but we may need several records for the
same flight, as with ClientFlight2.
However, note that the flights from the opposite side may be spread wildly
across TLS records and TCP packets. This is why we use a 'get_next_msg'
method for feeding a list of received messages, 'buffer_in'. Raw data
which has not yet been interpreted as a TLS record is kept in 'remain_in'.
"""
def parse_args(self, mycert=None, mykey=None, **kargs):
super(_TLSAutomaton, self).parse_args(**kargs)
self.socket = None
self.remain_in = b""
self.buffer_in = [] # these are 'fragments' inside records
self.buffer_out = [] # these are records
self.cur_session = None
self.cur_pkt = None # this is usually the latest parsed packet
if mycert:
self.mycert = Cert(mycert)
else:
self.mycert = None
if mykey:
self.mykey = PrivKey(mykey)
else:
self.mykey = None
self.verbose = kargs.get("verbose", True)
def get_next_msg(self, socket_timeout=2, retry=2):
"""
The purpose of the function is to make next message(s) available in
self.buffer_in. If the list is not empty, nothing is done. If not, in
order to fill it, the function uses the data already available in
self.remain_in from a previous call and waits till there are enough to
dissect a TLS packet. Once dissected, the content of the TLS packet
(carried messages, or 'fragments') is appended to self.buffer_in.
We have to grab enough data to dissect a TLS packet. We start by
reading the first 2 bytes. Unless we get anything different from
\\x14\\x03, \\x15\\x03, \\x16\\x03 or \\x17\\x03 (which might indicate
an SSLv2 record, whose first 2 bytes encode the length), we retrieve
3 more bytes in order to get the length of the TLS record, and
finally we can retrieve the remaining of the record.
"""
if self.buffer_in:
# A message is already available.
return
self.socket.settimeout(socket_timeout)
is_sslv2_msg = False
still_getting_len = True
grablen = 2
while retry and (still_getting_len or len(self.remain_in) < grablen):
if not is_sslv2_msg and grablen == 5 and len(self.remain_in) >= 5:
grablen = struct.unpack('!H', self.remain_in[3:5])[0] + 5
still_getting_len = False
elif grablen == 2 and len(self.remain_in) >= 2:
byte0 = struct.unpack("B", self.remain_in[:1])[0]
byte1 = struct.unpack("B", self.remain_in[1:2])[0]
if (byte0 in _tls_type) and (byte1 == 3):
# Retry following TLS scheme. This will cause failure
# for SSLv2 packets with length 0x1{4-7}03.
grablen = 5
else:
# Extract the SSLv2 length.
is_sslv2_msg = True
still_getting_len = False
if byte0 & 0x80:
grablen = 2 + 0 + ((byte0 & 0x7f) << 8) + byte1
else:
grablen = 2 + 1 + ((byte0 & 0x3f) << 8) + byte1
elif not is_sslv2_msg and grablen == 5 and len(self.remain_in) >= 5: # noqa: E501
grablen = struct.unpack('!H', self.remain_in[3:5])[0] + 5
if grablen == len(self.remain_in):
break
try:
tmp = self.socket.recv(grablen - len(self.remain_in))
if not tmp:
retry -= 1
else:
self.remain_in += tmp
except Exception:
self.vprint("Could not join host ! Retrying...")
retry -= 1
if len(self.remain_in) < 2 or len(self.remain_in) != grablen:
# Remote peer is not willing to respond
return
p = TLS(self.remain_in, tls_session=self.cur_session)
self.cur_session = p.tls_session
self.remain_in = b""
if isinstance(p, SSLv2) and not p.msg:
p.msg = Raw("")
if self.cur_session.tls_version is None or \
self.cur_session.tls_version < 0x0304:
self.buffer_in += p.msg
else:
if isinstance(p, TLS13):
self.buffer_in += p.inner.msg
else:
# should be TLS13ServerHello only
self.buffer_in += p.msg
while p.payload:
if isinstance(p.payload, Raw):
self.remain_in += p.payload.load
p = p.payload
elif isinstance(p.payload, TLS):
|
def raise_on_packet(self, pkt_cls, state, get_next_msg=True):
"""
If the next message to be processed has type 'pkt_cls', raise 'state'.
If there is no message waiting to be processed, we try to get one with
the default 'get_next_msg' parameters.
"""
# Maybe we already parsed the expected packet, maybe not.
if get_next_msg:
self.get_next_msg()
if (not self.buffer_in or
not isinstance(self.buffer_in[0], pkt_cls)):
return
self.cur_pkt = self.buffer_in[0]
self.buffer_in = self.buffer_in[1:]
raise state()
def add_record(self, is_sslv2=None, is_tls13=None):
"""
Add a new TLS or SSLv2 or TLS 1.3 record to the packets buffered out.
"""
if is_sslv2 is None and is_tls13 is None:
v = (self.cur_session.tls_version or
self.cur_session.advertised_tls_version)
if v in [0x0200, 0x0002]:
is_sslv2 = True
elif v >= 0x0304:
is_tls13 = True
if is_sslv2:
self.buffer_out.append(SSLv2(tls_session=self.cur_session))
elif is_tls13:
self.buffer_out.append(TLS13(tls_session=self.cur_session))
else:
self.buffer_out.append(TLS(tls_session=self.cur_session))
def add_msg(self, pkt):
"""
Add a TLS message (e.g. TLSClientHello or TLSApplicationData)
inside the latest record to be sent through the socket.
We believe a good automaton should not use the first test.
"""
if not self.buffer_out:
self.add_record()
r = self.buffer_out[-1]
if isinstance(r, TLS13):
self.buffer_out[-1].inner.msg.append(pkt)
else:
self.buffer_out[-1].msg.append(pkt)
def flush_records(self):
"""
Send all buffered records and update the session accordingly.
"""
s = b"".join(p.raw_stateful() for p in self.buffer_out)
self.socket.send(s)
self.buffer_out = []
def vprint(self, s=""):
if self.verbose:
if conf.interactive:
log_interactive.info("> %s", s)
else:
print("> %s" % s)
| p = p.payload
if self.cur_session.tls_version is None or \
self.cur_session.tls_version < 0x0304:
self.buffer_in += p.msg
else:
self.buffer_in += p.inner.msg | conditional_block |
automaton.py | # This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
The _TLSAutomaton class provides methods common to both TLS client and server.
"""
import struct
from scapy.automaton import Automaton
from scapy.config import conf
from scapy.error import log_interactive
from scapy.packet import Raw
from scapy.layers.tls.basefields import _tls_type
from scapy.layers.tls.cert import Cert, PrivKey
from scapy.layers.tls.record import TLS
from scapy.layers.tls.record_sslv2 import SSLv2
from scapy.layers.tls.record_tls13 import TLS13
class _TLSAutomaton(Automaton):
"""
SSLv3 and TLS 1.0-1.2 typically need a 2-RTT handshake:
Client Server
| --------->>> | C1 - ClientHello
| <<<--------- | S1 - ServerHello
| <<<--------- | S1 - Certificate
| <<<--------- | S1 - ServerKeyExchange
| <<<--------- | S1 - ServerHelloDone
| --------->>> | C2 - ClientKeyExchange
| --------->>> | C2 - ChangeCipherSpec
| --------->>> | C2 - Finished [encrypted]
| <<<--------- | S2 - ChangeCipherSpec
| <<<--------- | S2 - Finished [encrypted]
We call these successive groups of messages:
ClientFlight1, ServerFlight1, ClientFlight2 and ServerFlight2.
We want to send our messages from the same flight all at once through the
socket. This is achieved by managing a list of records in 'buffer_out'.
We may put several messages (i.e. what RFC 5246 calls the record fragments)
in the same record when possible, but we may need several records for the
same flight, as with ClientFlight2.
However, note that the flights from the opposite side may be spread wildly
across TLS records and TCP packets. This is why we use a 'get_next_msg'
method for feeding a list of received messages, 'buffer_in'. Raw data
which has not yet been interpreted as a TLS record is kept in 'remain_in'.
"""
def parse_args(self, mycert=None, mykey=None, **kargs):
super(_TLSAutomaton, self).parse_args(**kargs)
self.socket = None
self.remain_in = b""
self.buffer_in = [] # these are 'fragments' inside records
self.buffer_out = [] # these are records
self.cur_session = None
self.cur_pkt = None # this is usually the latest parsed packet
if mycert:
self.mycert = Cert(mycert)
else:
self.mycert = None
if mykey:
self.mykey = PrivKey(mykey)
else:
self.mykey = None
self.verbose = kargs.get("verbose", True)
def get_next_msg(self, socket_timeout=2, retry=2):
"""
The purpose of the function is to make next message(s) available in
self.buffer_in. If the list is not empty, nothing is done. If not, in
order to fill it, the function uses the data already available in
self.remain_in from a previous call and waits till there are enough to
dissect a TLS packet. Once dissected, the content of the TLS packet
(carried messages, or 'fragments') is appended to self.buffer_in.
We have to grab enough data to dissect a TLS packet. We start by
reading the first 2 bytes. Unless we get anything different from
\\x14\\x03, \\x15\\x03, \\x16\\x03 or \\x17\\x03 (which might indicate
an SSLv2 record, whose first 2 bytes encode the length), we retrieve
3 more bytes in order to get the length of the TLS record, and
finally we can retrieve the remaining of the record.
"""
if self.buffer_in:
# A message is already available.
return
self.socket.settimeout(socket_timeout)
is_sslv2_msg = False
still_getting_len = True
grablen = 2
while retry and (still_getting_len or len(self.remain_in) < grablen):
if not is_sslv2_msg and grablen == 5 and len(self.remain_in) >= 5:
grablen = struct.unpack('!H', self.remain_in[3:5])[0] + 5 | # Retry following TLS scheme. This will cause failure
# for SSLv2 packets with length 0x1{4-7}03.
grablen = 5
else:
# Extract the SSLv2 length.
is_sslv2_msg = True
still_getting_len = False
if byte0 & 0x80:
grablen = 2 + 0 + ((byte0 & 0x7f) << 8) + byte1
else:
grablen = 2 + 1 + ((byte0 & 0x3f) << 8) + byte1
elif not is_sslv2_msg and grablen == 5 and len(self.remain_in) >= 5: # noqa: E501
grablen = struct.unpack('!H', self.remain_in[3:5])[0] + 5
if grablen == len(self.remain_in):
break
try:
tmp = self.socket.recv(grablen - len(self.remain_in))
if not tmp:
retry -= 1
else:
self.remain_in += tmp
except Exception:
self.vprint("Could not join host ! Retrying...")
retry -= 1
if len(self.remain_in) < 2 or len(self.remain_in) != grablen:
# Remote peer is not willing to respond
return
p = TLS(self.remain_in, tls_session=self.cur_session)
self.cur_session = p.tls_session
self.remain_in = b""
if isinstance(p, SSLv2) and not p.msg:
p.msg = Raw("")
if self.cur_session.tls_version is None or \
self.cur_session.tls_version < 0x0304:
self.buffer_in += p.msg
else:
if isinstance(p, TLS13):
self.buffer_in += p.inner.msg
else:
# should be TLS13ServerHello only
self.buffer_in += p.msg
while p.payload:
if isinstance(p.payload, Raw):
self.remain_in += p.payload.load
p = p.payload
elif isinstance(p.payload, TLS):
p = p.payload
if self.cur_session.tls_version is None or \
self.cur_session.tls_version < 0x0304:
self.buffer_in += p.msg
else:
self.buffer_in += p.inner.msg
def raise_on_packet(self, pkt_cls, state, get_next_msg=True):
"""
If the next message to be processed has type 'pkt_cls', raise 'state'.
If there is no message waiting to be processed, we try to get one with
the default 'get_next_msg' parameters.
"""
# Maybe we already parsed the expected packet, maybe not.
if get_next_msg:
self.get_next_msg()
if (not self.buffer_in or
not isinstance(self.buffer_in[0], pkt_cls)):
return
self.cur_pkt = self.buffer_in[0]
self.buffer_in = self.buffer_in[1:]
raise state()
def add_record(self, is_sslv2=None, is_tls13=None):
"""
Add a new TLS or SSLv2 or TLS 1.3 record to the packets buffered out.
"""
if is_sslv2 is None and is_tls13 is None:
v = (self.cur_session.tls_version or
self.cur_session.advertised_tls_version)
if v in [0x0200, 0x0002]:
is_sslv2 = True
elif v >= 0x0304:
is_tls13 = True
if is_sslv2:
self.buffer_out.append(SSLv2(tls_session=self.cur_session))
elif is_tls13:
self.buffer_out.append(TLS13(tls_session=self.cur_session))
else:
self.buffer_out.append(TLS(tls_session=self.cur_session))
def add_msg(self, pkt):
"""
Add a TLS message (e.g. TLSClientHello or TLSApplicationData)
inside the latest record to be sent through the socket.
We believe a good automaton should not use the first test.
"""
if not self.buffer_out:
self.add_record()
r = self.buffer_out[-1]
if isinstance(r, TLS13):
self.buffer_out[-1].inner.msg.append(pkt)
else:
self.buffer_out[-1].msg.append(pkt)
def flush_records(self):
"""
Send all buffered records and update the session accordingly.
"""
s = b"".join(p.raw_stateful() for p in self.buffer_out)
self.socket.send(s)
self.buffer_out = []
def vprint(self, s=""):
if self.verbose:
if conf.interactive:
log_interactive.info("> %s", s)
else:
print("> %s" % s) | still_getting_len = False
elif grablen == 2 and len(self.remain_in) >= 2:
byte0 = struct.unpack("B", self.remain_in[:1])[0]
byte1 = struct.unpack("B", self.remain_in[1:2])[0]
if (byte0 in _tls_type) and (byte1 == 3): | random_line_split |
automaton.py | # This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
The _TLSAutomaton class provides methods common to both TLS client and server.
"""
import struct
from scapy.automaton import Automaton
from scapy.config import conf
from scapy.error import log_interactive
from scapy.packet import Raw
from scapy.layers.tls.basefields import _tls_type
from scapy.layers.tls.cert import Cert, PrivKey
from scapy.layers.tls.record import TLS
from scapy.layers.tls.record_sslv2 import SSLv2
from scapy.layers.tls.record_tls13 import TLS13
class _TLSAutomaton(Automaton):
| """
SSLv3 and TLS 1.0-1.2 typically need a 2-RTT handshake:
Client Server
| --------->>> | C1 - ClientHello
| <<<--------- | S1 - ServerHello
| <<<--------- | S1 - Certificate
| <<<--------- | S1 - ServerKeyExchange
| <<<--------- | S1 - ServerHelloDone
| --------->>> | C2 - ClientKeyExchange
| --------->>> | C2 - ChangeCipherSpec
| --------->>> | C2 - Finished [encrypted]
| <<<--------- | S2 - ChangeCipherSpec
| <<<--------- | S2 - Finished [encrypted]
We call these successive groups of messages:
ClientFlight1, ServerFlight1, ClientFlight2 and ServerFlight2.
We want to send our messages from the same flight all at once through the
socket. This is achieved by managing a list of records in 'buffer_out'.
We may put several messages (i.e. what RFC 5246 calls the record fragments)
in the same record when possible, but we may need several records for the
same flight, as with ClientFlight2.
However, note that the flights from the opposite side may be spread wildly
across TLS records and TCP packets. This is why we use a 'get_next_msg'
method for feeding a list of received messages, 'buffer_in'. Raw data
which has not yet been interpreted as a TLS record is kept in 'remain_in'.
"""
def parse_args(self, mycert=None, mykey=None, **kargs):
super(_TLSAutomaton, self).parse_args(**kargs)
self.socket = None
self.remain_in = b""
self.buffer_in = [] # these are 'fragments' inside records
self.buffer_out = [] # these are records
self.cur_session = None
self.cur_pkt = None # this is usually the latest parsed packet
if mycert:
self.mycert = Cert(mycert)
else:
self.mycert = None
if mykey:
self.mykey = PrivKey(mykey)
else:
self.mykey = None
self.verbose = kargs.get("verbose", True)
def get_next_msg(self, socket_timeout=2, retry=2):
"""
The purpose of the function is to make next message(s) available in
self.buffer_in. If the list is not empty, nothing is done. If not, in
order to fill it, the function uses the data already available in
self.remain_in from a previous call and waits till there are enough to
dissect a TLS packet. Once dissected, the content of the TLS packet
(carried messages, or 'fragments') is appended to self.buffer_in.
We have to grab enough data to dissect a TLS packet. We start by
reading the first 2 bytes. Unless we get anything different from
\\x14\\x03, \\x15\\x03, \\x16\\x03 or \\x17\\x03 (which might indicate
an SSLv2 record, whose first 2 bytes encode the length), we retrieve
3 more bytes in order to get the length of the TLS record, and
finally we can retrieve the remaining of the record.
"""
if self.buffer_in:
# A message is already available.
return
self.socket.settimeout(socket_timeout)
is_sslv2_msg = False
still_getting_len = True
grablen = 2
while retry and (still_getting_len or len(self.remain_in) < grablen):
if not is_sslv2_msg and grablen == 5 and len(self.remain_in) >= 5:
grablen = struct.unpack('!H', self.remain_in[3:5])[0] + 5
still_getting_len = False
elif grablen == 2 and len(self.remain_in) >= 2:
byte0 = struct.unpack("B", self.remain_in[:1])[0]
byte1 = struct.unpack("B", self.remain_in[1:2])[0]
if (byte0 in _tls_type) and (byte1 == 3):
# Retry following TLS scheme. This will cause failure
# for SSLv2 packets with length 0x1{4-7}03.
grablen = 5
else:
# Extract the SSLv2 length.
is_sslv2_msg = True
still_getting_len = False
if byte0 & 0x80:
grablen = 2 + 0 + ((byte0 & 0x7f) << 8) + byte1
else:
grablen = 2 + 1 + ((byte0 & 0x3f) << 8) + byte1
elif not is_sslv2_msg and grablen == 5 and len(self.remain_in) >= 5: # noqa: E501
grablen = struct.unpack('!H', self.remain_in[3:5])[0] + 5
if grablen == len(self.remain_in):
break
try:
tmp = self.socket.recv(grablen - len(self.remain_in))
if not tmp:
retry -= 1
else:
self.remain_in += tmp
except Exception:
self.vprint("Could not join host ! Retrying...")
retry -= 1
if len(self.remain_in) < 2 or len(self.remain_in) != grablen:
# Remote peer is not willing to respond
return
p = TLS(self.remain_in, tls_session=self.cur_session)
self.cur_session = p.tls_session
self.remain_in = b""
if isinstance(p, SSLv2) and not p.msg:
p.msg = Raw("")
if self.cur_session.tls_version is None or \
self.cur_session.tls_version < 0x0304:
self.buffer_in += p.msg
else:
if isinstance(p, TLS13):
self.buffer_in += p.inner.msg
else:
# should be TLS13ServerHello only
self.buffer_in += p.msg
while p.payload:
if isinstance(p.payload, Raw):
self.remain_in += p.payload.load
p = p.payload
elif isinstance(p.payload, TLS):
p = p.payload
if self.cur_session.tls_version is None or \
self.cur_session.tls_version < 0x0304:
self.buffer_in += p.msg
else:
self.buffer_in += p.inner.msg
def raise_on_packet(self, pkt_cls, state, get_next_msg=True):
"""
If the next message to be processed has type 'pkt_cls', raise 'state'.
If there is no message waiting to be processed, we try to get one with
the default 'get_next_msg' parameters.
"""
# Maybe we already parsed the expected packet, maybe not.
if get_next_msg:
self.get_next_msg()
if (not self.buffer_in or
not isinstance(self.buffer_in[0], pkt_cls)):
return
self.cur_pkt = self.buffer_in[0]
self.buffer_in = self.buffer_in[1:]
raise state()
def add_record(self, is_sslv2=None, is_tls13=None):
"""
Add a new TLS or SSLv2 or TLS 1.3 record to the packets buffered out.
"""
if is_sslv2 is None and is_tls13 is None:
v = (self.cur_session.tls_version or
self.cur_session.advertised_tls_version)
if v in [0x0200, 0x0002]:
is_sslv2 = True
elif v >= 0x0304:
is_tls13 = True
if is_sslv2:
self.buffer_out.append(SSLv2(tls_session=self.cur_session))
elif is_tls13:
self.buffer_out.append(TLS13(tls_session=self.cur_session))
else:
self.buffer_out.append(TLS(tls_session=self.cur_session))
def add_msg(self, pkt):
"""
Add a TLS message (e.g. TLSClientHello or TLSApplicationData)
inside the latest record to be sent through the socket.
We believe a good automaton should not use the first test.
"""
if not self.buffer_out:
self.add_record()
r = self.buffer_out[-1]
if isinstance(r, TLS13):
self.buffer_out[-1].inner.msg.append(pkt)
else:
self.buffer_out[-1].msg.append(pkt)
def flush_records(self):
"""
Send all buffered records and update the session accordingly.
"""
s = b"".join(p.raw_stateful() for p in self.buffer_out)
self.socket.send(s)
self.buffer_out = []
def vprint(self, s=""):
if self.verbose:
if conf.interactive:
log_interactive.info("> %s", s)
else:
print("> %s" % s) | identifier_body |
|
automaton.py | # This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
The _TLSAutomaton class provides methods common to both TLS client and server.
"""
import struct
from scapy.automaton import Automaton
from scapy.config import conf
from scapy.error import log_interactive
from scapy.packet import Raw
from scapy.layers.tls.basefields import _tls_type
from scapy.layers.tls.cert import Cert, PrivKey
from scapy.layers.tls.record import TLS
from scapy.layers.tls.record_sslv2 import SSLv2
from scapy.layers.tls.record_tls13 import TLS13
class _TLSAutomaton(Automaton):
"""
SSLv3 and TLS 1.0-1.2 typically need a 2-RTT handshake:
Client Server
| --------->>> | C1 - ClientHello
| <<<--------- | S1 - ServerHello
| <<<--------- | S1 - Certificate
| <<<--------- | S1 - ServerKeyExchange
| <<<--------- | S1 - ServerHelloDone
| --------->>> | C2 - ClientKeyExchange
| --------->>> | C2 - ChangeCipherSpec
| --------->>> | C2 - Finished [encrypted]
| <<<--------- | S2 - ChangeCipherSpec
| <<<--------- | S2 - Finished [encrypted]
We call these successive groups of messages:
ClientFlight1, ServerFlight1, ClientFlight2 and ServerFlight2.
We want to send our messages from the same flight all at once through the
socket. This is achieved by managing a list of records in 'buffer_out'.
We may put several messages (i.e. what RFC 5246 calls the record fragments)
in the same record when possible, but we may need several records for the
same flight, as with ClientFlight2.
However, note that the flights from the opposite side may be spread wildly
across TLS records and TCP packets. This is why we use a 'get_next_msg'
method for feeding a list of received messages, 'buffer_in'. Raw data
which has not yet been interpreted as a TLS record is kept in 'remain_in'.
"""
def parse_args(self, mycert=None, mykey=None, **kargs):
super(_TLSAutomaton, self).parse_args(**kargs)
self.socket = None
self.remain_in = b""
self.buffer_in = [] # these are 'fragments' inside records
self.buffer_out = [] # these are records
self.cur_session = None
self.cur_pkt = None # this is usually the latest parsed packet
if mycert:
self.mycert = Cert(mycert)
else:
self.mycert = None
if mykey:
self.mykey = PrivKey(mykey)
else:
self.mykey = None
self.verbose = kargs.get("verbose", True)
def | (self, socket_timeout=2, retry=2):
"""
The purpose of the function is to make next message(s) available in
self.buffer_in. If the list is not empty, nothing is done. If not, in
order to fill it, the function uses the data already available in
self.remain_in from a previous call and waits till there are enough to
dissect a TLS packet. Once dissected, the content of the TLS packet
(carried messages, or 'fragments') is appended to self.buffer_in.
We have to grab enough data to dissect a TLS packet. We start by
reading the first 2 bytes. Unless we get anything different from
\\x14\\x03, \\x15\\x03, \\x16\\x03 or \\x17\\x03 (which might indicate
an SSLv2 record, whose first 2 bytes encode the length), we retrieve
3 more bytes in order to get the length of the TLS record, and
finally we can retrieve the remaining of the record.
"""
if self.buffer_in:
# A message is already available.
return
self.socket.settimeout(socket_timeout)
is_sslv2_msg = False
still_getting_len = True
grablen = 2
while retry and (still_getting_len or len(self.remain_in) < grablen):
if not is_sslv2_msg and grablen == 5 and len(self.remain_in) >= 5:
grablen = struct.unpack('!H', self.remain_in[3:5])[0] + 5
still_getting_len = False
elif grablen == 2 and len(self.remain_in) >= 2:
byte0 = struct.unpack("B", self.remain_in[:1])[0]
byte1 = struct.unpack("B", self.remain_in[1:2])[0]
if (byte0 in _tls_type) and (byte1 == 3):
# Retry following TLS scheme. This will cause failure
# for SSLv2 packets with length 0x1{4-7}03.
grablen = 5
else:
# Extract the SSLv2 length.
is_sslv2_msg = True
still_getting_len = False
if byte0 & 0x80:
grablen = 2 + 0 + ((byte0 & 0x7f) << 8) + byte1
else:
grablen = 2 + 1 + ((byte0 & 0x3f) << 8) + byte1
elif not is_sslv2_msg and grablen == 5 and len(self.remain_in) >= 5: # noqa: E501
grablen = struct.unpack('!H', self.remain_in[3:5])[0] + 5
if grablen == len(self.remain_in):
break
try:
tmp = self.socket.recv(grablen - len(self.remain_in))
if not tmp:
retry -= 1
else:
self.remain_in += tmp
except Exception:
self.vprint("Could not join host ! Retrying...")
retry -= 1
if len(self.remain_in) < 2 or len(self.remain_in) != grablen:
# Remote peer is not willing to respond
return
p = TLS(self.remain_in, tls_session=self.cur_session)
self.cur_session = p.tls_session
self.remain_in = b""
if isinstance(p, SSLv2) and not p.msg:
p.msg = Raw("")
if self.cur_session.tls_version is None or \
self.cur_session.tls_version < 0x0304:
self.buffer_in += p.msg
else:
if isinstance(p, TLS13):
self.buffer_in += p.inner.msg
else:
# should be TLS13ServerHello only
self.buffer_in += p.msg
while p.payload:
if isinstance(p.payload, Raw):
self.remain_in += p.payload.load
p = p.payload
elif isinstance(p.payload, TLS):
p = p.payload
if self.cur_session.tls_version is None or \
self.cur_session.tls_version < 0x0304:
self.buffer_in += p.msg
else:
self.buffer_in += p.inner.msg
def raise_on_packet(self, pkt_cls, state, get_next_msg=True):
"""
If the next message to be processed has type 'pkt_cls', raise 'state'.
If there is no message waiting to be processed, we try to get one with
the default 'get_next_msg' parameters.
"""
# Maybe we already parsed the expected packet, maybe not.
if get_next_msg:
self.get_next_msg()
if (not self.buffer_in or
not isinstance(self.buffer_in[0], pkt_cls)):
return
self.cur_pkt = self.buffer_in[0]
self.buffer_in = self.buffer_in[1:]
raise state()
def add_record(self, is_sslv2=None, is_tls13=None):
"""
Add a new TLS or SSLv2 or TLS 1.3 record to the packets buffered out.
"""
if is_sslv2 is None and is_tls13 is None:
v = (self.cur_session.tls_version or
self.cur_session.advertised_tls_version)
if v in [0x0200, 0x0002]:
is_sslv2 = True
elif v >= 0x0304:
is_tls13 = True
if is_sslv2:
self.buffer_out.append(SSLv2(tls_session=self.cur_session))
elif is_tls13:
self.buffer_out.append(TLS13(tls_session=self.cur_session))
else:
self.buffer_out.append(TLS(tls_session=self.cur_session))
def add_msg(self, pkt):
"""
Add a TLS message (e.g. TLSClientHello or TLSApplicationData)
inside the latest record to be sent through the socket.
We believe a good automaton should not use the first test.
"""
if not self.buffer_out:
self.add_record()
r = self.buffer_out[-1]
if isinstance(r, TLS13):
self.buffer_out[-1].inner.msg.append(pkt)
else:
self.buffer_out[-1].msg.append(pkt)
def flush_records(self):
"""
Send all buffered records and update the session accordingly.
"""
s = b"".join(p.raw_stateful() for p in self.buffer_out)
self.socket.send(s)
self.buffer_out = []
def vprint(self, s=""):
if self.verbose:
if conf.interactive:
log_interactive.info("> %s", s)
else:
print("> %s" % s)
| get_next_msg | identifier_name |
iron-doc-nav.d.ts | /**
* DO NOT EDIT
*
* This file was automatically generated by
* https://github.com/Polymer/tools/tree/master/packages/gen-typescript-declarations
* |
import {dom, flush} from '@polymer/polymer/lib/legacy/polymer.dom.js';
import {html} from '@polymer/polymer/lib/utils/html-tag.js';
import {LegacyElementMixin} from '@polymer/polymer/lib/legacy/legacy-element-mixin.js';
interface IronDocNavElement extends LegacyElementMixin, HTMLElement {
descriptor: object|null|undefined;
path: string|null|undefined;
baseHref: string|null|undefined;
_sections: any[]|null|undefined;
_descriptorChanged(descriptor: any): void;
_select(event: any): void;
_isSelected(a: any, b: any): any;
_isExpanded(item: any, path: any): any;
}
export {IronDocNavElement};
declare global {
interface HTMLElementTagNameMap {
"iron-doc-nav": IronDocNavElement;
}
} | * To modify these typings, edit the source file(s):
* iron-doc-nav.js
*/
import {Polymer} from '@polymer/polymer/lib/legacy/polymer-fn.js'; | random_line_split |
issue-2502.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
struct font<'a> {
fontbuf: &'a ~[u8],
}
impl<'a> font<'a> {
pub fn buf(&self) -> &'a ~[u8] {
self.fontbuf
}
}
fn font<'r>(fontbuf: &'r ~[u8]) -> font<'r> {
font {
fontbuf: fontbuf
}
}
pub fn main() { } | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | random_line_split |
issue-2502.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct | <'a> {
fontbuf: &'a ~[u8],
}
impl<'a> font<'a> {
pub fn buf(&self) -> &'a ~[u8] {
self.fontbuf
}
}
fn font<'r>(fontbuf: &'r ~[u8]) -> font<'r> {
font {
fontbuf: fontbuf
}
}
pub fn main() { }
| font | identifier_name |
issue-2502.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct font<'a> {
fontbuf: &'a ~[u8],
}
impl<'a> font<'a> {
pub fn buf(&self) -> &'a ~[u8] {
self.fontbuf
}
}
fn font<'r>(fontbuf: &'r ~[u8]) -> font<'r> |
pub fn main() { }
| {
font {
fontbuf: fontbuf
}
} | identifier_body |
archive.py | # coding=utf-8
"""archive.py - Archive handling (extract/create) for Comix."""
from __future__ import absolute_import
import cStringIO
import os
import re
import sys
import tarfile
import threading
import zipfile
import gtk
try:
from py7zlib import Archive7z
except ImportError:
Archive7z = None # ignore it.
from src import mobiunpack
from src import process
from src.image import get_supported_format_extensions_preg
ZIP, RAR, TAR, GZIP, BZIP2, SEVENZIP, MOBI = range(7)
_rar_exec = None
_7z_exec = None
class Extractor(object):
"""Extractor is a threaded class for extracting different archive formats.
The Extractor can be loaded with paths to archives (currently ZIP, 7z, tar,
RAR or MobiPocket archives) and a path to a destination directory. Once an archive
has been set it is possible to filter out the files to be extracted and
set the order in which they should be extracted. The extraction can
then be started in a new thread in which files are extracted one by one,
and a signal is sent on a condition after each extraction, so that it is
possible for other threads to wait on specific files to be ready.
Note: Support for gzip/bzip2 compressed tar archives is limited, see
set_files() for more info.
"""
def __init__(self):
self._setupped = False
def setup(self, src, dst):
"""Setup the extractor with archive <src> and destination dir <dst>.
Return a threading.Condition related to the is_ready() method, or
None if the format of <src> isn't supported.
"""
self._src = src
self._dst = dst
self._type = archive_mime_type(src)
self._files = []
self._extracted = {}
self._stop = False
self._extract_thread = None
self._condition = threading.Condition()
if self._type == ZIP:
self._zfile = zipfile.ZipFile(src, 'r')
self._files = self._zfile.namelist()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile = tarfile.open(src, 'r')
self._files = self._tfile.getnames()
elif self._type == RAR:
global _rar_exec
if _rar_exec is None:
_rar_exec = _get_rar_exec()
if _rar_exec is None:
print('! Could not find RAR file extractor.')
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find RAR file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>rar</i> or the"
" <i>unrar</i> program installed in order "
"to read RAR (.cbr) files."))
dialog.run()
dialog.destroy()
return None
proc = process.Process([_rar_exec, 'vb', '-p-', '--', src])
fd = proc.spawn()
self._files = [name.rstrip(os.linesep) for name in fd.readlines()]
fd.close()
proc.wait()
elif self._type == SEVENZIP:
global _7z_exec, Archive7z
if not Archive7z: # lib import failed
print(': pylzma is not installed... will try 7z tool...')
if _7z_exec is None:
_7z_exec = _get_7z_exec()
else:
try:
self._szfile = Archive7z(open(src, 'rb'), '-')
self._files = self._szfile.getnames()
except:
Archive7z = None
# pylzma can fail on new 7z
if _7z_exec is None:
_7z_exec = _get_7z_exec()
if _7z_exec is None:
print('! Could not find 7Z file extractor.')
elif not Archive7z:
proc = process.Process([_7z_exec, 'l', '-bd', '-slt', '-p-', src])
fd = proc.spawn()
self._files = self._process_7z_names(fd)
fd.close()
proc.wait()
if not _7z_exec and not Archive7z:
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find 7Z file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>pylzma</i> "
"or the <i>p7zip</i> program installed "
"in order to read 7Z (.cb7) files."))
dialog.run()
dialog.destroy()
return None
elif self._type == MOBI:
self._mobifile = None
try:
self._mobifile = mobiunpack.MobiFile(src)
self._files = self._mobifile.getnames()
except mobiunpack.unpackException as e:
print('! Failed to unpack MobiPocket: {}'.format(e))
return None
else:
print('! Non-supported archive format: {}'.format(src))
return None
self._setupped = True
return self._condition
def _process_7z_names(self, fd):
START = "----------"
names = []
started = False
item = {}
while True:
try:
line = fd.readline()
except:
break
if line:
line = line.rstrip(os.linesep)
try:
# For non-ascii files names
line = line.decode("utf-8")
except:
pass
if line.startswith(START):
started = True
item = {}
continue
if started:
if line == "":
if item["Attributes"].find("D") == -1:
names.append(item["Path"])
item = {}
else:
key = line.split("=")[0].strip()
value = "=".join(line.split("=")[1:]).strip()
item[key] = value
else:
break
return names
def get_files(self):
"""Return a list of names of all the files the extractor is currently
set for extracting. After a call to setup() this is by default all
files found in the archive. The paths in the list are relative to
the archive root and are not absolute for the files once extracted.
"""
return self._files[:]
def set_files(self, files, extracted=False):
"""Set the files that the extractor should extract from the archive in
the order of extraction. Normally one would get the list of all files
in the archive using get_files(), then filter and/or permute this
list before sending it back using set_files().
The second parameter, <extracted>, allows a trick for subarchive
management: setting files as already extracted, in order to avoid any
blocking wait on files not present in the original archive.
Note: Random access on gzip or bzip2 compressed tar archives is
not a good idea. These formats are supported *only* for backwards
compatibility. They are fine formats for some purposes, but should
not be used for scanned comic books. So, we cheat and ignore the
ordering applied with this method on such archives.
"""
if extracted:
self._files = files
for filename in files:
self._extracted[filename] = True
return
if self._type in (GZIP, BZIP2):
self._files = [x for x in self._files if x in files]
else:
self._files = files
def is_ready(self, name):
"""Return True if the file <name> in the extractor's file list
(as set by set_files()) is fully extracted.
"""
return self._extracted.get(name, False)
def get_mime_type(self):
"""Return the mime type name of the extractor's current archive."""
return self._type
def stop(self):
"""Signal the extractor to stop extracting and kill the extracting
thread. Blocks until the extracting thread has terminated.
"""
self._stop = True
if self._setupped:
self._extract_thread.join()
self._setupped = False
def extract(self):
"""Start extracting the files in the file list one by one using a
new thread. Every time a new file is extracted a notify() will be
signalled on the Condition that was returned by setup().
"""
self._extract_thread = threading.Thread(target=self._thread_extract)
self._extract_thread.setDaemon(False)
self._extract_thread.start()
def close(self):
"""Close any open file objects, need only be called manually if the
extract() method isn't called.
"""
if self._type == ZIP:
self._zfile.close()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile.close()
elif self._type == MOBI and self._mobifile is not None:
self._mobifile.close()
def _thread_extract(self):
"""Extract the files in the file list one by one."""
# Extract whole 7z and rar archives at once - with solid archives, extracting one file at a time is slow
if self._type in (SEVENZIP,) and _7z_exec is not None:
cmd = [_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
elif self._type in (RAR,) and _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
cmd = [_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
os.chdir(cwd)
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
else:
for name in self._files:
self._extract_file(name)
self.close()
def _extract_file(self, name):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if self._stop:
self.close()
sys.exit(0)
try:
if self._type in (ZIP, SEVENZIP):
dst_path = os.path.join(self._dst, name)
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_path))
new = open(dst_path, 'wb')
if self._type == ZIP:
new.write(self._zfile.read(name, '-'))
elif self._type == SEVENZIP:
if Archive7z is not None:
new.write(self._szfile.getmember(name).read())
else:
if _7z_exec is not None:
proc = process.Process([_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src, name])
proc.spawn()
proc.wait()
else:
print('! Could not find 7Z file extractor.')
new.close()
elif self._type in (TAR, GZIP, BZIP2):
if os.path.normpath(os.path.join(self._dst, name)).startswith(
self._dst):
self._tfile.extract(name, self._dst)
else:
print('! Non-local tar member: {}\n'.format(name))
elif self._type == RAR:
if _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
proc = process.Process([_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src, name])
proc.spawn()
proc.wait()
os.chdir(cwd)
else:
print('! Could not find RAR file extractor.')
elif self._type == MOBI:
dst_path = os.path.join(self._dst, name)
self._mobifile.extract(name, dst_path)
except Exception:
# Better to ignore any failed extractions (e.g. from a corrupt
# archive) than to crash here and leave the main thread in a
# possible infinite block. Damaged or missing files *should* be
# handled gracefully by the main program anyway.
pass
self._condition.acquire()
self._extracted[name] = True
self._condition.notify()
self._condition.release()
def extract_file_io(self, chosen):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if os.path.exists(os.path.join(self._dst, chosen)):
return cStringIO.StringIO(open(os.path.join(self._dst, chosen), 'rb').read())
if self._type == ZIP:
return cStringIO.StringIO(self._zfile.read(chosen))
elif self._type in [TAR, GZIP, BZIP2]:
return cStringIO.StringIO(self._tfile.extractfile(chosen).read())
elif self._type == RAR:
proc = process.Process([_rar_exec, 'p', '-inul', '-p-', '--',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
elif self._type == SEVENZIP:
if Archive7z is not None:
return cStringIO.StringIO(self._szfile.getmember(chosen).read())
elif _7z_exec is not None:
proc = process.Process([_7z_exec, 'e', '-bd', '-p-', '-so',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
class Packer(object):
"""Packer is a threaded class for packing files into ZIP archives.
It would be straight-forward to add support for more archive types,
but basically all other types are less well fitted for this particular
task than ZIP archives are (yes, really).
"""
def __init__(self, image_files, other_files, archive_path, base_name):
"""Setup a Packer object to create a ZIP archive at <archive_path>.
All files pointed to by paths in the sequences <image_files> and
<other_files> will be included in the archive when packed.
The files in <image_files> will be renamed on the form
"NN - <base_name>.ext", so that the lexical ordering of their
filenames match that of their order in the list.
The files in <other_files> will be included as they are,
assuming their filenames does not clash with other filenames in
the archive. All files are placed in the archive root.
"""
self._image_files = image_files
self._other_files = other_files
self._archive_path = archive_path
self._base_name = base_name
self._pack_thread = None
self._packing_successful = False
def pack(self):
"""Pack all the files in the file lists into the archive."""
self._pack_thread = threading.Thread(target=self._thread_pack)
self._pack_thread.setDaemon(False)
self._pack_thread.start()
def wait(self):
"""Block until the packer thread has finished. Return True if the
packer finished its work successfully.
"""
if self._pack_thread is not None:
self._pack_thread.join()
return self._packing_successful
def _thread_pack(self):
try:
zfile = zipfile.ZipFile(self._archive_path, 'w')
except Exception:
print('! Could not create archive {}'.format(self._archive_path))
return
used_names = []
pattern = '{{:0{}d}} - {}{{}}'.format(len(str(len(self._image_files))), self._base_name)
for i, path in enumerate(self._image_files):
filename = pattern.format(i + 1, os.path.splitext(path)[1])
try:
zfile.write(path, filename, zipfile.ZIP_STORED)
except Exception:
print('! Could not add file {} to add to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
for path in self._other_files:
filename = os.path.basename(path)
while filename in used_names:
filename = '_{}'.format(filename)
try:
zfile.write(path, filename, zipfile.ZIP_DEFLATED)
except Exception:
print('! Could not add file {} to add to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
zfile.close()
self._packing_successful = True
def archive_mime_type(path):
"""Return the archive type of <path> or None for non-archives."""
try:
if os.path.isfile(path):
if not os.access(path, os.R_OK):
return None
if zipfile.is_zipfile(path):
return ZIP
fd = open(path, 'rb')
magic = fd.read(4)
fd.seek(60)
magic2 = fd.read(8)
fd.close()
if tarfile.is_tarfile(path) and os.path.getsize(path) > 0:
if magic.startswith('BZh'):
return BZIP2
if magic.startswith('\037\213'):
return GZIP
return TAR
if magic == 'Rar!':
return RAR
if magic == '7z\xbc\xaf':
|
if magic2 == 'BOOKMOBI':
return MOBI
except Exception:
print('! Error while reading {}'.format(path))
return None
def get_name(archive_type):
"""Return a text representation of an archive type."""
return {ZIP: _('ZIP archive'),
TAR: _('Tar archive'),
GZIP: _('Gzip compressed tar archive'),
BZIP2: _('Bzip2 compressed tar archive'),
RAR: _('RAR archive'),
SEVENZIP: _('7-Zip archive'),
MOBI: _('MobiPocket file'),
}[archive_type]
def get_archive_info(path):
"""Return a tuple (mime, num_pages, size) with info about the archive
at <path>, or None if <path> doesn't point to a supported archive.
"""
image_re = re.compile(r'\.(' + '|'.join(get_supported_format_extensions_preg()) + r')\s*$', re.I)
extractor = Extractor()
extractor.setup(path, None)
mime = extractor.get_mime_type()
if mime is None:
return None
files = extractor.get_files()
extractor.close()
num_pages = len(filter(image_re.search, files))
size = os.stat(path).st_size
return mime, num_pages, size
def _get_rar_exec():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('unrar', 'rar'):
if process.Process([command]).spawn() is not None:
return command
return None
def _get_7z_exec():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('7z', '7za', '7zr'):
if process.Process([command]).spawn() is not None:
return command
return None
| return SEVENZIP | conditional_block |
archive.py | # coding=utf-8
"""archive.py - Archive handling (extract/create) for Comix."""
from __future__ import absolute_import
import cStringIO
import os
import re
import sys
import tarfile
import threading
import zipfile
import gtk
try:
from py7zlib import Archive7z
except ImportError:
Archive7z = None # ignore it.
from src import mobiunpack
from src import process
from src.image import get_supported_format_extensions_preg
ZIP, RAR, TAR, GZIP, BZIP2, SEVENZIP, MOBI = range(7)
_rar_exec = None
_7z_exec = None
class Extractor(object):
"""Extractor is a threaded class for extracting different archive formats.
The Extractor can be loaded with paths to archives (currently ZIP, 7z, tar,
RAR or MobiPocket archives) and a path to a destination directory. Once an archive
has been set it is possible to filter out the files to be extracted and
set the order in which they should be extracted. The extraction can
then be started in a new thread in which files are extracted one by one,
and a signal is sent on a condition after each extraction, so that it is
possible for other threads to wait on specific files to be ready.
Note: Support for gzip/bzip2 compressed tar archives is limited, see
set_files() for more info.
"""
def __init__(self):
self._setupped = False
def setup(self, src, dst):
"""Setup the extractor with archive <src> and destination dir <dst>.
Return a threading.Condition related to the is_ready() method, or
None if the format of <src> isn't supported.
"""
self._src = src
self._dst = dst
self._type = archive_mime_type(src)
self._files = []
self._extracted = {}
self._stop = False
self._extract_thread = None
self._condition = threading.Condition()
if self._type == ZIP:
self._zfile = zipfile.ZipFile(src, 'r')
self._files = self._zfile.namelist()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile = tarfile.open(src, 'r')
self._files = self._tfile.getnames()
elif self._type == RAR:
global _rar_exec
if _rar_exec is None:
_rar_exec = _get_rar_exec()
if _rar_exec is None:
print('! Could not find RAR file extractor.')
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find RAR file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>rar</i> or the"
" <i>unrar</i> program installed in order "
"to read RAR (.cbr) files."))
dialog.run()
dialog.destroy()
return None
proc = process.Process([_rar_exec, 'vb', '-p-', '--', src])
fd = proc.spawn()
self._files = [name.rstrip(os.linesep) for name in fd.readlines()]
fd.close()
proc.wait()
elif self._type == SEVENZIP:
global _7z_exec, Archive7z
if not Archive7z: # lib import failed
print(': pylzma is not installed... will try 7z tool...')
if _7z_exec is None:
_7z_exec = _get_7z_exec()
else:
try:
self._szfile = Archive7z(open(src, 'rb'), '-')
self._files = self._szfile.getnames()
except:
Archive7z = None
# pylzma can fail on new 7z
if _7z_exec is None:
_7z_exec = _get_7z_exec()
if _7z_exec is None:
print('! Could not find 7Z file extractor.')
elif not Archive7z:
proc = process.Process([_7z_exec, 'l', '-bd', '-slt', '-p-', src])
fd = proc.spawn()
self._files = self._process_7z_names(fd)
fd.close()
proc.wait()
if not _7z_exec and not Archive7z:
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find 7Z file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>pylzma</i> "
"or the <i>p7zip</i> program installed "
"in order to read 7Z (.cb7) files."))
dialog.run()
dialog.destroy()
return None
elif self._type == MOBI:
self._mobifile = None
try:
self._mobifile = mobiunpack.MobiFile(src)
self._files = self._mobifile.getnames()
except mobiunpack.unpackException as e:
print('! Failed to unpack MobiPocket: {}'.format(e))
return None
else:
print('! Non-supported archive format: {}'.format(src))
return None
self._setupped = True
return self._condition
def _process_7z_names(self, fd):
START = "----------"
names = []
started = False
item = {}
while True:
try:
line = fd.readline()
except:
break
if line:
line = line.rstrip(os.linesep)
try:
# For non-ascii files names
line = line.decode("utf-8")
except:
pass
if line.startswith(START):
started = True
item = {}
continue
if started:
if line == "":
if item["Attributes"].find("D") == -1:
names.append(item["Path"])
item = {}
else:
key = line.split("=")[0].strip()
value = "=".join(line.split("=")[1:]).strip()
item[key] = value
else:
break
return names
def get_files(self):
"""Return a list of names of all the files the extractor is currently
set for extracting. After a call to setup() this is by default all
files found in the archive. The paths in the list are relative to
the archive root and are not absolute for the files once extracted.
"""
return self._files[:]
def set_files(self, files, extracted=False):
"""Set the files that the extractor should extract from the archive in
the order of extraction. Normally one would get the list of all files
in the archive using get_files(), then filter and/or permute this
list before sending it back using set_files().
The second parameter, <extracted>, allows a trick for subarchive
management: setting files as already extracted, in order to avoid any
blocking wait on files not present in the original archive.
Note: Random access on gzip or bzip2 compressed tar archives is
not a good idea. These formats are supported *only* for backwards
compatibility. They are fine formats for some purposes, but should
not be used for scanned comic books. So, we cheat and ignore the
ordering applied with this method on such archives.
"""
if extracted:
self._files = files
for filename in files:
self._extracted[filename] = True
return
if self._type in (GZIP, BZIP2):
self._files = [x for x in self._files if x in files]
else:
self._files = files
def is_ready(self, name):
"""Return True if the file <name> in the extractor's file list
(as set by set_files()) is fully extracted.
"""
return self._extracted.get(name, False)
def get_mime_type(self):
"""Return the mime type name of the extractor's current archive."""
return self._type
def stop(self):
"""Signal the extractor to stop extracting and kill the extracting
thread. Blocks until the extracting thread has terminated.
"""
self._stop = True
if self._setupped:
self._extract_thread.join()
self._setupped = False
def extract(self):
"""Start extracting the files in the file list one by one using a
new thread. Every time a new file is extracted a notify() will be
signalled on the Condition that was returned by setup().
"""
self._extract_thread = threading.Thread(target=self._thread_extract)
self._extract_thread.setDaemon(False)
self._extract_thread.start()
def close(self):
"""Close any open file objects, need only be called manually if the
extract() method isn't called.
"""
if self._type == ZIP:
self._zfile.close()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile.close()
elif self._type == MOBI and self._mobifile is not None:
self._mobifile.close()
def _thread_extract(self):
"""Extract the files in the file list one by one."""
# Extract 7z and rar whole archive - if it SOLID - extract one file is SLOW
if self._type in (SEVENZIP,) and _7z_exec is not None:
cmd = [_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
if self._type in (RAR,) and _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
cmd = [_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
os.chdir(cwd)
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
else:
for name in self._files:
self._extract_file(name)
self.close()
def _extract_file(self, name):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if self._stop:
self.close()
sys.exit(0)
try:
if self._type in (ZIP, SEVENZIP):
dst_path = os.path.join(self._dst, name)
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_path))
new = open(dst_path, 'wb')
if self._type == ZIP:
new.write(self._zfile.read(name, '-'))
elif self._type == SEVENZIP:
if Archive7z is not None:
new.write(self._szfile.getmember(name).read())
else:
if _7z_exec is not None:
proc = process.Process([_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src, name])
proc.spawn()
proc.wait()
else:
print('! Could not find 7Z file extractor.')
new.close()
elif self._type in (TAR, GZIP, BZIP2):
if os.path.normpath(os.path.join(self._dst, name)).startswith(
self._dst):
self._tfile.extract(name, self._dst)
else:
print('! Non-local tar member: {}\n'.format(name))
elif self._type == RAR:
if _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
proc = process.Process([_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src, name])
proc.spawn()
proc.wait()
os.chdir(cwd)
else:
print('! Could not find RAR file extractor.')
elif self._type == MOBI:
dst_path = os.path.join(self._dst, name)
self._mobifile.extract(name, dst_path)
except Exception:
# Better to ignore any failed extractions (e.g. from a corrupt
# archive) than to crash here and leave the main thread in a
# possible infinite block. Damaged or missing files *should* be
# handled gracefully by the main program anyway.
pass
self._condition.acquire()
self._extracted[name] = True
self._condition.notify()
self._condition.release()
def extract_file_io(self, chosen):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if os.path.exists(os.path.join(self._dst, chosen)):
cStringIO.StringIO(open(os.path.join(self._dst, chosen), 'rb').read())
if self._type == ZIP:
return cStringIO.StringIO(self._zfile.read(chosen))
elif self._type in [TAR, GZIP, BZIP2]:
return cStringIO.StringIO(self._tfile.extractfile(chosen).read())
elif self._type == RAR:
proc = process.Process([_rar_exec, 'p', '-inul', '-p-', '--',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
elif self._type == SEVENZIP:
if Archive7z is not None:
return cStringIO.StringIO(self._szfile.getmember(chosen).read())
elif _7z_exec is not None:
proc = process.Process([_7z_exec, 'e', '-bd', '-p-', '-so',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
class Packer(object):
|
def archive_mime_type(path):
"""Return the archive type of <path> or None for non-archives."""
try:
if os.path.isfile(path):
if not os.access(path, os.R_OK):
return None
if zipfile.is_zipfile(path):
return ZIP
fd = open(path, 'rb')
magic = fd.read(4)
fd.seek(60)
magic2 = fd.read(8)
fd.close()
if tarfile.is_tarfile(path) and os.path.getsize(path) > 0:
if magic.startswith('BZh'):
return BZIP2
if magic.startswith('\037\213'):
return GZIP
return TAR
if magic == 'Rar!':
return RAR
if magic == '7z\xbc\xaf':
return SEVENZIP
if magic2 == 'BOOKMOBI':
return MOBI
except Exception:
print('! Error while reading {}'.format(path))
return None
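# Editor's note - illustrative sketch, not part of the original module: the
# magic bytes checked above map to formats roughly as follows, e.g.
#   open('comic.cbr', 'rb').read(4) == 'Rar!'       -> RAR
#   open('comic.cb7', 'rb').read(4) == '7z\xbc\xaf' -> SEVENZIP
#   bytes 60..67 == 'BOOKMOBI'                      -> MOBI
# so archive_mime_type('comic.cbr') would return RAR for a readable RAR file.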
def get_name(archive_type):
"""Return a text representation of an archive type."""
return {ZIP: _('ZIP archive'),
TAR: _('Tar archive'),
GZIP: _('Gzip compressed tar archive'),
BZIP2: _('Bzip2 compressed tar archive'),
RAR: _('RAR archive'),
SEVENZIP: _('7-Zip archive'),
MOBI: _('MobiPocket file'),
}[archive_type]
def get_archive_info(path):
"""Return a tuple (mime, num_pages, size) with info about the archive
at <path>, or None if <path> doesn't point to a supported archive.
"""
image_re = re.compile('\.(' + '|'.join(get_supported_format_extensions_preg()) + ')\s*$', re.I)
extractor = Extractor()
extractor.setup(path, None)
mime = extractor.get_mime_type()
if mime is None:
return None
files = extractor.get_files()
extractor.close()
num_pages = len(filter(image_re.search, files))
size = os.stat(path).st_size
return mime, num_pages, size
def _get_rar_exec():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('unrar', 'rar'):
if process.Process([command]).spawn() is not None:
return command
return None
def _get_7z_exec():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('7z', '7za', '7zr'):
if process.Process([command]).spawn() is not None:
return command
return None
| """Packer is a threaded class for packing files into ZIP archives.
It would be straightforward to add support for more archive types,
but basically all other types are less well fitted for this particular
task than ZIP archives are (yes, really).
"""
def __init__(self, image_files, other_files, archive_path, base_name):
"""Setup a Packer object to create a ZIP archive at <archive_path>.
All files pointed to by paths in the sequences <image_files> and
<other_files> will be included in the archive when packed.
The files in <image_files> will be renamed on the form
"NN - <base_name>.ext", so that the lexical ordering of their
filenames match that of their order in the list.
The files in <other_files> will be included as they are,
assuming their filenames does not clash with other filenames in
the archive. All files are placed in the archive root.
"""
self._image_files = image_files
self._other_files = other_files
self._archive_path = archive_path
self._base_name = base_name
self._pack_thread = None
self._packing_successful = False
def pack(self):
"""Pack all the files in the file lists into the archive."""
self._pack_thread = threading.Thread(target=self._thread_pack)
self._pack_thread.setDaemon(False)
self._pack_thread.start()
def wait(self):
"""Block until the packer thread has finished. Return True if the
packer finished its work successfully.
"""
if self._pack_thread is not None:
self._pack_thread.join()
return self._packing_successful
def _thread_pack(self):
try:
zfile = zipfile.ZipFile(self._archive_path, 'w')
except Exception:
print('! Could not create archive {}'.format(self._archive_path))
return
used_names = []
pattern = '{{:0{}d}} - {}{{}}'.format(len(str(len(self._image_files))), self._base_name)
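# Editor's note: the format string above zero-pads the page number to the
# width of the page count; e.g. with 12 images and base name "My Comic" the
# third file becomes "03 - My Comic.jpg", so lexical order matches list order.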
for i, path in enumerate(self._image_files):
filename = pattern.format(i + 1, os.path.splitext(path)[1])
try:
zfile.write(path, filename, zipfile.ZIP_STORED)
except Exception:
print('! Could not add file {} to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
for path in self._other_files:
filename = os.path.basename(path)
while filename in used_names:
filename = '_{}'.format(filename)
try:
zfile.write(path, filename, zipfile.ZIP_DEFLATED)
except Exception:
print('! Could not add file {} to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
zfile.close()
self._packing_successful = True | identifier_body |
archive.py | # coding=utf-8
"""archive.py - Archive handling (extract/create) for Comix."""
from __future__ import absolute_import
import cStringIO
import os
import re
import sys
import tarfile
import threading
import zipfile | except ImportError:
Archive7z = None # ignore it.
from src import mobiunpack
from src import process
from src.image import get_supported_format_extensions_preg
ZIP, RAR, TAR, GZIP, BZIP2, SEVENZIP, MOBI = range(7)
_rar_exec = None
_7z_exec = None
class Extractor(object):
"""Extractor is a threaded class for extracting different archive formats.
The Extractor can be loaded with paths to archives (currently ZIP, tar,
RAR, 7Z or MobiPocket archives) and a path to a destination directory. Once an archive
has been set it is possible to filter out the files to be extracted and
set the order in which they should be extracted. The extraction can
then be started in a new thread in which files are extracted one by one,
and a signal is sent on a condition after each extraction, so that it is
possible for other threads to wait on specific files to be ready.
Note: Support for gzip/bzip2 compressed tar archives is limited, see
set_files() for more info.
"""
def __init__(self):
self._setupped = False
def setup(self, src, dst):
"""Setup the extractor with archive <src> and destination dir <dst>.
Return a threading.Condition related to the is_ready() method, or
None if the format of <src> isn't supported.
"""
self._src = src
self._dst = dst
self._type = archive_mime_type(src)
self._files = []
self._extracted = {}
self._stop = False
self._extract_thread = None
self._condition = threading.Condition()
if self._type == ZIP:
self._zfile = zipfile.ZipFile(src, 'r')
self._files = self._zfile.namelist()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile = tarfile.open(src, 'r')
self._files = self._tfile.getnames()
elif self._type == RAR:
global _rar_exec
if _rar_exec is None:
_rar_exec = _get_rar_exec()
if _rar_exec is None:
print('! Could not find RAR file extractor.')
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find RAR file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>rar</i> or the"
" <i>unrar</i> program installed in order "
"to read RAR (.cbr) files."))
dialog.run()
dialog.destroy()
return None
proc = process.Process([_rar_exec, 'vb', '-p-', '--', src])
fd = proc.spawn()
self._files = [name.rstrip(os.linesep) for name in fd.readlines()]
fd.close()
proc.wait()
elif self._type == SEVENZIP:
global _7z_exec, Archive7z
if not Archive7z: # lib import failed
print(': pylzma is not installed... will try 7z tool...')
if _7z_exec is None:
_7z_exec = _get_7z_exec()
else:
try:
self._szfile = Archive7z(open(src, 'rb'), '-')
self._files = self._szfile.getnames()
except:
Archive7z = None
# pylzma can fail on new 7z
if _7z_exec is None:
_7z_exec = _get_7z_exec()
if _7z_exec is None:
print('! Could not find 7Z file extractor.')
elif not Archive7z:
proc = process.Process([_7z_exec, 'l', '-bd', '-slt', '-p-', src])
fd = proc.spawn()
self._files = self._process_7z_names(fd)
fd.close()
proc.wait()
if not _7z_exec and not Archive7z:
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find 7Z file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>pylzma</i> "
"or the <i>p7zip</i> program installed "
"in order to read 7Z (.cb7) files."))
dialog.run()
dialog.destroy()
return None
elif self._type == MOBI:
self._mobifile = None
try:
self._mobifile = mobiunpack.MobiFile(src)
self._files = self._mobifile.getnames()
except mobiunpack.unpackException as e:
print('! Failed to unpack MobiPocket: {}'.format(e))
return None
else:
print('! Non-supported archive format: {}'.format(src))
return None
self._setupped = True
return self._condition
def _process_7z_names(self, fd):
START = "----------"
names = []
started = False
item = {}
while True:
try:
line = fd.readline()
except:
break
if line:
line = line.rstrip(os.linesep)
try:
# For non-ascii files names
line = line.decode("utf-8")
except:
pass
if line.startswith(START):
started = True
item = {}
continue
if started:
if line == "":
if item["Attributes"].find("D") == -1:
names.append(item["Path"])
item = {}
else:
key = line.split("=")[0].strip()
value = "=".join(line.split("=")[1:]).strip()
item[key] = value
else:
break
return names
def get_files(self):
"""Return a list of names of all the files the extractor is currently
set for extracting. After a call to setup() this is by default all
files found in the archive. The paths in the list are relative to
the archive root and are not absolute for the files once extracted.
"""
return self._files[:]
def set_files(self, files, extracted=False):
"""Set the files that the extractor should extract from the archive in
the order of extraction. Normally one would get the list of all files
in the archive using get_files(), then filter and/or permute this
list before sending it back using set_files().
The second parameter, <extracted>, is a trick used when managing
subarchives: it marks the given files as already extracted, so that
nothing blocks waiting on files not present in the original archive.
Note: Random access on gzip or bzip2 compressed tar archives is
not a good idea. These formats are supported *only* for backwards
compatibility. They are fine formats for some purposes, but should
not be used for scanned comic books. So, we cheat and ignore the
ordering applied with this method on such archives.
"""
if extracted:
self._files = files
for filename in files:
self._extracted[filename] = True
return
if self._type in (GZIP, BZIP2):
self._files = [x for x in self._files if x in files]
else:
self._files = files
def is_ready(self, name):
"""Return True if the file <name> in the extractor's file list
(as set by set_files()) is fully extracted.
"""
return self._extracted.get(name, False)
def get_mime_type(self):
"""Return the mime type name of the extractor's current archive."""
return self._type
def stop(self):
"""Signal the extractor to stop extracting and kill the extracting
thread. Blocks until the extracting thread has terminated.
"""
self._stop = True
if self._setupped:
self._extract_thread.join()
self._setupped = False
def extract(self):
"""Start extracting the files in the file list one by one using a
new thread. Every time a new file is extracted a notify() will be
signalled on the Condition that was returned by setup().
"""
self._extract_thread = threading.Thread(target=self._thread_extract)
self._extract_thread.setDaemon(False)
self._extract_thread.start()
def close(self):
"""Close any open file objects, need only be called manually if the
extract() method isn't called.
"""
if self._type == ZIP:
self._zfile.close()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile.close()
elif self._type == MOBI and self._mobifile is not None:
self._mobifile.close()
def _thread_extract(self):
"""Extract the files in the file list one by one."""
# Extract 7z and rar whole archive - if it SOLID - extract one file is SLOW
if self._type in (SEVENZIP,) and _7z_exec is not None:
cmd = [_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
if self._type in (RAR,) and _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
cmd = [_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
os.chdir(cwd)
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
else:
for name in self._files:
self._extract_file(name)
self.close()
def _extract_file(self, name):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if self._stop:
self.close()
sys.exit(0)
try:
if self._type in (ZIP, SEVENZIP):
dst_path = os.path.join(self._dst, name)
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_path))
new = open(dst_path, 'wb')
if self._type == ZIP:
new.write(self._zfile.read(name, '-'))
elif self._type == SEVENZIP:
if Archive7z is not None:
new.write(self._szfile.getmember(name).read())
else:
if _7z_exec is not None:
proc = process.Process([_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src, name])
proc.spawn()
proc.wait()
else:
print('! Could not find 7Z file extractor.')
new.close()
elif self._type in (TAR, GZIP, BZIP2):
if os.path.normpath(os.path.join(self._dst, name)).startswith(
self._dst):
self._tfile.extract(name, self._dst)
else:
print('! Non-local tar member: {}\n'.format(name))
elif self._type == RAR:
if _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
proc = process.Process([_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src, name])
proc.spawn()
proc.wait()
os.chdir(cwd)
else:
print('! Could not find RAR file extractor.')
elif self._type == MOBI:
dst_path = os.path.join(self._dst, name)
self._mobifile.extract(name, dst_path)
except Exception:
# Better to ignore any failed extractions (e.g. from a corrupt
# archive) than to crash here and leave the main thread in a
# possible infinite block. Damaged or missing files *should* be
# handled gracefully by the main program anyway.
pass
self._condition.acquire()
self._extracted[name] = True
self._condition.notify()
self._condition.release()
def extract_file_io(self, chosen):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if os.path.exists(os.path.join(self._dst, chosen)):
cStringIO.StringIO(open(os.path.join(self._dst, chosen), 'rb').read())
if self._type == ZIP:
return cStringIO.StringIO(self._zfile.read(chosen))
elif self._type in [TAR, GZIP, BZIP2]:
return cStringIO.StringIO(self._tfile.extractfile(chosen).read())
elif self._type == RAR:
proc = process.Process([_rar_exec, 'p', '-inul', '-p-', '--',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
elif self._type == SEVENZIP:
if Archive7z is not None:
return cStringIO.StringIO(self._szfile.getmember(chosen).read())
elif _7z_exec is not None:
proc = process.Process([_7z_exec, 'e', '-bd', '-p-', '-so',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
class Packer(object):
"""Packer is a threaded class for packing files into ZIP archives.
It would be straightforward to add support for more archive types,
but basically all other types are less well fitted for this particular
task than ZIP archives are (yes, really).
"""
def __init__(self, image_files, other_files, archive_path, base_name):
"""Setup a Packer object to create a ZIP archive at <archive_path>.
All files pointed to by paths in the sequences <image_files> and
<other_files> will be included in the archive when packed.
The files in <image_files> will be renamed on the form
"NN - <base_name>.ext", so that the lexical ordering of their
filenames match that of their order in the list.
The files in <other_files> will be included as they are,
assuming their filenames does not clash with other filenames in
the archive. All files are placed in the archive root.
"""
self._image_files = image_files
self._other_files = other_files
self._archive_path = archive_path
self._base_name = base_name
self._pack_thread = None
self._packing_successful = False
def pack(self):
"""Pack all the files in the file lists into the archive."""
self._pack_thread = threading.Thread(target=self._thread_pack)
self._pack_thread.setDaemon(False)
self._pack_thread.start()
def wait(self):
"""Block until the packer thread has finished. Return True if the
packer finished its work successfully.
"""
if self._pack_thread is not None:
self._pack_thread.join()
return self._packing_successful
def _thread_pack(self):
try:
zfile = zipfile.ZipFile(self._archive_path, 'w')
except Exception:
print('! Could not create archive {}'.format(self._archive_path))
return
used_names = []
pattern = '{{:0{}d}} - {}{{}}'.format(len(str(len(self._image_files))), self._base_name)
for i, path in enumerate(self._image_files):
filename = pattern.format(i + 1, os.path.splitext(path)[1])
try:
zfile.write(path, filename, zipfile.ZIP_STORED)
except Exception:
print('! Could not add file {} to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
for path in self._other_files:
filename = os.path.basename(path)
while filename in used_names:
filename = '_{}'.format(filename)
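# Editor's note: e.g. a second "cover.jpg" is stored as "_cover.jpg", a third
# as "__cover.jpg", and so on until the name no longer collides.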
try:
zfile.write(path, filename, zipfile.ZIP_DEFLATED)
except Exception:
print('! Could not add file {} to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
zfile.close()
self._packing_successful = True
def archive_mime_type(path):
"""Return the archive type of <path> or None for non-archives."""
try:
if os.path.isfile(path):
if not os.access(path, os.R_OK):
return None
if zipfile.is_zipfile(path):
return ZIP
fd = open(path, 'rb')
magic = fd.read(4)
fd.seek(60)
magic2 = fd.read(8)
fd.close()
if tarfile.is_tarfile(path) and os.path.getsize(path) > 0:
if magic.startswith('BZh'):
return BZIP2
if magic.startswith('\037\213'):
return GZIP
return TAR
if magic == 'Rar!':
return RAR
if magic == '7z\xbc\xaf':
return SEVENZIP
if magic2 == 'BOOKMOBI':
return MOBI
except Exception:
print('! Error while reading {}'.format(path))
return None
def get_name(archive_type):
"""Return a text representation of an archive type."""
return {ZIP: _('ZIP archive'),
TAR: _('Tar archive'),
GZIP: _('Gzip compressed tar archive'),
BZIP2: _('Bzip2 compressed tar archive'),
RAR: _('RAR archive'),
SEVENZIP: _('7-Zip archive'),
MOBI: _('MobiPocket file'),
}[archive_type]
def get_archive_info(path):
"""Return a tuple (mime, num_pages, size) with info about the archive
at <path>, or None if <path> doesn't point to a supported archive.
"""
image_re = re.compile('\.(' + '|'.join(get_supported_format_extensions_preg()) + ')\s*$', re.I)
extractor = Extractor()
extractor.setup(path, None)
mime = extractor.get_mime_type()
if mime is None:
return None
files = extractor.get_files()
extractor.close()
num_pages = len(filter(image_re.search, files))
size = os.stat(path).st_size
return mime, num_pages, size
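# Editor's note - illustrative call (hypothetical path and values, not part of
# the original module):
#   info = get_archive_info('/tmp/comic.cbz')
#   if info is not None:
#       mime, num_pages, size = info   # e.g. (ZIP, 24, 13572908)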
def _get_rar_exec():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('unrar', 'rar'):
if process.Process([command]).spawn() is not None:
return command
return None
def _get_7z_exec():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('7z', '7za', '7zr'):
if process.Process([command]).spawn() is not None:
return command
return None |
import gtk
try:
from py7zlib import Archive7z | random_line_split |
archive.py | # coding=utf-8
"""archive.py - Archive handling (extract/create) for Comix."""
from __future__ import absolute_import
import cStringIO
import os
import re
import sys
import tarfile
import threading
import zipfile
import gtk
try:
from py7zlib import Archive7z
except ImportError:
Archive7z = None # ignore it.
from src import mobiunpack
from src import process
from src.image import get_supported_format_extensions_preg
ZIP, RAR, TAR, GZIP, BZIP2, SEVENZIP, MOBI = range(7)
_rar_exec = None
_7z_exec = None
class Extractor(object):
"""Extractor is a threaded class for extracting different archive formats.
The Extractor can be loaded with paths to archives (currently ZIP, tar,
RAR, 7Z or MobiPocket archives) and a path to a destination directory. Once an archive
has been set it is possible to filter out the files to be extracted and
set the order in which they should be extracted. The extraction can
then be started in a new thread in which files are extracted one by one,
and a signal is sent on a condition after each extraction, so that it is
possible for other threads to wait on specific files to be ready.
Note: Support for gzip/bzip2 compressed tar archives is limited, see
set_files() for more info.
"""
def __init__(self):
self._setupped = False
def setup(self, src, dst):
"""Setup the extractor with archive <src> and destination dir <dst>.
Return a threading.Condition related to the is_ready() method, or
None if the format of <src> isn't supported.
"""
self._src = src
self._dst = dst
self._type = archive_mime_type(src)
self._files = []
self._extracted = {}
self._stop = False
self._extract_thread = None
self._condition = threading.Condition()
if self._type == ZIP:
self._zfile = zipfile.ZipFile(src, 'r')
self._files = self._zfile.namelist()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile = tarfile.open(src, 'r')
self._files = self._tfile.getnames()
elif self._type == RAR:
global _rar_exec
if _rar_exec is None:
_rar_exec = _get_rar_exec()
if _rar_exec is None:
print('! Could not find RAR file extractor.')
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find RAR file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>rar</i> or the"
" <i>unrar</i> program installed in order "
"to read RAR (.cbr) files."))
dialog.run()
dialog.destroy()
return None
proc = process.Process([_rar_exec, 'vb', '-p-', '--', src])
fd = proc.spawn()
self._files = [name.rstrip(os.linesep) for name in fd.readlines()]
fd.close()
proc.wait()
elif self._type == SEVENZIP:
global _7z_exec, Archive7z
if not Archive7z: # lib import failed
print(': pylzma is not installed... will try 7z tool...')
if _7z_exec is None:
_7z_exec = _get_7z_exec()
else:
try:
self._szfile = Archive7z(open(src, 'rb'), '-')
self._files = self._szfile.getnames()
except:
Archive7z = None
# pylzma can fail on new 7z
if _7z_exec is None:
_7z_exec = _get_7z_exec()
if _7z_exec is None:
print('! Could not find 7Z file extractor.')
elif not Archive7z:
proc = process.Process([_7z_exec, 'l', '-bd', '-slt', '-p-', src])
fd = proc.spawn()
self._files = self._process_7z_names(fd)
fd.close()
proc.wait()
if not _7z_exec and not Archive7z:
dialog = gtk.MessageDialog(None, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_CLOSE,
_("Could not find 7Z file extractor!"))
dialog.format_secondary_markup(_("You need either the <i>pylzma</i> "
"or the <i>p7zip</i> program installed "
"in order to read 7Z (.cb7) files."))
dialog.run()
dialog.destroy()
return None
elif self._type == MOBI:
self._mobifile = None
try:
self._mobifile = mobiunpack.MobiFile(src)
self._files = self._mobifile.getnames()
except mobiunpack.unpackException as e:
print('! Failed to unpack MobiPocket: {}'.format(e))
return None
else:
print('! Non-supported archive format: {}'.format(src))
return None
self._setupped = True
return self._condition
def _process_7z_names(self, fd):
START = "----------"
names = []
started = False
item = {}
while True:
try:
line = fd.readline()
except:
break
if line:
line = line.rstrip(os.linesep)
try:
# For non-ascii files names
line = line.decode("utf-8")
except:
pass
if line.startswith(START):
started = True
item = {}
continue
if started:
if line == "":
if item["Attributes"].find("D") == -1:
names.append(item["Path"])
item = {}
else:
key = line.split("=")[0].strip()
value = "=".join(line.split("=")[1:]).strip()
item[key] = value
else:
break
return names
def get_files(self):
"""Return a list of names of all the files the extractor is currently
set for extracting. After a call to setup() this is by default all
files found in the archive. The paths in the list are relative to
the archive root and are not absolute for the files once extracted.
"""
return self._files[:]
def set_files(self, files, extracted=False):
"""Set the files that the extractor should extract from the archive in
the order of extraction. Normally one would get the list of all files
in the archive using get_files(), then filter and/or permute this
list before sending it back using set_files().
The second parameter, <extracted>, is a trick used when managing
subarchives: it marks the given files as already extracted, so that
nothing blocks waiting on files not present in the original archive.
Note: Random access on gzip or bzip2 compressed tar archives is
not a good idea. These formats are supported *only* for backwards
compatibility. They are fine formats for some purposes, but should
not be used for scanned comic books. So, we cheat and ignore the
ordering applied with this method on such archives.
"""
if extracted:
self._files = files
for filename in files:
self._extracted[filename] = True
return
if self._type in (GZIP, BZIP2):
self._files = [x for x in self._files if x in files]
else:
self._files = files
def is_ready(self, name):
"""Return True if the file <name> in the extractor's file list
(as set by set_files()) is fully extracted.
"""
return self._extracted.get(name, False)
def get_mime_type(self):
"""Return the mime type name of the extractor's current archive."""
return self._type
def stop(self):
"""Signal the extractor to stop extracting and kill the extracting
thread. Blocks until the extracting thread has terminated.
"""
self._stop = True
if self._setupped:
self._extract_thread.join()
self._setupped = False
def extract(self):
"""Start extracting the files in the file list one by one using a
new thread. Every time a new file is extracted a notify() will be
signalled on the Condition that was returned by setup().
"""
self._extract_thread = threading.Thread(target=self._thread_extract)
self._extract_thread.setDaemon(False)
self._extract_thread.start()
def close(self):
"""Close any open file objects, need only be called manually if the
extract() method isn't called.
"""
if self._type == ZIP:
self._zfile.close()
elif self._type in (TAR, GZIP, BZIP2):
self._tfile.close()
elif self._type == MOBI and self._mobifile is not None:
self._mobifile.close()
def _thread_extract(self):
"""Extract the files in the file list one by one."""
# Extract 7z and rar whole archive - if it SOLID - extract one file is SLOW
if self._type in (SEVENZIP,) and _7z_exec is not None:
cmd = [_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
if self._type in (RAR,) and _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
cmd = [_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src]
proc = process.Process(cmd)
proc.spawn()
proc.wait()
os.chdir(cwd)
self._condition.acquire()
for name in self._files:
self._extracted[name] = True
self._condition.notify()
self._condition.release()
else:
for name in self._files:
self._extract_file(name)
self.close()
def _extract_file(self, name):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if self._stop:
self.close()
sys.exit(0)
try:
if self._type in (ZIP, SEVENZIP):
dst_path = os.path.join(self._dst, name)
if not os.path.exists(os.path.dirname(dst_path)):
os.makedirs(os.path.dirname(dst_path))
new = open(dst_path, 'wb')
if self._type == ZIP:
new.write(self._zfile.read(name, '-'))
elif self._type == SEVENZIP:
if Archive7z is not None:
new.write(self._szfile.getmember(name).read())
else:
if _7z_exec is not None:
proc = process.Process([_7z_exec, 'x', '-bd', '-p-',
'-o' + self._dst, '-y', self._src, name])
proc.spawn()
proc.wait()
else:
print('! Could not find 7Z file extractor.')
new.close()
elif self._type in (TAR, GZIP, BZIP2):
if os.path.normpath(os.path.join(self._dst, name)).startswith(
self._dst):
self._tfile.extract(name, self._dst)
else:
print('! Non-local tar member: {}\n'.format(name))
elif self._type == RAR:
if _rar_exec is not None:
cwd = os.getcwd()
os.chdir(self._dst)
proc = process.Process([_rar_exec, 'x', '-kb', '-p-',
'-o-', '-inul', '--', self._src, name])
proc.spawn()
proc.wait()
os.chdir(cwd)
else:
print('! Could not find RAR file extractor.')
elif self._type == MOBI:
dst_path = os.path.join(self._dst, name)
self._mobifile.extract(name, dst_path)
except Exception:
# Better to ignore any failed extractions (e.g. from a corrupt
# archive) than to crash here and leave the main thread in a
# possible infinite block. Damaged or missing files *should* be
# handled gracefully by the main program anyway.
pass
self._condition.acquire()
self._extracted[name] = True
self._condition.notify()
self._condition.release()
def extract_file_io(self, chosen):
"""Extract the file named <name> to the destination directory,
mark the file as "ready", then signal a notify() on the Condition
returned by setup().
"""
if os.path.exists(os.path.join(self._dst, chosen)):
cStringIO.StringIO(open(os.path.join(self._dst, chosen), 'rb').read())
if self._type == ZIP:
return cStringIO.StringIO(self._zfile.read(chosen))
elif self._type in [TAR, GZIP, BZIP2]:
return cStringIO.StringIO(self._tfile.extractfile(chosen).read())
elif self._type == RAR:
proc = process.Process([_rar_exec, 'p', '-inul', '-p-', '--',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
elif self._type == SEVENZIP:
if Archive7z is not None:
return cStringIO.StringIO(self._szfile.getmember(chosen).read())
elif _7z_exec is not None:
proc = process.Process([_7z_exec, 'e', '-bd', '-p-', '-so',
self._src, chosen])
fobj = proc.spawn()
return cStringIO.StringIO(fobj.read())
class Packer(object):
"""Packer is a threaded class for packing files into ZIP archives.
It would be straightforward to add support for more archive types,
but basically all other types are less well fitted for this particular
task than ZIP archives are (yes, really).
"""
def __init__(self, image_files, other_files, archive_path, base_name):
"""Setup a Packer object to create a ZIP archive at <archive_path>.
All files pointed to by paths in the sequences <image_files> and
<other_files> will be included in the archive when packed.
The files in <image_files> will be renamed on the form
"NN - <base_name>.ext", so that the lexical ordering of their
filenames match that of their order in the list.
The files in <other_files> will be included as they are,
assuming their filenames does not clash with other filenames in
the archive. All files are placed in the archive root.
"""
self._image_files = image_files
self._other_files = other_files
self._archive_path = archive_path
self._base_name = base_name
self._pack_thread = None
self._packing_successful = False
def pack(self):
"""Pack all the files in the file lists into the archive."""
self._pack_thread = threading.Thread(target=self._thread_pack)
self._pack_thread.setDaemon(False)
self._pack_thread.start()
def wait(self):
"""Block until the packer thread has finished. Return True if the
packer finished its work successfully.
"""
if self._pack_thread is not None:
self._pack_thread.join()
return self._packing_successful
def _thread_pack(self):
try:
zfile = zipfile.ZipFile(self._archive_path, 'w')
except Exception:
print('! Could not create archive {}'.format(self._archive_path))
return
used_names = []
pattern = '{{:0{}d}} - {}{{}}'.format(len(str(len(self._image_files))), self._base_name)
for i, path in enumerate(self._image_files):
filename = pattern.format(i + 1, os.path.splitext(path)[1])
try:
zfile.write(path, filename, zipfile.ZIP_STORED)
except Exception:
print('! Could not add file {} to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
for path in self._other_files:
filename = os.path.basename(path)
while filename in used_names:
filename = '_{}'.format(filename)
try:
zfile.write(path, filename, zipfile.ZIP_DEFLATED)
except Exception:
print('! Could not add file {} to {}, aborting...'.format(path, self._archive_path))
zfile.close()
try:
os.remove(self._archive_path)
except:
pass
return
used_names.append(filename)
zfile.close()
self._packing_successful = True
def archive_mime_type(path):
"""Return the archive type of <path> or None for non-archives."""
try:
if os.path.isfile(path):
if not os.access(path, os.R_OK):
return None
if zipfile.is_zipfile(path):
return ZIP
fd = open(path, 'rb')
magic = fd.read(4)
fd.seek(60)
magic2 = fd.read(8)
fd.close()
if tarfile.is_tarfile(path) and os.path.getsize(path) > 0:
if magic.startswith('BZh'):
return BZIP2
if magic.startswith('\037\213'):
return GZIP
return TAR
if magic == 'Rar!':
return RAR
if magic == '7z\xbc\xaf':
return SEVENZIP
if magic2 == 'BOOKMOBI':
return MOBI
except Exception:
print('! Error while reading {}'.format(path))
return None
def get_name(archive_type):
"""Return a text representation of an archive type."""
return {ZIP: _('ZIP archive'),
TAR: _('Tar archive'),
GZIP: _('Gzip compressed tar archive'),
BZIP2: _('Bzip2 compressed tar archive'),
RAR: _('RAR archive'),
SEVENZIP: _('7-Zip archive'),
MOBI: _('MobiPocket file'),
}[archive_type]
def get_archive_info(path):
"""Return a tuple (mime, num_pages, size) with info about the archive
at <path>, or None if <path> doesn't point to a supported archive.
"""
image_re = re.compile('\.(' + '|'.join(get_supported_format_extensions_preg()) + ')\s*$', re.I)
extractor = Extractor()
extractor.setup(path, None)
mime = extractor.get_mime_type()
if mime is None:
return None
files = extractor.get_files()
extractor.close()
num_pages = len(filter(image_re.search, files))
size = os.stat(path).st_size
return mime, num_pages, size
def | ():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('unrar', 'rar'):
if process.Process([command]).spawn() is not None:
return command
return None
def _get_7z_exec():
"""Return the name of the RAR file extractor executable, or None if
no such executable is found.
"""
for command in ('7z', '7za', '7zr'):
if process.Process([command]).spawn() is not None:
return command
return None
| _get_rar_exec | identifier_name |
winprocess.py | """
Windows Process Control
winprocess.run launches a child process and returns the exit code.
Optionally, it can:
redirect stdin, stdout & stderr to files
run the command as another user
limit the process's running time
control the process window (location, size, window state, desktop)
Works on Windows NT, 2000 & XP. Requires Mark Hammond's win32
extensions.
This code is free for any purpose, with no warranty of any kind.
-- John B. Dell'Aquila <[email protected]>
"""
import win32api, win32process, win32security
import win32event, win32con, msvcrt, win32gui
def logonUser(loginString):
"""
Login as specified user and return handle.
loginString: 'Domain\nUser\nPassword'; for local
login use . or empty string as domain
e.g. '.\nadministrator\nsecret_password'
"""
domain, user, passwd = loginString.split('\n')
return win32security.LogonUser(
user,
domain,
passwd,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT
)
class Process:
"""
A Windows process.
"""
def __init__(self, cmd, login=None,
hStdin=None, hStdout=None, hStderr=None,
show=1, xy=None, xySize=None,
desktop=None):
"""
Create a Windows process.
cmd: command to run
login: run as user 'Domain\nUser\nPassword'
hStdin, hStdout, hStderr:
handles for process I/O; default is caller's stdin,
stdout & stderr
show: wShowWindow (0=SW_HIDE, 1=SW_NORMAL, ...)
xy: window offset (x, y) of upper left corner in pixels
xySize: window size (width, height) in pixels
desktop: lpDesktop - name of desktop e.g. 'winsta0\\default'
None = inherit current desktop
'' = create new desktop if necessary
Running as another user (login) requires additional privileges:
Act as part of the operating system [not needed on Windows XP]
Increase quotas
Replace a process level token
Login string must EITHER be an administrator's account
(ordinary user can't access current desktop - see Microsoft
Q165194) OR use desktop='' to run another desktop invisibly
(may be very slow to startup & finalize).
"""
si = win32process.STARTUPINFO()
si.dwFlags = (win32con.STARTF_USESTDHANDLES ^
win32con.STARTF_USESHOWWINDOW)
if hStdin is None:
si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
else:
si.hStdInput = hStdin
if hStdout is None:
si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
else:
si.hStdOutput = hStdout
if hStderr is None:
si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
else:
si.hStdError = hStderr
si.wShowWindow = show
if xy is not None:
si.dwX, si.dwY = xy
si.dwFlags ^= win32con.STARTF_USEPOSITION
if xySize is not None:
si.dwXSize, si.dwYSize = xySize
si.dwFlags ^= win32con.STARTF_USESIZE
if desktop is not None:
si.lpDesktop = desktop
procArgs = (None, # appName
cmd, # commandLine
None, # processAttributes
None, # threadAttributes
1, # bInheritHandles
win32process.CREATE_NEW_CONSOLE, # dwCreationFlags
None, # newEnvironment
None, # currentDirectory
si) # startupinfo
if login is not None:
hUser = logonUser(login)
win32security.ImpersonateLoggedOnUser(hUser)
procHandles = win32process.CreateProcessAsUser(hUser, *procArgs)
win32security.RevertToSelf()
else:
procHandles = win32process.CreateProcess(*procArgs)
self.hProcess, self.hThread, self.PId, self.TId = procHandles
def | (self, mSec=None):
"""
Wait for process to finish or for specified number of
milliseconds to elapse.
"""
if mSec is None:
mSec = win32event.INFINITE
return win32event.WaitForSingleObject(self.hProcess, mSec)
def kill(self, gracePeriod=5000):
"""
Kill process. Try for an orderly shutdown via WM_CLOSE. If
still running after gracePeriod (5 sec. default), terminate.
"""
win32gui.EnumWindows(self.__close__, 0)
if self.wait(gracePeriod) != win32event.WAIT_OBJECT_0:
win32process.TerminateProcess(self.hProcess, 0)
win32api.Sleep(100) # wait for resources to be released
def __close__(self, hwnd, dummy):
"""
EnumWindows callback - sends WM_CLOSE to any window
owned by this process.
"""
TId, PId = win32process.GetWindowThreadProcessId(hwnd)
if PId == self.PId:
win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
def exitCode(self):
"""
Return process exit code.
"""
return win32process.GetExitCodeProcess(self.hProcess)
def run(cmd, mSec=None, stdin=None, stdout=None, stderr=None, **kw):
"""
Run cmd as a child process and return exit code.
mSec: terminate cmd after specified number of milliseconds
stdin, stdout, stderr:
file objects for child I/O (use hStdin etc. to attach
handles instead of files); default is caller's stdin,
stdout & stderr;
kw: see Process.__init__ for more keyword options
"""
if stdin is not None:
kw['hStdin'] = msvcrt.get_osfhandle(stdin.fileno())
if stdout is not None:
kw['hStdout'] = msvcrt.get_osfhandle(stdout.fileno())
if stderr is not None:
kw['hStderr'] = msvcrt.get_osfhandle(stderr.fileno())
child = Process(cmd, **kw)
if child.wait(mSec) != win32event.WAIT_OBJECT_0:
child.kill()
raise WindowsError, 'process timeout exceeded'
return child.exitCode()
if __name__ == '__main__':
# Pipe commands to a shell and display the output in notepad
print 'Testing winprocess.py...'
import tempfile
timeoutSeconds = 15
cmdString = """\
REM Test of winprocess.py piping commands to a shell.\r
REM This window will close in %d seconds.\r
vol\r
net user\r
_this_is_a_test_of_stderr_\r
""" % timeoutSeconds
cmd, out = tempfile.TemporaryFile(), tempfile.TemporaryFile()
cmd.write(cmdString)
cmd.seek(0)
print 'CMD.EXE exit code:', run('cmd.exe', show=0, stdin=cmd,
stdout=out, stderr=out)
cmd.close()
print 'NOTEPAD exit code:', run('notepad.exe %s' % out.file.name,
show=win32con.SW_MAXIMIZE,
mSec=timeoutSeconds*1000)
out.close()
| wait | identifier_name |
winprocess.py | """
Windows Process Control
winprocess.run launches a child process and returns the exit code.
Optionally, it can:
redirect stdin, stdout & stderr to files
run the command as another user
limit the process's running time
control the process window (location, size, window state, desktop)
Works on Windows NT, 2000 & XP. Requires Mark Hammond's win32
extensions.
This code is free for any purpose, with no warranty of any kind.
-- John B. Dell'Aquila <[email protected]>
"""
import win32api, win32process, win32security
import win32event, win32con, msvcrt, win32gui
def logonUser(loginString):
"""
Login as specified user and return handle.
loginString: 'Domain\nUser\nPassword'; for local
login use . or empty string as domain
e.g. '.\nadministrator\nsecret_password'
"""
domain, user, passwd = loginString.split('\n')
return win32security.LogonUser(
user,
domain,
passwd,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT
)
class Process:
"""
A Windows process.
"""
def __init__(self, cmd, login=None,
hStdin=None, hStdout=None, hStderr=None,
show=1, xy=None, xySize=None,
desktop=None):
"""
Create a Windows process.
cmd: command to run
login: run as user 'Domain\nUser\nPassword'
hStdin, hStdout, hStderr:
handles for process I/O; default is caller's stdin,
stdout & stderr
show: wShowWindow (0=SW_HIDE, 1=SW_NORMAL, ...)
xy: window offset (x, y) of upper left corner in pixels
xySize: window size (width, height) in pixels
desktop: lpDesktop - name of desktop e.g. 'winsta0\\default'
None = inherit current desktop
'' = create new desktop if necessary
Running as another user (login) requires additional privileges:
Act as part of the operating system [not needed on Windows XP]
Increase quotas
Replace a process level token
Login string must EITHER be an administrator's account
(ordinary user can't access current desktop - see Microsoft
Q165194) OR use desktop='' to run another desktop invisibly
(may be very slow to startup & finalize).
"""
si = win32process.STARTUPINFO()
si.dwFlags = (win32con.STARTF_USESTDHANDLES ^
win32con.STARTF_USESHOWWINDOW)
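# Editor's note: dwFlags starts from zero and each STARTF_* constant is a
# distinct bit set at most once, so the xor assignments in this method behave
# exactly like bitwise-or; the flags are simply switched on.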
if hStdin is None:
si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
else:
|
if hStdout is None:
si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
else:
si.hStdOutput = hStdout
if hStderr is None:
si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
else:
si.hStdError = hStderr
si.wShowWindow = show
if xy is not None:
si.dwX, si.dwY = xy
si.dwFlags ^= win32con.STARTF_USEPOSITION
if xySize is not None:
si.dwXSize, si.dwYSize = xySize
si.dwFlags ^= win32con.STARTF_USESIZE
if desktop is not None:
si.lpDesktop = desktop
procArgs = (None, # appName
cmd, # commandLine
None, # processAttributes
None, # threadAttributes
1, # bInheritHandles
win32process.CREATE_NEW_CONSOLE, # dwCreationFlags
None, # newEnvironment
None, # currentDirectory
si) # startupinfo
if login is not None:
hUser = logonUser(login)
win32security.ImpersonateLoggedOnUser(hUser)
procHandles = win32process.CreateProcessAsUser(hUser, *procArgs)
win32security.RevertToSelf()
else:
procHandles = win32process.CreateProcess(*procArgs)
self.hProcess, self.hThread, self.PId, self.TId = procHandles
def wait(self, mSec=None):
"""
Wait for process to finish or for specified number of
milliseconds to elapse.
"""
if mSec is None:
mSec = win32event.INFINITE
return win32event.WaitForSingleObject(self.hProcess, mSec)
def kill(self, gracePeriod=5000):
"""
Kill process. Try for an orderly shutdown via WM_CLOSE. If
still running after gracePeriod (5 sec. default), terminate.
"""
win32gui.EnumWindows(self.__close__, 0)
if self.wait(gracePeriod) != win32event.WAIT_OBJECT_0:
win32process.TerminateProcess(self.hProcess, 0)
win32api.Sleep(100) # wait for resources to be released
def __close__(self, hwnd, dummy):
"""
EnumWindows callback - sends WM_CLOSE to any window
owned by this process.
"""
TId, PId = win32process.GetWindowThreadProcessId(hwnd)
if PId == self.PId:
win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
def exitCode(self):
"""
Return process exit code.
"""
return win32process.GetExitCodeProcess(self.hProcess)
def run(cmd, mSec=None, stdin=None, stdout=None, stderr=None, **kw):
"""
Run cmd as a child process and return exit code.
mSec: terminate cmd after specified number of milliseconds
stdin, stdout, stderr:
file objects for child I/O (use hStdin etc. to attach
handles instead of files); default is caller's stdin,
stdout & stderr;
kw: see Process.__init__ for more keyword options
"""
if stdin is not None:
kw['hStdin'] = msvcrt.get_osfhandle(stdin.fileno())
if stdout is not None:
kw['hStdout'] = msvcrt.get_osfhandle(stdout.fileno())
if stderr is not None:
kw['hStderr'] = msvcrt.get_osfhandle(stderr.fileno())
child = Process(cmd, **kw)
if child.wait(mSec) != win32event.WAIT_OBJECT_0:
child.kill()
raise WindowsError, 'process timeout exceeded'
return child.exitCode()
if __name__ == '__main__':
# Pipe commands to a shell and display the output in notepad
print 'Testing winprocess.py...'
import tempfile
timeoutSeconds = 15
cmdString = """\
REM Test of winprocess.py piping commands to a shell.\r
REM This window will close in %d seconds.\r
vol\r
net user\r
_this_is_a_test_of_stderr_\r
""" % timeoutSeconds
cmd, out = tempfile.TemporaryFile(), tempfile.TemporaryFile()
cmd.write(cmdString)
cmd.seek(0)
print 'CMD.EXE exit code:', run('cmd.exe', show=0, stdin=cmd,
stdout=out, stderr=out)
cmd.close()
print 'NOTEPAD exit code:', run('notepad.exe %s' % out.file.name,
show=win32con.SW_MAXIMIZE,
mSec=timeoutSeconds*1000)
out.close()
| si.hStdInput = hStdin | conditional_block |
winprocess.py | """
Windows Process Control
winprocess.run launches a child process and returns the exit code.
Optionally, it can:
redirect stdin, stdout & stderr to files
run the command as another user
limit the process's running time
control the process window (location, size, window state, desktop)
Works on Windows NT, 2000 & XP. Requires Mark Hammond's win32
extensions.
This code is free for any purpose, with no warranty of any kind.
-- John B. Dell'Aquila <[email protected]>
"""
import win32api, win32process, win32security
import win32event, win32con, msvcrt, win32gui
def logonUser(loginString):
"""
Login as specified user and return handle.
loginString: 'Domain\nUser\nPassword'; for local
login use . or empty string as domain
e.g. '.\nadministrator\nsecret_password'
"""
domain, user, passwd = loginString.split('\n')
return win32security.LogonUser(
user,
domain,
passwd,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT
)
class Process:
"""
A Windows process.
"""
def __init__(self, cmd, login=None,
hStdin=None, hStdout=None, hStderr=None,
show=1, xy=None, xySize=None,
desktop=None):
"""
Create a Windows process.
cmd: command to run
login: run as user 'Domain\nUser\nPassword'
hStdin, hStdout, hStderr:
handles for process I/O; default is caller's stdin,
stdout & stderr
show: wShowWindow (0=SW_HIDE, 1=SW_NORMAL, ...)
xy: window offset (x, y) of upper left corner in pixels
xySize: window size (width, height) in pixels
desktop: lpDesktop - name of desktop e.g. 'winsta0\\default'
None = inherit current desktop
'' = create new desktop if necessary
Running as another user (login) requires additional privileges:
Act as part of the operating system [not needed on Windows XP]
Increase quotas
Replace a process level token
Login string must EITHER be an administrator's account
(ordinary user can't access current desktop - see Microsoft
Q165194) OR use desktop='' to run another desktop invisibly
(may be very slow to startup & finalize).
"""
si = win32process.STARTUPINFO()
si.dwFlags = (win32con.STARTF_USESTDHANDLES ^
win32con.STARTF_USESHOWWINDOW)
if hStdin is None:
si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
else:
si.hStdInput = hStdin
if hStdout is None:
si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
else:
si.hStdOutput = hStdout
if hStderr is None:
si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
else:
si.hStdError = hStderr
si.wShowWindow = show
if xy is not None:
si.dwX, si.dwY = xy
si.dwFlags ^= win32con.STARTF_USEPOSITION
if xySize is not None:
si.dwXSize, si.dwYSize = xySize
si.dwFlags ^= win32con.STARTF_USESIZE
if desktop is not None:
si.lpDesktop = desktop
procArgs = (None, # appName
cmd, # commandLine
None, # processAttributes
None, # threadAttributes
1, # bInheritHandles
win32process.CREATE_NEW_CONSOLE, # dwCreationFlags
None, # newEnvironment
None, # currentDirectory
si) # startupinfo
if login is not None:
hUser = logonUser(login)
win32security.ImpersonateLoggedOnUser(hUser)
procHandles = win32process.CreateProcessAsUser(hUser, *procArgs)
win32security.RevertToSelf()
else:
procHandles = win32process.CreateProcess(*procArgs)
self.hProcess, self.hThread, self.PId, self.TId = procHandles | """
Wait for process to finish or for specified number of
milliseconds to elapse.
"""
if mSec is None:
mSec = win32event.INFINITE
return win32event.WaitForSingleObject(self.hProcess, mSec)
def kill(self, gracePeriod=5000):
"""
Kill process. Try for an orderly shutdown via WM_CLOSE. If
still running after gracePeriod (5 sec. default), terminate.
"""
win32gui.EnumWindows(self.__close__, 0)
if self.wait(gracePeriod) != win32event.WAIT_OBJECT_0:
win32process.TerminateProcess(self.hProcess, 0)
win32api.Sleep(100) # wait for resources to be released
def __close__(self, hwnd, dummy):
"""
EnumWindows callback - sends WM_CLOSE to any window
owned by this process.
"""
TId, PId = win32process.GetWindowThreadProcessId(hwnd)
if PId == self.PId:
win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
def exitCode(self):
"""
Return process exit code.
"""
return win32process.GetExitCodeProcess(self.hProcess)
def run(cmd, mSec=None, stdin=None, stdout=None, stderr=None, **kw):
"""
Run cmd as a child process and return exit code.
mSec: terminate cmd after specified number of milliseconds
stdin, stdout, stderr:
file objects for child I/O (use hStdin etc. to attach
handles instead of files); default is caller's stdin,
stdout & stderr;
kw: see Process.__init__ for more keyword options
"""
if stdin is not None:
kw['hStdin'] = msvcrt.get_osfhandle(stdin.fileno())
if stdout is not None:
kw['hStdout'] = msvcrt.get_osfhandle(stdout.fileno())
if stderr is not None:
kw['hStderr'] = msvcrt.get_osfhandle(stderr.fileno())
child = Process(cmd, **kw)
if child.wait(mSec) != win32event.WAIT_OBJECT_0:
child.kill()
raise WindowsError, 'process timeout exceeded'
return child.exitCode()
if __name__ == '__main__':
# Pipe commands to a shell and display the output in notepad
print 'Testing winprocess.py...'
import tempfile
timeoutSeconds = 15
cmdString = """\
REM Test of winprocess.py piping commands to a shell.\r
REM This window will close in %d seconds.\r
vol\r
net user\r
_this_is_a_test_of_stderr_\r
""" % timeoutSeconds
cmd, out = tempfile.TemporaryFile(), tempfile.TemporaryFile()
cmd.write(cmdString)
cmd.seek(0)
print 'CMD.EXE exit code:', run('cmd.exe', show=0, stdin=cmd,
stdout=out, stderr=out)
cmd.close()
print 'NOTEPAD exit code:', run('notepad.exe %s' % out.file.name,
show=win32con.SW_MAXIMIZE,
mSec=timeoutSeconds*1000)
out.close() |
def wait(self, mSec=None): | random_line_split |
winprocess.py | """
Windows Process Control
winprocess.run launches a child process and returns the exit code.
Optionally, it can:
redirect stdin, stdout & stderr to files
run the command as another user
limit the process's running time
control the process window (location, size, window state, desktop)
Works on Windows NT, 2000 & XP. Requires Mark Hammond's win32
extensions.
This code is free for any purpose, with no warranty of any kind.
-- John B. Dell'Aquila <[email protected]>
"""
import win32api, win32process, win32security
import win32event, win32con, msvcrt, win32gui
def logonUser(loginString):
"""
Login as specified user and return handle.
loginString: 'Domain\nUser\nPassword'; for local
login use . or empty string as domain
e.g. '.\nadministrator\nsecret_password'
"""
domain, user, passwd = loginString.split('\n')
return win32security.LogonUser(
user,
domain,
passwd,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT
)
class Process:
|
def run(cmd, mSec=None, stdin=None, stdout=None, stderr=None, **kw):
"""
Run cmd as a child process and return exit code.
mSec: terminate cmd after specified number of milliseconds
stdin, stdout, stderr:
file objects for child I/O (use hStdin etc. to attach
handles instead of files); default is caller's stdin,
stdout & stderr;
kw: see Process.__init__ for more keyword options
"""
if stdin is not None:
kw['hStdin'] = msvcrt.get_osfhandle(stdin.fileno())
if stdout is not None:
kw['hStdout'] = msvcrt.get_osfhandle(stdout.fileno())
if stderr is not None:
kw['hStderr'] = msvcrt.get_osfhandle(stderr.fileno())
child = Process(cmd, **kw)
if child.wait(mSec) != win32event.WAIT_OBJECT_0:
child.kill()
raise WindowsError, 'process timeout exceeded'
return child.exitCode()
if __name__ == '__main__':
# Pipe commands to a shell and display the output in notepad
print 'Testing winprocess.py...'
import tempfile
timeoutSeconds = 15
cmdString = """\
REM Test of winprocess.py piping commands to a shell.\r
REM This window will close in %d seconds.\r
vol\r
net user\r
_this_is_a_test_of_stderr_\r
""" % timeoutSeconds
cmd, out = tempfile.TemporaryFile(), tempfile.TemporaryFile()
cmd.write(cmdString)
cmd.seek(0)
print 'CMD.EXE exit code:', run('cmd.exe', show=0, stdin=cmd,
stdout=out, stderr=out)
cmd.close()
print 'NOTEPAD exit code:', run('notepad.exe %s' % out.file.name,
show=win32con.SW_MAXIMIZE,
mSec=timeoutSeconds*1000)
out.close()
| """
A Windows process.
"""
def __init__(self, cmd, login=None,
hStdin=None, hStdout=None, hStderr=None,
show=1, xy=None, xySize=None,
desktop=None):
"""
Create a Windows process.
cmd: command to run
login: run as user 'Domain\nUser\nPassword'
hStdin, hStdout, hStderr:
handles for process I/O; default is caller's stdin,
stdout & stderr
show: wShowWindow (0=SW_HIDE, 1=SW_NORMAL, ...)
xy: window offset (x, y) of upper left corner in pixels
xySize: window size (width, height) in pixels
desktop: lpDesktop - name of desktop e.g. 'winsta0\\default'
None = inherit current desktop
'' = create new desktop if necessary
User calling login requires additional privileges:
Act as part of the operating system [not needed on Windows XP]
Increase quotas
Replace a process level token
Login string must EITHER be an administrator's account
(ordinary user can't access current desktop - see Microsoft
Q165194) OR use desktop='' to run another desktop invisibly
(may be very slow to startup & finalize).
"""
si = win32process.STARTUPINFO()
si.dwFlags = (win32con.STARTF_USESTDHANDLES ^
win32con.STARTF_USESHOWWINDOW)
if hStdin is None:
si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
else:
si.hStdInput = hStdin
if hStdout is None:
si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
else:
si.hStdOutput = hStdout
if hStderr is None:
si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
else:
si.hStdError = hStderr
si.wShowWindow = show
if xy is not None:
si.dwX, si.dwY = xy
si.dwFlags ^= win32con.STARTF_USEPOSITION
if xySize is not None:
si.dwXSize, si.dwYSize = xySize
si.dwFlags ^= win32con.STARTF_USESIZE
if desktop is not None:
si.lpDesktop = desktop
procArgs = (None, # appName
cmd, # commandLine
None, # processAttributes
None, # threadAttributes
1, # bInheritHandles
win32process.CREATE_NEW_CONSOLE, # dwCreationFlags
None, # newEnvironment
None, # currentDirectory
si) # startupinfo
if login is not None:
hUser = logonUser(login)
win32security.ImpersonateLoggedOnUser(hUser)
procHandles = win32process.CreateProcessAsUser(hUser, *procArgs)
win32security.RevertToSelf()
else:
procHandles = win32process.CreateProcess(*procArgs)
self.hProcess, self.hThread, self.PId, self.TId = procHandles
def wait(self, mSec=None):
"""
Wait for process to finish or for specified number of
milliseconds to elapse.
"""
if mSec is None:
mSec = win32event.INFINITE
return win32event.WaitForSingleObject(self.hProcess, mSec)
def kill(self, gracePeriod=5000):
"""
Kill process. Try for an orderly shutdown via WM_CLOSE. If
still running after gracePeriod (5 sec. default), terminate.
"""
win32gui.EnumWindows(self.__close__, 0)
if self.wait(gracePeriod) != win32event.WAIT_OBJECT_0:
win32process.TerminateProcess(self.hProcess, 0)
win32api.Sleep(100) # wait for resources to be released
def __close__(self, hwnd, dummy):
"""
EnumWindows callback - sends WM_CLOSE to any window
owned by this process.
"""
TId, PId = win32process.GetWindowThreadProcessId(hwnd)
if PId == self.PId:
win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
def exitCode(self):
"""
Return process exit code.
"""
return win32process.GetExitCodeProcess(self.hProcess) | identifier_body |
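The winprocess rows above describe a small pywin32 wrapper: Process starts a child with redirected standard handles, wait() blocks with an optional timeout, kill() tries an orderly WM_CLOSE before TerminateProcess, and run() ties these together. A minimal usage sketch follows; it is not part of the original file, and the command, file name and 10-second timeout are assumptions for illustration (requires Windows, pywin32, and the module above saved as winprocess.py).
# Illustrative sketch, written in Python 2 to match the module above; not part of any dataset row.
import winprocess
listing = open('listing.txt', 'w+b')          # child stdout/stderr are captured in this file
try:
    # Hide the console window (show=0) and allow at most 10 seconds (mSec=10000);
    # run() raises WindowsError if the child has to be killed on timeout.
    code = winprocess.run('cmd.exe /c dir', show=0, mSec=10000,
                          stdout=listing, stderr=listing)
    print 'exit code:', code
finally:
    listing.close()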
aws.py | __metaclass__ = type
import os
from ..util import (
ApplicationError,
display,
is_shippable,
ConfigParser,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = self._create_ansible_core_ci()
if os.path.isfile(aci.ci_key):
return
if is_shippable():
return
super(AwsCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(AwsCloudProvider, self).setup()
aws_config_path = os.path.expanduser('~/.aws')
if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
display.sensitive.add(values['SECRET_KEY'])
display.sensitive.add(values['SECURITY_TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self):
"""
:rtype: AnsibleCoreCI
"""
return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class AwsCloudEnvironment(CloudEnvironment):
"""AWS cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
display.sensitive.add(ansible_vars.get('aws_secret_key'))
display.sensitive.add(ansible_vars.get('security_token'))
if 'aws_cleanup' not in ansible_vars:
ansible_vars['aws_cleanup'] = not self.managed
env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
callback_plugins=['aws_resource_actions'],
)
def on_failure(self, target, tries):
"""
:type target: TestTarget
:type tries: int
"""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name) | """AWS plugin for integration tests."""
from __future__ import (absolute_import, division, print_function) | random_line_split |
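In the aws.py rows, AwsCloudEnvironment.get_environment_config() reads the cloud config file and turns every key in its [default] section into an Ansible variable, masking the secret values before they can be logged. A rough sketch of that idea using the standard-library configparser directly; the file name and key names are assumptions, and the real plugin goes through ansible-test's own ConfigParser import and display.sensitive handling rather than this standalone code.
# Illustrative sketch only; file path and key names are assumed, not taken from ansible-test.
import configparser
parser = configparser.ConfigParser()
parser.read('cloud-config-aws.ini')                         # e.g. a file produced from the config template
ansible_vars = {'resource_prefix': 'ansible-test-42'}       # assumed prefix
ansible_vars.update(dict(parser.items('default')))          # every [default] key becomes a variable
# Mirror the plugin's intent of never printing secrets in the clear.
safe = {k: ('***' if k in ('aws_secret_key', 'security_token') else v)
        for k, v in ansible_vars.items()}
print(safe)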
|
aws.py | """AWS plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..util import (
ApplicationError,
display,
is_shippable,
ConfigParser,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = self._create_ansible_core_ci()
if os.path.isfile(aci.ci_key):
return
if is_shippable():
return
super(AwsCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(AwsCloudProvider, self).setup()
aws_config_path = os.path.expanduser('~/.aws')
if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
display.sensitive.add(values['SECRET_KEY'])
display.sensitive.add(values['SECURITY_TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def | (self):
"""
:rtype: AnsibleCoreCI
"""
return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class AwsCloudEnvironment(CloudEnvironment):
"""AWS cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
display.sensitive.add(ansible_vars.get('aws_secret_key'))
display.sensitive.add(ansible_vars.get('security_token'))
if 'aws_cleanup' not in ansible_vars:
ansible_vars['aws_cleanup'] = not self.managed
env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
callback_plugins=['aws_resource_actions'],
)
def on_failure(self, target, tries):
"""
:type target: TestTarget
:type tries: int
"""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)
| _create_ansible_core_ci | identifier_name |
aws.py | """AWS plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..util import (
ApplicationError,
display,
is_shippable,
ConfigParser,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = self._create_ansible_core_ci()
if os.path.isfile(aci.ci_key):
return
if is_shippable():
return
super(AwsCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(AwsCloudProvider, self).setup()
aws_config_path = os.path.expanduser('~/.aws')
if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
display.sensitive.add(values['SECRET_KEY'])
display.sensitive.add(values['SECURITY_TOKEN'])
config = self._populate_config_template(config, values)
self._write_config(config)
def _create_ansible_core_ci(self):
|
class AwsCloudEnvironment(CloudEnvironment):
"""AWS cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
display.sensitive.add(ansible_vars.get('aws_secret_key'))
display.sensitive.add(ansible_vars.get('security_token'))
if 'aws_cleanup' not in ansible_vars:
ansible_vars['aws_cleanup'] = not self.managed
env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
callback_plugins=['aws_resource_actions'],
)
def on_failure(self, target, tries):
"""
:type target: TestTarget
:type tries: int
"""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)
| """
:rtype: AnsibleCoreCI
"""
return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider) | identifier_body |
aws.py | """AWS plugin for integration tests."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ..util import (
ApplicationError,
display,
is_shippable,
ConfigParser,
)
from . import (
CloudProvider,
CloudEnvironment,
CloudEnvironmentConfig,
)
from ..core_ci import (
AnsibleCoreCI,
)
class AwsCloudProvider(CloudProvider):
"""AWS cloud provider plugin. Sets up cloud resources before delegation."""
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
aci = self._create_ansible_core_ci()
if os.path.isfile(aci.ci_key):
return
if is_shippable():
return
super(AwsCloudProvider, self).filter(targets, exclude)
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(AwsCloudProvider, self).setup()
aws_config_path = os.path.expanduser('~/.aws')
if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
if not self._use_static_config():
self._setup_dynamic()
def _setup_dynamic(self):
"""Request AWS credentials through the Ansible Core CI service."""
display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
config = self._read_config_template()
aci = self._create_ansible_core_ci()
response = aci.start()
if not self.args.explain:
|
self._write_config(config)
def _create_ansible_core_ci(self):
"""
:rtype: AnsibleCoreCI
"""
return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
class AwsCloudEnvironment(CloudEnvironment):
"""AWS cloud environment plugin. Updates integration test environment after delegation."""
def get_environment_config(self):
"""
:rtype: CloudEnvironmentConfig
"""
parser = ConfigParser()
parser.read(self.config_path)
ansible_vars = dict(
resource_prefix=self.resource_prefix,
)
ansible_vars.update(dict(parser.items('default')))
display.sensitive.add(ansible_vars.get('aws_secret_key'))
display.sensitive.add(ansible_vars.get('security_token'))
if 'aws_cleanup' not in ansible_vars:
ansible_vars['aws_cleanup'] = not self.managed
env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
return CloudEnvironmentConfig(
env_vars=env_vars,
ansible_vars=ansible_vars,
callback_plugins=['aws_resource_actions'],
)
def on_failure(self, target, tries):
"""
:type target: TestTarget
:type tries: int
"""
if not tries and self.managed:
display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)
| credentials = response['aws']['credentials']
values = dict(
ACCESS_KEY=credentials['access_key'],
SECRET_KEY=credentials['secret_key'],
SECURITY_TOKEN=credentials['session_token'],
REGION='us-east-1',
)
display.sensitive.add(values['SECRET_KEY'])
display.sensitive.add(values['SECURITY_TOKEN'])
config = self._populate_config_template(config, values) | conditional_block |
jinja_context.py | '''
Created on Jan 16, 2014
@author: sean
'''
from __future__ import absolute_import, division, print_function
import json
import os
from conda.compat import PY3
from .environ import get_dict as get_environ
_setuptools_data = None
def load_setuptools(setup_file='setup.py'):
global _setuptools_data
if _setuptools_data is None:
_setuptools_data = {}
def | (**kw):
_setuptools_data.update(kw)
import setuptools
#Add current directory to path
import sys
sys.path.append('.')
#Patch setuptools
setuptools_setup = setuptools.setup
setuptools.setup = setup
exec(open(setup_file).read())
setuptools.setup = setuptools_setup
del sys.path[-1]
return _setuptools_data
def load_npm():
# json module expects bytes in Python 2 and str in Python 3.
mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
with open('package.json', **mode_dict) as pkg:
return json.load(pkg)
def context_processor():
ctx = get_environ()
environ = dict(os.environ)
environ.update(get_environ())
ctx.update(load_setuptools=load_setuptools,
load_npm=load_npm,
environ=environ)
return ctx
| setup | identifier_name |
jinja_context.py | '''
Created on Jan 16, 2014
@author: sean
'''
from __future__ import absolute_import, division, print_function
import json
import os
from conda.compat import PY3
from .environ import get_dict as get_environ
_setuptools_data = None
def load_setuptools(setup_file='setup.py'): | _setuptools_data.update(kw)
import setuptools
#Add current directory to path
import sys
sys.path.append('.')
#Patch setuptools
setuptools_setup = setuptools.setup
setuptools.setup = setup
exec(open(setup_file).read())
setuptools.setup = setuptools_setup
del sys.path[-1]
return _setuptools_data
def load_npm():
# json module expects bytes in Python 2 and str in Python 3.
mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
with open('package.json', **mode_dict) as pkg:
return json.load(pkg)
def context_processor():
ctx = get_environ()
environ = dict(os.environ)
environ.update(get_environ())
ctx.update(load_setuptools=load_setuptools,
load_npm=load_npm,
environ=environ)
return ctx | global _setuptools_data
if _setuptools_data is None:
_setuptools_data = {}
def setup(**kw): | random_line_split |
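The jinja_context rows build a template context: load_setuptools() temporarily replaces setuptools.setup() to capture the keyword arguments a project's setup.py passes, load_npm() reads package.json, and context_processor() bundles both with the process environment. A minimal sketch of feeding that context to a Jinja2 template; the template text, the jinja2 import and the flat module name are assumptions for illustration only.
# Illustrative sketch only; assumes the module above is importable as jinja_context
# and that a package.json exists in the working directory.
import jinja2
from jinja_context import context_processor
template = jinja2.Template(
    "name: {{ load_npm()['name'] }}\n"
    "version: {{ load_npm()['version'] }}\n"
    "prefix: {{ environ.get('PREFIX', '') }}\n"
)
print(template.render(**context_processor()))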
jinja_context.py | '''
Created on Jan 16, 2014
@author: sean
'''
from __future__ import absolute_import, division, print_function
import json
import os
from conda.compat import PY3
from .environ import get_dict as get_environ
_setuptools_data = None
def load_setuptools(setup_file='setup.py'):
global _setuptools_data
if _setuptools_data is None:
|
return _setuptools_data
def load_npm():
# json module expects bytes in Python 2 and str in Python 3.
mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
with open('package.json', **mode_dict) as pkg:
return json.load(pkg)
def context_processor():
ctx = get_environ()
environ = dict(os.environ)
environ.update(get_environ())
ctx.update(load_setuptools=load_setuptools,
load_npm=load_npm,
environ=environ)
return ctx
| _setuptools_data = {}
def setup(**kw):
_setuptools_data.update(kw)
import setuptools
#Add current directory to path
import sys
sys.path.append('.')
#Patch setuptools
setuptools_setup = setuptools.setup
setuptools.setup = setup
exec(open(setup_file).read())
setuptools.setup = setuptools_setup
del sys.path[-1] | conditional_block |
jinja_context.py | '''
Created on Jan 16, 2014
@author: sean
'''
from __future__ import absolute_import, division, print_function
import json
import os
from conda.compat import PY3
from .environ import get_dict as get_environ
_setuptools_data = None
def load_setuptools(setup_file='setup.py'):
global _setuptools_data
if _setuptools_data is None:
_setuptools_data = {}
def setup(**kw):
|
import setuptools
#Add current directory to path
import sys
sys.path.append('.')
#Patch setuptools
setuptools_setup = setuptools.setup
setuptools.setup = setup
exec(open(setup_file).read())
setuptools.setup = setuptools_setup
del sys.path[-1]
return _setuptools_data
def load_npm():
# json module expects bytes in Python 2 and str in Python 3.
mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
with open('package.json', **mode_dict) as pkg:
return json.load(pkg)
def context_processor():
ctx = get_environ()
environ = dict(os.environ)
environ.update(get_environ())
ctx.update(load_setuptools=load_setuptools,
load_npm=load_npm,
environ=environ)
return ctx
| _setuptools_data.update(kw) | identifier_body |
ipdl-analyze.rs | use std::env;
use std::path::Path;
use std::path::PathBuf;
use std::fs::File;
use std::io::Write;
extern crate tools;
extern crate ipdl_parser;
extern crate getopts;
use getopts::Options;
use tools::file_format::analysis::{read_analysis, read_target, WithLocation, AnalysisTarget, AnalysisKind};
use ipdl_parser::parser;
use ipdl_parser::ast;
type TargetAnalysis = Vec<WithLocation<Vec<AnalysisTarget>>>;
fn | () -> Options {
let mut opts = Options::new();
opts.optmulti("I", "include",
"Additional directory to search for included protocol specifications",
"DIR");
opts.reqopt("d", "outheaders-dir",
"Directory into which C++ headers analysis data is location.",
"HDR_DIR");
opts.reqopt("b", "base-input-prefix",
"Base directory where IPDL input files are found.",
"BASE_DIR");
opts.reqopt("a", "analysis-prefix",
"Base directory where analysis output files are found.",
"ANALYSIS_DIR");
opts
}
fn header_file_name(outheaders_dir: &str, ns: &ast::Namespace, parent_or_child: &str) -> String {
format!("{}/{}/{}{}.h",
outheaders_dir,
ns.namespaces.clone().join("/"),
ns.name.id,
parent_or_child)
}
fn mangle_simple(s: &str) -> String {
format!("{}{}", s.len(), s)
}
fn mangle_nested_name(ns: &[String], protocol: &str, name: &str) -> String {
format!("_ZN{}{}{}E",
ns.iter().map(|id| mangle_simple(&id)).collect::<Vec<_>>().join(""),
mangle_simple(protocol),
mangle_simple(name))
}
fn find_analysis<'a>(analysis: &'a TargetAnalysis, mangled: &str) -> Option<&'a AnalysisTarget>
{
for datum in analysis {
for piece in &datum.data {
if piece.kind == AnalysisKind::Decl && piece.sym.contains(mangled) {
return Some(&piece);
}
}
}
println!("No analysis target found for {}", mangled);
return None
}
fn output_data(outputf: &mut File, locstr: &str, datum: &AnalysisTarget) {
write!(outputf, r#"{{"loc": "{}", "target": 1, "kind": "idl", "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
write!(outputf, r#"{{"loc": "{}", "source": 1, "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
}
fn output_send_recv(outputf: &mut File,
locstr: &str,
protocol: &ast::Namespace,
message: &ast::MessageDecl,
is_ctor: bool,
send_side: &str, send_analysis: &TargetAnalysis,
recv_side: &str, recv_analysis: &TargetAnalysis)
{
let send_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Call" } else { "Send" };
let recv_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Answer" } else { "Recv" };
let ctor_suffix = if is_ctor { "Constructor" } else { "" };
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, send_side),
&format!("{}{}{}", send_prefix, message.name.id, ctor_suffix));
if let Some(send_datum) = find_analysis(send_analysis, &mangled) {
output_data(outputf, &locstr, &send_datum);
}
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, recv_side),
&format!("{}{}{}", recv_prefix, message.name.id, ctor_suffix));
if let Some(recv_datum) = find_analysis(recv_analysis, &mangled) {
output_data(outputf, &locstr, &recv_datum);
}
}
fn main() {
let args : Vec<String> = env::args().collect();
let opts = get_options_parser();
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(f) => { panic!(f.to_string()) },
};
let mut include_dirs = Vec::new();
for i in matches.opt_strs("I") {
include_dirs.push(PathBuf::from(i))
}
let outheaders_dir = matches.opt_str("d").unwrap();
let base_dir = matches.opt_str("b").unwrap();
let analysis_dir = matches.opt_str("a").unwrap();
let base_path = Path::new(&base_dir);
let analysis_path = Path::new(&analysis_dir);
let mut file_names = Vec::new();
for f in matches.free {
file_names.push(PathBuf::from(f));
}
let maybe_tus = parser::parse(&include_dirs, file_names);
if maybe_tus.is_none() {
println!("Specification could not be parsed.");
return;
}
let tus = maybe_tus.unwrap();
for (_, tu) in tus {
println!("Analyzing {:?}", tu.file_name);
let path = tu.file_name.as_path();
let relative = path.strip_prefix(base_path).unwrap();
let absolute = analysis_path.join(relative);
let mut outputf = File::create(absolute).unwrap();
if let Some((ns, protocol)) = tu.protocol {
let parent_fname = header_file_name(&outheaders_dir, &ns, "Parent");
let parent_analysis = read_analysis(&parent_fname, &read_target);
let child_fname = header_file_name(&outheaders_dir, &ns, "Child");
let child_analysis = read_analysis(&child_fname, &read_target);
let is_toplevel = protocol.managers.len() == 0;
for message in protocol.messages {
let loc = &message.name.loc;
let locstr = format!("{}:{}-{}", loc.lineno, loc.colno, loc.colno + message.name.id.len());
if is_toplevel && message.name.id == "__delete__" {
continue;
}
let is_ctor = protocol.manages.iter().any(|e| e.id == message.name.id);
if message.direction == ast::Direction::ToChild || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Parent", &parent_analysis, "Child", &child_analysis);
}
if message.direction == ast::Direction::ToParent || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Child", &child_analysis, "Parent", &parent_analysis);
}
}
}
}
}
| get_options_parser | identifier_name |
ipdl-analyze.rs | use std::env;
use std::path::Path;
use std::path::PathBuf;
use std::fs::File;
use std::io::Write;
extern crate tools;
extern crate ipdl_parser;
extern crate getopts;
use getopts::Options;
use tools::file_format::analysis::{read_analysis, read_target, WithLocation, AnalysisTarget, AnalysisKind};
use ipdl_parser::parser;
use ipdl_parser::ast;
type TargetAnalysis = Vec<WithLocation<Vec<AnalysisTarget>>>;
fn get_options_parser() -> Options {
let mut opts = Options::new();
opts.optmulti("I", "include",
"Additional directory to search for included protocol specifications",
"DIR");
opts.reqopt("d", "outheaders-dir",
"Directory into which C++ headers analysis data is location.",
"HDR_DIR");
opts.reqopt("b", "base-input-prefix",
"Base directory where IPDL input files are found.",
"BASE_DIR");
opts.reqopt("a", "analysis-prefix",
"Base directory where analysis output files are found.",
"ANALYSIS_DIR");
opts
}
fn header_file_name(outheaders_dir: &str, ns: &ast::Namespace, parent_or_child: &str) -> String {
format!("{}/{}/{}{}.h",
outheaders_dir,
ns.namespaces.clone().join("/"),
ns.name.id,
parent_or_child)
}
fn mangle_simple(s: &str) -> String |
fn mangle_nested_name(ns: &[String], protocol: &str, name: &str) -> String {
format!("_ZN{}{}{}E",
ns.iter().map(|id| mangle_simple(&id)).collect::<Vec<_>>().join(""),
mangle_simple(protocol),
mangle_simple(name))
}
fn find_analysis<'a>(analysis: &'a TargetAnalysis, mangled: &str) -> Option<&'a AnalysisTarget>
{
for datum in analysis {
for piece in &datum.data {
if piece.kind == AnalysisKind::Decl && piece.sym.contains(mangled) {
return Some(&piece);
}
}
}
println!("No analysis target found for {}", mangled);
return None
}
fn output_data(outputf: &mut File, locstr: &str, datum: &AnalysisTarget) {
write!(outputf, r#"{{"loc": "{}", "target": 1, "kind": "idl", "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
write!(outputf, r#"{{"loc": "{}", "source": 1, "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
}
fn output_send_recv(outputf: &mut File,
locstr: &str,
protocol: &ast::Namespace,
message: &ast::MessageDecl,
is_ctor: bool,
send_side: &str, send_analysis: &TargetAnalysis,
recv_side: &str, recv_analysis: &TargetAnalysis)
{
let send_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Call" } else { "Send" };
let recv_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Answer" } else { "Recv" };
let ctor_suffix = if is_ctor { "Constructor" } else { "" };
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, send_side),
&format!("{}{}{}", send_prefix, message.name.id, ctor_suffix));
if let Some(send_datum) = find_analysis(send_analysis, &mangled) {
output_data(outputf, &locstr, &send_datum);
}
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, recv_side),
&format!("{}{}{}", recv_prefix, message.name.id, ctor_suffix));
if let Some(recv_datum) = find_analysis(recv_analysis, &mangled) {
output_data(outputf, &locstr, &recv_datum);
}
}
fn main() {
let args : Vec<String> = env::args().collect();
let opts = get_options_parser();
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(f) => { panic!(f.to_string()) },
};
let mut include_dirs = Vec::new();
for i in matches.opt_strs("I") {
include_dirs.push(PathBuf::from(i))
}
let outheaders_dir = matches.opt_str("d").unwrap();
let base_dir = matches.opt_str("b").unwrap();
let analysis_dir = matches.opt_str("a").unwrap();
let base_path = Path::new(&base_dir);
let analysis_path = Path::new(&analysis_dir);
let mut file_names = Vec::new();
for f in matches.free {
file_names.push(PathBuf::from(f));
}
let maybe_tus = parser::parse(&include_dirs, file_names);
if maybe_tus.is_none() {
println!("Specification could not be parsed.");
return;
}
let tus = maybe_tus.unwrap();
for (_, tu) in tus {
println!("Analyzing {:?}", tu.file_name);
let path = tu.file_name.as_path();
let relative = path.strip_prefix(base_path).unwrap();
let absolute = analysis_path.join(relative);
let mut outputf = File::create(absolute).unwrap();
if let Some((ns, protocol)) = tu.protocol {
let parent_fname = header_file_name(&outheaders_dir, &ns, "Parent");
let parent_analysis = read_analysis(&parent_fname, &read_target);
let child_fname = header_file_name(&outheaders_dir, &ns, "Child");
let child_analysis = read_analysis(&child_fname, &read_target);
let is_toplevel = protocol.managers.len() == 0;
for message in protocol.messages {
let loc = &message.name.loc;
let locstr = format!("{}:{}-{}", loc.lineno, loc.colno, loc.colno + message.name.id.len());
if is_toplevel && message.name.id == "__delete__" {
continue;
}
let is_ctor = protocol.manages.iter().any(|e| e.id == message.name.id);
if message.direction == ast::Direction::ToChild || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Parent", &parent_analysis, "Child", &child_analysis);
}
if message.direction == ast::Direction::ToParent || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Child", &child_analysis, "Parent", &parent_analysis);
}
}
}
}
}
| {
format!("{}{}", s.len(), s)
} | identifier_body |
ipdl-analyze.rs | use std::env;
use std::path::Path;
use std::path::PathBuf;
use std::fs::File;
use std::io::Write;
extern crate tools;
extern crate ipdl_parser;
extern crate getopts;
use getopts::Options;
use tools::file_format::analysis::{read_analysis, read_target, WithLocation, AnalysisTarget, AnalysisKind};
use ipdl_parser::parser;
use ipdl_parser::ast;
type TargetAnalysis = Vec<WithLocation<Vec<AnalysisTarget>>>;
fn get_options_parser() -> Options {
let mut opts = Options::new();
opts.optmulti("I", "include",
"Additional directory to search for included protocol specifications",
"DIR");
opts.reqopt("d", "outheaders-dir",
"Directory into which C++ headers analysis data is location.",
"HDR_DIR");
opts.reqopt("b", "base-input-prefix",
"Base directory where IPDL input files are found.",
"BASE_DIR");
opts.reqopt("a", "analysis-prefix",
"Base directory where analysis output files are found.",
"ANALYSIS_DIR");
opts
}
fn header_file_name(outheaders_dir: &str, ns: &ast::Namespace, parent_or_child: &str) -> String {
format!("{}/{}/{}{}.h",
outheaders_dir,
ns.namespaces.clone().join("/"),
ns.name.id,
parent_or_child)
}
fn mangle_simple(s: &str) -> String {
format!("{}{}", s.len(), s)
}
fn mangle_nested_name(ns: &[String], protocol: &str, name: &str) -> String {
format!("_ZN{}{}{}E",
ns.iter().map(|id| mangle_simple(&id)).collect::<Vec<_>>().join(""),
mangle_simple(protocol),
mangle_simple(name))
}
fn find_analysis<'a>(analysis: &'a TargetAnalysis, mangled: &str) -> Option<&'a AnalysisTarget>
{
for datum in analysis {
for piece in &datum.data {
if piece.kind == AnalysisKind::Decl && piece.sym.contains(mangled) {
return Some(&piece);
}
}
}
println!("No analysis target found for {}", mangled);
return None
}
fn output_data(outputf: &mut File, locstr: &str, datum: &AnalysisTarget) {
write!(outputf, r#"{{"loc": "{}", "target": 1, "kind": "idl", "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
write!(outputf, r#"{{"loc": "{}", "source": 1, "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
}
fn output_send_recv(outputf: &mut File,
locstr: &str,
protocol: &ast::Namespace,
message: &ast::MessageDecl,
is_ctor: bool,
send_side: &str, send_analysis: &TargetAnalysis,
recv_side: &str, recv_analysis: &TargetAnalysis)
{
let send_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Call" } else { "Send" };
let recv_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Answer" } else | ;
let ctor_suffix = if is_ctor { "Constructor" } else { "" };
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, send_side),
&format!("{}{}{}", send_prefix, message.name.id, ctor_suffix));
if let Some(send_datum) = find_analysis(send_analysis, &mangled) {
output_data(outputf, &locstr, &send_datum);
}
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, recv_side),
&format!("{}{}{}", recv_prefix, message.name.id, ctor_suffix));
if let Some(recv_datum) = find_analysis(recv_analysis, &mangled) {
output_data(outputf, &locstr, &recv_datum);
}
}
fn main() {
let args : Vec<String> = env::args().collect();
let opts = get_options_parser();
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(f) => { panic!(f.to_string()) },
};
let mut include_dirs = Vec::new();
for i in matches.opt_strs("I") {
include_dirs.push(PathBuf::from(i))
}
let outheaders_dir = matches.opt_str("d").unwrap();
let base_dir = matches.opt_str("b").unwrap();
let analysis_dir = matches.opt_str("a").unwrap();
let base_path = Path::new(&base_dir);
let analysis_path = Path::new(&analysis_dir);
let mut file_names = Vec::new();
for f in matches.free {
file_names.push(PathBuf::from(f));
}
let maybe_tus = parser::parse(&include_dirs, file_names);
if maybe_tus.is_none() {
println!("Specification could not be parsed.");
return;
}
let tus = maybe_tus.unwrap();
for (_, tu) in tus {
println!("Analyzing {:?}", tu.file_name);
let path = tu.file_name.as_path();
let relative = path.strip_prefix(base_path).unwrap();
let absolute = analysis_path.join(relative);
let mut outputf = File::create(absolute).unwrap();
if let Some((ns, protocol)) = tu.protocol {
let parent_fname = header_file_name(&outheaders_dir, &ns, "Parent");
let parent_analysis = read_analysis(&parent_fname, &read_target);
let child_fname = header_file_name(&outheaders_dir, &ns, "Child");
let child_analysis = read_analysis(&child_fname, &read_target);
let is_toplevel = protocol.managers.len() == 0;
for message in protocol.messages {
let loc = &message.name.loc;
let locstr = format!("{}:{}-{}", loc.lineno, loc.colno, loc.colno + message.name.id.len());
if is_toplevel && message.name.id == "__delete__" {
continue;
}
let is_ctor = protocol.manages.iter().any(|e| e.id == message.name.id);
if message.direction == ast::Direction::ToChild || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Parent", &parent_analysis, "Child", &child_analysis);
}
if message.direction == ast::Direction::ToParent || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Child", &child_analysis, "Parent", &parent_analysis);
}
}
}
}
}
| { "Recv" } | conditional_block |
ipdl-analyze.rs | use std::env;
use std::path::Path;
use std::path::PathBuf;
use std::fs::File;
use std::io::Write;
extern crate tools;
extern crate ipdl_parser;
extern crate getopts;
use getopts::Options;
use tools::file_format::analysis::{read_analysis, read_target, WithLocation, AnalysisTarget, AnalysisKind};
use ipdl_parser::parser;
use ipdl_parser::ast;
type TargetAnalysis = Vec<WithLocation<Vec<AnalysisTarget>>>;
fn get_options_parser() -> Options {
let mut opts = Options::new();
opts.optmulti("I", "include",
"Additional directory to search for included protocol specifications",
"DIR");
opts.reqopt("d", "outheaders-dir",
"Directory into which C++ headers analysis data is location.",
"HDR_DIR");
opts.reqopt("b", "base-input-prefix",
"Base directory where IPDL input files are found.",
"BASE_DIR");
opts.reqopt("a", "analysis-prefix",
"Base directory where analysis output files are found.",
"ANALYSIS_DIR");
opts
}
fn header_file_name(outheaders_dir: &str, ns: &ast::Namespace, parent_or_child: &str) -> String {
format!("{}/{}/{}{}.h",
outheaders_dir,
ns.namespaces.clone().join("/"),
ns.name.id,
parent_or_child)
}
fn mangle_simple(s: &str) -> String {
format!("{}{}", s.len(), s)
}
fn mangle_nested_name(ns: &[String], protocol: &str, name: &str) -> String {
format!("_ZN{}{}{}E",
ns.iter().map(|id| mangle_simple(&id)).collect::<Vec<_>>().join(""),
mangle_simple(protocol),
mangle_simple(name))
}
fn find_analysis<'a>(analysis: &'a TargetAnalysis, mangled: &str) -> Option<&'a AnalysisTarget>
{
for datum in analysis {
for piece in &datum.data {
if piece.kind == AnalysisKind::Decl && piece.sym.contains(mangled) {
return Some(&piece);
}
}
}
println!("No analysis target found for {}", mangled);
return None
}
fn output_data(outputf: &mut File, locstr: &str, datum: &AnalysisTarget) {
write!(outputf, r#"{{"loc": "{}", "target": 1, "kind": "idl", "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
write!(outputf, r#"{{"loc": "{}", "source": 1, "pretty": "{}", "sym": "{}"}}"#,
locstr, datum.pretty, datum.sym).unwrap();
write!(outputf, "\n").unwrap();
}
fn output_send_recv(outputf: &mut File,
locstr: &str,
protocol: &ast::Namespace,
message: &ast::MessageDecl,
is_ctor: bool,
send_side: &str, send_analysis: &TargetAnalysis,
recv_side: &str, recv_analysis: &TargetAnalysis)
{
let send_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Call" } else { "Send" };
let recv_prefix = if message.send_semantics == ast::SendSemantics::Intr { "Answer" } else { "Recv" };
let ctor_suffix = if is_ctor { "Constructor" } else { "" };
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, send_side),
&format!("{}{}{}", send_prefix, message.name.id, ctor_suffix));
if let Some(send_datum) = find_analysis(send_analysis, &mangled) {
output_data(outputf, &locstr, &send_datum);
}
let mangled = mangle_nested_name(&protocol.namespaces,
&format!("{}{}", protocol.name.id, recv_side),
&format!("{}{}{}", recv_prefix, message.name.id, ctor_suffix));
if let Some(recv_datum) = find_analysis(recv_analysis, &mangled) {
output_data(outputf, &locstr, &recv_datum);
}
}
fn main() {
let args : Vec<String> = env::args().collect();
let opts = get_options_parser();
let matches = match opts.parse(&args[1..]) {
Ok(m) => { m },
Err(f) => { panic!(f.to_string()) },
};
let mut include_dirs = Vec::new();
for i in matches.opt_strs("I") {
include_dirs.push(PathBuf::from(i))
}
let outheaders_dir = matches.opt_str("d").unwrap();
let base_dir = matches.opt_str("b").unwrap();
let analysis_dir = matches.opt_str("a").unwrap(); | let base_path = Path::new(&base_dir);
let analysis_path = Path::new(&analysis_dir);
let mut file_names = Vec::new();
for f in matches.free {
file_names.push(PathBuf::from(f));
}
let maybe_tus = parser::parse(&include_dirs, file_names);
if maybe_tus.is_none() {
println!("Specification could not be parsed.");
return;
}
let tus = maybe_tus.unwrap();
for (_, tu) in tus {
println!("Analyzing {:?}", tu.file_name);
let path = tu.file_name.as_path();
let relative = path.strip_prefix(base_path).unwrap();
let absolute = analysis_path.join(relative);
let mut outputf = File::create(absolute).unwrap();
if let Some((ns, protocol)) = tu.protocol {
let parent_fname = header_file_name(&outheaders_dir, &ns, "Parent");
let parent_analysis = read_analysis(&parent_fname, &read_target);
let child_fname = header_file_name(&outheaders_dir, &ns, "Child");
let child_analysis = read_analysis(&child_fname, &read_target);
let is_toplevel = protocol.managers.len() == 0;
for message in protocol.messages {
let loc = &message.name.loc;
let locstr = format!("{}:{}-{}", loc.lineno, loc.colno, loc.colno + message.name.id.len());
if is_toplevel && message.name.id == "__delete__" {
continue;
}
let is_ctor = protocol.manages.iter().any(|e| e.id == message.name.id);
if message.direction == ast::Direction::ToChild || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Parent", &parent_analysis, "Child", &child_analysis);
}
if message.direction == ast::Direction::ToParent || message.direction == ast::Direction::ToParentOrChild {
output_send_recv(&mut outputf, &locstr, &ns, &message, is_ctor,
"Child", &child_analysis, "Parent", &parent_analysis);
}
}
}
}
} | random_line_split |