| file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
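The rows below are fill-in-the-middle (FIM) training samples flattened out of a table view: each cell (`file_name`, `prefix`, `suffix`, `middle`, `fim_type`) is printed on its own lines, with bare `|` lines separating the cells. The four `fim_type` classes that occur are `random_line_split`, `identifier_name`, `identifier_body`, and `conditional_block`; each is annotated at its first occurrence below. Assuming the usual FIM convention that `prefix + middle + suffix` reproduces the original file, a row can be consumed as follows (a minimal sketch; the `<PRE>`/`<SUF>`/`<MID>` sentinels are illustrative placeholders, not tokens from this dataset):

```python
# Minimal sketch under the stated assumption that concatenating the three
# spans reproduces the source file.
def reconstruct(row: dict) -> str:
    """Rejoin a FIM row into the original source text."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row: dict) -> str:
    """Render a row in a prefix-suffix-middle prompt layout; the model's
    target completion is row["middle"]."""
    return "<PRE>" + row["prefix"] + "<SUF>" + row["suffix"] + "<MID>"
```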
currency_getter.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 CamptoCamp. All rights reserved.
# @author Nicolas Bessi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
_logger = logging.getLogger(__name__)
class AbstractClassError(Exception):
def __str__(self):
return 'Abstract Class'
def __repr__(self):
return 'Abstract Class'
class AbstractMethodError(Exception):
def __str__(self):
return 'Abstract Method'
def __repr__(self):
return 'Abstract Method'
class UnknowClassError(Exception):
def __str__(self):
return 'Unknown Class'
def __repr__(self):
return 'Unknown Class'
class UnsuportedCurrencyError(Exception):
def __init__(self, value):
self.curr = value
def __str__(self):
return 'Unsupported currency %s' % self.curr
def __repr__(self):
return 'Unsupported currency %s' % self.curr
class Currency_getter_factory():
"""Factory pattern class that will return
a currency getter class based on the name passed
to the register method
"""
def register(self, class_name):
allowed = [
'CH_ADMIN_getter',
'PL_NBP_getter',
'ECB_getter',
'GOOGLE_getter',
'YAHOO_getter',
'MX_BdM_getter',
|
'BG_SIBANK_getter',
'BG_UNICRDT_getter',
]
if class_name in allowed:
exec "from .update_service_%s import %s" % (class_name.replace('_getter', ''), class_name)
class_def = eval(class_name)
_logger.info("from .update_service_%s import %s: class_def %s:" % (class_name.replace('_getter', ''), class_name, class_def))
return class_def()
else:
raise UnknowClassError
|
'CA_BOC_getter',
'RO_BNR_getter',
'BG_CUSTOMS_getter',
|
random_line_split
|
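`random_line_split` (the row above) appears to cut at random line boundaries: the `middle` is the span of three getter names excised from the `allowed` list, and the `suffix` resumes at the very next line. A hedged sketch of such a splitter; the dataset's actual generator is not shown here, so the RNG policy below is an assumption:

```python
import random

def random_line_split(text: str, rng: random.Random) -> tuple[str, str, str]:
    """Cut text into (prefix, middle, suffix) at random line boundaries."""
    lines = text.splitlines(keepends=True)
    i = rng.randrange(len(lines) + 1)      # first line of the middle span
    j = rng.randrange(i, len(lines) + 1)   # one past its last line
    return "".join(lines[:i]), "".join(lines[i:j]), "".join(lines[j:])
```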
currency_getter.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 CamptoCamp. All rights reserved.
# @author Nicolas Bessi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
_logger = logging.getLogger(__name__)
class AbstractClassError(Exception):
def __str__(self):
return 'Abstract Class'
def __repr__(self):
return 'Abstract Class'
class AbstractMethodError(Exception):
def
|
(self):
return 'Abstract Method'
def __repr__(self):
return 'Abstract Method'
class UnknowClassError(Exception):
def __str__(self):
return 'Unknown Class'
def __repr__(self):
return 'Unknown Class'
class UnsuportedCurrencyError(Exception):
def __init__(self, value):
self.curr = value
def __str__(self):
return 'Unsupported currency %s' % self.curr
def __repr__(self):
return 'Unsupported currency %s' % self.curr
class Currency_getter_factory():
"""Factory pattern class that will return
a currency getter class based on the name passed
to the register method
"""
def register(self, class_name):
allowed = [
'CH_ADMIN_getter',
'PL_NBP_getter',
'ECB_getter',
'GOOGLE_getter',
'YAHOO_getter',
'MX_BdM_getter',
'CA_BOC_getter',
'RO_BNR_getter',
'BG_CUSTOMS_getter',
'BG_SIBANK_getter',
'BG_UNICRDT_getter',
]
if class_name in allowed:
exec "from .update_service_%s import %s" % (class_name.replace('_getter', ''), class_name)
class_def = eval(class_name)
_logger.info("from .update_service_%s import %s: class_def %s:" % (class_name.replace('_getter', ''), class_name, class_def))
return class_def()
else:
raise UnknowClassError
|
__str__
|
identifier_name
|
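`identifier_name` masks a single identifier: in the row above the `middle` is just `__str__`, the method name cut out of `def __str__(self):`. For Python sources this can be done with the tokenizer (a hedged sketch; the function name and selection policy are mine, not the dataset's):

```python
import io
import keyword
import random
import tokenize

def identifier_name_split(source: str, rng: random.Random) -> tuple[str, str, str]:
    """Pick one non-keyword NAME token and split the source around it."""
    names = [t for t in tokenize.generate_tokens(io.StringIO(source).readline)
             if t.type == tokenize.NAME and not keyword.iskeyword(t.string)]
    tok = rng.choice(names)
    lines = source.splitlines(keepends=True)
    # Convert the token's (1-based row, column) start to a flat offset.
    start = sum(len(l) for l in lines[:tok.start[0] - 1]) + tok.start[1]
    return source[:start], tok.string, source[start + len(tok.string):]
```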
currency_getter.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 CamptoCamp. All rights reserved.
# @author Nicolas Bessi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
_logger = logging.getLogger(__name__)
class AbstractClassError(Exception):
def __str__(self):
return 'Abstract Class'
def __repr__(self):
return 'Abstract Class'
class AbstractMethodError(Exception):
def __str__(self):
return 'Abstract Method'
def __repr__(self):
return 'Abstract Method'
class UnknowClassError(Exception):
def __str__(self):
return 'Unknown Class'
def __repr__(self):
|
class UnsuportedCurrencyError(Exception):
def __init__(self, value):
self.curr = value
def __str__(self):
return 'Unsupported currency %s' % self.curr
def __repr__(self):
return 'Unsupported currency %s' % self.curr
class Currency_getter_factory():
"""Factory pattern class that will return
a currency getter class based on the name passed
to the register method
"""
def register(self, class_name):
allowed = [
'CH_ADMIN_getter',
'PL_NBP_getter',
'ECB_getter',
'GOOGLE_getter',
'YAHOO_getter',
'MX_BdM_getter',
'CA_BOC_getter',
'RO_BNR_getter',
'BG_CUSTOMS_getter',
'BG_SIBANK_getter',
'BG_UNICRDT_getter',
]
if class_name in allowed:
exec "from .update_service_%s import %s" % (class_name.replace('_getter', ''), class_name)
class_def = eval(class_name)
_logger.info("from .update_service_%s import %s: class_def %s:" % (class_name.replace('_getter', ''), class_name, class_def))
return class_def()
else:
raise UnknowClassError
|
return 'Unknown Class'
|
identifier_body
|
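`identifier_body` holds out the body of a definition: the `middle` above is `return 'Unknown Class'`, the body of `UnknowClassError.__repr__`, while the `def` line stays in the prefix. A hedged `ast`-based sketch for the Python case (the dataset also contains JS/TS samples, so its real extractor is presumably syntax-aware per language):

```python
import ast
import random

def identifier_body_split(source: str, rng: random.Random) -> tuple[str, str, str]:
    """Cut out the span covered by one function's body statements."""
    funcs = [n for n in ast.walk(ast.parse(source))
             if isinstance(n, ast.FunctionDef)]
    fn = rng.choice(funcs)
    lines = source.splitlines(keepends=True)
    # Flat offsets of the first body statement's start and the last one's end.
    start = sum(len(l) for l in lines[:fn.body[0].lineno - 1]) + fn.body[0].col_offset
    end = sum(len(l) for l in lines[:fn.body[-1].end_lineno - 1]) + fn.body[-1].end_col_offset
    return source[:start], source[start:end], source[end:]
```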
$autoplay.js
|
/**
* @file Autoplay plugin
* @import widget/slider/slider.js
*/
(function( gmu, $ ) {
$.extend( true, gmu.Slider, {
options: {
/**
* @property {Boolean} [autoPlay=true] Whether to enable autoplay
* @namespace options
* @for Slider
* @uses Slider.autoplay
*/
autoPlay: true,
/**
* @property {Number} [interval=4000] Autoplay interval in milliseconds
* @namespace options
* @for Slider
* @uses Slider.autoplay
*/
interval: 4000
}
} );
/**
* Autoplay plugin
* @class autoplay
* @namespace Slider
* @pluginfor Slider
*/
gmu.Slider.register( 'autoplay', {
_init: function() {
var me = this;
me.on( 'slideend ready', me.resume )
// clear the timer
.on( 'destory', me.stop );
// avoid auto-advancing while the user is swiping
me.getEl()
.on( 'touchstart' + me.eventNs, $.proxy( me.stop, me ) )
.on( 'touchend' + me.eventNs, $.proxy( me.resume, me ) );
},
/**
* Resume autoplay.
* @method resume
* @chainable
* @return {self} Returns itself
* @for Slider
* @uses Slider.autoplay
*/
resume: function() {
var me = this,
opts = me._options;
if ( opts.autoPlay && !me._timer ) {
me._timer = setTimeout( function() {
me.slideTo( me.index + 1 );
me._timer = null;
}, opts.interval );
}
return me;
},
/**
* Stop autoplay
* @method stop
* @chainable
* @return {self} Returns itself
* @for Slider
* @uses Slider.autoplay
*/
stop: function() {
var me = this;
if ( me._timer ) {
clearTimeout( me._timer );
me._timer = null;
}
return me;
}
}
|
);
})( gmu, gmu.$ );
|
conditional_block
|
|
$autoplay.js
|
/**
* @file Autoplay plugin
* @import widget/slider/slider.js
*/
(function( gmu, $ ) {
$.extend( true, gmu.Slider, {
options: {
/**
|
* @namespace options
* @for Slider
* @uses Slider.autoplay
*/
autoPlay: true,
/**
* @property {Number} [interval=4000] Autoplay interval in milliseconds
* @namespace options
* @for Slider
* @uses Slider.autoplay
*/
interval: 4000
}
} );
/**
* Autoplay plugin
* @class autoplay
* @namespace Slider
* @pluginfor Slider
*/
gmu.Slider.register( 'autoplay', {
_init: function() {
var me = this;
me.on( 'slideend ready', me.resume )
// clear the timer
.on( 'destory', me.stop );
// avoid auto-advancing while the user is swiping
me.getEl()
.on( 'touchstart' + me.eventNs, $.proxy( me.stop, me ) )
.on( 'touchend' + me.eventNs, $.proxy( me.resume, me ) );
},
/**
* Resume autoplay.
* @method resume
* @chainable
* @return {self} Returns itself
* @for Slider
* @uses Slider.autoplay
*/
resume: function() {
var me = this,
opts = me._options;
if ( opts.autoPlay && !me._timer ) {
me._timer = setTimeout( function() {
me.slideTo( me.index + 1 );
me._timer = null;
}, opts.interval );
}
return me;
},
/**
* Stop autoplay
* @method stop
* @chainable
* @return {self} Returns itself
* @for Slider
* @uses Slider.autoplay
*/
stop: function() {
var me = this;
if ( me._timer ) {
clearTimeout( me._timer );
me._timer = null;
}
return me;
}
} );
})( gmu, gmu.$ );
|
* @property {Boolean} [autoPlay=true] Whether to enable autoplay
|
random_line_split
|
index.ts
|
/*******************************************************************************
* ___ _ ____ ____
* / _ \ _ _ ___ ___| |_| _ \| __ )
* | | | | | | |/ _ \/ __| __| | | | _ \
* | |_| | |_| | __/\__ \ |_| |_| | |_) |
* \__\_\\__,_|\___||___/\__|____/|____/
*
* Copyright (c) 2014-2019 Appsicle
* Copyright (c) 2019-2022 QuestDB
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
*
******************************************************************************/
import { css, keyframes } from "styled-components"
const spin = keyframes`
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
`
export const spinAnimation = css`
animation: ${spin} 1.5s cubic-bezier(0.62, 0.28, 0.23, 0.99) infinite;
`
|
* See the License for the specific language governing permissions and
* limitations under the License.
|
random_line_split
|
swipe-back.js
|
import { assign, swipeShouldReset } from '../util/util';
import { GESTURE_GO_BACK_SWIPE } from '../gestures/gesture-controller';
import { SlideEdgeGesture } from '../gestures/slide-edge-gesture';
import { NativeRafDebouncer } from '../util/debouncer';
export class SwipeBackGesture extends SlideEdgeGesture {
constructor(_nav, element, gestureCtlr, options) {
super(element, assign({
direction: 'x',
maxEdgeStart: 75,
zone: false,
threshold: 0,
debouncer: new NativeRafDebouncer(),
gesture: gestureCtlr.createGesture({
name: GESTURE_GO_BACK_SWIPE,
priority: 20,
disableScroll: true
})
}, options));
this._nav = _nav;
}
canStart(ev) {
return (this._nav.canSwipeBack() &&
super.canStart(ev));
}
onSlideBeforeStart(ev) {
this._nav.swipeBackStart();
}
onSlide(slide, ev) {
ev.preventDefault();
ev.stopPropagation();
let stepValue = (slide.distance / slide.max);
this._nav.swipeBackProgress(stepValue);
}
onSlideEnd(slide, ev)
|
}
//# sourceMappingURL=swipe-back.js.map
|
{
const currentStepValue = (slide.distance / slide.max);
const isResetDirecction = slide.velocity < 0;
const isMovingFast = Math.abs(slide.velocity) > 0.4;
const isInResetZone = Math.abs(slide.delta) < Math.abs(slide.max) * 0.5;
const shouldComplete = !swipeShouldReset(isResetDirecction, isMovingFast, isInResetZone);
this._nav.swipeBackEnd(shouldComplete, currentStepValue);
}
|
identifier_body
|
swipe-back.js
|
import { assign, swipeShouldReset } from '../util/util';
import { GESTURE_GO_BACK_SWIPE } from '../gestures/gesture-controller';
import { SlideEdgeGesture } from '../gestures/slide-edge-gesture';
import { NativeRafDebouncer } from '../util/debouncer';
export class SwipeBackGesture extends SlideEdgeGesture {
constructor(_nav, element, gestureCtlr, options) {
super(element, assign({
direction: 'x',
maxEdgeStart: 75,
|
priority: 20,
disableScroll: true
})
}, options));
this._nav = _nav;
}
canStart(ev) {
return (this._nav.canSwipeBack() &&
super.canStart(ev));
}
onSlideBeforeStart(ev) {
this._nav.swipeBackStart();
}
onSlide(slide, ev) {
ev.preventDefault();
ev.stopPropagation();
let stepValue = (slide.distance / slide.max);
this._nav.swipeBackProgress(stepValue);
}
onSlideEnd(slide, ev) {
const currentStepValue = (slide.distance / slide.max);
const isResetDirecction = slide.velocity < 0;
const isMovingFast = Math.abs(slide.velocity) > 0.4;
const isInResetZone = Math.abs(slide.delta) < Math.abs(slide.max) * 0.5;
const shouldComplete = !swipeShouldReset(isResetDirecction, isMovingFast, isInResetZone);
this._nav.swipeBackEnd(shouldComplete, currentStepValue);
}
}
//# sourceMappingURL=swipe-back.js.map
|
zone: false,
threshold: 0,
debouncer: new NativeRafDebouncer(),
gesture: gestureCtlr.createGesture({
name: GESTURE_GO_BACK_SWIPE,
|
random_line_split
|
swipe-back.js
|
import { assign, swipeShouldReset } from '../util/util';
import { GESTURE_GO_BACK_SWIPE } from '../gestures/gesture-controller';
import { SlideEdgeGesture } from '../gestures/slide-edge-gesture';
import { NativeRafDebouncer } from '../util/debouncer';
export class SwipeBackGesture extends SlideEdgeGesture {
constructor(_nav, element, gestureCtlr, options) {
super(element, assign({
direction: 'x',
maxEdgeStart: 75,
zone: false,
threshold: 0,
debouncer: new NativeRafDebouncer(),
gesture: gestureCtlr.createGesture({
name: GESTURE_GO_BACK_SWIPE,
priority: 20,
disableScroll: true
})
}, options));
this._nav = _nav;
}
canStart(ev) {
return (this._nav.canSwipeBack() &&
super.canStart(ev));
}
onSlideBeforeStart(ev) {
this._nav.swipeBackStart();
}
onSlide(slide, ev) {
ev.preventDefault();
ev.stopPropagation();
let stepValue = (slide.distance / slide.max);
this._nav.swipeBackProgress(stepValue);
}
|
(slide, ev) {
const currentStepValue = (slide.distance / slide.max);
const isResetDirecction = slide.velocity < 0;
const isMovingFast = Math.abs(slide.velocity) > 0.4;
const isInResetZone = Math.abs(slide.delta) < Math.abs(slide.max) * 0.5;
const shouldComplete = !swipeShouldReset(isResetDirecction, isMovingFast, isInResetZone);
this._nav.swipeBackEnd(shouldComplete, currentStepValue);
}
}
//# sourceMappingURL=swipe-back.js.map
|
onSlideEnd
|
identifier_name
|
GcsArtifactEditor.tsx
|
import { cloneDeep, isNil } from 'lodash';
import React from 'react';
import { ArtifactTypePatterns } from 'core/artifact';
import { IArtifactEditorProps, IArtifactKindConfig } from 'core/domain';
import { StageConfigField } from 'core/pipeline';
import { SpelText } from 'core/widgets';
import { singleFieldArtifactEditor } from '../singleFieldArtifactEditor';
import { ArtifactEditor } from '../ArtifactEditor';
const TYPE = 'gcs/object';
export const GcsMatch: IArtifactKindConfig = {
label: 'GCS',
typePattern: ArtifactTypePatterns.GCS_OBJECT,
type: TYPE,
description: 'A GCS object.',
key: 'gcs',
isDefault: false,
isMatch: true,
editCmp: singleFieldArtifactEditor(
'name',
TYPE,
'Object path',
'gs://bucket/path/to/file',
'pipeline.config.expectedArtifact.gcs.name',
),
};
export const GcsDefault: IArtifactKindConfig = {
label: 'GCS',
typePattern: ArtifactTypePatterns.GCS_OBJECT,
type: TYPE,
description: 'A GCS object.',
key: 'default.gcs',
isDefault: true,
isMatch: false,
editCmp: class extends ArtifactEditor {
constructor(props: IArtifactEditorProps)
|
private onReferenceChange = (reference: string) => {
if (isNil(reference)) {
return;
}
const clonedArtifact = cloneDeep(this.props.artifact);
clonedArtifact.reference = reference;
if (reference.indexOf('#') >= 0) {
const split = reference.split('#');
clonedArtifact.name = split[0];
clonedArtifact.version = split[1];
} else {
clonedArtifact.name = reference;
}
this.props.onChange(clonedArtifact);
};
public render() {
return (
<StageConfigField label="Object path" helpKey="pipeline.config.expectedArtifact.defaultGcs.reference">
<SpelText
placeholder="gs://bucket/path/to/file"
value={this.props.artifact.reference}
onChange={this.onReferenceChange}
pipeline={this.props.pipeline}
docLink={true}
/>
</StageConfigField>
);
}
},
};
|
{
super(props, TYPE);
}
|
identifier_body
|
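Aside from the split itself, the `onReferenceChange` handler in these GcsArtifactEditor rows encodes a small parsing rule: a reference such as `gs://bucket/path#generation` is split at the `#` into `name` and `version`. A hedged Python restatement (the helper name is mine; like the TS original, it assumes at most one `#`):

```python
from typing import Optional, Tuple

def parse_gcs_reference(reference: str) -> Tuple[str, Optional[str]]:
    """Split 'gs://bucket/path#gen' into (name, version); version is
    None when no '#' is present."""
    name, sep, version = reference.partition("#")
    return name, (version if sep else None)
```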
GcsArtifactEditor.tsx
|
import { cloneDeep, isNil } from 'lodash';
import React from 'react';
import { ArtifactTypePatterns } from 'core/artifact';
import { IArtifactEditorProps, IArtifactKindConfig } from 'core/domain';
import { StageConfigField } from 'core/pipeline';
import { SpelText } from 'core/widgets';
import { singleFieldArtifactEditor } from '../singleFieldArtifactEditor';
import { ArtifactEditor } from '../ArtifactEditor';
const TYPE = 'gcs/object';
export const GcsMatch: IArtifactKindConfig = {
label: 'GCS',
typePattern: ArtifactTypePatterns.GCS_OBJECT,
type: TYPE,
description: 'A GCS object.',
key: 'gcs',
isDefault: false,
isMatch: true,
editCmp: singleFieldArtifactEditor(
'name',
TYPE,
'Object path',
'gs://bucket/path/to/file',
'pipeline.config.expectedArtifact.gcs.name',
),
};
export const GcsDefault: IArtifactKindConfig = {
label: 'GCS',
typePattern: ArtifactTypePatterns.GCS_OBJECT,
type: TYPE,
description: 'A GCS object.',
key: 'default.gcs',
isDefault: true,
isMatch: false,
editCmp: class extends ArtifactEditor {
|
(props: IArtifactEditorProps) {
super(props, TYPE);
}
private onReferenceChange = (reference: string) => {
if (isNil(reference)) {
return;
}
const clonedArtifact = cloneDeep(this.props.artifact);
clonedArtifact.reference = reference;
if (reference.indexOf('#') >= 0) {
const split = reference.split('#');
clonedArtifact.name = split[0];
clonedArtifact.version = split[1];
} else {
clonedArtifact.name = reference;
}
this.props.onChange(clonedArtifact);
};
public render() {
return (
<StageConfigField label="Object path" helpKey="pipeline.config.expectedArtifact.defaultGcs.reference">
<SpelText
placeholder="gs://bucket/path/to/file"
value={this.props.artifact.reference}
onChange={this.onReferenceChange}
pipeline={this.props.pipeline}
docLink={true}
/>
</StageConfigField>
);
}
},
};
|
constructor
|
identifier_name
|
GcsArtifactEditor.tsx
|
import { cloneDeep, isNil } from 'lodash';
import React from 'react';
import { ArtifactTypePatterns } from 'core/artifact';
import { IArtifactEditorProps, IArtifactKindConfig } from 'core/domain';
import { StageConfigField } from 'core/pipeline';
import { SpelText } from 'core/widgets';
import { singleFieldArtifactEditor } from '../singleFieldArtifactEditor';
import { ArtifactEditor } from '../ArtifactEditor';
const TYPE = 'gcs/object';
export const GcsMatch: IArtifactKindConfig = {
label: 'GCS',
typePattern: ArtifactTypePatterns.GCS_OBJECT,
type: TYPE,
description: 'A GCS object.',
key: 'gcs',
isDefault: false,
isMatch: true,
editCmp: singleFieldArtifactEditor(
'name',
TYPE,
'Object path',
'gs://bucket/path/to/file',
'pipeline.config.expectedArtifact.gcs.name',
),
};
export const GcsDefault: IArtifactKindConfig = {
label: 'GCS',
typePattern: ArtifactTypePatterns.GCS_OBJECT,
type: TYPE,
description: 'A GCS object.',
key: 'default.gcs',
isDefault: true,
isMatch: false,
editCmp: class extends ArtifactEditor {
constructor(props: IArtifactEditorProps) {
super(props, TYPE);
}
private onReferenceChange = (reference: string) => {
if (isNil(reference)) {
return;
}
const clonedArtifact = cloneDeep(this.props.artifact);
clonedArtifact.reference = reference;
if (reference.indexOf('#') >= 0) {
const split = reference.split('#');
clonedArtifact.name = split[0];
clonedArtifact.version = split[1];
} else {
|
public render() {
return (
<StageConfigField label="Object path" helpKey="pipeline.config.expectedArtifact.defaultGcs.reference">
<SpelText
placeholder="gs://bucket/path/to/file"
value={this.props.artifact.reference}
onChange={this.onReferenceChange}
pipeline={this.props.pipeline}
docLink={true}
/>
</StageConfigField>
);
}
},
};
|
clonedArtifact.name = reference;
}
this.props.onChange(clonedArtifact);
};
|
random_line_split
|
GcsArtifactEditor.tsx
|
import { cloneDeep, isNil } from 'lodash';
import React from 'react';
import { ArtifactTypePatterns } from 'core/artifact';
import { IArtifactEditorProps, IArtifactKindConfig } from 'core/domain';
import { StageConfigField } from 'core/pipeline';
import { SpelText } from 'core/widgets';
import { singleFieldArtifactEditor } from '../singleFieldArtifactEditor';
import { ArtifactEditor } from '../ArtifactEditor';
const TYPE = 'gcs/object';
export const GcsMatch: IArtifactKindConfig = {
label: 'GCS',
typePattern: ArtifactTypePatterns.GCS_OBJECT,
type: TYPE,
description: 'A GCS object.',
key: 'gcs',
isDefault: false,
isMatch: true,
editCmp: singleFieldArtifactEditor(
'name',
TYPE,
'Object path',
'gs://bucket/path/to/file',
'pipeline.config.expectedArtifact.gcs.name',
),
};
export const GcsDefault: IArtifactKindConfig = {
label: 'GCS',
typePattern: ArtifactTypePatterns.GCS_OBJECT,
type: TYPE,
description: 'A GCS object.',
key: 'default.gcs',
isDefault: true,
isMatch: false,
editCmp: class extends ArtifactEditor {
constructor(props: IArtifactEditorProps) {
super(props, TYPE);
}
private onReferenceChange = (reference: string) => {
if (isNil(reference))
|
const clonedArtifact = cloneDeep(this.props.artifact);
clonedArtifact.reference = reference;
if (reference.indexOf('#') >= 0) {
const split = reference.split('#');
clonedArtifact.name = split[0];
clonedArtifact.version = split[1];
} else {
clonedArtifact.name = reference;
}
this.props.onChange(clonedArtifact);
};
public render() {
return (
<StageConfigField label="Object path" helpKey="pipeline.config.expectedArtifact.defaultGcs.reference">
<SpelText
placeholder="gs://bucket/path/to/file"
value={this.props.artifact.reference}
onChange={this.onReferenceChange}
pipeline={this.props.pipeline}
docLink={true}
/>
</StageConfigField>
);
}
},
};
|
{
return;
}
|
conditional_block
|
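`conditional_block` holds out the body of a branch: in the row above the `middle` is the `{ return; }` block guarding the `isNil(reference)` check. Under the same assumptions as the `identifier_body` sketch earlier, the Python analogue differs only in which nodes are eligible:

```python
import ast
import random

def conditional_block_split(source: str, rng: random.Random) -> tuple[str, str, str]:
    """Cut out the span covered by one if-statement's body."""
    ifs = [n for n in ast.walk(ast.parse(source)) if isinstance(n, ast.If)]
    node = rng.choice(ifs)
    lines = source.splitlines(keepends=True)
    start = sum(len(l) for l in lines[:node.body[0].lineno - 1]) + node.body[0].col_offset
    end = sum(len(l) for l in lines[:node.body[-1].end_lineno - 1]) + node.body[-1].end_col_offset
    return source[:start], source[start:end], source[end:]
```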
server_web.py
|
# A basic web server using sockets
import socket
PORT = 8090
MAX_OPEN_REQUESTS = 5
def
|
(clientsocket):
print(clientsocket)
data = clientsocket.recv(1024)
print(data)
web_contents = "<h1>Received</h1>"
f = open("myhtml.html", "r")
web_contents = f.read()
f.close()
web_headers = "HTTP/1.1 200"
web_headers += "\n" + "Content-Type: text/html"
web_headers += "\n" + "Content-Length: %i" % len(str.encode(web_contents))
clientsocket.send(str.encode(web_headers + "\n\n" + web_contents))
clientsocket.close()
# create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
# Let's use the local interface name instead
hostname = "10.10.104.17"
try:
serversocket.bind((ip, PORT))
# become a server socket
# MAX_OPEN_REQUESTS connect requests before refusing outside connections
serversocket.listen(MAX_OPEN_REQUESTS)
while True:
# accept connections from outside
print ("Waiting for connections at %s %i" % (hostname, PORT))
(clientsocket, address) = serversocket.accept()
# now do something with the clientsocket
# in this case, we'll pretend this is a non threaded server
process_client(clientsocket)
except socket.error:
print("Problemas using port %i. Do you have permission?" % PORT)
|
process_client
|
identifier_name
|
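A side note on the server_web.py sample itself: it terminates header lines with bare `\n` and sends a status line with no reason phrase. A hedged variant of the same handler with CRLF line endings, as HTTP/1.1 expects (a sketch for comparison, not the dataset's code):

```python
import socket

def process_client(clientsocket: socket.socket) -> None:
    """Serve myhtml.html with CRLF-terminated headers and a reason phrase."""
    clientsocket.recv(1024)  # read (and ignore) the request
    with open("myhtml.html", "r") as f:
        body = f.read().encode()
    headers = (
        "HTTP/1.1 200 OK\r\n"
        "Content-Type: text/html\r\n"
        f"Content-Length: {len(body)}\r\n"
        "\r\n"
    ).encode()
    clientsocket.sendall(headers + body)
    clientsocket.close()
```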
server_web.py
|
# A basic web server using sockets
import socket
PORT = 8090
MAX_OPEN_REQUESTS = 5
def process_client(clientsocket):
print(clientsocket)
data = clientsocket.recv(1024)
print(data)
web_contents = "<h1>Received</h1>"
f = open("myhtml.html", "r")
web_contents = f.read()
f.close()
web_headers = "HTTP/1.1 200"
web_headers += "\n" + "Content-Type: text/html"
web_headers += "\n" + "Content-Length: %i" % len(str.encode(web_contents))
clientsocket.send(str.encode(web_headers + "\n\n" + web_contents))
clientsocket.close()
# create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
# Let's use the local interface name instead
hostname = "10.10.104.17"
try:
serversocket.bind((ip, PORT))
# become a server socket
# MAX_OPEN_REQUESTS connect requests before refusing outside connections
serversocket.listen(MAX_OPEN_REQUESTS)
while True:
# accept connections from outside
print ("Waiting for connections at %s %i" % (hostname, PORT))
(clientsocket, address) = serversocket.accept()
# now do something with the clientsocket
# in this case, we'll pretend this is a non threaded server
process_client(clientsocket)
|
except socket.error:
print("Problemas using port %i. Do you have permission?" % PORT)
|
random_line_split
|
|
server_web.py
|
# A basic web server using sockets
import socket
PORT = 8090
MAX_OPEN_REQUESTS = 5
def process_client(clientsocket):
print(clientsocket)
data = clientsocket.recv(1024)
print(data)
web_contents = "<h1>Received</h1>"
f = open("myhtml.html", "r")
web_contents = f.read()
f.close()
web_headers = "HTTP/1.1 200"
web_headers += "\n" + "Content-Type: text/html"
web_headers += "\n" + "Content-Length: %i" % len(str.encode(web_contents))
clientsocket.send(str.encode(web_headers + "\n\n" + web_contents))
clientsocket.close()
# create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
# Let's use the local interface name instead
hostname = "10.10.104.17"
try:
serversocket.bind((ip, PORT))
# become a server socket
# MAX_OPEN_REQUESTS connect requests before refusing outside connections
serversocket.listen(MAX_OPEN_REQUESTS)
while True:
# accept connections from outside
|
except socket.error:
print("Problemas using port %i. Do you have permission?" % PORT)
|
print ("Waiting for connections at %s %i" % (hostname, PORT))
(clientsocket, address) = serversocket.accept()
# now do something with the clientsocket
# in this case, we'll pretend this is a non threaded server
process_client(clientsocket)
|
conditional_block
|
server_web.py
|
# A basic web server using sockets
import socket
PORT = 8090
MAX_OPEN_REQUESTS = 5
def process_client(clientsocket):
|
# create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
# Let's use the local interface name instead
hostname = "10.10.104.17"
try:
serversocket.bind((ip, PORT))
# become a server socket
# MAX_OPEN_REQUESTS connect requests before refusing outside connections
serversocket.listen(MAX_OPEN_REQUESTS)
while True:
# accept connections from outside
print ("Waiting for connections at %s %i" % (hostname, PORT))
(clientsocket, address) = serversocket.accept()
# now do something with the clientsocket
# in this case, we'll pretend this is a non threaded server
process_client(clientsocket)
except socket.error:
print("Problemas using port %i. Do you have permission?" % PORT)
|
print(clientsocket)
data = clientsocket.recv(1024)
print(data)
web_contents = "<h1>Received</h1>"
f = open("myhtml.html", "r")
web_contents = f.read()
f.close()
web_headers = "HTTP/1.1 200"
web_headers += "\n" + "Content-Type: text/html"
web_headers += "\n" + "Content-Length: %i" % len(str.encode(web_contents))
clientsocket.send(str.encode(web_headers + "\n\n" + web_contents))
clientsocket.close()
|
identifier_body
|
conf.py
|
# -*- coding: utf-8 -*-
#
# pyspark documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 28 15:17:47 2014.
#
# This file is execfile()d with the current directory set to its
|
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shutil
import errno
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Remove previously generated rst files. Ignore errors just in case it stops
# generating whole docs.
shutil.rmtree(
"%s/reference/api" % os.path.dirname(os.path.abspath(__file__)), ignore_errors=True)
shutil.rmtree(
"%s/reference/pyspark.pandas/api" % os.path.dirname(os.path.abspath(__file__)),
ignore_errors=True)
try:
os.mkdir("%s/reference/api" % os.path.dirname(os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.mkdir("%s/reference/pyspark.pandas/api" % os.path.dirname(
os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'nbsphinx', # Converts Jupyter Notebook to reStructuredText files for Sphinx.
# For ipython directive in reStructuredText files. It is generated by the notebook.
'IPython.sphinxext.ipython_console_highlighting',
'numpydoc', # Handles NumPy-format docstrings.
'sphinx_plotly_directive', # For visualizing plot results
]
# plotly plot directive
plotly_include_source = True
plotly_html_show_formats = False
plotly_html_show_source_link = False
plotly_pre_code = """import numpy as np
import pandas as pd
import pyspark.pandas as ps"""
numpydoc_show_class_members = False
# Links used globally in the RST files.
# These are defined here to allow link substitutions dynamically.
rst_epilog = """
.. |binder| replace:: Live Notebook
.. _binder: https://mybinder.org/v2/gh/apache/spark/{0}?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart.ipynb
.. |examples| replace:: Examples
.. _examples: https://github.com/apache/spark/tree/{0}/examples/src/main/python
.. |downloading| replace:: Downloading
.. _downloading: https://spark.apache.org/docs/{1}/building-spark.html
.. |building_spark| replace:: Building Spark
.. _building_spark: https://spark.apache.org/docs/{1}/#downloading
""".format(
os.environ.get("GIT_HASH", "master"),
os.environ.get("RELEASE_VERSION", "latest"),
)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySpark'
copyright = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = os.environ.get('RELEASE_VERSION', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '.DS_Store', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for autodoc --------------------------------------------------
# Look at the first line of the docstring for function and method signatures.
autodoc_docstring_signature = True
autosummary_generate = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../../docs/img/spark-logo-reverse.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/pyspark.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysparkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyspark.tex', 'pyspark Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyspark', 'pyspark Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyspark', 'pyspark Documentation',
'Author', 'pyspark', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'pyspark'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'pyspark'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
def setup(app):
# The app.add_javascript() is deprecated.
getattr(app, "add_js_file", getattr(app, "add_javascript"))('copybutton.js')
# Skip sample endpoint link (not expected to resolve)
linkcheck_ignore = [r'https://kinesis.us-east-1.amazonaws.com']
|
# containing dir.
|
random_line_split
|
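One detail worth flagging in the conf.py sample's `setup`: `getattr(app, "add_js_file", getattr(app, "add_javascript"))` evaluates the fallback eagerly, so on Sphinx versions that have removed `add_javascript` entirely the line raises `AttributeError` even though `add_js_file` exists. A hedged lazy variant:

```python
def setup(app):
    # Only touch the deprecated API when the modern one is missing,
    # instead of evaluating both getattr calls up front.
    add_js = getattr(app, "add_js_file", None) or app.add_javascript
    add_js('copybutton.js')
```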
conf.py
|
# -*- coding: utf-8 -*-
#
# pyspark documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 28 15:17:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shutil
import errno
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Remove previously generated rst files. Ignore errors just in case it stops
# generating whole docs.
shutil.rmtree(
"%s/reference/api" % os.path.dirname(os.path.abspath(__file__)), ignore_errors=True)
shutil.rmtree(
"%s/reference/pyspark.pandas/api" % os.path.dirname(os.path.abspath(__file__)),
ignore_errors=True)
try:
os.mkdir("%s/reference/api" % os.path.dirname(os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
|
try:
os.mkdir("%s/reference/pyspark.pandas/api" % os.path.dirname(
os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'nbsphinx', # Converts Jupyter Notebook to reStructuredText files for Sphinx.
# For ipython directive in reStructuredText files. It is generated by the notebook.
'IPython.sphinxext.ipython_console_highlighting',
'numpydoc', # Handles NumPy-format docstrings.
'sphinx_plotly_directive', # For visualizing plot results
]
# plotly plot directive
plotly_include_source = True
plotly_html_show_formats = False
plotly_html_show_source_link = False
plotly_pre_code = """import numpy as np
import pandas as pd
import pyspark.pandas as ps"""
numpydoc_show_class_members = False
# Links used globally in the RST files.
# These are defined here to allow link substitutions dynamically.
rst_epilog = """
.. |binder| replace:: Live Notebook
.. _binder: https://mybinder.org/v2/gh/apache/spark/{0}?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart.ipynb
.. |examples| replace:: Examples
.. _examples: https://github.com/apache/spark/tree/{0}/examples/src/main/python
.. |downloading| replace:: Downloading
.. _downloading: https://spark.apache.org/docs/{1}/building-spark.html
.. |building_spark| replace:: Building Spark
.. _building_spark: https://spark.apache.org/docs/{1}/#downloading
""".format(
os.environ.get("GIT_HASH", "master"),
os.environ.get("RELEASE_VERSION", "latest"),
)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySpark'
copyright = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = os.environ.get('RELEASE_VERSION', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '.DS_Store', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for autodoc --------------------------------------------------
# Look at the first line of the docstring for function and method signatures.
autodoc_docstring_signature = True
autosummary_generate = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../../docs/img/spark-logo-reverse.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/pyspark.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysparkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyspark.tex', 'pyspark Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyspark', 'pyspark Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyspark', 'pyspark Documentation',
'Author', 'pyspark', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'pyspark'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'pyspark'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
def setup(app):
# The app.add_javascript() is deprecated.
getattr(app, "add_js_file", getattr(app, "add_javascript"))('copybutton.js')
# Skip sample endpoint link (not expected to resolve)
linkcheck_ignore = [r'https://kinesis.us-east-1.amazonaws.com']
|
raise
|
conditional_block
|
conf.py
|
# -*- coding: utf-8 -*-
#
# pyspark documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 28 15:17:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shutil
import errno
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Remove previously generated rst files. Ignore errors just in case it stops
# generating whole docs.
shutil.rmtree(
"%s/reference/api" % os.path.dirname(os.path.abspath(__file__)), ignore_errors=True)
shutil.rmtree(
"%s/reference/pyspark.pandas/api" % os.path.dirname(os.path.abspath(__file__)),
ignore_errors=True)
try:
os.mkdir("%s/reference/api" % os.path.dirname(os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.mkdir("%s/reference/pyspark.pandas/api" % os.path.dirname(
os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'nbsphinx', # Converts Jupyter Notebook to reStructuredText files for Sphinx.
# For ipython directive in reStructuredText files. It is generated by the notebook.
'IPython.sphinxext.ipython_console_highlighting',
'numpydoc', # Handles NumPy-format docstrings.
'sphinx_plotly_directive', # For visualizing plot results
]
# plotly plot directive
plotly_include_source = True
plotly_html_show_formats = False
plotly_html_show_source_link = False
plotly_pre_code = """import numpy as np
import pandas as pd
import pyspark.pandas as ps"""
numpydoc_show_class_members = False
# Links used globally in the RST files.
# These are defined here to allow link substitutions dynamically.
rst_epilog = """
.. |binder| replace:: Live Notebook
.. _binder: https://mybinder.org/v2/gh/apache/spark/{0}?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart.ipynb
.. |examples| replace:: Examples
.. _examples: https://github.com/apache/spark/tree/{0}/examples/src/main/python
.. |downloading| replace:: Downloading
.. _downloading: https://spark.apache.org/docs/{1}/building-spark.html
.. |building_spark| replace:: Building Spark
.. _building_spark: https://spark.apache.org/docs/{1}/#downloading
""".format(
os.environ.get("GIT_HASH", "master"),
os.environ.get("RELEASE_VERSION", "latest"),
)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySpark'
copyright = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = os.environ.get('RELEASE_VERSION', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '.DS_Store', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for autodoc --------------------------------------------------
# Look at the first line of the docstring for function and method signatures.
autodoc_docstring_signature = True
autosummary_generate = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../../docs/img/spark-logo-reverse.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/pyspark.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysparkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyspark.tex', 'pyspark Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyspark', 'pyspark Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyspark', 'pyspark Documentation',
'Author', 'pyspark', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'pyspark'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'pyspark'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
def
|
(app):
# The app.add_javascript() is deprecated.
getattr(app, "add_js_file", getattr(app, "add_javascript"))('copybutton.js')
# Skip sample endpoint link (not expected to resolve)
linkcheck_ignore = [r'https://kinesis.us-east-1.amazonaws.com']
|
setup
|
identifier_name
|
conf.py
|
# -*- coding: utf-8 -*-
#
# pyspark documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 28 15:17:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shutil
import errno
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Remove previously generated rst files. Ignore errors so that a failure
# here does not stop the whole docs build.
shutil.rmtree(
"%s/reference/api" % os.path.dirname(os.path.abspath(__file__)), ignore_errors=True)
shutil.rmtree(
"%s/reference/pyspark.pandas/api" % os.path.dirname(os.path.abspath(__file__)),
ignore_errors=True)
try:
os.mkdir("%s/reference/api" % os.path.dirname(os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.mkdir("%s/reference/pyspark.pandas/api" % os.path.dirname(
os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'nbsphinx', # Converts Jupyter Notebook to reStructuredText files for Sphinx.
# For the ipython directive in reStructuredText files generated from the notebooks.
'IPython.sphinxext.ipython_console_highlighting',
'numpydoc', # handle NumPy documentation formatted docstrings.
'sphinx_plotly_directive', # For visualizing plot results
]
# plotly plot directive
plotly_include_source = True
plotly_html_show_formats = False
plotly_html_show_source_link = False
plotly_pre_code = """import numpy as np
import pandas as pd
import pyspark.pandas as ps"""
numpydoc_show_class_members = False
# Links used globally in the RST files.
# These are defined here to allow link substitutions dynamically.
rst_epilog = """
.. |binder| replace:: Live Notebook
.. _binder: https://mybinder.org/v2/gh/apache/spark/{0}?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart.ipynb
.. |examples| replace:: Examples
.. _examples: https://github.com/apache/spark/tree/{0}/examples/src/main/python
.. |downloading| replace:: Downloading
.. _downloading: https://spark.apache.org/docs/{1}/#downloading
.. |building_spark| replace:: Building Spark
.. _building_spark: https://spark.apache.org/docs/{1}/building-spark.html
""".format(
os.environ.get("GIT_HASH", "master"),
os.environ.get("RELEASE_VERSION", "latest"),
)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySpark'
copyright = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = os.environ.get('RELEASE_VERSION', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '.DS_Store', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for autodoc --------------------------------------------------
# Look at the first line of the docstring for function and method signatures.
autodoc_docstring_signature = True
autosummary_generate = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../../docs/img/spark-logo-reverse.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/pyspark.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysparkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyspark.tex', 'pyspark Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyspark', 'pyspark Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyspark', 'pyspark Documentation',
'Author', 'pyspark', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'pyspark'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'pyspark'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
def setup(app):
# The app.add_javascript() is deprecated.
|
# Skip sample endpoint link (not expected to resolve)
linkcheck_ignore = [r'https://kinesis.us-east-1.amazonaws.com']
|
getattr(app, "add_js_file", getattr(app, "add_javascript"))('copybutton.js')
|
identifier_body
|
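All three conf.py rows above pivot around the same setup() hook, whose getattr shim papers over a Sphinx API rename: add_js_file arrived in Sphinx 1.8 and the older add_javascript was deprecated and later removed. Note that the two-argument getattr in the snippet evaluates its default eagerly, so it would raise on a Sphinx build that no longer defines add_javascript; a minimal, lazier sketch of the same shim:

def setup(app):
    # Prefer the modern hook; fall back to the deprecated one on old Sphinx.
    add_js = getattr(app, "add_js_file", None) or getattr(app, "add_javascript", None)
    add_js("copybutton.js")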
cost.py
|
from neon.transforms.cost import Cost
class MulticlsSVMLoss(Cost):
|
class L1SVMLoss(Cost):
def __init__(self, C=10):
self.C = C
def __call__(self, y, t):
return self.C * self.be.sum(self.be.square(self.be.maximum(0, 1 - y * (t * 2 - 1)))) * 0.5 / y.shape[0]
def bprop(self, y, t):
return - self.C * (t * 2 - 1) * self.be.maximum(0, 1 - y * (t * 2 - 1)) / self.be.bsz / y.shape[0]
|
def __init__(self, delta=1.):
self.delta = delta
def __call__(self, y, t):
T = self.be.empty_like(y)
T[:] = self.be.max(y * t, axis=0)
# T = self.be.array(self.be.max(y * t, axis=0).asnumpyarray(), y.shape[0], axis=0)
margin = self.be.square(self.be.maximum(0, y - T + self.delta)) * 0.5
return self.be.sum(margin) / self.be.bsz
def bprop(self, y, t):
T = self.be.empty_like(y)
T[:] = self.be.max(y * t, axis=0)
return self.be.maximum(0, y - T + self.delta) / self.be.bsz
|
identifier_body
|
cost.py
|
from neon.transforms.cost import Cost
class MulticlsSVMLoss(Cost):
def __init__(self, delta=1.):
self.delta = delta
def __call__(self, y, t):
T = self.be.empty_like(y)
T[:] = self.be.max(y * t, axis=0)
# T = self.be.array(self.be.max(y * t, axis=0).asnumpyarray(), y.shape[0], axis=0)
margin = self.be.square(self.be.maximum(0, y - T + self.delta)) * 0.5
return self.be.sum(margin) / self.be.bsz
def bprop(self, y, t):
T = self.be.empty_like(y)
T[:] = self.be.max(y * t, axis=0)
return self.be.maximum(0, y - T + self.delta) / self.be.bsz
class
|
(Cost):
def __init__(self, C=10):
self.C = C
def __call__(self, y, t):
return self.C * self.be.sum(self.be.square(self.be.maximum(0, 1 - y * (t * 2 - 1)))) * 0.5 / y.shape[0]
def bprop(self, y, t):
return - self.C * (t * 2 - 1) * self.be.maximum(0, 1 - y * (t * 2 - 1)) / self.be.bsz / y.shape[0]
|
L1SVMLoss
|
identifier_name
|
cost.py
|
from neon.transforms.cost import Cost
class MulticlsSVMLoss(Cost):
def __init__(self, delta=1.):
self.delta = delta
def __call__(self, y, t):
T = self.be.empty_like(y)
T[:] = self.be.max(y * t, axis=0)
# T = self.be.array(self.be.max(y * t, axis=0).asnumpyarray(), y.shape[0], axis=0)
margin = self.be.square(self.be.maximum(0, y - T + self.delta)) * 0.5
return self.be.sum(margin) / self.be.bsz
def bprop(self, y, t):
T = self.be.empty_like(y)
T[:] = self.be.max(y * t, axis=0)
return self.be.maximum(0, y - T + self.delta) / self.be.bsz
class L1SVMLoss(Cost):
def __init__(self, C=10):
self.C = C
|
def __call__(self, y, t):
return self.C * self.be.sum(self.be.square(self.be.maximum(0, 1 - y * (t * 2 - 1)))) * 0.5 / y.shape[0]
def bprop(self, y, t):
return - self.C * (t * 2 - 1) * self.be.maximum(0, 1 - y * (t * 2 - 1)) / self.be.bsz / y.shape[0]
|
random_line_split
|
|
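The cost.py rows define two neon Cost subclasses: MulticlsSVMLoss, a squared multiclass hinge loss with margin delta (T broadcasts the true-class score down each column), and L1SVMLoss, a squared binary hinge loss scaled by C. A hedged NumPy restatement of the multiclass forward pass, assuming neon's (classes, batch) layout with t one-hot:

import numpy as np

def multiclass_svm_loss(y, t, delta=1.0):
    # Score of the correct class for each sample, broadcast over rows.
    T = np.max(y * t, axis=0, keepdims=True)
    # Squared hinge; like the snippet, the true class's own delta term is kept.
    margin = 0.5 * np.maximum(0.0, y - T + delta) ** 2
    return margin.sum() / y.shape[1]  # average over the batch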
youtube-searched-for.js
|
'use strict';
var React = require('react');
var mui = require('material-ui');
var SvgIcon = mui.SvgIcon;
var createClass = require('create-react-class');
var ActionYoutubeSearchedFor = createClass({
displayName: 'ActionYoutubeSearchedFor',
render: function render() {
return React.createElement(
SvgIcon,
this.props,
React.createElement('path', { d: 'M17.01 14h-.8l-.27-.27c.98-1.14 1.57-2.61 1.57-4.23 0-3.59-2.91-6.5-6.5-6.5s-6.5 3-6.5 6.5H2l3.84 4 4.16-4H6.51C6.51 7 8.53 5 11.01 5s4.5 2.01 4.5 4.5c0 2.48-2.02 4.5-4.5 4.5-.65 0-1.26-.14-1.82-.38L7.71 15.1c.97.57 2.09.9 3.3.9 1.61 0 3.08-.59 4.22-1.57l.27.27v.79l5.01 4.99L22 19l-4.99-5z' })
);
}
});
|
module.exports = ActionYoutubeSearchedFor;
|
random_line_split
|
|
irc.rs
|
use config;
use std::thread;
use nyaa::Nyaa;
use message_handler::{MessageHandlerBuilder,MessageHandler};
use colors;
use lib::log::Logger;
use lib::connection::Connection;
use lib::commands::Commands;
use std::sync::{Arc,Mutex,mpsc};
pub struct Irc{
con: Connection,
}
impl Irc{
pub fn new(con: Connection)->Irc{
Irc{
con
}
}
pub fn start(&mut self)->bool
|
}
|
{
let mut logger = match Logger::new("logs.txt"){
Ok(l) =>l,
Err(_) =>return false,
};
let cd = match config::ConfigData::new("config.json"){
Ok(a) =>a,
Err(err) =>{
match err{
config::ConfigErr::Parse=>{logger.add("Config Parse Error");},
config::ConfigErr::Read=>{logger.add("Config Read Error");},
config::ConfigErr::Open=>{logger.add("Config Open Error");},
}
return false
}
};
self.con.login(&cd.username,&cd.password);
self.con.join_channels(&cd.channels);
self.con.enable_tags();
self.con.enable_whispers();
self.con.set_char_limit(175);
let mut commands = match Commands::new(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not connect to redis");
return false
}
};
if commands.add_channels(&cd.channels).is_err(){
logger.add("Could not load commands from redis");
}
let mut sock = match self.con.socket_clone(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not clone the socket. Error: ");
return false
}
};
let (tx, rx) = mpsc::channel();
let delay = cd.nyaa.delay;
let channels = cd.channels;
thread::spawn(move || {
if let Ok(nyaa) = Nyaa::new(None){
nyaa.start(&mut sock,channels,delay,tx)
}
});
let animes = Arc::new(Mutex::new(Vec::new()));
let animes_clone = animes.clone();
let (dc_sender,dc_receiver) = mpsc::channel();
thread::spawn(move || {
loop{
match rx.recv(){
Ok(a) => {
*animes.lock().unwrap() = a;
match dc_receiver.try_recv(){
Ok(_)=>break,
Err(mpsc::TryRecvError::Empty)=>{},
Err(mpsc::TryRecvError::Disconnected)=>break
}
}
Err(_) =>{
break
}
}
}
});
let colors = match colors::Color::new("colors.txt"){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not load colors file");
return false
}
};
let mut message_handler = MessageHandlerBuilder::new(&mut self.con,&animes_clone)
.commands(commands)
.logger(logger)
.colors(colors)
.admins(cd.admins)
.build();
loop{
if !message_handler.on_new_message(){
if dc_sender.send("need reconnect").is_err(){
println!("dc receiver disconnected")
}
break;
}
}
return false;
}
|
identifier_body
|
irc.rs
|
use config;
use std::thread;
use nyaa::Nyaa;
use message_handler::{MessageHandlerBuilder,MessageHandler};
use colors;
use lib::log::Logger;
use lib::connection::Connection;
use lib::commands::Commands;
use std::sync::{Arc,Mutex,mpsc};
pub struct Irc{
con: Connection,
}
impl Irc{
pub fn new(con: Connection)->Irc{
Irc{
con
}
}
pub fn start(&mut self)->bool{
let mut logger = match Logger::new("logs.txt"){
Ok(l) =>l,
Err(_) =>return false,
};
let cd = match config::ConfigData::new("config.json"){
Ok(a) =>a,
Err(err) =>{
match err{
config::ConfigErr::Parse=>{logger.add("Config Parse Error");},
config::ConfigErr::Read=>{logger.add("Config Read Error");},
config::ConfigErr::Open=>{logger.add("Config Open Error");},
}
return false
}
};
self.con.login(&cd.username,&cd.password);
self.con.join_channels(&cd.channels);
self.con.enable_tags();
self.con.enable_whispers();
self.con.set_char_limit(175);
let mut commands = match Commands::new(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not connect to redis");
return false
}
};
if commands.add_channels(&cd.channels).is_err(){
logger.add("Could not load commands from redis");
}
|
Ok(a) =>a,
Err(_) =>{
logger.add("Could not clone the socket. Error: ");
return false
}
};
let (tx, rx) = mpsc::channel();
let delay = cd.nyaa.delay;
let channels = cd.channels;
thread::spawn(move || {
if let Ok(nyaa) = Nyaa::new(None){
nyaa.start(&mut sock,channels,delay,tx)
}
});
let animes = Arc::new(Mutex::new(Vec::new()));
let animes_clone = animes.clone();
let (dc_sender,dc_receiver) = mpsc::channel();
thread::spawn(move || {
loop{
match rx.recv(){
Ok(a) => {
*animes.lock().unwrap() = a;
match dc_receiver.try_recv(){
Ok(_)=>break,
Err(mpsc::TryRecvError::Empty)=>{},
Err(mpsc::TryRecvError::Disconnected)=>break
}
}
Err(_) =>{
break
}
}
}
});
let colors = match colors::Color::new("colors.txt"){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not load colors file");
return false
}
};
let mut message_handler = MessageHandlerBuilder::new(&mut self.con,&animes_clone)
.commands(commands)
.logger(logger)
.colors(colors)
.admins(cd.admins)
.build();
loop{
if !message_handler.on_new_message(){
if dc_sender.send("need reconnect").is_err(){
println!("dc receiver disconnected")
}
break;
}
}
return false;
}
}
|
let mut sock = match self.con.socket_clone(){
|
random_line_split
|
irc.rs
|
use config;
use std::thread;
use nyaa::Nyaa;
use message_handler::{MessageHandlerBuilder,MessageHandler};
use colors;
use lib::log::Logger;
use lib::connection::Connection;
use lib::commands::Commands;
use std::sync::{Arc,Mutex,mpsc};
pub struct Irc{
con: Connection,
}
impl Irc{
pub fn new(con: Connection)->Irc{
Irc{
con
}
}
pub fn
|
(&mut self)->bool{
let mut logger = match Logger::new("logs.txt"){
Ok(l) =>l,
Err(_) =>return false,
};
let cd = match config::ConfigData::new("config.json"){
Ok(a) =>a,
Err(err) =>{
match err{
config::ConfigErr::Parse=>{logger.add("Config Parse Error");},
config::ConfigErr::Read=>{logger.add("Config Read Error");},
config::ConfigErr::Open=>{logger.add("Config Open Error");},
}
return false
}
};
self.con.login(&cd.username,&cd.password);
self.con.join_channels(&cd.channels);
self.con.enable_tags();
self.con.enable_whispers();
self.con.set_char_limit(175);
let mut commands = match Commands::new(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not connect to redis");
return false
}
};
if commands.add_channels(&cd.channels).is_err(){
logger.add("Could not load commands from redis");
}
let mut sock = match self.con.socket_clone(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not clone the socket. Error: ");
return false
}
};
let (tx, rx) = mpsc::channel();
let delay = cd.nyaa.delay;
let channels = cd.channels;
thread::spawn(move || {
if let Ok(nyaa) = Nyaa::new(None){
nyaa.start(&mut sock,channels,delay,tx)
}
});
let animes = Arc::new(Mutex::new(Vec::new()));
let animes_clone = animes.clone();
let (dc_sender,dc_receiver) = mpsc::channel();
thread::spawn(move || {
loop{
match rx.recv(){
Ok(a) => {
*animes.lock().unwrap() = a;
match dc_receiver.try_recv(){
Ok(_)=>break,
Err(mpsc::TryRecvError::Empty)=>{},
Err(mpsc::TryRecvError::Disconnected)=>break
}
}
Err(_) =>{
break
}
}
}
});
let colors = match colors::Color::new("colors.txt"){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not load colors file");
return false
}
};
let mut message_handler = MessageHandlerBuilder::new(&mut self.con,&animes_clone)
.commands(commands)
.logger(logger)
.colors(colors)
.admins(cd.admins)
.build();
loop{
if !message_handler.on_new_message(){
if dc_sender.send("need reconnect").is_err(){
println!("dc receiver disconnected")
}
break;
}
}
return false;
}
}
|
start
|
identifier_name
|
irc.rs
|
use config;
use std::thread;
use nyaa::Nyaa;
use message_handler::{MessageHandlerBuilder,MessageHandler};
use colors;
use lib::log::Logger;
use lib::connection::Connection;
use lib::commands::Commands;
use std::sync::{Arc,Mutex,mpsc};
pub struct Irc{
con: Connection,
}
impl Irc{
pub fn new(con: Connection)->Irc{
Irc{
con
}
}
pub fn start(&mut self)->bool{
let mut logger = match Logger::new("logs.txt"){
Ok(l) =>l,
Err(_) =>return false,
};
let cd = match config::ConfigData::new("config.json"){
Ok(a) =>a,
Err(err) =>{
match err{
config::ConfigErr::Parse=>{logger.add("Config Parse Error");},
config::ConfigErr::Read=>{logger.add("Config Read Error");},
config::ConfigErr::Open=>{logger.add("Config Open Error");},
}
return false
}
};
self.con.login(&cd.username,&cd.password);
self.con.join_channels(&cd.channels);
self.con.enable_tags();
self.con.enable_whispers();
self.con.set_char_limit(175);
let mut commands = match Commands::new(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not connect to redis");
return false
}
};
if commands.add_channels(&cd.channels).is_err()
|
let mut sock = match self.con.socket_clone(){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not clone the socket. Error: ");
return false
}
};
let (tx, rx) = mpsc::channel();
let delay = cd.nyaa.delay;
let channels = cd.channels;
thread::spawn(move || {
if let Ok(nyaa) = Nyaa::new(None){
nyaa.start(&mut sock,channels,delay,tx)
}
});
let animes = Arc::new(Mutex::new(Vec::new()));
let animes_clone = animes.clone();
let (dc_sender,dc_receiver) = mpsc::channel();
thread::spawn(move || {
loop{
match rx.recv(){
Ok(a) => {
*animes.lock().unwrap() = a;
match dc_receiver.try_recv(){
Ok(_)=>break,
Err(mpsc::TryRecvError::Empty)=>{},
Err(mpsc::TryRecvError::Disconnected)=>break
}
}
Err(_) =>{
break
}
}
}
});
let colors = match colors::Color::new("colors.txt"){
Ok(a) =>a,
Err(_) =>{
logger.add("Could not load colors file");
return false
}
};
let mut message_handler = MessageHandlerBuilder::new(&mut self.con,&animes_clone)
.commands(commands)
.logger(logger)
.colors(colors)
.admins(cd.admins)
.build();
loop{
if !message_handler.on_new_message(){
if dc_sender.send("need reconnect").is_err(){
println!("dc receiver disconnected")
}
break;
}
}
return false;
}
}
|
{
logger.add("Could not load commands from redis");
}
|
conditional_block
|
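The four irc.rs rows all carve up Irc::start, which wires three threads together over mpsc channels: one fetches nyaa updates, one copies them into the shared Arc<Mutex<Vec>>, and the main loop sends "need reconnect" so the copier can exit via a non-blocking try_recv. A rough Python analogue of that shutdown handshake, using queue.Queue for the channels (all names here are illustrative, not from the source):

import queue
import threading

def relay(updates, shutdown, state):
    while True:
        state[:] = updates.get()      # blocking recv, like rx.recv()
        try:
            shutdown.get_nowait()     # non-blocking check, like try_recv()
            break                     # any message means: stop relaying
        except queue.Empty:
            pass

updates, shutdown, state = queue.Queue(), queue.Queue(), []
threading.Thread(target=relay, args=(updates, shutdown, state), daemon=True).start()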
Pdf.js
|
import { HTTPError } from 'utils/APIConnector'
import * as AWS from 'utils/AWS'
import * as Config from 'utils/Config'
import * as Data from 'utils/Data'
import { mkSearch } from 'utils/NamedRoutes'
import { PreviewData, PreviewError } from '../types'
import * as utils from './utils'
export const detect = utils.extIs('.pdf')
async function loadPdf({ endpoint, sign, handle }) {
try {
const url = sign(handle)
const search = mkSearch({
url,
input: 'pdf',
output: 'raw',
size: 'w1024h768',
})
const r = await fetch(`${endpoint}/thumbnail${search}`)
if (r.status >= 400) {
const text = await r.text()
throw new HTTPError(r, text)
}
const firstPageBlob = await r.blob()
return PreviewData.Pdf({ handle, firstPageBlob })
} catch (e) {
if (e instanceof HTTPError && e.json?.error === 'Forbidden')
|
// eslint-disable-next-line no-console
console.warn('error loading pdf preview', { ...e })
// eslint-disable-next-line no-console
console.error(e)
throw e
}
}
export const Loader = function PdfLoader({ handle, children }) {
const endpoint = Config.use().binaryApiGatewayEndpoint
const sign = AWS.Signer.useS3Signer()
const data = Data.use(loadPdf, { endpoint, sign, handle })
return children(utils.useErrorHandling(data.result, { handle, retry: data.fetch }))
}
|
{
if (e.json.text?.match(utils.GLACIER_ERROR_RE)) {
throw PreviewError.Archived({ handle })
}
throw PreviewError.Forbidden({ handle })
}
|
conditional_block
|
Pdf.js
|
import { HTTPError } from 'utils/APIConnector'
import * as AWS from 'utils/AWS'
import * as Config from 'utils/Config'
import * as Data from 'utils/Data'
import { mkSearch } from 'utils/NamedRoutes'
import { PreviewData, PreviewError } from '../types'
import * as utils from './utils'
export const detect = utils.extIs('.pdf')
async function
|
({ endpoint, sign, handle }) {
try {
const url = sign(handle)
const search = mkSearch({
url,
input: 'pdf',
output: 'raw',
size: 'w1024h768',
})
const r = await fetch(`${endpoint}/thumbnail${search}`)
if (r.status >= 400) {
const text = await r.text()
throw new HTTPError(r, text)
}
const firstPageBlob = await r.blob()
return PreviewData.Pdf({ handle, firstPageBlob })
} catch (e) {
if (e instanceof HTTPError && e.json?.error === 'Forbidden') {
if (e.json.text?.match(utils.GLACIER_ERROR_RE)) {
throw PreviewError.Archived({ handle })
}
throw PreviewError.Forbidden({ handle })
}
// eslint-disable-next-line no-console
console.warn('error loading pdf preview', { ...e })
// eslint-disable-next-line no-console
console.error(e)
throw e
}
}
export const Loader = function PdfLoader({ handle, children }) {
const endpoint = Config.use().binaryApiGatewayEndpoint
const sign = AWS.Signer.useS3Signer()
const data = Data.use(loadPdf, { endpoint, sign, handle })
return children(utils.useErrorHandling(data.result, { handle, retry: data.fetch }))
}
|
loadPdf
|
identifier_name
|
Pdf.js
|
import { HTTPError } from 'utils/APIConnector'
import * as AWS from 'utils/AWS'
import * as Config from 'utils/Config'
import * as Data from 'utils/Data'
import { mkSearch } from 'utils/NamedRoutes'
import { PreviewData, PreviewError } from '../types'
import * as utils from './utils'
export const detect = utils.extIs('.pdf')
async function loadPdf({ endpoint, sign, handle }) {
try {
const url = sign(handle)
const search = mkSearch({
url,
input: 'pdf',
output: 'raw',
size: 'w1024h768',
})
const r = await fetch(`${endpoint}/thumbnail${search}`)
if (r.status >= 400) {
const text = await r.text()
throw new HTTPError(r, text)
}
const firstPageBlob = await r.blob()
return PreviewData.Pdf({ handle, firstPageBlob })
} catch (e) {
if (e instanceof HTTPError && e.json?.error === 'Forbidden') {
if (e.json.text?.match(utils.GLACIER_ERROR_RE)) {
throw PreviewError.Archived({ handle })
}
throw PreviewError.Forbidden({ handle })
}
// eslint-disable-next-line no-console
console.warn('error loading pdf preview', { ...e })
// eslint-disable-next-line no-console
console.error(e)
|
const endpoint = Config.use().binaryApiGatewayEndpoint
const sign = AWS.Signer.useS3Signer()
const data = Data.use(loadPdf, { endpoint, sign, handle })
return children(utils.useErrorHandling(data.result, { handle, retry: data.fetch }))
}
|
throw e
}
}
export const Loader = function PdfLoader({ handle, children }) {
|
random_line_split
|
Pdf.js
|
import { HTTPError } from 'utils/APIConnector'
import * as AWS from 'utils/AWS'
import * as Config from 'utils/Config'
import * as Data from 'utils/Data'
import { mkSearch } from 'utils/NamedRoutes'
import { PreviewData, PreviewError } from '../types'
import * as utils from './utils'
export const detect = utils.extIs('.pdf')
async function loadPdf({ endpoint, sign, handle })
|
export const Loader = function PdfLoader({ handle, children }) {
const endpoint = Config.use().binaryApiGatewayEndpoint
const sign = AWS.Signer.useS3Signer()
const data = Data.use(loadPdf, { endpoint, sign, handle })
return children(utils.useErrorHandling(data.result, { handle, retry: data.fetch }))
}
|
{
try {
const url = sign(handle)
const search = mkSearch({
url,
input: 'pdf',
output: 'raw',
size: 'w1024h768',
})
const r = await fetch(`${endpoint}/thumbnail${search}`)
if (r.status >= 400) {
const text = await r.text()
throw new HTTPError(r, text)
}
const firstPageBlob = await r.blob()
return PreviewData.Pdf({ handle, firstPageBlob })
} catch (e) {
if (e instanceof HTTPError && e.json?.error === 'Forbidden') {
if (e.json.text?.match(utils.GLACIER_ERROR_RE)) {
throw PreviewError.Archived({ handle })
}
throw PreviewError.Forbidden({ handle })
}
// eslint-disable-next-line no-console
console.warn('error loading pdf preview', { ...e })
// eslint-disable-next-line no-console
console.error(e)
throw e
}
}
|
identifier_body
|
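The Pdf.js rows split loadPdf around its error triage: any HTTP status >= 400 from the thumbnail service becomes an HTTPError, a Forbidden body is refined into PreviewError.Archived when it matches the Glacier regex and PreviewError.Forbidden otherwise, and anything else is logged and re-thrown. A hedged Python sketch of the same triage (requests-based; the pattern and error classes are stand-ins, not the source's API):

import re
import requests

GLACIER_ERROR_RE = re.compile(r"glacier", re.I)  # stand-in pattern

class Archived(Exception): pass
class Forbidden(Exception): pass

def load_pdf_thumbnail(endpoint, signed_url):
    r = requests.get(endpoint + "/thumbnail", params={
        "url": signed_url, "input": "pdf", "output": "raw", "size": "w1024h768",
    })
    if r.status_code == 403:
        # Objects archived to Glacier surface as 403s that mention it in the body.
        if GLACIER_ERROR_RE.search(r.text):
            raise Archived()
        raise Forbidden()
    r.raise_for_status()
    return r.content  # bytes of the rendered first page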
GridStoreAdapter.js
|
/**
GridStoreAdapter
Stores files in Mongo using GridStore
Requires the database adapter to be based on mongoclient
@flow weak
*/
import { MongoClient, GridStore, Db} from 'mongodb';
import { FilesAdapter } from './FilesAdapter';
import defaults from '../../defaults';
export class GridStoreAdapter extends FilesAdapter {
_databaseURI: string;
_connectionPromise: Promise<Db>;
constructor(mongoDatabaseURI = defaults.DefaultMongoURI) {
super();
this._databaseURI = mongoDatabaseURI;
}
_connect() {
if (!this._connectionPromise)
|
return this._connectionPromise;
}
// For a given config object, filename, and data, store a file
// Returns a promise
createFile(filename: string, data) {
return this._connect().then(database => {
const gridStore = new GridStore(database, filename, 'w');
return gridStore.open();
}).then(gridStore => {
return gridStore.write(data);
}).then(gridStore => {
return gridStore.close();
});
}
deleteFile(filename: string) {
return this._connect().then(database => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
}).then((gridStore) => {
return gridStore.unlink();
}).then((gridStore) => {
return gridStore.close();
});
}
getFileData(filename: string) {
return this._connect().then(database => {
return GridStore.exist(database, filename)
.then(() => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
});
}).then(gridStore => {
return gridStore.read();
});
}
getFileLocation(config, filename) {
return (config.mount + '/files/' + config.applicationId + '/' + encodeURIComponent(filename));
}
getFileStream(filename: string) {
return this._connect().then(database => {
return GridStore.exist(database, filename).then(() => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
});
});
}
}
export default GridStoreAdapter;
|
{
this._connectionPromise = MongoClient.connect(this._databaseURI);
}
|
conditional_block
|
GridStoreAdapter.js
|
/**
GridStoreAdapter
Stores files in Mongo using GridStore
Requires the database adapter to be based on mongoclient
@flow weak
*/
import { MongoClient, GridStore, Db} from 'mongodb';
import { FilesAdapter } from './FilesAdapter';
import defaults from '../../defaults';
export class GridStoreAdapter extends FilesAdapter {
_databaseURI: string;
_connectionPromise: Promise<Db>;
constructor(mongoDatabaseURI = defaults.DefaultMongoURI) {
super();
this._databaseURI = mongoDatabaseURI;
}
_connect() {
if (!this._connectionPromise) {
this._connectionPromise = MongoClient.connect(this._databaseURI);
}
return this._connectionPromise;
}
// For a given config object, filename, and data, store a file
// Returns a promise
createFile(filename: string, data) {
return this._connect().then(database => {
const gridStore = new GridStore(database, filename, 'w');
return gridStore.open();
}).then(gridStore => {
return gridStore.write(data);
}).then(gridStore => {
return gridStore.close();
});
}
deleteFile(filename: string) {
return this._connect().then(database => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
}).then((gridStore) => {
return gridStore.unlink();
}).then((gridStore) => {
return gridStore.close();
});
}
getFileData(filename: string) {
return this._connect().then(database => {
return GridStore.exist(database, filename)
.then(() => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
});
}).then(gridStore => {
return gridStore.read();
});
}
getFileLocation(config, filename) {
return (config.mount + '/files/' + config.applicationId + '/' + encodeURIComponent(filename));
}
|
return gridStore.open();
});
});
}
}
export default GridStoreAdapter;
|
getFileStream(filename: string) {
return this._connect().then(database => {
return GridStore.exist(database, filename).then(() => {
const gridStore = new GridStore(database, filename, 'r');
|
random_line_split
|
GridStoreAdapter.js
|
/**
GridStoreAdapter
Stores files in Mongo using GridStore
Requires the database adapter to be based on mongoclient
@flow weak
*/
import { MongoClient, GridStore, Db} from 'mongodb';
import { FilesAdapter } from './FilesAdapter';
import defaults from '../../defaults';
export class GridStoreAdapter extends FilesAdapter {
_databaseURI: string;
_connectionPromise: Promise<Db>;
constructor(mongoDatabaseURI = defaults.DefaultMongoURI) {
super();
this._databaseURI = mongoDatabaseURI;
}
_connect()
|
// For a given config object, filename, and data, store a file
// Returns a promise
createFile(filename: string, data) {
return this._connect().then(database => {
const gridStore = new GridStore(database, filename, 'w');
return gridStore.open();
}).then(gridStore => {
return gridStore.write(data);
}).then(gridStore => {
return gridStore.close();
});
}
deleteFile(filename: string) {
return this._connect().then(database => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
}).then((gridStore) => {
return gridStore.unlink();
}).then((gridStore) => {
return gridStore.close();
});
}
getFileData(filename: string) {
return this._connect().then(database => {
return GridStore.exist(database, filename)
.then(() => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
});
}).then(gridStore => {
return gridStore.read();
});
}
getFileLocation(config, filename) {
return (config.mount + '/files/' + config.applicationId + '/' + encodeURIComponent(filename));
}
getFileStream(filename: string) {
return this._connect().then(database => {
return GridStore.exist(database, filename).then(() => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
});
});
}
}
export default GridStoreAdapter;
|
{
if (!this._connectionPromise) {
this._connectionPromise = MongoClient.connect(this._databaseURI);
}
return this._connectionPromise;
}
|
identifier_body
|
GridStoreAdapter.js
|
/**
GridStoreAdapter
Stores files in Mongo using GridStore
Requires the database adapter to be based on mongoclient
@flow weak
*/
import { MongoClient, GridStore, Db} from 'mongodb';
import { FilesAdapter } from './FilesAdapter';
import defaults from '../../defaults';
export class GridStoreAdapter extends FilesAdapter {
_databaseURI: string;
_connectionPromise: Promise<Db>;
constructor(mongoDatabaseURI = defaults.DefaultMongoURI) {
super();
this._databaseURI = mongoDatabaseURI;
}
_connect() {
if (!this._connectionPromise) {
this._connectionPromise = MongoClient.connect(this._databaseURI);
}
return this._connectionPromise;
}
// For a given config object, filename, and data, store a file
// Returns a promise
|
(filename: string, data) {
return this._connect().then(database => {
const gridStore = new GridStore(database, filename, 'w');
return gridStore.open();
}).then(gridStore => {
return gridStore.write(data);
}).then(gridStore => {
return gridStore.close();
});
}
deleteFile(filename: string) {
return this._connect().then(database => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
}).then((gridStore) => {
return gridStore.unlink();
}).then((gridStore) => {
return gridStore.close();
});
}
getFileData(filename: string) {
return this._connect().then(database => {
return GridStore.exist(database, filename)
.then(() => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
});
}).then(gridStore => {
return gridStore.read();
});
}
getFileLocation(config, filename) {
return (config.mount + '/files/' + config.applicationId + '/' + encodeURIComponent(filename));
}
getFileStream(filename: string) {
return this._connect().then(database => {
return GridStore.exist(database, filename).then(() => {
const gridStore = new GridStore(database, filename, 'r');
return gridStore.open();
});
});
}
}
export default GridStoreAdapter;
|
createFile
|
identifier_name
|
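GridStore is the legacy chunked-file API of the MongoDB driver; the adapter rows above wrap its open/write/read/unlink calls in promises and derive download URLs from the mount point and application id. For comparison, a rough PyMongo sketch of the same four operations against GridFS, GridStore's modern replacement (URI and database name are placeholders):

import gridfs
from pymongo import MongoClient

db = MongoClient("mongodb://localhost:27017")["parse"]   # placeholder URI/name
fs = gridfs.GridFS(db)

file_id = fs.put(b"hello", filename="greeting.txt")      # createFile
data = fs.get_last_version("greeting.txt").read()        # getFileData
stream = fs.get_last_version("greeting.txt")             # getFileStream (file-like)
fs.delete(file_id)                                       # deleteFile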
handlers.py
|
""" Handlers for OpenID Connect provider. """
from django.conf import settings
from django.core.cache import cache
from courseware.access import has_access
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.user_api.models import UserPreference
from student.models import anonymous_id_for_user
from student.models import UserProfile
from student.roles import GlobalStaff, CourseStaffRole, CourseInstructorRole
class OpenIDHandler(object):
""" Basic OpenID Connect scope handler. """
def scope_openid(self, _data):
""" Only override the sub (subject) claim. """
return ['sub']
def claim_sub(self, data):
"""
Return the value of the sub (subject) claim. The value should be
unique for each user.
"""
# Use the anonymous ID without any course as unique identifier.
# Note that this ID is derived using the value of the `SECRET_KEY`
# setting, this means that users will have different sub
# values for different deployments.
value = anonymous_id_for_user(data['user'], None)
return value
class PermissionsHandler(object):
""" Permissions scope handler """
def scope_permissions(self, _data):
return ['administrator']
def claim_administrator(self, data):
"""
Return boolean indicating user's administrator status.
For our purposes an administrator is any user with is_staff set to True.
"""
return data['user'].is_staff
class ProfileHandler(object):
""" Basic OpenID Connect `profile` scope handler with `locale` claim. """
def scope_profile(self, _data):
""" Add specialized claims. """
return ['name', 'locale']
def claim_name(self, data):
""" User displayable full name. """
user = data['user']
profile = UserProfile.objects.get(user=user)
return profile.name
def claim_locale(self, data):
"""
Return the locale for the users based on their preferences.
Does not return a value if the users have not set their locale preferences.
"""
# Calling UserPreference directly because it is not clear which user made the request.
language = UserPreference.get_value(data['user'], LANGUAGE_KEY)
# If the user has no language specified, return the default one.
if not language:
language = settings.LANGUAGE_CODE
return language
class CourseAccessHandler(object):
"""
Defines two new scopes: `course_instructor` and `course_staff`. Each one is
valid only if the user is instructor or staff of at least one course.
Each new scope has a corresponding claim: `instructor_courses` and
`staff_courses` that lists the course_ids for which the user has instructor
or staff privileges.
The claims support claim request values: if there is no claim request, the
value of the claim is the list of all the courses for which the user has the
corresponding privileges. If a claim request is used, then the value of the
claim is the list of courses from the requested values that have the
corresponding privileges.
For example, if the user is staff of course_a and course_b but not
course_c, the claim corresponding to the scope request:
scope = openid course_staff
has the value:
{staff_courses: [course_a, course_b] }
For the claim request:
claims = {userinfo: {staff_courses: {values=[course_b, course_d]}}}
the corresponding claim will have the value:
{staff_courses: [course_b] }.
This is useful to quickly determine if a user has the right privileges for a
given course.
For a description of the function naming and arguments, see:
`edx_oauth2_provider/oidc/handlers.py`
"""
COURSE_CACHE_TIMEOUT = getattr(settings, 'OIDC_COURSE_HANDLER_CACHE_TIMEOUT', 60) # In seconds.
def __init__(self, *_args, **_kwargs):
self._course_cache = {}
def scope_course_instructor(self, data):
"""
Scope `course_instructor` valid only if the user is an instructor
of at least one course.
"""
# TODO: unfortunately there is not a faster and still correct way to
# check if a user is instructor of at least one course other than
# checking the access type against all known courses.
course_ids = self.find_courses(data['user'], CourseInstructorRole.ROLE)
return ['instructor_courses'] if course_ids else None
def scope_course_staff(self, data):
"""
Scope `course_staff` valid only if the user is a staff member of at
least one course.
"""
# TODO: see :method:CourseAccessHandler.scope_course_instructor
course_ids = self.find_courses(data['user'], CourseStaffRole.ROLE)
return ['staff_courses'] if course_ids else None
def claim_instructor_courses(self, data):
"""
Claim `instructor_courses` with list of course_ids for which the
user has instructor privileges.
"""
return self.find_courses(data['user'], CourseInstructorRole.ROLE, data.get('values'))
def claim_staff_courses(self, data):
"""
Claim `staff_courses` with list of course_ids for which the user
has staff privileges.
"""
return self.find_courses(data['user'], CourseStaffRole.ROLE, data.get('values'))
def find_courses(self, user, access_type, values=None):
"""
Find all courses for which the user has the specified access type. If
`values` is specified, check only the courses from `values`.
"""
# Check the instance cache and update if not present. The instance
# cache is useful since there are multiple scope and claims calls in the
# same request.
key = (user.id, access_type)
if key in self._course_cache:
course_ids = self._course_cache[key]
else:
course_ids = self._get_courses_with_access_type(user, access_type)
self._course_cache[key] = course_ids
# If values was specified, filter out other courses.
if values is not None:
course_ids = list(set(course_ids) & set(values))
return course_ids
# pylint: disable=missing-docstring
def _get_courses_with_access_type(self, user, access_type):
# Check the application cache and update if not present. The application
# cache is useful since there are calls to different endpoints in close
# succession, for example the id_token and user_info endpoints.
key = '-'.join([str(self.__class__), str(user.id), access_type])
course_ids = cache.get(key)
if not course_ids:
|
return course_ids
class IDTokenHandler(OpenIDHandler, ProfileHandler, CourseAccessHandler, PermissionsHandler):
""" Configure the ID Token handler for the LMS. """
def claim_instructor_courses(self, data):
# Don't return list of courses unless they are requested as essential.
if data.get('essential'):
return super(IDTokenHandler, self).claim_instructor_courses(data)
else:
return None
def claim_staff_courses(self, data):
# Don't return list of courses unless they are requested as essential.
if data.get('essential'):
return super(IDTokenHandler, self).claim_staff_courses(data)
else:
return None
class UserInfoHandler(OpenIDHandler, ProfileHandler, CourseAccessHandler, PermissionsHandler):
""" Configure the UserInfo handler for the LMS. """
pass
|
course_keys = CourseOverview.get_all_course_keys()
# Global staff have access to all courses. Filter courses for non-global staff.
if not GlobalStaff().has_user(user):
course_keys = [course_key for course_key in course_keys if has_access(user, access_type, course_key)]
course_ids = [unicode(course_key) for course_key in course_keys]
cache.set(key, course_ids, self.COURSE_CACHE_TIMEOUT)
|
conditional_block
|
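find_courses implements the claim-request semantics the CourseAccessHandler docstring spells out: with no requested values the claim lists every course id carrying the access type; with values it returns the intersection. The docstring's own example reduces to one line of set algebra (course ids are illustrative):

staff_courses = ['course_a', 'course_b']          # courses where the user is staff
requested = ['course_b', 'course_d']              # values from the claim request
print(list(set(staff_courses) & set(requested)))  # ['course_b']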
handlers.py
|
""" Handlers for OpenID Connect provider. """
from django.conf import settings
from django.core.cache import cache
from courseware.access import has_access
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.user_api.models import UserPreference
from student.models import anonymous_id_for_user
from student.models import UserProfile
from student.roles import GlobalStaff, CourseStaffRole, CourseInstructorRole
class OpenIDHandler(object):
""" Basic OpenID Connect scope handler. """
def scope_openid(self, _data):
""" Only override the sub (subject) claim. """
return ['sub']
def claim_sub(self, data):
"""
Return the value of the sub (subject) claim. The value should be
unique for each user.
"""
# Use the anonymous ID without any course as unique identifier.
# Note that this ID is derived using the value of the `SECRET_KEY`
# setting, this means that users will have different sub
# values for different deployments.
value = anonymous_id_for_user(data['user'], None)
return value
class PermissionsHandler(object):
""" Permissions scope handler """
def scope_permissions(self, _data):
return ['administrator']
def claim_administrator(self, data):
"""
Return boolean indicating user's administrator status.
For our purposes an administrator is any user with is_staff set to True.
"""
return data['user'].is_staff
class ProfileHandler(object):
""" Basic OpenID Connect `profile` scope handler with `locale` claim. """
def scope_profile(self, _data):
""" Add specialized claims. """
return ['name', 'locale']
def claim_name(self, data):
""" User displayable full name. """
user = data['user']
profile = UserProfile.objects.get(user=user)
return profile.name
def claim_locale(self, data):
"""
        Return the locale for the user based on their preferences.
        Falls back to the platform default language if the user has not set one.
"""
# Calling UserPreference directly because it is not clear which user made the request.
language = UserPreference.get_value(data['user'], LANGUAGE_KEY)
# If the user has no language specified, return the default one.
if not language:
language = settings.LANGUAGE_CODE
return language
class CourseAccessHandler(object):
"""
Defines two new scopes: `course_instructor` and `course_staff`. Each one is
valid only if the user is instructor or staff of at least one course.
Each new scope has a corresponding claim: `instructor_courses` and
`staff_courses` that lists the course_ids for which the user has instructor
or staff privileges.
The claims support claim request values: if there is no claim request, the
    value of the claim is the list of all the courses for which the user has the
    corresponding privileges. If a claim request is used, then the value of the
    claim is the list of courses from the requested values for which the user has
    the corresponding privileges.
For example, if the user is staff of course_a and course_b but not
course_c, the claim corresponding to the scope request:
scope = openid course_staff
has the value:
{staff_courses: [course_a, course_b] }
For the claim request:
claims = {userinfo: {staff_courses: {values=[course_b, course_d]}}}
the corresponding claim will have the value:
{staff_courses: [course_b] }.
This is useful to quickly determine if a user has the right privileges for a
given course.
For a description of the function naming and arguments, see:
`edx_oauth2_provider/oidc/handlers.py`
"""
COURSE_CACHE_TIMEOUT = getattr(settings, 'OIDC_COURSE_HANDLER_CACHE_TIMEOUT', 60) # In seconds.
def __init__(self, *_args, **_kwargs):
self._course_cache = {}
def scope_course_instructor(self, data):
"""
Scope `course_instructor` valid only if the user is an instructor
of at least one course.
"""
# TODO: unfortunately there is not a faster and still correct way to
# check if a user is instructor of at least one course other than
# checking the access type against all known courses.
course_ids = self.find_courses(data['user'], CourseInstructorRole.ROLE)
return ['instructor_courses'] if course_ids else None
def scope_course_staff(self, data):
"""
        Scope `course_staff` valid only if the user is a staff member of at
        least one course.
"""
# TODO: see :method:CourseAccessHandler.scope_course_instructor
course_ids = self.find_courses(data['user'], CourseStaffRole.ROLE)
return ['staff_courses'] if course_ids else None
def claim_instructor_courses(self, data):
"""
Claim `instructor_courses` with list of course_ids for which the
|
return self.find_courses(data['user'], CourseInstructorRole.ROLE, data.get('values'))
def claim_staff_courses(self, data):
"""
Claim `staff_courses` with list of course_ids for which the user
has staff privileges.
"""
return self.find_courses(data['user'], CourseStaffRole.ROLE, data.get('values'))
def find_courses(self, user, access_type, values=None):
"""
Find all courses for which the user has the specified access type. If
`values` is specified, check only the courses from `values`.
"""
# Check the instance cache and update if not present. The instance
# cache is useful since there are multiple scope and claims calls in the
# same request.
key = (user.id, access_type)
if key in self._course_cache:
course_ids = self._course_cache[key]
else:
course_ids = self._get_courses_with_access_type(user, access_type)
self._course_cache[key] = course_ids
# If values was specified, filter out other courses.
if values is not None:
course_ids = list(set(course_ids) & set(values))
return course_ids
# pylint: disable=missing-docstring
def _get_courses_with_access_type(self, user, access_type):
# Check the application cache and update if not present. The application
# cache is useful since there are calls to different endpoints in close
# succession, for example the id_token and user_info endpoints.
key = '-'.join([str(self.__class__), str(user.id), access_type])
course_ids = cache.get(key)
if not course_ids:
course_keys = CourseOverview.get_all_course_keys()
# Global staff have access to all courses. Filter courses for non-global staff.
if not GlobalStaff().has_user(user):
course_keys = [course_key for course_key in course_keys if has_access(user, access_type, course_key)]
course_ids = [unicode(course_key) for course_key in course_keys]
cache.set(key, course_ids, self.COURSE_CACHE_TIMEOUT)
return course_ids
class IDTokenHandler(OpenIDHandler, ProfileHandler, CourseAccessHandler, PermissionsHandler):
""" Configure the ID Token handler for the LMS. """
def claim_instructor_courses(self, data):
# Don't return list of courses unless they are requested as essential.
if data.get('essential'):
return super(IDTokenHandler, self).claim_instructor_courses(data)
else:
return None
def claim_staff_courses(self, data):
# Don't return list of courses unless they are requested as essential.
if data.get('essential'):
return super(IDTokenHandler, self).claim_staff_courses(data)
else:
return None
class UserInfoHandler(OpenIDHandler, ProfileHandler, CourseAccessHandler, PermissionsHandler):
""" Configure the UserInfo handler for the LMS. """
pass
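# Illustrative sketch (hypothetical course ids, not part of the module): the
# claim-request filtering performed by find_courses above is a plain set
# intersection, e.g.:
#   user_course_ids = ['course_a', 'course_b']   # courses where the user is staff
#   requested = ['course_b', 'course_d']         # claim request `values`
#   list(set(user_course_ids) & set(requested))  # -> ['course_b']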
|
user has instructor privileges.
"""
|
random_line_split
|
handlers.py
|
""" Handlers for OpenID Connect provider. """
from django.conf import settings
from django.core.cache import cache
from courseware.access import has_access
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.user_api.models import UserPreference
from student.models import anonymous_id_for_user
from student.models import UserProfile
from student.roles import GlobalStaff, CourseStaffRole, CourseInstructorRole
class OpenIDHandler(object):
""" Basic OpenID Connect scope handler. """
def scope_openid(self, _data):
""" Only override the sub (subject) claim. """
return ['sub']
def claim_sub(self, data):
"""
Return the value of the sub (subject) claim. The value should be
unique for each user.
"""
# Use the anonymous ID without any course as unique identifier.
# Note that this ID is derived using the value of the `SECRET_KEY`
# setting, this means that users will have different sub
# values for different deployments.
value = anonymous_id_for_user(data['user'], None)
return value
class PermissionsHandler(object):
""" Permissions scope handler """
def scope_permissions(self, _data):
return ['administrator']
def claim_administrator(self, data):
"""
Return boolean indicating user's administrator status.
For our purposes an administrator is any user with is_staff set to True.
"""
return data['user'].is_staff
class ProfileHandler(object):
""" Basic OpenID Connect `profile` scope handler with `locale` claim. """
def scope_profile(self, _data):
""" Add specialized claims. """
return ['name', 'locale']
def claim_name(self, data):
""" User displayable full name. """
user = data['user']
profile = UserProfile.objects.get(user=user)
return profile.name
def claim_locale(self, data):
"""
        Return the locale for the user based on their preferences.
        Falls back to the platform default language if the user has not set one.
"""
# Calling UserPreference directly because it is not clear which user made the request.
language = UserPreference.get_value(data['user'], LANGUAGE_KEY)
# If the user has no language specified, return the default one.
if not language:
language = settings.LANGUAGE_CODE
return language
class CourseAccessHandler(object):
"""
Defines two new scopes: `course_instructor` and `course_staff`. Each one is
valid only if the user is instructor or staff of at least one course.
Each new scope has a corresponding claim: `instructor_courses` and
`staff_courses` that lists the course_ids for which the user has instructor
or staff privileges.
The claims support claim request values: if there is no claim request, the
    value of the claim is the list of all the courses for which the user has the
    corresponding privileges. If a claim request is used, then the value of the
    claim is the list of courses from the requested values for which the user has
    the corresponding privileges.
For example, if the user is staff of course_a and course_b but not
course_c, the claim corresponding to the scope request:
scope = openid course_staff
has the value:
{staff_courses: [course_a, course_b] }
For the claim request:
claims = {userinfo: {staff_courses: {values=[course_b, course_d]}}}
the corresponding claim will have the value:
{staff_courses: [course_b] }.
This is useful to quickly determine if a user has the right privileges for a
given course.
For a description of the function naming and arguments, see:
`edx_oauth2_provider/oidc/handlers.py`
"""
COURSE_CACHE_TIMEOUT = getattr(settings, 'OIDC_COURSE_HANDLER_CACHE_TIMEOUT', 60) # In seconds.
def __init__(self, *_args, **_kwargs):
self._course_cache = {}
def scope_course_instructor(self, data):
"""
Scope `course_instructor` valid only if the user is an instructor
of at least one course.
"""
# TODO: unfortunately there is not a faster and still correct way to
# check if a user is instructor of at least one course other than
# checking the access type against all known courses.
course_ids = self.find_courses(data['user'], CourseInstructorRole.ROLE)
return ['instructor_courses'] if course_ids else None
def scope_course_staff(self, data):
"""
        Scope `course_staff` valid only if the user is a staff member of at
        least one course.
"""
# TODO: see :method:CourseAccessHandler.scope_course_instructor
course_ids = self.find_courses(data['user'], CourseStaffRole.ROLE)
return ['staff_courses'] if course_ids else None
def claim_instructor_courses(self, data):
"""
Claim `instructor_courses` with list of course_ids for which the
user has instructor privileges.
"""
return self.find_courses(data['user'], CourseInstructorRole.ROLE, data.get('values'))
def claim_staff_courses(self, data):
"""
Claim `staff_courses` with list of course_ids for which the user
has staff privileges.
"""
return self.find_courses(data['user'], CourseStaffRole.ROLE, data.get('values'))
def find_courses(self, user, access_type, values=None):
"""
Find all courses for which the user has the specified access type. If
`values` is specified, check only the courses from `values`.
"""
# Check the instance cache and update if not present. The instance
# cache is useful since there are multiple scope and claims calls in the
# same request.
key = (user.id, access_type)
if key in self._course_cache:
course_ids = self._course_cache[key]
else:
course_ids = self._get_courses_with_access_type(user, access_type)
self._course_cache[key] = course_ids
# If values was specified, filter out other courses.
if values is not None:
course_ids = list(set(course_ids) & set(values))
return course_ids
# pylint: disable=missing-docstring
def
|
(self, user, access_type):
# Check the application cache and update if not present. The application
# cache is useful since there are calls to different endpoints in close
# succession, for example the id_token and user_info endpoints.
key = '-'.join([str(self.__class__), str(user.id), access_type])
course_ids = cache.get(key)
if not course_ids:
course_keys = CourseOverview.get_all_course_keys()
# Global staff have access to all courses. Filter courses for non-global staff.
if not GlobalStaff().has_user(user):
course_keys = [course_key for course_key in course_keys if has_access(user, access_type, course_key)]
course_ids = [unicode(course_key) for course_key in course_keys]
cache.set(key, course_ids, self.COURSE_CACHE_TIMEOUT)
return course_ids
class IDTokenHandler(OpenIDHandler, ProfileHandler, CourseAccessHandler, PermissionsHandler):
""" Configure the ID Token handler for the LMS. """
def claim_instructor_courses(self, data):
# Don't return list of courses unless they are requested as essential.
if data.get('essential'):
return super(IDTokenHandler, self).claim_instructor_courses(data)
else:
return None
def claim_staff_courses(self, data):
# Don't return list of courses unless they are requested as essential.
if data.get('essential'):
return super(IDTokenHandler, self).claim_staff_courses(data)
else:
return None
class UserInfoHandler(OpenIDHandler, ProfileHandler, CourseAccessHandler, PermissionsHandler):
""" Configure the UserInfo handler for the LMS. """
pass
|
_get_courses_with_access_type
|
identifier_name
|
handlers.py
|
""" Handlers for OpenID Connect provider. """
from django.conf import settings
from django.core.cache import cache
from courseware.access import has_access
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.user_api.models import UserPreference
from student.models import anonymous_id_for_user
from student.models import UserProfile
from student.roles import GlobalStaff, CourseStaffRole, CourseInstructorRole
class OpenIDHandler(object):
""" Basic OpenID Connect scope handler. """
def scope_openid(self, _data):
""" Only override the sub (subject) claim. """
return ['sub']
def claim_sub(self, data):
"""
Return the value of the sub (subject) claim. The value should be
unique for each user.
"""
# Use the anonymous ID without any course as unique identifier.
# Note that this ID is derived using the value of the `SECRET_KEY`
# setting, this means that users will have different sub
# values for different deployments.
value = anonymous_id_for_user(data['user'], None)
return value
class PermissionsHandler(object):
|
class ProfileHandler(object):
""" Basic OpenID Connect `profile` scope handler with `locale` claim. """
def scope_profile(self, _data):
""" Add specialized claims. """
return ['name', 'locale']
def claim_name(self, data):
""" User displayable full name. """
user = data['user']
profile = UserProfile.objects.get(user=user)
return profile.name
def claim_locale(self, data):
"""
        Return the locale for the user based on their preferences.
        Falls back to the platform default language if the user has not set one.
"""
# Calling UserPreference directly because it is not clear which user made the request.
language = UserPreference.get_value(data['user'], LANGUAGE_KEY)
# If the user has no language specified, return the default one.
if not language:
language = settings.LANGUAGE_CODE
return language
class CourseAccessHandler(object):
"""
Defines two new scopes: `course_instructor` and `course_staff`. Each one is
valid only if the user is instructor or staff of at least one course.
Each new scope has a corresponding claim: `instructor_courses` and
`staff_courses` that lists the course_ids for which the user has instructor
or staff privileges.
The claims support claim request values: if there is no claim request, the
    value of the claim is the list of all the courses for which the user has the
    corresponding privileges. If a claim request is used, then the value of the
    claim is the list of courses from the requested values for which the user has
    the corresponding privileges.
For example, if the user is staff of course_a and course_b but not
course_c, the claim corresponding to the scope request:
scope = openid course_staff
has the value:
{staff_courses: [course_a, course_b] }
For the claim request:
claims = {userinfo: {staff_courses: {values=[course_b, course_d]}}}
the corresponding claim will have the value:
{staff_courses: [course_b] }.
This is useful to quickly determine if a user has the right privileges for a
given course.
For a description of the function naming and arguments, see:
`edx_oauth2_provider/oidc/handlers.py`
"""
COURSE_CACHE_TIMEOUT = getattr(settings, 'OIDC_COURSE_HANDLER_CACHE_TIMEOUT', 60) # In seconds.
def __init__(self, *_args, **_kwargs):
self._course_cache = {}
def scope_course_instructor(self, data):
"""
Scope `course_instructor` valid only if the user is an instructor
of at least one course.
"""
# TODO: unfortunately there is not a faster and still correct way to
# check if a user is instructor of at least one course other than
# checking the access type against all known courses.
course_ids = self.find_courses(data['user'], CourseInstructorRole.ROLE)
return ['instructor_courses'] if course_ids else None
def scope_course_staff(self, data):
"""
        Scope `course_staff` valid only if the user is a staff member of at
        least one course.
"""
# TODO: see :method:CourseAccessHandler.scope_course_instructor
course_ids = self.find_courses(data['user'], CourseStaffRole.ROLE)
return ['staff_courses'] if course_ids else None
def claim_instructor_courses(self, data):
"""
Claim `instructor_courses` with list of course_ids for which the
user has instructor privileges.
"""
return self.find_courses(data['user'], CourseInstructorRole.ROLE, data.get('values'))
def claim_staff_courses(self, data):
"""
Claim `staff_courses` with list of course_ids for which the user
has staff privileges.
"""
return self.find_courses(data['user'], CourseStaffRole.ROLE, data.get('values'))
def find_courses(self, user, access_type, values=None):
"""
Find all courses for which the user has the specified access type. If
`values` is specified, check only the courses from `values`.
"""
# Check the instance cache and update if not present. The instance
# cache is useful since there are multiple scope and claims calls in the
# same request.
key = (user.id, access_type)
if key in self._course_cache:
course_ids = self._course_cache[key]
else:
course_ids = self._get_courses_with_access_type(user, access_type)
self._course_cache[key] = course_ids
# If values was specified, filter out other courses.
if values is not None:
course_ids = list(set(course_ids) & set(values))
return course_ids
# pylint: disable=missing-docstring
def _get_courses_with_access_type(self, user, access_type):
# Check the application cache and update if not present. The application
# cache is useful since there are calls to different endpoints in close
# succession, for example the id_token and user_info endpoints.
key = '-'.join([str(self.__class__), str(user.id), access_type])
course_ids = cache.get(key)
if not course_ids:
course_keys = CourseOverview.get_all_course_keys()
# Global staff have access to all courses. Filter courses for non-global staff.
if not GlobalStaff().has_user(user):
course_keys = [course_key for course_key in course_keys if has_access(user, access_type, course_key)]
course_ids = [unicode(course_key) for course_key in course_keys]
cache.set(key, course_ids, self.COURSE_CACHE_TIMEOUT)
return course_ids
class IDTokenHandler(OpenIDHandler, ProfileHandler, CourseAccessHandler, PermissionsHandler):
""" Configure the ID Token handler for the LMS. """
def claim_instructor_courses(self, data):
# Don't return list of courses unless they are requested as essential.
if data.get('essential'):
return super(IDTokenHandler, self).claim_instructor_courses(data)
else:
return None
def claim_staff_courses(self, data):
# Don't return list of courses unless they are requested as essential.
if data.get('essential'):
return super(IDTokenHandler, self).claim_staff_courses(data)
else:
return None
class UserInfoHandler(OpenIDHandler, ProfileHandler, CourseAccessHandler, PermissionsHandler):
""" Configure the UserInfo handler for the LMS. """
pass
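# Illustrative sketch (hypothetical values): the two cache layers above key the
# same lookup differently:
#   instance cache key: (user.id, access_type), e.g. (42, 'staff')
#   django cache key:   '-'.join([str(self.__class__), str(user.id), access_type])
# The instance cache lives only as long as the handler instance (one request);
# the django cache entry expires after COURSE_CACHE_TIMEOUT seconds.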
|
""" Permissions scope handler """
def scope_permissions(self, _data):
return ['administrator']
def claim_administrator(self, data):
"""
Return boolean indicating user's administrator status.
For our purposes an administrator is any user with is_staff set to True.
"""
return data['user'].is_staff
|
identifier_body
|
main.ts
|
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
//tslint:disable
'use strict';
import * as path from 'path';
import * as fs from 'fs';
import * as vscode from 'vscode';
type AutoDetect = 'on' | 'off';
let taskProvider: vscode.Disposable | undefined;
export function activate(_context: vscode.ExtensionContext): void {
if (!vscode.workspace.workspaceFolders) {
return;
}
function onConfigurationChanged() {
let autoDetect = vscode.workspace.getConfiguration('npm').get<AutoDetect>('autoDetect');
if (taskProvider && autoDetect === 'off') {
taskProvider.dispose();
taskProvider = undefined;
} else if (!taskProvider && autoDetect === 'on') {
taskProvider = vscode.workspace.registerTaskProvider('npm', {
provideTasks: () => {
return provideNpmScripts();
},
resolveTask(_task: vscode.Task): vscode.Task | undefined {
return undefined;
}
});
}
}
vscode.workspace.onDidChangeConfiguration(onConfigurationChanged);
onConfigurationChanged();
}
export function deactivate(): void {
if (taskProvider) {
taskProvider.dispose();
}
}
async function
|
(file: string): Promise<boolean> {
return new Promise<boolean>((resolve, _reject) => {
fs.exists(file, (value) => {
resolve(value);
});
});
}
async function readFile(file: string): Promise<string> {
return new Promise<string>((resolve, reject) => {
fs.readFile(file, (err, data) => {
			if (err) {
				reject(err);
				return; // don't also resolve with undefined data after an error
			}
			resolve(data.toString());
});
});
}
interface NpmTaskDefinition extends vscode.TaskDefinition {
script: string;
file?: string;
}
const buildNames: string[] = ['build', 'compile', 'watch'];
function isBuildTask(name: string): boolean {
for (let buildName of buildNames) {
if (name.indexOf(buildName) !== -1) {
return true;
}
}
return false;
}
const testNames: string[] = ['test'];
function isTestTask(name: string): boolean {
for (let testName of testNames) {
if (name === testName) {
return true;
}
}
return false;
}
function isNotPreOrPostScript(script: string): boolean {
return !(script.startsWith('pre') || script.startsWith('post'));
}
async function provideNpmScripts(): Promise<vscode.Task[]> {
let emptyTasks: vscode.Task[] = [];
let allTasks: vscode.Task[] = [];
let folders = vscode.workspace.workspaceFolders;
if (!folders) {
return emptyTasks;
}
const isSingleRoot = folders.length === 1;
for (let i = 0; i < folders.length; i++) {
let tasks = await provideNpmScriptsForFolder(folders[i], isSingleRoot);
allTasks.push(...tasks);
}
return allTasks;
}
async function provideNpmScriptsForFolder(folder: vscode.WorkspaceFolder, singleRoot: boolean): Promise<vscode.Task[]> {
let rootPath = folder.uri.fsPath;
let emptyTasks: vscode.Task[] = [];
let packageJson = path.join(rootPath, 'package.json');
if (!await exists(packageJson)) {
return emptyTasks;
}
try {
var contents = await readFile(packageJson);
var json = JSON.parse(contents);
if (!json.scripts) {
return emptyTasks;
}
const result: vscode.Task[] = [];
Object.keys(json.scripts).filter(isNotPreOrPostScript).forEach(each => {
const task = createTask(each, `run ${each}`, rootPath, folder.name, singleRoot);
const lowerCaseTaskName = each.toLowerCase();
if (isBuildTask(lowerCaseTaskName)) {
task.group = vscode.TaskGroup.Build;
} else if (isTestTask(lowerCaseTaskName)) {
task.group = vscode.TaskGroup.Test;
}
result.push(task);
});
// always add npm install (without a problem matcher)
result.push(createTask('install', 'install', rootPath, folder.name, singleRoot, []));
return result;
} catch (e) {
return emptyTasks;
}
}
function createTask(script: string, cmd: string, rootPath: string, shortPath: string, singleRoot: boolean, matcher?: any): vscode.Task {
function getTaskName(script: string, shortPath: string, singleRoot: boolean) {
if (singleRoot) {
return script;
}
return `${script} - ${shortPath}`;
}
function getNpmCommandLine(cmd: string): string {
if (vscode.workspace.getConfiguration('npm').get<boolean>('runSilent')) {
return `npm --silent ${cmd}`;
}
return `npm ${cmd}`;
}
let kind: NpmTaskDefinition = {
type: 'npm',
script: script
};
let taskName = getTaskName(script, shortPath, singleRoot);
return new vscode.Task(kind, taskName, 'npm', new vscode.ShellExecution(getNpmCommandLine(cmd), { cwd: rootPath }), matcher);
}
|
exists
|
identifier_name
|
main.ts
|
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
//tslint:disable
'use strict';
import * as path from 'path';
import * as fs from 'fs';
import * as vscode from 'vscode';
type AutoDetect = 'on' | 'off';
let taskProvider: vscode.Disposable | undefined;
export function activate(_context: vscode.ExtensionContext): void {
if (!vscode.workspace.workspaceFolders) {
return;
}
function onConfigurationChanged() {
let autoDetect = vscode.workspace.getConfiguration('npm').get<AutoDetect>('autoDetect');
if (taskProvider && autoDetect === 'off') {
taskProvider.dispose();
taskProvider = undefined;
} else if (!taskProvider && autoDetect === 'on') {
taskProvider = vscode.workspace.registerTaskProvider('npm', {
provideTasks: () => {
return provideNpmScripts();
},
resolveTask(_task: vscode.Task): vscode.Task | undefined {
return undefined;
}
});
}
}
vscode.workspace.onDidChangeConfiguration(onConfigurationChanged);
onConfigurationChanged();
}
export function deactivate(): void {
if (taskProvider) {
taskProvider.dispose();
}
}
async function exists(file: string): Promise<boolean> {
return new Promise<boolean>((resolve, _reject) => {
fs.exists(file, (value) => {
resolve(value);
});
});
}
async function readFile(file: string): Promise<string> {
return new Promise<string>((resolve, reject) => {
fs.readFile(file, (err, data) => {
			if (err) {
				reject(err);
				return; // don't also resolve with undefined data after an error
			}
			resolve(data.toString());
});
});
}
interface NpmTaskDefinition extends vscode.TaskDefinition {
script: string;
file?: string;
}
const buildNames: string[] = ['build', 'compile', 'watch'];
function isBuildTask(name: string): boolean {
for (let buildName of buildNames) {
if (name.indexOf(buildName) !== -1) {
return true;
}
}
return false;
}
const testNames: string[] = ['test'];
function isTestTask(name: string): boolean {
for (let testName of testNames) {
if (name === testName) {
return true;
}
}
return false;
}
function isNotPreOrPostScript(script: string): boolean {
return !(script.startsWith('pre') || script.startsWith('post'));
}
async function provideNpmScripts(): Promise<vscode.Task[]> {
let emptyTasks: vscode.Task[] = [];
let allTasks: vscode.Task[] = [];
let folders = vscode.workspace.workspaceFolders;
if (!folders) {
return emptyTasks;
}
const isSingleRoot = folders.length === 1;
for (let i = 0; i < folders.length; i++) {
let tasks = await provideNpmScriptsForFolder(folders[i], isSingleRoot);
allTasks.push(...tasks);
}
return allTasks;
}
async function provideNpmScriptsForFolder(folder: vscode.WorkspaceFolder, singleRoot: boolean): Promise<vscode.Task[]> {
let rootPath = folder.uri.fsPath;
let emptyTasks: vscode.Task[] = [];
let packageJson = path.join(rootPath, 'package.json');
if (!await exists(packageJson)) {
return emptyTasks;
}
try {
var contents = await readFile(packageJson);
var json = JSON.parse(contents);
if (!json.scripts) {
return emptyTasks;
}
const result: vscode.Task[] = [];
Object.keys(json.scripts).filter(isNotPreOrPostScript).forEach(each => {
const task = createTask(each, `run ${each}`, rootPath, folder.name, singleRoot);
const lowerCaseTaskName = each.toLowerCase();
if (isBuildTask(lowerCaseTaskName)) {
task.group = vscode.TaskGroup.Build;
} else if (isTestTask(lowerCaseTaskName)) {
task.group = vscode.TaskGroup.Test;
}
result.push(task);
});
// always add npm install (without a problem matcher)
result.push(createTask('install', 'install', rootPath, folder.name, singleRoot, []));
return result;
} catch (e) {
return emptyTasks;
}
}
function createTask(script: string, cmd: string, rootPath: string, shortPath: string, singleRoot: boolean, matcher?: any): vscode.Task {
function getTaskName(script: string, shortPath: string, singleRoot: boolean) {
if (singleRoot) {
return script;
|
}
return `${script} - ${shortPath}`;
}
function getNpmCommandLine(cmd: string): string {
if (vscode.workspace.getConfiguration('npm').get<boolean>('runSilent')) {
return `npm --silent ${cmd}`;
}
return `npm ${cmd}`;
}
let kind: NpmTaskDefinition = {
type: 'npm',
script: script
};
let taskName = getTaskName(script, shortPath, singleRoot);
return new vscode.Task(kind, taskName, 'npm', new vscode.ShellExecution(getNpmCommandLine(cmd), { cwd: rootPath }), matcher);
}
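// Illustrative sketch: toggling the npm.autoDetect setting at runtime drives
// the provider lifecycle above; switching it to 'off' disposes the registered
// task provider, and switching back to 'on' re-registers it via
// vscode.workspace.registerTaskProvider.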
|
random_line_split
|
|
main.ts
|
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
//tslint:disable
'use strict';
import * as path from 'path';
import * as fs from 'fs';
import * as vscode from 'vscode';
type AutoDetect = 'on' | 'off';
let taskProvider: vscode.Disposable | undefined;
export function activate(_context: vscode.ExtensionContext): void {
if (!vscode.workspace.workspaceFolders) {
return;
}
function onConfigurationChanged() {
let autoDetect = vscode.workspace.getConfiguration('npm').get<AutoDetect>('autoDetect');
if (taskProvider && autoDetect === 'off') {
taskProvider.dispose();
taskProvider = undefined;
} else if (!taskProvider && autoDetect === 'on') {
taskProvider = vscode.workspace.registerTaskProvider('npm', {
provideTasks: () => {
return provideNpmScripts();
},
resolveTask(_task: vscode.Task): vscode.Task | undefined {
return undefined;
}
});
}
}
vscode.workspace.onDidChangeConfiguration(onConfigurationChanged);
onConfigurationChanged();
}
export function deactivate(): void {
if (taskProvider) {
taskProvider.dispose();
}
}
async function exists(file: string): Promise<boolean> {
return new Promise<boolean>((resolve, _reject) => {
fs.exists(file, (value) => {
resolve(value);
});
});
}
async function readFile(file: string): Promise<string> {
return new Promise<string>((resolve, reject) => {
fs.readFile(file, (err, data) => {
			if (err) {
				reject(err);
				return; // don't also resolve with undefined data after an error
			}
			resolve(data.toString());
});
});
}
interface NpmTaskDefinition extends vscode.TaskDefinition {
script: string;
file?: string;
}
const buildNames: string[] = ['build', 'compile', 'watch'];
function isBuildTask(name: string): boolean {
for (let buildName of buildNames) {
if (name.indexOf(buildName) !== -1) {
return true;
}
}
return false;
}
const testNames: string[] = ['test'];
function isTestTask(name: string): boolean
|
function isNotPreOrPostScript(script: string): boolean {
return !(script.startsWith('pre') || script.startsWith('post'));
}
async function provideNpmScripts(): Promise<vscode.Task[]> {
let emptyTasks: vscode.Task[] = [];
let allTasks: vscode.Task[] = [];
let folders = vscode.workspace.workspaceFolders;
if (!folders) {
return emptyTasks;
}
const isSingleRoot = folders.length === 1;
for (let i = 0; i < folders.length; i++) {
let tasks = await provideNpmScriptsForFolder(folders[i], isSingleRoot);
allTasks.push(...tasks);
}
return allTasks;
}
async function provideNpmScriptsForFolder(folder: vscode.WorkspaceFolder, singleRoot: boolean): Promise<vscode.Task[]> {
let rootPath = folder.uri.fsPath;
let emptyTasks: vscode.Task[] = [];
let packageJson = path.join(rootPath, 'package.json');
if (!await exists(packageJson)) {
return emptyTasks;
}
try {
var contents = await readFile(packageJson);
var json = JSON.parse(contents);
if (!json.scripts) {
return emptyTasks;
}
const result: vscode.Task[] = [];
Object.keys(json.scripts).filter(isNotPreOrPostScript).forEach(each => {
const task = createTask(each, `run ${each}`, rootPath, folder.name, singleRoot);
const lowerCaseTaskName = each.toLowerCase();
if (isBuildTask(lowerCaseTaskName)) {
task.group = vscode.TaskGroup.Build;
} else if (isTestTask(lowerCaseTaskName)) {
task.group = vscode.TaskGroup.Test;
}
result.push(task);
});
// always add npm install (without a problem matcher)
result.push(createTask('install', 'install', rootPath, folder.name, singleRoot, []));
return result;
} catch (e) {
return emptyTasks;
}
}
function createTask(script: string, cmd: string, rootPath: string, shortPath: string, singleRoot: boolean, matcher?: any): vscode.Task {
function getTaskName(script: string, shortPath: string, singleRoot: boolean) {
if (singleRoot) {
return script;
}
return `${script} - ${shortPath}`;
}
function getNpmCommandLine(cmd: string): string {
if (vscode.workspace.getConfiguration('npm').get<boolean>('runSilent')) {
return `npm --silent ${cmd}`;
}
return `npm ${cmd}`;
}
let kind: NpmTaskDefinition = {
type: 'npm',
script: script
};
let taskName = getTaskName(script, shortPath, singleRoot);
return new vscode.Task(kind, taskName, 'npm', new vscode.ShellExecution(getNpmCommandLine(cmd), { cwd: rootPath }), matcher);
}
|
{
for (let testName of testNames) {
if (name === testName) {
return true;
}
}
return false;
}
|
identifier_body
|
inputevent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
use crate::dom::bindings::codegen::Bindings::InputEventBinding::{self, InputEventMethods};
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::uievent::UIEvent;
use crate::dom::window::Window;
use dom_struct::dom_struct;
#[dom_struct]
pub struct InputEvent {
uievent: UIEvent,
data: Option<DOMString>,
is_composing: bool,
}
impl InputEvent {
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
data: Option<DOMString>,
is_composing: bool,
) -> DomRoot<InputEvent> {
let ev = reflect_dom_object(
Box::new(InputEvent {
uievent: UIEvent::new_inherited(),
                data,
                is_composing,
}),
window,
InputEventBinding::Wrap,
);
ev.uievent
.InitUIEvent(type_, can_bubble, cancelable, view, detail);
ev
}
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &InputEventBinding::InputEventInit,
) -> Fallible<DomRoot<InputEvent>> {
let event = InputEvent::new(
window,
type_,
init.parent.parent.bubbles,
init.parent.parent.cancelable,
init.parent.view.r(),
init.parent.detail,
init.data.clone(),
init.isComposing,
);
Ok(event)
}
}
impl InputEventMethods for InputEvent {
// https://w3c.github.io/uievents/#dom-inputevent-data
fn GetData(&self) -> Option<DOMString> {
self.data.clone()
}
// https://w3c.github.io/uievents/#dom-inputevent-iscomposing
fn IsComposing(&self) -> bool {
self.is_composing
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool {
self.uievent.IsTrusted()
}
}
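// Illustrative sketch (hypothetical values, not part of the module): a page
// script constructing
//   new InputEvent("input", { data: "a", isComposing: true })
// reaches Constructor above, so GetData() returns Some("a") and IsComposing()
// returns true; IsTrusted() delegates to the underlying UIEvent, which is
// false for script-created events.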
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
|
random_line_split
|
inputevent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::InputEventBinding::{self, InputEventMethods};
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::uievent::UIEvent;
use crate::dom::window::Window;
use dom_struct::dom_struct;
#[dom_struct]
pub struct InputEvent {
uievent: UIEvent,
data: Option<DOMString>,
is_composing: bool,
}
impl InputEvent {
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
data: Option<DOMString>,
is_composing: bool,
) -> DomRoot<InputEvent> {
let ev = reflect_dom_object(
Box::new(InputEvent {
uievent: UIEvent::new_inherited(),
                data,
                is_composing,
}),
window,
InputEventBinding::Wrap,
);
ev.uievent
.InitUIEvent(type_, can_bubble, cancelable, view, detail);
ev
}
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &InputEventBinding::InputEventInit,
) -> Fallible<DomRoot<InputEvent>> {
let event = InputEvent::new(
window,
type_,
init.parent.parent.bubbles,
init.parent.parent.cancelable,
init.parent.view.r(),
init.parent.detail,
init.data.clone(),
init.isComposing,
);
Ok(event)
}
}
impl InputEventMethods for InputEvent {
// https://w3c.github.io/uievents/#dom-inputevent-data
fn GetData(&self) -> Option<DOMString> {
self.data.clone()
}
// https://w3c.github.io/uievents/#dom-inputevent-iscomposing
fn IsComposing(&self) -> bool {
self.is_composing
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn IsTrusted(&self) -> bool
|
}
|
{
self.uievent.IsTrusted()
}
|
identifier_body
|
inputevent.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::InputEventBinding::{self, InputEventMethods};
use crate::dom::bindings::codegen::Bindings::UIEventBinding::UIEventBinding::UIEventMethods;
use crate::dom::bindings::error::Fallible;
use crate::dom::bindings::reflector::reflect_dom_object;
use crate::dom::bindings::root::{DomRoot, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::uievent::UIEvent;
use crate::dom::window::Window;
use dom_struct::dom_struct;
#[dom_struct]
pub struct InputEvent {
uievent: UIEvent,
data: Option<DOMString>,
is_composing: bool,
}
impl InputEvent {
pub fn new(
window: &Window,
type_: DOMString,
can_bubble: bool,
cancelable: bool,
view: Option<&Window>,
detail: i32,
data: Option<DOMString>,
is_composing: bool,
) -> DomRoot<InputEvent> {
let ev = reflect_dom_object(
Box::new(InputEvent {
uievent: UIEvent::new_inherited(),
                data,
                is_composing,
}),
window,
InputEventBinding::Wrap,
);
ev.uievent
.InitUIEvent(type_, can_bubble, cancelable, view, detail);
ev
}
pub fn Constructor(
window: &Window,
type_: DOMString,
init: &InputEventBinding::InputEventInit,
) -> Fallible<DomRoot<InputEvent>> {
let event = InputEvent::new(
window,
type_,
init.parent.parent.bubbles,
init.parent.parent.cancelable,
init.parent.view.r(),
init.parent.detail,
init.data.clone(),
init.isComposing,
);
Ok(event)
}
}
impl InputEventMethods for InputEvent {
// https://w3c.github.io/uievents/#dom-inputevent-data
fn GetData(&self) -> Option<DOMString> {
self.data.clone()
}
// https://w3c.github.io/uievents/#dom-inputevent-iscomposing
fn IsComposing(&self) -> bool {
self.is_composing
}
// https://dom.spec.whatwg.org/#dom-event-istrusted
fn
|
(&self) -> bool {
self.uievent.IsTrusted()
}
}
|
IsTrusted
|
identifier_name
|
mount.rs
|
use util::*;
use hyper::status::StatusCode;
use hyper::client::Response;
fn
|
<F>(path: &str, f: F) where F: FnOnce(&mut Response) {
run_example("mount", |port| {
let url = format!("http://localhost:{}{}", port, path);
let ref mut res = response_for(&url);
f(res)
})
}
#[test]
fn trims_the_prefix() {
with_path("/test/foo", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "Got request with uri = '/foo'");
})
}
#[test]
fn ignores_unmatched_prefixes() {
with_path("/this_isnt_matched/foo", |res| {
assert_eq!(res.status, StatusCode::NotFound);
})
}
#[test]
fn works_with_another_middleware() {
with_path("/static/files/thoughtram_logo_brain.png", |res| {
assert_eq!(res.status, StatusCode::Ok);
});
with_path("/static/files/nested/foo.js", |res| {
let s = read_body_to_string(res);
assert!(s.starts_with("function foo"), "unexpected response: {:?}", s);
});
}
#[test]
fn fallthroughs_with_same_prefix() {
// depends on `works_with_another_middleware` passing
with_path("/static/files/a", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "No static file with path '/a'!");
});
}
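// Illustrative sketch of the behaviour exercised above: a handler mounted at
// "/test" sees the request URI with the mount prefix stripped,
//   GET /test/foo               -> handler receives uri = "/foo"
// while an unmatched prefix falls through,
//   GET /this_isnt_matched/foo  -> 404 Not Found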
|
with_path
|
identifier_name
|
mount.rs
|
use util::*;
use hyper::status::StatusCode;
use hyper::client::Response;
fn with_path<F>(path: &str, f: F) where F: FnOnce(&mut Response) {
run_example("mount", |port| {
let url = format!("http://localhost:{}{}", port, path);
let ref mut res = response_for(&url);
f(res)
})
}
#[test]
fn trims_the_prefix() {
with_path("/test/foo", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "Got request with uri = '/foo'");
})
}
#[test]
fn ignores_unmatched_prefixes() {
with_path("/this_isnt_matched/foo", |res| {
assert_eq!(res.status, StatusCode::NotFound);
})
}
#[test]
fn works_with_another_middleware() {
with_path("/static/files/thoughtram_logo_brain.png", |res| {
assert_eq!(res.status, StatusCode::Ok);
});
with_path("/static/files/nested/foo.js", |res| {
let s = read_body_to_string(res);
assert!(s.starts_with("function foo"), "unexpected response: {:?}", s);
});
}
#[test]
fn fallthroughs_with_same_prefix() {
// depends on `works_with_another_middleware` passing
|
with_path("/static/files/a", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "No static file with path '/a'!");
});
}
|
random_line_split
|
|
mount.rs
|
use util::*;
use hyper::status::StatusCode;
use hyper::client::Response;
fn with_path<F>(path: &str, f: F) where F: FnOnce(&mut Response) {
run_example("mount", |port| {
let url = format!("http://localhost:{}{}", port, path);
let ref mut res = response_for(&url);
f(res)
})
}
#[test]
fn trims_the_prefix() {
with_path("/test/foo", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "Got request with uri = '/foo'");
})
}
#[test]
fn ignores_unmatched_prefixes() {
with_path("/this_isnt_matched/foo", |res| {
assert_eq!(res.status, StatusCode::NotFound);
})
}
#[test]
fn works_with_another_middleware() {
with_path("/static/files/thoughtram_logo_brain.png", |res| {
assert_eq!(res.status, StatusCode::Ok);
});
with_path("/static/files/nested/foo.js", |res| {
let s = read_body_to_string(res);
assert!(s.starts_with("function foo"), "unexpected response: {:?}", s);
});
}
#[test]
fn fallthroughs_with_same_prefix()
|
{
// depends on `works_with_another_middleware` passing
with_path("/static/files/a", |res| {
let s = read_body_to_string(res);
assert_eq!(s, "No static file with path '/a'!");
});
}
|
identifier_body
|
|
seed.rs
|
// Copyright 2015 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO: Use PrimInt + Signed instead of SignedInt + NumCast once num has
// PrimInt implementations
use num::{NumCast,Signed,PrimInt};
use rand::{Rand, Rng, SeedableRng, XorShiftRng};
use math;
const TABLE_SIZE: usize = 256;
/// A seed table, required by all noise functions.
///
/// Table creation is expensive, so in most circumstances you'll only want to
/// create one of these and reuse it everywhere.
#[allow(missing_copy_implementations)]
pub struct Seed {
values: [u8; TABLE_SIZE],
}
impl Rand for Seed {
/// Generates a random seed.
///
/// # Examples
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
///
/// # fn main() {
/// let seed = rand::random::<Seed>();
/// # }
/// ```
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
/// use rand::{SeedableRng, Rng, XorShiftRng};
///
/// # fn main() {
/// let mut rng: XorShiftRng = SeedableRng::from_seed([1, 2, 3, 4]);
/// let seed = rng.gen::<Seed>();
/// # }
/// ```
fn rand<R: Rng>(rng: &mut R) -> Seed {
let mut seq: Vec<u8> = (0 .. TABLE_SIZE).map(|x| x as u8).collect();
rng.shuffle(&mut *seq);
// It's unfortunate that this double-initializes the array, but Rust doesn't currently provide a
// clean way to do this in one pass. Hopefully won't matter, as Seed creation will usually be a
// one-time event.
let mut seed = Seed { values: [0; TABLE_SIZE] };
let seq_it = seq.iter();
for (x, y) in seed.values.iter_mut().zip(seq_it) { *x = *y }
seed
}
}
impl Seed {
/// Deterministically generates a new seed table based on a `u32` value.
///
/// Internally this uses a `XorShiftRng`, but we don't really need to worry
/// about cryptographic security when working with procedural noise.
///
/// # Example
///
/// ```rust
/// use noise::Seed;
///
/// let seed = Seed::new(12);
/// ```
pub fn new(seed: u32) -> Seed {
let mut rng: XorShiftRng = SeedableRng::from_seed([1, seed, seed, seed]);
rng.gen()
}
#[inline(always)]
pub fn get1<T: Signed + PrimInt + NumCast>(&self, x: T) -> usize
|
#[inline(always)]
pub fn get2<T: Signed + PrimInt + NumCast>(&self, pos: math::Point2<T>) -> usize {
let y: usize = math::cast(pos[1] & math::cast(0xff));
self.values[self.get1(pos[0]) ^ y] as usize
}
#[inline(always)]
pub fn get3<T: Signed + PrimInt + NumCast>(&self, pos: math::Point3<T>) -> usize {
let z: usize = math::cast(pos[2] & math::cast(0xff));
self.values[self.get2([pos[0], pos[1]]) ^ z] as usize
}
#[inline(always)]
pub fn get4<T: Signed + PrimInt + NumCast>(&self, pos: math::Point4<T>) -> usize {
let w: usize = math::cast(pos[3] & math::cast(0xff));
self.values[self.get3([pos[0], pos[1], pos[2]]) ^ w] as usize
}
}
#[cfg(test)]
mod tests {
use rand::random;
use perlin::perlin3;
use super::Seed;
#[test]
fn test_random_seed() {
let _ = perlin3::<f32>(&random(), &[1.0, 2.0, 3.0]);
}
#[test]
fn test_negative_params() {
let _ = perlin3::<f32>(&Seed::new(0), &[-1.0, 2.0, 3.0]);
}
}
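// Illustrative sketch (hypothetical seed `s`): the higher-dimension lookups
// above chain the 1D lookup through XOR, roughly
//   s.get2([x, y]) == s.values[s.get1(x) ^ ((y & 0xff) as usize)] as usize
// so each added dimension costs one masked cast, one XOR, and one extra table
// lookup.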
|
{
let x: usize = math::cast(x & math::cast(0xff));
self.values[x] as usize
}
|
identifier_body
|
seed.rs
|
// Copyright 2015 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO: Use PrimInt + Signed instead of SignedInt + NumCast once num has
// PrimInt implementations
use num::{NumCast,Signed,PrimInt};
use rand::{Rand, Rng, SeedableRng, XorShiftRng};
use math;
const TABLE_SIZE: usize = 256;
/// A seed table, required by all noise functions.
///
/// Table creation is expensive, so in most circumstances you'll only want to
/// create one of these and reuse it everywhere.
#[allow(missing_copy_implementations)]
pub struct Seed {
values: [u8; TABLE_SIZE],
}
|
/// # Examples
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
///
/// # fn main() {
/// let seed = rand::random::<Seed>();
/// # }
/// ```
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
/// use rand::{SeedableRng, Rng, XorShiftRng};
///
/// # fn main() {
/// let mut rng: XorShiftRng = SeedableRng::from_seed([1, 2, 3, 4]);
/// let seed = rng.gen::<Seed>();
/// # }
/// ```
fn rand<R: Rng>(rng: &mut R) -> Seed {
let mut seq: Vec<u8> = (0 .. TABLE_SIZE).map(|x| x as u8).collect();
rng.shuffle(&mut *seq);
// It's unfortunate that this double-initializes the array, but Rust doesn't currently provide a
// clean way to do this in one pass. Hopefully won't matter, as Seed creation will usually be a
// one-time event.
let mut seed = Seed { values: [0; TABLE_SIZE] };
let seq_it = seq.iter();
for (x, y) in seed.values.iter_mut().zip(seq_it) { *x = *y }
seed
}
}
impl Seed {
/// Deterministically generates a new seed table based on a `u32` value.
///
/// Internally this uses a `XorShiftRng`, but we don't really need to worry
/// about cryptographic security when working with procedural noise.
///
/// # Example
///
/// ```rust
/// use noise::Seed;
///
/// let seed = Seed::new(12);
/// ```
pub fn new(seed: u32) -> Seed {
let mut rng: XorShiftRng = SeedableRng::from_seed([1, seed, seed, seed]);
rng.gen()
}
#[inline(always)]
pub fn get1<T: Signed + PrimInt + NumCast>(&self, x: T) -> usize {
let x: usize = math::cast(x & math::cast(0xff));
self.values[x] as usize
}
#[inline(always)]
pub fn get2<T: Signed + PrimInt + NumCast>(&self, pos: math::Point2<T>) -> usize {
let y: usize = math::cast(pos[1] & math::cast(0xff));
self.values[self.get1(pos[0]) ^ y] as usize
}
#[inline(always)]
pub fn get3<T: Signed + PrimInt + NumCast>(&self, pos: math::Point3<T>) -> usize {
let z: usize = math::cast(pos[2] & math::cast(0xff));
self.values[self.get2([pos[0], pos[1]]) ^ z] as usize
}
#[inline(always)]
pub fn get4<T: Signed + PrimInt + NumCast>(&self, pos: math::Point4<T>) -> usize {
let w: usize = math::cast(pos[3] & math::cast(0xff));
self.values[self.get3([pos[0], pos[1], pos[2]]) ^ w] as usize
}
}
#[cfg(test)]
mod tests {
use rand::random;
use perlin::perlin3;
use super::Seed;
#[test]
fn test_random_seed() {
let _ = perlin3::<f32>(&random(), &[1.0, 2.0, 3.0]);
}
#[test]
fn test_negative_params() {
let _ = perlin3::<f32>(&Seed::new(0), &[-1.0, 2.0, 3.0]);
}
}
|
impl Rand for Seed {
/// Generates a random seed.
///
|
random_line_split
|
seed.rs
|
// Copyright 2015 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// TODO: Use PrimInt + Signed instead of SignedInt + NumCast once num has
// PrimInt implementations
use num::{NumCast,Signed,PrimInt};
use rand::{Rand, Rng, SeedableRng, XorShiftRng};
use math;
const TABLE_SIZE: usize = 256;
/// A seed table, required by all noise functions.
///
/// Table creation is expensive, so in most circumstances you'll only want to
/// create one of these and reuse it everywhere.
#[allow(missing_copy_implementations)]
pub struct Seed {
values: [u8; TABLE_SIZE],
}
impl Rand for Seed {
/// Generates a random seed.
///
/// # Examples
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
///
/// # fn main() {
/// let seed = rand::random::<Seed>();
/// # }
/// ```
///
/// ```rust
/// extern crate noise;
/// extern crate rand;
///
/// use noise::Seed;
/// use rand::{SeedableRng, Rng, XorShiftRng};
///
/// # fn main() {
/// let mut rng: XorShiftRng = SeedableRng::from_seed([1, 2, 3, 4]);
/// let seed = rng.gen::<Seed>();
/// # }
/// ```
fn rand<R: Rng>(rng: &mut R) -> Seed {
let mut seq: Vec<u8> = (0 .. TABLE_SIZE).map(|x| x as u8).collect();
rng.shuffle(&mut *seq);
// It's unfortunate that this double-initializes the array, but Rust doesn't currently provide a
// clean way to do this in one pass. Hopefully won't matter, as Seed creation will usually be a
// one-time event.
let mut seed = Seed { values: [0; TABLE_SIZE] };
let seq_it = seq.iter();
for (x, y) in seed.values.iter_mut().zip(seq_it) { *x = *y }
seed
}
}
impl Seed {
/// Deterministically generates a new seed table based on a `u32` value.
///
/// Internally this uses a `XorShiftRng`, but we don't really need to worry
/// about cryptographic security when working with procedural noise.
///
/// # Example
///
/// ```rust
/// use noise::Seed;
///
/// let seed = Seed::new(12);
/// ```
pub fn new(seed: u32) -> Seed {
let mut rng: XorShiftRng = SeedableRng::from_seed([1, seed, seed, seed]);
rng.gen()
}
#[inline(always)]
pub fn get1<T: Signed + PrimInt + NumCast>(&self, x: T) -> usize {
let x: usize = math::cast(x & math::cast(0xff));
self.values[x] as usize
}
#[inline(always)]
pub fn get2<T: Signed + PrimInt + NumCast>(&self, pos: math::Point2<T>) -> usize {
let y: usize = math::cast(pos[1] & math::cast(0xff));
self.values[self.get1(pos[0]) ^ y] as usize
}
#[inline(always)]
pub fn get3<T: Signed + PrimInt + NumCast>(&self, pos: math::Point3<T>) -> usize {
let z: usize = math::cast(pos[2] & math::cast(0xff));
self.values[self.get2([pos[0], pos[1]]) ^ z] as usize
}
#[inline(always)]
pub fn get4<T: Signed + PrimInt + NumCast>(&self, pos: math::Point4<T>) -> usize {
let w: usize = math::cast(pos[3] & math::cast(0xff));
self.values[self.get3([pos[0], pos[1], pos[2]]) ^ w] as usize
}
}
#[cfg(test)]
mod tests {
use rand::random;
use perlin::perlin3;
use super::Seed;
#[test]
fn
|
() {
let _ = perlin3::<f32>(&random(), &[1.0, 2.0, 3.0]);
}
#[test]
fn test_negative_params() {
let _ = perlin3::<f32>(&Seed::new(0), &[-1.0, 2.0, 3.0]);
}
}
|
test_random_seed
|
identifier_name
|
html_table_import.js
|
var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };
/* Tabulator v4.0.2 (c) Oliver Folkerd */
var HtmlTableImport = function HtmlTableImport(table) {
this.table = table; //hold Tabulator object
this.fieldIndex = [];
this.hasIndex = false;
};
HtmlTableImport.prototype.parseTable = function () {
var self = this,
element = self.table.element,
options = self.table.options,
columns = options.columns,
headers = element.getElementsByTagName("th"),
rows = element.getElementsByTagName("tbody")[0].getElementsByTagName("tr"),
data = [],
newTable;
self.hasIndex = false;
self.table.options.htmlImporting.call(this.table);
//check for tabulator inline options
self._extractOptions(element, options);
if (headers.length) {
self._extractHeaders(headers, rows);
} else {
self._generateBlankHeaders(headers, rows);
}
//iterate through table rows and build data set
for (var index = 0; index < rows.length; index++) {
var row = rows[index],
cells = row.getElementsByTagName("td"),
item = {};
//create index if they don't exist in the table
if (!self.hasIndex) {
item[options.index] = index;
}
for (var i = 0; i < cells.length; i++) {
var cell = cells[i];
if (typeof this.fieldIndex[i] !== "undefined") {
item[this.fieldIndex[i]] = cell.innerHTML;
}
}
//add row data to item
data.push(item);
}
//create new element
var newElement = document.createElement("div");
//transfer attributes to new element
var attributes = element.attributes;
// loop through attributes and apply them on div
for (var i in attributes) {
if (_typeof(attributes[i]) == "object") {
newElement.setAttribute(attributes[i].name, attributes[i].value);
}
}
// replace table with div element
element.parentNode.replaceChild(newElement, element);
options.data = data;
self.table.options.htmlImported.call(this.table);
// // newElement.tabulator(options);
this.table.element = newElement;
};
//extract tabulator attribute options
HtmlTableImport.prototype._extractOptions = function (element, options) {
var attributes = element.attributes;
for (var index in attributes) {
var attrib = attributes[index];
var name;
if ((typeof attrib === "undefined" ? "undefined" : _typeof(attrib)) == "object" && attrib.name && attrib.name.indexOf("tabulator-") === 0) {
name = attrib.name.replace("tabulator-", "");
for (var key in options) {
if (key.toLowerCase() == name) {
options[key] = this._attribValue(attrib.value);
}
}
}
}
};
//get value of attribute
HtmlTableImport.prototype._attribValue = function (value) {
if (value === "true") {
return true;
}
if (value === "false") {
return false;
}
return value;
};
//find column if it has already been defined
HtmlTableImport.prototype._findCol = function (title) {
var match = this.table.options.columns.find(function (column) {
return column.title === title;
});
return match || false;
};
//extract column from headers
HtmlTableImport.prototype._extractHeaders = function (headers, rows) {
for (var index = 0; index < headers.length; index++) {
var header = headers[index],
exists = false,
col = this._findCol(header.textContent),
width,
attributes;
if (col) {
exists = true;
} else {
col = { title: header.textContent.trim() };
}
if (!col.field) {
col.field = header.textContent.trim().toLowerCase().replace(" ", "_");
}
width = header.getAttribute("width");
if (width && !col.width) {
col.width = width;
}
|
        // //check for tabulator inline options
this._extractOptions(header, col);
for (var i in attributes) {
var attrib = attributes[i],
name;
if ((typeof attrib === "undefined" ? "undefined" : _typeof(attrib)) == "object" && attrib.name && attrib.name.indexOf("tabulator-") === 0) {
name = attrib.name.replace("tabulator-", "");
col[name] = this._attribValue(attrib.value);
}
}
this.fieldIndex[index] = col.field;
if (col.field == this.table.options.index) {
this.hasIndex = true;
}
if (!exists) {
this.table.options.columns.push(col);
}
}
};
//generate blank headers
HtmlTableImport.prototype._generateBlankHeaders = function (headers, rows) {
for (var index = 0; index < headers.length; index++) {
var header = headers[index],
col = { title: "", field: "col" + index };
this.fieldIndex[index] = col.field;
var width = header.getAttribute("width");
if (width) {
col.width = width;
}
this.table.options.columns.push(col);
}
};
Tabulator.prototype.registerModule("htmlTableImport", HtmlTableImport);
|
        //check for tabulator inline options
attributes = header.attributes;
|
random_line_split
|
html_table_import.js
|
var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };
/* Tabulator v4.0.2 (c) Oliver Folkerd */
var HtmlTableImport = function HtmlTableImport(table) {
this.table = table; //hold Tabulator object
this.fieldIndex = [];
this.hasIndex = false;
};
HtmlTableImport.prototype.parseTable = function () {
var self = this,
element = self.table.element,
options = self.table.options,
columns = options.columns,
headers = element.getElementsByTagName("th"),
rows = element.getElementsByTagName("tbody")[0].getElementsByTagName("tr"),
data = [],
newTable;
self.hasIndex = false;
self.table.options.htmlImporting.call(this.table);
        //check for tabulator inline options
self._extractOptions(element, options);
if (headers.length) {
self._extractHeaders(headers, rows);
} else {
self._generateBlankHeaders(headers, rows);
}
//iterate through table rows and build data set
for (var index = 0; index < rows.length; index++) {
var row = rows[index],
cells = row.getElementsByTagName("td"),
item = {};
        //create index if they don't exist in table
if (!self.hasIndex) {
item[options.index] = index;
}
for (var i = 0; i < cells.length; i++) {
var cell = cells[i];
if (typeof this.fieldIndex[i] !== "undefined") {
item[this.fieldIndex[i]] = cell.innerHTML;
}
}
//add row data to item
data.push(item);
}
//create new element
var newElement = document.createElement("div");
//transfer attributes to new element
var attributes = element.attributes;
// loop through attributes and apply them on div
for (var i in attributes) {
if (_typeof(attributes[i]) == "object") {
newElement.setAttribute(attributes[i].name, attributes[i].value);
}
}
// replace table with div element
element.parentNode.replaceChild(newElement, element);
options.data = data;
self.table.options.htmlImported.call(this.table);
// // newElement.tabulator(options);
this.table.element = newElement;
};
//extract tabulator attribute options
HtmlTableImport.prototype._extractOptions = function (element, options) {
var attributes = element.attributes;
for (var index in attributes) {
var attrib = attributes[index];
var name;
if ((typeof attrib === "undefined" ? "undefined" : _typeof(attrib)) == "object" && attrib.name && attrib.name.indexOf("tabulator-") === 0) {
name = attrib.name.replace("tabulator-", "");
for (var key in options) {
if (key.toLowerCase() == name) {
options[key] = this._attribValue(attrib.value);
}
}
}
}
};
//get value of attribute
HtmlTableImport.prototype._attribValue = function (value) {
if (value === "true") {
return true;
}
if (value === "false") {
return false;
}
return value;
};
//find column if it has already been defined
HtmlTableImport.prototype._findCol = function (title) {
var match = this.table.options.columns.find(function (column) {
return column.title === title;
});
return match || false;
};
//extract column from headers
HtmlTableImport.prototype._extractHeaders = function (headers, rows) {
for (var index = 0; index < headers.length; index++)
|
};
//generate blank headers
HtmlTableImport.prototype._generateBlankHeaders = function (headers, rows) {
for (var index = 0; index < headers.length; index++) {
var header = headers[index],
col = { title: "", field: "col" + index };
this.fieldIndex[index] = col.field;
var width = header.getAttribute("width");
if (width) {
col.width = width;
}
this.table.options.columns.push(col);
}
};
Tabulator.prototype.registerModule("htmlTableImport", HtmlTableImport);
|
{
var header = headers[index],
exists = false,
col = this._findCol(header.textContent),
width,
attributes;
if (col) {
exists = true;
} else {
col = { title: header.textContent.trim() };
}
if (!col.field) {
col.field = header.textContent.trim().toLowerCase().replace(" ", "_");
}
width = header.getAttribute("width");
if (width && !col.width) {
col.width = width;
}
        //check for tabulator inline options
        attributes = header.attributes;
        // //check for tabulator inline options
this._extractOptions(header, col);
for (var i in attributes) {
var attrib = attributes[i],
name;
if ((typeof attrib === "undefined" ? "undefined" : _typeof(attrib)) == "object" && attrib.name && attrib.name.indexOf("tabulator-") === 0) {
name = attrib.name.replace("tabulator-", "");
col[name] = this._attribValue(attrib.value);
}
}
this.fieldIndex[index] = col.field;
if (col.field == this.table.options.index) {
this.hasIndex = true;
}
if (!exists) {
this.table.options.columns.push(col);
}
}
|
conditional_block
|
mod.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use criterion::{Bencher, Criterion};
use kvproto::deadlock::*;
use rand::prelude::*;
use tikv::server::lock_manager::deadlock::DetectTable;
use tikv_util::time::Duration;
struct DetectGenerator {
rng: ThreadRng,
range: u64,
timestamp: u64,
}
impl DetectGenerator {
fn new(range: u64) -> Self {
Self {
rng: ThreadRng::default(),
range,
timestamp: 0,
}
}
/// Generates n detect requests with the same timestamp
fn generate(&mut self, n: u64) -> Vec<WaitForEntry> {
let mut entries = Vec::with_capacity(n as usize);
(0..n).for_each(|_| {
let mut entry = WaitForEntry::default();
entry.set_txn(self.timestamp);
let mut wait_for_txn = self.timestamp;
while wait_for_txn == self.timestamp {
wait_for_txn = self.rng.gen_range(
if self.timestamp < self.range {
0
} else {
self.timestamp - self.range
},
self.timestamp + self.range,
);
}
entry.set_wait_for_txn(wait_for_txn);
entry.set_key_hash(self.rng.gen());
entries.push(entry);
});
self.timestamp += 1;
entries
}
}
#[derive(Debug)]
struct Config {
n: u64,
range: u64,
ttl: Duration,
}
fn bench_detect(b: &mut Bencher, cfg: &Config)
|
fn bench_dense_detect_without_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_without_cleanup");
let ranges = vec![
10,
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000,
];
for range in ranges {
let config = Config {
n: 10,
range,
ttl: Duration::from_secs(100000000),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
}
fn bench_dense_detect_with_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_with_cleanup");
let ttls = vec![1, 3, 5, 10, 100, 500, 1_000, 3_000];
for ttl in &ttls {
let config = Config {
n: 10,
range: 1000,
ttl: Duration::from_millis(*ttl),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
group.finish();
}
fn main() {
let mut criterion = Criterion::default().configure_from_args().sample_size(10);
bench_dense_detect_without_cleanup(&mut criterion);
bench_dense_detect_with_cleanup(&mut criterion);
criterion.final_summary();
}
|
{
let mut detect_table = DetectTable::new(cfg.ttl);
let mut generator = DetectGenerator::new(cfg.range);
b.iter(|| {
for entry in generator.generate(cfg.n) {
detect_table.detect(
entry.get_txn().into(),
entry.get_wait_for_txn().into(),
entry.get_key_hash(),
&[],
&[],
);
}
});
}
|
identifier_body
|
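Note on the sampling logic in the mod.rs records above: DetectGenerator draws wait_for_txn uniformly from a window of plus/minus range around the current timestamp, clamps the lower bound at zero, and rejects the timestamp itself. A self-contained sketch of the same rule, written against rand 0.8's range-based gen_range rather than the two-argument form the record uses (names here are illustrative):

use rand::Rng;

// Picks a transaction to wait for: uniform over the window around `ts`,
// clamped at 0 on the low side and never equal to `ts` itself.
fn pick_wait_for_txn(rng: &mut impl Rng, ts: u64, range: u64) -> u64 {
    let low = ts.saturating_sub(range); // same effect as the record's if/else, without underflow
    loop {
        let candidate = rng.gen_range(low..ts + range);
        if candidate != ts {
            return candidate;
        }
    }
}

fn main() {
    let mut rng = rand::thread_rng();
    let txn = pick_wait_for_txn(&mut rng, 5, 10);
    println!("txn 5 waits for txn {}", txn);
    assert!(txn != 5 && txn < 15); // window for ts=5, range=10 is [0, 15) minus {5}
}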
mod.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use criterion::{Bencher, Criterion};
use kvproto::deadlock::*;
use rand::prelude::*;
use tikv::server::lock_manager::deadlock::DetectTable;
use tikv_util::time::Duration;
struct DetectGenerator {
rng: ThreadRng,
range: u64,
timestamp: u64,
}
impl DetectGenerator {
fn new(range: u64) -> Self {
Self {
rng: ThreadRng::default(),
range,
timestamp: 0,
}
}
/// Generates n detect requests with the same timestamp
fn generate(&mut self, n: u64) -> Vec<WaitForEntry> {
let mut entries = Vec::with_capacity(n as usize);
(0..n).for_each(|_| {
let mut entry = WaitForEntry::default();
entry.set_txn(self.timestamp);
let mut wait_for_txn = self.timestamp;
while wait_for_txn == self.timestamp {
wait_for_txn = self.rng.gen_range(
if self.timestamp < self.range {
0
} else {
self.timestamp - self.range
},
self.timestamp + self.range,
);
}
entry.set_wait_for_txn(wait_for_txn);
entry.set_key_hash(self.rng.gen());
entries.push(entry);
});
self.timestamp += 1;
entries
}
}
#[derive(Debug)]
struct Config {
n: u64,
range: u64,
ttl: Duration,
}
fn bench_detect(b: &mut Bencher, cfg: &Config) {
let mut detect_table = DetectTable::new(cfg.ttl);
let mut generator = DetectGenerator::new(cfg.range);
b.iter(|| {
for entry in generator.generate(cfg.n) {
detect_table.detect(
entry.get_txn().into(),
entry.get_wait_for_txn().into(),
entry.get_key_hash(),
&[],
&[],
);
}
});
}
fn bench_dense_detect_without_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_without_cleanup");
let ranges = vec![
10,
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000,
];
for range in ranges {
let config = Config {
n: 10,
range,
ttl: Duration::from_secs(100000000),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
}
fn bench_dense_detect_with_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_with_cleanup");
let ttls = vec![1, 3, 5, 10, 100, 500, 1_000, 3_000];
for ttl in &ttls {
let config = Config {
n: 10,
range: 1000,
ttl: Duration::from_millis(*ttl),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
group.finish();
}
fn
|
() {
let mut criterion = Criterion::default().configure_from_args().sample_size(10);
bench_dense_detect_without_cleanup(&mut criterion);
bench_dense_detect_with_cleanup(&mut criterion);
criterion.final_summary();
}
|
main
|
identifier_name
|
mod.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use criterion::{Bencher, Criterion};
use kvproto::deadlock::*;
use rand::prelude::*;
use tikv::server::lock_manager::deadlock::DetectTable;
use tikv_util::time::Duration;
struct DetectGenerator {
rng: ThreadRng,
range: u64,
timestamp: u64,
}
impl DetectGenerator {
fn new(range: u64) -> Self {
Self {
rng: ThreadRng::default(),
range,
timestamp: 0,
}
}
/// Generates n detect requests with the same timestamp
fn generate(&mut self, n: u64) -> Vec<WaitForEntry> {
let mut entries = Vec::with_capacity(n as usize);
(0..n).for_each(|_| {
let mut entry = WaitForEntry::default();
entry.set_txn(self.timestamp);
let mut wait_for_txn = self.timestamp;
while wait_for_txn == self.timestamp {
wait_for_txn = self.rng.gen_range(
if self.timestamp < self.range {
0
} else {
self.timestamp - self.range
},
self.timestamp + self.range,
);
}
entry.set_wait_for_txn(wait_for_txn);
entry.set_key_hash(self.rng.gen());
entries.push(entry);
});
self.timestamp += 1;
|
entries
}
}
#[derive(Debug)]
struct Config {
n: u64,
range: u64,
ttl: Duration,
}
fn bench_detect(b: &mut Bencher, cfg: &Config) {
let mut detect_table = DetectTable::new(cfg.ttl);
let mut generator = DetectGenerator::new(cfg.range);
b.iter(|| {
for entry in generator.generate(cfg.n) {
detect_table.detect(
entry.get_txn().into(),
entry.get_wait_for_txn().into(),
entry.get_key_hash(),
&[],
&[],
);
}
});
}
fn bench_dense_detect_without_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_without_cleanup");
let ranges = vec![
10,
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000,
];
for range in ranges {
let config = Config {
n: 10,
range,
ttl: Duration::from_secs(100000000),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
}
fn bench_dense_detect_with_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_with_cleanup");
let ttls = vec![1, 3, 5, 10, 100, 500, 1_000, 3_000];
for ttl in &ttls {
let config = Config {
n: 10,
range: 1000,
ttl: Duration::from_millis(*ttl),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
group.finish();
}
fn main() {
let mut criterion = Criterion::default().configure_from_args().sample_size(10);
bench_dense_detect_without_cleanup(&mut criterion);
bench_dense_detect_with_cleanup(&mut criterion);
criterion.final_summary();
}
|
random_line_split
|
|
mod.rs
|
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0.
use criterion::{Bencher, Criterion};
use kvproto::deadlock::*;
use rand::prelude::*;
use tikv::server::lock_manager::deadlock::DetectTable;
use tikv_util::time::Duration;
struct DetectGenerator {
rng: ThreadRng,
range: u64,
timestamp: u64,
}
impl DetectGenerator {
fn new(range: u64) -> Self {
Self {
rng: ThreadRng::default(),
range,
timestamp: 0,
}
}
/// Generates n detect requests with the same timestamp
fn generate(&mut self, n: u64) -> Vec<WaitForEntry> {
let mut entries = Vec::with_capacity(n as usize);
(0..n).for_each(|_| {
let mut entry = WaitForEntry::default();
entry.set_txn(self.timestamp);
let mut wait_for_txn = self.timestamp;
while wait_for_txn == self.timestamp {
wait_for_txn = self.rng.gen_range(
if self.timestamp < self.range
|
else {
self.timestamp - self.range
},
self.timestamp + self.range,
);
}
entry.set_wait_for_txn(wait_for_txn);
entry.set_key_hash(self.rng.gen());
entries.push(entry);
});
self.timestamp += 1;
entries
}
}
#[derive(Debug)]
struct Config {
n: u64,
range: u64,
ttl: Duration,
}
fn bench_detect(b: &mut Bencher, cfg: &Config) {
let mut detect_table = DetectTable::new(cfg.ttl);
let mut generator = DetectGenerator::new(cfg.range);
b.iter(|| {
for entry in generator.generate(cfg.n) {
detect_table.detect(
entry.get_txn().into(),
entry.get_wait_for_txn().into(),
entry.get_key_hash(),
&[],
&[],
);
}
});
}
fn bench_dense_detect_without_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_without_cleanup");
let ranges = vec![
10,
100,
1_000,
10_000,
100_000,
1_000_000,
10_000_000,
100_000_000,
];
for range in ranges {
let config = Config {
n: 10,
range,
ttl: Duration::from_secs(100000000),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
}
fn bench_dense_detect_with_cleanup(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_dense_detect_with_cleanup");
let ttls = vec![1, 3, 5, 10, 100, 500, 1_000, 3_000];
for ttl in &ttls {
let config = Config {
n: 10,
range: 1000,
ttl: Duration::from_millis(*ttl),
};
group.bench_with_input(format!("{:?}", &config), &config, bench_detect);
}
group.finish();
}
fn main() {
let mut criterion = Criterion::default().configure_from_args().sample_size(10);
bench_dense_detect_without_cleanup(&mut criterion);
bench_dense_detect_with_cleanup(&mut criterion);
criterion.final_summary();
}
|
{
0
}
|
conditional_block
|
HTTPFileSystem.ts
|
import { v2 as webdav } from 'webdav-server'
import { Readable, Writable, Transform } from 'stream'
import * as request from 'request'
export class Resource
{
props : webdav.IPropertyManager;
locks : webdav.ILockManager;
constructor(data ?: Resource)
{
this.props = new webdav.LocalPropertyManager(data ? data.props : undefined);
this.locks = new webdav.LocalLockManager();
}
}
// Serializer
export class HTTPFileSystemSerializer implements webdav.FileSystemSerializer
{
uid() : string
{
return "HTTPFileSystemSerializer_1.0.0";
}
serialize(fs : HTTPFileSystem, callback : webdav.ReturnCallback<any>) : void
{
callback(null, {
url: fs.url,
resources: fs.resources
});
}
unserialize(serializedData : any, callback : webdav.ReturnCallback<HTTPFileSystem>) : void
{
const fs = new HTTPFileSystem(serializedData.url);
        for(const path in serializedData.resources)
            fs.resources[path] = new Resource(serializedData.resources[path]);
callback(null, fs);
}
}
// File system
export class HTTPFileSystem extends webdav.FileSystem
{
resources : {
[path : string] : Resource
}
url : string
constructor(url : string)
{
super(new HTTPFileSystemSerializer());
if(!url)
url = '';
if(url.lastIndexOf('/') === url.length - 1)
url = url.substring(0, url.length - 1);
|
this.url = url;
}
protected findResource(path : webdav.Path)
{
const sPath = path.toString();
const r = this.resources[sPath];
if(!r)
return this.resources[sPath] = new Resource();
return r;
}
_openReadStream(path : webdav.Path, info : webdav.OpenReadStreamInfo, callback : webdav.ReturnCallback<Readable>) : void
{
const stream = request(this.url + path.toString());
callback(null, (stream as any) as Readable);
}
_openWriteStream(path : webdav.Path, info : webdav.OpenWriteStreamInfo, callback : webdav.ReturnCallback<Writable>) : void
{
const stream = request.put(this.url + path.toString());
callback(null, (stream as any) as Writable);
}
_propertyManager(path : webdav.Path, info : webdav.PropertyManagerInfo, callback : webdav.ReturnCallback<webdav.IPropertyManager>) : void
{
callback(null, this.findResource(path).props);
}
_lockManager(path : webdav.Path, info : webdav.LockManagerInfo, callback : webdav.ReturnCallback<webdav.ILockManager>) : void
{
callback(null, this.findResource(path).locks);
}
_size(path : webdav.Path, info : webdav.SizeInfo, callback : webdav.ReturnCallback<number>) : void
{
request({
url: this.url + path.toString(),
method: 'HEAD'
}, (e, res) => {
if(e)
return callback(e);
const contentLength = res.headers['content-length'];
console.log(res.headers);
console.log(contentLength);
if(contentLength)
callback(null, parseInt(contentLength.constructor === String ? contentLength as string : contentLength[0]));
else
callback(null, undefined);
})
}
_mimeType(path : webdav.Path, info : webdav.MimeTypeInfo, callback : webdav.ReturnCallback<string>) : void
{
request({
url: this.url + path.toString(),
method: 'HEAD'
}, (e, res) => {
if(e)
return callback(e);
const contentType = res.headers['content-type'];
if(contentType)
callback(null, contentType.constructor === String ? contentType as string : contentType[0]);
else
callback(null, 'application/octet-stream');
})
}
_type(path : webdav.Path, info : webdav.TypeInfo, callback : webdav.ReturnCallback<webdav.ResourceType>) : void
{
callback(null, webdav.ResourceType.File);
}
}
|
this.resources = {};
|
random_line_split
|
HTTPFileSystem.ts
|
import { v2 as webdav } from 'webdav-server'
import { Readable, Writable, Transform } from 'stream'
import * as request from 'request'
export class Resource
{
props : webdav.IPropertyManager;
locks : webdav.ILockManager;
constructor(data ?: Resource)
{
this.props = new webdav.LocalPropertyManager(data ? data.props : undefined);
this.locks = new webdav.LocalLockManager();
}
}
// Serializer
export class HTTPFileSystemSerializer implements webdav.FileSystemSerializer
{
uid() : string
{
return "HTTPFileSystemSerializer_1.0.0";
}
|
(fs : HTTPFileSystem, callback : webdav.ReturnCallback<any>) : void
{
callback(null, {
url: fs.url,
resources: fs.resources
});
}
unserialize(serializedData : any, callback : webdav.ReturnCallback<HTTPFileSystem>) : void
{
const fs = new HTTPFileSystem(serializedData.url);
        for(const path in serializedData.resources)
            fs.resources[path] = new Resource(serializedData.resources[path]);
callback(null, fs);
}
}
// File system
export class HTTPFileSystem extends webdav.FileSystem
{
resources : {
[path : string] : Resource
}
url : string
constructor(url : string)
{
super(new HTTPFileSystemSerializer());
if(!url)
url = '';
if(url.lastIndexOf('/') === url.length - 1)
url = url.substring(0, url.length - 1);
this.resources = {};
this.url = url;
}
protected findResource(path : webdav.Path)
{
const sPath = path.toString();
const r = this.resources[sPath];
if(!r)
return this.resources[sPath] = new Resource();
return r;
}
_openReadStream(path : webdav.Path, info : webdav.OpenReadStreamInfo, callback : webdav.ReturnCallback<Readable>) : void
{
const stream = request(this.url + path.toString());
callback(null, (stream as any) as Readable);
}
_openWriteStream(path : webdav.Path, info : webdav.OpenWriteStreamInfo, callback : webdav.ReturnCallback<Writable>) : void
{
const stream = request.put(this.url + path.toString());
callback(null, (stream as any) as Writable);
}
_propertyManager(path : webdav.Path, info : webdav.PropertyManagerInfo, callback : webdav.ReturnCallback<webdav.IPropertyManager>) : void
{
callback(null, this.findResource(path).props);
}
_lockManager(path : webdav.Path, info : webdav.LockManagerInfo, callback : webdav.ReturnCallback<webdav.ILockManager>) : void
{
callback(null, this.findResource(path).locks);
}
_size(path : webdav.Path, info : webdav.SizeInfo, callback : webdav.ReturnCallback<number>) : void
{
request({
url: this.url + path.toString(),
method: 'HEAD'
}, (e, res) => {
if(e)
return callback(e);
const contentLength = res.headers['content-length'];
console.log(res.headers);
console.log(contentLength);
if(contentLength)
callback(null, parseInt(contentLength.constructor === String ? contentLength as string : contentLength[0]));
else
callback(null, undefined);
})
}
_mimeType(path : webdav.Path, info : webdav.MimeTypeInfo, callback : webdav.ReturnCallback<string>) : void
{
request({
url: this.url + path.toString(),
method: 'HEAD'
}, (e, res) => {
if(e)
return callback(e);
const contentType = res.headers['content-type'];
if(contentType)
callback(null, contentType.constructor === String ? contentType as string : contentType[0]);
else
callback(null, 'application/octet-stream');
})
}
_type(path : webdav.Path, info : webdav.TypeInfo, callback : webdav.ReturnCallback<webdav.ResourceType>) : void
{
callback(null, webdav.ResourceType.File);
}
}
|
serialize
|
identifier_name
|
main.rs
|
fn main() {
// defining-structs
{
struct User {
username: String,
email: String,
sign_in_count: u64,
active: bool,
}
        // An instance
let user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
        // A mutable instance
let mut user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
user1.username = String::from("chen");
        // A function that returns a struct
fn build_user(email: String, username: String) -> User {
User {
email: email,
username: username,
active: true,
sign_in_count: 1,
}
}
        // Field init shorthand for when a variable and a field share the same name
let user2 = User {
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
active: user1.active,
sign_in_count: user1.sign_in_count,
};
        // Can be shortened to
let user2 = User {
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
..user1
};
        // Use tuple structs without named fields to create distinct types
        // Tuple structs
struct Color(i32, i32, i32);
struct Point(i32, i32, i32);
let black = Color(0, 0, 0);
let origin = Point(0, 0, 0);
        // Unit-like structs without any fields
        // They are called unit-like structs because they behave similarly to (), the unit type
        // Unit-like structs come in handy when you need to implement a trait on some type but have no data to store in it
        // Ownership of struct data
        // In the definition of the User struct in Listing 5-1, we used the owned String type rather than the &str string slice type.
        // This is a deliberate choice: we want the struct to own all of its data, so that the data stays valid for as long as the whole struct is valid.
        // A struct can also store references to data owned by something else, but doing so requires lifetimes, a Rust feature discussed in Chapter 10.
        // Lifetimes ensure that the data a struct references stays valid for as long as the struct itself does; trying to store a reference in a struct without specifying a lifetime is invalid
}
// example-structs
    // An example program using structs
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
let rect1 = Rectangle { width: 30, height: 50 };
        // Add an annotation to derive the Debug trait
println!("rect1 is {:?}", rect1);
println!("rect1 is {:#?}", rect1);
println!("The area of the rectangle is {} square pixels.", area(&rect1));
fn area(rectangle: &Rectangle) -> u32 {
rectangle.width * rectangle.height
}
}
// method-syntax
    // Method syntax
{
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
}
let rect1 = Rectangle { width: 30, height: 50 };
println!("The area of the rectangle is {} square pixels.", rect1.area());
}
        // Associated functions
{
            // Another useful feature of impl blocks is that they allow defining functions that do *not* take self as a parameter. These are called associated functions because they are associated with the struct.
            // They are still functions rather than methods, because they do not act on an instance of the struct. We have already used the String::from associated function.
            // Associated functions are often used as constructors that return a new instance of the struct.
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn square(size: u32) -> Rectangle {
Rectangle { wid
|
let sq = Rectangle::square(3);
println!("sq is {:?}", sq);
}
        // Each struct is allowed to have multiple impl blocks
}
}
|
th: size, height: size }
}
}
|
identifier_body
|
main.rs
|
fn main() {
// defining-structs
{
struct User {
username: String,
email: String,
sign_in_count: u64,
active: bool,
}
        // An instance
let user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
        // A mutable instance
let mut user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
user1.username = String::from("chen");
        // A function that returns a struct
fn build_user(email: String, username: String) -> User {
User {
email: email,
username: username,
active: true,
sign_in_count: 1,
}
}
        // Field init shorthand for when a variable and a field share the same name
let user2 = User {
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
active: user1.active,
sign_in_count: user1.sign_in_count,
};
|
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
..user1
};
        // Use tuple structs without named fields to create distinct types
        // Tuple structs
struct Color(i32, i32, i32);
struct Point(i32, i32, i32);
let black = Color(0, 0, 0);
let origin = Point(0, 0, 0);
        // Unit-like structs without any fields
        // They are called unit-like structs because they behave similarly to (), the unit type
        // Unit-like structs come in handy when you need to implement a trait on some type but have no data to store in it
        // Ownership of struct data
        // In the definition of the User struct in Listing 5-1, we used the owned String type rather than the &str string slice type.
        // This is a deliberate choice: we want the struct to own all of its data, so that the data stays valid for as long as the whole struct is valid.
        // A struct can also store references to data owned by something else, but doing so requires lifetimes, a Rust feature discussed in Chapter 10.
        // Lifetimes ensure that the data a struct references stays valid for as long as the struct itself does; trying to store a reference in a struct without specifying a lifetime is invalid
}
// example-structs
    // An example program using structs
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
let rect1 = Rectangle { width: 30, height: 50 };
        // Add an annotation to derive the Debug trait
println!("rect1 is {:?}", rect1);
println!("rect1 is {:#?}", rect1);
println!("The area of the rectangle is {} square pixels.", area(&rect1));
fn area(rectangle: &Rectangle) -> u32 {
rectangle.width * rectangle.height
}
}
// method-syntax
    // Method syntax
{
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
}
let rect1 = Rectangle { width: 30, height: 50 };
println!("The area of the rectangle is {} square pixels.", rect1.area());
}
        // Associated functions
{
            // Another useful feature of impl blocks is that they allow defining functions that do *not* take self as a parameter. These are called associated functions because they are associated with the struct.
            // They are still functions rather than methods, because they do not act on an instance of the struct. We have already used the String::from associated function.
            // Associated functions are often used as constructors that return a new instance of the struct.
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn square(size: u32) -> Rectangle {
Rectangle { width: size, height: size }
}
}
let sq = Rectangle::square(3);
println!("sq is {:?}", sq);
}
        // Each struct is allowed to have multiple impl blocks
}
}
|
        // Can be shortened to
let user2 = User {
|
random_line_split
|
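The ownership notes in the main.rs records above state that storing a reference in a struct requires a lifetime annotation. A minimal sketch of what that looks like (UserRef is an illustrative name, not part of the record):

// The borrowed fields tie UserRef's validity to the borrowed strings:
// a UserRef cannot outlive the data it points at.
struct UserRef<'a> {
    username: &'a str,
    email: &'a str,
}

fn main() {
    let name = String::from("someusername123");
    let mail = String::from("someone@example.com");
    let user = UserRef { username: &name, email: &mail };
    println!("{} <{}>", user.username, user.email);
} // `name` and `mail` outlive `user`, so both borrows are valid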
main.rs
|
fn main() {
// defining-structs
{
struct User {
username: String,
email: String,
sign_in_count: u64,
active: bool,
}
        // An instance
let user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
        // A mutable instance
let mut user1 = User {
email: String::from("[email protected]"),
username: String::from("someusername123"),
active: true,
sign_in_count: 1,
};
user1.username = String::from("chen");
        // A function that returns a struct
fn build_user(email: String, username: String) -> User {
User {
email: email,
username: username,
active: true,
sign_in_count: 1,
}
}
        // Field init shorthand for when a variable and a field share the same name
let user2 = User {
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
active: user1.active,
sign_in_count: user1.sign_in_count,
};
        // Can be shortened to
let user2 = User {
email: String::from("[email protected]"),
username: String::from("anotherusername567"),
..user1
};
        // Use tuple structs without named fields to create distinct types
        // Tuple structs
struct Color(i32, i32, i32);
struct Point(i32, i32, i32);
let black = Color(0, 0, 0);
let origin = Point(0, 0, 0);
        // Unit-like structs without any fields
        // They are called unit-like structs because they behave similarly to (), the unit type
        // Unit-like structs come in handy when you need to implement a trait on some type but have no data to store in it
        // Ownership of struct data
        // In the definition of the User struct in Listing 5-1, we used the owned String type rather than the &str string slice type.
        // This is a deliberate choice: we want the struct to own all of its data, so that the data stays valid for as long as the whole struct is valid.
        // A struct can also store references to data owned by something else, but doing so requires lifetimes, a Rust feature discussed in Chapter 10.
        // Lifetimes ensure that the data a struct references stays valid for as long as the struct itself does; trying to store a reference in a struct without specifying a lifetime is invalid
}
// example-structs
    // An example program using structs
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
let rect1 = Rectangle { width: 30, height: 50 };
        // Add an annotation to derive the Debug trait
println!("rect1 is {:?}", rect1);
println!("rect1 is {:#?}", rect1);
println!("The area of the rectangle is {} square pixels.", area(&rect1));
fn area(rectangle: &Rectangle) -> u32 {
rectangle.width * rectangle.height
}
}
// method-syntax
    // Method syntax
{
{
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
}
let rect1 = Rectangle { width: 30, height: 50 };
println!("The area of the rectangle is {} square pixels.", rect1.area());
}
        // Associated functions
{
            // Another useful feature of impl blocks is that they allow defining functions that do *not* take self as a parameter. These are called associated functions because they are associated with the struct.
            // They are still functions rather than methods, because they do not act on an instance of the struct. We have already used the String::from associated function.
            // Associated functions are often used as constructors that return a new instance of the struct.
#[derive(Debug)]
struct Rectangle {
width: u32,
|
ight: u32,
}
impl Rectangle {
fn square(size: u32) -> Rectangle {
Rectangle { width: size, height: size }
}
}
let sq = Rectangle::square(3);
println!("sq is {:?}", sq);
}
        // Each struct is allowed to have multiple impl blocks
}
}
|
he
|
identifier_name
|
logreportwriters.py
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from os.path import basename, splitext
import codecs
from robot.htmldata import HtmlFileWriter, ModelWriter, LOG, REPORT
from robot.utils import utf8open
from .jswriter import JsResultWriter, SplitLogWriter
class _LogReportWriter(object):
def __init__(self, js_model):
self._js_model = js_model
def _write_file(self, path, config, template):
outfile = codecs.open(path, 'wb', encoding='UTF-8')\
if isinstance(path, basestring) else path # unit test hook
with outfile:
model_writer = RobotModelWriter(outfile, self._js_model, config)
writer = HtmlFileWriter(outfile, model_writer)
writer.write(template)
class RobotModelWriter(ModelWriter):
def __init__(self, output, model, config):
self._output = output
self._model = model
self._config = config
def
|
(self, line):
JsResultWriter(self._output).write(self._model, self._config)
class LogWriter(_LogReportWriter):
def write(self, path, config):
self._write_file(path, config, LOG)
if self._js_model.split_results:
self._write_split_logs(splitext(path)[0])
def _write_split_logs(self, base):
for index, (keywords, strings) in enumerate(self._js_model.split_results):
index += 1 # enumerate accepts start index only in Py 2.6+
self._write_split_log(index, keywords, strings, '%s-%d.js' % (base, index))
def _write_split_log(self, index, keywords, strings, path):
with utf8open(path, 'wb') as outfile:
writer = SplitLogWriter(outfile)
writer.write(keywords, strings, index, basename(path))
class ReportWriter(_LogReportWriter):
def write(self, path, config):
self._write_file(path, config, REPORT)
|
write
|
identifier_name
|
logreportwriters.py
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from os.path import basename, splitext
import codecs
from robot.htmldata import HtmlFileWriter, ModelWriter, LOG, REPORT
from robot.utils import utf8open
from .jswriter import JsResultWriter, SplitLogWriter
class _LogReportWriter(object):
def __init__(self, js_model):
self._js_model = js_model
def _write_file(self, path, config, template):
outfile = codecs.open(path, 'wb', encoding='UTF-8')\
if isinstance(path, basestring) else path # unit test hook
with outfile:
model_writer = RobotModelWriter(outfile, self._js_model, config)
writer = HtmlFileWriter(outfile, model_writer)
writer.write(template)
class RobotModelWriter(ModelWriter):
def __init__(self, output, model, config):
self._output = output
self._model = model
self._config = config
def write(self, line):
JsResultWriter(self._output).write(self._model, self._config)
class LogWriter(_LogReportWriter):
def write(self, path, config):
self._write_file(path, config, LOG)
if self._js_model.split_results:
self._write_split_logs(splitext(path)[0])
def _write_split_logs(self, base):
for index, (keywords, strings) in enumerate(self._js_model.split_results):
index += 1 # enumerate accepts start index only in Py 2.6+
self._write_split_log(index, keywords, strings, '%s-%d.js' % (base, index))
def _write_split_log(self, index, keywords, strings, path):
with utf8open(path, 'wb') as outfile:
writer = SplitLogWriter(outfile)
writer.write(keywords, strings, index, basename(path))
class ReportWriter(_LogReportWriter):
def write(self, path, config):
|
self._write_file(path, config, REPORT)
|
identifier_body
|
|
logreportwriters.py
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from os.path import basename, splitext
import codecs
from robot.htmldata import HtmlFileWriter, ModelWriter, LOG, REPORT
from robot.utils import utf8open
from .jswriter import JsResultWriter, SplitLogWriter
class _LogReportWriter(object):
def __init__(self, js_model):
self._js_model = js_model
def _write_file(self, path, config, template):
outfile = codecs.open(path, 'wb', encoding='UTF-8')\
if isinstance(path, basestring) else path # unit test hook
with outfile:
model_writer = RobotModelWriter(outfile, self._js_model, config)
writer = HtmlFileWriter(outfile, model_writer)
writer.write(template)
class RobotModelWriter(ModelWriter):
def __init__(self, output, model, config):
self._output = output
self._model = model
self._config = config
def write(self, line):
JsResultWriter(self._output).write(self._model, self._config)
class LogWriter(_LogReportWriter):
def write(self, path, config):
self._write_file(path, config, LOG)
if self._js_model.split_results:
self._write_split_logs(splitext(path)[0])
def _write_split_logs(self, base):
for index, (keywords, strings) in enumerate(self._js_model.split_results):
|
def _write_split_log(self, index, keywords, strings, path):
with utf8open(path, 'wb') as outfile:
writer = SplitLogWriter(outfile)
writer.write(keywords, strings, index, basename(path))
class ReportWriter(_LogReportWriter):
def write(self, path, config):
self._write_file(path, config, REPORT)
|
index += 1 # enumerate accepts start index only in Py 2.6+
self._write_split_log(index, keywords, strings, '%s-%d.js' % (base, index))
|
conditional_block
|
logreportwriters.py
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from os.path import basename, splitext
import codecs
|
class _LogReportWriter(object):
def __init__(self, js_model):
self._js_model = js_model
def _write_file(self, path, config, template):
outfile = codecs.open(path, 'wb', encoding='UTF-8')\
if isinstance(path, basestring) else path # unit test hook
with outfile:
model_writer = RobotModelWriter(outfile, self._js_model, config)
writer = HtmlFileWriter(outfile, model_writer)
writer.write(template)
class RobotModelWriter(ModelWriter):
def __init__(self, output, model, config):
self._output = output
self._model = model
self._config = config
def write(self, line):
JsResultWriter(self._output).write(self._model, self._config)
class LogWriter(_LogReportWriter):
def write(self, path, config):
self._write_file(path, config, LOG)
if self._js_model.split_results:
self._write_split_logs(splitext(path)[0])
def _write_split_logs(self, base):
for index, (keywords, strings) in enumerate(self._js_model.split_results):
index += 1 # enumerate accepts start index only in Py 2.6+
self._write_split_log(index, keywords, strings, '%s-%d.js' % (base, index))
def _write_split_log(self, index, keywords, strings, path):
with utf8open(path, 'wb') as outfile:
writer = SplitLogWriter(outfile)
writer.write(keywords, strings, index, basename(path))
class ReportWriter(_LogReportWriter):
def write(self, path, config):
self._write_file(path, config, REPORT)
|
from robot.htmldata import HtmlFileWriter, ModelWriter, LOG, REPORT
from robot.utils import utf8open
from .jswriter import JsResultWriter, SplitLogWriter
|
random_line_split
|
update.rs
|
use crate::{
self as exercise,
errors::Result,
structs::{LabeledTest, LabeledTestItem},
};
use failure::format_err;
use std::{collections::HashSet, fs, path::Path};
enum DiffType {
NEW,
UPDATED,
}
fn generate_diff_test(
case: &LabeledTest,
diff_type: &DiffType,
use_maplit: bool,
) -> Result<String> {
Ok(format!(
"//{}\n{}",
match diff_type {
DiffType::NEW => "NEW",
DiffType::UPDATED => "UPDATED",
},
exercise::generate_test_function(case, use_maplit)?
))
}
fn generate_diff_property(property: &str) -> Result<String> {
Ok(format!(
"//{}\n{}",
"NEW",
exercise::generate_property_body(property)?
))
}
fn generate_diffs(
case: &LabeledTest,
tests_content: &str,
diffs: &mut HashSet<String>,
use_maplit: bool,
) -> Result<()> {
let description = &case.description;
let description_formatted = exercise::format_exercise_description(description);
let diff_type = if !tests_content.contains(&format!("test_{}", description_formatted)) {
DiffType::NEW
} else {
DiffType::UPDATED
};
if diffs.insert(generate_diff_test(case, &diff_type, use_maplit)?) {
match diff_type {
DiffType::NEW => println!("New test case detected: {}.", description_formatted),
DiffType::UPDATED => println!("Updated test case: {}.", description_formatted),
}
}
let property = &case.property;
let property_formatted = exercise::format_exercise_property(property);
if !tests_content.contains(&format!("process_{}_case", property_formatted))
&& diffs.insert(generate_diff_property(property)?)
{
println!("New property detected: {}.", property);
}
Ok(())
}
fn get_diffs(
case: &LabeledTestItem,
diffs: &mut HashSet<String>,
tests_content: &str,
use_maplit: bool,
) -> Result<()> {
match case {
LabeledTestItem::Single(case) => generate_diffs(case, &tests_content, diffs, use_maplit)?,
LabeledTestItem::Array(group) => {
for case in &group.cases {
get_diffs(case, diffs, tests_content, use_maplit)?;
}
}
}
Ok(())
}
fn apply_diffs(exercise_name: &str, diffs: &HashSet<String>, tests_content: &str) -> Result<()> {
let updated_tests_content = format!(
"{}\n{}",
tests_content,
diffs
.iter()
.map(|diff| format!("\n{}", diff))
.collect::<String>()
);
let tests_path = Path::new(&*exercise::TRACK_ROOT)
.join("exercises")
.join(exercise_name)
.join("tests")
.join(format!("{}.rs", exercise_name));
fs::write(&tests_path, updated_tests_content.as_bytes())?;
exercise::rustfmt(&tests_path)?;
|
Ok(())
}
pub fn update_exercise(exercise_name: &str, use_maplit: bool) -> Result<()> {
if !exercise::exercise_exists(exercise_name) {
return Err(
format_err!("exercise with the name '{}' does not exist", exercise_name).into(),
);
}
let tests_content = exercise::get_tests_content(exercise_name)?;
let canonical_data = exercise::get_canonical_data(exercise_name)?;
let mut diffs: HashSet<String> = HashSet::new();
for case in &canonical_data.cases {
get_diffs(case, &mut diffs, &tests_content, use_maplit)?;
}
apply_diffs(exercise_name, &diffs, &tests_content)?;
exercise::update_cargo_toml_version(exercise_name, &canonical_data)?;
Ok(())
}
|
random_line_split
|
|
update.rs
|
use crate::{
self as exercise,
errors::Result,
structs::{LabeledTest, LabeledTestItem},
};
use failure::format_err;
use std::{collections::HashSet, fs, path::Path};
enum DiffType {
NEW,
UPDATED,
}
fn generate_diff_test(
case: &LabeledTest,
diff_type: &DiffType,
use_maplit: bool,
) -> Result<String> {
Ok(format!(
"//{}\n{}",
match diff_type {
DiffType::NEW => "NEW",
DiffType::UPDATED => "UPDATED",
},
exercise::generate_test_function(case, use_maplit)?
))
}
fn generate_diff_property(property: &str) -> Result<String> {
Ok(format!(
"//{}\n{}",
"NEW",
exercise::generate_property_body(property)?
))
}
fn generate_diffs(
case: &LabeledTest,
tests_content: &str,
diffs: &mut HashSet<String>,
use_maplit: bool,
) -> Result<()> {
let description = &case.description;
let description_formatted = exercise::format_exercise_description(description);
let diff_type = if !tests_content.contains(&format!("test_{}", description_formatted)) {
DiffType::NEW
} else {
DiffType::UPDATED
};
if diffs.insert(generate_diff_test(case, &diff_type, use_maplit)?) {
match diff_type {
DiffType::NEW => println!("New test case detected: {}.", description_formatted),
DiffType::UPDATED => println!("Updated test case: {}.", description_formatted),
}
}
let property = &case.property;
let property_formatted = exercise::format_exercise_property(property);
if !tests_content.contains(&format!("process_{}_case", property_formatted))
&& diffs.insert(generate_diff_property(property)?)
{
println!("New property detected: {}.", property);
}
Ok(())
}
fn
|
(
case: &LabeledTestItem,
diffs: &mut HashSet<String>,
tests_content: &str,
use_maplit: bool,
) -> Result<()> {
match case {
LabeledTestItem::Single(case) => generate_diffs(case, &tests_content, diffs, use_maplit)?,
LabeledTestItem::Array(group) => {
for case in &group.cases {
get_diffs(case, diffs, tests_content, use_maplit)?;
}
}
}
Ok(())
}
fn apply_diffs(exercise_name: &str, diffs: &HashSet<String>, tests_content: &str) -> Result<()> {
let updated_tests_content = format!(
"{}\n{}",
tests_content,
diffs
.iter()
.map(|diff| format!("\n{}", diff))
.collect::<String>()
);
let tests_path = Path::new(&*exercise::TRACK_ROOT)
.join("exercises")
.join(exercise_name)
.join("tests")
.join(format!("{}.rs", exercise_name));
fs::write(&tests_path, updated_tests_content.as_bytes())?;
exercise::rustfmt(&tests_path)?;
Ok(())
}
pub fn update_exercise(exercise_name: &str, use_maplit: bool) -> Result<()> {
if !exercise::exercise_exists(exercise_name) {
return Err(
format_err!("exercise with the name '{}' does not exist", exercise_name).into(),
);
}
let tests_content = exercise::get_tests_content(exercise_name)?;
let canonical_data = exercise::get_canonical_data(exercise_name)?;
let mut diffs: HashSet<String> = HashSet::new();
for case in &canonical_data.cases {
get_diffs(case, &mut diffs, &tests_content, use_maplit)?;
}
apply_diffs(exercise_name, &diffs, &tests_content)?;
exercise::update_cargo_toml_version(exercise_name, &canonical_data)?;
Ok(())
}
|
get_diffs
|
identifier_name
|
update.rs
|
use crate::{
self as exercise,
errors::Result,
structs::{LabeledTest, LabeledTestItem},
};
use failure::format_err;
use std::{collections::HashSet, fs, path::Path};
enum DiffType {
NEW,
UPDATED,
}
fn generate_diff_test(
case: &LabeledTest,
diff_type: &DiffType,
use_maplit: bool,
) -> Result<String> {
Ok(format!(
"//{}\n{}",
match diff_type {
DiffType::NEW => "NEW",
DiffType::UPDATED => "UPDATED",
},
exercise::generate_test_function(case, use_maplit)?
))
}
fn generate_diff_property(property: &str) -> Result<String> {
Ok(format!(
"//{}\n{}",
"NEW",
exercise::generate_property_body(property)?
))
}
fn generate_diffs(
case: &LabeledTest,
tests_content: &str,
diffs: &mut HashSet<String>,
use_maplit: bool,
) -> Result<()> {
let description = &case.description;
let description_formatted = exercise::format_exercise_description(description);
let diff_type = if !tests_content.contains(&format!("test_{}", description_formatted)) {
DiffType::NEW
} else
|
;
if diffs.insert(generate_diff_test(case, &diff_type, use_maplit)?) {
match diff_type {
DiffType::NEW => println!("New test case detected: {}.", description_formatted),
DiffType::UPDATED => println!("Updated test case: {}.", description_formatted),
}
}
let property = &case.property;
let property_formatted = exercise::format_exercise_property(property);
if !tests_content.contains(&format!("process_{}_case", property_formatted))
&& diffs.insert(generate_diff_property(property)?)
{
println!("New property detected: {}.", property);
}
Ok(())
}
fn get_diffs(
case: &LabeledTestItem,
diffs: &mut HashSet<String>,
tests_content: &str,
use_maplit: bool,
) -> Result<()> {
match case {
LabeledTestItem::Single(case) => generate_diffs(case, &tests_content, diffs, use_maplit)?,
LabeledTestItem::Array(group) => {
for case in &group.cases {
get_diffs(case, diffs, tests_content, use_maplit)?;
}
}
}
Ok(())
}
fn apply_diffs(exercise_name: &str, diffs: &HashSet<String>, tests_content: &str) -> Result<()> {
let updated_tests_content = format!(
"{}\n{}",
tests_content,
diffs
.iter()
.map(|diff| format!("\n{}", diff))
.collect::<String>()
);
let tests_path = Path::new(&*exercise::TRACK_ROOT)
.join("exercises")
.join(exercise_name)
.join("tests")
.join(format!("{}.rs", exercise_name));
fs::write(&tests_path, updated_tests_content.as_bytes())?;
exercise::rustfmt(&tests_path)?;
Ok(())
}
pub fn update_exercise(exercise_name: &str, use_maplit: bool) -> Result<()> {
if !exercise::exercise_exists(exercise_name) {
return Err(
format_err!("exercise with the name '{}' does not exist", exercise_name).into(),
);
}
let tests_content = exercise::get_tests_content(exercise_name)?;
let canonical_data = exercise::get_canonical_data(exercise_name)?;
let mut diffs: HashSet<String> = HashSet::new();
for case in &canonical_data.cases {
get_diffs(case, &mut diffs, &tests_content, use_maplit)?;
}
apply_diffs(exercise_name, &diffs, &tests_content)?;
exercise::update_cargo_toml_version(exercise_name, &canonical_data)?;
Ok(())
}
|
{
DiffType::UPDATED
}
|
conditional_block
|
update.rs
|
use crate::{
self as exercise,
errors::Result,
structs::{LabeledTest, LabeledTestItem},
};
use failure::format_err;
use std::{collections::HashSet, fs, path::Path};
enum DiffType {
NEW,
UPDATED,
}
fn generate_diff_test(
case: &LabeledTest,
diff_type: &DiffType,
use_maplit: bool,
) -> Result<String>
|
fn generate_diff_property(property: &str) -> Result<String> {
Ok(format!(
"//{}\n{}",
"NEW",
exercise::generate_property_body(property)?
))
}
fn generate_diffs(
case: &LabeledTest,
tests_content: &str,
diffs: &mut HashSet<String>,
use_maplit: bool,
) -> Result<()> {
let description = &case.description;
let description_formatted = exercise::format_exercise_description(description);
let diff_type = if !tests_content.contains(&format!("test_{}", description_formatted)) {
DiffType::NEW
} else {
DiffType::UPDATED
};
if diffs.insert(generate_diff_test(case, &diff_type, use_maplit)?) {
match diff_type {
DiffType::NEW => println!("New test case detected: {}.", description_formatted),
DiffType::UPDATED => println!("Updated test case: {}.", description_formatted),
}
}
let property = &case.property;
let property_formatted = exercise::format_exercise_property(property);
if !tests_content.contains(&format!("process_{}_case", property_formatted))
&& diffs.insert(generate_diff_property(property)?)
{
println!("New property detected: {}.", property);
}
Ok(())
}
fn get_diffs(
case: &LabeledTestItem,
diffs: &mut HashSet<String>,
tests_content: &str,
use_maplit: bool,
) -> Result<()> {
match case {
LabeledTestItem::Single(case) => generate_diffs(case, &tests_content, diffs, use_maplit)?,
LabeledTestItem::Array(group) => {
for case in &group.cases {
get_diffs(case, diffs, tests_content, use_maplit)?;
}
}
}
Ok(())
}
fn apply_diffs(exercise_name: &str, diffs: &HashSet<String>, tests_content: &str) -> Result<()> {
let updated_tests_content = format!(
"{}\n{}",
tests_content,
diffs
.iter()
.map(|diff| format!("\n{}", diff))
.collect::<String>()
);
let tests_path = Path::new(&*exercise::TRACK_ROOT)
.join("exercises")
.join(exercise_name)
.join("tests")
.join(format!("{}.rs", exercise_name));
fs::write(&tests_path, updated_tests_content.as_bytes())?;
exercise::rustfmt(&tests_path)?;
Ok(())
}
pub fn update_exercise(exercise_name: &str, use_maplit: bool) -> Result<()> {
if !exercise::exercise_exists(exercise_name) {
return Err(
format_err!("exercise with the name '{}' does not exist", exercise_name).into(),
);
}
let tests_content = exercise::get_tests_content(exercise_name)?;
let canonical_data = exercise::get_canonical_data(exercise_name)?;
let mut diffs: HashSet<String> = HashSet::new();
for case in &canonical_data.cases {
get_diffs(case, &mut diffs, &tests_content, use_maplit)?;
}
apply_diffs(exercise_name, &diffs, &tests_content)?;
exercise::update_cargo_toml_version(exercise_name, &canonical_data)?;
Ok(())
}
|
{
Ok(format!(
"//{}\n{}",
match diff_type {
DiffType::NEW => "NEW",
DiffType::UPDATED => "UPDATED",
},
exercise::generate_test_function(case, use_maplit)?
))
}
|
identifier_body
|
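For reference, the NEW/UPDATED decision made by generate_diffs in the update.rs records above reduces to a substring check against the existing test file: a case is NEW when no test_{description} function exists yet. A standalone restatement with illustrative inputs:

#[derive(Debug, PartialEq)]
enum DiffType {
    New,
    Updated,
}

// Mirrors the record's check: look for a generated test function name
// in the current contents of the exercise's test file.
fn classify(tests_content: &str, description_formatted: &str) -> DiffType {
    if tests_content.contains(&format!("test_{}", description_formatted)) {
        DiffType::Updated
    } else {
        DiffType::New
    }
}

fn main() {
    let tests = "#[test]\nfn test_empty_input() {}\n";
    assert_eq!(classify(tests, "empty_input"), DiffType::Updated);
    assert_eq!(classify(tests, "single_digit"), DiffType::New);
}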
tradchinese.rs
|
// This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Legacy traditional Chinese encodings.
use std::convert::Into;
use std::default::Default;
use util::StrCharIndex;
use index_tradchinese as index;
use types::*;
/**
* Big5-2003 with common extensions. (XXX with asymmetric HKSCS-2008 support)
*
* This is a traditional Chinese encoding spanning the region `[81-FE] [40-7E A1-FE]`.
* Originally a proprietary encoding by the consortium of five companies (hence the name),
* the Republic of China government standardized Big5-2003 in an appendix of CNS 11643
* so that CNS 11643 plane 1 and plane 2 have
* an almost identical set of characters as Big5 (but with a different mapping).
* The Hong Kong government has an official extension to Big5
* named Hong Kong Supplementary Character Set (HKSCS).
*
* This particular implementation of Big5 includes the widespread ETEN and HKSCS extensions,
* but excludes less common extensions such as Big5+, Big-5E and Unicode-at-on.
*/
#[derive(Clone, Copy)]
pub struct BigFive2003Encoding;
impl Encoding for BigFive2003Encoding {
fn name(&self) -> &'static str { "big5-2003" }
fn whatwg_name(&self) -> Option<&'static str> { Some("big5") } // WHATWG compatibility
fn raw_encoder(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn raw_decoder(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
}
/// An encoder for Big5-2003.
#[derive(Clone, Copy)]
pub struct BigFive2003Encoder;
impl BigFive2003Encoder {
pub fn new() -> Box<RawEncoder> { Box::new(BigFive2003Encoder) }
}
impl RawEncoder for BigFive2003Encoder {
fn from_self(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i,j), ch) in input.index_iter() {
if ch < '\u{80}' {
output.write_byte(ch as u8);
} else {
let ptr = index::big5::backward(ch as u32);
if ptr == 0xffff {
return (i, Some(CodecError {
upto: j as isize, cause: "unrepresentable character".into()
}));
}
let lead = ptr / 157 + 0x81;
let trail = ptr % 157;
let trailoffset = if trail < 0x3f {0x40} else {0x62};
output.write_byte(lead as u8);
output.write_byte((trail + trailoffset) as u8);
}
}
(input.len(), None)
}
fn
|
(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for Big5-2003 with HKSCS-2008 extension.
#[derive(Clone, Copy)]
struct BigFive2003HKSCS2008Decoder {
st: bigfive2003::State,
}
impl BigFive2003HKSCS2008Decoder {
pub fn new() -> Box<RawDecoder> {
Box::new(BigFive2003HKSCS2008Decoder { st: Default::default() })
}
}
impl RawDecoder for BigFive2003HKSCS2008Decoder {
fn from_self(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
let (st, processed, err) = bigfive2003::raw_feed(self.st, input, output, &());
self.st = st;
(processed, err)
}
fn raw_finish(&mut self, output: &mut StringWriter) -> Option<CodecError> {
let (st, err) = bigfive2003::raw_finish(self.st, output, &());
self.st = st;
err
}
}
stateful_decoder! {
module bigfive2003;
internal pub fn map_two_bytes(lead: u8, trail: u8) -> u32 {
use index_tradchinese as index;
let lead = lead as u16;
let trail = trail as u16;
let index = match (lead, trail) {
(0x81...0xfe, 0x40...0x7e) | (0x81...0xfe, 0xa1...0xfe) => {
let trailoffset = if trail < 0x7f {0x40} else {0x62};
(lead - 0x81) * 157 + trail - trailoffset
}
_ => 0xffff,
};
index::big5::forward(index) // may return two-letter replacements 0..3
}
initial:
// big5 lead = 0x00
state S0(ctx: Context) {
case b @ 0x00...0x7f => ctx.emit(b as u32);
case b @ 0x81...0xfe => S1(ctx, b);
case _ => ctx.err("invalid sequence");
}
transient:
// big5 lead != 0x00
state S1(ctx: Context, lead: u8) {
case b => match map_two_bytes(lead, b) {
0xffff => {
let backup = if b < 0x80 {1} else {0};
ctx.backup_and_err(backup, "invalid sequence")
},
0 /*index=1133*/ => ctx.emit_str("\u{ca}\u{304}"),
1 /*index=1135*/ => ctx.emit_str("\u{ca}\u{30c}"),
2 /*index=1164*/ => ctx.emit_str("\u{ea}\u{304}"),
3 /*index=1166*/ => ctx.emit_str("\u{ea}\u{30c}"),
ch => ctx.emit(ch),
};
}
}
#[cfg(test)]
mod bigfive2003_tests {
extern crate test;
use super::BigFive2003Encoding;
use testutils;
use types::*;
#[test]
fn test_encoder_valid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_ok!(e, "A", "", [0x41]);
assert_feed_ok!(e, "BC", "", [0x42, 0x43]);
assert_feed_ok!(e, "", "", []);
assert_feed_ok!(e, "\u{4e2d}\u{83ef}\u{6c11}\u{570b}", "",
[0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea]);
assert_feed_ok!(e, "1\u{20ac}/m", "", [0x31, 0xa3, 0xe1, 0x2f, 0x6d]);
assert_feed_ok!(e, "\u{ffed}", "", [0xf9, 0xfe]);
assert_feed_ok!(e, "\u{2550}", "", [0xf9, 0xf9]); // not [0xa2, 0xa4]
assert_finish_ok!(e, []);
}
#[test]
fn test_encoder_invalid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_err!(e, "", "\u{ffff}", "", []);
assert_feed_err!(e, "?", "\u{ffff}", "!", [0x3f]);
assert_feed_err!(e, "", "\u{3eec}", "\u{4e00}", []); // HKSCS-2008 addition
assert_finish_ok!(e, []);
}
#[test]
fn test_decoder_valid() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0x41], [], "A");
assert_feed_ok!(d, [0x42, 0x43], [], "BC");
assert_feed_ok!(d, [], [], "");
assert_feed_ok!(d, [0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea], [],
"\u{4e2d}\u{83ef}\u{6c11}\u{570b}");
assert_feed_ok!(d, [], [0xa4], "");
assert_feed_ok!(d, [0xa4, 0xb5, 0xd8], [0xa5], "\u{4e2d}\u{83ef}");
assert_feed_ok!(d, [0xc1, 0xb0, 0xea], [], "\u{6c11}\u{570b}");
assert_feed_ok!(d, [0x31, 0xa3, 0xe1, 0x2f, 0x6d], [], "1\u{20ac}/m");
assert_feed_ok!(d, [0xf9, 0xfe], [], "\u{ffed}");
assert_feed_ok!(d, [0xf9, 0xf9], [], "\u{2550}");
assert_feed_ok!(d, [0xa2, 0xa4], [], "\u{2550}");
assert_feed_ok!(d, [0x87, 0x7e], [], "\u{3eec}"); // HKSCS-2008 addition
assert_feed_ok!(d, [0x88, 0x62, 0x88, 0x64, 0x88, 0xa3, 0x88, 0xa5], [],
"\u{ca}\u{304}\u{00ca}\u{30c}\u{ea}\u{304}\u{ea}\u{30c}"); // 2-byte output
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_immediate_test_finish() {
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], ""); // wait for a trail
assert_finish_err!(d, "");
}
// 80/FF: immediate failure
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [], "");
assert_feed_err!(d, [], [0xff], [], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_followed_by_space() {
for i in 0x80..0x100 {
let i = i as u8;
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i], [0x20], "");
assert_finish_ok!(d, "");
}
}
#[test]
fn test_decoder_invalid_lead_followed_by_invalid_trail() {
// unlike most other cases, valid lead + invalid MSB-set trail are entirely consumed.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=16771
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i, 0x80], [0x20], "");
assert_feed_err!(d, [], [i, 0xff], [0x20], "");
assert_finish_ok!(d, "");
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0x80], [0x20], "");
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0xff], [0x20], "");
assert_finish_ok!(d, "");
}
// 80/FF is not a valid lead and the trail is not consumed
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [0x80], "");
assert_feed_err!(d, [], [0x80], [0xff], "");
assert_feed_err!(d, [], [0xff], [0x80], "");
assert_feed_err!(d, [], [0xff], [0xff], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_feed_after_finish() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0xa4, 0x40], [0xa4], "\u{4e00}");
assert_finish_err!(d, "");
assert_feed_ok!(d, [0xa4, 0x40], [], "\u{4e00}");
assert_finish_ok!(d, "");
}
#[bench]
fn bench_encode_short_text(bencher: &mut test::Bencher) {
let s = testutils::TRADITIONAL_CHINESE_TEXT;
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.encode(&s, EncoderTrap::Strict)
}))
}
#[bench]
fn bench_decode_short_text(bencher: &mut test::Bencher) {
let s = BigFive2003Encoding.encode(testutils::TRADITIONAL_CHINESE_TEXT,
EncoderTrap::Strict).ok().unwrap();
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.decode(&s, DecoderTrap::Strict)
}))
}
}
|
raw_finish
|
identifier_name
|
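The encoder loop and `map_two_bytes` above are inverses of each other: a linear Big5 pointer maps to a (lead, trail) byte pair via `lead = ptr / 157 + 0x81` plus an offset-adjusted trail. A standalone sketch of that arithmetic, not part of tradchinese.rs (hypothetical function names, written with current `..=` range syntax rather than the older `...` used above):

// Sketch of the pointer <-> byte-pair arithmetic used by the encoder and by
// map_two_bytes. Trails 0x00..=0x3e become bytes 0x40..=0x7e and trails
// 0x3f..=0x9c become bytes 0xa1..=0xfe.
fn ptr_to_bytes(ptr: u16) -> (u8, u8) {
    let lead = ptr / 157 + 0x81;
    let trail = ptr % 157;
    let trailoffset = if trail < 0x3f { 0x40 } else { 0x62 };
    (lead as u8, (trail + trailoffset) as u8)
}

fn bytes_to_ptr(lead: u8, trail: u8) -> Option<u16> {
    let (lead, trail) = (lead as u16, trail as u16);
    match (lead, trail) {
        (0x81..=0xfe, 0x40..=0x7e) | (0x81..=0xfe, 0xa1..=0xfe) => {
            let trailoffset = if trail < 0x7f { 0x40 } else { 0x62 };
            Some((lead - 0x81) * 157 + trail - trailoffset)
        }
        _ => None, // the 0xffff sentinel in the original code
    }
}

fn main() {
    // every representable pointer survives the round trip
    for ptr in 0..126 * 157 {
        let (lead, trail) = ptr_to_bytes(ptr);
        assert_eq!(bytes_to_ptr(lead, trail), Some(ptr));
    }
}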
tradchinese.rs
|
// This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Legacy traditional Chinese encodings.
use std::convert::Into;
use std::default::Default;
use util::StrCharIndex;
use index_tradchinese as index;
use types::*;
/**
* Big5-2003 with common extensions. (XXX with asymmetric HKSCS-2008 support)
*
* This is a traditional Chinese encoding spanning the region `[81-FE] [40-7E A1-FE]`.
* Originally a proprietary encoding by the consortium of five companies (hence the name),
* the Republic of China government standardized Big5-2003 in an appendix of CNS 11643
* so that CNS 11643 plane 1 and plane 2 have
* an almost identical set of characters as Big5 (but with a different mapping).
* The Hong Kong government has an official extension to Big5
* named Hong Kong Supplementary Character Set (HKSCS).
*
* This particular implementation of Big5 includes the widespread ETEN and HKSCS extensions,
* but excludes less common extensions such as Big5+, Big-5E and Unicode-at-on.
*/
#[derive(Clone, Copy)]
pub struct BigFive2003Encoding;
impl Encoding for BigFive2003Encoding {
fn name(&self) -> &'static str { "big5-2003" }
fn whatwg_name(&self) -> Option<&'static str> { Some("big5") } // WHATWG compatibility
fn raw_encoder(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn raw_decoder(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
}
/// An encoder for Big5-2003.
#[derive(Clone, Copy)]
pub struct BigFive2003Encoder;
impl BigFive2003Encoder {
pub fn new() -> Box<RawEncoder>
|
}
impl RawEncoder for BigFive2003Encoder {
fn from_self(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i,j), ch) in input.index_iter() {
if ch < '\u{80}' {
output.write_byte(ch as u8);
} else {
let ptr = index::big5::backward(ch as u32);
if ptr == 0xffff {
return (i, Some(CodecError {
upto: j as isize, cause: "unrepresentable character".into()
}));
}
let lead = ptr / 157 + 0x81;
let trail = ptr % 157;
let trailoffset = if trail < 0x3f {0x40} else {0x62};
output.write_byte(lead as u8);
output.write_byte((trail + trailoffset) as u8);
}
}
(input.len(), None)
}
fn raw_finish(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for Big5-2003 with HKSCS-2008 extension.
#[derive(Clone, Copy)]
struct BigFive2003HKSCS2008Decoder {
st: bigfive2003::State,
}
impl BigFive2003HKSCS2008Decoder {
pub fn new() -> Box<RawDecoder> {
Box::new(BigFive2003HKSCS2008Decoder { st: Default::default() })
}
}
impl RawDecoder for BigFive2003HKSCS2008Decoder {
fn from_self(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
let (st, processed, err) = bigfive2003::raw_feed(self.st, input, output, &());
self.st = st;
(processed, err)
}
fn raw_finish(&mut self, output: &mut StringWriter) -> Option<CodecError> {
let (st, err) = bigfive2003::raw_finish(self.st, output, &());
self.st = st;
err
}
}
stateful_decoder! {
module bigfive2003;
internal pub fn map_two_bytes(lead: u8, trail: u8) -> u32 {
use index_tradchinese as index;
let lead = lead as u16;
let trail = trail as u16;
let index = match (lead, trail) {
(0x81...0xfe, 0x40...0x7e) | (0x81...0xfe, 0xa1...0xfe) => {
let trailoffset = if trail < 0x7f {0x40} else {0x62};
(lead - 0x81) * 157 + trail - trailoffset
}
_ => 0xffff,
};
index::big5::forward(index) // may return two-letter replacements 0..3
}
initial:
// big5 lead = 0x00
state S0(ctx: Context) {
case b @ 0x00...0x7f => ctx.emit(b as u32);
case b @ 0x81...0xfe => S1(ctx, b);
case _ => ctx.err("invalid sequence");
}
transient:
// big5 lead != 0x00
state S1(ctx: Context, lead: u8) {
case b => match map_two_bytes(lead, b) {
0xffff => {
let backup = if b < 0x80 {1} else {0};
ctx.backup_and_err(backup, "invalid sequence")
},
0 /*index=1133*/ => ctx.emit_str("\u{ca}\u{304}"),
1 /*index=1135*/ => ctx.emit_str("\u{ca}\u{30c}"),
2 /*index=1164*/ => ctx.emit_str("\u{ea}\u{304}"),
3 /*index=1166*/ => ctx.emit_str("\u{ea}\u{30c}"),
ch => ctx.emit(ch),
};
}
}
#[cfg(test)]
mod bigfive2003_tests {
extern crate test;
use super::BigFive2003Encoding;
use testutils;
use types::*;
#[test]
fn test_encoder_valid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_ok!(e, "A", "", [0x41]);
assert_feed_ok!(e, "BC", "", [0x42, 0x43]);
assert_feed_ok!(e, "", "", []);
assert_feed_ok!(e, "\u{4e2d}\u{83ef}\u{6c11}\u{570b}", "",
[0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea]);
assert_feed_ok!(e, "1\u{20ac}/m", "", [0x31, 0xa3, 0xe1, 0x2f, 0x6d]);
assert_feed_ok!(e, "\u{ffed}", "", [0xf9, 0xfe]);
assert_feed_ok!(e, "\u{2550}", "", [0xf9, 0xf9]); // not [0xa2, 0xa4]
assert_finish_ok!(e, []);
}
#[test]
fn test_encoder_invalid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_err!(e, "", "\u{ffff}", "", []);
assert_feed_err!(e, "?", "\u{ffff}", "!", [0x3f]);
assert_feed_err!(e, "", "\u{3eec}", "\u{4e00}", []); // HKSCS-2008 addition
assert_finish_ok!(e, []);
}
#[test]
fn test_decoder_valid() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0x41], [], "A");
assert_feed_ok!(d, [0x42, 0x43], [], "BC");
assert_feed_ok!(d, [], [], "");
assert_feed_ok!(d, [0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea], [],
"\u{4e2d}\u{83ef}\u{6c11}\u{570b}");
assert_feed_ok!(d, [], [0xa4], "");
assert_feed_ok!(d, [0xa4, 0xb5, 0xd8], [0xa5], "\u{4e2d}\u{83ef}");
assert_feed_ok!(d, [0xc1, 0xb0, 0xea], [], "\u{6c11}\u{570b}");
assert_feed_ok!(d, [0x31, 0xa3, 0xe1, 0x2f, 0x6d], [], "1\u{20ac}/m");
assert_feed_ok!(d, [0xf9, 0xfe], [], "\u{ffed}");
assert_feed_ok!(d, [0xf9, 0xf9], [], "\u{2550}");
assert_feed_ok!(d, [0xa2, 0xa4], [], "\u{2550}");
assert_feed_ok!(d, [0x87, 0x7e], [], "\u{3eec}"); // HKSCS-2008 addition
assert_feed_ok!(d, [0x88, 0x62, 0x88, 0x64, 0x88, 0xa3, 0x88, 0xa5], [],
"\u{ca}\u{304}\u{00ca}\u{30c}\u{ea}\u{304}\u{ea}\u{30c}"); // 2-byte output
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_immediate_test_finish() {
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], ""); // wait for a trail
assert_finish_err!(d, "");
}
// 80/FF: immediate failure
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [], "");
assert_feed_err!(d, [], [0xff], [], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_followed_by_space() {
for i in 0x80..0x100 {
let i = i as u8;
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i], [0x20], "");
assert_finish_ok!(d, "");
}
}
#[test]
fn test_decoder_invalid_lead_followed_by_invalid_trail() {
// unlike most other cases, valid lead + invalid MSB-set trail are entirely consumed.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=16771
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i, 0x80], [0x20], "");
assert_feed_err!(d, [], [i, 0xff], [0x20], "");
assert_finish_ok!(d, "");
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0x80], [0x20], "");
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0xff], [0x20], "");
assert_finish_ok!(d, "");
}
// 80/FF is not a valid lead and the trail is not consumed
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [0x80], "");
assert_feed_err!(d, [], [0x80], [0xff], "");
assert_feed_err!(d, [], [0xff], [0x80], "");
assert_feed_err!(d, [], [0xff], [0xff], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_feed_after_finish() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0xa4, 0x40], [0xa4], "\u{4e00}");
assert_finish_err!(d, "");
assert_feed_ok!(d, [0xa4, 0x40], [], "\u{4e00}");
assert_finish_ok!(d, "");
}
#[bench]
fn bench_encode_short_text(bencher: &mut test::Bencher) {
let s = testutils::TRADITIONAL_CHINESE_TEXT;
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.encode(&s, EncoderTrap::Strict)
}))
}
#[bench]
fn bench_decode_short_text(bencher: &mut test::Bencher) {
let s = BigFive2003Encoding.encode(testutils::TRADITIONAL_CHINESE_TEXT,
EncoderTrap::Strict).ok().unwrap();
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.decode(&s, DecoderTrap::Strict)
}))
}
}
|
{ Box::new(BigFive2003Encoder) }
|
identifier_body
|
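At the public API level, the same codec is driven through the `Encoding` trait methods that the benchmarks above call. A minimal strict round-trip sketch, with byte values taken from the tests in this file (assumes the rust-encoding `encode`/`decode` signatures used above):

// Sketch: strict encode/decode round trip through Big5-2003.
// 0xa4 0xa4 / 0xb5 0xd8 are the test vectors for U+4E2D U+83EF above.
fn roundtrip() {
    let text = "\u{4e2d}\u{83ef}";
    let bytes = BigFive2003Encoding.encode(text, EncoderTrap::Strict)
        .expect("representable in Big5-2003");
    assert_eq!(bytes, [0xa4, 0xa4, 0xb5, 0xd8]);
    let back = BigFive2003Encoding.decode(&bytes, DecoderTrap::Strict)
        .expect("valid Big5-2003");
    assert_eq!(back, text);
}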
tradchinese.rs
|
// This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Legacy traditional Chinese encodings.
use std::convert::Into;
use std::default::Default;
use util::StrCharIndex;
use index_tradchinese as index;
use types::*;
/**
* Big5-2003 with common extensions. (XXX with asymmetric HKSCS-2008 support)
*
* This is a traditional Chinese encoding spanning the region `[81-FE] [40-7E A1-FE]`.
* Originally a proprietary encoding by the consortium of five companies (hence the name),
* the Republic of China government standardized Big5-2003 in an appendix of CNS 11643
* so that CNS 11643 plane 1 and plane 2 have
* an almost identical set of characters as Big5 (but with a different mapping).
* The Hong Kong government has an official extension to Big5
* named Hong Kong Supplementary Character Set (HKSCS).
*
* This particular implementation of Big5 includes the widespread ETEN and HKSCS extensions,
* but excludes less common extensions such as Big5+, Big-5E and Unicode-at-on.
*/
#[derive(Clone, Copy)]
pub struct BigFive2003Encoding;
impl Encoding for BigFive2003Encoding {
fn name(&self) -> &'static str { "big5-2003" }
fn whatwg_name(&self) -> Option<&'static str> { Some("big5") } // WHATWG compatibility
fn raw_encoder(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn raw_decoder(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
}
/// An encoder for Big5-2003.
#[derive(Clone, Copy)]
pub struct BigFive2003Encoder;
impl BigFive2003Encoder {
pub fn new() -> Box<RawEncoder> { Box::new(BigFive2003Encoder) }
}
impl RawEncoder for BigFive2003Encoder {
fn from_self(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i,j), ch) in input.index_iter() {
if ch < '\u{80}' {
output.write_byte(ch as u8);
} else {
let ptr = index::big5::backward(ch as u32);
if ptr == 0xffff {
return (i, Some(CodecError {
upto: j as isize, cause: "unrepresentable character".into()
}));
}
let lead = ptr / 157 + 0x81;
let trail = ptr % 157;
let trailoffset = if trail < 0x3f {0x40} else {0x62};
output.write_byte(lead as u8);
output.write_byte((trail + trailoffset) as u8);
}
}
(input.len(), None)
}
fn raw_finish(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for Big5-2003 with HKSCS-2008 extension.
#[derive(Clone, Copy)]
struct BigFive2003HKSCS2008Decoder {
st: bigfive2003::State,
}
impl BigFive2003HKSCS2008Decoder {
pub fn new() -> Box<RawDecoder> {
Box::new(BigFive2003HKSCS2008Decoder { st: Default::default() })
}
}
impl RawDecoder for BigFive2003HKSCS2008Decoder {
fn from_self(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
let (st, processed, err) = bigfive2003::raw_feed(self.st, input, output, &());
self.st = st;
(processed, err)
}
fn raw_finish(&mut self, output: &mut StringWriter) -> Option<CodecError> {
let (st, err) = bigfive2003::raw_finish(self.st, output, &());
self.st = st;
err
}
}
stateful_decoder! {
module bigfive2003;
internal pub fn map_two_bytes(lead: u8, trail: u8) -> u32 {
use index_tradchinese as index;
let lead = lead as u16;
let trail = trail as u16;
let index = match (lead, trail) {
(0x81...0xfe, 0x40...0x7e) | (0x81...0xfe, 0xa1...0xfe) => {
let trailoffset = if trail < 0x7f {0x40} else {0x62};
(lead - 0x81) * 157 + trail - trailoffset
}
_ => 0xffff,
};
index::big5::forward(index) // may return two-letter replacements 0..3
}
initial:
// big5 lead = 0x00
state S0(ctx: Context) {
case b @ 0x00...0x7f => ctx.emit(b as u32);
case b @ 0x81...0xfe => S1(ctx, b);
case _ => ctx.err("invalid sequence");
}
transient:
// big5 lead != 0x00
state S1(ctx: Context, lead: u8) {
case b => match map_two_bytes(lead, b) {
0xffff => {
let backup = if b < 0x80 {1} else {0};
ctx.backup_and_err(backup, "invalid sequence")
},
0 /*index=1133*/ => ctx.emit_str("\u{ca}\u{304}"),
1 /*index=1135*/ => ctx.emit_str("\u{ca}\u{30c}"),
2 /*index=1164*/ => ctx.emit_str("\u{ea}\u{304}"),
3 /*index=1166*/ => ctx.emit_str("\u{ea}\u{30c}"),
ch => ctx.emit(ch),
};
}
}
#[cfg(test)]
mod bigfive2003_tests {
extern crate test;
use super::BigFive2003Encoding;
use testutils;
use types::*;
#[test]
fn test_encoder_valid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_ok!(e, "A", "", [0x41]);
assert_feed_ok!(e, "BC", "", [0x42, 0x43]);
assert_feed_ok!(e, "", "", []);
assert_feed_ok!(e, "\u{4e2d}\u{83ef}\u{6c11}\u{570b}", "",
[0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea]);
assert_feed_ok!(e, "1\u{20ac}/m", "", [0x31, 0xa3, 0xe1, 0x2f, 0x6d]);
assert_feed_ok!(e, "\u{ffed}", "", [0xf9, 0xfe]);
assert_feed_ok!(e, "\u{2550}", "", [0xf9, 0xf9]); // not [0xa2, 0xa4]
assert_finish_ok!(e, []);
}
#[test]
fn test_encoder_invalid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_err!(e, "", "\u{ffff}", "", []);
assert_feed_err!(e, "?", "\u{ffff}", "!", [0x3f]);
assert_feed_err!(e, "", "\u{3eec}", "\u{4e00}", []); // HKSCS-2008 addition
assert_finish_ok!(e, []);
}
#[test]
fn test_decoder_valid() {
let mut d = BigFive2003Encoding.raw_decoder();
|
assert_feed_ok!(d, [], [0xa4], "");
assert_feed_ok!(d, [0xa4, 0xb5, 0xd8], [0xa5], "\u{4e2d}\u{83ef}");
assert_feed_ok!(d, [0xc1, 0xb0, 0xea], [], "\u{6c11}\u{570b}");
assert_feed_ok!(d, [0x31, 0xa3, 0xe1, 0x2f, 0x6d], [], "1\u{20ac}/m");
assert_feed_ok!(d, [0xf9, 0xfe], [], "\u{ffed}");
assert_feed_ok!(d, [0xf9, 0xf9], [], "\u{2550}");
assert_feed_ok!(d, [0xa2, 0xa4], [], "\u{2550}");
assert_feed_ok!(d, [0x87, 0x7e], [], "\u{3eec}"); // HKSCS-2008 addition
assert_feed_ok!(d, [0x88, 0x62, 0x88, 0x64, 0x88, 0xa3, 0x88, 0xa5], [],
"\u{ca}\u{304}\u{00ca}\u{30c}\u{ea}\u{304}\u{ea}\u{30c}"); // 2-byte output
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_immediate_test_finish() {
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], ""); // wait for a trail
assert_finish_err!(d, "");
}
// 80/FF: immediate failure
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [], "");
assert_feed_err!(d, [], [0xff], [], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_followed_by_space() {
for i in 0x80..0x100 {
let i = i as u8;
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i], [0x20], "");
assert_finish_ok!(d, "");
}
}
#[test]
fn test_decoder_invalid_lead_followed_by_invalid_trail() {
// unlike most other cases, valid lead + invalid MSB-set trail are entirely consumed.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=16771
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i, 0x80], [0x20], "");
assert_feed_err!(d, [], [i, 0xff], [0x20], "");
assert_finish_ok!(d, "");
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0x80], [0x20], "");
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0xff], [0x20], "");
assert_finish_ok!(d, "");
}
// 80/FF is not a valid lead and the trail is not consumed
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [0x80], "");
assert_feed_err!(d, [], [0x80], [0xff], "");
assert_feed_err!(d, [], [0xff], [0x80], "");
assert_feed_err!(d, [], [0xff], [0xff], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_feed_after_finish() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0xa4, 0x40], [0xa4], "\u{4e00}");
assert_finish_err!(d, "");
assert_feed_ok!(d, [0xa4, 0x40], [], "\u{4e00}");
assert_finish_ok!(d, "");
}
#[bench]
fn bench_encode_short_text(bencher: &mut test::Bencher) {
let s = testutils::TRADITIONAL_CHINESE_TEXT;
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.encode(&s, EncoderTrap::Strict)
}))
}
#[bench]
fn bench_decode_short_text(bencher: &mut test::Bencher) {
let s = BigFive2003Encoding.encode(testutils::TRADITIONAL_CHINESE_TEXT,
EncoderTrap::Strict).ok().unwrap();
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.decode(&s, DecoderTrap::Strict)
}))
}
}
|
assert_feed_ok!(d, [0x41], [], "A");
assert_feed_ok!(d, [0x42, 0x43], [], "BC");
assert_feed_ok!(d, [], [], "");
assert_feed_ok!(d, [0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea], [],
"\u{4e2d}\u{83ef}\u{6c11}\u{570b}");
|
random_line_split
|
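The invalid-trail tests above pin down a simple consumption rule, implemented by the `backup` computation in state S1: the lead byte is always consumed, and the trail byte is consumed only when its MSB is set (an ASCII trail is backed up and retried as a fresh lead). A tiny sketch of the rule (hypothetical helper name):

// How many bytes a (valid lead, bad trail) pair consumes on error.
fn bytes_consumed_on_error(trail: u8) -> usize {
    let backup = if trail < 0x80 { 1 } else { 0 }; // mirrors state S1 above
    2 - backup
}

fn main() {
    assert_eq!(bytes_consumed_on_error(0x20), 1); // ASCII trail retried as a lead
    assert_eq!(bytes_consumed_on_error(0x80), 2); // MSB-set trail is swallowed
}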
tradchinese.rs
|
// This is a part of rust-encoding.
// Copyright (c) 2013-2015, Kang Seonghoon.
// See README.md and LICENSE.txt for details.
//! Legacy traditional Chinese encodings.
use std::convert::Into;
use std::default::Default;
use util::StrCharIndex;
use index_tradchinese as index;
use types::*;
/**
* Big5-2003 with common extensions. (XXX with asymmetric HKSCS-2008 support)
*
* This is a traditional Chinese encoding spanning the region `[81-FE] [40-7E A1-FE]`.
* Originally a proprietary encoding by the consortium of five companies (hence the name),
* the Republic of China government standardized Big5-2003 in an appendix of CNS 11643
* so that CNS 11643 plane 1 and plane 2 have
* an almost identical set of characters as Big5 (but with a different mapping).
* The Hong Kong government has an official extension to Big5
* named Hong Kong Supplementary Character Set (HKSCS).
*
* This particular implementation of Big5 includes the widespread ETEN and HKSCS extensions,
* but excludes less common extensions such as Big5+, Big-5E and Unicode-at-on.
*/
#[derive(Clone, Copy)]
pub struct BigFive2003Encoding;
impl Encoding for BigFive2003Encoding {
fn name(&self) -> &'static str { "big5-2003" }
fn whatwg_name(&self) -> Option<&'static str> { Some("big5") } // WHATWG compatibility
fn raw_encoder(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn raw_decoder(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
}
/// An encoder for Big5-2003.
#[derive(Clone, Copy)]
pub struct BigFive2003Encoder;
impl BigFive2003Encoder {
pub fn new() -> Box<RawEncoder> { Box::new(BigFive2003Encoder) }
}
impl RawEncoder for BigFive2003Encoder {
fn from_self(&self) -> Box<RawEncoder> { BigFive2003Encoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &str, output: &mut ByteWriter) -> (usize, Option<CodecError>) {
output.writer_hint(input.len());
for ((i,j), ch) in input.index_iter() {
if ch < '\u{80}'
|
else {
let ptr = index::big5::backward(ch as u32);
if ptr == 0xffff {
return (i, Some(CodecError {
upto: j as isize, cause: "unrepresentable character".into()
}));
}
let lead = ptr / 157 + 0x81;
let trail = ptr % 157;
let trailoffset = if trail < 0x3f {0x40} else {0x62};
output.write_byte(lead as u8);
output.write_byte((trail + trailoffset) as u8);
}
}
(input.len(), None)
}
fn raw_finish(&mut self, _output: &mut ByteWriter) -> Option<CodecError> {
None
}
}
/// A decoder for Big5-2003 with HKSCS-2008 extension.
#[derive(Clone, Copy)]
struct BigFive2003HKSCS2008Decoder {
st: bigfive2003::State,
}
impl BigFive2003HKSCS2008Decoder {
pub fn new() -> Box<RawDecoder> {
Box::new(BigFive2003HKSCS2008Decoder { st: Default::default() })
}
}
impl RawDecoder for BigFive2003HKSCS2008Decoder {
fn from_self(&self) -> Box<RawDecoder> { BigFive2003HKSCS2008Decoder::new() }
fn is_ascii_compatible(&self) -> bool { true }
fn raw_feed(&mut self, input: &[u8], output: &mut StringWriter) -> (usize, Option<CodecError>) {
let (st, processed, err) = bigfive2003::raw_feed(self.st, input, output, &());
self.st = st;
(processed, err)
}
fn raw_finish(&mut self, output: &mut StringWriter) -> Option<CodecError> {
let (st, err) = bigfive2003::raw_finish(self.st, output, &());
self.st = st;
err
}
}
stateful_decoder! {
module bigfive2003;
internal pub fn map_two_bytes(lead: u8, trail: u8) -> u32 {
use index_tradchinese as index;
let lead = lead as u16;
let trail = trail as u16;
let index = match (lead, trail) {
(0x81...0xfe, 0x40...0x7e) | (0x81...0xfe, 0xa1...0xfe) => {
let trailoffset = if trail < 0x7f {0x40} else {0x62};
(lead - 0x81) * 157 + trail - trailoffset
}
_ => 0xffff,
};
index::big5::forward(index) // may return two-letter replacements 0..3
}
initial:
// big5 lead = 0x00
state S0(ctx: Context) {
case b @ 0x00...0x7f => ctx.emit(b as u32);
case b @ 0x81...0xfe => S1(ctx, b);
case _ => ctx.err("invalid sequence");
}
transient:
// big5 lead != 0x00
state S1(ctx: Context, lead: u8) {
case b => match map_two_bytes(lead, b) {
0xffff => {
let backup = if b < 0x80 {1} else {0};
ctx.backup_and_err(backup, "invalid sequence")
},
0 /*index=1133*/ => ctx.emit_str("\u{ca}\u{304}"),
1 /*index=1135*/ => ctx.emit_str("\u{ca}\u{30c}"),
2 /*index=1164*/ => ctx.emit_str("\u{ea}\u{304}"),
3 /*index=1166*/ => ctx.emit_str("\u{ea}\u{30c}"),
ch => ctx.emit(ch),
};
}
}
#[cfg(test)]
mod bigfive2003_tests {
extern crate test;
use super::BigFive2003Encoding;
use testutils;
use types::*;
#[test]
fn test_encoder_valid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_ok!(e, "A", "", [0x41]);
assert_feed_ok!(e, "BC", "", [0x42, 0x43]);
assert_feed_ok!(e, "", "", []);
assert_feed_ok!(e, "\u{4e2d}\u{83ef}\u{6c11}\u{570b}", "",
[0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea]);
assert_feed_ok!(e, "1\u{20ac}/m", "", [0x31, 0xa3, 0xe1, 0x2f, 0x6d]);
assert_feed_ok!(e, "\u{ffed}", "", [0xf9, 0xfe]);
assert_feed_ok!(e, "\u{2550}", "", [0xf9, 0xf9]); // not [0xa2, 0xa4]
assert_finish_ok!(e, []);
}
#[test]
fn test_encoder_invalid() {
let mut e = BigFive2003Encoding.raw_encoder();
assert_feed_err!(e, "", "\u{ffff}", "", []);
assert_feed_err!(e, "?", "\u{ffff}", "!", [0x3f]);
assert_feed_err!(e, "", "\u{3eec}", "\u{4e00}", []); // HKSCS-2008 addition
assert_finish_ok!(e, []);
}
#[test]
fn test_decoder_valid() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0x41], [], "A");
assert_feed_ok!(d, [0x42, 0x43], [], "BC");
assert_feed_ok!(d, [], [], "");
assert_feed_ok!(d, [0xa4, 0xa4, 0xb5, 0xd8, 0xa5, 0xc1, 0xb0, 0xea], [],
"\u{4e2d}\u{83ef}\u{6c11}\u{570b}");
assert_feed_ok!(d, [], [0xa4], "");
assert_feed_ok!(d, [0xa4, 0xb5, 0xd8], [0xa5], "\u{4e2d}\u{83ef}");
assert_feed_ok!(d, [0xc1, 0xb0, 0xea], [], "\u{6c11}\u{570b}");
assert_feed_ok!(d, [0x31, 0xa3, 0xe1, 0x2f, 0x6d], [], "1\u{20ac}/m");
assert_feed_ok!(d, [0xf9, 0xfe], [], "\u{ffed}");
assert_feed_ok!(d, [0xf9, 0xf9], [], "\u{2550}");
assert_feed_ok!(d, [0xa2, 0xa4], [], "\u{2550}");
assert_feed_ok!(d, [0x87, 0x7e], [], "\u{3eec}"); // HKSCS-2008 addition
assert_feed_ok!(d, [0x88, 0x62, 0x88, 0x64, 0x88, 0xa3, 0x88, 0xa5], [],
"\u{ca}\u{304}\u{00ca}\u{30c}\u{ea}\u{304}\u{ea}\u{30c}"); // 2-byte output
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_immediate_test_finish() {
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], ""); // wait for a trail
assert_finish_err!(d, "");
}
// 80/FF: immediate failure
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [], "");
assert_feed_err!(d, [], [0xff], [], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_invalid_lone_lead_followed_by_space() {
for i in 0x80..0x100 {
let i = i as u8;
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i], [0x20], "");
assert_finish_ok!(d, "");
}
}
#[test]
fn test_decoder_invalid_lead_followed_by_invalid_trail() {
// unlike most other cases, valid lead + invalid MSB-set trail are entirely consumed.
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=16771
for i in 0x81..0xff {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [i, 0x80], [0x20], "");
assert_feed_err!(d, [], [i, 0xff], [0x20], "");
assert_finish_ok!(d, "");
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0x80], [0x20], "");
assert_feed_ok!(d, [], [i], "");
assert_feed_err!(d, [], [0xff], [0x20], "");
assert_finish_ok!(d, "");
}
// 80/FF is not a valid lead and the trail is not consumed
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_err!(d, [], [0x80], [0x80], "");
assert_feed_err!(d, [], [0x80], [0xff], "");
assert_feed_err!(d, [], [0xff], [0x80], "");
assert_feed_err!(d, [], [0xff], [0xff], "");
assert_finish_ok!(d, "");
}
#[test]
fn test_decoder_feed_after_finish() {
let mut d = BigFive2003Encoding.raw_decoder();
assert_feed_ok!(d, [0xa4, 0x40], [0xa4], "\u{4e00}");
assert_finish_err!(d, "");
assert_feed_ok!(d, [0xa4, 0x40], [], "\u{4e00}");
assert_finish_ok!(d, "");
}
#[bench]
fn bench_encode_short_text(bencher: &mut test::Bencher) {
let s = testutils::TRADITIONAL_CHINESE_TEXT;
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.encode(&s, EncoderTrap::Strict)
}))
}
#[bench]
fn bench_decode_short_text(bencher: &mut test::Bencher) {
let s = BigFive2003Encoding.encode(testutils::TRADITIONAL_CHINESE_TEXT,
EncoderTrap::Strict).ok().unwrap();
bencher.bytes = s.len() as u64;
bencher.iter(|| test::black_box({
BigFive2003Encoding.decode(&s, DecoderTrap::Strict)
}))
}
}
|
{
output.write_byte(ch as u8);
}
|
conditional_block
|
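The "asymmetric HKSCS-2008 support" flagged in the doc comment above shows up directly in the tests: the decode table is a strict superset of the encode table. A sketch of the asymmetry, again assuming the trait API exercised in this file:

// Sketch: sequences that decode but whose code points do not encode back.
fn asymmetry() {
    // 0x87 0x7e decodes to U+3EEC (HKSCS-2008), yet encoding U+3EEC fails.
    assert!(BigFive2003Encoding.decode(&[0x87, 0x7e], DecoderTrap::Strict).is_ok());
    assert!(BigFive2003Encoding.encode("\u{3eec}", EncoderTrap::Strict).is_err());
    // both 0xf9 0xf9 and 0xa2 0xa4 decode to U+2550; the encoder emits only the former.
    let a = BigFive2003Encoding.decode(&[0xf9, 0xf9], DecoderTrap::Strict).unwrap();
    let b = BigFive2003Encoding.decode(&[0xa2, 0xa4], DecoderTrap::Strict).unwrap();
    assert_eq!(a, b);
}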
dstr-async-gen-meth-ary-ptrn-rest-obj-prop-id.js
|
// This file was procedurally generated from the following sources:
// - src/dstr-binding/ary-ptrn-rest-obj-prop-id.case
// - src/dstr-binding/default/cls-expr-async-gen-meth.template
/*---
description: Rest element containing an object binding pattern (class expression method)
esid: sec-class-definitions-runtime-semantics-evaluation
features: [async-iteration]
flags: [generated, async]
info: |
ClassExpression : class BindingIdentifieropt ClassTail
1. If BindingIdentifieropt is not present, let className be undefined.
2. Else, let className be StringValue of BindingIdentifier.
3. Let value be the result of ClassDefinitionEvaluation of ClassTail
with argument className.
[...]
|
a. If IsStatic of m is false, then
i. Let status be the result of performing
PropertyDefinitionEvaluation for m with arguments proto and
false.
[...]
Runtime Semantics: PropertyDefinitionEvaluation
AsyncGeneratorMethod :
async [no LineTerminator here] * PropertyName ( UniqueFormalParameters )
{ AsyncGeneratorBody }
1. Let propKey be the result of evaluating PropertyName.
2. ReturnIfAbrupt(propKey).
3. If the function code for this AsyncGeneratorMethod is strict mode code, let strict be true.
Otherwise let strict be false.
4. Let scope be the running execution context's LexicalEnvironment.
5. Let closure be ! AsyncGeneratorFunctionCreate(Method, UniqueFormalParameters,
AsyncGeneratorBody, scope, strict).
[...]
13.3.3.6 Runtime Semantics: IteratorBindingInitialization
BindingRestElement : ... BindingPattern
1. Let A be ArrayCreate(0).
[...]
3. Repeat
[...]
b. If iteratorRecord.[[done]] is true, then
i. Return the result of performing BindingInitialization of
BindingPattern with A and environment as the arguments.
[...]
---*/
let length = "outer";
var callCount = 0;
var C = class {
async *method([...{ 0: v, 1: w, 2: x, 3: y, length: z }]) {
assert.sameValue(v, 7);
assert.sameValue(w, 8);
assert.sameValue(x, 9);
assert.sameValue(y, undefined);
assert.sameValue(z, 3);
assert.sameValue(length, "outer", "the length prop is not set as a binding name");
callCount = callCount + 1;
}
};
new C().method([7, 8, 9]).next().then(() => {
assert.sameValue(callCount, 1, 'invoked exactly once');
}).then($DONE, $DONE);
|
14.5.14 Runtime Semantics: ClassDefinitionEvaluation
21. For each ClassElement m in order from methods
|
random_line_split
|
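The spec steps quoted above collect the remaining iterator values into an array A before the object binding pattern reads A's numeric properties and `length`; that is why `y` is undefined and `z` is 3 for the input `[7, 8, 9]`. A rough analogue of those observed values, sketched in Rust purely for illustration (the JavaScript semantics above are the authority):

// The rest element gathers [7, 8, 9] into A; the object pattern then reads
// indices 0..=3 and the length, so index 3 is absent and the length is 3.
fn main() {
    let a: Vec<u32> = vec![7, 8, 9];
    let (v, w, x, y, z) = (a.get(0), a.get(1), a.get(2), a.get(3), a.len());
    assert_eq!((v, w, x, y), (Some(&7), Some(&8), Some(&9), None));
    assert_eq!(z, 3);
}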
dstr-async-gen-meth-ary-ptrn-rest-obj-prop-id.js
|
// This file was procedurally generated from the following sources:
// - src/dstr-binding/ary-ptrn-rest-obj-prop-id.case
// - src/dstr-binding/default/cls-expr-async-gen-meth.template
/*---
description: Rest element containing an object binding pattern (class expression method)
esid: sec-class-definitions-runtime-semantics-evaluation
features: [async-iteration]
flags: [generated, async]
info: |
ClassExpression : class BindingIdentifieropt ClassTail
1. If BindingIdentifieropt is not present, let className be undefined.
2. Else, let className be StringValue of BindingIdentifier.
3. Let value be the result of ClassDefinitionEvaluation of ClassTail
with argument className.
[...]
14.5.14 Runtime Semantics: ClassDefinitionEvaluation
21. For each ClassElement m in order from methods
a. If IsStatic of m is false, then
i. Let status be the result of performing
PropertyDefinitionEvaluation for m with arguments proto and
false.
[...]
Runtime Semantics: PropertyDefinitionEvaluation
AsyncGeneratorMethod :
async [no LineTerminator here] * PropertyName ( UniqueFormalParameters )
{ AsyncGeneratorBody }
1. Let propKey be the result of evaluating PropertyName.
2. ReturnIfAbrupt(propKey).
3. If the function code for this AsyncGeneratorMethod is strict mode code, let strict be true.
Otherwise let strict be false.
4. Let scope be the running execution context's LexicalEnvironment.
5. Let closure be ! AsyncGeneratorFunctionCreate(Method, UniqueFormalParameters,
AsyncGeneratorBody, scope, strict).
[...]
13.3.3.6 Runtime Semantics: IteratorBindingInitialization
BindingRestElement : ... BindingPattern
1. Let A be ArrayCreate(0).
[...]
3. Repeat
[...]
b. If iteratorRecord.[[done]] is true, then
i. Return the result of performing BindingInitialization of
BindingPattern with A and environment as the arguments.
[...]
---*/
let length = "outer";
var callCount = 0;
var C = class {
async *
|
([...{ 0: v, 1: w, 2: x, 3: y, length: z }]) {
assert.sameValue(v, 7);
assert.sameValue(w, 8);
assert.sameValue(x, 9);
assert.sameValue(y, undefined);
assert.sameValue(z, 3);
assert.sameValue(length, "outer", "the length prop is not set as a binding name");
callCount = callCount + 1;
}
};
new C().method([7, 8, 9]).next().then(() => {
assert.sameValue(callCount, 1, 'invoked exactly once');
}).then($DONE, $DONE);
|
method
|
identifier_name
|
dstr-async-gen-meth-ary-ptrn-rest-obj-prop-id.js
|
// This file was procedurally generated from the following sources:
// - src/dstr-binding/ary-ptrn-rest-obj-prop-id.case
// - src/dstr-binding/default/cls-expr-async-gen-meth.template
/*---
description: Rest element containing an object binding pattern (class expression method)
esid: sec-class-definitions-runtime-semantics-evaluation
features: [async-iteration]
flags: [generated, async]
info: |
ClassExpression : class BindingIdentifieropt ClassTail
1. If BindingIdentifieropt is not present, let className be undefined.
2. Else, let className be StringValue of BindingIdentifier.
3. Let value be the result of ClassDefinitionEvaluation of ClassTail
with argument className.
[...]
14.5.14 Runtime Semantics: ClassDefinitionEvaluation
21. For each ClassElement m in order from methods
a. If IsStatic of m is false, then
i. Let status be the result of performing
PropertyDefinitionEvaluation for m with arguments proto and
false.
[...]
Runtime Semantics: PropertyDefinitionEvaluation
AsyncGeneratorMethod :
async [no LineTerminator here] * PropertyName ( UniqueFormalParameters )
{ AsyncGeneratorBody }
1. Let propKey be the result of evaluating PropertyName.
2. ReturnIfAbrupt(propKey).
3. If the function code for this AsyncGeneratorMethod is strict mode code, let strict be true.
Otherwise let strict be false.
4. Let scope be the running execution context's LexicalEnvironment.
5. Let closure be ! AsyncGeneratorFunctionCreate(Method, UniqueFormalParameters,
AsyncGeneratorBody, scope, strict).
[...]
13.3.3.6 Runtime Semantics: IteratorBindingInitialization
BindingRestElement : ... BindingPattern
1. Let A be ArrayCreate(0).
[...]
3. Repeat
[...]
b. If iteratorRecord.[[done]] is true, then
i. Return the result of performing BindingInitialization of
BindingPattern with A and environment as the arguments.
[...]
---*/
let length = "outer";
var callCount = 0;
var C = class {
async *method([...{ 0: v, 1: w, 2: x, 3: y, length: z }])
|
};
new C().method([7, 8, 9]).next().then(() => {
assert.sameValue(callCount, 1, 'invoked exactly once');
}).then($DONE, $DONE);
|
{
assert.sameValue(v, 7);
assert.sameValue(w, 8);
assert.sameValue(x, 9);
assert.sameValue(y, undefined);
assert.sameValue(z, 3);
assert.sameValue(length, "outer", "the length prop is not set as a binding name");
callCount = callCount + 1;
}
|
identifier_body
|
show_template_tester.py
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.template_dialog import TemplateDialog
from calibre.gui2 import error_dialog
class ShowTemplateTesterAction(InterfaceAction):
name = 'Template tester'
action_spec = (_('Template tester'), 'debug.png', None, '')
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
def genesis(self):
self.previous_text = _('Enter a template to test using data from the selected book')
self.first_time = True
self.qaction.triggered.connect(self.show_template_editor)
def show_template_editor(self, *args):
view = self.gui.current_view()
if view is not self.gui.library_view:
return error_dialog(self.gui, _('No template tester available'),
_('Template tester is not available for books '
'on the device.')).exec_()
rows = view.selectionModel().selectedRows()
if not rows:
return error_dialog(self.gui, _('No books selected'),
_('One book must be selected'), show=True)
if len(rows) > 1:
return error_dialog(self.gui, _('Selected multiple books'),
_('Only one book can be selected'), show=True)
index = rows[0]
if index.isValid():
db = view.model().db
t = TemplateDialog(self.gui, self.previous_text,
mi=db.get_metadata(index.row(), index_is_id=False, get_cover=False),
text_is_placeholder=self.first_time)
t.setWindowTitle(_('Template tester'))
if t.exec_() == t.Accepted:
|
self.previous_text = t.rule[1]
self.first_time = False
|
conditional_block
|
|
show_template_tester.py
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.template_dialog import TemplateDialog
from calibre.gui2 import error_dialog
class ShowTemplateTesterAction(InterfaceAction):
name = 'Template tester'
action_spec = (_('Template tester'), 'debug.png', None, '')
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
def genesis(self):
self.previous_text = _('Enter a template to test using data from the selected book')
self.first_time = True
self.qaction.triggered.connect(self.show_template_editor)
def
|
(self, *args):
view = self.gui.current_view()
if view is not self.gui.library_view:
return error_dialog(self.gui, _('No template tester available'),
_('Template tester is not available for books '
'on the device.')).exec_()
rows = view.selectionModel().selectedRows()
if not rows:
return error_dialog(self.gui, _('No books selected'),
_('One book must be selected'), show=True)
if len(rows) > 1:
return error_dialog(self.gui, _('Selected multiple books'),
_('Only one book can be selected'), show=True)
index = rows[0]
if index.isValid():
db = view.model().db
t = TemplateDialog(self.gui, self.previous_text,
mi=db.get_metadata(index.row(), index_is_id=False, get_cover=False),
text_is_placeholder=self.first_time)
t.setWindowTitle(_('Template tester'))
if t.exec_() == t.Accepted:
self.previous_text = t.rule[1]
self.first_time = False
|
show_template_editor
|
identifier_name
|
show_template_tester.py
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.template_dialog import TemplateDialog
from calibre.gui2 import error_dialog
class ShowTemplateTesterAction(InterfaceAction):
|
name = 'Template tester'
action_spec = (_('Template tester'), 'debug.png', None, '')
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
def genesis(self):
self.previous_text = _('Enter a template to test using data from the selected book')
self.first_time = True
self.qaction.triggered.connect(self.show_template_editor)
def show_template_editor(self, *args):
view = self.gui.current_view()
if view is not self.gui.library_view:
return error_dialog(self.gui, _('No template tester available'),
_('Template tester is not available for books '
'on the device.')).exec_()
rows = view.selectionModel().selectedRows()
if not rows:
return error_dialog(self.gui, _('No books selected'),
_('One book must be selected'), show=True)
if len(rows) > 1:
return error_dialog(self.gui, _('Selected multiple books'),
_('Only one book can be selected'), show=True)
index = rows[0]
if index.isValid():
db = view.model().db
t = TemplateDialog(self.gui, self.previous_text,
mi=db.get_metadata(index.row(), index_is_id=False, get_cover=False),
text_is_placeholder=self.first_time)
t.setWindowTitle(_('Template tester'))
if t.exec_() == t.Accepted:
self.previous_text = t.rule[1]
self.first_time = False
|
identifier_body
|
|
show_template_tester.py
|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.dialogs.template_dialog import TemplateDialog
from calibre.gui2 import error_dialog
class ShowTemplateTesterAction(InterfaceAction):
name = 'Template tester'
action_spec = (_('Template tester'), 'debug.png', None, '')
dont_add_to = frozenset(['context-menu-device'])
action_type = 'current'
def genesis(self):
self.previous_text = _('Enter a template to test using data from the selected book')
self.first_time = True
self.qaction.triggered.connect(self.show_template_editor)
def show_template_editor(self, *args):
view = self.gui.current_view()
if view is not self.gui.library_view:
return error_dialog(self.gui, _('No template tester available'),
_('Template tester is not available for books '
'on the device.')).exec_()
rows = view.selectionModel().selectedRows()
if not rows:
return error_dialog(self.gui, _('No books selected'),
_('One book must be selected'), show=True)
if len(rows) > 1:
return error_dialog(self.gui, _('Selected multiple books'),
_('Only one book can be selected'), show=True)
index = rows[0]
|
if index.isValid():
db = view.model().db
t = TemplateDialog(self.gui, self.previous_text,
mi=db.get_metadata(index.row(), index_is_id=False, get_cover=False),
text_is_placeholder=self.first_time)
t.setWindowTitle(_('Template tester'))
if t.exec_() == t.Accepted:
self.previous_text = t.rule[1]
self.first_time = False
|
random_line_split
|
|
comment.ts
|
import unit = require('tests/tsunit');
import test = require('tests/test');
import common = require('../common');
import Comment = require('../comment');
import Rating = require('../rating');
import RatingCommunicator = require('tests/testratingcommunicator');
import DiscussionCommunicator = require('tests/testdiscussioncommunicator');
|
var viewModel = new Comment.ViewModel();
var communicator = new DiscussionCommunicator();
communicator.rating = new RatingCommunicator.Stub();
var controller = new Comment.Controller(model, viewModel, communicator);
communicator.rating.submitLikeRating = (ratableId: number, rating: string, then: () => void) => {
test.assert(v => v.val(rating) == 'like');
then();
};
controller.commandProcessor.processCommand(new Rating.SelectLikeRatingCommand('like', () => {
counter.inc('command');
}));
test.assert(v => v.val(counter.get('command')) == 1);
}
}
|
export class Main extends unit.TestClass {
handleSelectLikeRatingCommand() {
var counter = new common.Counter();
var model = new Comment.Model();
|
random_line_split
|
comment.ts
|
import unit = require('tests/tsunit');
import test = require('tests/test');
import common = require('../common');
import Comment = require('../comment');
import Rating = require('../rating');
import RatingCommunicator = require('tests/testratingcommunicator');
import DiscussionCommunicator = require('tests/testdiscussioncommunicator');
export class
|
extends unit.TestClass {
handleSelectLikeRatingCommand() {
var counter = new common.Counter();
var model = new Comment.Model();
var viewModel = new Comment.ViewModel();
var communicator = new DiscussionCommunicator();
communicator.rating = new RatingCommunicator.Stub();
var controller = new Comment.Controller(model, viewModel, communicator);
communicator.rating.submitLikeRating = (ratableId: number, rating: string, then: () => void) => {
test.assert(v => v.val(rating) == 'like');
then();
};
controller.commandProcessor.processCommand(new Rating.SelectLikeRatingCommand('like', () => {
counter.inc('command');
}));
test.assert(v => v.val(counter.get('command')) == 1);
}
}
|
Main
|
identifier_name
|
page.tsx
|
import * as React from 'react';
import { Link } from 'react-router-dom';
import { MemberEntity, RepositoryEntity } from '../../model';
import { MemberHeader } from './memberHeader';
import { MemberRow } from './memberRow';
import {RepoHeader} from '../repos/repoHeader';
import {RepoRow} from '../repos/repoRow';
interface Props {
members: MemberEntity[];
repos: RepositoryEntity[];
fetchMembers(): void;
fetchRepos(): void;
}
export class MembersPage extends React.Component<Props,{}> {
constructor(props)
|
public componentDidMount() {
this.props.fetchMembers();
this.props.fetchRepos();
}
public render() {
return (
<div className="row">
<div className="col-5">
<div className="row">
<h2> Members Page</h2>
<Link to="/member">New Member</Link>
</div>
<table className="table" id="members_table">
<thead>
<MemberHeader />
</thead>
<tbody>
{
this.props.members.map((member) =>
<MemberRow
key={member.id}
member={member}
/>
)
}
</tbody>
</table>
</div>
<div className="col-5">
<h2> Repo Page</h2>
<table className="table" id="repos_table">
<thead>
<RepoHeader />
</thead>
<tbody>
{
this.props.repos.map((repo) =>
<RepoRow
key={repo.id}
repo={repo}
/>
)
}
</tbody>
</table>
</div>
</div>
);
}
};
|
{
super(props);
this.state = {
members: [],
repos:[]
};
}
|
identifier_body
|
page.tsx
|
import * as React from 'react';
import { Link } from 'react-router-dom';
import { MemberEntity, RepositoryEntity } from '../../model';
import { MemberHeader } from './memberHeader';
import { MemberRow } from './memberRow';
import {RepoHeader} from '../repos/repoHeader';
import {RepoRow} from '../repos/repoRow';
interface Props {
members: MemberEntity[];
repos: RepositoryEntity[];
fetchMembers(): void;
fetchRepos(): void;
}
export class MembersPage extends React.Component<Props,{}> {
constructor(props) {
super(props);
this.state = {
members: [],
repos:[]
};
}
public componentDidMount() {
this.props.fetchMembers();
this.props.fetchRepos();
}
public render() {
return (
<div className="row">
<div className="col-5">
<div className="row">
<h2> Members Page</h2>
<Link to="/member">New Member</Link>
</div>
<table className="table" id="members_table">
<thead>
<MemberHeader />
</thead>
<tbody>
{
this.props.members.map((member) =>
<MemberRow
key={member.id}
member={member}
/>
)
|
</div>
<div className="col-5">
<h2> Repo Page</h2>
<table className="table" id="repos_table">
<thead>
<RepoHeader />
</thead>
<tbody>
{
this.props.repos.map((repo) =>
<RepoRow
key={repo.id}
repo={repo}
/>
)
}
</tbody>
</table>
</div>
</div>
);
}
};
|
}
</tbody>
</table>
|
random_line_split
|
page.tsx
|
import * as React from 'react';
import { Link } from 'react-router-dom';
import { MemberEntity, RepositoryEntity } from '../../model';
import { MemberHeader } from './memberHeader';
import { MemberRow } from './memberRow';
import {RepoHeader} from '../repos/repoHeader';
import {RepoRow} from '../repos/repoRow';
interface Props {
members: MemberEntity[];
repos: RepositoryEntity[];
fetchMembers(): void;
fetchRepos(): void;
}
export class MembersPage extends React.Component<Props,{}> {
|
(props) {
super(props);
this.state = {
members: [],
repos:[]
};
}
public componentDidMount() {
this.props.fetchMembers();
this.props.fetchRepos();
}
public render() {
return (
<div className="row">
<div className="col-5">
<div className="row">
<h2> Members Page</h2>
<Link to="/member">New Member</Link>
</div>
<table className="table" id="members_table">
<thead>
<MemberHeader />
</thead>
<tbody>
{
this.props.members.map((member) =>
<MemberRow
key={member.id}
member={member}
/>
)
}
</tbody>
</table>
</div>
<div className="col-5">
<h2> Repo Page</h2>
<table className="table" id="repos_table">
<thead>
<RepoHeader />
</thead>
<tbody>
{
this.props.repos.map((repo) =>
<RepoRow
key={repo.id}
repo={repo}
/>
)
}
</tbody>
</table>
</div>
</div>
);
}
};
|
constructor
|
identifier_name
|
Seq.ts
|
import { WithEquality, Ordering, ToOrderable } from "./Comparison";
import { HashMap } from "./HashMap";
import { HashSet } from "./HashSet";
import { Option } from "./Option";
import { Collection } from "./Collection";
import { Stream } from "./Stream";
/**
* IterableArray can take a type and apply iterable to its
* "components". That is useful for instance for [[Vector.zip]]
*
* `IterableArray<[string,number,string]>`
* => `[Iterable<string>, Iterable<number>, Iterable<string>]`
*/
export type IterableArray<T> = { [K in keyof T] : Iterable<T[K]> };
/**
* A generic interface for list-like implementations.
* @param T the item type
*/
export interface Seq<T> extends Collection<T> {
/**
* Append an element at the end of the collection.
*/
append(elt: T): Seq<T>;
/**
* Append multiple elements at the end of the collection.
* Note that arrays are also iterables.
*/
appendAll(elts: Iterable<T>): Seq<T>;
/**
* Remove multiple elements from a collection
*
* Vector.of(1,2,3,4,3,2,1).removeAll([2,4])
* => Vector.of(1,3,3,1)
*/
removeAll(elts: Iterable<T&WithEquality>): Seq<T>;
/**
* Removes the first element matching the predicate
* (use [[Seq.filter]] to remove all elements matching a predicate)
*/
removeFirst(predicate: (v:T)=>boolean): Seq<T>;
/**
* Call a function for element in the collection.
* Return the unchanged collection.
*/
forEach(fn: (v:T)=>void): Seq<T>;
/**
* Get the first value of the collection, if any.
* returns Option.Some if the collection is not empty,
* Option.None if it's empty.
*/
head(): Option<T>;
/**
* Get the last value of the collection, if any.
* returns Option.Some if the collection is not empty,
* Option.None if it's empty.
*/
last(): Option<T>;
/**
* Get all the elements in the collection but the first one.
     * Returns Option.None if the collection is empty.
*/
tail(): Option<Seq<T>>;
/**
* Return a new collection where each element was transformed
* by the mapper function you give.
*/
map<U>(mapper:(v:T)=>U): Seq<U>;
/**
* Apply the mapper function on every element of this collection.
* The mapper function returns an Option; if the Option is a Some,
* the value it contains is added to the result Collection, if it's
* a None, the value is discarded.
*
* Vector.of(1,2,6).mapOption(x => x%2===0 ?
* Option.of(x+1) : Option.none<number>())
* => Vector.of(3, 7)
*/
mapOption<U>(mapper:(v:T)=>Option<U>): Seq<U>;
/**
* Search for an item matching the predicate you pass,
* return Option.Some of that element if found,
* Option.None otherwise.
*/
find(predicate:(v:T)=>boolean): Option<T>;
/**
* Calls the function you give for each item in the collection,
* your function returns a collection, all the collections are
* concatenated.
* This is the monadic bind.
*/
flatMap<U>(mapper:(v:T)=>Seq<U>): Seq<U>;
/**
* Randomly reorder the elements of the collection.
*/
shuffle(): Seq<T>;
/**
* Returns a new collection with elements
* sorted according to the comparator you give.
*
* const activityOrder = ["Writer", "Actor", "Director"];
* Vector.of({name:"George", activity: "Director"}, {name:"Robert", activity: "Actor"})
* .sortBy((p1,p2) => activityOrder.indexOf(p1.activity) - activityOrder.indexOf(p2.activity));
* => Vector.of({"name":"Robert","activity":"Actor"}, {"name":"George","activity":"Director"})
*
* also see [[Seq.sortOn]]
*/
sortBy(compare: (v1:T,v2:T)=>Ordering): Seq<T>;
/**
* Give a function associating a number or a string with
* elements from the collection, and the elements
* are sorted according to that value.
*
* Vector.of({a:3,b:"b"}, {a:1,b:"test"}, {a:2,b:"a"}).sortOn(elt=>elt.a)
* => Vector.of({a:1,b:"test"}, {a:2,b:"a"}, {a:3,b:"b"})
*
* You can also sort by multiple criteria, and request 'descending'
* sorting:
*
* Vector.of({a:1,b:"b"}, {a:1,b:"test"}, {a:2,b:"a"}).sortOn(elt=>elt.a, {desc:elt=>elt.b})
* => Vector.of({a:1,b:"test"}, {a:1,b:"b"}, {a:2,b:"a"})
*
* also see [[Seq.sortBy]]
*/
sortOn(...getKeys: Array<ToOrderable<T>|{desc:ToOrderable<T>}>): Seq<T>;
/**
* Prepend an element at the beginning of the collection.
*/
prepend(elt: T): Seq<T>;
/**
* Prepend multiple elements at the beginning of the collection.
*/
prependAll(elts: Iterable<T>): Seq<T>;
/**
* Joins elements of the collection by a separator.
* Example:
*
* Vector.of(1,2,3).mkString(", ")
* => "1, 2, 3"
*/
mkString(separator: string): string;
/**
* Combine this collection with the collection you give in
* parameter to produce a new collection which combines both,
* in pairs. For instance:
*
* Vector.of(1,2,3).zip(["a","b","c"])
* => Vector.of([1,"a"], [2,"b"], [3,"c"])
*
* The result collection will have the length of the shorter
* of both collections. Extra elements will be discarded.
*
* Also see [[Vector.zip]], [[LinkedListStatic.zip]] and [[StreamStatic.zip]]
     * (static versions which can zip more than two iterables)
*/
zip<U>(other: Iterable<U>): Seq<[T,U]>;
/**
* Combine this collection with the index of the elements
* in it. Handy if you need the index when you map on
* the collection for instance:
*
* Vector.of("a","b").zipWithIndex().map(([v,idx]) => v+idx);
* => Vector.of("a0", "b1")
*/
zipWithIndex(): Seq<[T,number]>;
/**
* Retrieve the element at index idx.
* Returns an option because the collection may
* contain less elements than the index.
*/
get(idx: number): Option<T>;
/**
* Returns a new collection with the first
* n elements discarded.
* If the collection has less than n elements,
* returns the empty collection.
*/
drop(n:number): Seq<T>;
/**
* Returns a new collection, discarding the first elements
* until one element fails the predicate. All elements
* after that point are retained.
*/
dropWhile(predicate:(x:T)=>boolean): Seq<T>;
/**
* Returns a new collection with the last
* n elements discarded.
* If the collection has less than n elements,
* returns the empty collection.
*/
dropRight(n:number): Seq<T>;
/**
* Returns a new collection, discarding the last elements
* until one element fails the predicate. All elements
* before that point are retained.
*/
dropRightWhile(predicate:(x:T)=>boolean): Seq<T>;
/**
* Returns a new collection, discarding the elements
* after the first element which fails the predicate.
*/
takeWhile(predicate:(x:T)=>boolean): Seq<T>;
/**
* Returns a new collection, discarding the elements
* after the first element which fails the predicate,
* but starting from the end of the collection.
*
* Vector.of(1,2,3,4).takeRightWhile(x => x > 2)
* => Vector.of(3,4)
*/
takeRightWhile(predicate:(x:T)=>boolean): Seq<T>;
/**
* Return a new collection containing the first n
* elements from this collection
*
* Vector.of(1,2,3,4).take(2)
* => Vector.of(1,2)
*/
take(n:number): Seq<T>;
/**
* Remove duplicate items; elements are mapped to keys, those
* get compared.
*
* Vector.of(1,1,2,3,2,3,1).distinctBy(x => x);
* => Vector.of(1,2,3)
*/
distinctBy<U>(keyExtractor: (x:T)=>U&WithEquality): Seq<T>;
/**
* Reverse the collection. For instance:
*
* Vector.of(1,2,3).reverse();
|
reverse(): Seq<T>;
/**
* Takes a predicate; returns a pair of collections.
* The first one is the longest prefix of this collection
* which satisfies the predicate, and the second collection
* is the remainder of the collection.
*
* Vector.of(1,2,3,4,5,6).span(x => x <3)
* => [Vector.of(1,2), Vector.of(3,4,5,6)]
*/
span(predicate:(x:T)=>boolean): [Seq<T>,Seq<T>];
/**
* Split the collection at a specific index.
*
* Vector.of(1,2,3,4,5).splitAt(3)
* => [Vector.of(1,2,3), Vector.of(4,5)]
*/
splitAt(index:number): [Seq<T>,Seq<T>];
/**
* Slides a window of a specific size over the sequence.
* Returns a lazy stream so memory use is not prohibitive.
*
* Vector.of(1,2,3,4,5,6,7,8).sliding(3)
* => Stream.of(Vector.of(1,2,3), Vector.of(4,5,6), Vector.of(7,8))
*/
sliding(count:number): Stream<Seq<T>>;
/**
* Apply the function you give to all elements of the sequence
* in turn, keeping the intermediate results and returning them
* along with the final result in a list.
* The last element of the result is the final cumulative result.
*
     *     Vector.of(1,2,3).scanLeft(0, (soFar,cur)=>soFar+cur)
* => Vector.of(0,1,3,6)
*/
scanLeft<U>(init:U, fn:(soFar:U,cur:T)=>U): Seq<U>;
/**
* Apply the function you give to all elements of the sequence
* in turn, keeping the intermediate results and returning them
* along with the final result in a list.
* The first element of the result is the final cumulative result.
*
* Vector.of(1,2,3).scanRight(0, (cur,soFar)=>soFar+cur)
* => Vector.of(6,5,3,0)
*/
scanRight<U>(init:U, fn:(cur:T,soFar:U)=>U): Seq<U>;
/**
* Convert this collection to a map. You give a function which
* for each element in the collection returns a pair. The
* key of the pair will be used as a key in the map, the value,
* as a value in the map. If several values get the same key,
* entries will be lost.
*
* Vector.of(1,2,3).toMap(x=>[x.toString(), x])
* => HashMap.of(["1",1], ["2",2], ["3",3])
*/
toMap<K,V>(converter:(x:T)=>[K & WithEquality,V]): HashMap<K,V>;
/**
* Convert this collection to a set. Since the elements of the
* Seq may not support equality, you must pass a function returning
* a value supporting equality.
*
* Vector.of(1,2,3,3,4).toSet(x=>x)
* => HashSet.of(1,2,3,4)
*/
toSet<K>(converter:(x:T)=>K&WithEquality): HashSet<K>;
/**
* Transform this value to another value type.
* Enables fluent-style programming by chaining calls.
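 *
 * For instance (illustrative; Vector's length() is assumed):
 *
 * Vector.of(1,2,3).transform(v => v.length())
 * => 3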
*/
transform<U>(converter:(x:Seq<T>)=>U): U;
}
|
* => Vector.of(3,2,1)
*/
|
random_line_split
|
emitente.py
|
from base import Entidade
from pynfe.utils.flags import CODIGO_BRASIL
class
|
(Entidade):
# Dados do Emitente
# - Nome/Razao Social (obrigatorio)
razao_social = str()
# - Nome Fantasia
nome_fantasia = str()
# - CNPJ (obrigatorio)
cnpj = str()
# - Inscricao Estadual (obrigatorio)
inscricao_estadual = str()
# - CNAE Fiscal
cnae_fiscal = str()
# - Inscricao Municipal
inscricao_municipal = str()
# - Inscricao Estadual (Subst. Tributario)
inscricao_estadual_subst_tributaria = str()
# - Codigo de Regime Tributario (obrigatorio)
codigo_de_regime_tributario = str()
# Endereco
# - Logradouro (obrigatorio)
endereco_logradouro = str()
# - Numero (obrigatorio)
endereco_numero = str()
# - Complemento
endereco_complemento = str()
# - Bairro (obrigatorio)
endereco_bairro = str()
# - CEP
endereco_cep = str()
# - Pais (aceita somente Brasil)
endereco_pais = CODIGO_BRASIL
# - UF (obrigatorio)
endereco_uf = str()
# - Municipio (obrigatorio)
endereco_municipio = str()
# - Codigo Municipio (opt)
endereco_cod_municipio = str()
# - Telefone
endereco_telefone = str()
# Logotipo
logotipo = None
def __str__(self):
return self.cnpj
|
Emitente
|
identifier_name
|
emitente.py
|
from base import Entidade
from pynfe.utils.flags import CODIGO_BRASIL
class Emitente(Entidade):
# Dados do Emitente
# - Nome/Razao Social (obrigatorio)
razao_social = str()
# - Nome Fantasia
nome_fantasia = str()
# - CNPJ (obrigatorio)
cnpj = str()
# - Inscricao Estadual (obrigatorio)
inscricao_estadual = str()
# - CNAE Fiscal
cnae_fiscal = str()
# - Inscricao Municipal
inscricao_municipal = str()
# - Inscricao Estadual (Subst. Tributario)
inscricao_estadual_subst_tributaria = str()
# - Codigo de Regime Tributario (obrigatorio)
codigo_de_regime_tributario = str()
# Endereco
# - Logradouro (obrigatorio)
endereco_logradouro = str()
# - Numero (obrigatorio)
endereco_numero = str()
# - Complemento
endereco_complemento = str()
# - Bairro (obrigatorio)
endereco_bairro = str()
# - CEP
endereco_cep = str()
# - Pais (aceita somente Brasil)
endereco_pais = CODIGO_BRASIL
# - UF (obrigatorio)
endereco_uf = str()
|
# - Codigo Municipio (opt)
endereco_cod_municipio = str()
# - Telefone
endereco_telefone = str()
# Logotipo
logotipo = None
def __str__(self):
return self.cnpj
|
# - Municipio (obrigatorio)
endereco_municipio = str()
|
random_line_split
|
emitente.py
|
from base import Entidade
from pynfe.utils.flags import CODIGO_BRASIL
class Emitente(Entidade):
# Dados do Emitente
# - Nome/Razao Social (obrigatorio)
|
razao_social = str()
# - Nome Fantasia
nome_fantasia = str()
# - CNPJ (obrigatorio)
cnpj = str()
# - Inscricao Estadual (obrigatorio)
inscricao_estadual = str()
# - CNAE Fiscal
cnae_fiscal = str()
# - Inscricao Municipal
inscricao_municipal = str()
# - Inscricao Estadual (Subst. Tributario)
inscricao_estadual_subst_tributaria = str()
# - Codigo de Regime Tributario (obrigatorio)
codigo_de_regime_tributario = str()
# Endereco
# - Logradouro (obrigatorio)
endereco_logradouro = str()
# - Numero (obrigatorio)
endereco_numero = str()
# - Complemento
endereco_complemento = str()
# - Bairro (obrigatorio)
endereco_bairro = str()
# - CEP
endereco_cep = str()
# - Pais (aceita somente Brasil)
endereco_pais = CODIGO_BRASIL
# - UF (obrigatorio)
endereco_uf = str()
# - Municipio (obrigatorio)
endereco_municipio = str()
# - Codigo Municipio (opt)
endereco_cod_municipio = str()
# - Telefone
endereco_telefone = str()
# Logotipo
logotipo = None
def __str__(self):
return self.cnpj
|
identifier_body
|
|
external_event.py
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.objects import base as obj_base
from nova.objects import fields
EVENT_NAMES = [
# Network has changed for this instance, rebuild info_cache
'network-changed',
# VIF plugging notifications, tag is port_id
'network-vif-plugged',
'network-vif-unplugged',
]
EVENT_STATUSES = ['failed', 'completed', 'in-progress']
# TODO(berrange): Remove NovaObjectDictCompat
class
|
(obj_base.NovaObject,
obj_base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Supports network-changed and vif-plugged
VERSION = '1.0'
fields = {
'instance_uuid': fields.UUIDField(),
'name': fields.EnumField(valid_values=EVENT_NAMES),
'status': fields.StringField(),
'tag': fields.StringField(nullable=True),
'data': fields.DictOfStringsField(),
}
@staticmethod
def make_key(name, tag=None):
if tag is not None:
return '%s-%s' % (name, tag)
else:
return name
@property
def key(self):
return self.make_key(self.name, self.tag)
|
InstanceExternalEvent
|
identifier_name
|
external_event.py
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.objects import base as obj_base
from nova.objects import fields
EVENT_NAMES = [
# Network has changed for this instance, rebuild info_cache
'network-changed',
# VIF plugging notifications, tag is port_id
'network-vif-plugged',
'network-vif-unplugged',
]
EVENT_STATUSES = ['failed', 'completed', 'in-progress']
# TODO(berrange): Remove NovaObjectDictCompat
class InstanceExternalEvent(obj_base.NovaObject,
obj_base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Supports network-changed and vif-plugged
VERSION = '1.0'
fields = {
'instance_uuid': fields.UUIDField(),
'name': fields.EnumField(valid_values=EVENT_NAMES),
'status': fields.StringField(),
'tag': fields.StringField(nullable=True),
'data': fields.DictOfStringsField(),
}
@staticmethod
def make_key(name, tag=None):
|
@property
def key(self):
return self.make_key(self.name, self.tag)
|
if tag is not None:
return '%s-%s' % (name, tag)
else:
return name
|
identifier_body
|
external_event.py
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
|
from nova.objects import base as obj_base
from nova.objects import fields
EVENT_NAMES = [
# Network has changed for this instance, rebuild info_cache
'network-changed',
# VIF plugging notifications, tag is port_id
'network-vif-plugged',
'network-vif-unplugged',
]
EVENT_STATUSES = ['failed', 'completed', 'in-progress']
# TODO(berrange): Remove NovaObjectDictCompat
class InstanceExternalEvent(obj_base.NovaObject,
obj_base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Supports network-changed and vif-plugged
VERSION = '1.0'
fields = {
'instance_uuid': fields.UUIDField(),
'name': fields.EnumField(valid_values=EVENT_NAMES),
'status': fields.StringField(),
'tag': fields.StringField(nullable=True),
'data': fields.DictOfStringsField(),
}
@staticmethod
def make_key(name, tag=None):
if tag is not None:
return '%s-%s' % (name, tag)
else:
return name
@property
def key(self):
return self.make_key(self.name, self.tag)
|
# under the License.
|
random_line_split
|
external_event.py
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.objects import base as obj_base
from nova.objects import fields
EVENT_NAMES = [
# Network has changed for this instance, rebuild info_cache
'network-changed',
# VIF plugging notifications, tag is port_id
'network-vif-plugged',
'network-vif-unplugged',
]
EVENT_STATUSES = ['failed', 'completed', 'in-progress']
# TODO(berrange): Remove NovaObjectDictCompat
class InstanceExternalEvent(obj_base.NovaObject,
obj_base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Supports network-changed and vif-plugged
VERSION = '1.0'
fields = {
'instance_uuid': fields.UUIDField(),
'name': fields.EnumField(valid_values=EVENT_NAMES),
'status': fields.StringField(),
'tag': fields.StringField(nullable=True),
'data': fields.DictOfStringsField(),
}
@staticmethod
def make_key(name, tag=None):
if tag is not None:
|
else:
return name
@property
def key(self):
return self.make_key(self.name, self.tag)
|
return '%s-%s' % (name, tag)
|
conditional_block
|
borrowck-loan-rcvr.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
struct point { x: int, y: int }
trait methods {
|
impl methods for point {
fn impurem(&self) {
}
fn blockm(&self, f: ||) { f() }
}
fn a() {
let mut p = point {x: 3, y: 4};
// Here: it's ok to call even though receiver is mutable, because we
// can loan it out.
p.impurem();
// But in this case we do not honor the loan:
p.blockm(|| { //~ ERROR cannot borrow `p` as mutable
p.x = 10;
})
}
fn b() {
let mut p = point {x: 3, y: 4};
// Here I create an outstanding loan and check that we get conflicts:
let l = &mut p;
p.impurem(); //~ ERROR cannot borrow
l.x += 1;
}
fn main() {
}
|
fn impurem(&self);
fn blockm(&self, f: ||);
}
|
random_line_split
|
borrowck-loan-rcvr.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
struct point { x: int, y: int }
trait methods {
fn impurem(&self);
fn blockm(&self, f: ||);
}
impl methods for point {
fn impurem(&self) {
}
fn blockm(&self, f: ||) { f() }
}
fn a() {
let mut p = point {x: 3, y: 4};
// Here: it's ok to call even though receiver is mutable, because we
// can loan it out.
p.impurem();
// But in this case we do not honor the loan:
p.blockm(|| { //~ ERROR cannot borrow `p` as mutable
p.x = 10;
})
}
fn b() {
let mut p = point {x: 3, y: 4};
// Here I create an outstanding loan and check that we get conflicts:
let l = &mut p;
p.impurem(); //~ ERROR cannot borrow
l.x += 1;
}
fn main()
|
{
}
|
identifier_body
|
|
borrowck-loan-rcvr.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(managed_boxes)]
struct point { x: int, y: int }
trait methods {
fn impurem(&self);
fn blockm(&self, f: ||);
}
impl methods for point {
fn impurem(&self) {
}
fn blockm(&self, f: ||) { f() }
}
fn a() {
let mut p = point {x: 3, y: 4};
// Here: it's ok to call even though receiver is mutable, because we
// can loan it out.
p.impurem();
// But in this case we do not honor the loan:
p.blockm(|| { //~ ERROR cannot borrow `p` as mutable
p.x = 10;
})
}
fn b() {
let mut p = point {x: 3, y: 4};
// Here I create an outstanding loan and check that we get conflicts:
let l = &mut p;
p.impurem(); //~ ERROR cannot borrow
l.x += 1;
}
fn
|
() {
}
|
main
|
identifier_name
|
typedParser.ts
|
import { IParser } from './iParser'
import { ParseFailure, ParseResult, ParseSuccess } from './parseResult'
export interface TypedValue {}
export interface TypedConstructor<T extends TypedValue> {
new (value: any): T
}
export class TypedParser<V, T extends TypedValue> implements IParser<T> {
ctor: TypedConstructor<T>
parser: IParser<any>
constructor(ctor: TypedConstructor<T>, parser: IParser<any>) {
this.ctor = ctor
this.parser = parser
}
parse(input: string): ParseResult<T> {
const result = this.parser.parse(input)
if (result instanceof ParseSuccess) {
try {
const value = new this.ctor(result.value)
return new ParseSuccess(value, result.next)
} catch (e) {
return new ParseFailure(this.ctor.toString() + ' cannot parse: ' + result.value, result.next)
}
} else {
return result
}
}
}
export function typed<V, T extends TypedValue>(ctor: TypedConstructor<T>, parser: IParser<any>)
|
{
return new TypedParser<V, T>(ctor, parser)
}
|
identifier_body
|