Each row below is one fill-in-the-middle (FIM) example: the original source file equals prefix + middle + suffix, and fim_type records how the masked middle span was chosen.

| Column | Type | Summary |
|---|---|---|
| file_name | large_string | lengths 4–140 |
| prefix | large_string | lengths 0–39k |
| suffix | large_string | lengths 0–36.1k |
| middle | large_string | lengths 0–29.4k |
| fim_type | large_string | 4 classes: identifier_body, identifier_name, conditional_block, random_line_split |
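A minimal sketch (Python, assuming each row is available as a dict with exactly these keys) of how a row reassembles into the original file and into a prefix-suffix-middle training prompt. The `<PRE>/<SUF>/<MID>` sentinel strings are placeholders; real FIM tokenizers use model-specific special tokens:

```python
def reconstruct(row: dict) -> str:
    """The original file is simply prefix + middle + suffix."""
    return row["prefix"] + row["middle"] + row["suffix"]

def to_psm_prompt(row: dict, pre="<PRE>", suf="<SUF>", mid="<MID>") -> str:
    """PSM-style FIM prompt: the model sees prefix and suffix, then predicts the middle."""
    return f'{pre}{row["prefix"]}{suf}{row["suffix"]}{mid}'

# Tiny example taken from the first row below.
example = {
    "file_name": "analisi.py",
    "prefix": "def fit_function(x, a, b):\n    re",
    "suffix": "\n",
    "middle": "turn b*(numpy.exp(x/a)-1)",
    "fim_type": "identifier_body",
}
assert "return b*(numpy.exp(x/a)-1)" in reconstruct(example)
```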
analisi.py | fim_type: identifier_body — the masked middle is the body of fit_function (marked below). Italian comments are translated; the duplicated `import math` and `pylab.grid` calls were collapsed, `unpack="True"` corrected to `unpack=True`, and the p-value now uses the same degrees of freedom as the printed chi-square.

```python
import numpy
import math
import pylab
from scipy.optimize import curve_fit
import scipy.stats
import lab


def fit_function(x, a, b):
    return b * (numpy.exp(x / a) - 1)  # <-- FIM middle (identifier_body)


FileName = '/home/federico/Documenti/Laboratorio2/Diodo/dati_arduino/dati.txt'
N1, N2 = pylab.loadtxt(FileName, unpack=True)

errN2 = numpy.array([1.0 for i in range(len(N2))])
errN1 = numpy.array([1.0 for i in range(len(N1))])

Rd = 3280.0
errRd = 30.0
eta = 4.89 / 1000
erreta = 0.02 / 1000

V1 = eta * N1
V2 = eta * N2
I = (V1 - V2) / Rd

# Propagate the errors onto the voltages
errV2 = (erreta / eta + errN2 / N2) * V2
errV1 = (erreta / eta + errN1 / N1) * V1
errI = (errRd / Rd) * I

# for i in range(len(I)):
#     errI[i] = 50e-06

# Replace exact zeros with small distinct values so later fits and divisions do not break
for i in range(len(I)):
    if I[i] == 0.0:
        I[i] = 1.0e-11 * i
for i in range(len(V2)):
    if V2[i] == 0.0:
        V2[i] = 1.0e-11 * i

# Unfinished: the idea was to replace each column of points with a single point
#number = 150
#minV = 0.30
#maxV = 0.70
#inc = (maxV - minV)/number
#volt = numpy.array([(minV + i*inc) for i in range(number)])
#voltaggiVeri = numpy.array([])
#ampere = numpy.array([])
#errVolt = numpy.array([0.0 for i in range(number)])
#errAmpere = numpy.array([0.0 for i in range(number)])
#count = numpy.array([])
#for i in range(number):
#    for j in range(len(V2)):
#        if(volt[i]<=V2[j]<=volt[i+1]):
#            voltaggiVeri = numpy.append(voltaggiVeri, V2[
#            errVolt[i] = errV2[j]
#            errAmpere[i] = errI[j]
#            ampere[i] += I[j]
#            count[i] += 1
#nonnulli = len(numpy.nonzero(count))
#aNonNulli = numpy.array([0.0 for i in range(nonnulli)])
#for i in range(nonnulli):
#    index = (numpy.nonzero(ampere))[i]
#    print(index)
#    aNonNulli[i] = ampere[index]
#V2 = volt
#I = ampere
#errI = errAmpere
#errV2 = errVolt

print(V2, I, errV2, errI)

pylab.title("Curva corrente tensione")  # current-voltage curve
pylab.xlabel("V (V)")
pylab.ylabel("I (A)")
pylab.grid(color="gray")
pylab.errorbar(V2, I, errI, errV2, fmt="o", color="black")

initial = numpy.array([0.0515, 6.75e-09])
# NOTE: the errors cannot be combined in quadrature here because that would mix units
error = errI + errV2 / 100
popt, pcov = curve_fit(fit_function, V2, I, initial, error)
a, b = popt
print(a, b)
print(pcov)

div = 1000
bucket = numpy.array([0.0 for i in range(div)])
funzione = numpy.array([0.0 for i in range(div)])
inc = (V2.max() - V2.min()) / div
for i in range(len(bucket)):
    bucket[i] = float(i) * inc + V2.min()
    funzione[i] = fit_function(bucket[i], a, b)
pylab.plot(bucket, funzione, color="red")

# Compute the chi square
chisq = (((I - fit_function(V2, a, b)) / error) ** 2).sum()
ndof = len(V2) - 2  # two fit parameters
p = 1.0 - scipy.stats.chi2.cdf(chisq, ndof)  # was len(V2) - 3 in the original, inconsistent with ndof
print("Carica Chisquare/ndof = %f/%d" % (chisq, ndof))
print("p = ", p)
pylab.show()

# A second, abandoned attempt at binning the data:
#number = 150
#minV = 0.30
#maxV = 0.70
#inc = (maxV - minV)/number
#volt = numpy.array([(minV + i*inc) for i in range(number)])
#ampere = numpy.array([0.0 for i in range(number)])
#count = numpy.array([0 for i in range(number)])
#for i in range(number):
#    for j in range(len(V2)):
#        if(V2[j] == volt[i]):
#            ampere[j] += I[i]
#            count[j] += 1
#ampere = ampere/count
#V2 = volt
#I = ampere
```
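The fitted model is recognizably the ideal-diode (Shockley) equation; mapping the script's parameters onto the standard form is an interpretation, not something stated in the file:

```latex
I(V) = b\left(e^{V/a} - 1\right)
\quad\text{vs.}\quad
I(V) = I_s\left(e^{V/(\eta V_T)} - 1\right),
\qquad b \leftrightarrow I_s,\qquad a \leftrightarrow \eta V_T .
```

With the thermal voltage $V_T \approx 25.9\,\mathrm{mV}$ at room temperature, the initial guess $a = 0.0515\,\mathrm{V}$ corresponds to an ideality factor $\eta \approx 2$, a plausible value for a silicon diode.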
job-timeline.component.ts | fim_type: random_line_split — the masked middle is the two @ViewChild lines (marked below).

```typescript
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import {
  AfterViewInit,
  ChangeDetectionStrategy,
  ChangeDetectorRef,
  Component,
  ElementRef,
  OnDestroy,
  ViewChild
} from '@angular/core';

import { Subject } from 'rxjs';
import { distinctUntilChanged, filter, takeUntil } from 'rxjs/operators';

import * as G2 from '@antv/g2';
import { Chart } from '@antv/g2';
import { COLOR_MAP, ColorKey } from 'config';
import { JobDetailCorrect, VerticesItemRange } from 'interfaces';
import { JobService } from 'services';

@Component({
  selector: 'flink-job-timeline',
  templateUrl: './job-timeline.component.html',
  styleUrls: ['./job-timeline.component.less'],
  changeDetection: ChangeDetectionStrategy.OnPush
})
export class JobTimelineComponent implements AfterViewInit, OnDestroy {
  public listOfVertex: VerticesItemRange[] = [];
  public listOfSubTaskTimeLine: Array<{ name: string; status: string; range: [number, number] }> = [];
  public mainChartInstance: Chart;
  public subTaskChartInstance: Chart;
  public jobDetail: JobDetailCorrect;
  public selectedName: string;
  public isShowSubTaskTimeLine = false;

  @ViewChild('mainTimeLine', { static: true }) private readonly mainTimeLine: ElementRef;       // <-- FIM middle
  @ViewChild('subTaskTimeLine', { static: true }) private readonly subTaskTimeLine: ElementRef; // <-- FIM middle

  private readonly destroy$ = new Subject<void>();

  constructor(private readonly jobService: JobService, private readonly cdr: ChangeDetectorRef) {}

  public ngAfterViewInit(): void {
    this.setUpMainChart();
    this.setUpSubTaskChart();
    this.jobService.jobDetail$
      .pipe(
        filter(() => !!this.mainChartInstance),
        distinctUntilChanged((pre, next) => pre.jid === next.jid),
        takeUntil(this.destroy$)
      )
      .subscribe(data => {
        this.jobDetail = data;
        this.listOfVertex = data.vertices
          .filter(v => v['start-time'] > -1)
          .map(vertex => {
            const endTime = vertex['end-time'] > -1 ? vertex['end-time'] : vertex['start-time'] + vertex.duration;
            return {
              ...vertex,
              range: [vertex['start-time'], endTime]
            };
          });
        this.listOfVertex = this.listOfVertex.sort((a, b) => a.range[0] - b.range[0]);
        this.mainChartInstance.changeSize(
          this.mainChartInstance.width,
          Math.max(this.listOfVertex.length * 50 + 100, 150)
        );
        this.mainChartInstance.data(this.listOfVertex);
        this.mainChartInstance.scale({
          range: {
            alias: 'Time',
            type: 'time',
            mask: 'HH:mm:ss',
            nice: false
          }
        });
        this.mainChartInstance.render();
        this.cdr.markForCheck();
      });
  }

  public ngOnDestroy(): void {
    this.destroy$.next();
    this.destroy$.complete();
  }

  public updateSubTaskChart(vertexId: string): void {
    this.listOfSubTaskTimeLine = [];
    this.jobService.loadSubTaskTimes(this.jobDetail.jid, vertexId).subscribe(data => {
      data.subtasks.forEach(task => {
        const listOfTimeLine: Array<{ status: string; startTime: number }> = [];
        for (const key in task.timestamps) {
          // @ts-ignore
          const time = task.timestamps[key];
          if (time > 0) {
            listOfTimeLine.push({
              status: key,
              startTime: time
            });
          }
        }
        listOfTimeLine.sort((pre, next) => pre.startTime - next.startTime);
        listOfTimeLine.forEach((item, index) => {
          if (index === listOfTimeLine.length - 1) {
            this.listOfSubTaskTimeLine.push({
              name: `${task.subtask} - ${task.host}`,
              status: item.status,
              range: [item.startTime, task.duration + listOfTimeLine[0].startTime]
            });
          } else {
            this.listOfSubTaskTimeLine.push({
              name: `${task.subtask} - ${task.host}`,
              status: item.status,
              range: [item.startTime, listOfTimeLine[index + 1].startTime]
            });
          }
        });
      });
      this.subTaskChartInstance.changeSize(
        this.subTaskChartInstance.width,
        Math.max(data.subtasks.length * 50 + 100, 150)
      );
      this.subTaskChartInstance.data(this.listOfSubTaskTimeLine);
      this.subTaskChartInstance.scale({
        range: {
          alias: 'Time',
          type: 'time',
          mask: 'HH:mm:ss',
          nice: false
        }
      });
      this.subTaskChartInstance.render();
      this.isShowSubTaskTimeLine = true;
      this.cdr.markForCheck();
      setTimeout(() => {
        try {
          // FIXME scrollIntoViewIfNeeded is a non-standard extension and will not work everywhere
          (
            document.getElementById('subtask') as unknown as {
              scrollIntoViewIfNeeded: () => void;
            }
          ).scrollIntoViewIfNeeded();
        } catch (e) {}
      });
    });
  }

  public setUpMainChart(): void {
    this.mainChartInstance = new G2.Chart({
      container: this.mainTimeLine.nativeElement,
      autoFit: true,
      height: 500,
      padding: [50, 50, 50, 50]
    });
    this.mainChartInstance.animate(false);
    this.mainChartInstance.axis('id', false);
    this.mainChartInstance.coordinate('rect').transpose().scale(1, -1);
    this.mainChartInstance
      .interval()
      .position('id*range')
      .color('status', (type: string) => COLOR_MAP[type as ColorKey])
      .label('name', {
        offset: -20,
        position: 'right',
        style: {
          fill: '#ffffff',
          textAlign: 'right',
          fontWeight: 'bold'
        },
        content: data => {
          if (data.name.length <= 120) {
            return data.name;
          } else {
            return `${data.name.slice(0, 120)}...`;
          }
        }
      });
    this.mainChartInstance.tooltip({
      title: 'name'
    });
    this.mainChartInstance.on('click', (e: { x: number; y: number }) => {
      if (this.mainChartInstance.getSnapRecords(e).length) {
        const data = (
          this.mainChartInstance.getSnapRecords(e)[0] as unknown as {
            _origin: { name: string; id: string };
          }
        )._origin;
        this.selectedName = data.name;
        this.updateSubTaskChart(data.id);
      }
    });
  }

  public setUpSubTaskChart(): void {
    this.subTaskChartInstance = new G2.Chart({
      container: this.subTaskTimeLine.nativeElement,
      autoFit: true,
      height: 10,
      padding: [50, 50, 50, 300]
    });
    this.subTaskChartInstance.animate(false);
    this.subTaskChartInstance.coordinate('rect').transpose().scale(1, -1);
    this.subTaskChartInstance
      .interval()
      .position('name*range')
      .color('status', (type: string) => COLOR_MAP[type as ColorKey]);
  }
}
```
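A hedged sketch of the inverse operation — cutting a source file into (prefix, middle, suffix) for the fim_type classes seen in this dump. The span selection here is deliberately simplified; the real pipeline presumably used a parser or AST to locate identifiers, bodies, and branches:

```python
import random

def make_fim_example(source: str, start: int, end: int, fim_type: str) -> dict:
    """Mask source[start:end] as the middle; everything before/after becomes prefix/suffix."""
    return {
        "prefix": source[:start],
        "middle": source[start:end],
        "suffix": source[end:],
        "fim_type": fim_type,
    }

def random_line_split(source: str) -> dict:
    """random_line_split: mask a small run of whole lines chosen at random."""
    lines = source.splitlines(keepends=True)
    i = random.randrange(len(lines))
    j = min(len(lines), i + random.randint(1, 3))
    start = sum(len(l) for l in lines[:i])
    end = start + sum(len(l) for l in lines[i:j])
    return make_fim_example(source, start, end, "random_line_split")
```

Judging from the rows themselves: identifier_name masks a single identifier token, identifier_body masks a function/method/class body, and conditional_block masks one branch of a conditional.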
job-timeline.component.ts | fim_type: identifier_body — same file as the previous row; the masked middle is the body of setUpSubTaskChart():

```typescript
{
  this.subTaskChartInstance = new G2.Chart({
    container: this.subTaskTimeLine.nativeElement,
    autoFit: true,
    height: 10,
    padding: [50, 50, 50, 300]
  });
  this.subTaskChartInstance.animate(false);
  this.subTaskChartInstance.coordinate('rect').transpose().scale(1, -1);
  this.subTaskChartInstance
    .interval()
    .position('name*range')
    .color('status', (type: string) => COLOR_MAP[type as ColorKey]);
}
```
job-timeline.component.ts | fim_type: identifier_name — same file; the masked middle is the single token `constructor`.
job-timeline.component.ts | fim_type: conditional_block — same file; the masked middle is the if branch that closes out the last timeline entry:

```typescript
{
  this.listOfSubTaskTimeLine.push({
    name: `${task.subtask} - ${task.host}`,
    status: item.status,
    range: [item.startTime, task.duration + listOfTimeLine[0].startTime]
  });
}
```
square.rs | fim_type: identifier_body — the masked middle is the body of square_properties_helper_primitive_float (marked below).

```rust
use malachite_base::num::arithmetic::traits::UnsignedAbs;
use malachite_base::num::basic::floats::PrimitiveFloat;
use malachite_base::num::basic::integers::PrimitiveInt;
use malachite_base::num::basic::signeds::PrimitiveSigned;
use malachite_base::num::basic::unsigneds::PrimitiveUnsigned;
use malachite_base::num::conversion::traits::WrappingFrom;
use malachite_base::num::float::NiceFloat;
use malachite_base_test_util::generators::{
    primitive_float_gen, signed_gen_var_10, unsigned_gen_var_21,
};

#[test]
fn test_square() {
    fn test<T: PrimitiveInt>(x: T, out: T) {
        assert_eq!(x.square(), out);

        let mut x = x;
        x.square_assign();
        assert_eq!(x, out);
    }
    test::<u8>(0, 0);
    test::<i16>(1, 1);
    test::<u32>(2, 4);
    test::<i64>(3, 9);
    test::<u128>(10, 100);
    test::<isize>(123, 15129);
    test::<u32>(1000, 1000000);

    test::<i16>(-1, 1);
    test::<i32>(-2, 4);
    test::<i64>(-3, 9);
    test::<i128>(-10, 100);
    test::<isize>(-123, 15129);
    test::<i32>(-1000, 1000000);
}

fn square_properties_helper_unsigned<T: PrimitiveUnsigned>() {
    unsigned_gen_var_21::<T>().test_properties(|x| {
        let mut square = x;
        square.square_assign();
        assert_eq!(square, x.square());
        assert_eq!(square, x.pow(2));
        assert_eq!(square.checked_sqrt(), Some(x));
        if x > T::ONE {
            assert_eq!(square.checked_log_base(x), Some(2));
        }
    });
}

fn square_properties_helper_signed<
    U: PrimitiveUnsigned + WrappingFrom<S>,
    S: PrimitiveSigned + UnsignedAbs<Output = U> + WrappingFrom<U>,
>() {
    signed_gen_var_10::<U, S>().test_properties(|x| {
        let mut square = x;
        square.square_assign();
        assert_eq!(square, x.square());
        assert_eq!(square, x.pow(2));
        if x != S::MIN {
            assert_eq!((-x).square(), square);
        }
        assert_eq!(
            U::wrapping_from(square).checked_sqrt().unwrap(),
            x.unsigned_abs()
        );
    });
}

fn square_properties_helper_primitive_float<T: PrimitiveFloat>() {
    // vvv FIM middle (identifier_body) vvv
    primitive_float_gen::<T>().test_properties(|x| {
        let mut square = x;
        square.square_assign();
        assert_eq!(NiceFloat(square), NiceFloat(x.square()));
        assert_eq!(NiceFloat(square), NiceFloat(x.pow(2)));
        assert_eq!(NiceFloat((-x).square()), NiceFloat(square));
    });
    // ^^^ FIM middle ^^^
}

#[test]
fn square_properties() {
    apply_fn_to_unsigneds!(square_properties_helper_unsigned);
    apply_fn_to_unsigned_signed_pairs!(square_properties_helper_signed);
    apply_fn_to_primitive_floats!(square_properties_helper_primitive_float);
}
```
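A small consistency check between a row's middle and its fim_type. These are rough heuristics derived from the rows in this dump, not the dataset's actual construction rules:

```python
import re

IDENT = re.compile(r"^[A-Za-z_][A-Za-z0-9_]*$")

def plausible(row: dict) -> bool:
    """Heuristic sanity check: does the masked middle look like its declared fim_type?"""
    m = row["middle"].strip()
    t = row["fim_type"]
    if t == "identifier_name":
        return bool(IDENT.match(m))
    if t in ("identifier_body", "conditional_block"):
        # In the brace languages here, bodies and branches start with '{';
        # Python bodies are indented blocks instead.
        return m.startswith("{") or row["file_name"].endswith(".py")
    return t == "random_line_split"
```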
square.rs | fim_type: conditional_block — same file as the previous row; the masked middle is the branch taken when `x != S::MIN`:

```rust
{
    assert_eq!((-x).square(), square);
}
```
square.rs | fim_type: identifier_name — same file; the masked middle is the identifier `square_properties_helper_primitive_float`.
square.rs | fim_type: random_line_split — same file; the masked middle is the two lines inside `if x > T::ONE {`:

```rust
assert_eq!(square.checked_log_base(x), Some(2));
}
```
token.rs | fim_type: random_line_split — the masked middle is the is_integer method (marked below). The original matched `Token::String(..)` there, which is a copy-paste bug; the corrected pattern is noted in a comment.

````rust
use std;

/// A token.
#[derive(Clone,Debug,PartialEq,Eq)]
pub enum Token
{
    /// A word.
    Word(String),
    /// A string literal.
    String(String),
    /// An integer literal.
    // TODO: use BigNum
    Integer(i64),
    /// A comment.
    ///
    /// If the comment is inline, it existed on the same line
    /// as the previous statement.
    ///
    /// For example
    ///
    /// ``` ir
    /// add 2, 4 ; inline comment goes here
    /// ```
    Comment {
        inline: bool,
        text: String,
    },
    /// A symbol.
    Symbol(String),
    /// A new line.
    NewLine,
}

impl Token
{
    pub fn comma() -> Self { Token::symbol(",") }
    pub fn colon() -> Self { Token::symbol(":") }
    pub fn left_parenthesis() -> Self { Token::symbol("(") }
    pub fn right_parenthesis() -> Self { Token::symbol(")") }
    pub fn at_sign() -> Self { Token::symbol("@") }
    pub fn percent_sign() -> Self { Token::symbol("%") }
    pub fn left_curly_brace() -> Self { Token::symbol("{") }
    pub fn right_curly_brace() -> Self { Token::symbol("}") }
    pub fn equal_sign() -> Self { Token::symbol("=") }
    pub fn function_arrow() -> Self { Token::symbol("->") }
    pub fn boolean_true() -> Self { Token::word("true") }
    pub fn boolean_false() -> Self { Token::word("false") }

    pub fn word<S>(word: S) -> Self
        where S: Into<String> {
        Token::Word(word.into())
    }

    pub fn string<S>(string: S) -> Self
        where S: Into<String> {
        Token::String(string.into())
    }

    pub fn integer<I>(integer: I) -> Self
        where I: Into<i64> {
        Token::Integer(integer.into())
    }

    pub fn comment<S>(text: S) -> Self
        where S: Into<String> {
        Token::Comment {
            inline: false,
            text: text.into(),
        }
    }

    pub fn inline_comment<S>(text: S) -> Self
        where S: Into<String> {
        Token::Comment {
            inline: true,
            text: text.into(),
        }
    }

    pub fn symbol<S>(symbol: S) -> Self
        where S: Into<String> {
        Token::Symbol(symbol.into())
    }

    pub fn new_line() -> Self {
        Token::NewLine
    }

    pub fn is_word(&self) -> bool {
        if let Token::Word(..) = *self { true } else { false }
    }

    pub fn is_string(&self) -> bool {
        if let Token::String(..) = *self { true } else { false }
    }

    // vvv FIM middle (random_line_split) vvv
    pub fn is_integer(&self) -> bool {
        // BUG in the original: this matched Token::String(..); it should match Token::Integer(..)
        if let Token::Integer(..) = *self { true } else { false }
    }
    // ^^^ FIM middle ^^^

    pub fn is_symbol(&self) -> bool {
        if let Token::Symbol(..) = *self { true } else { false }
    }

    pub fn is_comment(&self) -> bool {
        if let Token::Comment { .. } = *self { true } else { false }
    }

    pub fn is_new_line(&self) -> bool {
        if let Token::NewLine = *self { true } else { false }
    }

    pub fn is_boolean(&self) -> bool {
        self == &Token::boolean_true() ||
        self == &Token::boolean_false()
    }
}

impl std::fmt::Display for Token
{
    fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            &Token::Word(ref w) => write!(fmt, "{}", w),
            &Token::String(ref s) => write!(fmt, "\"{}\"", s),
            &Token::Integer(ref i) => write!(fmt, "{}", i),
            &Token::Symbol(ref s) => write!(fmt, "{}", s),
            &Token::Comment { ref text, .. } => write!(fmt, " {}", text),
            &Token::NewLine => write!(fmt, "new line"),
        }
    }
}
````
token.rs | fim_type: identifier_name — same file as the previous row; the masked middle is the identifier `right_parenthesis`.
token.rs | fim_type: conditional_block — same file; the masked middle is the else branch of is_string: `{ false }`.
xmppstreams.ts | fim_type: random_line_split — a Qt Linguist translation file (Polish target, English source); the masked middle is the fragment marked below. The Polish string is localization data and is kept as-is.

```xml
<?xml version="1.0" ?><!DOCTYPE TS><TS language="pl" sourcelanguage="en" version="2.1">
<context>
    <name>XmppStream</name>
    <message>
        <source>Connection not specified</source>
        <translation>Połączenie nie zostało zdefiniowane</translation>
    </message>
</context>
<context>
    <name>XmppStreamManager</name>
    <message>
        <source>XMPP Streams Manager</source>
        <translation type="unfinished"/>  <!-- FIM middle starts here -->
    </message>
    <message>
        <source>Allows other modules to create XMPP streams and get access to them</source>
        <translation type="unfinished"/>  <!-- FIM middle ends here -->
    </message>
    <message>
        <source>XMPP stream destroyed</source>
        <translation type="unfinished"/>
    </message>
    <message>
        <source>Secure connection is not established</source>
        <translation type="unfinished"/>
    </message>
    <message>
        <source>Connection closed unexpectedly</source>
        <translation type="unfinished"/>
    </message>
    <message>
        <source>Failed to start connection</source>
        <translation type="unfinished"/>
    </message>
</context>
</TS>
```
test_chart_date04.py | fim_type: identifier_body — the masked middle is the entire class body (marked below). Note that `excel_comparsion_test` is the module's actual (misspelled) name upstream, so it is kept.

```python
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#

from datetime import date

from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook


class TestCompareXLSXFiles(ExcelComparisonTest):
    # vvv FIM middle (identifier_body) vvv
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'chart_date04.xlsx'

        test_dir = 'xlsxwriter/test/comparison/'
        self.got_filename = test_dir + '_test_' + filename
        self.exp_filename = test_dir + 'xlsx_files/' + filename

        self.ignore_files = []
        self.ignore_elements = {'xl/charts/chart1.xml': ['<c:formatCode']}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'line'})
        date_format = workbook.add_format({'num_format': 14})

        chart.axis_ids = [51761152, 51762688]

        worksheet.set_column('A:A', 12)

        dates = [date(2013, 1, 1),
                 date(2013, 1, 2),
                 date(2013, 1, 3),
                 date(2013, 1, 4),
                 date(2013, 1, 5),
                 date(2013, 1, 6),
                 date(2013, 1, 7),
                 date(2013, 1, 8),
                 date(2013, 1, 9),
                 date(2013, 1, 10)]

        values = [10, 30, 20, 40, 20, 60, 50, 40, 30, 30]

        worksheet.write_column('A1', dates, date_format)
        worksheet.write_column('B1', values)

        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$10',
            'values': '=Sheet1!$B$1:$B$10',
        })

        chart.set_x_axis({
            'date_axis': True,
            'minor_unit': 1,
            'major_unit': 1,
            'minor_unit_type': 'months',
            'major_unit_type': 'years',
            'num_format': 'dd/mm/yyyy',
            'num_format_linked': True,
        })

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
    # ^^^ FIM middle ^^^
```
test_chart_date04.py | fim_type: random_line_split — same file as the previous row; the masked middle is the line `filename = 'chart_date04.xlsx'`.
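If this dump corresponds to a Hugging Face-style dataset (an assumption — the dataset ID below is a placeholder, not a real name), the rows can be loaded and grouped by fim_type like this:

```python
from datasets import load_dataset  # pip install datasets

# "org/fim-dataset" is a placeholder; substitute the real dataset ID.
ds = load_dataset("org/fim-dataset", split="train")

by_type = {}
for row in ds:
    by_type.setdefault(row["fim_type"], []).append(row["file_name"])

for fim_type, files in by_type.items():
    print(f"{fim_type}: {len(files)} examples")
```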
test_chart_date04.py | fim_type: identifier_name — same file; the masked middle is the class name `TestCompareXLSXFiles`.
components.source.js | fim_type: identifier_body — the masked middle is the body of otherNameFunction (marked below).

```javascript
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
function otherNameFunction () {
  // vvv FIM middle (identifier_body) vvv
  var module = {}
  var exports = {}
  module.exports = exports

  ;module.exports.style = {}

  ;module.exports.template = {
    "type": "container",
    "children": [
      {
        "type": "text",
        "attr": {
          "value": function () {return this.outerData.a}
        }
      },
      {
        "type": "text",
        "attr": {
          "value": function () {return this.outerData.b}
        }
      }
    ]
  }

  ;module.exports.created = function() {
    this.outerData.a = 'aa'
  }

  ;module.exports.ready = function() {
    this.outerData.b = 'bb'
  }

  return module.exports
  // ^^^ FIM middle ^^^
}

// module
define('@weex-component/subvm', function (require, exports, module) {

;module.exports = {
  "components": {
    innertpl: otherNameFunction()
  },
  data: function () {return {
    item: {a: 'a', b: 'b'},
    className: 'fromOuter',
    marginTop: 10
  }}
}

;module.exports.style = {}

;module.exports.template = {
  "type": "container",
  "children": [
    {
      "type": "innertpl",
      "classList": function() {
        return this.className
      },
      "style": {
        marginTop: function() {
          return this.marginTop
        }
      },
      "events": {
        "click": "clicked"
      },
      "attr": {
        "outerData": function () {return this.item}
      }
    }
  ]
}

;})

// require module
bootstrap('@weex-component/subvm')
```
components.source.js | fim_type: identifier_name — same file as the previous row; the masked middle is the identifier `otherNameFunction`.
components.source.js | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations | var module = {}
var exports = {}
module.exports = exports
;module.exports.style = {}
;module.exports.template = {
"type": "container",
"children": [
{
"type": "text",
"attr": {
"value": function () {return this.outerData.a}
}
},
{
"type": "text",
"attr": {
"value": function () {return this.outerData.b}
}
}
]
}
;module.exports.created = function() {
this.outerData.a = 'aa'
}
;module.exports.ready = function() {
this.outerData.b = 'bb'
}
return module.exports
}
// module
define('@weex-component/subvm', function (require, exports, module) {
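  // Outer view-model: registers the inner template component and passes `item` down via its "outerData" attribute.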
;
module.exports = {
"components": {
innertpl: otherNameFunction()
},
data: function () {return {
item: {a: 'a', b: 'b'},
className: 'fromOuter',
marginTop: 10
}}
}
;module.exports.style = {}
;module.exports.template = {
"type": "container",
"children": [
{
"type": "innertpl",
"classList": function() {
return this.className
},
"style": {
marginTop: function() {
return this.marginTop
}
},
"events": {
"click": "clicked"
},
"attr": {
"outerData": function () {return this.item}
}
}
]
}
;})
// require module
bootstrap('@weex-component/subvm') | * under the License.
*/
function otherNameFunction () { | random_line_split |
oc-quantity-input.js | angular.module('orderCloud')
.directive('ocQuantityInput', OCQuantityInput)
;
function | (toastr, OrderCloudSDK, $rootScope) {
return {
scope: {
product: '=',
lineitem: '=',
label: '@',
order: '=',
onUpdate: '&'
},
templateUrl: 'common/templates/quantityInput.tpl.html',
replace: true,
link: function (scope) {
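      // Two modes: a bound product is rendered as-is, while a bound line item loads its product to apply quantity rules and enable updates.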
if (scope.product){
scope.item = scope.product;
scope.content = "product"
}
else if(scope.lineitem){
OrderCloudSDK.Me.GetProduct(scope.lineitem.ProductID)
.then(function(product) {
scope.item = scope.lineitem;
if (product.PriceSchedule && !product.PriceSchedule.RestrictedQuantity) {
scope.item.MinQty = product.PriceSchedule.MinQuantity;
scope.item.MaxQty = product.PriceSchedule.MaxQuantity;
} else {
scope.item.PriceBreaks = product.PriceSchedule.PriceBreaks;
}
scope.content = "lineitem";
scope.updateQuantity = function() {
if (scope.item.Quantity) {
OrderCloudSDK.LineItems.Patch('outgoing', scope.order.ID, scope.item.ID, {Quantity: scope.item.Quantity})
.then(function (data) {
if(data.ProductID === 'AACPunchoutProduct'){
data.Punchout = true;
}
data.Product = scope.lineitem.Product;
scope.item = data;
scope.lineitem = data;
if (typeof scope.onUpdate === "function") scope.onUpdate(scope.lineitem);
toastr.success('Quantity Updated');
$rootScope.$broadcast('OC:UpdateOrder', scope.order.ID, 'Calculating Order Total');
});
}
}
})
}
else {
        toastr.error('Please input either a product or lineitem attribute in the directive', 'Error');
        console.error('Please input either a product or lineitem attribute in the quantityInput directive')
}
}
}
}
| OCQuantityInput | identifier_name |
oc-quantity-input.js | angular.module('orderCloud')
.directive('ocQuantityInput', OCQuantityInput)
;
function OCQuantityInput(toastr, OrderCloudSDK, $rootScope) {
return {
scope: {
product: '=',
lineitem: '=',
label: '@',
order: '=',
onUpdate: '&'
},
templateUrl: 'common/templates/quantityInput.tpl.html',
replace: true,
link: function (scope) {
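      // Two modes: a bound product is rendered as-is, while a bound line item loads its product to apply quantity rules and enable updates.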
if (scope.product){
scope.item = scope.product;
scope.content = "product"
}
else if(scope.lineitem){
OrderCloudSDK.Me.GetProduct(scope.lineitem.ProductID)
.then(function(product) {
scope.item = scope.lineitem;
if (product.PriceSchedule && !product.PriceSchedule.RestrictedQuantity) {
scope.item.MinQty = product.PriceSchedule.MinQuantity;
scope.item.MaxQty = product.PriceSchedule.MaxQuantity;
} else {
scope.item.PriceBreaks = product.PriceSchedule.PriceBreaks;
}
scope.content = "lineitem";
scope.updateQuantity = function() {
if (scope.item.Quantity) |
}
})
}
else {
        toastr.error('Please input either a product or lineitem attribute in the directive', 'Error');
        console.error('Please input either a product or lineitem attribute in the quantityInput directive')
}
}
}
}
| {
OrderCloudSDK.LineItems.Patch('outgoing', scope.order.ID, scope.item.ID, {Quantity: scope.item.Quantity})
.then(function (data) {
if(data.ProductID === 'AACPunchoutProduct'){
data.Punchout = true;
}
data.Product = scope.lineitem.Product;
scope.item = data;
scope.lineitem = data;
if (typeof scope.onUpdate === "function") scope.onUpdate(scope.lineitem);
toastr.success('Quantity Updated');
$rootScope.$broadcast('OC:UpdateOrder', scope.order.ID, 'Calculating Order Total');
});
} | conditional_block |
oc-quantity-input.js | angular.module('orderCloud')
.directive('ocQuantityInput', OCQuantityInput)
;
function OCQuantityInput(toastr, OrderCloudSDK, $rootScope) {
return {
scope: {
product: '=',
lineitem: '=',
label: '@',
order: '=',
onUpdate: '&'
}, | scope.content = "product"
}
else if(scope.lineitem){
OrderCloudSDK.Me.GetProduct(scope.lineitem.ProductID)
.then(function(product) {
scope.item = scope.lineitem;
if (product.PriceSchedule && !product.PriceSchedule.RestrictedQuantity) {
scope.item.MinQty = product.PriceSchedule.MinQuantity;
scope.item.MaxQty = product.PriceSchedule.MaxQuantity;
} else {
scope.item.PriceBreaks = product.PriceSchedule.PriceBreaks;
}
scope.content = "lineitem";
scope.updateQuantity = function() {
if (scope.item.Quantity) {
OrderCloudSDK.LineItems.Patch('outgoing', scope.order.ID, scope.item.ID, {Quantity: scope.item.Quantity})
.then(function (data) {
if(data.ProductID === 'AACPunchoutProduct'){
data.Punchout = true;
}
data.Product = scope.lineitem.Product;
scope.item = data;
scope.lineitem = data;
if (typeof scope.onUpdate === "function") scope.onUpdate(scope.lineitem);
toastr.success('Quantity Updated');
$rootScope.$broadcast('OC:UpdateOrder', scope.order.ID, 'Calculating Order Total');
});
}
}
})
}
else {
        toastr.error('Please input either a product or lineitem attribute in the directive', 'Error');
        console.error('Please input either a product or lineitem attribute in the quantityInput directive')
}
}
}
} | templateUrl: 'common/templates/quantityInput.tpl.html',
replace: true,
link: function (scope) {
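      // Two modes: a bound product is rendered as-is, while a bound line item loads its product to apply quantity rules and enable updates.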
if (scope.product){
scope.item = scope.product; | random_line_split |
oc-quantity-input.js | angular.module('orderCloud')
.directive('ocQuantityInput', OCQuantityInput)
;
function OCQuantityInput(toastr, OrderCloudSDK, $rootScope) | {
return {
scope: {
product: '=',
lineitem: '=',
label: '@',
order: '=',
onUpdate: '&'
},
templateUrl: 'common/templates/quantityInput.tpl.html',
replace: true,
link: function (scope) {
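      // Two modes: a bound product is rendered as-is, while a bound line item loads its product to apply quantity rules and enable updates.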
if (scope.product){
scope.item = scope.product;
scope.content = "product"
}
else if(scope.lineitem){
OrderCloudSDK.Me.GetProduct(scope.lineitem.ProductID)
.then(function(product) {
scope.item = scope.lineitem;
if (product.PriceSchedule && !product.PriceSchedule.RestrictedQuantity) {
scope.item.MinQty = product.PriceSchedule.MinQuantity;
scope.item.MaxQty = product.PriceSchedule.MaxQuantity;
} else {
scope.item.PriceBreaks = product.PriceSchedule.PriceBreaks;
}
scope.content = "lineitem";
scope.updateQuantity = function() {
if (scope.item.Quantity) {
OrderCloudSDK.LineItems.Patch('outgoing', scope.order.ID, scope.item.ID, {Quantity: scope.item.Quantity})
.then(function (data) {
if(data.ProductID === 'AACPunchoutProduct'){
data.Punchout = true;
}
data.Product = scope.lineitem.Product;
scope.item = data;
scope.lineitem = data;
if (typeof scope.onUpdate === "function") scope.onUpdate(scope.lineitem);
toastr.success('Quantity Updated');
$rootScope.$broadcast('OC:UpdateOrder', scope.order.ID, 'Calculating Order Total');
});
}
}
})
}
else {
        toastr.error('Please input either a product or lineitem attribute in the directive', 'Error');
        console.error('Please input either a product or lineitem attribute in the quantityInput directive')
}
}
}
} | identifier_body |
|
articles-form.component.ts | import { Component, Input, OnInit, OnDestroy } from '@angular/core'
import { FormGroup, FormBuilder, Validators } from '@angular/forms'
import { Subscription, Observable } from 'rxjs'
import { MeteorObservable } from 'meteor-rxjs'
import { Tags } from '/both/collections/tags.collection'
import { Tag } from '/both/models/tag.model'
import { retLang } from '/lib/lang'
import { isLang } from '/lib/validate'
import template from './articles-form.component.html'
@Component({
selector: 'articles-form',
template
})
export class ArticlesFormComponent implements OnInit, OnDestroy {
@Input() edit
addForm : FormGroup
tags : Observable<Tag[]>
tagsSub : Subscription
myTag = new Set()
arrayOfTags : string[] = []
image : string = ''
imageSub : Subscription |
if (this.myTag.has(tagValue)) {
this.myTag.delete(tagValue)
} else {
this.myTag.add(tagValue)
}
}
constructor( private formBuilder: FormBuilder ) {}
ngOnInit() {
this.printForm()
}
onImage(imageId : string) : void {
this.image = imageId
this.addForm.value.image = imageId
this.kill()
this.imageSub = MeteorObservable.subscribe('image', this.image).subscribe()
console.log('addForm.image -> ' + this.addForm.value.image)
}
private printForm() : void {
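    // Editing mode pre-fills the form from the current language's content; creation mode starts with a blank form and validators.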
if (this.edit) {
this.kill()
this.image = this.edit.image
this.imageSub = MeteorObservable.subscribe('image', this.image).subscribe()
this.addForm = this.formBuilder.group({
title: [this.edit.lang[retLang(this.lang)].title],
description: [this.edit.lang[retLang(this.lang)].description],
image: [this.image],
lang: [this.lang],
article: [this.edit.lang[retLang(this.lang)].article],
isPublic: [this.edit.isPublic],
toFooter: [this.edit.pastToFooter]
})
} else {
this.addForm = this.formBuilder.group({
title: ['', [Validators.required, Validators.minLength(2)] ],
description: ['', Validators.required],
image: [this.image],
lang: [this.lang, Validators.required],
article: ['', Validators.required],
isPublic: [false],
toFooter: [false]
})
}
this.tagsSub = MeteorObservable.subscribe('tags').subscribe(() => {
this.tags = Tags.find({}).zone()
})
}
onReset() {
this.myTag.clear()
this.arrayOfTags = []
}
addArticle() {
if (this.addForm.valid) {
this.arrayOfTags = Array.from(this.myTag)
this.addForm.value.lang = this.lang
let img = this.addForm.value.image
console.log('We will add img -> ' + img)
if (this.edit) {
        console.log('we are editing an existing article')
        MeteorObservable.call('updArticle', this.addForm.value, img, this.arrayOfTags, this.edit._id).subscribe(() => {
          alert('Article has been updated')
        }, (err) => {
          alert(`Cannot update article because ${err}`)
        })
      } else {
        console.log('we will create a new article')
        MeteorObservable.call('insArticle', this.addForm.value, img, this.arrayOfTags).subscribe(() => {
          alert('Article has been created')
        }, (err) => {
          alert(`Cannot insert article because ${err}`)
})
}
this.addForm.reset()
} else {
console.log('addForm not valid...')
}
}
langSelected(lang : string) {
if (isLang(lang)) {
this.lang = lang
}
}
private kill() {
if (this.tagsSub)
this.tagsSub.unsubscribe()
if (this.imageSub)
this.imageSub.unsubscribe()
}
ngOnDestroy() {
this.kill()
}
} | lang : string = 'en'
  // TODO: Add a test to check whether tagValue already exists in the collection!
addTag(tagValue: string): void { | random_line_split |
articles-form.component.ts | import { Component, Input, OnInit, OnDestroy } from '@angular/core'
import { FormGroup, FormBuilder, Validators } from '@angular/forms'
import { Subscription, Observable } from 'rxjs'
import { MeteorObservable } from 'meteor-rxjs'
import { Tags } from '/both/collections/tags.collection'
import { Tag } from '/both/models/tag.model'
import { retLang } from '/lib/lang'
import { isLang } from '/lib/validate'
import template from './articles-form.component.html'
@Component({
selector: 'articles-form',
template
})
export class ArticlesFormComponent implements OnInit, OnDestroy {
@Input() edit
addForm : FormGroup
tags : Observable<Tag[]>
tagsSub : Subscription
myTag = new Set()
arrayOfTags : string[] = []
image : string = ''
imageSub : Subscription
lang : string = 'en'
  // TODO: Add a test to check whether tagValue already exists in the collection!
addTag(tagValue: string): void {
if (this.myTag.has(tagValue)) {
this.myTag.delete(tagValue)
} else {
this.myTag.add(tagValue)
}
}
constructor( private formBuilder: FormBuilder ) {}
ngOnInit() {
this.printForm()
}
onImage(imageId : string) : void {
this.image = imageId
this.addForm.value.image = imageId
this.kill()
this.imageSub = MeteorObservable.subscribe('image', this.image).subscribe()
console.log('addForm.image -> ' + this.addForm.value.image)
}
private printForm() : void {
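    // Editing mode pre-fills the form from the current language's content; creation mode starts with a blank form and validators.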
if (this.edit) {
| e {
this.addForm = this.formBuilder.group({
title: ['', [Validators.required, Validators.minLength(2)] ],
description: ['', Validators.required],
image: [this.image],
lang: [this.lang, Validators.required],
article: ['', Validators.required],
isPublic: [false],
toFooter: [false]
})
}
this.tagsSub = MeteorObservable.subscribe('tags').subscribe(() => {
this.tags = Tags.find({}).zone()
})
}
onReset() {
this.myTag.clear()
this.arrayOfTags = []
}
addArticle() {
if (this.addForm.valid) {
this.arrayOfTags = Array.from(this.myTag)
this.addForm.value.lang = this.lang
let img = this.addForm.value.image
console.log('We will add img -> ' + img)
if (this.edit) {
        console.log('we are editing an existing article')
        MeteorObservable.call('updArticle', this.addForm.value, img, this.arrayOfTags, this.edit._id).subscribe(() => {
          alert('Article has been updated')
        }, (err) => {
          alert(`Cannot update article because ${err}`)
        })
      } else {
        console.log('we will create a new article')
        MeteorObservable.call('insArticle', this.addForm.value, img, this.arrayOfTags).subscribe(() => {
          alert('Article has been created')
        }, (err) => {
          alert(`Cannot insert article because ${err}`)
})
}
this.addForm.reset()
} else {
console.log('addForm not valid...')
}
}
langSelected(lang : string) {
if (isLang(lang)) {
this.lang = lang
}
}
private kill() {
if (this.tagsSub)
this.tagsSub.unsubscribe()
if (this.imageSub)
this.imageSub.unsubscribe()
}
ngOnDestroy() {
this.kill()
}
}
| this.kill()
this.image = this.edit.image
this.imageSub = MeteorObservable.subscribe('image', this.image).subscribe()
this.addForm = this.formBuilder.group({
title: [this.edit.lang[retLang(this.lang)].title],
description: [this.edit.lang[retLang(this.lang)].description],
image: [this.image],
lang: [this.lang],
article: [this.edit.lang[retLang(this.lang)].article],
isPublic: [this.edit.isPublic],
toFooter: [this.edit.pastToFooter]
})
} els | conditional_block |
articles-form.component.ts | import { Component, Input, OnInit, OnDestroy } from '@angular/core'
import { FormGroup, FormBuilder, Validators } from '@angular/forms'
import { Subscription, Observable } from 'rxjs'
import { MeteorObservable } from 'meteor-rxjs'
import { Tags } from '/both/collections/tags.collection'
import { Tag } from '/both/models/tag.model'
import { retLang } from '/lib/lang'
import { isLang } from '/lib/validate'
import template from './articles-form.component.html'
@Component({
selector: 'articles-form',
template
})
export class ArticlesFormComponent implements OnInit, OnDestroy {
@Input() edit
addForm : FormGroup
tags : Observable<Tag[]>
tagsSub : Subscription
myTag = new Set()
arrayOfTags : string[] = []
image : string = ''
imageSub : Subscription
lang : string = 'en'
  // TODO: Add a test to check whether tagValue already exists in the collection!
addTag(tagValue: string): void {
if (this.myTag.has(tagValue)) {
this.myTag.delete(tagValue)
} else {
this.myTag.add(tagValue)
}
}
constructor( private formBuilder: FormBuilder ) {}
ngOnInit() {
this.printForm()
}
onImage(imageId : string) : void {
this.image = imageId
this.addForm.value.image = imageId
this.kill()
this.imageSub = MeteorObservable.subscribe('image', this.image).subscribe()
console.log('addForm.image -> ' + this.addForm.value.image)
}
private printForm() : void {
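    // Editing mode pre-fills the form from the current language's content; creation mode starts with a blank form and validators.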
if (this.edit) {
this.kill()
this.image = this.edit.image
this.imageSub = MeteorObservable.subscribe('image', this.image).subscribe()
this.addForm = this.formBuilder.group({
title: [this.edit.lang[retLang(this.lang)].title],
description: [this.edit.lang[retLang(this.lang)].description],
image: [this.image],
lang: [this.lang],
article: [this.edit.lang[retLang(this.lang)].article],
isPublic: [this.edit.isPublic],
toFooter: [this.edit.pastToFooter]
})
} else {
this.addForm = this.formBuilder.group({
title: ['', [Validators.required, Validators.minLength(2)] ],
description: ['', Validators.required],
image: [this.image],
lang: [this.lang, Validators.required],
article: ['', Validators.required],
isPublic: [false],
toFooter: [false]
})
}
this.tagsSub = MeteorObservable.subscribe('tags').subscribe(() => {
this.tags = Tags.find({}).zone()
})
}
onReset() {
this.myTag.clear()
this.arrayOfTags = []
}
addArticle() {
if (this.addForm.valid) {
this.arrayOfTags = Array.from(this.myTag)
this.addForm.value.lang = this.lang
let img = this.addForm.value.image
console.log('We will add img -> ' + img)
if (this.edit) {
        console.log('we are editing an existing article')
        MeteorObservable.call('updArticle', this.addForm.value, img, this.arrayOfTags, this.edit._id).subscribe(() => {
          alert('Article has been updated')
        }, (err) => {
          alert(`Cannot update article because ${err}`)
        })
      } else {
        console.log('we will create a new article')
        MeteorObservable.call('insArticle', this.addForm.value, img, this.arrayOfTags).subscribe(() => {
          alert('Article has been created')
        }, (err) => {
          alert(`Cannot insert article because ${err}`)
})
}
this.addForm.reset()
} else {
console.log('addForm not valid...')
}
}
langSelected(lang : string) {
if (isLang(lang)) {
this.lang = lang
}
}
private kill |
if (this.tagsSub)
this.tagsSub.unsubscribe()
if (this.imageSub)
this.imageSub.unsubscribe()
}
ngOnDestroy() {
this.kill()
}
}
| () { | identifier_name |
articles-form.component.ts | import { Component, Input, OnInit, OnDestroy } from '@angular/core'
import { FormGroup, FormBuilder, Validators } from '@angular/forms'
import { Subscription, Observable } from 'rxjs'
import { MeteorObservable } from 'meteor-rxjs'
import { Tags } from '/both/collections/tags.collection'
import { Tag } from '/both/models/tag.model'
import { retLang } from '/lib/lang'
import { isLang } from '/lib/validate'
import template from './articles-form.component.html'
@Component({
selector: 'articles-form',
template
})
export class ArticlesFormComponent implements OnInit, OnDestroy {
@Input() edit
addForm : FormGroup
tags : Observable<Tag[]>
tagsSub : Subscription
myTag = new Set()
arrayOfTags : string[] = []
image : string = ''
imageSub : Subscription
lang : string = 'en'
  // TODO: Add a test to check whether tagValue already exists in the collection!
addTag(tagValue: string): void {
if (this.myTag.has(tagValue)) {
this.myTag.delete(tagValue)
} else {
this.myTag.add(tagValue)
}
}
constructor( private formBuilder: FormBuilder ) {}
ngOnInit() {
this.printForm()
}
onImage(imageId : string) : void {
this.image = imageId
this.addForm.value.image = imageId
this.kill()
this.imageSub = MeteorObservable.subscribe('image', this.image).subscribe()
console.log('addForm.image -> ' + this.addForm.value.image)
}
private printForm() : void {
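    // Editing mode pre-fills the form from the current language's content; creation mode starts with a blank form and validators.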
if (this.edit) {
this.kill()
this.image = this.edit.image
this.imageSub = MeteorObservable.subscribe('image', this.image).subscribe()
this.addForm = this.formBuilder.group({
title: [this.edit.lang[retLang(this.lang)].title],
description: [this.edit.lang[retLang(this.lang)].description],
image: [this.image],
lang: [this.lang],
article: [this.edit.lang[retLang(this.lang)].article],
isPublic: [this.edit.isPublic],
toFooter: [this.edit.pastToFooter]
})
} else {
this.addForm = this.formBuilder.group({
title: ['', [Validators.required, Validators.minLength(2)] ],
description: ['', Validators.required],
image: [this.image],
lang: [this.lang, Validators.required],
article: ['', Validators.required],
isPublic: [false],
toFooter: [false]
})
}
this.tagsSub = MeteorObservable.subscribe('tags').subscribe(() => {
this.tags = Tags.find({}).zone()
})
}
onReset() {
this.myTag.clear()
this.arrayOfTags = []
}
addArticle() {
| langSelected(lang : string) {
if (isLang(lang)) {
this.lang = lang
}
}
private kill() {
if (this.tagsSub)
this.tagsSub.unsubscribe()
if (this.imageSub)
this.imageSub.unsubscribe()
}
ngOnDestroy() {
this.kill()
}
}
| if (this.addForm.valid) {
this.arrayOfTags = Array.from(this.myTag)
this.addForm.value.lang = this.lang
let img = this.addForm.value.image
console.log('We will add img -> ' + img)
if (this.edit) {
        console.log('we are editing an existing article')
        MeteorObservable.call('updArticle', this.addForm.value, img, this.arrayOfTags, this.edit._id).subscribe(() => {
          alert('Article has been updated')
        }, (err) => {
          alert(`Cannot update article because ${err}`)
        })
      } else {
        console.log('we will create a new article')
        MeteorObservable.call('insArticle', this.addForm.value, img, this.arrayOfTags).subscribe(() => {
          alert('Article has been created')
        }, (err) => {
          alert(`Cannot insert article because ${err}`)
})
}
this.addForm.reset()
} else {
console.log('addForm not valid...')
}
}
| identifier_body |
15.2.3.5-4-46.js | /// Copyright (c) 2012 Ecma International. All rights reserved.
/// Ecma International makes this code available under the terms and conditions set
/// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
/// "Use Terms"). Any redistribution of this code must retain the above
/// copyright and this notice and otherwise comply with the Use Terms.
/**
* @path ch15/15.2/15.2.3/15.2.3.5/15.2.3.5-4-46.js
* @description Object.create - 'enumerable' property of one property in 'Properties' is true (8.10.5 step 3)
*/
function testcase() {
var accessed = false;
var newObj = Object.create({}, {
prop: {
enumerable: true
}
});
for (var property in newObj) {
if (property === "prop") |
}
return accessed;
}
runTestCase(testcase);
| {
accessed = true;
} | conditional_block |
15.2.3.5-4-46.js | /// Copyright (c) 2012 Ecma International. All rights reserved.
/// Ecma International makes this code available under the terms and conditions set
/// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
/// "Use Terms"). Any redistribution of this code must retain the above
/// copyright and this notice and otherwise comply with the Use Terms.
/**
* @path ch15/15.2/15.2.3/15.2.3.5/15.2.3.5-4-46.js
* @description Object.create - 'enumerable' property of one property in 'Properties' is true (8.10.5 step 3)
*/
function testcase() |
runTestCase(testcase);
| {
var accessed = false;
var newObj = Object.create({}, {
prop: {
enumerable: true
}
});
for (var property in newObj) {
if (property === "prop") {
accessed = true;
}
}
return accessed;
} | identifier_body |
15.2.3.5-4-46.js | /// Copyright (c) 2012 Ecma International. All rights reserved.
/// Ecma International makes this code available under the terms and conditions set
/// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
/// "Use Terms"). Any redistribution of this code must retain the above
/// copyright and this notice and otherwise comply with the Use Terms.
/**
* @path ch15/15.2/15.2.3/15.2.3.5/15.2.3.5-4-46.js
* @description Object.create - 'enumerable' property of one property in 'Properties' is true (8.10.5 step 3)
*/
function | () {
var accessed = false;
var newObj = Object.create({}, {
prop: {
enumerable: true
}
});
for (var property in newObj) {
if (property === "prop") {
accessed = true;
}
}
return accessed;
}
runTestCase(testcase);
| testcase | identifier_name |
15.2.3.5-4-46.js | /// Copyright (c) 2012 Ecma International. All rights reserved.
/// Ecma International makes this code available under the terms and conditions set
/// forth on http://hg.ecmascript.org/tests/test262/raw-file/tip/LICENSE (the
/// "Use Terms"). Any redistribution of this code must retain the above
/// copyright and this notice and otherwise comply with the Use Terms.
/**
* @path ch15/15.2/15.2.3/15.2.3.5/15.2.3.5-4-46.js
* @description Object.create - 'enumerable' property of one property in 'Properties' is true (8.10.5 step 3)
*/
function testcase() {
var accessed = false;
var newObj = Object.create({}, {
prop: {
enumerable: true
}
});
for (var property in newObj) { | }
}
return accessed;
}
runTestCase(testcase); | if (property === "prop") {
accessed = true; | random_line_split |
conf.py | # -*- coding: utf-8 -*-
#
# Kolibri 'developer docs' build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
import inspect
import os
import sys
from datetime import datetime
import django
from django.utils.encoding import force_text
from django.utils.html import strip_tags
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.insert(0, os.path.abspath(parent))
builddir = os.path.join(cwd, '_build')
# When we start loading stuff from kolibri, we're gonna need this
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kolibri.deployment.default.settings.base")
os.environ["KOLIBRI_HOME"] = os.path.join(builddir, 'kolibri_home')
# This is necessary because the directory needs to exist for Kolibri to run when
# not invoked through CLI.
if not os.path.exists(os.environ["KOLIBRI_HOME"]):
os.makedirs(os.environ["KOLIBRI_HOME"])
# This import *must* come after the path insertion, otherwise sphinx won't be able to find the kolibri module
import kolibri # noqa
django.setup()
# Monkey patch this so we don't have any complaints during Sphinx inspect
from django.db.models.fields import files # noqa
files.FileDescriptor.__get__ = lambda *args: None
# Auto list fields from django models - from https://djangosnippets.org/snippets/2533/#c5977
def process_docstring(app, what, name, obj, options, lines):
# This causes import errors if left outside the function
from django.db import models
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta.get_fields()
for field in fields:
# Skip ManyToOneRel and ManyToManyRel fields which have no 'verbose_name' or 'help_text'
if not hasattr(field, 'verbose_name'):
continue
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_text(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_text(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
if isinstance(field, models.ForeignKey):
to = field.rel.to
lines.append(u':type %s: %s to :class:`~%s`' % (field.attname, type(field).__name__, to))
else:
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
return lines
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
linkcheck_ignore = [
'https://groups.google.com/a/learningequality.org/forum/#!forum/dev',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kolibri Developer Docs'
copyright = u'{year:d}, Learning Equality'.format(year=datetime.now().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = kolibri.__version__
# The full version, including alpha/beta/rc tags.
release = kolibri.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['.', sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Wide, responsive tables not supported until this is merged
# https://github.com/rtfd/sphinx_rtd_theme/pull/432
# Approach 1, broken because of....
# https://github.com/rtfd/readthedocs.org/issues/2116
# html_context = {
# 'css_files': [
# '_static/theme_overrides.css', # override wide tables in RTD theme
# ],
# }
# Approach 2 for custom stylesheet:
# adapted from: http://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
# and https://github.com/altair-viz/altair/pull/418/files
# https://github.com/rtfd/sphinx_rtd_theme/issues/117
def | (app):
# Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
# Add our custom CSS overrides
app.add_stylesheet('theme_overrides.css')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# Output file base name for HTML help builder.
htmlhelp_basename = 'kolibri-dev'
# -- I18N ----------------------------------------------------------------------
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
locale_dirs = [
os.path.join(os.getcwd(), "locale", "docs"),
]
| setup | identifier_name |
conf.py | # -*- coding: utf-8 -*-
#
# Kolibri 'developer docs' build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
import inspect
import os
import sys
from datetime import datetime
import django
from django.utils.encoding import force_text
from django.utils.html import strip_tags
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.insert(0, os.path.abspath(parent))
builddir = os.path.join(cwd, '_build')
# When we start loading stuff from kolibri, we're gonna need this
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kolibri.deployment.default.settings.base")
os.environ["KOLIBRI_HOME"] = os.path.join(builddir, 'kolibri_home')
# This is necessary because the directory needs to exist for Kolibri to run when
# not invoked through CLI.
if not os.path.exists(os.environ["KOLIBRI_HOME"]):
os.makedirs(os.environ["KOLIBRI_HOME"])
# This import *must* come after the path insertion, otherwise sphinx won't be able to find the kolibri module
import kolibri # noqa
django.setup()
# Monkey patch this so we don't have any complaints during Sphinx inspect
from django.db.models.fields import files # noqa
files.FileDescriptor.__get__ = lambda *args: None
# Auto list fields from django models - from https://djangosnippets.org/snippets/2533/#c5977
def process_docstring(app, what, name, obj, options, lines):
# This causes import errors if left outside the function
from django.db import models
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta.get_fields()
for field in fields:
# Skip ManyToOneRel and ManyToManyRel fields which have no 'verbose_name' or 'help_text'
if not hasattr(field, 'verbose_name'):
continue
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_text(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_text(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
if isinstance(field, models.ForeignKey):
to = field.rel.to
lines.append(u':type %s: %s to :class:`~%s`' % (field.attname, type(field).__name__, to))
else:
|
return lines
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
linkcheck_ignore = [
'https://groups.google.com/a/learningequality.org/forum/#!forum/dev',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kolibri Developer Docs'
copyright = u'{year:d}, Learning Equality'.format(year=datetime.now().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = kolibri.__version__
# The full version, including alpha/beta/rc tags.
release = kolibri.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['.', sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Wide, responsive tables not supported until this is merged
# https://github.com/rtfd/sphinx_rtd_theme/pull/432
# Approach 1, broken because of....
# https://github.com/rtfd/readthedocs.org/issues/2116
# html_context = {
# 'css_files': [
# '_static/theme_overrides.css', # override wide tables in RTD theme
# ],
# }
# Approach 2 for custom stylesheet:
# adapted from: http://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
# and https://github.com/altair-viz/altair/pull/418/files
# https://github.com/rtfd/sphinx_rtd_theme/issues/117
def setup(app):
# Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
# Add our custom CSS overrides
app.add_stylesheet('theme_overrides.css')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# Output file base name for HTML help builder.
htmlhelp_basename = 'kolibri-dev'
# -- I18N ----------------------------------------------------------------------
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
locale_dirs = [
os.path.join(os.getcwd(), "locale", "docs"),
]
| lines.append(u':type %s: %s' % (field.attname, type(field).__name__)) | conditional_block |
conf.py | # -*- coding: utf-8 -*-
#
# Kolibri 'developer docs' build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
import inspect
import os
import sys
from datetime import datetime
import django
from django.utils.encoding import force_text
from django.utils.html import strip_tags
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.insert(0, os.path.abspath(parent)) |
builddir = os.path.join(cwd, '_build')
# When we start loading stuff from kolibri, we're gonna need this
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kolibri.deployment.default.settings.base")
os.environ["KOLIBRI_HOME"] = os.path.join(builddir, 'kolibri_home')
# This is necessary because the directory needs to exist for Kolibri to run when
# not invoked through CLI.
if not os.path.exists(os.environ["KOLIBRI_HOME"]):
os.makedirs(os.environ["KOLIBRI_HOME"])
# This import *must* come after the path insertion, otherwise sphinx won't be able to find the kolibri module
import kolibri # noqa
django.setup()
# Monkey patch this so we don't have any complaints during Sphinx inspect
from django.db.models.fields import files # noqa
files.FileDescriptor.__get__ = lambda *args: None
# Auto list fields from django models - from https://djangosnippets.org/snippets/2533/#c5977
def process_docstring(app, what, name, obj, options, lines):
# This causes import errors if left outside the function
from django.db import models
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta.get_fields()
for field in fields:
# Skip ManyToOneRel and ManyToManyRel fields which have no 'verbose_name' or 'help_text'
if not hasattr(field, 'verbose_name'):
continue
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_text(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_text(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
if isinstance(field, models.ForeignKey):
to = field.rel.to
lines.append(u':type %s: %s to :class:`~%s`' % (field.attname, type(field).__name__, to))
else:
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
return lines
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
linkcheck_ignore = [
'https://groups.google.com/a/learningequality.org/forum/#!forum/dev',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kolibri Developer Docs'
copyright = u'{year:d}, Learning Equality'.format(year=datetime.now().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = kolibri.__version__
# The full version, including alpha/beta/rc tags.
release = kolibri.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['.', sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Wide, responsive tables not supported until this is merged
# https://github.com/rtfd/sphinx_rtd_theme/pull/432
# Approach 1, broken because of....
# https://github.com/rtfd/readthedocs.org/issues/2116
# html_context = {
# 'css_files': [
# '_static/theme_overrides.css', # override wide tables in RTD theme
# ],
# }
# Approach 2 for custom stylesheet:
# adapted from: http://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
# and https://github.com/altair-viz/altair/pull/418/files
# https://github.com/rtfd/sphinx_rtd_theme/issues/117
def setup(app):
# Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
# Add our custom CSS overrides
app.add_stylesheet('theme_overrides.css')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# Output file base name for HTML help builder.
htmlhelp_basename = 'kolibri-dev'
# -- I18N ----------------------------------------------------------------------
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
locale_dirs = [
os.path.join(os.getcwd(), "locale", "docs"),
] | random_line_split |
|
conf.py | # -*- coding: utf-8 -*-
#
# Kolibri 'developer docs' build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
import inspect
import os
import sys
from datetime import datetime
import django
from django.utils.encoding import force_text
from django.utils.html import strip_tags
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.insert(0, os.path.abspath(parent))
builddir = os.path.join(cwd, '_build')
# When we start loading stuff from kolibri, we're gonna need this
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kolibri.deployment.default.settings.base")
os.environ["KOLIBRI_HOME"] = os.path.join(builddir, 'kolibri_home')
# This is necessary because the directory needs to exist for Kolibri to run when
# not invoked through CLI.
if not os.path.exists(os.environ["KOLIBRI_HOME"]):
os.makedirs(os.environ["KOLIBRI_HOME"])
# This import *must* come after the path insertion, otherwise sphinx won't be able to find the kolibri module
import kolibri # noqa
django.setup()
# Monkey patch this so we don't have any complaints during Sphinx inspect
from django.db.models.fields import files # noqa
files.FileDescriptor.__get__ = lambda *args: None
# Auto list fields from django models - from https://djangosnippets.org/snippets/2533/#c5977
def process_docstring(app, what, name, obj, options, lines):
# This causes import errors if left outside the function
from django.db import models
# Only look at objects that inherit from Django's base model class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta.get_fields()
for field in fields:
# Skip ManyToOneRel and ManyToManyRel fields which have no 'verbose_name' or 'help_text'
if not hasattr(field, 'verbose_name'):
continue
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_text(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_text(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
if isinstance(field, models.ForeignKey):
to = field.rel.to
lines.append(u':type %s: %s to :class:`~%s`' % (field.attname, type(field).__name__, to))
else:
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
return lines
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
linkcheck_ignore = [
'https://groups.google.com/a/learningequality.org/forum/#!forum/dev',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Kolibri Developer Docs'
copyright = u'{year:d}, Learning Equality'.format(year=datetime.now().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = kolibri.__version__
# The full version, including alpha/beta/rc tags.
release = kolibri.__version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['.', sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Wide, responsive tables not supported until this is merged
# https://github.com/rtfd/sphinx_rtd_theme/pull/432
# Approach 1, broken because of....
# https://github.com/rtfd/readthedocs.org/issues/2116
# html_context = {
# 'css_files': [
# '_static/theme_overrides.css', # override wide tables in RTD theme
# ],
# }
# Approach 2 for custom stylesheet:
# adapted from: http://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
# and https://github.com/altair-viz/altair/pull/418/files
# https://github.com/rtfd/sphinx_rtd_theme/issues/117
def setup(app):
# Register the docstring processor with sphinx
|
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# Output file base name for HTML help builder.
htmlhelp_basename = 'kolibri-dev'
# -- I18N ----------------------------------------------------------------------
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
locale_dirs = [
os.path.join(os.getcwd(), "locale", "docs"),
]
| app.connect('autodoc-process-docstring', process_docstring)
# Add our custom CSS overrides
app.add_stylesheet('theme_overrides.css') | identifier_body |
posture-to-jacobian-recurrent.py | import climate
import lmj.cubes
import lmj.plot
import numpy as np
import pandas as pd
import theanets
logging = climate.get_logger('posture->jac')
BATCH = 256
THRESHOLD = 100
def load_markers(fn):
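    # Keep only the x/y/z marker columns and drop frames with missing samples.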
df = pd.read_csv(fn, index_col='time').dropna()
cols = [c for c in df.columns if c.startswith('marker') and c[-1] in 'xyz']
return df[cols].astype('f')
def load_jacobian(fn):
df = pd.read_csv(fn, index_col='time').dropna()
cols = [c for c in df.columns if c.startswith('pc')]
return df[cols].astype('f')
def main(root):
match = lmj.cubes.utils.matching
bodys = [load_markers(f) for f in sorted(match(root, '*_body.csv.gz'))]
nbody = bodys[0].shape[1]
logging.info('loaded %d body-relative files', len(bodys))
goals = [load_markers(f) for f in sorted(match(root, '*_goal.csv.gz'))]
ngoal = goals[0].shape[1]
logging.info('loaded %d goal-relative files', len(goals))
jacs = [load_jacobian(f) for f in sorted(match(root, '*_jac_pca23.csv.gz'))]
njac = jacs[0].shape[1]
logging.info('loaded %d jacobian files', len(jacs))
'''
with lmj.plot.axes() as ax:
ax.hist(np.concatenate([j.values.ravel() for j in jacs]),
bins=np.linspace(-THRESHOLD, THRESHOLD, 127), lw=0)
'''
net = theanets.recurrent.Regressor([
nbody,
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
njac,
#dict(size=njac, activation='linear', inputs={'hid2:out': 300}, name='mean'),
#dict(size=njac, activation='linear', inputs={'hid2:out': 300}, name='covar'),
], loss='mse')
'''
inputs = []
outputs = []
for s in range(len(bodys)):
body = bodys[s]
goal = goals[s]
jac = jacs[s]
idx = body.index & goal.index & jac.index
b = body.loc[idx, :].values
g = goal.loc[idx, :].values
inputs.append(g)#np.hstack([b, g]))
outputs.append(np.clip(jac.loc[idx, :].values, -THRESHOLD, THRESHOLD))
'''
B = 32
def batches(T):
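    # Sample B random windows of length T across trials; returns a callable that builds one batch per call.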
inputs = np.zeros((B, T, nbody), 'f') | idx = []
while len(idx) <= T:
s = np.random.randint(len(bodys))
body = bodys[s]
goal = goals[s]
jac = jacs[s]
idx = body.index & goal.index & jac.index
i = np.random.randint(len(idx) - T)
inputs[b] = body.loc[idx, :].iloc[i:i+T, :].values
outputs[b] = np.clip(jac.loc[idx, :].iloc[i:i+T, :].values,
-THRESHOLD, THRESHOLD)
return [inputs, outputs]
return batch
net.train(
#[np.vstack(inputs), np.vstack(outputs)],
batches(32),
algo='layerwise',
momentum=0.9,
learning_rate=0.0001,
patience=5,
min_improvement=0.01,
#max_gradient_norm=1,
#input_noise=0.001,
#hidden_l1=0.001,
#hidden_dropout=0.1,
monitors={
#'*:out': (0.1, 0.5, 0.9),
})
net.save('/tmp/posture-jacobian-gru.pkl.gz')
if __name__ == '__main__':
climate.call(main) | outputs = np.zeros((B, T, njac), 'f')
def batch():
for b in range(B): | random_line_split |
posture-to-jacobian-recurrent.py | import climate
import lmj.cubes
import lmj.plot
import numpy as np
import pandas as pd
import theanets
logging = climate.get_logger('posture->jac')
BATCH = 256
THRESHOLD = 100
def load_markers(fn):
df = pd.read_csv(fn, index_col='time').dropna()
cols = [c for c in df.columns if c.startswith('marker') and c[-1] in 'xyz']
return df[cols].astype('f')
def load_jacobian(fn):
df = pd.read_csv(fn, index_col='time').dropna()
cols = [c for c in df.columns if c.startswith('pc')]
return df[cols].astype('f')
def main(root):
match = lmj.cubes.utils.matching
bodys = [load_markers(f) for f in sorted(match(root, '*_body.csv.gz'))]
nbody = bodys[0].shape[1]
logging.info('loaded %d body-relative files', len(bodys))
goals = [load_markers(f) for f in sorted(match(root, '*_goal.csv.gz'))]
ngoal = goals[0].shape[1]
logging.info('loaded %d goal-relative files', len(goals))
jacs = [load_jacobian(f) for f in sorted(match(root, '*_jac_pca23.csv.gz'))]
njac = jacs[0].shape[1]
logging.info('loaded %d jacobian files', len(jacs))
'''
with lmj.plot.axes() as ax:
ax.hist(np.concatenate([j.values.ravel() for j in jacs]),
bins=np.linspace(-THRESHOLD, THRESHOLD, 127), lw=0)
'''
net = theanets.recurrent.Regressor([
nbody,
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
njac,
#dict(size=njac, activation='linear', inputs={'hid2:out': 300}, name='mean'),
#dict(size=njac, activation='linear', inputs={'hid2:out': 300}, name='covar'),
], loss='mse')
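    # Five stacked 200-unit GRU layers map body-relative marker trajectories to Jacobian PC targets under MSE loss.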
'''
inputs = []
outputs = []
for s in range(len(bodys)):
body = bodys[s]
goal = goals[s]
jac = jacs[s]
idx = body.index & goal.index & jac.index
b = body.loc[idx, :].values
g = goal.loc[idx, :].values
inputs.append(g)#np.hstack([b, g]))
outputs.append(np.clip(jac.loc[idx, :].values, -THRESHOLD, THRESHOLD))
'''
B = 32
def batches(T):
inputs = np.zeros((B, T, nbody), 'f')
outputs = np.zeros((B, T, njac), 'f')
def batch():
for b in range(B):
|
return [inputs, outputs]
return batch
net.train(
#[np.vstack(inputs), np.vstack(outputs)],
batches(32),
algo='layerwise',
momentum=0.9,
learning_rate=0.0001,
patience=5,
min_improvement=0.01,
#max_gradient_norm=1,
#input_noise=0.001,
#hidden_l1=0.001,
#hidden_dropout=0.1,
monitors={
#'*:out': (0.1, 0.5, 0.9),
})
net.save('/tmp/posture-jacobian-gru.pkl.gz')
if __name__ == '__main__':
climate.call(main)
| idx = []
while len(idx) <= T:
s = np.random.randint(len(bodys))
body = bodys[s]
goal = goals[s]
jac = jacs[s]
idx = body.index & goal.index & jac.index
i = np.random.randint(len(idx) - T)
inputs[b] = body.loc[idx, :].iloc[i:i+T, :].values
outputs[b] = np.clip(jac.loc[idx, :].iloc[i:i+T, :].values,
-THRESHOLD, THRESHOLD) | conditional_block |
posture-to-jacobian-recurrent.py | import climate
import lmj.cubes
import lmj.plot
import numpy as np
import pandas as pd
import theanets
logging = climate.get_logger('posture->jac')
BATCH = 256
THRESHOLD = 100
def | (fn):
df = pd.read_csv(fn, index_col='time').dropna()
cols = [c for c in df.columns if c.startswith('marker') and c[-1] in 'xyz']
return df[cols].astype('f')
def load_jacobian(fn):
df = pd.read_csv(fn, index_col='time').dropna()
cols = [c for c in df.columns if c.startswith('pc')]
return df[cols].astype('f')
def main(root):
match = lmj.cubes.utils.matching
bodys = [load_markers(f) for f in sorted(match(root, '*_body.csv.gz'))]
nbody = bodys[0].shape[1]
logging.info('loaded %d body-relative files', len(bodys))
goals = [load_markers(f) for f in sorted(match(root, '*_goal.csv.gz'))]
ngoal = goals[0].shape[1]
logging.info('loaded %d goal-relative files', len(goals))
jacs = [load_jacobian(f) for f in sorted(match(root, '*_jac_pca23.csv.gz'))]
njac = jacs[0].shape[1]
logging.info('loaded %d jacobian files', len(jacs))
'''
with lmj.plot.axes() as ax:
ax.hist(np.concatenate([j.values.ravel() for j in jacs]),
bins=np.linspace(-THRESHOLD, THRESHOLD, 127), lw=0)
'''
net = theanets.recurrent.Regressor([
nbody,
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
njac,
#dict(size=njac, activation='linear', inputs={'hid2:out': 300}, name='mean'),
#dict(size=njac, activation='linear', inputs={'hid2:out': 300}, name='covar'),
], loss='mse')
'''
inputs = []
outputs = []
for s in range(len(bodys)):
body = bodys[s]
goal = goals[s]
jac = jacs[s]
idx = body.index & goal.index & jac.index
b = body.loc[idx, :].values
g = goal.loc[idx, :].values
inputs.append(g)#np.hstack([b, g]))
outputs.append(np.clip(jac.loc[idx, :].values, -THRESHOLD, THRESHOLD))
'''
B = 32
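    # Trials with fewer than T aligned frames are rejected; sampling repeats until a long-enough trial is drawn.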
def batches(T):
inputs = np.zeros((B, T, nbody), 'f')
outputs = np.zeros((B, T, njac), 'f')
def batch():
for b in range(B):
idx = []
while len(idx) <= T:
s = np.random.randint(len(bodys))
body = bodys[s]
goal = goals[s]
jac = jacs[s]
idx = body.index & goal.index & jac.index
i = np.random.randint(len(idx) - T)
inputs[b] = body.loc[idx, :].iloc[i:i+T, :].values
outputs[b] = np.clip(jac.loc[idx, :].iloc[i:i+T, :].values,
-THRESHOLD, THRESHOLD)
return [inputs, outputs]
return batch
net.train(
#[np.vstack(inputs), np.vstack(outputs)],
batches(32),
algo='layerwise',
momentum=0.9,
learning_rate=0.0001,
patience=5,
min_improvement=0.01,
#max_gradient_norm=1,
#input_noise=0.001,
#hidden_l1=0.001,
#hidden_dropout=0.1,
monitors={
#'*:out': (0.1, 0.5, 0.9),
})
net.save('/tmp/posture-jacobian-gru.pkl.gz')
if __name__ == '__main__':
climate.call(main)
| load_markers | identifier_name |
posture-to-jacobian-recurrent.py | import climate
import lmj.cubes
import lmj.plot
import numpy as np
import pandas as pd
import theanets
logging = climate.get_logger('posture->jac')
BATCH = 256
THRESHOLD = 100
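# NOTE: BATCH is defined but unused; the window sampler below hard-codes B = 32.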
def load_markers(fn):
|
def load_jacobian(fn):
df = pd.read_csv(fn, index_col='time').dropna()
cols = [c for c in df.columns if c.startswith('pc')]
return df[cols].astype('f')
def main(root):
match = lmj.cubes.utils.matching
bodys = [load_markers(f) for f in sorted(match(root, '*_body.csv.gz'))]
nbody = bodys[0].shape[1]
logging.info('loaded %d body-relative files', len(bodys))
goals = [load_markers(f) for f in sorted(match(root, '*_goal.csv.gz'))]
ngoal = goals[0].shape[1]
logging.info('loaded %d goal-relative files', len(goals))
jacs = [load_jacobian(f) for f in sorted(match(root, '*_jac_pca23.csv.gz'))]
njac = jacs[0].shape[1]
logging.info('loaded %d jacobian files', len(jacs))
'''
with lmj.plot.axes() as ax:
ax.hist(np.concatenate([j.values.ravel() for j in jacs]),
bins=np.linspace(-THRESHOLD, THRESHOLD, 127), lw=0)
'''
net = theanets.recurrent.Regressor([
nbody,
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
(200, 'sigmoid', 'gru'),
njac,
#dict(size=njac, activation='linear', inputs={'hid2:out': 300}, name='mean'),
#dict(size=njac, activation='linear', inputs={'hid2:out': 300}, name='covar'),
], loss='mse')
'''
inputs = []
outputs = []
for s in range(len(bodys)):
body = bodys[s]
goal = goals[s]
jac = jacs[s]
idx = body.index & goal.index & jac.index
b = body.loc[idx, :].values
g = goal.loc[idx, :].values
inputs.append(g)#np.hstack([b, g]))
outputs.append(np.clip(jac.loc[idx, :].values, -THRESHOLD, THRESHOLD))
'''
B = 32
def batches(T):
inputs = np.zeros((B, T, nbody), 'f')
outputs = np.zeros((B, T, njac), 'f')
def batch():
for b in range(B):
idx = []
while len(idx) <= T:
s = np.random.randint(len(bodys))
body = bodys[s]
goal = goals[s]
jac = jacs[s]
idx = body.index & goal.index & jac.index
i = np.random.randint(len(idx) - T)
inputs[b] = body.loc[idx, :].iloc[i:i+T, :].values
outputs[b] = np.clip(jac.loc[idx, :].iloc[i:i+T, :].values,
-THRESHOLD, THRESHOLD)
return [inputs, outputs]
return batch
net.train(
#[np.vstack(inputs), np.vstack(outputs)],
batches(32),
algo='layerwise',
momentum=0.9,
learning_rate=0.0001,
patience=5,
min_improvement=0.01,
#max_gradient_norm=1,
#input_noise=0.001,
#hidden_l1=0.001,
#hidden_dropout=0.1,
monitors={
#'*:out': (0.1, 0.5, 0.9),
})
net.save('/tmp/posture-jacobian-gru.pkl.gz')
if __name__ == '__main__':
climate.call(main)
| df = pd.read_csv(fn, index_col='time').dropna()
cols = [c for c in df.columns if c.startswith('marker') and c[-1] in 'xyz']
return df[cols].astype('f') | identifier_body |
updatereplicas_controller_test.js | // Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import replicationControllerDetailModule from 'replicationcontrollerdetail/replicationcontrollerdetail_module';
import UpdateReplicasDialogController from 'replicationcontrollerdetail/updatereplicas_controller';
describe('Update Replicas controller', () => {
/**
   * Update Replicas dialog controller. | let ctrl;
/** @type {!md.$dialog} */
let mdDialog;
/** @type {!ui.router.$state} */
let state;
/** @type {!angular.$resource} */
let resource;
/** @type {!angular.$httpBackend} */
let httpBackend;
/** @type {!angular.$log} */
let log;
/** @type {string} */
let namespaceMock = 'foo-namespace';
/** @type {string} */
let replicationControllerMock = 'foo-name';
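  // The controller under test is built with mocked namespace/name and a form stubbed as valid.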
beforeEach(() => {
angular.mock.module(replicationControllerDetailModule.name);
angular.mock.inject(($log, $state, $mdDialog, $controller, $httpBackend, $resource) => {
mdDialog = $mdDialog;
state = $state;
resource = $resource;
httpBackend = $httpBackend;
log = $log;
ctrl = $controller(
UpdateReplicasDialogController, {
$resource: resource,
namespace: namespaceMock,
replicationController: replicationControllerMock,
currentPods: 1,
desiredPods: 1,
},
{updateReplicasForm: {$valid: true}});
});
});
it('should update controller replicas to given number and log success', () => {
// given
let replicaSpec = {
replicas: 5,
};
spyOn(log, 'info');
spyOn(state, 'reload');
httpBackend.whenPOST('api/v1/replicationcontroller/foo-namespace/foo-name/update/pod')
.respond(200, replicaSpec);
// when
ctrl.updateReplicas();
httpBackend.flush();
// then
expect(log.info).toHaveBeenCalledWith(
`Successfully updated replicas number to ${replicaSpec.replicas}`);
expect(state.reload).toHaveBeenCalled();
});
it('should log error on failed update', () => {
// given
spyOn(log, 'error');
httpBackend.whenPOST('api/v1/replicationcontroller/foo-namespace/foo-name/update/pod')
.respond(404);
// when
ctrl.updateReplicas();
httpBackend.flush();
// then
expect(log.error).toHaveBeenCalled();
});
it('should close the dialog on cancel', () => {
spyOn(state, 'reload');
// given
spyOn(mdDialog, 'cancel');
// when
ctrl.cancel();
// then
expect(mdDialog.cancel).toHaveBeenCalled();
expect(state.reload).not.toHaveBeenCalled();
});
}); | * @type {!UpdateReplicasDialogController}
*/ | random_line_split |
listeners.py | import io
from molotov.api import get_fixture
_UNREADABLE = "***WARNING: Molotov can't display this body***"
_BINARY = "**** Binary content ****"
_FILE = "**** File content ****"
_COMPRESSED = ('gzip', 'compress', 'deflate', 'identity', 'br')
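# Bodies with these Content-Encoding values are shown as a binary placeholder rather than decoded.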
class BaseListener(object):
async def __call__(self, event, **options):
attr = getattr(self, 'on_' + event, None)
if attr is not None:
await attr(**options)
class StdoutListener(BaseListener):
def __init__(self, **options):
self.verbose = options.get('verbose', 0)
self.console = options['console']
def _body2str(self, body):
try:
from aiohttp.payload import Payload
except ImportError:
Payload = None
if Payload is not None and isinstance(body, Payload):
body = body._value
if isinstance(body, io.IOBase):
return _FILE
if not isinstance(body, str):
try:
body = str(body, 'utf8')
except UnicodeDecodeError:
return _UNREADABLE
return body
async def on_sending_request(self, session, request):
if self.verbose < 2:
|
raw = '>' * 45
raw += '\n' + request.method + ' ' + str(request.url)
if len(request.headers) > 0:
headers = '\n'.join('%s: %s' % (k, v) for k, v in
request.headers.items())
raw += '\n' + headers
if request.headers.get('Content-Encoding') in _COMPRESSED:
raw += '\n\n' + _BINARY + '\n'
elif request.body:
raw += '\n\n' + self._body2str(request.body) + '\n'
self.console.print(raw)
async def on_response_received(self, session, response, request):
if self.verbose < 2:
return
raw = '\n' + '=' * 45 + '\n'
raw += 'HTTP/1.1 %d %s\n' % (response.status, response.reason)
items = response.headers.items()
headers = '\n'.join('{}: {}'.format(k, v) for k, v in items)
raw += headers
if response.headers.get('Content-Encoding') in _COMPRESSED:
raw += '\n\n' + _BINARY
elif response.content:
content = await response.content.read()
if len(content) > 0:
# put back the data in the content
response.content.unread_data(content)
try:
raw += '\n\n' + content.decode()
except UnicodeDecodeError:
raw += '\n\n' + _UNREADABLE
else:
raw += '\n\n'
raw += '\n' + '<' * 45 + '\n'
self.console.print(raw)
class CustomListener(object):
def __init__(self, fixture):
self.fixture = fixture
async def __call__(self, event, **options):
await self.fixture(event, **options)
class EventSender(object):
def __init__(self, console, listeners=None):
self.console = console
if listeners is None:
listeners = []
self._listeners = listeners
self._stopped = False
fixture_listeners = get_fixture('events')
if fixture_listeners is not None:
for listener in fixture_listeners:
self.add_listener(CustomListener(listener))
def add_listener(self, listener):
self._listeners.append(listener)
async def stop(self):
self._stopped = True
def stopped(self):
return self._stopped
async def send_event(self, event, **options):
for listener in self._listeners:
try:
await listener(event, **options)
except Exception as e:
self.console.print_error(e)
| return | conditional_block |
listeners.py | import io
from molotov.api import get_fixture
_UNREADABLE = "***WARNING: Molotov can't display this body***"
_BINARY = "**** Binary content ****"
_FILE = "**** File content ****"
_COMPRESSED = ('gzip', 'compress', 'deflate', 'identity', 'br')
class BaseListener(object):
async def | (self, event, **options):
attr = getattr(self, 'on_' + event, None)
if attr is not None:
await attr(**options)
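# Concrete listeners implement on_<event> coroutines; BaseListener dispatches by event name.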
class StdoutListener(BaseListener):
def __init__(self, **options):
self.verbose = options.get('verbose', 0)
self.console = options['console']
def _body2str(self, body):
try:
from aiohttp.payload import Payload
except ImportError:
Payload = None
if Payload is not None and isinstance(body, Payload):
body = body._value
if isinstance(body, io.IOBase):
return _FILE
if not isinstance(body, str):
try:
body = str(body, 'utf8')
except UnicodeDecodeError:
return _UNREADABLE
return body
async def on_sending_request(self, session, request):
if self.verbose < 2:
return
raw = '>' * 45
raw += '\n' + request.method + ' ' + str(request.url)
if len(request.headers) > 0:
headers = '\n'.join('%s: %s' % (k, v) for k, v in
request.headers.items())
raw += '\n' + headers
if request.headers.get('Content-Encoding') in _COMPRESSED:
raw += '\n\n' + _BINARY + '\n'
elif request.body:
raw += '\n\n' + self._body2str(request.body) + '\n'
self.console.print(raw)
async def on_response_received(self, session, response, request):
if self.verbose < 2:
return
raw = '\n' + '=' * 45 + '\n'
raw += 'HTTP/1.1 %d %s\n' % (response.status, response.reason)
items = response.headers.items()
headers = '\n'.join('{}: {}'.format(k, v) for k, v in items)
raw += headers
if response.headers.get('Content-Encoding') in _COMPRESSED:
raw += '\n\n' + _BINARY
elif response.content:
content = await response.content.read()
if len(content) > 0:
# put back the data in the content
response.content.unread_data(content)
try:
raw += '\n\n' + content.decode()
except UnicodeDecodeError:
raw += '\n\n' + _UNREADABLE
else:
raw += '\n\n'
raw += '\n' + '<' * 45 + '\n'
self.console.print(raw)
class CustomListener(object):
def __init__(self, fixture):
self.fixture = fixture
async def __call__(self, event, **options):
await self.fixture(event, **options)
class EventSender(object):
def __init__(self, console, listeners=None):
self.console = console
if listeners is None:
listeners = []
self._listeners = listeners
self._stopped = False
fixture_listeners = get_fixture('events')
if fixture_listeners is not None:
for listener in fixture_listeners:
self.add_listener(CustomListener(listener))
def add_listener(self, listener):
self._listeners.append(listener)
async def stop(self):
self._stopped = True
def stopped(self):
return self._stopped
async def send_event(self, event, **options):
for listener in self._listeners:
try:
await listener(event, **options)
except Exception as e:
self.console.print_error(e)
| __call__ | identifier_name |
listeners.py | import io
from molotov.api import get_fixture
_UNREADABLE = "***WARNING: Molotov can't display this body***"
_BINARY = "**** Binary content ****"
_FILE = "**** File content ****"
_COMPRESSED = ('gzip', 'compress', 'deflate', 'identity', 'br')
class BaseListener(object):
async def __call__(self, event, **options):
attr = getattr(self, 'on_' + event, None)
if attr is not None:
await attr(**options)
class StdoutListener(BaseListener):
def __init__(self, **options):
self.verbose = options.get('verbose', 0)
self.console = options['console']
def _body2str(self, body):
try:
from aiohttp.payload import Payload
except ImportError:
Payload = None
if Payload is not None and isinstance(body, Payload):
body = body._value
if isinstance(body, io.IOBase):
return _FILE
if not isinstance(body, str):
try:
body = str(body, 'utf8')
except UnicodeDecodeError:
return _UNREADABLE
return body
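    # The request/response dumps below are produced only at verbosity level 2 or higher.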
async def on_sending_request(self, session, request):
if self.verbose < 2:
return
raw = '>' * 45
raw += '\n' + request.method + ' ' + str(request.url)
if len(request.headers) > 0:
headers = '\n'.join('%s: %s' % (k, v) for k, v in
request.headers.items())
raw += '\n' + headers
if request.headers.get('Content-Encoding') in _COMPRESSED:
raw += '\n\n' + _BINARY + '\n'
elif request.body:
raw += '\n\n' + self._body2str(request.body) + '\n'
self.console.print(raw)
async def on_response_received(self, session, response, request):
if self.verbose < 2:
return
raw = '\n' + '=' * 45 + '\n'
raw += 'HTTP/1.1 %d %s\n' % (response.status, response.reason)
items = response.headers.items()
headers = '\n'.join('{}: {}'.format(k, v) for k, v in items)
raw += headers | raw += '\n\n' + _BINARY
elif response.content:
content = await response.content.read()
if len(content) > 0:
# put back the data in the content
response.content.unread_data(content)
try:
raw += '\n\n' + content.decode()
except UnicodeDecodeError:
raw += '\n\n' + _UNREADABLE
else:
raw += '\n\n'
raw += '\n' + '<' * 45 + '\n'
self.console.print(raw)
class CustomListener(object):
def __init__(self, fixture):
self.fixture = fixture
async def __call__(self, event, **options):
await self.fixture(event, **options)
class EventSender(object):
def __init__(self, console, listeners=None):
self.console = console
if listeners is None:
listeners = []
self._listeners = listeners
self._stopped = False
fixture_listeners = get_fixture('events')
if fixture_listeners is not None:
for listener in fixture_listeners:
self.add_listener(CustomListener(listener))
def add_listener(self, listener):
self._listeners.append(listener)
async def stop(self):
self._stopped = True
def stopped(self):
return self._stopped
async def send_event(self, event, **options):
for listener in self._listeners:
try:
await listener(event, **options)
except Exception as e:
self.console.print_error(e) | if response.headers.get('Content-Encoding') in _COMPRESSED: | random_line_split |
listeners.py | import io
from molotov.api import get_fixture
_UNREADABLE = "***WARNING: Molotov can't display this body***"
_BINARY = "**** Binary content ****"
_FILE = "**** File content ****"
_COMPRESSED = ('gzip', 'compress', 'deflate', 'identity', 'br')
class BaseListener(object):
async def __call__(self, event, **options):
attr = getattr(self, 'on_' + event, None)
if attr is not None:
await attr(**options)
class StdoutListener(BaseListener):
|
class CustomListener(object):
def __init__(self, fixture):
self.fixture = fixture
async def __call__(self, event, **options):
await self.fixture(event, **options)
class EventSender(object):
def __init__(self, console, listeners=None):
self.console = console
if listeners is None:
listeners = []
self._listeners = listeners
self._stopped = False
fixture_listeners = get_fixture('events')
if fixture_listeners is not None:
for listener in fixture_listeners:
self.add_listener(CustomListener(listener))
def add_listener(self, listener):
self._listeners.append(listener)
async def stop(self):
self._stopped = True
def stopped(self):
return self._stopped
async def send_event(self, event, **options):
for listener in self._listeners:
try:
await listener(event, **options)
except Exception as e:
self.console.print_error(e)
| def __init__(self, **options):
self.verbose = options.get('verbose', 0)
self.console = options['console']
def _body2str(self, body):
try:
from aiohttp.payload import Payload
except ImportError:
Payload = None
if Payload is not None and isinstance(body, Payload):
body = body._value
if isinstance(body, io.IOBase):
return _FILE
if not isinstance(body, str):
try:
body = str(body, 'utf8')
except UnicodeDecodeError:
return _UNREADABLE
return body
async def on_sending_request(self, session, request):
if self.verbose < 2:
return
raw = '>' * 45
raw += '\n' + request.method + ' ' + str(request.url)
if len(request.headers) > 0:
headers = '\n'.join('%s: %s' % (k, v) for k, v in
request.headers.items())
raw += '\n' + headers
if request.headers.get('Content-Encoding') in _COMPRESSED:
raw += '\n\n' + _BINARY + '\n'
elif request.body:
raw += '\n\n' + self._body2str(request.body) + '\n'
self.console.print(raw)
async def on_response_received(self, session, response, request):
if self.verbose < 2:
return
raw = '\n' + '=' * 45 + '\n'
raw += 'HTTP/1.1 %d %s\n' % (response.status, response.reason)
items = response.headers.items()
headers = '\n'.join('{}: {}'.format(k, v) for k, v in items)
raw += headers
if response.headers.get('Content-Encoding') in _COMPRESSED:
raw += '\n\n' + _BINARY
elif response.content:
content = await response.content.read()
if len(content) > 0:
# put back the data in the content
response.content.unread_data(content)
try:
raw += '\n\n' + content.decode()
except UnicodeDecodeError:
raw += '\n\n' + _UNREADABLE
else:
raw += '\n\n'
raw += '\n' + '<' * 45 + '\n'
self.console.print(raw) | identifier_body |
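# A hedged usage sketch for the 'events' fixture consumed by EventSender above:
# molotov's @events() decorator registers coroutines that get_fixture('events')
# returns and CustomListener wraps. The handler below is illustrative only.
import molotov

@molotov.events()
async def print_request(event, **info):
    # Invoked for every emitted event; filter on the event name.
    if event == 'sending_request':
        print('about to send', info.get('request'))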
constants.ts | // Copyright (c) 2017, Daniel Andersen ([email protected])
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// 3. The name of the author may not be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
export class | {
//public static apiUrl = 'http://10.0.1.4:5001/service'
public static apiUrl = 'http://10.192.92.93:5001/service'
}
| Constants | identifier_name |
constants.ts | // Copyright (c) 2017, Daniel Andersen ([email protected])
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met: | // this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// 3. The name of the author may not be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
export class Constants {
//public static apiUrl = 'http://10.0.1.4:5001/service'
public static apiUrl = 'http://10.192.92.93:5001/service'
} | //
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, | random_line_split |
cardActionElement.js | var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
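// TypeScript-emitted helper: applies decorators right-to-left and prefers Reflect.decorate when available.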
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
define(["require", "exports", "aurelia-framework", "./../../config"], function (require, exports, aurelia_framework_1, config_1) {
"use strict";
var CardActionElement = (function () {
function | () {
}
CardActionElement.prototype.attached = function () {
this.element.classList.add("card-action");
};
CardActionElement.prototype.detached = function () {
this.element.classList.remove("card-action");
};
CardActionElement = __decorate([
aurelia_framework_1.customElement(config_1.config.cardAction),
aurelia_framework_1.containerless(),
aurelia_framework_1.inlineView("<template><div ref='element'><slot></slot></div></template>"),
__metadata('design:paramtypes', [])
], CardActionElement);
return CardActionElement;
}());
exports.CardActionElement = CardActionElement;
});
//# sourceMappingURL=cardActionElement.js.map
| CardActionElement | identifier_name |
cardActionElement.js | var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
define(["require", "exports", "aurelia-framework", "./../../config"], function (require, exports, aurelia_framework_1, config_1) {
"use strict";
var CardActionElement = (function () {
function CardActionElement() {
}
CardActionElement.prototype.attached = function () {
this.element.classList.add("card-action");
};
CardActionElement.prototype.detached = function () {
this.element.classList.remove("card-action"); | };
CardActionElement = __decorate([
aurelia_framework_1.customElement(config_1.config.cardAction),
aurelia_framework_1.containerless(),
aurelia_framework_1.inlineView("<template><div ref='element'><slot></slot></div></template>"),
__metadata('design:paramtypes', [])
], CardActionElement);
return CardActionElement;
}());
exports.CardActionElement = CardActionElement;
});
//# sourceMappingURL=cardActionElement.js.map | random_line_split |
|
cardActionElement.js | var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __metadata = (this && this.__metadata) || function (k, v) {
if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};
define(["require", "exports", "aurelia-framework", "./../../config"], function (require, exports, aurelia_framework_1, config_1) {
"use strict";
var CardActionElement = (function () {
function CardActionElement() |
CardActionElement.prototype.attached = function () {
this.element.classList.add("card-action");
};
CardActionElement.prototype.detached = function () {
this.element.classList.remove("card-action");
};
CardActionElement = __decorate([
aurelia_framework_1.customElement(config_1.config.cardAction),
aurelia_framework_1.containerless(),
aurelia_framework_1.inlineView("<template><div ref='element'><slot></slot></div></template>"),
__metadata('design:paramtypes', [])
], CardActionElement);
return CardActionElement;
}());
exports.CardActionElement = CardActionElement;
});
//# sourceMappingURL=cardActionElement.js.map
| {
} | identifier_body |
arguments.rs | use thiserror::Error;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Comm {
Type1,
Type2,
None,
}
/// A type of paired token
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Field {
/// Processes (ex: `$(..)`)
Proc,
/// Literal array (ex: `[ 1 .. 3 ]`)
Array,
/// Brace expansion (ex: `{a,b,c,d}`)
Braces,
}
/// The depth of various paired structures
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Levels {
/// Parentheses
parens: u8,
/// Array literals
array: u8,
/// Braces
braces: u8,
}
/// Error with paired tokens
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Error)]
pub enum LevelsError {
    /// Unmatched opening parenthesis
    #[error("unmatched opening parenthesis")]
UnmatchedParen,
/// Unmatched opening bracket
#[error("unmatched opening bracket")]
UnmatchedBracket,
/// Unmatched opening brace
#[error("unmatched opening brace")]
UnmatchedBrace,
    /// Extra closing parenthesis(es)
    #[error("extra closing parenthesis(es)")]
ExtraParen,
/// Extra closing bracket(s)
#[error("extra closing bracket(s)")]
ExtraBracket,
/// Extra closing brace(s)
#[error("extra closing brace(s)")]
ExtraBrace,
}
impl Levels {
/// Add a new depth level
pub fn up(&mut self, field: Field) {
let level = match field {
Field::Proc => &mut self.parens,
Field::Array => &mut self.array,
Field::Braces => &mut self.braces,
};
*level += 1;
}
/// Close paired tokens
pub fn down(&mut self, field: Field) -> Result<(), LevelsError> {
let level = match field {
Field::Proc if self.parens > 0 => &mut self.parens,
Field::Array if self.array > 0 => &mut self.array,
Field::Braces if self.braces > 0 => &mut self.braces,
// errors
Field::Proc => return Err(LevelsError::ExtraParen),
Field::Array => return Err(LevelsError::ExtraBracket),
Field::Braces => return Err(LevelsError::ExtraBrace),
};
*level -= 1;
Ok(())
}
    /// Check if all parens were matched
pub const fn are_rooted(self) -> bool {
self.parens == 0 && self.array == 0 && self.braces == 0
}
/// Check if all is ok
pub const fn check(self) -> Result<(), LevelsError> {
if self.parens > 0 {
Err(LevelsError::UnmatchedParen)
} else if self.array > 0 {
Err(LevelsError::UnmatchedBracket)
} else if self.braces > 0 {
Err(LevelsError::UnmatchedBrace)
} else {
Ok(())
}
}
}
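// Levels lets the splitter treat spaces as separators only at root level (no open parens/brackets/braces).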
/// An efficient `Iterator` structure for splitting arguments
#[derive(Debug)]
pub struct ArgumentSplitter<'a> {
data: &'a str,
/// Number of bytes read
read: usize,
comm: Comm,
quotes: bool,
variab: bool,
array: bool,
method: bool,
}
impl<'a> ArgumentSplitter<'a> {
/// Create a new argument splitter based on the provided data
pub const fn new(data: &'a str) -> ArgumentSplitter<'a> {
ArgumentSplitter {
data,
read: 0,
comm: Comm::None,
quotes: false,
variab: false,
array: false,
method: false,
}
}
fn | <B: Iterator<Item = u8>>(&mut self, bytes: &mut B) {
while let Some(character) = bytes.next() {
match character {
b'\\' => {
self.read += 2;
let _ = bytes.next();
continue;
}
b'\'' => break,
_ => (),
}
self.read += 1;
}
}
}
impl<'a> Iterator for ArgumentSplitter<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<&'a str> {
let data = self.data.as_bytes();
while let Some(&b' ') = data.get(self.read) {
self.read += 1;
}
let start = self.read;
let mut levels = Levels::default();
let mut bytes = data.iter().skip(self.read).copied();
while let Some(character) = bytes.next() {
match character {
// Skip the next byte.
b'\\' => {
self.read += 2;
let _ = bytes.next();
continue;
}
// Disable COMM_1 and enable COMM_2 + ARRAY.
b'@' => {
self.array = true;
self.comm = Comm::Type2;
self.read += 1;
continue;
}
// Disable COMM_2 and enable COMM_1 + VARIAB.
b'$' => {
self.variab = true;
self.comm = Comm::Type1;
self.read += 1;
continue;
}
b'[' => levels.up(Field::Array),
b']' => {
let _ = levels.down(Field::Array);
}
b'{' => levels.up(Field::Braces),
b'}' => {
// TODO: handle errors here
let _ = levels.down(Field::Braces);
}
b'(' => {
// Disable VARIAB + ARRAY and enable METHOD.
// if variab or array are set
if self.array || self.variab {
self.array = false;
self.variab = false;
self.method = true;
}
levels.up(Field::Proc);
}
b')' => {
self.method = false;
let _ = levels.down(Field::Proc);
}
// Toggle double quote rules.
b'"' => {
self.quotes ^= true;
}
// Loop through characters until single quote rules are completed.
b'\'' if !self.quotes => {
self.scan_singlequotes(&mut bytes);
self.read += 2;
continue;
}
// Break from the loop once a root-level space is found.
b' ' => {
if !self.quotes && !self.method && levels.are_rooted() {
break;
}
}
_ => (),
}
self.read += 1;
// disable COMM_1 and COMM_2
self.comm = Comm::None;
}
if start == self.read {
None
} else {
Some(&self.data[start..self.read])
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn compare(input: &str, expected: Vec<&str>) {
let arguments = ArgumentSplitter::new(input).collect::<Vec<&str>>();
for (left, right) in expected.iter().zip(arguments.iter()) {
assert_eq!(left, right);
}
assert_eq!(expected.len(), arguments.len());
}
#[test]
fn methods() {
let input = "echo $join(array, ', ') @split(var, ', ')";
let expected = vec!["echo", "$join(array, ', ')", "@split(var, ', ')"];
compare(input, expected);
}
#[test]
fn processes() {
let input = "echo $(echo one $(echo two)) @[echo one @[echo two]]";
let expected = vec!["echo", "$(echo one $(echo two))", "@[echo one @[echo two]]"];
compare(input, expected);
}
#[test]
fn arrays() {
let input = "echo [ one two @[echo three four] five ] [ six seven ]";
let expected = vec!["echo", "[ one two @[echo three four] five ]", "[ six seven ]"];
compare(input, expected);
}
#[test]
fn quotes() {
let input = "echo 'one two \"three four\"' \"five six 'seven eight'\"";
let expected = vec!["echo", "'one two \"three four\"'", "\"five six 'seven eight'\""];
compare(input, expected);
}
}
| scan_singlequotes | identifier_name |
arguments.rs | use thiserror::Error;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Comm {
Type1,
Type2,
None,
}
/// A type of paired token
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Field {
/// Processes (ex: `$(..)`)
Proc,
/// Literal array (ex: `[ 1 .. 3 ]`)
Array,
/// Brace expansion (ex: `{a,b,c,d}`)
Braces,
}
/// The depth of various paired structures
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Levels {
/// Parentheses
parens: u8,
/// Array literals
array: u8,
/// Braces
braces: u8,
}
/// Error with paired tokens
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Error)]
pub enum LevelsError {
    /// Unmatched opening parenthesis
    #[error("unmatched opening parenthesis")]
UnmatchedParen,
/// Unmatched opening bracket
#[error("unmatched opening bracket")]
UnmatchedBracket,
/// Unmatched opening brace
#[error("unmatched opening brace")]
UnmatchedBrace,
    /// Extra closing parenthesis(es)
    #[error("extra closing parenthesis(es)")]
ExtraParen,
/// Extra closing bracket(s)
#[error("extra closing bracket(s)")]
ExtraBracket,
/// Extra closing brace(s)
#[error("extra closing brace(s)")]
ExtraBrace,
}
impl Levels {
/// Add a new depth level
pub fn up(&mut self, field: Field) {
let level = match field {
Field::Proc => &mut self.parens,
Field::Array => &mut self.array,
Field::Braces => &mut self.braces,
};
*level += 1;
}
/// Close paired tokens
pub fn down(&mut self, field: Field) -> Result<(), LevelsError> {
let level = match field {
Field::Proc if self.parens > 0 => &mut self.parens,
Field::Array if self.array > 0 => &mut self.array,
Field::Braces if self.braces > 0 => &mut self.braces,
// errors
Field::Proc => return Err(LevelsError::ExtraParen),
Field::Array => return Err(LevelsError::ExtraBracket),
Field::Braces => return Err(LevelsError::ExtraBrace),
};
*level -= 1;
Ok(())
}
    /// Check if all parens were matched
pub const fn are_rooted(self) -> bool {
self.parens == 0 && self.array == 0 && self.braces == 0
}
/// Check if all is ok
pub const fn check(self) -> Result<(), LevelsError> {
if self.parens > 0 {
Err(LevelsError::UnmatchedParen)
} else if self.array > 0 {
Err(LevelsError::UnmatchedBracket)
} else if self.braces > 0 {
Err(LevelsError::UnmatchedBrace)
} else {
Ok(())
}
}
}
/// An efficient `Iterator` structure for splitting arguments
#[derive(Debug)]
pub struct ArgumentSplitter<'a> {
data: &'a str,
/// Number of bytes read
read: usize,
comm: Comm,
quotes: bool,
variab: bool,
array: bool,
method: bool,
}
impl<'a> ArgumentSplitter<'a> {
/// Create a new argument splitter based on the provided data
pub const fn new(data: &'a str) -> ArgumentSplitter<'a> {
ArgumentSplitter {
data,
read: 0,
comm: Comm::None,
quotes: false,
variab: false,
array: false,
method: false,
}
}
fn scan_singlequotes<B: Iterator<Item = u8>>(&mut self, bytes: &mut B) {
while let Some(character) = bytes.next() {
match character {
b'\\' => {
self.read += 2;
let _ = bytes.next();
continue;
}
b'\'' => break,
_ => (),
}
self.read += 1;
}
}
}
impl<'a> Iterator for ArgumentSplitter<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<&'a str> {
let data = self.data.as_bytes();
while let Some(&b' ') = data.get(self.read) {
self.read += 1;
}
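        // Scan one argument: track nesting and quoting until an unquoted root-level space ends it.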
let start = self.read;
let mut levels = Levels::default();
let mut bytes = data.iter().skip(self.read).copied();
while let Some(character) = bytes.next() {
match character {
// Skip the next byte.
b'\\' => {
self.read += 2;
let _ = bytes.next();
continue;
}
// Disable COMM_1 and enable COMM_2 + ARRAY.
b'@' => {
self.array = true;
self.comm = Comm::Type2;
self.read += 1;
continue;
}
// Disable COMM_2 and enable COMM_1 + VARIAB.
b'$' => {
self.variab = true;
self.comm = Comm::Type1;
self.read += 1;
continue;
}
b'[' => levels.up(Field::Array),
b']' => {
let _ = levels.down(Field::Array);
}
b'{' => levels.up(Field::Braces),
b'}' => {
// TODO: handle errors here
let _ = levels.down(Field::Braces);
}
b'(' => {
// Disable VARIAB + ARRAY and enable METHOD.
// if variab or array are set
if self.array || self.variab {
self.array = false;
self.variab = false;
self.method = true;
}
levels.up(Field::Proc);
}
b')' => {
self.method = false;
let _ = levels.down(Field::Proc);
}
// Toggle double quote rules.
b'"' => {
self.quotes ^= true;
}
// Loop through characters until single quote rules are completed.
b'\'' if !self.quotes => {
self.scan_singlequotes(&mut bytes);
self.read += 2;
continue;
}
// Break from the loop once a root-level space is found. | b' ' => {
if !self.quotes && !self.method && levels.are_rooted() {
break;
}
}
_ => (),
}
self.read += 1;
// disable COMM_1 and COMM_2
self.comm = Comm::None;
}
if start == self.read {
None
} else {
Some(&self.data[start..self.read])
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn compare(input: &str, expected: Vec<&str>) {
let arguments = ArgumentSplitter::new(input).collect::<Vec<&str>>();
for (left, right) in expected.iter().zip(arguments.iter()) {
assert_eq!(left, right);
}
assert_eq!(expected.len(), arguments.len());
}
#[test]
fn methods() {
let input = "echo $join(array, ', ') @split(var, ', ')";
let expected = vec!["echo", "$join(array, ', ')", "@split(var, ', ')"];
compare(input, expected);
}
#[test]
fn processes() {
let input = "echo $(echo one $(echo two)) @[echo one @[echo two]]";
let expected = vec!["echo", "$(echo one $(echo two))", "@[echo one @[echo two]]"];
compare(input, expected);
}
#[test]
fn arrays() {
let input = "echo [ one two @[echo three four] five ] [ six seven ]";
let expected = vec!["echo", "[ one two @[echo three four] five ]", "[ six seven ]"];
compare(input, expected);
}
#[test]
fn quotes() {
let input = "echo 'one two \"three four\"' \"five six 'seven eight'\"";
let expected = vec!["echo", "'one two \"three four\"'", "\"five six 'seven eight'\""];
compare(input, expected);
}
} | random_line_split |
|
arguments.rs | use thiserror::Error;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Comm {
Type1,
Type2,
None,
}
/// A type of paired token
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Field {
/// Processes (ex: `$(..)`)
Proc,
/// Literal array (ex: `[ 1 .. 3 ]`)
Array,
/// Brace expansion (ex: `{a,b,c,d}`)
Braces,
}
/// The depth of various paired structures
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Levels {
/// Parentheses
parens: u8,
/// Array literals
array: u8,
/// Braces
braces: u8,
}
/// Error with paired tokens
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Error)]
pub enum LevelsError {
    /// Unmatched opening parenthesis
    #[error("unmatched opening parenthesis")]
UnmatchedParen,
/// Unmatched opening bracket
#[error("unmatched opening bracket")]
UnmatchedBracket,
/// Unmatched opening brace
#[error("unmatched opening brace")]
UnmatchedBrace,
    /// Extra closing parenthesis(es)
    #[error("extra closing parenthesis(es)")]
ExtraParen,
/// Extra closing bracket(s)
#[error("extra closing bracket(s)")]
ExtraBracket,
/// Extra closing brace(s)
#[error("extra closing brace(s)")]
ExtraBrace,
}
impl Levels {
/// Add a new depth level
pub fn up(&mut self, field: Field) {
let level = match field {
Field::Proc => &mut self.parens,
Field::Array => &mut self.array,
Field::Braces => &mut self.braces,
};
*level += 1;
}
/// Close paired tokens
pub fn down(&mut self, field: Field) -> Result<(), LevelsError> {
let level = match field {
Field::Proc if self.parens > 0 => &mut self.parens,
Field::Array if self.array > 0 => &mut self.array,
Field::Braces if self.braces > 0 => &mut self.braces,
// errors
Field::Proc => return Err(LevelsError::ExtraParen),
Field::Array => return Err(LevelsError::ExtraBracket),
Field::Braces => return Err(LevelsError::ExtraBrace),
};
*level -= 1;
Ok(())
}
    /// Check if all parens were matched
pub const fn are_rooted(self) -> bool {
self.parens == 0 && self.array == 0 && self.braces == 0
}
/// Check if all is ok
pub const fn check(self) -> Result<(), LevelsError> {
if self.parens > 0 {
Err(LevelsError::UnmatchedParen)
} else if self.array > 0 {
Err(LevelsError::UnmatchedBracket)
} else if self.braces > 0 {
Err(LevelsError::UnmatchedBrace)
} else {
Ok(())
}
}
}
/// An efficient `Iterator` structure for splitting arguments
#[derive(Debug)]
pub struct ArgumentSplitter<'a> {
data: &'a str,
/// Number of bytes read
read: usize,
comm: Comm,
quotes: bool,
variab: bool,
array: bool,
method: bool,
}
impl<'a> ArgumentSplitter<'a> {
/// Create a new argument splitter based on the provided data
pub const fn new(data: &'a str) -> ArgumentSplitter<'a> {
ArgumentSplitter {
data,
read: 0,
comm: Comm::None,
quotes: false,
variab: false,
array: false,
method: false,
}
}
fn scan_singlequotes<B: Iterator<Item = u8>>(&mut self, bytes: &mut B) {
while let Some(character) = bytes.next() {
match character {
b'\\' => {
self.read += 2;
let _ = bytes.next();
continue;
}
b'\'' => break,
_ => (),
}
self.read += 1;
}
}
}
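// The Iterator impl below yields borrowed &str slices of the input; no per-argument allocation occurs.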
impl<'a> Iterator for ArgumentSplitter<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<&'a str> {
let data = self.data.as_bytes();
while let Some(&b' ') = data.get(self.read) {
self.read += 1;
}
let start = self.read;
let mut levels = Levels::default();
let mut bytes = data.iter().skip(self.read).copied();
while let Some(character) = bytes.next() {
match character {
// Skip the next byte.
b'\\' => {
self.read += 2;
let _ = bytes.next();
continue;
}
// Disable COMM_1 and enable COMM_2 + ARRAY.
b'@' => {
self.array = true;
self.comm = Comm::Type2;
self.read += 1;
continue;
}
// Disable COMM_2 and enable COMM_1 + VARIAB.
b'$' => {
self.variab = true;
self.comm = Comm::Type1;
self.read += 1;
continue;
}
b'[' => levels.up(Field::Array),
b']' => {
let _ = levels.down(Field::Array);
}
b'{' => levels.up(Field::Braces),
b'}' => {
// TODO: handle errors here
let _ = levels.down(Field::Braces);
}
b'(' => {
// Disable VARIAB + ARRAY and enable METHOD.
// if variab or array are set
if self.array || self.variab {
self.array = false;
self.variab = false;
self.method = true;
}
levels.up(Field::Proc);
}
b')' => {
self.method = false;
let _ = levels.down(Field::Proc);
}
// Toggle double quote rules.
b'"' => {
self.quotes ^= true;
}
// Loop through characters until single quote rules are completed.
b'\'' if !self.quotes => {
self.scan_singlequotes(&mut bytes);
self.read += 2;
continue;
}
// Break from the loop once a root-level space is found.
b' ' => {
if !self.quotes && !self.method && levels.are_rooted() {
break;
}
}
_ => (),
}
self.read += 1;
// disable COMM_1 and COMM_2
self.comm = Comm::None;
}
if start == self.read {
None
} else {
Some(&self.data[start..self.read])
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn compare(input: &str, expected: Vec<&str>) {
let arguments = ArgumentSplitter::new(input).collect::<Vec<&str>>();
for (left, right) in expected.iter().zip(arguments.iter()) {
assert_eq!(left, right);
}
assert_eq!(expected.len(), arguments.len());
}
#[test]
fn methods() {
let input = "echo $join(array, ', ') @split(var, ', ')";
let expected = vec!["echo", "$join(array, ', ')", "@split(var, ', ')"];
compare(input, expected);
}
#[test]
fn processes() {
let input = "echo $(echo one $(echo two)) @[echo one @[echo two]]";
let expected = vec!["echo", "$(echo one $(echo two))", "@[echo one @[echo two]]"];
compare(input, expected);
}
#[test]
fn arrays() {
let input = "echo [ one two @[echo three four] five ] [ six seven ]";
let expected = vec!["echo", "[ one two @[echo three four] five ]", "[ six seven ]"];
compare(input, expected);
}
#[test]
fn quotes() |
}
| {
let input = "echo 'one two \"three four\"' \"five six 'seven eight'\"";
let expected = vec!["echo", "'one two \"three four\"'", "\"five six 'seven eight'\""];
compare(input, expected);
} | identifier_body |
_windows.py | #*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
from . import _jvmfinder
try:
import _winreg as winreg
except ImportError:
import winreg # in Py3, winreg has been moved
# ------------------------------------------------------------------------------
class WindowsJVMFinder(_jvmfinder.JVMFinder):
| """
    Windows JVM library finder class
"""
def __init__(self):
"""
Sets up members
"""
# Call the parent constructor
_jvmfinder.JVMFinder.__init__(self)
# Library file name
self._libfile = "jvm.dll"
# Search methods
self._methods = (self._get_from_java_home,
self._get_from_registry)
def _get_from_registry(self):
"""
Retrieves the path to the default Java installation stored in the
Windows registry
:return: The path found in the registry, or None
"""
        try:
jreKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
r"SOFTWARE\JavaSoft\Java Runtime Environment")
cv = winreg.QueryValueEx(jreKey, "CurrentVersion")
versionKey = winreg.OpenKey(jreKey, cv[0])
winreg.CloseKey(jreKey)
cv = winreg.QueryValueEx(versionKey, "RuntimeLib")
winreg.CloseKey(versionKey)
return cv[0]
except WindowsError:
return None | identifier_body |
|
_windows.py | #*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
from . import _jvmfinder
try:
import _winreg as winreg
except ImportError:
import winreg # in Py3, winreg has been moved
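# On non-Windows platforms neither import succeeds; this module is Windows-only.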
# ------------------------------------------------------------------------------
class | (_jvmfinder.JVMFinder):
"""
    Windows JVM library finder class
"""
def __init__(self):
"""
Sets up members
"""
# Call the parent constructor
_jvmfinder.JVMFinder.__init__(self)
# Library file name
self._libfile = "jvm.dll"
# Search methods
self._methods = (self._get_from_java_home,
self._get_from_registry)
def _get_from_registry(self):
"""
Retrieves the path to the default Java installation stored in the
Windows registry
:return: The path found in the registry, or None
"""
        try:
jreKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
r"SOFTWARE\JavaSoft\Java Runtime Environment")
cv = winreg.QueryValueEx(jreKey, "CurrentVersion")
versionKey = winreg.OpenKey(jreKey, cv[0])
winreg.CloseKey(jreKey)
cv = winreg.QueryValueEx(versionKey, "RuntimeLib")
winreg.CloseKey(versionKey)
return cv[0]
except WindowsError:
return None
| WindowsJVMFinder | identifier_name |
_windows.py | #*****************************************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
from . import _jvmfinder
try:
import _winreg as winreg
except ImportError:
import winreg # in Py3, winreg has been moved
# ------------------------------------------------------------------------------
class WindowsJVMFinder(_jvmfinder.JVMFinder):
"""
    Windows JVM library finder class
"""
def __init__(self):
"""
Sets up members
"""
# Call the parent constructor
_jvmfinder.JVMFinder.__init__(self)
# Library file name
self._libfile = "jvm.dll"
# Search methods
self._methods = (self._get_from_java_home,
self._get_from_registry)
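        # Search order: JAVA_HOME first, then the Windows registry.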
def _get_from_registry(self):
"""
Retrieves the path to the default Java installation stored in the | try :
jreKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
r"SOFTWARE\JavaSoft\Java Runtime Environment")
cv = winreg.QueryValueEx(jreKey, "CurrentVersion")
versionKey = winreg.OpenKey(jreKey, cv[0])
winreg.CloseKey(jreKey)
cv = winreg.QueryValueEx(versionKey, "RuntimeLib")
winreg.CloseKey(versionKey)
return cv[0]
except WindowsError:
return None | Windows registry
:return: The path found in the registry, or None
""" | random_line_split |
test.ts | import SimplenoteImporter, { convertModificationDates } from './';
import CoreImporter from '../';
jest.mock('../');
describe('SimplenoteImporter', () => {
let importer;
beforeEach(() => {
importer = new SimplenoteImporter(() => {});
importer.emit = jest.spyOn(importer, 'emit');
CoreImporter.mockClear();
CoreImporter.mockImplementation(function () {
this.importNotes = jest.fn(() => ({
then: (callback) => callback(),
}));
});
});
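  // The mocked importNotes returns a synchronous thenable, so the tests avoid real async plumbing.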
it('should emit error when no notes are passed', () => {
importer.importNotes();
expect(importer.emit).toBeCalledWith(
'status',
'error',
'No file to import.'
);
});
it.skip('should call coreImporter.importNotes with all notes and options', () => {
return new Promise((done) => {
const notes = {
activeNotes: [{}, {}],
trashedNotes: [{}],
};
importer.on('status', () => {
const args = CoreImporter.mock.instances[0].importNotes.mock.calls[0];
expect(args[0].activeNotes).toHaveLength(2);
expect(args[0].trashedNotes).toHaveLength(1);
expect(args[1].foo).toBe(true);
done();
});
importer.importNotes([new File([JSON.stringify(notes)], 'foo.json')]);
});
});
describe('convertModificationDates', () => {
it('should convert `lastModified` ISO strings to `modificationDate` Unix timestamps', () => {
const processedNotes = convertModificationDates([
{
lastModified: '2018-10-15T14:09:10.382Z',
otherProp: 'value',
},
{ | otherProp: 'value',
},
]);
expect(processedNotes).toEqual([
{
modificationDate: 1539612550.382,
otherProp: 'value',
},
{
modificationDate: '1539612550',
otherProp: 'value',
},
]);
});
it('should not add undefined properties', () => {
const processedNotes = convertModificationDates([{}]);
expect(Object.keys(processedNotes[0])).toHaveLength(0);
});
});
}); | modificationDate: '1539612550', | random_line_split |
lib.rs | //! # rust-hackchat
//! A client library for Hack.chat.
//!
//! This library allows you to make custom clients and bots for Hack.chat using Rust.
//!
//! # Examples
//!
//! ```
//! extern crate hackchat;
//! use hackchat::{ChatClient, ChatEvent};
//!
//! fn main() {
//! let mut conn = ChatClient::new("TestBot", "botDev"); //Connects to the ?botDev channel
//! conn.start_ping_thread(); //Sends ping packets regularly
//!
//! for event in conn.iter() {
//! match event {
//! ChatEvent::Message(nick, message, trip_code) => {
//! println!("<{}> {}", nick, message);
//! },
//! _ => {}
//! }
//! }
//! }
//! ```
extern crate websocket;
#[macro_use] extern crate serde_json;
extern crate rustc_serialize;
use std::thread;
use rustc_serialize::json;
use websocket::{Client, Message, WebSocketStream};
use websocket::message::Type;
use websocket::client::request::Url;
use websocket::sender::Sender;
use websocket::receiver::Receiver;
use websocket::ws::sender::Sender as SenderTrait;
use websocket::ws::receiver::Receiver as ReceiverTrait;
use std::sync::Arc;
use std::sync::Mutex;
/// The main struct responsible for the connection and events.
#[derive(Clone)]
pub struct ChatClient {
nick: String,
channel: String,
sender: Arc<Mutex<Sender<WebSocketStream>>>,
receiver: Arc<Mutex<Receiver<WebSocketStream>>>,
}
impl ChatClient {
/// Creates a new connection to hack.chat.
///
/// ```
/// let mut chat = ChatClient::new("WikiBot", "programming");
/// // Joins ?programming with the nick "WikiBot"
/// ```
pub fn new(nick: &str, channel: &str) -> ChatClient {
let url = Url::parse("wss://hack.chat/chat-ws").unwrap();
let request = Client::connect(url).unwrap();
let response = request.send().unwrap();
let client = response.begin();
let (mut sender, receiver) = client.split();
let join_packet = json!({
"cmd": "join",
"nick": nick,
"channel": channel
});
let message = Message::text(join_packet.to_string());
sender.send_message(&message).unwrap();
return ChatClient {
nick: nick.to_string(),
channel: channel.to_string(),
sender: Arc::new(Mutex::new(sender)),
receiver: Arc::new(Mutex::new(receiver))
};
}
/// Sends a message to the current channel.
///
/// ```
/// let mut chat = ChatClient::new("TestBot", "botDev");
/// chat.send_message("Hello there people".to_string());
/// ```
///
/// ```
/// let mut chat = ChatClient::new("TestBot", "botDev");
///
/// let problem_count = 99;
/// chat.send_message(format!("I got {} problems but Rust ain't one", problem_count));
/// ```
pub fn send_message(&mut self, message: String) {
let chat_packet = json!({
"cmd": "chat",
"text": message
});
let message = Message::text(chat_packet.to_string());
self.sender.lock().unwrap().send_message(&message).unwrap();
}
fn send_ping(&mut self) {
let ping_packet = json!({
"cmd": "ping"
});
let message = Message::text(ping_packet.to_string());
self.sender.lock().unwrap().send_message(&message).unwrap();
}
/// Sends a stats request, which results in an Info event that has the number of connected
/// IPs and channels.
pub fn send_stats_request(&mut self) {
let stats_packet = json!({
"cmd": "stats"
});
let message = Message::text(stats_packet.to_string());
self.sender.lock().unwrap().send_message(&message).unwrap();
}
/// Starts the ping thread, which sends regular pings to keep the connection open.
pub fn start_ping_thread(&mut self) {
let mut chat_clone = self.clone();
thread::spawn(move|| {
loop {
thread::sleep_ms(60 * 1000);
chat_clone.send_ping();
}
});
}
/// Returns an iterator of hack.chat events such as messages.
///
/// #Examples
/// ```
/// let mut chat = ChatClient::new("GreetingBot", "botDev");
/// chat.start_ping_thread(); //Start the ping thread so we keep connected
///
/// for event in chat.iter() {
/// match event {
/// ChatEvent::JoinRoom(nick) => {
/// chat.send_message(format!("Welcome to the chat {}!", nick));
/// },
/// ChatEvent::LeaveRoom(nick) => {
/// chat.send_message(format!("Goodbye {}, see you later!", nick));
/// },
/// _ => {}
/// }
/// }
/// ```
pub fn iter(&mut self) -> ChatClient {
return self.clone();
}
}
impl Iterator for ChatClient {
type Item = ChatEvent;
fn next(&mut self) -> Option<ChatEvent> {
loop {
let message: Message = match self.receiver.lock().unwrap().recv_message() {
Ok(message) => message,
Err(e) => {
println!("{}", e);
continue;
}
};
match message.opcode {
Type::Text => {
let data = std::str::from_utf8(&*message.payload).unwrap();
let cmdpacket: serde_json::Value = match serde_json::from_slice(&*message.payload) {
Ok(packet) => packet,
Err(e) => {
println!("{}", e);
continue;
}
};
match cmdpacket.get("cmd").unwrap_or(&serde_json::Value::Null).as_str() {
Some("chat") => {
let decodedpacket: ChatPacket = json::decode(&data).unwrap();
if decodedpacket.nick != self.nick {
return Some(ChatEvent::Message (
decodedpacket.nick,
decodedpacket.text,
decodedpacket.trip.unwrap_or("".to_string())
));
}else {
continue;
}
},
Some("info") => {
let decodedpacket: InfoWarnPacket = json::decode(&data).unwrap();
return Some(ChatEvent::Info (
decodedpacket.text
));
},
Some("onlineAdd") => {
let decodedpacket: OnlineChangePacket = json::decode(&data).unwrap();
return Some(ChatEvent::JoinRoom (
decodedpacket.nick
));
},
Some("onlineRemove") => {
let decodedpacket: OnlineChangePacket = json::decode(&data).unwrap();
return Some(ChatEvent::LeaveRoom (
decodedpacket.nick
));
},
_ => |
}
},
Type::Ping => {
self.sender.lock().unwrap().send_message(&Message::pong(message.payload)).unwrap();
},
_ => {
return None;
}
};
return None;
}
}
}
/// Various Hack.chat events
pub enum ChatEvent {
/// Raised when there is a new message from the channel
///
/// The format is ChatEvent::Message(nick, text, trip_code)
Message (String, String, String),
/// Raised when someone joins the channel
///
/// The format is ChatEvent::JoinRoom(nick)
JoinRoom (String),
/// Raised when someone leaves the channel
///
/// The format is ChatEvent::LeaveRoom(nick)
LeaveRoom (String),
/// Raised when there is an event from the channel itself.
/// Some examples include:
///
/// * The result of the stats requests
/// * A user being banned.
Info (String)
}
#[derive(RustcEncodable, RustcDecodable)]
struct GenericPacket {
cmd: String
}
#[derive(RustcDecodable)]
struct ChatPacket {
nick: String,
text: String,
trip: Option<String>
}
#[derive(RustcDecodable)]
struct OnlineChangePacket {
nick: String
}
#[derive(RustcDecodable)]
struct InfoWarnPacket {
text: String
}
| {
println!("Unsupported message type");
continue;
} | conditional_block |
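Putting the pieces of this client together, a minimal echo-style bot could look like the sketch below; the nick, channel, and reply format are invented for illustration, and it leans on ChatClient being Clone so one handle can send while another iterates.
fn main() {
    let mut conn = ChatClient::new("EchoBot", "botDev");
    conn.start_ping_thread();
    let mut sender = conn.clone(); // second handle for sending inside the loop
    for event in conn.iter() {
        match event {
            ChatEvent::Message(nick, text, _trip) => {
                sender.send_message(format!("{} said: {}", nick, text));
            },
            _ => {}
        }
    }
}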
lib.rs | //! #rust-hackchat
//! A client library for Hack.chat.
//!
//! This library allows you to make custom clients and bots for Hack.chat using Rust.
//!
//! #Examples
//!
//! ```
//! extern crate hackchat;
//! use hackchat::{ChatClient, ChatEvent};
//!
//! fn main() {
//! let mut conn = ChatClient::new("TestBot", "botDev"); //Connects to the ?botDev channel
//! conn.start_ping_thread(); //Sends ping packets regularly
//!
//! for event in conn.iter() {
//! match event {
//! ChatEvent::Message(nick, message, trip_code) => {
//! println!("<{}> {}", nick, message);
//! },
//! _ => {}
//! }
//! }
//! }
//! ```
extern crate websocket;
#[macro_use] extern crate serde_json;
extern crate rustc_serialize;
use std::thread;
use rustc_serialize::json;
use websocket::{Client, Message, WebSocketStream};
use websocket::message::Type;
use websocket::client::request::Url;
use websocket::sender::Sender;
use websocket::receiver::Receiver;
use websocket::ws::sender::Sender as SenderTrait;
use websocket::ws::receiver::Receiver as ReceiverTrait;
use std::sync::Arc;
use std::sync::Mutex;
/// The main struct responsible for the connection and events.
#[derive(Clone)]
pub struct ChatClient {
nick: String,
channel: String,
sender: Arc<Mutex<Sender<WebSocketStream>>>,
receiver: Arc<Mutex<Receiver<WebSocketStream>>>,
}
impl ChatClient {
/// Creates a new connection to hack.chat.
///
/// ```
/// let mut chat = ChatClient::new("WikiBot", "programming");
/// // Joins ?programming with the nick "WikiBot"
/// ```
pub fn new(nick: &str, channel: &str) -> ChatClient {
let url = Url::parse("wss://hack.chat/chat-ws").unwrap();
let request = Client::connect(url).unwrap();
let response = request.send().unwrap();
let client = response.begin();
let (mut sender, receiver) = client.split();
let join_packet = json!({
"cmd": "join",
"nick": nick,
"channel": channel
});
let message = Message::text(join_packet.to_string());
sender.send_message(&message).unwrap();
return ChatClient {
nick: nick.to_string(),
channel: channel.to_string(),
sender: Arc::new(Mutex::new(sender)),
receiver: Arc::new(Mutex::new(receiver))
};
}
/// Sends a message to the current channel.
///
/// ```
/// let mut chat = ChatClient::new("TestBot", "botDev");
/// chat.send_message("Hello there people".to_string());
/// ```
///
/// ```
/// let mut chat = ChatClient::new("TestBot", "botDev");
///
/// let problem_count = 99;
/// chat.send_message(format!("I got {} problems but Rust ain't one", problem_count));
/// ```
pub fn send_message(&mut self, message: String) {
let chat_packet = json!({
"cmd": "chat",
"text": message
});
let message = Message::text(chat_packet.to_string());
self.sender.lock().unwrap().send_message(&message).unwrap();
}
fn send_ping(&mut self) {
let ping_packet = json!({
"cmd": "ping"
});
let message = Message::text(ping_packet.to_string());
self.sender.lock().unwrap().send_message(&message).unwrap();
}
/// Sends a stats request, which results in an Info event that has the number of connected
/// IPs and channels.
pub fn send_stats_request(&mut self) {
let stats_packet = json!({
"cmd": "stats"
});
let message = Message::text(stats_packet.to_string());
self.sender.lock().unwrap().send_message(&message).unwrap();
}
/// Starts the ping thread, which sends regular pings to keep the connection open.
pub fn start_ping_thread(&mut self) {
let mut chat_clone = self.clone();
thread::spawn(move|| {
loop {
thread::sleep_ms(60 * 1000);
chat_clone.send_ping();
}
});
}
/// Returns an iterator of hack.chat events such as messages.
///
/// #Examples
/// ```
/// let mut chat = ChatClient::new("GreetingBot", "botDev");
/// chat.start_ping_thread(); //Start the ping thread so we keep connected
/// | /// ChatEvent::LeaveRoom(nick) => {
/// chat.send_message(format!("Goodbye {}, see you later!", nick));
/// },
/// _ => {}
/// }
/// }
/// ```
pub fn iter(&mut self) -> ChatClient {
return self.clone();
}
}
impl Iterator for ChatClient {
type Item = ChatEvent;
fn next(&mut self) -> Option<ChatEvent> {
loop {
let message: Message = match self.receiver.lock().unwrap().recv_message() {
Ok(message) => message,
Err(e) => {
println!("{}", e);
continue;
}
};
match message.opcode {
Type::Text => {
let data = std::str::from_utf8(&*message.payload).unwrap();
let cmdpacket: serde_json::Value = match serde_json::from_slice(&*message.payload) {
Ok(packet) => packet,
Err(e) => {
println!("{}", e);
continue;
}
};
match cmdpacket.get("cmd").unwrap_or(&serde_json::Value::Null).as_str() {
Some("chat") => {
let decodedpacket: ChatPacket = json::decode(&data).unwrap();
if decodedpacket.nick != self.nick {
return Some(ChatEvent::Message (
decodedpacket.nick,
decodedpacket.text,
decodedpacket.trip.unwrap_or("".to_string())
));
}else {
continue;
}
},
Some("info") => {
let decodedpacket: InfoWarnPacket = json::decode(&data).unwrap();
return Some(ChatEvent::Info (
decodedpacket.text
));
},
Some("onlineAdd") => {
let decodedpacket: OnlineChangePacket = json::decode(&data).unwrap();
return Some(ChatEvent::JoinRoom (
decodedpacket.nick
));
},
Some("onlineRemove") => {
let decodedpacket: OnlineChangePacket = json::decode(&data).unwrap();
return Some(ChatEvent::LeaveRoom (
decodedpacket.nick
));
},
_ => {
println!("Unsupported message type");
continue;
}
}
},
Type::Ping => {
self.sender.lock().unwrap().send_message(&Message::pong(message.payload)).unwrap();
},
_ => {
return None;
}
};
return None;
}
}
}
/// Various Hack.chat events
pub enum ChatEvent {
/// Raised when there is a new message from the channel
///
/// The format is ChatEvent::Message(nick, text, trip_code)
Message (String, String, String),
/// Raised when someone joins the channel
///
/// The format is ChatEvent::JoinRoom(nick)
JoinRoom (String),
/// Raised when someone leaves the channel
///
/// The format is ChatEvent::LeaveRoom(nick)
LeaveRoom (String),
/// Raised when there is an event from the channel itself.
/// Some examples include:
///
/// * The result of the stats requests
/// * A user being banned.
Info (String)
}
#[derive(RustcEncodable, RustcDecodable)]
struct GenericPacket {
cmd: String
}
#[derive(RustcDecodable)]
struct ChatPacket {
nick: String,
text: String,
trip: Option<String>
}
#[derive(RustcDecodable)]
struct OnlineChangePacket {
nick: String
}
#[derive(RustcDecodable)]
struct InfoWarnPacket {
text: String
} | /// for event in chat.iter() {
/// match event {
/// ChatEvent::JoinRoom(nick) => {
/// chat.send_message(format!("Welcome to the chat {}!", nick));
/// }, | random_line_split |
lib.rs | //! #rust-hackchat
//! A client library for Hack.chat.
//!
//! This library allows you to make custom clients and bots for Hack.chat using Rust.
//!
//! #Examples
//!
//! ```
//! extern crate hackchat;
//! use hackchat::{ChatClient, ChatEvent};
//!
//! fn main() {
//! let mut conn = ChatClient::new("TestBot", "botDev"); //Connects to the ?botDev channel
//! conn.start_ping_thread(); //Sends ping packets regularly
//!
//! for event in conn.iter() {
//! match event {
//! ChatEvent::Message(nick, message, trip_code) => {
//! println!("<{}> {}", nick, message);
//! },
//! _ => {}
//! }
//! }
//! }
//! ```
extern crate websocket;
#[macro_use] extern crate serde_json;
extern crate rustc_serialize;
use std::thread;
use rustc_serialize::json;
use websocket::{Client, Message, WebSocketStream};
use websocket::message::Type;
use websocket::client::request::Url;
use websocket::sender::Sender;
use websocket::receiver::Receiver;
use websocket::ws::sender::Sender as SenderTrait;
use websocket::ws::receiver::Receiver as ReceiverTrait;
use std::sync::Arc;
use std::sync::Mutex;
/// The main struct responsible for the connection and events.
#[derive(Clone)]
pub struct ChatClient {
nick: String,
channel: String,
sender: Arc<Mutex<Sender<WebSocketStream>>>,
receiver: Arc<Mutex<Receiver<WebSocketStream>>>,
}
impl ChatClient {
/// Creates a new connection to hack.chat.
///
/// ```
/// let mut chat = ChatClient::new("WikiBot", "programming");
/// // Joins ?programming with the nick "WikiBot"
/// ```
pub fn new(nick: &str, channel: &str) -> ChatClient {
let url = Url::parse("wss://hack.chat/chat-ws").unwrap();
let request = Client::connect(url).unwrap();
let response = request.send().unwrap();
let client = response.begin();
let (mut sender, receiver) = client.split();
let join_packet = json!({
"cmd": "join",
"nick": nick,
"channel": channel
});
let message = Message::text(join_packet.to_string());
sender.send_message(&message).unwrap();
return ChatClient {
nick: nick.to_string(),
channel: channel.to_string(),
sender: Arc::new(Mutex::new(sender)),
receiver: Arc::new(Mutex::new(receiver))
};
}
/// Sends a message to the current channel.
///
/// ```
/// let mut chat = ChatClient::new("TestBot", "botDev");
/// chat.send_message("Hello there people".to_string());
/// ```
///
/// ```
/// let mut chat = ChatClient::new("TestBot", "botDev");
///
/// let problem_count = 99;
/// chat.send_message(format!("I got {} problems but Rust ain't one", problem_count));
/// ```
pub fn send_message(&mut self, message: String) {
let chat_packet = json!({
"cmd": "chat",
"text": message
});
let message = Message::text(chat_packet.to_string());
self.sender.lock().unwrap().send_message(&message).unwrap();
}
fn send_ping(&mut self) {
let ping_packet = json!({
"cmd": "ping"
});
let message = Message::text(ping_packet.to_string());
self.sender.lock().unwrap().send_message(&message).unwrap();
}
/// Sends a stats request, which results in an Info event that has the number of connected
/// IPs and channels.
pub fn | (&mut self) {
let stats_packet = json!({
"cmd": "stats"
});
let message = Message::text(stats_packet.to_string());
self.sender.lock().unwrap().send_message(&message).unwrap();
}
/// Starts the ping thread, which sends regular pings to keep the connection open.
pub fn start_ping_thread(&mut self) {
let mut chat_clone = self.clone();
thread::spawn(move|| {
loop {
thread::sleep_ms(60 * 1000);
chat_clone.send_ping();
}
});
}
/// Returns an iterator of hack.chat events such as messages.
///
/// #Examples
/// ```
/// let mut chat = ChatClient::new("GreetingBot", "botDev");
/// chat.start_ping_thread(); //Start the ping thread so we keep connected
///
/// for event in chat.iter() {
/// match event {
/// ChatEvent::JoinRoom(nick) => {
/// chat.send_message(format!("Welcome to the chat {}!", nick));
/// },
/// ChatEvent::LeaveRoom(nick) => {
/// chat.send_message(format!("Goodbye {}, see you later!", nick));
/// },
/// _ => {}
/// }
/// }
/// ```
pub fn iter(&mut self) -> ChatClient {
return self.clone();
}
}
impl Iterator for ChatClient {
type Item = ChatEvent;
fn next(&mut self) -> Option<ChatEvent> {
loop {
let message: Message = match self.receiver.lock().unwrap().recv_message() {
Ok(message) => message,
Err(e) => {
println!("{}", e);
continue;
}
};
match message.opcode {
Type::Text => {
let data = std::str::from_utf8(&*message.payload).unwrap();
let cmdpacket: serde_json::Value = match serde_json::from_slice(&*message.payload) {
Ok(packet) => packet,
Err(e) => {
println!("{}", e);
continue;
}
};
match cmdpacket.get("cmd").unwrap_or(&serde_json::Value::Null).as_str() {
Some("chat") => {
let decodedpacket: ChatPacket = json::decode(&data).unwrap();
if decodedpacket.nick != self.nick {
return Some(ChatEvent::Message (
decodedpacket.nick,
decodedpacket.text,
decodedpacket.trip.unwrap_or("".to_string())
));
}else {
continue;
}
},
Some("info") => {
let decodedpacket: InfoWarnPacket = json::decode(&data).unwrap();
return Some(ChatEvent::Info (
decodedpacket.text
));
},
Some("onlineAdd") => {
let decodedpacket: OnlineChangePacket = json::decode(&data).unwrap();
return Some(ChatEvent::JoinRoom (
decodedpacket.nick
));
},
Some("onlineRemove") => {
let decodedpacket: OnlineChangePacket = json::decode(&data).unwrap();
return Some(ChatEvent::LeaveRoom (
decodedpacket.nick
));
},
_ => {
println!("Unsupported message type");
continue;
}
}
},
Type::Ping => {
self.sender.lock().unwrap().send_message(&Message::pong(message.payload)).unwrap();
},
_ => {
return None;
}
};
return None;
}
}
}
/// Various Hack.chat events
pub enum ChatEvent {
/// Raised when there is a new message from the channel
///
/// The format is ChatEvent::Message(nick, text, trip_code)
Message (String, String, String),
/// Raised when someone joins the channel
///
/// The format is ChatEvent::JoinRoom(nick)
JoinRoom (String),
/// Raised when someone leaves the channel
///
/// The format is ChatEvent::LeaveRoom(nick)
LeaveRoom (String),
/// Raised when there is an event from the channel itself.
/// Some examples include:
///
/// * The result of the stats requests
/// * A user being banned.
Info (String)
}
#[derive(RustcEncodable, RustcDecodable)]
struct GenericPacket {
cmd: String
}
#[derive(RustcDecodable)]
struct ChatPacket {
nick: String,
text: String,
trip: Option<String>
}
#[derive(RustcDecodable)]
struct OnlineChangePacket {
nick: String
}
#[derive(RustcDecodable)]
struct InfoWarnPacket {
text: String
}
| send_stats_request | identifier_name |
webpack.common.config.js | var HtmlWebpackPlugin = require("html-webpack-plugin"),
autoprefixer = require("autoprefixer"),
path = require("path");
module.exports = {
loaders: [{
test: /\.js$/,
exclude: /node_modules/,
loaders: ['react-hot', 'babel'],
include: path.join(__dirname, 'src')
},{
test: /\.png$/,
loader: "url-loader?limit=100000"
},{
test: /\.jpg$/, | loader: "style!css!postcss-loader"
},{
test: /\.less$/,
loader: "css!less"
}],
postcss: [
autoprefixer({browsers: ['last 4 versions', 'iOS 6', 'Android 2.1']})
],
indexPagePlugin: new HtmlWebpackPlugin({
inject: true,
title: 'himynameisdave',
filename: 'index.html',
template: './src/index_template.html'
})
}; | loader: "file-loader"
},{
test: /\.css$/, | random_line_split |
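A downstream config would then pull these shared pieces in. The sketch below is a hypothetical webpack.dev.config.js (the entry and output paths are assumptions) showing how the exports are meant to be consumed under webpack 1:
// Hypothetical consumer of the shared config above (webpack 1 style).
var common = require("./webpack.common.config"),
    path = require("path");
module.exports = {
  entry: "./src/index.js",
  output: { path: path.join(__dirname, "dist"), filename: "bundle.js" },
  module: { loaders: common.loaders },
  postcss: common.postcss,
  plugins: [common.indexPagePlugin]
};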
q17.py | """
Problem: Given a matrix `matrix` whose entries may be positive, negative, or zero, return the maximum sum over all of its sub-matrices.
For example, for the matrix
-90 48 78
64 -40 64
-81 -7 66
the sub-matrix with the maximum sum is:
48 78
-40 64
-7 66
so the answer is 209.
As another example, for the matrix:
-1 -1 -1
-1 2 2
-1 -1 -1
the sub-matrix with the maximum sum is:
2 2
so the answer is 4.
"""
import sys
from arrandmatrix.q16 import MaxSum
class MaxMatrixSum:
@classmethod
def get_max_sum(cls, matrix):
if not matrix:
return 0
max_value = -sys.maxsize
for i in range(len(matrix)):
j = i
pre_arr = [0 for _ in range(len(matrix[0]))]
while j < len(matrix):
arr = cls.arr_add(matrix[j], pre_arr)
max_value = max([MaxSum.get_max_sum(arr), max_value])
j += 1
pre_arr = arr
return max_value | return [arr1[i]+arr2[i] for i in range(len(arr1))]
if __name__ == '__main__':
my_matrix = [
[-90, 48, 78],
[64, -40, 64],
[-81, -7, 66]
]
print(MaxMatrixSum.get_max_sum(my_matrix)) |
@classmethod
def arr_add(cls, arr1, arr2): | random_line_split |
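The band-compression trick above sums every group of rows i..j into a single array and delegates the 1-D case to MaxSum.get_max_sum from q16, which is not shown in this file; a stand-in implementation would be classic Kadane's algorithm, sketched here for reference.
# Hypothetical stand-in for arrandmatrix.q16.MaxSum (not shown in this file):
# Kadane's algorithm for the 1-D maximum subarray sum.
class MaxSum:
    @classmethod
    def get_max_sum(cls, arr):
        best = cur = arr[0]
        for x in arr[1:]:
            cur = max(x, cur + x)  # extend the current run or restart at x
            best = max(best, cur)
        return best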
q17.py | """
Problem: Given a matrix `matrix` whose entries may be positive, negative, or zero, return the maximum sum over all of its sub-matrices.
For example, for the matrix
-90 48 78
64 -40 64
-81 -7 66
the sub-matrix with the maximum sum is:
48 78
-40 64
-7 66
so the answer is 209.
As another example, for the matrix:
-1 -1 -1
-1 2 2
-1 -1 -1
the sub-matrix with the maximum sum is:
2 2
so the answer is 4.
"""
import sys
from arrandmatrix.q16 import MaxSum
class MaxMatrixSum:
@classmethod
def get_max_sum(cls, matrix):
if not matrix:
return 0
max_value = -sys.maxsize
fo | (len(matrix)):
j = i
pre_arr = [0 for _ in range(len(matrix[0]))]
while j < len(matrix):
arr = cls.arr_add(matrix[j], pre_arr)
max_value = max([MaxSum.get_max_sum(arr), max_value])
j += 1
pre_arr = arr
return max_value
@classmethod
def arr_add(cls, arr1, arr2):
return [arr1[i]+arr2[i] for i in range(len(arr1))]
if __name__ == '__main__':
my_matrix = [
[-90, 48, 78],
[64, -40, 64],
[-81, -7, 66]
]
print(MaxMatrixSum.get_max_sum(my_matrix)) | r i in range | identifier_name |
q17.py | """
Problem: Given a matrix `matrix` whose entries may be positive, negative, or zero, return the maximum sum over all of its sub-matrices.
For example, for the matrix
-90 48 78
64 -40 64
-81 -7 66
the sub-matrix with the maximum sum is:
48 78
-40 64
-7 66
so the answer is 209.
As another example, for the matrix:
-1 -1 -1
-1 2 2
-1 -1 -1
the sub-matrix with the maximum sum is:
2 2
so the answer is 4.
"""
import sys
from arrandmatrix.q16 import MaxSum
class MaxMatrixSum:
@classmethod
def get_max_sum(cls, matrix):
if not matrix:
return 0
max_value = -sys.maxsize
for i in range(len(matrix)):
j = i
pre_arr = [0 for _ in range(len(matrix[0]))]
while j < len(matrix):
arr = cls.arr_add(matrix[j], pre_arr)
| '__main__':
my_matrix = [
[-90, 48, 78],
[64, -40, 64],
[-81, -7, 66]
]
print(MaxMatrixSum.get_max_sum(my_matrix)) | max_value = max([MaxSum.get_max_sum(arr), max_value])
j += 1
pre_arr = arr
return max_value
@classmethod
def arr_add(cls, arr1, arr2):
return [arr1[i]+arr2[i] for i in range(len(arr1))]
if __name__ == | conditional_block |
q17.py | """
Problem: Given a matrix `matrix` whose entries may be positive, negative, or zero, return the maximum sum over all of its sub-matrices.
For example, for the matrix
-90 48 78
64 -40 64
-81 -7 66
the sub-matrix with the maximum sum is:
48 78
-40 64
-7 66
so the answer is 209.
As another example, for the matrix:
-1 -1 -1
-1 2 2
-1 -1 -1
the sub-matrix with the maximum sum is:
2 2
so the answer is 4.
"""
import sys
from arrandmatrix.q16 import MaxSum
class MaxMatrixSum:
@classmethod
def get_max_sum(cls, matrix):
if not matrix:
return 0
max_value = -sys.maxsize
for i in range(len(m | sum(my_matrix)) | atrix)):
j = i
pre_arr = [0 for _ in range(len(matrix[0]))]
while j < len(matrix):
arr = cls.arr_add(matrix[j], pre_arr)
max_value = max([MaxSum.get_max_sum(arr), max_value])
j += 1
pre_arr = arr
return max_value
@classmethod
def arr_add(cls, arr1, arr2):
return [arr1[i]+arr2[i] for i in range(len(arr1))]
if __name__ == '__main__':
my_matrix = [
[-90, 48, 78],
[64, -40, 64],
[-81, -7, 66]
]
print(MaxMatrixSum.get_max_ | identifier_body |
rectification_geometry.py | ################################################################################
# #
# Copyright (C) 2010,2011,2012,2013,2014, 2015,2016 The ESPResSo project #
# #
# This file is part of ESPResSo. #
# #
# ESPResSo is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# ESPResSo is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# #
# Active Matter: Rectification System Setup #
# #
################################################################################
from __future__ import print_function
from math import cos, pi, sin
import numpy as np
import os
import sys
from espressomd import assert_features, lb
from espressomd.lbboundaries import LBBoundary
from espressomd.shapes import Cylinder, Wall, HollowCone
assert_features(["LB_GPU","LB_BOUNDARIES_GPU"])
# Setup constants
outdir = "./RESULTS_RECTIFICATION_GEOMETRY/"
try:
os.makedirs(outdir)
except:
print("INFO: Directory \"{}\" exists".format(outdir))
# Setup the box (we pad the diameter to ensure that the LB boundaries
# and therefore the constraints, are away from the edge of the box)
length = 100
diameter = 20
dt = 0.01
# Setup the MD parameters
system = espressomd.System(box_l=[length, dieameter+4, diameter+4])
system.cell_system.skin = 0.1
system.time_step = dt
system.min_global_cut = 0.5
# Setup LB parameters (these are irrelevant here) and fluid
agrid = 1
vskin = 0.1
frict = 20.0
visco = 1.0
densi = 1.0
lbf = lb.LBFluidGPU(agrid=agrid, dens=densi, visc=visco, tau=dt, fric=frict)
| ################################################################################
#
# Now we set up the three LB boundaries that form the rectifying geometry.
# The cylinder boundary/constraint is actually already capped, but we put
# in two planes for safety's sake. If you want to create an cylinder of
# 'infinite length' using the periodic boundaries, then the cylinder must
# extend over the boundary.
#
################################################################################
# Setup cylinder
cylinder = LBBoundary(shape=Cylinder(center=[length/2.0, (diameter+4)/2.0, (diameter+4)/2.0],
axis=[1,0,0],
radius=diameter/2.0,
length=length,
direction=-1))
system.lbboundaries.add(cylinder)
# Setup walls
wall = LBBoundary(shape=Wall(dist=2, normal=[1,0,0]))
system.lbboundaries.add(wall)
wall = LBBoundary(shape=Wall(dist=-(length - 2), normal=[-1,0,0]))
system.lbboundaries.add(wall)
# Setup cone
irad = 4.0
angle = pi/4.0
orad = (diameter - irad)/sin(angle)
shift = 0.25*orad*cos(angle)
hollow_cone = LBBoundary(shape=HollowCone(position_x=length/2.0 - shift,
position_y=(diameter+4)/2.0,
position_z=(diameter+4)/2.0,
orientation_x=1,
orientation_y=0,
orientation_z=0,
outer_radius=orad,
inner_radius=irad,
width=2.0,
opening_angle=angle,
direction=1))
system.lbboundaries.add(hollow_cone)
################################################################################
# Output the geometry
lbf.print_vtk_boundary("{}/boundary.vtk".format(outdir))
################################################################################ | system.actors.add(lbf)
| random_line_split |
testVarArgs.ts | interface IMeteor {
/**
* Subscribe to a record set. Returns a handle that provides `stop()` and `ready()` methods.
*
* @locus Client
*
* @param {String} name - <p>Name of the subscription. Matches the name of the server's <code>publish()</code> call.</p>
* @param {Any} [arg1, arg2...] - <p>Optional arguments passed to publisher function on server.</p>
*/
subscribeWithVargsLast(name:string, ...args:any[]):any;
/**
* Subscribe to a record set. Returns a handle that provides `stop()` and `ready()` methods.
*
* @locus Client
*
* @param {String} name - <p>Name of the subscription. Matches the name of the server's <code>publish()</code> call.</p>
* @param {Any} [arg1, arg2...] - <p>Optional arguments passed to publisher function on server.</p> | */
subscribe(name:string, ...args:any[]):any;
}
declare var Meteor:IMeteor; | * @param {function or Object} [callbacks] - <p>Optional. May include <code>onError</code> and <code>onReady</code> callbacks. If a function is passed instead of an object, it is interpreted as an <code>onReady</code> callback.</p> | random_line_split |
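Given these declarations, a call site passes the optional publisher arguments positionally, with an optional callbacks object last; the subscription name and arguments below are invented for illustration.
// Hypothetical usage of the interface above.
const handle = Meteor.subscribe("tasks", "argA", 42, {
  onReady: () => console.log("subscription ready"),
  onError: (err: any) => console.error(err),
});
handle.stop(); // handles are documented to expose stop() and ready()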
TestHelpers.ts | import * as assert from "assert";
var nock = require("nock");
import fs = require('fs');
const Readable = require('stream').Readable;
const Writable = require('stream').Writable;
const Stats = require('fs').Stats;
import azureBlobUploadHelper = require('../../azure-blob-upload-helper');
/**
* Exit code is used to determine whether unit test passed or not.
* When executing code requires vsts-task-lib somewhere it makes exit code = 0 regardless whether exception was thrown.
* This helper allows to follow default NodeJS exit code behaviour when exception is thrown.
*/
export const assertByExitCode = {
equal: (actual, expected) => wrapAssertWithExitCode(assert.equal, actual, expected),
};
export function basicSetup() {
const uploadDomain = 'https://example.upload.test/release_upload';
const assetId = "00000000-0000-0000-0000-000000000123";
const uploadId = 7;
nock('https://example.test')
.post('/v0.1/apps/testuser/testapp/uploads/releases')
.reply(201, {
id: uploadId,
package_asset_id: assetId,
upload_domain: uploadDomain,
url_encoded_token: "token"
});
nock(uploadDomain)
.post(`/upload/set_metadata/${assetId}`)
.query(true)
.reply(200, {
resume_restart: false,
chunk_list: [1],
chunk_size: 100,
blob_partitions: 1
});
nock(uploadDomain)
.post(`/upload/upload_chunk/${assetId}`)
.query(true)
.reply(200, {
});
nock(uploadDomain)
.post(`/upload/finished/${assetId}`)
.query(true)
.reply(200, {
error: false,
state: "Done",
});
nock('https://example.test')
.get(`/v0.1/apps/testuser/testapp/uploads/releases/${uploadId}`)
.query(true)
.reply(200, {
release_distinct_id: 1,
upload_status: "readyToBePublished",
});
nock('https://example.test')
.patch(`/v0.1/apps/testuser/testapp/uploads/releases/${uploadId}`, {
upload_status: "uploadFinished",
})
.query(true)
.reply(200, {
upload_status: "uploadFinished"
});
nock('https://example.test')
.put('/v0.1/apps/testuser/testapp/releases/1', JSON.stringify({
release_notes: 'my release notes'
}))
.reply(200);
//make it available
nock('https://example.test')
.post('/v0.1/apps/testuser/testapp/releases/1/groups', {
id: "00000000-0000-0000-0000-000000000000"
})
.reply(200);
//finishing symbol upload, commit the symbol
nock('https://example.test')
.patch('/v0.1/apps/testuser/testapp/symbol_uploads/100', {
status: 'committed'
})
.reply(200);
fs.createReadStream = (s: string) => {
let stream = new Readable;
stream.push(s);
stream.push(null);
return stream;
};
fs.createWriteStream = (s: string) => {
let stream = new Writable;
stream.write = () => { };
return stream;
};
}
export function mockFs() {
let fsos = fs.openSync;
fs.openSync = (path: string, flags: string) => {
if (path.endsWith(".ipa")) |
return fsos(path, flags);
};
let fsrs = fs.readSync;
fs.readSync = (fd: number, buffer: Buffer, offset: number, length: number, position: number)=> {
if (fd == 1234567.89) {
buffer = new Buffer(100);
return;
}
return fsrs(fd, buffer, offset, length, position);
};
fs.statSync = (s: string) => {
let stat = new Stats;
stat.isFile = () => {
return !s.toLowerCase().endsWith(".dsym");
}
stat.isDirectory = () => {
return s.toLowerCase().endsWith(".dsym");
}
stat.size = 100;
return stat;
}
}
export function mockAzure() {
azureBlobUploadHelper.AzureBlobUploadHelper.prototype.upload = async () => {
return Promise.resolve();
}
}
function wrapAssertWithExitCode(assert, ...args) {
try {
assert.apply(undefined, args);
} catch (error) {
process.exit(1);
}
}
| {
return 1234567.89;
} | conditional_block |
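In a task test these helpers are typically applied before the task under test runs; the sketch below (the import path and the way the task is executed are assumptions) shows how they combine.
// Sketch of a test harness using the helpers above; paths are assumptions.
import * as helpers from "./TestHelpers";
helpers.basicSetup(); // nock interceptors for the whole upload/publish flow
helpers.mockFs();     // fake .ipa file handles and dSYM directory stats
helpers.mockAzure();  // skip the real blob upload
// ...require and run the App Center task here, then verify:
const taskSucceeded = true; // placeholder for the task's real outcome
helpers.assertByExitCode.equal(taskSucceeded, true);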
TestHelpers.ts | import * as assert from "assert";
var nock = require("nock");
import fs = require('fs');
const Readable = require('stream').Readable;
const Writable = require('stream').Writable;
const Stats = require('fs').Stats;
import azureBlobUploadHelper = require('../../azure-blob-upload-helper');
/**
* Exit code is used to determine whether unit test passed or not.
* When executing code requires vsts-task-lib somewhere it makes exit code = 0 regardless whether exception was thrown.
* This helper allows to follow default NodeJS exit code behaviour when exception is thrown.
*/
export const assertByExitCode = {
equal: (actual, expected) => wrapAssertWithExitCode(assert.equal, actual, expected),
};
export function basicSetup() {
const uploadDomain = 'https://example.upload.test/release_upload';
const assetId = "00000000-0000-0000-0000-000000000123";
const uploadId = 7;
nock('https://example.test')
.post('/v0.1/apps/testuser/testapp/uploads/releases')
.reply(201, {
id: uploadId,
package_asset_id: assetId,
upload_domain: uploadDomain,
url_encoded_token: "token"
});
nock(uploadDomain)
.post(`/upload/set_metadata/${assetId}`)
.query(true)
.reply(200, {
resume_restart: false,
chunk_list: [1],
chunk_size: 100,
blob_partitions: 1
});
nock(uploadDomain)
.post(`/upload/upload_chunk/${assetId}`)
.query(true)
.reply(200, {
});
nock(uploadDomain)
.post(`/upload/finished/${assetId}`)
.query(true)
.reply(200, {
error: false,
state: "Done",
});
nock('https://example.test')
.get(`/v0.1/apps/testuser/testapp/uploads/releases/${uploadId}`)
.query(true)
.reply(200, {
release_distinct_id: 1,
upload_status: "readyToBePublished",
});
nock('https://example.test')
.patch(`/v0.1/apps/testuser/testapp/uploads/releases/${uploadId}`, {
upload_status: "uploadFinished",
})
.query(true)
.reply(200, {
upload_status: "uploadFinished"
});
nock('https://example.test')
.put('/v0.1/apps/testuser/testapp/releases/1', JSON.stringify({
release_notes: 'my release notes'
}))
.reply(200);
//make it available
nock('https://example.test')
.post('/v0.1/apps/testuser/testapp/releases/1/groups', {
id: "00000000-0000-0000-0000-000000000000"
})
.reply(200);
//finishing symbol upload, commit the symbol
nock('https://example.test')
.patch('/v0.1/apps/testuser/testapp/symbol_uploads/100', {
status: 'committed'
})
.reply(200);
fs.createReadStream = (s: string) => {
let stream = new Readable;
stream.push(s);
stream.push(null);
return stream;
};
fs.createWriteStream = (s: string) => {
let stream = new Writable;
stream.write = () => { };
return stream;
};
}
export function mockFs() |
export function mockAzure() {
azureBlobUploadHelper.AzureBlobUploadHelper.prototype.upload = async () => {
return Promise.resolve();
}
}
function wrapAssertWithExitCode(assert, ...args) {
try {
assert.apply(undefined, args);
} catch (error) {
process.exit(1);
}
}
| {
let fsos = fs.openSync;
fs.openSync = (path: string, flags: string) => {
if (path.endsWith(".ipa")){
return 1234567.89;
}
return fsos(path, flags);
};
let fsrs = fs.readSync;
fs.readSync = (fd: number, buffer: Buffer, offset: number, length: number, position: number)=> {
if (fd == 1234567.89) {
buffer = new Buffer(100);
return;
}
return fsrs(fd, buffer, offset, length, position);
};
fs.statSync = (s: string) => {
let stat = new Stats;
stat.isFile = () => {
return !s.toLowerCase().endsWith(".dsym");
}
stat.isDirectory = () => {
return s.toLowerCase().endsWith(".dsym");
}
stat.size = 100;
return stat;
}
} | identifier_body |
TestHelpers.ts | import * as assert from "assert";
var nock = require("nock");
import fs = require('fs');
const Readable = require('stream').Readable;
const Writable = require('stream').Writable;
const Stats = require('fs').Stats;
import azureBlobUploadHelper = require('../../azure-blob-upload-helper');
/**
* Exit code is used to determine whether unit test passed or not.
* When executing code requires vsts-task-lib somewhere it makes exit code = 0 regardless whether exception was thrown.
* This helper allows to follow default NodeJS exit code behaviour when exception is thrown.
*/
export const assertByExitCode = {
equal: (actual, expected) => wrapAssertWithExitCode(assert.equal, actual, expected),
};
export function | () {
const uploadDomain = 'https://example.upload.test/release_upload';
const assetId = "00000000-0000-0000-0000-000000000123";
const uploadId = 7;
nock('https://example.test')
.post('/v0.1/apps/testuser/testapp/uploads/releases')
.reply(201, {
id: uploadId,
package_asset_id: assetId,
upload_domain: uploadDomain,
url_encoded_token: "token"
});
nock(uploadDomain)
.post(`/upload/set_metadata/${assetId}`)
.query(true)
.reply(200, {
resume_restart: false,
chunk_list: [1],
chunk_size: 100,
blob_partitions: 1
});
nock(uploadDomain)
.post(`/upload/upload_chunk/${assetId}`)
.query(true)
.reply(200, {
});
nock(uploadDomain)
.post(`/upload/finished/${assetId}`)
.query(true)
.reply(200, {
error: false,
state: "Done",
});
nock('https://example.test')
.get(`/v0.1/apps/testuser/testapp/uploads/releases/${uploadId}`)
.query(true)
.reply(200, {
release_distinct_id: 1,
upload_status: "readyToBePublished",
});
nock('https://example.test')
.patch(`/v0.1/apps/testuser/testapp/uploads/releases/${uploadId}`, {
upload_status: "uploadFinished",
})
.query(true)
.reply(200, {
upload_status: "uploadFinished"
});
nock('https://example.test')
.put('/v0.1/apps/testuser/testapp/releases/1', JSON.stringify({
release_notes: 'my release notes'
}))
.reply(200);
//make it available
nock('https://example.test')
.post('/v0.1/apps/testuser/testapp/releases/1/groups', {
id: "00000000-0000-0000-0000-000000000000"
})
.reply(200);
//finishing symbol upload, commit the symbol
nock('https://example.test')
.patch('/v0.1/apps/testuser/testapp/symbol_uploads/100', {
status: 'committed'
})
.reply(200);
fs.createReadStream = (s: string) => {
let stream = new Readable;
stream.push(s);
stream.push(null);
return stream;
};
fs.createWriteStream = (s: string) => {
let stream = new Writable;
stream.write = () => { };
return stream;
};
}
export function mockFs() {
let fsos = fs.openSync;
fs.openSync = (path: string, flags: string) => {
if (path.endsWith(".ipa")){
return 1234567.89;
}
return fsos(path, flags);
};
let fsrs = fs.readSync;
fs.readSync = (fd: number, buffer: Buffer, offset: number, length: number, position: number)=> {
if (fd == 1234567.89) {
buffer = new Buffer(100);
return;
}
return fsrs(fd, buffer, offset, length, position);
};
fs.statSync = (s: string) => {
let stat = new Stats;
stat.isFile = () => {
return !s.toLowerCase().endsWith(".dsym");
}
stat.isDirectory = () => {
return s.toLowerCase().endsWith(".dsym");
}
stat.size = 100;
return stat;
}
}
export function mockAzure() {
azureBlobUploadHelper.AzureBlobUploadHelper.prototype.upload = async () => {
return Promise.resolve();
}
}
function wrapAssertWithExitCode(assert, ...args) {
try {
assert.apply(undefined, args);
} catch (error) {
process.exit(1);
}
}
| basicSetup | identifier_name |
TestHelpers.ts | import * as assert from "assert";
var nock = require("nock");
import fs = require('fs');
const Readable = require('stream').Readable;
const Writable = require('stream').Writable;
const Stats = require('fs').Stats;
import azureBlobUploadHelper = require('../../azure-blob-upload-helper');
/**
* Exit code is used to determine whether unit test passed or not.
* When executing code requires vsts-task-lib somewhere it makes exit code = 0 regardless whether exception was thrown.
* This helper allows to follow default NodeJS exit code behaviour when exception is thrown.
*/
export const assertByExitCode = {
equal: (actual, expected) => wrapAssertWithExitCode(assert.equal, actual, expected),
};
export function basicSetup() {
const uploadDomain = 'https://example.upload.test/release_upload';
const assetId = "00000000-0000-0000-0000-000000000123";
const uploadId = 7;
nock('https://example.test')
.post('/v0.1/apps/testuser/testapp/uploads/releases')
.reply(201, {
id: uploadId,
package_asset_id: assetId,
upload_domain: uploadDomain,
url_encoded_token: "token"
});
nock(uploadDomain)
.post(`/upload/set_metadata/${assetId}`)
.query(true)
.reply(200, {
resume_restart: false,
chunk_list: [1],
chunk_size: 100,
blob_partitions: 1
});
nock(uploadDomain)
.post(`/upload/upload_chunk/${assetId}`)
.query(true)
.reply(200, {
});
nock(uploadDomain)
.post(`/upload/finished/${assetId}`)
.query(true)
.reply(200, {
error: false,
state: "Done",
});
nock('https://example.test')
.get(`/v0.1/apps/testuser/testapp/uploads/releases/${uploadId}`)
.query(true)
.reply(200, {
release_distinct_id: 1,
upload_status: "readyToBePublished", | .patch(`/v0.1/apps/testuser/testapp/uploads/releases/${uploadId}`, {
upload_status: "uploadFinished",
})
.query(true)
.reply(200, {
upload_status: "uploadFinished"
});
nock('https://example.test')
.put('/v0.1/apps/testuser/testapp/releases/1', JSON.stringify({
release_notes: 'my release notes'
}))
.reply(200);
//make it available
nock('https://example.test')
.post('/v0.1/apps/testuser/testapp/releases/1/groups', {
id: "00000000-0000-0000-0000-000000000000"
})
.reply(200);
//finishing symbol upload, commit the symbol
nock('https://example.test')
.patch('/v0.1/apps/testuser/testapp/symbol_uploads/100', {
status: 'committed'
})
.reply(200);
fs.createReadStream = (s: string) => {
let stream = new Readable;
stream.push(s);
stream.push(null);
return stream;
};
fs.createWriteStream = (s: string) => {
let stream = new Writable;
stream.write = () => { };
return stream;
};
}
export function mockFs() {
let fsos = fs.openSync;
fs.openSync = (path: string, flags: string) => {
if (path.endsWith(".ipa")){
return 1234567.89;
}
return fsos(path, flags);
};
let fsrs = fs.readSync;
fs.readSync = (fd: number, buffer: Buffer, offset: number, length: number, position: number)=> {
if (fd == 1234567.89) {
buffer = new Buffer(100);
return;
}
return fsrs(fd, buffer, offset, length, position);
};
fs.statSync = (s: string) => {
let stat = new Stats;
stat.isFile = () => {
return !s.toLowerCase().endsWith(".dsym");
}
stat.isDirectory = () => {
return s.toLowerCase().endsWith(".dsym");
}
stat.size = 100;
return stat;
}
}
export function mockAzure() {
azureBlobUploadHelper.AzureBlobUploadHelper.prototype.upload = async () => {
return Promise.resolve();
}
}
function wrapAssertWithExitCode(assert, ...args) {
try {
assert.apply(undefined, args);
} catch (error) {
process.exit(1);
}
} | });
nock('https://example.test') | random_line_split |
extern-stress.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This creates a bunch of yielding tasks that run concurrently
// while holding onto C stacks
mod rustrt {
pub extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
task::yield();
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
for old_iter::repeat(100u) { | do task::spawn {
assert!(count(5u) == 16u);
};
}
} | random_line_split |
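Since cb doubles the work at every level and bottoms out at 1, count(n) equals 2^(n-1), which is why the test expects count(5u) == 16u; a pure-Rust equivalent of the recursion (without the C trampoline or the yield) is sketched below in the same era's syntax.
// Pure-Rust sketch of the recursion driven through rust_dbg_call above.
fn count_pure(n: uint) -> uint {
    if n == 1u { 1u } else { count_pure(n - 1u) + count_pure(n - 1u) } // 2^(n-1)
}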
|
extern-stress.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This creates a bunch of yielding tasks that run concurrently
// while holding onto C stacks
mod rustrt {
pub extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else |
}
fn count(n: uint) -> uint {
unsafe {
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
for old_iter::repeat(100u) {
do task::spawn {
assert!(count(5u) == 16u);
};
}
}
| {
task::yield();
count(data - 1u) + count(data - 1u)
} | conditional_block |
extern-stress.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This creates a bunch of yielding tasks that run concurrently
// while holding onto C stacks
mod rustrt {
pub extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
task::yield();
count(data - 1u) + count(data - 1u)
}
}
fn count(n: uint) -> uint {
unsafe {
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() | {
for old_iter::repeat(100u) {
do task::spawn {
assert!(count(5u) == 16u);
};
}
} | identifier_body |
|
extern-stress.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This creates a bunch of yielding tasks that run concurrently
// while holding onto C stacks
mod rustrt {
pub extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
task::yield();
count(data - 1u) + count(data - 1u)
}
}
fn | (n: uint) -> uint {
unsafe {
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
for old_iter::repeat(100u) {
do task::spawn {
assert!(count(5u) == 16u);
};
}
}
| count | identifier_name |
XDCC.py | # -*- coding: utf-8 -*-
import os
import re
import select
import socket
import struct
import time
from module.plugins.internal.Hoster import Hoster
from module.plugins.internal.misc import exists, fsjoin
class XDCC(Hoster):
__name__ = "XDCC"
__type__ = "hoster"
__version__ = "0.42" | __status__ = "testing"
__pattern__ = r'xdcc://(?P<SERVER>.*?)/#?(?P<CHAN>.*?)/(?P<BOT>.*?)/#?(?P<PACK>\d+)/?'
__config__ = [("nick", "str", "Nickname", "pyload" ),
("ident", "str", "Ident", "pyloadident" ),
("realname", "str", "Realname", "pyloadreal" ),
("ctcp_version", "str","CTCP version string", "pyLoad! IRC Interface")]
__description__ = """Download from IRC XDCC bot"""
__license__ = "GPLv3"
__authors__ = [("jeix", "[email protected]" ),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
def setup(self):
self.timeout = 30
self.multiDL = False
def process(self, pyfile):
#: Change request type
self.req = self.pyload.requestFactory.getRequest(self.classname, type="XDCC")
for _i in xrange(0, 3):
try:
nmn = self.do_download(pyfile.url)
self.log_info("Download of %s finished." % nmn)
return
except socket.error, e:
if hasattr(e, "errno") and e.errno is not None:
err_no = e.errno
if err_no in (10054, 10061):
self.log_warning("Server blocked our ip, retry in 5 min")
self.wait(300)
continue
else:
self.log_error(_("Failed due to socket errors. Code: %s") % err_no)
self.fail(_("Failed due to socket errors. Code: %s") % err_no)
else:
err_msg = e.args[0]
self.log_error(_("Failed due to socket errors: '%s'") % err_msg)
self.fail(_("Failed due to socket errors: '%s'") % err_msg)
self.log_error(_("Server blocked our ip, retry again later manually"))
self.fail(_("Server blocked our ip, retry again later manually"))
def do_download(self, url):
self.pyfile.setStatus("waiting")
server, chan, bot, pack = re.match(self.__pattern__, url).groups()
nick = self.config.get('nick')
ident = self.config.get('ident')
realname = self.config.get('realname')
ctcp_version = self.config.get('ctcp_version')
temp = server.split(':')
ln = len(temp)
if ln == 2:
host, port = temp
elif ln == 1:
host, port = temp[0], 6667
else:
self.fail(_("Invalid hostname for IRC Server: %s") % server)
#######################
#: CONNECT TO IRC AND IDLE FOR REAL LINK
dl_time = time.time()
sock = socket.socket()
self.log_info(_("Connecting to: %s:%s") % (host, port))
sock.connect((host, int(port)))
if nick == "pyload":
nick = "pyload-%d" % (time.time() % 1000) #: last 3 digits
sock.send("NICK %s\r\n" % nick)
sock.send("USER %s %s bla :%s\r\n" % (ident, host, realname))
self.log_info(_("Connect success."))
self.wait(5) # Wait for logon to complete
sock.send("JOIN #%s\r\n" % chan)
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
#: IRC recv loop
readbuffer = ""
retry = None
m = None
while m is None:
if retry:
if time.time() > retry:
retry = None
dl_time = time.time()
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
else:
if (dl_time + self.timeout) < time.time(): #@TODO: add in config
sock.send("QUIT :byebye\r\n")
sock.close()
self.log_error(_("XDCC Bot did not answer"))
self.fail(_("XDCC Bot did not answer"))
fdset = select.select([sock], [], [], 0)
if sock not in fdset[0]:
continue
readbuffer += sock.recv(1024)
lines = readbuffer.split("\n")
readbuffer = lines.pop()
for line in lines:
# if self.pyload.debug:
# self.log_debug("*> " + decode(line))
line = line.rstrip()
first = line.split()
if first[0] == "PING":
sock.send("PONG %s\r\n" % first[1])
if first[0] == "ERROR":
self.fail(_("IRC-Error: %s") % line)
msg = line.split(None, 3)
if len(msg) != 4:
continue
msg = {'origin': msg[0][1:],
'action': msg[1],
'target': msg[2],
'text' : msg[3][1:]}
if msg['target'][0:len(nick)] == nick and msg['action'] == "PRIVMSG":
if msg['text'] == "\x01VERSION\x01":
self.log_debug(_("Sending CTCP VERSION"))
sock.send("NOTICE %s :%s\r\n" % (msg['origin'], ctcp_version))
elif msg['text'] == "\x01TIME\x01":
self.log_debug(_("Sending CTCP TIME"))
sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
elif msg['text'] == "\x01LAG\x01":
pass #: don't know how to answer
if msg['origin'][0:len(bot)] != bot\
or msg['target'][0:len(nick)] != nick\
or msg['action'] not in ("PRIVMSG", "NOTICE"):
continue
self.log_debug(_("PrivMsg: <%s> - %s" % (msg['origin'], msg['text'])))
if "You already requested that pack" in msg['text']:
retry = time.time() + 300
elif "you must be on a known channel to request a pack" in msg['text']:
self.log_error(_("Invalid channel"))
self.fail(_("Invalid channel"))
m = re.match('\x01DCC SEND (?P<NAME>.*?) (?P<IP>\d+) (?P<PORT>\d+)(?: (?P<SIZE>\d+))?\x01', msg['text'])
#: Get connection data
ip = socket.inet_ntoa(struct.pack('!I', int(m.group('IP'))))
port = int(m.group('PORT'))
file_name = m.group('NAME')
if m.group('SIZE'):
self.req.filesize = long(m.group('SIZE'))
self.pyfile.name = file_name
dl_folder = fsjoin(self.pyload.config.get('general', 'download_folder'),
self.pyfile.package().folder if self.pyload.config.get("general",
"folder_per_package") else "")
dl_file = fsjoin(dl_folder, file_name)
if not exists(dl_folder):
os.makedirs(dl_folder)
self.set_permissions(dl_folder)
self.log_info(_("Downloading %s from %s:%d") % (file_name, ip, port))
self.pyfile.setStatus("downloading")
newname = self.req.download(ip, port, dl_file, sock, self.pyfile.setProgress)
if newname and newname != dl_file:
self.log_info(_("%(name)s saved as %(newname)s") % {'name': self.pyfile.name, 'newname': newname})
dl_file = newname
#: kill IRC socket
#: sock.send("QUIT :byebye\r\n")
sock.close()
self.last_download = dl_file
return self.last_download | random_line_split |
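The DCC SEND handshake transmits the peer address as a single big-endian integer, which the plugin unpacks with struct and inet_ntoa; here is a worked example of that conversion (the value is illustrative only).
# Worked example of the integer-to-dotted-quad conversion used above.
import socket, struct
ip_int = 3232235777  # illustrative value
print(socket.inet_ntoa(struct.pack('!I', ip_int)))  # -> 192.168.1.1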
|
XDCC.py | # -*- coding: utf-8 -*-
import os
import re
import select
import socket
import struct
import time
from module.plugins.internal.Hoster import Hoster
from module.plugins.internal.misc import exists, fsjoin
class XDCC(Hoster):
__name__ = "XDCC"
__type__ = "hoster"
__version__ = "0.42"
__status__ = "testing"
__pattern__ = r'xdcc://(?P<SERVER>.*?)/#?(?P<CHAN>.*?)/(?P<BOT>.*?)/#?(?P<PACK>\d+)/?'
__config__ = [("nick", "str", "Nickname", "pyload" ),
("ident", "str", "Ident", "pyloadident" ),
("realname", "str", "Realname", "pyloadreal" ),
("ctcp_version", "str","CTCP version string", "pyLoad! IRC Interface")]
__description__ = """Download from IRC XDCC bot"""
__license__ = "GPLv3"
__authors__ = [("jeix", "[email protected]" ),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
def setup(self):
|
def process(self, pyfile):
#: Change request type
self.req = self.pyload.requestFactory.getRequest(self.classname, type="XDCC")
for _i in xrange(0, 3):
try:
nmn = self.do_download(pyfile.url)
self.log_info("Download of %s finished." % nmn)
return
except socket.error, e:
if hasattr(e, "errno") and e.errno is not None:
err_no = e.errno
if err_no in (10054, 10061):
self.log_warning("Server blocked our ip, retry in 5 min")
self.wait(300)
continue
else:
self.log_error(_("Failed due to socket errors. Code: %s") % err_no)
self.fail(_("Failed due to socket errors. Code: %s") % err_no)
else:
err_msg = e.args[0]
self.log_error(_("Failed due to socket errors: '%s'") % err_msg)
self.fail(_("Failed due to socket errors: '%s'") % err_msg)
self.log_error(_("Server blocked our ip, retry again later manually"))
self.fail(_("Server blocked our ip, retry again later manually"))
def do_download(self, url):
self.pyfile.setStatus("waiting")
server, chan, bot, pack = re.match(self.__pattern__, url).groups()
nick = self.config.get('nick')
ident = self.config.get('ident')
realname = self.config.get('realname')
ctcp_version = self.config.get('ctcp_version')
temp = server.split(':')
ln = len(temp)
if ln == 2:
host, port = temp
elif ln == 1:
host, port = temp[0], 6667
else:
self.fail(_("Invalid hostname for IRC Server: %s") % server)
#######################
#: CONNECT TO IRC AND IDLE FOR REAL LINK
dl_time = time.time()
sock = socket.socket()
self.log_info(_("Connecting to: %s:%s") % (host, port))
sock.connect((host, int(port)))
if nick == "pyload":
nick = "pyload-%d" % (time.time() % 1000) #: last 3 digits
sock.send("NICK %s\r\n" % nick)
sock.send("USER %s %s bla :%s\r\n" % (ident, host, realname))
self.log_info(_("Connect success."))
self.wait(5) # Wait for logon to complete
sock.send("JOIN #%s\r\n" % chan)
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
#: IRC recv loop
readbuffer = ""
retry = None
m = None
while m is None:
if retry:
if time.time() > retry:
retry = None
dl_time = time.time()
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
else:
if (dl_time + self.timeout) < time.time(): #@TODO: add in config
sock.send("QUIT :byebye\r\n")
sock.close()
self.log_error(_("XDCC Bot did not answer"))
self.fail(_("XDCC Bot did not answer"))
fdset = select.select([sock], [], [], 0)
if sock not in fdset[0]:
continue
readbuffer += sock.recv(1024)
lines = readbuffer.split("\n")
readbuffer = lines.pop()
for line in lines:
# if self.pyload.debug:
# self.log_debug("*> " + decode(line))
line = line.rstrip()
first = line.split()
if first[0] == "PING":
sock.send("PONG %s\r\n" % first[1])
if first[0] == "ERROR":
self.fail(_("IRC-Error: %s") % line)
msg = line.split(None, 3)
if len(msg) != 4:
continue
msg = {'origin': msg[0][1:],
'action': msg[1],
'target': msg[2],
'text' : msg[3][1:]}
if msg['target'][0:len(nick)] == nick and msg['action'] == "PRIVMSG":
if msg['text'] == "\x01VERSION\x01":
self.log_debug(_("Sending CTCP VERSION"))
sock.send("NOTICE %s :%s\r\n" % (msg['origin'], ctcp_version))
elif msg['text'] == "\x01TIME\x01":
self.log_debug(_("Sending CTCP TIME"))
sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
elif msg['text'] == "\x01LAG\x01":
pass #: don't know how to answer
if msg['origin'][0:len(bot)] != bot\
or msg['target'][0:len(nick)] != nick\
or msg['action'] not in ("PRIVMSG", "NOTICE"):
continue
self.log_debug(_("PrivMsg: <%s> - %s" % (msg['origin'], msg['text'])))
if "You already requested that pack" in msg['text']:
retry = time.time() + 300
elif "you must be on a known channel to request a pack" in msg['text']:
self.log_error(_("Invalid channel"))
self.fail(_("Invalid channel"))
m = re.match('\x01DCC SEND (?P<NAME>.*?) (?P<IP>\d+) (?P<PORT>\d+)(?: (?P<SIZE>\d+))?\x01', msg['text'])
#: Get connection data
ip = socket.inet_ntoa(struct.pack('!I', int(m.group('IP'))))
port = int(m.group('PORT'))
file_name = m.group('NAME')
if m.group('SIZE'):
self.req.filesize = long(m.group('SIZE'))
self.pyfile.name = file_name
dl_folder = fsjoin(self.pyload.config.get('general', 'download_folder'),
self.pyfile.package().folder if self.pyload.config.get("general",
"folder_per_package") else "")
dl_file = fsjoin(dl_folder, file_name)
if not exists(dl_folder):
os.makedirs(dl_folder)
self.set_permissions(dl_folder)
self.log_info(_("Downloading %s from %s:%d") % (file_name, ip, port))
self.pyfile.setStatus("downloading")
newname = self.req.download(ip, port, dl_file, sock, self.pyfile.setProgress)
if newname and newname != dl_file:
self.log_info(_("%(name)s saved as %(newname)s") % {'name': self.pyfile.name, 'newname': newname})
dl_file = newname
#: kill IRC socket
#: sock.send("QUIT :byebye\r\n")
sock.close()
self.last_download = dl_file
return self.last_download
| self.timeout = 30
self.multiDL = False | identifier_body |
XDCC.py | # -*- coding: utf-8 -*-
import os
import re
import select
import socket
import struct
import time
from module.plugins.internal.Hoster import Hoster
from module.plugins.internal.misc import exists, fsjoin
class XDCC(Hoster):
__name__ = "XDCC"
__type__ = "hoster"
__version__ = "0.42"
__status__ = "testing"
__pattern__ = r'xdcc://(?P<SERVER>.*?)/#?(?P<CHAN>.*?)/(?P<BOT>.*?)/#?(?P<PACK>\d+)/?'
__config__ = [("nick", "str", "Nickname", "pyload" ),
("ident", "str", "Ident", "pyloadident" ),
("realname", "str", "Realname", "pyloadreal" ),
("ctcp_version", "str","CTCP version string", "pyLoad! IRC Interface")]
__description__ = """Download from IRC XDCC bot"""
__license__ = "GPLv3"
__authors__ = [("jeix", "[email protected]" ),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
def setup(self):
self.timeout = 30
self.multiDL = False
def | (self, pyfile):
#: Change request type
self.req = self.pyload.requestFactory.getRequest(self.classname, type="XDCC")
for _i in xrange(0, 3):
try:
nmn = self.do_download(pyfile.url)
self.log_info("Download of %s finished." % nmn)
return
except socket.error, e:
if hasattr(e, "errno") and e.errno is not None:
err_no = e.errno
if err_no in (10054, 10061):
self.log_warning("Server blocked our ip, retry in 5 min")
self.wait(300)
continue
else:
self.log_error(_("Failed due to socket errors. Code: %s") % err_no)
self.fail(_("Failed due to socket errors. Code: %s") % err_no)
else:
err_msg = e.args[0]
self.log_error(_("Failed due to socket errors: '%s'") % err_msg)
self.fail(_("Failed due to socket errors: '%s'") % err_msg)
self.log_error(_("Server blocked our ip, retry again later manually"))
self.fail(_("Server blocked our ip, retry again later manually"))
def do_download(self, url):
self.pyfile.setStatus("waiting")
server, chan, bot, pack = re.match(self.__pattern__, url).groups()
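# __pattern__ was already matched when pyLoad routed the URL to this hoster, so groups() is safe here.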
nick = self.config.get('nick')
ident = self.config.get('ident')
realname = self.config.get('realname')
ctcp_version = self.config.get('ctcp_version')
temp = server.split(':')
ln = len(temp)
if ln == 2:
host, port = temp
elif ln == 1:
host, port = temp[0], 6667
else:
self.fail(_("Invalid hostname for IRC Server: %s") % server)
#######################
#: CONNECT TO IRC AND IDLE FOR REAL LINK
dl_time = time.time()
sock = socket.socket()
self.log_info(_("Connecting to: %s:%s") % (host, port))
sock.connect((host, int(port)))
if nick == "pyload":
nick = "pyload-%d" % (time.time() % 1000) #: last 3 digits
sock.send("NICK %s\r\n" % nick)
sock.send("USER %s %s bla :%s\r\n" % (ident, host, realname))
self.log_info(_("Connect success."))
self.wait(5) # Wait for logon to complete
sock.send("JOIN #%s\r\n" % chan)
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
#: IRC recv loop
readbuffer = ""
retry = None
m = None
while m is None:
if retry:
if time.time() > retry:
retry = None
dl_time = time.time()
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
else:
if (dl_time + self.timeout) < time.time(): #@TODO: add in config
sock.send("QUIT :byebye\r\n")
sock.close()
self.log_error(_("XDCC Bot did not answer"))
self.fail(_("XDCC Bot did not answer"))
fdset = select.select([sock], [], [], 0)
if sock not in fdset[0]:
continue
readbuffer += sock.recv(1024)
lines = readbuffer.split("\n")
readbuffer = lines.pop()
for line in lines:
# if self.pyload.debug:
# self.log_debug("*> " + decode(line))
line = line.rstrip()
first = line.split()
if first[0] == "PING":
sock.send("PONG %s\r\n" % first[1])
if first[0] == "ERROR":
self.fail(_("IRC-Error: %s") % line)
msg = line.split(None, 3)
if len(msg) != 4:
continue
msg = {'origin': msg[0][1:],
'action': msg[1],
'target': msg[2],
'text' : msg[3][1:]}
if msg['target'][0:len(nick)] == nick and msg['action'] == "PRIVMSG":
if msg['text'] == "\x01VERSION\x01":
self.log_debug(_("Sending CTCP VERSION"))
sock.send("NOTICE %s :%s\r\n" % (msg['origin'], ctcp_version))
elif msg['text'] == "\x01TIME\x01":
self.log_debug(_("Sending CTCP TIME"))
sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
elif msg['text'] == "\x01LAG\x01":
pass #: don't know how to answer
if msg['origin'][0:len(bot)] != bot\
or msg['target'][0:len(nick)] != nick\
or msg['action'] not in ("PRIVMSG", "NOTICE"):
continue
self.log_debug(_("PrivMsg: <%s> - %s" % (msg['origin'], msg['text'])))
if "You already requested that pack" in msg['text']:
retry = time.time() + 300
elif "you must be on a known channel to request a pack" in msg['text']:
self.log_error(_("Invalid channel"))
self.fail(_("Invalid channel"))
m = re.match('\x01DCC SEND (?P<NAME>.*?) (?P<IP>\d+) (?P<PORT>\d+)(?: (?P<SIZE>\d+))?\x01', msg['text'])
#: Get connection data
ip = socket.inet_ntoa(struct.pack('!I', int(m.group('IP'))))
port = int(m.group('PORT'))
file_name = m.group('NAME')
if m.group('SIZE'):
self.req.filesize = long(m.group('SIZE'))
self.pyfile.name = file_name
dl_folder = fsjoin(self.pyload.config.get('general', 'download_folder'),
self.pyfile.package().folder if self.pyload.config.get("general",
"folder_per_package") else "")
dl_file = fsjoin(dl_folder, file_name)
if not exists(dl_folder):
os.makedirs(dl_folder)
self.set_permissions(dl_folder)
self.log_info(_("Downloading %s from %s:%d") % (file_name, ip, port))
self.pyfile.setStatus("downloading")
newname = self.req.download(ip, port, dl_file, sock, self.pyfile.setProgress)
if newname and newname != dl_file:
self.log_info(_("%(name)s saved as %(newname)s") % {'name': self.pyfile.name, 'newname': newname})
dl_file = newname
#: kill IRC socket
#: sock.send("QUIT :byebye\r\n")
sock.close()
self.last_download = dl_file
return self.last_download
| process | identifier_name |
XDCC.py | # -*- coding: utf-8 -*-
import os
import re
import select
import socket
import struct
import time
from module.plugins.internal.Hoster import Hoster
from module.plugins.internal.misc import exists, fsjoin
class XDCC(Hoster):
__name__ = "XDCC"
__type__ = "hoster"
__version__ = "0.42"
__status__ = "testing"
__pattern__ = r'xdcc://(?P<SERVER>.*?)/#?(?P<CHAN>.*?)/(?P<BOT>.*?)/#?(?P<PACK>\d+)/?'
__config__ = [("nick", "str", "Nickname", "pyload" ),
("ident", "str", "Ident", "pyloadident" ),
("realname", "str", "Realname", "pyloadreal" ),
("ctcp_version", "str","CTCP version string", "pyLoad! IRC Interface")]
__description__ = """Download from IRC XDCC bot"""
__license__ = "GPLv3"
__authors__ = [("jeix", "[email protected]" ),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
def setup(self):
self.timeout = 30
self.multiDL = False
def process(self, pyfile):
#: Change request type
self.req = self.pyload.requestFactory.getRequest(self.classname, type="XDCC")
for _i in xrange(0, 3):
try:
nmn = self.do_download(pyfile.url)
self.log_info("Download of %s finished." % nmn)
return
except socket.error, e:
if hasattr(e, "errno") and e.errno is not None:
err_no = e.errno
if err_no in (10054, 10061):
self.log_warning("Server blocked our ip, retry in 5 min")
self.wait(300)
continue
else:
self.log_error(_("Failed due to socket errors. Code: %s") % err_no)
self.fail(_("Failed due to socket errors. Code: %s") % err_no)
else:
err_msg = e.args[0]
self.log_error(_("Failed due to socket errors: '%s'") % err_msg)
self.fail(_("Failed due to socket errors: '%s'") % err_msg)
self.log_error(_("Server blocked our ip, retry again later manually"))
self.fail(_("Server blocked our ip, retry again later manually"))
def do_download(self, url):
self.pyfile.setStatus("waiting")
server, chan, bot, pack = re.match(self.__pattern__, url).groups()
nick = self.config.get('nick')
ident = self.config.get('ident')
realname = self.config.get('realname')
ctcp_version = self.config.get('ctcp_version')
temp = server.split(':')
ln = len(temp)
if ln == 2:
host, port = temp
elif ln == 1:
host, port = temp[0], 6667
else:
self.fail(_("Invalid hostname for IRC Server: %s") % server)
#######################
#: CONNECT TO IRC AND IDLE FOR REAL LINK
dl_time = time.time()
sock = socket.socket()
self.log_info(_("Connecting to: %s:%s") % (host, port))
sock.connect((host, int(port)))
if nick == "pyload":
nick = "pyload-%d" % (time.time() % 1000) #: last 3 digits
sock.send("NICK %s\r\n" % nick)
sock.send("USER %s %s bla :%s\r\n" % (ident, host, realname))
self.log_info(_("Connect success."))
self.wait(5) # Wait for logon to complete
sock.send("JOIN #%s\r\n" % chan)
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack))
#: IRC recv loop
readbuffer = ""
retry = None
m = None
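# retry is set when the bot asks us to back off; the pack request is re-sent once it expires.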
while m is None:
if retry:
if time.time() > retry:
|
else:
if (dl_time + self.timeout) < time.time(): #@TODO: add in config
sock.send("QUIT :byebye\r\n")
sock.close()
self.log_error(_("XDCC Bot did not answer"))
self.fail(_("XDCC Bot did not answer"))
fdset = select.select([sock], [], [], 0)
if sock not in fdset[0]:
continue
readbuffer += sock.recv(1024)
lines = readbuffer.split("\n")
readbuffer = lines.pop()
for line in lines:
# if self.pyload.debug:
# self.log_debug("*> " + decode(line))
line = line.rstrip()
first = line.split()
if first[0] == "PING":
sock.send("PONG %s\r\n" % first[1])
if first[0] == "ERROR":
self.fail(_("IRC-Error: %s") % line)
msg = line.split(None, 3)
if len(msg) != 4:
continue
msg = {'origin': msg[0][1:],
'action': msg[1],
'target': msg[2],
'text' : msg[3][1:]}
if msg['target'][0:len(nick)] == nick and msg['action'] == "PRIVMSG":
if msg['text'] == "\x01VERSION\x01":
self.log_debug(_("Sending CTCP VERSION"))
sock.send("NOTICE %s :%s\r\n" % (msg['origin'], ctcp_version))
elif msg['text'] == "\x01TIME\x01":
self.log_debug(_("Sending CTCP TIME"))
sock.send("NOTICE %s :%d\r\n" % (msg['origin'], time.time()))
elif msg['text'] == "\x01LAG\x01":
pass #: don't know how to answer
if msg['origin'][0:len(bot)] != bot\
or msg['target'][0:len(nick)] != nick\
or msg['action'] not in ("PRIVMSG", "NOTICE"):
continue
self.log_debug(_("PrivMsg: <%s> - %s" % (msg['origin'], msg['text'])))
if "You already requested that pack" in msg['text']:
retry = time.time() + 300
elif "you must be on a known channel to request a pack" in msg['text']:
self.log_error(_("Invalid channel"))
self.fail(_("Invalid channel"))
m = re.match('\x01DCC SEND (?P<NAME>.*?) (?P<IP>\d+) (?P<PORT>\d+)(?: (?P<SIZE>\d+))?\x01', msg['text'])
#: Get connection data
ip = socket.inet_ntoa(struct.pack('!I', int(m.group('IP'))))
port = int(m.group('PORT'))
file_name = m.group('NAME')
if m.group('SIZE'):
self.req.filesize = long(m.group('SIZE'))
self.pyfile.name = file_name
dl_folder = fsjoin(self.pyload.config.get('general', 'download_folder'),
self.pyfile.package().folder if self.pyload.config.get("general",
"folder_per_package") else "")
dl_file = fsjoin(dl_folder, file_name)
if not exists(dl_folder):
os.makedirs(dl_folder)
self.set_permissions(dl_folder)
self.log_info(_("Downloading %s from %s:%d") % (file_name, ip, port))
self.pyfile.setStatus("downloading")
newname = self.req.download(ip, port, dl_file, sock, self.pyfile.setProgress)
if newname and newname != dl_file:
self.log_info(_("%(name)s saved as %(newname)s") % {'name': self.pyfile.name, 'newname': newname})
dl_file = newname
#: kill IRC socket
#: sock.send("QUIT :byebye\r\n")
sock.close()
self.last_download = dl_file
return self.last_download
| retry = None
dl_time = time.time()
sock.send("PRIVMSG %s :xdcc send #%s\r\n" % (bot, pack)) | conditional_block |
__init__.py | # -*- coding: utf-8 -*-
import re
from DelogX.utils.i18n import I18n
from DelogX.utils.path import Path
from DelogX.utils.plugin import Plugin
class DelogReadMore(Plugin):
| i18n = None
def run(self):
conf = self.blog.default_conf
self.i18n = I18n(
Path.format_url(self.workspace, 'locale'), conf('local.locale'))
self.manager.add_action('dx_post_update', self.parse_readmore)
def parse_readmore(self, post):
if not post:
return
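# The first <hr> in the rendered HTML marks the boundary between summary and full text.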
content_split = re.split(r'<[Hh][Rr](?:\s*/)?>', post.content, 1)  # \s* also matches <hr/> written without a space
if len(content_split) == 2:
summary, more = content_split
else:
summary = content_split[0]
more = ''
post_url = self.blog.runtime.get('url_prefix.post')
post_url = Path.format_url(post_url, Path.urlencode(post.url))
content = '''{0}
<div class="{1}"><a href="{2}">{3}</a></div>
<div class="post-more">{4}</div>
'''
more_class = ['read-more']
if not more:
more_class.append('no-more-content')
more_class = ' '.join(more_class)
content = content.format(
summary, more_class, post_url, self.i18n.get('Read More'), more)
post.content = content | identifier_body |
|
__init__.py | # -*- coding: utf-8 -*-
import re
from DelogX.utils.i18n import I18n
from DelogX.utils.path import Path
from DelogX.utils.plugin import Plugin
class DelogReadMore(Plugin):
i18n = None
def run(self):
conf = self.blog.default_conf
self.i18n = I18n(
Path.format_url(self.workspace, 'locale'), conf('local.locale'))
self.manager.add_action('dx_post_update', self.parse_readmore)
def parse_readmore(self, post):
if not post:
return
content_split = re.split(r'<[Hh][Rr](?:\s*/)?>', post.content, 1)  # \s* also matches <hr/> written without a space
if len(content_split) == 2:
summary, more = content_split
else:
summary = content_split[0]
more = ''
post_url = self.blog.runtime.get('url_prefix.post')
post_url = Path.format_url(post_url, Path.urlencode(post.url))
content = '''{0}
<div class="{1}"><a href="{2}">{3}</a></div>
<div class="post-more">{4}</div>
'''
more_class = ['read-more']
if not more:
|
more_class = ' '.join(more_class)
content = content.format(
summary, more_class, post_url, self.i18n.get('Read More'), more)
post.content = content
| more_class.append('no-more-content') | conditional_block |
__init__.py | # -*- coding: utf-8 -*-
import re
from DelogX.utils.i18n import I18n
from DelogX.utils.path import Path
from DelogX.utils.plugin import Plugin
class | (Plugin):
i18n = None
def run(self):
conf = self.blog.default_conf
self.i18n = I18n(
Path.format_url(self.workspace, 'locale'), conf('local.locale'))
self.manager.add_action('dx_post_update', self.parse_readmore)
def parse_readmore(self, post):
if not post:
return
content_split = re.split(r'<[Hh][Rr](?:\s*/)?>', post.content, 1)  # \s* also matches <hr/> written without a space
if len(content_split) == 2:
summary, more = content_split
else:
summary = content_split[0]
more = ''
post_url = self.blog.runtime.get('url_prefix.post')
post_url = Path.format_url(post_url, Path.urlencode(post.url))
content = '''{0}
<div class="{1}"><a href="{2}">{3}</a></div>
<div class="post-more">{4}</div>
'''
more_class = ['read-more']
if not more:
more_class.append('no-more-content')
more_class = ' '.join(more_class)
content = content.format(
summary, more_class, post_url, self.i18n.get('Read More'), more)
post.content = content
| DelogReadMore | identifier_name |
__init__.py | # -*- coding: utf-8 -*-
import re
from DelogX.utils.i18n import I18n
from DelogX.utils.path import Path
| from DelogX.utils.plugin import Plugin
class DelogReadMore(Plugin):
i18n = None
def run(self):
conf = self.blog.default_conf
self.i18n = I18n(
Path.format_url(self.workspace, 'locale'), conf('local.locale'))
self.manager.add_action('dx_post_update', self.parse_readmore)
def parse_readmore(self, post):
if not post:
return
content_split = re.split(r'<[Hh][Rr](?:\s*/)?>', post.content, 1)  # \s* also matches <hr/> written without a space
if len(content_split) == 2:
summary, more = content_split
else:
summary = content_split[0]
more = ''
post_url = self.blog.runtime.get('url_prefix.post')
post_url = Path.format_url(post_url, Path.urlencode(post.url))
content = '''{0}
<div class="{1}"><a href="{2}">{3}</a></div>
<div class="post-more">{4}</div>
'''
more_class = ['read-more']
if not more:
more_class.append('no-more-content')
more_class = ' '.join(more_class)
content = content.format(
summary, more_class, post_url, self.i18n.get('Read More'), more)
post.content = content | random_line_split |
|
main.rs | use std::env;
use std::io::prelude::*;
use std::io::BufReader;
use std::fs::File;
fn | (c: char) -> bool
{
match c {
' ' => true,
'x' => true,
'y' => true,
'=' => true,
_ => false,
}
}
const WIDTH: usize = 50;
const HEIGHT: usize = 6;
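// Fixed 50x6 pixel grid that the rect/rotate instructions operate on.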
fn main() {
let args: Vec<String> = env::args().collect();
let f = File::open(&args[1]).expect("Could not open file");
let reader = BufReader::new(f);
let mut lights = [[false; HEIGHT]; WIDTH];
for line in reader.lines() {
let contents = line.unwrap();
let parts: Vec<&str> = contents.split(|c| is_splitpoint(c)).collect();
match parts[0] {
"rect" => {
let width: usize = parts[1].parse().unwrap();
let height: usize = parts[2].parse().unwrap();
for x in 0..width {
for y in 0..height {
lights[x][y] = true;
}
}
},
"rotate" => {
let index: usize = parts[4].parse().expect("Invalid index");
let amount: usize = parts[7].parse().expect("Invalid amount");
match parts[1] {
"row" => {
let mut copy = [false; WIDTH];
for x in 0..WIDTH {
copy[x] = lights[x][index];
}
for x in 0..WIDTH {
lights[(x + amount) % WIDTH][index] = copy[x];
}
},
"column" => {
let mut copy = [false; HEIGHT];
for y in 0..HEIGHT {
copy[y] = lights[index][y];
}
for y in 0..HEIGHT {
lights[index][(y + amount) % HEIGHT] = copy[y];
}
}
_ => panic!("{} is not a supported rotation", parts[1]),
}
},
_ => panic!("{} is not a supported operation", parts[0]),
}
}
let mut count = 0;
for y in 0..HEIGHT {
for x in 0..WIDTH {
let mut c = ' ';
if lights[x][y] {
count += 1;
c = '★';
}
print!("{}", c);
// spacing between letters
if x % 5 == 4 {
print!(" ");
}
}
println!("");
}
println!("{} lights active.", count);
}
| is_splitpoint | identifier_name |
main.rs | use std::env;
use std::io::prelude::*;
use std::io::BufReader;
use std::fs::File;
fn is_splitpoint(c: char) -> bool
{
match c {
' ' => true,
'x' => true,
'y' => true,
'=' => true,
_ => false,
}
}
const WIDTH: usize = 50;
const HEIGHT: usize = 6;
fn main() {
let args: Vec<String> = env::args().collect();
let f = File::open(&args[1]).expect("Could not open file");
let reader = BufReader::new(f);
let mut lights = [[false; HEIGHT]; WIDTH];
for line in reader.lines() {
let contents = line.unwrap();
let parts: Vec<&str> = contents.split(|c| is_splitpoint(c)).collect();
match parts[0] {
"rect" => {
let width: usize = parts[1].parse().unwrap();
let height: usize = parts[2].parse().unwrap();
for x in 0..width {
for y in 0..height {
lights[x][y] = true;
}
}
},
"rotate" => {
let index: usize = parts[4].parse().expect("Invalid index");
let amount: usize = parts[7].parse().expect("Invalid amount");
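// Rotations wrap around, so the row/column is copied first to keep reads from seeing shifted writes.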
match parts[1] {
"row" => {
let mut copy = [false; WIDTH];
for x in 0..WIDTH {
copy[x] = lights[x][index];
}
for x in 0..WIDTH {
lights[(x + amount) % WIDTH][index] = copy[x];
}
},
"column" => {
let mut copy = [false; HEIGHT];
for y in 0..HEIGHT {
copy[y] = lights[index][y];
}
for y in 0..HEIGHT {
lights[index][(y + amount) % HEIGHT] = copy[y];
}
}
_ => panic!("{} is not a supported rotation", parts[1]),
}
},
_ => panic!("{} is not a supported operation", parts[0]),
}
}
let mut count = 0;
for y in 0..HEIGHT {
for x in 0..WIDTH {
let mut c = ' ';
if lights[x][y] {
count += 1;
c = '★';
}
print!("{}", c);
// spacing between letters
if x % 5 == 4 { | println!("");
}
println!("{} lights active.", count);
} | print!(" ");
}
} | random_line_split |
main.rs | use std::env;
use std::io::prelude::*;
use std::io::BufReader;
use std::fs::File;
fn is_splitpoint(c: char) -> bool
|
const WIDTH: usize = 50;
const HEIGHT: usize = 6;
fn main() {
let args: Vec<String> = env::args().collect();
let f = File::open(&args[1]).expect("Could not open file");
let reader = BufReader::new(f);
let mut lights = [[false; HEIGHT]; WIDTH];
for line in reader.lines() {
let contents = line.unwrap();
let parts: Vec<&str> = contents.split(|c| is_splitpoint(c)).collect();
match parts[0] {
"rect" => {
let width: usize = parts[1].parse().unwrap();
let height: usize = parts[2].parse().unwrap();
for x in 0..width {
for y in 0..height {
lights[x][y] = true;
}
}
},
"rotate" => {
let index: usize = parts[4].parse().expect("Invalid index");
let amount: usize = parts[7].parse().expect("Invalid amount");
match parts[1] {
"row" => {
let mut copy = [false; WIDTH];
for x in 0..WIDTH {
copy[x] = lights[x][index];
}
for x in 0..WIDTH {
lights[(x + amount) % WIDTH][index] = copy[x];
}
},
"column" => {
let mut copy = [false; HEIGHT];
for y in 0..HEIGHT {
copy[y] = lights[index][y];
}
for y in 0..HEIGHT {
lights[index][(y + amount) % HEIGHT] = copy[y];
}
}
_ => panic!("{} is not a supported rotation", parts[1]),
}
},
_ => panic!("{} is not a supported operation", parts[0]),
}
}
let mut count = 0;
for y in 0..HEIGHT {
for x in 0..WIDTH {
let mut c = ' ';
if lights[x][y] {
count += 1;
c = '★';
}
print!("{}", c);
// spacing between letters
if x % 5 == 4 {
print!(" ");
}
}
println!("");
}
println!("{} lights active.", count);
}
| {
match c {
' ' => true,
'x' => true,
'y' => true,
'=' => true,
_ => false,
}
} | identifier_body |
pl.js | OC.L10N.register(
"encryption",
{
"Missing recovery key password" : "Brakujące hasło klucza odzyskiwania",
"Please repeat the recovery key password" : "Proszę powtórz nowe hasło klucza odzyskiwania",
"Repeated recovery key password does not match the provided recovery key password" : "Hasła klucza odzyskiwania nie zgadzają się",
"Recovery key successfully enabled" : "Klucz odzyskiwania włączony",
"Could not enable recovery key. Please check your recovery key password!" : "Nie można włączyć klucza odzyskiwania. Proszę sprawdzić swoje hasło odzyskiwania!",
"Recovery key successfully disabled" : "Klucz odzyskiwania wyłączony",
"Could not disable recovery key. Please check your recovery key password!" : "Nie można wyłączyć klucza odzyskiwania. Proszę sprawdzić swoje hasło odzyskiwania!",
"Missing parameters" : "Brakujące dane", | "Password successfully changed." : "Zmiana hasła udana.",
"Could not change the password. Maybe the old password was not correct." : "Nie można zmienić hasła. Może stare hasło nie było poprawne.",
"Recovery Key disabled" : "Klucz odzyskiwania wyłączony",
"Recovery Key enabled" : "Klucz odzyskiwania włączony",
"Could not enable the recovery key, please try again or contact your administrator" : "Nie można włączyć klucza odzyskiwania. Proszę spróbować ponownie lub skontakuj sie z administratorem",
"Could not update the private key password." : "Nie można zmienić hasła klucza prywatnego.",
"The old password was not correct, please try again." : "Stare hasło nie było poprawne. Spróbuj jeszcze raz.",
"The current log-in password was not correct, please try again." : "Obecne hasło logowania nie było poprawne. Spróbuj ponownie.",
"Private key password successfully updated." : "Pomyślnie zaktualizowano hasło klucza prywatnego.",
"You need to migrate your encryption keys from the old encryption (ownCloud <= 8.0) to the new one. Please run 'occ encryption:migrate' or contact your administrator" : "Musisz przenieść swoje klucze szyfrowania ze starego sposobu szyfrowania (Nextcloud <= 8,0) na nowy. Proszę uruchomić 'occ encryption:migrate' lub skontaktować się z administratorem",
"Invalid private key for encryption app. Please update your private key password in your personal settings to recover access to your encrypted files." : "Nieprawidłowy klucz prywatny do szyfrowania aplikacji. Należy zaktualizować hasło klucza prywatnego w ustawieniach osobistych, aby odzyskać dostęp do zaszyfrowanych plików.",
"Encryption App is enabled, but your keys are not initialized. Please log-out and log-in again." : "Aplikacja szyfrująca jest włączona, ale Twoje klucze nie są zainicjowane. Proszę się wylogować i zalogować ponownie.",
"Please enable server side encryption in the admin settings in order to use the encryption module." : "Aby móc korzystać z modułu szyfrowania trzeba włączyć w panelu administratora szyfrowanie po stronie serwera. ",
"Encryption app is enabled and ready" : "Szyfrowanie aplikacja jest włączone i gotowe",
"Bad Signature" : "Zła sygnatura",
"Missing Signature" : "Brakująca sygnatura",
"one-time password for server-side-encryption" : "jednorazowe hasło do serwera szyfrowania strony",
"Can not decrypt this file, probably this is a shared file. Please ask the file owner to reshare the file with you." : "Nie można odszyfrować tego pliku, prawdopodobnie jest to plik udostępniony. Poproś właściciela pliku o ponowne udostępnianie pliku Tobie.",
"Can not read this file, probably this is a shared file. Please ask the file owner to reshare the file with you." : "Nie można odczytać tego pliku, prawdopodobnie plik nie jest współdzielony. Proszę zwrócić się do właściciela pliku, aby udostępnił go dla Ciebie.",
"Default encryption module" : "Domyślny moduł szyfrujący",
"Hey there,\n\nthe admin enabled server-side-encryption. Your files were encrypted using the password '%s'.\n\nPlease login to the web interface, go to the section 'basic encryption module' of your personal settings and update your encryption password by entering this password into the 'old log-in password' field and your current login-password.\n\n" : "Hej tam,\n\nadmin włączył szyfrowanie po stronie serwera. Twoje pliki zostały zaszyfrowane przy użyciu hasła '%s'.\n\nProszę zalogować się do interfejsu internetowego, przejdź do sekcji Nextcloud podstawowy moduł szyfrowania, następnie osobiste ustawienia i zaktualizuj hasło szyfrowania wpisując aktualny login, w polu stare hasło logowania wpisz stare hasło, a następnie aktualne hasło.\n\n",
"The share will expire on %s." : "Ten zasób wygaśnie %s",
"Cheers!" : "Dzięki!",
"Hey there,<br><br>the admin enabled server-side-encryption. Your files were encrypted using the password <strong>%s</strong>.<br><br>Please login to the web interface, go to the section \"basic encryption module\" of your personal settings and update your encryption password by entering this password into the \"old log-in password\" field and your current login-password.<br><br>" : "Hej tam,<br><br>admin włączył szyfrowanie po stronie serwera. Twoje pliki zostały zaszyfrowane przy użyciu hasła <strong>%s</strong>.<br><br>Proszę zalogować się do interfejsu internetowego, przejdź do sekcji Nextcloud podstawowy moduł szyfrowania, następnie osobiste ustawienia i zaktualizuj hasło szyfrowania wpisując aktualny login, w polu stare hasło logowania wpisz stare hasło, a następnie aktualne hasło.<br><br>",
"Encryption app is enabled but your keys are not initialized, please log-out and log-in again" : "Szyfrowanie w aplikacji jest włączone, ale klucze nie są zainicjowane. Prosimy wylogować się i ponownie zalogować się.",
"Encrypt the home storage" : "Szyfrowanie przechowywanie w domu",
"Enabling this option encrypts all files stored on the main storage, otherwise only files on external storage will be encrypted" : "Włączenie tej opcji spowoduje szyfrowanie wszystkich plików zapisanych na pamięci wewnętrznej. W innym wypadku szyfrowane będą tylko pliki na pamięci zewnętrznej.",
"Enable recovery key" : "Włącz klucz odzyskiwania",
"Disable recovery key" : "Wyłącz klucz odzyskiwania",
"The recovery key is an extra encryption key that is used to encrypt files. It allows recovery of a user's files if the user forgets his or her password." : "Kluczem do odzyskiwania jest dodatkowy klucz szyfrujący, który służy do szyfrowania plików. Umożliwia on odzyskanie plików użytkownika, jeśli użytkownik zapomni swoje hasło.",
"Recovery key password" : "Hasło klucza odzyskiwania",
"Repeat recovery key password" : "Powtórz hasło klucza odzyskiwania",
"Change recovery key password:" : "Zmień hasło klucza odzyskiwania",
"Old recovery key password" : "Stare hasło klucza odzyskiwania",
"New recovery key password" : "Nowe hasło klucza odzyskiwania",
"Repeat new recovery key password" : "Powtórz nowe hasło klucza odzyskiwania",
"Change Password" : "Zmień hasło",
"Basic encryption module" : "Podstawowy moduł szyfrujący",
"Your private key password no longer matches your log-in password." : "Hasło Twojego klucza prywatnego nie pasuje już do Twojego hasła logowania.",
"Set your old private key password to your current log-in password:" : "Ustaw stare hasło klucza prywatnego na aktualne hasło logowania:",
" If you don't remember your old password you can ask your administrator to recover your files." : "Jeśli nie pamiętasz swojego starego hasła, poproś swojego administratora, aby odzyskać pliki.",
"Old log-in password" : "Stare hasło logowania",
"Current log-in password" : "Bieżące hasło logowania",
"Update Private Key Password" : "Aktualizacja hasła klucza prywatnego",
"Enable password recovery:" : "Włącz hasło odzyskiwania:",
"Enabling this option will allow you to reobtain access to your encrypted files in case of password loss" : "Włączenie tej opcji umożliwia otrzymać dostęp do zaszyfrowanych plików w przypadku utraty hasła",
"Enabled" : "Włączone",
"Disabled" : "Wyłączone"
},
"nplurals=4; plural=(n==1 ? 0 : (n%10>=2 && n%10<=4) && (n%100<12 || n%100>14) ? 1 : n!=1 && (n%10>=0 && n%10<=1) || (n%10>=5 && n%10<=9) || (n%100>=12 && n%100<=14) ? 2 : 3);"); | "Please provide the old recovery password" : "Podaj stare hasło odzyskiwania",
"Please provide a new recovery password" : "Podaj nowe hasło odzyskiwania",
"Please repeat the new recovery password" : "Proszę powtórz nowe hasło odzyskiwania", | random_line_split |
read_analog.js | var wpi = require('wiring-pi');
// var ref = require('ref'); // required if passing pointers to lib functions
// var ffi = require('ffi');
// var gert = ffi.Library('libwiringPiDev', {
// 'gertboardAnalogSetup': [ 'int', [ 'int' ] ]
// });
// var res = gert.atod();
// console.log('Gertboard ADC is currently measuring: ' + res);
wpi.setup('wpi');
var port = 0;
var gertPinBase = 0;
// gert.gertboardAnalogSetup(gertPinBase);
/*
if (wpi.gertboardSPISetup() < 0){
console.log("error " + errno);
exit;
}
*/
var value;
console.log("RPi board rev = " + wpi.piBoardRev());
console.log("RPi board Id = " + wpi.piBoardId());
console.log(wpi.piBoardId().model);
console.log(wpi.piBoardId().rev);
console.log(wpi.piBoardId().maker);
//for (var i=0; i < board.length; i++){
// console.log(board[i]);
//}
console.log("wiringPi ADC port " + port);
setInterval(function() {
value = wpi.analogRead(port); | console.log("analog input = " + value);
}, 500); | random_line_split |
|
make-middleware.ts | /*!
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as http from 'http';
import onFinished = require('on-finished');
import {getOrInjectContext} from '../../utils/context';
import {
makeHttpRequestData,
ServerRequest,
CloudLoggingHttpRequest,
} from '../../utils/http-request';
interface AnnotatedRequestType<LoggerType> extends ServerRequest {
log: LoggerType;
}
| * the `request` object. It optionally can do HttpRequest timing that can be
* used for generating request logs. This can be used to integrate with logging
* libraries such as winston and bunyan.
*
* @param projectId Generated traceIds will be associated with this project.
* @param makeChildLogger A function that generates logger instances that will
* be installed onto `req` as `req.log`. The logger should include the trace in
* each log entry's metadata (associated with the LOGGING_TRACE_KEY property).
* @param emitRequestLog Optional. A function that will emit a parent request
* log. While some environments like GAE and GCF emit parent request logs
* automatically, other environments do not. When provided, this function will be
* called with a populated `CloudLoggingHttpRequest` which can be emitted as
* request log.
*/
export function makeMiddleware<LoggerType>(
projectId: string,
makeChildLogger: (
trace: string,
span?: string,
traceSampled?: boolean
) => LoggerType,
emitRequestLog?: (
httpRequest: CloudLoggingHttpRequest,
trace: string,
span?: string,
traceSampled?: boolean
) => void
) {
return (req: ServerRequest, res: http.ServerResponse, next: Function) => {
// TODO(ofrobots): use high-resolution timer.
const requestStartMs = Date.now();
// Detect & establish context if we were the first actor to detect lack of
// context so traceContext is always available when using middleware.
const traceContext = getOrInjectContext(req, projectId, true);
// Install a child logger on the request object, with detected trace and
// span.
(req as AnnotatedRequestType<LoggerType>).log = makeChildLogger(
traceContext.trace,
traceContext.spanId,
traceContext.traceSampled
);
// Emit a 'Request Log' on the parent logger, with detected trace and
// span.
if (emitRequestLog) {
onFinished(res, () => {
const latencyMs = Date.now() - requestStartMs;
const httpRequest = makeHttpRequestData(req, res, latencyMs);
emitRequestLog(
httpRequest,
traceContext.trace,
traceContext.spanId,
traceContext.traceSampled
);
});
}
next();
};
} | /**
* Generates an express middleware that installs a request-specific logger on | random_line_split |
make-middleware.ts | /*!
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as http from 'http';
import onFinished = require('on-finished');
import {getOrInjectContext} from '../../utils/context';
import {
makeHttpRequestData,
ServerRequest,
CloudLoggingHttpRequest,
} from '../../utils/http-request';
interface AnnotatedRequestType<LoggerType> extends ServerRequest {
log: LoggerType;
}
/**
* Generates an express middleware that installs a request-specific logger on
* the `request` object. It optionally can do HttpRequest timing that can be
* used for generating request logs. This can be used to integrate with logging
* libraries such as winston and bunyan.
*
* @param projectId Generated traceIds will be associated with this project.
* @param makeChildLogger A function that generates logger instances that will
* be installed onto `req` as `req.log`. The logger should include the trace in
* each log entry's metadata (associated with the LOGGING_TRACE_KEY property).
* @param emitRequestLog Optional. A function that will emit a parent request
* log. While some environments like GAE and GCF emit parent request logs
* automatically, other environments do not. When provided, this function will be
* called with a populated `CloudLoggingHttpRequest` which can be emitted as
* request log.
*/
export function | <LoggerType>(
projectId: string,
makeChildLogger: (
trace: string,
span?: string,
traceSampled?: boolean
) => LoggerType,
emitRequestLog?: (
httpRequest: CloudLoggingHttpRequest,
trace: string,
span?: string,
traceSampled?: boolean
) => void
) {
return (req: ServerRequest, res: http.ServerResponse, next: Function) => {
// TODO(ofrobots): use high-resolution timer.
const requestStartMs = Date.now();
// Detect & establish context if we were the first actor to detect lack of
// context so traceContext is always available when using middleware.
const traceContext = getOrInjectContext(req, projectId, true);
// Install a child logger on the request object, with detected trace and
// span.
(req as AnnotatedRequestType<LoggerType>).log = makeChildLogger(
traceContext.trace,
traceContext.spanId,
traceContext.traceSampled
);
// Emit a 'Request Log' on the parent logger, with detected trace and
// span.
if (emitRequestLog) {
onFinished(res, () => {
const latencyMs = Date.now() - requestStartMs;
const httpRequest = makeHttpRequestData(req, res, latencyMs);
emitRequestLog(
httpRequest,
traceContext.trace,
traceContext.spanId,
traceContext.traceSampled
);
});
}
next();
};
}
| makeMiddleware | identifier_name |
make-middleware.ts | /*!
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as http from 'http';
import onFinished = require('on-finished');
import {getOrInjectContext} from '../../utils/context';
import {
makeHttpRequestData,
ServerRequest,
CloudLoggingHttpRequest,
} from '../../utils/http-request';
interface AnnotatedRequestType<LoggerType> extends ServerRequest {
log: LoggerType;
}
/**
* Generates an express middleware that installs a request-specific logger on
* the `request` object. It optionally can do HttpRequest timing that can be
* used for generating request logs. This can be used to integrate with logging
* libraries such as winston and bunyan.
*
* @param projectId Generated traceIds will be associated with this project.
* @param makeChildLogger A function that generates logger instances that will
* be installed onto `req` as `req.log`. The logger should include the trace in
* each log entry's metadata (associated with the LOGGING_TRACE_KEY property).
* @param emitRequestLog Optional. A function that will emit a parent request
* log. While some environments like GAE and GCF emit parent request logs
* automatically, other environments do not. When provided, this function will be
* called with a populated `CloudLoggingHttpRequest` which can be emitted as
* request log.
*/
export function makeMiddleware<LoggerType>(
projectId: string,
makeChildLogger: (
trace: string,
span?: string,
traceSampled?: boolean
) => LoggerType,
emitRequestLog?: (
httpRequest: CloudLoggingHttpRequest,
trace: string,
span?: string,
traceSampled?: boolean
) => void
) {
return (req: ServerRequest, res: http.ServerResponse, next: Function) => {
// TODO(ofrobots): use high-resolution timer.
const requestStartMs = Date.now();
// Detect & establish context if we were the first actor to detect lack of
// context so traceContext is always available when using middleware.
const traceContext = getOrInjectContext(req, projectId, true);
// Install a child logger on the request object, with detected trace and
// span.
(req as AnnotatedRequestType<LoggerType>).log = makeChildLogger(
traceContext.trace,
traceContext.spanId,
traceContext.traceSampled
);
// Emit a 'Request Log' on the parent logger, with detected trace and
// span.
if (emitRequestLog) |
next();
};
}
| {
onFinished(res, () => {
const latencyMs = Date.now() - requestStartMs;
const httpRequest = makeHttpRequestData(req, res, latencyMs);
emitRequestLog(
httpRequest,
traceContext.trace,
traceContext.spanId,
traceContext.traceSampled
);
});
} | conditional_block |
make-middleware.ts | /*!
* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as http from 'http';
import onFinished = require('on-finished');
import {getOrInjectContext} from '../../utils/context';
import {
makeHttpRequestData,
ServerRequest,
CloudLoggingHttpRequest,
} from '../../utils/http-request';
interface AnnotatedRequestType<LoggerType> extends ServerRequest {
log: LoggerType;
}
/**
* Generates an express middleware that installs a request-specific logger on
* the `request` object. It optionally can do HttpRequest timing that can be
* used for generating request logs. This can be used to integrate with logging
* libraries such as winston and bunyan.
*
* @param projectId Generated traceIds will be associated with this project.
* @param makeChildLogger A function that generates logger instances that will
* be installed onto `req` as `req.log`. The logger should include the trace in
* each log entry's metadata (associated with the LOGGING_TRACE_KEY property).
* @param emitRequestLog Optional. A function that will emit a parent request
* log. While some environments like GAE and GCF emit parent request logs
* automatically, other environments do not. When provided, this function will be
* called with a populated `CloudLoggingHttpRequest` which can be emitted as
* request log.
*/
export function makeMiddleware<LoggerType>(
projectId: string,
makeChildLogger: (
trace: string,
span?: string,
traceSampled?: boolean
) => LoggerType,
emitRequestLog?: (
httpRequest: CloudLoggingHttpRequest,
trace: string,
span?: string,
traceSampled?: boolean
) => void
) | {
return (req: ServerRequest, res: http.ServerResponse, next: Function) => {
// TODO(ofrobots): use high-resolution timer.
const requestStartMs = Date.now();
// Detect & establish context if we were the first actor to detect lack of
// context so traceContext is always available when using middleware.
const traceContext = getOrInjectContext(req, projectId, true);
// Install a child logger on the request object, with detected trace and
// span.
(req as AnnotatedRequestType<LoggerType>).log = makeChildLogger(
traceContext.trace,
traceContext.spanId,
traceContext.traceSampled
);
// Emit a 'Request Log' on the parent logger, with detected trace and
// span.
if (emitRequestLog) {
onFinished(res, () => {
const latencyMs = Date.now() - requestStartMs;
const httpRequest = makeHttpRequestData(req, res, latencyMs);
emitRequestLog(
httpRequest,
traceContext.trace,
traceContext.spanId,
traceContext.traceSampled
);
});
}
next();
};
} | identifier_body |
|
pingdom.py | # The MIT License
#
# Copyright (c) 2010 Daniel R. Craig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from urlparse import urljoin
from urllib import urlencode
import urllib2
import json
import time
API_URL = 'https://api.pingdom.com/api/2.0/'
class Pingdom(object):
def __init__(self, url=API_URL, username=None, password=None, appkey=None):
self.url = url
self.appkey = appkey
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, url, username, password)
auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
self.opener = urllib2.build_opener(auth_handler)
class RequestWithMethod(urllib2.Request):
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False, http_method=None):
|
def get_method(self):
if self.method:
return self.method
return urllib2.Request.get_method(self)
def method(self, url, method="GET", parameters=None):
if parameters:
data = urlencode(parameters)
else:
data = None
method_url = urljoin(self.url, url)
if method == "GET" and data:
method_url = method_url+'?'+data
req = self.RequestWithMethod(method_url, http_method=method, data=None)
else:
req = self.RequestWithMethod(method_url, http_method=method, data=data)
req.add_header('App-Key', self.appkey)
response = self.opener.open(req).read()
return json.loads(response)
def check_by_name(self, name):
resp = self.method('checks')
checks = [check for check in resp['checks'] if check['name'] == name]
return checks
def check_status(self, name):
checks = self.check_by_name(name)
for check in checks:
print '%s check %s' % (check['name'], check['status'])
def modify_check(self, name, parameters={}):
checks = self.check_by_name(name)
if not checks:
print "No checks for %s" % name
return
for check in checks:
id_ = check['id']
response = self.method('checks/%s/' % id_, method='PUT', parameters=parameters)
print response['message']
def pause_check(self, name):
self.modify_check(name, parameters={'paused': True})
self.check_status(name)
def unpause_check(self, name):
self.modify_check(name, parameters={'paused': False})
self.check_status(name)
def avg_response(self, check_id, minutes_back=None, country=None):
parameters = {}
if minutes_back:
from_time = "%.0f" % (time.time() - 60*minutes_back)
parameters['from'] = from_time
if country:
parameters['bycountry'] = 'true'
summary = self.method('summary.average/%s/' % check_id, parameters=parameters)['summary']
avgresponse = summary['responsetime']['avgresponse']
if country:
response_time = None
for c in avgresponse:
countryiso = c['countryiso']
countryresponse = c['avgresponse']
if countryiso == country:
response_time = countryresponse
else:
response_time = avgresponse
return response_time
| urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
if http_method:
self.method = http_method | identifier_body |
pingdom.py | # The MIT License
#
# Copyright (c) 2010 Daniel R. Craig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from urlparse import urljoin
from urllib import urlencode
import urllib2
import json
import time
API_URL = 'https://api.pingdom.com/api/2.0/'
class Pingdom(object):
def __init__(self, url=API_URL, username=None, password=None, appkey=None):
self.url = url
self.appkey = appkey
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, url, username, password)
auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
self.opener = urllib2.build_opener(auth_handler)
class RequestWithMethod(urllib2.Request):
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False, http_method=None):
urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
# Always set the attribute; a None value falls through to the default lookup below.
self.method = http_method
def | (self):
if self.method:
return self.method
return urllib2.Request.get_method(self)
def method(self, url, method="GET", parameters=None):
if parameters:
data = urlencode(parameters)
else:
data = None
method_url = urljoin(self.url, url)
if method == "GET" and data:
method_url = method_url+'?'+data
req = self.RequestWithMethod(method_url, http_method=method, data=None)
else:
req = self.RequestWithMethod(method_url, http_method=method, data=data)
req.add_header('App-Key', self.appkey)
response = self.opener.open(req).read()
return json.loads(response)
def check_by_name(self, name):
resp = self.method('checks')
checks = [check for check in resp['checks'] if check['name'] == name]
return checks
def check_status(self, name):
checks = self.check_by_name(name)
for check in checks:
print '%s check %s' % (check['name'], check['status'])
def modify_check(self, name, parameters={}):
checks = self.check_by_name(name)
if not checks:
print "No checks for %s" % name
return
for check in checks:
id_ = check['id']
response = self.method('checks/%s/' % id_, method='PUT', parameters=parameters)
print response['message']
def pause_check(self, name):
self.modify_check(name, parameters={'paused': True})
self.check_status(name)
def unpause_check(self, name):
self.modify_check(name, parameters={'paused': False})
self.check_status(name)
def avg_response(self, check_id, minutes_back=None, country=None):
parameters = {}
if minutes_back:
from_time = "%.0f" % (time.time() - 60*minutes_back)
parameters['from'] = from_time
if country:
parameters['bycountry'] = 'true'
summary = self.method('summary.average/%s/' % check_id, parameters=parameters)['summary']
avgresponse = summary['responsetime']['avgresponse']
if country:
response_time = None
for c in avgresponse:
countryiso = c['countryiso']
countryresponse = c['avgresponse']
if countryiso == country:
response_time = countryresponse
else:
response_time = avgresponse
return response_time
| get_method | identifier_name |
pingdom.py | # The MIT License
#
# Copyright (c) 2010 Daniel R. Craig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from urlparse import urljoin
from urllib import urlencode
import urllib2
import json
import time
API_URL = 'https://api.pingdom.com/api/2.0/'
| class Pingdom(object):
def __init__(self, url=API_URL, username=None, password=None, appkey=None):
self.url = url
self.appkey = appkey
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, url, username, password)
auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
self.opener = urllib2.build_opener(auth_handler)
class RequestWithMethod(urllib2.Request):
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False, http_method=None):
urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
# Always set the attribute; a None value falls through to the default lookup below.
self.method = http_method
def get_method(self):
if self.method:
return self.method
return urllib2.Request.get_method(self)
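# Thin REST helper: GET requests carry parameters in the query string, other verbs send them in the body.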
def method(self, url, method="GET", parameters=None):
if parameters:
data = urlencode(parameters)
else:
data = None
method_url = urljoin(self.url, url)
if method == "GET" and data:
method_url = method_url+'?'+data
req = self.RequestWithMethod(method_url, http_method=method, data=None)
else:
req = self.RequestWithMethod(method_url, http_method=method, data=data)
req.add_header('App-Key', self.appkey)
response = self.opener.open(req).read()
return json.loads(response)
def check_by_name(self, name):
resp = self.method('checks')
checks = [check for check in resp['checks'] if check['name'] == name]
return checks
def check_status(self, name):
checks = self.check_by_name(name)
for check in checks:
print '%s check %s' % (check['name'], check['status'])
def modify_check(self, name, parameters={}):
checks = self.check_by_name(name)
if not checks:
print "No checks for %s" % name
return
for check in checks:
id_ = check['id']
response = self.method('checks/%s/' % id_, method='PUT', parameters=parameters)
print response['message']
def pause_check(self, name):
self.modify_check(name, parameters={'paused': True})
self.check_status(name)
def unpause_check(self, name):
self.modify_check(name, parameters={'paused': False})
self.check_status(name)
def avg_response(self, check_id, minutes_back=None, country=None):
parameters = {}
if minutes_back:
from_time = "%.0f" % (time.time() - 60*minutes_back)
parameters['from'] = from_time
if country:
parameters['bycountry'] = 'true'
summary = self.method('summary.average/%s/' % check_id, parameters=parameters)['summary']
avgresponse = summary['responsetime']['avgresponse']
if country:
response_time = None
for c in avgresponse:
countryiso = c['countryiso']
countryresponse = c['avgresponse']
if countryiso == country:
response_time = countryresponse
else:
response_time = avgresponse
return response_time | random_line_split |
|
pingdom.py | # The MIT License
#
# Copyright (c) 2010 Daniel R. Craig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from urlparse import urljoin
from urllib import urlencode
import urllib2
import json
import time
API_URL = 'https://api.pingdom.com/api/2.0/'
class Pingdom(object):
def __init__(self, url=API_URL, username=None, password=None, appkey=None):
self.url = url
self.appkey = appkey
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, url, username, password)
auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
self.opener = urllib2.build_opener(auth_handler)
class RequestWithMethod(urllib2.Request):
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False, http_method=None):
urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
# Always set the attribute; a None value falls through to the default lookup below.
self.method = http_method
def get_method(self):
if self.method:
return self.method
return urllib2.Request.get_method(self)
def method(self, url, method="GET", parameters=None):
if parameters:
data = urlencode(parameters)
else:
data = None
method_url = urljoin(self.url, url)
if method == "GET" and data:
|
else:
req = self.RequestWithMethod(method_url, http_method=method, data=data)
req.add_header('App-Key', self.appkey)
response = self.opener.open(req).read()
return json.loads(response)
def check_by_name(self, name):
resp = self.method('checks')
checks = [check for check in resp['checks'] if check['name'] == name]
return checks
def check_status(self, name):
checks = self.check_by_name(name)
for check in checks:
print '%s check %s' % (check['name'], check['status'])
def modify_check(self, name, parameters={}):
checks = self.check_by_name(name)
if not checks:
print "No checks for %s" % name
return
for check in checks:
id_ = check['id']
response = self.method('checks/%s/' % id_, method='PUT', parameters=parameters)
print response['message']
def pause_check(self, name):
self.modify_check(name, parameters={'paused': True})
self.check_status(name)
def unpause_check(self, name):
self.modify_check(name, parameters={'paused': False})
self.check_status(name)
def avg_response(self, check_id, minutes_back=None, country=None):
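# Pingdom expects 'from' as a Unix timestamp, so minutes_back is converted here.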
parameters = {}
if minutes_back:
from_time = "%.0f" % (time.time() - 60*minutes_back)
parameters['from'] = from_time
if country:
parameters['bycountry'] = 'true'
summary = self.method('summary.average/%s/' % check_id, parameters=parameters)['summary']
avgresponse = summary['responsetime']['avgresponse']
if country:
response_time = None
for c in avgresponse:
countryiso = c['countryiso']
countryresponse = c['avgresponse']
if countryiso == country:
response_time = countryresponse
else:
response_time = avgresponse
return response_time
| method_url = method_url+'?'+data
req = self.RequestWithMethod(method_url, http_method=method, data=None) | conditional_block |
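# --- Editor's note: illustrative usage sketch for the Pingdom client above.
# The credentials, check name and check id are hypothetical placeholders, not
# values from the dataset row.
# p = Pingdom(username='user@example.com', password='secret', appkey='my-app-key')
# p.check_status('My website')          # prints e.g. "My website up"
# p.pause_check('My website')           # PUTs {'paused': True} to checks/<id>/
# print p.avg_response(12345, minutes_back=60, country='US')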
scan_for_lcmtypes.py | #!/usr/bin/python
import re
import os
import sys
import pyclbr
def find_lcmtypes():
alpha_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
valid_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_")
lcmtypes = []
regex = re.compile("_get_packed_fingerprint")
dirs_to_check = sys.path
for dir_name in dirs_to_check:
for root, dirs, files in os.walk(dir_name):
subdirs = root[len(dir_name):].split(os.sep)
subdirs = [ s for s in subdirs if s ]
python_package = ".".join(subdirs)
for fname in files:
|
# only recurse into subdirectories that correspond to python
# packages (i.e., they contain a file named "__init__.py")
subdirs_to_traverse = [ subdir_name for subdir_name in dirs \
if os.path.exists(os.path.join(root, subdir_name, "__init__.py")) ]
del dirs[:]
dirs.extend(subdirs_to_traverse)
return lcmtypes
def make_lcmtype_dictionary():
"""Create a dictionary of LCM types keyed by fingerprint.
Searches the specified python package directories for modules
corresponding to LCM types, imports all the discovered types into the
global namespace, and returns a dictionary mapping packed fingerprints
to LCM type classes.
The primary use for this dictionary is to automatically identify and
decode an LCM message.
"""
lcmtypes = find_lcmtypes()
result = {}
for lcmtype_name in lcmtypes:
try:
__import__(lcmtype_name)
mod = sys.modules[lcmtype_name]
type_basename = lcmtype_name.split(".")[-1]
klass = getattr(mod, type_basename)
fingerprint = klass._get_packed_fingerprint()
result[fingerprint] = klass
#print "importing %s" % lcmtype_name
except:
print "Error importing %s" % lcmtype_name
return result
if __name__ == "__main__":
import binascii
print("Searching for LCM types...")
lcmtypes = make_lcmtype_dictionary()
num_types = len(lcmtypes)
print("Found %d type%s" % (num_types, num_types==1 and "" or "s"))
for fingerprint, klass in lcmtypes.items():
print binascii.hexlify(fingerprint), klass.__module__
| if not fname.endswith(".py"):
continue
mod_basename = fname[:-3]
valid_modname = True
for c in mod_basename:
if c not in valid_chars:
valid_modname = False
break
if mod_basename[0] not in alpha_chars:
valid_modname = False
if not valid_modname:
continue
# quick regex test -- check if the file contains the
# word "_get_packed_fingerprint"
full_fname = os.path.join(root, fname)
try:
contents = open(full_fname, "r").read()
except IOError:
continue
if not regex.search(contents):
continue
# More thorough check to see if the file corresponds to a
# LCM type module generated by lcm-gen. Parse the
# file using pyclbr, and check if it contains a class
# with the right name and methods
if python_package:
modname = "%s.%s" % (python_package, mod_basename)
else:
modname = mod_basename
try:
klass = pyclbr.readmodule(modname)[mod_basename]
if "decode" in klass.methods and \
"_get_packed_fingerprint" in klass.methods:
lcmtypes.append(modname)
except ImportError:
continue
except KeyError:
continue | conditional_block |
scan_for_lcmtypes.py | #!/usr/bin/python
import re
import os
import sys
import pyclbr
def | ():
alpha_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
valid_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_")
lcmtypes = []
regex = re.compile("_get_packed_fingerprint")
dirs_to_check = sys.path
for dir_name in dirs_to_check:
for root, dirs, files in os.walk(dir_name):
subdirs = root[len(dir_name):].split(os.sep)
subdirs = [ s for s in subdirs if s ]
python_package = ".".join(subdirs)
for fname in files:
if not fname.endswith(".py"):
continue
mod_basename = fname[:-3]
valid_modname = True
for c in mod_basename:
if c not in valid_chars:
valid_modname = False
break
if mod_basename[0] not in alpha_chars:
valid_modname = False
if not valid_modname:
continue
# quick regex test -- check if the file contains the
# word "_get_packed_fingerprint"
full_fname = os.path.join(root, fname)
try:
contents = open(full_fname, "r").read()
except IOError:
continue
if not regex.search(contents):
continue
# More thorough check to see if the file corresponds to a
# LCM type module generated by lcm-gen. Parse the
# file using pyclbr, and check if it contains a class
# with the right name and methods
if python_package:
modname = "%s.%s" % (python_package, mod_basename)
else:
modname = mod_basename
try:
klass = pyclbr.readmodule(modname)[mod_basename]
if "decode" in klass.methods and \
"_get_packed_fingerprint" in klass.methods:
lcmtypes.append(modname)
except ImportError:
continue
except KeyError:
continue
# only recurse into subdirectories that correspond to python
# packages (i.e., they contain a file named "__init__.py")
subdirs_to_traverse = [ subdir_name for subdir_name in dirs \
if os.path.exists(os.path.join(root, subdir_name, "__init__.py")) ]
del dirs[:]
dirs.extend(subdirs_to_traverse)
return lcmtypes
def make_lcmtype_dictionary():
"""Create a dictionary of LCM types keyed by fingerprint.
Searches the specified python package directories for modules
corresponding to LCM types, imports all the discovered types into the
global namespace, and returns a dictionary mapping packed fingerprints
to LCM type classes.
The primary use for this dictionary is to automatically identify and
decode an LCM message.
"""
lcmtypes = find_lcmtypes()
result = {}
for lcmtype_name in lcmtypes:
try:
__import__(lcmtype_name)
mod = sys.modules[lcmtype_name]
type_basename = lcmtype_name.split(".")[-1]
klass = getattr(mod, type_basename)
fingerprint = klass._get_packed_fingerprint()
result[fingerprint] = klass
#print "importing %s" % lcmtype_name
except:
print "Error importing %s" % lcmtype_name
return result
if __name__ == "__main__":
import binascii
print("Searching for LCM types...")
lcmtypes = make_lcmtype_dictionary()
num_types = len(lcmtypes)
print("Found %d type%s" % (num_types, num_types==1 and "" or "s"))
for fingerprint, klass in lcmtypes.items():
print binascii.hexlify(fingerprint), klass.__module__
| find_lcmtypes | identifier_name |
scan_for_lcmtypes.py | #!/usr/bin/python
import re
import os
import sys
import pyclbr
def find_lcmtypes():
|
def make_lcmtype_dictionary():
"""Create a dictionary of LCM types keyed by fingerprint.
Searches the specified python package directories for modules
corresponding to LCM types, imports all the discovered types into the
global namespace, and returns a dictionary mapping packed fingerprints
to LCM type classes.
The primary use for this dictionary is to automatically identify and
decode an LCM message.
"""
lcmtypes = find_lcmtypes()
result = {}
for lcmtype_name in lcmtypes:
try:
__import__(lcmtype_name)
mod = sys.modules[lcmtype_name]
type_basename = lcmtype_name.split(".")[-1]
klass = getattr(mod, type_basename)
fingerprint = klass._get_packed_fingerprint()
result[fingerprint] = klass
#print "importing %s" % lcmtype_name
except:
print "Error importing %s" % lcmtype_name
return result
if __name__ == "__main__":
import binascii
print("Searching for LCM types...")
lcmtypes = make_lcmtype_dictionary()
num_types = len(lcmtypes)
print("Found %d type%s" % (num_types, num_types==1 and "" or "s"))
for fingerprint, klass in lcmtypes.items():
print binascii.hexlify(fingerprint), klass.__module__
| alpha_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
valid_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_")
lcmtypes = []
regex = re.compile("_get_packed_fingerprint")
dirs_to_check = sys.path
for dir_name in dirs_to_check:
for root, dirs, files in os.walk(dir_name):
subdirs = root[len(dir_name):].split(os.sep)
subdirs = [ s for s in subdirs if s ]
python_package = ".".join(subdirs)
for fname in files:
if not fname.endswith(".py"):
continue
mod_basename = fname[:-3]
valid_modname = True
for c in mod_basename:
if c not in valid_chars:
valid_modname = False
break
if mod_basename[0] not in alpha_chars:
valid_modname = False
if not valid_modname:
continue
# quick regex test -- check if the file contains the
# word "_get_packed_fingerprint"
full_fname = os.path.join(root, fname)
try:
contents = open(full_fname, "r").read()
except IOError:
continue
if not regex.search(contents):
continue
# More thorough check to see if the file corresponds to a
# LCM type module generated by lcm-gen. Parse the
# file using pyclbr, and check if it contains a class
# with the right name and methods
if python_package:
modname = "%s.%s" % (python_package, mod_basename)
else:
modname = mod_basename
try:
klass = pyclbr.readmodule(modname)[mod_basename]
if "decode" in klass.methods and \
"_get_packed_fingerprint" in klass.methods:
lcmtypes.append(modname)
except ImportError:
continue
except KeyError:
continue
# only recurse into subdirectories that correspond to python
# packages (i.e., they contain a file named "__init__.py")
subdirs_to_traverse = [ subdir_name for subdir_name in dirs \
if os.path.exists(os.path.join(root, subdir_name, "__init__.py")) ]
del dirs[:]
dirs.extend(subdirs_to_traverse)
return lcmtypes | identifier_body |
scan_for_lcmtypes.py | #!/usr/bin/python
import re
import os
import sys
import pyclbr
def find_lcmtypes():
alpha_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
valid_chars = set("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_")
lcmtypes = []
regex = re.compile("_get_packed_fingerprint")
dirs_to_check = sys.path
for dir_name in dirs_to_check:
for root, dirs, files in os.walk(dir_name):
subdirs = root[len(dir_name):].split(os.sep)
subdirs = [ s for s in subdirs if s ]
python_package = ".".join(subdirs)
for fname in files:
if not fname.endswith(".py"):
continue
mod_basename = fname[:-3]
valid_modname = True
for c in mod_basename:
if c not in valid_chars:
valid_modname = False
break
if mod_basename[0] not in alpha_chars:
valid_modname = False
if not valid_modname:
continue
# quick regex test -- check if the file contains the
# word "_get_packed_fingerprint"
full_fname = os.path.join(root, fname)
try:
contents = open(full_fname, "r").read()
except IOError:
continue | # LCM type module generated by lcm-gen. Parse the
# file using pyclbr, and check if it contains a class
# with the right name and methods
if python_package:
modname = "%s.%s" % (python_package, mod_basename)
else:
modname = mod_basename
try:
klass = pyclbr.readmodule(modname)[mod_basename]
if "decode" in klass.methods and \
"_get_packed_fingerprint" in klass.methods:
lcmtypes.append(modname)
except ImportError:
continue
except KeyError:
continue
# only recurse into subdirectories that correspond to python
# packages (i.e., they contain a file named "__init__.py")
subdirs_to_traverse = [ subdir_name for subdir_name in dirs \
if os.path.exists(os.path.join(root, subdir_name, "__init__.py")) ]
del dirs[:]
dirs.extend(subdirs_to_traverse)
return lcmtypes
def make_lcmtype_dictionary():
"""Create a dictionary of LCM types keyed by fingerprint.
Searches the specified python package directories for modules
corresponding to LCM types, imports all the discovered types into the
global namespace, and returns a dictionary mapping packed fingerprints
to LCM type classes.
The primary use for this dictionary is to automatically identify and
decode an LCM message.
"""
lcmtypes = find_lcmtypes()
result = {}
for lcmtype_name in lcmtypes:
try:
__import__(lcmtype_name)
mod = sys.modules[lcmtype_name]
type_basename = lcmtype_name.split(".")[-1]
klass = getattr(mod, type_basename)
fingerprint = klass._get_packed_fingerprint()
result[fingerprint] = klass
#print "importing %s" % lcmtype_name
except:
print "Error importing %s" % lcmtype_name
return result
if __name__ == "__main__":
import binascii
print("Searching for LCM types...")
lcmtypes = make_lcmtype_dictionary()
num_types = len(lcmtypes)
print("Found %d type%s" % (num_types, num_types==1 and "" or "s"))
for fingerprint, klass in lcmtypes.items():
print binascii.hexlify(fingerprint), klass.__module__ | if not regex.search(contents):
continue
# More thorough check to see if the file corresponds to a | random_line_split |
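# --- Editor's note: a minimal decoding sketch built on make_lcmtype_dictionary()
# above (illustrative; the channel handling and the assumption that an encoded
# LCM message begins with its 8-byte packed fingerprint are mine).
# types_by_fingerprint = make_lcmtype_dictionary()
# def on_message(channel, data):
#     klass = types_by_fingerprint.get(data[:8])
#     if klass is not None:
#         msg = klass.decode(data)
#         print "decoded %s on channel %s" % (klass.__name__, channel)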
msi.py | """SCons.Tool.packaging.msi
The msi packager.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/packaging/msi.py 2014/07/05 09:42:21 garyo"
import os
import SCons
from SCons.Action import Action
from SCons.Builder import Builder
from xml.dom.minidom import *
from xml.sax.saxutils import escape
from SCons.Tool.packaging import stripinstallbuilder
#
# Utility functions
#
def convert_to_id(s, id_set):
""" Some parts of .wxs need an Id attribute (for example: The File and
Directory directives. The charset is limited to A-Z, a-z, digits,
underscores, periods. Each Id must begin with a letter or with an
underscore. Google for "CNDL0015" for information about this.
Requirements:
* the string created must only contain chars from the target charset.
* the string created must have a minimal editing distance from the
original string.
* the string created must be unique for the whole .wxs file.
Observation:
* There are 62 chars in the charset.
Idea:
* filter out forbidden characters. Check for a collision with the help
of the id_set. Add the number of the collision at the
end of the created string. Furthermore care for a correct start of
the string.
"""
charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.'
if s[0] in '0123456789.':
s = '_'+s
id = ''.join(c for c in s if c in charset)
# did we already generate an id for this file?
try:
return id_set[id][s]
except KeyError:
# no we did not so initialize with the id
if id not in id_set: id_set[id] = { s : id }
# there is a collision, generate an id which is unique by appending
# the collision number
else: id_set[id][s] = id + str(len(id_set[id]))
return id_set[id][s]
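# Editor's illustration (not part of the original SCons source): with an empty
# id_set, convert_to_id('my-file.txt', {}) drops the '-' (not in the charset)
# and returns 'myfile.txt'; a second, different name that filters down to the
# same id gets a numeric suffix such as 'myfile.txt1', keeping Ids unique.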
def is_dos_short_file_name(file):
""" examine if the given file is in the 8.3 form.
"""
fname, ext = os.path.splitext(file)
proper_ext = len(ext) == 0 or (2 <= len(ext) <= 4) # the ext contains the dot
proper_fname = file.isupper() and len(fname) <= 8
return proper_ext and proper_fname
def gen_dos_short_file_name(file, filename_set):
""" see http://support.microsoft.com/default.aspx?scid=kb;en-us;Q142982
These are not complete 8.3 DOS short names. The ~ char is missing and
replaced with one character from the filename. WiX warns about such
filenames, since a collision might occur. Google for "CNDL1014" for
more information.
"""
# guard this to not confuse the generation
if is_dos_short_file_name(file):
return file
fname, ext = os.path.splitext(file) # ext contains the dot
# first try if it suffices to convert to upper
file = file.upper()
if is_dos_short_file_name(file):
return file
# strip forbidden characters.
forbidden = '."/[]:;=, '
fname = ''.join(c for c in fname if c not in forbidden)
# check if we already generated a filename with the same number:
# thisis1.txt, thisis2.txt etc.
duplicate, num = True, 1
while duplicate:
shortname = "%s%s" % (fname[:8-len(str(num))].upper(),\
str(num))
if len(ext) >= 2:
shortname = "%s%s" % (shortname, ext[:4].upper())
duplicate, num = shortname in filename_set, num+1
assert( is_dos_short_file_name(shortname) ), 'shortname is %s, longname is %s' % (shortname, file)
filename_set.append(shortname)
return shortname
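# Editor's illustration: 'long document.txt' is not a valid 8.3 name and
# upper-casing alone does not fix it, so the space is stripped and the stem is
# truncated; the first free candidate is 'LONGDOC1.TXT' (then 'LONGDOC2.TXT'
# and so on if earlier numbers are already taken in filename_set).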
def create_feature_dict(files):
""" X_MSI_FEATURE and doc FileTag's can be used to collect files in a
hierarchy. This function collects the files into this hierarchy.
"""
dict = {}
def add_to_dict( feature, file ):
if not SCons.Util.is_List( feature ):
feature = [ feature ]
for f in feature:
if f not in dict:
dict[ f ] = [ file ]
else:
dict[ f ].append( file )
for file in files:
if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):
add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)
elif hasattr( file, 'PACKAGING_DOC' ):
add_to_dict( 'PACKAGING_DOC', file )
else:
add_to_dict( 'default', file )
return dict
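# Editor's illustration: a file tagged X_MSI_FEATURE='docs' is collected under
# dict['docs'], a file tagged with the DOC FileTag under dict['PACKAGING_DOC'],
# and untagged files under dict['default'].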
def generate_guids(root):
""" generates globally unique identifiers for parts of the xml which need
them.
Component tags have a special requirement. Their UUID is only allowed to
change if the list of their contained resources has changed. This allows
for clean removal and proper updates.
To handle this requirement, the uuid is generated with an md5 hashing the
whole subtree of a xml node.
"""
from hashlib import md5
# specify which tags need a guid and in which attribute this should be stored.
needs_id = { 'Product' : 'Id',
'Package' : 'Id',
'Component' : 'Guid',
}
# find all XMl nodes matching the key, retrieve their attribute, hash their
# subtree, convert hash to string and add as a attribute to the xml node.
for (key,value) in needs_id.items():
node_list = root.getElementsByTagName(key)
attribute = value
for node in node_list:
hash = md5(node.toxml()).hexdigest()
hash_str = '%s-%s-%s-%s-%s' % ( hash[:8], hash[8:12], hash[12:16], hash[16:20], hash[20:] )
node.attributes[attribute] = hash_str
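# Editor's note: an md5 hexdigest is 32 hex chars, so the slicing above yields
# the canonical 8-4-4-4-12 GUID layout, e.g.
# 'd41d8cd98f00b204e9800998ecf8427e' -> 'd41d8cd9-8f00-b204-e980-0998ecf8427e'.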
def string_wxsfile(target, source, env):
return "building WiX file %s"%( target[0].path )
def build_wxsfile(target, source, env):
""" compiles a .wxs file from the keywords given in env['msi_spec'] and
by analyzing the tree of source nodes and their tags.
"""
file = open(target[0].abspath, 'w')
try:
# Create a document with the Wix root tag
doc = Document()
root = doc.createElement( 'Wix' )
root.attributes['xmlns']='http://schemas.microsoft.com/wix/2003/01/wi'
doc.appendChild( root )
filename_set = [] # this is to circumvent duplicates in the shortnames
id_set = {} # this is to circumvent duplicates in the ids
# Create the content
build_wxsfile_header_section(root, env)
build_wxsfile_file_section(root, source, env['NAME'], env['VERSION'], env['VENDOR'], filename_set, id_set)
generate_guids(root)
build_wxsfile_features_section(root, source, env['NAME'], env['VERSION'], env['SUMMARY'], id_set)
build_wxsfile_default_gui(root)
build_license_file(target[0].get_dir(), env)
| file.write( doc.toprettyxml() )
# call a user specified function
if 'CHANGE_SPECFILE' in env:
env['CHANGE_SPECFILE'](target, source)
except KeyError, e:
raise SCons.Errors.UserError( '"%s" package field for MSI is missing.' % e.args[0] )
#
# setup function
#
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
""" Create the wix default target directory layout and return the innermost
directory.
We assume that the XML tree delivered in the root argument already contains
the Product tag.
Everything is put under the PFiles directory property defined by WiX.
After that a directory with the 'VENDOR' tag is placed and then a
directory with the name of the project and its VERSION. This leads to the
following TARGET Directory Layout:
C:\<PFiles>\<Vendor>\<Projectname-Version>\
Example: C:\Programme\Company\Product-1.2\
"""
doc = Document()
d1 = doc.createElement( 'Directory' )
d1.attributes['Id'] = 'TARGETDIR'
d1.attributes['Name'] = 'SourceDir'
d2 = doc.createElement( 'Directory' )
d2.attributes['Id'] = 'ProgramFilesFolder'
d2.attributes['Name'] = 'PFiles'
d3 = doc.createElement( 'Directory' )
d3.attributes['Id'] = 'VENDOR_folder'
d3.attributes['Name'] = escape( gen_dos_short_file_name( VENDOR, filename_set ) )
d3.attributes['LongName'] = escape( VENDOR )
d4 = doc.createElement( 'Directory' )
project_folder = "%s-%s" % ( NAME, VERSION )
d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'
d4.attributes['Name'] = escape( gen_dos_short_file_name( project_folder, filename_set ) )
d4.attributes['LongName'] = escape( project_folder )
d1.childNodes.append( d2 )
d2.childNodes.append( d3 )
d3.childNodes.append( d4 )
root.getElementsByTagName('Product')[0].childNodes.append( d1 )
return d4
#
# mandatory and optional file tags
#
def build_wxsfile_file_section(root, files, NAME, VERSION, VENDOR, filename_set, id_set):
""" builds the Component sections of the wxs file with their included files.
Files need to be specified in 8.3 format and in the long name format, long
filenames will be converted automatically.
Features are specified with the 'X_MSI_FEATURE' or 'DOC' FileTag.
"""
root = create_default_directory_layout( root, NAME, VERSION, VENDOR, filename_set )
components = create_feature_dict( files )
factory = Document()
def get_directory( node, dir ):
""" returns the node under the given node representing the directory.
Returns the component node if dir is None or empty.
"""
if dir == '' or not dir:
return node
Directory = node
dir_parts = dir.split(os.path.sep)
# to make sure that our directory ids are unique, the parent folders are
# consecutively added to upper_dir
upper_dir = ''
# walk down the xml tree finding parts of the directory
dir_parts = [d for d in dir_parts if d != '']
for d in dir_parts[:]:
already_created = [c for c in Directory.childNodes
if c.nodeName == 'Directory'
and c.attributes['LongName'].value == escape(d)]
if already_created != []:
Directory = already_created[0]
dir_parts.remove(d)
upper_dir += d
else:
break
for d in dir_parts:
nDirectory = factory.createElement( 'Directory' )
nDirectory.attributes['LongName'] = escape( d )
nDirectory.attributes['Name'] = escape( gen_dos_short_file_name( d, filename_set ) )
upper_dir += d
nDirectory.attributes['Id'] = convert_to_id( upper_dir, id_set )
Directory.childNodes.append( nDirectory )
Directory = nDirectory
return Directory
for file in files:
drive, path = os.path.splitdrive( file.PACKAGING_INSTALL_LOCATION )
filename = os.path.basename( path )
dirname = os.path.dirname( path )
h = {
# tagname : default value
'PACKAGING_X_MSI_VITAL' : 'yes',
'PACKAGING_X_MSI_FILEID' : convert_to_id(filename, id_set),
'PACKAGING_X_MSI_LONGNAME' : filename,
'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name(filename, filename_set),
'PACKAGING_X_MSI_SOURCE' : file.get_path(),
}
# fill in the default tags given above.
for k,v in [ (k, v) for (k,v) in h.items() if not hasattr(file, k) ]:
setattr( file, k, v )
File = factory.createElement( 'File' )
File.attributes['LongName'] = escape( file.PACKAGING_X_MSI_LONGNAME )
File.attributes['Name'] = escape( file.PACKAGING_X_MSI_SHORTNAME )
File.attributes['Source'] = escape( file.PACKAGING_X_MSI_SOURCE )
File.attributes['Id'] = escape( file.PACKAGING_X_MSI_FILEID )
File.attributes['Vital'] = escape( file.PACKAGING_X_MSI_VITAL )
# create the <Component> Tag under which this file should appear
Component = factory.createElement('Component')
Component.attributes['DiskId'] = '1'
Component.attributes['Id'] = convert_to_id( filename, id_set )
# hang the component node under the root node and the file node
# under the component node.
Directory = get_directory( root, dirname )
Directory.childNodes.append( Component )
Component.childNodes.append( File )
#
# additional functions
#
def build_wxsfile_features_section(root, files, NAME, VERSION, SUMMARY, id_set):
""" This function creates the <features> tag based on the supplied xml tree.
This is achieved by finding all <component>s and adding them to a default target.
It should be called after the tree has been built completely. We assume
that a MY_DEFAULT_FOLDER Property is defined in the wxs file tree.
Furthermore a top-level feature with the NAME and VERSION of the software will be created.
A PACKAGING_X_MSI_FEATURE can either be a string, where the feature
DESCRIPTION will be the same as its title or a Tuple, where the first
part will be its title and the second its DESCRIPTION.
"""
factory = Document()
Feature = factory.createElement('Feature')
Feature.attributes['Id'] = 'complete'
Feature.attributes['ConfigurableDirectory'] = 'MY_DEFAULT_FOLDER'
Feature.attributes['Level'] = '1'
Feature.attributes['Title'] = escape( '%s %s' % (NAME, VERSION) )
Feature.attributes['Description'] = escape( SUMMARY )
Feature.attributes['Display'] = 'expand'
for (feature, files) in create_feature_dict(files).items():
SubFeature = factory.createElement('Feature')
SubFeature.attributes['Level'] = '1'
if SCons.Util.is_Tuple(feature):
SubFeature.attributes['Id'] = convert_to_id( feature[0], id_set )
SubFeature.attributes['Title'] = escape(feature[0])
SubFeature.attributes['Description'] = escape(feature[1])
else:
SubFeature.attributes['Id'] = convert_to_id( feature, id_set )
if feature=='default':
SubFeature.attributes['Description'] = 'Main Part'
SubFeature.attributes['Title'] = 'Main Part'
elif feature=='PACKAGING_DOC':
SubFeature.attributes['Description'] = 'Documentation'
SubFeature.attributes['Title'] = 'Documentation'
else:
SubFeature.attributes['Description'] = escape(feature)
SubFeature.attributes['Title'] = escape(feature)
# build the componentrefs. One design decision is that every file is
# also a component, so we walk the list of files and create a
# reference for each one.
for f in files:
ComponentRef = factory.createElement('ComponentRef')
ComponentRef.attributes['Id'] = convert_to_id( os.path.basename(f.get_path()), id_set )
SubFeature.childNodes.append(ComponentRef)
Feature.childNodes.append(SubFeature)
root.getElementsByTagName('Product')[0].childNodes.append(Feature)
def build_wxsfile_default_gui(root):
""" this function adds a default GUI to the wxs file
"""
factory = Document()
Product = root.getElementsByTagName('Product')[0]
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_Mondo'
Product.childNodes.append(UIRef)
UIRef = factory.createElement('UIRef')
UIRef.attributes['Id'] = 'WixUI_ErrorProgressText'
Product.childNodes.append(UIRef)
def build_license_file(directory, spec):
""" creates a License.rtf file with the content of "X_MSI_LICENSE_TEXT"
in the given directory
"""
name, text = '', ''
try:
name = spec['LICENSE']
text = spec['X_MSI_LICENSE_TEXT']
except KeyError:
pass # ignore this as X_MSI_LICENSE_TEXT is optional
if name!='' or text!='':
file = open( os.path.join(directory.get_path(), 'License.rtf'), 'w' )
file.write('{\\rtf')
if text!='':
file.write(text.replace('\n', '\\par '))
else:
file.write(name+'\\par\\par')
file.write('}')
file.close()
#
# mandatory and optional package tags
#
def build_wxsfile_header_section(root, spec):
""" Adds the xml file node which define the package meta-data.
"""
# Create the needed DOM nodes and add them at the correct position in the tree.
factory = Document()
Product = factory.createElement( 'Product' )
Package = factory.createElement( 'Package' )
root.childNodes.append( Product )
Product.childNodes.append( Package )
# set "mandatory" default values
if 'X_MSI_LANGUAGE' not in spec:
spec['X_MSI_LANGUAGE'] = '1033' # select english
# mandatory sections, will throw a KeyError if the tag is not available
Product.attributes['Name'] = escape( spec['NAME'] )
Product.attributes['Version'] = escape( spec['VERSION'] )
Product.attributes['Manufacturer'] = escape( spec['VENDOR'] )
Product.attributes['Language'] = escape( spec['X_MSI_LANGUAGE'] )
Package.attributes['Description'] = escape( spec['SUMMARY'] )
# now the optional tags, for which we avoid the KeyError exception
if 'DESCRIPTION' in spec:
Package.attributes['Comments'] = escape( spec['DESCRIPTION'] )
if 'X_MSI_UPGRADE_CODE' in spec:
Package.attributes['X_MSI_UPGRADE_CODE'] = escape( spec['X_MSI_UPGRADE_CODE'] )
# We hardcode the media tag as our current model cannot handle it.
Media = factory.createElement('Media')
Media.attributes['Id'] = '1'
Media.attributes['Cabinet'] = 'default.cab'
Media.attributes['EmbedCab'] = 'yes'
root.getElementsByTagName('Product')[0].childNodes.append(Media)
# this builder is the entry-point for .wxs file compiler.
wxs_builder = Builder(
action = Action( build_wxsfile, string_wxsfile ),
ensure_suffix = '.wxs' )
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
DESCRIPTION, SUMMARY, VENDOR, X_MSI_LANGUAGE, **kw):
# make sure that the Wix Builder is in the environment
SCons.Tool.Tool('wix').generate(env)
# put the keywords for the specfile compiler. These are the arguments
# given to the package function and all optional ones stored in kw, minus
# the source, target and env ones.
loc = locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# strip the install builder from the source files
target, source = stripinstallbuilder(target, source, env)
# put the arguments into the env and call the specfile builder.
env['msi_spec'] = kw
specfile = wxs_builder(* [env, target, source], **kw)
# now call the WiX Tool with the built specfile added as a source.
msifile = env.WiX(target, specfile)
# return the target and source tuple.
return (msifile, source+[specfile])
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | # write the xml to a file | random_line_split |
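# --- Editor's note: illustrative SConstruct sketch for driving this packager.
# The project name, file and metadata below are hypothetical, not from the source.
# env = Environment(tools=['default', 'packaging'])
# prog = env.Install('bin', env.Program('hello.c'))
# env.Package(NAME='hello', VERSION='1.0', PACKAGETYPE='msi',
#             SUMMARY='Example tool', DESCRIPTION='Example MSI package',
#             VENDOR='Example Corp', source=prog)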