file_name (large_string, lengths 4 to 140) | prefix (large_string, lengths 0 to 39k) | suffix (large_string, lengths 0 to 36.1k) | middle (large_string, lengths 0 to 29.4k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
contacts-details.component.ts | import { Component, OnInit, Input, forwardRef, ViewChild, OnDestroy, Inject, ChangeDetectionStrategy, ChangeDetectorRef, OnChanges, SimpleChanges } from '@angular/core';
import { FormControl, FormGroup, Validators, FormBuilder, NG_VALUE_ACCESSOR, ControlValueAccessor } from '@angular/forms';
import { HttpClient, HttpHeaders, HttpParams } from '@angular/common/http';
import { TimeSheetService, GlobalService, view, ClientService, StaffService, ListService, UploadService, contactGroups, days, gender, types, titles, caldStatuses, roles, ShareService } from '@services/index';
import * as _ from 'lodash';
import { mergeMap, takeUntil, concatMap, switchMap } from 'rxjs/operators';
import { EMPTY,Subject } from 'rxjs';
import { TitleCasePipe } from '@angular/common';
import { ProfileInterface} from '@modules/modules';
const noop = () => {
};
@Component({
selector: 'app-contacts-details',
templateUrl: './contacts-details.component.html',
styleUrls: ['./contacts-details.component.css'],
providers: [
{
provide: NG_VALUE_ACCESSOR,
multi: true,
useExisting: forwardRef(() => ContactsDetailsComponent),
}
],
changeDetection: ChangeDetectionStrategy.OnPush
})
export class ContactsDetailsComponent implements OnInit, OnDestroy, OnChanges,ControlValueAccessor {
private unsubscribe: Subject<void> = new Subject();
selectedCompany;
doctor: any;
@Input() user: any;
private onTouchedCallback: () => void = noop;
private onChangeCallback: (_: any) => void = noop;
innerValue: ProfileInterface;
kinsArray: Array<any> = [];
kindetailsGroup: FormGroup;
inputForm: FormGroup;
contactGroups: Array<string> = contactGroups;
contactTypes : Array<string>;
modalOpen: boolean = false;
postLoading: boolean = false;
selected: any;
current: number = 0;
loading: boolean;
tocken: any;
doctors: Array<any> = [];
constructor(
private globalS: GlobalService,
private clientS: ClientService,
private staffS: StaffService,
private timeS: TimeSheetService,
private sharedS: ShareService,
private listS: ListService,
private formBuilder: FormBuilder,
private cd: ChangeDetectorRef,
private http: HttpClient,
private titleCase: TitleCasePipe
) { }
ngOnInit(): void {
this.user = this.sharedS.getPicked();
this.buildForm();
}
ngOnChanges(changes: SimpleChanges) {
for (let property in changes) {
console.log('run contacts')
this.searchKin(this.user);
}
}
doctorChangeEvent(data: any){
var doc = this.doctors.filter(x => x.name == data).shift();
if(!doc) |
this.inputForm.patchValue({
address1: doc.address1,
address2: doc.address2,
phone1: doc.phone1,
phone2:doc.phone2,
email: doc.email,
mobile: doc.mobile,
fax: doc.fax,
name: doc.name
})
}
populate(){
this.listS.getdoctorinformation().subscribe(data => {
console.log(data);
this.doctors = data;
})
}
buildForm(): void {
this.kindetailsGroup = this.formBuilder.group({
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [''],
suburb: [''],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null,
subType: ''
});
this.inputForm = this.formBuilder.group({
group: [''],
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [null],
suburb: [''],
state: [],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null
})
this.inputForm.get('group').valueChanges.pipe(
switchMap(x => {
if(!x)
return EMPTY;
console.log(x);
return this.listS.gettypeother(x) })
).subscribe(data => {
this.contactTypes = data;
});
}
ngAfterViewInit(): void{
}
ngOnDestroy(): void{
this.unsubscribe.next();
this.unsubscribe.complete();
}
searchKin(token: ProfileInterface){
this.loading = true;
console.log(token)
if (token.view == view.recipient) {
this.timeS.getcontactskinrecipient(token.id)
.subscribe(data => {
this.kinsArray = data.list;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
if (token.view == view.staff) {
this.timeS.getcontactskinstaff(token.code)
.subscribe(data => {
this.kinsArray = data;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
}
showDetails(kin: any) {
this.timeS.getcontactskinstaffdetails(kin.recordNumber)
.subscribe(data => {
this.kindetailsGroup.patchValue({
address1: data.address1,
address2: data.address2,
name: data.contactName,
type: data.subType,
email: data.email,
fax: data.fax,
mobile: data.mobile,
notes: data.notes,
phone1: data.phone1,
phone2: data.phone2,
suburbcode: (data.postcode != '') ? (data.postcode || '').trim() + ' ' + (data.suburb || '').trim() : '',
suburb: data.suburb,
postcode: data.postcode,
listOrder: '',
oni1: (data.equipmentCode || '').toUpperCase() == 'PERSON1',
oni2: (data.equipmentCode || '').toUpperCase() == 'PERSON2',
recordNumber: data.recordNumber,
// subType: data.subType
})
})
}
//From ControlValueAccessor interface
writeValue(value: any) {
if (value != null) {
console.log(value)
this.innerValue = value;
this.searchKin(this.innerValue);
}
}
//From ControlValueAccessor interface
registerOnChange(fn: any) {
this.onChangeCallback = fn;
}
//From ControlValueAccessor interface
registerOnTouched(fn: any) {
this.onTouchedCallback = fn;
}
save() {
if (this.user.view === view.staff)
{
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinstaffdetails(
details,
details.recordNumber
).subscribe(data => {
// this.searchKin(this.user);
this.globalS.sToast('Success', 'Contact Updated');
});
}
if (this.user.view === view.recipient)
{
console.log('recipient');
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinrecipientdetails(details,details.recordNumber)
.subscribe(data => {
// this.searchKin(this.user);
this.handleCancel();
this.globalS.sToast('Success', 'Contact Updated');
});
}
}
getPostCodeAndSuburb(address: any): any {
const rs = address;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0] : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].split(',')[0] : "";
return {
pcode: pcode.trim() || '',
suburb: suburb.trim() || ''
}
}
get nextRequired() {
const { group, type, name } = this.inputForm.value;
if (this.current == 0 && this.globalS.isEmpty(group)) {
return false;
}
if (this.current == 1 && (this.globalS.isEmpty(type) || this.globalS.isEmpty(name)) ) {
return false;
}
return true;
}
add() {
if (this.inputForm.controls['suburbcode'].dirty) {
var rs = this.inputForm.get('suburbcode').value;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0].trim() : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].trim() : "";
let state = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[1].replace(/,/g, '').trim() : "";
if (pcode !== "") {
this.inputForm.controls["postcode"].setValue(pcode);
this.inputForm.controls["suburb"].setValue(suburb);
this.inputForm.controls["state"].setValue(state);
}
}
if (this.inputForm.get('oni1').value) {
this.inputForm.controls['ecode'].setValue('PERSON1')
} else if (this.inputForm.get('oni2').value) {
this.inputForm.controls['ecode'].setValue('PERSON2')
}
this.timeS.postcontactskinstaffdetails(
this.inputForm.value,
this.user.id
).pipe(takeUntil(this.unsubscribe)).subscribe(data => {
this.globalS.sToast('Success', 'Contact Inserted');
this.handleCancel();
this.searchKin(this.user);
this.handleCancel();
});
}
delete() {
this.timeS.deletecontactskin(this.kindetailsGroup.value.recordNumber).subscribe(data => {
this.globalS.sToast('Success', 'Contact Deleted');
this.searchKin(this.user);
});
}
handleCancel() {
this.modalOpen = false;
this.inputForm.reset();
this.current = 0;
}
pre() {
this.current -= 1;
}
next() {
this.current += 1;
}
}
| {
this.inputForm.patchValue({
address1: '',
address2: '',
phone1: '',
phone2:'',
email: '',
mobile: '',
fax: '',
name: ''
})
return;
} | conditional_block |
contacts-details.component.ts | import { Component, OnInit, Input, forwardRef, ViewChild, OnDestroy, Inject, ChangeDetectionStrategy, ChangeDetectorRef, OnChanges, SimpleChanges } from '@angular/core';
import { FormControl, FormGroup, Validators, FormBuilder, NG_VALUE_ACCESSOR, ControlValueAccessor } from '@angular/forms';
import { HttpClient, HttpHeaders, HttpParams } from '@angular/common/http';
import { TimeSheetService, GlobalService, view, ClientService, StaffService, ListService, UploadService, contactGroups, days, gender, types, titles, caldStatuses, roles, ShareService } from '@services/index';
import * as _ from 'lodash';
import { mergeMap, takeUntil, concatMap, switchMap } from 'rxjs/operators';
import { EMPTY,Subject } from 'rxjs';
import { TitleCasePipe } from '@angular/common';
import { ProfileInterface} from '@modules/modules';
const noop = () => {
};
@Component({
selector: 'app-contacts-details',
templateUrl: './contacts-details.component.html',
styleUrls: ['./contacts-details.component.css'],
providers: [
{
provide: NG_VALUE_ACCESSOR,
multi: true,
useExisting: forwardRef(() => ContactsDetailsComponent),
}
],
changeDetection: ChangeDetectionStrategy.OnPush
})
export class ContactsDetailsComponent implements OnInit, OnDestroy, OnChanges,ControlValueAccessor {
private unsubscribe: Subject<void> = new Subject();
selectedCompany;
doctor: any;
@Input() user: any;
private onTouchedCallback: () => void = noop;
private onChangeCallback: (_: any) => void = noop;
innerValue: ProfileInterface;
kinsArray: Array<any> = [];
kindetailsGroup: FormGroup;
inputForm: FormGroup;
contactGroups: Array<string> = contactGroups;
contactTypes : Array<string>;
modalOpen: boolean = false;
postLoading: boolean = false;
selected: any;
current: number = 0;
loading: boolean;
tocken: any;
doctors: Array<any> = [];
constructor(
private globalS: GlobalService,
private clientS: ClientService,
private staffS: StaffService,
private timeS: TimeSheetService,
private sharedS: ShareService,
private listS: ListService,
private formBuilder: FormBuilder,
private cd: ChangeDetectorRef,
private http: HttpClient,
private titleCase: TitleCasePipe
) { }
ngOnInit(): void {
this.user = this.sharedS.getPicked();
this.buildForm();
}
| (changes: SimpleChanges) {
for (let property in changes) {
console.log('run contacts')
this.searchKin(this.user);
}
}
doctorChangeEvent(data: any){
var doc = this.doctors.filter(x => x.name == data).shift();
if(!doc){
this.inputForm.patchValue({
address1: '',
address2: '',
phone1: '',
phone2:'',
email: '',
mobile: '',
fax: '',
name: ''
})
return;
}
this.inputForm.patchValue({
address1: doc.address1,
address2: doc.address2,
phone1: doc.phone1,
phone2:doc.phone2,
email: doc.email,
mobile: doc.mobile,
fax: doc.fax,
name: doc.name
})
}
populate(){
this.listS.getdoctorinformation().subscribe(data => {
console.log(data);
this.doctors = data;
})
}
buildForm(): void {
this.kindetailsGroup = this.formBuilder.group({
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [''],
suburb: [''],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null,
subType: ''
});
this.inputForm = this.formBuilder.group({
group: [''],
listOrder: [''],
type: [''],
name: [''],
email: [''],
address1: [''],
address2: [''],
suburbcode: [null],
suburb: [''],
state: [],
postcode: [''],
phone1: [''],
phone2: [''],
mobile: [''],
fax: [''],
notes: [''],
oni1: false,
oni2: false,
ecode: [''],
creator: [''],
recordNumber: null
})
this.inputForm.get('group').valueChanges.pipe(
switchMap(x => {
if(!x)
return EMPTY;
console.log(x);
return this.listS.gettypeother(x) })
).subscribe(data => {
this.contactTypes = data;
});
}
ngAfterViewInit(): void{
}
ngOnDestroy(): void{
this.unsubscribe.next();
this.unsubscribe.complete();
}
searchKin(token: ProfileInterface){
this.loading = true;
console.log(token)
if (token.view == view.recipient) {
this.timeS.getcontactskinrecipient(token.id)
.subscribe(data => {
this.kinsArray = data.list;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
if (token.view == view.staff) {
this.timeS.getcontactskinstaff(token.code)
.subscribe(data => {
this.kinsArray = data;
if (this.kinsArray.length > 0) {
this.selected = this.kinsArray[0];
this.showDetails(this.kinsArray[0]);
}
this.loading = false
this.cd.markForCheck();
this.cd.detectChanges();
});
}
}
showDetails(kin: any) {
this.timeS.getcontactskinstaffdetails(kin.recordNumber)
.subscribe(data => {
this.kindetailsGroup.patchValue({
address1: data.address1,
address2: data.address2,
name: data.contactName,
type: data.subType,
email: data.email,
fax: data.fax,
mobile: data.mobile,
notes: data.notes,
phone1: data.phone1,
phone2: data.phone2,
suburbcode: (data.postcode != '') ? (data.postcode || '').trim() + ' ' + (data.suburb || '').trim() : '',
suburb: data.suburb,
postcode: data.postcode,
listOrder: '',
oni1: (data.equipmentCode || '').toUpperCase() == 'PERSON1',
oni2: (data.equipmentCode || '').toUpperCase() == 'PERSON2',
recordNumber: data.recordNumber,
// subType: data.subType
})
})
}
//From ControlValueAccessor interface
writeValue(value: any) {
if (value != null) {
console.log(value)
this.innerValue = value;
this.searchKin(this.innerValue);
}
}
//From ControlValueAccessor interface
registerOnChange(fn: any) {
this.onChangeCallback = fn;
}
//From ControlValueAccessor interface
registerOnTouched(fn: any) {
this.onTouchedCallback = fn;
}
save() {
if (this.user.view === view.staff)
{
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinstaffdetails(
details,
details.recordNumber
).subscribe(data => {
// this.searchKin(this.user);
this.globalS.sToast('Success', 'Contact Updated');
});
}
if (this.user.view === view.recipient)
{
console.log('recipient');
var sub = this.kindetailsGroup.get('suburbcode').value;
let address = sub ? this.getPostCodeAndSuburb(sub) : null;
if (!this.globalS.isEmpty(address)) {
this.kindetailsGroup.controls["postcode"].setValue(address.pcode);
this.kindetailsGroup.controls["suburb"].setValue(address.suburb);
}
if (this.kindetailsGroup.get('oni1').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON1')
} else if (this.kindetailsGroup.get('oni2').value) {
this.kindetailsGroup.controls['ecode'].setValue('PERSON2')
}
const details = this.kindetailsGroup.value;
this.timeS.updatecontactskinrecipientdetails(details,details.recordNumber)
.subscribe(data => {
// this.searchKin(this.user);
this.handleCancel();
this.globalS.sToast('Success', 'Contact Updated');
});
}
}
getPostCodeAndSuburb(address: any): any {
const rs = address;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0] : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].split(',')[0] : "";
return {
pcode: pcode.trim() || '',
suburb: suburb.trim() || ''
}
}
get nextRequired() {
const { group, type, name } = this.inputForm.value;
if (this.current == 0 && this.globalS.isEmpty(group)) {
return false;
}
if (this.current == 1 && (this.globalS.isEmpty(type) || this.globalS.isEmpty(name)) ) {
return false;
}
return true;
}
add() {
if (this.inputForm.controls['suburbcode'].dirty) {
var rs = this.inputForm.get('suburbcode').value;
let pcode = /(\d+)/g.test(rs) ? rs.match(/(\d+)/g)[0].trim() : "";
let suburb = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[0].trim() : "";
let state = /(\D+)/g.test(rs) ? rs.match(/(\D+)/g)[1].replace(/,/g, '').trim() : "";
if (pcode !== "") {
this.inputForm.controls["postcode"].setValue(pcode);
this.inputForm.controls["suburb"].setValue(suburb);
this.inputForm.controls["state"].setValue(state);
}
}
if (this.inputForm.get('oni1').value) {
this.inputForm.controls['ecode'].setValue('PERSON1')
} else if (this.inputForm.get('oni2').value) {
this.inputForm.controls['ecode'].setValue('PERSON2')
}
this.timeS.postcontactskinstaffdetails(
this.inputForm.value,
this.user.id
).pipe(takeUntil(this.unsubscribe)).subscribe(data => {
this.globalS.sToast('Success', 'Contact Inserted');
this.handleCancel();
this.searchKin(this.user);
this.handleCancel();
});
}
delete() {
this.timeS.deletecontactskin(this.kindetailsGroup.value.recordNumber).subscribe(data => {
this.globalS.sToast('Success', 'Contact Deleted');
this.searchKin(this.user);
});
}
handleCancel() {
this.modalOpen = false;
this.inputForm.reset();
this.current = 0;
}
pre() {
this.current -= 1;
}
next() {
this.current += 1;
}
}
| ngOnChanges | identifier_name |
jobs_contracts.go | package jobs
import (
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/hyperledger/burrow/crypto"
"github.com/hyperledger/burrow/txs/payload"
"github.com/monax/bosmarmot/bos/abi"
compilers "github.com/monax/bosmarmot/bos/compile"
"github.com/monax/bosmarmot/bos/def"
"github.com/monax/bosmarmot/bos/util"
log "github.com/sirupsen/logrus"
)
func DeployJob(deploy *def.Deploy, do *def.Packages) (result string, err error) {
// Preprocess variables
deploy.Source, _ = util.PreProcess(deploy.Source, do)
deploy.Contract, _ = util.PreProcess(deploy.Contract, do)
deploy.Instance, _ = util.PreProcess(deploy.Instance, do)
deploy.Libraries, _ = util.PreProcessLibs(deploy.Libraries, do)
deploy.Amount, _ = util.PreProcess(deploy.Amount, do)
deploy.Sequence, _ = util.PreProcess(deploy.Sequence, do)
deploy.Fee, _ = util.PreProcess(deploy.Fee, do)
deploy.Gas, _ = util.PreProcess(deploy.Gas, do)
// trim the extension
contractName := strings.TrimSuffix(deploy.Contract, filepath.Ext(deploy.Contract))
// Use defaults
deploy.Source = useDefault(deploy.Source, do.Package.Account)
deploy.Instance = useDefault(deploy.Instance, contractName)
deploy.Amount = useDefault(deploy.Amount, do.DefaultAmount)
deploy.Fee = useDefault(deploy.Fee, do.DefaultFee)
deploy.Gas = useDefault(deploy.Gas, do.DefaultGas)
// assemble contract
var contractPath string
if _, err := os.Stat(deploy.Contract); err != nil {
if _, secErr := os.Stat(filepath.Join(do.BinPath, deploy.Contract)); secErr != nil {
if _, thirdErr := os.Stat(filepath.Join(do.BinPath, filepath.Base(deploy.Contract))); thirdErr != nil {
return "", fmt.Errorf("Could not find contract in\n* primary path: %v\n* binary path: %v\n* tertiary path: %v", deploy.Contract, filepath.Join(do.BinPath, deploy.Contract), filepath.Join(do.BinPath, filepath.Base(deploy.Contract)))
} else {
contractPath = filepath.Join(do.BinPath, filepath.Base(deploy.Contract))
}
} else {
contractPath = filepath.Join(do.BinPath, deploy.Contract)
}
} else {
contractPath = deploy.Contract
}
// compile
if filepath.Ext(deploy.Contract) == ".bin" {
log.Info("Binary file detected. Using binary deploy sequence.")
log.WithField("=>", contractPath).Info("Binary path")
binaryResponse, err := compilers.RequestBinaryLinkage(contractPath, deploy.Libraries)
if err != nil {
return "", fmt.Errorf("Something went wrong with your binary deployment: %v", err)
}
if binaryResponse.Error != "" {
return "", fmt.Errorf("Something went wrong when you were trying to link your binaries: %v", binaryResponse.Error)
}
contractCode := binaryResponse.Binary
tx, err := deployTx(do, deploy, contractName, string(contractCode))
if err != nil {
return "could not deploy binary contract", err
}
result, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy from path %s: %v", contractPath, err)
}
return result.String(), err
} else {
contractPath = deploy.Contract
log.WithField("=>", contractPath).Info("Contract path")
// normal compilation/deploy sequence
resp, err := compilers.RequestCompile(contractPath, false, deploy.Libraries)
if err != nil {
log.Errorln("Error compiling contracts: Compilers error:")
return "", err
} else if resp.Error != "" {
log.Errorln("Error compiling contracts: Language error:")
return "", fmt.Errorf("%v", resp.Error)
} else if resp.Warning != "" {
log.WithField("=>", resp.Warning).Warn("Warning during contract compilation")
}
// loop through objects returned from compiler
switch {
case len(resp.Objects) == 1:
log.WithField("path", contractPath).Info("Deploying the only contract in file")
response := resp.Objects[0]
log.WithField("=>", response.ABI).Info("Abi")
log.WithField("=>", response.Bytecode).Info("Bin")
if response.Bytecode != "" {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
case deploy.Instance == "all":
log.WithField("path", contractPath).Info("Deploying all contracts")
var baseObj string
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
if strings.ToLower(response.Objectname) == strings.ToLower(strings.TrimSuffix(filepath.Base(deploy.Contract), filepath.Ext(filepath.Base(deploy.Contract)))) {
baseObj = result
}
}
if baseObj != "" {
result = baseObj
}
default:
log.WithField("contract", deploy.Instance).Info("Deploying a single contract")
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
if matchInstanceName(response.Objectname, deploy.Instance) {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
}
}
}
return result, nil
}
func | (objectName, deployInstance string) bool {
if objectName == "" {
return false
}
// Ignore the filename component that newer versions of Solidity include in object name
objectNameParts := strings.Split(objectName, ":")
return strings.ToLower(objectNameParts[len(objectNameParts)-1]) == strings.ToLower(deployInstance)
}
// TODO [rj] refactor to remove [contractPath] from functions signature => only used in a single error throw.
func deployContract(deploy *def.Deploy, do *def.Packages, compilersResponse compilers.ResponseItem) (string, error) {
log.WithField("=>", string(compilersResponse.ABI)).Debug("ABI Specification (From Compilers)")
contractCode := compilersResponse.Bytecode
// Save ABI
if _, err := os.Stat(do.ABIPath); os.IsNotExist(err) {
if err := os.Mkdir(do.ABIPath, 0775); err != nil {
return "", err
}
}
if _, err := os.Stat(do.BinPath); os.IsNotExist(err) {
if err := os.Mkdir(do.BinPath, 0775); err != nil {
return "", err
}
}
// saving contract/library abi
var abiLocation string
if compilersResponse.Objectname != "" {
abiLocation = filepath.Join(do.ABIPath, compilersResponse.Objectname)
log.WithField("=>", abiLocation).Warn("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
} else {
log.Debug("Objectname from compilers is blank. Not saving abi.")
}
// additional data may be sent along with the contract
// these are naively added to the end of the contract code using standard
// mint packing
if deploy.Data != nil {
_, callDataArray, err := util.PreProcessInputData(compilersResponse.Objectname, deploy.Data, do, true)
if err != nil {
return "", err
}
packedBytes, err := abi.ReadAbiFormulateCall(compilersResponse.Objectname, "", callDataArray, do)
if err != nil {
return "", err
}
callData := hex.EncodeToString(packedBytes)
contractCode = contractCode + callData
}
tx, err := deployTx(do, deploy, compilersResponse.Objectname, contractCode)
if err != nil {
return "", err
}
// Sign, broadcast, display
contractAddress, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy %s: %v", deploy.Contract, err)
}
// saving contract/library abi at abi/address
if contractAddress != nil {
abiLocation := filepath.Join(do.ABIPath, contractAddress.String())
log.WithField("=>", abiLocation).Debug("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
// saving binary
if deploy.SaveBinary {
contractName := filepath.Join(do.BinPath, fmt.Sprintf("%s.bin", compilersResponse.Objectname))
log.WithField("=>", contractName).Warn("Saving Binary")
if err := ioutil.WriteFile(contractName, []byte(contractCode), 0664); err != nil {
return "", err
}
} else {
log.Debug("Not saving binary.")
}
return contractAddress.String(), nil
} else {
// we shouldn't reach this point because we should have an error before this.
return "", fmt.Errorf("The contract did not deploy. Unable to save abi to abi/contractAddress.")
}
}
func deployTx(do *def.Packages, deploy *def.Deploy, contractName, contractCode string) (*payload.CallTx, error) {
// Deploy contract
log.WithFields(log.Fields{
"name": contractName,
}).Warn("Deploying Contract")
log.WithFields(log.Fields{
"source": deploy.Source,
"code": contractCode,
"chain-url": do.ChainURL,
}).Info()
return do.Call(def.CallArg{
Input: deploy.Source,
Amount: deploy.Amount,
Fee: deploy.Fee,
Gas: deploy.Gas,
Data: contractCode,
Sequence: deploy.Sequence,
})
}
func CallJob(call *def.Call, do *def.Packages) (string, []*def.Variable, error) {
var err error
var callData string
var callDataArray []string
// Preprocess variables
call.Source, _ = util.PreProcess(call.Source, do)
call.Destination, _ = util.PreProcess(call.Destination, do)
//todo: find a way to call the fallback function here
call.Function, callDataArray, err = util.PreProcessInputData(call.Function, call.Data, do, false)
if err != nil {
return "", nil, err
}
call.Function, _ = util.PreProcess(call.Function, do)
call.Amount, _ = util.PreProcess(call.Amount, do)
call.Sequence, _ = util.PreProcess(call.Sequence, do)
call.Fee, _ = util.PreProcess(call.Fee, do)
call.Gas, _ = util.PreProcess(call.Gas, do)
call.ABI, _ = util.PreProcess(call.ABI, do)
// Use default
call.Source = useDefault(call.Source, do.Package.Account)
call.Amount = useDefault(call.Amount, do.DefaultAmount)
call.Fee = useDefault(call.Fee, do.DefaultFee)
call.Gas = useDefault(call.Gas, do.DefaultGas)
// formulate call
var packedBytes []byte
if call.ABI == "" {
packedBytes, err = abi.ReadAbiFormulateCall(call.Destination, call.Function, callDataArray, do)
callData = hex.EncodeToString(packedBytes)
} else {
packedBytes, err = abi.ReadAbiFormulateCall(call.ABI, call.Function, callDataArray, do)
callData = hex.EncodeToString(packedBytes)
}
if err != nil {
if call.Function == "()" {
log.Warn("Calling the fallback function")
} else {
var str, err = util.ABIErrorHandler(do, err, call, nil)
return str, nil, err
}
}
log.WithFields(log.Fields{
"destination": call.Destination,
"function": call.Function,
"data": callData,
}).Info("Calling")
tx, err := do.Call(def.CallArg{
Input: call.Source,
Amount: call.Amount,
Address: call.Destination,
Fee: call.Fee,
Gas: call.Gas,
Data: callData,
Sequence: call.Sequence,
})
if err != nil {
return "", nil, err
}
// Sign, broadcast, display
txe, err := do.SignAndBroadcast(tx)
if err != nil {
var err = util.MintChainErrorHandler(do, err)
return "", nil, err
}
var result string
log.Debug(txe.Result.Return)
// Formally process the return
if txe.Result.Return != nil {
log.WithField("=>", result).Debug("Decoding Raw Result")
if call.ABI == "" {
call.Variables, err = abi.ReadAndDecodeContractReturn(call.Destination, call.Function, txe.Result.Return, do)
} else {
call.Variables, err = abi.ReadAndDecodeContractReturn(call.ABI, call.Function, txe.Result.Return, do)
}
if err != nil {
return "", nil, err
}
log.WithField("=>", call.Variables).Debug("call variables:")
result = util.GetReturnValue(call.Variables)
if result != "" {
log.WithField("=>", result).Warn("Return Value")
} else {
log.Debug("No return.")
}
} else {
log.Debug("No return from contract.")
}
if call.Save == "tx" {
log.Info("Saving tx hash instead of contract return")
result = fmt.Sprintf("%X", txe.Receipt.TxHash)
}
return result, call.Variables, nil
}
func deployFinalize(do *def.Packages, tx payload.Payload) (*crypto.Address, error) {
txe, err := do.SignAndBroadcast(tx)
if err != nil {
return nil, util.MintChainErrorHandler(do, err)
}
if err := util.ReadTxSignAndBroadcast(txe, err); err != nil {
return nil, err
}
if !txe.Receipt.CreatesContract || txe.Receipt.ContractAddress == crypto.ZeroAddress {
// Shouldn't get ZeroAddress when CreatesContract is true, but still
return nil, fmt.Errorf("result from SignAndBroadcast does not contain address for the deployed contract")
}
return &txe.Receipt.ContractAddress, nil
}
| matchInstanceName | identifier_name |
jobs_contracts.go | package jobs
import (
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/hyperledger/burrow/crypto"
"github.com/hyperledger/burrow/txs/payload"
"github.com/monax/bosmarmot/bos/abi"
compilers "github.com/monax/bosmarmot/bos/compile"
"github.com/monax/bosmarmot/bos/def"
"github.com/monax/bosmarmot/bos/util"
log "github.com/sirupsen/logrus"
)
func DeployJob(deploy *def.Deploy, do *def.Packages) (result string, err error) {
// Preprocess variables
deploy.Source, _ = util.PreProcess(deploy.Source, do)
deploy.Contract, _ = util.PreProcess(deploy.Contract, do)
deploy.Instance, _ = util.PreProcess(deploy.Instance, do)
deploy.Libraries, _ = util.PreProcessLibs(deploy.Libraries, do)
deploy.Amount, _ = util.PreProcess(deploy.Amount, do)
deploy.Sequence, _ = util.PreProcess(deploy.Sequence, do)
deploy.Fee, _ = util.PreProcess(deploy.Fee, do)
deploy.Gas, _ = util.PreProcess(deploy.Gas, do)
// trim the extension
contractName := strings.TrimSuffix(deploy.Contract, filepath.Ext(deploy.Contract))
// Use defaults
deploy.Source = useDefault(deploy.Source, do.Package.Account)
deploy.Instance = useDefault(deploy.Instance, contractName)
deploy.Amount = useDefault(deploy.Amount, do.DefaultAmount)
deploy.Fee = useDefault(deploy.Fee, do.DefaultFee)
deploy.Gas = useDefault(deploy.Gas, do.DefaultGas)
// assemble contract
var contractPath string
if _, err := os.Stat(deploy.Contract); err != nil {
if _, secErr := os.Stat(filepath.Join(do.BinPath, deploy.Contract)); secErr != nil {
if _, thirdErr := os.Stat(filepath.Join(do.BinPath, filepath.Base(deploy.Contract))); thirdErr != nil {
return "", fmt.Errorf("Could not find contract in\n* primary path: %v\n* binary path: %v\n* tertiary path: %v", deploy.Contract, filepath.Join(do.BinPath, deploy.Contract), filepath.Join(do.BinPath, filepath.Base(deploy.Contract)))
} else {
contractPath = filepath.Join(do.BinPath, filepath.Base(deploy.Contract))
}
} else {
contractPath = filepath.Join(do.BinPath, deploy.Contract)
}
} else {
contractPath = deploy.Contract
}
// compile
if filepath.Ext(deploy.Contract) == ".bin" {
log.Info("Binary file detected. Using binary deploy sequence.")
log.WithField("=>", contractPath).Info("Binary path")
binaryResponse, err := compilers.RequestBinaryLinkage(contractPath, deploy.Libraries)
if err != nil {
return "", fmt.Errorf("Something went wrong with your binary deployment: %v", err)
}
if binaryResponse.Error != "" {
return "", fmt.Errorf("Something went wrong when you were trying to link your binaries: %v", binaryResponse.Error)
}
contractCode := binaryResponse.Binary
tx, err := deployTx(do, deploy, contractName, string(contractCode))
if err != nil {
return "could not deploy binary contract", err
}
result, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy from path %s: %v", contractPath, err)
}
return result.String(), err
} else {
contractPath = deploy.Contract
log.WithField("=>", contractPath).Info("Contract path")
// normal compilation/deploy sequence
resp, err := compilers.RequestCompile(contractPath, false, deploy.Libraries)
if err != nil {
log.Errorln("Error compiling contracts: Compilers error:")
return "", err
} else if resp.Error != "" {
log.Errorln("Error compiling contracts: Language error:")
return "", fmt.Errorf("%v", resp.Error)
} else if resp.Warning != "" {
log.WithField("=>", resp.Warning).Warn("Warning during contract compilation") | response := resp.Objects[0]
log.WithField("=>", response.ABI).Info("Abi")
log.WithField("=>", response.Bytecode).Info("Bin")
if response.Bytecode != "" {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
case deploy.Instance == "all":
log.WithField("path", contractPath).Info("Deploying all contracts")
var baseObj string
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
if strings.ToLower(response.Objectname) == strings.ToLower(strings.TrimSuffix(filepath.Base(deploy.Contract), filepath.Ext(filepath.Base(deploy.Contract)))) {
baseObj = result
}
}
if baseObj != "" {
result = baseObj
}
default:
log.WithField("contract", deploy.Instance).Info("Deploying a single contract")
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
if matchInstanceName(response.Objectname, deploy.Instance) {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
}
}
}
return result, nil
}
func matchInstanceName(objectName, deployInstance string) bool {
if objectName == "" {
return false
}
// Ignore the filename component that newer versions of Solidity include in object name
objectNameParts := strings.Split(objectName, ":")
return strings.ToLower(objectNameParts[len(objectNameParts)-1]) == strings.ToLower(deployInstance)
}
// TODO [rj] refactor to remove [contractPath] from functions signature => only used in a single error throw.
func deployContract(deploy *def.Deploy, do *def.Packages, compilersResponse compilers.ResponseItem) (string, error) {
log.WithField("=>", string(compilersResponse.ABI)).Debug("ABI Specification (From Compilers)")
contractCode := compilersResponse.Bytecode
// Save ABI
if _, err := os.Stat(do.ABIPath); os.IsNotExist(err) {
if err := os.Mkdir(do.ABIPath, 0775); err != nil {
return "", err
}
}
if _, err := os.Stat(do.BinPath); os.IsNotExist(err) {
if err := os.Mkdir(do.BinPath, 0775); err != nil {
return "", err
}
}
// saving contract/library abi
var abiLocation string
if compilersResponse.Objectname != "" {
abiLocation = filepath.Join(do.ABIPath, compilersResponse.Objectname)
log.WithField("=>", abiLocation).Warn("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
} else {
log.Debug("Objectname from compilers is blank. Not saving abi.")
}
// additional data may be sent along with the contract
// these are naively added to the end of the contract code using standard
// mint packing
if deploy.Data != nil {
_, callDataArray, err := util.PreProcessInputData(compilersResponse.Objectname, deploy.Data, do, true)
if err != nil {
return "", err
}
packedBytes, err := abi.ReadAbiFormulateCall(compilersResponse.Objectname, "", callDataArray, do)
if err != nil {
return "", err
}
callData := hex.EncodeToString(packedBytes)
contractCode = contractCode + callData
}
tx, err := deployTx(do, deploy, compilersResponse.Objectname, contractCode)
if err != nil {
return "", err
}
// Sign, broadcast, display
contractAddress, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy %s: %v", deploy.Contract, err)
}
// saving contract/library abi at abi/address
if contractAddress != nil {
abiLocation := filepath.Join(do.ABIPath, contractAddress.String())
log.WithField("=>", abiLocation).Debug("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
// saving binary
if deploy.SaveBinary {
contractName := filepath.Join(do.BinPath, fmt.Sprintf("%s.bin", compilersResponse.Objectname))
log.WithField("=>", contractName).Warn("Saving Binary")
if err := ioutil.WriteFile(contractName, []byte(contractCode), 0664); err != nil {
return "", err
}
} else {
log.Debug("Not saving binary.")
}
return contractAddress.String(), nil
} else {
// we shouldn't reach this point because we should have an error before this.
return "", fmt.Errorf("The contract did not deploy. Unable to save abi to abi/contractAddress.")
}
}
func deployTx(do *def.Packages, deploy *def.Deploy, contractName, contractCode string) (*payload.CallTx, error) {
// Deploy contract
log.WithFields(log.Fields{
"name": contractName,
}).Warn("Deploying Contract")
log.WithFields(log.Fields{
"source": deploy.Source,
"code": contractCode,
"chain-url": do.ChainURL,
}).Info()
return do.Call(def.CallArg{
Input: deploy.Source,
Amount: deploy.Amount,
Fee: deploy.Fee,
Gas: deploy.Gas,
Data: contractCode,
Sequence: deploy.Sequence,
})
}
func CallJob(call *def.Call, do *def.Packages) (string, []*def.Variable, error) {
var err error
var callData string
var callDataArray []string
// Preprocess variables
call.Source, _ = util.PreProcess(call.Source, do)
call.Destination, _ = util.PreProcess(call.Destination, do)
//todo: find a way to call the fallback function here
call.Function, callDataArray, err = util.PreProcessInputData(call.Function, call.Data, do, false)
if err != nil {
return "", nil, err
}
call.Function, _ = util.PreProcess(call.Function, do)
call.Amount, _ = util.PreProcess(call.Amount, do)
call.Sequence, _ = util.PreProcess(call.Sequence, do)
call.Fee, _ = util.PreProcess(call.Fee, do)
call.Gas, _ = util.PreProcess(call.Gas, do)
call.ABI, _ = util.PreProcess(call.ABI, do)
// Use default
call.Source = useDefault(call.Source, do.Package.Account)
call.Amount = useDefault(call.Amount, do.DefaultAmount)
call.Fee = useDefault(call.Fee, do.DefaultFee)
call.Gas = useDefault(call.Gas, do.DefaultGas)
// formulate call
var packedBytes []byte
if call.ABI == "" {
packedBytes, err = abi.ReadAbiFormulateCall(call.Destination, call.Function, callDataArray, do)
callData = hex.EncodeToString(packedBytes)
} else {
packedBytes, err = abi.ReadAbiFormulateCall(call.ABI, call.Function, callDataArray, do)
callData = hex.EncodeToString(packedBytes)
}
if err != nil {
if call.Function == "()" {
log.Warn("Calling the fallback function")
} else {
var str, err = util.ABIErrorHandler(do, err, call, nil)
return str, nil, err
}
}
log.WithFields(log.Fields{
"destination": call.Destination,
"function": call.Function,
"data": callData,
}).Info("Calling")
tx, err := do.Call(def.CallArg{
Input: call.Source,
Amount: call.Amount,
Address: call.Destination,
Fee: call.Fee,
Gas: call.Gas,
Data: callData,
Sequence: call.Sequence,
})
if err != nil {
return "", nil, err
}
// Sign, broadcast, display
txe, err := do.SignAndBroadcast(tx)
if err != nil {
var err = util.MintChainErrorHandler(do, err)
return "", nil, err
}
var result string
log.Debug(txe.Result.Return)
// Formally process the return
if txe.Result.Return != nil {
log.WithField("=>", result).Debug("Decoding Raw Result")
if call.ABI == "" {
call.Variables, err = abi.ReadAndDecodeContractReturn(call.Destination, call.Function, txe.Result.Return, do)
} else {
call.Variables, err = abi.ReadAndDecodeContractReturn(call.ABI, call.Function, txe.Result.Return, do)
}
if err != nil {
return "", nil, err
}
log.WithField("=>", call.Variables).Debug("call variables:")
result = util.GetReturnValue(call.Variables)
if result != "" {
log.WithField("=>", result).Warn("Return Value")
} else {
log.Debug("No return.")
}
} else {
log.Debug("No return from contract.")
}
if call.Save == "tx" {
log.Info("Saving tx hash instead of contract return")
result = fmt.Sprintf("%X", txe.Receipt.TxHash)
}
return result, call.Variables, nil
}
func deployFinalize(do *def.Packages, tx payload.Payload) (*crypto.Address, error) {
txe, err := do.SignAndBroadcast(tx)
if err != nil {
return nil, util.MintChainErrorHandler(do, err)
}
if err := util.ReadTxSignAndBroadcast(txe, err); err != nil {
return nil, err
}
if !txe.Receipt.CreatesContract || txe.Receipt.ContractAddress == crypto.ZeroAddress {
// Shouldn't get ZeroAddress when CreatesContract is true, but still
return nil, fmt.Errorf("result from SignAndBroadcast does not contain address for the deployed contract")
}
return &txe.Receipt.ContractAddress, nil
} | }
// loop through objects returned from compiler
switch {
case len(resp.Objects) == 1:
log.WithField("path", contractPath).Info("Deploying the only contract in file") | random_line_split |
jobs_contracts.go | package jobs
import (
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/hyperledger/burrow/crypto"
"github.com/hyperledger/burrow/txs/payload"
"github.com/monax/bosmarmot/bos/abi"
compilers "github.com/monax/bosmarmot/bos/compile"
"github.com/monax/bosmarmot/bos/def"
"github.com/monax/bosmarmot/bos/util"
log "github.com/sirupsen/logrus"
)
func DeployJob(deploy *def.Deploy, do *def.Packages) (result string, err error) {
// Preprocess variables
deploy.Source, _ = util.PreProcess(deploy.Source, do)
deploy.Contract, _ = util.PreProcess(deploy.Contract, do)
deploy.Instance, _ = util.PreProcess(deploy.Instance, do)
deploy.Libraries, _ = util.PreProcessLibs(deploy.Libraries, do)
deploy.Amount, _ = util.PreProcess(deploy.Amount, do)
deploy.Sequence, _ = util.PreProcess(deploy.Sequence, do)
deploy.Fee, _ = util.PreProcess(deploy.Fee, do)
deploy.Gas, _ = util.PreProcess(deploy.Gas, do)
// trim the extension
contractName := strings.TrimSuffix(deploy.Contract, filepath.Ext(deploy.Contract))
// Use defaults
deploy.Source = useDefault(deploy.Source, do.Package.Account)
deploy.Instance = useDefault(deploy.Instance, contractName)
deploy.Amount = useDefault(deploy.Amount, do.DefaultAmount)
deploy.Fee = useDefault(deploy.Fee, do.DefaultFee)
deploy.Gas = useDefault(deploy.Gas, do.DefaultGas)
// assemble contract
var contractPath string
if _, err := os.Stat(deploy.Contract); err != nil {
if _, secErr := os.Stat(filepath.Join(do.BinPath, deploy.Contract)); secErr != nil {
if _, thirdErr := os.Stat(filepath.Join(do.BinPath, filepath.Base(deploy.Contract))); thirdErr != nil {
return "", fmt.Errorf("Could not find contract in\n* primary path: %v\n* binary path: %v\n* tertiary path: %v", deploy.Contract, filepath.Join(do.BinPath, deploy.Contract), filepath.Join(do.BinPath, filepath.Base(deploy.Contract)))
} else {
contractPath = filepath.Join(do.BinPath, filepath.Base(deploy.Contract))
}
} else {
contractPath = filepath.Join(do.BinPath, deploy.Contract)
}
} else {
contractPath = deploy.Contract
}
// compile
if filepath.Ext(deploy.Contract) == ".bin" {
log.Info("Binary file detected. Using binary deploy sequence.")
log.WithField("=>", contractPath).Info("Binary path")
binaryResponse, err := compilers.RequestBinaryLinkage(contractPath, deploy.Libraries)
if err != nil {
return "", fmt.Errorf("Something went wrong with your binary deployment: %v", err)
}
if binaryResponse.Error != "" {
return "", fmt.Errorf("Something went wrong when you were trying to link your binaries: %v", binaryResponse.Error)
}
contractCode := binaryResponse.Binary
tx, err := deployTx(do, deploy, contractName, string(contractCode))
if err != nil {
return "could not deploy binary contract", err
}
result, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy from path %s: %v", contractPath, err)
}
return result.String(), err
} else {
contractPath = deploy.Contract
log.WithField("=>", contractPath).Info("Contract path")
// normal compilation/deploy sequence
resp, err := compilers.RequestCompile(contractPath, false, deploy.Libraries)
if err != nil {
log.Errorln("Error compiling contracts: Compilers error:")
return "", err
} else if resp.Error != "" {
log.Errorln("Error compiling contracts: Language error:")
return "", fmt.Errorf("%v", resp.Error)
} else if resp.Warning != "" {
log.WithField("=>", resp.Warning).Warn("Warning during contract compilation")
}
// loop through objects returned from compiler
switch {
case len(resp.Objects) == 1:
log.WithField("path", contractPath).Info("Deploying the only contract in file")
response := resp.Objects[0]
log.WithField("=>", response.ABI).Info("Abi")
log.WithField("=>", response.Bytecode).Info("Bin")
if response.Bytecode != "" {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
case deploy.Instance == "all":
log.WithField("path", contractPath).Info("Deploying all contracts")
var baseObj string
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
if strings.ToLower(response.Objectname) == strings.ToLower(strings.TrimSuffix(filepath.Base(deploy.Contract), filepath.Ext(filepath.Base(deploy.Contract)))) {
baseObj = result
}
}
if baseObj != "" {
result = baseObj
}
default:
log.WithField("contract", deploy.Instance).Info("Deploying a single contract")
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
if matchInstanceName(response.Objectname, deploy.Instance) {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
}
}
}
return result, nil
}
func matchInstanceName(objectName, deployInstance string) bool {
if objectName == "" {
return false
}
// Ignore the filename component that newer versions of Solidity include in object name
objectNameParts := strings.Split(objectName, ":")
return strings.ToLower(objectNameParts[len(objectNameParts)-1]) == strings.ToLower(deployInstance)
}
// TODO [rj] refactor to remove [contractPath] from functions signature => only used in a single error throw.
func deployContract(deploy *def.Deploy, do *def.Packages, compilersResponse compilers.ResponseItem) (string, error) |
func deployTx(do *def.Packages, deploy *def.Deploy, contractName, contractCode string) (*payload.CallTx, error) {
// Deploy contract
log.WithFields(log.Fields{
"name": contractName,
}).Warn("Deploying Contract")
log.WithFields(log.Fields{
"source": deploy.Source,
"code": contractCode,
"chain-url": do.ChainURL,
}).Info()
return do.Call(def.CallArg{
Input: deploy.Source,
Amount: deploy.Amount,
Fee: deploy.Fee,
Gas: deploy.Gas,
Data: contractCode,
Sequence: deploy.Sequence,
})
}
func CallJob(call *def.Call, do *def.Packages) (string, []*def.Variable, error) {
var err error
var callData string
var callDataArray []string
// Preprocess variables
call.Source, _ = util.PreProcess(call.Source, do)
call.Destination, _ = util.PreProcess(call.Destination, do)
//todo: find a way to call the fallback function here
call.Function, callDataArray, err = util.PreProcessInputData(call.Function, call.Data, do, false)
if err != nil {
return "", nil, err
}
call.Function, _ = util.PreProcess(call.Function, do)
call.Amount, _ = util.PreProcess(call.Amount, do)
call.Sequence, _ = util.PreProcess(call.Sequence, do)
call.Fee, _ = util.PreProcess(call.Fee, do)
call.Gas, _ = util.PreProcess(call.Gas, do)
call.ABI, _ = util.PreProcess(call.ABI, do)
// Use default
call.Source = useDefault(call.Source, do.Package.Account)
call.Amount = useDefault(call.Amount, do.DefaultAmount)
call.Fee = useDefault(call.Fee, do.DefaultFee)
call.Gas = useDefault(call.Gas, do.DefaultGas)
// formulate call
var packedBytes []byte
if call.ABI == "" {
packedBytes, err = abi.ReadAbiFormulateCall(call.Destination, call.Function, callDataArray, do)
callData = hex.EncodeToString(packedBytes)
} else {
packedBytes, err = abi.ReadAbiFormulateCall(call.ABI, call.Function, callDataArray, do)
callData = hex.EncodeToString(packedBytes)
}
if err != nil {
if call.Function == "()" {
log.Warn("Calling the fallback function")
} else {
var str, err = util.ABIErrorHandler(do, err, call, nil)
return str, nil, err
}
}
log.WithFields(log.Fields{
"destination": call.Destination,
"function": call.Function,
"data": callData,
}).Info("Calling")
tx, err := do.Call(def.CallArg{
Input: call.Source,
Amount: call.Amount,
Address: call.Destination,
Fee: call.Fee,
Gas: call.Gas,
Data: callData,
Sequence: call.Sequence,
})
if err != nil {
return "", nil, err
}
// Sign, broadcast, display
txe, err := do.SignAndBroadcast(tx)
if err != nil {
var err = util.MintChainErrorHandler(do, err)
return "", nil, err
}
var result string
log.Debug(txe.Result.Return)
// Formally process the return
if txe.Result.Return != nil {
log.WithField("=>", result).Debug("Decoding Raw Result")
if call.ABI == "" {
call.Variables, err = abi.ReadAndDecodeContractReturn(call.Destination, call.Function, txe.Result.Return, do)
} else {
call.Variables, err = abi.ReadAndDecodeContractReturn(call.ABI, call.Function, txe.Result.Return, do)
}
if err != nil {
return "", nil, err
}
log.WithField("=>", call.Variables).Debug("call variables:")
result = util.GetReturnValue(call.Variables)
if result != "" {
log.WithField("=>", result).Warn("Return Value")
} else {
log.Debug("No return.")
}
} else {
log.Debug("No return from contract.")
}
if call.Save == "tx" {
log.Info("Saving tx hash instead of contract return")
result = fmt.Sprintf("%X", txe.Receipt.TxHash)
}
return result, call.Variables, nil
}
func deployFinalize(do *def.Packages, tx payload.Payload) (*crypto.Address, error) {
txe, err := do.SignAndBroadcast(tx)
if err != nil {
return nil, util.MintChainErrorHandler(do, err)
}
if err := util.ReadTxSignAndBroadcast(txe, err); err != nil {
return nil, err
}
if !txe.Receipt.CreatesContract || txe.Receipt.ContractAddress == crypto.ZeroAddress {
// Shouldn't get ZeroAddress when CreatesContract is true, but still
return nil, fmt.Errorf("result from SignAndBroadcast does not contain address for the deployed contract")
}
return &txe.Receipt.ContractAddress, nil
}
| {
log.WithField("=>", string(compilersResponse.ABI)).Debug("ABI Specification (From Compilers)")
contractCode := compilersResponse.Bytecode
// Save ABI
if _, err := os.Stat(do.ABIPath); os.IsNotExist(err) {
if err := os.Mkdir(do.ABIPath, 0775); err != nil {
return "", err
}
}
if _, err := os.Stat(do.BinPath); os.IsNotExist(err) {
if err := os.Mkdir(do.BinPath, 0775); err != nil {
return "", err
}
}
// saving contract/library abi
var abiLocation string
if compilersResponse.Objectname != "" {
abiLocation = filepath.Join(do.ABIPath, compilersResponse.Objectname)
log.WithField("=>", abiLocation).Warn("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
} else {
log.Debug("Objectname from compilers is blank. Not saving abi.")
}
// additional data may be sent along with the contract
// these are naively added to the end of the contract code using standard
// mint packing
if deploy.Data != nil {
_, callDataArray, err := util.PreProcessInputData(compilersResponse.Objectname, deploy.Data, do, true)
if err != nil {
return "", err
}
packedBytes, err := abi.ReadAbiFormulateCall(compilersResponse.Objectname, "", callDataArray, do)
if err != nil {
return "", err
}
callData := hex.EncodeToString(packedBytes)
contractCode = contractCode + callData
}
tx, err := deployTx(do, deploy, compilersResponse.Objectname, contractCode)
if err != nil {
return "", err
}
// Sign, broadcast, display
contractAddress, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy %s: %v", deploy.Contract, err)
}
// saving contract/library abi at abi/address
if contractAddress != nil {
abiLocation := filepath.Join(do.ABIPath, contractAddress.String())
log.WithField("=>", abiLocation).Debug("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
// saving binary
if deploy.SaveBinary {
contractName := filepath.Join(do.BinPath, fmt.Sprintf("%s.bin", compilersResponse.Objectname))
log.WithField("=>", contractName).Warn("Saving Binary")
if err := ioutil.WriteFile(contractName, []byte(contractCode), 0664); err != nil {
return "", err
}
} else {
log.Debug("Not saving binary.")
}
return contractAddress.String(), nil
} else {
// we shouldn't reach this point because we should have an error before this.
return "", fmt.Errorf("The contract did not deploy. Unable to save abi to abi/contractAddress.")
}
} | identifier_body |
jobs_contracts.go | package jobs
import (
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/hyperledger/burrow/crypto"
"github.com/hyperledger/burrow/txs/payload"
"github.com/monax/bosmarmot/bos/abi"
compilers "github.com/monax/bosmarmot/bos/compile"
"github.com/monax/bosmarmot/bos/def"
"github.com/monax/bosmarmot/bos/util"
log "github.com/sirupsen/logrus"
)
func DeployJob(deploy *def.Deploy, do *def.Packages) (result string, err error) {
// Preprocess variables
deploy.Source, _ = util.PreProcess(deploy.Source, do)
deploy.Contract, _ = util.PreProcess(deploy.Contract, do)
deploy.Instance, _ = util.PreProcess(deploy.Instance, do)
deploy.Libraries, _ = util.PreProcessLibs(deploy.Libraries, do)
deploy.Amount, _ = util.PreProcess(deploy.Amount, do)
deploy.Sequence, _ = util.PreProcess(deploy.Sequence, do)
deploy.Fee, _ = util.PreProcess(deploy.Fee, do)
deploy.Gas, _ = util.PreProcess(deploy.Gas, do)
// trim the extension
contractName := strings.TrimSuffix(deploy.Contract, filepath.Ext(deploy.Contract))
// Use defaults
deploy.Source = useDefault(deploy.Source, do.Package.Account)
deploy.Instance = useDefault(deploy.Instance, contractName)
deploy.Amount = useDefault(deploy.Amount, do.DefaultAmount)
deploy.Fee = useDefault(deploy.Fee, do.DefaultFee)
deploy.Gas = useDefault(deploy.Gas, do.DefaultGas)
// assemble contract
var contractPath string
if _, err := os.Stat(deploy.Contract); err != nil {
if _, secErr := os.Stat(filepath.Join(do.BinPath, deploy.Contract)); secErr != nil {
if _, thirdErr := os.Stat(filepath.Join(do.BinPath, filepath.Base(deploy.Contract))); thirdErr != nil {
return "", fmt.Errorf("Could not find contract in\n* primary path: %v\n* binary path: %v\n* tertiary path: %v", deploy.Contract, filepath.Join(do.BinPath, deploy.Contract), filepath.Join(do.BinPath, filepath.Base(deploy.Contract)))
} else {
contractPath = filepath.Join(do.BinPath, filepath.Base(deploy.Contract))
}
} else {
contractPath = filepath.Join(do.BinPath, deploy.Contract)
}
} else {
contractPath = deploy.Contract
}
// compile
if filepath.Ext(deploy.Contract) == ".bin" {
log.Info("Binary file detected. Using binary deploy sequence.")
log.WithField("=>", contractPath).Info("Binary path")
binaryResponse, err := compilers.RequestBinaryLinkage(contractPath, deploy.Libraries)
if err != nil {
return "", fmt.Errorf("Something went wrong with your binary deployment: %v", err)
}
if binaryResponse.Error != "" {
return "", fmt.Errorf("Something went wrong when you were trying to link your binaries: %v", binaryResponse.Error)
}
contractCode := binaryResponse.Binary
tx, err := deployTx(do, deploy, contractName, string(contractCode))
if err != nil {
return "could not deploy binary contract", err
}
result, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy from path %s: %v", contractPath, err)
}
return result.String(), err
} else {
contractPath = deploy.Contract
log.WithField("=>", contractPath).Info("Contract path")
// normal compilation/deploy sequence
resp, err := compilers.RequestCompile(contractPath, false, deploy.Libraries)
if err != nil {
log.Errorln("Error compiling contracts: Compilers error:")
return "", err
} else if resp.Error != "" {
log.Errorln("Error compiling contracts: Language error:")
return "", fmt.Errorf("%v", resp.Error)
} else if resp.Warning != "" {
log.WithField("=>", resp.Warning).Warn("Warning during contract compilation")
}
// loop through objects returned from compiler
switch {
case len(resp.Objects) == 1:
log.WithField("path", contractPath).Info("Deploying the only contract in file")
response := resp.Objects[0]
log.WithField("=>", response.ABI).Info("Abi")
log.WithField("=>", response.Bytecode).Info("Bin")
if response.Bytecode != "" {
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
}
case deploy.Instance == "all":
log.WithField("path", contractPath).Info("Deploying all contracts")
var baseObj string
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
result, err = deployContract(deploy, do, response)
if err != nil {
return "", err
}
if strings.ToLower(response.Objectname) == strings.ToLower(strings.TrimSuffix(filepath.Base(deploy.Contract), filepath.Ext(filepath.Base(deploy.Contract)))) {
baseObj = result
}
}
if baseObj != "" {
result = baseObj
}
default:
log.WithField("contract", deploy.Instance).Info("Deploying a single contract")
for _, response := range resp.Objects {
if response.Bytecode == "" {
continue
}
if matchInstanceName(response.Objectname, deploy.Instance) {
result, err = deployContract(deploy, do, response)
if err != nil |
}
}
}
}
return result, nil
}
func matchInstanceName(objectName, deployInstance string) bool {
if objectName == "" {
return false
}
// Ignore the filename component that newer versions of Solidity include in object name
objectNameParts := strings.Split(objectName, ":")
return strings.ToLower(objectNameParts[len(objectNameParts)-1]) == strings.ToLower(deployInstance)
}
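// Illustrative note (added for clarity, not in the original source): newer solc
// versions prefix object names with the source path, e.g.
// "contracts/storage.sol:SimpleStorage". matchInstanceName ignores that prefix,
// so with the hypothetical names below both calls would return true:
//
//	matchInstanceName("SimpleStorage", "simplestorage")
//	matchInstanceName("contracts/storage.sol:SimpleStorage", "simplestorage")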
// TODO [rj] refactor to remove [contractPath] from the function's signature => only used in a single error throw.
func deployContract(deploy *def.Deploy, do *def.Packages, compilersResponse compilers.ResponseItem) (string, error) {
log.WithField("=>", string(compilersResponse.ABI)).Debug("ABI Specification (From Compilers)")
contractCode := compilersResponse.Bytecode
// Save ABI
if _, err := os.Stat(do.ABIPath); os.IsNotExist(err) {
if err := os.Mkdir(do.ABIPath, 0775); err != nil {
return "", err
}
}
if _, err := os.Stat(do.BinPath); os.IsNotExist(err) {
if err := os.Mkdir(do.BinPath, 0775); err != nil {
return "", err
}
}
// saving contract/library abi
var abiLocation string
if compilersResponse.Objectname != "" {
abiLocation = filepath.Join(do.ABIPath, compilersResponse.Objectname)
log.WithField("=>", abiLocation).Warn("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
} else {
log.Debug("Objectname from compilers is blank. Not saving abi.")
}
// additional data may be sent along with the contract
// these are naively added to the end of the contract code using standard
// mint packing
if deploy.Data != nil {
_, callDataArray, err := util.PreProcessInputData(compilersResponse.Objectname, deploy.Data, do, true)
if err != nil {
return "", err
}
packedBytes, err := abi.ReadAbiFormulateCall(compilersResponse.Objectname, "", callDataArray, do)
if err != nil {
return "", err
}
callData := hex.EncodeToString(packedBytes)
contractCode = contractCode + callData
}
tx, err := deployTx(do, deploy, compilersResponse.Objectname, contractCode)
if err != nil {
return "", err
}
// Sign, broadcast, display
contractAddress, err := deployFinalize(do, tx)
if err != nil {
return "", fmt.Errorf("Error finalizing contract deploy %s: %v", deploy.Contract, err)
}
// saving contract/library abi at abi/address
if contractAddress != nil {
abiLocation := filepath.Join(do.ABIPath, contractAddress.String())
log.WithField("=>", abiLocation).Debug("Saving ABI")
if err := ioutil.WriteFile(abiLocation, []byte(compilersResponse.ABI), 0664); err != nil {
return "", err
}
// saving binary
if deploy.SaveBinary {
contractName := filepath.Join(do.BinPath, fmt.Sprintf("%s.bin", compilersResponse.Objectname))
log.WithField("=>", contractName).Warn("Saving Binary")
if err := ioutil.WriteFile(contractName, []byte(contractCode), 0664); err != nil {
return "", err
}
} else {
log.Debug("Not saving binary.")
}
return contractAddress.String(), nil
} else {
// we shouldn't reach this point because we should have an error before this.
return "", fmt.Errorf("The contract did not deploy. Unable to save abi to abi/contractAddress.")
}
}
func deployTx(do *def.Packages, deploy *def.Deploy, contractName, contractCode string) (*payload.CallTx, error) {
// Deploy contract
log.WithFields(log.Fields{
"name": contractName,
}).Warn("Deploying Contract")
log.WithFields(log.Fields{
"source": deploy.Source,
"code": contractCode,
"chain-url": do.ChainURL,
}).Info()
return do.Call(def.CallArg{
Input: deploy.Source,
Amount: deploy.Amount,
Fee: deploy.Fee,
Gas: deploy.Gas,
Data: contractCode,
Sequence: deploy.Sequence,
})
}
func CallJob(call *def.Call, do *def.Packages) (string, []*def.Variable, error) {
var err error
var callData string
var callDataArray []string
// Preprocess variables
call.Source, _ = util.PreProcess(call.Source, do)
call.Destination, _ = util.PreProcess(call.Destination, do)
//todo: find a way to call the fallback function here
call.Function, callDataArray, err = util.PreProcessInputData(call.Function, call.Data, do, false)
if err != nil {
return "", nil, err
}
call.Function, _ = util.PreProcess(call.Function, do)
call.Amount, _ = util.PreProcess(call.Amount, do)
call.Sequence, _ = util.PreProcess(call.Sequence, do)
call.Fee, _ = util.PreProcess(call.Fee, do)
call.Gas, _ = util.PreProcess(call.Gas, do)
call.ABI, _ = util.PreProcess(call.ABI, do)
// Use default
call.Source = useDefault(call.Source, do.Package.Account)
call.Amount = useDefault(call.Amount, do.DefaultAmount)
call.Fee = useDefault(call.Fee, do.DefaultFee)
call.Gas = useDefault(call.Gas, do.DefaultGas)
// formulate call
var packedBytes []byte
if call.ABI == "" {
packedBytes, err = abi.ReadAbiFormulateCall(call.Destination, call.Function, callDataArray, do)
callData = hex.EncodeToString(packedBytes)
} else {
packedBytes, err = abi.ReadAbiFormulateCall(call.ABI, call.Function, callDataArray, do)
callData = hex.EncodeToString(packedBytes)
}
if err != nil {
if call.Function == "()" {
log.Warn("Calling the fallback function")
} else {
var str, err = util.ABIErrorHandler(do, err, call, nil)
return str, nil, err
}
}
log.WithFields(log.Fields{
"destination": call.Destination,
"function": call.Function,
"data": callData,
}).Info("Calling")
tx, err := do.Call(def.CallArg{
Input: call.Source,
Amount: call.Amount,
Address: call.Destination,
Fee: call.Fee,
Gas: call.Gas,
Data: callData,
Sequence: call.Sequence,
})
if err != nil {
return "", nil, err
}
// Sign, broadcast, display
txe, err := do.SignAndBroadcast(tx)
if err != nil {
var err = util.MintChainErrorHandler(do, err)
return "", nil, err
}
var result string
log.Debug(txe.Result.Return)
// Formally process the return
if txe.Result.Return != nil {
log.WithField("=>", result).Debug("Decoding Raw Result")
if call.ABI == "" {
call.Variables, err = abi.ReadAndDecodeContractReturn(call.Destination, call.Function, txe.Result.Return, do)
} else {
call.Variables, err = abi.ReadAndDecodeContractReturn(call.ABI, call.Function, txe.Result.Return, do)
}
if err != nil {
return "", nil, err
}
log.WithField("=>", call.Variables).Debug("call variables:")
result = util.GetReturnValue(call.Variables)
if result != "" {
log.WithField("=>", result).Warn("Return Value")
} else {
log.Debug("No return.")
}
} else {
log.Debug("No return from contract.")
}
if call.Save == "tx" {
log.Info("Saving tx hash instead of contract return")
result = fmt.Sprintf("%X", txe.Receipt.TxHash)
}
return result, call.Variables, nil
}
func deployFinalize(do *def.Packages, tx payload.Payload) (*crypto.Address, error) {
txe, err := do.SignAndBroadcast(tx)
if err != nil {
return nil, util.MintChainErrorHandler(do, err)
}
if err := util.ReadTxSignAndBroadcast(txe, err); err != nil {
return nil, err
}
if !txe.Receipt.CreatesContract || txe.Receipt.ContractAddress == crypto.ZeroAddress {
// Shouldn't get ZeroAddress when CreatesContract is true, but still
return nil, fmt.Errorf("result from SignAndBroadcast does not contain address for the deployed contract")
}
return &txe.Receipt.ContractAddress, nil
}
| {
return "", err
} | conditional_block |
client.d.ts | // Project: https://github.com/takayama-lily/oicq
/// <reference types="node" />
import * as events from 'events';
import * as log4js from 'log4js';
export type Uin = string | number;
// In most cases you do not need to touch these options, since the defaults cover the most common setup; change them only when you need different behaviour.
export interface ConfBot {
    log_level?: "trace" | "debug" | "info" | "warn" | "error" | "fatal" | "off", // defaults to "info"
    platform?: number, // 1: Android phone  2: aPad (default)  3: Android watch  4: Mac (experimental)
    kickoff?: boolean, // when kicked offline by another login, kick the other side back after 3 seconds; defaults to false
    ignore_self?: boolean, // ignore the bot's own messages in group chats; defaults to true
    resend?: boolean, // when blocked by risk control, retry with fragmented sending; defaults to true (a legacy message form, fragment reassembly is not supported yet)
    data_dir?: string, // data storage folder, must be writable; defaults to the "data" folder under the main directory
    // reconnect interval in seconds after a system.offline.network event; defaults to 5 (seconds), values below 3 (seconds) are not recommended
    // a momentary drop-and-reconnect does not trigger this event; it usually fires only when the machine really has no network connection
    // set to 0 to disable automatic reconnection; you can then listen for this event and handle it yourself
    reconn_interval?: number,
    // manually specify the server ip and port
    // msfwifi.3g.qq.com:8080 is used by default; to change it, prefer repointing that domain via hosts rather than hard-coding an ip
    // @link https://site.ip138.com/msfwifi.3g.qq.com/ the following four ports are usually open: 80, 443, 8080, 14000
    remote_ip?: string,
    remote_port?: number,
}
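// A minimal usage sketch (not part of these declarations; the uin, password and
// module name below are placeholders): pass a partial ConfBot when creating the client.
//
//   import { createClient } from 'oicq';
//   const bot = createClient(123456789, { platform: 2, log_level: "info" });
//   bot.login("password");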
export interface Statistics {
readonly start_time: number,
readonly lost_times: number,
readonly recv_pkt_cnt: number,
readonly sent_pkt_cnt: number,
    readonly lost_pkt_cnt: number, // packets that timed out without a response
readonly recv_msg_cnt: number,
readonly sent_msg_cnt: number,
}
export interface Status {
online: boolean,
status: number,
remote_ip?: number,
remote_port?: number,
msg_cnt_per_min: number,
statistics: Statistics,
config: ConfBot,
}
export type LoginInfo = StrangerInfo & VipInfo;
//////////
export interface RetError {
code?: number,
message?: string,
}
export interface RetCommon {
retcode: number, //0ok 1async 100error 102failed 103timeout 104offline
status: string, //"ok", "async", "failed"
data: object | null,
error?: RetError | null,
}
//////////
export interface VipInfo {
readonly user_id?: number,
readonly nickname?: string,
readonly level?: number,
readonly level_speed?: number,
readonly vip_level?: number,
readonly vip_growth_speed?: number,
readonly vip_growth_total?: string,
}
export interface StrangerInfo {
readonly user_id?: number,
readonly nickname?: string,
readonly sex?: string,
readonly age?: number,
readonly area?: string,
readonly signature?: string,
readonly description?: string,
readonly group_id?: number,
}
export interface FriendInfo extends StrangerInfo {
readonly remark?: string
}
export interface GroupInfo {
readonly group_id?: number,
readonly group_name?: string,
readonly member_count?: number,
readonly max_member_count?: number,
readonly owner_id?: number,
readonly last_join_time?: number,
readonly last_sent_time?: number,
    readonly shutup_time_whole?: number, // expiry time of the whole-group mute
    readonly shutup_time_me?: number, // expiry time of my own mute
    readonly create_time?: number,
    readonly grade?: number,
    readonly max_admin_count?: number,
    readonly active_member_count?: number,
    readonly update_time?: number, // time this group profile was last refreshed
}
export interface MemberInfo {
readonly group_id?: number,
readonly user_id?: number,
readonly nickname?: string,
readonly card?: string,
readonly sex?: string,
readonly age?: number,
readonly area?: string,
readonly join_time?: number,
readonly last_sent_time?: number,
readonly level?: number,
readonly rank?: string,
readonly role?: string,
readonly unfriendly?: boolean,
readonly title?: string,
readonly title_expire_time?: number,
readonly card_changeable?: boolean,
    readonly shutup_time?: number, // mute expiry time
    readonly update_time?: number, // time this member profile was last refreshed
}
export interface MessageId {
message_id: string
}
//////////
export interface RetStrangerList extends RetCommon {
data: ReadonlyMap<number, StrangerInfo>
}
export interface RetFriendList extends RetCommon {
data: ReadonlyMap<number, FriendInfo>
}
export interface RetGroupList extends RetCommon {
data: ReadonlyMap<number, GroupInfo>
}
export interface RetMemberList extends RetCommon {
data: ReadonlyMap<number, MemberInfo> | null
}
export interface RetStrangerInfo extends RetCommon {
data: StrangerInfo | null
}
export interface RetGroupInfo extends RetCommon {
data: GroupInfo | null
}
export interface RetMemberInfo extends RetCommon {
data: MemberInfo | null
}
export interface RetSendMsg extends RetCommon {
data: MessageId | null
}
export interface RetStatus extends RetCommon {
data: Status
}
export interface RetLoginInfo extends RetCommon {
data: LoginInfo
}
//////////
/**
* @see https://github.com/howmanybots/onebot/blob/master/v11/specs/message/segment.md
*/
export interface MessageElem {
type: string,
data?: object,
}
export interface Anonymous {
id: number,
name: string,
flag: string,
}
export interface EventData {
self_id: number,
time: number,
post_type: string,
system_type?: string,
request_type?: string,
message_type?: string,
notice_type?: string,
sub_type?: string,
image?: Buffer,
url?: string,
message?: MessageElem | string,
raw_message?: string,
message_id?: string,
user_id?: number,
nickname?: string,
group_id?: number,
group_name?: string,
discuss_id?: number,
discuss_name?: string,
font?: string,
anonymous?: Anonymous | null,
sender?: FriendInfo & MemberInfo,
member?: MemberInfo,
auto_reply?: boolean,
flag?: string,
comment?: string,
source?: string,
role?: string,
inviter_id?: number,
operator_id?: number,
duration?: number,
set?: boolean,
dismiss?: boolean,
signature?: string,
title?: string,
content?: string,
action?: string,
suffix?: string,
enable_guest?: boolean,
enable_anonymous?: boolean,
enable_upload_album?: boolean,
enable_upload_file?: boolean,
enable_temp_chat?: boolean,
enable_new_group?: boolean,
enable_show_honor?: boolean,
enable_show_level?: boolean,
enable_show_title?: boolean,
enable_confess?: boolean,
}
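// Hypothetical listener sketch (added for illustration; the handler body is made up),
// using the event API declared on Client below:
//
//   bot.on("message", (data: EventData) => {
//       if (data.group_id) {
//           bot.sendGroupMsg(data.group_id, "hello");
//       }
//   });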
//////////
export class Client extends events.EventEmitter {
private constructor();
readonly uin: number;
readonly password_md5: Buffer;
readonly nickname: string;
readonly sex: string;
readonly age: number;
readonly online_status: number;
readonly fl: ReadonlyMap<number, FriendInfo>;
readonly sl: ReadonlyMap<number, StrangerInfo>;
readonly gl: ReadonlyMap<number, GroupInfo>;
readonly gml: ReadonlyMap<number, ReadonlyMap<number, MemberInfo>>;
readonly logger: log4js.Logger;
readonly dir: string;
readonly config: ConfBot;
readonly stat: Statistics;
    login(password?: Buffer | string): void; // the password may be plaintext or an md5 hash
    captchaLogin(captcha: string): void;
    terminate(): void; // close the connection immediately
    logout(): Promise<void>; // go offline first, then close the connection
    isOnline(): boolean;
    setOnlineStatus(status: number): Promise<RetCommon>; // 11 online, 31 away, 41 invisible, 50 busy, 60 Q me, 70 do not disturb
getFriendList(): RetFriendList;
getStrangerList(): RetStrangerList;
getGroupList(): RetGroupList;
getGroupMemberList(group_id: Uin, no_cache?: boolean): Promise<RetMemberList>;
getStrangerInfo(user_id: Uin, no_cache?: boolean): Promise<RetStrangerInfo>;
getGroupInfo(group_id: Uin, no_cache?: boolean): Promise<RetGroupInfo>;
getGroupMemberInfo(group_id: Uin, user_id: Uin, no_cache?: boolean): Promise<RetMemberInfo>;
sendPrivateMsg(user_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetSendMsg>;
sendGroupMsg(group_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetSendMsg>;
sendDiscussMsg(discuss_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetCommon>;
deleteMsg(message_id: string): Promise<RetCommon>;
getMsg(message_id: string): Promise<RetCommon>;
sendGroupNotice(group_id: Uin, content: string): Promise<RetCommon>;
setGroupName(group_id: Uin, group_name: string): Promise<RetCommon>;
setGroupAnonymous(group_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupWholeBan(group_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupAdmin(group_id: Uin, user_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupSpecialTitle(group_id: Uin, user_id: Uin, special_title?: string, duration?: number): Promise<RetCommon>;
setGroupCard(group_id: Uin, user_id: Uin, card?: string): Promise<RetCommon>;
setGroupKick(group_id: Uin, user_id: Uin, reject_add_request?: boolean): Promise<RetCommon>;
setGroupBan(group_id: Uin, user_id: Uin, duration?: number): Promise<RetCommon>;
setGroupLeave(group_id: Uin, is_dismiss?: boolean): Promise<RetCommon>;
    sendGroupPoke(group_id: Uin, user_id: Uin): Promise<RetCommon>; // when group_id is actually a friend uin, this pokes them in a private chat
setFriendAddRequest(flag: string, approve?: boolean, remark?: string, block?: boolean): Promise<RetCommon>;
setGroupAddRequest(flag: string, approve?: boolean, reason?: string, block?: boolean): Promise<RetCommon>;
addGroup(group_id: Uin, comment?: string): Promise<RetCommon>;
addFriend(group_id: Uin, user_id: Uin, comment?: string): Promise<RetCommon>;
deleteFriend(user_id: Uin, block?: boolean): Promise<RetCommon>;
inviteFriend(group_id: Uin, user_id: Uin): Promise<RetCommon>;
sendLike(user_id: Uin, times?: number): Promise<RetCommon>;
setNickname(nickname: string): Promise<RetCommon>;
    setGender(gender: 0 | 1 | 2): Promise<RetCommon>; // 0 unknown, 1 male, 2 female
    setBirthday(birthday: string | number): Promise<RetCommon>; // in the form 20110202
    setDescription(description?: string): Promise<RetCommon>;
    setSignature(signature?: string): Promise<RetCommon>;
    setPortrait(file: Buffer | string): Promise<RetCommon>; // same format as the file field of an image CQ code
setGroupPortrait(group_id: Uin, file: Buffer | string): Promise<RetCommon>; | cleanCache(type?: string): Promise<RetCommon>; //type: "image" or "record" or undefined
canSendImage(): RetCommon;
canSendRecord(): RetCommon;
    getVersionInfo(): RetCommon; // for now this returns the info from package.json
getStatus(): RetStatus;
getLoginInfo(): RetLoginInfo;
once(event: "system" | "request" | "message" | "notice", listener: (data: EventData) => void): this;
on(event: "system" | "request" | "message" | "notice", listener: (data: EventData) => void): this;
off(event: "system" | "request" | "message" | "notice", listener: (data: EventData) => void): this;
once(event: string, listener: (data: EventData) => void): this;
on(event: string, listener: (data: EventData) => void): this;
off(event: string, listener: (data: EventData) => void): this;
    // until the reload finishes, the bot accepts no other requests and reports no events
reloadFriendList(): Promise<RetCommon>;
reloadGroupList(): Promise<RetCommon>;
}
export function createClient(uin: Uin, config?: ConfBot): Client; |
getCookies(domain?: string): Promise<RetCommon>;
getCsrfToken(): Promise<RetCommon>; | random_line_split |
client.d.ts | // Project: https://github.com/takayama-lily/oicq
/// <reference types="node" />
import * as events from 'events';
import * as log4js from 'log4js';
export type Uin = string | number;
// In most cases you do not need to touch these options, since the defaults cover the most common setup; change them only when you need different behaviour.
export interface ConfBot {
    log_level?: "trace" | "debug" | "info" | "warn" | "error" | "fatal" | "off", // defaults to "info"
    platform?: number, // 1: Android phone  2: aPad (default)  3: Android watch  4: Mac (experimental)
    kickoff?: boolean, // when kicked offline by another login, kick the other side back after 3 seconds; defaults to false
    ignore_self?: boolean, // ignore the bot's own messages in group chats; defaults to true
    resend?: boolean, // when blocked by risk control, retry with fragmented sending; defaults to true (a legacy message form, fragment reassembly is not supported yet)
    data_dir?: string, // data storage folder, must be writable; defaults to the "data" folder under the main directory
    // reconnect interval in seconds after a system.offline.network event; defaults to 5 (seconds), values below 3 (seconds) are not recommended
    // a momentary drop-and-reconnect does not trigger this event; it usually fires only when the machine really has no network connection
    // set to 0 to disable automatic reconnection; you can then listen for this event and handle it yourself
    reconn_interval?: number,
    // manually specify the server ip and port
    // msfwifi.3g.qq.com:8080 is used by default; to change it, prefer repointing that domain via hosts rather than hard-coding an ip
    // @link https://site.ip138.com/msfwifi.3g.qq.com/ the following four ports are usually open: 80, 443, 8080, 14000
    remote_ip?: string,
    remote_port?: number,
}
export interface Statistics {
readonly start_time: number,
readonly lost_times: number,
readonly recv_pkt_cnt: number,
readonly sent_pkt_cnt: number,
    readonly lost_pkt_cnt: number, // packets that timed out without a response
readonly recv_msg_cnt: number,
readonly sent_msg_cnt: number,
}
export interface Status {
online: boolean,
status: number,
remote_ip?: number,
remote_port?: number,
msg_cnt_per_min: number,
statistics: Statistics,
config: ConfBot,
}
export type LoginInfo = StrangerInfo & VipInfo;
//////////
export interface RetError {
code?: number,
message?: string,
}
export interface RetCommon {
retcode: number, //0ok 1async 100error 102failed 103timeout 104offline
status: string, //"ok", "async", "failed"
data: object | null,
error?: RetError | null,
}
//////////
export interface VipInfo {
readonly user_id?: number,
readonly nickname?: string,
readonly level?: number,
readonly level_speed?: number,
readonly vip_level?: number,
readonly vip_growth_speed?: number,
readonly vip_growth_total?: string,
}
export interface StrangerInfo {
readonly user_id?: number,
readonly nickname?: string,
readonly sex?: string,
readonly age?: number,
readonly area?: string,
readonly signature?: string,
readonly description?: string,
readonly group_id?: number,
}
export interface FriendInfo extends StrangerInfo {
readonly remark?: string
}
export interface GroupInfo {
readonly group_id?: number,
readonly group_name?: string,
readonly member_count?: number,
readonly max_member_count?: number,
readonly owner_id?: number,
readonly last_join_time?: number,
readonly last_sent_time?: number,
    readonly shutup_time_whole?: number, // expiry time of the whole-group mute
    readonly shutup_time_me?: number, // expiry time of my own mute
    readonly create_time?: number,
    readonly grade?: number,
    readonly max_admin_count?: number,
    readonly active_member_count?: number,
    readonly update_time?: number, // time this group profile was last refreshed
}
export interface MemberInfo {
readonly group_id?: number,
readonly user_id?: number,
readonly nickname?: string,
readonly card?: string,
readonly sex?: string,
readonly age?: number,
readonly area?: string,
readonly join_time?: number,
readonly last_sent_time?: number,
readonly level?: number,
readonly rank?: string,
readonly role?: string,
readonly unfriendly?: boolean,
readonly title?: string,
readonly title_expire_time?: number,
readonly card_changeable?: boolean,
    readonly shutup_time?: number, // mute expiry time
    readonly update_time?: number, // time this member profile was last refreshed
}
export interface MessageId {
message_id: string
}
//////////
export interface RetStrangerList extends RetCommon {
data: ReadonlyMap<number, StrangerInfo>
}
export interface RetFriendList extends RetCommon {
data: ReadonlyMap<number, FriendInfo>
}
export interface RetGroupList extends RetCommon {
data: ReadonlyMap<number, GroupInfo>
}
export interface RetMemberList extends RetCommon {
data: ReadonlyMap<number, MemberInfo> | null
}
export interface RetStrangerInfo extends RetCommon {
data: StrangerInfo | null
}
export interface RetGroupInfo extends RetCommon {
data: GroupInfo | null
}
export interface RetMemberInfo extends RetCommon {
data: MemberInfo | null
}
export interface RetSendMsg extends RetCommon {
data: MessageId | null
}
export interface RetStatus extends RetCommon {
data: Status
}
export interface RetLoginInfo extends RetCommon {
data: LoginInfo
}
//////////
/**
* @see https://github.com/howmanybots/onebot/blob/master/v11/specs/message/segment.md
*/
export interface MessageElem {
type: string,
data?: object,
}
export interface Anonymous {
id: number,
name: string,
flag: string,
}
export interface EventData {
self_id: number,
time: number,
post_type: string,
system_type?: string,
request_type?: string,
message_type?: string,
notice_type?: string,
sub_type?: string,
image?: Buffer,
url?: string,
message?: MessageElem | string,
raw_message?: string,
message_id?: string,
user_id?: number,
nickname?: string,
group_id?: number,
group_name?: string,
discuss_id?: number,
discuss_name?: string,
font?: string,
anonymous?: Anonymous | null,
sender?: FriendInfo & MemberInfo,
member?: MemberInfo,
auto_reply?: boolean,
flag?: string,
comment?: string,
source?: string,
role?: string,
inviter_id?: number,
operator_id?: number,
duration?: number,
set?: boolean,
dismiss?: boolean,
signature?: string,
title?: string,
content?: string,
action?: string,
suffix?: string,
enable_guest?: boolean,
enable_anonymous?: boolean,
enable_upload_album?: boolean,
enable_upload_file?: boolean,
enable_temp_chat?: boolean,
enable_new_group?: boolean,
enable_show_honor?: boolean,
enable_show_level?: boolean,
enable_show_title?: boolean,
enable_confess?: boolean,
}
//////////
export class Client extends events.EventEmitter {
private constructor();
readonly uin: number;
readonly password_md5: Buffer;
readonly nickname: string;
readonly sex: string;
readonly age: number;
readonly online_status: number;
readonly fl: ReadonlyMap<number, FriendInfo>;
readonly sl: ReadonlyMap<number, StrangerInfo>;
readonly gl: ReadonlyMap<number, GroupInfo>;
readonly gml: ReadonlyMap<number, ReadonlyMap<number, MemberInfo>>;
readonly logger: log4js.Logger;
readonly dir: string;
readonly config: ConfBot;
readonly stat: Statistics;
    login(password?: Buffer | string): void; // the password may be plaintext or an md5 hash
captchaLogin(capt | tring): void;
    terminate(): void; // close the connection immediately
    logout(): Promise<void>; // go offline first, then close the connection
    isOnline(): boolean;
    setOnlineStatus(status: number): Promise<RetCommon>; // 11 online, 31 away, 41 invisible, 50 busy, 60 Q me, 70 do not disturb
getFriendList(): RetFriendList;
getStrangerList(): RetStrangerList;
getGroupList(): RetGroupList;
getGroupMemberList(group_id: Uin, no_cache?: boolean): Promise<RetMemberList>;
getStrangerInfo(user_id: Uin, no_cache?: boolean): Promise<RetStrangerInfo>;
getGroupInfo(group_id: Uin, no_cache?: boolean): Promise<RetGroupInfo>;
getGroupMemberInfo(group_id: Uin, user_id: Uin, no_cache?: boolean): Promise<RetMemberInfo>;
sendPrivateMsg(user_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetSendMsg>;
sendGroupMsg(group_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetSendMsg>;
sendDiscussMsg(discuss_id: Uin, message: MessageElem[] | string, auto_escape?: boolean): Promise<RetCommon>;
deleteMsg(message_id: string): Promise<RetCommon>;
getMsg(message_id: string): Promise<RetCommon>;
sendGroupNotice(group_id: Uin, content: string): Promise<RetCommon>;
setGroupName(group_id: Uin, group_name: string): Promise<RetCommon>;
setGroupAnonymous(group_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupWholeBan(group_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupAdmin(group_id: Uin, user_id: Uin, enable?: boolean): Promise<RetCommon>;
setGroupSpecialTitle(group_id: Uin, user_id: Uin, special_title?: string, duration?: number): Promise<RetCommon>;
setGroupCard(group_id: Uin, user_id: Uin, card?: string): Promise<RetCommon>;
setGroupKick(group_id: Uin, user_id: Uin, reject_add_request?: boolean): Promise<RetCommon>;
setGroupBan(group_id: Uin, user_id: Uin, duration?: number): Promise<RetCommon>;
setGroupLeave(group_id: Uin, is_dismiss?: boolean): Promise<RetCommon>;
    sendGroupPoke(group_id: Uin, user_id: Uin): Promise<RetCommon>; // when group_id is actually a friend uin, this pokes them in a private chat
setFriendAddRequest(flag: string, approve?: boolean, remark?: string, block?: boolean): Promise<RetCommon>;
setGroupAddRequest(flag: string, approve?: boolean, reason?: string, block?: boolean): Promise<RetCommon>;
addGroup(group_id: Uin, comment?: string): Promise<RetCommon>;
addFriend(group_id: Uin, user_id: Uin, comment?: string): Promise<RetCommon>;
deleteFriend(user_id: Uin, block?: boolean): Promise<RetCommon>;
inviteFriend(group_id: Uin, user_id: Uin): Promise<RetCommon>;
sendLike(user_id: Uin, times?: number): Promise<RetCommon>;
setNickname(nickname: string): Promise<RetCommon>;
    setGender(gender: 0 | 1 | 2): Promise<RetCommon>; // 0 unknown, 1 male, 2 female
    setBirthday(birthday: string | number): Promise<RetCommon>; // in the form 20110202
    setDescription(description?: string): Promise<RetCommon>;
    setSignature(signature?: string): Promise<RetCommon>;
    setPortrait(file: Buffer | string): Promise<RetCommon>; // same format as the file field of an image CQ code
setGroupPortrait(group_id: Uin, file: Buffer | string): Promise<RetCommon>;
getCookies(domain?: string): Promise<RetCommon>;
getCsrfToken(): Promise<RetCommon>;
cleanCache(type?: string): Promise<RetCommon>; //type: "image" or "record" or undefined
canSendImage(): RetCommon;
canSendRecord(): RetCommon;
    getVersionInfo(): RetCommon; // for now this returns the info from package.json
getStatus(): RetStatus;
getLoginInfo(): RetLoginInfo;
once(event: "system" | "request" | "message" | "notice", listener: (data: EventData) => void): this;
on(event: "system" | "request" | "message" | "notice", listener: (data: EventData) => void): this;
off(event: "system" | "request" | "message" | "notice", listener: (data: EventData) => void): this;
once(event: string, listener: (data: EventData) => void): this;
on(event: string, listener: (data: EventData) => void): this;
off(event: string, listener: (data: EventData) => void): this;
    // until the reload finishes, the bot accepts no other requests and reports no events
reloadFriendList(): Promise<RetCommon>;
reloadGroupList(): Promise<RetCommon>;
}
export function createClient(uin: Uin, config?: ConfBot): Client;
| cha: s | identifier_name |
utils.py | '''
Copyright (C) 2019-2021, Mo Zhou <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# pylint: disable=no-member
import torch as th
import json
import fcntl
import contextlib
import os
import re
import numpy as np
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters.terminal import TerminalFormatter
from sklearn.metrics.cluster import normalized_mutual_info_score as __nmi
#
try:
import faiss
faiss.omp_set_num_threads(4)
except ImportError:
from sklearn.cluster import KMeans
IMmean = th.tensor([0.485, 0.456, 0.406]) # pylint: disable=not-callable
IMstd = th.tensor([0.229, 0.224, 0.225]) # pylint: disable=not-callable
IMmean_ibn = th.tensor([0.502, 0.4588, 0.4078])
IMstd_ibn = th.tensor([0.0039, 0.0039, 0.0039])
def renorm(im): return im.sub(IMmean[:, None, None].to(
im.device)).div(IMstd[:, None, None].to(im.device))
def renorm_ibn(im): return im.sub(IMmean_ibn[:, None, None].to(
im.device)).div(IMstd_ibn[:, None, None].to(im.device))[:, range(3)[::-1], :, :]
def denorm(im): return im.mul(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def denorm_ibn(im): return im[:, range(3)[::-1], :, :].mul(IMmean_ibn[:, None, None].to(
im.device)).add(IMstd_ibn[:, None, None].to(im.device))
def xdnorm(im): return im.div(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def chw2hwc(im): return im.transpose((0, 2, 3, 1)) if len(
im.shape) == 4 else im.transpose((1, 2, 0))
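# Quick illustrative check (added for clarity, not in the original module):
# denorm() undoes renorm() for a fake NCHW image batch in [0, 1].
def _demo_renorm_roundtrip():
    im = th.rand(1, 3, 32, 32)
    assert th.allclose(denorm(renorm(im)), im, atol=1e-5)
    return True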
def metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor, ncls: int) -> float:
'''
wrapper with a CUDA-OOM (out of memory) guard
'''
try:
nmi = __metric_get_nmi(valvecs, vallabs, ncls)
except RuntimeError as e:
print('! FAISS(GPU) Triggered CUDA OOM. Falling back to CPU clustering...')
os.putenv('FAISS_CPU', '1')
nmi = __metric_get_nmi(valvecs, vallabs, ncls, use_cuda=False)
return nmi
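# Illustrative usage sketch (added for clarity; the toy sizes below are made up):
# NMI of 100 random 8-d embeddings spread evenly over 5 classes.
def _demo_metric_get_nmi():
    valvecs = th.randn(100, 8)
    vallabs = th.arange(100) % 5
    return metric_get_nmi(valvecs, vallabs, ncls=5)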
def __metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor,
ncls: int, use_cuda: bool = True) -> float:
'''
Compute the NMI score
'''
use_cuda: bool = use_cuda and th.cuda.is_available() \
and hasattr(faiss, 'StandardGpuResources')
if int(os.getenv('FAISS_CPU', 0)) > 0:
use_cuda = False
npvecs = valvecs.detach().cpu().numpy().astype(np.float32)
nplabs = vallabs.detach().cpu().view(-1).numpy().astype(np.float32)
# a weird dispatcher but it works.
if 'faiss' in globals():
if use_cuda:
gpu_resource = faiss.StandardGpuResources()
cluster_idx = faiss.IndexFlatL2(npvecs.shape[1])
if not th.distributed.is_initialized():
cluster_idx = faiss.index_cpu_to_gpu(
gpu_resource, 0, cluster_idx)
else:
cluster_idx = faiss.index_cpu_to_gpu(gpu_resource,
th.distributed.get_rank(), cluster_idx)
kmeans = faiss.Clustering(npvecs.shape[1], ncls)
kmeans.verbose = False
kmeans.train(npvecs, cluster_idx)
_, pred = cluster_idx.search(npvecs, 1)
pred = pred.flatten()
else:
kmeans = faiss.Kmeans(
npvecs.shape[1], ncls, seed=123, verbose=False)
kmeans.train(npvecs)
_, pred = kmeans.index.search(npvecs, 1)
pred = pred.flatten()
nmi = __nmi(nplabs, pred)
elif 'KMeans' in globals():
kmeans = KMeans(n_clusters=ncls, random_state=0).fit(npvecs)
nmi = __nmi(nplabs, kmeans.labels_)
else:
raise NotImplementedError(
            'please provide at least one kmeans implementation for the NMI metric.')
return nmi
def metric_get_rank(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, ks: list) -> tuple:
'''
Flexibly get the rank of the topmost item in the same class
dist = [dist(anchor,x) for x in validation_set]
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# [important] argsort(...)[:,1] for skipping the diagonal (R@1=1.0)
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
rank = th.where(vallabels[argsort] == label)[0].min().item()
return (rank,) + tuple(rank < k for k in ks)
def test_metric_get_rank():
N = 32
dist = th.arange(N) / N
label = 1
labels = th.zeros(N)
labels[[0,1]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (0, True, True))
labels = th.zeros(N)
labels[[0, 2]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (1, False, True))
def metric_get_ap(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor) -> float:
'''
Get the overall average precision
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
    # we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
argwhere1 = th.where(vallabels[argsort] == label)[0] + 1
ap = ((th.arange(len(argwhere1)).float() + 1).to(argwhere1.device) /
argwhere1).sum().item() / len(argwhere1)
return ap
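# Worked example (added for illustration, in the spirit of the test functions
# in this file): the anchor itself sits at distance 0, the two positives rank
# 1st and 3rd among the remaining items, so AP = (1/1 + 2/3) / 2, about 0.833.
def _demo_metric_get_ap():
    dist = th.tensor([0.0, 0.1, 0.2, 0.3])
    labels = th.tensor([1.0, 1.0, 0.0, 1.0])
    return metric_get_ap(dist, 1, labels)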
def metric_get_ap_r(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, rs: list) -> float:
'''
computes the mAP@R metric following
"A metric learning reality check", eccv 2020
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:].cpu()
mask = (vallabels[argsort] == label).cpu()
cmask = mask.cumsum(dim=0)
mapr = []
for r in rs:
tmp = (cmask[:r] / (th.arange(r) + 1))[mask[:r]].sum() / r
mapr.append(tmp.item())
return tuple(mapr)
def test_metric_get_ap_r():
def et1e_4(a, b):
assert(abs(a - b) < 1e-4)
N = 101
dist = th.arange(N) / N
label = 1
#
labels = th.zeros(N)
labels[[0,1]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.1)
#
labels = th.zeros(N)
labels[[0,1,10]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.12)
#
labels = th.zeros(N)
labels[[0,1,2]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.20)
#
labels = th.zeros(N)
labels[th.arange(11)] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 1.00)
def rjson(j: object) -> str:
'''
Render/Highlight the JSON code for pretty print
'''
if isinstance(j, str):
'''
let's assume it's a json string
'''
code = j
elif any(isinstance(j, x) for x in (str, list, dict, float, int)):
'''
let's first serialize it into json then render
'''
code = json.dumps(j)
else:
|
return highlight(code, PythonLexer(), TerminalFormatter())
def pdist(repres: th.Tensor, metric: str) -> th.Tensor:
'''
Helper: compute pairwise distance matrix.
https://github.com/pytorch/pytorch/issues/48306
'''
assert(len(repres.shape) == 2)
with th.no_grad():
if metric == 'C':
# 1. th.nn.functional.cosine_similarity(x[:,:,None],
# x.t()[None,:,:])
repres = th.nn.functional.normalize(repres, dim=-1)
pdist = 1.0 - th.mm(repres, repres.t())
elif metric in ('E', 'N'):
if metric == 'N':
repres = th.nn.functional.normalize(repres, dim=-1)
# Memory efficient pairwise euclidean distance matrix
# 1. th.nn.functional.pairwise_distance(x[:,:,None], x.t()[None,:,:])
# 2. th.cdist(x,x)
prod = th.mm(repres, repres.t())
norm = prod.diag().unsqueeze(1).expand_as(prod)
pdist = (norm + norm.t() - 2 * prod).sqrt()
else:
raise ValueError(f'illegal metric {metric}')
return pdist
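# Illustrative sketch (not part of the original API): recall@1 / recall@2 over a
# toy batch, combining pdist() with metric_get_rank(); sizes and class count are
# made up, with 8 samples per class so every query has at least one positive.
def _demo_recall_from_pdist():
    repres = th.randn(64, 16)
    labels = th.arange(64) % 8
    dmat = pdist(repres, 'N')  # normalized euclidean pairwise distances
    hits1, hits2 = 0, 0
    for i in range(dmat.shape[0]):
        # metric_get_rank skips the self-match internally
        _, r1, r2 = metric_get_rank(dmat[i], labels[i].item(), labels, [1, 2])
        hits1 += int(r1)
        hits2 += int(r2)
    return hits1 / dmat.shape[0], hits2 / dmat.shape[0]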
def orthogonalRegularization(model, loss):
losses = []
for m in model.modules():
if isinstance(m, th.nn.Linear):
w = m.weight
mat = th.matmul(w, w.t())
diff = mat - th.diag(th.diag(mat))
loss = th.mean(th.pow(diff, 2))
losses.append(loss)
    # th.sum() does not accept a python list; stack the per-layer penalties first
    return th.stack(losses).sum()
@contextlib.contextmanager
def openlock(*args, **kwargs):
lock = open(*args, **kwargs)
fcntl.lockf(lock, fcntl.LOCK_EX)
try:
yield lock
finally:
fcntl.lockf(lock, fcntl.LOCK_UN)
lock.close()
def nsort(L: list, R: str):
'''
sort list L by the key:int matched from regex R, descending.
'''
assert(all(re.match(R, item) for item in L))
nL = [(int(re.match(R, item).groups()[0]), item) for item in L]
nL = sorted(nL, key=lambda x: x[0], reverse=True)
return [x[-1] for x in nL]
def test_nsort():
x = [x.strip() for x in '''
version_0
version_2
version_10
version_3
version_1
'''.strip().split('\n')]
y = [y.strip() for y in '''
epoch=0.ckpt
epoch=10.ckpt
epoch=2.ckpt
epoch=7.ckpt
'''.strip().split('\n')]
assert(nsort(x, r'version_(\d+)')[0] == 'version_10')
print(nsort(x, r'.*sion_(\d+)')[0] == 'version_10')
assert(nsort(y, r'epoch=(\d+)')[0] == 'epoch=10.ckpt')
print(nsort(y, r'.*ch=(\d+)')[0] == 'epoch=10.ckpt')
if __name__ == '__main__':
test_nsort()
| raise ValueError('does not know how to deal with such datatype') | conditional_block |
utils.py | '''
Copyright (C) 2019-2021, Mo Zhou <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# pylint: disable=no-member
import torch as th
import json
import fcntl
import contextlib
import os
import re
import numpy as np
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters.terminal import TerminalFormatter
from sklearn.metrics.cluster import normalized_mutual_info_score as __nmi
#
try:
import faiss
faiss.omp_set_num_threads(4)
except ImportError:
from sklearn.cluster import KMeans
IMmean = th.tensor([0.485, 0.456, 0.406]) # pylint: disable=not-callable
IMstd = th.tensor([0.229, 0.224, 0.225]) # pylint: disable=not-callable
IMmean_ibn = th.tensor([0.502, 0.4588, 0.4078])
IMstd_ibn = th.tensor([0.0039, 0.0039, 0.0039])
def renorm(im): return im.sub(IMmean[:, None, None].to(
im.device)).div(IMstd[:, None, None].to(im.device))
def renorm_ibn(im): return im.sub(IMmean_ibn[:, None, None].to(
im.device)).div(IMstd_ibn[:, None, None].to(im.device))[:, range(3)[::-1], :, :]
def denorm(im): return im.mul(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def denorm_ibn(im): |
def xdnorm(im): return im.div(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def chw2hwc(im): return im.transpose((0, 2, 3, 1)) if len(
im.shape) == 4 else im.transpose((1, 2, 0))
def metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor, ncls: int) -> float:
'''
wrapper with a CUDA-OOM (out of memory) guard
'''
try:
nmi = __metric_get_nmi(valvecs, vallabs, ncls)
except RuntimeError as e:
print('! FAISS(GPU) Triggered CUDA OOM. Falling back to CPU clustering...')
os.putenv('FAISS_CPU', '1')
nmi = __metric_get_nmi(valvecs, vallabs, ncls, use_cuda=False)
return nmi
def __metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor,
ncls: int, use_cuda: bool = True) -> float:
'''
Compute the NMI score
'''
use_cuda: bool = use_cuda and th.cuda.is_available() \
and hasattr(faiss, 'StandardGpuResources')
if int(os.getenv('FAISS_CPU', 0)) > 0:
use_cuda = False
npvecs = valvecs.detach().cpu().numpy().astype(np.float32)
nplabs = vallabs.detach().cpu().view(-1).numpy().astype(np.float32)
# a weird dispatcher but it works.
if 'faiss' in globals():
if use_cuda:
gpu_resource = faiss.StandardGpuResources()
cluster_idx = faiss.IndexFlatL2(npvecs.shape[1])
if not th.distributed.is_initialized():
cluster_idx = faiss.index_cpu_to_gpu(
gpu_resource, 0, cluster_idx)
else:
cluster_idx = faiss.index_cpu_to_gpu(gpu_resource,
th.distributed.get_rank(), cluster_idx)
kmeans = faiss.Clustering(npvecs.shape[1], ncls)
kmeans.verbose = False
kmeans.train(npvecs, cluster_idx)
_, pred = cluster_idx.search(npvecs, 1)
pred = pred.flatten()
else:
kmeans = faiss.Kmeans(
npvecs.shape[1], ncls, seed=123, verbose=False)
kmeans.train(npvecs)
_, pred = kmeans.index.search(npvecs, 1)
pred = pred.flatten()
nmi = __nmi(nplabs, pred)
elif 'KMeans' in globals():
kmeans = KMeans(n_clusters=ncls, random_state=0).fit(npvecs)
nmi = __nmi(nplabs, kmeans.labels_)
else:
raise NotImplementedError(
            'please provide at least one kmeans implementation for the NMI metric.')
return nmi
def metric_get_rank(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, ks: list) -> tuple:
'''
Flexibly get the rank of the topmost item in the same class
dist = [dist(anchor,x) for x in validation_set]
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# [important] argsort(...)[:,1] for skipping the diagonal (R@1=1.0)
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
rank = th.where(vallabels[argsort] == label)[0].min().item()
return (rank,) + tuple(rank < k for k in ks)
def test_metric_get_rank():
N = 32
dist = th.arange(N) / N
label = 1
labels = th.zeros(N)
labels[[0,1]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (0, True, True))
labels = th.zeros(N)
labels[[0, 2]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (1, False, True))
def metric_get_ap(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor) -> float:
'''
Get the overall average precision
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
    # we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
argwhere1 = th.where(vallabels[argsort] == label)[0] + 1
ap = ((th.arange(len(argwhere1)).float() + 1).to(argwhere1.device) /
argwhere1).sum().item() / len(argwhere1)
return ap
def metric_get_ap_r(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, rs: list) -> float:
'''
computes the mAP@R metric following
"A metric learning reality check", eccv 2020
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:].cpu()
mask = (vallabels[argsort] == label).cpu()
cmask = mask.cumsum(dim=0)
mapr = []
for r in rs:
tmp = (cmask[:r] / (th.arange(r) + 1))[mask[:r]].sum() / r
mapr.append(tmp.item())
return tuple(mapr)
def test_metric_get_ap_r():
def et1e_4(a, b):
assert(abs(a - b) < 1e-4)
N = 101
dist = th.arange(N) / N
label = 1
#
labels = th.zeros(N)
labels[[0,1]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.1)
#
labels = th.zeros(N)
labels[[0,1,10]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.12)
#
labels = th.zeros(N)
labels[[0,1,2]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.20)
#
labels = th.zeros(N)
labels[th.arange(11)] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 1.00)
def rjson(j: object) -> str:
'''
Render/Highlight the JSON code for pretty print
'''
if isinstance(j, str):
'''
let's assume it's a json string
'''
code = j
elif any(isinstance(j, x) for x in (str, list, dict, float, int)):
'''
let's first serialize it into json then render
'''
code = json.dumps(j)
else:
raise ValueError('does not know how to deal with such datatype')
return highlight(code, PythonLexer(), TerminalFormatter())
def pdist(repres: th.Tensor, metric: str) -> th.Tensor:
'''
Helper: compute pairwise distance matrix.
https://github.com/pytorch/pytorch/issues/48306
'''
assert(len(repres.shape) == 2)
with th.no_grad():
if metric == 'C':
# 1. th.nn.functional.cosine_similarity(x[:,:,None],
# x.t()[None,:,:])
repres = th.nn.functional.normalize(repres, dim=-1)
pdist = 1.0 - th.mm(repres, repres.t())
elif metric in ('E', 'N'):
if metric == 'N':
repres = th.nn.functional.normalize(repres, dim=-1)
# Memory efficient pairwise euclidean distance matrix
# 1. th.nn.functional.pairwise_distance(x[:,:,None], x.t()[None,:,:])
# 2. th.cdist(x,x)
prod = th.mm(repres, repres.t())
norm = prod.diag().unsqueeze(1).expand_as(prod)
pdist = (norm + norm.t() - 2 * prod).sqrt()
else:
raise ValueError(f'illegal metric {metric}')
return pdist
def orthogonalRegularization(model, loss):
losses = []
for m in model.modules():
if isinstance(m, th.nn.Linear):
w = m.weight
mat = th.matmul(w, w.t())
diff = mat - th.diag(th.diag(mat))
loss = th.mean(th.pow(diff, 2))
losses.append(loss)
    # th.sum() does not accept a python list; stack the per-layer penalties first
    return th.stack(losses).sum()
@contextlib.contextmanager
def openlock(*args, **kwargs):
lock = open(*args, **kwargs)
fcntl.lockf(lock, fcntl.LOCK_EX)
try:
yield lock
finally:
fcntl.lockf(lock, fcntl.LOCK_UN)
lock.close()
def nsort(L: list, R: str):
'''
sort list L by the key:int matched from regex R, descending.
'''
assert(all(re.match(R, item) for item in L))
nL = [(int(re.match(R, item).groups()[0]), item) for item in L]
nL = sorted(nL, key=lambda x: x[0], reverse=True)
return [x[-1] for x in nL]
def test_nsort():
x = [x.strip() for x in '''
version_0
version_2
version_10
version_3
version_1
'''.strip().split('\n')]
y = [y.strip() for y in '''
epoch=0.ckpt
epoch=10.ckpt
epoch=2.ckpt
epoch=7.ckpt
'''.strip().split('\n')]
assert(nsort(x, r'version_(\d+)')[0] == 'version_10')
print(nsort(x, r'.*sion_(\d+)')[0] == 'version_10')
assert(nsort(y, r'epoch=(\d+)')[0] == 'epoch=10.ckpt')
print(nsort(y, r'.*ch=(\d+)')[0] == 'epoch=10.ckpt')
if __name__ == '__main__':
test_nsort()
| return im[:, range(3)[::-1], :, :].mul(IMmean_ibn[:, None, None].to(
im.device)).add(IMstd_ibn[:, None, None].to(im.device)) | identifier_body |
utils.py | '''
Copyright (C) 2019-2021, Mo Zhou <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# pylint: disable=no-member
import torch as th
import json
import fcntl
import contextlib
import os
import re
import numpy as np
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters.terminal import TerminalFormatter
from sklearn.metrics.cluster import normalized_mutual_info_score as __nmi
#
try:
import faiss
faiss.omp_set_num_threads(4)
except ImportError:
from sklearn.cluster import KMeans
IMmean = th.tensor([0.485, 0.456, 0.406]) # pylint: disable=not-callable
IMstd = th.tensor([0.229, 0.224, 0.225]) # pylint: disable=not-callable
IMmean_ibn = th.tensor([0.502, 0.4588, 0.4078])
IMstd_ibn = th.tensor([0.0039, 0.0039, 0.0039])
def renorm(im): return im.sub(IMmean[:, None, None].to(
im.device)).div(IMstd[:, None, None].to(im.device))
def renorm_ibn(im): return im.sub(IMmean_ibn[:, None, None].to(
im.device)).div(IMstd_ibn[:, None, None].to(im.device))[:, range(3)[::-1], :, :]
def denorm(im): return im.mul(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def | (im): return im[:, range(3)[::-1], :, :].mul(IMmean_ibn[:, None, None].to(
im.device)).add(IMstd_ibn[:, None, None].to(im.device))
def xdnorm(im): return im.div(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def chw2hwc(im): return im.transpose((0, 2, 3, 1)) if len(
im.shape) == 4 else im.transpose((1, 2, 0))
def metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor, ncls: int) -> float:
'''
wrapper with a CUDA-OOM (out of memory) guard
'''
try:
nmi = __metric_get_nmi(valvecs, vallabs, ncls)
except RuntimeError as e:
print('! FAISS(GPU) Triggered CUDA OOM. Falling back to CPU clustering...')
os.putenv('FAISS_CPU', '1')
nmi = __metric_get_nmi(valvecs, vallabs, ncls, use_cuda=False)
return nmi
def __metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor,
ncls: int, use_cuda: bool = True) -> float:
'''
Compute the NMI score
'''
use_cuda: bool = use_cuda and th.cuda.is_available() \
and hasattr(faiss, 'StandardGpuResources')
if int(os.getenv('FAISS_CPU', 0)) > 0:
use_cuda = False
npvecs = valvecs.detach().cpu().numpy().astype(np.float32)
nplabs = vallabs.detach().cpu().view(-1).numpy().astype(np.float32)
# a weird dispatcher but it works.
if 'faiss' in globals():
if use_cuda:
gpu_resource = faiss.StandardGpuResources()
cluster_idx = faiss.IndexFlatL2(npvecs.shape[1])
if not th.distributed.is_initialized():
cluster_idx = faiss.index_cpu_to_gpu(
gpu_resource, 0, cluster_idx)
else:
cluster_idx = faiss.index_cpu_to_gpu(gpu_resource,
th.distributed.get_rank(), cluster_idx)
kmeans = faiss.Clustering(npvecs.shape[1], ncls)
kmeans.verbose = False
kmeans.train(npvecs, cluster_idx)
_, pred = cluster_idx.search(npvecs, 1)
pred = pred.flatten()
else:
kmeans = faiss.Kmeans(
npvecs.shape[1], ncls, seed=123, verbose=False)
kmeans.train(npvecs)
_, pred = kmeans.index.search(npvecs, 1)
pred = pred.flatten()
nmi = __nmi(nplabs, pred)
elif 'KMeans' in globals():
kmeans = KMeans(n_clusters=ncls, random_state=0).fit(npvecs)
nmi = __nmi(nplabs, kmeans.labels_)
else:
raise NotImplementedError(
            'please provide at least one kmeans implementation for the NMI metric.')
return nmi
def metric_get_rank(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, ks: list) -> tuple:
'''
Flexibly get the rank of the topmost item in the same class
dist = [dist(anchor,x) for x in validation_set]
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# [important] argsort(...)[:,1] for skipping the diagonal (R@1=1.0)
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
rank = th.where(vallabels[argsort] == label)[0].min().item()
return (rank,) + tuple(rank < k for k in ks)
def test_metric_get_rank():
N = 32
dist = th.arange(N) / N
label = 1
labels = th.zeros(N)
labels[[0,1]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (0, True, True))
labels = th.zeros(N)
labels[[0, 2]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (1, False, True))
def metric_get_ap(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor) -> float:
'''
Get the overall average precision
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
    # we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
argwhere1 = th.where(vallabels[argsort] == label)[0] + 1
ap = ((th.arange(len(argwhere1)).float() + 1).to(argwhere1.device) /
argwhere1).sum().item() / len(argwhere1)
return ap
def metric_get_ap_r(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, rs: list) -> float:
'''
computes the mAP@R metric following
"A metric learning reality check", eccv 2020
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:].cpu()
mask = (vallabels[argsort] == label).cpu()
cmask = mask.cumsum(dim=0)
mapr = []
for r in rs:
tmp = (cmask[:r] / (th.arange(r) + 1))[mask[:r]].sum() / r
mapr.append(tmp.item())
return tuple(mapr)
def test_metric_get_ap_r():
def et1e_4(a, b):
assert(abs(a - b) < 1e-4)
N = 101
dist = th.arange(N) / N
label = 1
#
labels = th.zeros(N)
labels[[0,1]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.1)
#
labels = th.zeros(N)
labels[[0,1,10]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.12)
#
labels = th.zeros(N)
labels[[0,1,2]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.20)
#
labels = th.zeros(N)
labels[th.arange(11)] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 1.00)
def rjson(j: object) -> str:
'''
Render/Highlight the JSON code for pretty print
'''
if isinstance(j, str):
'''
let's assume it's a json string
'''
code = j
elif any(isinstance(j, x) for x in (str, list, dict, float, int)):
'''
let's first serialize it into json then render
'''
code = json.dumps(j)
else:
raise ValueError('does not know how to deal with such datatype')
return highlight(code, PythonLexer(), TerminalFormatter())
def pdist(repres: th.Tensor, metric: str) -> th.Tensor:
'''
Helper: compute pairwise distance matrix.
https://github.com/pytorch/pytorch/issues/48306
'''
assert(len(repres.shape) == 2)
with th.no_grad():
if metric == 'C':
# 1. th.nn.functional.cosine_similarity(x[:,:,None],
# x.t()[None,:,:])
repres = th.nn.functional.normalize(repres, dim=-1)
pdist = 1.0 - th.mm(repres, repres.t())
elif metric in ('E', 'N'):
if metric == 'N':
repres = th.nn.functional.normalize(repres, dim=-1)
# Memory efficient pairwise euclidean distance matrix
# 1. th.nn.functional.pairwise_distance(x[:,:,None], x.t()[None,:,:])
# 2. th.cdist(x,x)
prod = th.mm(repres, repres.t())
norm = prod.diag().unsqueeze(1).expand_as(prod)
pdist = (norm + norm.t() - 2 * prod).sqrt()
else:
raise ValueError(f'illegal metric {metric}')
return pdist
def orthogonalRegularization(model, loss):
losses = []
for m in model.modules():
if isinstance(m, th.nn.Linear):
w = m.weight
mat = th.matmul(w, w.t())
diff = mat - th.diag(th.diag(mat))
loss = th.mean(th.pow(diff, 2))
losses.append(loss)
    # th.sum() does not accept a python list; stack the per-layer penalties first
    return th.stack(losses).sum()
@contextlib.contextmanager
def openlock(*args, **kwargs):
lock = open(*args, **kwargs)
fcntl.lockf(lock, fcntl.LOCK_EX)
try:
yield lock
finally:
fcntl.lockf(lock, fcntl.LOCK_UN)
lock.close()
def nsort(L: list, R: str):
'''
sort list L by the key:int matched from regex R, descending.
'''
assert(all(re.match(R, item) for item in L))
nL = [(int(re.match(R, item).groups()[0]), item) for item in L]
nL = sorted(nL, key=lambda x: x[0], reverse=True)
return [x[-1] for x in nL]
def test_nsort():
x = [x.strip() for x in '''
version_0
version_2
version_10
version_3
version_1
'''.strip().split('\n')]
y = [y.strip() for y in '''
epoch=0.ckpt
epoch=10.ckpt
epoch=2.ckpt
epoch=7.ckpt
'''.strip().split('\n')]
assert(nsort(x, r'version_(\d+)')[0] == 'version_10')
print(nsort(x, r'.*sion_(\d+)')[0] == 'version_10')
assert(nsort(y, r'epoch=(\d+)')[0] == 'epoch=10.ckpt')
print(nsort(y, r'.*ch=(\d+)')[0] == 'epoch=10.ckpt')
if __name__ == '__main__':
test_nsort()
| denorm_ibn | identifier_name |
utils.py | '''
Copyright (C) 2019-2021, Mo Zhou <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# pylint: disable=no-member
import torch as th
import json
import fcntl
import contextlib
import os
import re
import numpy as np
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters.terminal import TerminalFormatter
from sklearn.metrics.cluster import normalized_mutual_info_score as __nmi
#
try:
import faiss
faiss.omp_set_num_threads(4)
except ImportError:
from sklearn.cluster import KMeans
IMmean = th.tensor([0.485, 0.456, 0.406]) # pylint: disable=not-callable
IMstd = th.tensor([0.229, 0.224, 0.225]) # pylint: disable=not-callable
IMmean_ibn = th.tensor([0.502, 0.4588, 0.4078])
IMstd_ibn = th.tensor([0.0039, 0.0039, 0.0039])
def renorm(im): return im.sub(IMmean[:, None, None].to(
im.device)).div(IMstd[:, None, None].to(im.device))
def renorm_ibn(im): return im.sub(IMmean_ibn[:, None, None].to(
im.device)).div(IMstd_ibn[:, None, None].to(im.device))[:, range(3)[::-1], :, :]
def denorm(im): return im.mul(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def denorm_ibn(im): return im[:, range(3)[::-1], :, :].mul(IMstd_ibn[:, None, None].to(
    im.device)).add(IMmean_ibn[:, None, None].to(im.device))
def xdnorm(im): return im.div(IMstd[:, None, None].to(
im.device)).add(IMmean[:, None, None].to(im.device))
def chw2hwc(im): return im.transpose((0, 2, 3, 1)) if len(
im.shape) == 4 else im.transpose((1, 2, 0))
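# Illustrative usage sketch (added for clarity; the _demo_* helper and the toy
# tensor shapes are assumptions, not part of the original file): renorm() maps
# a [0, 1] image batch into ImageNet-normalized space and denorm() inverts it.
def _demo_renorm_roundtrip() -> bool:
    im = th.rand(2, 3, 4, 4)          # toy NCHW batch in [0, 1]
    restored = denorm(renorm(im))     # renorm then denorm should be the identity
    return bool(th.allclose(restored, im, atol=1e-5))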
def metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor, ncls: int) -> float:
'''
wrapper with a CUDA-OOM (out of memory) guard
'''
try:
nmi = __metric_get_nmi(valvecs, vallabs, ncls)
except RuntimeError as e:
print('! FAISS(GPU) Triggered CUDA OOM. Falling back to CPU clustering...')
        os.environ['FAISS_CPU'] = '1'  # os.putenv() would not be seen by os.getenv() below
nmi = __metric_get_nmi(valvecs, vallabs, ncls, use_cuda=False)
return nmi
def __metric_get_nmi(valvecs: th.Tensor, vallabs: th.Tensor,
ncls: int, use_cuda: bool = True) -> float:
'''
Compute the NMI score
'''
use_cuda: bool = use_cuda and th.cuda.is_available() \
and hasattr(faiss, 'StandardGpuResources')
if int(os.getenv('FAISS_CPU', 0)) > 0:
use_cuda = False
npvecs = valvecs.detach().cpu().numpy().astype(np.float32)
nplabs = vallabs.detach().cpu().view(-1).numpy().astype(np.float32)
# a weird dispatcher but it works.
if 'faiss' in globals():
if use_cuda:
gpu_resource = faiss.StandardGpuResources()
cluster_idx = faiss.IndexFlatL2(npvecs.shape[1])
if not th.distributed.is_initialized():
cluster_idx = faiss.index_cpu_to_gpu(
gpu_resource, 0, cluster_idx)
else:
cluster_idx = faiss.index_cpu_to_gpu(gpu_resource,
th.distributed.get_rank(), cluster_idx)
kmeans = faiss.Clustering(npvecs.shape[1], ncls)
kmeans.verbose = False
kmeans.train(npvecs, cluster_idx)
_, pred = cluster_idx.search(npvecs, 1)
pred = pred.flatten()
else:
kmeans = faiss.Kmeans(
npvecs.shape[1], ncls, seed=123, verbose=False)
kmeans.train(npvecs)
_, pred = kmeans.index.search(npvecs, 1)
pred = pred.flatten()
nmi = __nmi(nplabs, pred)
elif 'KMeans' in globals():
kmeans = KMeans(n_clusters=ncls, random_state=0).fit(npvecs)
nmi = __nmi(nplabs, kmeans.labels_)
else:
raise NotImplementedError(
            'please provide at least one kmeans implementation for the NMI metric.')
return nmi
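# Illustrative usage sketch (added for clarity; the _demo_* helper and the toy
# sizes are assumptions, not part of the original file): cluster a small batch
# of embeddings into 4 groups and score the clustering against random labels.
def _demo_metric_get_nmi() -> float:
    valvecs = th.randn(64, 8)             # 64 embeddings of dimension 8
    vallabs = th.randint(0, 4, (64,))     # 4 ground-truth classes
    return metric_get_nmi(valvecs, vallabs, ncls=4)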
def metric_get_rank(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, ks: list) -> tuple:
'''
Flexibly get the rank of the topmost item in the same class
dist = [dist(anchor,x) for x in validation_set]
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# [important] argsort(...)[:,1] for skipping the diagonal (R@1=1.0)
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
rank = th.where(vallabels[argsort] == label)[0].min().item()
return (rank,) + tuple(rank < k for k in ks)
def test_metric_get_rank():
N = 32
dist = th.arange(N) / N
label = 1
labels = th.zeros(N)
labels[[0,1]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (0, True, True))
labels = th.zeros(N)
labels[[0, 2]] = 1
recall = metric_get_rank(dist, label, labels, [1,2])
assert(recall == (1, False, True))
def metric_get_ap(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor) -> float:
'''
Get the overall average precision
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
    # we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:]
argwhere1 = th.where(vallabels[argsort] == label)[0] + 1
ap = ((th.arange(len(argwhere1)).float() + 1).to(argwhere1.device) /
argwhere1).sum().item() / len(argwhere1)
return ap
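# Worked mini-example (added for clarity; the _demo_* helper and the numbers are
# illustrative only): with distances 0/8, 1/8, ..., the anchor at index 0 is
# skipped, so positives at ranks 1 and 3 of the remaining list give
# AP = (1/1 + 2/3) / 2 ~= 0.833.
def _demo_metric_get_ap() -> float:
    dist = th.arange(8).float() / 8
    vallabels = th.tensor([1., 1., 0., 1., 0., 0., 0., 0.])
    return metric_get_ap(dist, 1, vallabels)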
def metric_get_ap_r(dist: th.Tensor, label: th.Tensor,
vallabels: th.Tensor, rs: list) -> float:
'''
computes the mAP@R metric following
"A metric learning reality check", eccv 2020
dist (1 x len(vallabels)): pairwise distance vector between a single query
to the validation set.
label: int label for the query
vallabels: label array for the validation set
'''
assert(dist.nelement() == vallabels.nelement())
# we skip the smallest value as it's exactly for the anchor itself
argsort = dist.argsort(descending=False)[1:].cpu()
mask = (vallabels[argsort] == label).cpu()
cmask = mask.cumsum(dim=0)
mapr = []
for r in rs:
tmp = (cmask[:r] / (th.arange(r) + 1))[mask[:r]].sum() / r
mapr.append(tmp.item())
return tuple(mapr)
def test_metric_get_ap_r():
def et1e_4(a, b):
assert(abs(a - b) < 1e-4)
N = 101
dist = th.arange(N) / N
label = 1
#
labels = th.zeros(N)
labels[[0,1]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.1)
#
labels = th.zeros(N)
labels[[0,1,10]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.12)
#
labels = th.zeros(N)
labels[[0,1,2]] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 0.20)
#
labels = th.zeros(N)
labels[th.arange(11)] = 1
mapr = metric_get_ap_r(dist, label, labels, [10])
et1e_4(mapr[0], 1.00)
def rjson(j: object) -> str:
'''
Render/Highlight the JSON code for pretty print
'''
if isinstance(j, str):
'''
let's assume it's a json string
'''
code = j
elif any(isinstance(j, x) for x in (str, list, dict, float, int)):
'''
let's first serialize it into json then render
'''
code = json.dumps(j)
else:
raise ValueError('does not know how to deal with such datatype')
return highlight(code, PythonLexer(), TerminalFormatter())
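# Illustrative usage sketch (added for clarity; the _demo_* helper is not part
# of the original file): rjson() accepts either an already-serialized JSON
# string or a plain Python object that json.dumps can handle.
def _demo_rjson() -> None:
    print(rjson({'recall@1': 0.75, 'metric': 'C'}))   # dict -> serialized, then highlighted
    print(rjson('{"epoch": 3}'))                      # string -> highlighted as-is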
def pdist(repres: th.Tensor, metric: str) -> th.Tensor:
'''
Helper: compute pairwise distance matrix.
https://github.com/pytorch/pytorch/issues/48306
'''
assert(len(repres.shape) == 2)
with th.no_grad():
if metric == 'C':
# 1. th.nn.functional.cosine_similarity(x[:,:,None],
# x.t()[None,:,:])
repres = th.nn.functional.normalize(repres, dim=-1)
pdist = 1.0 - th.mm(repres, repres.t())
elif metric in ('E', 'N'):
if metric == 'N':
repres = th.nn.functional.normalize(repres, dim=-1)
# Memory efficient pairwise euclidean distance matrix
# 1. th.nn.functional.pairwise_distance(x[:,:,None], x.t()[None,:,:])
# 2. th.cdist(x,x)
prod = th.mm(repres, repres.t())
norm = prod.diag().unsqueeze(1).expand_as(prod)
pdist = (norm + norm.t() - 2 * prod).sqrt()
else:
raise ValueError(f'illegal metric {metric}')
return pdist
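# Illustrative usage sketch (added for clarity; the _demo_* helper and the toy
# labels are assumptions): build a cosine distance matrix for a small batch and
# compute the rank / recall@k of the first sample with metric_get_rank().
def _demo_pdist_recall() -> tuple:
    repres = th.randn(8, 16)                          # 8 embeddings, dim 16
    labels = th.tensor([0, 0, 1, 1, 2, 2, 3, 3])
    dist = pdist(repres, 'C')                         # (8, 8) cosine distance matrix
    return metric_get_rank(dist[0], labels[0].item(), labels, [1, 2])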
def orthogonalRegularization(model, loss):
losses = []
for m in model.modules():
if isinstance(m, th.nn.Linear):
w = m.weight
mat = th.matmul(w, w.t())
diff = mat - th.diag(th.diag(mat))
loss = th.mean(th.pow(diff, 2))
losses.append(loss)
    return th.stack(losses).sum()  # th.sum() cannot take a Python list
@contextlib.contextmanager
def openlock(*args, **kwargs):
lock = open(*args, **kwargs)
fcntl.lockf(lock, fcntl.LOCK_EX)
try:
yield lock
finally:
fcntl.lockf(lock, fcntl.LOCK_UN)
lock.close()
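# Illustrative usage sketch (added for clarity; the path below is a placeholder,
# not something the original file relies on): openlock() behaves like open()
# but holds an exclusive fcntl lock for the duration of the with-block, which
# helps when several processes append to the same file.
def _demo_openlock() -> None:
    with openlock('/tmp/openlock_demo.txt', 'a') as f:
        f.write('one line written under an exclusive lock\n')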
def nsort(L: list, R: str):
'''
sort list L by the key:int matched from regex R, descending.
'''
assert(all(re.match(R, item) for item in L))
nL = [(int(re.match(R, item).groups()[0]), item) for item in L]
nL = sorted(nL, key=lambda x: x[0], reverse=True)
return [x[-1] for x in nL]
def test_nsort():
x = [x.strip() for x in '''
version_0
version_2
version_10
version_3
version_1
'''.strip().split('\n')]
y = [y.strip() for y in '''
epoch=0.ckpt
epoch=10.ckpt | epoch=7.ckpt
'''.strip().split('\n')]
assert(nsort(x, r'version_(\d+)')[0] == 'version_10')
print(nsort(x, r'.*sion_(\d+)')[0] == 'version_10')
assert(nsort(y, r'epoch=(\d+)')[0] == 'epoch=10.ckpt')
print(nsort(y, r'.*ch=(\d+)')[0] == 'epoch=10.ckpt')
if __name__ == '__main__':
test_nsort() | epoch=2.ckpt | random_line_split |
tree_based_retrieval.py | import sys
import tensorflow as tf
from utils.param import FLAGS
from utils.xletter import XletterPreprocessor
from utils.layers import xletter_feature_extractor, mask_maxpool
import math
from tensorflow.python.ops import lookup_ops
if FLAGS.use_mstf_ops == 1:
import tensorflow.contrib.microsoft as mstf
def parse_dims(dims_str):
dims = [int(dim) for dim in dims_str.split(',')]
return dims
def count_idx(filename):
count = 0
#for line in open(filename,encoding='utf-8'):
for line in tf.gfile.GFile(filename):
count += 1
return count
def default_init():
    return tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode = 'FAN_AVG', uniform = True)
class TreeModel():
|
if __name__ == '__main__':
from data_reader import InputPipe
    m = TreeModel()
pred_pipe = InputPipe(FLAGS.input_validation_data_path + "/bleu_data.txt", FLAGS.eval_batch_size,1,2,"",True)
query,keyword = pred_pipe.get_next()
output = m.search(query)
with tf.Session() as sess:
scope = tf.get_variable_scope()
scope.reuse_variables()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
sess.run(pred_pipe.iterator.initializer)
ckpt = tf.train.get_checkpoint_state(FLAGS.input_previous_model_path)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Load model from ", ckpt.model_checkpoint_path)
else:
print("No initial model found.")
print(sess.run([query,output]))
| def __init__(self):
TreeHeight = lambda x: int(math.log(x-1)/math.log(2)) + 2
indexCnt = count_idx(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file)
self.tree_height = TreeHeight(indexCnt+1)
self.tree_index = lookup_ops.index_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = indexCnt)
self.reverse_tree_index = lookup_ops.index_to_string_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = '<unk>')
self.dims = parse_dims(FLAGS.semantic_model_dims)
self.layer_embedding = tf.get_variable(name='tree_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if not FLAGS.leaf_content_emb:
self.leaf_embedding = tf.get_variable(name='leaf_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if FLAGS.use_mstf_ops == 1:
self.op_dict = mstf.dssm_dict(FLAGS.xletter_dict)
elif FLAGS.use_mstf_ops == -1:
self.op_dict = XletterPreprocessor(FLAGS.xletter_dict, FLAGS.xletter_win_size)
else:
self.op_dict = None
def inference(self, input_fields, mode):
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
if FLAGS.use_mstf_ops:
query, doc = input_fields[0], input_fields[1]
else:
query, doc = input_fields[0][0], input_fields[1][0]
query_vec = self.vector_generation(query, 'Q')
doc_id = self.tree_index.lookup(doc)
doc_vec = self.vector_generation(doc, 'D', doc_id)
return query_vec, doc_vec, doc_id
elif mode == tf.contrib.learn.ModeKeys.INFER:
if FLAGS.use_mstf_ops:
query = input_fields[0]
else:
query = input_fields[0][0]
query_vec = self.vector_generation(query,'Q')
return [query_vec] + self.search(query_vec)
def search(self, query_vec):
#[batch_size,vec_dim]
batch_size = tf.shape(query_vec)[0]
query_vec = tf.expand_dims(query_vec, axis = 1)
start_layer = math.floor(math.log(FLAGS.top_k)/math.log(2)) + 2
#[batch_size,N]
top_index = tf.tile(tf.expand_dims(tf.range(pow(2,start_layer-2), pow(2,start_layer-1)),axis=0),[batch_size,1])
#top_index = tf.range(pow(2, start_layer-1),pow(2,start_layer))
#return top_index
for i in range(start_layer, self.tree_height):
#[batch_size,2N]
eval_index = tf.concat([tf.cast(top_index * 2,tf.int32), tf.cast(top_index * 2 + 1,tf.int32)],axis = 1)
#return eval_index
#[batch_size,2N,vec_dim]
eval_emb = tf.gather(self.layer_embedding, eval_index)
#return tf.shape(eval_emb)
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
#return tf.shape(query_vec)
#[batch_size,2N] hope so....
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
#return eval_score
#return tf.shape(eval_score)
#Select Top N
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
#top_index = tf.expand_dims(top_index, axis=2)
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
top_index = tf.gather_nd(eval_index,expand_index)
#return top_index,eval_index, what
res = top_index * 2 - pow(2, self.tree_height - 1)
res = tf.concat([res, res+1],axis=1)
values = tf.concat([values, values], axis=1)
if not FLAGS.leaf_content_emb:
eval_emb = tf.gather(self.leaf_embedding, tf.cast(res - pow(2, self.tree_height - 1),tf.int32))
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
res = tf.gather_nd(res,expand_index)
return [res,values]
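    # Worked example of the layer arithmetic above (illustrative numbers only):
    # with FLAGS.top_k = 10, start_layer = floor(log2(10)) + 2 = 5, so the beam
    # starts from nodes 8..15 of the heap-numbered tree; with, say, 3706 leaves,
    # tree_height = int(log2(3706)) + 2 = 13, and each loop iteration expands the
    # current top_k nodes to their 2N children, scores them against the query
    # vector, and keeps the top_k highest-scoring children until the leaf layer.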
def vector_generation(self, text, model_prefix, doc_id = None):
if model_prefix == "D" and not FLAGS.leaf_content_emb:
return tf.nn.l2_normalize(tf.gather(self.leaf_embedding, doc_id),dim = 1)
dims = parse_dims(FLAGS.semantic_model_dims)
text_vecs, step_mask, sequence_length = xletter_feature_extractor(text, model_prefix, self.op_dict, FLAGS.xletter_cnt, FLAGS.xletter_win_size, FLAGS.dim_xletter_emb)
maxpooling_vec = mask_maxpool(tf.nn.tanh(text_vecs),step_mask)
dim_input = FLAGS.dim_xletter_emb
input_vec = maxpooling_vec
for i, dim in enumerate(self.dims):
dim_output = dim
random_range = math.sqrt(6.0/(dim_input+dim_output))
with tf.variable_scope("semantic_layer{:}".format(i),reuse=tf.AUTO_REUSE):
weight = tf.get_variable("weight_" + model_prefix, shape = [dim_input, dim_output], initializer = tf.random_uniform_initializer(-random_range, random_range))
output_vec = tf.matmul(input_vec, weight)
output_vec = tf.nn.tanh(output_vec)
input_vec = output_vec
normalized_vec = tf.nn.l2_normalize(output_vec, dim = 1)
return normalized_vec
def calc_loss(self, inference_res):
query_vec, doc_vec, doc_id = inference_res
batch_size = tf.shape(query_vec)[0]
#Leaf Layer Loss
posCos = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
allCos = [posCos]
for i in range(0, FLAGS.negative_sample):
random_indices = (tf.range(batch_size) + tf.random_uniform([batch_size],1,batch_size, tf.int32)) % batch_size
negCos = tf.reduce_sum(tf.multiply(query_vec, tf.gather(doc_vec, random_indices)),axis=1)
allCos.append(tf.where(tf.equal(negCos,1),tf.zeros_like(negCos),negCos))
allCos = tf.stack(allCos, axis=1)
softmax = tf.nn.softmax(allCos * FLAGS.softmax_gamma, dim=1)
leafLoss = tf.reduce_sum(-tf.log(softmax[:,0]))
#Node Layer Loss
doc_idx = doc_id + pow(2, self.tree_height - 1)
nodeLoss = tf.zeros_like(leafLoss,dtype=tf.float32)
for i in range(4, self.tree_height):
cosines = []
posIdx = tf.cast(doc_idx // pow(2, self.tree_height - i),tf.int32)
posVec = tf.nn.l2_normalize(tf.gather(self.layer_embedding, posIdx),dim = 1)
cosines.append(tf.reduce_sum(tf.multiply(query_vec, posVec), axis = 1))
nodeCnt = pow(2,i-1)
for j in range(0, FLAGS.layer_negative_sample):
random_idx = nodeCnt + (posIdx - nodeCnt + tf.random_uniform([batch_size],1,nodeCnt,tf.int32)) % nodeCnt
cosines.append(tf.reduce_sum(tf.multiply(query_vec, tf.nn.l2_normalize(tf.gather(self.layer_embedding, random_idx),dim=1)),axis=1))
cosines = tf.stack(cosines, axis=1)
softmax = tf.nn.softmax(cosines * FLAGS.softmax_gamma, dim = 1)
nodeLoss += tf.reduce_sum(-tf.log(softmax[:,0]))
weight = batch_size
loss = tf.cast(leafLoss + FLAGS.layer_weight * nodeLoss,tf.float32)
tf.summary.scalar('softmax_losses',loss)
return [loss, leafLoss, nodeLoss], weight
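    # Note on the node-layer loss above (illustrative explanation): doc_idx is the
    # leaf's position in heap numbering, so doc_idx // 2**(tree_height - i) is its
    # ancestor at layer i; each layer contributes a softmax loss over that ancestor
    # versus FLAGS.layer_negative_sample nodes drawn from the same layer (offsets
    # are taken modulo nodeCnt = 2**(i-1) so the negatives stay inside layer i).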
def calc_score(self, inference_res):
if FLAGS.mode == 'predict':
query_vec, search_res, score = inference_res
else:
query_vec, doc_vec, doc_id = inference_res
score = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
return score
def get_optimizer(self):
return [tf.train.GradientDescentOptimizer(FLAGS.learning_rate)]
def lookup_infer(self, inference_res):
query_vec, search_res, search_values = inference_res
if FLAGS.leaf_content_emb:
seq_len = 2 * FLAGS.top_k
else:
seq_len = FLAGS.top_k
return self.reverse_tree_index.lookup(tf.to_int64(search_res)),tf.ones([tf.shape(query_vec)[0]],dtype=tf.int32)+seq_len | identifier_body |
tree_based_retrieval.py | import sys
import tensorflow as tf
from utils.param import FLAGS
from utils.xletter import XletterPreprocessor
from utils.layers import xletter_feature_extractor, mask_maxpool
import math
from tensorflow.python.ops import lookup_ops
if FLAGS.use_mstf_ops == 1:
import tensorflow.contrib.microsoft as mstf
def parse_dims(dims_str):
dims = [int(dim) for dim in dims_str.split(',')]
return dims
def count_idx(filename):
count = 0
#for line in open(filename,encoding='utf-8'):
for line in tf.gfile.GFile(filename):
count += 1
return count
def default_init():
    return tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode = 'FAN_AVG', uniform = True)
class TreeModel():
def __init__(self):
TreeHeight = lambda x: int(math.log(x-1)/math.log(2)) + 2
indexCnt = count_idx(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file)
self.tree_height = TreeHeight(indexCnt+1)
self.tree_index = lookup_ops.index_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = indexCnt)
self.reverse_tree_index = lookup_ops.index_to_string_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = '<unk>')
self.dims = parse_dims(FLAGS.semantic_model_dims)
self.layer_embedding = tf.get_variable(name='tree_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if not FLAGS.leaf_content_emb:
|
if FLAGS.use_mstf_ops == 1:
self.op_dict = mstf.dssm_dict(FLAGS.xletter_dict)
elif FLAGS.use_mstf_ops == -1:
self.op_dict = XletterPreprocessor(FLAGS.xletter_dict, FLAGS.xletter_win_size)
else:
self.op_dict = None
def inference(self, input_fields, mode):
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
if FLAGS.use_mstf_ops:
query, doc = input_fields[0], input_fields[1]
else:
query, doc = input_fields[0][0], input_fields[1][0]
query_vec = self.vector_generation(query, 'Q')
doc_id = self.tree_index.lookup(doc)
doc_vec = self.vector_generation(doc, 'D', doc_id)
return query_vec, doc_vec, doc_id
elif mode == tf.contrib.learn.ModeKeys.INFER:
if FLAGS.use_mstf_ops:
query = input_fields[0]
else:
query = input_fields[0][0]
query_vec = self.vector_generation(query,'Q')
return [query_vec] + self.search(query_vec)
def search(self, query_vec):
#[batch_size,vec_dim]
batch_size = tf.shape(query_vec)[0]
query_vec = tf.expand_dims(query_vec, axis = 1)
start_layer = math.floor(math.log(FLAGS.top_k)/math.log(2)) + 2
#[batch_size,N]
top_index = tf.tile(tf.expand_dims(tf.range(pow(2,start_layer-2), pow(2,start_layer-1)),axis=0),[batch_size,1])
#top_index = tf.range(pow(2, start_layer-1),pow(2,start_layer))
#return top_index
for i in range(start_layer, self.tree_height):
#[batch_size,2N]
eval_index = tf.concat([tf.cast(top_index * 2,tf.int32), tf.cast(top_index * 2 + 1,tf.int32)],axis = 1)
#return eval_index
#[batch_size,2N,vec_dim]
eval_emb = tf.gather(self.layer_embedding, eval_index)
#return tf.shape(eval_emb)
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
#return tf.shape(query_vec)
#[batch_size,2N] hope so....
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
#return eval_score
#return tf.shape(eval_score)
#Select Top N
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
#top_index = tf.expand_dims(top_index, axis=2)
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
top_index = tf.gather_nd(eval_index,expand_index)
#return top_index,eval_index, what
res = top_index * 2 - pow(2, self.tree_height - 1)
res = tf.concat([res, res+1],axis=1)
values = tf.concat([values, values], axis=1)
if not FLAGS.leaf_content_emb:
eval_emb = tf.gather(self.leaf_embedding, tf.cast(res - pow(2, self.tree_height - 1),tf.int32))
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
res = tf.gather_nd(res,expand_index)
return [res,values]
def vector_generation(self, text, model_prefix, doc_id = None):
if model_prefix == "D" and not FLAGS.leaf_content_emb:
return tf.nn.l2_normalize(tf.gather(self.leaf_embedding, doc_id),dim = 1)
dims = parse_dims(FLAGS.semantic_model_dims)
text_vecs, step_mask, sequence_length = xletter_feature_extractor(text, model_prefix, self.op_dict, FLAGS.xletter_cnt, FLAGS.xletter_win_size, FLAGS.dim_xletter_emb)
maxpooling_vec = mask_maxpool(tf.nn.tanh(text_vecs),step_mask)
dim_input = FLAGS.dim_xletter_emb
input_vec = maxpooling_vec
for i, dim in enumerate(self.dims):
dim_output = dim
random_range = math.sqrt(6.0/(dim_input+dim_output))
with tf.variable_scope("semantic_layer{:}".format(i),reuse=tf.AUTO_REUSE):
weight = tf.get_variable("weight_" + model_prefix, shape = [dim_input, dim_output], initializer = tf.random_uniform_initializer(-random_range, random_range))
output_vec = tf.matmul(input_vec, weight)
output_vec = tf.nn.tanh(output_vec)
input_vec = output_vec
normalized_vec = tf.nn.l2_normalize(output_vec, dim = 1)
return normalized_vec
def calc_loss(self, inference_res):
query_vec, doc_vec, doc_id = inference_res
batch_size = tf.shape(query_vec)[0]
#Leaf Layer Loss
posCos = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
allCos = [posCos]
for i in range(0, FLAGS.negative_sample):
random_indices = (tf.range(batch_size) + tf.random_uniform([batch_size],1,batch_size, tf.int32)) % batch_size
negCos = tf.reduce_sum(tf.multiply(query_vec, tf.gather(doc_vec, random_indices)),axis=1)
allCos.append(tf.where(tf.equal(negCos,1),tf.zeros_like(negCos),negCos))
allCos = tf.stack(allCos, axis=1)
softmax = tf.nn.softmax(allCos * FLAGS.softmax_gamma, dim=1)
leafLoss = tf.reduce_sum(-tf.log(softmax[:,0]))
#Node Layer Loss
doc_idx = doc_id + pow(2, self.tree_height - 1)
nodeLoss = tf.zeros_like(leafLoss,dtype=tf.float32)
for i in range(4, self.tree_height):
cosines = []
posIdx = tf.cast(doc_idx // pow(2, self.tree_height - i),tf.int32)
posVec = tf.nn.l2_normalize(tf.gather(self.layer_embedding, posIdx),dim = 1)
cosines.append(tf.reduce_sum(tf.multiply(query_vec, posVec), axis = 1))
nodeCnt = pow(2,i-1)
for j in range(0, FLAGS.layer_negative_sample):
random_idx = nodeCnt + (posIdx - nodeCnt + tf.random_uniform([batch_size],1,nodeCnt,tf.int32)) % nodeCnt
cosines.append(tf.reduce_sum(tf.multiply(query_vec, tf.nn.l2_normalize(tf.gather(self.layer_embedding, random_idx),dim=1)),axis=1))
cosines = tf.stack(cosines, axis=1)
softmax = tf.nn.softmax(cosines * FLAGS.softmax_gamma, dim = 1)
nodeLoss += tf.reduce_sum(-tf.log(softmax[:,0]))
weight = batch_size
loss = tf.cast(leafLoss + FLAGS.layer_weight * nodeLoss,tf.float32)
tf.summary.scalar('softmax_losses',loss)
return [loss, leafLoss, nodeLoss], weight
def calc_score(self, inference_res):
if FLAGS.mode == 'predict':
query_vec, search_res, score = inference_res
else:
query_vec, doc_vec, doc_id = inference_res
score = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
return score
def get_optimizer(self):
return [tf.train.GradientDescentOptimizer(FLAGS.learning_rate)]
def lookup_infer(self, inference_res):
query_vec, search_res, search_values = inference_res
if FLAGS.leaf_content_emb:
seq_len = 2 * FLAGS.top_k
else:
seq_len = FLAGS.top_k
return self.reverse_tree_index.lookup(tf.to_int64(search_res)),tf.ones([tf.shape(query_vec)[0]],dtype=tf.int32)+seq_len
if __name__ == '__main__':
from data_reader import InputPipe
    m = TreeModel()
pred_pipe = InputPipe(FLAGS.input_validation_data_path + "/bleu_data.txt", FLAGS.eval_batch_size,1,2,"",True)
query,keyword = pred_pipe.get_next()
output = m.search(query)
with tf.Session() as sess:
scope = tf.get_variable_scope()
scope.reuse_variables()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
sess.run(pred_pipe.iterator.initializer)
ckpt = tf.train.get_checkpoint_state(FLAGS.input_previous_model_path)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Load model from ", ckpt.model_checkpoint_path)
else:
print("No initial model found.")
print(sess.run([query,output]))
| self.leaf_embedding = tf.get_variable(name='leaf_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]]) | conditional_block |
tree_based_retrieval.py | import sys
import tensorflow as tf
from utils.param import FLAGS
from utils.xletter import XletterPreprocessor
from utils.layers import xletter_feature_extractor, mask_maxpool
import math
from tensorflow.python.ops import lookup_ops
if FLAGS.use_mstf_ops == 1:
import tensorflow.contrib.microsoft as mstf
def parse_dims(dims_str):
dims = [int(dim) for dim in dims_str.split(',')]
return dims
def count_idx(filename):
count = 0
#for line in open(filename,encoding='utf-8'):
for line in tf.gfile.GFile(filename):
count += 1
return count
def default_init():
    return tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode = 'FAN_AVG', uniform = True)
class TreeModel():
def __init__(self):
TreeHeight = lambda x: int(math.log(x-1)/math.log(2)) + 2
indexCnt = count_idx(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file)
self.tree_height = TreeHeight(indexCnt+1)
self.tree_index = lookup_ops.index_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = indexCnt)
self.reverse_tree_index = lookup_ops.index_to_string_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = '<unk>')
self.dims = parse_dims(FLAGS.semantic_model_dims)
self.layer_embedding = tf.get_variable(name='tree_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if not FLAGS.leaf_content_emb:
self.leaf_embedding = tf.get_variable(name='leaf_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if FLAGS.use_mstf_ops == 1:
self.op_dict = mstf.dssm_dict(FLAGS.xletter_dict)
elif FLAGS.use_mstf_ops == -1:
self.op_dict = XletterPreprocessor(FLAGS.xletter_dict, FLAGS.xletter_win_size)
else:
self.op_dict = None
def inference(self, input_fields, mode):
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
if FLAGS.use_mstf_ops:
query, doc = input_fields[0], input_fields[1]
else:
query, doc = input_fields[0][0], input_fields[1][0]
query_vec = self.vector_generation(query, 'Q')
doc_id = self.tree_index.lookup(doc)
doc_vec = self.vector_generation(doc, 'D', doc_id)
return query_vec, doc_vec, doc_id
elif mode == tf.contrib.learn.ModeKeys.INFER:
if FLAGS.use_mstf_ops:
query = input_fields[0]
else:
query = input_fields[0][0]
query_vec = self.vector_generation(query,'Q')
return [query_vec] + self.search(query_vec)
def search(self, query_vec):
#[batch_size,vec_dim]
batch_size = tf.shape(query_vec)[0]
query_vec = tf.expand_dims(query_vec, axis = 1)
start_layer = math.floor(math.log(FLAGS.top_k)/math.log(2)) + 2
#[batch_size,N]
top_index = tf.tile(tf.expand_dims(tf.range(pow(2,start_layer-2), pow(2,start_layer-1)),axis=0),[batch_size,1])
#top_index = tf.range(pow(2, start_layer-1),pow(2,start_layer))
#return top_index
for i in range(start_layer, self.tree_height):
#[batch_size,2N]
eval_index = tf.concat([tf.cast(top_index * 2,tf.int32), tf.cast(top_index * 2 + 1,tf.int32)],axis = 1)
#return eval_index
#[batch_size,2N,vec_dim]
eval_emb = tf.gather(self.layer_embedding, eval_index)
#return tf.shape(eval_emb)
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
#return tf.shape(query_vec)
#[batch_size,2N] hope so....
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
#return eval_score
#return tf.shape(eval_score)
#Select Top N
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
#top_index = tf.expand_dims(top_index, axis=2)
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
top_index = tf.gather_nd(eval_index,expand_index)
#return top_index,eval_index, what
res = top_index * 2 - pow(2, self.tree_height - 1)
res = tf.concat([res, res+1],axis=1)
values = tf.concat([values, values], axis=1)
if not FLAGS.leaf_content_emb:
eval_emb = tf.gather(self.leaf_embedding, tf.cast(res - pow(2, self.tree_height - 1),tf.int32))
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
res = tf.gather_nd(res,expand_index)
return [res,values]
def vector_generation(self, text, model_prefix, doc_id = None):
if model_prefix == "D" and not FLAGS.leaf_content_emb:
return tf.nn.l2_normalize(tf.gather(self.leaf_embedding, doc_id),dim = 1)
dims = parse_dims(FLAGS.semantic_model_dims)
text_vecs, step_mask, sequence_length = xletter_feature_extractor(text, model_prefix, self.op_dict, FLAGS.xletter_cnt, FLAGS.xletter_win_size, FLAGS.dim_xletter_emb)
maxpooling_vec = mask_maxpool(tf.nn.tanh(text_vecs),step_mask)
dim_input = FLAGS.dim_xletter_emb
input_vec = maxpooling_vec
for i, dim in enumerate(self.dims):
dim_output = dim
random_range = math.sqrt(6.0/(dim_input+dim_output))
with tf.variable_scope("semantic_layer{:}".format(i),reuse=tf.AUTO_REUSE):
weight = tf.get_variable("weight_" + model_prefix, shape = [dim_input, dim_output], initializer = tf.random_uniform_initializer(-random_range, random_range))
output_vec = tf.matmul(input_vec, weight)
output_vec = tf.nn.tanh(output_vec)
input_vec = output_vec
normalized_vec = tf.nn.l2_normalize(output_vec, dim = 1)
return normalized_vec
def calc_loss(self, inference_res):
query_vec, doc_vec, doc_id = inference_res
batch_size = tf.shape(query_vec)[0]
#Leaf Layer Loss
posCos = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
allCos = [posCos]
for i in range(0, FLAGS.negative_sample):
random_indices = (tf.range(batch_size) + tf.random_uniform([batch_size],1,batch_size, tf.int32)) % batch_size
negCos = tf.reduce_sum(tf.multiply(query_vec, tf.gather(doc_vec, random_indices)),axis=1)
allCos.append(tf.where(tf.equal(negCos,1),tf.zeros_like(negCos),negCos))
allCos = tf.stack(allCos, axis=1)
softmax = tf.nn.softmax(allCos * FLAGS.softmax_gamma, dim=1)
leafLoss = tf.reduce_sum(-tf.log(softmax[:,0]))
#Node Layer Loss
doc_idx = doc_id + pow(2, self.tree_height - 1)
nodeLoss = tf.zeros_like(leafLoss,dtype=tf.float32)
for i in range(4, self.tree_height):
cosines = []
posIdx = tf.cast(doc_idx // pow(2, self.tree_height - i),tf.int32)
posVec = tf.nn.l2_normalize(tf.gather(self.layer_embedding, posIdx),dim = 1)
cosines.append(tf.reduce_sum(tf.multiply(query_vec, posVec), axis = 1))
nodeCnt = pow(2,i-1)
for j in range(0, FLAGS.layer_negative_sample):
random_idx = nodeCnt + (posIdx - nodeCnt + tf.random_uniform([batch_size],1,nodeCnt,tf.int32)) % nodeCnt
cosines.append(tf.reduce_sum(tf.multiply(query_vec, tf.nn.l2_normalize(tf.gather(self.layer_embedding, random_idx),dim=1)),axis=1))
cosines = tf.stack(cosines, axis=1)
softmax = tf.nn.softmax(cosines * FLAGS.softmax_gamma, dim = 1)
nodeLoss += tf.reduce_sum(-tf.log(softmax[:,0]))
weight = batch_size
loss = tf.cast(leafLoss + FLAGS.layer_weight * nodeLoss,tf.float32)
tf.summary.scalar('softmax_losses',loss)
return [loss, leafLoss, nodeLoss], weight
def | (self, inference_res):
if FLAGS.mode == 'predict':
query_vec, search_res, score = inference_res
else:
query_vec, doc_vec, doc_id = inference_res
score = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
return score
def get_optimizer(self):
return [tf.train.GradientDescentOptimizer(FLAGS.learning_rate)]
def lookup_infer(self, inference_res):
query_vec, search_res, search_values = inference_res
if FLAGS.leaf_content_emb:
seq_len = 2 * FLAGS.top_k
else:
seq_len = FLAGS.top_k
return self.reverse_tree_index.lookup(tf.to_int64(search_res)),tf.ones([tf.shape(query_vec)[0]],dtype=tf.int32)+seq_len
if __name__ == '__main__':
from data_reader import InputPipe
    m = TreeModel()
pred_pipe = InputPipe(FLAGS.input_validation_data_path + "/bleu_data.txt", FLAGS.eval_batch_size,1,2,"",True)
query,keyword = pred_pipe.get_next()
output = m.search(query)
with tf.Session() as sess:
scope = tf.get_variable_scope()
scope.reuse_variables()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
sess.run(pred_pipe.iterator.initializer)
ckpt = tf.train.get_checkpoint_state(FLAGS.input_previous_model_path)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Load model from ", ckpt.model_checkpoint_path)
else:
print("No initial model found.")
print(sess.run([query,output]))
| calc_score | identifier_name |
tree_based_retrieval.py | import sys
import tensorflow as tf
from utils.param import FLAGS
from utils.xletter import XletterPreprocessor
from utils.layers import xletter_feature_extractor, mask_maxpool
import math
from tensorflow.python.ops import lookup_ops
if FLAGS.use_mstf_ops == 1:
import tensorflow.contrib.microsoft as mstf
def parse_dims(dims_str):
dims = [int(dim) for dim in dims_str.split(',')]
return dims
def count_idx(filename):
count = 0
#for line in open(filename,encoding='utf-8'):
for line in tf.gfile.GFile(filename):
count += 1
return count
def default_init():
    return tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode = 'FAN_AVG', uniform = True)
indexCnt = count_idx(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file)
self.tree_height = TreeHeight(indexCnt+1)
self.tree_index = lookup_ops.index_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = indexCnt)
self.reverse_tree_index = lookup_ops.index_to_string_table_from_file(FLAGS.input_previous_model_path + "/" + FLAGS.tree_index_file, default_value = '<unk>')
self.dims = parse_dims(FLAGS.semantic_model_dims)
self.layer_embedding = tf.get_variable(name='tree_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if not FLAGS.leaf_content_emb:
self.leaf_embedding = tf.get_variable(name='leaf_node_emb', shape = [pow(2, self.tree_height -1) ,self.dims[-1]])
if FLAGS.use_mstf_ops == 1:
self.op_dict = mstf.dssm_dict(FLAGS.xletter_dict)
elif FLAGS.use_mstf_ops == -1:
self.op_dict = XletterPreprocessor(FLAGS.xletter_dict, FLAGS.xletter_win_size)
else:
self.op_dict = None
def inference(self, input_fields, mode):
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
if FLAGS.use_mstf_ops:
query, doc = input_fields[0], input_fields[1]
else:
query, doc = input_fields[0][0], input_fields[1][0]
query_vec = self.vector_generation(query, 'Q')
doc_id = self.tree_index.lookup(doc)
doc_vec = self.vector_generation(doc, 'D', doc_id)
return query_vec, doc_vec, doc_id
elif mode == tf.contrib.learn.ModeKeys.INFER:
if FLAGS.use_mstf_ops:
query = input_fields[0]
else:
query = input_fields[0][0]
query_vec = self.vector_generation(query,'Q')
return [query_vec] + self.search(query_vec)
def search(self, query_vec):
#[batch_size,vec_dim]
batch_size = tf.shape(query_vec)[0]
query_vec = tf.expand_dims(query_vec, axis = 1)
start_layer = math.floor(math.log(FLAGS.top_k)/math.log(2)) + 2
#[batch_size,N]
top_index = tf.tile(tf.expand_dims(tf.range(pow(2,start_layer-2), pow(2,start_layer-1)),axis=0),[batch_size,1])
#top_index = tf.range(pow(2, start_layer-1),pow(2,start_layer))
#return top_index
for i in range(start_layer, self.tree_height):
#[batch_size,2N]
eval_index = tf.concat([tf.cast(top_index * 2,tf.int32), tf.cast(top_index * 2 + 1,tf.int32)],axis = 1)
#return eval_index
#[batch_size,2N,vec_dim]
eval_emb = tf.gather(self.layer_embedding, eval_index)
#return tf.shape(eval_emb)
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
#return tf.shape(query_vec)
#[batch_size,2N] hope so....
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
#return eval_score
#return tf.shape(eval_score)
#Select Top N
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
#top_index = tf.expand_dims(top_index, axis=2)
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
top_index = tf.gather_nd(eval_index,expand_index)
#return top_index,eval_index, what
res = top_index * 2 - pow(2, self.tree_height - 1)
res = tf.concat([res, res+1],axis=1)
values = tf.concat([values, values], axis=1)
if not FLAGS.leaf_content_emb:
eval_emb = tf.gather(self.leaf_embedding, tf.cast(res - pow(2, self.tree_height - 1),tf.int32))
eval_emb = tf.nn.l2_normalize(eval_emb, dim = 2)
eval_emb_transpose = tf.transpose(eval_emb,[0,2,1])
eval_score = tf.matmul(query_vec, eval_emb_transpose)
eval_score = tf.squeeze(eval_score)
values, top_index = tf.nn.top_k(eval_score,FLAGS.top_k, False)
top_index = tf.reshape(top_index,[-1,FLAGS.top_k])
batch_id = tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,tf.shape(top_index)[1]])
expand_index = tf.concat([tf.expand_dims(batch_id,axis=2),tf.expand_dims(top_index,axis=2)],axis=-1)
#return top_index, batch_id ,expand_index
res = tf.gather_nd(res,expand_index)
return [res,values]
def vector_generation(self, text, model_prefix, doc_id = None):
if model_prefix == "D" and not FLAGS.leaf_content_emb:
return tf.nn.l2_normalize(tf.gather(self.leaf_embedding, doc_id),dim = 1)
dims = parse_dims(FLAGS.semantic_model_dims)
text_vecs, step_mask, sequence_length = xletter_feature_extractor(text, model_prefix, self.op_dict, FLAGS.xletter_cnt, FLAGS.xletter_win_size, FLAGS.dim_xletter_emb)
maxpooling_vec = mask_maxpool(tf.nn.tanh(text_vecs),step_mask)
dim_input = FLAGS.dim_xletter_emb
input_vec = maxpooling_vec
for i, dim in enumerate(self.dims):
dim_output = dim
random_range = math.sqrt(6.0/(dim_input+dim_output))
with tf.variable_scope("semantic_layer{:}".format(i),reuse=tf.AUTO_REUSE):
weight = tf.get_variable("weight_" + model_prefix, shape = [dim_input, dim_output], initializer = tf.random_uniform_initializer(-random_range, random_range))
output_vec = tf.matmul(input_vec, weight)
output_vec = tf.nn.tanh(output_vec)
input_vec = output_vec
normalized_vec = tf.nn.l2_normalize(output_vec, dim = 1)
return normalized_vec
def calc_loss(self, inference_res):
query_vec, doc_vec, doc_id = inference_res
batch_size = tf.shape(query_vec)[0]
#Leaf Layer Loss
posCos = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
allCos = [posCos]
for i in range(0, FLAGS.negative_sample):
random_indices = (tf.range(batch_size) + tf.random_uniform([batch_size],1,batch_size, tf.int32)) % batch_size
negCos = tf.reduce_sum(tf.multiply(query_vec, tf.gather(doc_vec, random_indices)),axis=1)
allCos.append(tf.where(tf.equal(negCos,1),tf.zeros_like(negCos),negCos))
allCos = tf.stack(allCos, axis=1)
softmax = tf.nn.softmax(allCos * FLAGS.softmax_gamma, dim=1)
leafLoss = tf.reduce_sum(-tf.log(softmax[:,0]))
#Node Layer Loss
doc_idx = doc_id + pow(2, self.tree_height - 1)
nodeLoss = tf.zeros_like(leafLoss,dtype=tf.float32)
for i in range(4, self.tree_height):
cosines = []
posIdx = tf.cast(doc_idx // pow(2, self.tree_height - i),tf.int32)
posVec = tf.nn.l2_normalize(tf.gather(self.layer_embedding, posIdx),dim = 1)
cosines.append(tf.reduce_sum(tf.multiply(query_vec, posVec), axis = 1))
nodeCnt = pow(2,i-1)
for j in range(0, FLAGS.layer_negative_sample):
random_idx = nodeCnt + (posIdx - nodeCnt + tf.random_uniform([batch_size],1,nodeCnt,tf.int32)) % nodeCnt
cosines.append(tf.reduce_sum(tf.multiply(query_vec, tf.nn.l2_normalize(tf.gather(self.layer_embedding, random_idx),dim=1)),axis=1))
cosines = tf.stack(cosines, axis=1)
softmax = tf.nn.softmax(cosines * FLAGS.softmax_gamma, dim = 1)
nodeLoss += tf.reduce_sum(-tf.log(softmax[:,0]))
weight = batch_size
loss = tf.cast(leafLoss + FLAGS.layer_weight * nodeLoss,tf.float32)
tf.summary.scalar('softmax_losses',loss)
return [loss, leafLoss, nodeLoss], weight
def calc_score(self, inference_res):
if FLAGS.mode == 'predict':
query_vec, search_res, score = inference_res
else:
query_vec, doc_vec, doc_id = inference_res
score = tf.reduce_sum(tf.multiply(query_vec, doc_vec), axis = 1)
return score
def get_optimizer(self):
return [tf.train.GradientDescentOptimizer(FLAGS.learning_rate)]
def lookup_infer(self, inference_res):
query_vec, search_res, search_values = inference_res
if FLAGS.leaf_content_emb:
seq_len = 2 * FLAGS.top_k
else:
seq_len = FLAGS.top_k
return self.reverse_tree_index.lookup(tf.to_int64(search_res)),tf.ones([tf.shape(query_vec)[0]],dtype=tf.int32)+seq_len
if __name__ == '__main__':
from data_reader import InputPipe
    m = TreeModel()
pred_pipe = InputPipe(FLAGS.input_validation_data_path + "/bleu_data.txt", FLAGS.eval_batch_size,1,2,"",True)
query,keyword = pred_pipe.get_next()
output = m.search(query)
with tf.Session() as sess:
scope = tf.get_variable_scope()
scope.reuse_variables()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
sess.run(pred_pipe.iterator.initializer)
ckpt = tf.train.get_checkpoint_state(FLAGS.input_previous_model_path)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Load model from ", ckpt.model_checkpoint_path)
else:
print("No initial model found.")
print(sess.run([query,output])) |
class TreeModel():
def __init__(self): | random_line_split |
run.py | import glob
import os
import random
import numpy as np
from data_gen import DataSet
from nade import NADE
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Lambda, add
from keras import backend as K
from keras.models import Model
from keras.callbacks import Callback
import keras.regularizers
from keras.optimizers import Adam
import tensorflow as tf
def prediction_layer(x):
# x.shape = (?,6040,5)
x_cumsum = K.cumsum(x, axis=2)
# x_cumsum.shape = (?,6040,5)
output = K.softmax(x_cumsum)
# output = (?,6040,5)
return output
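# Minimal sketch (added for illustration; the _demo_* helper and the toy batch
# are assumptions): the cumsum along the rating axis ties the score of rating k
# to all ratings <= k before the softmax, the ordinal trick used in CF-NADE.
def _demo_prediction_layer():
    scores = K.constant(np.random.rand(1, 6040, 5).astype('float32'))
    probs = prediction_layer(scores)      # still shaped (1, 6040, 5)
    return K.eval(K.sum(probs, axis=2))   # each item's 5 rating probs sum to 1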
def prediction_output_shape(input_shape):
return input_shape
def d_layer(x):
return K.sum(x, axis=1)
def d_output_shape(input_shape):
return (input_shape[0], )
def D_layer(x):
return K.sum(x, axis=1)
def D_output_shape(input_shape):
return (input_shape[0], )
def rating_cost_lambda_func(args):
alpha = 1.
std = 0.01
pred_score, true_ratings, input_masks, output_masks, D, d = args
pred_score_cum = K.cumsum(pred_score, axis=2)
prob_item_ratings = K.softmax(pred_score_cum)
accu_prob_1N = K.cumsum(prob_item_ratings, axis=2)
accu_prob_N1 = K.cumsum(prob_item_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
mask1N = K.cumsum(true_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
maskN1 = K.cumsum(true_ratings, axis=2)
cost_ordinal_1N = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_1N)) * mask1N, axis=2)
cost_ordinal_N1 = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_N1)) * maskN1, axis=2)
cost_ordinal = cost_ordinal_1N + cost_ordinal_N1
nll_item_ratings = K.sum(
-(true_ratings * K.log(prob_item_ratings)), axis=2)
nll = std * K.sum(
nll_item_ratings, axis=1) * 1.0 * D / (D - d + 1e-6) + alpha * K.sum(
cost_ordinal, axis=1) * 1.0 * D / (D - d + 1e-6)
cost = K.mean(nll)
cost = K.expand_dims(cost, 0)
return cost
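# Shape sketch for the loss above (illustrative): pred_score and true_ratings are
# (batch, 6040, 5) rating tensors, input_masks/output_masks are (batch, 6040),
# and D/d count rated vs. conditioned-on entries per sample, so the D/(D-d)
# factor reweights each sample by how much of it was held out, roughly following
# the orderless NADE training objective.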
class EvaluationCallback(Callback):
def __init__(self, data_set, new_items, training_set):
self.data_set = data_set
self.rmses = []
self.rate_score = np.array([1, 2, 3, 4, 5], np.float32)
self.new_items = new_items
self.training_set = training_set
def eval_rmse(self):
squared_error = []
n_samples = []
for i, batch in enumerate(self.data_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = self.model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (
pred_batch * self.rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, self.new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
return rmse
def on_epoch_end(self, epoch, logs={}):
score = self.eval_rmse()
if self.training_set:
|
else:
print("validation set RMSE for epoch %d is %f" % (epoch, score))
self.rmses.append(score)
def _train(args):
if K.backend() != 'tensorflow':
print("This repository only support tensorflow backend.")
raise NotImplementedError()
batch_size = 64
num_users = 6040
num_items = 3706
data_sample = 1.0
input_dim0 = 6040
input_dim1 = 5
std = 0.0
alpha = 1.0
print('Loading data...')
train_file_list = sorted(
glob.glob(os.path.join(('data/train_set'), 'part*')))
val_file_list = sorted(glob.glob(os.path.join(('data/val_set/'), 'part*')))
test_file_list = sorted(
glob.glob(os.path.join(('data/test_set/'), 'part*')))
train_file_list = [
dfile for dfile in train_file_list if os.stat(dfile).st_size != 0
]
val_file_list = [
dfile for dfile in val_file_list if os.stat(dfile).st_size != 0
]
test_file_list = [
dfile for dfile in test_file_list if os.stat(dfile).st_size != 0
]
random.shuffle(train_file_list)
random.shuffle(val_file_list)
random.shuffle(test_file_list)
train_file_list = train_file_list[:max(
int(len(train_file_list) * data_sample), 1)]
train_set = DataSet(
train_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=0)
val_set = DataSet(
val_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=1)
test_set = DataSet(
test_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=2)
rating_freq = np.zeros((6040, 5))
init_b = np.zeros((6040, 5))
for batch in val_set.generate(max_iters=1):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
rating_freq += inp_r.sum(axis=0)
log_rating_freq = np.log(rating_freq + 1e-8)
log_rating_freq_diff = np.diff(log_rating_freq, axis=1)
init_b[:, 1:] = log_rating_freq_diff
init_b[:, 0] = log_rating_freq[:, 0]
new_items = np.where(rating_freq.sum(axis=1) == 0)[0]
input_layer = Input(shape=(input_dim0, input_dim1), name='input_ratings')
output_ratings = Input(
shape=(input_dim0, input_dim1), name='output_ratings')
input_masks = Input(shape=(input_dim0, ), name='input_masks')
output_masks = Input(shape=(input_dim0, ), name='output_masks')
# from keras import backend as K
# print(K.tensorflow_backend._get_available_gpus())
# input('Enter >>')
# nade_layer = Dropout(0.0)(input_layer)
nade_layer = input_layer
nade_layer = NADE(
hidden_dim=args.hidden_dim,
activation='tanh',
bias=True,
W_regularizer=keras.regularizers.l2(0.02),
V_regularizer=keras.regularizers.l2(0.02),
b_regularizer=keras.regularizers.l2(0.02),
c_regularizer=keras.regularizers.l2(0.02),
args=args)(nade_layer)
predicted_ratings = Lambda(
prediction_layer,
output_shape=prediction_output_shape,
name='predicted_ratings')(nade_layer)
d = Lambda(d_layer, output_shape=d_output_shape, name='d')(input_masks)
sum_masks = add([input_masks, output_masks])
D = Lambda(D_layer, output_shape=D_output_shape, name='D')(sum_masks)
loss_out = Lambda(
rating_cost_lambda_func, output_shape=(1, ), name='nade_loss')(
[nade_layer, output_ratings, input_masks, output_masks, D, d])
cf_nade_model = Model(
inputs=[input_layer, output_ratings, input_masks, output_masks],
outputs=[loss_out, predicted_ratings])
cf_nade_model.summary()
adam = Adam(lr=args.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
cf_nade_model.compile(
loss={'nade_loss': lambda y_true, y_pred: y_pred}, optimizer=adam)
train_evaluation_callback = EvaluationCallback(
data_set=train_set, new_items=new_items, training_set=True)
valid_evaluation_callback = EvaluationCallback(
data_set=val_set, new_items=new_items, training_set=False)
print('Training...')
cf_nade_model.fit_generator(
train_set.generate(),
steps_per_epoch=(train_set.get_corpus_size() // batch_size),
epochs=30,
validation_data=val_set.generate(),
validation_steps=(val_set.get_corpus_size() // batch_size),
shuffle=True,
callbacks=[
train_set, val_set, train_evaluation_callback,
valid_evaluation_callback
],
verbose=1)
print('Testing...')
rmses = []
rate_score = np.array([1, 2, 3, 4, 5], np.float32)
new_items = new_items
squared_error = []
n_samples = []
for i, batch in enumerate(test_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = cf_nade_model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (pred_batch * rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
print("test set RMSE is %f" % (rmse))
def main():
import argparse
parser = argparse.ArgumentParser(description='CFNADE-keras')
parser.add_argument(
'--hidden_dim',
type=int,
default=500,
        help='number of hidden units in the NADE layer')
    # note: experimenting with the 500-unit setting under keras-1 and the 250-unit setting under keras-2...
parser.add_argument(
'--normalize_1st_layer',
type=bool,
default=False,
help='normalize 1st layer')
parser.add_argument(
'--learning_rate',
type=float,
default=1e-3,
help='learning rate for optimizer.')
# parser.add_argument(
# '--iter_validation',
# type=int,
# default=10,
# help='Iteration unit for validation')
# parser.add_argument(
# '--max_iter', type=int, default=10000000, help='Max Iteration')
# parser.add_argument(
# '--n_hidden_unit',
# type=int,
# default=500,
# help='The number of hidden unit')
# parser.add_argument(
# '--parameter_sharing',
# type=bool,
# default=False,
# help='parameter sharing')
# parser.add_argument(
# '--lambda_1',
# type=float,
# default=0.015,
# help='lambda for weight decay.')
# parser.add_argument(
# '--lambda_2',
# type=float,
# default=0.015,
# help='lambda for weight decay.')
# parser.add_argument(
# '--dropout_rate', type=float, default=0., help='dropout_rate')
# parser.add_argument(
# '--iter_early_stop',
# type=int,
# default=10000,
# help='the number of iteration for early stop.')
# parser.add_argument(
# '--data_seed', type=int, default=1, help='the seed for dataset')
args = parser.parse_args()
_train(args)
if __name__ == '__main__':
main()
| print("training set RMSE for epoch %d is %f" % (epoch, score)) | conditional_block |
run.py | import glob
import os
import random
import numpy as np
from data_gen import DataSet
from nade import NADE
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Lambda, add
from keras import backend as K
from keras.models import Model
from keras.callbacks import Callback
import keras.regularizers
from keras.optimizers import Adam
import tensorflow as tf
def | (x):
# x.shape = (?,6040,5)
x_cumsum = K.cumsum(x, axis=2)
# x_cumsum.shape = (?,6040,5)
output = K.softmax(x_cumsum)
# output = (?,6040,5)
return output
def prediction_output_shape(input_shape):
return input_shape
def d_layer(x):
return K.sum(x, axis=1)
def d_output_shape(input_shape):
return (input_shape[0], )
def D_layer(x):
return K.sum(x, axis=1)
def D_output_shape(input_shape):
return (input_shape[0], )
def rating_cost_lambda_func(args):
alpha = 1.
std = 0.01
pred_score, true_ratings, input_masks, output_masks, D, d = args
pred_score_cum = K.cumsum(pred_score, axis=2)
prob_item_ratings = K.softmax(pred_score_cum)
accu_prob_1N = K.cumsum(prob_item_ratings, axis=2)
accu_prob_N1 = K.cumsum(prob_item_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
mask1N = K.cumsum(true_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
maskN1 = K.cumsum(true_ratings, axis=2)
cost_ordinal_1N = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_1N)) * mask1N, axis=2)
cost_ordinal_N1 = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_N1)) * maskN1, axis=2)
cost_ordinal = cost_ordinal_1N + cost_ordinal_N1
nll_item_ratings = K.sum(
-(true_ratings * K.log(prob_item_ratings)), axis=2)
nll = std * K.sum(
nll_item_ratings, axis=1) * 1.0 * D / (D - d + 1e-6) + alpha * K.sum(
cost_ordinal, axis=1) * 1.0 * D / (D - d + 1e-6)
cost = K.mean(nll)
cost = K.expand_dims(cost, 0)
return cost
class EvaluationCallback(Callback):
def __init__(self, data_set, new_items, training_set):
self.data_set = data_set
self.rmses = []
self.rate_score = np.array([1, 2, 3, 4, 5], np.float32)
self.new_items = new_items
self.training_set = training_set
def eval_rmse(self):
squared_error = []
n_samples = []
for i, batch in enumerate(self.data_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = self.model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (
pred_batch * self.rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, self.new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
return rmse
def on_epoch_end(self, epoch, logs={}):
score = self.eval_rmse()
if self.training_set:
print("training set RMSE for epoch %d is %f" % (epoch, score))
else:
print("validation set RMSE for epoch %d is %f" % (epoch, score))
self.rmses.append(score)
def _train(args):
if K.backend() != 'tensorflow':
print("This repository only support tensorflow backend.")
raise NotImplementedError()
batch_size = 64
num_users = 6040
num_items = 3706
data_sample = 1.0
input_dim0 = 6040
input_dim1 = 5
std = 0.0
alpha = 1.0
print('Loading data...')
train_file_list = sorted(
glob.glob(os.path.join(('data/train_set'), 'part*')))
val_file_list = sorted(glob.glob(os.path.join(('data/val_set/'), 'part*')))
test_file_list = sorted(
glob.glob(os.path.join(('data/test_set/'), 'part*')))
train_file_list = [
dfile for dfile in train_file_list if os.stat(dfile).st_size != 0
]
val_file_list = [
dfile for dfile in val_file_list if os.stat(dfile).st_size != 0
]
test_file_list = [
dfile for dfile in test_file_list if os.stat(dfile).st_size != 0
]
random.shuffle(train_file_list)
random.shuffle(val_file_list)
random.shuffle(test_file_list)
train_file_list = train_file_list[:max(
int(len(train_file_list) * data_sample), 1)]
train_set = DataSet(
train_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=0)
val_set = DataSet(
val_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=1)
test_set = DataSet(
test_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=2)
rating_freq = np.zeros((6040, 5))
init_b = np.zeros((6040, 5))
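# One pass over the validation batches accumulates per-item rating counts.
# init_b converts the log-frequencies into an additive bias initialization
# (first rating bin absolute, later bins as successive differences), and
# new_items records items with no observed ratings so that their predictions
# can later be pinned to a neutral rating of 3.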
for batch in val_set.generate(max_iters=1):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
rating_freq += inp_r.sum(axis=0)
log_rating_freq = np.log(rating_freq + 1e-8)
log_rating_freq_diff = np.diff(log_rating_freq, axis=1)
init_b[:, 1:] = log_rating_freq_diff
init_b[:, 0] = log_rating_freq[:, 0]
new_items = np.where(rating_freq.sum(axis=1) == 0)[0]
input_layer = Input(shape=(input_dim0, input_dim1), name='input_ratings')
output_ratings = Input(
shape=(input_dim0, input_dim1), name='output_ratings')
input_masks = Input(shape=(input_dim0, ), name='input_masks')
output_masks = Input(shape=(input_dim0, ), name='output_masks')
# from keras import backend as K
# print(K.tensorflow_backend._get_available_gpus())
# input('Enter >>')
# nade_layer = Dropout(0.0)(input_layer)
nade_layer = input_layer
nade_layer = NADE(
hidden_dim=args.hidden_dim,
activation='tanh',
bias=True,
W_regularizer=keras.regularizers.l2(0.02),
V_regularizer=keras.regularizers.l2(0.02),
b_regularizer=keras.regularizers.l2(0.02),
c_regularizer=keras.regularizers.l2(0.02),
args=args)(nade_layer)
predicted_ratings = Lambda(
prediction_layer,
output_shape=prediction_output_shape,
name='predicted_ratings')(nade_layer)
d = Lambda(d_layer, output_shape=d_output_shape, name='d')(input_masks)
sum_masks = add([input_masks, output_masks])
D = Lambda(D_layer, output_shape=D_output_shape, name='D')(sum_masks)
loss_out = Lambda(
rating_cost_lambda_func, output_shape=(1, ), name='nade_loss')(
[nade_layer, output_ratings, input_masks, output_masks, D, d])
cf_nade_model = Model(
inputs=[input_layer, output_ratings, input_masks, output_masks],
outputs=[loss_out, predicted_ratings])
cf_nade_model.summary()
adam = Adam(lr=args.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
cf_nade_model.compile(
loss={'nade_loss': lambda y_true, y_pred: y_pred}, optimizer=adam)
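# The actual cost is emitted by the 'nade_loss' Lambda layer above, so the
# compiled loss simply passes that layer's output through unchanged.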
train_evaluation_callback = EvaluationCallback(
data_set=train_set, new_items=new_items, training_set=True)
valid_evaluation_callback = EvaluationCallback(
data_set=val_set, new_items=new_items, training_set=False)
print('Training...')
cf_nade_model.fit_generator(
train_set.generate(),
steps_per_epoch=(train_set.get_corpus_size() // batch_size),
epochs=30,
validation_data=val_set.generate(),
validation_steps=(val_set.get_corpus_size() // batch_size),
shuffle=True,
callbacks=[
train_set, val_set, train_evaluation_callback,
valid_evaluation_callback
],
verbose=1)
print('Testing...')
rmses = []
rate_score = np.array([1, 2, 3, 4, 5], np.float32)
new_items = new_items
squared_error = []
n_samples = []
for i, batch in enumerate(test_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = cf_nade_model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (pred_batch * rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
print("test set RMSE is %f" % (rmse))
def main():
import argparse
parser = argparse.ArgumentParser(description='CFNADE-keras')
parser.add_argument(
'--hidden_dim',
type=int,
default=500,
help='Number of hidden units in the NADE layer')
# Experimenting with 500 hidden units on Keras 1 and 250 on Keras 2...
parser.add_argument(
'--normalize_1st_layer',
type=bool,
default=False,
help='normalize 1st layer')
parser.add_argument(
'--learning_rate',
type=float,
default=1e-3,
help='learning rate for optimizer.')
# parser.add_argument(
# '--iter_validation',
# type=int,
# default=10,
# help='Iteration unit for validation')
# parser.add_argument(
# '--max_iter', type=int, default=10000000, help='Max Iteration')
# parser.add_argument(
# '--n_hidden_unit',
# type=int,
# default=500,
# help='The number of hidden unit')
# parser.add_argument(
# '--parameter_sharing',
# type=bool,
# default=False,
# help='parameter sharing')
# parser.add_argument(
# '--lambda_1',
# type=float,
# default=0.015,
# help='lambda for weight decay.')
# parser.add_argument(
# '--lambda_2',
# type=float,
# default=0.015,
# help='lambda for weight decay.')
# parser.add_argument(
# '--dropout_rate', type=float, default=0., help='dropout_rate')
# parser.add_argument(
# '--iter_early_stop',
# type=int,
# default=10000,
# help='the number of iteration for early stop.')
# parser.add_argument(
# '--data_seed', type=int, default=1, help='the seed for dataset')
args = parser.parse_args()
_train(args)
if __name__ == '__main__':
main()
| prediction_layer | identifier_name |
run.py | import glob
import os
import random
import numpy as np
from data_gen import DataSet
from nade import NADE
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Lambda, add
from keras import backend as K
from keras.models import Model
from keras.callbacks import Callback
import keras.regularizers
from keras.optimizers import Adam
import tensorflow as tf
def prediction_layer(x):
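# Scores are cumulatively summed over the 5 rating bins before the softmax,
# giving the predicted rating distribution the ordinal structure used by CF-NADE.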
# x.shape = (?,6040,5)
x_cumsum = K.cumsum(x, axis=2)
# x_cumsum.shape = (?,6040,5)
output = K.softmax(x_cumsum)
# output = (?,6040,5)
return output
def prediction_output_shape(input_shape):
return input_shape
def d_layer(x):
|
def d_output_shape(input_shape):
return (input_shape[0], )
def D_layer(x):
return K.sum(x, axis=1)
def D_output_shape(input_shape):
return (input_shape[0], )
def rating_cost_lambda_func(args):
alpha = 1.
std = 0.01
pred_score, true_ratings, input_masks, output_masks, D, d = args
pred_score_cum = K.cumsum(pred_score, axis=2)
prob_item_ratings = K.softmax(pred_score_cum)
accu_prob_1N = K.cumsum(prob_item_ratings, axis=2)
accu_prob_N1 = K.cumsum(prob_item_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
mask1N = K.cumsum(true_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
maskN1 = K.cumsum(true_ratings, axis=2)
cost_ordinal_1N = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_1N)) * mask1N, axis=2)
cost_ordinal_N1 = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_N1)) * maskN1, axis=2)
cost_ordinal = cost_ordinal_1N + cost_ordinal_N1
nll_item_ratings = K.sum(
-(true_ratings * K.log(prob_item_ratings)), axis=2)
nll = std * K.sum(
nll_item_ratings, axis=1) * 1.0 * D / (D - d + 1e-6) + alpha * K.sum(
cost_ordinal, axis=1) * 1.0 * D / (D - d + 1e-6)
cost = K.mean(nll)
cost = K.expand_dims(cost, 0)
return cost
class EvaluationCallback(Callback):
def __init__(self, data_set, new_items, training_set):
self.data_set = data_set
self.rmses = []
self.rate_score = np.array([1, 2, 3, 4, 5], np.float32)
self.new_items = new_items
self.training_set = training_set
def eval_rmse(self):
squared_error = []
n_samples = []
for i, batch in enumerate(self.data_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = self.model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (
pred_batch * self.rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, self.new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
return rmse
def on_epoch_end(self, epoch, logs={}):
score = self.eval_rmse()
if self.training_set:
print("training set RMSE for epoch %d is %f" % (epoch, score))
else:
print("validation set RMSE for epoch %d is %f" % (epoch, score))
self.rmses.append(score)
def _train(args):
if K.backend() != 'tensorflow':
print("This repository only support tensorflow backend.")
raise NotImplementedError()
batch_size = 64
num_users = 6040
num_items = 3706
data_sample = 1.0
input_dim0 = 6040
input_dim1 = 5
std = 0.0
alpha = 1.0
print('Loading data...')
train_file_list = sorted(
glob.glob(os.path.join(('data/train_set'), 'part*')))
val_file_list = sorted(glob.glob(os.path.join(('data/val_set/'), 'part*')))
test_file_list = sorted(
glob.glob(os.path.join(('data/test_set/'), 'part*')))
train_file_list = [
dfile for dfile in train_file_list if os.stat(dfile).st_size != 0
]
val_file_list = [
dfile for dfile in val_file_list if os.stat(dfile).st_size != 0
]
test_file_list = [
dfile for dfile in test_file_list if os.stat(dfile).st_size != 0
]
random.shuffle(train_file_list)
random.shuffle(val_file_list)
random.shuffle(test_file_list)
train_file_list = train_file_list[:max(
int(len(train_file_list) * data_sample), 1)]
train_set = DataSet(
train_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=0)
val_set = DataSet(
val_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=1)
test_set = DataSet(
test_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=2)
rating_freq = np.zeros((6040, 5))
init_b = np.zeros((6040, 5))
for batch in val_set.generate(max_iters=1):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
rating_freq += inp_r.sum(axis=0)
log_rating_freq = np.log(rating_freq + 1e-8)
log_rating_freq_diff = np.diff(log_rating_freq, axis=1)
init_b[:, 1:] = log_rating_freq_diff
init_b[:, 0] = log_rating_freq[:, 0]
new_items = np.where(rating_freq.sum(axis=1) == 0)[0]
input_layer = Input(shape=(input_dim0, input_dim1), name='input_ratings')
output_ratings = Input(
shape=(input_dim0, input_dim1), name='output_ratings')
input_masks = Input(shape=(input_dim0, ), name='input_masks')
output_masks = Input(shape=(input_dim0, ), name='output_masks')
# from keras import backend as K
# print(K.tensorflow_backend._get_available_gpus())
# input('Enter >>')
# nade_layer = Dropout(0.0)(input_layer)
nade_layer = input_layer
nade_layer = NADE(
hidden_dim=args.hidden_dim,
activation='tanh',
bias=True,
W_regularizer=keras.regularizers.l2(0.02),
V_regularizer=keras.regularizers.l2(0.02),
b_regularizer=keras.regularizers.l2(0.02),
c_regularizer=keras.regularizers.l2(0.02),
args=args)(nade_layer)
predicted_ratings = Lambda(
prediction_layer,
output_shape=prediction_output_shape,
name='predicted_ratings')(nade_layer)
d = Lambda(d_layer, output_shape=d_output_shape, name='d')(input_masks)
sum_masks = add([input_masks, output_masks])
D = Lambda(D_layer, output_shape=D_output_shape, name='D')(sum_masks)
loss_out = Lambda(
rating_cost_lambda_func, output_shape=(1, ), name='nade_loss')(
[nade_layer, output_ratings, input_masks, output_masks, D, d])
cf_nade_model = Model(
inputs=[input_layer, output_ratings, input_masks, output_masks],
outputs=[loss_out, predicted_ratings])
cf_nade_model.summary()
adam = Adam(lr=args.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
cf_nade_model.compile(
loss={'nade_loss': lambda y_true, y_pred: y_pred}, optimizer=adam)
train_evaluation_callback = EvaluationCallback(
data_set=train_set, new_items=new_items, training_set=True)
valid_evaluation_callback = EvaluationCallback(
data_set=val_set, new_items=new_items, training_set=False)
print('Training...')
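# train_set and val_set are also passed in the callbacks list below; this
# presumably lets them reset or reshuffle between epochs, and assumes DataSet
# implements the Keras Callback interface.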
cf_nade_model.fit_generator(
train_set.generate(),
steps_per_epoch=(train_set.get_corpus_size() // batch_size),
epochs=30,
validation_data=val_set.generate(),
validation_steps=(val_set.get_corpus_size() // batch_size),
shuffle=True,
callbacks=[
train_set, val_set, train_evaluation_callback,
valid_evaluation_callback
],
verbose=1)
print('Testing...')
rmses = []
rate_score = np.array([1, 2, 3, 4, 5], np.float32)
new_items = new_items
squared_error = []
n_samples = []
for i, batch in enumerate(test_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = cf_nade_model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (pred_batch * rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
print("test set RMSE is %f" % (rmse))
def main():
import argparse
parser = argparse.ArgumentParser(description='CFNADE-keras')
parser.add_argument(
'--hidden_dim',
type=int,
default=500,
help='Number of hidden units in the NADE layer')
# Experimenting with 500 hidden units on Keras 1 and 250 on Keras 2...
parser.add_argument(
'--normalize_1st_layer',
type=bool,
default=False,
help='normalize 1st layer')
parser.add_argument(
'--learning_rate',
type=float,
default=1e-3,
help='learning rate for optimizer.')
# parser.add_argument(
# '--iter_validation',
# type=int,
# default=10,
# help='Iteration unit for validation')
# parser.add_argument(
# '--max_iter', type=int, default=10000000, help='Max Iteration')
# parser.add_argument(
# '--n_hidden_unit',
# type=int,
# default=500,
# help='The number of hidden unit')
# parser.add_argument(
# '--parameter_sharing',
# type=bool,
# default=False,
# help='parameter sharing')
# parser.add_argument(
# '--lambda_1',
# type=float,
# default=0.015,
# help='lambda for weight decay.')
# parser.add_argument(
# '--lambda_2',
# type=float,
# default=0.015,
# help='lambda for weight decay.')
# parser.add_argument(
# '--dropout_rate', type=float, default=0., help='dropout_rate')
# parser.add_argument(
# '--iter_early_stop',
# type=int,
# default=10000,
# help='the number of iteration for early stop.')
# parser.add_argument(
# '--data_seed', type=int, default=1, help='the seed for dataset')
args = parser.parse_args()
_train(args)
if __name__ == '__main__':
main()
| return K.sum(x, axis=1) | identifier_body |
run.py | import glob
import os
import random
import numpy as np
from data_gen import DataSet
from nade import NADE
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, Lambda, add
from keras import backend as K
from keras.models import Model
from keras.callbacks import Callback
import keras.regularizers
from keras.optimizers import Adam
import tensorflow as tf
def prediction_layer(x):
# x.shape = (?,6040,5)
x_cumsum = K.cumsum(x, axis=2)
# x_cumsum.shape = (?,6040,5)
output = K.softmax(x_cumsum)
# output = (?,6040,5)
return output
def prediction_output_shape(input_shape):
return input_shape
def d_layer(x):
return K.sum(x, axis=1)
def d_output_shape(input_shape):
return (input_shape[0], )
def D_layer(x):
return K.sum(x, axis=1)
def D_output_shape(input_shape):
return (input_shape[0], )
def rating_cost_lambda_func(args):
alpha = 1.
std = 0.01
pred_score, true_ratings, input_masks, output_masks, D, d = args
pred_score_cum = K.cumsum(pred_score, axis=2)
prob_item_ratings = K.softmax(pred_score_cum)
accu_prob_1N = K.cumsum(prob_item_ratings, axis=2)
accu_prob_N1 = K.cumsum(prob_item_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
mask1N = K.cumsum(true_ratings[:, :, ::-1], axis=2)[:, :, ::-1]
maskN1 = K.cumsum(true_ratings, axis=2)
cost_ordinal_1N = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_1N)) * mask1N, axis=2)
cost_ordinal_N1 = -K.sum(
(K.log(prob_item_ratings) - K.log(accu_prob_N1)) * maskN1, axis=2)
cost_ordinal = cost_ordinal_1N + cost_ordinal_N1
nll_item_ratings = K.sum(
-(true_ratings * K.log(prob_item_ratings)), axis=2)
nll = std * K.sum(
nll_item_ratings, axis=1) * 1.0 * D / (D - d + 1e-6) + alpha * K.sum(
cost_ordinal, axis=1) * 1.0 * D / (D - d + 1e-6)
cost = K.mean(nll)
cost = K.expand_dims(cost, 0)
return cost
class EvaluationCallback(Callback):
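# Keras callback that reports RMSE on a DataSet at the end of every epoch.
# Expected ratings are the probability-weighted mean of the scores 1-5, and
# unseen (cold-start) items are imputed with a constant rating of 3.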
def __init__(self, data_set, new_items, training_set):
self.data_set = data_set
self.rmses = []
self.rate_score = np.array([1, 2, 3, 4, 5], np.float32)
self.new_items = new_items
self.training_set = training_set
def eval_rmse(self):
squared_error = []
n_samples = []
for i, batch in enumerate(self.data_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = self.model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (
pred_batch * self.rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, self.new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
return rmse
def on_epoch_end(self, epoch, logs={}):
score = self.eval_rmse()
if self.training_set:
print("training set RMSE for epoch %d is %f" % (epoch, score))
else:
print("validation set RMSE for epoch %d is %f" % (epoch, score))
self.rmses.append(score)
def _train(args):
if K.backend() != 'tensorflow':
print("This repository only support tensorflow backend.")
raise NotImplementedError()
batch_size = 64
num_users = 6040
num_items = 3706
data_sample = 1.0
input_dim0 = 6040
input_dim1 = 5
std = 0.0
alpha = 1.0
print('Loading data...')
train_file_list = sorted(
glob.glob(os.path.join(('data/train_set'), 'part*')))
val_file_list = sorted(glob.glob(os.path.join(('data/val_set/'), 'part*')))
test_file_list = sorted(
glob.glob(os.path.join(('data/test_set/'), 'part*')))
train_file_list = [
dfile for dfile in train_file_list if os.stat(dfile).st_size != 0
]
val_file_list = [
dfile for dfile in val_file_list if os.stat(dfile).st_size != 0
]
test_file_list = [
dfile for dfile in test_file_list if os.stat(dfile).st_size != 0
]
random.shuffle(train_file_list)
random.shuffle(val_file_list)
random.shuffle(test_file_list)
train_file_list = train_file_list[:max(
int(len(train_file_list) * data_sample), 1)]
train_set = DataSet(
train_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=0)
val_set = DataSet(
val_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=1)
test_set = DataSet(
test_file_list,
num_users=num_users,
num_items=num_items,
batch_size=batch_size,
mode=2)
rating_freq = np.zeros((6040, 5))
init_b = np.zeros((6040, 5))
for batch in val_set.generate(max_iters=1):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
rating_freq += inp_r.sum(axis=0)
log_rating_freq = np.log(rating_freq + 1e-8)
log_rating_freq_diff = np.diff(log_rating_freq, axis=1)
init_b[:, 1:] = log_rating_freq_diff
init_b[:, 0] = log_rating_freq[:, 0]
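# init_b is a log-frequency based bias initialization for the rating bins;
# note that it is computed here but never handed to the NADE layer in this script.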
new_items = np.where(rating_freq.sum(axis=1) == 0)[0]
input_layer = Input(shape=(input_dim0, input_dim1), name='input_ratings')
output_ratings = Input(
shape=(input_dim0, input_dim1), name='output_ratings')
input_masks = Input(shape=(input_dim0, ), name='input_masks')
output_masks = Input(shape=(input_dim0, ), name='output_masks')
# from keras import backend as K
# print(K.tensorflow_backend._get_available_gpus())
# input('Enter >>')
# nade_layer = Dropout(0.0)(input_layer)
nade_layer = input_layer
nade_layer = NADE(
hidden_dim=args.hidden_dim,
activation='tanh',
bias=True,
W_regularizer=keras.regularizers.l2(0.02),
V_regularizer=keras.regularizers.l2(0.02),
b_regularizer=keras.regularizers.l2(0.02),
c_regularizer=keras.regularizers.l2(0.02),
args=args)(nade_layer)
predicted_ratings = Lambda(
prediction_layer,
output_shape=prediction_output_shape,
name='predicted_ratings')(nade_layer)
d = Lambda(d_layer, output_shape=d_output_shape, name='d')(input_masks)
sum_masks = add([input_masks, output_masks])
D = Lambda(D_layer, output_shape=D_output_shape, name='D')(sum_masks)
loss_out = Lambda(
rating_cost_lambda_func, output_shape=(1, ), name='nade_loss')(
[nade_layer, output_ratings, input_masks, output_masks, D, d])
cf_nade_model = Model(
inputs=[input_layer, output_ratings, input_masks, output_masks],
outputs=[loss_out, predicted_ratings])
cf_nade_model.summary()
adam = Adam(lr=args.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
cf_nade_model.compile(
loss={'nade_loss': lambda y_true, y_pred: y_pred}, optimizer=adam)
train_evaluation_callback = EvaluationCallback(
data_set=train_set, new_items=new_items, training_set=True)
valid_evaluation_callback = EvaluationCallback(
data_set=val_set, new_items=new_items, training_set=False)
print('Training...')
cf_nade_model.fit_generator(
train_set.generate(),
steps_per_epoch=(train_set.get_corpus_size() // batch_size),
epochs=30,
validation_data=val_set.generate(),
validation_steps=(val_set.get_corpus_size() // batch_size),
shuffle=True,
callbacks=[
train_set, val_set, train_evaluation_callback,
valid_evaluation_callback
],
verbose=1)
print('Testing...')
rmses = []
rate_score = np.array([1, 2, 3, 4, 5], np.float32)
new_items = new_items
squared_error = []
n_samples = []
for i, batch in enumerate(test_set.generate(max_iters=1)):
inp_r = batch[0]['input_ratings']
out_r = batch[0]['output_ratings']
inp_m = batch[0]['input_masks']
out_m = batch[0]['output_masks']
pred_batch = cf_nade_model.predict(batch[0])[1]
true_r = out_r.argmax(axis=2) + 1
pred_r = (pred_batch * rate_score[np.newaxis, np.newaxis, :]).sum(
axis=2)
pred_r[:, new_items] = 3
mask = out_r.sum(axis=2)
# '''
# if i == 0:
# print [true_r[0][j] for j in np.nonzero(true_r[0]* mask[0])[0]]
# print [pred_r[0][j] for j in np.nonzero(pred_r[0]* mask[0])[0]]
# '''
se = np.sum(np.square(true_r - pred_r) * mask)
n = np.sum(mask)
squared_error.append(se)
n_samples.append(n)
total_squared_error = np.array(squared_error).sum()
total_n_samples = np.array(n_samples).sum()
rmse = np.sqrt(total_squared_error / (total_n_samples * 1.0 + 1e-8))
print("test set RMSE is %f" % (rmse))
def main():
import argparse
parser = argparse.ArgumentParser(description='CFNADE-keras')
parser.add_argument(
'--hidden_dim',
type=int,
default=500,
help='Number of hidden units in the NADE layer')
# Experimenting with 500 hidden units on Keras 1 and 250 on Keras 2...
parser.add_argument(
'--normalize_1st_layer',
type=bool,
default=False,
help='normalize 1st layer')
parser.add_argument(
'--learning_rate',
type=float,
default=1e-3,
help='learning rate for optimizer.')
# parser.add_argument(
# '--iter_validation',
# type=int,
# default=10,
# help='Iteration unit for validation')
# parser.add_argument(
# '--max_iter', type=int, default=10000000, help='Max Iteration')
# parser.add_argument(
# '--n_hidden_unit',
# type=int,
# default=500,
# help='The number of hidden unit')
# parser.add_argument(
# '--parameter_sharing',
# type=bool,
# default=False,
# help='parameter sharing')
# parser.add_argument(
# '--lambda_1',
# type=float,
# default=0.015,
# help='lambda for weight decay.')
# parser.add_argument(
# '--lambda_2',
# type=float,
# default=0.015,
# help='lambda for weight decay.')
# parser.add_argument(
# '--dropout_rate', type=float, default=0., help='dropout_rate')
# parser.add_argument(
# '--iter_early_stop',
# type=int,
# default=10000,
# help='the number of iteration for early stop.')
# parser.add_argument( | _train(args)
if __name__ == '__main__':
main() | # '--data_seed', type=int, default=1, help='the seed for dataset')
args = parser.parse_args() | random_line_split |
service.go | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package service handles the command-line, configuration, and runs the
// OpenTelemetry Collector.
package service
import (
"context"
"flag"
"fmt"
"log"
"os"
"os/signal"
"runtime"
"syscall"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector/component"
"github.com/open-telemetry/opentelemetry-collector/config"
"github.com/open-telemetry/opentelemetry-collector/config/configcheck"
"github.com/open-telemetry/opentelemetry-collector/config/configmodels"
"github.com/open-telemetry/opentelemetry-collector/extension"
"github.com/open-telemetry/opentelemetry-collector/service/builder"
)
// Application represents a collector application
type Application struct {
info ApplicationStartInfo
rootCmd *cobra.Command
v *viper.Viper
logger *zap.Logger
exporters builder.Exporters
builtReceivers builder.Receivers
builtPipelines builder.BuiltPipelines
factories config.Factories
config *configmodels.Config
extensions []extension.ServiceExtension
// stopTestChan is used to terminate the application in end to end tests.
stopTestChan chan struct{}
// readyChan is used in tests to indicate that the application is ready.
readyChan chan struct{}
// asyncErrorChannel is used to signal a fatal error from any component.
asyncErrorChannel chan error
}
// Command returns Application's root command.
func (app *Application) Command() *cobra.Command {
return app.rootCmd
}
// ApplicationStartInfo is the information that is logged at the application start.
// This information can be overridden in custom builds.
type ApplicationStartInfo struct {
// Executable file name, e.g. "otelcol".
ExeName string
// Long name, used e.g. in the logs.
LongName string
// Version string.
Version string
// Git hash of the source code.
GitHash string
}
var _ component.Host = (*Application)(nil)
// Context returns a context provided by the host to be used on the receiver
// operations.
func (app *Application) Context() context.Context {
// For now simply the background context.
return context.Background()
}
// New creates and returns a new instance of Application.
func New(
factories config.Factories,
appInfo ApplicationStartInfo,
) (*Application, error) {
if err := configcheck.ValidateConfigFromFactories(factories); err != nil {
return nil, err
}
app := &Application{
info: appInfo,
v: viper.New(),
readyChan: make(chan struct{}),
factories: factories,
}
rootCmd := &cobra.Command{
Use: appInfo.ExeName,
Long: appInfo.LongName,
Run: func(cmd *cobra.Command, args []string) {
app.init()
app.execute()
},
}
// TODO: coalesce this code and expose this information to other components.
flagSet := new(flag.FlagSet)
addFlagsFns := []func(*flag.FlagSet){
telemetryFlags,
builder.Flags,
loggerFlags,
}
for _, addFlags := range addFlagsFns {
addFlags(flagSet)
}
rootCmd.Flags().AddGoFlagSet(flagSet)
app.rootCmd = rootCmd
return app, nil
}
// ReportFatalError is used to report to the host that the receiver encountered
// a fatal error (i.e.: an error that the instance can't recover from) after
// its start function has already returned.
func (app *Application) ReportFatalError(err error) {
app.asyncErrorChannel <- err
}
func (app *Application) init() {
file := builder.GetConfigFile()
if file == "" {
log.Fatalf("Config file not specified")
}
app.v.SetConfigFile(file)
err := app.v.ReadInConfig()
if err != nil {
log.Fatalf("Error loading config file %q: %v", file, err)
}
app.logger, err = newLogger()
if err != nil {
log.Fatalf("Failed to get logger: %v", err)
}
}
func (app *Application) setupTelemetry(ballastSizeBytes uint64) {
app.logger.Info("Setting up own telemetry...")
err := AppTelemetry.init(app.asyncErrorChannel, ballastSizeBytes, app.logger)
if err != nil {
app.logger.Error("Failed to initialize telemetry", zap.Error(err))
os.Exit(1)
}
}
// runAndWaitForShutdownEvent waits for one of the shutdown events that can happen.
func (app *Application) runAndWaitForShutdownEvent() {
app.logger.Info("Everything is ready. Begin running and processing data.")
// Plug SIGTERM signal into a channel.
signalsChannel := make(chan os.Signal, 1)
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
// set the channel to stop testing.
app.stopTestChan = make(chan struct{})
// notify tests that it is ready.
close(app.readyChan)
select {
case err := <-app.asyncErrorChannel:
app.logger.Error("Asynchronous error received, terminating process", zap.Error(err))
case s := <-signalsChannel:
app.logger.Info("Received signal from OS", zap.String("signal", s.String()))
case <-app.stopTestChan:
app.logger.Info("Received stop test request")
}
}
func (app *Application) setupConfigurationComponents() {
// Load configuration.
app.logger.Info("Loading configuration...")
cfg, err := config.Load(app.v, app.factories, app.logger)
if err != nil {
log.Fatalf("Cannot load configuration: %v", err)
}
app.config = cfg
app.logger.Info("Applying configuration...")
if err := app.setupExtensions(); err != nil {
log.Fatalf("Cannot setup extensions: %v", err)
}
app.setupPipelines()
}
func (app *Application) setupExtensions() error {
for _, extName := range app.config.Service.Extensions {
extCfg, exists := app.config.Extensions[extName]
if !exists {
return fmt.Errorf("extension %q is not configured", extName)
}
factory, exists := app.factories.Extensions[extCfg.Type()]
if !exists {
return fmt.Errorf("extension factory for type %q is not configured", extCfg.Type())
}
ext, err := factory.CreateExtension(app.logger, extCfg)
if err != nil {
return fmt.Errorf("failed to create extension %q: %v", extName, err)
}
// Check if the factory really created the extension.
if ext == nil {
return fmt.Errorf("factory for %q produced a nil extension", extName)
}
if err := ext.Start(app); err != nil {
return fmt.Errorf("error starting extension %q: %v", extName, err)
}
app.extensions = append(app.extensions, ext)
}
return nil
}
func (app *Application) setupPipelines() {
// Pipeline is built backwards, starting from exporters, so that we create objects
// which are referenced before objects which reference them.
// First create exporters.
var err error
app.exporters, err = builder.NewExportersBuilder(app.logger, app.config, app.factories.Exporters).Build()
if err != nil {
log.Fatalf("Cannot build exporters: %v", err)
}
app.logger.Info("Starting exporters...")
err = app.exporters.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start exporters: %v", err)
}
// Create pipelines and their processors and plug exporters to the
// end of the pipelines.
app.builtPipelines, err = builder.NewPipelinesBuilder(app.logger, app.config, app.exporters, app.factories.Processors).Build()
if err != nil {
log.Fatalf("Cannot build pipelines: %v", err)
}
app.logger.Info("Starting processors...")
err = app.builtPipelines.StartProcessors(app.logger, app)
if err != nil {
log.Fatalf("Cannot start processors: %v", err)
}
// Create receivers and plug them into the start of the pipelines.
app.builtReceivers, err = builder.NewReceiversBuilder(app.logger, app.config, app.builtPipelines, app.factories.Receivers).Build()
if err != nil {
log.Fatalf("Cannot build receivers: %v", err)
}
app.logger.Info("Starting receivers...")
err = app.builtReceivers.StartAll(app.logger, app)
if err != nil |
}
func (app *Application) notifyPipelineReady() {
for i, ext := range app.extensions {
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.Ready(); err != nil {
log.Fatalf(
"Error notifying extension %q that the pipeline was started: %v",
app.config.Service.Extensions[i],
err,
)
}
}
}
}
func (app *Application) notifyPipelineNotReady() {
// Notify in reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.NotReady(); err != nil {
app.logger.Warn(
"Error notifying extension that the pipeline was shutdown",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
}
func (app *Application) shutdownPipelines() {
// Shutdown order is the reverse of building: first receivers, then flushing pipelines
// giving senders a chance to send all their data. This may take time, the allowed
// time should be part of configuration.
app.logger.Info("Stopping receivers...")
app.builtReceivers.StopAll()
app.logger.Info("Stopping processors...")
app.builtPipelines.ShutdownProcessors(app.logger)
app.logger.Info("Shutting down exporters...")
app.exporters.ShutdownAll()
}
func (app *Application) shutdownExtensions() {
// Shutdown in reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if err := ext.Shutdown(); err != nil {
app.logger.Warn(
"Error shutting down extension",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
func (app *Application) execute() {
app.logger.Info("Starting "+app.info.LongName+"...",
zap.String("Version", app.info.Version),
zap.String("GitHash", app.info.GitHash),
zap.Int("NumCPU", runtime.NumCPU()),
)
// Set memory ballast
ballast, ballastSizeBytes := app.createMemoryBallast()
app.asyncErrorChannel = make(chan error)
// Setup everything.
app.setupTelemetry(ballastSizeBytes)
app.setupConfigurationComponents()
app.notifyPipelineReady()
// Everything is ready, now run until an event requiring shutdown happens.
app.runAndWaitForShutdownEvent()
// Begin shutdown sequence.
runtime.KeepAlive(ballast)
app.logger.Info("Starting shutdown...")
app.notifyPipelineNotReady()
app.shutdownPipelines()
app.shutdownExtensions()
AppTelemetry.shutdown()
app.logger.Info("Shutdown complete.")
}
// Start starts the collector according to the command and configuration
// given by the user.
func (app *Application) Start() error {
return app.rootCmd.Execute()
}
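// createMemoryBallast optionally allocates a large, otherwise unused byte
// slice whose size comes from builder.MemBallastSize(). Keeping the ballast
// alive inflates the heap so the Go garbage collector runs less often,
// trading memory for lower GC overhead.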
func (app *Application) createMemoryBallast() ([]byte, uint64) {
ballastSizeMiB := builder.MemBallastSize()
if ballastSizeMiB > 0 {
ballastSizeBytes := uint64(ballastSizeMiB) * 1024 * 1024
ballast := make([]byte, ballastSizeBytes)
app.logger.Info("Using memory ballast", zap.Int("MiBs", ballastSizeMiB))
return ballast, ballastSizeBytes
}
return nil, 0
}
| {
log.Fatalf("Cannot start receivers: %v", err)
} | conditional_block |
service.go | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package service handles the command-line, configuration, and runs the
// OpenTelemetry Collector.
package service
import (
"context"
"flag"
"fmt"
"log"
"os"
"os/signal"
"runtime"
"syscall"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector/component"
"github.com/open-telemetry/opentelemetry-collector/config"
"github.com/open-telemetry/opentelemetry-collector/config/configcheck"
"github.com/open-telemetry/opentelemetry-collector/config/configmodels"
"github.com/open-telemetry/opentelemetry-collector/extension"
"github.com/open-telemetry/opentelemetry-collector/service/builder"
)
// Application represents a collector application
type Application struct {
info ApplicationStartInfo
rootCmd *cobra.Command
v *viper.Viper
logger *zap.Logger
exporters builder.Exporters
builtReceivers builder.Receivers
builtPipelines builder.BuiltPipelines
factories config.Factories
config *configmodels.Config
extensions []extension.ServiceExtension
// stopTestChan is used to terminate the application in end to end tests.
stopTestChan chan struct{}
// readyChan is used in tests to indicate that the application is ready.
readyChan chan struct{}
// asyncErrorChannel is used to signal a fatal error from any component.
asyncErrorChannel chan error
}
// Command returns Application's root command.
func (app *Application) Command() *cobra.Command {
return app.rootCmd
}
// ApplicationStartInfo is the information that is logged at the application start.
// This information can be overridden in custom builds.
type ApplicationStartInfo struct {
// Executable file name, e.g. "otelcol".
ExeName string
// Long name, used e.g. in the logs.
LongName string
// Version string.
Version string
|
// Context returns a context provided by the host to be used on the receiver
// operations.
func (app *Application) Context() context.Context {
// For now simply the background context.
return context.Background()
}
// New creates and returns a new instance of Application.
func New(
factories config.Factories,
appInfo ApplicationStartInfo,
) (*Application, error) {
if err := configcheck.ValidateConfigFromFactories(factories); err != nil {
return nil, err
}
app := &Application{
info: appInfo,
v: viper.New(),
readyChan: make(chan struct{}),
factories: factories,
}
rootCmd := &cobra.Command{
Use: appInfo.ExeName,
Long: appInfo.LongName,
Run: func(cmd *cobra.Command, args []string) {
app.init()
app.execute()
},
}
// TODO: coalesce this code and expose this information to other components.
flagSet := new(flag.FlagSet)
addFlagsFns := []func(*flag.FlagSet){
telemetryFlags,
builder.Flags,
loggerFlags,
}
for _, addFlags := range addFlagsFns {
addFlags(flagSet)
}
rootCmd.Flags().AddGoFlagSet(flagSet)
app.rootCmd = rootCmd
return app, nil
}
// ReportFatalError is used to report to the host that the receiver encountered
// a fatal error (i.e.: an error that the instance can't recover from) after
// its start function has already returned.
func (app *Application) ReportFatalError(err error) {
app.asyncErrorChannel <- err
}
func (app *Application) init() {
file := builder.GetConfigFile()
if file == "" {
log.Fatalf("Config file not specified")
}
app.v.SetConfigFile(file)
err := app.v.ReadInConfig()
if err != nil {
log.Fatalf("Error loading config file %q: %v", file, err)
}
app.logger, err = newLogger()
if err != nil {
log.Fatalf("Failed to get logger: %v", err)
}
}
func (app *Application) setupTelemetry(ballastSizeBytes uint64) {
app.logger.Info("Setting up own telemetry...")
err := AppTelemetry.init(app.asyncErrorChannel, ballastSizeBytes, app.logger)
if err != nil {
app.logger.Error("Failed to initialize telemetry", zap.Error(err))
os.Exit(1)
}
}
// runAndWaitForShutdownEvent waits for one of the shutdown events that can happen.
func (app *Application) runAndWaitForShutdownEvent() {
app.logger.Info("Everything is ready. Begin running and processing data.")
// Plug SIGTERM signal into a channel.
signalsChannel := make(chan os.Signal, 1)
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
// set the channel to stop testing.
app.stopTestChan = make(chan struct{})
// notify tests that it is ready.
close(app.readyChan)
select {
case err := <-app.asyncErrorChannel:
app.logger.Error("Asynchronous error received, terminating process", zap.Error(err))
case s := <-signalsChannel:
app.logger.Info("Received signal from OS", zap.String("signal", s.String()))
case <-app.stopTestChan:
app.logger.Info("Received stop test request")
}
}
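// setupConfigurationComponents loads the collector configuration from the
// file given on the command line, then starts extensions and pipelines in
// that order.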
func (app *Application) setupConfigurationComponents() {
// Load configuration.
app.logger.Info("Loading configuration...")
cfg, err := config.Load(app.v, app.factories, app.logger)
if err != nil {
log.Fatalf("Cannot load configuration: %v", err)
}
app.config = cfg
app.logger.Info("Applying configuration...")
if err := app.setupExtensions(); err != nil {
log.Fatalf("Cannot setup extensions: %v", err)
}
app.setupPipelines()
}
func (app *Application) setupExtensions() error {
for _, extName := range app.config.Service.Extensions {
extCfg, exists := app.config.Extensions[extName]
if !exists {
return fmt.Errorf("extension %q is not configured", extName)
}
factory, exists := app.factories.Extensions[extCfg.Type()]
if !exists {
return fmt.Errorf("extension factory for type %q is not configured", extCfg.Type())
}
ext, err := factory.CreateExtension(app.logger, extCfg)
if err != nil {
return fmt.Errorf("failed to create extension %q: %v", extName, err)
}
// Check if the factory really created the extension.
if ext == nil {
return fmt.Errorf("factory for %q produced a nil extension", extName)
}
if err := ext.Start(app); err != nil {
return fmt.Errorf("error starting extension %q: %v", extName, err)
}
app.extensions = append(app.extensions, ext)
}
return nil
}
func (app *Application) setupPipelines() {
// Pipeline is built backwards, starting from exporters, so that we create objects
// which are referenced before objects which reference them.
// First create exporters.
var err error
app.exporters, err = builder.NewExportersBuilder(app.logger, app.config, app.factories.Exporters).Build()
if err != nil {
log.Fatalf("Cannot build exporters: %v", err)
}
app.logger.Info("Starting exporters...")
err = app.exporters.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start exporters: %v", err)
}
// Create pipelines and their processors and plug exporters to the
// end of the pipelines.
app.builtPipelines, err = builder.NewPipelinesBuilder(app.logger, app.config, app.exporters, app.factories.Processors).Build()
if err != nil {
log.Fatalf("Cannot build pipelines: %v", err)
}
app.logger.Info("Starting processors...")
err = app.builtPipelines.StartProcessors(app.logger, app)
if err != nil {
log.Fatalf("Cannot start processors: %v", err)
}
// Create receivers and plug them into the start of the pipelines.
app.builtReceivers, err = builder.NewReceiversBuilder(app.logger, app.config, app.builtPipelines, app.factories.Receivers).Build()
if err != nil {
log.Fatalf("Cannot build receivers: %v", err)
}
app.logger.Info("Starting receivers...")
err = app.builtReceivers.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start receivers: %v", err)
}
}
func (app *Application) notifyPipelineReady() {
for i, ext := range app.extensions {
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.Ready(); err != nil {
log.Fatalf(
"Error notifying extension %q that the pipeline was started: %v",
app.config.Service.Extensions[i],
err,
)
}
}
}
}
func (app *Application) notifyPipelineNotReady() {
// Notify in reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.NotReady(); err != nil {
app.logger.Warn(
"Error notifying extension that the pipeline was shutdown",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
}
func (app *Application) shutdownPipelines() {
// Shutdown order is the reverse of building: first receivers, then flushing pipelines
// giving senders a chance to send all their data. This may take time, the allowed
// time should be part of configuration.
app.logger.Info("Stopping receivers...")
app.builtReceivers.StopAll()
app.logger.Info("Stopping processors...")
app.builtPipelines.ShutdownProcessors(app.logger)
app.logger.Info("Shutting down exporters...")
app.exporters.ShutdownAll()
}
func (app *Application) shutdownExtensions() {
// Shutdown in reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if err := ext.Shutdown(); err != nil {
app.logger.Warn(
"Error shutting down extension",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
func (app *Application) execute() {
app.logger.Info("Starting "+app.info.LongName+"...",
zap.String("Version", app.info.Version),
zap.String("GitHash", app.info.GitHash),
zap.Int("NumCPU", runtime.NumCPU()),
)
// Set memory ballast
ballast, ballastSizeBytes := app.createMemoryBallast()
app.asyncErrorChannel = make(chan error)
// Setup everything.
app.setupTelemetry(ballastSizeBytes)
app.setupConfigurationComponents()
app.notifyPipelineReady()
// Everything is ready, now run until an event requiring shutdown happens.
app.runAndWaitForShutdownEvent()
// Begin shutdown sequence.
runtime.KeepAlive(ballast)
app.logger.Info("Starting shutdown...")
app.notifyPipelineNotReady()
app.shutdownPipelines()
app.shutdownExtensions()
AppTelemetry.shutdown()
app.logger.Info("Shutdown complete.")
}
// Start starts the collector according to the command and configuration
// given by the user.
func (app *Application) Start() error {
return app.rootCmd.Execute()
}
func (app *Application) createMemoryBallast() ([]byte, uint64) {
ballastSizeMiB := builder.MemBallastSize()
if ballastSizeMiB > 0 {
ballastSizeBytes := uint64(ballastSizeMiB) * 1024 * 1024
ballast := make([]byte, ballastSizeBytes)
app.logger.Info("Using memory ballast", zap.Int("MiBs", ballastSizeMiB))
return ballast, ballastSizeBytes
}
return nil, 0
} | // Git hash of the source code.
GitHash string
}
var _ component.Host = (*Application)(nil) | random_line_split |
service.go | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package service handles the command-line, configuration, and runs the
// OpenTelemetry Collector.
package service
import (
"context"
"flag"
"fmt"
"log"
"os"
"os/signal"
"runtime"
"syscall"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector/component"
"github.com/open-telemetry/opentelemetry-collector/config"
"github.com/open-telemetry/opentelemetry-collector/config/configcheck"
"github.com/open-telemetry/opentelemetry-collector/config/configmodels"
"github.com/open-telemetry/opentelemetry-collector/extension"
"github.com/open-telemetry/opentelemetry-collector/service/builder"
)
// Application represents a collector application
type Application struct {
info ApplicationStartInfo
rootCmd *cobra.Command
v *viper.Viper
logger *zap.Logger
exporters builder.Exporters
builtReceivers builder.Receivers
builtPipelines builder.BuiltPipelines
factories config.Factories
config *configmodels.Config
extensions []extension.ServiceExtension
// stopTestChan is used to terminate the application in end to end tests.
stopTestChan chan struct{}
// readyChan is used in tests to indicate that the application is ready.
readyChan chan struct{}
// asyncErrorChannel is used to signal a fatal error from any component.
asyncErrorChannel chan error
}
// Command returns Application's root command.
func (app *Application) Command() *cobra.Command {
return app.rootCmd
}
// ApplicationStartInfo is the information that is logged at the application start.
// This information can be overridden in custom builds.
type ApplicationStartInfo struct {
// Executable file name, e.g. "otelcol".
ExeName string
// Long name, used e.g. in the logs.
LongName string
// Version string.
Version string
// Git hash of the source code.
GitHash string
}
var _ component.Host = (*Application)(nil)
// Context returns a context provided by the host to be used on the receiver
// operations.
func (app *Application) Context() context.Context {
// For now simply the background context.
return context.Background()
}
// New creates and returns a new instance of Application.
func New(
factories config.Factories,
appInfo ApplicationStartInfo,
) (*Application, error) {
if err := configcheck.ValidateConfigFromFactories(factories); err != nil {
return nil, err
}
app := &Application{
info: appInfo,
v: viper.New(),
readyChan: make(chan struct{}),
factories: factories,
}
rootCmd := &cobra.Command{
Use: appInfo.ExeName,
Long: appInfo.LongName,
Run: func(cmd *cobra.Command, args []string) {
app.init()
app.execute()
},
}
// TODO: coalesce this code and expose this information to other components.
flagSet := new(flag.FlagSet)
addFlagsFns := []func(*flag.FlagSet){
telemetryFlags,
builder.Flags,
loggerFlags,
}
for _, addFlags := range addFlagsFns {
addFlags(flagSet)
}
rootCmd.Flags().AddGoFlagSet(flagSet)
app.rootCmd = rootCmd
return app, nil
}
// ReportFatalError is used to report to the host that the receiver encountered
// a fatal error (i.e.: an error that the instance can't recover from) after
// its start function has already returned.
func (app *Application) ReportFatalError(err error) {
app.asyncErrorChannel <- err
}
func (app *Application) init() {
file := builder.GetConfigFile()
if file == "" {
log.Fatalf("Config file not specified")
}
app.v.SetConfigFile(file)
err := app.v.ReadInConfig()
if err != nil {
log.Fatalf("Error loading config file %q: %v", file, err)
}
app.logger, err = newLogger()
if err != nil {
log.Fatalf("Failed to get logger: %v", err)
}
}
func (app *Application) setupTelemetry(ballastSizeBytes uint64) {
app.logger.Info("Setting up own telemetry...")
err := AppTelemetry.init(app.asyncErrorChannel, ballastSizeBytes, app.logger)
if err != nil {
app.logger.Error("Failed to initialize telemetry", zap.Error(err))
os.Exit(1)
}
}
// runAndWaitForShutdownEvent waits for one of the shutdown events that can happen.
func (app *Application) runAndWaitForShutdownEvent() {
app.logger.Info("Everything is ready. Begin running and processing data.")
// Plug SIGTERM signal into a channel.
signalsChannel := make(chan os.Signal, 1)
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
// set the channel to stop testing.
app.stopTestChan = make(chan struct{})
// notify tests that it is ready.
close(app.readyChan)
select {
case err := <-app.asyncErrorChannel:
app.logger.Error("Asynchronous error received, terminating process", zap.Error(err))
case s := <-signalsChannel:
app.logger.Info("Received signal from OS", zap.String("signal", s.String()))
case <-app.stopTestChan:
app.logger.Info("Received stop test request")
}
}
func (app *Application) setupConfigurationComponents() {
// Load configuration.
app.logger.Info("Loading configuration...")
cfg, err := config.Load(app.v, app.factories, app.logger)
if err != nil {
log.Fatalf("Cannot load configuration: %v", err)
}
app.config = cfg
app.logger.Info("Applying configuration...")
if err := app.setupExtensions(); err != nil {
log.Fatalf("Cannot setup extensions: %v", err)
}
app.setupPipelines()
}
func (app *Application) | () error {
for _, extName := range app.config.Service.Extensions {
extCfg, exists := app.config.Extensions[extName]
if !exists {
return fmt.Errorf("extension %q is not configured", extName)
}
factory, exists := app.factories.Extensions[extCfg.Type()]
if !exists {
return fmt.Errorf("extension factory for type %q is not configured", extCfg.Type())
}
ext, err := factory.CreateExtension(app.logger, extCfg)
if err != nil {
return fmt.Errorf("failed to create extension %q: %v", extName, err)
}
// Check if the factory really created the extension.
if ext == nil {
return fmt.Errorf("factory for %q produced a nil extension", extName)
}
if err := ext.Start(app); err != nil {
return fmt.Errorf("error starting extension %q: %v", extName, err)
}
app.extensions = append(app.extensions, ext)
}
return nil
}
func (app *Application) setupPipelines() {
// Pipeline is built backwards, starting from exporters, so that we create objects
// which are referenced before objects which reference them.
// First create exporters.
var err error
app.exporters, err = builder.NewExportersBuilder(app.logger, app.config, app.factories.Exporters).Build()
if err != nil {
log.Fatalf("Cannot build exporters: %v", err)
}
app.logger.Info("Starting exporters...")
err = app.exporters.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start exporters: %v", err)
}
// Create pipelines and their processors and plug exporters to the
// end of the pipelines.
app.builtPipelines, err = builder.NewPipelinesBuilder(app.logger, app.config, app.exporters, app.factories.Processors).Build()
if err != nil {
log.Fatalf("Cannot build pipelines: %v", err)
}
app.logger.Info("Starting processors...")
err = app.builtPipelines.StartProcessors(app.logger, app)
if err != nil {
log.Fatalf("Cannot start processors: %v", err)
}
// Create receivers and plug them into the start of the pipelines.
app.builtReceivers, err = builder.NewReceiversBuilder(app.logger, app.config, app.builtPipelines, app.factories.Receivers).Build()
if err != nil {
log.Fatalf("Cannot build receivers: %v", err)
}
app.logger.Info("Starting receivers...")
err = app.builtReceivers.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start receivers: %v", err)
}
}
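// notifyPipelineReady informs every extension that implements
// extension.PipelineWatcher that the pipelines are built and ready to
// receive data; any failure here is treated as fatal.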
func (app *Application) notifyPipelineReady() {
for i, ext := range app.extensions {
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.Ready(); err != nil {
log.Fatalf(
"Error notifying extension %q that the pipeline was started: %v",
app.config.Service.Extensions[i],
err,
)
}
}
}
}
func (app *Application) notifyPipelineNotReady() {
// Notify in reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.NotReady(); err != nil {
app.logger.Warn(
"Error notifying extension that the pipeline was shutdown",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
}
func (app *Application) shutdownPipelines() {
// Shutdown order is the reverse of building: first receivers, then flushing pipelines
// giving senders a chance to send all their data. This may take time, the allowed
// time should be part of configuration.
app.logger.Info("Stopping receivers...")
app.builtReceivers.StopAll()
app.logger.Info("Stopping processors...")
app.builtPipelines.ShutdownProcessors(app.logger)
app.logger.Info("Shutting down exporters...")
app.exporters.ShutdownAll()
}
func (app *Application) shutdownExtensions() {
// Shutdown in reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if err := ext.Shutdown(); err != nil {
app.logger.Warn(
"Error shutting down extension",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
func (app *Application) execute() {
app.logger.Info("Starting "+app.info.LongName+"...",
zap.String("Version", app.info.Version),
zap.String("GitHash", app.info.GitHash),
zap.Int("NumCPU", runtime.NumCPU()),
)
// Set memory ballast
ballast, ballastSizeBytes := app.createMemoryBallast()
app.asyncErrorChannel = make(chan error)
// Setup everything.
app.setupTelemetry(ballastSizeBytes)
app.setupConfigurationComponents()
app.notifyPipelineReady()
// Everything is ready, now run until an event requiring shutdown happens.
app.runAndWaitForShutdownEvent()
// Begin shutdown sequence.
runtime.KeepAlive(ballast)
app.logger.Info("Starting shutdown...")
app.notifyPipelineNotReady()
app.shutdownPipelines()
app.shutdownExtensions()
AppTelemetry.shutdown()
app.logger.Info("Shutdown complete.")
}
// Start starts the collector according to the command and configuration
// given by the user.
func (app *Application) Start() error {
return app.rootCmd.Execute()
}
func (app *Application) createMemoryBallast() ([]byte, uint64) {
ballastSizeMiB := builder.MemBallastSize()
if ballastSizeMiB > 0 {
ballastSizeBytes := uint64(ballastSizeMiB) * 1024 * 1024
ballast := make([]byte, ballastSizeBytes)
app.logger.Info("Using memory ballast", zap.Int("MiBs", ballastSizeMiB))
return ballast, ballastSizeBytes
}
return nil, 0
}
| setupExtensions | identifier_name |
service.go | // Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package service handles the command-line, configuration, and runs the
// OpenTelemetry Collector.
package service
import (
"context"
"flag"
"fmt"
"log"
"os"
"os/signal"
"runtime"
"syscall"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector/component"
"github.com/open-telemetry/opentelemetry-collector/config"
"github.com/open-telemetry/opentelemetry-collector/config/configcheck"
"github.com/open-telemetry/opentelemetry-collector/config/configmodels"
"github.com/open-telemetry/opentelemetry-collector/extension"
"github.com/open-telemetry/opentelemetry-collector/service/builder"
)
// Application represents a collector application
type Application struct {
info ApplicationStartInfo
rootCmd *cobra.Command
v *viper.Viper
logger *zap.Logger
exporters builder.Exporters
builtReceivers builder.Receivers
builtPipelines builder.BuiltPipelines
factories config.Factories
config *configmodels.Config
extensions []extension.ServiceExtension
// stopTestChan is used to terminate the application in end to end tests.
stopTestChan chan struct{}
// readyChan is used in tests to indicate that the application is ready.
readyChan chan struct{}
// asyncErrorChannel is used to signal a fatal error from any component.
asyncErrorChannel chan error
}
// Command returns Application's root command.
func (app *Application) Command() *cobra.Command {
return app.rootCmd
}
// ApplicationStartInfo is the information that is logged at the application start.
// This information can be overridden in custom builds.
type ApplicationStartInfo struct {
// Executable file name, e.g. "otelcol".
ExeName string
// Long name, used e.g. in the logs.
LongName string
// Version string.
Version string
// Git hash of the source code.
GitHash string
}
var _ component.Host = (*Application)(nil)
// Context returns a context provided by the host to be used on the receiver
// operations.
func (app *Application) Context() context.Context {
// For now simply the background context.
return context.Background()
}
// New creates and returns a new instance of Application.
func New(
factories config.Factories,
appInfo ApplicationStartInfo,
) (*Application, error) {
if err := configcheck.ValidateConfigFromFactories(factories); err != nil {
return nil, err
}
app := &Application{
info: appInfo,
v: viper.New(),
readyChan: make(chan struct{}),
factories: factories,
}
rootCmd := &cobra.Command{
Use: appInfo.ExeName,
Long: appInfo.LongName,
Run: func(cmd *cobra.Command, args []string) {
app.init()
app.execute()
},
}
// TODO: coalesce this code and expose this information to other components.
flagSet := new(flag.FlagSet)
addFlagsFns := []func(*flag.FlagSet){
telemetryFlags,
builder.Flags,
loggerFlags,
}
for _, addFlags := range addFlagsFns {
addFlags(flagSet)
}
rootCmd.Flags().AddGoFlagSet(flagSet)
app.rootCmd = rootCmd
return app, nil
}
// ReportFatalError is used to report to the host that the receiver encountered
// a fatal error (i.e.: an error that the instance can't recover from) after
// its start function has already returned.
func (app *Application) ReportFatalError(err error) {
app.asyncErrorChannel <- err
}
func (app *Application) init() {
file := builder.GetConfigFile()
if file == "" {
log.Fatalf("Config file not specified")
}
app.v.SetConfigFile(file)
err := app.v.ReadInConfig()
if err != nil {
log.Fatalf("Error loading config file %q: %v", file, err)
}
app.logger, err = newLogger()
if err != nil {
log.Fatalf("Failed to get logger: %v", err)
}
}
func (app *Application) setupTelemetry(ballastSizeBytes uint64) {
app.logger.Info("Setting up own telemetry...")
err := AppTelemetry.init(app.asyncErrorChannel, ballastSizeBytes, app.logger)
if err != nil {
app.logger.Error("Failed to initialize telemetry", zap.Error(err))
os.Exit(1)
}
}
// runAndWaitForShutdownEvent waits for one of the shutdown events that can happen.
func (app *Application) runAndWaitForShutdownEvent() {
app.logger.Info("Everything is ready. Begin running and processing data.")
// Plug SIGTERM signal into a channel.
signalsChannel := make(chan os.Signal, 1)
signal.Notify(signalsChannel, os.Interrupt, syscall.SIGTERM)
// set the channel to stop testing.
app.stopTestChan = make(chan struct{})
// notify tests that it is ready.
close(app.readyChan)
select {
case err := <-app.asyncErrorChannel:
app.logger.Error("Asynchronous error received, terminating process", zap.Error(err))
case s := <-signalsChannel:
app.logger.Info("Received signal from OS", zap.String("signal", s.String()))
case <-app.stopTestChan:
app.logger.Info("Received stop test request")
}
}
func (app *Application) setupConfigurationComponents() {
// Load configuration.
app.logger.Info("Loading configuration...")
cfg, err := config.Load(app.v, app.factories, app.logger)
if err != nil {
log.Fatalf("Cannot load configuration: %v", err)
}
app.config = cfg
app.logger.Info("Applying configuration...")
if err := app.setupExtensions(); err != nil {
log.Fatalf("Cannot setup extensions: %v", err)
}
app.setupPipelines()
}
func (app *Application) setupExtensions() error {
for _, extName := range app.config.Service.Extensions {
extCfg, exists := app.config.Extensions[extName]
if !exists {
return fmt.Errorf("extension %q is not configured", extName)
}
factory, exists := app.factories.Extensions[extCfg.Type()]
if !exists {
return fmt.Errorf("extension factory for type %q is not configured", extCfg.Type())
}
ext, err := factory.CreateExtension(app.logger, extCfg)
if err != nil {
return fmt.Errorf("failed to create extension %q: %v", extName, err)
}
// Check if the factory really created the extension.
if ext == nil {
return fmt.Errorf("factory for %q produced a nil extension", extName)
}
if err := ext.Start(app); err != nil {
return fmt.Errorf("error starting extension %q: %v", extName, err)
}
app.extensions = append(app.extensions, ext)
}
return nil
}
func (app *Application) setupPipelines() |
func (app *Application) notifyPipelineReady() {
for i, ext := range app.extensions {
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.Ready(); err != nil {
log.Fatalf(
"Error notifying extension %q that the pipeline was started: %v",
app.config.Service.Extensions[i],
err,
)
}
}
}
}
func (app *Application) notifyPipelineNotReady() {
// Notify in reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if pw, ok := ext.(extension.PipelineWatcher); ok {
if err := pw.NotReady(); err != nil {
app.logger.Warn(
"Error notifying extension that the pipeline was shutdown",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
}
func (app *Application) shutdownPipelines() {
// Shutdown order is the reverse of building: first receivers, then flushing pipelines
// giving senders a chance to send all their data. This may take time, the allowed
// time should be part of configuration.
app.logger.Info("Stopping receivers...")
app.builtReceivers.StopAll()
app.logger.Info("Stopping processors...")
app.builtPipelines.ShutdownProcessors(app.logger)
app.logger.Info("Shutting down exporters...")
app.exporters.ShutdownAll()
}
func (app *Application) shutdownExtensions() {
// Shut down in reverse order.
for i := len(app.extensions) - 1; i >= 0; i-- {
ext := app.extensions[i]
if err := ext.Shutdown(); err != nil {
app.logger.Warn(
"Error shutting down extension",
zap.Error(err),
zap.String("extension", app.config.Service.Extensions[i]),
)
}
}
}
func (app *Application) execute() {
app.logger.Info("Starting "+app.info.LongName+"...",
zap.String("Version", app.info.Version),
zap.String("GitHash", app.info.GitHash),
zap.Int("NumCPU", runtime.NumCPU()),
)
// Set memory ballast
ballast, ballastSizeBytes := app.createMemoryBallast()
app.asyncErrorChannel = make(chan error)
// Setup everything.
app.setupTelemetry(ballastSizeBytes)
app.setupConfigurationComponents()
app.notifyPipelineReady()
// Everything is ready, now run until an event requiring shutdown happens.
app.runAndWaitForShutdownEvent()
// Begin shutdown sequence.
runtime.KeepAlive(ballast)
app.logger.Info("Starting shutdown...")
app.notifyPipelineNotReady()
app.shutdownPipelines()
app.shutdownExtensions()
AppTelemetry.shutdown()
app.logger.Info("Shutdown complete.")
}
// Start starts the collector according to the command and configuration
// given by the user.
func (app *Application) Start() error {
return app.rootCmd.Execute()
}
func (app *Application) createMemoryBallast() ([]byte, uint64) {
ballastSizeMiB := builder.MemBallastSize()
if ballastSizeMiB > 0 {
ballastSizeBytes := uint64(ballastSizeMiB) * 1024 * 1024
ballast := make([]byte, ballastSizeBytes)
app.logger.Info("Using memory ballast", zap.Int("MiBs", ballastSizeMiB))
return ballast, ballastSizeBytes
}
return nil, 0
}
| {
// Pipeline is built backwards, starting from exporters, so that we create objects
// which are referenced before objects which reference them.
// First create exporters.
var err error
app.exporters, err = builder.NewExportersBuilder(app.logger, app.config, app.factories.Exporters).Build()
if err != nil {
log.Fatalf("Cannot build exporters: %v", err)
}
app.logger.Info("Starting exporters...")
err = app.exporters.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start exporters: %v", err)
}
// Create pipelines and their processors and plug exporters to the
// end of the pipelines.
app.builtPipelines, err = builder.NewPipelinesBuilder(app.logger, app.config, app.exporters, app.factories.Processors).Build()
if err != nil {
log.Fatalf("Cannot build pipelines: %v", err)
}
app.logger.Info("Starting processors...")
err = app.builtPipelines.StartProcessors(app.logger, app)
if err != nil {
log.Fatalf("Cannot start processors: %v", err)
}
// Create receivers and plug them into the start of the pipelines.
app.builtReceivers, err = builder.NewReceiversBuilder(app.logger, app.config, app.builtPipelines, app.factories.Receivers).Build()
if err != nil {
log.Fatalf("Cannot build receivers: %v", err)
}
app.logger.Info("Starting receivers...")
err = app.builtReceivers.StartAll(app.logger, app)
if err != nil {
log.Fatalf("Cannot start receivers: %v", err)
}
} | identifier_body |
NLP_V3_SVD_XGBM_HyperParamCV.py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data=pd.read_excel(r"C:\ML&AI\TextClassification\RAW DATA(Main Update)#2 (2019-07-02).xlsx", sheet_name="All",skiprows=5)
#data.columns.tolist()
#countries_english=['RLTD','RAME','RIND','RPAC','RSAF','RKOR']
filtered_data = data[(data["Filtering\n(Credit/Age/Class)"]=="No") & (data["Work Type"]=="Visit") & (data["Case Type (Related Case)"]=="Complaint") & (data["Country"].str.contains("RLTD|RAME|RIND|RPAC|RSAF|RKOR"))]
filtered_data.drop_duplicates(subset="SWO Number")
#reset all indexes as we dropped rows above
filtered_data = filtered_data.reset_index(drop=True)
#concatenate 3 columns of text
filtered_data['Text']= filtered_data['Case Description'] +' ' + filtered_data['Investigation'] + ' ' + filtered_data['Corrective Action']
# final columns for classification
dataframe=filtered_data.loc[:,['SWO Number','Text','RMED FaultCode L1(New)']]
#,'RMED FaultCode L2(New)', 'RMED FaultCode L3(New)', 'RMED FaultCode L4(New)',]
#If a target class accounts for less than 5% of the rows, merge it into the 'Others' class
classif = dataframe['RMED FaultCode L1(New)'].value_counts(normalize=True)
idx = classif[classif.lt(0.05)].index
dataframe.loc[dataframe['RMED FaultCode L1(New)'].isin(idx),'RMED FaultCode L1(New)'] = 'Others'
#prediction data - save the rows where tgt is blank from dataframe
pred_X_Y = dataframe.loc[dataframe['RMED FaultCode L1(New)'].isnull(), ['SWO Number','Text','RMED FaultCode L1(New)']]
#keep only the rows where neither the target nor the text is blank
dataframe = dataframe.loc[dataframe['RMED FaultCode L1(New)'].notnull(), ['SWO Number','Text','RMED FaultCode L1(New)']]
dataframe = dataframe.loc[dataframe['Text'].notnull(), ['SWO Number','Text','RMED FaultCode L1(New)']]
dataframe = dataframe.reset_index(drop=True)
dataframe['RMED FaultCode L1(New)'].value_counts()
#Multi class NLP Classification
#create a column where each class has a unique id called category id
dataframe['category_id'] = dataframe['RMED FaultCode L1(New)'].factorize()[0]
category_id_dataframe = dataframe[['RMED FaultCode L1(New)', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_dataframe.values)
id_to_category = dict(category_id_dataframe[['category_id', 'RMED FaultCode L1(New)']].values)
dataframe.head()
x = dataframe.Text
y = dataframe['RMED FaultCode L1(New)']
from sklearn.model_selection import train_test_split
SEED = 2000
x_train, x_validation_and_test, y_train, y_validation_and_test = train_test_split(x, y, test_size=.02, random_state=SEED)
x_validation, x_test, y_validation, y_test = train_test_split(x_validation_and_test, y_validation_and_test, test_size=.5, random_state=SEED)
#######################################################
#https://bbengfort.github.io/tutorials/2016/05/19/text-classification-nltk-sckit-learn.html
#The difference is that a stem might not be an actual word, whereas a lemma is an actual language word.
#Lemmatization uses the WordNet corpus plus a stop-word corpus to produce the lemma, which makes it slower than stemming (a quick stemmer sanity check follows the tokenizer below).
import re
from nltk.stem.porter import PorterStemmer
porter_stemmer = PorterStemmer()
def stemming_tokenizer(str_input):
words = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower().split()
words = [porter_stemmer.stem(word) for word in words]
return words
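# Quick sanity check of the stemmer described above (illustrative only, safe to remove):
# inflected forms should collapse to a single stem, which keeps the TF-IDF vocabulary compact.
print([porter_stemmer.stem(w) for w in ("clean", "cleaning", "cleaned")])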
from sklearn.feature_extraction.text import TfidfVectorizer
#min_df = When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold.
#norm='l2' = The cosine similarity between two vectors is their dot product when l2 norm has been applied.
tfidf = TfidfVectorizer(sublinear_tf=True, norm='l2', encoding='latin-1', ngram_range=(1,1),stop_words='english',token_pattern=r'(?u)\b[A-Za-z]+\b', tokenizer=stemming_tokenizer)
tfidf.fit(x_train)
# encode document
xtrain_tfidf = tfidf.transform(x_train).toarray()
# summarize encoded vector
print(xtrain_tfidf.shape)
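# Illustrative check of the norm='l2' setting mentioned above (safe to remove):
# every non-empty TF-IDF row should have unit Euclidean length after normalisation.
print(np.linalg.norm(xtrain_tfidf[0]))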
xvalid_tfidf = tfidf.transform(x_validation).toarray()
xtest_tfidf = tfidf.transform(x_test).toarray()
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(xtrain_tfidf, y_train)
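# Illustrative check (safe to remove): after SMOTE every class should be resampled
# up to the size of the original majority class.
print(pd.Series(y_train_res).value_counts())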
##############################################################################
#from sklearn.linear_model import LogisticRegression
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.svm import LinearSVC
#from sklearn.naive_bayes import MultinomialNB
#from sklearn.model_selection import cross_val_score
#models = [
# RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
# LinearSVC(),
# MultinomialNB(),
# LogisticRegression(random_state=0),
#]
#CV = 5
#cv_df = pd.DataFrame(index=range(CV * len(models)))
#entries = []
#for model in models:
# model_name = model.__class__.__name__
# accuracies = cross_val_score(model, X_train_res, y_train_res, scoring='accuracy', cv=CV)
# for fold_idx, accuracy in enumerate(accuracies):
# entries.append((model_name, fold_idx, accuracy))
#cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
#
#
| #sns.boxplot(x='model_name', y='accuracy', data=cv_df)
#sns.stripplot(x='model_name', y='accuracy', data=cv_df,size=8, jitter=True, edgecolor="gray", linewidth=2)
#plt.show()
#
#cv_df.groupby('model_name').accuracy.mean()
#
##continue with the best model further
## may be due to imbalance class - balance it further
## confusion matrix and heat map to see what is predicted incorrectly
## major of the predictions end up on the diagonal (predicted label = actual label)
#from sklearn.model_selection import train_test_split
#model = LinearSVC()
##X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, dataframe.index, test_size=0.20, random_state=0)
#model.fit(X_train_res, y_train_res)
#y_pred = model.predict(xvalid_tfidf)
#from sklearn.metrics import confusion_matrix
#conf_mat = confusion_matrix(y_validation, y_pred)
#fig, ax = plt.subplots(figsize=(6,6))
#sns.heatmap(conf_mat, annot=True, fmt='d',xticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values, yticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values)
#plt.ylabel('Actual')
#plt.xlabel('Predicted')
#plt.show()
##there are misclassifications, and it is important to see what caused it:
#from IPython.display import display
#for predicted in category_id_dataframe.category_id:
# for actual in category_id_dataframe.category_id:
# if predicted != actual and conf_mat[actual, predicted] >= 5:
# print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted]))
# display(dataframe.loc[indices_test[(y_test == actual) & (y_pred == predicted)]][['RMED FaultCode L1(New)', 'Text']])
# print('')
#
##check the correlated unigram & bigrams in each target classification
#model.fit(features, labels)
#N = 10
#for dataframe['RMED FaultCode L1(New)'], category_id in sorted(category_to_id.items()):
# indices = np.argsort(model.coef_[category_id])
# feature_names = np.array(tfidf.get_feature_names())[indices]
# unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N]
# bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N]
## print("# '{}':".format(dataframe['RMED FaultCode L1(New)']))
# print(category_id)
# print(" . Top unigrams:\n . {}".format('\n . '.join(unigrams)))
# print(" . Top bigrams:\n . {}".format('\n . '.join(bigrams)))
#from sklearn import metrics
##print(metrics.classification_report(y_test, y_pred, target_names=dataframe['RMED FaultCode L1(New)'].unique()))
#from sklearn.metrics import confusion_matrix
#cm = confusion_matrix(y_validation, y_pred)
#acc2=metrics.accuracy_score(y_validation,y_pred)
###########################################
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components=200)
svd.fit(X_train_res)
svd_X_train_res = svd.transform(X_train_res)
svd_xvalid_tfidf = svd.transform(xvalid_tfidf)
svd_xtest_tfidf = svd.transform(xtest_tfidf)
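# Illustrative diagnostic (safe to remove): fraction of the TF-IDF variance retained
# by the 200 SVD components; a low value suggests increasing n_components.
print(svd.explained_variance_ratio_.sum())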
####################### conda install -c anaconda py-xgboost ###########################
#########################################################################
# tuning hyper parameters, test with CV = 5
########################################################################
# from sklearn.grid_search import GridSearchCV
# Number of trees, tree depth and the learning rate are the most crucial parameters.
# n_estimators captures the number of trees that we add to the model. A high number of trees can be computationally expensive
# max_depth bounds the maximum depth of the tree
# The square root of features is usually a good starting point
# Subsample sets the fraction of samples to be used for fitting the individual base learners. Values lower than 1 generally lead to a reduction of variance and an increase in bias.
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
p_test3 = {'learning_rate':[0.15,0.1,0.05,0.01,0.005,0.001], 'n_estimators':[100,250,500,750,1000,1250,1500,1750]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(max_depth=4, min_samples_split=2, min_samples_leaf=1, subsample=1,max_features='sqrt', random_state=10),param_grid = p_test3, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'learning_rate': 0.1, 'n_estimators': 1250}, 0.9279073046083356)
# ({'learning_rate': 0.15, 'n_estimators': 1750}, 0.5295885100008811)
p_test2 = {'max_depth':[2,3,4,5,6,7] }
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.01,n_estimators=1500, min_samples_split=2, min_samples_leaf=1, subsample=1,max_features='sqrt', random_state=10),param_grid = p_test2, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'max_depth': 7}, 0.935271830117191)
p_test4 = {'min_samples_split':[2,4,6,8,10,20,40,60,100], 'min_samples_leaf':[1,3,5,7,9]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.1, n_estimators=1250,max_depth=7, subsample=1,max_features='sqrt', random_state=10), param_grid = p_test4, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'min_samples_leaf': 9, 'min_samples_split': 40}, 0.9356181161335801)
p_test5 = {'max_features':[6,8,9,10,11,12,13,14,15]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.1, n_estimators=1250,max_depth=7, min_samples_split=40, min_samples_leaf=9, subsample=1, random_state=10), param_grid = p_test5, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'max_features': 8}, 0.9393840867036743)
p_test6= {'subsample':[0.7,0.75,0.8,0.85,0.9,0.95,1]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.1, n_estimators=1250,max_depth=7, min_samples_split=40, min_samples_leaf=9,max_features=8 , random_state=10), param_grid = p_test6, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'subsample': 0.85}, 0.9375046259582343)
#########################################################################
# Upon tuning hyper parameters, apply the same to the model training and testing
########################################################################
import xgboost as xgb
from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(svd_result, y_train_res, dataframe.index, test_size=0.20, random_state=0)
clf = xgb.XGBClassifier(max_depth=7, n_estimators=1250, colsample_bytree=0.8,subsample=0.85, nthread=10, learning_rate=0.1, min_samples_split=40, min_samples_leaf=9,max_features=8 ,objective='multi:softprob',silent=1,eta=0.4,num_class=3,num_rounds=15)
clf.fit(svd_X_train_res, y_train_res)
y_pred = clf.predict(svd_xtest_tfidf)
from sklearn.metrics import confusion_matrix
conf_mat = confusion_matrix(y_test, y_pred.ravel())
import seaborn as sns
fig, ax = plt.subplots(figsize=(6,6))
sns.heatmap(conf_mat, annot=True, fmt='d',xticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values, yticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
from sklearn import metrics
print(metrics.classification_report(y_test, y_pred, target_names=dataframe['RMED FaultCode L1(New)'].unique()))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred.ravel())
print(metrics.accuracy_score(y_test,y_pred.ravel()))
# precision recall f1-score support
#
# Defect 0.00 0.00 0.00 2
# Others 0.33 1.00 0.50 1
# Revisit required 0.75 0.60 0.67 5
#Assisted Replacement 0.82 0.82 0.82 11
# Cleaned 0.67 0.67 0.67 3
# Adjustment 1.00 1.00 1.00 1
#
# accuracy 0.70 23
# macro avg 0.59 0.68 0.61 23
# weighted avg 0.70 0.70 0.69 23
#
#0.6956521739130435
######################################## | #import seaborn as sns
| random_line_split |
NLP_V3_SVD_XGBM_HyperParamCV.py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data=pd.read_excel(r"C:\ML&AI\TextClassification\RAW DATA(Main Update)#2 (2019-07-02).xlsx", sheet_name="All",skiprows=5)
#data.columns.tolist()
#countries_english=['RLTD','RAME','RIND','RPAC','RSAF','RKOR']
filtered_data = data[(data["Filtering\n(Credit/Age/Class)"]=="No") & (data["Work Type"]=="Visit") & (data["Case Type (Related Case)"]=="Complaint") & (data["Country"].str.contains("RLTD|RAME|RIND|RPAC|RSAF|RKOR"))]
filtered_data.drop_duplicates(subset="SWO Number")
#reset all indexes as we dropped rows above
filtered_data = filtered_data.reset_index(drop=True)
#concatenate 3 columns of text
filtered_data['Text']= filtered_data['Case Description'] +' ' + filtered_data['Investigation'] + ' ' + filtered_data['Corrective Action']
# final columns for classification
dataframe=filtered_data.loc[:,['SWO Number','Text','RMED FaultCode L1(New)']]
#,'RMED FaultCode L2(New)', 'RMED FaultCode L3(New)', 'RMED FaultCode L4(New)',]
#If a target class accounts for less than 5% of the rows, merge it into the 'Others' class
classif = dataframe['RMED FaultCode L1(New)'].value_counts(normalize=True)
idx = classif[classif.lt(0.05)].index
dataframe.loc[dataframe['RMED FaultCode L1(New)'].isin(idx),'RMED FaultCode L1(New)'] = 'Others'
#prediction data - save the rows where tgt is blank from dataframe
pred_X_Y = dataframe.loc[dataframe['RMED FaultCode L1(New)'].isnull(), ['SWO Number','Text','RMED FaultCode L1(New)']]
#keep only the rows where neither the target nor the text is blank
dataframe = dataframe.loc[dataframe['RMED FaultCode L1(New)'].notnull(), ['SWO Number','Text','RMED FaultCode L1(New)']]
dataframe = dataframe.loc[dataframe['Text'].notnull(), ['SWO Number','Text','RMED FaultCode L1(New)']]
dataframe = dataframe.reset_index(drop=True)
dataframe['RMED FaultCode L1(New)'].value_counts()
#Multi class NLP Classification
#create a column where each class has a unique id called category id
dataframe['category_id'] = dataframe['RMED FaultCode L1(New)'].factorize()[0]
category_id_dataframe = dataframe[['RMED FaultCode L1(New)', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_dataframe.values)
id_to_category = dict(category_id_dataframe[['category_id', 'RMED FaultCode L1(New)']].values)
dataframe.head()
x = dataframe.Text
y = dataframe['RMED FaultCode L1(New)']
from sklearn.model_selection import train_test_split
SEED = 2000
x_train, x_validation_and_test, y_train, y_validation_and_test = train_test_split(x, y, test_size=.02, random_state=SEED)
x_validation, x_test, y_validation, y_test = train_test_split(x_validation_and_test, y_validation_and_test, test_size=.5, random_state=SEED)
#######################################################
#https://bbengfort.github.io/tutorials/2016/05/19/text-classification-nltk-sckit-learn.html
#The difference is that a stem might not be an actual word, whereas a lemma is an actual language word.
#Lemmatization uses the WordNet corpus plus a stop-word corpus to produce the lemma, which makes it slower than stemming.
import re
from nltk.stem.porter import PorterStemmer
porter_stemmer = PorterStemmer()
def stemming_tokenizer(str_input):
|
from sklearn.feature_extraction.text import TfidfVectorizer
#min_df = When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold.
#norm='l2' = The cosine similarity between two vectors is their dot product when l2 norm has been applied.
tfidf = TfidfVectorizer(sublinear_tf=True, norm='l2', encoding='latin-1', ngram_range=(1,1),stop_words='english',token_pattern=r'(?u)\b[A-Za-z]+\b', tokenizer=stemming_tokenizer)
tfidf.fit(x_train)
# encode document
xtrain_tfidf = tfidf.transform(x_train).toarray()
# summarize encoded vector
print(xtrain_tfidf.shape)
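# Illustrative check of the norm='l2' setting mentioned above (safe to remove):
# every non-empty TF-IDF row should have unit Euclidean length after normalisation.
print(np.linalg.norm(xtrain_tfidf[0]))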
xvalid_tfidf = tfidf.transform(x_validation).toarray()
xtest_tfidf = tfidf.transform(x_test).toarray()
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(xtrain_tfidf, y_train)
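# Illustrative check (safe to remove): after SMOTE every class should be resampled
# up to the size of the original majority class.
print(pd.Series(y_train_res).value_counts())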
##############################################################################
#from sklearn.linear_model import LogisticRegression
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.svm import LinearSVC
#from sklearn.naive_bayes import MultinomialNB
#from sklearn.model_selection import cross_val_score
#models = [
# RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
# LinearSVC(),
# MultinomialNB(),
# LogisticRegression(random_state=0),
#]
#CV = 5
#cv_df = pd.DataFrame(index=range(CV * len(models)))
#entries = []
#for model in models:
# model_name = model.__class__.__name__
# accuracies = cross_val_score(model, X_train_res, y_train_res, scoring='accuracy', cv=CV)
# for fold_idx, accuracy in enumerate(accuracies):
# entries.append((model_name, fold_idx, accuracy))
#cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
#
#
#import seaborn as sns
#sns.boxplot(x='model_name', y='accuracy', data=cv_df)
#sns.stripplot(x='model_name', y='accuracy', data=cv_df,size=8, jitter=True, edgecolor="gray", linewidth=2)
#plt.show()
#
#cv_df.groupby('model_name').accuracy.mean()
#
##continue with the best model further
## may be due to imbalance class - balance it further
## confusion matrix and heat map to see what is predicted incorrectly
## major of the predictions end up on the diagonal (predicted label = actual label)
#from sklearn.model_selection import train_test_split
#model = LinearSVC()
##X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, dataframe.index, test_size=0.20, random_state=0)
#model.fit(X_train_res, y_train_res)
#y_pred = model.predict(xvalid_tfidf)
#from sklearn.metrics import confusion_matrix
#conf_mat = confusion_matrix(y_validation, y_pred)
#fig, ax = plt.subplots(figsize=(6,6))
#sns.heatmap(conf_mat, annot=True, fmt='d',xticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values, yticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values)
#plt.ylabel('Actual')
#plt.xlabel('Predicted')
#plt.show()
##there are misclassifications, and it is important to see what caused it:
#from IPython.display import display
#for predicted in category_id_dataframe.category_id:
# for actual in category_id_dataframe.category_id:
# if predicted != actual and conf_mat[actual, predicted] >= 5:
# print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted]))
# display(dataframe.loc[indices_test[(y_test == actual) & (y_pred == predicted)]][['RMED FaultCode L1(New)', 'Text']])
# print('')
#
##check the correlated unigram & bigrams in each target classification
#model.fit(features, labels)
#N = 10
#for dataframe['RMED FaultCode L1(New)'], category_id in sorted(category_to_id.items()):
# indices = np.argsort(model.coef_[category_id])
# feature_names = np.array(tfidf.get_feature_names())[indices]
# unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N]
# bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N]
## print("# '{}':".format(dataframe['RMED FaultCode L1(New)']))
# print(category_id)
# print(" . Top unigrams:\n . {}".format('\n . '.join(unigrams)))
# print(" . Top bigrams:\n . {}".format('\n . '.join(bigrams)))
#from sklearn import metrics
##print(metrics.classification_report(y_test, y_pred, target_names=dataframe['RMED FaultCode L1(New)'].unique()))
#from sklearn.metrics import confusion_matrix
#cm = confusion_matrix(y_validation, y_pred)
#acc2=metrics.accuracy_score(y_validation,y_pred)
###########################################
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components=200)
svd.fit(X_train_res)
svd_X_train_res = svd.transform(X_train_res)
svd_xvalid_tfidf = svd.transform(xvalid_tfidf)
svd_xtest_tfidf = svd.transform(xtest_tfidf)
####################### conda install -c anaconda py-xgboost ###########################
#########################################################################
# tuning hyper parameters, test with CV = 5
########################################################################
# from sklearn.grid_search import GridSearchCV
# Number of trees, tree depth and the learning rate are the most crucial parameters.
# n_estimators captures the number of trees that we add to the model. A high number of trees can be computationally expensive
# max_depth bounds the maximum depth of the tree
# The square root of features is usually a good starting point
# Subsample sets the fraction of samples to be used for fitting the individual base learners. Values lower than 1 generally lead to a reduction of variance and an increase in bias.
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
p_test3 = {'learning_rate':[0.15,0.1,0.05,0.01,0.005,0.001], 'n_estimators':[100,250,500,750,1000,1250,1500,1750]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(max_depth=4, min_samples_split=2, min_samples_leaf=1, subsample=1,max_features='sqrt', random_state=10),param_grid = p_test3, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'learning_rate': 0.1, 'n_estimators': 1250}, 0.9279073046083356)
# ({'learning_rate': 0.15, 'n_estimators': 1750}, 0.5295885100008811)
p_test2 = {'max_depth':[2,3,4,5,6,7] }
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.01,n_estimators=1500, min_samples_split=2, min_samples_leaf=1, subsample=1,max_features='sqrt', random_state=10),param_grid = p_test2, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'max_depth': 7}, 0.935271830117191)
p_test4 = {'min_samples_split':[2,4,6,8,10,20,40,60,100], 'min_samples_leaf':[1,3,5,7,9]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.1, n_estimators=1250,max_depth=7, subsample=1,max_features='sqrt', random_state=10), param_grid = p_test4, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'min_samples_leaf': 9, 'min_samples_split': 40}, 0.9356181161335801)
p_test5 = {'max_features':[6,8,9,10,11,12,13,14,15]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.1, n_estimators=1250,max_depth=7, min_samples_split=40, min_samples_leaf=9, subsample=1, random_state=10), param_grid = p_test5, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'max_features': 8}, 0.9393840867036743)
p_test6= {'subsample':[0.7,0.75,0.8,0.85,0.9,0.95,1]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.1, n_estimators=1250,max_depth=7, min_samples_split=40, min_samples_leaf=9,max_features=8 , random_state=10), param_grid = p_test6, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'subsample': 0.85}, 0.9375046259582343)
#########################################################################
# Upon tuning hyper parameters, apply the same to the model training and testing
########################################################################
import xgboost as xgb
from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(svd_result, y_train_res, dataframe.index, test_size=0.20, random_state=0)
clf = xgb.XGBClassifier(max_depth=7, n_estimators=1250, colsample_bytree=0.8,subsample=0.85, nthread=10, learning_rate=0.1, min_samples_split=40, min_samples_leaf=9,max_features=8 ,objective='multi:softprob',silent=1,eta=0.4,num_class=3,num_rounds=15)
clf.fit(svd_X_train_res, y_train_res)
y_pred = clf.predict(svd_xtest_tfidf)
from sklearn.metrics import confusion_matrix
conf_mat = confusion_matrix(y_test, y_pred.ravel())
import seaborn as sns
fig, ax = plt.subplots(figsize=(6,6))
sns.heatmap(conf_mat, annot=True, fmt='d',xticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values, yticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
from sklearn import metrics
print(metrics.classification_report(y_test, y_pred, target_names=dataframe['RMED FaultCode L1(New)'].unique()))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred.ravel())
print(metrics.accuracy_score(y_test,y_pred.ravel()))
# precision recall f1-score support
#
# Defect 0.00 0.00 0.00 2
# Others 0.33 1.00 0.50 1
# Revisit required 0.75 0.60 0.67 5
#Assisted Replacement 0.82 0.82 0.82 11
# Cleaned 0.67 0.67 0.67 3
# Adjustment 1.00 1.00 1.00 1
#
# accuracy 0.70 23
# macro avg 0.59 0.68 0.61 23
# weighted avg 0.70 0.70 0.69 23
#
#0.6956521739130435
########################################
| words = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower().split()
words = [porter_stemmer.stem(word) for word in words]
return words | identifier_body |
NLP_V3_SVD_XGBM_HyperParamCV.py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data=pd.read_excel(r"C:\ML&AI\TextClassification\RAW DATA(Main Update)#2 (2019-07-02).xlsx", sheet_name="All",skiprows=5)
#data.columns.tolist()
#countries_english=['RLTD','RAME','RIND','RPAC','RSAF','RKOR']
filtered_data = data[(data["Filtering\n(Credit/Age/Class)"]=="No") & (data["Work Type"]=="Visit") & (data["Case Type (Related Case)"]=="Complaint") & (data["Country"].str.contains("RLTD|RAME|RIND|RPAC|RSAF|RKOR"))]
filtered_data.drop_duplicates(subset="SWO Number")
#reset all indexes as we dropped rows above
filtered_data = filtered_data.reset_index(drop=True)
#concatenate 3 columns of text
filtered_data['Text']= filtered_data['Case Description'] +' ' + filtered_data['Investigation'] + ' ' + filtered_data['Corrective Action']
# final columns for classification
dataframe=filtered_data.loc[:,['SWO Number','Text','RMED FaultCode L1(New)']]
#,'RMED FaultCode L2(New)', 'RMED FaultCode L3(New)', 'RMED FaultCode L4(New)',]
#If a target class accounts for less than 5% of the rows, merge it into the 'Others' class
classif = dataframe['RMED FaultCode L1(New)'].value_counts(normalize=True)
idx = classif[classif.lt(0.05)].index
dataframe.loc[dataframe['RMED FaultCode L1(New)'].isin(idx),'RMED FaultCode L1(New)'] = 'Others'
#prediction data - save the rows where tgt is blank from dataframe
pred_X_Y = dataframe.loc[dataframe['RMED FaultCode L1(New)'].isnull(), ['SWO Number','Text','RMED FaultCode L1(New)']]
#keep only the rows where neither the target nor the text is blank
dataframe = dataframe.loc[dataframe['RMED FaultCode L1(New)'].notnull(), ['SWO Number','Text','RMED FaultCode L1(New)']]
dataframe = dataframe.loc[dataframe['Text'].notnull(), ['SWO Number','Text','RMED FaultCode L1(New)']]
dataframe = dataframe.reset_index(drop=True)
dataframe['RMED FaultCode L1(New)'].value_counts()
#Multi class NLP Classification
#create a column where each class has a unique id called category id
dataframe['category_id'] = dataframe['RMED FaultCode L1(New)'].factorize()[0]
category_id_dataframe = dataframe[['RMED FaultCode L1(New)', 'category_id']].drop_duplicates().sort_values('category_id')
category_to_id = dict(category_id_dataframe.values)
id_to_category = dict(category_id_dataframe[['category_id', 'RMED FaultCode L1(New)']].values)
dataframe.head()
x = dataframe.Text
y = dataframe['RMED FaultCode L1(New)']
from sklearn.model_selection import train_test_split
SEED = 2000
x_train, x_validation_and_test, y_train, y_validation_and_test = train_test_split(x, y, test_size=.02, random_state=SEED)
x_validation, x_test, y_validation, y_test = train_test_split(x_validation_and_test, y_validation_and_test, test_size=.5, random_state=SEED)
#######################################################
#https://bbengfort.github.io/tutorials/2016/05/19/text-classification-nltk-sckit-learn.html
#The difference is that a stem might not be an actual word, whereas a lemma is an actual language word.
#Lemmatization uses the WordNet corpus plus a stop-word corpus to produce the lemma, which makes it slower than stemming (a quick stemmer sanity check follows the tokenizer below).
import re
from nltk.stem.porter import PorterStemmer
porter_stemmer = PorterStemmer()
def | (str_input):
words = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower().split()
words = [porter_stemmer.stem(word) for word in words]
return words
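# Quick sanity check of the stemmer described above (illustrative only, safe to remove):
# inflected forms should collapse to a single stem, which keeps the TF-IDF vocabulary compact.
print([porter_stemmer.stem(w) for w in ("clean", "cleaning", "cleaned")])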
from sklearn.feature_extraction.text import TfidfVectorizer
#min_df = When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold.
#norm='l2' = The cosine similarity between two vectors is their dot product when l2 norm has been applied.
tfidf = TfidfVectorizer(sublinear_tf=True, norm='l2', encoding='latin-1', ngram_range=(1,1),stop_words='english',token_pattern=r'(?u)\b[A-Za-z]+\b', tokenizer=stemming_tokenizer)
tfidf.fit(x_train)
# encode document
xtrain_tfidf = tfidf.transform(x_train).toarray()
# summarize encoded vector
print(xtrain_tfidf.shape)
xvalid_tfidf = tfidf.transform(x_validation).toarray()
xtest_tfidf = tfidf.transform(x_test).toarray()
from imblearn.over_sampling import SMOTE
sm = SMOTE(random_state=2)
X_train_res, y_train_res = sm.fit_sample(xtrain_tfidf, y_train)
##############################################################################
#from sklearn.linear_model import LogisticRegression
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.svm import LinearSVC
#from sklearn.naive_bayes import MultinomialNB
#from sklearn.model_selection import cross_val_score
#models = [
# RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
# LinearSVC(),
# MultinomialNB(),
# LogisticRegression(random_state=0),
#]
#CV = 5
#cv_df = pd.DataFrame(index=range(CV * len(models)))
#entries = []
#for model in models:
# model_name = model.__class__.__name__
# accuracies = cross_val_score(model, X_train_res, y_train_res, scoring='accuracy', cv=CV)
# for fold_idx, accuracy in enumerate(accuracies):
# entries.append((model_name, fold_idx, accuracy))
#cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
#
#
#import seaborn as sns
#sns.boxplot(x='model_name', y='accuracy', data=cv_df)
#sns.stripplot(x='model_name', y='accuracy', data=cv_df,size=8, jitter=True, edgecolor="gray", linewidth=2)
#plt.show()
#
#cv_df.groupby('model_name').accuracy.mean()
#
##continue with the best model further
## may be due to imbalance class - balance it further
## confusion matrix and heat map to see what is predicted incorrectly
## major of the predictions end up on the diagonal (predicted label = actual label)
#from sklearn.model_selection import train_test_split
#model = LinearSVC()
##X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, dataframe.index, test_size=0.20, random_state=0)
#model.fit(X_train_res, y_train_res)
#y_pred = model.predict(xvalid_tfidf)
#from sklearn.metrics import confusion_matrix
#conf_mat = confusion_matrix(y_validation, y_pred)
#fig, ax = plt.subplots(figsize=(6,6))
#sns.heatmap(conf_mat, annot=True, fmt='d',xticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values, yticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values)
#plt.ylabel('Actual')
#plt.xlabel('Predicted')
#plt.show()
##there are misclassifications, and it is important to see what caused it:
#from IPython.display import display
#for predicted in category_id_dataframe.category_id:
# for actual in category_id_dataframe.category_id:
# if predicted != actual and conf_mat[actual, predicted] >= 5:
# print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted]))
# display(dataframe.loc[indices_test[(y_test == actual) & (y_pred == predicted)]][['RMED FaultCode L1(New)', 'Text']])
# print('')
#
##check the correlated unigram & bigrams in each target classification
#model.fit(features, labels)
#N = 10
#for dataframe['RMED FaultCode L1(New)'], category_id in sorted(category_to_id.items()):
# indices = np.argsort(model.coef_[category_id])
# feature_names = np.array(tfidf.get_feature_names())[indices]
# unigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 1][:N]
# bigrams = [v for v in reversed(feature_names) if len(v.split(' ')) == 2][:N]
## print("# '{}':".format(dataframe['RMED FaultCode L1(New)']))
# print(category_id)
# print(" . Top unigrams:\n . {}".format('\n . '.join(unigrams)))
# print(" . Top bigrams:\n . {}".format('\n . '.join(bigrams)))
#from sklearn import metrics
##print(metrics.classification_report(y_test, y_pred, target_names=dataframe['RMED FaultCode L1(New)'].unique()))
#from sklearn.metrics import confusion_matrix
#cm = confusion_matrix(y_validation, y_pred)
#acc2=metrics.accuracy_score(y_validation,y_pred)
###########################################
from sklearn.decomposition import TruncatedSVD
svd = TruncatedSVD(n_components=200)
svd.fit(X_train_res)
svd_X_train_res = svd.transform(X_train_res)
svd_xvalid_tfidf = svd.transform(xvalid_tfidf)
svd_xtest_tfidf = svd.transform(xtest_tfidf)
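# Illustrative diagnostic (safe to remove): fraction of the TF-IDF variance retained
# by the 200 SVD components; a low value suggests increasing n_components.
print(svd.explained_variance_ratio_.sum())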
####################### conda install -c anaconda py-xgboost ###########################
#########################################################################
# tuning hyper parameters, test with CV = 5
########################################################################
# from sklearn.grid_search import GridSearchCV
# Number of trees, tree depth and the learning rate are the most crucial parameters.
# n_estimators captures the number of trees that we add to the model. A high number of trees can be computationally expensive
# max_depth bounds the maximum depth of the tree
# The square root of features is usually a good starting point
# Subsample sets the fraction of samples to be used for fitting the individual base learners. Values lower than 1 generally lead to a reduction of variance and an increase in bias.
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
p_test3 = {'learning_rate':[0.15,0.1,0.05,0.01,0.005,0.001], 'n_estimators':[100,250,500,750,1000,1250,1500,1750]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(max_depth=4, min_samples_split=2, min_samples_leaf=1, subsample=1,max_features='sqrt', random_state=10),param_grid = p_test3, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'learning_rate': 0.1, 'n_estimators': 1250}, 0.9279073046083356)
# ({'learning_rate': 0.15, 'n_estimators': 1750}, 0.5295885100008811)
p_test2 = {'max_depth':[2,3,4,5,6,7] }
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.01,n_estimators=1500, min_samples_split=2, min_samples_leaf=1, subsample=1,max_features='sqrt', random_state=10),param_grid = p_test2, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'max_depth': 7}, 0.935271830117191)
p_test4 = {'min_samples_split':[2,4,6,8,10,20,40,60,100], 'min_samples_leaf':[1,3,5,7,9]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.1, n_estimators=1250,max_depth=7, subsample=1,max_features='sqrt', random_state=10), param_grid = p_test4, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'min_samples_leaf': 9, 'min_samples_split': 40}, 0.9356181161335801)
p_test5 = {'max_features':[6,8,9,10,11,12,13,14,15]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.1, n_estimators=1250,max_depth=7, min_samples_split=40, min_samples_leaf=9, subsample=1, random_state=10), param_grid = p_test5, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'max_features': 8}, 0.9393840867036743)
p_test6= {'subsample':[0.7,0.75,0.8,0.85,0.9,0.95,1]}
tuning = GridSearchCV(estimator =GradientBoostingClassifier(learning_rate=0.1, n_estimators=1250,max_depth=7, min_samples_split=40, min_samples_leaf=9,max_features=8 , random_state=10), param_grid = p_test6, scoring='accuracy',n_jobs=4,iid=False, cv=5)
tuning.fit(svd_X_train_res, y_train_res)
tuning.best_params_, tuning.best_score_
# ({'subsample': 0.85}, 0.9375046259582343)
#########################################################################
# Upon tuning hyper parameters, apply the same to the model training and testing
########################################################################
import xgboost as xgb
from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(svd_result, y_train_res, dataframe.index, test_size=0.20, random_state=0)
clf = xgb.XGBClassifier(max_depth=7, n_estimators=1250, colsample_bytree=0.8,subsample=0.85, nthread=10, learning_rate=0.1, min_samples_split=40, min_samples_leaf=9,max_features=8 ,objective='multi:softprob',silent=1,eta=0.4,num_class=3,num_rounds=15)
clf.fit(svd_X_train_res, y_train_res)
y_pred = clf.predict(svd_xtest_tfidf)
from sklearn.metrics import confusion_matrix
conf_mat = confusion_matrix(y_test, y_pred.ravel())
import seaborn as sns
fig, ax = plt.subplots(figsize=(6,6))
sns.heatmap(conf_mat, annot=True, fmt='d',xticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values, yticklabels=category_id_dataframe['RMED FaultCode L1(New)'].values)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
from sklearn import metrics
print(metrics.classification_report(y_test, y_pred, target_names=dataframe['RMED FaultCode L1(New)'].unique()))
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred.ravel())
print(metrics.accuracy_score(y_test,y_pred.ravel()))
# precision recall f1-score support
#
# Defect 0.00 0.00 0.00 2
# Others 0.33 1.00 0.50 1
# Revisit required 0.75 0.60 0.67 5
#Assisted Replacement 0.82 0.82 0.82 11
# Cleaned 0.67 0.67 0.67 3
# Adjustment 1.00 1.00 1.00 1
#
# accuracy 0.70 23
# macro avg 0.59 0.68 0.61 23
# weighted avg 0.70 0.70 0.69 23
#
#0.6956521739130435
########################################
| stemming_tokenizer | identifier_name |
manager.py | import logging
from collections import defaultdict
from collections.abc import Iterator
from importlib import import_module
from types import ModuleType
from typing import TYPE_CHECKING, Any, Optional
from rotkehlchen.db.constants import BINANCE_MARKETS_KEY, KRAKEN_ACCOUNT_TYPE_KEY
from rotkehlchen.errors.misc import InputError
from rotkehlchen.exchanges.binance import BINANCE_BASE_URL, BINANCEUS_BASE_URL
from rotkehlchen.exchanges.exchange import ExchangeInterface, ExchangeWithExtras
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import (
ApiKey,
ApiSecret,
ExchangeApiCredentials,
ExchangeAuthCredentials,
Location,
LocationEventMappingType,
)
from rotkehlchen.user_messages import MessagesAggregator
from .constants import SUPPORTED_EXCHANGES
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.exchanges.kraken import KrakenAccountType
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class ExchangeManager:
def __init__(self, msg_aggregator: MessagesAggregator) -> None:
self.connected_exchanges: dict[Location, list[ExchangeInterface]] = defaultdict(list)
self.msg_aggregator = msg_aggregator
@staticmethod
def _get_exchange_module_name(location: Location) -> str:
if location == Location.BINANCEUS:
return str(Location.BINANCE)
return str(location)
def connected_and_syncing_exchanges_num(self) -> int:
return len(list(self.iterate_exchanges()))
def get_exchange(self, name: str, location: Location) -> Optional[ExchangeInterface]:
"""Get the exchange object for an exchange with a given name and location
Returns None if it cannot be found
"""
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return None
for exchange in exchanges_list:
if exchange.name == name:
return exchange
return None
def iterate_exchanges(self) -> Iterator[ExchangeInterface]:
"""Iterate all connected and syncing exchanges"""
with self.database.conn.read_ctx() as cursor:
excluded = self.database.get_settings(cursor).non_syncing_exchanges
for exchanges in self.connected_exchanges.values():
for exchange in exchanges:
# We are not yielding excluded exchanges
if exchange.location_id() not in excluded:
yield exchange
def edit_exchange(
self,
name: str,
location: Location,
new_name: Optional[str],
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
kraken_account_type: Optional['KrakenAccountType'],
binance_selected_trade_pairs: Optional[list[str]],
) -> tuple[bool, str]:
"""Edits both the exchange object and the database entry
Returns True if an entry was found and edited, and False otherwise
"""
exchangeobj = self.get_exchange(name=name, location=location)
if not exchangeobj:
return False, f'Could not find {location!s} exchange {name} for editing'
# First validate exchange credentials
edited = exchangeobj.edit_exchange_credentials(ExchangeAuthCredentials(
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
))
if edited is True:
try:
credentials_are_valid, msg = exchangeobj.validate_api_key()
except Exception as e: # pylint: disable=broad-except
msg = str(e)
credentials_are_valid = False
if credentials_are_valid is False:
exchangeobj.reset_to_db_credentials()
return False, f'New credentials are invalid. {msg}'
# Then edit extra properties if needed
if isinstance(exchangeobj, ExchangeWithExtras):
success, msg = exchangeobj.edit_exchange_extras({
KRAKEN_ACCOUNT_TYPE_KEY: kraken_account_type,
BINANCE_MARKETS_KEY: binance_selected_trade_pairs,
})
if success is False:
exchangeobj.reset_to_db_credentials()
return False, f'Failed to edit exchange extras. {msg}'
try:
with self.database.user_write() as cursor:
self.database.edit_exchange(
cursor,
name=name,
location=location,
new_name=new_name,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
kraken_account_type=kraken_account_type,
binance_selected_trade_pairs=binance_selected_trade_pairs,
)
except InputError as e:
exchangeobj.reset_to_db_credentials() # DB is already rolled back at this point
if isinstance(exchangeobj, ExchangeWithExtras):
exchangeobj.reset_to_db_extras()
return False, f"Couldn't update exchange properties in the DB. {e!s}"
# Finally edit the name of the exchange object
if new_name is not None:
exchangeobj.name = new_name
return True, ''
def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:
"""
Deletes an exchange with the specified name + location from both connected_exchanges
and the DB.
"""
if self.get_exchange(name=name, location=location) is None:
return False, f'{location!s} exchange {name} is not registered'
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return False, f'{location!s} exchange {name} is not registered'
if len(exchanges_list) == 1: # if is last exchange of this location
self.connected_exchanges.pop(location)
else:
self.connected_exchanges[location] = [x for x in exchanges_list if x.name != name]
with self.database.user_write() as write_cursor: # Also remove it from the db
self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501
self.database.delete_used_query_range_for_exchange(
write_cursor=write_cursor,
location=location,
exchange_name=name,
)
return True, ''
def delete_all_exchanges(self) -> None:
"""Deletes all exchanges from the manager. Not from the DB"""
self.connected_exchanges.clear()
def get_connected_exchanges_info(self) -> list[dict[str, Any]]:
exchange_info = []
for location, exchanges in self.connected_exchanges.items():
for exchangeobj in exchanges:
data = {'location': str(location), 'name': exchangeobj.name}
if location == Location.KRAKEN: # ignore type since we know this is kraken here
data[KRAKEN_ACCOUNT_TYPE_KEY] = str(exchangeobj.account_type) # type: ignore
exchange_info.append(data)
return exchange_info
def _get_exchange_module(self, location: Location) -> ModuleType:
module_name = self._get_exchange_module_name(location)
try:
module = import_module(f'rotkehlchen.exchanges.{module_name}')
except ModuleNotFoundError:
# This should never happen
raise AssertionError(
f'Tried to initialize unknown exchange {location!s}. Should not happen',
) from None
return module
def setup_exchange(
self,
name: str,
location: Location,
api_key: ApiKey,
api_secret: ApiSecret,
database: 'DBHandler',
passphrase: Optional[str] = None,
**kwargs: Any,
) -> tuple[bool, str]:
"""
        Set up a new exchange with an api key and an api secret.
        For some exchanges there are more attributes to add
"""
if location not in SUPPORTED_EXCHANGES: # also checked via marshmallow
return False, f'Attempted to register unsupported exchange {name}'
if self.get_exchange(name=name, location=location) is not None:
return False, f'{location!s} exchange {name} is already registered'
api_credentials = ExchangeApiCredentials(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
)
exchange = self.initialize_exchange(
module=self._get_exchange_module(location),
credentials=api_credentials,
database=database,
**kwargs,
)
try:
result, message = exchange.validate_api_key()
except Exception as e: # pylint: disable=broad-except
result = False
message = str(e)
if not result:
log.error(
f'Failed to validate API key for {location!s} exchange {name}'
f' due to {message}',
)
return False, message
self.connected_exchanges[location].append(exchange)
return True, ''
def initialize_exchange(
self,
module: ModuleType,
credentials: ExchangeApiCredentials,
database: 'DBHandler',
**kwargs: Any,
) -> ExchangeInterface:
maybe_exchange = self.get_exchange(name=credentials.name, location=credentials.location)
if maybe_exchange:
|
module_name = module.__name__.split('.')[-1]
exchange_ctor = getattr(module, module_name.capitalize())
if credentials.passphrase is not None:
kwargs['passphrase'] = credentials.passphrase
elif credentials.location == Location.BINANCE:
kwargs['uri'] = BINANCE_BASE_URL
elif credentials.location == Location.BINANCEUS:
kwargs['uri'] = BINANCEUS_BASE_URL
exchange_obj = exchange_ctor(
name=credentials.name,
api_key=credentials.api_key,
secret=credentials.api_secret,
database=database,
msg_aggregator=self.msg_aggregator,
# remove all empty kwargs
**{k: v for k, v in kwargs.items() if v is not None},
)
return exchange_obj
def initialize_exchanges(
self,
exchange_credentials: dict[Location, list[ExchangeApiCredentials]],
database: 'DBHandler',
) -> None:
log.debug('Initializing exchanges')
self.database = database
# initialize exchanges for which we have keys and are not already initialized
for location, credentials_list in exchange_credentials.items():
module = self._get_exchange_module(location)
for credentials in credentials_list:
extras = database.get_exchange_credentials_extras(
name=credentials.name,
location=credentials.location,
)
exchange_obj = self.initialize_exchange(
module=module,
credentials=credentials,
database=database,
**extras,
)
self.connected_exchanges[location].append(exchange_obj)
log.debug('Initialized exchanges')
def get_user_binance_pairs(self, name: str, location: Location) -> list[str]:
is_connected = location in self.connected_exchanges
if is_connected:
return self.database.get_binance_pairs(name, location)
return []
def query_history_events(self) -> None:
"""Queries all history events for exchanges that need it
May raise:
- RemoteError if any exchange's remote query fails
"""
for exchange in self.iterate_exchanges():
exchange.query_history_events()
def get_exchange_mappings(self) -> LocationEventMappingType:
"""Collect event mappings from each exchange"""
mappings: LocationEventMappingType = {}
for location, exchanges in self.connected_exchanges.items():
for exchange in exchanges:
if len(exchange_mapping := exchange.get_event_mappings()) != 0:
mappings[location] = exchange_mapping
break
return mappings
| return maybe_exchange # already initialized | conditional_block |
manager.py | import logging
from collections import defaultdict
from collections.abc import Iterator
from importlib import import_module
from types import ModuleType
from typing import TYPE_CHECKING, Any, Optional
from rotkehlchen.db.constants import BINANCE_MARKETS_KEY, KRAKEN_ACCOUNT_TYPE_KEY
from rotkehlchen.errors.misc import InputError
from rotkehlchen.exchanges.binance import BINANCE_BASE_URL, BINANCEUS_BASE_URL
from rotkehlchen.exchanges.exchange import ExchangeInterface, ExchangeWithExtras
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import (
ApiKey,
ApiSecret,
ExchangeApiCredentials,
ExchangeAuthCredentials,
Location,
LocationEventMappingType,
)
from rotkehlchen.user_messages import MessagesAggregator
from .constants import SUPPORTED_EXCHANGES
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.exchanges.kraken import KrakenAccountType
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class ExchangeManager:
def __init__(self, msg_aggregator: MessagesAggregator) -> None:
self.connected_exchanges: dict[Location, list[ExchangeInterface]] = defaultdict(list)
self.msg_aggregator = msg_aggregator
@staticmethod
def _get_exchange_module_name(location: Location) -> str:
if location == Location.BINANCEUS:
return str(Location.BINANCE)
return str(location)
def connected_and_syncing_exchanges_num(self) -> int:
|
def get_exchange(self, name: str, location: Location) -> Optional[ExchangeInterface]:
"""Get the exchange object for an exchange with a given name and location
Returns None if it can not be found
"""
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return None
for exchange in exchanges_list:
if exchange.name == name:
return exchange
return None
def iterate_exchanges(self) -> Iterator[ExchangeInterface]:
"""Iterate all connected and syncing exchanges"""
with self.database.conn.read_ctx() as cursor:
excluded = self.database.get_settings(cursor).non_syncing_exchanges
for exchanges in self.connected_exchanges.values():
for exchange in exchanges:
# We are not yielding excluded exchanges
if exchange.location_id() not in excluded:
yield exchange
def edit_exchange(
self,
name: str,
location: Location,
new_name: Optional[str],
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
kraken_account_type: Optional['KrakenAccountType'],
binance_selected_trade_pairs: Optional[list[str]],
) -> tuple[bool, str]:
"""Edits both the exchange object and the database entry
        Returns True if an entry was found and edited, False otherwise
"""
exchangeobj = self.get_exchange(name=name, location=location)
if not exchangeobj:
return False, f'Could not find {location!s} exchange {name} for editing'
# First validate exchange credentials
edited = exchangeobj.edit_exchange_credentials(ExchangeAuthCredentials(
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
))
if edited is True:
try:
credentials_are_valid, msg = exchangeobj.validate_api_key()
except Exception as e: # pylint: disable=broad-except
msg = str(e)
credentials_are_valid = False
if credentials_are_valid is False:
exchangeobj.reset_to_db_credentials()
return False, f'New credentials are invalid. {msg}'
# Then edit extra properties if needed
if isinstance(exchangeobj, ExchangeWithExtras):
success, msg = exchangeobj.edit_exchange_extras({
KRAKEN_ACCOUNT_TYPE_KEY: kraken_account_type,
BINANCE_MARKETS_KEY: binance_selected_trade_pairs,
})
if success is False:
exchangeobj.reset_to_db_credentials()
return False, f'Failed to edit exchange extras. {msg}'
try:
with self.database.user_write() as cursor:
self.database.edit_exchange(
cursor,
name=name,
location=location,
new_name=new_name,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
kraken_account_type=kraken_account_type,
binance_selected_trade_pairs=binance_selected_trade_pairs,
)
except InputError as e:
exchangeobj.reset_to_db_credentials() # DB is already rolled back at this point
if isinstance(exchangeobj, ExchangeWithExtras):
exchangeobj.reset_to_db_extras()
return False, f"Couldn't update exchange properties in the DB. {e!s}"
# Finally edit the name of the exchange object
if new_name is not None:
exchangeobj.name = new_name
return True, ''
def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:
"""
Deletes an exchange with the specified name + location from both connected_exchanges
and the DB.
"""
if self.get_exchange(name=name, location=location) is None:
return False, f'{location!s} exchange {name} is not registered'
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return False, f'{location!s} exchange {name} is not registered'
        if len(exchanges_list) == 1:  # if it is the last exchange for this location
self.connected_exchanges.pop(location)
else:
self.connected_exchanges[location] = [x for x in exchanges_list if x.name != name]
with self.database.user_write() as write_cursor: # Also remove it from the db
self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501
self.database.delete_used_query_range_for_exchange(
write_cursor=write_cursor,
location=location,
exchange_name=name,
)
return True, ''
def delete_all_exchanges(self) -> None:
"""Deletes all exchanges from the manager. Not from the DB"""
self.connected_exchanges.clear()
def get_connected_exchanges_info(self) -> list[dict[str, Any]]:
exchange_info = []
for location, exchanges in self.connected_exchanges.items():
for exchangeobj in exchanges:
data = {'location': str(location), 'name': exchangeobj.name}
if location == Location.KRAKEN: # ignore type since we know this is kraken here
data[KRAKEN_ACCOUNT_TYPE_KEY] = str(exchangeobj.account_type) # type: ignore
exchange_info.append(data)
return exchange_info
def _get_exchange_module(self, location: Location) -> ModuleType:
module_name = self._get_exchange_module_name(location)
try:
module = import_module(f'rotkehlchen.exchanges.{module_name}')
except ModuleNotFoundError:
# This should never happen
raise AssertionError(
f'Tried to initialize unknown exchange {location!s}. Should not happen',
) from None
return module
def setup_exchange(
self,
name: str,
location: Location,
api_key: ApiKey,
api_secret: ApiSecret,
database: 'DBHandler',
passphrase: Optional[str] = None,
**kwargs: Any,
) -> tuple[bool, str]:
"""
        Set up a new exchange with an api key and an api secret.
        For some exchanges there are more attributes to add
"""
if location not in SUPPORTED_EXCHANGES: # also checked via marshmallow
return False, f'Attempted to register unsupported exchange {name}'
if self.get_exchange(name=name, location=location) is not None:
return False, f'{location!s} exchange {name} is already registered'
api_credentials = ExchangeApiCredentials(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
)
exchange = self.initialize_exchange(
module=self._get_exchange_module(location),
credentials=api_credentials,
database=database,
**kwargs,
)
try:
result, message = exchange.validate_api_key()
except Exception as e: # pylint: disable=broad-except
result = False
message = str(e)
if not result:
log.error(
f'Failed to validate API key for {location!s} exchange {name}'
f' due to {message}',
)
return False, message
self.connected_exchanges[location].append(exchange)
return True, ''
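    # A minimal, hypothetical usage sketch for setup_exchange(); `db`, the
    # aggregator instance and the credential values are placeholders, not real
    # objects or keys:
    #
    #     manager = ExchangeManager(msg_aggregator=MessagesAggregator())
    #     success, msg = manager.setup_exchange(
    #         name='my kraken',
    #         location=Location.KRAKEN,
    #         api_key=ApiKey('key'),
    #         api_secret=ApiSecret(b'secret'),
    #         database=db,
    #     )
    #     if not success:
    #         print(f'could not add exchange: {msg}')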
def initialize_exchange(
self,
module: ModuleType,
credentials: ExchangeApiCredentials,
database: 'DBHandler',
**kwargs: Any,
) -> ExchangeInterface:
maybe_exchange = self.get_exchange(name=credentials.name, location=credentials.location)
if maybe_exchange:
return maybe_exchange # already initialized
module_name = module.__name__.split('.')[-1]
exchange_ctor = getattr(module, module_name.capitalize())
if credentials.passphrase is not None:
kwargs['passphrase'] = credentials.passphrase
elif credentials.location == Location.BINANCE:
kwargs['uri'] = BINANCE_BASE_URL
elif credentials.location == Location.BINANCEUS:
kwargs['uri'] = BINANCEUS_BASE_URL
exchange_obj = exchange_ctor(
name=credentials.name,
api_key=credentials.api_key,
secret=credentials.api_secret,
database=database,
msg_aggregator=self.msg_aggregator,
# remove all empty kwargs
**{k: v for k, v in kwargs.items() if v is not None},
)
return exchange_obj
def initialize_exchanges(
self,
exchange_credentials: dict[Location, list[ExchangeApiCredentials]],
database: 'DBHandler',
) -> None:
log.debug('Initializing exchanges')
self.database = database
# initialize exchanges for which we have keys and are not already initialized
for location, credentials_list in exchange_credentials.items():
module = self._get_exchange_module(location)
for credentials in credentials_list:
extras = database.get_exchange_credentials_extras(
name=credentials.name,
location=credentials.location,
)
exchange_obj = self.initialize_exchange(
module=module,
credentials=credentials,
database=database,
**extras,
)
self.connected_exchanges[location].append(exchange_obj)
log.debug('Initialized exchanges')
def get_user_binance_pairs(self, name: str, location: Location) -> list[str]:
is_connected = location in self.connected_exchanges
if is_connected:
return self.database.get_binance_pairs(name, location)
return []
def query_history_events(self) -> None:
"""Queries all history events for exchanges that need it
May raise:
- RemoteError if any exchange's remote query fails
"""
for exchange in self.iterate_exchanges():
exchange.query_history_events()
def get_exchange_mappings(self) -> LocationEventMappingType:
"""Collect event mappings from each exchange"""
mappings: LocationEventMappingType = {}
for location, exchanges in self.connected_exchanges.items():
for exchange in exchanges:
if len(exchange_mapping := exchange.get_event_mappings()) != 0:
mappings[location] = exchange_mapping
break
return mappings
| return len(list(self.iterate_exchanges())) | identifier_body |
manager.py | import logging
from collections import defaultdict
from collections.abc import Iterator
from importlib import import_module
from types import ModuleType
from typing import TYPE_CHECKING, Any, Optional
from rotkehlchen.db.constants import BINANCE_MARKETS_KEY, KRAKEN_ACCOUNT_TYPE_KEY
from rotkehlchen.errors.misc import InputError
from rotkehlchen.exchanges.binance import BINANCE_BASE_URL, BINANCEUS_BASE_URL
from rotkehlchen.exchanges.exchange import ExchangeInterface, ExchangeWithExtras
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import (
ApiKey,
ApiSecret,
ExchangeApiCredentials,
ExchangeAuthCredentials,
Location,
LocationEventMappingType,
)
from rotkehlchen.user_messages import MessagesAggregator
from .constants import SUPPORTED_EXCHANGES
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.exchanges.kraken import KrakenAccountType
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class ExchangeManager:
def __init__(self, msg_aggregator: MessagesAggregator) -> None:
self.connected_exchanges: dict[Location, list[ExchangeInterface]] = defaultdict(list)
self.msg_aggregator = msg_aggregator
@staticmethod
def _get_exchange_module_name(location: Location) -> str:
if location == Location.BINANCEUS:
return str(Location.BINANCE)
return str(location)
def | (self) -> int:
return len(list(self.iterate_exchanges()))
def get_exchange(self, name: str, location: Location) -> Optional[ExchangeInterface]:
"""Get the exchange object for an exchange with a given name and location
Returns None if it can not be found
"""
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return None
for exchange in exchanges_list:
if exchange.name == name:
return exchange
return None
def iterate_exchanges(self) -> Iterator[ExchangeInterface]:
"""Iterate all connected and syncing exchanges"""
with self.database.conn.read_ctx() as cursor:
excluded = self.database.get_settings(cursor).non_syncing_exchanges
for exchanges in self.connected_exchanges.values():
for exchange in exchanges:
# We are not yielding excluded exchanges
if exchange.location_id() not in excluded:
yield exchange
def edit_exchange(
self,
name: str,
location: Location,
new_name: Optional[str],
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
kraken_account_type: Optional['KrakenAccountType'],
binance_selected_trade_pairs: Optional[list[str]],
) -> tuple[bool, str]:
"""Edits both the exchange object and the database entry
        Returns True if an entry was found and edited, False otherwise
"""
exchangeobj = self.get_exchange(name=name, location=location)
if not exchangeobj:
return False, f'Could not find {location!s} exchange {name} for editing'
# First validate exchange credentials
edited = exchangeobj.edit_exchange_credentials(ExchangeAuthCredentials(
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
))
if edited is True:
try:
credentials_are_valid, msg = exchangeobj.validate_api_key()
except Exception as e: # pylint: disable=broad-except
msg = str(e)
credentials_are_valid = False
if credentials_are_valid is False:
exchangeobj.reset_to_db_credentials()
return False, f'New credentials are invalid. {msg}'
# Then edit extra properties if needed
if isinstance(exchangeobj, ExchangeWithExtras):
success, msg = exchangeobj.edit_exchange_extras({
KRAKEN_ACCOUNT_TYPE_KEY: kraken_account_type,
BINANCE_MARKETS_KEY: binance_selected_trade_pairs,
})
if success is False:
exchangeobj.reset_to_db_credentials()
return False, f'Failed to edit exchange extras. {msg}'
try:
with self.database.user_write() as cursor:
self.database.edit_exchange(
cursor,
name=name,
location=location,
new_name=new_name,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
kraken_account_type=kraken_account_type,
binance_selected_trade_pairs=binance_selected_trade_pairs,
)
except InputError as e:
exchangeobj.reset_to_db_credentials() # DB is already rolled back at this point
if isinstance(exchangeobj, ExchangeWithExtras):
exchangeobj.reset_to_db_extras()
return False, f"Couldn't update exchange properties in the DB. {e!s}"
# Finally edit the name of the exchange object
if new_name is not None:
exchangeobj.name = new_name
return True, ''
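    # Hypothetical call pattern for edit_exchange(); every argument value below
    # is made up. Passing None for a credential field presumably leaves it
    # unchanged (only edited credentials are re-validated above), and a False
    # return means the in-memory object was reset to the stored values:
    #
    #     success, msg = manager.edit_exchange(
    #         name='my kraken',
    #         location=Location.KRAKEN,
    #         new_name='kraken main',
    #         api_key=None,
    #         api_secret=None,
    #         passphrase=None,
    #         kraken_account_type=None,
    #         binance_selected_trade_pairs=None,
    #     )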
def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:
"""
Deletes an exchange with the specified name + location from both connected_exchanges
and the DB.
"""
if self.get_exchange(name=name, location=location) is None:
return False, f'{location!s} exchange {name} is not registered'
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return False, f'{location!s} exchange {name} is not registered'
        if len(exchanges_list) == 1:  # if it is the last exchange for this location
self.connected_exchanges.pop(location)
else:
self.connected_exchanges[location] = [x for x in exchanges_list if x.name != name]
with self.database.user_write() as write_cursor: # Also remove it from the db
self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501
self.database.delete_used_query_range_for_exchange(
write_cursor=write_cursor,
location=location,
exchange_name=name,
)
return True, ''
def delete_all_exchanges(self) -> None:
"""Deletes all exchanges from the manager. Not from the DB"""
self.connected_exchanges.clear()
def get_connected_exchanges_info(self) -> list[dict[str, Any]]:
exchange_info = []
for location, exchanges in self.connected_exchanges.items():
for exchangeobj in exchanges:
data = {'location': str(location), 'name': exchangeobj.name}
if location == Location.KRAKEN: # ignore type since we know this is kraken here
data[KRAKEN_ACCOUNT_TYPE_KEY] = str(exchangeobj.account_type) # type: ignore
exchange_info.append(data)
return exchange_info
def _get_exchange_module(self, location: Location) -> ModuleType:
module_name = self._get_exchange_module_name(location)
try:
module = import_module(f'rotkehlchen.exchanges.{module_name}')
except ModuleNotFoundError:
# This should never happen
raise AssertionError(
f'Tried to initialize unknown exchange {location!s}. Should not happen',
) from None
return module
def setup_exchange(
self,
name: str,
location: Location,
api_key: ApiKey,
api_secret: ApiSecret,
database: 'DBHandler',
passphrase: Optional[str] = None,
**kwargs: Any,
) -> tuple[bool, str]:
"""
        Set up a new exchange with an api key and an api secret.
        For some exchanges there are more attributes to add
"""
if location not in SUPPORTED_EXCHANGES: # also checked via marshmallow
return False, f'Attempted to register unsupported exchange {name}'
if self.get_exchange(name=name, location=location) is not None:
return False, f'{location!s} exchange {name} is already registered'
api_credentials = ExchangeApiCredentials(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
)
exchange = self.initialize_exchange(
module=self._get_exchange_module(location),
credentials=api_credentials,
database=database,
**kwargs,
)
try:
result, message = exchange.validate_api_key()
except Exception as e: # pylint: disable=broad-except
result = False
message = str(e)
if not result:
log.error(
f'Failed to validate API key for {location!s} exchange {name}'
f' due to {message}',
)
return False, message
self.connected_exchanges[location].append(exchange)
return True, ''
def initialize_exchange(
self,
module: ModuleType,
credentials: ExchangeApiCredentials,
database: 'DBHandler',
**kwargs: Any,
) -> ExchangeInterface:
maybe_exchange = self.get_exchange(name=credentials.name, location=credentials.location)
if maybe_exchange:
return maybe_exchange # already initialized
module_name = module.__name__.split('.')[-1]
exchange_ctor = getattr(module, module_name.capitalize())
if credentials.passphrase is not None:
kwargs['passphrase'] = credentials.passphrase
elif credentials.location == Location.BINANCE:
kwargs['uri'] = BINANCE_BASE_URL
elif credentials.location == Location.BINANCEUS:
kwargs['uri'] = BINANCEUS_BASE_URL
exchange_obj = exchange_ctor(
name=credentials.name,
api_key=credentials.api_key,
secret=credentials.api_secret,
database=database,
msg_aggregator=self.msg_aggregator,
# remove all empty kwargs
**{k: v for k, v in kwargs.items() if v is not None},
)
return exchange_obj
def initialize_exchanges(
self,
exchange_credentials: dict[Location, list[ExchangeApiCredentials]],
database: 'DBHandler',
) -> None:
log.debug('Initializing exchanges')
self.database = database
# initialize exchanges for which we have keys and are not already initialized
for location, credentials_list in exchange_credentials.items():
module = self._get_exchange_module(location)
for credentials in credentials_list:
extras = database.get_exchange_credentials_extras(
name=credentials.name,
location=credentials.location,
)
exchange_obj = self.initialize_exchange(
module=module,
credentials=credentials,
database=database,
**extras,
)
self.connected_exchanges[location].append(exchange_obj)
log.debug('Initialized exchanges')
def get_user_binance_pairs(self, name: str, location: Location) -> list[str]:
is_connected = location in self.connected_exchanges
if is_connected:
return self.database.get_binance_pairs(name, location)
return []
def query_history_events(self) -> None:
"""Queries all history events for exchanges that need it
May raise:
- RemoteError if any exchange's remote query fails
"""
for exchange in self.iterate_exchanges():
exchange.query_history_events()
def get_exchange_mappings(self) -> LocationEventMappingType:
"""Collect event mappings from each exchange"""
mappings: LocationEventMappingType = {}
for location, exchanges in self.connected_exchanges.items():
for exchange in exchanges:
if len(exchange_mapping := exchange.get_event_mappings()) != 0:
mappings[location] = exchange_mapping
break
return mappings
| connected_and_syncing_exchanges_num | identifier_name |
manager.py | import logging
from collections import defaultdict
from collections.abc import Iterator
from importlib import import_module
from types import ModuleType
from typing import TYPE_CHECKING, Any, Optional
from rotkehlchen.db.constants import BINANCE_MARKETS_KEY, KRAKEN_ACCOUNT_TYPE_KEY
from rotkehlchen.errors.misc import InputError
from rotkehlchen.exchanges.binance import BINANCE_BASE_URL, BINANCEUS_BASE_URL
from rotkehlchen.exchanges.exchange import ExchangeInterface, ExchangeWithExtras
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.types import (
ApiKey,
ApiSecret,
ExchangeApiCredentials,
ExchangeAuthCredentials,
Location,
LocationEventMappingType,
)
from rotkehlchen.user_messages import MessagesAggregator
from .constants import SUPPORTED_EXCHANGES
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
from rotkehlchen.exchanges.kraken import KrakenAccountType
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class ExchangeManager:
def __init__(self, msg_aggregator: MessagesAggregator) -> None:
self.connected_exchanges: dict[Location, list[ExchangeInterface]] = defaultdict(list)
self.msg_aggregator = msg_aggregator
@staticmethod
def _get_exchange_module_name(location: Location) -> str:
if location == Location.BINANCEUS:
return str(Location.BINANCE)
return str(location)
def connected_and_syncing_exchanges_num(self) -> int:
return len(list(self.iterate_exchanges()))
def get_exchange(self, name: str, location: Location) -> Optional[ExchangeInterface]:
"""Get the exchange object for an exchange with a given name and location
Returns None if it can not be found
"""
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return None
for exchange in exchanges_list:
if exchange.name == name:
return exchange
return None
def iterate_exchanges(self) -> Iterator[ExchangeInterface]:
"""Iterate all connected and syncing exchanges"""
with self.database.conn.read_ctx() as cursor:
excluded = self.database.get_settings(cursor).non_syncing_exchanges
for exchanges in self.connected_exchanges.values():
for exchange in exchanges:
# We are not yielding excluded exchanges
if exchange.location_id() not in excluded:
yield exchange
def edit_exchange(
self,
name: str,
location: Location,
new_name: Optional[str],
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
kraken_account_type: Optional['KrakenAccountType'],
binance_selected_trade_pairs: Optional[list[str]],
) -> tuple[bool, str]:
"""Edits both the exchange object and the database entry
        Returns True if an entry was found and edited, False otherwise
"""
exchangeobj = self.get_exchange(name=name, location=location)
if not exchangeobj:
return False, f'Could not find {location!s} exchange {name} for editing'
# First validate exchange credentials
edited = exchangeobj.edit_exchange_credentials(ExchangeAuthCredentials(
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
))
if edited is True:
try:
credentials_are_valid, msg = exchangeobj.validate_api_key()
except Exception as e: # pylint: disable=broad-except
msg = str(e)
credentials_are_valid = False
if credentials_are_valid is False:
exchangeobj.reset_to_db_credentials()
return False, f'New credentials are invalid. {msg}'
# Then edit extra properties if needed
if isinstance(exchangeobj, ExchangeWithExtras):
success, msg = exchangeobj.edit_exchange_extras({
KRAKEN_ACCOUNT_TYPE_KEY: kraken_account_type,
BINANCE_MARKETS_KEY: binance_selected_trade_pairs,
})
if success is False:
exchangeobj.reset_to_db_credentials()
return False, f'Failed to edit exchange extras. {msg}'
try:
with self.database.user_write() as cursor:
self.database.edit_exchange(
cursor,
name=name,
location=location,
new_name=new_name,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
kraken_account_type=kraken_account_type,
binance_selected_trade_pairs=binance_selected_trade_pairs,
)
except InputError as e:
exchangeobj.reset_to_db_credentials() # DB is already rolled back at this point
if isinstance(exchangeobj, ExchangeWithExtras):
exchangeobj.reset_to_db_extras()
return False, f"Couldn't update exchange properties in the DB. {e!s}"
# Finally edit the name of the exchange object
if new_name is not None:
exchangeobj.name = new_name
return True, ''
def delete_exchange(self, name: str, location: Location) -> tuple[bool, str]:
"""
Deletes an exchange with the specified name + location from both connected_exchanges
and the DB.
"""
if self.get_exchange(name=name, location=location) is None:
return False, f'{location!s} exchange {name} is not registered'
exchanges_list = self.connected_exchanges.get(location)
if exchanges_list is None:
return False, f'{location!s} exchange {name} is not registered'
        if len(exchanges_list) == 1:  # if it is the last exchange for this location
self.connected_exchanges.pop(location)
else:
self.connected_exchanges[location] = [x for x in exchanges_list if x.name != name]
with self.database.user_write() as write_cursor: # Also remove it from the db
self.database.remove_exchange(write_cursor=write_cursor, name=name, location=location) # noqa: E501
self.database.delete_used_query_range_for_exchange(
write_cursor=write_cursor,
location=location,
exchange_name=name,
)
return True, ''
def delete_all_exchanges(self) -> None:
"""Deletes all exchanges from the manager. Not from the DB"""
self.connected_exchanges.clear()
def get_connected_exchanges_info(self) -> list[dict[str, Any]]:
exchange_info = []
for location, exchanges in self.connected_exchanges.items():
for exchangeobj in exchanges:
data = {'location': str(location), 'name': exchangeobj.name}
if location == Location.KRAKEN: # ignore type since we know this is kraken here
data[KRAKEN_ACCOUNT_TYPE_KEY] = str(exchangeobj.account_type) # type: ignore
exchange_info.append(data)
return exchange_info
def _get_exchange_module(self, location: Location) -> ModuleType:
module_name = self._get_exchange_module_name(location)
try:
module = import_module(f'rotkehlchen.exchanges.{module_name}')
except ModuleNotFoundError:
# This should never happen
raise AssertionError(
f'Tried to initialize unknown exchange {location!s}. Should not happen',
) from None
return module
def setup_exchange(
self,
name: str,
location: Location,
api_key: ApiKey,
api_secret: ApiSecret,
database: 'DBHandler',
passphrase: Optional[str] = None,
**kwargs: Any,
) -> tuple[bool, str]:
"""
        Set up a new exchange with an api key and an api secret.
        For some exchanges there are more attributes to add
"""
if location not in SUPPORTED_EXCHANGES: # also checked via marshmallow
return False, f'Attempted to register unsupported exchange {name}'
if self.get_exchange(name=name, location=location) is not None:
return False, f'{location!s} exchange {name} is already registered'
api_credentials = ExchangeApiCredentials(
name=name,
location=location,
api_key=api_key,
api_secret=api_secret,
passphrase=passphrase,
)
exchange = self.initialize_exchange(
module=self._get_exchange_module(location),
credentials=api_credentials,
database=database,
**kwargs,
)
try:
result, message = exchange.validate_api_key()
except Exception as e: # pylint: disable=broad-except
result = False
message = str(e)
if not result:
log.error(
f'Failed to validate API key for {location!s} exchange {name}'
f' due to {message}',
)
return False, message
self.connected_exchanges[location].append(exchange)
return True, ''
| module: ModuleType,
credentials: ExchangeApiCredentials,
database: 'DBHandler',
**kwargs: Any,
) -> ExchangeInterface:
maybe_exchange = self.get_exchange(name=credentials.name, location=credentials.location)
if maybe_exchange:
return maybe_exchange # already initialized
module_name = module.__name__.split('.')[-1]
exchange_ctor = getattr(module, module_name.capitalize())
if credentials.passphrase is not None:
kwargs['passphrase'] = credentials.passphrase
elif credentials.location == Location.BINANCE:
kwargs['uri'] = BINANCE_BASE_URL
elif credentials.location == Location.BINANCEUS:
kwargs['uri'] = BINANCEUS_BASE_URL
exchange_obj = exchange_ctor(
name=credentials.name,
api_key=credentials.api_key,
secret=credentials.api_secret,
database=database,
msg_aggregator=self.msg_aggregator,
# remove all empty kwargs
**{k: v for k, v in kwargs.items() if v is not None},
)
return exchange_obj
def initialize_exchanges(
self,
exchange_credentials: dict[Location, list[ExchangeApiCredentials]],
database: 'DBHandler',
) -> None:
log.debug('Initializing exchanges')
self.database = database
# initialize exchanges for which we have keys and are not already initialized
for location, credentials_list in exchange_credentials.items():
module = self._get_exchange_module(location)
for credentials in credentials_list:
extras = database.get_exchange_credentials_extras(
name=credentials.name,
location=credentials.location,
)
exchange_obj = self.initialize_exchange(
module=module,
credentials=credentials,
database=database,
**extras,
)
self.connected_exchanges[location].append(exchange_obj)
log.debug('Initialized exchanges')
def get_user_binance_pairs(self, name: str, location: Location) -> list[str]:
is_connected = location in self.connected_exchanges
if is_connected:
return self.database.get_binance_pairs(name, location)
return []
def query_history_events(self) -> None:
"""Queries all history events for exchanges that need it
May raise:
- RemoteError if any exchange's remote query fails
"""
for exchange in self.iterate_exchanges():
exchange.query_history_events()
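    # Hedged caller-side sketch: per the docstring above this may raise
    # RemoteError, so a periodic task would typically wrap the call. Note that
    # `RemoteError` and `log` here belong to the caller's imports, not this
    # module:
    #
    #     try:
    #         exchange_manager.query_history_events()
    #     except RemoteError as e:
    #         log.warning(f'querying exchange history events failed: {e}')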
def get_exchange_mappings(self) -> LocationEventMappingType:
"""Collect event mappings from each exchange"""
mappings: LocationEventMappingType = {}
for location, exchanges in self.connected_exchanges.items():
for exchange in exchanges:
if len(exchange_mapping := exchange.get_event_mappings()) != 0:
mappings[location] = exchange_mapping
break
return mappings | def initialize_exchange(
self, | random_line_split |
interrupt.rs | use crate::{cpu, mm, segment, PAddr, VAddr};
use core::{arch::asm, marker::PhantomData, time::Duration};
use hal_core::interrupt::Control;
use hal_core::interrupt::{ctx, Handlers};
use mycelium_util::{
bits, fmt,
sync::{spin, InitOnce},
};
pub mod apic;
pub mod idt;
pub mod pic;
use self::apic::{IoApic, LocalApic};
pub use idt::Idt;
pub use pic::CascadedPic;
#[derive(Debug)]
pub struct Controller {
model: InterruptModel,
}
#[derive(Debug)]
#[repr(C)]
pub struct Context<'a, T = ()> {
registers: &'a mut Registers,
code: T,
}
pub type ErrorCode = u64;
pub struct CodeFault<'a> {
kind: &'static str,
error_code: Option<&'a dyn fmt::Display>,
}
/// An interrupt service routine.
pub type Isr<T> = extern "x86-interrupt" fn(&mut Context<T>);
#[derive(Debug)]
#[repr(C)]
pub struct Interrupt<T = ()> {
vector: u8,
_t: PhantomData<T>,
}
/// The interrupt controller's active interrupt model.
#[derive(Debug)]
enum InterruptModel {
/// Interrupts are handled by the [8259 Programmable Interrupt Controller
/// (PIC)](pic).
Pic(spin::Mutex<pic::CascadedPic>),
/// Interrupts are handled by the [local] and [I/O] [Advanced Programmable
/// Interrupt Controller (APIC)s][apics].
///
/// [local]: apic::LocalApic
/// [I/O]: apic::IoApic
/// [apics]: apic
Apic {
local: apic::LocalApic,
// TODO(eliza): allow further configuration of the I/O APIC (e.g.
// masking/unmasking stuff...)
#[allow(dead_code)]
io: spin::Mutex<apic::IoApic>,
},
}
bits::bitfield! {
pub struct PageFaultCode<u32> {
/// When set, the page fault was caused by a page-protection violation.
/// When not set, it was caused by a non-present page.
pub const PRESENT: bool;
/// When set, the page fault was caused by a write access. When not set,
/// it was caused by a read access.
pub const WRITE: bool;
/// When set, the page fault was caused while CPL = 3. This does not
/// necessarily mean that the page fault was a privilege violation.
pub const USER: bool;
/// When set, one or more page directory entries contain reserved bits
/// which are set to 1. This only applies when the PSE or PAE flags in
/// CR4 are set to 1.
pub const RESERVED_WRITE: bool;
/// When set, the page fault was caused by an instruction fetch. This
/// only applies when the No-Execute bit is supported and enabled.
pub const INSTRUCTION_FETCH: bool;
/// When set, the page fault was caused by a protection-key violation.
/// The PKRU register (for user-mode accesses) or PKRS MSR (for
/// supervisor-mode accesses) specifies the protection key rights.
pub const PROTECTION_KEY: bool;
/// When set, the page fault was caused by a shadow stack access.
pub const SHADOW_STACK: bool;
const _RESERVED0 = 8;
/// When set, the fault was due to an SGX violation. The fault is
/// unrelated to ordinary paging.
pub const SGX: bool;
}
}
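// A hedged sketch of how a fault handler might describe this code; the helper
// function is invented, and the `.get(...)` accessor is assumed to behave like
// the `SelectorErrorCode` usage further down in this file:
//
//     fn describe(code: PageFaultCode) -> (&'static str, &'static str) {
//         let cause = if code.get(PageFaultCode::PRESENT) {
//             "page-protection violation"
//         } else {
//             "non-present page"
//         };
//         let access = if code.get(PageFaultCode::WRITE) { "write" } else { "read" };
//         (cause, access)
//     }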
bits::bitfield! {
/// Error code set by the "Invalid TSS", "Segment Not Present", "Stack-Segment
/// Fault", and "General Protection Fault" faults.
///
/// This includes a segment selector index, and includes 2 bits describing
/// which table the segment selector references.
pub struct SelectorErrorCode<u16> {
const EXTERNAL: bool;
const TABLE: cpu::DescriptorTable;
const INDEX = 13;
}
}
#[repr(C)]
pub struct Registers {
pub instruction_ptr: VAddr, // TODO(eliza): add VAddr
pub code_segment: segment::Selector,
_pad: [u16; 3],
pub cpu_flags: u64, // TODO(eliza): rflags type?
pub stack_ptr: VAddr, // TODO(eliza): add VAddr
pub stack_segment: segment::Selector,
_pad2: [u16; 3],
}
static IDT: spin::Mutex<idt::Idt> = spin::Mutex::new(idt::Idt::new());
static INTERRUPT_CONTROLLER: InitOnce<Controller> = InitOnce::uninitialized();
impl Controller {
// const DEFAULT_IOAPIC_BASE_PADDR: u64 = 0xFEC00000;
pub fn idt() -> spin::MutexGuard<'static, idt::Idt> {
IDT.lock()
}
#[tracing::instrument(level = "info", name = "interrupt::Controller::init")]
pub fn init<H: Handlers<Registers>>() {
tracing::info!("intializing IDT...");
let mut idt = IDT.lock();
idt.register_handlers::<H>().unwrap();
unsafe {
idt.load_raw();
}
}
pub fn enable_hardware_interrupts(
acpi: Option<&acpi::InterruptModel>,
frame_alloc: &impl hal_core::mem::page::Alloc<mm::size::Size4Kb>,
) -> &'static Self {
let mut pics = pic::CascadedPic::new();
// regardless of whether APIC or PIC interrupt handling will be used,
// the PIC interrupt vectors must be remapped so that they do not
// conflict with CPU exceptions.
unsafe {
tracing::debug!(
big = Idt::PIC_BIG_START,
little = Idt::PIC_LITTLE_START,
"remapping PIC interrupt vectors"
);
pics.set_irq_address(Idt::PIC_BIG_START as u8, Idt::PIC_LITTLE_START as u8);
}
let model = match acpi {
Some(acpi::InterruptModel::Apic(apic_info)) => {
tracing::info!("detected APIC interrupt model");
let mut pagectrl = mm::PageCtrl::current();
// disable the 8259 PICs so that we can use APIC interrupts instead
unsafe {
pics.disable();
}
tracing::info!("disabled 8259 PICs");
// configure the I/O APIC
let mut io = {
// TODO(eliza): consider actually using other I/O APICs? do
// we need them for anything??
tracing::trace!(?apic_info.io_apics, "found {} IO APICs", apic_info.io_apics.len());
let io_apic = &apic_info.io_apics[0];
let addr = PAddr::from_u64(io_apic.address as u64);
tracing::debug!(ioapic.paddr = ?addr, "IOAPIC");
IoApic::new(addr, &mut pagectrl, frame_alloc)
};
// map the standard ISA hardware interrupts to I/O APIC
// redirection entries.
io.map_isa_irqs(Idt::IOAPIC_START as u8);
// unmask the PIT timer vector --- we'll need this for calibrating
// the local APIC timer...
io.set_masked(IoApic::PIT_TIMER_IRQ, false);
// unmask the PS/2 keyboard interrupt as well.
io.set_masked(IoApic::PS2_KEYBOARD_IRQ, false);
// enable the local APIC
let local = LocalApic::new(&mut pagectrl, frame_alloc);
local.enable(Idt::LOCAL_APIC_SPURIOUS as u8);
InterruptModel::Apic {
local,
io: spin::Mutex::new(io),
}
}
model => {
if model.is_none() {
tracing::warn!("platform does not support ACPI; falling back to 8259 PIC");
} else {
tracing::warn!(
"ACPI does not indicate APIC interrupt model; falling back to 8259 PIC"
)
}
tracing::info!("configuring 8259 PIC interrupts...");
unsafe {
// functionally a no-op, since interrupts from PC/AT PIC are enabled at boot, just being
// clear for you, the reader, that at this point they are definitely intentionally enabled.
pics.enable();
}
InterruptModel::Pic(spin::Mutex::new(pics))
}
};
tracing::trace!(interrupt_model = ?model);
let controller = INTERRUPT_CONTROLLER.init(Self { model });
// `sti` may not be called until the interrupt controller static is
// fully initialized, as an interrupt that occurs before it is
// initialized may attempt to access the static to finish the interrupt!
unsafe {
crate::cpu::intrinsics::sti();
}
controller
}
/// Starts a periodic timer which fires the `timer_tick` interrupt of the
/// provided [`Handlers`] every time `interval` elapses.
pub fn start_periodic_timer(
&self,
interval: Duration,
) -> Result<(), crate::time::InvalidDuration> {
match self.model {
InterruptModel::Pic(_) => crate::time::PIT.lock().start_periodic_timer(interval),
InterruptModel::Apic { ref local, .. } => {
local.start_periodic_timer(interval, Idt::LOCAL_APIC_TIMER as u8)
}
}
}
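    // Illustrative only: once the controller is up, a kernel might start a
    // 10 ms tick roughly like this, where `acpi_model` is the
    // `Option<&acpi::InterruptModel>` from ACPI discovery and `frame_alloc` a
    // page allocator -- both names are invented here:
    //
    //     let controller = Controller::enable_hardware_interrupts(acpi_model, &frame_alloc);
    //     controller
    //         .start_periodic_timer(Duration::from_millis(10))
    //         .expect("10ms should be a valid timer interval");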
}
impl<'a, T> hal_core::interrupt::Context for Context<'a, T> {
type Registers = Registers;
fn registers(&self) -> &Registers {
self.registers
}
/// # Safety
///
/// Mutating the value of saved interrupt registers can cause
/// undefined behavior.
unsafe fn registers_mut(&mut self) -> &mut Registers {
self.registers
}
}
impl<'a> ctx::PageFault for Context<'a, PageFaultCode> {
fn fault_vaddr(&self) -> crate::VAddr {
crate::control_regs::Cr2::read()
}
fn debug_error_code(&self) -> &dyn fmt::Debug {
&self.code
}
fn display_error_code(&self) -> &dyn fmt::Display {
&self.code
}
}
impl<'a> ctx::CodeFault for Context<'a, CodeFault<'a>> {
fn is_user_mode(&self) -> bool {
false // TODO(eliza)
}
fn instruction_ptr(&self) -> crate::VAddr {
self.registers.instruction_ptr
}
fn fault_kind(&self) -> &'static str {
self.code.kind
}
fn details(&self) -> Option<&dyn fmt::Display> {
self.code.error_code
}
}
impl<'a> Context<'a, ErrorCode> {
pub fn error_code(&self) -> ErrorCode {
self.code
}
}
impl<'a> Context<'a, PageFaultCode> {
pub fn page_fault_code(&self) -> PageFaultCode {
self.code
}
}
impl hal_core::interrupt::Control for Idt {
// type Vector = u8;
type Registers = Registers;
#[inline]
unsafe fn disable(&mut self) {
crate::cpu::intrinsics::cli();
}
#[inline]
unsafe fn enable(&mut self) {
crate::cpu::intrinsics::sti();
tracing::trace!("interrupts enabled");
}
fn is_enabled(&self) -> bool {
unimplemented!("eliza do this one!!!")
}
fn register_handlers<H>(&mut self) -> Result<(), hal_core::interrupt::RegistrationError>
where
H: Handlers<Registers>,
{
macro_rules! gen_code_faults {
($self:ident, $h:ty, $($vector:path => fn $name:ident($($rest:tt)+),)+) => {
$(
gen_code_faults! {@ $name($($rest)+); }
$self.set_isr($vector, $name::<$h> as *const ());
)+
};
(@ $name:ident($kind:literal);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(mut registers: Registers) {
let code = CodeFault {
error_code: None,
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
(@ $name:ident($kind:literal, code);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
let code = CodeFault {
error_code: Some(&code),
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
}
let span = tracing::debug_span!("Idt::register_handlers");
let _enter = span.enter();
extern "x86-interrupt" fn page_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: PageFaultCode,
) {
H::page_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn double_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
H::double_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn pit_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
use core::sync::atomic::Ordering;
// if we weren't trying to do a PIT sleep, handle the timer tick
// instead.
let was_sleeping = crate::time::pit::SLEEPING
.compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
.is_ok();
if !was_sleeping {
H::timer_tick();
} else {
tracing::trace!("PIT sleep completed");
}
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PIT_TIMER as u8)
}
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn apic_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
H::timer_tick();
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(_) => unreachable!(),
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn keyboard_isr<H: Handlers<Registers>>(_regs: Registers) {
// 0x60 is a magic PC/AT number.
static PORT: cpu::Port = cpu::Port::at(0x60);
// load-bearing read - if we don't read from the keyboard controller it won't
// send another interrupt on later keystrokes.
let scancode = unsafe { PORT.readb() };
H::ps2_keyboard(scancode);
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PS2_KEYBOARD as u8)
}
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn test_isr<H: Handlers<Registers>>(mut registers: Registers) {
H::test_interrupt(Context {
registers: &mut registers,
code: (),
});
}
extern "x86-interrupt" fn invalid_tss_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "invalid task-state segment!");
let msg = selector.named("task-state segment (TSS)");
let code = CodeFault {
error_code: Some(&msg),
kind: "Invalid TSS (0xA)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn segment_not_present_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a segment was not present!");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Segment Not Present (0xB)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn stack_segment_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a stack-segment fault is happening");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Stack-Segment Fault (0xC)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn gpf_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let segment = if code > 0 {
Some(SelectorErrorCode(code as u16))
} else {
None
};
tracing::error!(?segment, "lmao, a general protection fault is happening");
let error_code = segment.map(|seg| seg.named("selector"));
let code = CodeFault {
error_code: error_code.as_ref().map(|code| code as &dyn fmt::Display),
kind: "General Protection Fault (0xD)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn spurious_isr() {
tracing::trace!("spurious");
}
// === exceptions ===
// these exceptions are mapped to the HAL `Handlers` trait's "code
// fault" handler, and indicate that the code that was executing did a
// Bad Thing
gen_code_faults! {
self, H,
Self::DIVIDE_BY_ZERO => fn div_0_isr("Divide-By-Zero (0x0)"),
Self::OVERFLOW => fn overflow_isr("Overflow (0x4)"),
Self::BOUND_RANGE_EXCEEDED => fn br_isr("Bound Range Exceeded (0x5)"),
Self::INVALID_OPCODE => fn ud_isr("Invalid Opcode (0x6)"),
Self::DEVICE_NOT_AVAILABLE => fn no_fpu_isr("Device (FPU) Not Available (0x7)"),
Self::ALIGNMENT_CHECK => fn alignment_check_isr("Alignment Check (0x11)", code),
Self::SIMD_FLOATING_POINT => fn simd_fp_exn_isr("SIMD Floating-Point Exception (0x13)"),
Self::X87_FPU_EXCEPTION => fn x87_exn_isr("x87 Floating-Point Exception (0x10)"),
}
// other exceptions, not mapped to the "code fault" handler
self.set_isr(Self::PAGE_FAULT, page_fault_isr::<H> as *const ());
self.set_isr(Self::INVALID_TSS, invalid_tss_isr::<H> as *const ());
self.set_isr(
Self::SEGMENT_NOT_PRESENT,
segment_not_present_isr::<H> as *const (),
);
self.set_isr(
Self::STACK_SEGMENT_FAULT,
stack_segment_isr::<H> as *const (),
);
self.set_isr(Self::GENERAL_PROTECTION_FAULT, gpf_isr::<H> as *const ());
self.set_isr(Self::DOUBLE_FAULT, double_fault_isr::<H> as *const ());
// === hardware interrupts ===
// ISA standard hardware interrupts mapped on both the PICs and IO APIC
// interrupt models.
self.set_isr(Self::PIC_PIT_TIMER, pit_timer_isr::<H> as *const ());
self.set_isr(Self::IOAPIC_PIT_TIMER, pit_timer_isr::<H> as *const ());
self.set_isr(Self::PIC_PS2_KEYBOARD, keyboard_isr::<H> as *const ());
self.set_isr(Self::IOAPIC_PS2_KEYBOARD, keyboard_isr::<H> as *const ());
        // local APIC-specific hardware interrupts
self.set_isr(Self::LOCAL_APIC_SPURIOUS, spurious_isr as *const ());
self.set_isr(Self::LOCAL_APIC_TIMER, apic_timer_isr::<H> as *const ());
// vector 69 (nice) is reserved by the HAL for testing the IDT.
self.set_isr(69, test_isr::<H> as *const ());
Ok(())
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self {
instruction_ptr,
code_segment,
stack_ptr,
stack_segment,
_pad: _,
cpu_flags,
_pad2: _,
} = self;
f.debug_struct("Registers")
.field("instruction_ptr", instruction_ptr)
.field("code_segment", code_segment)
.field("cpu_flags", &format_args!("{cpu_flags:#b}"))
.field("stack_ptr", stack_ptr)
.field("stack_segment", stack_segment)
.finish()
}
}
impl fmt::Display for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, " rip: {:?}", self.instruction_ptr)?;
writeln!(f, " cs: {:?}", self.code_segment)?;
writeln!(f, " flags: {:#b}", self.cpu_flags)?;
writeln!(f, " rsp: {:?}", self.stack_ptr)?;
writeln!(f, " ss: {:?}", self.stack_segment)?;
Ok(())
}
}
pub fn fire_test_interrupt() {
unsafe { asm!("int {0}", const 69) }
}
// === impl SelectorErrorCode ===
impl SelectorErrorCode {
#[inline]
fn named(self, segment_kind: &'static str) -> NamedSelectorErrorCode {
NamedSelectorErrorCode {
segment_kind,
code: self,
}
}
fn display(&self) -> impl fmt::Display {
struct PrettyErrorCode(SelectorErrorCode);
impl fmt::Display for PrettyErrorCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let table = self.0.get(SelectorErrorCode::TABLE);
let index = self.0.get(SelectorErrorCode::INDEX);
write!(f, "{table} index {index}")?;
if self.0.get(SelectorErrorCode::EXTERNAL) {
f.write_str(" (from an external source)")?;
}
write!(f, " (error code {:#b})", self.0.bits())?;
Ok(())
}
}
PrettyErrorCode(*self)
}
}
struct NamedSelectorErrorCode {
segment_kind: &'static str,
code: SelectorErrorCode,
}
impl fmt::Display for NamedSelectorErrorCode {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{} at {}", self.segment_kind, self.code.display())
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::mem::size_of;
#[test]
fn | () {
assert_eq!(size_of::<Registers>(), 40);
}
}
| registers_is_correct_size | identifier_name |
interrupt.rs | use crate::{cpu, mm, segment, PAddr, VAddr};
use core::{arch::asm, marker::PhantomData, time::Duration};
use hal_core::interrupt::Control;
use hal_core::interrupt::{ctx, Handlers};
use mycelium_util::{
bits, fmt,
sync::{spin, InitOnce},
};
pub mod apic;
pub mod idt;
pub mod pic;
use self::apic::{IoApic, LocalApic};
pub use idt::Idt;
pub use pic::CascadedPic;
#[derive(Debug)]
pub struct Controller {
model: InterruptModel,
}
#[derive(Debug)]
#[repr(C)]
pub struct Context<'a, T = ()> {
registers: &'a mut Registers,
code: T,
}
pub type ErrorCode = u64;
pub struct CodeFault<'a> {
kind: &'static str,
error_code: Option<&'a dyn fmt::Display>,
}
/// An interrupt service routine.
pub type Isr<T> = extern "x86-interrupt" fn(&mut Context<T>);
#[derive(Debug)]
#[repr(C)]
pub struct Interrupt<T = ()> {
vector: u8,
_t: PhantomData<T>,
}
/// The interrupt controller's active interrupt model.
#[derive(Debug)]
enum InterruptModel {
/// Interrupts are handled by the [8259 Programmable Interrupt Controller
/// (PIC)](pic).
Pic(spin::Mutex<pic::CascadedPic>),
/// Interrupts are handled by the [local] and [I/O] [Advanced Programmable
/// Interrupt Controller (APIC)s][apics].
///
/// [local]: apic::LocalApic
/// [I/O]: apic::IoApic
/// [apics]: apic
Apic {
local: apic::LocalApic,
// TODO(eliza): allow further configuration of the I/O APIC (e.g.
// masking/unmasking stuff...)
#[allow(dead_code)]
io: spin::Mutex<apic::IoApic>,
},
}
bits::bitfield! {
pub struct PageFaultCode<u32> {
/// When set, the page fault was caused by a page-protection violation.
/// When not set, it was caused by a non-present page.
pub const PRESENT: bool;
/// When set, the page fault was caused by a write access. When not set,
/// it was caused by a read access.
pub const WRITE: bool;
/// When set, the page fault was caused while CPL = 3. This does not
/// necessarily mean that the page fault was a privilege violation.
pub const USER: bool;
/// When set, one or more page directory entries contain reserved bits
/// which are set to 1. This only applies when the PSE or PAE flags in
/// CR4 are set to 1.
pub const RESERVED_WRITE: bool;
/// When set, the page fault was caused by an instruction fetch. This
/// only applies when the No-Execute bit is supported and enabled.
pub const INSTRUCTION_FETCH: bool;
/// When set, the page fault was caused by a protection-key violation.
/// The PKRU register (for user-mode accesses) or PKRS MSR (for
/// supervisor-mode accesses) specifies the protection key rights.
pub const PROTECTION_KEY: bool;
/// When set, the page fault was caused by a shadow stack access.
pub const SHADOW_STACK: bool;
const _RESERVED0 = 8;
/// When set, the fault was due to an SGX violation. The fault is
/// unrelated to ordinary paging.
pub const SGX: bool;
}
}
bits::bitfield! {
/// Error code set by the "Invalid TSS", "Segment Not Present", "Stack-Segment
/// Fault", and "General Protection Fault" faults.
///
/// This includes a segment selector index, and includes 2 bits describing
/// which table the segment selector references.
pub struct SelectorErrorCode<u16> {
const EXTERNAL: bool;
const TABLE: cpu::DescriptorTable;
const INDEX = 13;
}
}
#[repr(C)]
pub struct Registers {
pub instruction_ptr: VAddr, // TODO(eliza): add VAddr
pub code_segment: segment::Selector,
_pad: [u16; 3],
pub cpu_flags: u64, // TODO(eliza): rflags type?
pub stack_ptr: VAddr, // TODO(eliza): add VAddr
pub stack_segment: segment::Selector,
_pad2: [u16; 3],
}
static IDT: spin::Mutex<idt::Idt> = spin::Mutex::new(idt::Idt::new());
static INTERRUPT_CONTROLLER: InitOnce<Controller> = InitOnce::uninitialized();
impl Controller {
// const DEFAULT_IOAPIC_BASE_PADDR: u64 = 0xFEC00000;
pub fn idt() -> spin::MutexGuard<'static, idt::Idt> {
IDT.lock()
}
#[tracing::instrument(level = "info", name = "interrupt::Controller::init")]
pub fn init<H: Handlers<Registers>>() {
tracing::info!("intializing IDT...");
let mut idt = IDT.lock();
idt.register_handlers::<H>().unwrap();
unsafe {
idt.load_raw();
}
}
pub fn enable_hardware_interrupts(
acpi: Option<&acpi::InterruptModel>,
frame_alloc: &impl hal_core::mem::page::Alloc<mm::size::Size4Kb>,
) -> &'static Self {
let mut pics = pic::CascadedPic::new();
// regardless of whether APIC or PIC interrupt handling will be used,
// the PIC interrupt vectors must be remapped so that they do not
// conflict with CPU exceptions.
unsafe {
tracing::debug!(
big = Idt::PIC_BIG_START,
little = Idt::PIC_LITTLE_START,
"remapping PIC interrupt vectors"
);
pics.set_irq_address(Idt::PIC_BIG_START as u8, Idt::PIC_LITTLE_START as u8);
}
let model = match acpi {
Some(acpi::InterruptModel::Apic(apic_info)) => {
tracing::info!("detected APIC interrupt model");
let mut pagectrl = mm::PageCtrl::current();
// disable the 8259 PICs so that we can use APIC interrupts instead
unsafe {
pics.disable();
}
tracing::info!("disabled 8259 PICs");
// configure the I/O APIC
let mut io = {
// TODO(eliza): consider actually using other I/O APICs? do
// we need them for anything??
tracing::trace!(?apic_info.io_apics, "found {} IO APICs", apic_info.io_apics.len());
let io_apic = &apic_info.io_apics[0];
let addr = PAddr::from_u64(io_apic.address as u64);
tracing::debug!(ioapic.paddr = ?addr, "IOAPIC");
IoApic::new(addr, &mut pagectrl, frame_alloc)
};
// map the standard ISA hardware interrupts to I/O APIC
// redirection entries.
io.map_isa_irqs(Idt::IOAPIC_START as u8);
// unmask the PIT timer vector --- we'll need this for calibrating
// the local APIC timer...
io.set_masked(IoApic::PIT_TIMER_IRQ, false);
// unmask the PS/2 keyboard interrupt as well.
io.set_masked(IoApic::PS2_KEYBOARD_IRQ, false);
// enable the local APIC
let local = LocalApic::new(&mut pagectrl, frame_alloc);
local.enable(Idt::LOCAL_APIC_SPURIOUS as u8);
InterruptModel::Apic {
local,
io: spin::Mutex::new(io),
}
}
model => {
if model.is_none() {
tracing::warn!("platform does not support ACPI; falling back to 8259 PIC");
} else {
tracing::warn!(
"ACPI does not indicate APIC interrupt model; falling back to 8259 PIC"
)
}
tracing::info!("configuring 8259 PIC interrupts...");
unsafe {
// functionally a no-op, since interrupts from PC/AT PIC are enabled at boot, just being
// clear for you, the reader, that at this point they are definitely intentionally enabled.
pics.enable();
}
InterruptModel::Pic(spin::Mutex::new(pics))
}
};
tracing::trace!(interrupt_model = ?model);
let controller = INTERRUPT_CONTROLLER.init(Self { model });
// `sti` may not be called until the interrupt controller static is
// fully initialized, as an interrupt that occurs before it is
// initialized may attempt to access the static to finish the interrupt!
unsafe {
crate::cpu::intrinsics::sti();
}
controller
}
/// Starts a periodic timer which fires the `timer_tick` interrupt of the
/// provided [`Handlers`] every time `interval` elapses.
pub fn start_periodic_timer(
&self,
interval: Duration,
) -> Result<(), crate::time::InvalidDuration> {
match self.model {
InterruptModel::Pic(_) => crate::time::PIT.lock().start_periodic_timer(interval),
InterruptModel::Apic { ref local, .. } => {
local.start_periodic_timer(interval, Idt::LOCAL_APIC_TIMER as u8)
}
}
}
}
impl<'a, T> hal_core::interrupt::Context for Context<'a, T> {
type Registers = Registers;
fn registers(&self) -> &Registers {
self.registers
}
/// # Safety
///
/// Mutating the value of saved interrupt registers can cause
/// undefined behavior.
unsafe fn registers_mut(&mut self) -> &mut Registers {
self.registers
}
}
impl<'a> ctx::PageFault for Context<'a, PageFaultCode> {
fn fault_vaddr(&self) -> crate::VAddr |
fn debug_error_code(&self) -> &dyn fmt::Debug {
&self.code
}
fn display_error_code(&self) -> &dyn fmt::Display {
&self.code
}
}
impl<'a> ctx::CodeFault for Context<'a, CodeFault<'a>> {
fn is_user_mode(&self) -> bool {
false // TODO(eliza)
}
fn instruction_ptr(&self) -> crate::VAddr {
self.registers.instruction_ptr
}
fn fault_kind(&self) -> &'static str {
self.code.kind
}
fn details(&self) -> Option<&dyn fmt::Display> {
self.code.error_code
}
}
impl<'a> Context<'a, ErrorCode> {
pub fn error_code(&self) -> ErrorCode {
self.code
}
}
impl<'a> Context<'a, PageFaultCode> {
pub fn page_fault_code(&self) -> PageFaultCode {
self.code
}
}
impl hal_core::interrupt::Control for Idt {
// type Vector = u8;
type Registers = Registers;
#[inline]
unsafe fn disable(&mut self) {
crate::cpu::intrinsics::cli();
}
#[inline]
unsafe fn enable(&mut self) {
crate::cpu::intrinsics::sti();
tracing::trace!("interrupts enabled");
}
fn is_enabled(&self) -> bool {
unimplemented!("eliza do this one!!!")
}
fn register_handlers<H>(&mut self) -> Result<(), hal_core::interrupt::RegistrationError>
where
H: Handlers<Registers>,
{
macro_rules! gen_code_faults {
($self:ident, $h:ty, $($vector:path => fn $name:ident($($rest:tt)+),)+) => {
$(
gen_code_faults! {@ $name($($rest)+); }
$self.set_isr($vector, $name::<$h> as *const ());
)+
};
(@ $name:ident($kind:literal);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(mut registers: Registers) {
let code = CodeFault {
error_code: None,
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
(@ $name:ident($kind:literal, code);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
let code = CodeFault {
error_code: Some(&code),
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
}
let span = tracing::debug_span!("Idt::register_handlers");
let _enter = span.enter();
extern "x86-interrupt" fn page_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: PageFaultCode,
) {
H::page_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn double_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
H::double_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn pit_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
use core::sync::atomic::Ordering;
// if we weren't trying to do a PIT sleep, handle the timer tick
// instead.
let was_sleeping = crate::time::pit::SLEEPING
.compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
.is_ok();
if !was_sleeping {
H::timer_tick();
} else {
tracing::trace!("PIT sleep completed");
}
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PIT_TIMER as u8)
}
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn apic_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
H::timer_tick();
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(_) => unreachable!(),
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn keyboard_isr<H: Handlers<Registers>>(_regs: Registers) {
// 0x60 is a magic PC/AT number.
static PORT: cpu::Port = cpu::Port::at(0x60);
// load-bearing read - if we don't read from the keyboard controller it won't
// send another interrupt on later keystrokes.
let scancode = unsafe { PORT.readb() };
H::ps2_keyboard(scancode);
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PS2_KEYBOARD as u8)
}
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn test_isr<H: Handlers<Registers>>(mut registers: Registers) {
H::test_interrupt(Context {
registers: &mut registers,
code: (),
});
}
extern "x86-interrupt" fn invalid_tss_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "invalid task-state segment!");
let msg = selector.named("task-state segment (TSS)");
let code = CodeFault {
error_code: Some(&msg),
kind: "Invalid TSS (0xA)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn segment_not_present_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a segment was not present!");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Segment Not Present (0xB)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn stack_segment_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a stack-segment fault is happening");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Stack-Segment Fault (0xC)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn gpf_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let segment = if code > 0 {
Some(SelectorErrorCode(code as u16))
} else {
None
};
tracing::error!(?segment, "lmao, a general protection fault is happening");
let error_code = segment.map(|seg| seg.named("selector"));
let code = CodeFault {
error_code: error_code.as_ref().map(|code| code as &dyn fmt::Display),
kind: "General Protection Fault (0xD)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn spurious_isr() {
tracing::trace!("spurious");
}
// === exceptions ===
// these exceptions are mapped to the HAL `Handlers` trait's "code
// fault" handler, and indicate that the code that was executing did a
// Bad Thing
gen_code_faults! {
self, H,
Self::DIVIDE_BY_ZERO => fn div_0_isr("Divide-By-Zero (0x0)"),
Self::OVERFLOW => fn overflow_isr("Overflow (0x4)"),
Self::BOUND_RANGE_EXCEEDED => fn br_isr("Bound Range Exceeded (0x5)"),
Self::INVALID_OPCODE => fn ud_isr("Invalid Opcode (0x6)"),
Self::DEVICE_NOT_AVAILABLE => fn no_fpu_isr("Device (FPU) Not Available (0x7)"),
Self::ALIGNMENT_CHECK => fn alignment_check_isr("Alignment Check (0x11)", code),
Self::SIMD_FLOATING_POINT => fn simd_fp_exn_isr("SIMD Floating-Point Exception (0x13)"),
Self::X87_FPU_EXCEPTION => fn x87_exn_isr("x87 Floating-Point Exception (0x10)"),
}
// other exceptions, not mapped to the "code fault" handler
self.set_isr(Self::PAGE_FAULT, page_fault_isr::<H> as *const ());
self.set_isr(Self::INVALID_TSS, invalid_tss_isr::<H> as *const ());
self.set_isr(
Self::SEGMENT_NOT_PRESENT,
segment_not_present_isr::<H> as *const (),
);
self.set_isr(
Self::STACK_SEGMENT_FAULT,
stack_segment_isr::<H> as *const (),
);
self.set_isr(Self::GENERAL_PROTECTION_FAULT, gpf_isr::<H> as *const ());
self.set_isr(Self::DOUBLE_FAULT, double_fault_isr::<H> as *const ());
// === hardware interrupts ===
// ISA standard hardware interrupts mapped on both the PICs and IO APIC
// interrupt models.
self.set_isr(Self::PIC_PIT_TIMER, pit_timer_isr::<H> as *const ());
self.set_isr(Self::IOAPIC_PIT_TIMER, pit_timer_isr::<H> as *const ());
self.set_isr(Self::PIC_PS2_KEYBOARD, keyboard_isr::<H> as *const ());
self.set_isr(Self::IOAPIC_PS2_KEYBOARD, keyboard_isr::<H> as *const ());
// local APIC-specific hardware interrupts
self.set_isr(Self::LOCAL_APIC_SPURIOUS, spurious_isr as *const ());
self.set_isr(Self::LOCAL_APIC_TIMER, apic_timer_isr::<H> as *const ());
// vector 69 (nice) is reserved by the HAL for testing the IDT.
self.set_isr(69, test_isr::<H> as *const ());
Ok(())
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self {
instruction_ptr,
code_segment,
stack_ptr,
stack_segment,
_pad: _,
cpu_flags,
_pad2: _,
} = self;
f.debug_struct("Registers")
.field("instruction_ptr", instruction_ptr)
.field("code_segment", code_segment)
.field("cpu_flags", &format_args!("{cpu_flags:#b}"))
.field("stack_ptr", stack_ptr)
.field("stack_segment", stack_segment)
.finish()
}
}
impl fmt::Display for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, " rip: {:?}", self.instruction_ptr)?;
writeln!(f, " cs: {:?}", self.code_segment)?;
writeln!(f, " flags: {:#b}", self.cpu_flags)?;
writeln!(f, " rsp: {:?}", self.stack_ptr)?;
writeln!(f, " ss: {:?}", self.stack_segment)?;
Ok(())
}
}
pub fn fire_test_interrupt() {
unsafe { asm!("int {0}", const 69) }
}
// === impl SelectorErrorCode ===
impl SelectorErrorCode {
#[inline]
fn named(self, segment_kind: &'static str) -> NamedSelectorErrorCode {
NamedSelectorErrorCode {
segment_kind,
code: self,
}
}
fn display(&self) -> impl fmt::Display {
struct PrettyErrorCode(SelectorErrorCode);
impl fmt::Display for PrettyErrorCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let table = self.0.get(SelectorErrorCode::TABLE);
let index = self.0.get(SelectorErrorCode::INDEX);
write!(f, "{table} index {index}")?;
if self.0.get(SelectorErrorCode::EXTERNAL) {
f.write_str(" (from an external source)")?;
}
write!(f, " (error code {:#b})", self.0.bits())?;
Ok(())
}
}
PrettyErrorCode(*self)
}
}
struct NamedSelectorErrorCode {
segment_kind: &'static str,
code: SelectorErrorCode,
}
impl fmt::Display for NamedSelectorErrorCode {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{} at {}", self.segment_kind, self.code.display())
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::mem::size_of;
#[test]
fn registers_is_correct_size() {
assert_eq!(size_of::<Registers>(), 40);
}
}
| {
crate::control_regs::Cr2::read()
} | identifier_body |
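// --- hypothetical sketch, not part of the interrupt.rs dump above ---
// If it lived in this module, a `Handlers::page_fault` implementation could
// decode the `PageFaultCode` bits roughly like this. It assumes the
// `bits::bitfield!` macro generates the same `get(...)` accessor that the
// `SelectorErrorCode` formatting code uses; the function name and log fields
// are illustrative only.
#[allow(dead_code)]
fn describe_page_fault(ctx: &Context<'_, PageFaultCode>) {
    use hal_core::interrupt::ctx::PageFault;
    let code = ctx.page_fault_code();
    let vaddr = ctx.fault_vaddr(); // reads CR2, per the `fault_vaddr` impl above
    let cause = if code.get(PageFaultCode::PRESENT) {
        "page-protection violation"
    } else {
        "non-present page"
    };
    let access = if code.get(PageFaultCode::INSTRUCTION_FETCH) {
        "instruction fetch"
    } else if code.get(PageFaultCode::WRITE) {
        "write"
    } else {
        "read"
    };
    tracing::error!(?vaddr, %cause, %access, user = code.get(PageFaultCode::USER), "page fault");
}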
interrupt.rs | use crate::{cpu, mm, segment, PAddr, VAddr};
use core::{arch::asm, marker::PhantomData, time::Duration};
use hal_core::interrupt::Control;
use hal_core::interrupt::{ctx, Handlers};
use mycelium_util::{
bits, fmt,
sync::{spin, InitOnce},
};
pub mod apic;
pub mod idt;
pub mod pic;
use self::apic::{IoApic, LocalApic};
pub use idt::Idt;
pub use pic::CascadedPic;
#[derive(Debug)]
pub struct Controller {
model: InterruptModel,
}
#[derive(Debug)]
#[repr(C)]
pub struct Context<'a, T = ()> {
registers: &'a mut Registers,
code: T,
}
pub type ErrorCode = u64;
pub struct CodeFault<'a> {
kind: &'static str,
error_code: Option<&'a dyn fmt::Display>,
}
/// An interrupt service routine.
pub type Isr<T> = extern "x86-interrupt" fn(&mut Context<T>);
#[derive(Debug)]
#[repr(C)]
pub struct Interrupt<T = ()> {
vector: u8,
_t: PhantomData<T>,
}
/// The interrupt controller's active interrupt model.
#[derive(Debug)]
enum InterruptModel {
/// Interrupts are handled by the [8259 Programmable Interrupt Controller
/// (PIC)](pic).
Pic(spin::Mutex<pic::CascadedPic>),
/// Interrupts are handled by the [local] and [I/O] [Advanced Programmable
/// Interrupt Controller (APIC)s][apics].
///
/// [local]: apic::LocalApic
/// [I/O]: apic::IoApic
/// [apics]: apic
Apic {
local: apic::LocalApic,
// TODO(eliza): allow further configuration of the I/O APIC (e.g.
// masking/unmasking stuff...)
#[allow(dead_code)]
io: spin::Mutex<apic::IoApic>,
},
}
bits::bitfield! {
pub struct PageFaultCode<u32> {
/// When set, the page fault was caused by a page-protection violation.
/// When not set, it was caused by a non-present page.
pub const PRESENT: bool;
/// When set, the page fault was caused by a write access. When not set,
/// it was caused by a read access.
pub const WRITE: bool;
/// When set, the page fault was caused while CPL = 3. This does not
/// necessarily mean that the page fault was a privilege violation.
pub const USER: bool;
/// When set, one or more page directory entries contain reserved bits
/// which are set to 1. This only applies when the PSE or PAE flags in
/// CR4 are set to 1.
pub const RESERVED_WRITE: bool;
/// When set, the page fault was caused by an instruction fetch. This
/// only applies when the No-Execute bit is supported and enabled.
pub const INSTRUCTION_FETCH: bool;
/// When set, the page fault was caused by a protection-key violation.
/// The PKRU register (for user-mode accesses) or PKRS MSR (for
/// supervisor-mode accesses) specifies the protection key rights.
pub const PROTECTION_KEY: bool;
/// When set, the page fault was caused by a shadow stack access.
pub const SHADOW_STACK: bool;
const _RESERVED0 = 8;
/// When set, the fault was due to an SGX violation. The fault is
/// unrelated to ordinary paging.
pub const SGX: bool;
}
}
bits::bitfield! {
/// Error code set by the "Invalid TSS", "Segment Not Present", "Stack-Segment
/// Fault", and "General Protection Fault" faults.
///
/// This includes a segment selector index, and includes 2 bits describing
/// which table the segment selector references.
pub struct SelectorErrorCode<u16> {
const EXTERNAL: bool;
const TABLE: cpu::DescriptorTable;
const INDEX = 13;
}
}
#[repr(C)]
pub struct Registers {
pub instruction_ptr: VAddr, // TODO(eliza): add VAddr
pub code_segment: segment::Selector,
_pad: [u16; 3],
pub cpu_flags: u64, // TODO(eliza): rflags type?
pub stack_ptr: VAddr, // TODO(eliza): add VAddr
pub stack_segment: segment::Selector,
_pad2: [u16; 3],
}
static IDT: spin::Mutex<idt::Idt> = spin::Mutex::new(idt::Idt::new());
static INTERRUPT_CONTROLLER: InitOnce<Controller> = InitOnce::uninitialized();
impl Controller {
// const DEFAULT_IOAPIC_BASE_PADDR: u64 = 0xFEC00000;
pub fn idt() -> spin::MutexGuard<'static, idt::Idt> {
IDT.lock()
}
#[tracing::instrument(level = "info", name = "interrupt::Controller::init")]
pub fn init<H: Handlers<Registers>>() {
tracing::info!("intializing IDT...");
let mut idt = IDT.lock();
idt.register_handlers::<H>().unwrap();
unsafe {
idt.load_raw();
}
}
pub fn enable_hardware_interrupts(
acpi: Option<&acpi::InterruptModel>,
frame_alloc: &impl hal_core::mem::page::Alloc<mm::size::Size4Kb>,
) -> &'static Self {
let mut pics = pic::CascadedPic::new();
// regardless of whether APIC or PIC interrupt handling will be used,
// the PIC interrupt vectors must be remapped so that they do not
// conflict with CPU exceptions.
unsafe {
tracing::debug!(
big = Idt::PIC_BIG_START,
little = Idt::PIC_LITTLE_START,
"remapping PIC interrupt vectors"
);
pics.set_irq_address(Idt::PIC_BIG_START as u8, Idt::PIC_LITTLE_START as u8);
}
let model = match acpi {
Some(acpi::InterruptModel::Apic(apic_info)) => {
tracing::info!("detected APIC interrupt model");
let mut pagectrl = mm::PageCtrl::current();
// disable the 8259 PICs so that we can use APIC interrupts instead
unsafe {
pics.disable();
}
tracing::info!("disabled 8259 PICs");
// configure the I/O APIC
let mut io = {
// TODO(eliza): consider actually using other I/O APICs? do
// we need them for anything??
tracing::trace!(?apic_info.io_apics, "found {} IO APICs", apic_info.io_apics.len());
let io_apic = &apic_info.io_apics[0];
let addr = PAddr::from_u64(io_apic.address as u64);
tracing::debug!(ioapic.paddr = ?addr, "IOAPIC");
IoApic::new(addr, &mut pagectrl, frame_alloc)
};
// map the standard ISA hardware interrupts to I/O APIC
// redirection entries.
io.map_isa_irqs(Idt::IOAPIC_START as u8);
// unmask the PIT timer vector --- we'll need this for calibrating
// the local APIC timer...
io.set_masked(IoApic::PIT_TIMER_IRQ, false);
// unmask the PS/2 keyboard interrupt as well.
io.set_masked(IoApic::PS2_KEYBOARD_IRQ, false);
// enable the local APIC
let local = LocalApic::new(&mut pagectrl, frame_alloc);
local.enable(Idt::LOCAL_APIC_SPURIOUS as u8);
InterruptModel::Apic {
local,
io: spin::Mutex::new(io),
}
}
model => {
if model.is_none() {
tracing::warn!("platform does not support ACPI; falling back to 8259 PIC");
} else {
tracing::warn!(
"ACPI does not indicate APIC interrupt model; falling back to 8259 PIC"
)
}
tracing::info!("configuring 8259 PIC interrupts...");
unsafe {
// functionally a no-op, since interrupts from PC/AT PIC are enabled at boot, just being
// clear for you, the reader, that at this point they are definitely intentionally enabled.
pics.enable();
}
InterruptModel::Pic(spin::Mutex::new(pics))
}
};
tracing::trace!(interrupt_model = ?model);
let controller = INTERRUPT_CONTROLLER.init(Self { model });
// `sti` may not be called until the interrupt controller static is
// fully initialized, as an interrupt that occurs before it is
// initialized may attempt to access the static to finish the interrupt!
unsafe {
crate::cpu::intrinsics::sti();
}
controller
}
/// Starts a periodic timer which fires the `timer_tick` interrupt of the
/// provided [`Handlers`] every time `interval` elapses.
pub fn start_periodic_timer(
&self,
interval: Duration,
) -> Result<(), crate::time::InvalidDuration> {
match self.model {
InterruptModel::Pic(_) => crate::time::PIT.lock().start_periodic_timer(interval),
InterruptModel::Apic { ref local, .. } => {
local.start_periodic_timer(interval, Idt::LOCAL_APIC_TIMER as u8)
}
}
}
}
impl<'a, T> hal_core::interrupt::Context for Context<'a, T> {
type Registers = Registers;
fn registers(&self) -> &Registers {
self.registers
}
/// # Safety
///
/// Mutating the value of saved interrupt registers can cause
/// undefined behavior.
unsafe fn registers_mut(&mut self) -> &mut Registers {
self.registers
}
}
impl<'a> ctx::PageFault for Context<'a, PageFaultCode> {
fn fault_vaddr(&self) -> crate::VAddr {
crate::control_regs::Cr2::read()
}
fn debug_error_code(&self) -> &dyn fmt::Debug {
&self.code
}
fn display_error_code(&self) -> &dyn fmt::Display {
&self.code
}
}
impl<'a> ctx::CodeFault for Context<'a, CodeFault<'a>> {
fn is_user_mode(&self) -> bool {
false // TODO(eliza)
}
fn instruction_ptr(&self) -> crate::VAddr {
self.registers.instruction_ptr
}
fn fault_kind(&self) -> &'static str {
self.code.kind
}
fn details(&self) -> Option<&dyn fmt::Display> {
self.code.error_code
}
}
impl<'a> Context<'a, ErrorCode> {
pub fn error_code(&self) -> ErrorCode {
self.code
}
}
impl<'a> Context<'a, PageFaultCode> {
pub fn page_fault_code(&self) -> PageFaultCode {
self.code
}
}
impl hal_core::interrupt::Control for Idt {
// type Vector = u8;
type Registers = Registers;
#[inline]
unsafe fn disable(&mut self) {
crate::cpu::intrinsics::cli();
}
#[inline]
unsafe fn enable(&mut self) {
crate::cpu::intrinsics::sti();
tracing::trace!("interrupts enabled");
}
fn is_enabled(&self) -> bool {
unimplemented!("eliza do this one!!!")
}
fn register_handlers<H>(&mut self) -> Result<(), hal_core::interrupt::RegistrationError>
where
H: Handlers<Registers>,
{
macro_rules! gen_code_faults {
($self:ident, $h:ty, $($vector:path => fn $name:ident($($rest:tt)+),)+) => {
$(
gen_code_faults! {@ $name($($rest)+); }
$self.set_isr($vector, $name::<$h> as *const ());
)+
};
(@ $name:ident($kind:literal);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(mut registers: Registers) {
let code = CodeFault {
error_code: None,
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
(@ $name:ident($kind:literal, code);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
let code = CodeFault {
error_code: Some(&code),
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
}
let span = tracing::debug_span!("Idt::register_handlers");
let _enter = span.enter();
extern "x86-interrupt" fn page_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: PageFaultCode,
) {
H::page_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn double_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
H::double_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn pit_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
use core::sync::atomic::Ordering;
// if we weren't trying to do a PIT sleep, handle the timer tick
// instead.
let was_sleeping = crate::time::pit::SLEEPING
.compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
.is_ok();
if !was_sleeping {
H::timer_tick();
} else {
tracing::trace!("PIT sleep completed");
}
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PIT_TIMER as u8)
}
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn apic_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
H::timer_tick();
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(_) => unreachable!(),
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn keyboard_isr<H: Handlers<Registers>>(_regs: Registers) {
// 0x60 is a magic PC/AT number.
static PORT: cpu::Port = cpu::Port::at(0x60);
// load-bearing read - if we don't read from the keyboard controller it won't
// send another interrupt on later keystrokes.
let scancode = unsafe { PORT.readb() };
H::ps2_keyboard(scancode);
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PS2_KEYBOARD as u8)
}
InterruptModel::Apic { ref local, .. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn test_isr<H: Handlers<Registers>>(mut registers: Registers) {
H::test_interrupt(Context {
registers: &mut registers,
code: (),
});
}
extern "x86-interrupt" fn invalid_tss_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "invalid task-state segment!");
let msg = selector.named("task-state segment (TSS)");
let code = CodeFault {
error_code: Some(&msg),
kind: "Invalid TSS (0xA)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn segment_not_present_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a segment was not present!");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Segment Not Present (0xB)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn stack_segment_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a stack-segment fault is happening");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Stack-Segment Fault (0xC)",
};
H::code_fault(Context {
registers: &mut registers, | mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let segment = if code > 0 {
Some(SelectorErrorCode(code as u16))
} else {
None
};
tracing::error!(?segment, "lmao, a general protection fault is happening");
let error_code = segment.map(|seg| seg.named("selector"));
let code = CodeFault {
error_code: error_code.as_ref().map(|code| code as &dyn fmt::Display),
kind: "General Protection Fault (0xD)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn spurious_isr() {
tracing::trace!("spurious");
}
// === exceptions ===
// these exceptions are mapped to the HAL `Handlers` trait's "code
// fault" handler, and indicate that the code that was executing did a
// Bad Thing
gen_code_faults! {
self, H,
Self::DIVIDE_BY_ZERO => fn div_0_isr("Divide-By-Zero (0x0)"),
Self::OVERFLOW => fn overflow_isr("Overflow (0x4)"),
Self::BOUND_RANGE_EXCEEDED => fn br_isr("Bound Range Exceeded (0x5)"),
Self::INVALID_OPCODE => fn ud_isr("Invalid Opcode (0x6)"),
Self::DEVICE_NOT_AVAILABLE => fn no_fpu_isr("Device (FPU) Not Available (0x7)"),
Self::ALIGNMENT_CHECK => fn alignment_check_isr("Alignment Check (0x11)", code),
Self::SIMD_FLOATING_POINT => fn simd_fp_exn_isr("SIMD Floating-Point Exception (0x13)"),
Self::X87_FPU_EXCEPTION => fn x87_exn_isr("x87 Floating-Point Exception (0x10)"),
}
// other exceptions, not mapped to the "code fault" handler
self.set_isr(Self::PAGE_FAULT, page_fault_isr::<H> as *const ());
self.set_isr(Self::INVALID_TSS, invalid_tss_isr::<H> as *const ());
self.set_isr(
Self::SEGMENT_NOT_PRESENT,
segment_not_present_isr::<H> as *const (),
);
self.set_isr(
Self::STACK_SEGMENT_FAULT,
stack_segment_isr::<H> as *const (),
);
self.set_isr(Self::GENERAL_PROTECTION_FAULT, gpf_isr::<H> as *const ());
self.set_isr(Self::DOUBLE_FAULT, double_fault_isr::<H> as *const ());
// === hardware interrupts ===
// ISA standard hardware interrupts mapped on both the PICs and IO APIC
// interrupt models.
self.set_isr(Self::PIC_PIT_TIMER, pit_timer_isr::<H> as *const ());
self.set_isr(Self::IOAPIC_PIT_TIMER, pit_timer_isr::<H> as *const ());
self.set_isr(Self::PIC_PS2_KEYBOARD, keyboard_isr::<H> as *const ());
self.set_isr(Self::IOAPIC_PS2_KEYBOARD, keyboard_isr::<H> as *const ());
// local APIC-specific hardware interrupts
self.set_isr(Self::LOCAL_APIC_SPURIOUS, spurious_isr as *const ());
self.set_isr(Self::LOCAL_APIC_TIMER, apic_timer_isr::<H> as *const ());
// vector 69 (nice) is reserved by the HAL for testing the IDT.
self.set_isr(69, test_isr::<H> as *const ());
Ok(())
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self {
instruction_ptr,
code_segment,
stack_ptr,
stack_segment,
_pad: _,
cpu_flags,
_pad2: _,
} = self;
f.debug_struct("Registers")
.field("instruction_ptr", instruction_ptr)
.field("code_segment", code_segment)
.field("cpu_flags", &format_args!("{cpu_flags:#b}"))
.field("stack_ptr", stack_ptr)
.field("stack_segment", stack_segment)
.finish()
}
}
impl fmt::Display for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, " rip: {:?}", self.instruction_ptr)?;
writeln!(f, " cs: {:?}", self.code_segment)?;
writeln!(f, " flags: {:#b}", self.cpu_flags)?;
writeln!(f, " rsp: {:?}", self.stack_ptr)?;
writeln!(f, " ss: {:?}", self.stack_segment)?;
Ok(())
}
}
pub fn fire_test_interrupt() {
unsafe { asm!("int {0}", const 69) }
}
// === impl SelectorErrorCode ===
impl SelectorErrorCode {
#[inline]
fn named(self, segment_kind: &'static str) -> NamedSelectorErrorCode {
NamedSelectorErrorCode {
segment_kind,
code: self,
}
}
fn display(&self) -> impl fmt::Display {
struct PrettyErrorCode(SelectorErrorCode);
impl fmt::Display for PrettyErrorCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let table = self.0.get(SelectorErrorCode::TABLE);
let index = self.0.get(SelectorErrorCode::INDEX);
write!(f, "{table} index {index}")?;
if self.0.get(SelectorErrorCode::EXTERNAL) {
f.write_str(" (from an external source)")?;
}
write!(f, " (error code {:#b})", self.0.bits())?;
Ok(())
}
}
PrettyErrorCode(*self)
}
}
struct NamedSelectorErrorCode {
segment_kind: &'static str,
code: SelectorErrorCode,
}
impl fmt::Display for NamedSelectorErrorCode {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{} at {}", self.segment_kind, self.code.display())
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::mem::size_of;
#[test]
fn registers_is_correct_size() {
assert_eq!(size_of::<Registers>(), 40);
}
} | code,
});
}
extern "x86-interrupt" fn gpf_isr<H: Handlers<Registers>>( | random_line_split |
node.rs | use petgraph;
use petgraph::graph::NodeIndex; |
use std::sync::mpsc;
use std::sync;
use std::fmt;
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use checktable;
use flow::data::DataType;
use ops::{Record, Datas};
use flow::domain;
use flow::{Ingredient, NodeAddress, Edge};
use flow::payload::Packet;
use flow::migrate::materialization::Tag;
use backlog;
/// A StreamUpdate reflects the addition or deletion of a row from a reader node.
#[derive(Clone, Debug, PartialEq)]
pub enum StreamUpdate {
/// Indicates the addition of a new row
AddRow(sync::Arc<Vec<DataType>>),
/// Indicates the removal of an existing row
DeleteRow(sync::Arc<Vec<DataType>>),
}
impl From<Record> for StreamUpdate {
fn from(other: Record) -> Self {
match other {
Record::Positive(u) => StreamUpdate::AddRow(u),
Record::Negative(u) => StreamUpdate::DeleteRow(u),
Record::DeleteRequest(..) => unreachable!(),
}
}
}
impl From<Vec<DataType>> for StreamUpdate {
fn from(other: Vec<DataType>) -> Self {
StreamUpdate::AddRow(sync::Arc::new(other))
}
}
#[derive(Clone)]
pub struct Reader {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns the set of (base node, column) pairs whose values must match the value of this
// node's column to cause a conflict. The column is None for a given base node if any write
// to that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress { .. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress { .. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient + 'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
{
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
}
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn take(&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
.map(|d| -> usize { d.into() })
.map(|d| format!("\"/set312/{}\"", (d % 12) + 1))
.unwrap_or("white".into()))?;
match *self.inner {
Type::Source => write!(f, "(source)"),
Type::Ingress => write!(f, "{{ {} | (ingress) }}", idx.index()),
Type::Egress { .. } => write!(f, "{{ {} | (egress) }}", idx.index()),
Type::Reader(_, ref r) => {
let key = match r.key() {
Err(_) => String::from("none"),
Ok(k) => format!("{}", k),
};
let size = match r.len() {
Err(_) => String::from("empty"),
Ok(s) => format!("{} distinct keys", s),
};
write!(f,
"{{ {} | (reader / key: {}) | {} }}",
idx.index(),
key,
size)
}
Type::Internal(ref i) => {
write!(f, "{{")?;
// Output node name and description. First row.
write!(f,
"{{ {} / {} | {} }}",
idx.index(),
escape(self.name()),
escape(&i.description()))?;
// Output node outputs. Second row.
write!(f, " | {}", self.fields().join(", \\n"))?;
// Maybe output node's HAVING conditions. Optional third row.
// TODO
// if let Some(conds) = n.node().unwrap().having_conditions() {
// let conds = conds.iter()
// .map(|c| format!("{}", c))
// .collect::<Vec<_>>()
// .join(" ∧ ");
// write!(f, " | σ({})", escape(&conds))?;
// }
write!(f, " }}")
}
}?;
writeln!(f, "\"]")
}
pub fn is_egress(&self) -> bool {
if let Type::Egress { .. } = *self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let Type::Ingress = *self.inner {
true
} else {
false
}
}
pub fn is_internal(&self) -> bool {
if let Type::Internal(..) = *self.inner {
true
} else {
false
}
}
/// A node is considered to be an output node if changes to its state are visible outside of
/// its domain.
pub fn is_output(&self) -> bool {
match *self.inner {
Type::Egress { .. } |
Type::Reader(..) => true,
_ => false,
}
}
}
impl Deref for Node {
type Target = Type;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
} | random_line_split |
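// --- hypothetical sketch, not part of the node.rs dump above ---
// A test-style sketch of how the public `Node` API fits together. The node
// name and field names are made up; `Type::Ingress` is used only because it
// needs no extra arguments.
#[cfg(test)]
mod node_construction_sketch {
    use super::*;
    #[test]
    fn builds_and_inspects_an_ingress_node() {
        let node = Node::new("article", &["id", "title"], Type::Ingress);
        assert!(node.is_ingress());
        assert!(!node.is_internal() && !node.is_output());
        assert_eq!(node.fields(), &["id", "title"]);
        // `domain()` and `addr()` would hit the `unreachable!` branches above
        // until `add_to` / `set_addr` are called during a migration.
        let mirrored = node.mirror(Type::Ingress);
        assert_eq!(mirrored.name(), "article");
    }
}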
|
node.rs | use petgraph;
use petgraph::graph::NodeIndex;
use std::sync::mpsc;
use std::sync;
use std::fmt;
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use checktable;
use flow::data::DataType;
use ops::{Record, Datas};
use flow::domain;
use flow::{Ingredient, NodeAddress, Edge};
use flow::payload::Packet;
use flow::migrate::materialization::Tag;
use backlog;
/// A StreamUpdate reflects the addition or deletion of a row from a reader node.
#[derive(Clone, Debug, PartialEq)]
pub enum StreamUpdate {
/// Indicates the addition of a new row
AddRow(sync::Arc<Vec<DataType>>),
/// Indicates the removal of an existing row
DeleteRow(sync::Arc<Vec<DataType>>),
}
impl From<Record> for StreamUpdate {
fn from(other: Record) -> Self {
match other {
Record::Positive(u) => StreamUpdate::AddRow(u),
Record::Negative(u) => StreamUpdate::DeleteRow(u),
Record::DeleteRequest(..) => unreachable!(),
}
}
}
impl From<Vec<DataType>> for StreamUpdate {
fn from(other: Vec<DataType>) -> Self {
StreamUpdate::AddRow(sync::Arc::new(other))
}
}
#[derive(Clone)]
pub struct Reader {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns the set of (base node, column) pairs whose values must match the value of this
// node's column to cause a conflict. The column is None for a given base node if any write
// to that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress { .. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress { .. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient + 'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
{
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
}
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn | (&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
.map(|d| -> usize { d.into() })
.map(|d| format!("\"/set312/{}\"", (d % 12) + 1))
.unwrap_or("white".into()))?;
match *self.inner {
Type::Source => write!(f, "(source)"),
Type::Ingress => write!(f, "{{ {} | (ingress) }}", idx.index()),
Type::Egress { .. } => write!(f, "{{ {} | (egress) }}", idx.index()),
Type::Reader(_, ref r) => {
let key = match r.key() {
Err(_) => String::from("none"),
Ok(k) => format!("{}", k),
};
let size = match r.len() {
Err(_) => String::from("empty"),
Ok(s) => format!("{} distinct keys", s),
};
write!(f,
"{{ {} | (reader / key: {}) | {} }}",
idx.index(),
key,
size)
}
Type::Internal(ref i) => {
write!(f, "{{")?;
// Output node name and description. First row.
write!(f,
"{{ {} / {} | {} }}",
idx.index(),
escape(self.name()),
escape(&i.description()))?;
// Output node outputs. Second row.
write!(f, " | {}", self.fields().join(", \\n"))?;
// Maybe output node's HAVING conditions. Optional third row.
// TODO
// if let Some(conds) = n.node().unwrap().having_conditions() {
// let conds = conds.iter()
// .map(|c| format!("{}", c))
// .collect::<Vec<_>>()
// .join(" ∧ ");
// write!(f, " | σ({})", escape(&conds))?;
// }
write!(f, " }}")
}
}?;
writeln!(f, "\"]")
}
pub fn is_egress(&self) -> bool {
if let Type::Egress { .. } = *self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let Type::Ingress = *self.inner {
true
} else {
false
}
}
pub fn is_internal(&self) -> bool {
if let Type::Internal(..) = *self.inner {
true
} else {
false
}
}
/// A node is considered to be an output node if changes to its state are visible outside of
/// its domain.
pub fn is_output(&self) -> bool {
match *self.inner {
Type::Egress { .. } |
Type::Reader(..) => true,
_ => false,
}
}
}
impl Deref for Node {
type Target = Type;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
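// --- hypothetical sketch, not part of the node.rs dump above ---
// A test-style sketch of the `Reader` defaults and the `StreamUpdate`
// conversions defined above. An empty row is used so nothing has to be
// assumed about how `DataType` values are constructed.
#[cfg(test)]
mod reader_and_stream_update_sketch {
    use super::*;
    use flow::data::DataType;
    use std::sync::Arc;
    #[test]
    fn reader_without_state_reports_missing_state() {
        let r = Reader::default();
        assert!(r.get_reader().is_none());
        assert!(r.key().is_err());
        assert!(r.len().is_err());
    }
    #[test]
    fn rows_convert_into_stream_updates() {
        let row: Vec<DataType> = Vec::new();
        let update = StreamUpdate::from(row.clone());
        assert_eq!(update, StreamUpdate::AddRow(Arc::new(row)));
    }
}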
| take | identifier_name |
node.rs | use petgraph;
use petgraph::graph::NodeIndex;
use std::sync::mpsc;
use std::sync;
use std::fmt;
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use checktable;
use flow::data::DataType;
use ops::{Record, Datas};
use flow::domain;
use flow::{Ingredient, NodeAddress, Edge};
use flow::payload::Packet;
use flow::migrate::materialization::Tag;
use backlog;
/// A StreamUpdate reflects the addition or deletion of a row from a reader node.
#[derive(Clone, Debug, PartialEq)]
pub enum StreamUpdate {
/// Indicates the addition of a new row
AddRow(sync::Arc<Vec<DataType>>),
/// Indicates the removal of an existing row
DeleteRow(sync::Arc<Vec<DataType>>),
}
impl From<Record> for StreamUpdate {
fn from(other: Record) -> Self {
match other {
Record::Positive(u) => StreamUpdate::AddRow(u),
Record::Negative(u) => StreamUpdate::DeleteRow(u),
Record::DeleteRequest(..) => unreachable!(),
}
}
}
impl From<Vec<DataType>> for StreamUpdate {
fn from(other: Vec<DataType>) -> Self {
StreamUpdate::AddRow(sync::Arc::new(other))
}
}
#[derive(Clone)]
pub struct Reader {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns the set of (base node, column) pairs whose values must match the value of this
// node's column to cause a conflict. The column is None for a given base node if any write
// to that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress { .. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress { .. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient + 'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
{
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
}
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn take(&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
.map(|d| -> usize { d.into() })
.map(|d| format!("\"/set312/{}\"", (d % 12) + 1))
.unwrap_or("white".into()))?;
match *self.inner {
Type::Source => write!(f, "(source)"),
Type::Ingress => write!(f, "{{ {} | (ingress) }}", idx.index()),
Type::Egress { .. } => write!(f, "{{ {} | (egress) }}", idx.index()),
Type::Reader(_, ref r) => {
let key = match r.key() {
Err(_) => String::from("none"),
Ok(k) => format!("{}", k),
};
let size = match r.len() {
Err(_) => String::from("empty"),
Ok(s) => format!("{} distinct keys", s),
};
write!(f,
"{{ {} | (reader / key: {}) | {} }}",
idx.index(),
key,
size)
}
Type::Internal(ref i) => {
write!(f, "{{")?;
// Output node name and description. First row.
write!(f,
"{{ {} / {} | {} }}",
idx.index(),
escape(self.name()),
escape(&i.description()))?;
// Output node outputs. Second row.
write!(f, " | {}", self.fields().join(", \\n"))?;
// Maybe output node's HAVING conditions. Optional third row.
// TODO
// if let Some(conds) = n.node().unwrap().having_conditions() {
// let conds = conds.iter()
// .map(|c| format!("{}", c))
// .collect::<Vec<_>>()
// .join(" ∧ ");
// write!(f, " | σ({})", escape(&conds))?;
// }
write!(f, " }}")
}
}?;
writeln!(f, "\"]")
}
pub fn is_egress(&self) -> bool {
if let Type::Egress { .. } = *self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let Type::Ingress = *self.inner {
| se {
false
}
}
pub fn is_internal(&self) -> bool {
if let Type::Internal(..) = *self.inner {
true
} else {
false
}
}
/// A node is considered to be an output node if changes to its state are visible outside of
/// its domain.
pub fn is_output(&self) -> bool {
match *self.inner {
Type::Egress { .. } |
Type::Reader(..) => true,
_ => false,
}
}
}
impl Deref for Node {
type Target = Type;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
| true
} el | conditional_block |
node.rs | use petgraph;
use petgraph::graph::NodeIndex;
use std::sync::mpsc;
use std::sync;
use std::fmt;
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use checktable;
use flow::data::DataType;
use ops::{Record, Datas};
use flow::domain;
use flow::{Ingredient, NodeAddress, Edge};
use flow::payload::Packet;
use flow::migrate::materialization::Tag;
use backlog;
/// A StreamUpdate reflects the addition or deletion of a row from a reader node.
#[derive(Clone, Debug, PartialEq)]
pub enum StreamUpdate {
/// Indicates the addition of a new row
AddRow(sync::Arc<Vec<DataType>>),
/// Indicates the removal of an existing row
DeleteRow(sync::Arc<Vec<DataType>>),
}
impl From<Record> for StreamUpdate {
fn from(other: Record) -> Self {
match other {
Record::Positive(u) => StreamUpdate::AddRow(u),
Record::Negative(u) => StreamUpdate::DeleteRow(u),
Record::DeleteRequest(..) => unreachable!(),
}
}
}
impl From<Vec<DataType>> for StreamUpdate {
fn from(other: Vec<DataType>) -> Self {
StreamUpdate::AddRow(sync::Arc::new(other))
}
}
#[derive(Clone)]
pub struct Reader {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
    // Returns, for each base node upstream of this node, the column in that base node whose
    // value must match the value of this node's `column` to cause a conflict. The column is
    // None for a given base node if any write to that base node might cause a conflict.
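    //
    // Illustrative example (hypothetical nodes, not taken from a real graph): if this node is
    // itself a base table, the result is simply `vec![(self_index, Some(column))]`; for a
    // derived column that cannot be traced back to a single base column (say, the count column
    // of an aggregation), the entry for that base is `(base_index, None)`.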
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress { .. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress { .. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient + 'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
|
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn take(&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
.map(|d| -> usize { d.into() })
.map(|d| format!("\"/set312/{}\"", (d % 12) + 1))
.unwrap_or("white".into()))?;
match *self.inner {
Type::Source => write!(f, "(source)"),
Type::Ingress => write!(f, "{{ {} | (ingress) }}", idx.index()),
Type::Egress { .. } => write!(f, "{{ {} | (egress) }}", idx.index()),
Type::Reader(_, ref r) => {
let key = match r.key() {
Err(_) => String::from("none"),
Ok(k) => format!("{}", k),
};
let size = match r.len() {
Err(_) => String::from("empty"),
Ok(s) => format!("{} distinct keys", s),
};
write!(f,
"{{ {} | (reader / key: {}) | {} }}",
idx.index(),
key,
size)
}
Type::Internal(ref i) => {
write!(f, "{{")?;
// Output node name and description. First row.
write!(f,
"{{ {} / {} | {} }}",
idx.index(),
escape(self.name()),
escape(&i.description()))?;
// Output node outputs. Second row.
write!(f, " | {}", self.fields().join(", \\n"))?;
// Maybe output node's HAVING conditions. Optional third row.
// TODO
// if let Some(conds) = n.node().unwrap().having_conditions() {
// let conds = conds.iter()
// .map(|c| format!("{}", c))
// .collect::<Vec<_>>()
// .join(" ∧ ");
// write!(f, " | σ({})", escape(&conds))?;
// }
write!(f, " }}")
}
}?;
writeln!(f, "\"]")
}
pub fn is_egress(&self) -> bool {
if let Type::Egress { .. } = *self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let Type::Ingress = *self.inner {
true
} else {
false
}
}
pub fn is_internal(&self) -> bool {
if let Type::Internal(..) = *self.inner {
true
} else {
false
}
}
/// A node is considered to be an output node if changes to its state are visible outside of
/// its domain.
pub fn is_output(&self) -> bool {
match *self.inner {
Type::Egress { .. } |
Type::Reader(..) => true,
_ => false,
}
}
}
impl Deref for Node {
type Target = Type;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
| {
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
} | identifier_body |
create-contact.component.ts | import {Component, OnInit} from '@angular/core';
import {ContactsService} from '@rhythmsoftware/rolodex-angular-sdk/api/contacts.service';
import {CertificationsService} from '../../services/certifications/certifications.service';
import {AbstractControl, FormArray, FormBuilder, FormGroup, Validators} from '@angular/forms';
import {Router} from '@angular/router';
import {ToastrService} from 'ngx-toastr';
import clean from 'lodash-clean';
import * as nameParser from 'parse-full-name';
import {Contact} from '@rhythmsoftware/rolodex-angular-sdk/model/contact';
import * as googlePhoneNumberLib from 'google-libphonenumber';
import {Address} from 'ngx-google-places-autocomplete/objects/address';
import {GooglePlacesService} from '../../services/google-places/google-places.service';
import {AlgoliaKeyManagementService} from '../../services/algolia/algolia-key-management.service';
import {Observable} from 'rxjs';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/debounceTime';
import 'rxjs/add/operator/distinctUntilChanged';
import 'rxjs/add/operator/switchMap';
import 'rxjs/add/operator/mergeMap';
import 'rxjs/add/observable/of';
import * as algoliasearch from 'algoliasearch';
import {SecurityContextService} from '../../services/security-context/security-context.service';
import {Organization} from '@rhythmsoftware/rolodex-angular-sdk/model/organization';
import {OrganizationsService} from '@rhythmsoftware/rolodex-angular-sdk/api/organizations.service';
// This method checks to make sure you don't have two phone numbers with the same type
function validatePhoneNumbers(c: AbstractControl): { [key: string]: boolean } | null {
// console.log('validating...' + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const phoneAndTypeEntry of c.value) {
if (!phoneAndTypeEntry.phone_number || phoneAndTypeEntry.phone_number.trim() === '') {
continue;
} // don't bother if no phone
if (entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type]) {
return {'duplicate_phone_number_types': true};
}
entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type] = true;
}
  return null;
}
function validateAddresses(c: AbstractControl): { [key: string]: boolean } | null {
// console.log('validating...' + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const addressAndTypeEntry of c.value) {
if (!addressAndTypeEntry.line1 || addressAndTypeEntry.line1.trim() === '') {
continue;
} // don't worry about it
if (entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type]) {
return {'duplicate_address_types': true};
}
entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type] = true;
}
  return null;
}
@Component({
templateUrl: './create-contact.component.html',
styleUrls: ['./create-contact.component.scss']
})
export class CreateContactComponent implements OnInit {
constructor(private _contactsService: ContactsService, private _certificationsService: CertificationsService, private _organizationService: OrganizationsService,
private fb: FormBuilder, private _googlePlacesService: GooglePlacesService, private _algoliaKeyManagementService: AlgoliaKeyManagementService,
private router: Router, private toastr: ToastrService, private securityContext: SecurityContextService) {
}
MAX_ADDRESS_TYPES = 3;
MAX_PHONE_NUMBER_TYPES = 4;
contact: Contact = {};
contactForm: FormGroup;
isSaving: boolean; // supports the Ladda button
// designations: Array<Select2OptionData> = [];
// the name and email panels
showAdditionalNameOptions = false;
showAdditionalEmails = false;
showFullAddress = [false];
ngOnInit() {
this.initFormGroup();
this.initControls();
| this._algoliaKeyManagementService.GetSecureApiKey(this.securityContext.GetCurrentTenant(), 'rolodex-organizations').subscribe(
key => {// populate the index
this.alogliaClient = algoliasearch(key.application_id, key.api_key);
this.algoliaOrganizationIndex = this.alogliaClient.initIndex(key.index_name);
// enable the search
this.contactForm.get('organization_id').enable();
});
}
initFormGroup(): void {
this.contactForm = this.fb.group({
salutation: '',
name: ['', Validators.required],
first_name: '',
middle_name: '',
last_name: '',
suffix: '',
nickname: '',
organization_id: {value: '', disabled: true}, // disabled until initialized
job_title: '',
title: '',
contact_role_ids: '',
phone_numbers: this.fb.array([this.buildPhoneNumberGroup()], validatePhoneNumbers),
addresses: this.fb.array([this.buildAddressGroup()], validateAddresses),
certifications: '',
preferred_phone_number: '',
preferred_address: '',
gender: '',
date_of_birth: '',
email_address: ['', Validators.email],
email_address2: ['', Validators.email],
email_address3: ['', Validators.email],
notes: ''
});
// hook up the name parser
this.contactForm.get('name').valueChanges.subscribe(name => {
// we don't want to run this if someone has changed one of the
// fields
if (this.contactForm.get('first_name').dirty ||
this.contactForm.get('middle_name').dirty ||
this.contactForm.get('last_name').dirty ||
this.contactForm.get('salutation').dirty ||
this.contactForm.get('nickname').dirty ||
this.contactForm.get('suffix').dirty) {
console.log('form name elements changed, disabling name parsing');
return;
}
const nameparts = nameParser.parseFullName(name);
// console.log(JSON.stringify(nameparts));
this.contactForm.patchValue({
first_name: nameparts.first,
middle_name: nameparts.middle,
last_name: nameparts.last,
salutation: nameparts.title,
nickname: nameparts.nick,
suffix: nameparts.suffix
});
if (nameparts.error && nameparts.error.length > 0) {
console.error('could not parse name: ' + JSON.stringify(nameparts.error));
}
});
}
get phone_numbers(): FormArray {
return <FormArray> this.contactForm.get('phone_numbers');
}
buildPhoneNumberGroup() {
return this.fb.group({
phone_number: '',
phone_number_type: 'mobile'
});
}
addPhoneNumber() {
if (this.phone_numbers.length >= this.MAX_PHONE_NUMBER_TYPES) {
alert('No more phone number types are available.');
return;
}
this.phone_numbers.push(this.buildPhoneNumberGroup());
}
formatPhoneNumber(index: number) {
const phone = this.phone_numbers.controls[index].get('phone_number').value;
if (!phone || phone.trim() === '')
return;
// console.log(index);
// console.log(phone);
const PNF = googlePhoneNumberLib.PhoneNumberFormat;
const phoneUtil = googlePhoneNumberLib.PhoneNumberUtil.getInstance();
const number = phoneUtil.parseAndKeepRawInput(phone, 'US');
const typeOfFormat = phoneUtil.getRegionCodeForNumber(number) === 'US' ? PNF.NATIONAL : PNF.INTERNATIONAL;
const formattedPhone = phoneUtil.format(number, typeOfFormat);
this.phone_numbers.controls[index].patchValue({phone_number: formattedPhone});
}
initControls(): void {
//
// this._certificationsService.GetAllCertifications().subscribe((certifications) => this.designations = certifications.map(cert => ({
// id: cert.post_nominal,
// text: cert.post_nominal
// })));
}
shouldShowFormErrorFor(field: string) {
return this.contactForm.get(field).errors && this.contactForm.get(field).touched;
}
shouldShowFormErrorForControl(field: AbstractControl) {
return field.errors; // && field.touched;
}
extractContactFromForm(): Contact {
const contactToSave = this.contactForm.value;
// now, let's get the phone numbers
this.extractPhoneNumbersFromForm(contactToSave);
// get rid of the phone numbers property
delete contactToSave.phone_numbers;
    // now, let's get the addresses
this.extractAddressesFromForm(contactToSave);
    // get rid of the addresses property
delete contactToSave.addresses;
console.log(JSON.stringify(contactToSave));
return clean(contactToSave);
}
private extractPhoneNumbersFromForm(contactToSave) {
// let's iterate and get the phone numbers
const phoneNumbers = this.phone_numbers.controls;
if (phoneNumbers && phoneNumbers.length > 0) {
const processedPhoneNumbers = {};
for (const phoneNumber of phoneNumbers) {
// is this a duplicate?
const type = phoneNumber.value.phone_number_type;
if (processedPhoneNumbers[type]) {
// validation error
alert('Duplicate phone number types specified!');
return;
}
processedPhoneNumbers[type] = true;
switch (type) {
case 'home':
contactToSave.home_phone_number = phoneNumber.value.phone_number;
break;
case 'work':
contactToSave.work_phone_number = phoneNumber.value.phone_number;
break;
case 'mobile':
contactToSave.mobile_phone_number = phoneNumber.value.phone_number;
break;
case 'alternate':
contactToSave.alt_phone_number = phoneNumber.value.phone_number;
break;
default:
throw new Error('unknown phone number type ' + phoneNumber.value.phone_number_type);
}
}
}
}
// *********** Address Management ***************************
//
get addresses(): FormArray {
return <FormArray> this.contactForm.get('addresses');
}
buildAddressGroup() {
return this.fb.group({
address_lookup: '',
address_type: 'home',
line1: '',
line2: '',
city: '',
state: '',
postal_code: '',
country: ''
});
}
addAddress() {
if (this.addresses.length >= this.MAX_ADDRESS_TYPES) {
alert('No more address types are available.');
return;
}
this.showFullAddress.push(false);
this.hasAddressBeenPlaced.push(false);
this.addresses.push(this.buildAddressGroup());
}
hasAddressBeenPlaced: boolean[] = [false];
handleAddressChange(address: Address, index: number) {
console.log(JSON.stringify(address));
const addr = this._googlePlacesService.parseGooglePlacesAddress(address);
console.log('Placed Addr: ' + JSON.stringify(addr));
this.addresses.controls[index].patchValue({
line1: addr.line1,
line2: addr.line2,
city: addr.city,
state: addr.state,
postal_code: addr.postal_code,
country: addr.country,
address_lookup: 'Please edit the address below'
});
this.showFullAddress[index] = true;
this.hasAddressBeenPlaced[index] = true;
this.addresses.controls[index].get('address_lookup').disable();
}
private extractAddressesFromForm(contactToSave) {
    // let's iterate and get the addresses
const addresses = this.addresses.controls;
console.log(JSON.stringify(addresses[0].value));
if (addresses && addresses.length > 0) {
const processedAddressTypes = {};
for (const address of addresses) {
// is this a duplicate?
const type = address.value.address_type;
if (processedAddressTypes[type]) {
// validation error
alert('Duplicate address types specified!');
return;
}
const addr = address.value;
processedAddressTypes[type] = true;
switch (type) {
case 'home':
contactToSave.home_address = addr;
break;
case 'work':
contactToSave.work_address = addr;
break;
case 'alternate':
contactToSave.alt_address = addr;
break;
default:
            throw new Error('unknown address type ' + address.value.address_type);
}
delete addr.address_lookup; // this isn't part of the REST API
delete addr.address_type; // this isn't part of the REST API
}
}
}
alogliaClient: algoliasearch.Client;
algoliaOrganizationIndex: algoliasearch.Index;
isNewOrganization = false;
searchOrganizations = (text$: Observable<string>) =>
text$
.debounceTime(200)
.switchMap(term => this.algoliaOrganizationIndex.search({query: term}).then((response) => response.hits));
formatSelectedOrganization = (x: { name: string }) => x.name;
onOrganizationFieldChanged() {
// we'll use set timeout to give time for the NgbTypeahead method to be called
setTimeout(() => {
const selectedOrganization = this.contactForm.get('organization_id').value;
this.isNewOrganization = typeof selectedOrganization === 'string' && selectedOrganization.trim() !== '';
}, 300);
}
// saves the contact record via the REST API
saveChanges(): void {
this.toastr.clear();
this.isSaving = true;
const contactToSave = this.extractContactFromForm();
// now... do we need to save an organization?
let organizationObservable: Observable<Organization> = Observable.of(null);
if (typeof contactToSave.organization_id === 'object') {
      // no, an existing organization was selected - let's just use its ID
contactToSave.organization_id = contactToSave.organization_id.id;
} else {
// we have to create an organization
if (contactToSave.organization_id.trim() !== '') {
const newOrganization: Organization = {
name: contactToSave.organization_id,
main_address: contactToSave.work_address,
main_phone_number: contactToSave.work_phone_number
};
console.log('saving organization: ' + JSON.stringify(newOrganization));
organizationObservable = this._organizationService.createOrganization(this.securityContext.GetCurrentTenant(), newOrganization);
}
}
// now, let's save the contact via the REST API
// we'll output an error if unsuccessful via toaster
// we run the org observable, then map it and return the contact observable, and subscribe to THAT
organizationObservable
.mergeMap(organization => {
// if an organization was saved, let's update the org id
if (organization)
contactToSave.organization_id = organization.id;
return this._contactsService.createContact(this.securityContext.GetCurrentTenant(), contactToSave);
})
.subscribe((contact) => this.router.navigate(['/contacts', contact.id]),
(err) => {
const errorMessage = (err.error && err.error.errorMessage) ? err.error.errorMessage : err.message;
this.toastr.error(errorMessage, 'We weren\'t able to save the contact...', {
timeOut: 0,
closeButton: true,
positionClass: 'toast-top-full-width'
});
this.isSaving = false;
});
}
}
} | this.initAlgolia();
}
initAlgolia(): void {
| random_line_split |
create-contact.component.ts | import {Component, OnInit} from '@angular/core';
import {ContactsService} from '@rhythmsoftware/rolodex-angular-sdk/api/contacts.service';
import {CertificationsService} from '../../services/certifications/certifications.service';
import {AbstractControl, FormArray, FormBuilder, FormGroup, Validators} from '@angular/forms';
import {Router} from '@angular/router';
import {ToastrService} from 'ngx-toastr';
import clean from 'lodash-clean';
import * as nameParser from 'parse-full-name';
import {Contact} from '@rhythmsoftware/rolodex-angular-sdk/model/contact';
import * as googlePhoneNumberLib from 'google-libphonenumber';
import {Address} from 'ngx-google-places-autocomplete/objects/address';
import {GooglePlacesService} from '../../services/google-places/google-places.service';
import {AlgoliaKeyManagementService} from '../../services/algolia/algolia-key-management.service';
import {Observable} from 'rxjs';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/debounceTime';
import 'rxjs/add/operator/distinctUntilChanged';
import 'rxjs/add/operator/switchMap';
import 'rxjs/add/operator/mergeMap';
import 'rxjs/add/observable/of';
import * as algoliasearch from 'algoliasearch';
import {SecurityContextService} from '../../services/security-context/security-context.service';
import {Organization} from '@rhythmsoftware/rolodex-angular-sdk/model/organization';
import {OrganizationsService} from '@rhythmsoftware/rolodex-angular-sdk/api/organizations.service';
// This method checks to make sure you don't have two phone numbers with the same type
function validatePhoneNumbers(c: AbstractControl): { [key: string]: boolean } | null {
// console.log('validating...' + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const phoneAndTypeEntry of c.value) {
if (!phoneAndTypeEntry.phone_number || phoneAndTypeEntry.phone_number.trim() === '') {
continue;
} // don't bother if no phone
if (entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type]) {
return {'duplicate_phone_number_types': true};
}
entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type] = true;
}
  return null;
}
function validateAddresses(c: AbstractControl): { [key: string]: boolean } | null {
// console.log('validating...' + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const addressAndTypeEntry of c.value) {
if (!addressAndTypeEntry.line1 || addressAndTypeEntry.line1.trim() === '') {
continue;
} // don't worry about it
if (entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type]) {
return {'duplicate_address_types': true};
}
entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type] = true;
}
  return null;
}
@Component({
templateUrl: './create-contact.component.html',
styleUrls: ['./create-contact.component.scss']
})
export class CreateContactComponent implements OnInit {
constructor(private _contactsService: ContactsService, private _certificationsService: CertificationsService, private _organizationService: OrganizationsService,
private fb: FormBuilder, private _googlePlacesService: GooglePlacesService, private _algoliaKeyManagementService: AlgoliaKeyManagementService,
private router: Router, private toastr: ToastrService, private securityContext: SecurityContextService) {
}
MAX_ADDRESS_TYPES = 3;
MAX_PHONE_NUMBER_TYPES = 4;
contact: Contact = {};
contactForm: FormGroup;
isSaving: boolean; // supports the Ladda button
// designations: Array<Select2OptionData> = [];
// the name and email panels
showAdditionalNameOptions = false;
showAdditionalEmails = false;
showFullAddress = [false];
ngOnInit() {
this.initFormGroup();
this.initControls();
this.initAlgolia();
}
initAlgolia(): void {
this._algoliaKeyManagementService.GetSecureApiKey(this.securityContext.GetCurrentTenant(), 'rolodex-organizations').subscribe(
key => {// populate the index
this.alogliaClient = algoliasearch(key.application_id, key.api_key);
this.algoliaOrganizationIndex = this.alogliaClient.initIndex(key.index_name);
// enable the search
this.contactForm.get('organization_id').enable();
});
}
initFormGroup(): void {
this.contactForm = this.fb.group({
salutation: '',
name: ['', Validators.required],
first_name: '',
middle_name: '',
last_name: '',
suffix: '',
nickname: '',
organization_id: {value: '', disabled: true}, // disabled until initialized
job_title: '',
title: '',
contact_role_ids: '',
phone_numbers: this.fb.array([this.buildPhoneNumberGroup()], validatePhoneNumbers),
addresses: this.fb.array([this.buildAddressGroup()], validateAddresses),
certifications: '',
preferred_phone_number: '',
preferred_address: '',
gender: '',
date_of_birth: '',
email_address: ['', Validators.email],
email_address2: ['', Validators.email],
email_address3: ['', Validators.email],
notes: ''
});
// hook up the name parser
this.contactForm.get('name').valueChanges.subscribe(name => {
// we don't want to run this if someone has changed one of the
// fields
if (this.contactForm.get('first_name').dirty ||
this.contactForm.get('middle_name').dirty ||
this.contactForm.get('last_name').dirty ||
this.contactForm.get('salutation').dirty ||
this.contactForm.get('nickname').dirty ||
this.contactForm.get('suffix').dirty) {
console.log('form name elements changed, disabling name parsing');
return;
}
const nameparts = nameParser.parseFullName(name);
// console.log(JSON.stringify(nameparts));
this.contactForm.patchValue({
first_name: nameparts.first,
middle_name: nameparts.middle,
last_name: nameparts.last,
salutation: nameparts.title,
nickname: nameparts.nick,
suffix: nameparts.suffix
});
if (nameparts.error && nameparts.error.length > 0) {
console.error('could not parse name: ' + JSON.stringify(nameparts.error));
}
});
}
get phone_numbers(): FormArray {
return <FormArray> this.contactForm.get('phone_numbers');
}
buildPhoneNumberGroup() {
return this.fb.group({
phone_number: '',
phone_number_type: 'mobile'
});
}
| () {
if (this.phone_numbers.length >= this.MAX_PHONE_NUMBER_TYPES) {
alert('No more phone number types are available.');
return;
}
this.phone_numbers.push(this.buildPhoneNumberGroup());
}
formatPhoneNumber(index: number) {
const phone = this.phone_numbers.controls[index].get('phone_number').value;
if (!phone || phone.trim() === '')
return;
// console.log(index);
// console.log(phone);
const PNF = googlePhoneNumberLib.PhoneNumberFormat;
const phoneUtil = googlePhoneNumberLib.PhoneNumberUtil.getInstance();
const number = phoneUtil.parseAndKeepRawInput(phone, 'US');
const typeOfFormat = phoneUtil.getRegionCodeForNumber(number) === 'US' ? PNF.NATIONAL : PNF.INTERNATIONAL;
const formattedPhone = phoneUtil.format(number, typeOfFormat);
this.phone_numbers.controls[index].patchValue({phone_number: formattedPhone});
}
initControls(): void {
//
// this._certificationsService.GetAllCertifications().subscribe((certifications) => this.designations = certifications.map(cert => ({
// id: cert.post_nominal,
// text: cert.post_nominal
// })));
}
shouldShowFormErrorFor(field: string) {
return this.contactForm.get(field).errors && this.contactForm.get(field).touched;
}
shouldShowFormErrorForControl(field: AbstractControl) {
return field.errors; // && field.touched;
}
extractContactFromForm(): Contact {
const contactToSave = this.contactForm.value;
// now, let's get the phone numbers
this.extractPhoneNumbersFromForm(contactToSave);
// get rid of the phone numbers property
delete contactToSave.phone_numbers;
    // now, let's get the addresses
this.extractAddressesFromForm(contactToSave);
    // get rid of the addresses property
delete contactToSave.addresses;
console.log(JSON.stringify(contactToSave));
return clean(contactToSave);
}
private extractPhoneNumbersFromForm(contactToSave) {
// let's iterate and get the phone numbers
const phoneNumbers = this.phone_numbers.controls;
if (phoneNumbers && phoneNumbers.length > 0) {
const processedPhoneNumbers = {};
for (const phoneNumber of phoneNumbers) {
// is this a duplicate?
const type = phoneNumber.value.phone_number_type;
if (processedPhoneNumbers[type]) {
// validation error
alert('Duplicate phone number types specified!');
return;
}
processedPhoneNumbers[type] = true;
switch (type) {
case 'home':
contactToSave.home_phone_number = phoneNumber.value.phone_number;
break;
case 'work':
contactToSave.work_phone_number = phoneNumber.value.phone_number;
break;
case 'mobile':
contactToSave.mobile_phone_number = phoneNumber.value.phone_number;
break;
case 'alternate':
contactToSave.alt_phone_number = phoneNumber.value.phone_number;
break;
default:
throw new Error('unknown phone number type ' + phoneNumber.value.phone_number_type);
}
}
}
}
// *********** Address Management ***************************
//
get addresses(): FormArray {
return <FormArray> this.contactForm.get('addresses');
}
buildAddressGroup() {
return this.fb.group({
address_lookup: '',
address_type: 'home',
line1: '',
line2: '',
city: '',
state: '',
postal_code: '',
country: ''
});
}
addAddress() {
if (this.addresses.length >= this.MAX_ADDRESS_TYPES) {
alert('No more address types are available.');
return;
}
this.showFullAddress.push(false);
this.hasAddressBeenPlaced.push(false);
this.addresses.push(this.buildAddressGroup());
}
hasAddressBeenPlaced: boolean[] = [false];
handleAddressChange(address: Address, index: number) {
console.log(JSON.stringify(address));
const addr = this._googlePlacesService.parseGooglePlacesAddress(address);
console.log('Placed Addr: ' + JSON.stringify(addr));
this.addresses.controls[index].patchValue({
line1: addr.line1,
line2: addr.line2,
city: addr.city,
state: addr.state,
postal_code: addr.postal_code,
country: addr.country,
address_lookup: 'Please edit the address below'
});
this.showFullAddress[index] = true;
this.hasAddressBeenPlaced[index] = true;
this.addresses.controls[index].get('address_lookup').disable();
}
private extractAddressesFromForm(contactToSave) {
    // let's iterate and get the addresses
const addresses = this.addresses.controls;
console.log(JSON.stringify(addresses[0].value));
if (addresses && addresses.length > 0) {
const processedAddressTypes = {};
for (const address of addresses) {
// is this a duplicate?
const type = address.value.address_type;
if (processedAddressTypes[type]) {
// validation error
alert('Duplicate address types specified!');
return;
}
const addr = address.value;
processedAddressTypes[type] = true;
switch (type) {
case 'home':
contactToSave.home_address = addr;
break;
case 'work':
contactToSave.work_address = addr;
break;
case 'alternate':
contactToSave.alt_address = addr;
break;
default:
            throw new Error('unknown address type ' + address.value.address_type);
}
delete addr.address_lookup; // this isn't part of the REST API
delete addr.address_type; // this isn't part of the REST API
}
}
}
alogliaClient: algoliasearch.Client;
algoliaOrganizationIndex: algoliasearch.Index;
isNewOrganization = false;
searchOrganizations = (text$: Observable<string>) =>
text$
.debounceTime(200)
.switchMap(term => this.algoliaOrganizationIndex.search({query: term}).then((response) => response.hits));
formatSelectedOrganization = (x: { name: string }) => x.name;
onOrganizationFieldChanged() {
// we'll use set timeout to give time for the NgbTypeahead method to be called
setTimeout(() => {
const selectedOrganization = this.contactForm.get('organization_id').value;
this.isNewOrganization = typeof selectedOrganization === 'string' && selectedOrganization.trim() !== '';
}, 300);
}
// saves the contact record via the REST API
saveChanges(): void {
this.toastr.clear();
this.isSaving = true;
const contactToSave = this.extractContactFromForm();
// now... do we need to save an organization?
let organizationObservable: Observable<Organization> = Observable.of(null);
if (typeof contactToSave.organization_id === 'object') {
      // no, an existing organization was selected - let's just use its ID
contactToSave.organization_id = contactToSave.organization_id.id;
} else {
// we have to create an organization
if (contactToSave.organization_id.trim() !== '') {
const newOrganization: Organization = {
name: contactToSave.organization_id,
main_address: contactToSave.work_address,
main_phone_number: contactToSave.work_phone_number
};
console.log('saving organization: ' + JSON.stringify(newOrganization));
organizationObservable = this._organizationService.createOrganization(this.securityContext.GetCurrentTenant(), newOrganization);
}
}
// now, let's save the contact via the REST API
// we'll output an error if unsuccessful via toaster
// we run the org observable, then map it and return the contact observable, and subscribe to THAT
organizationObservable
.mergeMap(organization => {
// if an organization was saved, let's update the org id
if (organization)
contactToSave.organization_id = organization.id;
return this._contactsService.createContact(this.securityContext.GetCurrentTenant(), contactToSave);
})
.subscribe((contact) => this.router.navigate(['/contacts', contact.id]),
(err) => {
const errorMessage = (err.error && err.error.errorMessage) ? err.error.errorMessage : err.message;
this.toastr.error(errorMessage, 'We weren\'t able to save the contact...', {
timeOut: 0,
closeButton: true,
positionClass: 'toast-top-full-width'
});
this.isSaving = false;
});
}
}
}
| addPhoneNumber | identifier_name |
create-contact.component.ts | import {Component, OnInit} from '@angular/core';
import {ContactsService} from '@rhythmsoftware/rolodex-angular-sdk/api/contacts.service';
import {CertificationsService} from '../../services/certifications/certifications.service';
import {AbstractControl, FormArray, FormBuilder, FormGroup, Validators} from '@angular/forms';
import {Router} from '@angular/router';
import {ToastrService} from 'ngx-toastr';
import clean from 'lodash-clean';
import * as nameParser from 'parse-full-name';
import {Contact} from '@rhythmsoftware/rolodex-angular-sdk/model/contact';
import * as googlePhoneNumberLib from 'google-libphonenumber';
import {Address} from 'ngx-google-places-autocomplete/objects/address';
import {GooglePlacesService} from '../../services/google-places/google-places.service';
import {AlgoliaKeyManagementService} from '../../services/algolia/algolia-key-management.service';
import {Observable} from 'rxjs';
import 'rxjs/add/operator/map';
import 'rxjs/add/operator/debounceTime';
import 'rxjs/add/operator/distinctUntilChanged';
import 'rxjs/add/operator/switchMap';
import 'rxjs/add/operator/mergeMap';
import 'rxjs/add/observable/of';
import * as algoliasearch from 'algoliasearch';
import {SecurityContextService} from '../../services/security-context/security-context.service';
import {Organization} from '@rhythmsoftware/rolodex-angular-sdk/model/organization';
import {OrganizationsService} from '@rhythmsoftware/rolodex-angular-sdk/api/organizations.service';
// This method checks to make sure you don't have two phone numbers with the same type
function validatePhoneNumbers(c: AbstractControl): { [key: string]: boolean } | null {
// console.log('validating...' + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const phoneAndTypeEntry of c.value) {
if (!phoneAndTypeEntry.phone_number || phoneAndTypeEntry.phone_number.trim() === '') {
continue;
} // don't bother if no phone
if (entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type]) {
return {'duplicate_phone_number_types': true};
}
entriesThatHaveBeenProcessed[phoneAndTypeEntry.phone_number_type] = true;
}
  return null;
}
function validateAddresses(c: AbstractControl): { [key: string]: boolean } | null {
// console.log('validating...' + JSON.stringify( c.value ));
const entriesThatHaveBeenProcessed = {};
for (const addressAndTypeEntry of c.value) {
if (!addressAndTypeEntry.line1 || addressAndTypeEntry.line1.trim() === '') {
continue;
} // don't worry about it
if (entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type]) {
return {'duplicate_address_types': true};
}
entriesThatHaveBeenProcessed[addressAndTypeEntry.address_type] = true;
}
  return null;
}
@Component({
templateUrl: './create-contact.component.html',
styleUrls: ['./create-contact.component.scss']
})
export class CreateContactComponent implements OnInit {
constructor(private _contactsService: ContactsService, private _certificationsService: CertificationsService, private _organizationService: OrganizationsService,
private fb: FormBuilder, private _googlePlacesService: GooglePlacesService, private _algoliaKeyManagementService: AlgoliaKeyManagementService,
private router: Router, private toastr: ToastrService, private securityContext: SecurityContextService) {
}
MAX_ADDRESS_TYPES = 3;
MAX_PHONE_NUMBER_TYPES = 4;
contact: Contact = {};
contactForm: FormGroup;
isSaving: boolean; // supports the Ladda button
// designations: Array<Select2OptionData> = [];
// the name and email panels
showAdditionalNameOptions = false;
showAdditionalEmails = false;
showFullAddress = [false];
ngOnInit() {
this.initFormGroup();
this.initControls();
this.initAlgolia();
}
initAlgolia(): void {
this._algoliaKeyManagementService.GetSecureApiKey(this.securityContext.GetCurrentTenant(), 'rolodex-organizations').subscribe(
key => {// populate the index
this.alogliaClient = algoliasearch(key.application_id, key.api_key);
this.algoliaOrganizationIndex = this.alogliaClient.initIndex(key.index_name);
// enable the search
this.contactForm.get('organization_id').enable();
});
}
initFormGroup(): void {
this.contactForm = this.fb.group({
salutation: '',
name: ['', Validators.required],
first_name: '',
middle_name: '',
last_name: '',
suffix: '',
nickname: '',
organization_id: {value: '', disabled: true}, // disabled until initialized
job_title: '',
title: '',
contact_role_ids: '',
phone_numbers: this.fb.array([this.buildPhoneNumberGroup()], validatePhoneNumbers),
addresses: this.fb.array([this.buildAddressGroup()], validateAddresses),
certifications: '',
preferred_phone_number: '',
preferred_address: '',
gender: '',
date_of_birth: '',
email_address: ['', Validators.email],
email_address2: ['', Validators.email],
email_address3: ['', Validators.email],
notes: ''
});
// hook up the name parser
this.contactForm.get('name').valueChanges.subscribe(name => {
// we don't want to run this if someone has changed one of the
// fields
if (this.contactForm.get('first_name').dirty ||
this.contactForm.get('middle_name').dirty ||
this.contactForm.get('last_name').dirty ||
this.contactForm.get('salutation').dirty ||
this.contactForm.get('nickname').dirty ||
this.contactForm.get('suffix').dirty) {
console.log('form name elements changed, disabling name parsing');
return;
}
const nameparts = nameParser.parseFullName(name);
// console.log(JSON.stringify(nameparts));
this.contactForm.patchValue({
first_name: nameparts.first,
middle_name: nameparts.middle,
last_name: nameparts.last,
salutation: nameparts.title,
nickname: nameparts.nick,
suffix: nameparts.suffix
});
if (nameparts.error && nameparts.error.length > 0) {
console.error('could not parse name: ' + JSON.stringify(nameparts.error));
}
});
}
get phone_numbers(): FormArray {
return <FormArray> this.contactForm.get('phone_numbers');
}
buildPhoneNumberGroup() |
addPhoneNumber() {
if (this.phone_numbers.length >= this.MAX_PHONE_NUMBER_TYPES) {
alert('No more phone number types are available.');
return;
}
this.phone_numbers.push(this.buildPhoneNumberGroup());
}
formatPhoneNumber(index: number) {
const phone = this.phone_numbers.controls[index].get('phone_number').value;
if (!phone || phone.trim() === '')
return;
// console.log(index);
// console.log(phone);
const PNF = googlePhoneNumberLib.PhoneNumberFormat;
const phoneUtil = googlePhoneNumberLib.PhoneNumberUtil.getInstance();
const number = phoneUtil.parseAndKeepRawInput(phone, 'US');
const typeOfFormat = phoneUtil.getRegionCodeForNumber(number) === 'US' ? PNF.NATIONAL : PNF.INTERNATIONAL;
const formattedPhone = phoneUtil.format(number, typeOfFormat);
this.phone_numbers.controls[index].patchValue({phone_number: formattedPhone});
}
initControls(): void {
//
// this._certificationsService.GetAllCertifications().subscribe((certifications) => this.designations = certifications.map(cert => ({
// id: cert.post_nominal,
// text: cert.post_nominal
// })));
}
shouldShowFormErrorFor(field: string) {
return this.contactForm.get(field).errors && this.contactForm.get(field).touched;
}
shouldShowFormErrorForControl(field: AbstractControl) {
return field.errors; // && field.touched;
}
extractContactFromForm(): Contact {
const contactToSave = this.contactForm.value;
// now, let's get the phone numbers
this.extractPhoneNumbersFromForm(contactToSave);
// get rid of the phone numbers property
delete contactToSave.phone_numbers;
    // now, let's get the addresses
this.extractAddressesFromForm(contactToSave);
    // get rid of the addresses property
delete contactToSave.addresses;
console.log(JSON.stringify(contactToSave));
return clean(contactToSave);
}
private extractPhoneNumbersFromForm(contactToSave) {
// let's iterate and get the phone numbers
const phoneNumbers = this.phone_numbers.controls;
if (phoneNumbers && phoneNumbers.length > 0) {
const processedPhoneNumbers = {};
for (const phoneNumber of phoneNumbers) {
// is this a duplicate?
const type = phoneNumber.value.phone_number_type;
if (processedPhoneNumbers[type]) {
// validation error
alert('Duplicate phone number types specified!');
return;
}
processedPhoneNumbers[type] = true;
switch (type) {
case 'home':
contactToSave.home_phone_number = phoneNumber.value.phone_number;
break;
case 'work':
contactToSave.work_phone_number = phoneNumber.value.phone_number;
break;
case 'mobile':
contactToSave.mobile_phone_number = phoneNumber.value.phone_number;
break;
case 'alternate':
contactToSave.alt_phone_number = phoneNumber.value.phone_number;
break;
default:
throw new Error('unknown phone number type ' + phoneNumber.value.phone_number_type);
}
}
}
}
// *********** Address Management ***************************
//
get addresses(): FormArray {
return <FormArray> this.contactForm.get('addresses');
}
buildAddressGroup() {
return this.fb.group({
address_lookup: '',
address_type: 'home',
line1: '',
line2: '',
city: '',
state: '',
postal_code: '',
country: ''
});
}
addAddress() {
if (this.addresses.length >= this.MAX_ADDRESS_TYPES) {
alert('No more address types are available.');
return;
}
this.showFullAddress.push(false);
this.hasAddressBeenPlaced.push(false);
this.addresses.push(this.buildAddressGroup());
}
hasAddressBeenPlaced: boolean[] = [false];
handleAddressChange(address: Address, index: number) {
console.log(JSON.stringify(address));
const addr = this._googlePlacesService.parseGooglePlacesAddress(address);
console.log('Placed Addr: ' + JSON.stringify(addr));
this.addresses.controls[index].patchValue({
line1: addr.line1,
line2: addr.line2,
city: addr.city,
state: addr.state,
postal_code: addr.postal_code,
country: addr.country,
address_lookup: 'Please edit the address below'
});
this.showFullAddress[index] = true;
this.hasAddressBeenPlaced[index] = true;
this.addresses.controls[index].get('address_lookup').disable();
}
private extractAddressesFromForm(contactToSave) {
    // let's iterate and get the addresses
const addresses = this.addresses.controls;
console.log(JSON.stringify(addresses[0].value));
if (addresses && addresses.length > 0) {
const processedAddressTypes = {};
for (const address of addresses) {
// is this a duplicate?
const type = address.value.address_type;
if (processedAddressTypes[type]) {
// validation error
alert('Duplicate address types specified!');
return;
}
const addr = address.value;
processedAddressTypes[type] = true;
switch (type) {
case 'home':
contactToSave.home_address = addr;
break;
case 'work':
contactToSave.work_address = addr;
break;
case 'alternate':
contactToSave.alt_address = addr;
break;
default:
            throw new Error('unknown address type ' + address.value.address_type);
}
delete addr.address_lookup; // this isn't part of the REST API
delete addr.address_type; // this isn't part of the REST API
}
}
}
alogliaClient: algoliasearch.Client;
algoliaOrganizationIndex: algoliasearch.Index;
isNewOrganization = false;
searchOrganizations = (text$: Observable<string>) =>
text$
.debounceTime(200)
.switchMap(term => this.algoliaOrganizationIndex.search({query: term}).then((response) => response.hits));
formatSelectedOrganization = (x: { name: string }) => x.name;
onOrganizationFieldChanged() {
// we'll use set timeout to give time for the NgbTypeahead method to be called
setTimeout(() => {
const selectedOrganization = this.contactForm.get('organization_id').value;
this.isNewOrganization = typeof selectedOrganization === 'string' && selectedOrganization.trim() !== '';
}, 300);
}
// saves the contact record via the REST API
saveChanges(): void {
this.toastr.clear();
this.isSaving = true;
const contactToSave = this.extractContactFromForm();
// now... do we need to save an organization?
let organizationObservable: Observable<Organization> = Observable.of(null);
if (typeof contactToSave.organization_id === 'object') {
      // no, an existing organization was selected - let's just use its ID
contactToSave.organization_id = contactToSave.organization_id.id;
} else {
// we have to create an organization
if (contactToSave.organization_id.trim() !== '') {
const newOrganization: Organization = {
name: contactToSave.organization_id,
main_address: contactToSave.work_address,
main_phone_number: contactToSave.work_phone_number
};
console.log('saving organization: ' + JSON.stringify(newOrganization));
organizationObservable = this._organizationService.createOrganization(this.securityContext.GetCurrentTenant(), newOrganization);
}
}
// now, let's save the contact via the REST API
// we'll output an error if unsuccessful via toaster
// we run the org observable, then map it and return the contact observable, and subscribe to THAT
organizationObservable
.mergeMap(organization => {
// if an organization was saved, let's update the org id
if (organization)
contactToSave.organization_id = organization.id;
return this._contactsService.createContact(this.securityContext.GetCurrentTenant(), contactToSave);
})
.subscribe((contact) => this.router.navigate(['/contacts', contact.id]),
(err) => {
const errorMessage = (err.error && err.error.errorMessage) ? err.error.errorMessage : err.message;
this.toastr.error(errorMessage, 'We weren\'t able to save the contact...', {
timeOut: 0,
closeButton: true,
positionClass: 'toast-top-full-width'
});
this.isSaving = false;
});
}
}
}
| {
return this.fb.group({
phone_number: '',
phone_number_type: 'mobile'
});
} | identifier_body |
deployment.go | /*
Package deployment provides methods for managing application deployment and release files.
A deployment resides on the server running PullDeploy in daemon mode. It has the following
directory structure:
/BASEDIR/APPNAME/artifact
/BASEDIR/APPNAME/current (a symlink)
/BASEDIR/APPNAME/release
Artifacts retrieved from the repository are placed into the "artifact" directory:
/BASEDIR/APPNAME/artifact/APPNAME-VERSION.ARTIFACTTYPE
Deployed releases are unpacked into a directory named for the version, under the |
/BASEDIR/APPNAME/release/VERSION1
/BASEDIR/APPNAME/release/VERSION2
/BASEDIR/APPNAME/release/VERSION3
Releasing a version points the "current" symlink to the specified release directory.
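As an illustration only (the application name, version, base directory, and artifact
extension here are hypothetical, not taken from any real configuration), deploying and
releasing version 2.1.0 of an application "myapp" with a BaseDir of "/opt/pulldeploy" and
a tar.gz artifact type would produce roughly:
/opt/pulldeploy/myapp/artifact/myapp-2.1.0.tar.gz
/opt/pulldeploy/myapp/release/2.1.0/
/opt/pulldeploy/myapp/current -> /opt/pulldeploy/myapp/release/2.1.0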
*/
package deployment
import (
"crypto/hmac"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path"
"strconv"
"strings"
"github.com/mredivo/pulldeploy/pdconfig"
)
const kARTIFACTDIR = "artifact"
const kRELEASEDIR = "release"
const kCURRENTDIR = "current"
const kHMACSUFFIX = "hmac"
// Deployment provides methods for manipulating local deployment files.
type Deployment struct {
appName string // The name of the application
cfg pdconfig.AppConfig // The deployment configuration
acfg pdconfig.ArtifactConfig // The Artifact Type configuration
uid int // The numeric UID to own all files for this deployment
gid int // The numeric GID to own all files for this deployment
baseDir string // The derived top-level directory for this app's files
artifactDir string // The derived subdirectory for fetched build artifacts
releaseDir string // The derived subdirectory for extracted build artifacts
}
// New returns a new Deployment.
func New(appName string, pdcfg pdconfig.PDConfig, cfg *pdconfig.AppConfig) (*Deployment, error) {
d := new(Deployment)
d.cfg = *cfg
// Capture the supplied arguments.
d.appName = appName
// All string arguments are mandatory.
if appName == "" {
return nil, errors.New("Deployment initialization error: Appname is mandatory")
}
if d.cfg.BaseDir == "" {
return nil, errors.New("Deployment initialization error: BaseDir is mandatory")
}
// Validate the artifact type.
if ac, err := pdcfg.GetArtifactConfig(d.cfg.ArtifactType); err == nil {
d.acfg = *ac
} else {
return nil, fmt.Errorf("Deployment initialization error: invalid ArtifactType %q", d.cfg.ArtifactType)
}
// Derive the UID/GID from the username/groupname.
// NOTE: Go doesn't yet support looking up a GID from a name, so
// we use the gid from the user.
if d.cfg.User != "" {
if user, err := user.Lookup(d.cfg.User); err == nil {
if i, err := strconv.ParseInt(user.Uid, 10, 64); err == nil {
d.uid = int(i)
}
if i, err := strconv.ParseInt(user.Gid, 10, 64); err == nil {
d.gid = int(i)
}
}
}
// The parent directory must not be "/".
parentDir := absPath(d.cfg.BaseDir)
if parentDir == "/" {
return nil, errors.New("Deployment initialization error: \"/\" not permitted as BaseDir")
}
// The parent directory must exist.
if _, err := os.Stat(parentDir); err != nil {
return nil, fmt.Errorf("Deployment initialization error: unable to stat BaseDir: %s", err.Error())
}
// If the base dir doesn't exist, create it.
d.baseDir = path.Join(parentDir, appName)
if _, err := os.Stat(d.baseDir); err != nil {
if err := makeDir(d.baseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the artifact dir doesn't exist, create it.
d.artifactDir = path.Join(d.baseDir, kARTIFACTDIR)
if _, err := os.Stat(d.artifactDir); err != nil {
if err := makeDir(d.artifactDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the release dir doesn't exist, create it.
d.releaseDir = path.Join(d.baseDir, kRELEASEDIR)
if _, err := os.Stat(d.releaseDir); err != nil {
if err := makeDir(d.releaseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
return d, nil
}
// ArtifactPresent indicates whether the artifact has already been written.
func (d *Deployment) ArtifactPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteArtifact creates a file in the artifact area from the given stream.
func (d *Deployment) WriteArtifact(version string, rc io.ReadCloser) error {
// Housekeeping: ensure the source is closed when done.
defer rc.Close()
// Generate the filename, and check whether file already exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if exists {
return fmt.Errorf("Artifact already exists: %s", artifactPath)
}
// Open the file, and write the data into it.
if fp, err := os.OpenFile(artifactPath, os.O_WRONLY|os.O_CREATE, 0664); err == nil {
defer fp.Close()
if _, err := io.Copy(fp, rc); err != nil {
return fmt.Errorf("Error while creating %q: %s", artifactPath, err.Error())
}
if err := setOwner(artifactPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", artifactPath, err.Error())
}
}
return nil
}
// HMACPresent indicates whether the HMAC has already been written.
func (d *Deployment) HMACPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteHMAC writes an HMAC into the artifact area.
func (d *Deployment) WriteHMAC(version string, hmac []byte) error {
// Generate the filename, write to file, set ownership.
hmacPath, _ := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if err := ioutil.WriteFile(hmacPath, hmac, 0664); err != nil {
return fmt.Errorf("Error while writing %q: %s", hmacPath, err.Error())
}
if err := setOwner(hmacPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", hmacPath, err.Error())
}
return nil
}
// CheckHMAC confirms that the artifact has not been corrupted or tampered with by
// calculating its HMAC and comparing it with the retrieved HMAC.
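// The HMAC itself comes from NewHMACCalculator and CalculateHMAC, which are defined elsewhere
// in this package. As a rough sketch of what they are assumed to do (a keyed hash, e.g.
// HMAC-SHA256, streamed over the artifact; the real implementation may differ in hash choice
// and encoding):
//
//     mac := hmac.New(sha256.New, []byte(secret))  // NewHMACCalculator(secret)
//     io.Copy(mac, artifactFile)                   // CalculateHMAC streams the file through the MAC
//     sum := mac.Sum(nil)                          // compared to the stored value via hmac.Equal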
func (d *Deployment) CheckHMAC(version string) error {
// Build the filenames.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("HMAC does not exist: %s", artifactPath)
}
// Read in the HMAC.
if expectedMAC, err := ioutil.ReadFile(hmacPath); err == nil {
// Open the artifact, and calculate its HMAC.
if fp, err := os.Open(artifactPath); err == nil {
messageMAC := CalculateHMAC(fp, NewHMACCalculator(d.cfg.Secret))
if !hmac.Equal(messageMAC, expectedMAC) {
return fmt.Errorf(
"Artifact is corrupt: Expected HMAC: %q: Calculated HMAC: %q",
string(expectedMAC),
string(messageMAC),
)
}
} else {
return fmt.Errorf("Error while reading %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Error while reading %q: %s", hmacPath, err.Error())
}
return nil
}
// Extract transfers an artifact to the version release directory.
func (d *Deployment) Extract(version string) error {
// Ensure that the artifact to be extracted exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
// If running as root, ensure the extract command wasn't loaded from an insecure file.
if os.Geteuid() == 0 && d.acfg.Insecure {
return fmt.Errorf(
"Refusing to execute extract command from insecure \"pulldeploy.yaml\" as root")
}
// Create the version directory if it doesn't exist.
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
if err := makeDir(versionDir, d.uid, d.gid, 0755); err != nil {
return fmt.Errorf("Cannot create release directory %q: %s", version, err.Error())
}
}
// Extract the archive into the version directory.
cmdlineArgs := substituteVars(d.acfg.Extract.Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
_, err := sysCommand("", d.acfg.Extract.Cmd, cmdlineArgs)
if err != nil {
return fmt.Errorf("Cannot extract archive %q into %q: %s", artifactPath, versionDir, err.Error())
}
// Set the ownership of all the extracted files.
if err := setOwnerAll(versionDir, d.uid, d.gid); err != nil {
return err
}
return nil
}
// Link sets the "current" symlink to point at the indicated version.
func (d *Deployment) Link(version string) error {
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
return fmt.Errorf("Release directory does not exist: %q", versionDir)
}
symlinkPath := path.Join(d.baseDir, kCURRENTDIR)
os.Remove(symlinkPath)
return os.Symlink(versionDir, symlinkPath)
}
// PostDeploy executes the configured PostDeploy command.
func (d *Deployment) PostDeploy(version string) (string, error) {
if os.Geteuid() == 0 && d.cfg.Insecure {
return "", fmt.Errorf(
"Refusing to execute post-deploy command from insecure %q configuration as root",
d.appName)
}
if d.cfg.Scripts["postdeploy"].Cmd != "" {
artifactPath, _ := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
versionDir, _ := makeReleasePath(d.releaseDir, version)
cmdlineArgs := substituteVars(d.cfg.Scripts["postdeploy"].Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
return sysCommand(versionDir, d.cfg.Scripts["postdeploy"].Cmd, cmdlineArgs)
}
return "", nil
}
// PostRelease executes the configured PostRelease command.
func (d *Deployment) PostRelease(version string) (string, error) {
if os.Geteuid() == 0 && d.cfg.Insecure {
return "", fmt.Errorf(
"Refusing to execute post-release command from insecure %q configuration as root",
d.appName)
}
if d.cfg.Scripts["postrelease"].Cmd != "" {
artifactPath, _ := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
versionDir, _ := makeReleasePath(d.releaseDir, version)
cmdlineArgs := substituteVars(d.cfg.Scripts["postrelease"].Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
return sysCommand(versionDir, d.cfg.Scripts["postrelease"].Cmd, cmdlineArgs)
}
return "", nil
}
// Remove deletes everything associated with the given version.
func (d *Deployment) Remove(version string) error {
// Removing the currently linked version is not permitted.
if d.GetCurrentLink() == version {
return fmt.Errorf("Removing current version not permitted: %q", version)
}
// Remove the artifact and HMAC.
if artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension); exists {
os.Remove(artifactPath)
}
if hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension); exists {
os.Remove(hmacPath)
}
// Remove the extracted files.
if versionDir, exists := makeReleasePath(d.releaseDir, version); exists {
return os.RemoveAll(versionDir)
}
return nil
}
// GetCurrentLink returns the name of the currently released version.
func (d *Deployment) GetCurrentLink() string {
// Read the symlink off disk.
symlink, err := os.Readlink(path.Join(d.baseDir, kCURRENTDIR))
if err != nil {
return ""
}
// We are interested in the last element, which is the active version directory.
dirs := strings.Split(symlink, "/")
return dirs[len(dirs)-1]
}
// GetDeployedVersions enumerates all the versions currently available for linking.
func (d *Deployment) GetDeployedVersions() []string {
var versionList []string
// Everything in the release directory is an available version.
if fi, err := ioutil.ReadDir(d.releaseDir); err == nil {
for _, v := range fi {
versionList = append(versionList, v.Name())
}
}
return versionList
} | "release" directory. | random_line_split |
deployment.go | /*
Package deployment provides methods for managing application deployment and release files.
A deployment resides on the server running PullDeploy in daemon mode. It has the following
directory structure:
/BASEDIR/APPNAME/artifact
/BASEDIR/APPNAME/current (a symlink)
/BASEDIR/APPNAME/release
Artifacts retrieved from the repository are placed into the "artifact" directory:
/BASEDIR/APPNAME/artifact/APPNAME-VERSION.ARTIFACTTYPE
Deployed releases are unpacked into a directory named for the version, under the
"release" directory.
/BASEDIR/APPNAME/release/VERSION1
/BASEDIR/APPNAME/release/VERSION2
/BASEDIR/APPNAME/release/VERSION3
Releasing a version points the "current" symlink to the specified release directory.
*/
package deployment
import (
"crypto/hmac"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path"
"strconv"
"strings"
"github.com/mredivo/pulldeploy/pdconfig"
)
const kARTIFACTDIR = "artifact"
const kRELEASEDIR = "release"
const kCURRENTDIR = "current"
const kHMACSUFFIX = "hmac"
// Deployment provides methods for manipulating local deployment files.
type Deployment struct {
appName string // The name of the application
cfg pdconfig.AppConfig // The deployment configuration
acfg pdconfig.ArtifactConfig // The Artifact Type configuration
uid int // The numeric UID to own all files for this deployment
gid int // The numeric GID to own all files for this deployment
baseDir string // The derived top-level directory for this app's files
artifactDir string // The derived subdirectory for fetched build artifacts
releaseDir string // The derived subdirectory for extracted build artifacts
}
// New returns a new Deployment.
func New(appName string, pdcfg pdconfig.PDConfig, cfg *pdconfig.AppConfig) (*Deployment, error) {
d := new(Deployment)
d.cfg = *cfg
// Capture the supplied arguments.
d.appName = appName
// All string arguments are mandatory.
if appName == "" {
return nil, errors.New("Deployment initialization error: Appname is mandatory")
}
if d.cfg.BaseDir == "" {
return nil, errors.New("Deployment initialization error: BaseDir is mandatory")
}
// Validate the artifact type.
if ac, err := pdcfg.GetArtifactConfig(d.cfg.ArtifactType); err == nil | else {
return nil, fmt.Errorf("Deployment initialization error: invalid ArtifactType %q", d.cfg.ArtifactType)
}
// Derive the UID/GID from the username/groupname.
// NOTE: Go doesn't yet support looking up a GID from a name, so
// we use the gid from the user.
if d.cfg.User != "" {
if user, err := user.Lookup(d.cfg.User); err == nil {
if i, err := strconv.ParseInt(user.Uid, 10, 64); err == nil {
d.uid = int(i)
}
if i, err := strconv.ParseInt(user.Gid, 10, 64); err == nil {
d.gid = int(i)
}
}
}
// The parent directory must not be "/".
parentDir := absPath(d.cfg.BaseDir)
if parentDir == "/" {
return nil, errors.New("Deployment initialization error: \"/\" not permitted as BaseDir")
}
// The parent directory must exist.
if _, err := os.Stat(parentDir); err != nil {
return nil, fmt.Errorf("Deployment initialization error: unable to stat BaseDir: %s", err.Error())
}
// If the base dir doesn't exist, create it.
d.baseDir = path.Join(parentDir, appName)
if _, err := os.Stat(d.baseDir); err != nil {
if err := makeDir(d.baseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the artifact dir doesn't exist, create it.
d.artifactDir = path.Join(d.baseDir, kARTIFACTDIR)
if _, err := os.Stat(d.artifactDir); err != nil {
if err := makeDir(d.artifactDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the release dir doesn't exist, create it.
d.releaseDir = path.Join(d.baseDir, kRELEASEDIR)
if _, err := os.Stat(d.releaseDir); err != nil {
if err := makeDir(d.releaseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
return d, nil
}
// ArtifactPresent indicates whether the artifact has already been written.
func (d *Deployment) ArtifactPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteArtifact creates a file in the artifact area from the given stream.
func (d *Deployment) WriteArtifact(version string, rc io.ReadCloser) error {
// Housekeeping: ensure the source is closed when done.
defer rc.Close()
// Generate the filename, and check whether file already exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if exists {
return fmt.Errorf("Artifact already exists: %s", artifactPath)
}
// Open the file, and write the data into it.
if fp, err := os.OpenFile(artifactPath, os.O_WRONLY|os.O_CREATE, 0664); err == nil {
defer fp.Close()
if _, err := io.Copy(fp, rc); err != nil {
return fmt.Errorf("Error while creating %q: %s", artifactPath, err.Error())
}
if err := setOwner(artifactPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Unable to open %q for writing: %s", artifactPath, err.Error())
}
return nil
}
// HMACPresent indicates whether the HMAC has already been written.
func (d *Deployment) HMACPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteHMAC writes an HMAC into the artifact area.
func (d *Deployment) WriteHMAC(version string, hmac []byte) error {
// Generate the filename, write to file, set ownership.
hmacPath, _ := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if err := ioutil.WriteFile(hmacPath, hmac, 0664); err != nil {
return fmt.Errorf("Error while writing %q: %s", hmacPath, err.Error())
}
if err := setOwner(hmacPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", hmacPath, err.Error())
}
return nil
}
// CheckHMAC confirms that the artifact has not been corrupted or tampered with by
// calculating its HMAC and comparing it with the retrieved HMAC.
func (d *Deployment) CheckHMAC(version string) error {
// Build the filenames.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("HMAC does not exist: %s", artifactPath)
}
// Read in the HMAC.
if expectedMAC, err := ioutil.ReadFile(hmacPath); err == nil {
// Open the artifact, and calculate its HMAC.
if fp, err := os.Open(artifactPath); err == nil {
messageMAC := CalculateHMAC(fp, NewHMACCalculator(d.cfg.Secret))
if !hmac.Equal(messageMAC, expectedMAC) {
return fmt.Errorf(
"Artifact is corrupt: Expected HMAC: %q: Calculated HMAC: %q",
string(expectedMAC),
string(messageMAC),
)
}
} else {
return fmt.Errorf("Error while reading %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Error while reading %q: %s", hmacPath, err.Error())
}
return nil
}
// Extract transfers an artifact to the version release directory.
func (d *Deployment) Extract(version string) error {
// Ensure that the artifact to be extracted exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
// If running as root, ensure the extract command wasn't loaded from an insecure file.
if os.Geteuid() == 0 && d.acfg.Insecure {
return fmt.Errorf(
"Refusing to execute extract command from insecure \"pulldeploy.yaml\" as root")
}
// Create the version directory if it doesn't exist.
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
if err := makeDir(versionDir, d.uid, d.gid, 0755); err != nil {
return fmt.Errorf("Cannot create release directory %q: %s", version, err.Error())
}
}
// Extract the archive into the version directory.
cmdlineArgs := substituteVars(d.acfg.Extract.Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
_, err := sysCommand("", d.acfg.Extract.Cmd, cmdlineArgs)
if err != nil {
return fmt.Errorf("Cannot extract archive %q into %q: %s", artifactPath, versionDir, err.Error())
}
// Set the ownership of all the extracted files.
if err := setOwnerAll(versionDir, d.uid, d.gid); err != nil {
return err
}
return nil
}
// Link sets the "current" symlink to point at the indicated version.
func (d *Deployment) Link(version string) error {
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
return fmt.Errorf("Release directory does not exist: %q", versionDir)
}
symlinkPath := path.Join(d.baseDir, kCURRENTDIR)
os.Remove(symlinkPath)
return os.Symlink(versionDir, symlinkPath)
}
// PostDeploy executes the configured PostDeploy command.
func (d *Deployment) PostDeploy(version string) (string, error) {
if os.Geteuid() == 0 && d.cfg.Insecure {
return "", fmt.Errorf(
"Refusing to execute post-deploy command from insecure %q configuration as root",
d.appName)
}
if d.cfg.Scripts["postdeploy"].Cmd != "" {
artifactPath, _ := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
versionDir, _ := makeReleasePath(d.releaseDir, version)
cmdlineArgs := substituteVars(d.cfg.Scripts["postdeploy"].Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
return sysCommand(versionDir, d.cfg.Scripts["postdeploy"].Cmd, cmdlineArgs)
}
return "", nil
}
// PostRelease executes the configured PostRelease command.
func (d *Deployment) PostRelease(version string) (string, error) {
if os.Geteuid() == 0 && d.cfg.Insecure {
return "", fmt.Errorf(
"Refusing to execute post-release command from insecure %q configuration as root",
d.appName)
}
if d.cfg.Scripts["postrelease"].Cmd != "" {
artifactPath, _ := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
versionDir, _ := makeReleasePath(d.releaseDir, version)
cmdlineArgs := substituteVars(d.cfg.Scripts["postrelease"].Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
return sysCommand(versionDir, d.cfg.Scripts["postrelease"].Cmd, cmdlineArgs)
}
return "", nil
}
// Remove deletes everything associated with the given version.
func (d *Deployment) Remove(version string) error {
// Removing the currently linked version is not permitted.
if d.GetCurrentLink() == version {
return fmt.Errorf("Removing current version not permitted: %q", version)
}
// Remove the artifact and HMAC.
if artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension); exists {
os.Remove(artifactPath)
}
if hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension); exists {
os.Remove(hmacPath)
}
// Remove the extracted files.
if versionDir, exists := makeReleasePath(d.releaseDir, version); exists {
return os.RemoveAll(versionDir)
}
return nil
}
// GetCurrentLink returns the name of the currently released version.
func (d *Deployment) GetCurrentLink() string {
// Read the symlink off disk.
symlink, err := os.Readlink(path.Join(d.baseDir, kCURRENTDIR))
if err != nil {
return ""
}
// We are interested in the last element, which is the active version directory.
dirs := strings.Split(symlink, "/")
return dirs[len(dirs)-1]
}
// GetDeployedVersions enumerates all the versions currently available for linking.
func (d *Deployment) GetDeployedVersions() []string {
var versionList []string
// Everything in the release directory is an available version.
if fi, err := ioutil.ReadDir(d.releaseDir); err == nil {
for _, v := range fi {
versionList = append(versionList, v.Name())
}
}
return versionList
}
| {
d.acfg = *ac
} | conditional_block |
deployment.go | /*
Package deployment provides methods for managing application deployment and release files.
A deployment resides on the server running PullDeploy in daemon mode. It has the following
directory structure:
/BASEDIR/APPNAME/artifact
/BASEDIR/APPNAME/current (a symlink)
/BASEDIR/APPNAME/release
Artifacts retrieved from the repository are placed into the "artifact" directory:
/BASEDIR/APPNAME/artifact/APPNAME-VERSION.ARTIFACTTYPE
Deployed releases are unpacked into a directory named for the version, under the
"release" directory.
/BASEDIR/APPNAME/release/VERSION1
/BASEDIR/APPNAME/release/VERSION2
/BASEDIR/APPNAME/release/VERSION3
Releasing a version points the "current" symlink to the specified release directory.
*/
package deployment
import (
"crypto/hmac"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path"
"strconv"
"strings"
"github.com/mredivo/pulldeploy/pdconfig"
)
const kARTIFACTDIR = "artifact"
const kRELEASEDIR = "release"
const kCURRENTDIR = "current"
const kHMACSUFFIX = "hmac"
// Deployment provides methods for manipulating local deployment files.
type Deployment struct {
appName string // The name of the application
cfg pdconfig.AppConfig // The deployment configuration
acfg pdconfig.ArtifactConfig // The Artifact Type configuration
uid int // The numeric UID to own all files for this deployment
gid int // The numeric GID to own all files for this deployment
baseDir string // The derived top-level directory for this app's files
artifactDir string // The derived subdirectory for fetched build artifacts
releaseDir string // The derived subdirectory for extracted build artifacts
}
// New returns a new Deployment.
func New(appName string, pdcfg pdconfig.PDConfig, cfg *pdconfig.AppConfig) (*Deployment, error) {
d := new(Deployment)
d.cfg = *cfg
// Capture the supplied arguments.
d.appName = appName
// All string arguments are mandatory.
if appName == "" {
return nil, errors.New("Deployment initialization error: Appname is mandatory")
}
if d.cfg.BaseDir == "" {
return nil, errors.New("Deployment initialization error: BaseDir is mandatory")
}
// Validate the artifact type.
if ac, err := pdcfg.GetArtifactConfig(d.cfg.ArtifactType); err == nil {
d.acfg = *ac
} else {
return nil, fmt.Errorf("Deployment initialization error: invalid ArtifactType %q", d.cfg.ArtifactType)
}
// Derive the UID/GID from the username/groupname.
// NOTE: Go doesn't yet support looking up a GID from a name, so
// we use the gid from the user.
if d.cfg.User != "" {
if user, err := user.Lookup(d.cfg.User); err == nil {
if i, err := strconv.ParseInt(user.Uid, 10, 64); err == nil {
d.uid = int(i)
}
if i, err := strconv.ParseInt(user.Gid, 10, 64); err == nil {
d.gid = int(i)
}
}
}
// The parent directory must not be "/".
parentDir := absPath(d.cfg.BaseDir)
if parentDir == "/" {
return nil, errors.New("Deployment initialization error: \"/\" not permitted as BaseDir")
}
// The parent directory must exist.
if _, err := os.Stat(parentDir); err != nil {
return nil, fmt.Errorf("Deployment initialization error: unable to stat BaseDir: %s", err.Error())
}
// If the base dir doesn't exist, create it.
d.baseDir = path.Join(parentDir, appName)
if _, err := os.Stat(d.baseDir); err != nil {
if err := makeDir(d.baseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the artifact dir doesn't exist, create it.
d.artifactDir = path.Join(d.baseDir, kARTIFACTDIR)
if _, err := os.Stat(d.artifactDir); err != nil {
if err := makeDir(d.artifactDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the release dir doesn't exist, create it.
d.releaseDir = path.Join(d.baseDir, kRELEASEDIR)
if _, err := os.Stat(d.releaseDir); err != nil {
if err := makeDir(d.releaseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
return d, nil
}
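// NOTE (editorial sketch, not part of the original source): the function below
// illustrates how the Deployment methods defined in this file are intended to be
// combined for a typical deploy-and-release cycle. The application name and
// version literals, and the already-opened artifact stream plus its HMAC bytes,
// are placeholder assumptions.
func exampleDeployAndRelease(pdcfg pdconfig.PDConfig, appCfg *pdconfig.AppConfig, rc io.ReadCloser, mac []byte) error {
// Create (or attach to) the on-disk layout under BaseDir/APPNAME.
d, err := New("myapp", pdcfg, appCfg)
if err != nil {
return err
}
// Store the fetched artifact and its HMAC in the artifact area.
if err := d.WriteArtifact("1.0.0", rc); err != nil {
return err
}
if err := d.WriteHMAC("1.0.0", mac); err != nil {
return err
}
// Verify integrity, unpack into release/1.0.0, then point "current" at it.
if err := d.CheckHMAC("1.0.0"); err != nil {
return err
}
if err := d.Extract("1.0.0"); err != nil {
return err
}
return d.Link("1.0.0")
}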
// ArtifactPresent indicates whether the artifact has already been written.
func (d *Deployment) ArtifactPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteArtifact creates a file in the artifact area from the given stream.
func (d *Deployment) WriteArtifact(version string, rc io.ReadCloser) error {
// Housekeeping: ensure the source is closed when done.
defer rc.Close()
// Generate the filename, and check whether file already exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if exists {
return fmt.Errorf("Artifact already exists: %s", artifactPath)
}
// Open the file, and write the data into it.
if fp, err := os.OpenFile(artifactPath, os.O_WRONLY|os.O_CREATE, 0664); err == nil {
defer fp.Close()
if _, err := io.Copy(fp, rc); err != nil {
return fmt.Errorf("Error while creating %q: %s", artifactPath, err.Error())
}
if err := setOwner(artifactPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Unable to open %q for writing: %s", artifactPath, err.Error())
}
return nil
}
// HMACPresent indicates whether the HMAC has already been written.
func (d *Deployment) HMACPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteHMAC writes an HMAC into the artifact area.
func (d *Deployment) WriteHMAC(version string, hmac []byte) error {
// Generate the filename, write to file, set ownership.
hmacPath, _ := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if err := ioutil.WriteFile(hmacPath, hmac, 0664); err != nil {
return fmt.Errorf("Error while writing %q: %s", hmacPath, err.Error())
}
if err := setOwner(hmacPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", hmacPath, err.Error())
}
return nil
}
// CheckHMAC confirms that the artifact has not been corrupted or tampered with by
// calculating its HMAC and comparing it with the retrieved HMAC.
func (d *Deployment) CheckHMAC(version string) error {
// Build the filenames.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("HMAC does not exist: %s", artifactPath)
}
// Read in the HMAC.
if expectedMAC, err := ioutil.ReadFile(hmacPath); err == nil {
// Open the artifact, and calculate its HMAC.
if fp, err := os.Open(artifactPath); err == nil {
messageMAC := CalculateHMAC(fp, NewHMACCalculator(d.cfg.Secret))
if !hmac.Equal(messageMAC, expectedMAC) {
return fmt.Errorf(
"Artifact is corrupt: Expected HMAC: %q: Calculated HMAC: %q",
string(expectedMAC),
string(messageMAC),
)
}
} else {
return fmt.Errorf("Error while reading %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Error while reading %q: %s", hmacPath, err.Error())
}
return nil
}
// Extract transfers an artifact to the version release directory.
func (d *Deployment) Extract(version string) error {
// Ensure that the artifact to be extracted exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
// If running as root, ensure the extract command wasn't loaded from an insecure file.
if os.Geteuid() == 0 && d.acfg.Insecure {
return fmt.Errorf(
"Refusing to execute extract command from insecure \"pulldeploy.yaml\" as root")
}
// Create the version directory if it doesn't exist.
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
if err := makeDir(versionDir, d.uid, d.gid, 0755); err != nil {
return fmt.Errorf("Cannot create release directory %q: %s", version, err.Error())
}
}
// Extract the archive into the version directory.
cmdlineArgs := substituteVars(d.acfg.Extract.Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
_, err := sysCommand("", d.acfg.Extract.Cmd, cmdlineArgs)
if err != nil {
return fmt.Errorf("Cannot extract archive %q into %q: %s", artifactPath, versionDir, err.Error())
}
// Set the ownership of all the extracted files.
if err := setOwnerAll(versionDir, d.uid, d.gid); err != nil {
return err
}
return nil
}
// Link sets the "current" symlink to point at the indicated version.
func (d *Deployment) Link(version string) error |
// PostDeploy executes the configured PostDeploy command.
func (d *Deployment) PostDeploy(version string) (string, error) {
if os.Geteuid() == 0 && d.cfg.Insecure {
return "", fmt.Errorf(
"Refusing to execute post-deploy command from insecure %q configuration as root",
d.appName)
}
if d.cfg.Scripts["postdeploy"].Cmd != "" {
artifactPath, _ := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
versionDir, _ := makeReleasePath(d.releaseDir, version)
cmdlineArgs := substituteVars(d.cfg.Scripts["postdeploy"].Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
return sysCommand(versionDir, d.cfg.Scripts["postdeploy"].Cmd, cmdlineArgs)
}
return "", nil
}
// PostRelease executes the configured PostRelease command.
func (d *Deployment) PostRelease(version string) (string, error) {
if os.Geteuid() == 0 && d.cfg.Insecure {
return "", fmt.Errorf(
"Refusing to execute post-release command from insecure %q configuration as root",
d.appName)
}
if d.cfg.Scripts["postrelease"].Cmd != "" {
artifactPath, _ := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
versionDir, _ := makeReleasePath(d.releaseDir, version)
cmdlineArgs := substituteVars(d.cfg.Scripts["postrelease"].Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
return sysCommand(versionDir, d.cfg.Scripts["postrelease"].Cmd, cmdlineArgs)
}
return "", nil
}
// Remove deletes everything associated with the given version.
func (d *Deployment) Remove(version string) error {
// Removing the currently linked version is not permitted.
if d.GetCurrentLink() == version {
return fmt.Errorf("Removing current version not permitted: %q", version)
}
// Remove the artifact and HMAC.
if artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension); exists {
os.Remove(artifactPath)
}
if hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension); exists {
os.Remove(hmacPath)
}
// Remove the extracted files.
if versionDir, exists := makeReleasePath(d.releaseDir, version); exists {
return os.RemoveAll(versionDir)
}
return nil
}
// GetCurrentLink returns the name of the currently released version.
func (d *Deployment) GetCurrentLink() string {
// Read the symlink off disk.
symlink, err := os.Readlink(path.Join(d.baseDir, kCURRENTDIR))
if err != nil {
return ""
}
// We are interested in the last element, which is the active version directory.
dirs := strings.Split(symlink, "/")
return dirs[len(dirs)-1]
}
// GetDeployedVersions enumerates all the versions currently available for linking.
func (d *Deployment) GetDeployedVersions() []string {
var versionList []string
// Everything in the release directory is an available version.
if fi, err := ioutil.ReadDir(d.releaseDir); err == nil {
for _, v := range fi {
versionList = append(versionList, v.Name())
}
}
return versionList
}
| {
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
return fmt.Errorf("Release directory does not exist: %q", versionDir)
}
symlinkPath := path.Join(d.baseDir, kCURRENTDIR)
os.Remove(symlinkPath)
return os.Symlink(versionDir, symlinkPath)
} | identifier_body |
deployment.go | /*
Package deployment provides methods for managing application deployment and release files.
A deployment resides on the server running PullDeploy in daemon mode. It has the following
directory structure:
/BASEDIR/APPNAME/artifact
/BASEDIR/APPNAME/current (a symlink)
/BASEDIR/APPNAME/release
Artifacts retrieved from the repository are placed into the "artifact" directory:
/BASEDIR/APPNAME/artifact/APPNAME-VERSION.ARTIFACTTYPE
Deployed releases are unpacked into a directory named for the version, under the
"release" directory.
/BASEDIR/APPNAME/release/VERSION1
/BASEDIR/APPNAME/release/VERSION2
/BASEDIR/APPNAME/release/VERSION3
Releasing a version points the "current" symlink to the specified release directory.
*/
package deployment
import (
"crypto/hmac"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"path"
"strconv"
"strings"
"github.com/mredivo/pulldeploy/pdconfig"
)
const kARTIFACTDIR = "artifact"
const kRELEASEDIR = "release"
const kCURRENTDIR = "current"
const kHMACSUFFIX = "hmac"
// Deployment provides methods for manipulating local deployment files.
type Deployment struct {
appName string // The name of the application
cfg pdconfig.AppConfig // The deployment configuration
acfg pdconfig.ArtifactConfig // The Artifact Type configuration
uid int // The numeric UID to own all files for this deployment
gid int // The numeric GID to own all files for this deployment
baseDir string // The derived top-level directory for this app's files
artifactDir string // The derived subdirectory for fetched build artifacts
releaseDir string // The derived subdirectory for extracted build artifacts
}
// New returns a new Deployment.
func New(appName string, pdcfg pdconfig.PDConfig, cfg *pdconfig.AppConfig) (*Deployment, error) {
d := new(Deployment)
d.cfg = *cfg
// Capture the supplied arguments.
d.appName = appName
// All string arguments are mandatory.
if appName == "" {
return nil, errors.New("Deployment initialization error: Appname is mandatory")
}
if d.cfg.BaseDir == "" {
return nil, errors.New("Deployment initialization error: BaseDir is mandatory")
}
// Validate the artifact type.
if ac, err := pdcfg.GetArtifactConfig(d.cfg.ArtifactType); err == nil {
d.acfg = *ac
} else {
return nil, fmt.Errorf("Deployment initialization error: invalid ArtifactType %q", d.cfg.ArtifactType)
}
// Derive the UID/GID from the username/groupname.
// NOTE: Go doesn't yet support looking up a GID from a name, so
// we use the gid from the user.
if d.cfg.User != "" {
if user, err := user.Lookup(d.cfg.User); err == nil {
if i, err := strconv.ParseInt(user.Uid, 10, 64); err == nil {
d.uid = int(i)
}
if i, err := strconv.ParseInt(user.Gid, 10, 64); err == nil {
d.gid = int(i)
}
}
}
// The parent directory must not be "/".
parentDir := absPath(d.cfg.BaseDir)
if parentDir == "/" {
return nil, errors.New("Deployment initialization error: \"/\" not permitted as BaseDir")
}
// The parent directory must exist.
if _, err := os.Stat(parentDir); err != nil {
return nil, fmt.Errorf("Deployment initialization error: unable to stat BaseDir: %s", err.Error())
}
// If the base dir doesn't exist, create it.
d.baseDir = path.Join(parentDir, appName)
if _, err := os.Stat(d.baseDir); err != nil {
if err := makeDir(d.baseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the artifact dir doesn't exist, create it.
d.artifactDir = path.Join(d.baseDir, kARTIFACTDIR)
if _, err := os.Stat(d.artifactDir); err != nil {
if err := makeDir(d.artifactDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
// If the release dir doesn't exist, create it.
d.releaseDir = path.Join(d.baseDir, kRELEASEDIR)
if _, err := os.Stat(d.releaseDir); err != nil {
if err := makeDir(d.releaseDir, d.uid, d.gid, 0755); err != nil {
return nil, fmt.Errorf("Deployment initialization error: %s", err.Error())
}
}
return d, nil
}
// ArtifactPresent indicates whether the artifact has already been written.
func (d *Deployment) | (version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteArtifact creates a file in the artifact area from the given stream.
func (d *Deployment) WriteArtifact(version string, rc io.ReadCloser) error {
// Housekeeping: ensure the source is closed when done.
defer rc.Close()
// Generate the filename, and check whether file already exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if exists {
return fmt.Errorf("Artifact already exists: %s", artifactPath)
}
// Open the file, and write the data into it.
if fp, err := os.OpenFile(artifactPath, os.O_WRONLY|os.O_CREATE, 0664); err == nil {
defer fp.Close()
if _, err := io.Copy(fp, rc); err != nil {
return fmt.Errorf("Error while creating %q: %s", artifactPath, err.Error())
}
if err := setOwner(artifactPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Unable to open %q for writing: %s", artifactPath, err.Error())
}
return nil
}
// HMACPresent indicates whether the HMAC has already been written.
func (d *Deployment) HMACPresent(version string) bool {
// Generate the filename, and check whether file already exists.
_, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
return exists
}
// WriteHMAC writes an HMAC into the artifact area.
func (d *Deployment) WriteHMAC(version string, hmac []byte) error {
// Generate the filename, write to file, set ownership.
hmacPath, _ := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if err := ioutil.WriteFile(hmacPath, hmac, 0664); err != nil {
return fmt.Errorf("Error while writing %q: %s", hmacPath, err.Error())
}
if err := setOwner(hmacPath, d.uid, d.gid); err != nil {
return fmt.Errorf("Unable to set owner on %q: %s", hmacPath, err.Error())
}
return nil
}
// CheckHMAC confirms that the artifact has not been corrupted or tampered with by
// calculating its HMAC and comparing it with the retrieved HMAC.
func (d *Deployment) CheckHMAC(version string) error {
// Build the filenames.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("HMAC does not exist: %s", artifactPath)
}
// Read in the HMAC.
if expectedMAC, err := ioutil.ReadFile(hmacPath); err == nil {
// Open the artifact, and calculate its HMAC.
if fp, err := os.Open(artifactPath); err == nil {
messageMAC := CalculateHMAC(fp, NewHMACCalculator(d.cfg.Secret))
if !hmac.Equal(messageMAC, expectedMAC) {
return fmt.Errorf(
"Artifact is corrupt: Expected HMAC: %q: Calculated HMAC: %q",
string(expectedMAC),
string(messageMAC),
)
}
} else {
return fmt.Errorf("Error while reading %q: %s", artifactPath, err.Error())
}
} else {
return fmt.Errorf("Error while reading %q: %s", hmacPath, err.Error())
}
return nil
}
// Extract transfers an artifact to the version release directory.
func (d *Deployment) Extract(version string) error {
// Ensure that the artifact to be extracted exists.
artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
if !exists {
return fmt.Errorf("Artifact does not exist: %s", artifactPath)
}
// If running as root, ensure the extract command wasn't loaded from an insecure file.
if os.Geteuid() == 0 && d.acfg.Insecure {
return fmt.Errorf(
"Refusing to execute extract command from insecure \"pulldeploy.yaml\" as root")
}
// Create the version directory if it doesn't exist.
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
if err := makeDir(versionDir, d.uid, d.gid, 0755); err != nil {
return fmt.Errorf("Cannot create release directory %q: %s", version, err.Error())
}
}
// Extract the archive into the version directory.
cmdlineArgs := substituteVars(d.acfg.Extract.Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
_, err := sysCommand("", d.acfg.Extract.Cmd, cmdlineArgs)
if err != nil {
return fmt.Errorf("Cannot extract archive %q into %q: %s", artifactPath, versionDir, err.Error())
}
// Set the ownership of all the extracted files.
if err := setOwnerAll(versionDir, d.uid, d.gid); err != nil {
return err
}
return nil
}
// Link sets the "current" symlink to point at the indicated version.
func (d *Deployment) Link(version string) error {
versionDir, exists := makeReleasePath(d.releaseDir, version)
if !exists {
return fmt.Errorf("Release directory does not exist: %q", versionDir)
}
symlinkPath := path.Join(d.baseDir, kCURRENTDIR)
os.Remove(symlinkPath)
return os.Symlink(versionDir, symlinkPath)
}
// PostDeploy executes the configured PostDeploy command.
func (d *Deployment) PostDeploy(version string) (string, error) {
if os.Geteuid() == 0 && d.cfg.Insecure {
return "", fmt.Errorf(
"Refusing to execute post-deploy command from insecure %q configuration as root",
d.appName)
}
if d.cfg.Scripts["postdeploy"].Cmd != "" {
artifactPath, _ := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
versionDir, _ := makeReleasePath(d.releaseDir, version)
cmdlineArgs := substituteVars(d.cfg.Scripts["postdeploy"].Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
return sysCommand(versionDir, d.cfg.Scripts["postdeploy"].Cmd, cmdlineArgs)
}
return "", nil
}
// PostRelease executes the configured PostRelease command.
func (d *Deployment) PostRelease(version string) (string, error) {
if os.Geteuid() == 0 && d.cfg.Insecure {
return "", fmt.Errorf(
"Refusing to execute post-release command from insecure %q configuration as root",
d.appName)
}
if d.cfg.Scripts["postrelease"].Cmd != "" {
artifactPath, _ := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension)
versionDir, _ := makeReleasePath(d.releaseDir, version)
cmdlineArgs := substituteVars(d.cfg.Scripts["postrelease"].Args,
varValues{artifactPath: artifactPath, versionDir: versionDir})
return sysCommand(versionDir, d.cfg.Scripts["postrelease"].Cmd, cmdlineArgs)
}
return "", nil
}
// Remove deletes everything associated with the given version.
func (d *Deployment) Remove(version string) error {
// Removing the currently linked version is not permitted.
if d.GetCurrentLink() == version {
return fmt.Errorf("Removing current version not permitted: %q", version)
}
// Remove the artifact and HMAC.
if artifactPath, exists := makeArtifactPath(d.artifactDir, d.appName, version, d.acfg.Extension); exists {
os.Remove(artifactPath)
}
if hmacPath, exists := makeHMACPath(d.artifactDir, d.appName, version, d.acfg.Extension); exists {
os.Remove(hmacPath)
}
// Remove the extracted files.
if versionDir, exists := makeReleasePath(d.releaseDir, version); exists {
return os.RemoveAll(versionDir)
}
return nil
}
// GetCurrentLink returns the name of the currently released version.
func (d *Deployment) GetCurrentLink() string {
// Read the symlink off disk.
symlink, err := os.Readlink(path.Join(d.baseDir, kCURRENTDIR))
if err != nil {
return ""
}
// We are interested in the last element, which is the active version directory.
dirs := strings.Split(symlink, "/")
return dirs[len(dirs)-1]
}
// GetDeployedVersions enumerates all the versions currently available for linking.
func (d *Deployment) GetDeployedVersions() []string {
var versionList []string
// Everything in the release directory is an available version.
if fi, err := ioutil.ReadDir(d.releaseDir); err == nil {
for _, v := range fi {
versionList = append(versionList, v.Name())
}
}
return versionList
}
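// NOTE (editorial sketch, not part of the original source): a housekeeping example
// combining GetCurrentLink, GetDeployedVersions and Remove to prune everything
// except the currently released version. Remove already refuses to delete the
// current version, so the explicit comparison is just an early skip.
func examplePruneOldVersions(d *Deployment) {
current := d.GetCurrentLink()
for _, version := range d.GetDeployedVersions() {
if version == current {
continue
}
// Errors are ignored in this sketch; a real caller would log or collect them.
_ = d.Remove(version)
}
}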
| ArtifactPresent | identifier_name |
mod.rs | use std::collections::HashMap;
use std::future::Future;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::ops::DerefMut;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::task::{Context, Waker};
use std::thread::spawn;
use log;
use mio::{Events, Interest, Poll, Token};
use mio::event::{Event, Source};
use mio::net::{TcpListener, TcpStream};
use crate::error::Error;
use crate::Result;
#[cfg(test)]
mod test;
/// Callback trait invoked when an event occurs on a TcpStream.
/// The return value tells the dispatcher what to do next.
pub trait TcpStreamListener: Send {
fn on_ready_to_read(&mut self, r: &mut dyn Read) -> DispatcherAction;
fn on_ready_to_write(&mut self, w: &mut dyn Write) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Callback trait invoked when an event occurs on a TcpListener.
/// The return value tells the dispatcher what to do next.
pub trait TcpListenerListener: Send {
fn on_accept(&mut self, stream: TcpStream, address: SocketAddr) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Action that a listener asks the Dispatcher to take after a callback returns.
pub enum DispatcherAction {
/// Continue processing without taking any further action.
Continue,
/// Re-register the source with the specified Interest flags.
ChangeFlag(Interest),
/// Dispose of the Source (e.g. the socket) that produced the event.
Dispose,
}
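// Editorial sketch (not part of the original source): a minimal TcpStreamListener
// showing how the DispatcherAction variants above are typically returned. The
// buffer size and the "discard everything that is read" behaviour are arbitrary
// assumptions, not something this module prescribes.
struct NullStreamListener;
impl TcpStreamListener for NullStreamListener {
fn on_ready_to_read(&mut self, r: &mut dyn Read) -> DispatcherAction {
let mut buf = [0u8; 1024];
match r.read(&mut buf) {
// Zero bytes means the peer closed the connection, so drop the socket.
Ok(0) => DispatcherAction::Dispose,
Ok(_) => DispatcherAction::Continue,
Err(_) => DispatcherAction::Dispose,
}
}
fn on_ready_to_write(&mut self, _w: &mut dyn Write) -> DispatcherAction {
// Nothing to send: keep only read readiness to avoid needless write wake-ups.
DispatcherAction::ChangeFlag(Interest::READABLE)
}
fn on_error(&mut self, _error: std::io::Error) -> DispatcherAction {
DispatcherAction::Dispose
}
}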
// ##############################################################################################
// Definition of the task that is sent and received over a channel so that externally supplied work can run
// inside the event-loop thread, and of the Future that returns its result.
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send + 'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send + 'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// Starts a new dispatcher.
/// A Poll instance is created and the event loop begins running.
///
/// # Arguments
/// * `event_buffer_size` - Maximum number of events read by a single poll call.
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// Disposes of the socket with the specified ID.
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send + 'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true;
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// Starts the event loop for poll(). To run arbitrary work inside the event-loop thread, submit a task to the
/// sender paired with `receiver` and call wake() on the Waker registered with self.poll to leave the blocking poll.
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while !self.stopped {
self.poll.poll(&mut events, None)?;
// Collect the sockets on which events occurred
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// Handle each socket on which an event occurred
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// Executes all tasks currently queued on the given receiver.
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
// try_iter() drains the tasks that are already queued without blocking the event loop.
for Task { executable, state } in receiver.try_iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// Discards the socket with the specified ID. This closes the corresponding socket.
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// Discards all registered sockets. This closes the sockets.
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), inter | spatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
// Readable event
if event.is_readable() {
let behaviour = listener.on_ready_to_read(stream);
self.action(event.token().0, stream, behaviour);
}
// Writable event
if event.is_writable() {
let behaviour = listener.on_ready_to_write(stream);
self.action(event.token().0, stream, behaviour);
}
if event.is_error() {
let behaviour = match stream.take_error() {
Ok(Some(err)) => listener.on_error(err),
Ok(None) => DispatcherAction::Continue,
Err(err) => listener.on_error(err),
};
self.action(event.token().0, stream, behaviour);
}
}
fn on_tcp_listener(
&mut self,
event: &Event,
listener: &mut TcpListener,
event_listener: &mut Box<dyn TcpListenerListener>,
) {
// Incoming-connection (accept) event
if event.is_readable() {
let (stream, address) = listener.accept().unwrap();
let behaviour = event_listener.on_accept(stream, address);
self.action(event.token().0, listener, behaviour);
}
}
}
/// Enum holding the sockets registered with Poll.
enum Socket {
Waker,
Stream(TcpStream, Box<dyn TcpStreamListener>),
Listener(TcpListener, Box<dyn TcpListenerListener>),
}
/// Map that assigns IDs to objects and allows them to be looked up by ID.
/// Used to resolve a socket from the token reported by Poll.
/// Note that this [SocketMap] is not thread-safe.
struct SocketMap {
next: usize,
sockets: HashMap<usize, Arc<Mutex<Socket>>>,
}
impl SocketMap {
/// Creates a new, empty map.
pub fn new() -> SocketMap {
let sockets = HashMap::new();
SocketMap { next: 0, sockets }
}
/// Looks up the object with the specified ID.
pub fn get(&self, id: usize) -> Option<Arc<Mutex<Socket>>> {
if id == 0 {
Some(Arc::new(Mutex::new(Socket::Waker)))
} else {
self.sockets.get(&id).map(|a| a.clone())
}
}
/// Returns all IDs managed by this map.
pub fn ids(&self) -> Vec<SocketId> {
self.sockets.keys().map(|id| *id).collect::<Vec<usize>>()
}
/// Finds an available ID.
pub fn available_id(&mut self) -> Result<SocketId> {
// NOTE: Token(0) is reserved for the Waker and Token(usize::MAX) is used internally by Poll
let max = std::usize::MAX - 2;
if self.sockets.len() == max {
return Err(Error::TooManySockets { maximum: std::usize::MAX });
}
for i in 0..=max {
let id = (self.next as u64 + i as u64) as usize + 1;
if self.sockets.get(&id).is_none() {
self.next = if self.next + 1 == max { 0 } else { self.next + 1 };
return Ok(id);
}
}
unreachable!()
}
/// Adds or replaces the socket with the specified ID.
pub fn set(&mut self, id: SocketId, socket: Socket) {
self.sockets.insert(id, Arc::new(Mutex::new(socket)));
}
}
| est).unwrap();
}
Di | conditional_block |
mod.rs | use std::collections::HashMap;
use std::future::Future;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::ops::DerefMut;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::task::{Context, Waker};
use std::thread::spawn;
use log;
use mio::{Events, Interest, Poll, Token};
use mio::event::{Event, Source};
use mio::net::{TcpListener, TcpStream};
use crate::error::Error;
use crate::Result;
#[cfg(test)]
mod test;
/// Callback trait invoked when an event occurs on a TcpStream.
/// The return value tells the dispatcher what to do next.
pub trait TcpStreamListener: Send {
fn on_ready_to_read(&mut self, r: &mut dyn Read) -> DispatcherAction;
fn on_ready_to_write(&mut self, w: &mut dyn Write) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Callback trait invoked when an event occurs on a TcpListener.
/// The return value tells the dispatcher what to do next.
pub trait TcpListenerListener: Send {
fn on_accept(&mut self, stream: TcpStream, address: SocketAddr) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Action that a listener asks the Dispatcher to take after a callback returns.
pub enum DispatcherAction {
/// Continue processing without taking any further action.
Continue,
/// Re-register the source with the specified Interest flags.
ChangeFlag(Interest),
/// Dispose of the Source (e.g. the socket) that produced the event.
Dispose,
}
// ##############################################################################################
// Definition of the task that is sent and received over a channel so that externally supplied work can run
// inside the event-loop thread, and of the Future that returns its result.
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send + 'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send + 'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// Starts a new dispatcher.
/// A Poll instance is created and the event loop begins running.
///
/// # Arguments
/// * `event_buffer_size` - Maximum number of events read by a single poll call.
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// Disposes of the socket with the specified ID.
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send + 'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
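// Editorial sketch (not part of the original source): typical wiring of a Dispatcher.
// `MyListenerListener` stands for any TcpListenerListener implementation, and the
// futures executor used to await the returned boxed Future is an assumption — this
// module does not prescribe one.
//
// let dispatcher = Dispatcher::new(1024)?;
// let listener = TcpListener::bind("127.0.0.1:7777".parse().unwrap())?;
// let future = dispatcher.register(listener, Box::new(MyListenerListener::new()) as Box<dyn TcpListenerListener>);
// let socket_id = futures::executor::block_on(Box::into_pin(future))?;
// // ... later, detach and close the listening socket:
// let _ = dispatcher.dispose(socket_id);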
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true;
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// Starts the event loop for poll(). To run arbitrary work inside the event-loop thread, submit a task to the
/// sender paired with `receiver` and call wake() on the Waker registered with self.poll to leave the blocking poll.
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while !self.stopped {
self.poll.poll(&mut events, None)?;
// Collect the sockets on which events occurred
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// Handle each socket on which an event occurred
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// Executes all tasks currently queued on the given receiver.
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
// try_iter() drains the tasks that are already queued without blocking the event loop.
for Task { executable, state } in receiver.try_iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// Discards the socket with the specified ID. This closes the corresponding socket.
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// Discards all registered sockets. This closes those sockets.
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), interest).unwrap();
}
DispatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
// Readable event
if event.is_readable() {
let behaviour = listener.on_ready_to_read(stream);
self.action(event.token().0, stream, behaviour);
}
// Writable event
if event.is_writable() {
let behaviour = listener.on_ready_to_write(stream);
self.action(event.token().0, stream, behaviour);
}
if event.is_error() {
let behaviour = match stream.take_error() {
Ok(Some(err)) => listener.on_error(err),
Ok(None) => DispatcherAction::Continue,
Err(err) => listener.on_error(err),
};
self.action(event.token().0, stream, behaviour);
}
}
fn on_tcp_listener(
&mut self,
event: &Event,
listener: &mut TcpListener,
event_listener: &mut Box<dyn TcpListenerListener>,
) {
// Incoming connection event
if event.is_readable() {
let (stream, address) = listener.accept().unwrap();
let behaviour = event_listener.on_accept(stream, address);
self.action(event.token().0, listener, behaviour);
}
}
}
/// Enum holding the sockets registered with Poll.
enum Socket {
Waker,
Stream(TcpStream, Box<dyn TcpStreamListener>),
Listener(TcpListener, Box<dyn TcpListenerListener>),
}
/// A map that assigns IDs to objects and lets them be looked up by ID.
/// It is used to resolve a socket from the token reported by Poll.
/// Note that this [SocketMap] is not thread-safe.
struct SocketMap {
next: usize,
sockets: HashMap<usize, Arc<Mutex<Socket>>>,
}
impl SocketMap {
/// Creates a new, empty map.
pub fn new() -> SocketMap {
let sockets = HashMap::new();
SocketMap { next: 0, sockets }
}
/// Returns the object registered under the given ID.
pub fn get(&self, id: usize) -> Option<Arc<Mutex<Socket>>> {
if id == 0 {
Some(Arc::new(Mutex::new(Socket::Waker)))
} else {
self.sockets.get(&id).map(|a| a.clone())
}
}
/// Returns all IDs currently managed by this map.
pub fn ids(&self) -> Vec<SocketId> {
self.sockets.keys().map(|id| *id).collect::<Vec<usize>>()
}
/// Finds an available ID.
pub fn available_id(&mut self) -> Result<SocketId> {
// NOTE: Token(0) is reserved for the Waker and Token(usize::MAX) is used internally by Poll
let max = std::usize::MAX - 2;
if self.sockets.len() == max {
return Err(Error::TooManySockets { maximum: std::usize::MAX });
}
for i in 0..=max {
let id = (self.next as u64 + i as u64) as usize + 1;
| f self.sockets.get(&id).is_none() {
self.next = if self.next + 1 == max { 0 } else { self.next + 1 };
return Ok(id);
}
}
unreachable!()
}
/// Adds or replaces the socket registered under the given ID.
pub fn set(&mut self, id: SocketId, socket: Socket) {
self.sockets.insert(id, Arc::new(Mutex::new(socket)));
}
}
| i | identifier_name |
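// Illustrative sketch only (not part of the original listing): a hedged example of the
// SocketMap allocation behaviour documented above. ID 0 stays reserved for the Waker,
// so allocation starts at 1 and IDs that are already taken get skipped. Assumes it
// lives inside the same module, where SocketMap and Socket are visible.
#[allow(dead_code)]
fn socket_map_allocation_sketch() {
    let mut map = SocketMap::new();
    let first = map.available_id().unwrap();   // 1, because Token(0) belongs to the Waker
    map.set(first, Socket::Waker);             // any Socket variant works for this sketch
    let second = map.available_id().unwrap();  // a different, still-free ID
    assert_ne!(first, second);
    assert!(map.get(0).is_some());             // ID 0 always resolves to Socket::Waker
}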
mod.rs | use std::collections::HashMap;
use std::future::Future;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::ops::DerefMut;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::task::{Context, Waker};
use std::thread::spawn;
use log;
use mio::{Events, Interest, Poll, Token};
use mio::event::{Event, Source};
use mio::net::{TcpListener, TcpStream};
use crate::error::Error;
use crate::Result;
#[cfg(test)]
mod test;
/// Callback trait invoked when an event occurs on a TcpStream.
/// The return value specifies what action should be taken next.
pub trait TcpStreamListener: Send {
fn on_ready_to_read(&mut self, r: &mut dyn Read) -> DispatcherAction;
fn on_ready_to_write(&mut self, w: &mut dyn Write) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Callback trait invoked when an event occurs on a TcpListener.
/// The return value specifies what action should be taken next.
pub trait TcpListenerListener: Send {
fn on_accept(&mut self, stream: TcpStream, address: SocketAddr) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Action that a listener returns to tell the Dispatcher what to do after a callback completes.
pub enum DispatcherAction {
/// Continue processing without taking any particular action.
Continue,
/// Re-register the event source with the given Interest flags.
ChangeFlag(Interest),
/// Dispose of the Source (e.g. the socket) from which the event originated.
Dispose,
}
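// Illustrative sketch only (not part of the original listing): one possible
// TcpStreamListener implementation, showing how the DispatcherAction variants above
// are typically returned. `LoggingStream` and the buffer size are made up for this sketch.
#[allow(dead_code)]
struct LoggingStream;
impl TcpStreamListener for LoggingStream {
    fn on_ready_to_read(&mut self, r: &mut dyn Read) -> DispatcherAction {
        let mut buf = [0u8; 1024];
        match r.read(&mut buf) {
            Ok(0) => DispatcherAction::Dispose, // peer closed the connection
            Ok(n) => {
                log::info!("read {} bytes", n);
                DispatcherAction::Continue
            }
            Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => DispatcherAction::Continue,
            Err(e) => self.on_error(e),
        }
    }
    fn on_ready_to_write(&mut self, _w: &mut dyn Write) -> DispatcherAction {
        // Nothing buffered to send in this sketch; keep the registration unchanged.
        DispatcherAction::Continue
    }
    fn on_error(&mut self, error: std::io::Error) -> DispatcherAction {
        log::warn!("stream error: {}", error);
        DispatcherAction::Dispose
    }
}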
// ##############################################################################################
// Definitions of the task sent over a channel so that caller-supplied work can run inside the event-loop
// thread, and of the Future that hands back its result.
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send + 'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send + 'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// Starts a new dispatcher.
/// A Poll instance is created and the event loop begins running.
///
/// # Arguments
/// * `event_buffer_size` - Maximum number of events to read per poll() call.
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// Disposes of the socket with the given ID.
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send + 'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true; | Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
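// Illustrative sketch only (not part of the original listing): a hedged end-to-end example
// of the registration API above. The address, the event buffer size, the `Acceptor` type and
// the use of `futures::executor::block_on` to drive the returned boxed Future are all
// assumptions made for this sketch; the module itself does not declare such a dependency.
#[allow(dead_code)]
fn dispatcher_usage_sketch() -> Result<()> {
    struct Acceptor;
    impl TcpListenerListener for Acceptor {
        fn on_accept(&mut self, _stream: TcpStream, address: SocketAddr) -> DispatcherAction {
            log::info!("accepted connection from {}", address);
            // A real caller would register `_stream` with the dispatcher here.
            DispatcherAction::Continue
        }
        fn on_error(&mut self, _error: std::io::Error) -> DispatcherAction {
            DispatcherAction::Dispose
        }
    }
    let dispatcher = Dispatcher::new(1024)?;
    let listener = TcpListener::bind("127.0.0.1:9000".parse().unwrap())?;
    let handler: Box<dyn TcpListenerListener> = Box::new(Acceptor);
    // register() returns a boxed Future that resolves once the event-loop thread has run the task.
    let pending = dispatcher.register(listener, handler);
    let id = futures::executor::block_on(Box::into_pin(pending))?;
    log::info!("listener registered as socket {}", id);
    Ok(())
}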
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// Starts the event loop that drives poll(). To run arbitrary work inside the event-loop thread, submit a task
/// to the sender paired with this receiver, then call wake() on the Waker registered with self.poll to break out of the blocking poll.
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while !self.stopped {
self.poll.poll(&mut events, None)?;
// Collect the sockets on which events occurred
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// Handle each socket on which an event occurred
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// Runs every task currently queued in the given receiver.
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
for Task { executable, state } in receiver.iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// Discards the socket with the given ID. This closes the corresponding socket.
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// Discards all registered sockets. This closes those sockets.
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), interest).unwrap();
}
DispatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
// Readable event
if event.is_readable() {
let behaviour = listener.on_ready_to_read(stream);
self.action(event.token().0, stream, behaviour);
}
// Writable event
if event.is_writable() {
let behaviour = listener.on_ready_to_write(stream);
self.action(event.token().0, stream, behaviour);
}
if event.is_error() {
let behaviour = match stream.take_error() {
Ok(Some(err)) => listener.on_error(err),
Ok(None) => DispatcherAction::Continue,
Err(err) => listener.on_error(err),
};
self.action(event.token().0, stream, behaviour);
}
}
fn on_tcp_listener(
&mut self,
event: &Event,
listener: &mut TcpListener,
event_listener: &mut Box<dyn TcpListenerListener>,
) {
// Incoming connection event
if event.is_readable() {
let (stream, address) = listener.accept().unwrap();
let behaviour = event_listener.on_accept(stream, address);
self.action(event.token().0, listener, behaviour);
}
}
}
/// Enum holding the sockets registered with Poll.
enum Socket {
Waker,
Stream(TcpStream, Box<dyn TcpStreamListener>),
Listener(TcpListener, Box<dyn TcpListenerListener>),
}
/// A map that assigns IDs to objects and lets them be looked up by ID.
/// It is used to resolve a socket from the token reported by Poll.
/// Note that this [SocketMap] is not thread-safe.
struct SocketMap {
next: usize,
sockets: HashMap<usize, Arc<Mutex<Socket>>>,
}
impl SocketMap {
/// Creates a new, empty map.
pub fn new() -> SocketMap {
let sockets = HashMap::new();
SocketMap { next: 0, sockets }
}
/// Returns the object registered under the given ID.
pub fn get(&self, id: usize) -> Option<Arc<Mutex<Socket>>> {
if id == 0 {
Some(Arc::new(Mutex::new(Socket::Waker)))
} else {
self.sockets.get(&id).map(|a| a.clone())
}
}
/// Returns all IDs currently managed by this map.
pub fn ids(&self) -> Vec<SocketId> {
self.sockets.keys().map(|id| *id).collect::<Vec<usize>>()
}
/// Finds an available ID.
pub fn available_id(&mut self) -> Result<SocketId> {
// NOTE: Token(0) is reserved for the Waker and Token(usize::MAX) is used internally by Poll
let max = std::usize::MAX - 2;
if self.sockets.len() == max {
return Err(Error::TooManySockets { maximum: std::usize::MAX });
}
for i in 0..=max {
let id = (self.next as u64 + i as u64) as usize + 1;
if self.sockets.get(&id).is_none() {
self.next = if self.next + 1 == max { 0 } else { self.next + 1 };
return Ok(id);
}
}
unreachable!()
}
/// Adds or replaces the socket registered under the given ID.
pub fn set(&mut self, id: SocketId, socket: Socket) {
self.sockets.insert(id, Arc::new(Mutex::new(socket)));
}
}
|
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn | identifier_body |
mod.rs | use std::collections::HashMap;
use std::future::Future;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::ops::DerefMut;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::task::{Context, Waker};
use std::thread::spawn;
use log;
use mio::{Events, Interest, Poll, Token};
use mio::event::{Event, Source};
use mio::net::{TcpListener, TcpStream};
use crate::error::Error;
use crate::Result;
#[cfg(test)]
mod test;
/// Callback trait invoked when an event occurs on a TcpStream.
/// The return value specifies what action should be taken next.
pub trait TcpStreamListener: Send {
fn on_ready_to_read(&mut self, r: &mut dyn Read) -> DispatcherAction;
fn on_ready_to_write(&mut self, w: &mut dyn Write) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Callback trait invoked when an event occurs on a TcpListener.
/// The return value specifies what action should be taken next.
pub trait TcpListenerListener: Send {
fn on_accept(&mut self, stream: TcpStream, address: SocketAddr) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Action that a listener returns to tell the Dispatcher what to do after a callback completes.
pub enum DispatcherAction {
/// Continue processing without taking any particular action.
Continue,
/// Re-register the event source with the given Interest flags.
ChangeFlag(Interest),
/// Dispose of the Source (e.g. the socket) from which the event originated.
Dispose,
}
// ##############################################################################################
// Definitions of the task sent over a channel so that caller-supplied work can run inside the event-loop
// thread, and of the Future that hands back its result.
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send + 'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send + 'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> { |
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
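// Illustrative sketch only (not part of the original listing): the handshake TaskFuture
// relies on. The event-loop thread stores the result and wakes the waker; the future then
// returns Ready on its next poll. Here that completion is simulated by hand, and
// `futures::executor::block_on` is an assumed external helper, not a declared dependency.
#[allow(dead_code)]
fn task_future_handshake_sketch() {
    let task: Task<u32> = Task::new(Box::new(|_polling: &mut PollingLoop| 42u32));
    let future = TaskFuture { state: task.state.clone() };
    // Simulate what run_all_tasks() does after executing the closure:
    {
        let mut state = task.state.lock().unwrap();
        state.result = Some(42);
        if let Some(waker) = state.waker.take() {
            waker.wake();
        }
    }
    assert_eq!(futures::executor::block_on(future), 42);
}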
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// Starts a new dispatcher.
/// A Poll instance is created and the event loop begins running.
///
/// # Arguments
/// * `event_buffer_size` - Maximum number of events to read per poll() call.
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// Disposes of the socket with the given ID.
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send + 'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true;
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// Starts the event loop that drives poll(). To run arbitrary work inside the event-loop thread, submit a task
/// to the sender paired with this receiver, then call wake() on the Waker registered with self.poll to break out of the blocking poll.
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while !self.stopped {
self.poll.poll(&mut events, None)?;
// Collect the sockets on which events occurred
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// Handle each socket on which an event occurred
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// Runs every task currently queued in the given receiver.
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
for Task { executable, state } in receiver.iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// Discards the socket with the given ID. This closes the corresponding socket.
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// Discards all registered sockets. This closes those sockets.
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), interest).unwrap();
}
DispatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
// Readable event
if event.is_readable() {
let behaviour = listener.on_ready_to_read(stream);
self.action(event.token().0, stream, behaviour);
}
// Writable event
if event.is_writable() {
let behaviour = listener.on_ready_to_write(stream);
self.action(event.token().0, stream, behaviour);
}
if event.is_error() {
let behaviour = match stream.take_error() {
Ok(Some(err)) => listener.on_error(err),
Ok(None) => DispatcherAction::Continue,
Err(err) => listener.on_error(err),
};
self.action(event.token().0, stream, behaviour);
}
}
fn on_tcp_listener(
&mut self,
event: &Event,
listener: &mut TcpListener,
event_listener: &mut Box<dyn TcpListenerListener>,
) {
// Incoming connection event
if event.is_readable() {
let (stream, address) = listener.accept().unwrap();
let behaviour = event_listener.on_accept(stream, address);
self.action(event.token().0, listener, behaviour);
}
}
}
/// Enum holding the sockets registered with Poll.
enum Socket {
Waker,
Stream(TcpStream, Box<dyn TcpStreamListener>),
Listener(TcpListener, Box<dyn TcpListenerListener>),
}
/// A map that assigns IDs to objects and lets them be looked up by ID.
/// It is used to resolve a socket from the token reported by Poll.
/// Note that this [SocketMap] is not thread-safe.
struct SocketMap {
next: usize,
sockets: HashMap<usize, Arc<Mutex<Socket>>>,
}
impl SocketMap {
/// Creates a new, empty map.
pub fn new() -> SocketMap {
let sockets = HashMap::new();
SocketMap { next: 0, sockets }
}
/// Returns the object registered under the given ID.
pub fn get(&self, id: usize) -> Option<Arc<Mutex<Socket>>> {
if id == 0 {
Some(Arc::new(Mutex::new(Socket::Waker)))
} else {
self.sockets.get(&id).map(|a| a.clone())
}
}
/// Returns all IDs currently managed by this map.
pub fn ids(&self) -> Vec<SocketId> {
self.sockets.keys().map(|id| *id).collect::<Vec<usize>>()
}
/// Finds an available ID.
pub fn available_id(&mut self) -> Result<SocketId> {
// NOTE: Token(0) is reserved for the Waker and Token(usize::MAX) is used internally by Poll
let max = std::usize::MAX - 2;
if self.sockets.len() == max {
return Err(Error::TooManySockets { maximum: std::usize::MAX });
}
for i in 0..=max {
let id = (self.next as u64 + i as u64) as usize + 1;
if self.sockets.get(&id).is_none() {
self.next = if self.next + 1 == max { 0 } else { self.next + 1 };
return Ok(id);
}
}
unreachable!()
}
/// Adds or replaces the socket registered under the given ID.
pub fn set(&mut self, id: SocketId, socket: Socket) {
self.sockets.insert(id, Arc::new(Mutex::new(socket)));
}
} | type Output = R; | random_line_split |
autoencoder.py | # -*- coding: utf-8 -*-
"""code_draft.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SFG3__AoM7dvKI06qiu76tW-mtbZDsxn
"""
import PIL
from PIL import Image
import numpy as np
import sys
from matplotlib import image
from matplotlib import pyplot as plt
#from google.colab import files
import os.path
from os import path
import pandas as pd
'''
import tensorflow as tf
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
class Autoencoder(Model):
''
Define the autoencoder with help from the tensorflow library.
Attributes:
_latent_dim: The number of latent dimensions the images get mapped onto.
_epochs: The number of epochs the autoencoder goes through to train.
encoder: The encoder layers of the autoencoder.
decoder: The decoder layers of the autoencoder.
''
def __init__(self):
super(Autoencoder, self).__init__()
self._latent_dim = 64
self._epochs = 500
self.encoder = tf.keras.Sequential([
layers.Flatten(),
layers.Dense(self._latent_dim, activation='relu'),
])
self.decoder = tf.keras.Sequential([
layers.Dense(10800, activation='sigmoid'),
layers.Reshape((60, 60, 3))
])
def call(self, x):
''
Call the encoder and decoder.
''
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
def set_latent_dim(self, latent_dim):
''
Change number of latent dimensions.
''
self._latent_dim = latent_dim
def set_epochs(self, epochs):
''
Change number of epochs.
''
self._epochs = epochs
'''
class Controller:
"""
Generic controller for the autoencoder.
This class should not be used directly, as its get_input method does
nothing. Instead, this class should be inherited from and the subclass used
to actually send input to the autoencoder.
"""
def get_input(self):
"""
Translate input from the user into the file locations of the images to
transform.
"""
pass
class TakeInput(Controller):
'''
Get input from the user.
'''
def get_input(self):
'''
Take input from the user and determine if the input is valid and what
should be done with it.
Prompt the user and, based on the given input, either call _invalid_input() or
return file locations and file names to be used in the FormatData class.
'''
# Get the path for the folder of training images.
train_folder_input = input(
"Enter the path for the folder of training images: ")
train_folder = train_folder_input.strip()
# Get the type of fruit they want to train on.
train_image_input = input(
"Enter the name of the images you want the autoencoder to train on: ")
train_image = train_image_input.strip()
# Get the number of images to train on.
train_num_input = input("Enter the number of training images: ")
train_num = train_num_input.strip()
if self._check_train_input(train_folder, train_image, train_num):
self._invalid_input()
else:
train_num = int(train_num)
# Get the path for the folder of the user image(s).
test_folder_input = input(
"Enter the folder path for the image(s) you want to transform: ")
test_folder = test_folder_input.strip()
# Get the name of the image(s) to transform.
test_image_input = input(
"Enter the name of the image(s) you want to transform: ")
test_image = test_image_input.strip()
# Get the number of images the user wants to transform.
test_num_input = input(
"Enter the number of image(s) you want to transform (1-10): ")
test_num = test_num_input.strip()
if self._check_test_input(test_folder, test_image, test_num):
self._invalid_input()
else:
test_num = int(test_num)
return (train_folder, train_image, train_num, test_folder, test_image,
test_num)
def _check_train_input(self, train_folder, train_image, train_num):
'''
Check that the training input leads to a valid file location and the number
of images desired is an integer.
Arguments:
train_folder: A string of user input for the path to the folder with
training images.
train_image: A string of user input for the name of training images.
train_num: A string of user input for the number of images to train on.
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the train inputs are valid and there is at least one training
# image in the location provided.
file_name = (f'{train_image}_{1}.jpg')
file_location = (f'{train_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
# Make sure the number given is an integer.
try:
int(train_num)
except ValueError:
return True
def _check_test_input(self, test_folder, test_image, test_num):
'''
Check that the test input has files at all of the locations specified and
the number of images desired is an integer.
Arguments:
test_folder: User input for the path to the folder with testing images.
test_image: User input for the name of testing images.
test_num: User input for the number of images to transform (integer from
1-10).
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the image number input is an integer between 1 and 10.
try:
int(test_num)
except ValueError:
return True
test_num = int(test_num)
if test_num < 1 or test_num > 10:
return True
# Make sure the testing inputs are valid and there are files in the
# location provided.
for i in range(1, test_num + 1):
file_name = (f'{test_image}_{i}.jpg')
file_location = (f'{test_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
def _invalid_input(self):
'''
Print an error message if user input is invalid and run the get_input
function again.
'''
print("Invalid input. Please try again.")
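# Illustrative sketch only (not part of the original listing): the file-naming
# convention the validators above rely on. Images must be numbered
# '<name>_1.jpg', '<name>_2.jpg', ... inside their folder; the folder and image
# names below are made-up examples, not project defaults.
def _expected_image_path(folder, name, number):
    """Build the same path string that _check_train_input / _check_test_input probe."""
    return f"{folder}/{name}_{number}.jpg"

# _expected_image_path("train", "orange", 1) -> 'train/orange_1.jpg'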
class View:
"""
Generic view for the autoencoder.
This class should not be used directly, as its methods do nothing. Instead,
this class should be inherited from and the subclass used to actually
display the images.
"""
def display_input(self, images, num_images):
|
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
pass
class ViewImages(View):
'''
Display the input images and their output after going through the
autoencoder.
'''
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1)
plt.imshow(images[i])
plt.title("original")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1 + num_images)
plt.imshow(decoded[i])
plt.title("reconstructed")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
class FormatData:
'''
Format image data for use in the autoencoder.
Attributes:
_file_location: Folder path for images.
_image_name: Name of all images.
_image_size: Pixel dimension for square images.
'''
def __init__(self):
self._file_location = '/content/drive/MyDrive/Colab Notebooks/train'
self._image_name = 'orange'
self._image_size = 60
def set_file_location(self, file_location):
'''
Define the location of the image files.
'''
self._file_location = file_location
def set_image_name(self, image_name):
'''
Define the name shared by all the images. This assumes the images all have the
same name with numbers after (ex. 'image_1.jpg', 'image_2.jpg', etc.)
'''
self._image_name = image_name
def set_image_size(self, image_size):
'''
Define the dimension of input and output images.
'''
self._image_size = image_size
def _get_image(self, image_num):
'''
Get individual image file.
Arguments:
image_num: An integer representing the number of the image desired.
Returns:
A Pillow image.
'''
file_name = (f'{self._image_name}_{image_num}.jpg')
file_location = (f'{self._file_location}/{file_name}')
if not os.path.isfile(file_location):
return False
else:
im = Image.open(file_location)
im = im.convert('RGB')
return im
def _square_image(self, image):
'''
Crop the image into a square with the dimensions defined by _image_size.
Arguments:
image: A Pillow image.
Returns:
A square Pillow image with the specified pixel dimensions.
'''
width, height = image.size
# Determine if width or height is smaller.
if width >= height:
min_dimention = height
else:
min_dimention = width
# Crop the image evenly on all sides.
width_1 = int((width - min_dimention) / 2)
height_1 = int((height - min_dimention) / 2)
box = (width_1, height_1, width - width_1, height - height_1)
image_cropped = image.crop(box)
image = image_cropped.resize((self._image_size, self._image_size))
return image
def _get_vector(self, image):
'''
Return an image as a vector of values between 0 and 1.
Arguments:
image: A Pillow image.
Returns:
The image as a numpy array of values between 0 and 1.
'''
vector = np.ravel(np.asarray(image)) / 255
return vector
def get_image_set(self, num_images):
'''
Return a matrix of images given the number of images desired.
Arguments:
num_images: An integer representing the number of images the function
will look for.
Returns:
A numpy array of values representing multiple images.
'''
image_count = num_images
image = self._get_image(1)
crop = self._square_image(image)
vector_all = self._get_vector(crop)
if num_images > 1:
for i in range(2, num_images + 1):
# If there is not an image at one of the locations specified, skip it and
# change the number of images so the final matrix is the correct
# dimensions.
if not self._get_image(i):
image_count -= 1
continue
else:
image = self._get_image(i)
crop = self._square_image(image)
vector = self._get_vector(crop)
vector_all = np.vstack((vector_all, vector))
return np.reshape(vector_all, ((image_count), self._image_size, self._image_size, 3))
'''
def main():
''
Run the software to train on a set of images and transform a new set of one
or more images.
''
autoencoder = Autoencoder()
controller = TakeInput()
display = ViewImages()
data = FormatData()
response = controller.get_input()
while response is None:
response = controller.get_input()
train_folder, train, train_num, test_folder, test, test_num = response
data.set_file_location(train_folder)
data.set_image_name(train)
train_images = data.get_image_set(train_num)
data.set_file_location(test_folder)
data.set_image_name(test)
user_images = data.get_image_set(test_num)
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
autoencoder.fit(train_images, train_images,
epochs=500,
shuffle=True,
validation_data=(user_images, user_images))
encoded = autoencoder.encoder(user_images).numpy()
decoded = autoencoder.decoder(encoded).numpy()
display.display_input(user_images, test_num)
display.display_output(decoded, test_num)
if __name__ == "__main__":
main()
'''
| '''
Display original input image(s).
'''
pass | identifier_body |
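# Illustrative sketch only (not part of the original listing): the shape bookkeeping
# that FormatData and the commented-out Autoencoder agree on. One 60x60 RGB image holds
# 60 * 60 * 3 = 10800 values in [0, 1], which is why the decoder ends in Dense(10800)
# followed by Reshape((60, 60, 3)). Pure NumPy; no TensorFlow required.
import numpy as np

def _shape_roundtrip_sketch():
    image = np.random.rand(60, 60, 3)       # stand-in for one normalized image
    flat = np.ravel(image)                  # what _get_vector produces per image
    assert flat.shape == (10800,)
    restored = flat.reshape(60, 60, 3)      # what the decoder's Reshape layer rebuilds
    assert np.allclose(image, restored)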
autoencoder.py | # -*- coding: utf-8 -*-
"""code_draft.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SFG3__AoM7dvKI06qiu76tW-mtbZDsxn
"""
import PIL
from PIL import Image
import numpy as np
import sys
from matplotlib import image
from matplotlib import pyplot as plt
#from google.colab import files
import os.path
from os import path
import pandas as pd
'''
import tensorflow as tf
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
class Autoencoder(Model):
''
Define the autoencoder with help from the tensorflow library.
Attributes:
_latent_dim: The number of latent dimensions the images get mapped onto.
_epochs: The number of epochs the autoencoder goes through to train.
encoder: The encoder layers of the autoencoder.
decoder: The decoder layers of the autoencoder.
''
def __init__(self):
super(Autoencoder, self).__init__()
self._latent_dim = 64
self._epochs = 500
self.encoder = tf.keras.Sequential([
layers.Flatten(),
layers.Dense(self._latent_dim, activation='relu'),
])
self.decoder = tf.keras.Sequential([
layers.Dense(10800, activation='sigmoid'),
layers.Reshape((60, 60, 3))
])
def call(self, x):
''
Call the encoder and decoder.
''
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
def set_latent_dim(self, latent_dim):
''
Change number of latent dimensions.
''
self._latent_dim = latent_dim
def set_epochs(self, epochs):
''
Change number of epochs.
''
self._epochs = epochs
'''
class Controller:
"""
Generic controller for the autoencoder.
This class should not be used directly, as its get_input method does
nothing. Instead, this class should be inherited from and the subclass used
to actually send input to the autoencoder.
"""
def get_input(self):
"""
Translate input from the user into the file locations of the images to
transform.
"""
pass
class TakeInput(Controller):
'''
Get input from the user.
'''
def get_input(self):
'''
Take input from the user and determine if the input is valid and what
should be done with it.
Prompt the user and, based on the given input, either call _invalid_input() or
return file locations and file names to be used in the FormatData class.
'''
# Get the path for the folder of training images.
train_folder_input = input(
"Enter the path for the folder of training images: ")
train_folder = train_folder_input.strip()
# Get the type of fruit they want to train on.
train_image_input = input(
"Enter the name of the images you want the autoencoder to train on: ")
train_image = train_image_input.strip()
# Get the number of images to train on.
train_num_input = input("Enter the number of training images: ")
train_num = train_num_input.strip()
if self._check_train_input(train_folder, train_image, train_num):
|
else:
train_num = int(train_num)
# Get the path for the folder of the user image(s).
test_folder_input = input(
"Enter the folder path for the image(s) you want to transform: ")
test_folder = test_folder_input.strip()
# Get the name of the image(s) to transform.
test_image_input = input(
"Enter the name of the image(s) you want to transform: ")
test_image = test_image_input.strip()
# Get the number of images the user wants to transform.
test_num_input = input(
"Enter the number of image(s) you want to transform (1-10): ")
test_num = test_num_input.strip()
if self._check_test_input(test_folder, test_image, test_num):
self._invalid_input()
else:
test_num = int(test_num)
return (train_folder, train_image, train_num, test_folder, test_image,
test_num)
def _check_train_input(self, train_folder, train_image, train_num):
'''
Check that the training input leads to a valid file location and the number
of images desired is an integer.
Arguments:
train_folder: A string of user input for the path to the folder with
training images.
train_image: A string of user input for the name of training images.
train_num: A string of user input for the number of images to train on.
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the train inputs are valid and there is at least one training
# image in the location provided.
file_name = (f'{train_image}_{1}.jpg')
file_location = (f'{train_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
# Make sure the number given is an integer.
try:
int(train_num)
except ValueError:
return True
def _check_test_input(self, test_folder, test_image, test_num):
'''
Check that the test input has files at all of the locations specified and
the number of images desired is an integer.
Arguments:
test_folder: User input for the path to the folder with testing images.
test_image: User input for the name of testing images.
test_num: User input for the number of images to transform (integer from
1-10).
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the image number input is an integer between 1 and 10.
try:
int(test_num)
except ValueError:
return True
test_num = int(test_num)
if test_num < 1 or test_num > 10:
return True
# Make sure the testing inputs are valid and there are files in the
# location provided.
for i in range(1, test_num + 1):
file_name = (f'{test_image}_{i}.jpg')
file_location = (f'{test_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
def _invalid_input(self):
'''
Print an error message if user input is invalid and run the get_input
function again.
'''
print("Invalid input. Please try again.")
class View:
"""
Generic view for the autoencoder.
This class should not be used directly, as its methods do nothing. Instead,
this class should be inherited from and the subclass used to actually
display the images.
"""
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
pass
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
pass
class ViewImages(View):
'''
Display the input images and their output after going through the
autoencoder.
'''
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1)
plt.imshow(images[i])
plt.title("original")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1 + num_images)
plt.imshow(decoded[i])
plt.title("reconstructed")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
class FormatData:
'''
Format image data for use in the autoencoder.
Attributes:
_file_location: Folder path for images.
_image_name: Name of all images.
_image_size: Pixel dimension for square images.
'''
def __init__(self):
self._file_location = '/content/drive/MyDrive/Colab Notebooks/train'
self._image_name = 'orange'
self._image_size = 60
def set_file_location(self, file_location):
'''
Define the location of the image files.
'''
self._file_location = file_location
def set_image_name(self, image_name):
'''
Define the name shared by all the images. This assumes the images all have the
same name with numbers after (ex. 'image_1.jpg', 'image_2.jpg', etc.)
'''
self._image_name = image_name
def set_image_size(self, image_size):
'''
Define the dimension of input and output images.
'''
self._image_size = image_size
def _get_image(self, image_num):
'''
Get individual image file.
Arguments:
image_num: An integer representing the number of the image desired.
Returns:
A Pillow image.
'''
file_name = (f'{self._image_name}_{image_num}.jpg')
file_location = (f'{self._file_location}/{file_name}')
if not os.path.isfile(file_location):
return False
else:
im = Image.open(file_location)
im = im.convert('RGB')
return im
def _square_image(self, image):
'''
Crop the image into a square with the dimensions defined by _image_size.
Arguments:
image: A Pillow image.
Returns:
A square Pillow image with the specified pixel dimensions.
'''
width, height = image.size
# Determine if width or height is smaller.
if width >= height:
min_dimention = height
else:
min_dimention = width
# Crop the image evenly on all sides.
width_1 = int((width - min_dimention) / 2)
height_1 = int((height - min_dimention) / 2)
box = (width_1, height_1, width - width_1, height - height_1)
image_cropped = image.crop(box)
image = image_cropped.resize((self._image_size, self._image_size))
return image
def _get_vector(self, image):
'''
Return an image as a vector of values between 0 and 1.
Arguments:
image: A Pillow image.
Returns:
The image as a numpy array of values between 0 and 1.
'''
vector = np.ravel(np.asarray(image)) / 255
return vector
def get_image_set(self, num_images):
'''
Return a matrix of images given the number of images desired.
Arguments:
num_images: An integer representing the number of images the function
will look for.
Returns:
A numpy array of values representing multiple images.
'''
image_count = num_images
image = self._get_image(1)
crop = self._square_image(image)
vector_all = self._get_vector(crop)
if num_images > 1:
for i in range(2, num_images + 1):
# If there is not an image at one of the locations specified, skip it and
# change the number of images so the final matrix is the correct
# dimensions.
if not self._get_image(i):
image_count -= 1
continue
else:
image = self._get_image(i)
crop = self._square_image(image)
vector = self._get_vector(crop)
vector_all = np.vstack((vector_all, vector))
return np.reshape(vector_all, ((image_count), self._image_size, self._image_size, 3))
'''
def main():
''
Run the software to train on a set of images and transform a new set of one
or more images.
''
autoencoder = Autoencoder()
controller = TakeInput()
display = ViewImages()
data = FormatData()
response = controller.get_input()
while response is None:
response = controller.get_input()
train_folder, train, train_num, test_folder, test, test_num = response
data.set_file_location(train_folder)
data.set_image_name(train)
train_images = data.get_image_set(train_num)
data.set_file_location(test_folder)
data.set_image_name(test)
user_images = data.get_image_set(test_num)
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
autoencoder.fit(train_images, train_images,
epochs=500,
shuffle=True,
validation_data=(user_images, user_images))
encoded = autoencoder.encoder(user_images).numpy()
decoded = autoencoder.decoder(encoded).numpy()
display.display_input(user_images, test_num)
display.display_output(decoded, test_num)
if __name__ == "__main__":
main()
'''
| self._invalid_input() | conditional_block |
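# Illustrative sketch only (not part of the original listing): exercises the same
# center-crop-then-resize arithmetic as FormatData._square_image, but on an in-memory
# PIL image so no files are needed. The sizes and colour are arbitrary values chosen
# for the sketch.
from PIL import Image

def _square_crop_sketch(width=100, height=80, target=60):
    img = Image.new("RGB", (width, height), color=(255, 128, 0))
    side = min(width, height)
    dx = (width - side) // 2
    dy = (height - side) // 2
    cropped = img.crop((dx, dy, width - dx, height - dy)).resize((target, target))
    assert cropped.size == (target, target)
    return cropped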
autoencoder.py | # -*- coding: utf-8 -*-
"""code_draft.ipynb
Automatically generated by Colaboratory.
| """
import PIL
from PIL import Image
import numpy as np
import sys
from matplotlib import image
from matplotlib import pyplot as plt
#from google.colab import files
import os.path
from os import path
import pandas as pd
'''
import tensorflow as tf
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
class Autoencoder(Model):
''
Define the autoencoder with help from the tensorflow library.
Attributes:
_latent_dim: The number of latent dimensions the images get mapped onto.
_epochs: The number of epochs the autoencoder goes through to train.
encoder: The encoder layers of the autoencoder.
decoder: The decoder layers of the autoencoder.
''
def __init__(self):
super(Autoencoder, self).__init__()
self._latent_dim = 64
self._epochs = 500
self.encoder = tf.keras.Sequential([
layers.Flatten(),
layers.Dense(self._latent_dim, activation='relu'),
])
self.decoder = tf.keras.Sequential([
layers.Dense(10800, activation='sigmoid'),
layers.Reshape((60, 60, 3))
])
def call(self, x):
''
Call the encoder and decoder.
''
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
def set_latent_dim(self, latent_dim):
''
Change number of latent dimensions.
''
self._latent_dim = latent_dim
def set_epochs(self, epochs):
''
Change number of epochs.
''
self._epochs = epochs
'''
class Controller:
"""
Generic controller for the autoencoder.
This class should not be used directly, as its get_input method does
nothing. Instead, this class should be inherited from and the subclass used
to actually send input to the autoencoder.
"""
def get_input(self):
"""
Translate input from the user into the file locations of the images to
transform.
"""
pass
class TakeInput(Controller):
'''
Get input from the user.
'''
def get_input(self):
'''
Take input from the user and determine if the input is valid and what
should be done with it.
Prompt the user and, based on the given input, either call _invalid_input() or
return file locations and file names to be used in the FormatData class.
'''
# Get the path for the folder of training images.
train_folder_input = input(
"Enter the path for the folder of training images: ")
train_folder = train_folder_input.strip()
# Get the type of fruit they want to train on.
train_image_input = input(
"Enter the name of the images you want the autoencoder to train on: ")
train_image = train_image_input.strip()
# Get the number of images to train on.
train_num_input = input("Enter the number of training images: ")
train_num = train_num_input.strip()
if self._check_train_input(train_folder, train_image, train_num):
self._invalid_input()
else:
train_num = int(train_num)
# Get the path for the folder of the user image(s).
test_folder_input = input(
"Enter the folder path for the image(s) you want to transform: ")
test_folder = test_folder_input.strip()
# Get the name of the image(s) to transform.
test_image_input = input(
"Enter the name of the image(s) you want to transform: ")
test_image = test_image_input.strip()
# Get the number of images the user wants to transform.
test_num_input = input(
"Enter the number of image(s) you want to transform (1-10): ")
test_num = test_num_input.strip()
if self._check_test_input(test_folder, test_image, test_num):
self._invalid_input()
else:
test_num = int(test_num)
return (train_folder, train_image, train_num, test_folder, test_image,
test_num)
def _check_train_input(self, train_folder, train_image, train_num):
'''
Check that the training input leads to a valid file location and the number
of images desired is an integer.
Arguments:
train_folder: A string of user input for the path to the folder with
training images.
train_image: A string of user input for the name of training images.
train_num: A string of user input for the number of images to train on.
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the train inputs are valid and there is at least one training
# image in the location provided.
file_name = (f'{train_image}_{1}.jpg')
file_location = (f'{train_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
# Make sure the number given is an integer.
try:
int(train_num)
except ValueError:
return True
def _check_test_input(self, test_folder, test_image, test_num):
'''
Check that the test input has files at all of the locations specified and
the number of images desired is an integer.
Arguments:
test_folder: User input for the path to the folder with testing images.
test_image: User input for the name of testing images.
test_num: User input for the number of images to transform (integer from
1-10).
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the image number input is an integer between 1 and 10.
try:
int(test_num)
except ValueError:
return True
test_num = int(test_num)
if test_num < 1 or test_num > 10:
return True
# Make sure the testing inputs are valid and there are files in the
# location provided.
for i in range(1, test_num + 1):
file_name = (f'{test_image}_{i}.jpg')
file_location = (f'{test_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
def _invalid_input(self):
'''
Print an error message if user input is invalid and run the get_input
function again.
'''
print("Invalid input. Please try again.")
class View:
"""
Generic view for the autoencoder.
This class should not be used directly, as its methods do nothing. Instead,
this class should be inherited from and the subclass used to actually
display the images.
"""
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
pass
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
pass
class ViewImages(View):
'''
Display the input images and their output after going through the
autoencoder.
'''
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1)
plt.imshow(images[i])
plt.title("original")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1 + num_images)
plt.imshow(decoded[i])
plt.title("reconstructed")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
class FormatData:
'''
Format image data for use in the autoencoder.
Attributes:
_file_location: Folder path for images.
_image_name: Name of all images.
_image_size: Pixel dimension for square images.
'''
def __init__(self):
self._file_location = '/content/drive/MyDrive/Colab Notebooks/train'
self._image_name = 'orange'
self._image_size = 60
def set_file_location(self, file_location):
'''
Define the location of the image files.
'''
self._file_location = file_location
def set_image_name(self, image_name):
'''
Define the name shared by all the images. This assumes the images all have the
same name with numbers after (ex. 'image_1.jpg', 'image_2.jpg', etc.)
'''
self._image_name = image_name
def set_image_size(self, image_size):
'''
Define the dimension of input and output images.
'''
self._image_size = image_size
def _get_image(self, image_num):
'''
Get individual image file.
Arguments:
image_num: An integer representing the number of the image desired.
Returns:
A Pillow image.
'''
file_name = (f'{self._image_name}_{image_num}.jpg')
file_location = (f'{self._file_location}/{file_name}')
if not os.path.isfile(file_location):
return False
else:
im = Image.open(file_location)
im = im.convert('RGB')
return im
def _square_image(self, image):
'''
Crop the image into a square with the dimensions defined by _image_size.
Arguments:
image: A Pillow image.
Returns:
A square Pillow image with the specified pixel dimensions.
'''
width, height = image.size
# Determine if width or height is smaller.
if width >= height:
min_dimention = height
else:
min_dimention = width
# Crop the image evenly on all sides.
width_1 = int((width - min_dimention) / 2)
height_1 = int((height - min_dimention) / 2)
box = (width_1, height_1, width - width_1, height - height_1)
image_cropped = image.crop(box)
image = image_cropped.resize((self._image_size, self._image_size))
return image
def _get_vector(self, image):
'''
Return an image as a vector of values between 0 and 1.
Arguments:
image: A Pillow image.
Returns:
The image as a numpy array of values between 0 and 1.
'''
vector = np.ravel(np.asarray(image)) / 255
return vector
def get_image_set(self, num_images):
'''
Return a matrix of images given the number of images desired.
Arguments:
num_images: An integer representing the number of images the function
will look for.
Returns:
A numpy array of values representing multiple images.
'''
image_count = num_images
image = self._get_image(1)
crop = self._square_image(image)
vector_all = self._get_vector(crop)
if num_images > 1:
for i in range(2, num_images + 1):
# If there is not an image at one of the locations specified, skip it and
# change the number of images so the final matrix is the correct
# dimensions.
if not self._get_image(i):
image_count -= 1
continue
else:
image = self._get_image(i)
crop = self._square_image(image)
vector = self._get_vector(crop)
vector_all = np.vstack((vector_all, vector))
return np.reshape(vector_all, ((image_count), self._image_size, self._image_size, 3))
'''
def main():
''
Run the software to train on a set of images and transform a new set of one
or more images.
''
autoencoder = Autoencoder()
controller = TakeInput()
display = ViewImages()
data = FormatData()
response = controller.get_input()
while response is None:
response = controller.get_input()
train_folder, train, train_num, test_folder, test, test_num = response
data.set_file_location(train_folder)
data.set_image_name(train)
train_images = data.get_image_set(train_num)
data.set_file_location(test_folder)
data.set_image_name(test)
user_images = data.get_image_set(test_num)
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
autoencoder.fit(train_images, train_images,
epochs=500,
shuffle=True,
validation_data=(user_images, user_images))
encoded = autoencoder.encoder(user_images).numpy()
decoded = autoencoder.decoder(encoded).numpy()
display.display_input(user_images, test_num)
display.display_output(decoded, test_num)
if __name__ == "__main__":
main()
''' | Original file is located at
https://colab.research.google.com/drive/1SFG3__AoM7dvKI06qiu76tW-mtbZDsxn | random_line_split |
autoencoder.py | # -*- coding: utf-8 -*-
"""code_draft.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SFG3__AoM7dvKI06qiu76tW-mtbZDsxn
"""
import PIL
from PIL import Image
import numpy as np
import sys
from matplotlib import image
from matplotlib import pyplot as plt
#from google.colab import files
import os.path
from os import path
import pandas as pd
'''
import tensorflow as tf
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, losses
from tensorflow.keras.models import Model
class Autoencoder(Model):
''
Define the autoencoder with help from the tensorflow library.
Attributes:
_latent_dim: The number of latent dimensions the images get mapped onto.
_epochs: The number of epochs the autoencoder goes through to train.
encoder: The encoder layers of the autoencoder.
decoder: The decoder layers of the autoencoder.
''
def __init__(self):
super(Autoencoder, self).__init__()
self._latent_dim = 64
self._epochs = 500
self.encoder = tf.keras.Sequential([
layers.Flatten(),
layers.Dense(self._latent_dim, activation='relu'),
])
self.decoder = tf.keras.Sequential([
layers.Dense(10800, activation='sigmoid'),
layers.Reshape((60, 60, 3))
])
def call(self, x):
''
Call the encoder and decoder.
''
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
def set_latent_dim(self, latent_dim):
''
Change number of latent dimensions.
''
self._latent_dim = latent_dim
def set_epochs(self, epochs):
''
Change number of epochs.
''
self._epochs = epochs
'''
class Controller:
"""
Generic controller for the autoencoder.
This class should not be used directly, as its get_input method does
nothing. Instead, this class should be inherited from and the subclass used
to actually send input to the autoencoder.
"""
def get_input(self):
"""
Translate input from the user into the file locations of the images to
transform.
"""
pass
class TakeInput(Controller):
'''
Get input from the user.
'''
def get_input(self):
'''
Take input from the user and determine if the input is valid and what
should be done with it.
Prompt the user for input; if the input is invalid, call _invalid_input(),
otherwise return the file locations and file names to be used in the FormatData class.
'''
# Get the path for the folder of training images.
train_folder_input = input(
"Enter the path for the folder of training images: ")
train_folder = train_folder_input.strip()
# Get the type of fruit they want to train on.
train_image_input = input(
"Enter the name of the images you want the autoencoder to train on: ")
train_image = train_image_input.strip()
# Get the number of images to train on.
train_num_input = input("Enter the number of training images: ")
train_num = train_num_input.strip()
if self._check_train_input(train_folder, train_image, train_num):
self._invalid_input()
else:
train_num = int(train_num)
# Get the path for the folder of the user image(s).
test_folder_input = input(
"Enter the folder path for the image(s) you want to transform: ")
test_folder = test_folder_input.strip()
# Get the name of the image(s) to transform.
test_image_input = input(
"Enter the name of the image(s) you want to transform: ")
test_image = test_image_input.strip()
# Get the number of images the user wants to transform.
test_num_input = input(
"Enter the number of image(s) you want to transform (1-10): ")
test_num = test_num_input.strip()
if self._check_test_input(test_folder, test_image, test_num):
self._invalid_input()
else:
test_num = int(test_num)
return (train_folder, train_image, train_num, test_folder, test_image,
test_num)
def _check_train_input(self, train_folder, train_image, train_num):
'''
Check that the training input leads to a valid file location and the number
of images desired is an integer.
Arguments:
train_folder: A string of user input for the path to the folder with
training images.
train_image: A string of user input for the name of training images.
train_num: A string of user input for the number of images to train on.
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the train inputs are valid and there is at least one training
# image in the location provided.
file_name = (f'{train_image}_{1}.jpg')
file_location = (f'{train_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
# Make sure the number given is an integer.
try:
int(train_num)
except ValueError:
return True
def _check_test_input(self, test_folder, test_image, test_num):
'''
Check that the test input has files at all of the locations specified and
the number of images desired is an integer.
Arguments:
test_folder: User input for the path to the folder with testing images.
test_image: User input for the name of testing images.
test_num: User input for the number of images to transform (integer from
1-10).
Returns:
True if there are any issues found with the input and nothing otherwise.
'''
# Make sure the image number input is an integer between 1 and 10.
try:
int(test_num)
except ValueError:
return True
test_num = int(test_num)
if test_num < 1 or test_num > 10:
return True
# Make sure the testing inputs are valid and there are files in the
# location provided.
for i in range(1, test_num + 1):
file_name = (f'{test_image}_{i}.jpg')
file_location = (f'{test_folder}/{file_name}')
if not os.path.isfile(file_location):
return True
def _invalid_input(self):
'''
Print an error message when the user input is invalid; the caller is
expected to call get_input() again.
'''
print("Invalid input. Please try again.")
class View:
"""
Generic view for the autoencoder.
This class should not be used directly, as its methods do nothing. Instead,
this class should be inherited from and the subclass used to actually
display the images.
"""
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
pass
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
pass
class ViewImages(View):
'''
Display the input images and their output after going through the
autoencoder.
'''
def display_input(self, images, num_images):
'''
Display original input image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1)
plt.imshow(images[i])
plt.title("original")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def display_output(self, decoded, num_images):
'''
Display output image(s).
'''
plt.figure(figsize=(20, 4))
for i in range(num_images):
ax = plt.subplot(2, num_images, i + 1 + num_images)
plt.imshow(decoded[i])
plt.title("reconstructed")
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
class FormatData:
'''
Format image data for use in the autoencoder.
Attributes:
_file_location: Folder path for images.
_image_name: Name of all images.
_image_size: Pixel dimension for square images.
'''
def __init__(self):
self._file_location = '/content/drive/MyDrive/Colab Notebooks/train'
self._image_name = 'orange'
self._image_size = 60
def set_file_location(self, file_location):
'''
Define the location of the image files.
'''
self._file_location = file_location
def set_image_name(self, image_name):
'''
Define the name shared by all the images. This assumes the images used all
have the same name followed by a number (e.g. 'image_1.jpg', 'image_2.jpg').
'''
self._image_name = image_name
def set_image_size(self, image_size):
'''
Define the dimension of input and output images.
'''
self._image_size = image_size
def _get_image(self, image_num):
'''
Get individual image file.
Arguments:
image_num: An integer representing the number of the image desired.
Returns:
A Pillow image.
'''
file_name = (f'{self._image_name}_{image_num}.jpg')
file_location = (f'{self._file_location}/{file_name}')
if not os.path.isfile(file_location):
return False
else:
im = Image.open(file_location)
im = im.convert('RGB')
return im
def | (self, image):
'''
Crop the image into a square with the dimensions defined by _image_size.
Arguments:
image: A Pillow image.
Returns:
A square Pillow image with the specified pixel dimensions.
'''
width, height = image.size
# Determine if width or height is smaller.
if width >= height:
min_dimention = height
else:
min_dimention = width
# Crop the image evenly on all sides.
width_1 = int((width - min_dimention) / 2)
height_1 = int((height - min_dimention) / 2)
box = (width_1, height_1, width - width_1, height - height_1)
image_cropped = image.crop(box)
image = image_cropped.resize((self._image_size, self._image_size))
return image
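# Worked example of the crop above (illustrative sizes, assuming the default
# _image_size of 60): a 100x80 input has a smaller dimension of 80, so
# width_1 = (100 - 80) // 2 = 10 and height_1 = 0, the crop box is
# (10, 0, 90, 80), and the resulting 80x80 square is resized to 60x60.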
def _get_vector(self, image):
'''
Return an image as a vector of values between 0 and 1.
Arguments:
image: A Pillow image.
Returns:
The image as a numpy array of values between 0 and 1.
'''
vector = np.ravel(np.asarray(image)) / 255
return vector
def get_image_set(self, num_images):
'''
Return a matrix of images given the number of images desired.
Arguments:
num_images: An integer representing the number of images the function
will look for.
Returns:
A numpy array of values representing multiple images.
'''
image_count = num_images
image = self._get_image(1)
crop = self._square_image(image)
vector_all = self._get_vector(crop)
if num_images > 1:
for i in range(2, num_images + 1):
# If there is not an image at one of the locations specified, skip it and
# change the number of images so the final matrix is the correct
# dimensions.
if not self._get_image(i):
image_count -= 1
continue
else:
image = self._get_image(i)
crop = self._square_image(image)
vector = self._get_vector(crop)
vector_all = np.vstack((vector_all, vector))
return np.reshape(vector_all, ((image_count), self._image_size, self._image_size, 3))
'''
def main():
''
Run the software to train on a set of images and transform a new set of one
or more images.
''
autoencoder = Autoencoder()
controller = TakeInput()
display = ViewImages()
data = FormatData()
response = controller.get_input()
while response is None:
response = controller.get_input()
train_folder, train, train_num, test_folder, test, test_num = response
data.set_file_location(train_folder)
data.set_image_name(train)
train_images = data.get_image_set(train_num)
data.set_file_location(test_folder)
data.set_image_name(test)
user_images = data.get_image_set(test_num)
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
autoencoder.fit(train_images, train_images,
epochs=500,
shuffle=True,
validation_data=(user_images, user_images))
encoded = autoencoder.encoder(user_images).numpy()
decoded = autoencoder.decoder(encoded).numpy()
display.display_input(user_images, test_num)
display.display_output(decoded, test_num)
if __name__ == "__main__":
main()
'''
| _square_image | identifier_name |
client.go | // Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package client is a client for communicating with the Key Server.
// It wraps the gRPC APIs and verifies all responses.
package client
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"time"
"github.com/google/keytransparency/core/crypto/signatures"
"github.com/google/keytransparency/core/mutator"
"github.com/google/keytransparency/core/mutator/entry"
"github.com/google/trillian"
"github.com/google/trillian/client/backoff"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
tpb "github.com/google/keytransparency/core/api/type/type_proto"
pb "github.com/google/keytransparency/core/api/v1/keytransparency_proto"
)
const (
// Each page contains pageSize profiles. Each profile contains multiple
// keys. Assuming 2 keys per profile (each of size 2048-bit), a page of
// size 16 will contain about 8KB of data.
pageSize = 16
// TODO: Public keys of trusted monitors.
)
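// Back-of-the-envelope check of the 8KB figure above (a rough estimate that
// ignores protobuf framing): a 2048-bit key is 256 bytes, two keys per
// profile is ~512 bytes, and 16 profiles per page is 16 * 512 B = 8192 B ≈ 8KB.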
var (
// ErrRetry occurs when an update has been queued, but the
// results of the update differ from the one requested.
// This indicates that a separate update was in-flight while
// this update was being submitted. To continue, the client
// should make a fresh update and try again.
ErrRetry = errors.New("client: update race condition - try again")
// ErrWait occurs when an update has been queued, but no change has been
// observed in the user's account yet.
ErrWait = errors.New("client: update not present yet - wait some more")
// ErrIncomplete occurs when the server indicates that requested epochs
// are not available.
ErrIncomplete = errors.New("incomplete account history")
// Vlog is the verbose logger. By default it outputs to /dev/null.
Vlog = log.New(ioutil.Discard, "", 0)
)
// Client is a helper library for issuing updates to the key server.
// Client Responsibilities
// - Trust Model:
// - - Trusted Monitors
// - - Verify last X days
// - Gossip - What is the current value of the root?
// - - Gossip advancement: advance state between current and server.
// - Sender queries - Do queries match up against the gossip root?
// - - List trusted monitors.
// - Key Owner
// - - Periodically query own keys. Do they match the private keys I have?
// - - Sign key update requests.
type Client struct {
*Verifier
cli pb.KeyTransparencyClient
domainID string
mutator mutator.Func
RetryDelay time.Duration
trusted trillian.SignedLogRoot
}
// NewFromConfig creates a new client from a config
func NewFromConfig(ktClient pb.KeyTransparencyClient, config *pb.Domain) (*Client, error) {
ktVerifier, err := NewVerifierFromDomain(config)
if err != nil {
return nil, err
}
return New(ktClient, config.DomainId, ktVerifier), nil
}
// New creates a new client.
// TODO(gbelvin): set retry delay.
func New(ktClient pb.KeyTransparencyClient,
domainID string,
ktVerifier *Verifier) *Client {
return &Client{
Verifier: ktVerifier,
cli: ktClient,
domainID: domainID,
mutator: entry.New(),
RetryDelay: 3 * time.Second,
}
}
func (c *Client) updateTrusted(newTrusted *trillian.SignedLogRoot) {
if newTrusted.TimestampNanos > c.trusted.TimestampNanos &&
newTrusted.TreeSize >= c.trusted.TreeSize {
c.trusted = *newTrusted
}
}
// GetEntry returns an entry if it exists, and nil if it does not.
func (c *Client) GetEntry(ctx context.Context, userID, appID string, opts ...grpc.CallOption) ([]byte, *trillian.SignedMapRoot, error) {
e, err := c.VerifiedGetEntry(ctx, appID, userID)
if err != nil {
return nil, nil, err
}
// Empty case.
if e.GetCommitted() == nil {
return nil, e.GetSmr(), nil
}
return e.GetCommitted().GetData(), e.GetSmr(), nil
}
func min(x, y int32) int32 {
if x < y {
return x
}
return y
}
// ListHistory returns a list of profiles starting and ending at given epochs.
// It also filters out all identical consecutive profiles.
func (c *Client) ListHistory(ctx context.Context, userID, appID string, start, end int64, opts ...grpc.CallOption) (map[*trillian.SignedMapRoot][]byte, error) {
if start < 0 {
return nil, fmt.Errorf("start=%v, want >= 0", start)
}
var currentProfile []byte
profiles := make(map[*trillian.SignedMapRoot][]byte)
epochsReceived := int64(0)
epochsWant := end - start + 1
for epochsReceived < epochsWant {
trustedSnapshot := c.trusted
resp, err := c.cli.ListEntryHistory(ctx, &pb.ListEntryHistoryRequest{
DomainId: c.domainID,
UserId: userID,
AppId: appID,
FirstTreeSize: trustedSnapshot.TreeSize,
Start: start,
PageSize: min(int32((end-start)+1), pageSize),
}, opts...)
if err != nil {
return nil, err
}
epochsReceived += int64(len(resp.GetValues()))
for i, v := range resp.GetValues() {
Vlog.Printf("Processing entry for %v, epoch %v", userID, start+int64(i))
err = c.VerifyGetEntryResponse(ctx, c.domainID, appID, userID, trustedSnapshot, v)
if err != nil {
return nil, err
}
c.updateTrusted(v.GetLogRoot())
// Compress profiles that are equal through time. All
// nil profiles before the first profile are ignored.
profile := v.GetCommitted().GetData()
if bytes.Equal(currentProfile, profile) {
continue
}
// Append the slice and update currentProfile.
profiles[v.GetSmr()] = profile
currentProfile = profile
}
if resp.NextStart == 0 {
break // No more data.
}
start = resp.NextStart // Fetch the next block of results.
}
if epochsReceived < epochsWant {
return nil, ErrIncomplete
}
return profiles, nil
}
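// A usage sketch for the paging above (the user, app, and epoch range are
// illustrative):
//
//	profiles, err := c.ListHistory(ctx, "user@example.com", "app1", 5, 20)
//	if err != nil {
//		// handle ErrIncomplete or verification failures
//	}
//	for smr, profile := range profiles {
//		_ = smr     // signed map root the profile was verified against
//		_ = profile // de-duplicated committed profile bytes
//	}
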
// Update creates an UpdateEntryRequest for a user,
// attempting to submit it multiple times until ctx times out.
// Returns context.DeadlineExceeded if ctx times out.
func (c *Client) | (ctx context.Context, u *tpb.User, signers []signatures.Signer) (*entry.Mutation, error) {
if got, want := u.DomainId, c.domainID; got != want {
return nil, fmt.Errorf("u.DomainID: %v, want %v", got, want)
}
// 1. pb.User + ExistingEntry -> Mutation.
m, err := c.newMutation(ctx, u)
if err != nil {
return nil, err
}
// 2. Queue Mutation.
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
// 3. Wait for update.
m, err = c.waitOnceForUserUpdate(ctx, m)
for {
switch {
case err == ErrWait:
// Try again.
case err == ErrRetry:
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
m, err = c.waitOnceForUserUpdate(ctx, m)
}
}
// QueueMutation signs an entry.Mutation and sends it to the server.
func (c *Client) QueueMutation(ctx context.Context, m *entry.Mutation, signers []signatures.Signer) error {
req, err := m.SerializeAndSign(signers, c.trusted.GetTreeSize())
if err != nil {
return fmt.Errorf("SerializeAndSign(): %v", err)
}
Vlog.Printf("Sending Update request...")
// TODO(gdbelvin): Change name from UpdateEntry to QueueUpdate.
_, err = c.cli.UpdateEntry(ctx, req)
return err
}
// newMutation fetches the current index and value for a user and prepares a mutation.
func (c *Client) newMutation(ctx context.Context, u *tpb.User) (*entry.Mutation, error) {
e, err := c.VerifiedGetEntry(ctx, u.AppId, u.UserId)
if err != nil {
return nil, err
}
oldLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
Vlog.Printf("Got current entry...")
index, err := c.Index(e.GetVrfProof(), u.DomainId, u.AppId, u.UserId)
if err != nil {
return nil, err
}
mutation := entry.NewMutation(index, u.DomainId, u.AppId, u.UserId)
if err := mutation.SetPrevious(oldLeaf, true); err != nil {
return nil, err
}
if err := mutation.SetCommitment(u.PublicKeyData); err != nil {
return nil, err
}
if len(u.AuthorizedKeys) != 0 {
if err := mutation.ReplaceAuthorizedKeys(u.AuthorizedKeys); err != nil {
return nil, err
}
}
return mutation, nil
}
// WaitForUserUpdate waits for the mutation to be applied or the context to timeout or cancel.
func (c *Client) WaitForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
for {
m, err := c.waitOnceForUserUpdate(ctx, m)
switch {
case err == ErrWait:
// Try again.
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
}
}
// waitOnceForUserUpdate waits for the STH to be updated, indicating the next epoch has been created,
// it then queries the current value for the user and checks it against the requested mutation.
// If the current value has not changed, WaitForUpdate returns ErrWait.
// If the current value has changed, but does not match the requested mutation,
// WaitForUpdate returns a new mutation, built with the current value and ErrRetry.
// If the current value matches the request, no mutation and no error are returned.
func (c *Client) waitOnceForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
if m == nil {
return nil, fmt.Errorf("nil mutation")
}
// Wait for STH to change.
if err := c.WaitForSTHUpdate(ctx, c.trusted.TreeSize+1); err != nil {
return m, err
}
// GetEntry.
e, err := c.VerifiedGetEntry(ctx, m.AppID, m.UserID)
if err != nil {
return m, err
}
Vlog.Printf("Got current entry...")
// Verify.
cntLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
cntValue, err := entry.FromLeafValue(cntLeaf)
if err != nil {
return m, err
}
switch {
case m.EqualsRequested(cntValue):
return nil, nil
case m.EqualsPrevious(cntValue):
return m, ErrWait
default:
// Race condition: some change got in first.
// Value has changed, but it's not what we asked for.
// Retry based on new cntValue.
// To break the tie between two devices that are fighting
// each other, this error should be propagated back to the user.
copyPreviousLeafData := false
if err := m.SetPrevious(cntLeaf, copyPreviousLeafData); err != nil {
return nil, fmt.Errorf("waitforupdate: SetPrevious(): %v", err)
}
return m, ErrRetry
}
}
// sthForRevision returns the minimum STH.TreeSize that will contain the map revision.
// Map revision N is stored at Log index N, the minimum TreeSize will be N+1.
func sthForRevision(revision int64) int64 {
return revision + 1
}
// WaitForRevision waits until a given map revision is available.
func (c *Client) WaitForRevision(ctx context.Context, revision int64) error {
return c.WaitForSTHUpdate(ctx, sthForRevision(revision))
}
// WaitForSTHUpdate blocks until the log root reported by the server has moved
// to at least treeSize or times out.
func (c *Client) WaitForSTHUpdate(ctx context.Context, treeSize int64) error {
b := &backoff.Backoff{
Min: 100 * time.Millisecond,
Max: 10 * time.Second,
Factor: 1.2,
Jitter: true,
}
for {
select {
case <-time.After(b.Duration()):
resp, err := c.cli.GetLatestEpoch(ctx, &pb.GetLatestEpochRequest{
DomainId: c.domainID,
})
if err != nil {
return err
}
if resp.GetLogRoot().TreeSize >= treeSize {
return nil // We're done!
}
// The LogRoot is not updated yet.
// Wait some more.
continue
case <-ctx.Done():
return ctx.Err()
}
}
}
| Update | identifier_name |
client.go | // Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package client is a client for communicating with the Key Server.
// It wraps the gRPC APIs and verifies all responses.
package client
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"time"
"github.com/google/keytransparency/core/crypto/signatures"
"github.com/google/keytransparency/core/mutator"
"github.com/google/keytransparency/core/mutator/entry"
"github.com/google/trillian"
"github.com/google/trillian/client/backoff"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
tpb "github.com/google/keytransparency/core/api/type/type_proto"
pb "github.com/google/keytransparency/core/api/v1/keytransparency_proto"
)
const (
// Each page contains pageSize profiles. Each profile contains multiple
// keys. Assuming 2 keys per profile (each of size 2048-bit), a page of
// size 16 will contain about 8KB of data.
pageSize = 16
// TODO: Public keys of trusted monitors.
)
var (
// ErrRetry occurs when an update has been queued, but the
// results of the update differ from the one requested.
// This indicates that a separate update was in-flight while
// this update was being submitted. To continue, the client
// should make a fresh update and try again.
ErrRetry = errors.New("client: update race condition - try again")
// ErrWait occurs when an update has been queued, but no change has been
// observed in the user's account yet.
ErrWait = errors.New("client: update not present yet - wait some more")
// ErrIncomplete occurs when the server indicates that requested epochs
// are not available.
ErrIncomplete = errors.New("incomplete account history")
// Vlog is the verbose logger. By default it outputs to /dev/null.
Vlog = log.New(ioutil.Discard, "", 0)
)
// Client is a helper library for issuing updates to the key server.
// Client Responsibilities
// - Trust Model:
// - - Trusted Monitors
// - - Verify last X days
// - Gossip - What is the current value of the root?
// - - Gossip advancement: advance state between current and server.
// - Sender queries - Do queries match up against the gossip root?
// - - List trusted monitors.
// - Key Owner
// - - Periodically query own keys. Do they match the private keys I have?
// - - Sign key update requests.
type Client struct {
*Verifier
cli pb.KeyTransparencyClient
domainID string
mutator mutator.Func
RetryDelay time.Duration
trusted trillian.SignedLogRoot
}
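// A minimal end-to-end sketch of how this type is wired up. The gRPC
// connection, domain config, user record, and signers are assumed to come
// from the caller, and pb.NewKeyTransparencyClient is assumed to be the
// generated constructor for the KeyTransparencyClient interface used here.
func exampleUpdate(ctx context.Context, cc *grpc.ClientConn, domain *pb.Domain,
	user *tpb.User, signers []signatures.Signer) error {
	c, err := NewFromConfig(pb.NewKeyTransparencyClient(cc), domain)
	if err != nil {
		return fmt.Errorf("NewFromConfig(): %v", err)
	}
	// Update queues the signed mutation and blocks until it is visible,
	// the context expires, or a conflicting concurrent update is detected.
	if _, err := c.Update(ctx, user, signers); err != nil {
		return err
	}
	return nil
}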
// NewFromConfig creates a new client from a config
func NewFromConfig(ktClient pb.KeyTransparencyClient, config *pb.Domain) (*Client, error) {
ktVerifier, err := NewVerifierFromDomain(config)
if err != nil {
return nil, err
}
return New(ktClient, config.DomainId, ktVerifier), nil
}
// New creates a new client.
// TODO(gbelvin): set retry delay.
func New(ktClient pb.KeyTransparencyClient,
domainID string,
ktVerifier *Verifier) *Client {
return &Client{
Verifier: ktVerifier,
cli: ktClient,
domainID: domainID,
mutator: entry.New(),
RetryDelay: 3 * time.Second,
}
}
func (c *Client) updateTrusted(newTrusted *trillian.SignedLogRoot) {
if newTrusted.TimestampNanos > c.trusted.TimestampNanos &&
newTrusted.TreeSize >= c.trusted.TreeSize {
c.trusted = *newTrusted
}
}
// GetEntry returns an entry if it exists, and nil if it does not.
func (c *Client) GetEntry(ctx context.Context, userID, appID string, opts ...grpc.CallOption) ([]byte, *trillian.SignedMapRoot, error) {
e, err := c.VerifiedGetEntry(ctx, appID, userID)
if err != nil {
return nil, nil, err
}
// Empty case.
if e.GetCommitted() == nil {
return nil, e.GetSmr(), nil
}
return e.GetCommitted().GetData(), e.GetSmr(), nil
}
func min(x, y int32) int32 {
if x < y {
return x
}
return y
}
// ListHistory returns a list of profiles starting and ending at given epochs.
// It also filters out all identical consecutive profiles.
func (c *Client) ListHistory(ctx context.Context, userID, appID string, start, end int64, opts ...grpc.CallOption) (map[*trillian.SignedMapRoot][]byte, error) {
if start < 0 {
return nil, fmt.Errorf("start=%v, want >= 0", start)
}
var currentProfile []byte
profiles := make(map[*trillian.SignedMapRoot][]byte)
epochsReceived := int64(0)
epochsWant := end - start + 1
for epochsReceived < epochsWant {
trustedSnapshot := c.trusted
resp, err := c.cli.ListEntryHistory(ctx, &pb.ListEntryHistoryRequest{
DomainId: c.domainID,
UserId: userID,
AppId: appID,
FirstTreeSize: trustedSnapshot.TreeSize,
Start: start,
PageSize: min(int32((end-start)+1), pageSize),
}, opts...)
if err != nil {
return nil, err
}
epochsReceived += int64(len(resp.GetValues()))
for i, v := range resp.GetValues() {
Vlog.Printf("Processing entry for %v, epoch %v", userID, start+int64(i))
err = c.VerifyGetEntryResponse(ctx, c.domainID, appID, userID, trustedSnapshot, v)
if err != nil {
return nil, err
}
c.updateTrusted(v.GetLogRoot())
// Compress profiles that are equal through time. All
// nil profiles before the first profile are ignored.
profile := v.GetCommitted().GetData()
if bytes.Equal(currentProfile, profile) {
continue
}
// Append the slice and update currentProfile.
profiles[v.GetSmr()] = profile
currentProfile = profile
}
if resp.NextStart == 0 {
break // No more data.
}
start = resp.NextStart // Fetch the next block of results.
}
if epochsReceived < epochsWant {
return nil, ErrIncomplete
}
return profiles, nil
}
// Update creates an UpdateEntryRequest for a user,
// attempting to submit it multiple times until ctx times out.
// Returns context.DeadlineExceeded if ctx times out.
func (c *Client) Update(ctx context.Context, u *tpb.User, signers []signatures.Signer) (*entry.Mutation, error) {
if got, want := u.DomainId, c.domainID; got != want {
return nil, fmt.Errorf("u.DomainID: %v, want %v", got, want)
}
// 1. pb.User + ExistingEntry -> Mutation.
m, err := c.newMutation(ctx, u)
if err != nil {
return nil, err
}
// 2. Queue Mutation.
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
// 3. Wait for update.
m, err = c.waitOnceForUserUpdate(ctx, m)
for {
switch {
case err == ErrWait:
// Try again.
case err == ErrRetry:
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
m, err = c.waitOnceForUserUpdate(ctx, m)
}
}
// QueueMutation signs an entry.Mutation and sends it to the server.
func (c *Client) QueueMutation(ctx context.Context, m *entry.Mutation, signers []signatures.Signer) error {
req, err := m.SerializeAndSign(signers, c.trusted.GetTreeSize())
if err != nil {
return fmt.Errorf("SerializeAndSign(): %v", err)
}
Vlog.Printf("Sending Update request...")
// TODO(gdbelvin): Change name from UpdateEntry to QueueUpdate.
_, err = c.cli.UpdateEntry(ctx, req)
return err
}
// newMutation fetches the current index and value for a user and prepares a mutation.
func (c *Client) newMutation(ctx context.Context, u *tpb.User) (*entry.Mutation, error) {
e, err := c.VerifiedGetEntry(ctx, u.AppId, u.UserId)
if err != nil {
return nil, err
}
oldLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
Vlog.Printf("Got current entry...")
index, err := c.Index(e.GetVrfProof(), u.DomainId, u.AppId, u.UserId)
if err != nil {
return nil, err
}
mutation := entry.NewMutation(index, u.DomainId, u.AppId, u.UserId)
if err := mutation.SetPrevious(oldLeaf, true); err != nil {
return nil, err
}
if err := mutation.SetCommitment(u.PublicKeyData); err != nil |
if len(u.AuthorizedKeys) != 0 {
if err := mutation.ReplaceAuthorizedKeys(u.AuthorizedKeys); err != nil {
return nil, err
}
}
return mutation, nil
}
// WaitForUserUpdate waits for the mutation to be applied or the context to timeout or cancel.
func (c *Client) WaitForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
for {
m, err := c.waitOnceForUserUpdate(ctx, m)
switch {
case err == ErrWait:
// Try again.
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
}
}
// waitOnceForUserUpdate waits for the STH to be updated, indicating the next epoch has been created,
// it then queries the current value for the user and checks it against the requested mutation.
// If the current value has not changed, WaitForUpdate returns ErrWait.
// If the current value has changed, but does not match the requested mutation,
// WaitForUpdate returns a new mutation, built with the current value and ErrRetry.
// If the current value matches the request, no mutation and no error are returned.
func (c *Client) waitOnceForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
if m == nil {
return nil, fmt.Errorf("nil mutation")
}
// Wait for STH to change.
if err := c.WaitForSTHUpdate(ctx, c.trusted.TreeSize+1); err != nil {
return m, err
}
// GetEntry.
e, err := c.VerifiedGetEntry(ctx, m.AppID, m.UserID)
if err != nil {
return m, err
}
Vlog.Printf("Got current entry...")
// Verify.
cntLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
cntValue, err := entry.FromLeafValue(cntLeaf)
if err != nil {
return m, err
}
switch {
case m.EqualsRequested(cntValue):
return nil, nil
case m.EqualsPrevious(cntValue):
return m, ErrWait
default:
// Race condition: some change got in first.
// Value has changed, but it's not what we asked for.
// Retry based on new cntValue.
// To break the tie between two devices that are fighting
// each other, this error should be propagated back to the user.
copyPreviousLeafData := false
if err := m.SetPrevious(cntLeaf, copyPreviousLeafData); err != nil {
return nil, fmt.Errorf("waitforupdate: SetPrevious(): %v", err)
}
return m, ErrRetry
}
}
// sthForRevision returns the minimum STH.TreeSize that will contain the map revision.
// Map revision N is stored at Log index N, the minimum TreeSize will be N+1.
func sthForRevision(revision int64) int64 {
return revision + 1
}
// WaitForRevision waits until a given map revision is available.
func (c *Client) WaitForRevision(ctx context.Context, revision int64) error {
return c.WaitForSTHUpdate(ctx, sthForRevision(revision))
}
// WaitForSTHUpdate blocks until the log root reported by the server has moved
// to at least treeSize or times out.
func (c *Client) WaitForSTHUpdate(ctx context.Context, treeSize int64) error {
b := &backoff.Backoff{
Min: 100 * time.Millisecond,
Max: 10 * time.Second,
Factor: 1.2,
Jitter: true,
}
for {
select {
case <-time.After(b.Duration()):
resp, err := c.cli.GetLatestEpoch(ctx, &pb.GetLatestEpochRequest{
DomainId: c.domainID,
})
if err != nil {
return err
}
if resp.GetLogRoot().TreeSize >= treeSize {
return nil // We're done!
}
// The LogRoot is not updated yet.
// Wait some more.
continue
case <-ctx.Done():
return ctx.Err()
}
}
}
| {
return nil, err
} | conditional_block |
client.go | // Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package client is a client for communicating with the Key Server.
// It wraps the gRPC APIs and verifies all responses.
package client
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"time"
"github.com/google/keytransparency/core/crypto/signatures"
"github.com/google/keytransparency/core/mutator"
"github.com/google/keytransparency/core/mutator/entry"
"github.com/google/trillian"
"github.com/google/trillian/client/backoff"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
tpb "github.com/google/keytransparency/core/api/type/type_proto"
pb "github.com/google/keytransparency/core/api/v1/keytransparency_proto"
)
const (
// Each page contains pageSize profiles. Each profile contains multiple
// keys. Assuming 2 keys per profile (each of size 2048-bit), a page of
// size 16 will contain about 8KB of data.
pageSize = 16
// TODO: Public keys of trusted monitors.
)
var (
// ErrRetry occurs when an update has been queued, but the
// results of the update differ from the one requested.
// This indicates that a separate update was in-flight while
// this update was being submitted. To continue, the client
// should make a fresh update and try again.
ErrRetry = errors.New("client: update race condition - try again")
// ErrWait occurs when an update has been queued, but no change has been
// observed in the user's account yet.
ErrWait = errors.New("client: update not present yet - wait some more")
// ErrIncomplete occurs when the server indicates that requested epochs
// are not available.
ErrIncomplete = errors.New("incomplete account history")
// Vlog is the verbose logger. By default it outputs to /dev/null.
Vlog = log.New(ioutil.Discard, "", 0)
)
// Client is a helper library for issuing updates to the key server.
// Client Responsibilities
// - Trust Model:
// - - Trusted Monitors
// - - Verify last X days
// - Gossip - What is the current value of the root?
// - - Gossip advancement: advance state between current and server.
// - Sender queries - Do queries match up against the gossip root?
// - - List trusted monitors.
// - Key Owner
// - - Periodically query own keys. Do they match the private keys I have?
// - - Sign key update requests.
type Client struct {
*Verifier
cli pb.KeyTransparencyClient
domainID string
mutator mutator.Func
RetryDelay time.Duration
trusted trillian.SignedLogRoot
}
// NewFromConfig creates a new client from a config
func NewFromConfig(ktClient pb.KeyTransparencyClient, config *pb.Domain) (*Client, error) {
ktVerifier, err := NewVerifierFromDomain(config)
if err != nil {
return nil, err
}
return New(ktClient, config.DomainId, ktVerifier), nil
}
// New creates a new client.
// TODO(gbelvin): set retry delay.
func New(ktClient pb.KeyTransparencyClient,
domainID string,
ktVerifier *Verifier) *Client |
func (c *Client) updateTrusted(newTrusted *trillian.SignedLogRoot) {
if newTrusted.TimestampNanos > c.trusted.TimestampNanos &&
newTrusted.TreeSize >= c.trusted.TreeSize {
c.trusted = *newTrusted
}
}
// GetEntry returns an entry if it exists, and nil if it does not.
func (c *Client) GetEntry(ctx context.Context, userID, appID string, opts ...grpc.CallOption) ([]byte, *trillian.SignedMapRoot, error) {
e, err := c.VerifiedGetEntry(ctx, appID, userID)
if err != nil {
return nil, nil, err
}
// Empty case.
if e.GetCommitted() == nil {
return nil, e.GetSmr(), nil
}
return e.GetCommitted().GetData(), e.GetSmr(), nil
}
func min(x, y int32) int32 {
if x < y {
return x
}
return y
}
// ListHistory returns a list of profiles starting and ending at given epochs.
// It also filters out all identical consecutive profiles.
func (c *Client) ListHistory(ctx context.Context, userID, appID string, start, end int64, opts ...grpc.CallOption) (map[*trillian.SignedMapRoot][]byte, error) {
if start < 0 {
return nil, fmt.Errorf("start=%v, want >= 0", start)
}
var currentProfile []byte
profiles := make(map[*trillian.SignedMapRoot][]byte)
epochsReceived := int64(0)
epochsWant := end - start + 1
for epochsReceived < epochsWant {
trustedSnapshot := c.trusted
resp, err := c.cli.ListEntryHistory(ctx, &pb.ListEntryHistoryRequest{
DomainId: c.domainID,
UserId: userID,
AppId: appID,
FirstTreeSize: trustedSnapshot.TreeSize,
Start: start,
PageSize: min(int32((end-start)+1), pageSize),
}, opts...)
if err != nil {
return nil, err
}
epochsReceived += int64(len(resp.GetValues()))
for i, v := range resp.GetValues() {
Vlog.Printf("Processing entry for %v, epoch %v", userID, start+int64(i))
err = c.VerifyGetEntryResponse(ctx, c.domainID, appID, userID, trustedSnapshot, v)
if err != nil {
return nil, err
}
c.updateTrusted(v.GetLogRoot())
// Compress profiles that are equal through time. All
// nil profiles before the first profile are ignored.
profile := v.GetCommitted().GetData()
if bytes.Equal(currentProfile, profile) {
continue
}
// Append the slice and update currentProfile.
profiles[v.GetSmr()] = profile
currentProfile = profile
}
if resp.NextStart == 0 {
break // No more data.
}
start = resp.NextStart // Fetch the next block of results.
}
if epochsReceived < epochsWant {
return nil, ErrIncomplete
}
return profiles, nil
}
// Update creates an UpdateEntryRequest for a user,
// attempting to submit it multiple times until ctx times out.
// Returns context.DeadlineExceeded if ctx times out.
func (c *Client) Update(ctx context.Context, u *tpb.User, signers []signatures.Signer) (*entry.Mutation, error) {
if got, want := u.DomainId, c.domainID; got != want {
return nil, fmt.Errorf("u.DomainID: %v, want %v", got, want)
}
// 1. pb.User + ExistingEntry -> Mutation.
m, err := c.newMutation(ctx, u)
if err != nil {
return nil, err
}
// 2. Queue Mutation.
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
// 3. Wait for update.
m, err = c.waitOnceForUserUpdate(ctx, m)
for {
switch {
case err == ErrWait:
// Try again.
case err == ErrRetry:
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
m, err = c.waitOnceForUserUpdate(ctx, m)
}
}
// QueueMutation signs an entry.Mutation and sends it to the server.
func (c *Client) QueueMutation(ctx context.Context, m *entry.Mutation, signers []signatures.Signer) error {
req, err := m.SerializeAndSign(signers, c.trusted.GetTreeSize())
if err != nil {
return fmt.Errorf("SerializeAndSign(): %v", err)
}
Vlog.Printf("Sending Update request...")
// TODO(gdbelvin): Change name from UpdateEntry to QueueUpdate.
_, err = c.cli.UpdateEntry(ctx, req)
return err
}
// newMutation fetches the current index and value for a user and prepares a mutation.
func (c *Client) newMutation(ctx context.Context, u *tpb.User) (*entry.Mutation, error) {
e, err := c.VerifiedGetEntry(ctx, u.AppId, u.UserId)
if err != nil {
return nil, err
}
oldLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
Vlog.Printf("Got current entry...")
index, err := c.Index(e.GetVrfProof(), u.DomainId, u.AppId, u.UserId)
if err != nil {
return nil, err
}
mutation := entry.NewMutation(index, u.DomainId, u.AppId, u.UserId)
if err := mutation.SetPrevious(oldLeaf, true); err != nil {
return nil, err
}
if err := mutation.SetCommitment(u.PublicKeyData); err != nil {
return nil, err
}
if len(u.AuthorizedKeys) != 0 {
if err := mutation.ReplaceAuthorizedKeys(u.AuthorizedKeys); err != nil {
return nil, err
}
}
return mutation, nil
}
// WaitForUserUpdate waits for the mutation to be applied or the context to timeout or cancel.
func (c *Client) WaitForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
for {
m, err := c.waitOnceForUserUpdate(ctx, m)
switch {
case err == ErrWait:
// Try again.
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
}
}
// waitOnceForUserUpdate waits for the STH to be updated, indicating the next epoch has been created,
// it then queries the current value for the user and checks it against the requested mutation.
// If the current value has not changed, WaitForUpdate returns ErrWait.
// If the current value has changed, but does not match the requested mutation,
// WaitForUpdate returns a new mutation, built with the current value and ErrRetry.
// If the current value matches the request, no mutation and no error are returned.
func (c *Client) waitOnceForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
if m == nil {
return nil, fmt.Errorf("nil mutation")
}
// Wait for STH to change.
if err := c.WaitForSTHUpdate(ctx, c.trusted.TreeSize+1); err != nil {
return m, err
}
// GetEntry.
e, err := c.VerifiedGetEntry(ctx, m.AppID, m.UserID)
if err != nil {
return m, err
}
Vlog.Printf("Got current entry...")
// Verify.
cntLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
cntValue, err := entry.FromLeafValue(cntLeaf)
if err != nil {
return m, err
}
switch {
case m.EqualsRequested(cntValue):
return nil, nil
case m.EqualsPrevious(cntValue):
return m, ErrWait
default:
// Race condition: some change got in first.
// Value has changed, but it's not what we asked for.
// Retry based on new cntValue.
// To break the tie between two devices that are fighting
// each other, this error should be propagated back to the user.
copyPreviousLeafData := false
if err := m.SetPrevious(cntLeaf, copyPreviousLeafData); err != nil {
return nil, fmt.Errorf("waitforupdate: SetPrevious(): %v", err)
}
return m, ErrRetry
}
}
// sthForRevision returns the minimum STH.TreeSize that will contain the map revision.
// Map revision N is stored at Log index N, the minimum TreeSize will be N+1.
func sthForRevision(revision int64) int64 {
return revision + 1
}
// WaitForRevision waits until a given map revision is available.
func (c *Client) WaitForRevision(ctx context.Context, revision int64) error {
return c.WaitForSTHUpdate(ctx, sthForRevision(revision))
}
// WaitForSTHUpdate blocks until the log root reported by the server has moved
// to at least treeSize or times out.
func (c *Client) WaitForSTHUpdate(ctx context.Context, treeSize int64) error {
b := &backoff.Backoff{
Min: 100 * time.Millisecond,
Max: 10 * time.Second,
Factor: 1.2,
Jitter: true,
}
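// With these settings the successive sleeps (before jitter) are roughly
// 100ms, 120ms, 144ms, 173ms, ... growing by a factor of 1.2 per attempt
// and capped at 10s, so polling is cheap at first and backs off over time.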
for {
select {
case <-time.After(b.Duration()):
resp, err := c.cli.GetLatestEpoch(ctx, &pb.GetLatestEpochRequest{
DomainId: c.domainID,
})
if err != nil {
return err
}
if resp.GetLogRoot().TreeSize >= treeSize {
return nil // We're done!
}
// The LogRoot is not updated yet.
// Wait some more.
continue
case <-ctx.Done():
return ctx.Err()
}
}
}
| {
return &Client{
Verifier: ktVerifier,
cli: ktClient,
domainID: domainID,
mutator: entry.New(),
RetryDelay: 3 * time.Second,
}
} | identifier_body |
client.go | // Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package client is a client for communicating with the Key Server.
// It wraps the gRPC APIs and verifies all responses.
package client
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"log"
"time"
"github.com/google/keytransparency/core/crypto/signatures"
"github.com/google/keytransparency/core/mutator"
"github.com/google/keytransparency/core/mutator/entry"
"github.com/google/trillian"
"github.com/google/trillian/client/backoff"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
tpb "github.com/google/keytransparency/core/api/type/type_proto"
pb "github.com/google/keytransparency/core/api/v1/keytransparency_proto"
)
const (
// Each page contains pageSize profiles. Each profile contains multiple
// keys. Assuming 2 keys per profile (each of size 2048-bit), a page of
// size 16 will contain about 8KB of data.
pageSize = 16
// TODO: Public keys of trusted monitors.
)
var (
// ErrRetry occurs when an update has been queued, but the
// results of the update differ from the one requested.
// This indicates that a separate update was in-flight while
// this update was being submitted. To continue, the client
// should make a fresh update and try again.
ErrRetry = errors.New("client: update race condition - try again")
// ErrWait occurs when an update has been queued, but no change has been
// observed in the user's account yet.
ErrWait = errors.New("client: update not present yet - wait some more")
// ErrIncomplete occurs when the server indicates that requested epochs
// are not available.
ErrIncomplete = errors.New("incomplete account history")
// Vlog is the verbose logger. By default it outputs to /dev/null.
Vlog = log.New(ioutil.Discard, "", 0)
)
// Client is a helper library for issuing updates to the key server.
// Client Responsibilities
// - Trust Model:
// - - Trusted Monitors
// - - Verify last X days
// - Gossip - What is the current value of the root?
// - - Gossip advancement: advance state between current and server.
// - Sender queries - Do queries match up against the gossip root?
// - - List trusted monitors.
// - Key Owner
// - - Periodically query own keys. Do they match the private keys I have?
// - - Sign key update requests.
type Client struct {
*Verifier
cli pb.KeyTransparencyClient
domainID string
mutator mutator.Func
RetryDelay time.Duration
trusted trillian.SignedLogRoot
} | if err != nil {
return nil, err
}
return New(ktClient, config.DomainId, ktVerifier), nil
}
// New creates a new client.
// TODO(gbelvin): set retry delay.
func New(ktClient pb.KeyTransparencyClient,
domainID string,
ktVerifier *Verifier) *Client {
return &Client{
Verifier: ktVerifier,
cli: ktClient,
domainID: domainID,
mutator: entry.New(),
RetryDelay: 3 * time.Second,
}
}
func (c *Client) updateTrusted(newTrusted *trillian.SignedLogRoot) {
if newTrusted.TimestampNanos > c.trusted.TimestampNanos &&
newTrusted.TreeSize >= c.trusted.TreeSize {
c.trusted = *newTrusted
}
}
// GetEntry returns an entry if it exists, and nil if it does not.
func (c *Client) GetEntry(ctx context.Context, userID, appID string, opts ...grpc.CallOption) ([]byte, *trillian.SignedMapRoot, error) {
e, err := c.VerifiedGetEntry(ctx, appID, userID)
if err != nil {
return nil, nil, err
}
// Empty case.
if e.GetCommitted() == nil {
return nil, e.GetSmr(), nil
}
return e.GetCommitted().GetData(), e.GetSmr(), nil
}
func min(x, y int32) int32 {
if x < y {
return x
}
return y
}
// ListHistory returns a list of profiles starting and ending at given epochs.
// It also filters out all identical consecutive profiles.
func (c *Client) ListHistory(ctx context.Context, userID, appID string, start, end int64, opts ...grpc.CallOption) (map[*trillian.SignedMapRoot][]byte, error) {
if start < 0 {
return nil, fmt.Errorf("start=%v, want >= 0", start)
}
var currentProfile []byte
profiles := make(map[*trillian.SignedMapRoot][]byte)
epochsReceived := int64(0)
epochsWant := end - start + 1
for epochsReceived < epochsWant {
trustedSnapshot := c.trusted
resp, err := c.cli.ListEntryHistory(ctx, &pb.ListEntryHistoryRequest{
DomainId: c.domainID,
UserId: userID,
AppId: appID,
FirstTreeSize: trustedSnapshot.TreeSize,
Start: start,
PageSize: min(int32((end-start)+1), pageSize),
}, opts...)
if err != nil {
return nil, err
}
epochsReceived += int64(len(resp.GetValues()))
for i, v := range resp.GetValues() {
Vlog.Printf("Processing entry for %v, epoch %v", userID, start+int64(i))
err = c.VerifyGetEntryResponse(ctx, c.domainID, appID, userID, trustedSnapshot, v)
if err != nil {
return nil, err
}
c.updateTrusted(v.GetLogRoot())
// Compress profiles that are equal through time. All
// nil profiles before the first profile are ignored.
profile := v.GetCommitted().GetData()
if bytes.Equal(currentProfile, profile) {
continue
}
// Append the slice and update currentProfile.
profiles[v.GetSmr()] = profile
currentProfile = profile
}
if resp.NextStart == 0 {
break // No more data.
}
start = resp.NextStart // Fetch the next block of results.
}
if epochsReceived < epochsWant {
return nil, ErrIncomplete
}
return profiles, nil
}
// Update creates an UpdateEntryRequest for a user,
// attempting to submit it multiple times until ctx times out.
// Returns context.DeadlineExceeded if ctx times out.
func (c *Client) Update(ctx context.Context, u *tpb.User, signers []signatures.Signer) (*entry.Mutation, error) {
if got, want := u.DomainId, c.domainID; got != want {
return nil, fmt.Errorf("u.DomainID: %v, want %v", got, want)
}
// 1. pb.User + ExistingEntry -> Mutation.
m, err := c.newMutation(ctx, u)
if err != nil {
return nil, err
}
// 2. Queue Mutation.
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
// 3. Wait for update.
m, err = c.waitOnceForUserUpdate(ctx, m)
for {
switch {
case err == ErrWait:
// Try again.
case err == ErrRetry:
if err := c.QueueMutation(ctx, m, signers); err != nil {
return nil, err
}
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
m, err = c.waitOnceForUserUpdate(ctx, m)
}
}
// QueueMutation signs an entry.Mutation and sends it to the server.
func (c *Client) QueueMutation(ctx context.Context, m *entry.Mutation, signers []signatures.Signer) error {
req, err := m.SerializeAndSign(signers, c.trusted.GetTreeSize())
if err != nil {
return fmt.Errorf("SerializeAndSign(): %v", err)
}
Vlog.Printf("Sending Update request...")
// TODO(gdbelvin): Change name from UpdateEntry to QueueUpdate.
_, err = c.cli.UpdateEntry(ctx, req)
return err
}
// newMutation fetches the current index and value for a user and prepares a mutation.
func (c *Client) newMutation(ctx context.Context, u *tpb.User) (*entry.Mutation, error) {
e, err := c.VerifiedGetEntry(ctx, u.AppId, u.UserId)
if err != nil {
return nil, err
}
oldLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
Vlog.Printf("Got current entry...")
index, err := c.Index(e.GetVrfProof(), u.DomainId, u.AppId, u.UserId)
if err != nil {
return nil, err
}
mutation := entry.NewMutation(index, u.DomainId, u.AppId, u.UserId)
if err := mutation.SetPrevious(oldLeaf, true); err != nil {
return nil, err
}
if err := mutation.SetCommitment(u.PublicKeyData); err != nil {
return nil, err
}
if len(u.AuthorizedKeys) != 0 {
if err := mutation.ReplaceAuthorizedKeys(u.AuthorizedKeys); err != nil {
return nil, err
}
}
return mutation, nil
}
// WaitForUserUpdate waits for the mutation to be applied or the context to timeout or cancel.
func (c *Client) WaitForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
for {
m, err := c.waitOnceForUserUpdate(ctx, m)
switch {
case err == ErrWait:
// Try again.
case status.Code(err) == codes.DeadlineExceeded:
// Sometimes the timeout occurs during an rpc.
// Convert to a standard context.DeadlineExceeded for consistent error handling.
return m, context.DeadlineExceeded
default:
return m, err
}
}
}
// waitOnceForUserUpdate waits for the STH to be updated, indicating the next epoch has been created,
// it then queries the current value for the user and checks it against the requested mutation.
// If the current value has not changed, WaitForUpdate returns ErrWait.
// If the current value has changed, but does not match the requested mutation,
// WaitForUpdate returns a new mutation, built with the current value and ErrRetry.
// If the current value matches the request, no mutation and no error are returned.
func (c *Client) waitOnceForUserUpdate(ctx context.Context, m *entry.Mutation) (*entry.Mutation, error) {
if m == nil {
return nil, fmt.Errorf("nil mutation")
}
// Wait for STH to change.
if err := c.WaitForSTHUpdate(ctx, c.trusted.TreeSize+1); err != nil {
return m, err
}
// GetEntry.
e, err := c.VerifiedGetEntry(ctx, m.AppID, m.UserID)
if err != nil {
return m, err
}
Vlog.Printf("Got current entry...")
// Verify.
cntLeaf := e.GetLeafProof().GetLeaf().GetLeafValue()
cntValue, err := entry.FromLeafValue(cntLeaf)
if err != nil {
return m, err
}
switch {
case m.EqualsRequested(cntValue):
return nil, nil
case m.EqualsPrevious(cntValue):
return m, ErrWait
default:
// Race condition: some change got in first.
// Value has changed, but it's not what we asked for.
// Retry based on new cntValue.
// To break the tie between two devices that are fighting
// each other, this error should be propagated back to the user.
copyPreviousLeafData := false
if err := m.SetPrevious(cntLeaf, copyPreviousLeafData); err != nil {
return nil, fmt.Errorf("waitforupdate: SetPrevious(): %v", err)
}
return m, ErrRetry
}
}
// sthForRevision returns the minimum STH.TreeSize that will contain the map revision.
// Map revision N is stored at Log index N, the minimum TreeSize will be N+1.
func sthForRevision(revision int64) int64 {
return revision + 1
}
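// For example, map revision 7 is stored at log index 7, so the smallest log
// containing it has leaves 0..7, i.e. TreeSize 8 = 7 + 1.
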
// WaitForRevision waits until a given map revision is available.
func (c *Client) WaitForRevision(ctx context.Context, revision int64) error {
return c.WaitForSTHUpdate(ctx, sthForRevision(revision))
}
// WaitForSTHUpdate blocks until the log root reported by the server has moved
// to at least treeSize or times out.
func (c *Client) WaitForSTHUpdate(ctx context.Context, treeSize int64) error {
b := &backoff.Backoff{
Min: 100 * time.Millisecond,
Max: 10 * time.Second,
Factor: 1.2,
Jitter: true,
}
for {
select {
case <-time.After(b.Duration()):
resp, err := c.cli.GetLatestEpoch(ctx, &pb.GetLatestEpochRequest{
DomainId: c.domainID,
})
if err != nil {
return err
}
if resp.GetLogRoot().TreeSize >= treeSize {
return nil // We're done!
}
// The LogRoot is not updated yet.
// Wait some more.
continue
case <-ctx.Done():
return ctx.Err()
}
}
} |
// NewFromConfig creates a new client from a config
func NewFromConfig(ktClient pb.KeyTransparencyClient, config *pb.Domain) (*Client, error) {
ktVerifier, err := NewVerifierFromDomain(config) | random_line_split |
LyftDataAnalysis.py |
# coding: utf-8
# # Import library
# run two cells below first
# In[2]:
import numpy as np
import pandas as pd
from collections import defaultdict
import time
from datetime import datetime
import sys
import math
# In[2]:
if 'holidays' not in sys.modules:
get_ipython().system('pip install holidays')
import holidays
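# holidays.US() behaves like a date-keyed container, which is what the holiday
# flag computed later relies on; for example (illustrative dates):
#   datetime(2016, 7, 4) in holidays.US()   # True  (Independence Day)
#   datetime(2016, 7, 5) in holidays.US()   # False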
# # Load three csv files
# Summary: 937 unique drivers and 193502 unique rides in total
# In[2]:
driver_df = pd.read_csv('driver_ids.csv')
ride_df = pd.read_csv('ride_ids.csv')
ride_timestamps_df = pd.read_csv('ride_timestamps.csv')
# In[3]:
'''
Get the shape of each dataframe
'''
print('driver ids:',driver_df.shape)
display(driver_df.head())
print('ride ids:',ride_df.shape)
display(ride_df.head())
print('ride timestamps:',ride_timestamps_df.shape)
display(ride_timestamps_df.head())
# # Inspect Nan and abnormal values
# In[4]:
'''
Nan value inspection
'''
print('driver ids info:------------------------------')
driver_df.info()
print('ride ids info:--------------------------------')
ride_df.info()
print('ride timestamps info:-------------------------')
ride_timestamps_df.info()
'''
ride_timestamps has one Nan value in the column timestamp
TODO: delete this ride or fill it with an artificial value?
'''
display(ride_timestamps_df[ride_timestamps_df.isnull().any(axis=1)])
# In[5]:
'''
Abnormal value inspection
'''
display(driver_df.describe())
display(ride_df.describe())
display(ride_timestamps_df.describe())
# In[6]:
'''
TODO: decide how to handle these rides -- why would ride_distance be <= 0?
TODO: the number of unique ride_id values in ride_df and ride_timestamps doesn't match (193502 vs 194081)
'''
abnormal_ride_df = ride_df[ride_df.ride_distance <= 0]
print(abnormal_ride_df.shape)
display(abnormal_ride_df.head())
# In[7]:
'''
find overlap of driver_id between driver_df and ride_df
TODO: some drivers don't have ride information--->delete? (937 vs 854)
'''
print(len(set(driver_df.driver_id.unique()).intersection(set(ride_df.driver_id.unique()))))
# In[8]:
'''
find overlap of ride_id between ride_df and ride_timestamps_df
TODO: some rides don't have ride timestamps--->delete? (193502 vs 184819)
'''
print(len(set(ride_df.ride_id.unique()).intersection(set(ride_timestamps_df.ride_id.unique()))))
# # Merge all dfs to one df
# In[9]:
'''
merge driver_df and ride_df (Get intersection based on driver_id)
'''
big_df = ride_df.merge(driver_df,left_on='driver_id',right_on='driver_id')
print(big_df.shape)
display(big_df.head())
# In[10]:
# get overlapped ride_id between big_df and ride_timestamps_df
big_df = big_df[big_df['ride_id'].isin(ride_timestamps_df.ride_id.unique())]
big_df.reset_index(drop=True,inplace=True)
print(big_df.shape)
display(big_df.head())
# In[11]:
start = time.time()
# for each unique ride id in big_df
for idx in range(big_df.shape[0]):
rideid = big_df.iloc[idx]['ride_id']
# first find rideid timestamps info in ride_timestamps_df
target = ride_timestamps_df[ride_timestamps_df.ride_id == rideid]
# for each (event,timestamp) pair
for (e,t) in zip(list(target.event),list(target.timestamp)):
big_df.at[idx,e] = t
# double check index
if big_df[big_df.ride_id == rideid]['requested_at'].values[0] != ride_timestamps_df[ride_timestamps_df.ride_id == rideid].iloc[0,-1]:
print(idx)
print('duration:',(time.time()-start)/3600,'hrs')
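# Vectorized alternative (sketch, assuming at most one timestamp per (ride_id, event) pair);
# it avoids the row-by-row loop above, which is what makes this cell take hours:
# wide = ride_timestamps_df.pivot(index='ride_id', columns='event', values='timestamp')
# big_df = big_df.merge(wide, left_on='ride_id', right_index=True, how='left')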
# In[12]:
big_df.info()
# In[13]:
# saved for future use
big_df.to_csv('merged_big_driver_ride_df.csv',index=False)
# # Start to work on calculating extra variables
# If already have file 'merged_big_driver_ride_df.csv', directly start running code below
# In[26]:
def get_fare(driver_rides):
total_fare = 0
# if one single ride
if driver_rides.ndim == 1:
total_fare = (1 + driver_rides['ride_prime_time']/100)*(min(max(5,(2 + 1.15*driver_rides['ride_distance'] *0.00062 + 0.22 *driver_rides['ride_duration']/60 + 1.75)),400))
else:
for (distance,duration,prime) in zip(driver_rides['ride_distance'].values, driver_rides['ride_duration'].values, driver_rides['ride_prime_time'].values):
|
return total_fare
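# Quick sanity check of the fare formula (illustrative ride, not taken from the data:
# ~5 miles = 8047 m, 15 minutes, 50% prime time -> 1.5 * 12.79 ~= 19.18):
example_ride = pd.Series({'ride_distance': 8047, 'ride_duration': 900, 'ride_prime_time': 50})
print(get_fare(example_ride))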
# In[27]:
merged_big_df = pd.read_csv('merged_big_driver_ride_df.csv')
print(merged_big_df.shape)
display(merged_big_df.head())
# In[19]:
# '''
# validate the correctness of combined df by randomly selecting ride ids to verify (random checking)
# '''
# ids = test1.ride_id
# i = np.random.choice(ids,10)
# for x in i:
# display(test1[test1.ride_id == x])
# display(ride_timestamps_df[ride_timestamps_df.ride_id == x])
# ## Get new variables related to ride info
# time of day: 0-6(midnight); 6-9(morning rush); 9-16(normal day); 16-19(evening rush); 19-24(fun) <br>
# season: {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
# In[28]:
# variables related to ride
def add_vars(row):
# convert time to datetime
# source: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
for i in range(5,len(row)):
if type(row[i]) != float:
row[i] = datetime.strptime(row[i], '%Y-%m-%d %H:%M:%S')
# get speed
row['speed(m/s)'] = row['ride_distance'] / row['ride_duration']
# holiday? reference: https://stackoverflow.com/questions/2394235/detecting-a-us-holiday
row['holiday'] = int(row['requested_at'] in holidays.US())
# requested_at time is weekend? (0-6 --> Mon-Sun)
row['weekend'] = int(row['requested_at'].weekday() > 4)
# time of day (6-9 / 9-16 / 16-19 / 19-24 / 0-6)
# {0: [6,9), 1: [9,16), 2: [16,19), 3: [19,24), 4: [0,6)}
if 6 <= row['requested_at'].hour < 9:
row['time of day'] = 0
elif 9 <= row['requested_at'].hour < 16:
row['time of day'] = 1
elif 16 <= row['requested_at'].hour < 19:
row['time of day'] = 2
elif 19 <= row['requested_at'].hour < 24:
row['time of day'] = 3
else:
row['time of day'] = 4
# season (12-2 Winter/3-5 Spring/6-8 Summer/9-11 Autumn)
# {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
if 3 <= row['requested_at'].month <= 5:
row['season'] = 0
elif 6 <= row['requested_at'].month <= 8:
row['season'] = 1
elif 9 <= row['requested_at'].month <= 11:
row['season'] = 2
else:
row['season'] = 3
# time spent from requested_at to arrived_at (efficiency of picking up a customer)
if type(row['arrived_at']) != float:
row['time spent to arrive at the customer(minutes)'] = (row['arrived_at']- row['requested_at']).total_seconds()/60
else:
row['time spent to arrive at the customer(minutes)'] = (row['picked_up_at']- row['requested_at']).total_seconds()/60
# fare for this ride
row['fare'] = get_fare(row)
return row
start = time.time()
rides_added_vars_df = merged_big_df.apply(add_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[29]:
print(rides_added_vars_df.shape)
display(rides_added_vars_df.head())
# In[30]:
# saved for future use
rides_added_vars_df.to_csv('added_variables_rides_info.csv',index=False)
# ## Get new variables related to drivers
# In[22]:
drivers_df = pd.DataFrame(rides_added_vars_df.driver_id.unique())
drivers_df.rename(columns={0:'driver_id'},inplace=True)
drivers_df.reset_index(drop=True,inplace=True)
print(drivers_df.shape)
display(drivers_df.head())
# In[24]:
def add_driver_vars(row):
# first find all rides under driverid
rides = rides_added_vars_df[rides_added_vars_df.driver_id == row['driver_id']]
# Percentage of rides completed in prime time among all rides for each driver
row['prime time rides percentage'] = rides[rides.ride_prime_time != 0].shape[0] / rides.shape[0]
# Average active time per day for each driver (total duration / total days)
sorted_days = rides.groupby('requested_at', as_index=False).mean()['requested_at']
days = len(sorted_days.dt.normalize().unique())
row['average daily active time(hrs/day)'] = rides.ride_duration.sum() / (3600*days)
# Average fare(daily/per ride/monthly) received for each driver
total_fare = get_fare(rides)
row['gross fare(over all rides)'] = total_fare
row['average daily fare'] = total_fare / days
row['average fare per ride'] = total_fare / rides.shape[0]
number_of_months = len(set([(x.year,x.month) for x in sorted_days]))
row['average monthly fare'] = total_fare / number_of_months
# Total number of rides
row['total rides'] = rides.shape[0]
# Number of abnormal rides (ride_distance <= 0)
row['number of abnormal rides'] = rides[rides.ride_distance <= 0].shape[0]
# Ride completion rate (1 - (# of abnormal / total rides))
row['completion rate'] = 1 - (row['number of abnormal rides'] / row['total rides'])
# Unique days of work
row['active days'] = days
# Average time spent on each ride (requested_at --> arrived_at) in minutes
row['average arriving time(minutes)'] = rides['time spent to arrive at the customer(minutes)'].mean()
return row
start = time.time()
drivers_added_vars_df = drivers_df.apply(add_driver_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[25]:
print(drivers_added_vars_df.shape)
display(drivers_added_vars_df.head())
# In[26]:
# saved for future use
drivers_added_vars_df.to_csv('added_variables_drivers_info.csv',index=False)
# # Start to construct models
# In[27]:
'''
rides info combined dataframe
'''
big_rides_info = pd.read_csv('added_variables_rides_info.csv')
print(big_rides_info.shape)
display(big_rides_info.head())
'''
drivers info combined dataframe
'''
big_drivers_info = pd.read_csv('added_variables_drivers_info.csv')
print(big_drivers_info.shape)
display(big_drivers_info.head())
| total_fare += (1 + prime/100)*(min(max(5,(2 + 1.15*distance*0.00062 + 0.22*duration/60 + 1.75)),400)) | conditional_block |
LyftDataAnalysis.py |
# coding: utf-8
# # Import library
# run two cells below first
# In[2]:
import numpy as np
import pandas as pd
from collections import defaultdict
import time
from datetime import datetime
import sys
import math
# In[2]:
if 'holidays' not in sys.modules:
get_ipython().system('pip install holidays')
import holidays
# # Load three csv files
# Summary: 937 unique drivers and 193502 unique rides in total
# In[2]:
driver_df = pd.read_csv('driver_ids.csv')
ride_df = pd.read_csv('ride_ids.csv')
ride_timestamps_df = pd.read_csv('ride_timestamps.csv')
# In[3]:
'''
Get the shape of each dataframe
'''
print('driver ids:',driver_df.shape)
display(driver_df.head())
print('ride ids:',ride_df.shape)
display(ride_df.head())
print('ride timestamps:',ride_timestamps_df.shape)
display(ride_timestamps_df.head())
# # Inspect Nan and abnormal values
# In[4]:
'''
Nan value inspection
'''
print('driver ids info:------------------------------')
driver_df.info()
print('ride ids info:--------------------------------')
ride_df.info()
print('ride timestamps info:-------------------------')
ride_timestamps_df.info()
'''
ride_timestamps has one Nan value in the column timestamp
TODO: delete this ride or fill it with an artificial value?
'''
display(ride_timestamps_df[ride_timestamps_df.isnull().any(axis=1)])
# In[5]:
'''
Abnormal value inspection
'''
display(driver_df.describe())
display(ride_df.describe())
display(ride_timestamps_df.describe())
# In[6]:
'''
TODO: decide how to handle these rides -- why would ride_distance be <= 0?
TODO: the number of unique ride_id values in ride_df and ride_timestamps doesn't match (193502 vs 194081)
'''
abnormal_ride_df = ride_df[ride_df.ride_distance <= 0]
print(abnormal_ride_df.shape)
display(abnormal_ride_df.head())
# In[7]:
'''
find overlap of driver_id between driver_df and ride_df
TODO: some drivers don't have ride information--->delete? (937 vs 854)
'''
print(len(set(driver_df.driver_id.unique()).intersection(set(ride_df.driver_id.unique()))))
# In[8]:
'''
find overlap of ride_id between ride_df and ride_timestamps_df
TODO: some rides don't have ride timestamps--->delete? (193502 vs 184819)
'''
print(len(set(ride_df.ride_id.unique()).intersection(set(ride_timestamps_df.ride_id.unique()))))
# # Merge all dfs to one df
# In[9]:
'''
merge driver_df and ride_df (Get intersection based on driver_id)
'''
big_df = ride_df.merge(driver_df,left_on='driver_id',right_on='driver_id')
print(big_df.shape)
display(big_df.head())
# In[10]:
# get overlapped ride_id between big_df and ride_timestamps_df
big_df = big_df[big_df['ride_id'].isin(ride_timestamps_df.ride_id.unique())]
big_df.reset_index(drop=True,inplace=True)
print(big_df.shape)
display(big_df.head())
# In[11]:
start = time.time()
# for each unique ride id in big_df
for idx in range(big_df.shape[0]):
rideid = big_df.iloc[idx]['ride_id']
# first find rideid timestamps info in ride_timestamps_df
target = ride_timestamps_df[ride_timestamps_df.ride_id == rideid]
# for each (event,timestamp) pair
for (e,t) in zip(list(target.event),list(target.timestamp)):
big_df.at[idx,e] = t
# double check index
if big_df[big_df.ride_id == rideid]['requested_at'].values[0] != ride_timestamps_df[ride_timestamps_df.ride_id == rideid].iloc[0,-1]:
print(idx)
print('duration:',(time.time()-start)/3600,'hrs')
# In[12]:
big_df.info()
# In[13]:
# saved for future use
big_df.to_csv('merged_big_driver_ride_df.csv',index=False)
# # Start to work on calculating extra variables
# If already have file 'merged_big_driver_ride_df.csv', directly start running code below
# In[26]:
def | (driver_rides):
total_fare = 0
# if one single ride
if driver_rides.ndim == 1:
total_fare = (1 + driver_rides['ride_prime_time']/100)*(min(max(5,(2 + 1.15*driver_rides['ride_distance'] *0.00062 + 0.22 *driver_rides['ride_duration']/60 + 1.75)),400))
else:
for (distance,duration,prime) in zip(driver_rides['ride_distance'].values, driver_rides['ride_duration'].values, driver_rides['ride_prime_time'].values):
total_fare += (1 + prime/100)*(min(max(5,(2 + 1.15*distance*0.00062 + 0.22*duration/60 + 1.75)),400))
return total_fare
# In[27]:
merged_big_df = pd.read_csv('merged_big_driver_ride_df.csv')
print(merged_big_df.shape)
display(merged_big_df.head())
# In[19]:
# '''
# validate the correctness of combined df by randomly selecting ride ids to verify (random checking)
# '''
# ids = test1.ride_id
# i = np.random.choice(ids,10)
# for x in i:
# display(test1[test1.ride_id == x])
# display(ride_timestamps_df[ride_timestamps_df.ride_id == x])
# ## Get new variables related to ride info
# time of day: 0-6(midnight); 6-9(morning rush); 9-16(normal day); 16-19(evening rush); 19-24(fun) <br>
# season: {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
# In[28]:
# variables related to ride
def add_vars(row):
# convert time to datetime
# source: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
for i in range(5,len(row)):
if type(row[i]) != float:
row[i] = datetime.strptime(row[i], '%Y-%m-%d %H:%M:%S')
# get speed
row['speed(m/s)'] = row['ride_distance'] / row['ride_duration']
# holiday? reference: https://stackoverflow.com/questions/2394235/detecting-a-us-holiday
row['holiday'] = int(row['requested_at'] in holidays.US())
# requested_at time is weekend? (0-6 --> Mon-Sun)
row['weekend'] = int(row['requested_at'].weekday() > 4)
# time of day (6-9 / 9-16 / 16-19 / 19-24 / 0-6)
# {0: [6,9), 1: [9,16), 2: [16,19), 3: [19,24), 4: [0,6)}
if 6 <= row['requested_at'].hour < 9:
row['time of day'] = 0
elif 9 <= row['requested_at'].hour < 16:
row['time of day'] = 1
elif 16 <= row['requested_at'].hour < 19:
row['time of day'] = 2
elif 19 <= row['requested_at'].hour < 24:
row['time of day'] = 3
else:
row['time of day'] = 4
# season (12-2 Winter/3-5 Spring/6-8 Summer/9-11 Autumn)
# {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
if 3 <= row['requested_at'].month <= 5:
row['season'] = 0
elif 6 <= row['requested_at'].month <= 8:
row['season'] = 1
elif 9 <= row['requested_at'].month <= 11:
row['season'] = 2
else:
row['season'] = 3
# time spent from requested_at to arrived_at (efficiency of picking up a customer)
if type(row['arrived_at']) != float:
row['time spent to arrive at the customer(minutes)'] = (row['arrived_at']- row['requested_at']).total_seconds()/60
else:
row['time spent to arrive at the customer(minutes)'] = (row['picked_up_at']- row['requested_at']).total_seconds()/60
# fare for this ride
row['fare'] = get_fare(row)
return row
start = time.time()
rides_added_vars_df = merged_big_df.apply(add_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[29]:
print(rides_added_vars_df.shape)
display(rides_added_vars_df.head())
# In[30]:
# saved for future use
rides_added_vars_df.to_csv('added_variables_rides_info.csv',index=False)
# ## Get new variables related to drivers
# In[22]:
drivers_df = pd.DataFrame(rides_added_vars_df.driver_id.unique())
drivers_df.rename(columns={0:'driver_id'},inplace=True)
drivers_df.reset_index(drop=True,inplace=True)
print(drivers_df.shape)
display(drivers_df.head())
# In[24]:
def add_driver_vars(row):
# first find all rides under driverid
rides = rides_added_vars_df[rides_added_vars_df.driver_id == row['driver_id']]
# Percentage of rides completed in prime time among all rides for each driver
row['prime time rides percentage'] = rides[rides.ride_prime_time != 0].shape[0] / rides.shape[0]
# Average active time per day for each driver (total duration / total days)
sorted_days = rides.groupby('requested_at', as_index=False).mean()['requested_at']
days = len(sorted_days.dt.normalize().unique())
row['average daily active time(hrs/day)'] = rides.ride_duration.sum() / (3600*days)
# Average fare(daily/per ride/monthly) received for each driver
total_fare = get_fare(rides)
row['gross fare(over all rides)'] = total_fare
row['average daily fare'] = total_fare / days
row['average fare per ride'] = total_fare / rides.shape[0]
number_of_months = len(set([(x.year,x.month) for x in sorted_days]))
row['average monthly fare'] = total_fare / number_of_months
# Total number of rides
row['total rides'] = rides.shape[0]
# Number of abnormal rides (ride_distance <= 0)
row['number of abnormal rides'] = rides[rides.ride_distance <= 0].shape[0]
# Ride completion rate (1 - (# of abnormal / total rides))
row['completion rate'] = 1 - (row['number of abnormal rides'] / row['total rides'])
# Unique days of work
row['active days'] = days
# Average time spent on each ride (requested_at --> arrived_at) in minutes
row['average arriving time(minutes)'] = rides['time spent to arrive at the customer(minutes)'].mean()
return row
start = time.time()
drivers_added_vars_df = drivers_df.apply(add_driver_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
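# Cross-check (sketch): per-driver ride counts from a plain groupby should match
# the row-wise 'total rides' column computed above.
ride_counts = rides_added_vars_df.groupby('driver_id').size()
print(ride_counts.head())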
# In[25]:
print(drivers_added_vars_df.shape)
display(drivers_added_vars_df.head())
# In[26]:
# saved for future use
drivers_added_vars_df.to_csv('added_variables_drivers_info.csv',index=False)
# # Start to construct models
# In[27]:
'''
rides info combined dataframe
'''
big_rides_info = pd.read_csv('added_variables_rides_info.csv')
print(big_rides_info.shape)
display(big_rides_info.head())
'''
drivers info combined dataframe
'''
big_drivers_info = pd.read_csv('added_variables_drivers_info.csv')
print(big_drivers_info.shape)
display(big_drivers_info.head())
| get_fare | identifier_name |
LyftDataAnalysis.py |
# coding: utf-8
# # Import library
# run two cells below first
# In[2]:
import numpy as np
import pandas as pd
from collections import defaultdict
import time
from datetime import datetime
import sys
import math
# In[2]:
if 'holidays' not in sys.modules:
get_ipython().system('pip install holidays')
import holidays
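# Example of the holiday lookup used later in add_vars (illustrative date):
from datetime import date
print(date(2016, 7, 4) in holidays.US()) # True (Independence Day)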
# # Load three csv files
# Summary: 937 unique drivers and 193502 unique rides in total
# In[2]:
driver_df = pd.read_csv('driver_ids.csv')
ride_df = pd.read_csv('ride_ids.csv')
ride_timestamps_df = pd.read_csv('ride_timestamps.csv')
# In[3]:
'''
Get the shape of each dataframe
'''
print('driver ids:',driver_df.shape)
display(driver_df.head())
print('ride ids:',ride_df.shape)
display(ride_df.head())
print('ride timestamps:',ride_timestamps_df.shape)
display(ride_timestamps_df.head())
# # Inspect Nan and abnormal values
# In[4]:
'''
Nan value inspection
'''
print('driver ids info:------------------------------')
driver_df.info()
print('ride ids info:--------------------------------')
ride_df.info()
print('ride timestamps info:-------------------------')
ride_timestamps_df.info()
'''
ride_timestamps has one Nan value in the column timestamp
TODO: delete this ride or fill it with an artificial value?
'''
display(ride_timestamps_df[ride_timestamps_df.isnull().any(axis=1)])
# In[5]:
'''
Abnormal value inspection
'''
display(driver_df.describe())
display(ride_df.describe())
display(ride_timestamps_df.describe())
# In[6]:
'''
TODO: decide how to handle these rides -- why would ride_distance be <= 0?
TODO: the number of unique ride_id values in ride_df and ride_timestamps doesn't match (193502 vs 194081)
'''
abnormal_ride_df = ride_df[ride_df.ride_distance <= 0]
print(abnormal_ride_df.shape)
display(abnormal_ride_df.head())
# In[7]:
'''
find overlap of driver_id between driver_df and ride_df
TODO: some drivers don't have ride information--->delete? (937 vs 854)
'''
print(len(set(driver_df.driver_id.unique()).intersection(set(ride_df.driver_id.unique()))))
# In[8]:
'''
find overlap of ride_id between ride_df and ride_timestamps_df
TODO: some rides don't have ride timestamps--->delete? (193502 vs 184819)
'''
print(len(set(ride_df.ride_id.unique()).intersection(set(ride_timestamps_df.ride_id.unique()))))
# # Merge all dfs to one df
# In[9]:
'''
merge driver_df and ride_df (Get intersection based on driver_id)
'''
big_df = ride_df.merge(driver_df,left_on='driver_id',right_on='driver_id')
print(big_df.shape)
display(big_df.head())
# In[10]:
# get overlapped ride_id between big_df and ride_timestamps_df
big_df = big_df[big_df['ride_id'].isin(ride_timestamps_df.ride_id.unique())]
big_df.reset_index(drop=True,inplace=True)
print(big_df.shape)
display(big_df.head())
# In[11]:
start = time.time()
# for each unique ride id in big_df
for idx in range(big_df.shape[0]):
rideid = big_df.iloc[idx]['ride_id']
# first find rideid timestamps info in ride_timestamps_df
target = ride_timestamps_df[ride_timestamps_df.ride_id == rideid]
# for each (event,timestamp) pair
for (e,t) in zip(list(target.event),list(target.timestamp)):
big_df.at[idx,e] = t
# double check index
if big_df[big_df.ride_id == rideid]['requested_at'].values[0] != ride_timestamps_df[ride_timestamps_df.ride_id == rideid].iloc[0,-1]:
print(idx)
print('duration:',(time.time()-start)/3600,'hrs')
# In[12]:
big_df.info()
# In[13]:
# saved for future use
big_df.to_csv('merged_big_driver_ride_df.csv',index=False)
# # Start to work on calculating extra variables
# If already have file 'merged_big_driver_ride_df.csv', directly start running code below
# In[26]:
def get_fare(driver_rides):
|
# In[27]:
merged_big_df = pd.read_csv('merged_big_driver_ride_df.csv')
print(merged_big_df.shape)
display(merged_big_df.head())
# In[19]:
# '''
# validate the correctness of combined df by randomly selecting ride ids to verify (random checking)
# '''
# ids = test1.ride_id
# i = np.random.choice(ids,10)
# for x in i:
# display(test1[test1.ride_id == x])
# display(ride_timestamps_df[ride_timestamps_df.ride_id == x])
# ## Get new variables related to ride info
# time of day: 0-6(midnight); 6-9(morning rush); 9-16(normal day); 16-19(evening rush); 19-24(fun) <br>
# season: {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
# In[28]:
# variables related to ride
def add_vars(row):
# convert time to datetime
# source: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
for i in range(5,len(row)):
if type(row[i]) != float:
row[i] = datetime.strptime(row[i], '%Y-%m-%d %H:%M:%S')
# get speed
row['speed(m/s)'] = row['ride_distance'] / row['ride_duration']
# holiday? reference: https://stackoverflow.com/questions/2394235/detecting-a-us-holiday
row['holiday'] = int(row['requested_at'] in holidays.US())
# requested_at time is weekend? (0-6 --> Mon-Sun)
row['weekend'] = int(row['requested_at'].weekday() > 4)
# time of day (6-9 / 9-16 / 16-19 / 19-24 / 0-6)
# {0: [6,9), 1: [9,16), 2: [16,19), 3: [19,24), 4: [0,6)}
if 6 <= row['requested_at'].hour < 9:
row['time of day'] = 0
elif 9 <= row['requested_at'].hour < 16:
row['time of day'] = 1
elif 16 <= row['requested_at'].hour < 19:
row['time of day'] = 2
elif 19 <= row['requested_at'].hour < 24:
row['time of day'] = 3
else:
row['time of day'] = 4
# season (12-2 Winter/3-5 Spring/6-8 Summer/9-11 Autumn)
# {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
if 3 <= row['requested_at'].month <= 5:
row['season'] = 0
elif 6 <= row['requested_at'].month <= 8:
row['season'] = 1
elif 9 <= row['requested_at'].month <= 11:
row['season'] = 2
else:
row['season'] = 3
# time spent from requested_at to arrived_at (efficiency of picking up a customer)
if type(row['arrived_at']) != float:
row['time spent to arrive at the customer(minutes)'] = (row['arrived_at']- row['requested_at']).total_seconds()/60
else:
row['time spent to arrive at the customer(minutes)'] = (row['picked_up_at']- row['requested_at']).total_seconds()/60
# fare for this ride
row['fare'] = get_fare(row)
return row
start = time.time()
rides_added_vars_df = merged_big_df.apply(add_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[29]:
print(rides_added_vars_df.shape)
display(rides_added_vars_df.head())
# In[30]:
# saved for future use
rides_added_vars_df.to_csv('added_variables_rides_info.csv',index=False)
# ## Get new variables related to drivers
# In[22]:
drivers_df = pd.DataFrame(rides_added_vars_df.driver_id.unique())
drivers_df.rename(columns={0:'driver_id'},inplace=True)
drivers_df.reset_index(drop=True,inplace=True)
print(drivers_df.shape)
display(drivers_df.head())
# In[24]:
def add_driver_vars(row):
# first find all rides under driverid
rides = rides_added_vars_df[rides_added_vars_df.driver_id == row['driver_id']]
# Percentage of rides completed in prime time among all rides for each driver
row['prime time rides percentage'] = rides[rides.ride_prime_time != 0].shape[0] / rides.shape[0]
# Average active time per day for each driver (total duration / total days)
sorted_days = rides.groupby('requested_at', as_index=False).mean()['requested_at']
days = len(sorted_days.dt.normalize().unique())
row['average daily active time(hrs/day)'] = rides.ride_duration.sum() / (3600*days)
# Average fare(daily/per ride/monthly) received for each driver
total_fare = get_fare(rides)
row['gross fare(over all rides)'] = total_fare
row['average daily fare'] = total_fare / days
row['average fare per ride'] = total_fare / rides.shape[0]
number_of_months = len(set([(x.year,x.month) for x in sorted_days]))
row['average monthly fare'] = total_fare / number_of_months
# Total number of rides
row['total rides'] = rides.shape[0]
# Number of abnormal rides (ride_distance <= 0)
row['number of abnormal rides'] = rides[rides.ride_distance <= 0].shape[0]
# Ride completion rate (1 - (# of abnormal / total rides))
row['completion rate'] = 1 - (row['number of abnormal rides'] / row['total rides'])
# Unique days of work
row['active days'] = days
# Average time spent on each ride (requested_at --> arrived_at) in minutes
row['average arriving time(minutes)'] = rides['time spent to arrive at the customer(minutes)'].mean()
return row
start = time.time()
drivers_added_vars_df = drivers_df.apply(add_driver_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[25]:
print(drivers_added_vars_df.shape)
display(drivers_added_vars_df.head())
# In[26]:
# saved for future use
drivers_added_vars_df.to_csv('added_variables_drivers_info.csv',index=False)
# # Start to construct models
# In[27]:
'''
rides info combined dataframe
'''
big_rides_info = pd.read_csv('added_variables_rides_info.csv')
print(big_rides_info.shape)
display(big_rides_info.head())
'''
drivers info combined dataframe
'''
big_drivers_info = pd.read_csv('added_variables_drivers_info.csv')
print(big_drivers_info.shape)
display(big_drivers_info.head())
| total_fare = 0
# if one single ride
if driver_rides.ndim == 1:
total_fare = (1 + driver_rides['ride_prime_time']/100)*(min(max(5,(2 + 1.15*driver_rides['ride_distance'] *0.00062 + 0.22 *driver_rides['ride_duration']/60 + 1.75)),400))
else:
for (distance,duration,prime) in zip(driver_rides['ride_distance'].values, driver_rides['ride_duration'].values, driver_rides['ride_prime_time'].values):
total_fare += (1 + prime/100)*(min(max(5,(2 + 1.15*distance*0.00062 + 0.22*duration/60 + 1.75)),400))
return total_fare | identifier_body |
LyftDataAnalysis.py | # coding: utf-8
# # Import library
# run two cells below first
# In[2]:
import numpy as np
import pandas as pd
from collections import defaultdict
import time
from datetime import datetime
import sys
import math
# In[2]:
if 'holidays' not in sys.modules:
get_ipython().system('pip install holidays')
import holidays
# # Load three csv files
# Summary: 937 unique drivers and 193502 unique rides in total
# In[2]:
driver_df = pd.read_csv('driver_ids.csv')
ride_df = pd.read_csv('ride_ids.csv')
ride_timestamps_df = pd.read_csv('ride_timestamps.csv')
# In[3]:
'''
Get the shape of each dataframe
'''
print('driver ids:',driver_df.shape)
display(driver_df.head())
print('ride ids:',ride_df.shape)
display(ride_df.head())
print('ride timestamps:',ride_timestamps_df.shape)
display(ride_timestamps_df.head())
# # Inspect Nan and abnormal values
# In[4]:
'''
Nan value inspection
'''
print('driver ids info:------------------------------')
driver_df.info()
print('ride ids info:--------------------------------')
ride_df.info()
print('ride timestamps info:-------------------------')
ride_timestamps_df.info()
'''
ride_timestamps has one Nan value in the column timestamp
TODO: delete this ride or fill it with an artificial value?
'''
display(ride_timestamps_df[ride_timestamps_df.isnull().any(axis=1)])
# In[5]:
'''
Abnormal value inspection
'''
display(driver_df.describe())
display(ride_df.describe())
display(ride_timestamps_df.describe())
# In[6]:
'''
TODO: decide how to handle these rides -- why would ride_distance be <= 0?
TODO: the number of unique ride_id values in ride_df and ride_timestamps doesn't match (193502 vs 194081)
'''
abnormal_ride_df = ride_df[ride_df.ride_distance <= 0]
print(abnormal_ride_df.shape)
display(abnormal_ride_df.head())
# In[7]:
'''
find overlap of driver_id between driver_df and ride_df
TODO: some drivers don't have ride information--->delete? (937 vs 854)
'''
print(len(set(driver_df.driver_id.unique()).intersection(set(ride_df.driver_id.unique()))))
# In[8]:
'''
find overlap of ride_id between ride_df and ride_timestamps_df
TODO: some rides don't have ride timestamps--->delete? (193502 vs 184819)
'''
print(len(set(ride_df.ride_id.unique()).intersection(set(ride_timestamps_df.ride_id.unique()))))
# # Merge all dfs to one df
# In[9]:
'''
merge driver_df and ride_df (Get intersection based on driver_id)
'''
big_df = ride_df.merge(driver_df,left_on='driver_id',right_on='driver_id')
print(big_df.shape)
display(big_df.head())
# In[10]:
# get overlapped ride_id between big_df and ride_timestamps_df
big_df = big_df[big_df['ride_id'].isin(ride_timestamps_df.ride_id.unique())]
big_df.reset_index(drop=True,inplace=True)
print(big_df.shape)
display(big_df.head())
# In[11]:
start = time.time()
# for each unique ride id in big_df
for idx in range(big_df.shape[0]):
rideid = big_df.iloc[idx]['ride_id']
# first find rideid timestamps info in ride_timestamps_df
target = ride_timestamps_df[ride_timestamps_df.ride_id == rideid]
# for each (event,timestamp) pair
for (e,t) in zip(list(target.event),list(target.timestamp)):
big_df.at[idx,e] = t
# double check index
if big_df[big_df.ride_id == rideid]['requested_at'].values[0] != ride_timestamps_df[ride_timestamps_df.ride_id == rideid].iloc[0,-1]:
print(idx)
print('duration:',(time.time()-start)/3600,'hrs')
# In[12]:
big_df.info()
# In[13]:
# saved for future use
big_df.to_csv('merged_big_driver_ride_df.csv',index=False)
# # Start to work on calculating extra variables
# If already have file 'merged_big_driver_ride_df.csv', directly start running code below
# In[26]:
def get_fare(driver_rides):
total_fare = 0
# if one single ride
if driver_rides.ndim == 1:
total_fare = (1 + driver_rides['ride_prime_time']/100)*(min(max(5,(2 + 1.15*driver_rides['ride_distance'] *0.00062 + 0.22 *driver_rides['ride_duration']/60 + 1.75)),400))
else:
for (distance,duration,prime) in zip(driver_rides['ride_distance'].values, driver_rides['ride_duration'].values, driver_rides['ride_prime_time'].values):
total_fare += (1 + prime/100)*(min(max(5,(2 + 1.15*distance*0.00062 + 0.22*duration/60 + 1.75)),400))
return total_fare
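# Fully vectorized equivalent (sketch) for a whole DataFrame of rides:
# base = 2 + 1.15*ride_df['ride_distance']*0.00062 + 0.22*ride_df['ride_duration']/60 + 1.75
# fare = (1 + ride_df['ride_prime_time']/100) * base.clip(lower=5, upper=400)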
# In[27]:
merged_big_df = pd.read_csv('merged_big_driver_ride_df.csv')
print(merged_big_df.shape)
display(merged_big_df.head())
# In[19]:
# '''
# validate the correctness of combined df by randomly selecting ride ids to verify (random checking)
# '''
# ids = test1.ride_id
# i = np.random.choice(ids,10)
# for x in i:
# display(test1[test1.ride_id == x])
# display(ride_timestamps_df[ride_timestamps_df.ride_id == x])
# ## Get new variables related to ride info
# time of day: 0-6(midnight); 6-9(morning rush); 9-16(normal day); 16-19(evening rush); 19-24(fun) <br>
# season: {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
# In[28]:
# variables related to ride
def add_vars(row):
# convert time to datetime
# source: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
for i in range(5,len(row)):
if type(row[i]) != float:
row[i] = datetime.strptime(row[i], '%Y-%m-%d %H:%M:%S')
# get speed
row['speed(m/s)'] = row['ride_distance'] / row['ride_duration']
# holiday? reference: https://stackoverflow.com/questions/2394235/detecting-a-us-holiday
row['holiday'] = int(row['requested_at'] in holidays.US())
# requested_at time is weekend? (0-6 --> Mon-Sun)
row['weekend'] = int(row['requested_at'].weekday() > 4)
# time of day (6-9 / 9-16 / 16-19 / 19-24 / 0-6)
# {0: [6,9), 1: [9,16), 2: [16,19), 3: [19,24), 4: [0,6)}
if 6 <= row['requested_at'].hour < 9:
row['time of day'] = 0
elif 9 <= row['requested_at'].hour < 16:
row['time of day'] = 1
elif 16 <= row['requested_at'].hour < 19:
row['time of day'] = 2
elif 19 <= row['requested_at'].hour < 24:
row['time of day'] = 3
else:
row['time of day'] = 4
# season (12-2 Winter/3-5 Spring/6-8 Summer/9-11 Autumn)
# {0: Spring, 1: Summer, 2: Autumn, 3: Winter}
if 3 <= row['requested_at'].month <= 5:
row['season'] = 0
elif 6 <= row['requested_at'].month <= 8:
row['season'] = 1
elif 9 <= row['requested_at'].month <= 11:
row['season'] = 2
else:
row['season'] = 3
# time spent from requested_at to arrived_at (efficiency of picking up a customer)
if type(row['arrived_at']) != float:
row['time spent to arrive at the customer(minutes)'] = (row['arrived_at']- row['requested_at']).total_seconds()/60
else:
row['time spent to arrive at the customer(minutes)'] = (row['picked_up_at']- row['requested_at']).total_seconds()/60
# fare for this ride
row['fare'] = get_fare(row)
return row
| rides_added_vars_df = merged_big_df.apply(add_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[29]:
print(rides_added_vars_df.shape)
display(rides_added_vars_df.head())
# In[30]:
# saved for future use
rides_added_vars_df.to_csv('added_variables_rides_info.csv',index=False)
# ## Get new variables related to drivers
# In[22]:
drivers_df = pd.DataFrame(rides_added_vars_df.driver_id.unique())
drivers_df.rename(columns={0:'driver_id'},inplace=True)
drivers_df.reset_index(drop=True,inplace=True)
print(drivers_df.shape)
display(drivers_df.head())
# In[24]:
def add_driver_vars(row):
# first find all rides under driverid
rides = rides_added_vars_df[rides_added_vars_df.driver_id == row['driver_id']]
# Percentage of rides completed in prime time among all rides for each driver
row['prime time rides percentage'] = rides[rides.ride_prime_time != 0].shape[0] / rides.shape[0]
# Average active time per day for each driver (total duration / total days)
sorted_days = rides.groupby('requested_at', as_index=False).mean()['requested_at']
days = len(sorted_days.dt.normalize().unique())
row['average daily active time(hrs/day)'] = rides.ride_duration.sum() / (3600*days)
# Average fare(daily/per ride/monthly) received for each driver
total_fare = get_fare(rides)
row['gross fare(over all rides)'] = total_fare
row['average daily fare'] = total_fare / days
row['average fare per ride'] = total_fare / rides.shape[0]
number_of_months = len(set([(x.year,x.month) for x in sorted_days]))
row['average monthly fare'] = total_fare / number_of_months
# Total number of rides
row['total rides'] = rides.shape[0]
# Number of abnormal rides (ride_distance <= 0)
row['number of abnormal rides'] = rides[rides.ride_distance <= 0].shape[0]
# Ride completion rate (1 - (# of abnormal / total rides))
row['completion rate'] = 1 - (row['number of abnormal rides'] / row['total rides'])
# Unique days of work
row['active days'] = days
# Average time spent on each ride (requested_at --> arrived_at) in minutes
row['average arriving time(minutes)'] = rides['time spent to arrive at the customer(minutes)'].mean()
return row
start = time.time()
drivers_added_vars_df = drivers_df.apply(add_driver_vars,axis=1)
print('duration:',(time.time() - start)/60,'minutes')
# In[25]:
print(drivers_added_vars_df.shape)
display(drivers_added_vars_df.head())
# In[26]:
# saved for future use
drivers_added_vars_df.to_csv('added_variables_drivers_info.csv',index=False)
# # Start to construct models
# In[27]:
'''
rides info combined dataframe
'''
big_rides_info = pd.read_csv('added_variables_rides_info.csv')
print(big_rides_info.shape)
display(big_rides_info.head())
'''
drivers info combined dataframe
'''
big_drivers_info = pd.read_csv('added_variables_drivers_info.csv')
print(big_drivers_info.shape)
display(big_drivers_info.head()) | start = time.time() | random_line_split |
lib.rs | //! This crate is part of [Sophia],
//! an [RDF] and [Linked Data] toolkit in Rust.
//!
//! Terms are the building blocks of an [RDF] graph.
//! There are four types of terms: IRIs, blank nodes (BNode for short),
//! literals and variables.
//!
//! NB: variable only exist in [generalized RDF].
//!
//! This module defines a generic type [`Term`](enum.Term.html)
//! which can be derived differently depending on your needs.
//!
//! * [`RefTerm<'a>`](type.RefTerm.html) (alias of `Term<&'a str>`)
//! should be used for very short-lived terms,
//! *i.e.* terms that live less than `'a`,
//! which is the lifetime of their underlying text.
//!
//! * [`BoxTerm`](type.BoxTerm.html) (alias of `Term<Box<str>>`)
//! should be used when the term may outlive the text used to create it.
//!
//! * [`RcTerm`](type.RcTerm.html) (alias of `Term<Rc<str>>`)
//! should also be used for long-lived terms,
//! especially if they need to be cloned multiple times.
//! The use of `Rc` prevents the duplication of the underlying text,
//! while ensuring that it is cleaned when appropriate.
//!
//! * [`ArcTerm`](type.ArcTerm.html) (alias of `Term<Arc<str>>`)
//! should be used when, additionally,
//! terms need to be sent to other threads.
//!
//! * [`StaticTerm`](type.StaticTerm.html) (alias of `Term<&'static str>`)
//! is a special case of `RefTerm`
//! where the underlying text is a static string.
//! Those terms can live as long as the program runs,
//! and be cloned and sent without any restriction.
//!
//! * [`MownTerm`](type.MownTerm.html) (alias of `Term<MownStr<'a>>`)
//! should be used in situations where some terms can borrow their data,
//! while others need to own it.
//!
//! [Sophia]: https://docs.rs/sophia/latest/sophia/
//! [RDF]: https://www.w3.org/TR/rdf-primer/
//! [Linked Data]: http://linkeddata.org/
//! [generalized RDF]: https://docs.rs/sophia/latest/sophia/#generalized-vs-strict-rdf-model
#![deny(missing_docs)]
use mownstr::MownStr;
use sophia_api::term::{
term_cmp, term_eq, term_format, term_hash, term_to_string, CopyTerm, RawValue, SimpleIri,
TTerm, TermKind, TryCopyTerm,
};
use std::borrow::Borrow;
use std::convert::TryInto;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::rc::Rc;
use std::sync::Arc;
pub mod factory;
pub mod index_map;
pub mod variable;
use self::variable::Variable;
pub mod blank_node;
use self::blank_node::BlankNode;
pub mod iri;
use self::iri::{Iri, Normalization};
pub mod literal;
use literal::convert::{AsLiteral, DataType, NativeLiteral};
use literal::Literal;
mod _display;
mod _error;
pub use self::_error::*;
/// Generic type for RDF terms.
///
/// See [module documentation](index.html) for more detail.
///
#[derive(Clone, Copy, Debug, Eq, Ord)]
pub enum Term<TD>
where
TD: TermData,
{
/// An IRI referencing a resource.
Iri(Iri<TD>),
/// A blank node.
///
/// Also known as existentially quantified variable.
BNode(BlankNode<TD>),
/// An RDF literal.
Literal(Literal<TD>),
/// A universally quantified variable like in SPARQL or Notation3.
Variable(Variable<TD>),
}
/// Trait alias for types holding the textual data of terms.
pub trait TermData: AsRef<str> + Clone + Eq + Hash {}
impl<T> TermData for T where T: AsRef<str> + Clone + Eq + Hash {}
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type BoxTerm = Term<Box<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type RcTerm = Term<Rc<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type ArcTerm = Term<Arc<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type RefTerm<'a> = Term<&'a str>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type StaticTerm = RefTerm<'static>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type MownTerm<'a> = Term<MownStr<'a>>;
impl<T> Term<T>
where
T: TermData,
{
/// Return a new IRI term from the given text.
///
/// May fail if `txt` is not a valid IRI. | U: AsRef<str>,
T: From<U>,
{
Iri::<T>::new(iri).map(Into::into)
}
/// Return a new IRI term from the two given parts (prefix and suffix).
///
/// May fail if the concatenation of `ns` and `suffix`
/// does not produce a valid IRI.
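    ///
    /// A minimal usage sketch (the namespace and suffix below are arbitrary examples):
    ///
    /// ```
    /// use sophia_term::BoxTerm;
    /// let alice = BoxTerm::new_iri_suffixed("http://example.org/", "alice").unwrap();
    /// ```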
pub fn new_iri_suffixed<U, V>(ns: U, suffix: V) -> Result<Term<T>>
where
U: AsRef<str>,
V: AsRef<str>,
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed(ns, suffix).map(Into::into)
}
/// Return a new blank node term with the given bnode ID.
///
/// Currently, this may never fail;
/// however it returns a result for homogeneity with other constructor methods,
/// and because future versions may be more picky regarding bnode IDs.
pub fn new_bnode<U>(id: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
BlankNode::new(id).map(Into::into)
}
/// Return a new literal term with the given value and language tag.
///
/// May fail if the language tag is not a valid BCP47 language tag.
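    ///
    /// A minimal usage sketch (value and tag are arbitrary examples):
    ///
    /// ```
    /// use sophia_term::BoxTerm;
    /// let name = BoxTerm::new_literal_lang("Alice", "en").unwrap();
    /// ```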
pub fn new_literal_lang<U, V>(txt: U, lang: V) -> Result<Self>
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang(txt, lang).map(Into::into)
}
/// Return a new literal term with the given value and datatype.
///
/// May fail if `dt` is not an IRI.
pub fn new_literal_dt<U, V>(txt: U, dt: V) -> Result<Self>
where
T: From<U>,
V: TryInto<Iri<T>>,
TermError: From<<V as TryInto<Iri<T>>>::Error>,
{
Ok(Literal::new_dt(txt, dt.try_into()?).into())
}
/// Return a new variable term with the given name.
///
/// May fail if `name` is not a valid variable name.
pub fn new_variable<U>(name: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
Variable::new(name).map(Into::into)
}
/// Borrow the inner contents of the term.
pub fn as_ref(&self) -> Term<&T> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref()),
Literal(lit) => Literal(lit.as_ref()),
BNode(bn) => BNode(bn.as_ref()),
Variable(var) => Variable(var.as_ref()),
}
}
/// Borrow the inner contents of the term as `&str`.
pub fn as_ref_str(&self) -> Term<&str> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref_str()),
Literal(lit) => Literal(lit.as_ref_str()),
BNode(bn) => BNode(bn.as_ref_str()),
Variable(var) => Variable(var.as_ref_str()),
}
}
/// Create a new term by applying `f` to the `TermData` of `self`.
pub fn map<F, TD2>(self, f: F) -> Term<TD2>
where
F: FnMut(T) -> TD2,
TD2: TermData,
{
use self::Term::*;
match self {
Iri(iri) => Iri(iri.map(f)),
Literal(lit) => Literal(lit.map(f)),
BNode(bn) => BNode(bn.map(f)),
Variable(var) => Variable(var.map(f)),
}
}
/// Maps the term using the `Into` trait.
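    ///
    /// For instance, a `RefTerm` can be promoted to an owned `BoxTerm`
    /// (sketch, with an arbitrary IRI):
    ///
    /// ```
    /// use sophia_term::{BoxTerm, RefTerm};
    /// let short_lived = RefTerm::new_iri("http://example.org/x").unwrap();
    /// let owned: BoxTerm = short_lived.map_into();
    /// ```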
pub fn map_into<TD2>(self) -> Term<TD2>
where
T: Into<TD2>,
TD2: TermData,
{
self.map(Into::into)
}
/// Clone self while transforming the inner `TermData` with the given
/// factory.
///
/// This is done in one step in contrast to calling `clone().map(factory)`.
pub fn clone_map<'a, U, F>(&'a self, factory: F) -> Term<U>
where
U: TermData,
F: FnMut(&'a str) -> U,
{
use self::Term::*;
match self {
Iri(iri) => iri.clone_map(factory).into(),
BNode(bn) => bn.clone_map(factory).into(),
Literal(lit) => lit.clone_map(factory).into(),
Variable(var) => var.clone_map(factory).into(),
}
}
/// Apply `clone_map()` using the `Into` trait.
pub fn clone_into<'src, U>(&'src self) -> Term<U>
where
U: TermData + From<&'src str>,
{
self.clone_map(Into::into)
}
/// Return a term equivalent to this one,
/// with all IRIs (if any)
/// internally represented with all its data in `ns`, and an empty `suffix`.
///
/// # Performances
/// The returned term will borrow data from this one as much as possible,
/// but strings may be allocated in case a concatenation is required.
pub fn normalized(&self, policy: Normalization) -> MownTerm {
match self {
Term::Iri(iri) => iri.normalized(policy).into(),
Term::Literal(lit) => lit.normalized(policy).into(),
_ => self.as_ref_str().map_into(),
}
}
/// Create a new IRI-term from a given IRI without checking its validity.
///
/// # Pre-conditions
///
    /// This function performs no check that the resulting IRI is valid. This is
/// a contract that is generally assumed. Breaking it could result in
/// unexpected behavior.
///
/// However, in `debug` builds assertions that perform checks are enabled.
pub fn new_iri_unchecked<U>(iri: U) -> Term<T>
where
T: From<U>,
{
Iri::<T>::new_unchecked(iri).into()
}
/// Create a new IRI-term from a given namespace and suffix.
///
/// # Pre-conditions
///
/// It is expected that
///
/// * the resulting IRI is valid per RFC3987,
/// * `suffix` is not the empty string
/// (otherwise, [`new_iri_unchecked`](#method.new_iri_unchecked) should be used instead).
///
/// This is a contract that is generally assumed.
/// Breaking it could result in unexpected behavior.
/// However in `debug` mode, assertions that perform checks are enabled.
pub fn new_iri_suffixed_unchecked<U, V>(ns: U, suffix: V) -> Term<T>
where
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed_unchecked(ns, suffix).into()
}
/// Return a new blank node term.
///
/// # Pre-condition
///
/// This function requires that `id` is a valid bnode ID.
pub fn new_bnode_unchecked<U>(id: U) -> Term<T>
where
U: AsRef<str>,
T: From<U>,
{
BlankNode::<T>::new_unchecked(id).into()
}
/// Return a literal term.
///
/// # Pre-condition
///
/// This function requires that `lang` is a valid language tag.
/// In debug mode this constraint is asserted.
pub fn new_literal_lang_unchecked<U, V>(txt: U, lang: V) -> Self
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang_unchecked(txt, lang).into()
}
/// Return a typed literal term.
///
/// # Panics
///
/// Panics if `dt` cannot be converted into an IRI.
pub fn new_literal_dt_unchecked<U, V>(txt: U, dt: V) -> Self
where
T: From<U>,
V: TryInto<Iri<T>>,
<V as TryInto<Iri<T>>>::Error: Debug,
{
Literal::new_dt(txt, dt.try_into().unwrap()).into()
}
/// Return a new variable term.
///
/// # Pre-condition
///
/// This function requires that `name` is a valid variable name.
pub fn new_variable_unchecked<U>(name: U) -> Term<T>
where
U: AsRef<str>,
T: From<U>,
{
Variable::<T>::new_unchecked(name).into()
}
}
impl<T: TermData> TTerm for Term<T> {
fn kind(&self) -> TermKind {
use Term::*;
match self {
Iri(_) => TermKind::Iri,
Literal(_) => TermKind::Literal,
BNode(_) => TermKind::BlankNode,
Variable(_) => TermKind::Variable,
}
}
fn value_raw(&self) -> RawValue {
use Term::*;
match self {
Iri(i) => i.value_raw(),
Literal(l) => l.value_raw(),
BNode(b) => b.value_raw(),
Variable(v) => v.value_raw(),
}
}
fn datatype(&self) -> Option<SimpleIri> {
if let Term::Literal(lit) = self {
lit.datatype()
} else {
None
}
}
fn language(&self) -> Option<&str> {
if let Term::Literal(lit) = self {
lit.language()
} else {
None
}
}
fn as_dyn(&self) -> &dyn TTerm {
self
}
}
impl<TD, TE> PartialEq<TE> for Term<TD>
where
TD: TermData,
TE: TTerm + ?Sized,
{
fn eq(&self, other: &TE) -> bool {
term_eq(self, other)
}
}
impl<TD, TE> PartialOrd<TE> for Term<TD>
where
TD: TermData,
TE: TTerm + ?Sized,
{
fn partial_cmp(&self, other: &TE) -> Option<std::cmp::Ordering> {
Some(term_cmp(self, other))
}
}
impl<TD> Hash for Term<TD>
where
TD: TermData,
{
fn hash<H: Hasher>(&self, state: &mut H) {
term_hash(self, state)
}
}
impl<TD> From<Iri<TD>> for Term<TD>
where
TD: TermData,
{
fn from(iri: Iri<TD>) -> Self {
Term::Iri(iri)
}
}
impl<TD> From<Literal<TD>> for Term<TD>
where
TD: TermData,
{
fn from(lit: Literal<TD>) -> Self {
Term::Literal(lit)
}
}
impl<TD> From<Variable<TD>> for Term<TD>
where
TD: TermData,
{
fn from(var: Variable<TD>) -> Self {
Term::Variable(var)
}
}
impl<TD> From<BlankNode<TD>> for Term<TD>
where
TD: TermData,
{
fn from(bn: BlankNode<TD>) -> Self {
Term::BNode(bn)
}
}
impl<TD> From<String> for Term<TD>
where
TD: TermData + From<Box<str>> + From<&'static str>,
{
fn from(txt: String) -> Self {
txt.as_literal().into()
}
}
impl<'a> From<SimpleIri<'a>> for RefTerm<'a> {
fn from(other: SimpleIri<'a>) -> Self {
Iri::from(other).into()
}
}
impl<T, TD> From<NativeLiteral<T>> for Term<TD>
where
T: DataType + ?Sized,
TD: TermData + From<Box<str>> + From<&'static str>,
{
fn from(other: NativeLiteral<T>) -> Self {
Literal::from(other).into()
}
}
impl<'a, T> From<NativeLiteral<T, &'a str>> for RefTerm<'a>
where
T: DataType + ?Sized,
{
fn from(other: NativeLiteral<T, &'a str>) -> Self {
Literal::from(other).into()
}
}
impl<TD> CopyTerm for Term<TD>
where
TD: TermData + for<'x> From<&'x str>,
{
fn copy<T>(term: &T) -> Self
where
T: TTerm + ?Sized,
{
match term.kind() {
TermKind::Iri => Term::Iri(Iri::try_copy(term).unwrap()),
TermKind::Literal => Term::Literal(Literal::try_copy(term).unwrap()),
TermKind::BlankNode => Term::BNode(BlankNode::try_copy(term).unwrap()),
TermKind::Variable => Term::Variable(Variable::try_copy(term).unwrap()),
}
}
}
impl<'a, T> From<&'a T> for RefTerm<'a>
where
T: TTerm + ?Sized,
{
fn from(t: &'a T) -> Self {
let v = t.value_raw();
match t.kind() {
TermKind::Iri => Term::Iri(match v.1 {
None => Iri::new_unchecked(v.0),
Some(suffix) => Iri::new_suffixed_unchecked(v.0, suffix),
}),
TermKind::Literal => Term::Literal(match t.language() {
None => {
let dt: Iri<&'a str> = t.datatype().unwrap().into();
Literal::new_dt(v.0, dt)
}
Some(tag) => Literal::new_lang_unchecked(v.0, tag),
}),
TermKind::BlankNode => Term::BNode(BlankNode::new_unchecked(v.0)),
TermKind::Variable => Term::Variable(Variable::new_unchecked(v.0)),
}
}
}
impl<'a, TD: TermData + 'a> Borrow<dyn TTerm + 'a> for Term<TD> {
fn borrow(&self) -> &(dyn TTerm + 'a) {
self as _
}
}
#[cfg(test)]
pub(crate) mod test;
/// This line re-exports `same_graph_name` from `sophia_api::term`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub use sophia_api::term::same_graph_name;
/// This module re-exports things from `sophia_api::ns`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub mod ns {
pub use sophia_api::ns::*;
}
/// This line re-exports the module `sophia_api::term::matcher`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub use sophia_api::term::matcher; | pub fn new_iri<U>(iri: U) -> Result<Term<T>>
where | random_line_split |
lib.rs | //! This crate is part of [Sophia],
//! an [RDF] and [Linked Data] toolkit in Rust.
//!
//! Terms are the building blocks of an [RDF] graph.
//! There are four types of terms: IRIs, blank nodes (BNode for short),
//! literals and variables.
//!
//! NB: variable only exist in [generalized RDF].
//!
//! This module defines a generic type [`Term`](enum.Term.html)
//! which can be derived differently depending on your needs.
//!
//! * [`RefTerm<'a>`](type.RefTerm.html) (alias of `Term<&'a str>`)
//! should be used for very short-lived terms,
//! *i.e.* terms that live less than `'a`,
//! which is the lifetime of their underlying text.
//!
//! * [`BoxTerm`](type.BoxTerm.html) (alias of `Term<Box<str>>`)
//! should be used when the term may outlive the text used to create it.
//!
//! * [`RcTerm`](type.RcTerm.html) (alias of `Term<Rc<str>>`)
//! should also be used for long-lived terms,
//! especially if they need to be cloned multiple times.
//! The use of `Rc` prevents the duplication of the underlying text,
//! while ensuring that it is cleaned when appropriate.
//!
//! * [`ArcTerm`](type.ArcTerm.html) (alias of `Term<Arc<str>>`)
//! should be used when, additionally,
//! terms need to be sent to other threads.
//!
//! * [`StaticTerm`](type.StaticTerm.html) (alias of `Term<&'static str>`)
//! is a special case of `RefTerm`
//! where the underlying text is a static string.
//! Those terms can live as long as the program runs,
//! and be cloned and sent without any restriction.
//!
//! * [`MownTerm`](type.MownTerm.html) (alias of `Term<MownStr<'a>>`)
//! should be used in situations where some terms can borrow their data,
//! while others need to own it.
//!
//! [Sophia]: https://docs.rs/sophia/latest/sophia/
//! [RDF]: https://www.w3.org/TR/rdf-primer/
//! [Linked Data]: http://linkeddata.org/
//! [generalized RDF]: https://docs.rs/sophia/latest/sophia/#generalized-vs-strict-rdf-model
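//!
//! # Example
//!
//! A minimal sketch (the IRI and literal below are only illustrations):
//!
//! ```
//! use sophia_term::BoxTerm;
//! let iri = BoxTerm::new_iri("http://example.org/alice").expect("valid IRI");
//! let name = BoxTerm::new_literal_lang("Alice", "en").expect("valid language tag");
//! assert_ne!(iri, name);
//! ```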
#![deny(missing_docs)]
use mownstr::MownStr;
use sophia_api::term::{
term_cmp, term_eq, term_format, term_hash, term_to_string, CopyTerm, RawValue, SimpleIri,
TTerm, TermKind, TryCopyTerm,
};
use std::borrow::Borrow;
use std::convert::TryInto;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::rc::Rc;
use std::sync::Arc;
pub mod factory;
pub mod index_map;
pub mod variable;
use self::variable::Variable;
pub mod blank_node;
use self::blank_node::BlankNode;
pub mod iri;
use self::iri::{Iri, Normalization};
pub mod literal;
use literal::convert::{AsLiteral, DataType, NativeLiteral};
use literal::Literal;
mod _display;
mod _error;
pub use self::_error::*;
/// Generic type for RDF terms.
///
/// See [module documentation](index.html) for more detail.
///
#[derive(Clone, Copy, Debug, Eq, Ord)]
pub enum Term<TD>
where
TD: TermData,
{
/// An IRI referencing a resource.
Iri(Iri<TD>),
/// A blank node.
///
/// Also known as existentially quantified variable.
BNode(BlankNode<TD>),
/// An RDF literal.
Literal(Literal<TD>),
/// A universally quantified variable like in SPARQL or Notation3.
Variable(Variable<TD>),
}
/// Trait alias for types holding the textual data of terms.
pub trait TermData: AsRef<str> + Clone + Eq + Hash {}
impl<T> TermData for T where T: AsRef<str> + Clone + Eq + Hash {}
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type BoxTerm = Term<Box<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type RcTerm = Term<Rc<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type ArcTerm = Term<Arc<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type RefTerm<'a> = Term<&'a str>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type StaticTerm = RefTerm<'static>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type MownTerm<'a> = Term<MownStr<'a>>;
impl<T> Term<T>
where
T: TermData,
{
/// Return a new IRI term from the given text.
///
/// May fail if `txt` is not a valid IRI.
pub fn new_iri<U>(iri: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
Iri::<T>::new(iri).map(Into::into)
}
/// Return a new IRI term from the two given parts (prefix and suffix).
///
/// May fail if the concatenation of `ns` and `suffix`
/// does not produce a valid IRI.
pub fn new_iri_suffixed<U, V>(ns: U, suffix: V) -> Result<Term<T>>
where
U: AsRef<str>,
V: AsRef<str>,
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed(ns, suffix).map(Into::into)
}
/// Return a new blank node term with the given bnode ID.
///
/// Currently, this may never fail;
/// however it returns a result for homogeneity with other constructor methods,
/// and because future versions may be more picky regarding bnode IDs.
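    ///
    /// A minimal usage sketch (the bnode ID is arbitrary):
    ///
    /// ```
    /// use sophia_term::BoxTerm;
    /// let b = BoxTerm::new_bnode("b0").unwrap();
    /// ```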
pub fn new_bnode<U>(id: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
BlankNode::new(id).map(Into::into)
}
/// Return a new literal term with the given value and language tag.
///
/// May fail if the language tag is not a valid BCP47 language tag.
pub fn new_literal_lang<U, V>(txt: U, lang: V) -> Result<Self>
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang(txt, lang).map(Into::into)
}
/// Return a new literal term with the given value and datatype.
///
/// May fail if `dt` is not an IRI.
pub fn new_literal_dt<U, V>(txt: U, dt: V) -> Result<Self>
where
T: From<U>,
V: TryInto<Iri<T>>,
TermError: From<<V as TryInto<Iri<T>>>::Error>,
{
Ok(Literal::new_dt(txt, dt.try_into()?).into())
}
/// Return a new variable term with the given name.
///
/// May fail if `name` is not a valid variable name.
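    ///
    /// A minimal usage sketch (the variable name is arbitrary):
    ///
    /// ```
    /// use sophia_term::BoxTerm;
    /// let v = BoxTerm::new_variable("x").unwrap();
    /// ```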
pub fn new_variable<U>(name: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
Variable::new(name).map(Into::into)
}
/// Borrow the inner contents of the term.
pub fn as_ref(&self) -> Term<&T> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref()),
Literal(lit) => Literal(lit.as_ref()),
BNode(bn) => BNode(bn.as_ref()),
Variable(var) => Variable(var.as_ref()),
}
}
/// Borrow the inner contents of the term as `&str`.
pub fn as_ref_str(&self) -> Term<&str> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref_str()),
Literal(lit) => Literal(lit.as_ref_str()),
BNode(bn) => BNode(bn.as_ref_str()),
Variable(var) => Variable(var.as_ref_str()),
}
}
/// Create a new term by applying `f` to the `TermData` of `self`.
pub fn map<F, TD2>(self, f: F) -> Term<TD2>
where
F: FnMut(T) -> TD2,
TD2: TermData,
{
use self::Term::*;
match self {
Iri(iri) => Iri(iri.map(f)),
Literal(lit) => Literal(lit.map(f)),
BNode(bn) => BNode(bn.map(f)),
Variable(var) => Variable(var.map(f)),
}
}
/// Maps the term using the `Into` trait.
pub fn map_into<TD2>(self) -> Term<TD2>
where
T: Into<TD2>,
TD2: TermData,
{
self.map(Into::into)
}
/// Clone self while transforming the inner `TermData` with the given
/// factory.
///
/// This is done in one step in contrast to calling `clone().map(factory)`.
pub fn clone_map<'a, U, F>(&'a self, factory: F) -> Term<U>
where
U: TermData,
F: FnMut(&'a str) -> U,
{
use self::Term::*;
match self {
Iri(iri) => iri.clone_map(factory).into(),
BNode(bn) => bn.clone_map(factory).into(),
Literal(lit) => lit.clone_map(factory).into(),
Variable(var) => var.clone_map(factory).into(),
}
}
/// Apply `clone_map()` using the `Into` trait.
pub fn clone_into<'src, U>(&'src self) -> Term<U>
where
U: TermData + From<&'src str>,
{
self.clone_map(Into::into)
}
/// Return a term equivalent to this one,
/// with any IRIs internally normalized according to `policy`
/// (e.g. all of their data in `ns` and an empty `suffix`).
///
/// # Performance
/// The returned term borrows data from this one as much as possible,
/// but strings may be allocated when a concatenation is required.
pub fn normalized(&self, policy: Normalization) -> MownTerm {
match self {
Term::Iri(iri) => iri.normalized(policy).into(),
Term::Literal(lit) => lit.normalized(policy).into(),
_ => self.as_ref_str().map_into(),
}
}
/// Create a new IRI-term from a given IRI without checking its validity.
///
/// # Pre-conditions
///
/// This function does not check whether the resulting IRI is valid. That it
/// is valid is a contract that is generally assumed; breaking it could
/// result in unexpected behavior.
///
/// However, in `debug` builds assertions that perform checks are enabled.
pub fn new_iri_unchecked<U>(iri: U) -> Term<T>
where
T: From<U>,
{
Iri::<T>::new_unchecked(iri).into()
}
/// Create a new IRI-term from a given namespace and suffix.
///
/// # Pre-conditions
///
/// It is expected that
///
/// * the resulting IRI is valid per RFC3987,
/// * `suffix` is not the empty string
/// (otherwise, [`new_iri_unchecked`](#method.new_iri_unchecked) should be used instead).
///
/// This is a contract that is generally assumed.
/// Breaking it could result in unexpected behavior.
/// However in `debug` mode, assertions that perform checks are enabled.
pub fn new_iri_suffixed_unchecked<U, V>(ns: U, suffix: V) -> Term<T>
where
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed_unchecked(ns, suffix).into()
}
/// Return a new blank node term.
///
/// # Pre-condition
///
/// This function requires that `id` is a valid bnode ID.
pub fn new_bnode_unchecked<U>(id: U) -> Term<T>
where
U: AsRef<str>,
T: From<U>,
{
BlankNode::<T>::new_unchecked(id).into()
}
/// Return a language-tagged literal term.
///
/// # Pre-condition
///
/// This function requires that `lang` is a valid language tag.
/// In debug mode this constraint is asserted.
pub fn new_literal_lang_unchecked<U, V>(txt: U, lang: V) -> Self
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang_unchecked(txt, lang).into()
}
/// Return a typed literal term.
///
/// # Panics
///
/// Panics if `dt` cannot be converted into an IRI.
pub fn new_literal_dt_unchecked<U, V>(txt: U, dt: V) -> Self
where
T: From<U>,
V: TryInto<Iri<T>>,
<V as TryInto<Iri<T>>>::Error: Debug,
{
Literal::new_dt(txt, dt.try_into().unwrap()).into()
}
/// Return a new variable term.
///
/// # Pre-condition
///
/// This function requires that `name` is a valid variable name.
pub fn new_variable_unchecked<U>(name: U) -> Term<T>
where
U: AsRef<str>,
T: From<U>,
{
Variable::<T>::new_unchecked(name).into()
}
}
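// NOTE: illustrative sketch, not part of the original source. It exercises the
// constructors above: the checked variants validate their input and return a
// `Result`, and `as_ref_str` gives a cheap borrowing view. The literal value,
// language tag and IRI are placeholders.
#[cfg(test)]
mod constructor_examples {
    use super::*;

    #[test]
    fn checked_constructors_validate_their_input() {
        // Valid inputs succeed...
        let lit: BoxTerm = Term::new_literal_lang("chat", "fr").unwrap();
        // ...and the owned term can be viewed through a borrowing `Term<&str>`.
        let borrowed: Term<&str> = lit.as_ref_str();
        assert!(lit == borrowed);
        // Invalid inputs are rejected instead of producing a bogus term.
        assert!(BoxTerm::new_iri("not a valid IRI").is_err());
    }
}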
impl<T: TermData> TTerm for Term<T> {
fn kind(&self) -> TermKind {
use Term::*;
match self {
Iri(_) => TermKind::Iri,
Literal(_) => TermKind::Literal,
BNode(_) => TermKind::BlankNode,
Variable(_) => TermKind::Variable,
}
}
fn value_raw(&self) -> RawValue {
use Term::*;
match self {
Iri(i) => i.value_raw(),
Literal(l) => l.value_raw(),
BNode(b) => b.value_raw(),
Variable(v) => v.value_raw(),
}
}
fn datatype(&self) -> Option<SimpleIri> {
if let Term::Literal(lit) = self {
lit.datatype()
} else {
None
}
}
fn language(&self) -> Option<&str> {
if let Term::Literal(lit) = self {
lit.language()
} else {
None
}
}
fn as_dyn(&self) -> &dyn TTerm {
self
}
}
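// NOTE: illustrative sketch, not part of the original source. It shows how the
// `TTerm` impl above lets generic code inspect a term through `kind` and
// `language` without matching on the enum; the import paths from `sophia_api`
// are assumed here.
#[cfg(test)]
mod tterm_examples {
    use super::*;
    use sophia_api::term::{TTerm, TermKind};

    #[test]
    fn kind_and_language_reflect_the_variant() {
        let lit: BoxTerm = Term::new_literal_lang("chat", "fr").unwrap();
        assert_eq!(lit.kind(), TermKind::Literal);
        assert_eq!(lit.language(), Some("fr"));

        let bnode: BoxTerm = Term::new_bnode("b1").unwrap();
        assert_eq!(bnode.kind(), TermKind::BlankNode);
        assert_eq!(bnode.language(), None);
    }
}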
impl<TD, TE> PartialEq<TE> for Term<TD>
where
TD: TermData,
TE: TTerm + ?Sized,
{
fn eq(&self, other: &TE) -> bool {
term_eq(self, other)
}
}
impl<TD, TE> PartialOrd<TE> for Term<TD>
where
TD: TermData,
TE: TTerm + ?Sized,
{
fn | (&self, other: &TE) -> Option<std::cmp::Ordering> {
Some(term_cmp(self, other))
}
}
impl<TD> Hash for Term<TD>
where
TD: TermData,
{
fn hash<H: Hasher>(&self, state: &mut H) {
term_hash(self, state)
}
}
impl<TD> From<Iri<TD>> for Term<TD>
where
TD: TermData,
{
fn from(iri: Iri<TD>) -> Self {
Term::Iri(iri)
}
}
impl<TD> From<Literal<TD>> for Term<TD>
where
TD: TermData,
{
fn from(lit: Literal<TD>) -> Self {
Term::Literal(lit)
}
}
impl<TD> From<Variable<TD>> for Term<TD>
where
TD: TermData,
{
fn from(var: Variable<TD>) -> Self {
Term::Variable(var)
}
}
impl<TD> From<BlankNode<TD>> for Term<TD>
where
TD: TermData,
{
fn from(bn: BlankNode<TD>) -> Self {
Term::BNode(bn)
}
}
impl<TD> From<String> for Term<TD>
where
TD: TermData + From<Box<str>> + From<&'static str>,
{
fn from(txt: String) -> Self {
txt.as_literal().into()
}
}
impl<'a> From<SimpleIri<'a>> for RefTerm<'a> {
fn from(other: SimpleIri<'a>) -> Self {
Iri::from(other).into()
}
}
impl<T, TD> From<NativeLiteral<T>> for Term<TD>
where
T: DataType + ?Sized,
TD: TermData + From<Box<str>> + From<&'static str>,
{
fn from(other: NativeLiteral<T>) -> Self {
Literal::from(other).into()
}
}
impl<'a, T> From<NativeLiteral<T, &'a str>> for RefTerm<'a>
where
T: DataType + ?Sized,
{
fn from(other: NativeLiteral<T, &'a str>) -> Self {
Literal::from(other).into()
}
}
impl<TD> CopyTerm for Term<TD>
where
TD: TermData + for<'x> From<&'x str>,
{
fn copy<T>(term: &T) -> Self
where
T: TTerm + ?Sized,
{
match term.kind() {
TermKind::Iri => Term::Iri(Iri::try_copy(term).unwrap()),
TermKind::Literal => Term::Literal(Literal::try_copy(term).unwrap()),
TermKind::BlankNode => Term::BNode(BlankNode::try_copy(term).unwrap()),
TermKind::Variable => Term::Variable(Variable::try_copy(term).unwrap()),
}
}
}
impl<'a, T> From<&'a T> for RefTerm<'a>
where
T: TTerm + ?Sized,
{
fn from(t: &'a T) -> Self {
let v = t.value_raw();
match t.kind() {
TermKind::Iri => Term::Iri(match v.1 {
None => Iri::new_unchecked(v.0),
Some(suffix) => Iri::new_suffixed_unchecked(v.0, suffix),
}),
TermKind::Literal => Term::Literal(match t.language() {
None => {
let dt: Iri<&'a str> = t.datatype().unwrap().into();
Literal::new_dt(v.0, dt)
}
Some(tag) => Literal::new_lang_unchecked(v.0, tag),
}),
TermKind::BlankNode => Term::BNode(BlankNode::new_unchecked(v.0)),
TermKind::Variable => Term::Variable(Variable::new_unchecked(v.0)),
}
}
}
impl<'a, TD: TermData + 'a> Borrow<dyn TTerm + 'a> for Term<TD> {
fn borrow(&self) -> &(dyn TTerm + 'a) {
self as _
}
}
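// NOTE: illustrative sketch, not part of the original source. It shows the
// conversion impls above working together: `RefTerm::from` borrows any `TTerm`,
// and `CopyTerm::copy` rebuilds an owned term from that view. The `sophia_api`
// import path is assumed.
#[cfg(test)]
mod conversion_examples {
    use super::*;
    use sophia_api::term::CopyTerm;

    #[test]
    fn borrow_then_copy_round_trips() {
        let original: BoxTerm = Term::new_iri("http://example.org/x").unwrap();
        // Cheap, borrowing view of any `TTerm`...
        let view = RefTerm::from(&original);
        // ...and an owned copy rebuilt from that view.
        let copied: BoxTerm = CopyTerm::copy(&view);
        assert!(original == copied);
    }
}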
#[cfg(test)]
pub(crate) mod test;
/// This line re-exports `same_graph_name` from `sophia_api::term`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub use sophia_api::term::same_graph_name;
/// This module re-exports things from `sophia_api::ns`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub mod ns {
pub use sophia_api::ns::*;
}
/// This line re-exports the module `sophia_api::term::matcher`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub use sophia_api::term::matcher;
| partial_cmp | identifier_name |
custom_insts.rs | //! SPIR-V (extended) instructions specific to `rustc_codegen_spirv`, produced
//! during the original codegen of a crate, and consumed by the `linker`.
use lazy_static::lazy_static;
use rspirv::dr::{Instruction, Operand};
use rspirv::spirv::Op;
use smallvec::SmallVec;
/// Prefix for `CUSTOM_EXT_INST_SET` (`OpExtInstImport` "instruction set" name),
/// without any of the disambiguating suffixes added for specific revisions.
///
/// This **should not** be changed (if possible), to ensure version mismatches
/// can be detected (i.e. starting with this prefix, but the full name differs).
///
/// See `CUSTOM_EXT_INST_SET`'s docs for further constraints on the full name.
pub const CUSTOM_EXT_INST_SET_PREFIX: &str = concat!("Rust.", env!("CARGO_PKG_NAME"), ".");
macro_rules! join_cargo_pkg_version_major_minor_patch {
($sep:literal) => {
concat!(
env!("CARGO_PKG_VERSION_MAJOR"),
$sep,
env!("CARGO_PKG_VERSION_MINOR"),
$sep,
env!("CARGO_PKG_VERSION_PATCH"),
)
}; | /// These considerations are relevant to the specific choice of name:
/// * does *not* start with `NonSemantic.`, as:
/// * some custom instructions may need to be semantic
/// * these custom instructions are not meant for the final SPIR-V
/// (so no third-party support is *technically* required for them)
/// * `NonSemantic.` requires SPIR-V 1.6 (or `SPV_KHR_non_semantic_info`)
/// * always starts with `CUSTOM_EXT_INST_SET_PREFIX` (see also its docs),
/// regardless of Rust-GPU version or custom instruction set definition
/// * contains enough disambiguating information to avoid misinterpretation
/// if the definitions of the custom instructions have changed - this is
/// achieved by hashing the `SCHEMA` constant from `def_custom_insts!` below
pub static ref CUSTOM_EXT_INST_SET: String = {
let schema_hash = {
use rustc_data_structures::stable_hasher::StableHasher;
use std::hash::Hash;
let mut hasher = StableHasher::new();
SCHEMA.hash(&mut hasher);
let (lo, hi) = hasher.finalize();
(lo as u128) | ((hi as u128) << 64)
};
let version = join_cargo_pkg_version_major_minor_patch!("_");
format!("{CUSTOM_EXT_INST_SET_PREFIX}{version}.{schema_hash:x}")
};
}
pub fn register_to_spirt_context(cx: &spirt::Context) {
use spirt::spv::spec::{ExtInstSetDesc, ExtInstSetInstructionDesc};
cx.register_custom_ext_inst_set(
&CUSTOM_EXT_INST_SET,
ExtInstSetDesc {
// HACK(eddyb) this is the most compact form I've found, that isn't
// outright lossy by omitting "Rust vs Rust-GPU" or the version.
short_alias: Some(
concat!("Rust-GPU ", join_cargo_pkg_version_major_minor_patch!(".")).into(),
),
instructions: SCHEMA
.iter()
.map(|&(i, name, operand_names)| {
(
i,
ExtInstSetInstructionDesc {
name: name.into(),
operand_names: operand_names
.iter()
.map(|name| {
name.strip_prefix("..")
.unwrap_or(name)
.replace('_', " ")
.into()
})
.collect(),
is_debuginfo: name.contains("Debug")
|| name.contains("InlinedCallFrame"),
},
)
})
.collect(),
},
);
}
macro_rules! def_custom_insts {
($($num:literal => $name:ident $({ $($field:ident),+ $(, ..$variadic_field:ident)? $(,)? })?),+ $(,)?) => {
const SCHEMA: &[(u32, &str, &[&str])] = &[
$(($num, stringify!($name), &[$($(stringify!($field),)+ $(stringify!(..$variadic_field),)?)?])),+
];
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CustomOp { $($name = $num),+ }
impl CustomOp {
pub fn decode(i: u32) -> Self {
match i {
$($num => Self::$name,)+
_ => unreachable!("{i} is not a valid custom instruction number"),
}
}
pub fn decode_from_ext_inst(inst: &Instruction) -> Self {
assert_eq!(inst.class.opcode, Op::ExtInst);
Self::decode(inst.operands[1].unwrap_literal_ext_inst_integer())
}
pub fn with_operands<T: Clone>(self, operands: &[T]) -> CustomInst<T> {
match self {
$(Self::$name => match operands {
[$($($field,)+ $(ref $variadic_field @ ..)?)?] => CustomInst::$name $({
$($field: $field.clone(),)+
$($variadic_field: $variadic_field.iter().cloned().collect())?
})?,
_ => unreachable!("{self:?} does not have the right number of operands"),
}),+
}
}
}
#[derive(Clone, Debug)]
pub enum CustomInst<T> {
$($name $({ $($field: T,)+ $($variadic_field: SmallVec<[T; 4]>)? })?),+
}
impl<T> CustomInst<T> {
pub fn op(&self) -> CustomOp {
match *self {
$(Self::$name { .. } => CustomOp::$name),+
}
}
// HACK(eddyb) this should return an iterator, but that's too much effort.
pub fn into_operands(self) -> SmallVec<[T; 8]> {
match self {
$(Self::$name $({ $($field,)+ $($variadic_field)? })? => {
[$($($field),+)?].into_iter() $($(.chain($variadic_field))?)? .collect()
})+
}
}
}
impl CustomInst<Operand> {
pub fn decode(inst: &Instruction) -> Self {
CustomOp::decode_from_ext_inst(inst).with_operands(&inst.operands[2..])
}
}
}
}
// NOTE(eddyb) several of these are similar to `NonSemantic.Shader.DebugInfo.100`
// instructions, but simpler (to aid implementation, for now).
def_custom_insts! {
// Like `DebugLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpLine`.
0 => SetDebugSrcLoc { file, line_start, line_end, col_start, col_end },
// Like `DebugNoLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpNoLine`.
1 => ClearDebugSrcLoc,
// Similar to `DebugInlinedAt` (from `NonSemantic.Shader.DebugInfo.100`),
// but simpler: there are no "scope objects", the location of the inlined
// callsite is given by other debuginfo (`SetDebugSrcLoc`/`OpLine`) active
// before this instruction, and only the name of the callee is recorded.
2 => PushInlinedCallFrame { callee_name },
// Leave the most recent inlined call frame entered by a `PushInlinedCallFrame`
// (i.e. the inlined call frames form a virtual call stack in debuginfo).
3 => PopInlinedCallFrame,
// [Semantic] Similar to some proposed `OpAbort`, but without any ability to
// indicate abnormal termination (so it's closer to `OpTerminateInvocation`,
// which we could theoretically use, but that's limited to fragment shaders).
//
// Lowering takes advantage of inlining happening before CFG structurization
// (by forcing inlining of `Abort`s all the way up to entry-points, so as to be
// able to turn the `Abort`s into regular `OpReturn`s, from an entry-point),
// but if/when inlining works on structured SPIR-T instead, it's not much
// harder to make any call to a "may (transitively) abort" function branch on
// an additional returned `bool`, instead (i.e. a form of emulated unwinding).
//
// As this is a custom terminator, it must only appear before `OpUnreachable`,
// with at most debuginfo instructions (standard or custom), between the two.
//
// FIXME(eddyb) long-term this kind of custom control-flow could be generalized
// to fully emulate unwinding (resulting in codegen similar to `?` in functions
// returning `Option` or `Result`), to e.g. run destructors, or even allow
// users to do `catch_unwind` at the top-level of their shader to handle
// panics specially (e.g. by appending to a custom buffer, or using some
// specific color in a fragment shader, to indicate a panic happened).
// NOTE(eddyb) `message_debug_printf` operands form a complete `debugPrintf`
// invocation (format string followed by inputs) for the "message", while
// `kind` only distinguishes broad categories like `"abort"` vs `"panic"`.
4 => Abort { kind, ..message_debug_printf },
}
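// NOTE: illustrative sketch, not part of the original source. It shows how the
// `CustomOp`/`CustomInst` types generated by `def_custom_insts!` round-trip
// between a variant and its flat operand list; the `Operand::IdRef(42)` value
// is a made-up placeholder.
#[cfg(test)]
mod custom_inst_examples {
    use super::*;
    use rspirv::dr::Operand;

    #[test]
    fn op_and_operands_round_trip() {
        let inst = CustomInst::PushInlinedCallFrame {
            callee_name: Operand::IdRef(42),
        };
        assert_eq!(inst.op(), CustomOp::PushInlinedCallFrame);
        // `into_operands` flattens the named fields back into the plain list
        // that is stored on an `OpExtInst`.
        let ops = inst.into_operands();
        assert_eq!(ops.len(), 1);
        // `CustomOp::decode` maps the instruction number back to the variant.
        assert_eq!(CustomOp::decode(2), CustomOp::PushInlinedCallFrame);
    }
}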
impl CustomOp {
/// Returns `true` iff this `CustomOp` is a custom debuginfo instruction,
/// i.e. non-semantic (can/must be ignored wherever `OpLine`/`OpNoLine` are).
pub fn is_debuginfo(self) -> bool {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => true,
CustomOp::Abort => false,
}
}
/// Returns `true` iff this `CustomOp` is a custom terminator instruction,
/// i.e. semantic and must precede an `OpUnreachable` standard terminator,
/// with at most debuginfo instructions (standard or custom), between the two.
pub fn is_terminator(self) -> bool {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => false,
CustomOp::Abort => true,
}
}
} | }
lazy_static! {
/// `OpExtInstImport` "instruction set" name for all Rust-GPU instructions.
/// | random_line_split |
custom_insts.rs | //! SPIR-V (extended) instructions specific to `rustc_codegen_spirv`, produced
//! during the original codegen of a crate, and consumed by the `linker`.
use lazy_static::lazy_static;
use rspirv::dr::{Instruction, Operand};
use rspirv::spirv::Op;
use smallvec::SmallVec;
/// Prefix for `CUSTOM_EXT_INST_SET` (`OpExtInstImport` "instruction set" name),
/// without any of the disambiguating suffixes added for specific revisions.
///
/// This **should not** be changed (if possible), to ensure version mismatches
/// can be detected (i.e. starting with this prefix, but the full name differs).
///
/// See `CUSTOM_EXT_INST_SET`'s docs for further constraints on the full name.
pub const CUSTOM_EXT_INST_SET_PREFIX: &str = concat!("Rust.", env!("CARGO_PKG_NAME"), ".");
macro_rules! join_cargo_pkg_version_major_minor_patch {
($sep:literal) => {
concat!(
env!("CARGO_PKG_VERSION_MAJOR"),
$sep,
env!("CARGO_PKG_VERSION_MINOR"),
$sep,
env!("CARGO_PKG_VERSION_PATCH"),
)
};
}
lazy_static! {
/// `OpExtInstImport` "instruction set" name for all Rust-GPU instructions.
///
/// These considerations are relevant to the specific choice of name:
/// * does *not* start with `NonSemantic.`, as:
/// * some custom instructions may need to be semantic
/// * these custom instructions are not meant for the final SPIR-V
/// (so no third-party support is *technically* required for them)
/// * `NonSemantic.` requires SPIR-V 1.6 (or `SPV_KHR_non_semantic_info`)
/// * always starts with `CUSTOM_EXT_INST_SET_PREFIX` (see also its docs),
/// regardless of Rust-GPU version or custom instruction set definition
/// * contains enough disambiguating information to avoid misinterpretation
/// if the definitions of the custom instructions have changed - this is
/// achieved by hashing the `SCHEMA` constant from `def_custom_insts!` below
pub static ref CUSTOM_EXT_INST_SET: String = {
let schema_hash = {
use rustc_data_structures::stable_hasher::StableHasher;
use std::hash::Hash;
let mut hasher = StableHasher::new();
SCHEMA.hash(&mut hasher);
let (lo, hi) = hasher.finalize();
(lo as u128) | ((hi as u128) << 64)
};
let version = join_cargo_pkg_version_major_minor_patch!("_");
format!("{CUSTOM_EXT_INST_SET_PREFIX}{version}.{schema_hash:x}")
};
}
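// NOTE: illustrative sketch, not part of the original source. It demonstrates
// the naming scheme described above: the stable prefix identifies Rust-GPU
// instruction sets in general, while the version and schema-hash suffix pins
// the exact revision, which is what makes mismatches detectable.
#[cfg(test)]
mod ext_inst_set_name_examples {
    use super::*;

    #[test]
    fn full_name_extends_the_stable_prefix() {
        assert!(CUSTOM_EXT_INST_SET.starts_with(CUSTOM_EXT_INST_SET_PREFIX));
        // The remainder after the prefix carries the version and schema hash.
        assert!(CUSTOM_EXT_INST_SET.len() > CUSTOM_EXT_INST_SET_PREFIX.len());
    }
}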
pub fn register_to_spirt_context(cx: &spirt::Context) {
use spirt::spv::spec::{ExtInstSetDesc, ExtInstSetInstructionDesc};
cx.register_custom_ext_inst_set(
&CUSTOM_EXT_INST_SET,
ExtInstSetDesc {
// HACK(eddyb) this is the most compact form I've found, that isn't
// outright lossy by omitting "Rust vs Rust-GPU" or the version.
short_alias: Some(
concat!("Rust-GPU ", join_cargo_pkg_version_major_minor_patch!(".")).into(),
),
instructions: SCHEMA
.iter()
.map(|&(i, name, operand_names)| {
(
i,
ExtInstSetInstructionDesc {
name: name.into(),
operand_names: operand_names
.iter()
.map(|name| {
name.strip_prefix("..")
.unwrap_or(name)
.replace('_', " ")
.into()
})
.collect(),
is_debuginfo: name.contains("Debug")
|| name.contains("InlinedCallFrame"),
},
)
})
.collect(),
},
);
}
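// NOTE: illustrative usage sketch, not from the original crate. A consumer that
// wants SPIR-T dumps to pretty-print these custom instructions would call
// `register_to_spirt_context` once per `spirt::Context`, before lowering any
// module through it.
#[allow(dead_code)]
fn example_register(cx: &spirt::Context) {
    register_to_spirt_context(cx);
    // After this, dumps show names like `SetDebugSrcLoc` instead of raw
    // extended-instruction numbers.
}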
macro_rules! def_custom_insts {
($($num:literal => $name:ident $({ $($field:ident),+ $(, ..$variadic_field:ident)? $(,)? })?),+ $(,)?) => {
const SCHEMA: &[(u32, &str, &[&str])] = &[
$(($num, stringify!($name), &[$($(stringify!($field),)+ $(stringify!(..$variadic_field),)?)?])),+
];
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CustomOp { $($name = $num),+ }
impl CustomOp {
pub fn decode(i: u32) -> Self {
match i {
$($num => Self::$name,)+
_ => unreachable!("{i} is not a valid custom instruction number"),
}
}
pub fn decode_from_ext_inst(inst: &Instruction) -> Self {
assert_eq!(inst.class.opcode, Op::ExtInst);
Self::decode(inst.operands[1].unwrap_literal_ext_inst_integer())
}
pub fn with_operands<T: Clone>(self, operands: &[T]) -> CustomInst<T> {
match self {
$(Self::$name => match operands {
[$($($field,)+ $(ref $variadic_field @ ..)?)?] => CustomInst::$name $({
$($field: $field.clone(),)+
$($variadic_field: $variadic_field.iter().cloned().collect())?
})?,
_ => unreachable!("{self:?} does not have the right number of operands"),
}),+
}
}
}
#[derive(Clone, Debug)]
pub enum CustomInst<T> {
$($name $({ $($field: T,)+ $($variadic_field: SmallVec<[T; 4]>)? })?),+
}
impl<T> CustomInst<T> {
pub fn op(&self) -> CustomOp {
match *self {
$(Self::$name { .. } => CustomOp::$name),+
}
}
// HACK(eddyb) this should return an iterator, but that's too much effort.
pub fn into_operands(self) -> SmallVec<[T; 8]> {
match self {
$(Self::$name $({ $($field,)+ $($variadic_field)? })? => {
[$($($field),+)?].into_iter() $($(.chain($variadic_field))?)? .collect()
})+
}
}
}
impl CustomInst<Operand> {
pub fn decode(inst: &Instruction) -> Self {
CustomOp::decode_from_ext_inst(inst).with_operands(&inst.operands[2..])
}
}
}
}
// NOTE(eddyb) several of these are similar to `NonSemantic.Shader.DebugInfo.100`
// instructions, but simpler (to aid implementation, for now).
def_custom_insts! {
// Like `DebugLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpLine`.
0 => SetDebugSrcLoc { file, line_start, line_end, col_start, col_end },
// Like `DebugNoLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpNoLine`.
1 => ClearDebugSrcLoc,
// Similar to `DebugInlinedAt` (from `NonSemantic.Shader.DebugInfo.100`),
// but simpler: there are no "scope objects", the location of the inlined
// callsite is given by other debuginfo (`SetDebugSrcLoc`/`OpLine`) active
// before this instruction, and only the name of the callee is recorded.
2 => PushInlinedCallFrame { callee_name },
// Leave the most recent inlined call frame entered by a `PushInlinedCallFrame`
// (i.e. the inlined call frames form a virtual call stack in debuginfo).
3 => PopInlinedCallFrame,
// [Semantic] Similar to some proposed `OpAbort`, but without any ability to
// indicate abnormal termination (so it's closer to `OpTerminateInvocation`,
// which we could theoretically use, but that's limited to fragment shaders).
//
// Lowering takes advantage of inlining happening before CFG structurization
// (by forcing inlining of `Abort`s all the way up to entry-points, so as to be
// able to turn the `Abort`s into regular `OpReturn`s, from an entry-point),
// but if/when inlining works on structured SPIR-T instead, it's not much
// harder to make any call to a "may (transitively) abort" function branch on
// an additional returned `bool`, instead (i.e. a form of emulated unwinding).
//
// As this is a custom terminator, it must only appear before `OpUnreachable`,
// with at most debuginfo instructions (standard or custom), between the two.
//
// FIXME(eddyb) long-term this kind of custom control-flow could be generalized
// to fully emulate unwinding (resulting in codegen similar to `?` in functions
// returning `Option` or `Result`), to e.g. run destructors, or even allow
// users to do `catch_unwind` at the top-level of their shader to handle
// panics specially (e.g. by appending to a custom buffer, or using some
// specific color in a fragment shader, to indicate a panic happened).
// NOTE(eddyb) `message_debug_printf` operands form a complete `debugPrintf`
// invocation (format string followed by inputs) for the "message", while
// `kind` only distinguishes broad categories like `"abort"` vs `"panic"`.
4 => Abort { kind, ..message_debug_printf },
}
impl CustomOp {
/// Returns `true` iff this `CustomOp` is a custom debuginfo instruction,
/// i.e. non-semantic (can/must be ignored wherever `OpLine`/`OpNoLine` are).
pub fn is_debuginfo(self) -> bool |
/// Returns `true` iff this `CustomOp` is a custom terminator instruction,
/// i.e. semantic and must precede an `OpUnreachable` standard terminator,
/// with at most debuginfo instructions (standard or custom), between the two.
pub fn is_terminator(self) -> bool {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => false,
CustomOp::Abort => true,
}
}
}
| {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => true,
CustomOp::Abort => false,
}
} | identifier_body |
custom_insts.rs | //! SPIR-V (extended) instructions specific to `rustc_codegen_spirv`, produced
//! during the original codegen of a crate, and consumed by the `linker`.
use lazy_static::lazy_static;
use rspirv::dr::{Instruction, Operand};
use rspirv::spirv::Op;
use smallvec::SmallVec;
/// Prefix for `CUSTOM_EXT_INST_SET` (`OpExtInstImport` "instruction set" name),
/// without any of the disambiguating suffixes added for specific revisions.
///
/// This **should not** be changed (if possible), to ensure version mismatches
/// can be detected (i.e. starting with this prefix, but the full name differs).
///
/// See `CUSTOM_EXT_INST_SET`'s docs for further constraints on the full name.
pub const CUSTOM_EXT_INST_SET_PREFIX: &str = concat!("Rust.", env!("CARGO_PKG_NAME"), ".");
macro_rules! join_cargo_pkg_version_major_minor_patch {
($sep:literal) => {
concat!(
env!("CARGO_PKG_VERSION_MAJOR"),
$sep,
env!("CARGO_PKG_VERSION_MINOR"),
$sep,
env!("CARGO_PKG_VERSION_PATCH"),
)
};
}
lazy_static! {
/// `OpExtInstImport` "instruction set" name for all Rust-GPU instructions.
///
/// These considerations are relevant to the specific choice of name:
/// * does *not* start with `NonSemantic.`, as:
/// * some custom instructions may need to be semantic
/// * these custom instructions are not meant for the final SPIR-V
/// (so no third-party support is *technically* required for them)
/// * `NonSemantic.` requires SPIR-V 1.6 (or `SPV_KHR_non_semantic_info`)
/// * always starts with `CUSTOM_EXT_INST_SET_PREFIX` (see also its docs),
/// regardless of Rust-GPU version or custom instruction set definition
/// * contains enough disambiguating information to avoid misinterpretation
/// if the definitions of the custom instructions have changed - this is
/// achieved by hashing the `SCHEMA` constant from `def_custom_insts!` below
pub static ref CUSTOM_EXT_INST_SET: String = {
let schema_hash = {
use rustc_data_structures::stable_hasher::StableHasher;
use std::hash::Hash;
let mut hasher = StableHasher::new();
SCHEMA.hash(&mut hasher);
let (lo, hi) = hasher.finalize();
(lo as u128) | ((hi as u128) << 64)
};
let version = join_cargo_pkg_version_major_minor_patch!("_");
format!("{CUSTOM_EXT_INST_SET_PREFIX}{version}.{schema_hash:x}")
};
}
pub fn register_to_spirt_context(cx: &spirt::Context) {
use spirt::spv::spec::{ExtInstSetDesc, ExtInstSetInstructionDesc};
cx.register_custom_ext_inst_set(
&CUSTOM_EXT_INST_SET,
ExtInstSetDesc {
// HACK(eddyb) this is the most compact form I've found, that isn't
// outright lossy by omitting "Rust vs Rust-GPU" or the version.
short_alias: Some(
concat!("Rust-GPU ", join_cargo_pkg_version_major_minor_patch!(".")).into(),
),
instructions: SCHEMA
.iter()
.map(|&(i, name, operand_names)| {
(
i,
ExtInstSetInstructionDesc {
name: name.into(),
operand_names: operand_names
.iter()
.map(|name| {
name.strip_prefix("..")
.unwrap_or(name)
.replace('_', " ")
.into()
})
.collect(),
is_debuginfo: name.contains("Debug")
|| name.contains("InlinedCallFrame"),
},
)
})
.collect(),
},
);
}
macro_rules! def_custom_insts {
($($num:literal => $name:ident $({ $($field:ident),+ $(, ..$variadic_field:ident)? $(,)? })?),+ $(,)?) => {
const SCHEMA: &[(u32, &str, &[&str])] = &[
$(($num, stringify!($name), &[$($(stringify!($field),)+ $(stringify!(..$variadic_field),)?)?])),+
];
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CustomOp { $($name = $num),+ }
impl CustomOp {
pub fn decode(i: u32) -> Self {
match i {
$($num => Self::$name,)+
_ => unreachable!("{i} is not a valid custom instruction number"),
}
}
pub fn decode_from_ext_inst(inst: &Instruction) -> Self {
assert_eq!(inst.class.opcode, Op::ExtInst);
Self::decode(inst.operands[1].unwrap_literal_ext_inst_integer())
}
pub fn with_operands<T: Clone>(self, operands: &[T]) -> CustomInst<T> {
match self {
$(Self::$name => match operands {
[$($($field,)+ $(ref $variadic_field @ ..)?)?] => CustomInst::$name $({
$($field: $field.clone(),)+
$($variadic_field: $variadic_field.iter().cloned().collect())?
})?,
_ => unreachable!("{self:?} does not have the right number of operands"),
}),+
}
}
}
#[derive(Clone, Debug)]
pub enum CustomInst<T> {
$($name $({ $($field: T,)+ $($variadic_field: SmallVec<[T; 4]>)? })?),+
}
impl<T> CustomInst<T> {
pub fn op(&self) -> CustomOp {
match *self {
$(Self::$name { .. } => CustomOp::$name),+
}
}
// HACK(eddyb) this should return an iterator, but that's too much effort.
pub fn into_operands(self) -> SmallVec<[T; 8]> {
match self {
$(Self::$name $({ $($field,)+ $($variadic_field)? })? => {
[$($($field),+)?].into_iter() $($(.chain($variadic_field))?)? .collect()
})+
}
}
}
impl CustomInst<Operand> {
pub fn decode(inst: &Instruction) -> Self {
CustomOp::decode_from_ext_inst(inst).with_operands(&inst.operands[2..])
}
}
}
}
// NOTE(eddyb) several of these are similar to `NonSemantic.Shader.DebugInfo.100`
// instructions, but simpler (to aid implementation, for now).
def_custom_insts! {
// Like `DebugLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpLine`.
0 => SetDebugSrcLoc { file, line_start, line_end, col_start, col_end },
// Like `DebugNoLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpNoLine`.
1 => ClearDebugSrcLoc,
// Similar to `DebugInlinedAt` (from `NonSemantic.Shader.DebugInfo.100`),
// but simpler: there are no "scope objects", the location of the inlined
// callsite is given by other debuginfo (`SetDebugSrcLoc`/`OpLine`) active
// before this instruction, and only the name of the callee is recorded.
2 => PushInlinedCallFrame { callee_name },
// Leave the most recent inlined call frame entered by a `PushInlinedCallFrame`
// (i.e. the inlined call frames form a virtual call stack in debuginfo).
3 => PopInlinedCallFrame,
// [Semantic] Similar to some proposed `OpAbort`, but without any ability to
// indicate abnormal termination (so it's closer to `OpTerminateInvocation`,
// which we could theoretically use, but that's limited to fragment shaders).
//
// Lowering takes advantage of inlining happening before CFG structurization
// (by forcing inlining of `Abort`s all the way up to entry-points, so as to be
// able to turn the `Abort`s into regular `OpReturn`s, from an entry-point),
// but if/when inlining works on structured SPIR-T instead, it's not much
// harder to make any call to a "may (transitively) abort" function branch on
// an additional returned `bool`, instead (i.e. a form of emulated unwinding).
//
// As this is a custom terminator, it must only appear before `OpUnreachable`,
// with at most debuginfo instructions (standard or custom), between the two.
//
// FIXME(eddyb) long-term this kind of custom control-flow could be generalized
// to fully emulate unwinding (resulting in codegen similar to `?` in functions
// returning `Option` or `Result`), to e.g. run destructors, or even allow
// users to do `catch_unwind` at the top-level of their shader to handle
// panics specially (e.g. by appending to a custom buffer, or using some
// specific color in a fragment shader, to indicate a panic happened).
// NOTE(eddyb) `message_debug_printf` operands form a complete `debugPrintf`
// invocation (format string followed by inputs) for the "message", while
// `kind` only distinguishes broad categories like `"abort"` vs `"panic"`.
4 => Abort { kind, ..message_debug_printf },
}
impl CustomOp {
/// Returns `true` iff this `CustomOp` is a custom debuginfo instruction,
/// i.e. non-semantic (can/must be ignored wherever `OpLine`/`OpNoLine` are).
pub fn | (self) -> bool {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => true,
CustomOp::Abort => false,
}
}
/// Returns `true` iff this `CustomOp` is a custom terminator instruction,
/// i.e. semantic and must precede an `OpUnreachable` standard terminator,
/// with at most debuginfo instructions (standard or custom), between the two.
pub fn is_terminator(self) -> bool {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => false,
CustomOp::Abort => true,
}
}
}
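// NOTE: illustrative sketch, not part of the original source. It shows how a
// pass might rely on the terminator predicate above: in the current set,
// `Abort` is the only custom terminator.
#[cfg(test)]
mod classification_examples {
    use super::*;

    #[test]
    fn only_abort_terminates() {
        let all = [
            CustomOp::SetDebugSrcLoc,
            CustomOp::ClearDebugSrcLoc,
            CustomOp::PushInlinedCallFrame,
            CustomOp::PopInlinedCallFrame,
            CustomOp::Abort,
        ];
        assert!(all.iter().filter(|op| op.is_terminator()).count() == 1);
        assert!(CustomOp::Abort.is_terminator());
    }
}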
| is_debuginfo | identifier_name |
mod.rs | use std::collections::{HashMap, HashSet};
#[cfg(feature = "egl")]
use smithay::backend::renderer::ImportEgl;
use smithay::{
backend::{
allocator::{
dmabuf::{AnyError, Dmabuf, DmabufAllocator},
gbm::{GbmAllocator, GbmBufferFlags},
vulkan::{ImageUsageFlags, VulkanAllocator},
Allocator,
},
drm::{DrmNode, NodeType},
libinput::{LibinputInputBackend, LibinputSessionInterface},
renderer::{
element::texture::TextureBuffer,
gles::GlesRenderer,
multigpu::{gbm::GbmGlesBackend, GpuManager, MultiTexture},
ImportDma, ImportMemWl,
},
session::libseat::LibSeatSession,
session::Session,
udev::{self, UdevBackend},
vulkan::{version::Version, Instance, PhysicalDevice},
},
delegate_dmabuf,
reexports::{
ash::vk::ExtPhysicalDeviceDrmFn,
calloop::{EventLoop, LoopSignal},
input::Libinput,
wayland_protocols::wp::linux_dmabuf::zv1::server::zwp_linux_dmabuf_feedback_v1,
wayland_server::{protocol::wl_surface::WlSurface, Display},
},
wayland::dmabuf::{
DmabufFeedback, DmabufFeedbackBuilder, DmabufGlobal, DmabufHandler, DmabufState,
ImportError,
},
};
use self::drm::{BackendData, SurfaceComposition, UdevOutputId};
use crate::{cursor::Cursor, drawing::PointerElement, state::Backend, CalloopData, Corrosion};
mod drm;
mod utils;
pub struct UdevData {
pub loop_signal: LoopSignal,
pub session: LibSeatSession,
primary_gpu: DrmNode,
dmabuf_state: Option<(DmabufState, DmabufGlobal)>,
allocator: Option<Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>>,
gpu_manager: GpuManager<GbmGlesBackend<GlesRenderer>>,
backends: HashMap<DrmNode, BackendData>,
pointer_element: PointerElement<MultiTexture>,
cursor_image: Cursor,
cursor_images: Vec<(xcursor::parser::Image, TextureBuffer<MultiTexture>)>,
}
impl DmabufHandler for Corrosion<UdevData> {
fn dmabuf_state(&mut self) -> &mut smithay::wayland::dmabuf::DmabufState {
&mut self.backend_data.dmabuf_state.as_mut().unwrap().0
}
fn dmabuf_imported(
&mut self,
_global: &DmabufGlobal,
dmabuf: Dmabuf,
) -> Result<(), smithay::wayland::dmabuf::ImportError> {
self.backend_data
.gpu_manager
.single_renderer(&self.backend_data.primary_gpu)
.and_then(|mut renderer| renderer.import_dmabuf(&dmabuf, None))
.map(|_| ())
.map_err(|_| ImportError::Failed)
}
}
delegate_dmabuf!(Corrosion<UdevData>);
impl Backend for UdevData {
fn early_import(&mut self, output: &WlSurface) {
match self
.gpu_manager
.early_import(Some(self.primary_gpu), self.primary_gpu, output)
{
Ok(()) => {}
Err(err) => tracing::error!("Error on early buffer import: {}", err),
};
}
fn loop_signal(&self) -> &LoopSignal {
&self.loop_signal
}
fn reset_buffers(&mut self, output: &smithay::output::Output) {
if let Some(id) = output.user_data().get::<UdevOutputId>() {
if let Some(gpu) = self.backends.get_mut(&id.device_id) {
if let Some(surface) = gpu.surfaces.get_mut(&id.crtc) {
surface.compositor.reset_buffers();
}
}
}
}
fn seat_name(&self) -> String {
self.session.seat()
}
}
pub fn | () {
let mut event_loop = EventLoop::try_new().expect("Unable to initialize event loop");
let (session, mut _notifier) = match LibSeatSession::new() {
Ok((session, notifier)) => (session, notifier),
Err(err) => {
tracing::error!("Error in creating libseat session: {}", err);
return;
}
};
let mut display = Display::new().expect("Unable to create wayland display");
let primary_gpu = udev::primary_gpu(&session.seat())
.unwrap()
.and_then(|p| {
DrmNode::from_path(p)
.ok()
.expect("Unable to create drm node")
.node_with_type(NodeType::Render)
.expect("Unable to create drm node")
.ok()
})
.unwrap_or_else(|| {
udev::all_gpus(&session.seat())
.unwrap()
.into_iter()
.find_map(|g| DrmNode::from_path(g).ok())
.expect("no gpu")
});
tracing::info!("Using {} as a primary gpu", primary_gpu);
let gpus = GpuManager::new(GbmGlesBackend::default()).unwrap();
let data = UdevData {
loop_signal: event_loop.get_signal(),
dmabuf_state: None,
session,
primary_gpu,
allocator: None,
gpu_manager: gpus,
backends: HashMap::new(),
cursor_image: Cursor::load(),
cursor_images: Vec::new(),
pointer_element: PointerElement::default(),
};
let mut state = Corrosion::new(event_loop.handle(), &mut display, data);
let backend = match UdevBackend::new(&state.seat_name) {
Ok(backend) => backend,
Err(err) => {
tracing::error!("Unable to create udev backend: {}", err);
return;
}
};
for (dev, path) in backend.device_list() {
state.device_added(DrmNode::from_dev_id(dev).unwrap(), &path);
}
state.shm_state.update_formats(
state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap()
.shm_formats(),
);
if let Ok(instance) = Instance::new(Version::VERSION_1_2, None) {
if let Some(physical_device) =
PhysicalDevice::enumerate(&instance)
.ok()
.and_then(|devices| {
devices
.filter(|phd| phd.has_device_extension(ExtPhysicalDeviceDrmFn::name()))
.find(|phd| {
phd.primary_node().unwrap() == Some(primary_gpu)
|| phd.render_node().unwrap() == Some(primary_gpu)
})
})
{
match VulkanAllocator::new(
&physical_device,
ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::SAMPLED,
) {
Ok(allocator) => {
state.backend_data.allocator = Some(Box::new(DmabufAllocator(allocator))
as Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>);
}
Err(err) => {
tracing::warn!("Failed to create vulkan allocator: {}", err);
}
}
}
}
let mut libinput_context = Libinput::new_with_udev::<LibinputSessionInterface<LibSeatSession>>(
state.backend_data.session.clone().into(),
);
libinput_context.udev_assign_seat(&state.seat_name).unwrap();
let libinput_backend = LibinputInputBackend::new(libinput_context.clone());
state
.handle
.insert_source(libinput_backend, move |event, _, data| {
data.state.process_input_event(event);
})
.unwrap();
let gbm = state
.backend_data
.backends
.get(&primary_gpu)
// If the primary_gpu failed to initialize, we likely have a kmsro device
.or_else(|| state.backend_data.backends.values().next())
// Don't fail if there is no allocator. There is a chance that this is a single-GPU system and we don't need one.
.map(|backend| backend.gbm.clone());
state.backend_data.allocator = gbm.map(|gbm| {
Box::new(DmabufAllocator(GbmAllocator::new(
gbm,
GbmBufferFlags::RENDERING,
))) as Box<_>
});
#[cfg_attr(not(feature = "egl"), allow(unused_mut))]
let mut renderer = state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap();
#[cfg(feature = "egl")]
{
match renderer.bind_wl_display(&state.display_handle) {
Ok(_) => tracing::info!("Enabled egl hardware acceleration"),
Err(err) => tracing::error!("Error in enabling egl hardware acceleration: {:?}", err),
}
}
let dmabuf_formats = renderer.dmabuf_formats().collect::<Vec<_>>();
let default_feedback = DmabufFeedbackBuilder::new(primary_gpu.dev_id(), dmabuf_formats)
.build()
.unwrap();
let mut dmabuf_state = DmabufState::new();
let dmabuf_global = dmabuf_state.create_global_with_default_feedback::<Corrosion<UdevData>>(
&display.handle(),
&default_feedback,
);
state.backend_data.dmabuf_state = Some((dmabuf_state, dmabuf_global));
let gpus = &mut state.backend_data.gpu_manager;
state
.backend_data
.backends
.values_mut()
.for_each(|backend_data| {
backend_data.surfaces.values_mut().for_each(|surface_data| {
surface_data.dmabuf_feedback = surface_data.dmabuf_feedback.take().or_else(|| {
get_surface_dmabuf_feedback(
primary_gpu,
surface_data.render_node,
gpus,
&surface_data.compositor,
)
});
});
});
event_loop
.handle()
.insert_source(backend, move |event, _, data| match event {
udev::UdevEvent::Added { device_id, path } => {
data.state
.device_added(DrmNode::from_dev_id(device_id).unwrap(), &path);
}
udev::UdevEvent::Changed { device_id } => {
data.state
.device_changed(DrmNode::from_dev_id(device_id).unwrap());
}
udev::UdevEvent::Removed { device_id } => {
data.state
.device_removed(DrmNode::from_dev_id(device_id).unwrap());
}
})
.expect("Error inserting event loop source");
std::env::set_var("WAYLAND_DISPLAY", &state.socket_name);
let mut calloop_data = CalloopData { state, display };
event_loop
.run(
std::time::Duration::from_millis(16),
&mut calloop_data,
|data| {
data.state.space.refresh();
data.state.popup_manager.cleanup();
data.display.flush_clients().unwrap();
},
)
.unwrap();
}
pub struct DrmSurfaceDmabufFeedback {
render_feedback: DmabufFeedback,
scanout_feedback: DmabufFeedback,
}
fn get_surface_dmabuf_feedback(
primary_node: DrmNode,
render_node: DrmNode,
gpus: &mut GpuManager<GbmGlesBackend<GlesRenderer>>,
composition: &SurfaceComposition,
) -> Option<DrmSurfaceDmabufFeedback> {
let primary_formats = gpus
.single_renderer(&primary_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let render_formats = gpus
.single_renderer(&render_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let all_render_formats = primary_formats
.iter()
.chain(render_formats.iter())
.copied()
.collect::<HashSet<_>>();
let surface = composition.surface();
let planes = surface.planes().unwrap();
let planes_formats = surface
.supported_formats(planes.primary.handle)
.unwrap()
.into_iter()
.chain(
planes
.overlay
.iter()
.flat_map(|p| surface.supported_formats(p.handle).unwrap()),
)
.collect::<HashSet<_>>()
.intersection(&all_render_formats)
.copied()
.collect::<Vec<_>>();
let builder = DmabufFeedbackBuilder::new(primary_node.dev_id(), primary_formats);
let render_feedback = builder
.clone()
.add_preference_tranche(render_node.dev_id(), None, render_formats.clone())
.build()
.unwrap();
let scanout_feedback = builder
.clone()
.add_preference_tranche(
surface.device_fd().dev_id().unwrap(),
Some(zwp_linux_dmabuf_feedback_v1::TrancheFlags::Scanout),
planes_formats,
)
.add_preference_tranche(render_node.dev_id(), None, render_formats)
.build()
.unwrap();
Some(DrmSurfaceDmabufFeedback {
render_feedback,
scanout_feedback,
})
}
| initialize_backend | identifier_name |
mod.rs | use std::collections::{HashMap, HashSet};
#[cfg(feature = "egl")]
use smithay::backend::renderer::ImportEgl;
use smithay::{
backend::{
allocator::{
dmabuf::{AnyError, Dmabuf, DmabufAllocator},
gbm::{GbmAllocator, GbmBufferFlags},
vulkan::{ImageUsageFlags, VulkanAllocator},
Allocator,
},
drm::{DrmNode, NodeType},
libinput::{LibinputInputBackend, LibinputSessionInterface},
renderer::{
element::texture::TextureBuffer,
gles::GlesRenderer,
multigpu::{gbm::GbmGlesBackend, GpuManager, MultiTexture},
ImportDma, ImportMemWl,
},
session::libseat::LibSeatSession,
session::Session,
udev::{self, UdevBackend},
vulkan::{version::Version, Instance, PhysicalDevice},
},
delegate_dmabuf,
reexports::{
ash::vk::ExtPhysicalDeviceDrmFn,
calloop::{EventLoop, LoopSignal},
input::Libinput,
wayland_protocols::wp::linux_dmabuf::zv1::server::zwp_linux_dmabuf_feedback_v1,
wayland_server::{protocol::wl_surface::WlSurface, Display},
},
wayland::dmabuf::{
DmabufFeedback, DmabufFeedbackBuilder, DmabufGlobal, DmabufHandler, DmabufState,
ImportError,
},
};
use self::drm::{BackendData, SurfaceComposition, UdevOutputId};
use crate::{cursor::Cursor, drawing::PointerElement, state::Backend, CalloopData, Corrosion};
mod drm;
mod utils;
pub struct UdevData {
pub loop_signal: LoopSignal,
pub session: LibSeatSession,
primary_gpu: DrmNode,
dmabuf_state: Option<(DmabufState, DmabufGlobal)>,
allocator: Option<Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>>,
gpu_manager: GpuManager<GbmGlesBackend<GlesRenderer>>,
backends: HashMap<DrmNode, BackendData>,
pointer_element: PointerElement<MultiTexture>,
cursor_image: Cursor,
cursor_images: Vec<(xcursor::parser::Image, TextureBuffer<MultiTexture>)>,
}
impl DmabufHandler for Corrosion<UdevData> {
fn dmabuf_state(&mut self) -> &mut smithay::wayland::dmabuf::DmabufState {
&mut self.backend_data.dmabuf_state.as_mut().unwrap().0
}
fn dmabuf_imported(
&mut self,
_global: &DmabufGlobal,
dmabuf: Dmabuf,
) -> Result<(), smithay::wayland::dmabuf::ImportError> {
self.backend_data
.gpu_manager
.single_renderer(&self.backend_data.primary_gpu)
.and_then(|mut renderer| renderer.import_dmabuf(&dmabuf, None))
.map(|_| ())
.map_err(|_| ImportError::Failed)
}
}
delegate_dmabuf!(Corrosion<UdevData>);
impl Backend for UdevData {
fn early_import(&mut self, output: &WlSurface) {
match self
.gpu_manager
.early_import(Some(self.primary_gpu), self.primary_gpu, output)
{
Ok(()) => {}
Err(err) => tracing::error!("Error on early buffer import: {}", err),
};
}
fn loop_signal(&self) -> &LoopSignal {
&self.loop_signal
}
fn reset_buffers(&mut self, output: &smithay::output::Output) {
if let Some(id) = output.user_data().get::<UdevOutputId>() {
if let Some(gpu) = self.backends.get_mut(&id.device_id) {
if let Some(surface) = gpu.surfaces.get_mut(&id.crtc) {
surface.compositor.reset_buffers();
}
}
}
}
fn seat_name(&self) -> String {
self.session.seat()
}
}
pub fn initialize_backend() {
let mut event_loop = EventLoop::try_new().expect("Unable to initialize event loop");
let (session, mut _notifier) = match LibSeatSession::new() {
Ok((session, notifier)) => (session, notifier),
Err(err) => {
tracing::error!("Error in creating libseat session: {}", err);
return;
}
};
let mut display = Display::new().expect("Unable to create wayland display");
let primary_gpu = udev::primary_gpu(&session.seat())
.unwrap()
.and_then(|p| {
DrmNode::from_path(p)
.ok()
.expect("Unable to create drm node")
.node_with_type(NodeType::Render)
.expect("Unable to create drm node")
.ok()
})
.unwrap_or_else(|| {
udev::all_gpus(&session.seat())
.unwrap()
.into_iter()
.find_map(|g| DrmNode::from_path(g).ok())
.expect("no gpu")
});
tracing::info!("Using {} as a primary gpu", primary_gpu);
let gpus = GpuManager::new(GbmGlesBackend::default()).unwrap();
let data = UdevData {
loop_signal: event_loop.get_signal(),
dmabuf_state: None,
session,
primary_gpu,
allocator: None,
gpu_manager: gpus,
backends: HashMap::new(),
cursor_image: Cursor::load(),
cursor_images: Vec::new(),
pointer_element: PointerElement::default(),
};
let mut state = Corrosion::new(event_loop.handle(), &mut display, data);
let backend = match UdevBackend::new(&state.seat_name) {
Ok(backend) => backend,
Err(err) => {
tracing::error!("Unable to create udev backend: {}", err);
return;
}
};
for (dev, path) in backend.device_list() {
state.device_added(DrmNode::from_dev_id(dev).unwrap(), &path);
}
state.shm_state.update_formats(
state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap()
.shm_formats(),
);
if let Ok(instance) = Instance::new(Version::VERSION_1_2, None) {
if let Some(physical_device) =
PhysicalDevice::enumerate(&instance)
.ok()
.and_then(|devices| {
devices
.filter(|phd| phd.has_device_extension(ExtPhysicalDeviceDrmFn::name()))
.find(|phd| {
phd.primary_node().unwrap() == Some(primary_gpu)
|| phd.render_node().unwrap() == Some(primary_gpu)
})
})
{
match VulkanAllocator::new(
&physical_device,
ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::SAMPLED,
) {
Ok(allocator) => {
state.backend_data.allocator = Some(Box::new(DmabufAllocator(allocator))
as Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>);
}
Err(err) => {
tracing::warn!("Failed to create vulkan allocator: {}", err);
}
}
}
}
let mut libinput_context = Libinput::new_with_udev::<LibinputSessionInterface<LibSeatSession>>(
state.backend_data.session.clone().into(),
);
libinput_context.udev_assign_seat(&state.seat_name).unwrap();
let libinput_backend = LibinputInputBackend::new(libinput_context.clone());
state
.handle
.insert_source(libinput_backend, move |event, _, data| {
data.state.process_input_event(event);
})
.unwrap();
let gbm = state
.backend_data
.backends
.get(&primary_gpu)
// If the primary_gpu failed to initialize, we likely have a kmsro device
.or_else(|| state.backend_data.backends.values().next())
// Don't fail if there is no allocator. There is a chance that this is a single-GPU system and we don't need one.
.map(|backend| backend.gbm.clone());
state.backend_data.allocator = gbm.map(|gbm| {
Box::new(DmabufAllocator(GbmAllocator::new(
gbm,
GbmBufferFlags::RENDERING,
))) as Box<_> | #[cfg_attr(not(feature = "egl"), allow(unused_mut))]
let mut renderer = state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap();
#[cfg(feature = "egl")]
{
match renderer.bind_wl_display(&state.display_handle) {
Ok(_) => tracing::info!("Enabled egl hardware acceleration"),
Err(err) => tracing::error!("Error in enabling egl hardware acceleration: {:?}", err),
}
}
let dmabuf_formats = renderer.dmabuf_formats().collect::<Vec<_>>();
let default_feedback = DmabufFeedbackBuilder::new(primary_gpu.dev_id(), dmabuf_formats)
.build()
.unwrap();
let mut dmabuf_state = DmabufState::new();
let dmabuf_global = dmabuf_state.create_global_with_default_feedback::<Corrosion<UdevData>>(
&display.handle(),
&default_feedback,
);
state.backend_data.dmabuf_state = Some((dmabuf_state, dmabuf_global));
let gpus = &mut state.backend_data.gpu_manager;
state
.backend_data
.backends
.values_mut()
.for_each(|backend_data| {
backend_data.surfaces.values_mut().for_each(|surface_data| {
surface_data.dmabuf_feedback = surface_data.dmabuf_feedback.take().or_else(|| {
get_surface_dmabuf_feedback(
primary_gpu,
surface_data.render_node,
gpus,
&surface_data.compositor,
)
});
});
});
event_loop
.handle()
.insert_source(backend, move |event, _, data| match event {
udev::UdevEvent::Added { device_id, path } => {
data.state
.device_added(DrmNode::from_dev_id(device_id).unwrap(), &path);
}
udev::UdevEvent::Changed { device_id } => {
data.state
.device_changed(DrmNode::from_dev_id(device_id).unwrap());
}
udev::UdevEvent::Removed { device_id } => {
data.state
.device_removed(DrmNode::from_dev_id(device_id).unwrap());
}
})
.expect("Error inserting event loop source");
std::env::set_var("WAYLAND_DISPLAY", &state.socket_name);
let mut calloop_data = CalloopData { state, display };
event_loop
.run(
std::time::Duration::from_millis(16),
&mut calloop_data,
|data| {
data.state.space.refresh();
data.state.popup_manager.cleanup();
data.display.flush_clients().unwrap();
},
)
.unwrap();
}
pub struct DrmSurfaceDmabufFeedback {
render_feedback: DmabufFeedback,
scanout_feedback: DmabufFeedback,
}
fn get_surface_dmabuf_feedback(
primary_node: DrmNode,
render_node: DrmNode,
gpus: &mut GpuManager<GbmGlesBackend<GlesRenderer>>,
composition: &SurfaceComposition,
) -> Option<DrmSurfaceDmabufFeedback> {
let primary_formats = gpus
.single_renderer(&primary_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let render_formats = gpus
.single_renderer(&render_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let all_render_formats = primary_formats
.iter()
.chain(render_formats.iter())
.copied()
.collect::<HashSet<_>>();
let surface = composition.surface();
let planes = surface.planes().unwrap();
let planes_formats = surface
.supported_formats(planes.primary.handle)
.unwrap()
.into_iter()
.chain(
planes
.overlay
.iter()
.flat_map(|p| surface.supported_formats(p.handle).unwrap()),
)
.collect::<HashSet<_>>()
.intersection(&all_render_formats)
.copied()
.collect::<Vec<_>>();
let builder = DmabufFeedbackBuilder::new(primary_node.dev_id(), primary_formats);
let render_feedback = builder
.clone()
.add_preference_tranche(render_node.dev_id(), None, render_formats.clone())
.build()
.unwrap();
let scanout_feedback = builder
.clone()
.add_preference_tranche(
surface.device_fd().dev_id().unwrap(),
Some(zwp_linux_dmabuf_feedback_v1::TrancheFlags::Scanout),
planes_formats,
)
.add_preference_tranche(render_node.dev_id(), None, render_formats)
.build()
.unwrap();
Some(DrmSurfaceDmabufFeedback {
render_feedback,
scanout_feedback,
})
} | }); | random_line_split |
mod.rs | use std::collections::{HashMap, HashSet};
#[cfg(feature = "egl")]
use smithay::backend::renderer::ImportEgl;
use smithay::{
backend::{
allocator::{
dmabuf::{AnyError, Dmabuf, DmabufAllocator},
gbm::{GbmAllocator, GbmBufferFlags},
vulkan::{ImageUsageFlags, VulkanAllocator},
Allocator,
},
drm::{DrmNode, NodeType},
libinput::{LibinputInputBackend, LibinputSessionInterface},
renderer::{
element::texture::TextureBuffer,
gles::GlesRenderer,
multigpu::{gbm::GbmGlesBackend, GpuManager, MultiTexture},
ImportDma, ImportMemWl,
},
session::libseat::LibSeatSession,
session::Session,
udev::{self, UdevBackend},
vulkan::{version::Version, Instance, PhysicalDevice},
},
delegate_dmabuf,
reexports::{
ash::vk::ExtPhysicalDeviceDrmFn,
calloop::{EventLoop, LoopSignal},
input::Libinput,
wayland_protocols::wp::linux_dmabuf::zv1::server::zwp_linux_dmabuf_feedback_v1,
wayland_server::{protocol::wl_surface::WlSurface, Display},
},
wayland::dmabuf::{
DmabufFeedback, DmabufFeedbackBuilder, DmabufGlobal, DmabufHandler, DmabufState,
ImportError,
},
};
use self::drm::{BackendData, SurfaceComposition, UdevOutputId};
use crate::{cursor::Cursor, drawing::PointerElement, state::Backend, CalloopData, Corrosion};
mod drm;
mod utils;
pub struct UdevData {
pub loop_signal: LoopSignal,
pub session: LibSeatSession,
primary_gpu: DrmNode,
dmabuf_state: Option<(DmabufState, DmabufGlobal)>,
allocator: Option<Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>>,
gpu_manager: GpuManager<GbmGlesBackend<GlesRenderer>>,
backends: HashMap<DrmNode, BackendData>,
pointer_element: PointerElement<MultiTexture>,
cursor_image: Cursor,
cursor_images: Vec<(xcursor::parser::Image, TextureBuffer<MultiTexture>)>,
}
impl DmabufHandler for Corrosion<UdevData> {
fn dmabuf_state(&mut self) -> &mut smithay::wayland::dmabuf::DmabufState {
&mut self.backend_data.dmabuf_state.as_mut().unwrap().0
}
fn dmabuf_imported(
&mut self,
_global: &DmabufGlobal,
dmabuf: Dmabuf,
) -> Result<(), smithay::wayland::dmabuf::ImportError> {
self.backend_data
.gpu_manager
.single_renderer(&self.backend_data.primary_gpu)
.and_then(|mut renderer| renderer.import_dmabuf(&dmabuf, None))
.map(|_| ())
.map_err(|_| ImportError::Failed)
}
}
delegate_dmabuf!(Corrosion<UdevData>);
impl Backend for UdevData {
fn early_import(&mut self, output: &WlSurface) {
match self
.gpu_manager
.early_import(Some(self.primary_gpu), self.primary_gpu, output)
{
Ok(()) => {}
Err(err) => tracing::error!("Error on early buffer import: {}", err),
};
}
fn loop_signal(&self) -> &LoopSignal |
fn reset_buffers(&mut self, output: &smithay::output::Output) {
if let Some(id) = output.user_data().get::<UdevOutputId>() {
if let Some(gpu) = self.backends.get_mut(&id.device_id) {
if let Some(surface) = gpu.surfaces.get_mut(&id.crtc) {
surface.compositor.reset_buffers();
}
}
}
}
fn seat_name(&self) -> String {
self.session.seat()
}
}
pub fn initialize_backend() {
let mut event_loop = EventLoop::try_new().expect("Unable to initialize event loop");
let (session, mut _notifier) = match LibSeatSession::new() {
Ok((session, notifier)) => (session, notifier),
Err(err) => {
tracing::error!("Error in creating libseat session: {}", err);
return;
}
};
let mut display = Display::new().expect("Unable to create wayland display");
let primary_gpu = udev::primary_gpu(&session.seat())
.unwrap()
.and_then(|p| {
DrmNode::from_path(p)
.ok()
.expect("Unable to create drm node")
.node_with_type(NodeType::Render)
.expect("Unable to create drm node")
.ok()
})
.unwrap_or_else(|| {
udev::all_gpus(&session.seat())
.unwrap()
.into_iter()
.find_map(|g| DrmNode::from_path(g).ok())
.expect("no gpu")
});
tracing::info!("Using {} as a primary gpu", primary_gpu);
let gpus = GpuManager::new(GbmGlesBackend::default()).unwrap();
let data = UdevData {
loop_signal: event_loop.get_signal(),
dmabuf_state: None,
session,
primary_gpu,
allocator: None,
gpu_manager: gpus,
backends: HashMap::new(),
cursor_image: Cursor::load(),
cursor_images: Vec::new(),
pointer_element: PointerElement::default(),
};
let mut state = Corrosion::new(event_loop.handle(), &mut display, data);
let backend = match UdevBackend::new(&state.seat_name) {
Ok(backend) => backend,
Err(err) => {
tracing::error!("Unable to create udev backend: {}", err);
return;
}
};
for (dev, path) in backend.device_list() {
state.device_added(DrmNode::from_dev_id(dev).unwrap(), &path);
}
state.shm_state.update_formats(
state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap()
.shm_formats(),
);
if let Ok(instance) = Instance::new(Version::VERSION_1_2, None) {
if let Some(physical_device) =
PhysicalDevice::enumerate(&instance)
.ok()
.and_then(|devices| {
devices
.filter(|phd| phd.has_device_extension(ExtPhysicalDeviceDrmFn::name()))
.find(|phd| {
phd.primary_node().unwrap() == Some(primary_gpu)
|| phd.render_node().unwrap() == Some(primary_gpu)
})
})
{
match VulkanAllocator::new(
&physical_device,
ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::SAMPLED,
) {
Ok(allocator) => {
state.backend_data.allocator = Some(Box::new(DmabufAllocator(allocator))
as Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>);
}
Err(err) => {
tracing::warn!("Failed to create vulkan allocator: {}", err);
}
}
}
}
let mut libinput_context = Libinput::new_with_udev::<LibinputSessionInterface<LibSeatSession>>(
state.backend_data.session.clone().into(),
);
libinput_context.udev_assign_seat(&state.seat_name).unwrap();
let libinput_backend = LibinputInputBackend::new(libinput_context.clone());
state
.handle
.insert_source(libinput_backend, move |event, _, data| {
data.state.process_input_event(event);
})
.unwrap();
let gbm = state
.backend_data
.backends
.get(&primary_gpu)
// If the primary_gpu failed to initialize, we likely have a kmsro device
.or_else(|| state.backend_data.backends.values().next())
// Don't fail if there is no allocator. There is a chance that this is a single-GPU system and we don't need one.
.map(|backend| backend.gbm.clone());
state.backend_data.allocator = gbm.map(|gbm| {
Box::new(DmabufAllocator(GbmAllocator::new(
gbm,
GbmBufferFlags::RENDERING,
))) as Box<_>
});
#[cfg_attr(not(feature = "egl"), allow(unused_mut))]
let mut renderer = state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap();
#[cfg(feature = "egl")]
{
match renderer.bind_wl_display(&state.display_handle) {
Ok(_) => tracing::info!("Enabled egl hardware acceleration"),
Err(err) => tracing::error!("Error in enabling egl hardware acceleration: {:?}", err),
}
}
let dmabuf_formats = renderer.dmabuf_formats().collect::<Vec<_>>();
let default_feedback = DmabufFeedbackBuilder::new(primary_gpu.dev_id(), dmabuf_formats)
.build()
.unwrap();
let mut dmabuf_state = DmabufState::new();
let dmabuf_global = dmabuf_state.create_global_with_default_feedback::<Corrosion<UdevData>>(
&display.handle(),
&default_feedback,
);
state.backend_data.dmabuf_state = Some((dmabuf_state, dmabuf_global));
let gpus = &mut state.backend_data.gpu_manager;
state
.backend_data
.backends
.values_mut()
.for_each(|backend_data| {
backend_data.surfaces.values_mut().for_each(|surface_data| {
surface_data.dmabuf_feedback = surface_data.dmabuf_feedback.take().or_else(|| {
get_surface_dmabuf_feedback(
primary_gpu,
surface_data.render_node,
gpus,
&surface_data.compositor,
)
});
});
});
event_loop
.handle()
.insert_source(backend, move |event, _, data| match event {
udev::UdevEvent::Added { device_id, path } => {
data.state
.device_added(DrmNode::from_dev_id(device_id).unwrap(), &path);
}
udev::UdevEvent::Changed { device_id } => {
data.state
.device_changed(DrmNode::from_dev_id(device_id).unwrap());
}
udev::UdevEvent::Removed { device_id } => {
data.state
.device_removed(DrmNode::from_dev_id(device_id).unwrap());
}
})
.expect("Error inserting event loop source");
std::env::set_var("WAYLAND_DISPLAY", &state.socket_name);
let mut calloop_data = CalloopData { state, display };
event_loop
.run(
std::time::Duration::from_millis(16),
&mut calloop_data,
|data| {
data.state.space.refresh();
data.state.popup_manager.cleanup();
data.display.flush_clients().unwrap();
},
)
.unwrap();
}
pub struct DrmSurfaceDmabufFeedback {
render_feedback: DmabufFeedback,
scanout_feedback: DmabufFeedback,
}
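// Computes per-surface dmabuf feedback: the dmabuf formats of the primary and
// render nodes are combined, intersected with the formats supported by the
// surface's primary and overlay planes, and used to build two feedback sets:
// one preferring the render node (render feedback) and one that additionally
// prefers scanout-capable formats on the surface's own device (scanout feedback).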
fn get_surface_dmabuf_feedback(
primary_node: DrmNode,
render_node: DrmNode,
gpus: &mut GpuManager<GbmGlesBackend<GlesRenderer>>,
composition: &SurfaceComposition,
) -> Option<DrmSurfaceDmabufFeedback> {
let primary_formats = gpus
.single_renderer(&primary_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let render_formats = gpus
.single_renderer(&render_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let all_render_formats = primary_formats
.iter()
.chain(render_formats.iter())
.copied()
.collect::<HashSet<_>>();
let surface = composition.surface();
let planes = surface.planes().unwrap();
let planes_formats = surface
.supported_formats(planes.primary.handle)
.unwrap()
.into_iter()
.chain(
planes
.overlay
.iter()
.flat_map(|p| surface.supported_formats(p.handle).unwrap()),
)
.collect::<HashSet<_>>()
.intersection(&all_render_formats)
.copied()
.collect::<Vec<_>>();
let builder = DmabufFeedbackBuilder::new(primary_node.dev_id(), primary_formats);
let render_feedback = builder
.clone()
.add_preference_tranche(render_node.dev_id(), None, render_formats.clone())
.build()
.unwrap();
let scanout_feedback = builder
.clone()
.add_preference_tranche(
surface.device_fd().dev_id().unwrap(),
Some(zwp_linux_dmabuf_feedback_v1::TrancheFlags::Scanout),
planes_formats,
)
.add_preference_tranche(render_node.dev_id(), None, render_formats)
.build()
.unwrap();
Some(DrmSurfaceDmabufFeedback {
render_feedback,
scanout_feedback,
})
}
| {
&self.loop_signal
} | identifier_body |
mod.rs | //! A batteries included runtime for applications using Tokio.
//!
//! Applications using Tokio require some runtime support in order to work:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! While it is possible to set up each component manually, this involves a bunch
//! of boilerplate.
//!
//! [`Runtime`] bundles all of these various runtime components into a single
//! handle that can be started and shutdown together, eliminating the necessary
//! boilerplate to run a Tokio application.
//!
//! Most applications won't need to use [`Runtime`] directly. Instead, they will
//! use the [`run`] function, which uses [`Runtime`] under the hood.
//!
//! Creating a [`Runtime`] does the following:
//!
//! * Spawn a background thread running a [`Reactor`] instance.
//! * Start a [`ThreadPool`] for executing futures.
//! * Run an instance of [`Timer`] **per** thread pool worker thread.
//!
//! The thread pool uses a work-stealing strategy and is configured to start a
//! worker thread for each CPU core available on the system. This tends to be
//! the ideal setup for Tokio applications.
//!
//! A timer per thread pool worker thread is used to minimize the amount of
//! synchronization that is required for working with the timer.
//!
//! # Usage
//!
//! Most applications will use the [`run`] function. This takes a future to
//! "seed" the application, blocking the thread until the runtime becomes
//! [idle].
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! tokio::run(server);
//! # }
//! # pub fn main() {}
//! ```
//!
//! In this example, the `run` function blocks until the runtime becomes idle.
//! See [`shutdown_on_idle`][idle] for more shutdown details.
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::runtime::Runtime;
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! // Create the runtime
//! let mut rt = Runtime::new().unwrap();
//!
//! // Spawn the server task
//! rt.spawn(server);
//!
//! // Wait until the runtime becomes idle and shut it down.
//! rt.shutdown_on_idle()
//! .wait().unwrap();
//! # }
//! # pub fn main() {}
//! ```
//!
//! [reactor]: ../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]: ../timer/index.html
//! [`Runtime`]: struct.Runtime.html
//! [`Reactor`]: ../reactor/struct.Reactor.html
//! [`ThreadPool`]: ../executor/thread_pool/struct.ThreadPool.html
//! [`run`]: fn.run.html
//! [idle]: struct.Runtime.html#method.shutdown_on_idle
//! [`tokio::spawn`]: ../executor/fn.spawn.html
//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html
mod builder;
pub mod current_thread;
mod shutdown;
mod task_executor;
pub use self::builder::Builder;
pub use self::shutdown::Shutdown;
pub use self::task_executor::TaskExecutor;
use reactor::Handle;
use std::io;
use tokio_executor::enter;
use tokio_threadpool as threadpool;
use futures;
use futures::future::Future;
/// Handle to the Tokio runtime.
///
/// The Tokio runtime includes a reactor as well as an executor for running
/// tasks.
///
/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
/// most users will use [`tokio::run`], which uses a `Runtime` internally.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [`new`]: #method.new
/// [`Builder`]: struct.Builder.html
/// [`tokio::run`]: fn.run.html
#[derive(Debug)]
pub struct Runtime {
inner: Option<Inner>,
}
#[derive(Debug)]
struct Inner {
/// A handle to one of the per-worker reactors.
reactor: Handle,
/// Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]: ../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send + 'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e. until
/// tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn new() -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send + 'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
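///
/// # Examples
///
/// A minimal sketch, assuming `futures` 0.1 is in scope:
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// use futures::future;
/// use tokio::runtime::Runtime;
///
/// let mut rt = Runtime::new().unwrap();
/// // Drive a trivial future to completion and take its resolved value.
/// let result = rt.block_on(future::ok::<u32, ()>(42));
/// assert_eq!(result, Ok(42));
/// ```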
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let (tx, rx) = futures::sync::oneshot::channel();
self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!())));
rx.wait().unwrap()
}
/// Run a future to completion on the Tokio runtime, then wait for all
/// background futures to complete too.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, waiting for background futures to complete, and yielding
/// its resolved result. Any tasks or timers which the future spawns
/// internally will be executed on the runtime and waited for completion.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
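///
/// # Examples
///
/// A minimal sketch, assuming `futures` 0.1 is in scope:
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// use futures::future;
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new().unwrap();
/// // Consumes the runtime, waits for all background tasks, and yields the result.
/// let result = rt.block_on_all(future::ok::<&str, ()>("done"));
/// assert_eq!(result, Ok("done"));
/// ```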
pub fn block_on_all<F, R, E>(mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let res = self.block_on(future);
self.shutdown_on_idle().wait().unwrap();
res
}
/// Signals the runtime to shut down once it becomes idle.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function can be used to perform a graceful shutdown of the runtime.
///
/// The runtime enters an idle state once **all** of the following occur.
///
/// * The thread pool has no tasks to execute, i.e., all tasks that were
/// spawned have completed.
/// * The reactor is not managing any I/O resources.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_on_idle()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_on_idle(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
let inner = inner.pool.shutdown_on_idle();
Shutdown { inner }
}
/// Signals the runtime to shut down immediately.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function will forcibly shut down the runtime, causing any
/// in-progress work to become canceled. The shutdown steps are:
///
/// * Drain any scheduled work queues.
/// * Drop any futures that have not yet completed.
/// * Drop the reactor.
///
/// Once the reactor has dropped, any outstanding I/O resources bound to
/// that reactor will no longer function. Calling any method on them will
/// result in an error.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_now(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
Shutdown::shutdown_now(inner)
}
fn inner(&self) -> &Inner {
self.inner.as_ref().unwrap()
}
fn inner_mut(&mut self) -> &mut Inner {
self.inner.as_mut().unwrap()
}
}
impl Drop for Runtime {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() |
}
}
| {
let shutdown = Shutdown::shutdown_now(inner);
let _ = shutdown.wait();
} | conditional_block |
mod.rs | //! A batteries included runtime for applications using Tokio.
//!
//! Applications using Tokio require some runtime support in order to work:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! While it is possible to set up each component manually, this involves a bunch
//! of boilerplate.
//!
//! [`Runtime`] bundles all of these various runtime components into a single
//! handle that can be started and shutdown together, eliminating the necessary
//! boilerplate to run a Tokio application.
//!
//! Most applications won't need to use [`Runtime`] directly. Instead, they will
//! use the [`run`] function, which uses [`Runtime`] under the hood.
//!
//! Creating a [`Runtime`] does the following:
//!
//! * Spawn a background thread running a [`Reactor`] instance.
//! * Start a [`ThreadPool`] for executing futures.
//! * Run an instance of [`Timer`] **per** thread pool worker thread.
//!
//! The thread pool uses a work-stealing strategy and is configured to start a
//! worker thread for each CPU core available on the system. This tends to be
//! the ideal setup for Tokio applications.
//!
//! A timer per thread pool worker thread is used to minimize the amount of
//! synchronization that is required for working with the timer.
//!
//! # Usage
//!
//! Most applications will use the [`run`] function. This takes a future to
//! "seed" the application, blocking the thread until the runtime becomes
//! [idle].
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! tokio::run(server);
//! # }
//! # pub fn main() {}
//! ```
//!
//! In this example, the `run` function blocks until the runtime becomes idle.
//! See [`shutdown_on_idle`][idle] for more shutdown details.
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::runtime::Runtime;
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! // Create the runtime
//! let mut rt = Runtime::new().unwrap();
//!
//! // Spawn the server task
//! rt.spawn(server);
//!
//! // Wait until the runtime becomes idle and shut it down.
//! rt.shutdown_on_idle()
//! .wait().unwrap();
//! # }
//! # pub fn main() {}
//! ```
//!
//! [reactor]: ../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]: ../timer/index.html
//! [`Runtime`]: struct.Runtime.html
//! [`Reactor`]: ../reactor/struct.Reactor.html
//! [`ThreadPool`]: ../executor/thread_pool/struct.ThreadPool.html
//! [`run`]: fn.run.html
//! [idle]: struct.Runtime.html#method.shutdown_on_idle
//! [`tokio::spawn`]: ../executor/fn.spawn.html
//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html
mod builder;
pub mod current_thread;
mod shutdown;
mod task_executor;
pub use self::builder::Builder;
pub use self::shutdown::Shutdown;
pub use self::task_executor::TaskExecutor;
use reactor::Handle;
use std::io;
use tokio_executor::enter;
use tokio_threadpool as threadpool;
use futures;
use futures::future::Future;
/// Handle to the Tokio runtime.
///
/// The Tokio runtime includes a reactor as well as an executor for running
/// tasks.
///
/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
/// most users will use [`tokio::run`], which uses a `Runtime` internally.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [`new`]: #method.new
/// [`Builder`]: struct.Builder.html
/// [`tokio::run`]: fn.run.html
#[derive(Debug)]
pub struct Runtime {
inner: Option<Inner>,
}
#[derive(Debug)]
struct Inner {
/// A handle to one of the per-worker reactors.
reactor: Handle,
/// Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]: ../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send + 'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e. until
/// tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn new() -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send + 'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
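///
/// # Examples
///
/// A minimal sketch, assuming `futures` 0.1 is in scope:
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// use futures::future;
/// use tokio::runtime::Runtime;
///
/// let mut rt = Runtime::new().unwrap();
/// // Drive a trivial future to completion and take its resolved value.
/// let result = rt.block_on(future::ok::<u32, ()>(42));
/// assert_eq!(result, Ok(42));
/// ```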
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let (tx, rx) = futures::sync::oneshot::channel();
self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!())));
rx.wait().unwrap()
}
/// Run a future to completion on the Tokio runtime, then wait for all
/// background futures to complete too.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, waiting for background futures to complete, and yielding
/// its resolved result. Any tasks or timers which the future spawns
/// internally will be executed on the runtime and waited for completion.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
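///
/// # Examples
///
/// A minimal sketch, assuming `futures` 0.1 is in scope:
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// use futures::future;
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new().unwrap();
/// // Consumes the runtime, waits for all background tasks, and yields the result.
/// let result = rt.block_on_all(future::ok::<&str, ()>("done"));
/// assert_eq!(result, Ok("done"));
/// ```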
pub fn block_on_all<F, R, E>(mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let res = self.block_on(future);
self.shutdown_on_idle().wait().unwrap();
res
}
/// Signals the runtime to shut down once it becomes idle.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function can be used to perform a graceful shutdown of the runtime.
///
/// The runtime enters an idle state once **all** of the following occur.
///
/// * The thread pool has no tasks to execute, i.e., all tasks that were
/// spawned have completed.
/// * The reactor is not managing any I/O resources.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_on_idle()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_on_idle(mut self) -> Shutdown |
/// Signals the runtime to shut down immediately.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function will forcibly shut down the runtime, causing any
/// in-progress work to become canceled. The shutdown steps are:
///
/// * Drain any scheduled work queues.
/// * Drop any futures that have not yet completed.
/// * Drop the reactor.
///
/// Once the reactor has dropped, any outstanding I/O resources bound to
/// that reactor will no longer function. Calling any method on them will
/// result in an error.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_now(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
Shutdown::shutdown_now(inner)
}
fn inner(&self) -> &Inner {
self.inner.as_ref().unwrap()
}
fn inner_mut(&mut self) -> &mut Inner {
self.inner.as_mut().unwrap()
}
}
impl Drop for Runtime {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
let shutdown = Shutdown::shutdown_now(inner);
let _ = shutdown.wait();
}
}
}
| {
let inner = self.inner.take().unwrap();
let inner = inner.pool.shutdown_on_idle();
Shutdown { inner }
} | identifier_body |
mod.rs | //! A batteries included runtime for applications using Tokio.
//!
//! Applications using Tokio require some runtime support in order to work:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! While it is possible to set up each component manually, this involves a bunch
//! of boilerplate.
//!
//! [`Runtime`] bundles all of these various runtime components into a single
//! handle that can be started and shutdown together, eliminating the necessary
//! boilerplate to run a Tokio application.
//!
//! Most applications won't need to use [`Runtime`] directly. Instead, they will
//! use the [`run`] function, which uses [`Runtime`] under the hood.
//!
//! Creating a [`Runtime`] does the following:
//!
//! * Spawn a background thread running a [`Reactor`] instance.
//! * Start a [`ThreadPool`] for executing futures.
//! * Run an instance of [`Timer`] **per** thread pool worker thread.
//!
//! The thread pool uses a work-stealing strategy and is configured to start a
//! worker thread for each CPU core available on the system. This tends to be
//! the ideal setup for Tokio applications.
//!
//! A timer per thread pool worker thread is used to minimize the amount of
//! synchronization that is required for working with the timer.
//!
//! # Usage
//!
//! Most applications will use the [`run`] function. This takes a future to
//! "seed" the application, blocking the thread until the runtime becomes
//! [idle].
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! tokio::run(server);
//! # }
//! # pub fn main() {}
//! ```
//!
//! In this example, the `run` function blocks until the runtime becomes idle.
//! See [`shutdown_on_idle`][idle] for more shutdown details.
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::runtime::Runtime;
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! // Create the runtime
//! let mut rt = Runtime::new().unwrap();
//!
//! // Spawn the server task
//! rt.spawn(server);
//!
//! // Wait until the runtime becomes idle and shut it down.
//! rt.shutdown_on_idle()
//! .wait().unwrap();
//! # }
//! # pub fn main() {}
//! ```
//!
//! [reactor]: ../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]: ../timer/index.html
//! [`Runtime`]: struct.Runtime.html
//! [`Reactor`]: ../reactor/struct.Reactor.html
//! [`ThreadPool`]: ../executor/thread_pool/struct.ThreadPool.html
//! [`run`]: fn.run.html
//! [idle]: struct.Runtime.html#method.shutdown_on_idle
//! [`tokio::spawn`]: ../executor/fn.spawn.html
//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html
mod builder;
pub mod current_thread;
mod shutdown;
mod task_executor;
pub use self::builder::Builder;
pub use self::shutdown::Shutdown;
pub use self::task_executor::TaskExecutor;
use reactor::Handle;
use std::io;
use tokio_executor::enter;
use tokio_threadpool as threadpool;
use futures;
use futures::future::Future;
/// Handle to the Tokio runtime.
///
/// The Tokio runtime includes a reactor as well as an executor for running
/// tasks.
///
/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
/// most users will use [`tokio::run`], which uses a `Runtime` internally.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [`new`]: #method.new
/// [`Builder`]: struct.Builder.html
/// [`tokio::run`]: fn.run.html
#[derive(Debug)]
pub struct Runtime {
inner: Option<Inner>,
}
#[derive(Debug)]
struct Inner {
/// A handle to one of the per-worker reactors.
reactor: Handle,
/// Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]: ../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send + 'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e. until
/// tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn new() -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send + 'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
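///
/// # Examples
///
/// A minimal sketch, assuming `futures` 0.1 is in scope:
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// use futures::future;
/// use tokio::runtime::Runtime;
///
/// let mut rt = Runtime::new().unwrap();
/// // Drive a trivial future to completion and take its resolved value.
/// let result = rt.block_on(future::ok::<u32, ()>(42));
/// assert_eq!(result, Ok(42));
/// ```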
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where | F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let (tx, rx) = futures::sync::oneshot::channel();
self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!())));
rx.wait().unwrap()
}
/// Run a future to completion on the Tokio runtime, then wait for all
/// background futures to complete too.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, waiting for background futures to complete, and yielding
/// its resolved result. Any tasks or timers which the future spawns
/// internally will be executed on the runtime and waited for completion.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
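///
/// # Examples
///
/// A minimal sketch, assuming `futures` 0.1 is in scope:
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// use futures::future;
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new().unwrap();
/// // Consumes the runtime, waits for all background tasks, and yields the result.
/// let result = rt.block_on_all(future::ok::<&str, ()>("done"));
/// assert_eq!(result, Ok("done"));
/// ```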
pub fn block_on_all<F, R, E>(mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let res = self.block_on(future);
self.shutdown_on_idle().wait().unwrap();
res
}
/// Signals the runtime to shut down once it becomes idle.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function can be used to perform a graceful shutdown of the runtime.
///
/// The runtime enters an idle state once **all** of the following occur.
///
/// * The thread pool has no tasks to execute, i.e., all tasks that were
/// spawned have completed.
/// * The reactor is not managing any I/O resources.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_on_idle()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_on_idle(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
let inner = inner.pool.shutdown_on_idle();
Shutdown { inner }
}
/// Signals the runtime to shut down immediately.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function will forcibly shut down the runtime, causing any
/// in-progress work to become canceled. The shutdown steps are:
///
/// * Drain any scheduled work queues.
/// * Drop any futures that have not yet completed.
/// * Drop the reactor.
///
/// Once the reactor has dropped, any outstanding I/O resources bound to
/// that reactor will no longer function. Calling any method on them will
/// result in an error.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_now(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
Shutdown::shutdown_now(inner)
}
fn inner(&self) -> &Inner {
self.inner.as_ref().unwrap()
}
fn inner_mut(&mut self) -> &mut Inner {
self.inner.as_mut().unwrap()
}
}
impl Drop for Runtime {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
let shutdown = Shutdown::shutdown_now(inner);
let _ = shutdown.wait();
}
}
} | random_line_split |
|
mod.rs | //! A batteries included runtime for applications using Tokio.
//!
//! Applications using Tokio require some runtime support in order to work:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! While it is possible to set up each component manually, this involves a bunch
//! of boilerplate.
//!
//! [`Runtime`] bundles all of these various runtime components into a single
//! handle that can be started and shutdown together, eliminating the necessary
//! boilerplate to run a Tokio application.
//!
//! Most applications won't need to use [`Runtime`] directly. Instead, they will
//! use the [`run`] function, which uses [`Runtime`] under the hood.
//!
//! Creating a [`Runtime`] does the following:
//!
//! * Spawn a background thread running a [`Reactor`] instance.
//! * Start a [`ThreadPool`] for executing futures.
//! * Run an instance of [`Timer`] **per** thread pool worker thread.
//!
//! The thread pool uses a work-stealing strategy and is configured to start a
//! worker thread for each CPU core available on the system. This tends to be
//! the ideal setup for Tokio applications.
//!
//! A timer per thread pool worker thread is used to minimize the amount of
//! synchronization that is required for working with the timer.
//!
//! # Usage
//!
//! Most applications will use the [`run`] function. This takes a future to
//! "seed" the application, blocking the thread until the runtime becomes
//! [idle].
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! tokio::run(server);
//! # }
//! # pub fn main() {}
//! ```
//!
//! In this example, the `run` function blocks until the runtime becomes idle.
//! See [`shutdown_on_idle`][idle] for more shutdown details.
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::runtime::Runtime;
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! // Create the runtime
//! let mut rt = Runtime::new().unwrap();
//!
//! // Spawn the server task
//! rt.spawn(server);
//!
//! // Wait until the runtime becomes idle and shut it down.
//! rt.shutdown_on_idle()
//! .wait().unwrap();
//! # }
//! # pub fn main() {}
//! ```
//!
//! [reactor]: ../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]: ../timer/index.html
//! [`Runtime`]: struct.Runtime.html
//! [`Reactor`]: ../reactor/struct.Reactor.html
//! [`ThreadPool`]: ../executor/thread_pool/struct.ThreadPool.html
//! [`run`]: fn.run.html
//! [idle]: struct.Runtime.html#method.shutdown_on_idle
//! [`tokio::spawn`]: ../executor/fn.spawn.html
//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html
mod builder;
pub mod current_thread;
mod shutdown;
mod task_executor;
pub use self::builder::Builder;
pub use self::shutdown::Shutdown;
pub use self::task_executor::TaskExecutor;
use reactor::Handle;
use std::io;
use tokio_executor::enter;
use tokio_threadpool as threadpool;
use futures;
use futures::future::Future;
/// Handle to the Tokio runtime.
///
/// The Tokio runtime includes a reactor as well as an executor for running
/// tasks.
///
/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
/// most users will use [`tokio::run`], which uses a `Runtime` internally.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [`new`]: #method.new
/// [`Builder`]: struct.Builder.html
/// [`tokio::run`]: fn.run.html
#[derive(Debug)]
pub struct Runtime {
inner: Option<Inner>,
}
#[derive(Debug)]
struct Inner {
/// A handle to one of the per-worker reactors.
reactor: Handle,
/// Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]: ../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send + 'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e. until
/// tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn | () -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send + 'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
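///
/// # Examples
///
/// A minimal sketch, assuming `futures` 0.1 is in scope:
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// use futures::future;
/// use tokio::runtime::Runtime;
///
/// let mut rt = Runtime::new().unwrap();
/// // Drive a trivial future to completion and take its resolved value.
/// let result = rt.block_on(future::ok::<u32, ()>(42));
/// assert_eq!(result, Ok(42));
/// ```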
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let (tx, rx) = futures::sync::oneshot::channel();
self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!())));
rx.wait().unwrap()
}
/// Run a future to completion on the Tokio runtime, then wait for all
/// background futures to complete too.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, waiting for background futures to complete, and yielding
/// its resolved result. Any tasks or timers which the future spawns
/// internally will be executed on the runtime and waited for completion.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on_all<F, R, E>(mut self, future: F) -> Result<R, E>
where
F: Send + 'static + Future<Item = R, Error = E>,
R: Send + 'static,
E: Send + 'static,
{
let res = self.block_on(future);
self.shutdown_on_idle().wait().unwrap();
res
}
/// Signals the runtime to shutdown once it becomes idle.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function can be used to perform a graceful shutdown of the runtime.
///
/// The runtime enters an idle state once **all** of the following occur.
///
/// * The thread pool has no tasks to execute, i.e., all tasks that were
/// spawned have completed.
/// * The reactor is not managing any I/O resources.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_on_idle()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_on_idle(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
let inner = inner.pool.shutdown_on_idle();
Shutdown { inner }
}
/// Signals the runtime to shutdown immediately.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function will forcibly shutdown the runtime, causing any
/// in-progress work to become canceled. The shutdown steps are:
///
/// * Drain any scheduled work queues.
/// * Drop any futures that have not yet completed.
/// * Drop the reactor.
///
/// Once the reactor has dropped, any outstanding I/O resources bound to
/// that reactor will no longer function. Calling any method on them will
/// result in an error.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_now(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
Shutdown::shutdown_now(inner)
}
fn inner(&self) -> &Inner {
self.inner.as_ref().unwrap()
}
fn inner_mut(&mut self) -> &mut Inner {
self.inner.as_mut().unwrap()
}
}
impl Drop for Runtime {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
let shutdown = Shutdown::shutdown_now(inner);
let _ = shutdown.wait();
}
}
}
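// A minimal usage sketch of the legacy tokio 0.1 `Runtime` API documented above
// (assumes the external `futures` 0.1 crate, as in the doc examples); it is kept
// behind `#[cfg(test)]` so it does not affect the library code itself.
#[cfg(test)]
mod runtime_usage_sketch {
    use super::Runtime;
    use futures::future;

    #[test]
    fn spawn_then_block_on_all() {
        let mut rt = Runtime::new().unwrap();

        // Background work runs on one of the runtime's worker threads.
        rt.spawn(future::lazy(|| {
            println!("background task");
            Ok(())
        }));

        // Drive a main future to completion and also wait for spawned tasks.
        let res: Result<(), ()> = rt.block_on_all(future::lazy(|| Ok(())));
        res.unwrap();
    }
}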
manager.py | """
============================
The Component Manager System
============================
The :mod:`vivarium` component manager system is responsible for maintaining a
reference to all of the managers and components in a simulation, providing an
interface for adding additional components or managers, and applying default
configurations and initiating the ``setup`` stage of the lifecycle. This module
provides the default implementation and interface.
The :class:`ComponentManager` is the first plugin loaded by the
:class:`SimulationContext <vivarium.framework.engine.SimulationContext>`
and managers and components are given to it by the context. It is called on to
setup everything it holds when the context itself is setup.
"""
import inspect
import typing
from typing import Any, Dict, Iterator, List, Tuple, Type, Union
from vivarium.config_tree import ConfigurationError, DuplicatedConfigurationError
from vivarium.exceptions import VivariumError
if typing.TYPE_CHECKING:
from vivarium.framework.engine import Builder
class ComponentConfigError(VivariumError):
"""Error while interpreting configuration file or initializing components"""
pass
class OrderedComponentSet:
"""A container for Vivarium components.
It preserves ordering, enforces uniqueness by name, and provides a
subset of set-like semantics.
"""
def __init__(self, *args):
self.components = []
if args:
self.update(args)
def add(self, component: Any):
if component in self:
raise ComponentConfigError(
f"Attempting to add a component with duplicate name: {component}"
)
self.components.append(component)
def update(self, components: Union[List[Any], Tuple[Any]]):
for c in components:
self.add(c)
def pop(self) -> Any:
component = self.components.pop(0)
return component
def __contains__(self, component: Any) -> bool:
        if not hasattr(component, "name"):
            raise ComponentConfigError(f"Component {component} has no name attribute")
        return component.name in [c.name for c in self.components]
def __iter__(self) -> Iterator:
return iter(self.components)
def __len__(self) -> int:
return len(self.components)
def __bool__(self) -> bool:
return bool(self.components)
def __add__(self, other: "OrderedComponentSet") -> "OrderedComponentSet":
return OrderedComponentSet(*(self.components + other.components))
def __eq__(self, other: "OrderedComponentSet") -> bool:
try:
return type(self) is type(other) and [c.name for c in self.components] == [
c.name for c in other.components
]
except TypeError:
return False
def __getitem__(self, index: int) -> Any:
return self.components[index]
def __repr__(self):
return f"OrderedComponentSet({[c.name for c in self.components]})"
class ComponentManager:
"""Manages the initialization and setup of :mod:`vivarium` components.
Maintains references to all components and managers in a :mod:`vivarium`
simulation, applies their default configuration and initiates their
``setup`` life-cycle stage.
The component manager maintains a separate list of managers and components
and provides methods for adding to these lists and getting members that
correspond to a specific type. It also initiates the ``setup`` lifecycle
phase for all components and managers it controls. This is done first for
managers and then components, and involves applying default configurations
and calling the object's ``setup`` method.
"""
def __init__(self):
self._managers = OrderedComponentSet()
self._components = OrderedComponentSet()
self.configuration = None
self.lifecycle = None
@property
def name(self):
"""The name of this component."""
return "component_manager"
def setup(self, configuration, lifecycle_manager):
"""Called by the simulation context."""
self.configuration = configuration
self.lifecycle = lifecycle_manager
self.lifecycle.add_constraint(
self.get_components_by_type,
restrict_during=["initialization", "population_creation"],
)
self.lifecycle.add_constraint(
self.get_component, restrict_during=["population_creation"]
)
self.lifecycle.add_constraint(
self.list_components, restrict_during=["initialization"]
)
def add_managers(self, managers: Union[List[Any], Tuple[Any]]):
"""Registers new managers with the component manager.
Managers are configured and setup before components.
Parameters
----------
managers
Instantiated managers to register.
"""
for m in self._flatten(managers):
self.apply_configuration_defaults(m)
self._managers.add(m)
def add_components(self, components: Union[List[Any], Tuple[Any]]):
"""Register new components with the component manager.
Components are configured and setup after managers.
Parameters
----------
components
Instantiated components to register.
"""
for c in self._flatten(components):
self.apply_configuration_defaults(c)
self._components.add(c)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type.
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return [c for c in self._components if isinstance(c, component_type)]
def get_component(self, name: str) -> Any:
"""Get the component with name ``name``.
Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
Any
A component that has name ``name``.
Raises
------
ValueError
No component exists in the component manager with ``name``.
"""
for c in self._components:
if c.name == name:
return c
raise ValueError(f"No component found with name {name}")
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A mapping of component names to components.
"""
return {c.name: c for c in self._components}
def setup_components(self, builder: "Builder"):
"""Separately configure and set up the managers and components held by
the component manager, in that order.
The setup process involves applying default configurations and then
calling the manager or component's setup method. This can result in new
components as a side effect of setup because components themselves have
access to this interface through the builder in their setup method.
Parameters
----------
builder
Interface to several simulation tools.
"""
self._setup_components(builder, self._managers + self._components)
def apply_configuration_defaults(self, component: Any):
if not hasattr(component, "configuration_defaults"):
return
try:
self.configuration.update(
component.configuration_defaults,
layer="component_configs",
source=component.name,
)
except DuplicatedConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
old_name, old_file = e.source, self._get_file(self.get_component(e.source))
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"set the configuration value {e.value_name}, but it has already "
f"been set by {old_name} in file {old_file}."
)
except ConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"alter the structure of the configuration at key {e.value_name}. "
f"This happens if one component attempts to set a value at an interior "
f"configuration key or if it attempts to turn an interior key into a "
f"configuration value."
)
@staticmethod
def _get_file(component):
if component.__module__ == "__main__":
# This is defined directly in a script or notebook so there's no
# file to attribute it to.
return "__main__"
else:
return inspect.getfile(component.__class__)
@staticmethod
def _flatten(components: List):
out = []
components = components[::-1]
while components:
current = components.pop()
if isinstance(current, (list, tuple)):
components.extend(current[::-1])
else:
if hasattr(current, "sub_components"):
components.extend(current.sub_components[::-1])
out.append(current)
return out
@staticmethod
def _setup_components(builder: "Builder", components: OrderedComponentSet):
for c in components:
if hasattr(c, "setup"):
c.setup(builder)
def __repr__(self):
return "ComponentManager()"
class ComponentInterface:
"""The builder interface for the component manager system. This class
defines component manager methods a ``vivarium`` component can access from
the builder. It provides methods for querying and adding components to the
:class:`ComponentManager`.
"""
def __init__(self, manager: ComponentManager):
self._manager = manager
def get_component(self, name: str) -> Any:
"""Get the component that has ``name`` if presently held by the component
manager. Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
A component that has name ``name``.
"""
return self._manager.get_component(name)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type to retrieve, compared against internal components
using isinstance().
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return self._manager.get_components_by_type(component_type)
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A dictionary mapping component names to components.
"""
        return self._manager.list_components()
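# Illustrative sketch (hypothetical dummy components, not part of vivarium):
# shows OrderedComponentSet's name-based uniqueness and how
# ComponentManager._flatten expands nested lists and ``sub_components``.
if __name__ == "__main__":

    class _Dummy:
        def __init__(self, name, sub_components=()):
            self.name = name
            if sub_components:
                self.sub_components = list(sub_components)

    child = _Dummy("child")
    parent = _Dummy("parent", sub_components=[child])

    ordered = OrderedComponentSet(parent, child)
    print([c.name for c in ordered])  # ['parent', 'child']
    try:
        ordered.add(_Dummy("child"))  # duplicate name -> ComponentConfigError
    except ComponentConfigError as err:
        print(err)

    # _flatten walks nested lists and sub_components in registration order.
    flat = ComponentManager._flatten([[parent], _Dummy("other")])
    print([c.name for c in flat])  # ['parent', 'child', 'other']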
manager.py | """
============================
The Component Manager System
============================
The :mod:`vivarium` component manager system is responsible for maintaining a
reference to all of the managers and components in a simulation, providing an
interface for adding additional components or managers, and applying default
configurations and initiating the ``setup`` stage of the lifecycle. This module
provides the default implementation and interface.
The :class:`ComponentManager` is the first plugin loaded by the
:class:`SimulationContext <vivarium.framework.engine.SimulationContext>`
and managers and components are given to it by the context. It is called on to
setup everything it holds when the context itself is setup.
"""
import inspect
import typing
from typing import Any, Dict, Iterator, List, Tuple, Type, Union
from vivarium.config_tree import ConfigurationError, DuplicatedConfigurationError
from vivarium.exceptions import VivariumError
if typing.TYPE_CHECKING:
from vivarium.framework.engine import Builder
class ComponentConfigError(VivariumError):
"""Error while interpreting configuration file or initializing components"""
pass
class OrderedComponentSet:
"""A container for Vivarium components.
It preserves ordering, enforces uniqueness by name, and provides a
subset of set-like semantics.
"""
def __init__(self, *args):
self.components = []
if args:
self.update(args)
def add(self, component: Any):
if component in self:
raise ComponentConfigError(
f"Attempting to add a component with duplicate name: {component}"
)
self.components.append(component)
def update(self, components: Union[List[Any], Tuple[Any]]):
for c in components:
self.add(c)
def pop(self) -> Any:
component = self.components.pop(0)
return component
    def __contains__(self, component: Any) -> bool:
if not hasattr(component, "name"):
raise ComponentConfigError(f"Component {component} has no name attribute")
return component.name in [c.name for c in self.components]
def __iter__(self) -> Iterator:
return iter(self.components)
def __len__(self) -> int:
return len(self.components)
def __bool__(self) -> bool:
return bool(self.components)
def __add__(self, other: "OrderedComponentSet") -> "OrderedComponentSet":
return OrderedComponentSet(*(self.components + other.components))
def __eq__(self, other: "OrderedComponentSet") -> bool:
try:
return type(self) is type(other) and [c.name for c in self.components] == [
c.name for c in other.components
]
except TypeError:
return False
def __getitem__(self, index: int) -> Any:
return self.components[index]
def __repr__(self):
return f"OrderedComponentSet({[c.name for c in self.components]})"
class ComponentManager:
"""Manages the initialization and setup of :mod:`vivarium` components.
Maintains references to all components and managers in a :mod:`vivarium`
simulation, applies their default configuration and initiates their
``setup`` life-cycle stage.
The component manager maintains a separate list of managers and components
and provides methods for adding to these lists and getting members that
correspond to a specific type. It also initiates the ``setup`` lifecycle
phase for all components and managers it controls. This is done first for
managers and then components, and involves applying default configurations
and calling the object's ``setup`` method.
"""
def __init__(self):
self._managers = OrderedComponentSet()
self._components = OrderedComponentSet()
self.configuration = None
self.lifecycle = None
@property
def name(self):
"""The name of this component."""
return "component_manager"
def setup(self, configuration, lifecycle_manager):
"""Called by the simulation context."""
self.configuration = configuration
self.lifecycle = lifecycle_manager
self.lifecycle.add_constraint(
self.get_components_by_type,
restrict_during=["initialization", "population_creation"],
)
self.lifecycle.add_constraint(
self.get_component, restrict_during=["population_creation"]
)
self.lifecycle.add_constraint(
self.list_components, restrict_during=["initialization"]
)
def add_managers(self, managers: Union[List[Any], Tuple[Any]]):
"""Registers new managers with the component manager.
Managers are configured and setup before components.
Parameters
----------
managers
Instantiated managers to register.
"""
for m in self._flatten(managers):
self.apply_configuration_defaults(m)
self._managers.add(m)
def add_components(self, components: Union[List[Any], Tuple[Any]]):
"""Register new components with the component manager.
Components are configured and setup after managers.
Parameters
----------
components
Instantiated components to register.
"""
for c in self._flatten(components):
self.apply_configuration_defaults(c)
self._components.add(c)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type.
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return [c for c in self._components if isinstance(c, component_type)]
def get_component(self, name: str) -> Any:
"""Get the component with name ``name``.
Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
Any
A component that has name ``name``.
Raises
------
ValueError
No component exists in the component manager with ``name``.
"""
for c in self._components:
if c.name == name:
return c
raise ValueError(f"No component found with name {name}")
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A mapping of component names to components.
"""
return {c.name: c for c in self._components}
def setup_components(self, builder: "Builder"):
"""Separately configure and set up the managers and components held by
the component manager, in that order.
The setup process involves applying default configurations and then
calling the manager or component's setup method. This can result in new
components as a side effect of setup because components themselves have
access to this interface through the builder in their setup method.
Parameters
----------
builder
Interface to several simulation tools.
"""
self._setup_components(builder, self._managers + self._components)
def apply_configuration_defaults(self, component: Any):
if not hasattr(component, "configuration_defaults"):
return
try:
self.configuration.update(
component.configuration_defaults,
layer="component_configs",
source=component.name,
)
except DuplicatedConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
old_name, old_file = e.source, self._get_file(self.get_component(e.source))
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"set the configuration value {e.value_name}, but it has already "
f"been set by {old_name} in file {old_file}."
)
except ConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"alter the structure of the configuration at key {e.value_name}. "
f"This happens if one component attempts to set a value at an interior "
f"configuration key or if it attempts to turn an interior key into a "
f"configuration value."
)
@staticmethod
def _get_file(component):
if component.__module__ == "__main__":
# This is defined directly in a script or notebook so there's no
# file to attribute it to.
return "__main__"
else:
return inspect.getfile(component.__class__)
@staticmethod
def _flatten(components: List):
out = []
components = components[::-1]
while components:
current = components.pop()
if isinstance(current, (list, tuple)):
components.extend(current[::-1])
else:
if hasattr(current, "sub_components"):
components.extend(current.sub_components[::-1])
out.append(current)
return out
@staticmethod
def _setup_components(builder: "Builder", components: OrderedComponentSet):
for c in components:
if hasattr(c, "setup"):
c.setup(builder)
def __repr__(self):
return "ComponentManager()"
class ComponentInterface:
"""The builder interface for the component manager system. This class
defines component manager methods a ``vivarium`` component can access from
the builder. It provides methods for querying and adding components to the
:class:`ComponentManager`.
"""
def __init__(self, manager: ComponentManager):
self._manager = manager
def get_component(self, name: str) -> Any:
"""Get the component that has ``name`` if presently held by the component
manager. Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
A component that has name ``name``.
"""
return self._manager.get_component(name)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type to retrieve, compared against internal components
using isinstance().
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return self._manager.get_components_by_type(component_type)
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A dictionary mapping component names to components.
"""
        return self._manager.list_components()
manager.py | """
============================
The Component Manager System
============================
The :mod:`vivarium` component manager system is responsible for maintaining a
reference to all of the managers and components in a simulation, providing an
interface for adding additional components or managers, and applying default
configurations and initiating the ``setup`` stage of the lifecycle. This module
provides the default implementation and interface.
The :class:`ComponentManager` is the first plugin loaded by the
:class:`SimulationContext <vivarium.framework.engine.SimulationContext>`
and managers and components are given to it by the context. It is called on to
setup everything it holds when the context itself is setup.
"""
import inspect
import typing
from typing import Any, Dict, Iterator, List, Tuple, Type, Union
from vivarium.config_tree import ConfigurationError, DuplicatedConfigurationError
from vivarium.exceptions import VivariumError
if typing.TYPE_CHECKING:
from vivarium.framework.engine import Builder
class ComponentConfigError(VivariumError):
"""Error while interpreting configuration file or initializing components"""
pass
class OrderedComponentSet:
"""A container for Vivarium components.
It preserves ordering, enforces uniqueness by name, and provides a
subset of set-like semantics.
"""
def __init__(self, *args):
self.components = []
if args:
self.update(args)
def add(self, component: Any):
if component in self:
raise ComponentConfigError(
f"Attempting to add a component with duplicate name: {component}"
)
self.components.append(component)
def update(self, components: Union[List[Any], Tuple[Any]]):
for c in components:
self.add(c)
def pop(self) -> Any:
component = self.components.pop(0)
return component
def __contains__(self, component: Any) -> bool:
if not hasattr(component, "name"):
raise ComponentConfigError(f"Component {component} has no name attribute")
return component.name in [c.name for c in self.components]
def __iter__(self) -> Iterator:
return iter(self.components)
def __len__(self) -> int:
return len(self.components)
def __bool__(self) -> bool:
return bool(self.components)
def __add__(self, other: "OrderedComponentSet") -> "OrderedComponentSet":
return OrderedComponentSet(*(self.components + other.components))
def __eq__(self, other: "OrderedComponentSet") -> bool:
try:
return type(self) is type(other) and [c.name for c in self.components] == [
c.name for c in other.components
]
except TypeError:
return False
def __getitem__(self, index: int) -> Any:
return self.components[index]
def __repr__(self):
return f"OrderedComponentSet({[c.name for c in self.components]})"
class ComponentManager:
"""Manages the initialization and setup of :mod:`vivarium` components.
Maintains references to all components and managers in a :mod:`vivarium`
simulation, applies their default configuration and initiates their
``setup`` life-cycle stage.
The component manager maintains a separate list of managers and components
and provides methods for adding to these lists and getting members that
correspond to a specific type. It also initiates the ``setup`` lifecycle
phase for all components and managers it controls. This is done first for
managers and then components, and involves applying default configurations
and calling the object's ``setup`` method.
"""
def __init__(self):
self._managers = OrderedComponentSet()
self._components = OrderedComponentSet()
self.configuration = None
self.lifecycle = None
@property
def name(self):
"""The name of this component."""
return "component_manager"
def setup(self, configuration, lifecycle_manager):
"""Called by the simulation context."""
self.configuration = configuration
self.lifecycle = lifecycle_manager
self.lifecycle.add_constraint(
self.get_components_by_type,
restrict_during=["initialization", "population_creation"],
)
self.lifecycle.add_constraint(
self.get_component, restrict_during=["population_creation"]
)
self.lifecycle.add_constraint(
self.list_components, restrict_during=["initialization"]
)
def add_managers(self, managers: Union[List[Any], Tuple[Any]]):
"""Registers new managers with the component manager.
Managers are configured and setup before components.
Parameters
----------
managers
Instantiated managers to register.
"""
for m in self._flatten(managers):
self.apply_configuration_defaults(m)
self._managers.add(m)
def add_components(self, components: Union[List[Any], Tuple[Any]]):
"""Register new components with the component manager.
Components are configured and setup after managers.
Parameters
----------
components
Instantiated components to register.
"""
for c in self._flatten(components):
self.apply_configuration_defaults(c)
self._components.add(c)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type.
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return [c for c in self._components if isinstance(c, component_type)]
def get_component(self, name: str) -> Any:
"""Get the component with name ``name``.
Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
Any
A component that has name ``name``.
Raises
------
ValueError
No component exists in the component manager with ``name``.
"""
for c in self._components:
if c.name == name:
return c
raise ValueError(f"No component found with name {name}")
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A mapping of component names to components.
"""
return {c.name: c for c in self._components}
def setup_components(self, builder: "Builder"):
"""Separately configure and set up the managers and components held by
the component manager, in that order.
The setup process involves applying default configurations and then
calling the manager or component's setup method. This can result in new
components as a side effect of setup because components themselves have
access to this interface through the builder in their setup method.
Parameters
----------
builder
Interface to several simulation tools.
"""
self._setup_components(builder, self._managers + self._components)
def apply_configuration_defaults(self, component: Any):
if not hasattr(component, "configuration_defaults"):
return
try:
self.configuration.update(
component.configuration_defaults,
layer="component_configs",
source=component.name,
)
except DuplicatedConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
old_name, old_file = e.source, self._get_file(self.get_component(e.source))
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"set the configuration value {e.value_name}, but it has already "
f"been set by {old_name} in file {old_file}."
)
except ConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"alter the structure of the configuration at key {e.value_name}. "
f"This happens if one component attempts to set a value at an interior "
f"configuration key or if it attempts to turn an interior key into a "
f"configuration value."
)
@staticmethod
def _get_file(component):
if component.__module__ == "__main__":
# This is defined directly in a script or notebook so there's no
# file to attribute it to.
return "__main__"
else:
return inspect.getfile(component.__class__)
@staticmethod
def _flatten(components: List):
out = []
components = components[::-1]
while components:
            current = components.pop()
            if isinstance(current, (list, tuple)):
                components.extend(current[::-1])
            else:
                if hasattr(current, "sub_components"):
                    components.extend(current.sub_components[::-1])
                out.append(current)
return out
@staticmethod
def _setup_components(builder: "Builder", components: OrderedComponentSet):
for c in components:
if hasattr(c, "setup"):
c.setup(builder)
def __repr__(self):
return "ComponentManager()"
class ComponentInterface:
"""The builder interface for the component manager system. This class
defines component manager methods a ``vivarium`` component can access from
the builder. It provides methods for querying and adding components to the
:class:`ComponentManager`.
"""
def __init__(self, manager: ComponentManager):
self._manager = manager
def get_component(self, name: str) -> Any:
"""Get the component that has ``name`` if presently held by the component
manager. Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
A component that has name ``name``.
"""
return self._manager.get_component(name)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type to retrieve, compared against internal components
using isinstance().
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return self._manager.get_components_by_type(component_type)
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A dictionary mapping component names to components.
"""
        return self._manager.list_components()
manager.py | """
============================
The Component Manager System
============================
The :mod:`vivarium` component manager system is responsible for maintaining a
reference to all of the managers and components in a simulation, providing an
interface for adding additional components or managers, and applying default
configurations and initiating the ``setup`` stage of the lifecycle. This module
provides the default implementation and interface.
The :class:`ComponentManager` is the first plugin loaded by the
:class:`SimulationContext <vivarium.framework.engine.SimulationContext>`
and managers and components are given to it by the context. It is called on to
setup everything it holds when the context itself is setup.
"""
import inspect
import typing
from typing import Any, Dict, Iterator, List, Tuple, Type, Union
from vivarium.config_tree import ConfigurationError, DuplicatedConfigurationError
from vivarium.exceptions import VivariumError
if typing.TYPE_CHECKING:
from vivarium.framework.engine import Builder
class ComponentConfigError(VivariumError):
"""Error while interpreting configuration file or initializing components"""
pass
class OrderedComponentSet:
"""A container for Vivarium components.
It preserves ordering, enforces uniqueness by name, and provides a
subset of set-like semantics.
"""
def __init__(self, *args):
self.components = []
if args:
self.update(args)
def add(self, component: Any):
if component in self:
raise ComponentConfigError(
f"Attempting to add a component with duplicate name: {component}"
)
self.components.append(component)
def update(self, components: Union[List[Any], Tuple[Any]]):
for c in components:
self.add(c)
def pop(self) -> Any:
component = self.components.pop(0)
return component
def __contains__(self, component: Any) -> bool:
if not hasattr(component, "name"):
raise ComponentConfigError(f"Component {component} has no name attribute")
return component.name in [c.name for c in self.components]
def __iter__(self) -> Iterator:
return iter(self.components)
def __len__(self) -> int:
return len(self.components)
def __bool__(self) -> bool:
return bool(self.components)
def __add__(self, other: "OrderedComponentSet") -> "OrderedComponentSet":
return OrderedComponentSet(*(self.components + other.components))
def __eq__(self, other: "OrderedComponentSet") -> bool:
try:
return type(self) is type(other) and [c.name for c in self.components] == [
c.name for c in other.components
]
except TypeError:
return False
def __getitem__(self, index: int) -> Any:
return self.components[index]
def __repr__(self):
return f"OrderedComponentSet({[c.name for c in self.components]})"
class ComponentManager:
"""Manages the initialization and setup of :mod:`vivarium` components.
Maintains references to all components and managers in a :mod:`vivarium`
simulation, applies their default configuration and initiates their
``setup`` life-cycle stage.
The component manager maintains a separate list of managers and components
and provides methods for adding to these lists and getting members that
correspond to a specific type. It also initiates the ``setup`` lifecycle
phase for all components and managers it controls. This is done first for
managers and then components, and involves applying default configurations
and calling the object's ``setup`` method.
"""
def __init__(self):
self._managers = OrderedComponentSet()
self._components = OrderedComponentSet()
self.configuration = None
self.lifecycle = None
@property
def name(self):
"""The name of this component."""
return "component_manager"
def setup(self, configuration, lifecycle_manager):
"""Called by the simulation context."""
self.configuration = configuration
self.lifecycle = lifecycle_manager
self.lifecycle.add_constraint(
self.get_components_by_type,
restrict_during=["initialization", "population_creation"],
)
self.lifecycle.add_constraint(
self.get_component, restrict_during=["population_creation"]
)
self.lifecycle.add_constraint(
self.list_components, restrict_during=["initialization"]
)
def add_managers(self, managers: Union[List[Any], Tuple[Any]]):
"""Registers new managers with the component manager.
Managers are configured and setup before components.
Parameters
----------
managers
Instantiated managers to register.
"""
for m in self._flatten(managers):
self.apply_configuration_defaults(m)
self._managers.add(m)
def add_components(self, components: Union[List[Any], Tuple[Any]]):
"""Register new components with the component manager.
Components are configured and setup after managers.
Parameters
----------
        components
            Instantiated components to register.
"""
for c in self._flatten(components):
self.apply_configuration_defaults(c)
self._components.add(c)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type.
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return [c for c in self._components if isinstance(c, component_type)]
def get_component(self, name: str) -> Any:
"""Get the component with name ``name``.
Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
Any
A component that has name ``name``.
Raises
------
ValueError
No component exists in the component manager with ``name``.
"""
for c in self._components:
if c.name == name:
return c
raise ValueError(f"No component found with name {name}")
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A mapping of component names to components.
"""
return {c.name: c for c in self._components}
def setup_components(self, builder: "Builder"):
"""Separately configure and set up the managers and components held by
the component manager, in that order.
The setup process involves applying default configurations and then
calling the manager or component's setup method. This can result in new
components as a side effect of setup because components themselves have
access to this interface through the builder in their setup method.
Parameters
----------
builder
Interface to several simulation tools.
"""
self._setup_components(builder, self._managers + self._components)
def apply_configuration_defaults(self, component: Any):
if not hasattr(component, "configuration_defaults"):
return
try:
self.configuration.update(
component.configuration_defaults,
layer="component_configs",
source=component.name,
)
except DuplicatedConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
old_name, old_file = e.source, self._get_file(self.get_component(e.source))
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"set the configuration value {e.value_name}, but it has already "
f"been set by {old_name} in file {old_file}."
)
except ConfigurationError as e:
new_name, new_file = component.name, self._get_file(component)
raise ComponentConfigError(
f"Component {new_name} in file {new_file} is attempting to "
f"alter the structure of the configuration at key {e.value_name}. "
f"This happens if one component attempts to set a value at an interior "
f"configuration key or if it attempts to turn an interior key into a "
f"configuration value."
)
@staticmethod
def _get_file(component):
if component.__module__ == "__main__":
# This is defined directly in a script or notebook so there's no
# file to attribute it to.
return "__main__"
else:
return inspect.getfile(component.__class__)
@staticmethod
def _flatten(components: List):
out = []
components = components[::-1]
while components:
current = components.pop()
if isinstance(current, (list, tuple)):
components.extend(current[::-1])
else:
if hasattr(current, "sub_components"):
components.extend(current.sub_components[::-1])
out.append(current)
return out
@staticmethod
def _setup_components(builder: "Builder", components: OrderedComponentSet):
for c in components:
if hasattr(c, "setup"):
c.setup(builder)
def __repr__(self):
return "ComponentManager()"
class ComponentInterface:
"""The builder interface for the component manager system. This class
defines component manager methods a ``vivarium`` component can access from
the builder. It provides methods for querying and adding components to the
:class:`ComponentManager`.
"""
def __init__(self, manager: ComponentManager):
self._manager = manager
def get_component(self, name: str) -> Any:
"""Get the component that has ``name`` if presently held by the component
manager. Names are guaranteed to be unique.
Parameters
----------
name
A component name.
Returns
-------
A component that has name ``name``.
"""
return self._manager.get_component(name)
def get_components_by_type(
self, component_type: Union[type, Tuple[type, ...]]
) -> List[Any]:
"""Get all components that are an instance of ``component_type``.
Parameters
----------
component_type
A component type to retrieve, compared against internal components
using isinstance().
Returns
-------
List[Any]
A list of components of type ``component_type``.
"""
return self._manager.get_components_by_type(component_type)
def list_components(self) -> Dict[str, Any]:
"""Get a mapping of component names to components held by the manager.
Returns
-------
Dict[str, Any]
A dictionary mapping component names to components.
"""
        return self._manager.list_components()
digest.py | #!/usr/bin/env python3
# coding: utf-8
"""Genome digestion
Functions used to write auxiliary instagraal compatible
sparse matrices.
"""
from Bio import SeqIO, SeqUtils
from Bio.Seq import Seq
from Bio.Restriction import RestrictionBatch, Analysis
import os, sys, csv
import re
import collections
import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from hicstuff.log import logger
import hicstuff.io as hio
DEFAULT_FRAGMENTS_LIST_FILE_NAME = "fragments_list.txt"
DEFAULT_INFO_CONTIGS_FILE_NAME = "info_contigs.txt"
DEFAULT_SPARSE_MATRIX_FILE_NAME = "abs_fragments_contacts_weighted.txt"
DEFAULT_KB_BINNING = 1
DEFAULT_THRESHOLD_SIZE = 0
# Most used enzyme for eukaryotes
DEFAULT_ENZYME = "DpnII"
# If using evenly-sized chunks instead of restriction
# enzymes, they shouldn't be too short
DEFAULT_MIN_CHUNK_SIZE = 50
def write_frag_info(
fasta,
enzyme,
min_size=DEFAULT_THRESHOLD_SIZE,
circular=False,
output_contigs=DEFAULT_INFO_CONTIGS_FILE_NAME,
output_frags=DEFAULT_FRAGMENTS_LIST_FILE_NAME,
output_dir=None,
):
"""Digest and write fragment information
Write the fragments_list.txt and info_contigs.txt that are necessary for
instagraal to run.
Parameters
----------
fasta : pathlib.Path or str
The path to the reference genome
enzyme : str, int or list of str
If a string, must be the name of an enzyme (e.g. DpnII) and the genome
will be cut at the enzyme's restriction sites. If a number, the genome
will be cut uniformly into chunks with length equal to that number. A
list of enzymes can also be specified if using multiple enzymes.
min_size : float, optional
Size below which shorter contigs are discarded. Default is 0, i.e. all
contigs are retained.
circular : bool, optional
Whether the genome is circular. Default is False.
output_contigs : str, optional
The name of the file with contig info. Default is info_contigs.txt
output_frags : str, optional
The name of the file with fragment info. Default is fragments_list.txt
    output_dir : str, optional
The path to the output directory, which will be created if not already
existing. Default is the current directory.
"""
records = SeqIO.parse(hio.read_compressed(fasta), "fasta")
try:
info_contigs_path = os.path.join(output_dir, output_contigs)
frag_list_path = os.path.join(output_dir, output_frags)
except TypeError:
info_contigs_path = output_contigs
frag_list_path = output_frags
with open(info_contigs_path, "w") as info_contigs:
info_contigs.write("contig\tlength\tn_frags\tcumul_length\n")
with open(frag_list_path, "w") as fragments_list:
fragments_list.write(
"id\tchrom\tstart_pos" "\tend_pos\tsize\tgc_content\n"
)
total_frags = 0
for record in records:
contig_seq = record.seq
contig_name = record.id
contig_length = len(contig_seq)
if contig_length < int(min_size):
continue
sites = get_restriction_table(
contig_seq, enzyme, circular=circular
)
fragments = (
contig_seq[sites[i] : sites[i + 1]]
for i in range(len(sites) - 1)
)
n_frags = 0
current_id = 1
start_pos = 0
for frag in fragments:
frag_length = len(frag)
if frag_length > 0:
end_pos = start_pos + frag_length
gc_content = SeqUtils.GC(frag) / 100.0
current_fragment_line = "%s\t%s\t%s\t%s\t%s\t%s\n" % (
current_id,
contig_name,
start_pos,
end_pos,
frag_length,
gc_content,
)
fragments_list.write(current_fragment_line)
try:
assert (current_id == 1 and start_pos == 0) or (
current_id > 1 and start_pos > 0
)
except AssertionError:
logger.error((current_id, start_pos))
raise
start_pos = end_pos
current_id += 1
n_frags += 1
current_contig_line = "%s\t%s\t%s\t%s\n" % (
contig_name,
contig_length,
n_frags,
total_frags,
)
total_frags += n_frags
info_contigs.write(current_contig_line)
def attribute_fragments(pairs_file, idx_pairs_file, restriction_table):
"""
Writes the indexed pairs file, which has two more columns than the input
pairs file corresponding to the restriction fragment index of each read.
Note that pairs files have 1bp point positions whereas restriction table
    has 0bp point positions.
Parameters
----------
pairs_file: str
        Path to the input pairs file. Consists of 7 tab-separated
columns: readID, chr1, pos1, chr2, pos2, strand1, strand2
idx_pairs_file: str
Path to the output indexed pairs file. Consists of 9 white space
separated columns: readID, chr1, pos1, chr2, pos2, strand1, strand2,
frag1, frag2. frag1 and frag2 are 0-based restriction fragments based
on whole genome.
restriction_table: dict
Dictionary with chromosome identifiers (str) as keys and list of
positions (int) of restriction sites as values.
"""
# NOTE: Bottlenecks here are 1. binary search in find_frag and 2. writerow
# 1. could be reduced by searching groups of N frags in parallel and 2. by
# writing N frags simultaneously using a single call of writerows.
# Parse and update header section
pairs_header = hio.get_pairs_header(pairs_file)
header_size = len(pairs_header)
chrom_order = []
with open(idx_pairs_file, "w") as idx_pairs:
for line in pairs_header:
# Add new column names to header
if line.startswith("#columns"):
line = line.rstrip() + " frag1 frag2"
if line.startswith("#chromsize"):
chrom_order.append(line.split()[1])
idx_pairs.write(line + "\n")
# Get number of fragments per chrom to allow genome-based indices
shift_frags = {}
prev_frags = 0
for rank, chrom in enumerate(chrom_order):
if rank > 0:
# Note the "-1" because there are nfrags + 1 sites in rest table
prev_frags += len(restriction_table[chrom_order[rank - 1]]) - 1
# Idx of each chrom's frags will be shifted by n frags in previous chroms
shift_frags[chrom] = prev_frags
missing_contigs = set()
# Attribute pairs to fragments and append them to output file (after header)
with open(pairs_file, "r") as pairs, open(
idx_pairs_file, "a"
) as idx_pairs:
# Skip header lines
for _ in range(header_size):
next(pairs)
# Define input and output fields
pairs_cols = [
"readID",
"chr1",
"pos1",
"chr2",
"pos2",
"strand1",
"strand2",
]
idx_cols = pairs_cols + ["frag1", "frag2"]
# Use csv reader / writer to automatically parse columns into a dict
pairs_reader = csv.DictReader(
pairs, fieldnames=pairs_cols, delimiter="\t"
)
pairs_writer = csv.DictWriter(
idx_pairs, fieldnames=idx_cols, delimiter="\t"
)
for pair in pairs_reader:
# Get the 0-based indices of corresponding restriction fragments
# Deducing 1 from pair position to get it into 0bp point
pair["frag1"] = find_frag(
int(pair["pos1"]) - 1, restriction_table[pair["chr1"]]
)
pair["frag2"] = find_frag(
int(pair["pos2"]) - 1, restriction_table[pair["chr2"]]
)
# Shift fragment indices to make them genome-based instead of
# chromosome-based
try:
pair["frag1"] += shift_frags[pair["chr1"]]
except KeyError:
missing_contigs.add(pair["chr1"])
try:
pair["frag2"] += shift_frags[pair["chr2"]]
except KeyError:
missing_contigs.add(pair["chr2"])
# Write indexed pairs in the new file
pairs_writer.writerow(pair)
if missing_contigs:
logger.warning(
"Pairs on the following contigs were discarded as "
"those contigs are not listed in the paris file header. "
"This is normal if you filtered out small contigs: %s"
% " ".join(list(missing_contigs))
)
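# Illustrative sketch (toy genome, hypothetical file names): attribute_fragments
# expects one restriction table per chromosome, keyed like the #chromsize records
# of the pairs header; get_restriction_table (defined below) builds each table.
if __name__ == "__main__":
    _toy_genome = {"chr1": Seq("AAGCCGGATCGG"), "chr2": Seq("CCGGTTAACCGG")}
    _restriction_table = {
        name: get_restriction_table(seq, "HpaII")
        for name, seq in _toy_genome.items()
    }
    print(_restriction_table["chr1"])  # fragment boundaries, e.g. [ 0  4 12]
    # With real data, the indexed pairs file would then be written with:
    # attribute_fragments("sample.pairs", "sample_idx.pairs", _restriction_table)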
def get_restriction_table(seq, enzyme, circular=False):
"""
Get the restriction table for a single genomic sequence.
Parameters
----------
seq : Seq object
A biopython Seq object representing a chromosomes or contig.
enzyme : int, str or list of str
The name of the restriction enzyme used, or a list of restriction
enzyme names. Can also be an integer, to digest by fixed chunk size.
circular : bool
        Whether the genome is circular.
Returns
-------
numpy.array:
List of restriction fragment boundary positions for the input sequence.
>>> from Bio.Seq import Seq
>>> get_restriction_table(Seq("AAGCCGGATCGG"),"HpaII")
array([ 0, 4, 12])
>>> get_restriction_table(Seq("AA"),["HpaII", "MluCI"])
array([0, 2])
>>> get_restriction_table(Seq("AA"),"aeiou1")
Traceback (most recent call last):
...
ValueError: aeiou1 is not a valid restriction enzyme.
>>> get_restriction_table("AA","HpaII")
Traceback (most recent call last):
...
TypeError: Expected Seq or MutableSeq instance, got <class 'str'> instead
"""
chrom_len = len(seq)
wrong_enzyme = "{} is not a valid restriction enzyme.".format(enzyme)
# Restriction batch containing the restriction enzyme
try:
enz = [enzyme] if isinstance(enzyme, str) else enzyme
cutter = RestrictionBatch(enz)
except (TypeError, ValueError):
try:
cutter = max(int(enzyme), DEFAULT_MIN_CHUNK_SIZE)
except ValueError:
raise ValueError(wrong_enzyme)
# Conversion from string type to restriction type
if isinstance(cutter, int):
sites = [i for i in range(0, chrom_len, cutter)]
if sites[-1] < chrom_len:
sites.append(chrom_len)
else:
# Find sites of all restriction enzymes given
ana = Analysis(cutter, seq, linear=not circular)
sites = ana.full()
# Gets all sites into a single flat list with 0-based index
sites = [site - 1 for enz in sites.values() for site in enz]
# Sort by position and allow first add start and end of seq
sites.sort()
sites.insert(0, 0)
sites.append(chrom_len)
return np.array(sites)
def find_frag(pos, r_sites):
"""
Use binary search to find the index of a chromosome restriction fragment
corresponding to an input genomic position.
Parameters
----------
pos : int
Genomic position, in base pairs.
r_sites : list
List of genomic positions corresponding to restriction sites.
Returns
-------
int
The 0-based index of the restriction fragment to which the position belongs.
>>> find_frag(15, [0, 20, 30])
0
>>> find_frag(15, [10, 20, 30])
Traceback (most recent call last):
...
ValueError: The first position in the restriction table is not 0.
>>> find_frag(31, [0, 20, 30])
Traceback (most recent call last):
...
ValueError: Read position is larger than last entry in restriction table.
"""
if r_sites[0] != 0:
raise ValueError(
"The first position in the restriction table is not 0."
)
if pos > r_sites[-1]:
raise ValueError(
"Read position is larger than last entry in restriction table."
)
# binary search for the index of the read
index = max(np.searchsorted(r_sites, pos, side="right") - 1, 0)
# Last site = end of the chrom, index of last fragment is last site - 1
index = min(len(r_sites) - 2, index)
return index
def frag_len(
frags_file_name=DEFAULT_FRAGMENTS_LIST_FILE_NAME,
output_dir=None,
plot=False,
fig_path=None,
):
"""
logs summary statistics of fragment length distribution based on an
input fragment file. Can optionally show a histogram instead
of text summary.
Parameters
----------
frags_file_name : str
Path to the output list of fragments.
output_dir : str
Directory where the list should be saved.
plot : bool
Wether a histogram of fragment length should be shown.
fig_path : str
If a path is given, the figure will be saved instead of shown.
"""
try:
frag_list_path = os.path.join(output_dir, frags_file_name)
except TypeError:
frag_list_path = frags_file_name
frags = pd.read_csv(frag_list_path, sep="\t")
nfrags = frags.shape[0]
med_len = frags["size"].median()
nbins = 40
if plot:
fig, ax = plt.subplots()
_, _, _ = ax.hist(frags["size"], bins=nbins)
ax.set_xlabel("Fragment length [bp]")
ax.set_ylabel("Log10 number of fragments")
ax.set_title("Distribution of restriction fragment length")
ax.set_yscale("log", base=10)
ax.annotate(
"Total fragments: {}".format(nfrags),
xy=(0.95, 0.95),
xycoords="axes fraction",
fontsize=12,
horizontalalignment="right",
verticalalignment="top",
)
ax.annotate(
"Median length: {}".format(med_len),
xy=(0.95, 0.90),
xycoords="axes fraction",
fontsize=12,
horizontalalignment="right",
verticalalignment="top",
)
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
if fig_path:
plt.savefig(fig_path)
else:
plt.show()
plt.clf()
else:
logger.info(
"Genome digested into {0} fragments with a median "
"length of {1}".format(nfrags, med_len)
)
def gen_enzyme_religation_regex(enzyme):
"""Return a regex which corresponds to all possible religation sites given a
set of enzyme.
Parameters:
-----------
enzyme : str
        String containing the enzyme names, separated by commas.
Returns:
--------
re.Pattern :
Regex that corresponds to all possible ligation sites given a set of
enzyme.
Examples:
---------
>>> gen_enzyme_religation_regex('HpaII')
re.compile('CCGCGG')
>>> gen_enzyme_religation_regex('HpaII,MluCI')
re.compile('AATTAATT|AATTCGG|CCGAATT|CCGCGG')
"""
# Split the str on the comma to separate the different enzymes.
enzyme = enzyme.split(",")
# Check on Biopython dictionnary the enzyme.
rb = RestrictionBatch(enzyme)
    # Initialization:
give_list = []
accept_list = []
ligation_list = []
# Iterates on the enzymes.
for enz in rb:
# Extract restriction sites and look for cut sites.
site = enz.elucidate()
fw_cut = site.find("^")
rev_cut = site.find("_")
# Process "give" site. Remove N on the left (useless).
give_site = site[:rev_cut].replace("^", "")
while give_site[0] == "N":
give_site = give_site[1:]
give_list.append(give_site)
# Process "accept" site. Remove N on the rigth (useless).
accept_site = site[fw_cut + 1 :].replace("_", "")
while accept_site[-1] == "N":
accept_site = accept_site[:-1]
accept_list.append(accept_site)
# Iterates on the two list to build all the possible HiC ligation sites.
for give_site in give_list:
for accept_site in accept_list:
# Replace "N" by "." for regex searching of the sites
ligation_list.append((give_site + accept_site).replace("N", "."))
ligation_list.append(
str(Seq(give_site + accept_site).reverse_complement()).replace(
"N", "."
)
)
# Build the regex for any ligation sites.
pattern = "|".join(sorted(list(set(ligation_list))))
return re.compile(pattern)
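# A short usage sketch (toy sequence; expected values follow the doctests above):
# digest a sequence, map positions back to fragment indices, and build the
# religation-site regex for the same enzyme.
if __name__ == "__main__":
    _sites = get_restriction_table(Seq("AAGCCGGATCGG"), "HpaII")
    print(_sites)  # [ 0  4 12]
    print(find_frag(2, _sites))  # 0 -> position 2 lies in fragment [0, 4)
    print(find_frag(7, _sites))  # 1 -> position 7 lies in fragment [4, 12)
    print(gen_enzyme_religation_regex("HpaII").pattern)  # CCGCGG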
digest.py | #!/usr/bin/env python3
# coding: utf-8
"""Genome digestion
Functions used to write auxiliary instagraal compatible
sparse matrices.
"""
from Bio import SeqIO, SeqUtils
from Bio.Seq import Seq
from Bio.Restriction import RestrictionBatch, Analysis
import os, sys, csv
import re
import collections
import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from hicstuff.log import logger
import hicstuff.io as hio
DEFAULT_FRAGMENTS_LIST_FILE_NAME = "fragments_list.txt"
DEFAULT_INFO_CONTIGS_FILE_NAME = "info_contigs.txt"
DEFAULT_SPARSE_MATRIX_FILE_NAME = "abs_fragments_contacts_weighted.txt"
DEFAULT_KB_BINNING = 1
DEFAULT_THRESHOLD_SIZE = 0
# Most used enzyme for eukaryotes
DEFAULT_ENZYME = "DpnII"
# If using evenly-sized chunks instead of restriction
# enzymes, they shouldn't be too short
DEFAULT_MIN_CHUNK_SIZE = 50
def write_frag_info(
fasta,
enzyme,
min_size=DEFAULT_THRESHOLD_SIZE,
circular=False,
output_contigs=DEFAULT_INFO_CONTIGS_FILE_NAME,
output_frags=DEFAULT_FRAGMENTS_LIST_FILE_NAME,
output_dir=None,
):
"""Digest and write fragment information
Write the fragments_list.txt and info_contigs.txt that are necessary for
instagraal to run.
Parameters
----------
fasta : pathlib.Path or str
The path to the reference genome
enzyme : str, int or list of str
If a string, must be the name of an enzyme (e.g. DpnII) and the genome
will be cut at the enzyme's restriction sites. If a number, the genome
will be cut uniformly into chunks with length equal to that number. A
list of enzymes can also be specified if using multiple enzymes.
min_size : float, optional
Size below which shorter contigs are discarded. Default is 0, i.e. all
contigs are retained.
circular : bool, optional
Whether the genome is circular. Default is False.
output_contigs : str, optional
The name of the file with contig info. Default is info_contigs.txt
output_frags : str, optional
The name of the file with fragment info. Default is fragments_list.txt
    output_dir : str, optional
The path to the output directory, which will be created if not already
existing. Default is the current directory.
"""
records = SeqIO.parse(hio.read_compressed(fasta), "fasta")
try:
info_contigs_path = os.path.join(output_dir, output_contigs)
frag_list_path = os.path.join(output_dir, output_frags)
except TypeError:
info_contigs_path = output_contigs
frag_list_path = output_frags
with open(info_contigs_path, "w") as info_contigs:
info_contigs.write("contig\tlength\tn_frags\tcumul_length\n")
with open(frag_list_path, "w") as fragments_list:
fragments_list.write(
"id\tchrom\tstart_pos" "\tend_pos\tsize\tgc_content\n"
)
total_frags = 0
for record in records:
contig_seq = record.seq
contig_name = record.id
contig_length = len(contig_seq)
if contig_length < int(min_size):
continue
sites = get_restriction_table(
contig_seq, enzyme, circular=circular
)
fragments = (
contig_seq[sites[i] : sites[i + 1]]
for i in range(len(sites) - 1)
)
n_frags = 0
current_id = 1
start_pos = 0
for frag in fragments:
frag_length = len(frag)
if frag_length > 0:
end_pos = start_pos + frag_length
gc_content = SeqUtils.GC(frag) / 100.0
current_fragment_line = "%s\t%s\t%s\t%s\t%s\t%s\n" % (
current_id,
contig_name,
start_pos,
end_pos,
frag_length,
gc_content,
)
fragments_list.write(current_fragment_line)
try:
assert (current_id == 1 and start_pos == 0) or (
current_id > 1 and start_pos > 0
)
except AssertionError:
logger.error((current_id, start_pos))
raise
start_pos = end_pos
current_id += 1
n_frags += 1
current_contig_line = "%s\t%s\t%s\t%s\n" % (
contig_name,
contig_length,
n_frags,
total_frags,
)
total_frags += n_frags
info_contigs.write(current_contig_line)
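# --- Illustrative usage sketch (editorial addition, not part of the original hicstuff file) ---
# A minimal call of write_frag_info on a hypothetical assembly. The FASTA path,
# enzyme choice and output directory below are made-up example values, and the
# output directory is assumed to exist already.
def _demo_write_frag_info():
    write_frag_info(
        fasta="genome.fa",           # hypothetical reference genome
        enzyme="DpnII",              # single-enzyme digestion
        min_size=0,                  # keep every contig
        circular=False,
        output_dir="hicstuff_out",   # receives fragments_list.txt / info_contigs.txt
    )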
def attribute_fragments(pairs_file, idx_pairs_file, restriction_table):
"""
Writes the indexed pairs file, which has two more columns than the input
pairs file corresponding to the restriction fragment index of each read.
Note that pairs files have 1bp point positions whereas restriction table
    has 0bp point positions.
Parameters
----------
pairs_file: str
        Path to the input pairs file. Consists of 7 tab-separated
columns: readID, chr1, pos1, chr2, pos2, strand1, strand2
idx_pairs_file: str
Path to the output indexed pairs file. Consists of 9 white space
separated columns: readID, chr1, pos1, chr2, pos2, strand1, strand2,
frag1, frag2. frag1 and frag2 are 0-based restriction fragments based
on whole genome.
restriction_table: dict
Dictionary with chromosome identifiers (str) as keys and list of
positions (int) of restriction sites as values.
"""
# NOTE: Bottlenecks here are 1. binary search in find_frag and 2. writerow
# 1. could be reduced by searching groups of N frags in parallel and 2. by
# writing N frags simultaneously using a single call of writerows.
# Parse and update header section
pairs_header = hio.get_pairs_header(pairs_file)
header_size = len(pairs_header)
chrom_order = []
with open(idx_pairs_file, "w") as idx_pairs:
for line in pairs_header:
# Add new column names to header
if line.startswith("#columns"):
line = line.rstrip() + " frag1 frag2"
if line.startswith("#chromsize"):
chrom_order.append(line.split()[1])
idx_pairs.write(line + "\n")
# Get number of fragments per chrom to allow genome-based indices
shift_frags = {}
prev_frags = 0
for rank, chrom in enumerate(chrom_order):
if rank > 0:
# Note the "-1" because there are nfrags + 1 sites in rest table
prev_frags += len(restriction_table[chrom_order[rank - 1]]) - 1
# Idx of each chrom's frags will be shifted by n frags in previous chroms
shift_frags[chrom] = prev_frags
missing_contigs = set()
# Attribute pairs to fragments and append them to output file (after header)
with open(pairs_file, "r") as pairs, open(
idx_pairs_file, "a"
) as idx_pairs:
# Skip header lines
for _ in range(header_size):
next(pairs)
# Define input and output fields
pairs_cols = [
"readID",
"chr1",
"pos1",
"chr2",
"pos2",
"strand1",
"strand2",
]
idx_cols = pairs_cols + ["frag1", "frag2"]
# Use csv reader / writer to automatically parse columns into a dict
pairs_reader = csv.DictReader(
pairs, fieldnames=pairs_cols, delimiter="\t"
)
pairs_writer = csv.DictWriter(
idx_pairs, fieldnames=idx_cols, delimiter="\t"
)
for pair in pairs_reader:
# Get the 0-based indices of corresponding restriction fragments
            # Subtract 1 from the pair position to convert it to a 0bp point
pair["frag1"] = find_frag(
int(pair["pos1"]) - 1, restriction_table[pair["chr1"]]
)
pair["frag2"] = find_frag(
int(pair["pos2"]) - 1, restriction_table[pair["chr2"]]
)
# Shift fragment indices to make them genome-based instead of
# chromosome-based
try:
pair["frag1"] += shift_frags[pair["chr1"]]
except KeyError:
missing_contigs.add(pair["chr1"])
try:
pair["frag2"] += shift_frags[pair["chr2"]]
except KeyError:
missing_contigs.add(pair["chr2"])
# Write indexed pairs in the new file
pairs_writer.writerow(pair)
if missing_contigs:
logger.warning(
"Pairs on the following contigs were discarded as "
"those contigs are not listed in the paris file header. "
"This is normal if you filtered out small contigs: %s"
% " ".join(list(missing_contigs))
)
def get_restriction_table(seq, enzyme, circular=False):
"""
Get the restriction table for a single genomic sequence.
Parameters
----------
seq : Seq object
A biopython Seq object representing a chromosomes or contig.
enzyme : int, str or list of str
The name of the restriction enzyme used, or a list of restriction
enzyme names. Can also be an integer, to digest by fixed chunk size.
circular : bool
        Whether the genome is circular.
Returns
-------
numpy.array:
List of restriction fragment boundary positions for the input sequence.
>>> from Bio.Seq import Seq
>>> get_restriction_table(Seq("AAGCCGGATCGG"),"HpaII")
array([ 0, 4, 12])
>>> get_restriction_table(Seq("AA"),["HpaII", "MluCI"])
array([0, 2])
>>> get_restriction_table(Seq("AA"),"aeiou1")
Traceback (most recent call last):
...
ValueError: aeiou1 is not a valid restriction enzyme.
>>> get_restriction_table("AA","HpaII")
Traceback (most recent call last):
...
TypeError: Expected Seq or MutableSeq instance, got <class 'str'> instead
"""
chrom_len = len(seq)
wrong_enzyme = "{} is not a valid restriction enzyme.".format(enzyme)
# Restriction batch containing the restriction enzyme
try:
enz = [enzyme] if isinstance(enzyme, str) else enzyme
cutter = RestrictionBatch(enz)
except (TypeError, ValueError):
try:
cutter = max(int(enzyme), DEFAULT_MIN_CHUNK_SIZE)
except ValueError:
raise ValueError(wrong_enzyme)
# Conversion from string type to restriction type
if isinstance(cutter, int):
sites = [i for i in range(0, chrom_len, cutter)]
if sites[-1] < chrom_len:
sites.append(chrom_len)
else:
# Find sites of all restriction enzymes given
ana = Analysis(cutter, seq, linear=not circular)
sites = ana.full()
# Gets all sites into a single flat list with 0-based index
sites = [site - 1 for enz in sites.values() for site in enz]
        # Sort by position and add the start and end of the sequence
sites.sort()
sites.insert(0, 0)
sites.append(chrom_len)
return np.array(sites)
def find_frag(pos, r_sites):
"""
Use binary search to find the index of a chromosome restriction fragment
corresponding to an input genomic position.
Parameters
----------
pos : int
Genomic position, in base pairs.
r_sites : list
List of genomic positions corresponding to restriction sites.
Returns
-------
int
The 0-based index of the restriction fragment to which the position belongs.
>>> find_frag(15, [0, 20, 30])
0
>>> find_frag(15, [10, 20, 30])
Traceback (most recent call last):
...
ValueError: The first position in the restriction table is not 0.
>>> find_frag(31, [0, 20, 30])
Traceback (most recent call last):
...
ValueError: Read position is larger than last entry in restriction table.
"""
if r_sites[0] != 0:
raise ValueError(
"The first position in the restriction table is not 0."
)
if pos > r_sites[-1]:
raise ValueError(
"Read position is larger than last entry in restriction table."
)
# binary search for the index of the read
index = max(np.searchsorted(r_sites, pos, side="right") - 1, 0)
# Last site = end of the chrom, index of last fragment is last site - 1
index = min(len(r_sites) - 2, index)
return index
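# --- Illustrative sketch (editorial addition, not part of the original hicstuff file) ---
# How find_frag maps 0-based read positions onto restriction fragments for a
# toy restriction table; the coordinates below are made up for demonstration.
def _demo_find_frag():
    r_sites = [0, 20, 30, 55]        # three fragments: [0, 20), [20, 30), [30, 55]
    positions = [0, 19, 20, 54, 55]  # query positions, already 0-based
    return [find_frag(pos, r_sites) for pos in positions]  # -> [0, 0, 1, 2, 2]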
def frag_len(
frags_file_name=DEFAULT_FRAGMENTS_LIST_FILE_NAME,
output_dir=None,
plot=False,
fig_path=None,
):
"""
    Logs summary statistics of the fragment length distribution based on an
    input fragment file. Can optionally show a histogram instead of a text
    summary.
Parameters
----------
frags_file_name : str
Path to the output list of fragments.
output_dir : str
Directory where the list should be saved.
plot : bool
        Whether a histogram of fragment length should be shown.
fig_path : str
If a path is given, the figure will be saved instead of shown.
"""
try:
frag_list_path = os.path.join(output_dir, frags_file_name)
except TypeError:
frag_list_path = frags_file_name
frags = pd.read_csv(frag_list_path, sep="\t")
nfrags = frags.shape[0]
med_len = frags["size"].median()
nbins = 40
if plot:
fig, ax = plt.subplots()
_, _, _ = ax.hist(frags["size"], bins=nbins)
ax.set_xlabel("Fragment length [bp]")
ax.set_ylabel("Log10 number of fragments")
ax.set_title("Distribution of restriction fragment length")
ax.set_yscale("log", base=10)
ax.annotate(
"Total fragments: {}".format(nfrags),
xy=(0.95, 0.95),
xycoords="axes fraction",
fontsize=12,
horizontalalignment="right",
verticalalignment="top",
)
ax.annotate(
"Median length: {}".format(med_len),
xy=(0.95, 0.90),
xycoords="axes fraction",
fontsize=12,
horizontalalignment="right",
verticalalignment="top",
)
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
if fig_path:
plt.savefig(fig_path)
else:
plt.show()
plt.clf()
else:
logger.info(
"Genome digested into {0} fragments with a median "
"length of {1}".format(nfrags, med_len)
)
def gen_enzyme_religation_regex(enzyme):
"""Return a regex which corresponds to all possible religation sites given a
    set of enzymes.
Parameters:
-----------
enzyme : str
        String that contains the names of the enzymes, separated by commas.
Returns:
--------
re.Pattern :
Regex that corresponds to all possible ligation sites given a set of
        enzymes.
Examples:
---------
>>> gen_enzyme_religation_regex('HpaII')
re.compile('CCGCGG')
>>> gen_enzyme_religation_regex('HpaII,MluCI')
re.compile('AATTAATT|AATTCGG|CCGAATT|CCGCGG')
"""
# Split the str on the comma to separate the different enzymes.
enzyme = enzyme.split(",")
    # Check the enzymes against the Biopython restriction dictionary.
rb = RestrictionBatch(enzyme)
    # Initialization:
give_list = []
accept_list = []
ligation_list = []
    # Iterate over the enzymes.
for enz in rb:
# Extract restriction sites and look for cut sites.
|
    # Iterate over the two lists to build all the possible HiC ligation sites.
for give_site in give_list:
for accept_site in accept_list:
# Replace "N" by "." for regex searching of the sites
ligation_list.append((give_site + accept_site).replace("N", "."))
ligation_list.append(
str(Seq(give_site + accept_site).reverse_complement()).replace(
"N", "."
)
)
# Build the regex for any ligation sites.
pattern = "|".join(sorted(list(set(ligation_list))))
return re.compile(pattern)
| site = enz.elucidate()
fw_cut = site.find("^")
rev_cut = site.find("_")
# Process "give" site. Remove N on the left (useless).
give_site = site[:rev_cut].replace("^", "")
while give_site[0] == "N":
give_site = give_site[1:]
give_list.append(give_site)
# Process "accept" site. Remove N on the rigth (useless).
accept_site = site[fw_cut + 1 :].replace("_", "")
while accept_site[-1] == "N":
accept_site = accept_site[:-1]
accept_list.append(accept_site) | conditional_block |
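# --- Illustrative sketch (editorial addition, not part of the dataset row above) ---
# Using the pattern returned by gen_enzyme_religation_regex to count putative
# religation junctions in a read. The read sequence is made up; for DpnII the
# junction is assumed to be the doubled GATC site (GATCGATC).
def _demo_count_religation_sites():
    ligation_re = gen_enzyme_religation_regex("DpnII")
    read = "ACGATCGATCGATGATCATGCT"
    return len(ligation_re.findall(read))  # -> 1 for this toy read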
digest.py | #!/usr/bin/env python3
# coding: utf-8
"""Genome digestion
Functions used to write auxiliary instagraal compatible
sparse matrices.
"""
from Bio import SeqIO, SeqUtils
from Bio.Seq import Seq
from Bio.Restriction import RestrictionBatch, Analysis
import os, sys, csv
import re
import collections
import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from hicstuff.log import logger
import hicstuff.io as hio
DEFAULT_FRAGMENTS_LIST_FILE_NAME = "fragments_list.txt"
DEFAULT_INFO_CONTIGS_FILE_NAME = "info_contigs.txt"
DEFAULT_SPARSE_MATRIX_FILE_NAME = "abs_fragments_contacts_weighted.txt"
DEFAULT_KB_BINNING = 1
DEFAULT_THRESHOLD_SIZE = 0
# Most used enzyme for eukaryotes
DEFAULT_ENZYME = "DpnII"
# If using evenly-sized chunks instead of restriction
# enzymes, they shouldn't be too short
DEFAULT_MIN_CHUNK_SIZE = 50
def write_frag_info(
fasta,
enzyme,
min_size=DEFAULT_THRESHOLD_SIZE,
circular=False,
output_contigs=DEFAULT_INFO_CONTIGS_FILE_NAME,
output_frags=DEFAULT_FRAGMENTS_LIST_FILE_NAME,
output_dir=None,
):
"""Digest and write fragment information
Write the fragments_list.txt and info_contigs.txt that are necessary for
instagraal to run.
Parameters
----------
fasta : pathlib.Path or str
The path to the reference genome
enzyme : str, int or list of str
If a string, must be the name of an enzyme (e.g. DpnII) and the genome
will be cut at the enzyme's restriction sites. If a number, the genome
will be cut uniformly into chunks with length equal to that number. A
list of enzymes can also be specified if using multiple enzymes.
min_size : float, optional
Size below which shorter contigs are discarded. Default is 0, i.e. all
contigs are retained.
circular : bool, optional
Whether the genome is circular. Default is False.
output_contigs : str, optional
The name of the file with contig info. Default is info_contigs.txt
output_frags : str, optional
The name of the file with fragment info. Default is fragments_list.txt
output_dir : [type], optional
The path to the output directory, which will be created if not already
existing. Default is the current directory.
"""
records = SeqIO.parse(hio.read_compressed(fasta), "fasta")
try:
info_contigs_path = os.path.join(output_dir, output_contigs)
frag_list_path = os.path.join(output_dir, output_frags)
except TypeError:
info_contigs_path = output_contigs
frag_list_path = output_frags
with open(info_contigs_path, "w") as info_contigs:
info_contigs.write("contig\tlength\tn_frags\tcumul_length\n")
with open(frag_list_path, "w") as fragments_list:
fragments_list.write(
"id\tchrom\tstart_pos" "\tend_pos\tsize\tgc_content\n"
)
total_frags = 0
for record in records:
contig_seq = record.seq
contig_name = record.id
contig_length = len(contig_seq)
if contig_length < int(min_size):
continue
sites = get_restriction_table(
contig_seq, enzyme, circular=circular
)
fragments = (
contig_seq[sites[i] : sites[i + 1]]
for i in range(len(sites) - 1)
)
n_frags = 0
current_id = 1
start_pos = 0
for frag in fragments:
frag_length = len(frag)
if frag_length > 0:
end_pos = start_pos + frag_length
gc_content = SeqUtils.GC(frag) / 100.0
current_fragment_line = "%s\t%s\t%s\t%s\t%s\t%s\n" % (
current_id,
contig_name,
start_pos,
end_pos,
frag_length,
gc_content,
)
fragments_list.write(current_fragment_line)
try:
assert (current_id == 1 and start_pos == 0) or (
current_id > 1 and start_pos > 0
)
except AssertionError:
logger.error((current_id, start_pos))
raise
start_pos = end_pos
current_id += 1
n_frags += 1
current_contig_line = "%s\t%s\t%s\t%s\n" % (
contig_name,
contig_length,
n_frags,
total_frags,
)
total_frags += n_frags
info_contigs.write(current_contig_line)
def attribute_fragments(pairs_file, idx_pairs_file, restriction_table):
"""
Writes the indexed pairs file, which has two more columns than the input
pairs file corresponding to the restriction fragment index of each read.
Note that pairs files have 1bp point positions whereas restriction table
    has 0bp point positions.
Parameters
----------
pairs_file: str
        Path to the input pairs file. Consists of 7 tab-separated
columns: readID, chr1, pos1, chr2, pos2, strand1, strand2
idx_pairs_file: str
Path to the output indexed pairs file. Consists of 9 white space
separated columns: readID, chr1, pos1, chr2, pos2, strand1, strand2,
frag1, frag2. frag1 and frag2 are 0-based restriction fragments based
on whole genome.
restriction_table: dict
Dictionary with chromosome identifiers (str) as keys and list of
positions (int) of restriction sites as values.
"""
# NOTE: Bottlenecks here are 1. binary search in find_frag and 2. writerow
# 1. could be reduced by searching groups of N frags in parallel and 2. by
# writing N frags simultaneously using a single call of writerows.
# Parse and update header section
pairs_header = hio.get_pairs_header(pairs_file)
header_size = len(pairs_header)
chrom_order = []
with open(idx_pairs_file, "w") as idx_pairs:
for line in pairs_header:
# Add new column names to header
if line.startswith("#columns"):
line = line.rstrip() + " frag1 frag2"
if line.startswith("#chromsize"):
chrom_order.append(line.split()[1])
idx_pairs.write(line + "\n")
# Get number of fragments per chrom to allow genome-based indices
shift_frags = {}
prev_frags = 0
for rank, chrom in enumerate(chrom_order):
if rank > 0:
# Note the "-1" because there are nfrags + 1 sites in rest table
prev_frags += len(restriction_table[chrom_order[rank - 1]]) - 1
# Idx of each chrom's frags will be shifted by n frags in previous chroms
shift_frags[chrom] = prev_frags
missing_contigs = set()
# Attribute pairs to fragments and append them to output file (after header)
with open(pairs_file, "r") as pairs, open(
idx_pairs_file, "a"
) as idx_pairs:
# Skip header lines
for _ in range(header_size):
next(pairs)
# Define input and output fields
pairs_cols = [
"readID",
"chr1",
"pos1",
"chr2",
"pos2",
"strand1",
"strand2",
]
idx_cols = pairs_cols + ["frag1", "frag2"]
# Use csv reader / writer to automatically parse columns into a dict
pairs_reader = csv.DictReader(
pairs, fieldnames=pairs_cols, delimiter="\t"
)
pairs_writer = csv.DictWriter(
idx_pairs, fieldnames=idx_cols, delimiter="\t"
)
for pair in pairs_reader:
# Get the 0-based indices of corresponding restriction fragments
            # Subtract 1 from the pair position to convert it to a 0bp point
pair["frag1"] = find_frag(
int(pair["pos1"]) - 1, restriction_table[pair["chr1"]]
)
pair["frag2"] = find_frag(
int(pair["pos2"]) - 1, restriction_table[pair["chr2"]]
)
# Shift fragment indices to make them genome-based instead of
# chromosome-based
try:
pair["frag1"] += shift_frags[pair["chr1"]]
except KeyError:
missing_contigs.add(pair["chr1"])
try:
pair["frag2"] += shift_frags[pair["chr2"]]
except KeyError:
missing_contigs.add(pair["chr2"])
# Write indexed pairs in the new file
pairs_writer.writerow(pair)
if missing_contigs:
logger.warning(
"Pairs on the following contigs were discarded as "
"those contigs are not listed in the paris file header. "
"This is normal if you filtered out small contigs: %s"
% " ".join(list(missing_contigs))
)
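# --- Illustrative sketch (editorial addition, not part of the original hicstuff file) ---
# Building the per-chromosome restriction table that attribute_fragments expects,
# then indexing a pairs file. The FASTA and pairs file names are hypothetical.
def _demo_attribute_fragments():
    restriction_table = {
        record.id: get_restriction_table(record.seq, "DpnII")
        for record in SeqIO.parse("genome.fa", "fasta")
    }
    attribute_fragments("aligned.pairs", "aligned_idx.pairs", restriction_table)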
def get_restriction_table(seq, enzyme, circular=False):
"""
Get the restriction table for a single genomic sequence.
Parameters
----------
seq : Seq object
A biopython Seq object representing a chromosomes or contig.
enzyme : int, str or list of str
The name of the restriction enzyme used, or a list of restriction
enzyme names. Can also be an integer, to digest by fixed chunk size.
circular : bool
        Whether the genome is circular.
Returns
-------
numpy.array:
List of restriction fragment boundary positions for the input sequence.
>>> from Bio.Seq import Seq
>>> get_restriction_table(Seq("AAGCCGGATCGG"),"HpaII")
array([ 0, 4, 12])
>>> get_restriction_table(Seq("AA"),["HpaII", "MluCI"])
array([0, 2])
>>> get_restriction_table(Seq("AA"),"aeiou1")
Traceback (most recent call last):
...
ValueError: aeiou1 is not a valid restriction enzyme.
>>> get_restriction_table("AA","HpaII")
Traceback (most recent call last):
...
TypeError: Expected Seq or MutableSeq instance, got <class 'str'> instead
"""
chrom_len = len(seq)
wrong_enzyme = "{} is not a valid restriction enzyme.".format(enzyme)
# Restriction batch containing the restriction enzyme
try:
enz = [enzyme] if isinstance(enzyme, str) else enzyme
cutter = RestrictionBatch(enz)
except (TypeError, ValueError):
try:
cutter = max(int(enzyme), DEFAULT_MIN_CHUNK_SIZE)
except ValueError:
raise ValueError(wrong_enzyme)
# Conversion from string type to restriction type
if isinstance(cutter, int):
sites = [i for i in range(0, chrom_len, cutter)]
if sites[-1] < chrom_len:
sites.append(chrom_len)
else:
# Find sites of all restriction enzymes given
ana = Analysis(cutter, seq, linear=not circular)
sites = ana.full()
# Gets all sites into a single flat list with 0-based index
sites = [site - 1 for enz in sites.values() for site in enz]
        # Sort by position and add the start and end of the sequence
sites.sort()
sites.insert(0, 0)
sites.append(chrom_len)
return np.array(sites)
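# --- Illustrative sketch (editorial addition, not part of the original hicstuff file) ---
# get_restriction_table accepts either enzyme names or an integer chunk size;
# integer chunk sizes below DEFAULT_MIN_CHUNK_SIZE are raised to that minimum.
def _demo_restriction_table_modes():
    seq = Seq("AAGCCGGATCGG" * 20)                   # 240 bp toy sequence
    by_enzyme = get_restriction_table(seq, "HpaII")  # cut at C^CGG sites
    by_chunks = get_restriction_table(seq, 100)      # uniform 100 bp chunks
    return by_enzyme, by_chunks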
def find_frag(pos, r_sites):
|
def frag_len(
frags_file_name=DEFAULT_FRAGMENTS_LIST_FILE_NAME,
output_dir=None,
plot=False,
fig_path=None,
):
"""
    Logs summary statistics of the fragment length distribution based on an
    input fragment file. Can optionally show a histogram instead of a text
    summary.
Parameters
----------
frags_file_name : str
Path to the output list of fragments.
output_dir : str
Directory where the list should be saved.
plot : bool
        Whether a histogram of fragment length should be shown.
fig_path : str
If a path is given, the figure will be saved instead of shown.
"""
try:
frag_list_path = os.path.join(output_dir, frags_file_name)
except TypeError:
frag_list_path = frags_file_name
frags = pd.read_csv(frag_list_path, sep="\t")
nfrags = frags.shape[0]
med_len = frags["size"].median()
nbins = 40
if plot:
fig, ax = plt.subplots()
_, _, _ = ax.hist(frags["size"], bins=nbins)
ax.set_xlabel("Fragment length [bp]")
ax.set_ylabel("Log10 number of fragments")
ax.set_title("Distribution of restriction fragment length")
ax.set_yscale("log", base=10)
ax.annotate(
"Total fragments: {}".format(nfrags),
xy=(0.95, 0.95),
xycoords="axes fraction",
fontsize=12,
horizontalalignment="right",
verticalalignment="top",
)
ax.annotate(
"Median length: {}".format(med_len),
xy=(0.95, 0.90),
xycoords="axes fraction",
fontsize=12,
horizontalalignment="right",
verticalalignment="top",
)
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
if fig_path:
plt.savefig(fig_path)
else:
plt.show()
plt.clf()
else:
logger.info(
"Genome digested into {0} fragments with a median "
"length of {1}".format(nfrags, med_len)
)
def gen_enzyme_religation_regex(enzyme):
"""Return a regex which corresponds to all possible religation sites given a
    set of enzymes.
Parameters:
-----------
enzyme : str
        String that contains the names of the enzymes, separated by commas.
Returns:
--------
re.Pattern :
Regex that corresponds to all possible ligation sites given a set of
        enzymes.
Examples:
---------
>>> gen_enzyme_religation_regex('HpaII')
re.compile('CCGCGG')
>>> gen_enzyme_religation_regex('HpaII,MluCI')
re.compile('AATTAATT|AATTCGG|CCGAATT|CCGCGG')
"""
# Split the str on the comma to separate the different enzymes.
enzyme = enzyme.split(",")
    # Check the enzymes against the Biopython restriction dictionary.
rb = RestrictionBatch(enzyme)
    # Initialization:
give_list = []
accept_list = []
ligation_list = []
    # Iterate over the enzymes.
for enz in rb:
# Extract restriction sites and look for cut sites.
site = enz.elucidate()
fw_cut = site.find("^")
rev_cut = site.find("_")
# Process "give" site. Remove N on the left (useless).
give_site = site[:rev_cut].replace("^", "")
while give_site[0] == "N":
give_site = give_site[1:]
give_list.append(give_site)
# Process "accept" site. Remove N on the rigth (useless).
accept_site = site[fw_cut + 1 :].replace("_", "")
while accept_site[-1] == "N":
accept_site = accept_site[:-1]
accept_list.append(accept_site)
    # Iterate over the two lists to build all the possible HiC ligation sites.
for give_site in give_list:
for accept_site in accept_list:
# Replace "N" by "." for regex searching of the sites
ligation_list.append((give_site + accept_site).replace("N", "."))
ligation_list.append(
str(Seq(give_site + accept_site).reverse_complement()).replace(
"N", "."
)
)
# Build the regex for any ligation sites.
pattern = "|".join(sorted(list(set(ligation_list))))
return re.compile(pattern)
| """
Use binary search to find the index of a chromosome restriction fragment
corresponding to an input genomic position.
Parameters
----------
pos : int
Genomic position, in base pairs.
r_sites : list
List of genomic positions corresponding to restriction sites.
Returns
-------
int
The 0-based index of the restriction fragment to which the position belongs.
>>> find_frag(15, [0, 20, 30])
0
>>> find_frag(15, [10, 20, 30])
Traceback (most recent call last):
...
ValueError: The first position in the restriction table is not 0.
>>> find_frag(31, [0, 20, 30])
Traceback (most recent call last):
...
ValueError: Read position is larger than last entry in restriction table.
"""
if r_sites[0] != 0:
raise ValueError(
"The first position in the restriction table is not 0."
)
if pos > r_sites[-1]:
raise ValueError(
"Read position is larger than last entry in restriction table."
)
# binary search for the index of the read
index = max(np.searchsorted(r_sites, pos, side="right") - 1, 0)
# Last site = end of the chrom, index of last fragment is last site - 1
index = min(len(r_sites) - 2, index)
return index | identifier_body |
digest.py | #!/usr/bin/env python3
# coding: utf-8
"""Genome digestion
Functions used to write auxiliary instagraal compatible
sparse matrices.
"""
from Bio import SeqIO, SeqUtils
from Bio.Seq import Seq
from Bio.Restriction import RestrictionBatch, Analysis
import os, sys, csv
import re
import collections
import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from hicstuff.log import logger
import hicstuff.io as hio
DEFAULT_FRAGMENTS_LIST_FILE_NAME = "fragments_list.txt"
DEFAULT_INFO_CONTIGS_FILE_NAME = "info_contigs.txt"
DEFAULT_SPARSE_MATRIX_FILE_NAME = "abs_fragments_contacts_weighted.txt"
DEFAULT_KB_BINNING = 1
DEFAULT_THRESHOLD_SIZE = 0
# Most used enzyme for eukaryotes
DEFAULT_ENZYME = "DpnII"
# If using evenly-sized chunks instead of restriction
# enzymes, they shouldn't be too short
DEFAULT_MIN_CHUNK_SIZE = 50
def write_frag_info(
fasta,
enzyme,
min_size=DEFAULT_THRESHOLD_SIZE,
circular=False,
output_contigs=DEFAULT_INFO_CONTIGS_FILE_NAME,
output_frags=DEFAULT_FRAGMENTS_LIST_FILE_NAME,
output_dir=None,
):
"""Digest and write fragment information
Write the fragments_list.txt and info_contigs.txt that are necessary for
instagraal to run.
Parameters
----------
fasta : pathlib.Path or str
The path to the reference genome
enzyme : str, int or list of str
If a string, must be the name of an enzyme (e.g. DpnII) and the genome
will be cut at the enzyme's restriction sites. If a number, the genome
will be cut uniformly into chunks with length equal to that number. A
list of enzymes can also be specified if using multiple enzymes.
min_size : float, optional
Size below which shorter contigs are discarded. Default is 0, i.e. all
contigs are retained.
circular : bool, optional
Whether the genome is circular. Default is False.
output_contigs : str, optional
The name of the file with contig info. Default is info_contigs.txt
output_frags : str, optional
The name of the file with fragment info. Default is fragments_list.txt
output_dir : [type], optional
The path to the output directory, which will be created if not already
existing. Default is the current directory.
"""
records = SeqIO.parse(hio.read_compressed(fasta), "fasta")
try:
info_contigs_path = os.path.join(output_dir, output_contigs)
frag_list_path = os.path.join(output_dir, output_frags)
except TypeError:
info_contigs_path = output_contigs
frag_list_path = output_frags
with open(info_contigs_path, "w") as info_contigs:
info_contigs.write("contig\tlength\tn_frags\tcumul_length\n")
with open(frag_list_path, "w") as fragments_list:
fragments_list.write(
"id\tchrom\tstart_pos" "\tend_pos\tsize\tgc_content\n"
)
total_frags = 0
for record in records:
contig_seq = record.seq
contig_name = record.id
contig_length = len(contig_seq)
if contig_length < int(min_size):
continue
sites = get_restriction_table(
contig_seq, enzyme, circular=circular
)
fragments = (
contig_seq[sites[i] : sites[i + 1]]
for i in range(len(sites) - 1)
)
n_frags = 0
current_id = 1
start_pos = 0
for frag in fragments:
frag_length = len(frag)
if frag_length > 0:
end_pos = start_pos + frag_length
gc_content = SeqUtils.GC(frag) / 100.0
current_fragment_line = "%s\t%s\t%s\t%s\t%s\t%s\n" % (
current_id,
contig_name,
start_pos,
end_pos,
frag_length,
gc_content,
)
fragments_list.write(current_fragment_line)
try:
assert (current_id == 1 and start_pos == 0) or (
current_id > 1 and start_pos > 0
)
except AssertionError:
logger.error((current_id, start_pos))
raise
start_pos = end_pos
current_id += 1
n_frags += 1
| n_frags,
total_frags,
)
total_frags += n_frags
info_contigs.write(current_contig_line)
def attribute_fragments(pairs_file, idx_pairs_file, restriction_table):
"""
Writes the indexed pairs file, which has two more columns than the input
pairs file corresponding to the restriction fragment index of each read.
Note that pairs files have 1bp point positions whereas restriction table
    has 0bp point positions.
Parameters
----------
pairs_file: str
        Path to the input pairs file. Consists of 7 tab-separated
columns: readID, chr1, pos1, chr2, pos2, strand1, strand2
idx_pairs_file: str
Path to the output indexed pairs file. Consists of 9 white space
separated columns: readID, chr1, pos1, chr2, pos2, strand1, strand2,
frag1, frag2. frag1 and frag2 are 0-based restriction fragments based
on whole genome.
restriction_table: dict
Dictionary with chromosome identifiers (str) as keys and list of
positions (int) of restriction sites as values.
"""
# NOTE: Bottlenecks here are 1. binary search in find_frag and 2. writerow
# 1. could be reduced by searching groups of N frags in parallel and 2. by
# writing N frags simultaneously using a single call of writerows.
# Parse and update header section
pairs_header = hio.get_pairs_header(pairs_file)
header_size = len(pairs_header)
chrom_order = []
with open(idx_pairs_file, "w") as idx_pairs:
for line in pairs_header:
# Add new column names to header
if line.startswith("#columns"):
line = line.rstrip() + " frag1 frag2"
if line.startswith("#chromsize"):
chrom_order.append(line.split()[1])
idx_pairs.write(line + "\n")
# Get number of fragments per chrom to allow genome-based indices
shift_frags = {}
prev_frags = 0
for rank, chrom in enumerate(chrom_order):
if rank > 0:
# Note the "-1" because there are nfrags + 1 sites in rest table
prev_frags += len(restriction_table[chrom_order[rank - 1]]) - 1
# Idx of each chrom's frags will be shifted by n frags in previous chroms
shift_frags[chrom] = prev_frags
missing_contigs = set()
# Attribute pairs to fragments and append them to output file (after header)
with open(pairs_file, "r") as pairs, open(
idx_pairs_file, "a"
) as idx_pairs:
# Skip header lines
for _ in range(header_size):
next(pairs)
# Define input and output fields
pairs_cols = [
"readID",
"chr1",
"pos1",
"chr2",
"pos2",
"strand1",
"strand2",
]
idx_cols = pairs_cols + ["frag1", "frag2"]
# Use csv reader / writer to automatically parse columns into a dict
pairs_reader = csv.DictReader(
pairs, fieldnames=pairs_cols, delimiter="\t"
)
pairs_writer = csv.DictWriter(
idx_pairs, fieldnames=idx_cols, delimiter="\t"
)
for pair in pairs_reader:
# Get the 0-based indices of corresponding restriction fragments
            # Subtract 1 from the pair position to convert it to a 0bp point
pair["frag1"] = find_frag(
int(pair["pos1"]) - 1, restriction_table[pair["chr1"]]
)
pair["frag2"] = find_frag(
int(pair["pos2"]) - 1, restriction_table[pair["chr2"]]
)
# Shift fragment indices to make them genome-based instead of
# chromosome-based
try:
pair["frag1"] += shift_frags[pair["chr1"]]
except KeyError:
missing_contigs.add(pair["chr1"])
try:
pair["frag2"] += shift_frags[pair["chr2"]]
except KeyError:
missing_contigs.add(pair["chr2"])
# Write indexed pairs in the new file
pairs_writer.writerow(pair)
if missing_contigs:
logger.warning(
"Pairs on the following contigs were discarded as "
"those contigs are not listed in the paris file header. "
"This is normal if you filtered out small contigs: %s"
% " ".join(list(missing_contigs))
)
def get_restriction_table(seq, enzyme, circular=False):
"""
Get the restriction table for a single genomic sequence.
Parameters
----------
seq : Seq object
A biopython Seq object representing a chromosomes or contig.
enzyme : int, str or list of str
The name of the restriction enzyme used, or a list of restriction
enzyme names. Can also be an integer, to digest by fixed chunk size.
circular : bool
        Whether the genome is circular.
Returns
-------
numpy.array:
List of restriction fragment boundary positions for the input sequence.
>>> from Bio.Seq import Seq
>>> get_restriction_table(Seq("AAGCCGGATCGG"),"HpaII")
array([ 0, 4, 12])
>>> get_restriction_table(Seq("AA"),["HpaII", "MluCI"])
array([0, 2])
>>> get_restriction_table(Seq("AA"),"aeiou1")
Traceback (most recent call last):
...
ValueError: aeiou1 is not a valid restriction enzyme.
>>> get_restriction_table("AA","HpaII")
Traceback (most recent call last):
...
TypeError: Expected Seq or MutableSeq instance, got <class 'str'> instead
"""
chrom_len = len(seq)
wrong_enzyme = "{} is not a valid restriction enzyme.".format(enzyme)
# Restriction batch containing the restriction enzyme
try:
enz = [enzyme] if isinstance(enzyme, str) else enzyme
cutter = RestrictionBatch(enz)
except (TypeError, ValueError):
try:
cutter = max(int(enzyme), DEFAULT_MIN_CHUNK_SIZE)
except ValueError:
raise ValueError(wrong_enzyme)
# Conversion from string type to restriction type
if isinstance(cutter, int):
sites = [i for i in range(0, chrom_len, cutter)]
if sites[-1] < chrom_len:
sites.append(chrom_len)
else:
# Find sites of all restriction enzymes given
ana = Analysis(cutter, seq, linear=not circular)
sites = ana.full()
# Gets all sites into a single flat list with 0-based index
sites = [site - 1 for enz in sites.values() for site in enz]
        # Sort by position and add the start and end of the sequence
sites.sort()
sites.insert(0, 0)
sites.append(chrom_len)
return np.array(sites)
def find_frag(pos, r_sites):
"""
Use binary search to find the index of a chromosome restriction fragment
corresponding to an input genomic position.
Parameters
----------
pos : int
Genomic position, in base pairs.
r_sites : list
List of genomic positions corresponding to restriction sites.
Returns
-------
int
The 0-based index of the restriction fragment to which the position belongs.
>>> find_frag(15, [0, 20, 30])
0
>>> find_frag(15, [10, 20, 30])
Traceback (most recent call last):
...
ValueError: The first position in the restriction table is not 0.
>>> find_frag(31, [0, 20, 30])
Traceback (most recent call last):
...
ValueError: Read position is larger than last entry in restriction table.
"""
if r_sites[0] != 0:
raise ValueError(
"The first position in the restriction table is not 0."
)
if pos > r_sites[-1]:
raise ValueError(
"Read position is larger than last entry in restriction table."
)
# binary search for the index of the read
index = max(np.searchsorted(r_sites, pos, side="right") - 1, 0)
# Last site = end of the chrom, index of last fragment is last site - 1
index = min(len(r_sites) - 2, index)
return index
def frag_len(
frags_file_name=DEFAULT_FRAGMENTS_LIST_FILE_NAME,
output_dir=None,
plot=False,
fig_path=None,
):
"""
    Logs summary statistics of the fragment length distribution based on an
    input fragment file. Can optionally show a histogram instead of a text
    summary.
Parameters
----------
frags_file_name : str
Path to the output list of fragments.
output_dir : str
Directory where the list should be saved.
plot : bool
        Whether a histogram of fragment length should be shown.
fig_path : str
If a path is given, the figure will be saved instead of shown.
"""
try:
frag_list_path = os.path.join(output_dir, frags_file_name)
except TypeError:
frag_list_path = frags_file_name
frags = pd.read_csv(frag_list_path, sep="\t")
nfrags = frags.shape[0]
med_len = frags["size"].median()
nbins = 40
if plot:
fig, ax = plt.subplots()
_, _, _ = ax.hist(frags["size"], bins=nbins)
ax.set_xlabel("Fragment length [bp]")
ax.set_ylabel("Log10 number of fragments")
ax.set_title("Distribution of restriction fragment length")
ax.set_yscale("log", base=10)
ax.annotate(
"Total fragments: {}".format(nfrags),
xy=(0.95, 0.95),
xycoords="axes fraction",
fontsize=12,
horizontalalignment="right",
verticalalignment="top",
)
ax.annotate(
"Median length: {}".format(med_len),
xy=(0.95, 0.90),
xycoords="axes fraction",
fontsize=12,
horizontalalignment="right",
verticalalignment="top",
)
# Tweak spacing to prevent clipping of ylabel
fig.tight_layout()
if fig_path:
plt.savefig(fig_path)
else:
plt.show()
plt.clf()
else:
logger.info(
"Genome digested into {0} fragments with a median "
"length of {1}".format(nfrags, med_len)
)
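# --- Illustrative sketch (editorial addition, not part of the original hicstuff file) ---
# Plotting the fragment length distribution written by write_frag_info. The
# directory name is hypothetical and must already contain fragments_list.txt.
def _demo_frag_len_plot():
    frag_len(output_dir="hicstuff_out", plot=True, fig_path="frag_lengths.png")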
def gen_enzyme_religation_regex(enzyme):
"""Return a regex which corresponds to all possible religation sites given a
    set of enzymes.
Parameters:
-----------
enzyme : str
        String that contains the names of the enzymes, separated by commas.
Returns:
--------
re.Pattern :
Regex that corresponds to all possible ligation sites given a set of
        enzymes.
Examples:
---------
>>> gen_enzyme_religation_regex('HpaII')
re.compile('CCGCGG')
>>> gen_enzyme_religation_regex('HpaII,MluCI')
re.compile('AATTAATT|AATTCGG|CCGAATT|CCGCGG')
"""
# Split the str on the comma to separate the different enzymes.
enzyme = enzyme.split(",")
    # Check the enzymes against the Biopython restriction dictionary.
rb = RestrictionBatch(enzyme)
    # Initialization:
give_list = []
accept_list = []
ligation_list = []
# Iterates on the enzymes.
for enz in rb:
# Extract restriction sites and look for cut sites.
site = enz.elucidate()
fw_cut = site.find("^")
rev_cut = site.find("_")
# Process "give" site. Remove N on the left (useless).
give_site = site[:rev_cut].replace("^", "")
while give_site[0] == "N":
give_site = give_site[1:]
give_list.append(give_site)
# Process "accept" site. Remove N on the rigth (useless).
accept_site = site[fw_cut + 1 :].replace("_", "")
while accept_site[-1] == "N":
accept_site = accept_site[:-1]
accept_list.append(accept_site)
    # Iterate over the two lists to build all the possible HiC ligation sites.
for give_site in give_list:
for accept_site in accept_list:
# Replace "N" by "." for regex searching of the sites
ligation_list.append((give_site + accept_site).replace("N", "."))
ligation_list.append(
str(Seq(give_site + accept_site).reverse_complement()).replace(
"N", "."
)
)
# Build the regex for any ligation sites.
pattern = "|".join(sorted(list(set(ligation_list))))
return re.compile(pattern) | current_contig_line = "%s\t%s\t%s\t%s\n" % (
contig_name,
contig_length, | random_line_split |
mod.rs | // Copyright 2018 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The module containing building blocks for creating blockchains powered by
//! the Exonum framework.
//!
//! Services are the main extension point for the Exonum framework. To create
//! your service on top of Exonum blockchain you need to perform the following steps:
//!
//! - Define your own information schema.
//! - Create one or more transaction types using the [`transactions!`] macro and
//! implement the [`Transaction`] trait for them.
//! - Create a data structure implementing the [`Service`] trait.
//! - Write API handlers for the service, if required.
//!
//! You may consult [the service creation tutorial][doc:create-service] for a detailed
//! instruction on how to create services.
//!
//! [`transactions!`]: ../macro.transactions.html
//! [`Transaction`]: ./trait.Transaction.html
//! [`Service`]: ./trait.Service.html
//! [doc:create-service]: https://exonum.com/doc/get-started/create-service
pub use self::{
block::{Block, BlockProof}, config::{ConsensusConfig, StoredConfiguration, ValidatorKeys},
genesis::GenesisConfig, schema::{Schema, TxLocation},
service::{Service, ServiceContext, SharedNodeState},
transaction::{
ExecutionError, ExecutionResult, Transaction, TransactionContext, TransactionError,
TransactionErrorType, TransactionMessage, TransactionResult, TransactionSet,
},
};
pub mod config;
use byteorder::{ByteOrder, LittleEndian};
use failure;
use vec_map::VecMap;
use std::{
collections::{BTreeMap, HashMap}, error::Error as StdError, fmt, iter, mem, panic, sync::Arc,
};
use crypto::{self, CryptoHash, Hash, PublicKey, SecretKey};
use encoding::Error as MessageError;
use helpers::{Height, Round, ValidatorId};
use messages::{Connect, Message, Precommit, ProtocolMessage, RawTransaction, Signed};
use node::ApiSender;
use storage::{self, Database, Error, Fork, Patch, Snapshot};
mod block;
mod genesis;
mod schema;
mod service;
#[macro_use]
mod transaction;
#[cfg(test)]
mod tests;
/// Id of core service table family.
pub const CORE_SERVICE: u16 = 0;
/// Exonum blockchain instance with a certain services set and data storage.
///
/// Only nodes with an identical set of services and genesis block can be combined
/// into a single network.
pub struct Blockchain {
db: Arc<dyn Database>,
service_map: Arc<VecMap<Box<dyn Service>>>,
#[doc(hidden)]
pub service_keypair: (PublicKey, SecretKey),
pub(crate) api_sender: ApiSender,
}
impl Blockchain {
/// Constructs a blockchain for the given `storage` and list of `services`.
pub fn new<D: Into<Arc<dyn Database>>>(
storage: D,
services: Vec<Box<dyn Service>>,
service_public_key: PublicKey,
service_secret_key: SecretKey,
api_sender: ApiSender,
) -> Self {
let mut service_map = VecMap::new();
for service in services {
let id = service.service_id() as usize;
if service_map.contains_key(id) {
panic!(
"Services have already contain service with id={}, please change it.",
id
);
}
service_map.insert(id, service);
}
Self {
db: storage.into(),
service_map: Arc::new(service_map),
service_keypair: (service_public_key, service_secret_key),
api_sender,
}
}
/// Recreates the blockchain to reuse with a sandbox.
#[doc(hidden)]
pub fn clone_with_api_sender(&self, api_sender: ApiSender) -> Self {
Self {
api_sender,
..self.clone()
}
}
/// Returns the `VecMap` for all services. This is a map which
/// contains service identifiers and service interfaces. The VecMap
/// allows proceeding from the service identifier to the service itself.
pub fn service_map(&self) -> &Arc<VecMap<Box<dyn Service>>> {
&self.service_map
}
/// Creates a read-only snapshot of the current storage state.
pub fn snapshot(&self) -> Box<dyn Snapshot> {
self.db.snapshot()
}
/// Creates a snapshot of the current storage state that can be later committed into the storage
/// via the `merge` method.
pub fn fork(&self) -> Fork {
self.db.fork()
}
/// Tries to create a `Transaction` object from the given raw message.
/// A raw message can be converted into a `Transaction` object only
/// if the following conditions are met:
///
/// - Blockchain has a service with the `service_id` of the given raw message.
/// - Service can deserialize the given raw message.
pub fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, MessageError> {
let id = raw.service_id() as usize;
let service = self.service_map
.get(id)
.ok_or_else(|| MessageError::from("Service not found."))?;
service.tx_from_raw(raw)
}
/// Commits changes from the patch to the blockchain storage.
/// See [`Fork`](../storage/struct.Fork.html) for details.
pub fn merge(&mut self, patch: Patch) -> Result<(), Error> {
self.db.merge(patch)
}
/// Returns the hash of the latest committed block.
///
/// # Panics
///
/// If the genesis block was not committed.
pub fn last_hash(&self) -> Hash {
Schema::new(&self.snapshot())
.block_hashes_by_height()
.last()
.unwrap_or_else(Hash::default)
}
/// Returns the latest committed block.
pub fn last_block(&self) -> Block {
Schema::new(&self.snapshot()).last_block()
}
/// Creates and commits the genesis block with the given genesis configuration
/// if the blockchain has not been initialized.
///
/// # Panics
///
/// * If the genesis block was not committed.
/// * If storage version is not specified or not supported.
pub fn initialize(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let has_genesis_block = !Schema::new(&self.snapshot())
.block_hashes_by_height()
.is_empty();
if has_genesis_block {
self.assert_storage_version();
} else {
self.initialize_metadata();
self.create_genesis_block(cfg)?;
}
Ok(())
}
    /// Initializes node-local metadata.
fn initialize_metadata(&mut self) {
let mut fork = self.db.fork();
storage::StorageMetadata::write_current(&mut fork);
if self.merge(fork.into_patch()).is_ok() | else {
panic!("Could not set database version.")
}
}
/// Checks if storage version is supported.
///
/// # Panics
///
/// Panics if version is not supported or is not specified.
fn assert_storage_version(&self) {
match storage::StorageMetadata::read(self.db.snapshot()) {
Ok(ver) => info!("Storage version is supported with value [{}].", ver),
Err(e) => panic!("{}", e),
}
}
/// Creates and commits the genesis block with the given genesis configuration.
fn create_genesis_block(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let mut config_propose = StoredConfiguration {
previous_cfg_hash: Hash::zero(),
actual_from: Height::zero(),
validator_keys: cfg.validator_keys,
consensus: cfg.consensus,
services: BTreeMap::new(),
};
let patch = {
let mut fork = self.fork();
// Update service tables
for (_, service) in self.service_map.iter() {
let cfg = service.initialize(&mut fork);
let name = service.service_name();
if config_propose.services.contains_key(name) {
panic!(
"Services already contain service with '{}' name, please change it",
name
);
}
config_propose.services.insert(name.into(), cfg);
}
// Commit actual configuration
{
let mut schema = Schema::new(&mut fork);
if schema.block_hash_by_height(Height::zero()).is_some() {
// TODO create genesis block for MemoryDB and compare it hash with zero block. (ECR-1630)
return Ok(());
}
schema.commit_configuration(config_propose);
};
self.merge(fork.into_patch())?;
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Helper function to map a tuple (`u16`, `u16`) of service table coordinates
/// to a 32-byte value to be used as the `ProofMapIndex` key (it currently
/// supports only fixed size keys). The `hash` function is used to distribute
/// keys uniformly (compared to padding).
/// # Arguments
///
/// * `service_id` - `service_id` as returned by instance of type of
/// `Service` trait
/// * `table_idx` - index of service table in `Vec`, returned by the
/// `state_hash` method of instance of type of `Service` trait
    // Using `hash` was also the first approach considered here.
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
debug_assert!(table_idx <= u16::max_value() as usize);
let size = mem::size_of::<u16>();
let mut vec = vec![0; 2 * size];
LittleEndian::write_u16(&mut vec[0..size], service_id);
LittleEndian::write_u16(&mut vec[size..2 * size], table_idx as u16);
crypto::hash(&vec)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
let mut schema = Schema::new(&mut fork);
let state_hash = {
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
proposer_id,
height,
tx_hashes.len() as u32,
&last_hash,
&tx_hash,
&state_hash,
);
trace!("execute block = {:?}", block);
// Calculate block hash.
let block_hash = block.hash();
// Update height.
let mut schema = Schema::new(&mut fork);
schema.block_hashes_by_height_mut().push(block_hash);
// Save block.
schema.blocks_mut().put(&block_hash, block);
block_hash
};
(block_hash, fork.into_patch())
}
fn execute_transaction(
&self,
tx_hash: Hash,
height: Height,
index: usize,
fork: &mut Fork,
) -> Result<(), failure::Error> {
let (tx, raw, service_name) = {
let schema = Schema::new(&fork);
let raw = schema.transactions().get(&tx_hash).ok_or_else(|| {
failure::err_msg(format!(
"BUG: Cannot find transaction in database. tx: {:?}",
tx_hash
))
})?;
let service_name = self.service_map
.get(raw.service_id() as usize)
.ok_or_else(|| {
failure::err_msg(format!(
"Service not found. Service id: {}",
raw.service_id()
))
})?
.service_name();
let tx = self.tx_from_raw(raw.payload().clone()).or_else(|error| {
Err(failure::err_msg(format!(
"Service <{}>: {}, tx: {:?}",
service_name,
error.description(),
tx_hash
)))
})?;
(tx, raw, service_name)
};
fork.checkpoint();
let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let context = TransactionContext::new(&mut *fork, &raw);
tx.execute(context)
}));
let tx_result = TransactionResult(match catch_result {
Ok(execution_result) => {
match execution_result {
Ok(()) => {
fork.commit();
}
Err(ref e) => {
// Unlike panic, transaction failure isn't that rare, so logging the
// whole transaction body is an overkill: it can be relatively big.
info!(
"Service <{}>: {:?} transaction execution failed: {:?}",
service_name, tx_hash, e
);
fork.rollback();
}
}
execution_result.map_err(TransactionError::from)
}
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"Service <{}>: {:?} transaction execution panicked: {:?}",
service_name, tx, err
);
Err(TransactionError::from_panic(&err))
}
});
let mut schema = Schema::new(fork);
schema.transaction_results_mut().put(&tx_hash, tx_result);
schema.commit_transaction(&tx_hash);
schema.block_transactions_mut(height).push(tx_hash);
let location = TxLocation::new(height, index as u64);
schema.transactions_locations_mut().put(&tx_hash, location);
Ok(())
}
/// Commits to the blockchain a new block with the indicated changes (patch),
/// hash and Precommit messages. After that invokes `after_commit`
/// for each service in the increasing order of their identifiers.
pub fn commit<I>(&mut self, patch: &Patch, block_hash: Hash, precommits: I) -> Result<(), Error>
where
I: Iterator<Item = Signed<Precommit>>,
{
let patch = {
let mut fork = {
let mut fork = self.db.fork();
fork.merge(patch.clone()); // FIXME: Avoid cloning here. (ECR-1631)
fork
};
{
let mut schema = Schema::new(&mut fork);
for precommit in precommits {
schema.precommits_mut(&block_hash).push(precommit.clone());
}
// Consensus messages cache is useful only during one height, so it should be
// cleared when a new height is achieved.
schema.consensus_messages_cache_mut().clear();
let txs_in_block = schema.last_block().tx_count();
let txs_count = schema.transactions_pool_len_index().get().unwrap_or(0);
debug_assert!(txs_count >= u64::from(txs_in_block));
schema
.transactions_pool_len_index_mut()
.set(txs_count - u64::from(txs_in_block));
}
fork.into_patch()
};
self.merge(patch)?;
// Invokes `after_commit` for each service in order of their identifiers
for (service_id, service) in self.service_map.iter() {
let context = ServiceContext::new(
self.service_keypair.0,
self.service_keypair.1.clone(),
self.api_sender.clone(),
self.fork(),
service_id as u16,
);
service.after_commit(&context);
}
Ok(())
}
/// Saves the `Connect` message from a peer to the cache.
pub(crate) fn save_peer(&mut self, pubkey: &PublicKey, peer: Signed<Connect>) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.peers_cache_mut().put(pubkey, peer);
}
self.merge(fork.into_patch())
.expect("Unable to save peer to the peers cache");
}
/// Removes from the cache the `Connect` message from a peer.
pub fn remove_peer_with_pubkey(&mut self, key: &PublicKey) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
let mut peers = schema.peers_cache_mut();
peers.remove(key);
}
self.merge(fork.into_patch())
.expect("Unable to remove peer from the peers cache");
}
/// Returns `Connect` messages from peers saved in the cache, if any.
pub fn get_saved_peers(&self) -> HashMap<PublicKey, Signed<Connect>> {
let schema = Schema::new(self.snapshot());
let peers_cache = schema.peers_cache();
let it = peers_cache.iter().map(|(k, v)| (k, v.clone()));
it.collect()
}
/// Saves the given raw message to the consensus messages cache.
pub(crate) fn save_message<T: ProtocolMessage>(&mut self, round: Round, raw: Signed<T>) {
self.save_messages(round, iter::once(raw.into()));
}
/// Saves a collection of SignedMessage to the consensus messages cache with single access to the
/// `Fork` instance.
pub(crate) fn save_messages<I>(&mut self, round: Round, iter: I)
where
I: IntoIterator<Item = Message>,
{
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.consensus_messages_cache_mut().extend(iter);
schema.set_consensus_round(round);
}
self.merge(fork.into_patch())
.expect("Unable to save messages to the consensus cache");
}
}
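// --- Illustrative sketch (editorial addition, not part of the original Exonum source) ---
// Shows the byte layout hashed by `Blockchain::service_table_unique_key`: two
// little-endian u16 values packed into four bytes. The service id and table
// index below are arbitrary example values.
#[cfg(test)]
mod service_table_key_sketch {
    use byteorder::{ByteOrder, LittleEndian};

    #[test]
    fn key_is_hash_of_packed_ids() {
        let mut buf = vec![0u8; 4];
        LittleEndian::write_u16(&mut buf[0..2], 2); // service_id
        LittleEndian::write_u16(&mut buf[2..4], 7); // table index
        assert_eq!(
            super::Blockchain::service_table_unique_key(2, 7),
            super::crypto::hash(&buf)
        );
    }
}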
fn before_commit(service: &dyn Service, fork: &mut Fork) {
fork.checkpoint();
match panic::catch_unwind(panic::AssertUnwindSafe(|| service.before_commit(fork))) {
Ok(..) => fork.commit(),
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"{} service before_commit failed with error: {:?}",
service.service_name(),
err
);
}
}
}
impl fmt::Debug for Blockchain {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Blockchain(..)")
}
}
impl Clone for Blockchain {
fn clone(&self) -> Self {
Self {
db: Arc::clone(&self.db),
service_map: Arc::clone(&self.service_map),
api_sender: self.api_sender.clone(),
service_keypair: self.service_keypair.clone(),
}
}
}
| {
info!(
"Storage version successfully initialized with value [{}].",
storage::StorageMetadata::read(&self.db.snapshot()).unwrap(),
)
} | conditional_block |
mod.rs | // Copyright 2018 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The module containing building blocks for creating blockchains powered by
//! the Exonum framework.
//!
//! Services are the main extension point for the Exonum framework. To create
//! your service on top of Exonum blockchain you need to perform the following steps:
//!
//! - Define your own information schema.
//! - Create one or more transaction types using the [`transactions!`] macro and
//! implement the [`Transaction`] trait for them.
//! - Create a data structure implementing the [`Service`] trait.
//! - Write API handlers for the service, if required.
//!
//! You may consult [the service creation tutorial][doc:create-service] for a detailed
//! instruction on how to create services.
//!
//! [`transactions!`]: ../macro.transactions.html
//! [`Transaction`]: ./trait.Transaction.html
//! [`Service`]: ./trait.Service.html
//! [doc:create-service]: https://exonum.com/doc/get-started/create-service
pub use self::{
block::{Block, BlockProof}, config::{ConsensusConfig, StoredConfiguration, ValidatorKeys},
genesis::GenesisConfig, schema::{Schema, TxLocation},
service::{Service, ServiceContext, SharedNodeState},
transaction::{
ExecutionError, ExecutionResult, Transaction, TransactionContext, TransactionError,
TransactionErrorType, TransactionMessage, TransactionResult, TransactionSet,
},
};
pub mod config;
use byteorder::{ByteOrder, LittleEndian};
use failure;
use vec_map::VecMap;
use std::{
collections::{BTreeMap, HashMap}, error::Error as StdError, fmt, iter, mem, panic, sync::Arc,
};
use crypto::{self, CryptoHash, Hash, PublicKey, SecretKey};
use encoding::Error as MessageError;
use helpers::{Height, Round, ValidatorId};
use messages::{Connect, Message, Precommit, ProtocolMessage, RawTransaction, Signed};
use node::ApiSender;
use storage::{self, Database, Error, Fork, Patch, Snapshot};
mod block;
mod genesis;
mod schema;
mod service;
#[macro_use]
mod transaction;
#[cfg(test)]
mod tests;
/// Id of core service table family.
pub const CORE_SERVICE: u16 = 0;
/// Exonum blockchain instance with a certain services set and data storage.
///
/// Only nodes with an identical set of services and genesis block can be combined
/// into a single network.
pub struct Blockchain {
db: Arc<dyn Database>,
service_map: Arc<VecMap<Box<dyn Service>>>,
#[doc(hidden)]
pub service_keypair: (PublicKey, SecretKey),
pub(crate) api_sender: ApiSender,
}
impl Blockchain {
/// Constructs a blockchain for the given `storage` and list of `services`.
pub fn new<D: Into<Arc<dyn Database>>>(
storage: D,
services: Vec<Box<dyn Service>>,
service_public_key: PublicKey,
service_secret_key: SecretKey,
api_sender: ApiSender,
) -> Self {
let mut service_map = VecMap::new();
for service in services {
let id = service.service_id() as usize;
if service_map.contains_key(id) {
panic!(
"Services have already contain service with id={}, please change it.",
id
);
}
service_map.insert(id, service);
}
Self {
db: storage.into(),
service_map: Arc::new(service_map),
service_keypair: (service_public_key, service_secret_key),
api_sender,
}
}
/// Recreates the blockchain to reuse with a sandbox.
#[doc(hidden)]
pub fn clone_with_api_sender(&self, api_sender: ApiSender) -> Self {
Self {
api_sender,
..self.clone()
}
}
/// Returns the `VecMap` for all services. This is a map which
/// contains service identifiers and service interfaces. The VecMap
/// allows proceeding from the service identifier to the service itself.
pub fn service_map(&self) -> &Arc<VecMap<Box<dyn Service>>> {
&self.service_map
}
/// Creates a read-only snapshot of the current storage state.
pub fn snapshot(&self) -> Box<dyn Snapshot> {
self.db.snapshot()
}
/// Creates a snapshot of the current storage state that can be later committed into the storage
/// via the `merge` method.
pub fn fork(&self) -> Fork {
self.db.fork()
}
/// Tries to create a `Transaction` object from the given raw message.
/// A raw message can be converted into a `Transaction` object only
/// if the following conditions are met:
///
/// - Blockchain has a service with the `service_id` of the given raw message.
/// - Service can deserialize the given raw message.
pub fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, MessageError> {
let id = raw.service_id() as usize;
let service = self.service_map
.get(id)
.ok_or_else(|| MessageError::from("Service not found."))?;
service.tx_from_raw(raw)
}
/// Commits changes from the patch to the blockchain storage.
/// See [`Fork`](../storage/struct.Fork.html) for details.
pub fn merge(&mut self, patch: Patch) -> Result<(), Error> |
/// Returns the hash of the latest committed block.
///
/// # Panics
///
/// If the genesis block was not committed.
pub fn last_hash(&self) -> Hash {
Schema::new(&self.snapshot())
.block_hashes_by_height()
.last()
.unwrap_or_else(Hash::default)
}
/// Returns the latest committed block.
pub fn last_block(&self) -> Block {
Schema::new(&self.snapshot()).last_block()
}
/// Creates and commits the genesis block with the given genesis configuration
/// if the blockchain has not been initialized.
///
/// # Panics
///
/// * If the genesis block was not committed.
/// * If storage version is not specified or not supported.
pub fn initialize(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let has_genesis_block = !Schema::new(&self.snapshot())
.block_hashes_by_height()
.is_empty();
if has_genesis_block {
self.assert_storage_version();
} else {
self.initialize_metadata();
self.create_genesis_block(cfg)?;
}
Ok(())
}
/// Initializes node-local metadata.
fn initialize_metadata(&mut self) {
let mut fork = self.db.fork();
storage::StorageMetadata::write_current(&mut fork);
if self.merge(fork.into_patch()).is_ok() {
info!(
"Storage version successfully initialized with value [{}].",
storage::StorageMetadata::read(&self.db.snapshot()).unwrap(),
)
} else {
panic!("Could not set database version.")
}
}
/// Checks if storage version is supported.
///
/// # Panics
///
/// Panics if version is not supported or is not specified.
fn assert_storage_version(&self) {
match storage::StorageMetadata::read(self.db.snapshot()) {
Ok(ver) => info!("Storage version is supported with value [{}].", ver),
Err(e) => panic!("{}", e),
}
}
/// Creates and commits the genesis block with the given genesis configuration.
fn create_genesis_block(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let mut config_propose = StoredConfiguration {
previous_cfg_hash: Hash::zero(),
actual_from: Height::zero(),
validator_keys: cfg.validator_keys,
consensus: cfg.consensus,
services: BTreeMap::new(),
};
let patch = {
let mut fork = self.fork();
// Update service tables
for (_, service) in self.service_map.iter() {
let cfg = service.initialize(&mut fork);
let name = service.service_name();
if config_propose.services.contains_key(name) {
panic!(
"Services already contain service with '{}' name, please change it",
name
);
}
config_propose.services.insert(name.into(), cfg);
}
// Commit actual configuration
{
let mut schema = Schema::new(&mut fork);
if schema.block_hash_by_height(Height::zero()).is_some() {
// TODO create genesis block for MemoryDB and compare its hash with the zero block. (ECR-1630)
return Ok(());
}
schema.commit_configuration(config_propose);
};
self.merge(fork.into_patch())?;
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Helper function to map a tuple (`u16`, `u16`) of service table coordinates
/// to a 32-byte value to be used as the `ProofMapIndex` key (it currently
/// supports only fixed size keys). The `hash` function is used to distribute
/// keys uniformly (compared to padding).
/// # Arguments
///
/// * `service_id` - `service_id` as returned by instance of type of
/// `Service` trait
/// * `table_idx` - index of service table in `Vec`, returned by the
/// `state_hash` method of instance of type of `Service` trait
// Also, using `hash` was the first approach considered here.
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
debug_assert!(table_idx <= u16::max_value() as usize);
let size = mem::size_of::<u16>();
let mut vec = vec![0; 2 * size];
LittleEndian::write_u16(&mut vec[0..size], service_id);
LittleEndian::write_u16(&mut vec[size..2 * size], table_idx as u16);
crypto::hash(&vec)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
let mut schema = Schema::new(&mut fork);
let state_hash = {
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
proposer_id,
height,
tx_hashes.len() as u32,
&last_hash,
&tx_hash,
&state_hash,
);
trace!("execute block = {:?}", block);
// Calculate block hash.
let block_hash = block.hash();
// Update height.
let mut schema = Schema::new(&mut fork);
schema.block_hashes_by_height_mut().push(block_hash);
// Save block.
schema.blocks_mut().put(&block_hash, block);
block_hash
};
(block_hash, fork.into_patch())
}
fn execute_transaction(
&self,
tx_hash: Hash,
height: Height,
index: usize,
fork: &mut Fork,
) -> Result<(), failure::Error> {
let (tx, raw, service_name) = {
let schema = Schema::new(&fork);
let raw = schema.transactions().get(&tx_hash).ok_or_else(|| {
failure::err_msg(format!(
"BUG: Cannot find transaction in database. tx: {:?}",
tx_hash
))
})?;
let service_name = self.service_map
.get(raw.service_id() as usize)
.ok_or_else(|| {
failure::err_msg(format!(
"Service not found. Service id: {}",
raw.service_id()
))
})?
.service_name();
let tx = self.tx_from_raw(raw.payload().clone()).or_else(|error| {
Err(failure::err_msg(format!(
"Service <{}>: {}, tx: {:?}",
service_name,
error.description(),
tx_hash
)))
})?;
(tx, raw, service_name)
};
fork.checkpoint();
let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let context = TransactionContext::new(&mut *fork, &raw);
tx.execute(context)
}));
let tx_result = TransactionResult(match catch_result {
Ok(execution_result) => {
match execution_result {
Ok(()) => {
fork.commit();
}
Err(ref e) => {
// Unlike panic, transaction failure isn't that rare, so logging the
// whole transaction body is overkill: it can be relatively big.
info!(
"Service <{}>: {:?} transaction execution failed: {:?}",
service_name, tx_hash, e
);
fork.rollback();
}
}
execution_result.map_err(TransactionError::from)
}
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"Service <{}>: {:?} transaction execution panicked: {:?}",
service_name, tx, err
);
Err(TransactionError::from_panic(&err))
}
});
let mut schema = Schema::new(fork);
schema.transaction_results_mut().put(&tx_hash, tx_result);
schema.commit_transaction(&tx_hash);
schema.block_transactions_mut(height).push(tx_hash);
let location = TxLocation::new(height, index as u64);
schema.transactions_locations_mut().put(&tx_hash, location);
Ok(())
}
/// Commits to the blockchain a new block with the indicated changes (patch),
/// hash and Precommit messages. After that invokes `after_commit`
/// for each service in the increasing order of their identifiers.
pub fn commit<I>(&mut self, patch: &Patch, block_hash: Hash, precommits: I) -> Result<(), Error>
where
I: Iterator<Item = Signed<Precommit>>,
{
let patch = {
let mut fork = {
let mut fork = self.db.fork();
fork.merge(patch.clone()); // FIXME: Avoid cloning here. (ECR-1631)
fork
};
{
let mut schema = Schema::new(&mut fork);
for precommit in precommits {
schema.precommits_mut(&block_hash).push(precommit.clone());
}
// Consensus messages cache is useful only during one height, so it should be
// cleared when a new height is achieved.
schema.consensus_messages_cache_mut().clear();
let txs_in_block = schema.last_block().tx_count();
let txs_count = schema.transactions_pool_len_index().get().unwrap_or(0);
debug_assert!(txs_count >= u64::from(txs_in_block));
schema
.transactions_pool_len_index_mut()
.set(txs_count - u64::from(txs_in_block));
}
fork.into_patch()
};
self.merge(patch)?;
// Invokes `after_commit` for each service in order of their identifiers
for (service_id, service) in self.service_map.iter() {
let context = ServiceContext::new(
self.service_keypair.0,
self.service_keypair.1.clone(),
self.api_sender.clone(),
self.fork(),
service_id as u16,
);
service.after_commit(&context);
}
Ok(())
}
/// Saves the `Connect` message from a peer to the cache.
pub(crate) fn save_peer(&mut self, pubkey: &PublicKey, peer: Signed<Connect>) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.peers_cache_mut().put(pubkey, peer);
}
self.merge(fork.into_patch())
.expect("Unable to save peer to the peers cache");
}
/// Removes from the cache the `Connect` message from a peer.
pub fn remove_peer_with_pubkey(&mut self, key: &PublicKey) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
let mut peers = schema.peers_cache_mut();
peers.remove(key);
}
self.merge(fork.into_patch())
.expect("Unable to remove peer from the peers cache");
}
/// Returns `Connect` messages from peers saved in the cache, if any.
pub fn get_saved_peers(&self) -> HashMap<PublicKey, Signed<Connect>> {
let schema = Schema::new(self.snapshot());
let peers_cache = schema.peers_cache();
let it = peers_cache.iter().map(|(k, v)| (k, v.clone()));
it.collect()
}
/// Saves the given raw message to the consensus messages cache.
pub(crate) fn save_message<T: ProtocolMessage>(&mut self, round: Round, raw: Signed<T>) {
self.save_messages(round, iter::once(raw.into()));
}
/// Saves a collection of `SignedMessage`s to the consensus messages cache with a single access to the
/// `Fork` instance.
pub(crate) fn save_messages<I>(&mut self, round: Round, iter: I)
where
I: IntoIterator<Item = Message>,
{
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.consensus_messages_cache_mut().extend(iter);
schema.set_consensus_round(round);
}
self.merge(fork.into_patch())
.expect("Unable to save messages to the consensus cache");
}
}
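// Editor's note (not part of the original Exonum source): a minimal, hedged
// sketch of how `service_table_unique_key` behaves, added as a separate test
// module so it sits outside `impl Blockchain`. The service id `5` below is
// purely hypothetical; the sketch only relies on `Hash` being comparable.
#[cfg(test)]
mod service_table_key_sketch {
    use super::*;

    #[test]
    fn distinct_coordinates_give_distinct_keys() {
        // Different table indices of the core service must not collide,
        // and neither must tables of different services.
        let core_first = Blockchain::service_table_unique_key(CORE_SERVICE, 0);
        let core_second = Blockchain::service_table_unique_key(CORE_SERVICE, 1);
        let other_service = Blockchain::service_table_unique_key(5, 0);
        assert_ne!(core_first, core_second);
        assert_ne!(core_first, other_service);
    }
}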
fn before_commit(service: &dyn Service, fork: &mut Fork) {
fork.checkpoint();
match panic::catch_unwind(panic::AssertUnwindSafe(|| service.before_commit(fork))) {
Ok(..) => fork.commit(),
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"{} service before_commit failed with error: {:?}",
service.service_name(),
err
);
}
}
}
impl fmt::Debug for Blockchain {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Blockchain(..)")
}
}
impl Clone for Blockchain {
fn clone(&self) -> Self {
Self {
db: Arc::clone(&self.db),
service_map: Arc::clone(&self.service_map),
api_sender: self.api_sender.clone(),
service_keypair: self.service_keypair.clone(),
}
}
}
| {
self.db.merge(patch)
} | identifier_body |
mod.rs | // Copyright 2018 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The module containing building blocks for creating blockchains powered by
//! the Exonum framework.
//!
//! Services are the main extension point for the Exonum framework. To create
//! your service on top of Exonum blockchain you need to perform the following steps:
//!
//! - Define your own information schema.
//! - Create one or more transaction types using the [`transactions!`] macro and
//! implement the [`Transaction`] trait for them.
//! - Create a data structure implementing the [`Service`] trait.
//! - Write API handlers for the service, if required.
//!
//! You may consult [the service creation tutorial][doc:create-service] for a detailed
//! instruction on how to create services.
//!
//! [`transactions!`]: ../macro.transactions.html
//! [`Transaction`]: ./trait.Transaction.html
//! [`Service`]: ./trait.Service.html
//! [doc:create-service]: https://exonum.com/doc/get-started/create-service
pub use self::{
block::{Block, BlockProof}, config::{ConsensusConfig, StoredConfiguration, ValidatorKeys},
genesis::GenesisConfig, schema::{Schema, TxLocation},
service::{Service, ServiceContext, SharedNodeState},
transaction::{
ExecutionError, ExecutionResult, Transaction, TransactionContext, TransactionError,
TransactionErrorType, TransactionMessage, TransactionResult, TransactionSet,
},
};
pub mod config;
use byteorder::{ByteOrder, LittleEndian};
use failure;
use vec_map::VecMap;
use std::{
collections::{BTreeMap, HashMap}, error::Error as StdError, fmt, iter, mem, panic, sync::Arc,
};
use crypto::{self, CryptoHash, Hash, PublicKey, SecretKey};
use encoding::Error as MessageError;
use helpers::{Height, Round, ValidatorId};
use messages::{Connect, Message, Precommit, ProtocolMessage, RawTransaction, Signed};
use node::ApiSender;
use storage::{self, Database, Error, Fork, Patch, Snapshot};
mod block;
mod genesis;
mod schema;
mod service;
#[macro_use]
mod transaction;
#[cfg(test)]
mod tests;
/// Id of core service table family.
pub const CORE_SERVICE: u16 = 0;
/// Exonum blockchain instance with a certain services set and data storage.
///
/// Only nodes with an identical set of services and genesis block can be combined
/// into a single network.
pub struct Blockchain {
db: Arc<dyn Database>,
service_map: Arc<VecMap<Box<dyn Service>>>,
#[doc(hidden)]
pub service_keypair: (PublicKey, SecretKey),
pub(crate) api_sender: ApiSender,
}
impl Blockchain {
/// Constructs a blockchain for the given `storage` and list of `services`.
pub fn new<D: Into<Arc<dyn Database>>>(
storage: D,
services: Vec<Box<dyn Service>>,
service_public_key: PublicKey,
service_secret_key: SecretKey,
api_sender: ApiSender,
) -> Self {
let mut service_map = VecMap::new();
for service in services {
let id = service.service_id() as usize;
if service_map.contains_key(id) {
panic!(
"Services have already contain service with id={}, please change it.",
id
);
}
service_map.insert(id, service);
}
Self {
db: storage.into(),
service_map: Arc::new(service_map),
service_keypair: (service_public_key, service_secret_key),
api_sender,
}
}
/// Recreates the blockchain to reuse with a sandbox.
#[doc(hidden)]
pub fn clone_with_api_sender(&self, api_sender: ApiSender) -> Self {
Self {
api_sender,
..self.clone()
}
}
/// Returns the `VecMap` for all services. This is a map which
/// contains service identifiers and service interfaces. The VecMap
/// allows proceeding from the service identifier to the service itself.
pub fn service_map(&self) -> &Arc<VecMap<Box<dyn Service>>> {
&self.service_map
}
/// Creates a read-only snapshot of the current storage state.
pub fn snapshot(&self) -> Box<dyn Snapshot> {
self.db.snapshot()
}
/// Creates a snapshot of the current storage state that can be later committed into the storage
/// via the `merge` method.
pub fn fork(&self) -> Fork {
self.db.fork()
}
/// Tries to create a `Transaction` object from the given raw message.
/// A raw message can be converted into a `Transaction` object only
/// if the following conditions are met:
///
/// - Blockchain has a service with the `service_id` of the given raw message.
/// - Service can deserialize the given raw message.
pub fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, MessageError> {
let id = raw.service_id() as usize;
let service = self.service_map
.get(id)
.ok_or_else(|| MessageError::from("Service not found."))?;
service.tx_from_raw(raw)
}
/// Commits changes from the patch to the blockchain storage.
/// See [`Fork`](../storage/struct.Fork.html) for details.
pub fn merge(&mut self, patch: Patch) -> Result<(), Error> {
self.db.merge(patch)
}
/// Returns the hash of the latest committed block.
///
/// # Panics
///
/// If the genesis block was not committed.
pub fn last_hash(&self) -> Hash {
Schema::new(&self.snapshot())
.block_hashes_by_height()
.last()
.unwrap_or_else(Hash::default)
}
/// Returns the latest committed block.
pub fn last_block(&self) -> Block {
Schema::new(&self.snapshot()).last_block()
}
/// Creates and commits the genesis block with the given genesis configuration
/// if the blockchain has not been initialized.
///
/// # Panics
///
/// * If the genesis block was not committed.
/// * If storage version is not specified or not supported.
pub fn initialize(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let has_genesis_block = !Schema::new(&self.snapshot())
.block_hashes_by_height()
.is_empty();
if has_genesis_block {
self.assert_storage_version();
} else {
self.initialize_metadata();
self.create_genesis_block(cfg)?;
}
Ok(())
}
/// Initializes node-local metadata.
fn initialize_metadata(&mut self) {
let mut fork = self.db.fork();
storage::StorageMetadata::write_current(&mut fork);
if self.merge(fork.into_patch()).is_ok() {
info!(
"Storage version successfully initialized with value [{}].",
storage::StorageMetadata::read(&self.db.snapshot()).unwrap(),
)
} else {
panic!("Could not set database version.")
}
}
/// Checks if storage version is supported.
///
/// # Panics
///
/// Panics if version is not supported or is not specified.
fn assert_storage_version(&self) {
match storage::StorageMetadata::read(self.db.snapshot()) {
Ok(ver) => info!("Storage version is supported with value [{}].", ver),
Err(e) => panic!("{}", e),
}
}
/// Creates and commits the genesis block with the given genesis configuration.
fn create_genesis_block(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let mut config_propose = StoredConfiguration {
previous_cfg_hash: Hash::zero(),
actual_from: Height::zero(),
validator_keys: cfg.validator_keys,
consensus: cfg.consensus,
services: BTreeMap::new(),
};
let patch = {
let mut fork = self.fork();
// Update service tables
for (_, service) in self.service_map.iter() {
let cfg = service.initialize(&mut fork);
let name = service.service_name();
if config_propose.services.contains_key(name) {
panic!(
"Services already contain service with '{}' name, please change it",
name
);
}
config_propose.services.insert(name.into(), cfg);
}
// Commit actual configuration
{
let mut schema = Schema::new(&mut fork);
if schema.block_hash_by_height(Height::zero()).is_some() {
// TODO create genesis block for MemoryDB and compare its hash with the zero block. (ECR-1630)
return Ok(());
}
schema.commit_configuration(config_propose);
};
self.merge(fork.into_patch())?;
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Helper function to map a tuple (`u16`, `u16`) of service table coordinates
/// to a 32-byte value to be used as the `ProofMapIndex` key (it currently
/// supports only fixed size keys). The `hash` function is used to distribute
/// keys uniformly (compared to padding).
/// # Arguments
///
/// * `service_id` - `service_id` as returned by instance of type of
/// `Service` trait
/// * `table_idx` - index of service table in `Vec`, returned by the
/// `state_hash` method of instance of type of `Service` trait
// Also, using `hash` was the first approach considered here.
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
debug_assert!(table_idx <= u16::max_value() as usize);
let size = mem::size_of::<u16>();
let mut vec = vec![0; 2 * size];
LittleEndian::write_u16(&mut vec[0..size], service_id);
LittleEndian::write_u16(&mut vec[size..2 * size], table_idx as u16);
crypto::hash(&vec)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
let mut schema = Schema::new(&mut fork);
let state_hash = {
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
proposer_id,
height,
tx_hashes.len() as u32,
&last_hash,
&tx_hash,
&state_hash,
);
trace!("execute block = {:?}", block);
// Calculate block hash.
let block_hash = block.hash();
// Update height.
let mut schema = Schema::new(&mut fork);
schema.block_hashes_by_height_mut().push(block_hash);
// Save block.
schema.blocks_mut().put(&block_hash, block);
block_hash
};
(block_hash, fork.into_patch())
}
fn execute_transaction(
&self,
tx_hash: Hash,
height: Height,
index: usize,
fork: &mut Fork,
) -> Result<(), failure::Error> {
let (tx, raw, service_name) = {
let schema = Schema::new(&fork);
let raw = schema.transactions().get(&tx_hash).ok_or_else(|| {
failure::err_msg(format!(
"BUG: Cannot find transaction in database. tx: {:?}",
tx_hash
))
})?;
let service_name = self.service_map
.get(raw.service_id() as usize)
.ok_or_else(|| {
failure::err_msg(format!(
"Service not found. Service id: {}",
raw.service_id()
))
})?
.service_name();
let tx = self.tx_from_raw(raw.payload().clone()).or_else(|error| {
Err(failure::err_msg(format!(
"Service <{}>: {}, tx: {:?}",
service_name,
error.description(),
tx_hash
)))
})?;
(tx, raw, service_name)
};
fork.checkpoint();
let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let context = TransactionContext::new(&mut *fork, &raw);
tx.execute(context)
}));
let tx_result = TransactionResult(match catch_result {
Ok(execution_result) => {
match execution_result {
Ok(()) => {
fork.commit();
}
Err(ref e) => {
// Unlike panic, transaction failure isn't that rare, so logging the
// whole transaction body is overkill: it can be relatively big.
info!(
"Service <{}>: {:?} transaction execution failed: {:?}",
service_name, tx_hash, e
);
fork.rollback();
}
}
execution_result.map_err(TransactionError::from)
}
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"Service <{}>: {:?} transaction execution panicked: {:?}",
service_name, tx, err
);
Err(TransactionError::from_panic(&err))
}
});
let mut schema = Schema::new(fork);
schema.transaction_results_mut().put(&tx_hash, tx_result);
schema.commit_transaction(&tx_hash);
schema.block_transactions_mut(height).push(tx_hash);
let location = TxLocation::new(height, index as u64);
schema.transactions_locations_mut().put(&tx_hash, location);
Ok(())
}
/// Commits to the blockchain a new block with the indicated changes (patch),
/// hash and Precommit messages. After that invokes `after_commit`
/// for each service in the increasing order of their identifiers.
pub fn commit<I>(&mut self, patch: &Patch, block_hash: Hash, precommits: I) -> Result<(), Error>
where
I: Iterator<Item = Signed<Precommit>>,
{
let patch = {
let mut fork = {
let mut fork = self.db.fork();
fork.merge(patch.clone()); // FIXME: Avoid cloning here. (ECR-1631)
fork
};
{
let mut schema = Schema::new(&mut fork);
for precommit in precommits {
schema.precommits_mut(&block_hash).push(precommit.clone());
}
// Consensus messages cache is useful only during one height, so it should be
// cleared when a new height is achieved.
schema.consensus_messages_cache_mut().clear();
let txs_in_block = schema.last_block().tx_count();
let txs_count = schema.transactions_pool_len_index().get().unwrap_or(0);
debug_assert!(txs_count >= u64::from(txs_in_block));
schema
.transactions_pool_len_index_mut()
.set(txs_count - u64::from(txs_in_block));
}
fork.into_patch()
};
self.merge(patch)?;
// Invokes `after_commit` for each service in order of their identifiers
for (service_id, service) in self.service_map.iter() {
let context = ServiceContext::new(
self.service_keypair.0,
self.service_keypair.1.clone(),
self.api_sender.clone(),
self.fork(),
service_id as u16,
);
service.after_commit(&context);
}
Ok(())
}
/// Saves the `Connect` message from a peer to the cache.
pub(crate) fn save_peer(&mut self, pubkey: &PublicKey, peer: Signed<Connect>) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.peers_cache_mut().put(pubkey, peer);
}
| pub fn remove_peer_with_pubkey(&mut self, key: &PublicKey) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
let mut peers = schema.peers_cache_mut();
peers.remove(key);
}
self.merge(fork.into_patch())
.expect("Unable to remove peer from the peers cache");
}
/// Returns `Connect` messages from peers saved in the cache, if any.
pub fn get_saved_peers(&self) -> HashMap<PublicKey, Signed<Connect>> {
let schema = Schema::new(self.snapshot());
let peers_cache = schema.peers_cache();
let it = peers_cache.iter().map(|(k, v)| (k, v.clone()));
it.collect()
}
/// Saves the given raw message to the consensus messages cache.
pub(crate) fn save_message<T: ProtocolMessage>(&mut self, round: Round, raw: Signed<T>) {
self.save_messages(round, iter::once(raw.into()));
}
/// Saves a collection of `SignedMessage`s to the consensus messages cache with a single access to the
/// `Fork` instance.
pub(crate) fn save_messages<I>(&mut self, round: Round, iter: I)
where
I: IntoIterator<Item = Message>,
{
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.consensus_messages_cache_mut().extend(iter);
schema.set_consensus_round(round);
}
self.merge(fork.into_patch())
.expect("Unable to save messages to the consensus cache");
}
}
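// Editor's note (not part of the original source): a hedged sketch of how a
// caller might use `tx_from_raw` above to vet an incoming raw transaction,
// treating an unknown service id or an undecodable payload as a rejection.
// The function name and the log message are illustrative only.
#[allow(dead_code)]
fn verify_raw_transaction(blockchain: &Blockchain, raw: RawTransaction) -> bool {
    match blockchain.tx_from_raw(raw) {
        Ok(_tx) => true,
        Err(err) => {
            error!("Raw transaction rejected: {}", err);
            false
        }
    }
}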
fn before_commit(service: &dyn Service, fork: &mut Fork) {
fork.checkpoint();
match panic::catch_unwind(panic::AssertUnwindSafe(|| service.before_commit(fork))) {
Ok(..) => fork.commit(),
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"{} service before_commit failed with error: {:?}",
service.service_name(),
err
);
}
}
}
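// Editor's note (not part of the original source): the same
// checkpoint/commit/rollback discipline as `before_commit` above, factored
// over an arbitrary closure purely to make the control flow easier to read in
// isolation. Storage errors keep unwinding; any other panic only rolls the
// fork back. The helper name is illustrative.
#[allow(dead_code)]
fn with_checkpoint<F>(fork: &mut Fork, action: F)
where
    F: FnOnce(&mut Fork),
{
    fork.checkpoint();
    match panic::catch_unwind(panic::AssertUnwindSafe(|| action(&mut *fork))) {
        Ok(()) => fork.commit(),
        Err(err) => {
            if err.is::<Error>() {
                // Continue panic unwind if the reason is a storage error.
                panic::resume_unwind(err);
            }
            fork.rollback();
        }
    }
}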
impl fmt::Debug for Blockchain {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Blockchain(..)")
}
}
impl Clone for Blockchain {
fn clone(&self) -> Self {
Self {
db: Arc::clone(&self.db),
service_map: Arc::clone(&self.service_map),
api_sender: self.api_sender.clone(),
service_keypair: self.service_keypair.clone(),
}
}
} | self.merge(fork.into_patch())
.expect("Unable to save peer to the peers cache");
}
/// Removes from the cache the `Connect` message from a peer. | random_line_split |
mod.rs | // Copyright 2018 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The module containing building blocks for creating blockchains powered by
//! the Exonum framework.
//!
//! Services are the main extension point for the Exonum framework. To create
//! your service on top of Exonum blockchain you need to perform the following steps:
//!
//! - Define your own information schema.
//! - Create one or more transaction types using the [`transactions!`] macro and
//! implement the [`Transaction`] trait for them.
//! - Create a data structure implementing the [`Service`] trait.
//! - Write API handlers for the service, if required.
//!
//! You may consult [the service creation tutorial][doc:create-service] for a detailed
//! instruction on how to create services.
//!
//! [`transactions!`]: ../macro.transactions.html
//! [`Transaction`]: ./trait.Transaction.html
//! [`Service`]: ./trait.Service.html
//! [doc:create-service]: https://exonum.com/doc/get-started/create-service
pub use self::{
block::{Block, BlockProof}, config::{ConsensusConfig, StoredConfiguration, ValidatorKeys},
genesis::GenesisConfig, schema::{Schema, TxLocation},
service::{Service, ServiceContext, SharedNodeState},
transaction::{
ExecutionError, ExecutionResult, Transaction, TransactionContext, TransactionError,
TransactionErrorType, TransactionMessage, TransactionResult, TransactionSet,
},
};
pub mod config;
use byteorder::{ByteOrder, LittleEndian};
use failure;
use vec_map::VecMap;
use std::{
collections::{BTreeMap, HashMap}, error::Error as StdError, fmt, iter, mem, panic, sync::Arc,
};
use crypto::{self, CryptoHash, Hash, PublicKey, SecretKey};
use encoding::Error as MessageError;
use helpers::{Height, Round, ValidatorId};
use messages::{Connect, Message, Precommit, ProtocolMessage, RawTransaction, Signed};
use node::ApiSender;
use storage::{self, Database, Error, Fork, Patch, Snapshot};
mod block;
mod genesis;
mod schema;
mod service;
#[macro_use]
mod transaction;
#[cfg(test)]
mod tests;
/// Id of core service table family.
pub const CORE_SERVICE: u16 = 0;
/// Exonum blockchain instance with a certain services set and data storage.
///
/// Only nodes with an identical set of services and genesis block can be combined
/// into a single network.
pub struct Blockchain {
db: Arc<dyn Database>,
service_map: Arc<VecMap<Box<dyn Service>>>,
#[doc(hidden)]
pub service_keypair: (PublicKey, SecretKey),
pub(crate) api_sender: ApiSender,
}
impl Blockchain {
/// Constructs a blockchain for the given `storage` and list of `services`.
pub fn new<D: Into<Arc<dyn Database>>>(
storage: D,
services: Vec<Box<dyn Service>>,
service_public_key: PublicKey,
service_secret_key: SecretKey,
api_sender: ApiSender,
) -> Self {
let mut service_map = VecMap::new();
for service in services {
let id = service.service_id() as usize;
if service_map.contains_key(id) {
panic!(
"Services have already contain service with id={}, please change it.",
id
);
}
service_map.insert(id, service);
}
Self {
db: storage.into(),
service_map: Arc::new(service_map),
service_keypair: (service_public_key, service_secret_key),
api_sender,
}
}
/// Recreates the blockchain to reuse with a sandbox.
#[doc(hidden)]
pub fn clone_with_api_sender(&self, api_sender: ApiSender) -> Self {
Self {
api_sender,
..self.clone()
}
}
/// Returns the `VecMap` for all services. This is a map which
/// contains service identifiers and service interfaces. The VecMap
/// allows proceeding from the service identifier to the service itself.
pub fn service_map(&self) -> &Arc<VecMap<Box<dyn Service>>> {
&self.service_map
}
/// Creates a read-only snapshot of the current storage state.
pub fn snapshot(&self) -> Box<dyn Snapshot> {
self.db.snapshot()
}
/// Creates a snapshot of the current storage state that can be later committed into the storage
/// via the `merge` method.
pub fn fork(&self) -> Fork {
self.db.fork()
}
/// Tries to create a `Transaction` object from the given raw message.
/// A raw message can be converted into a `Transaction` object only
/// if the following conditions are met:
///
/// - Blockchain has a service with the `service_id` of the given raw message.
/// - Service can deserialize the given raw message.
pub fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, MessageError> {
let id = raw.service_id() as usize;
let service = self.service_map
.get(id)
.ok_or_else(|| MessageError::from("Service not found."))?;
service.tx_from_raw(raw)
}
/// Commits changes from the patch to the blockchain storage.
/// See [`Fork`](../storage/struct.Fork.html) for details.
pub fn merge(&mut self, patch: Patch) -> Result<(), Error> {
self.db.merge(patch)
}
/// Returns the hash of the latest committed block.
///
/// # Panics
///
/// If the genesis block was not committed.
pub fn last_hash(&self) -> Hash {
Schema::new(&self.snapshot())
.block_hashes_by_height()
.last()
.unwrap_or_else(Hash::default)
}
/// Returns the latest committed block.
pub fn last_block(&self) -> Block {
Schema::new(&self.snapshot()).last_block()
}
/// Creates and commits the genesis block with the given genesis configuration
/// if the blockchain has not been initialized.
///
/// # Panics
///
/// * If the genesis block was not committed.
/// * If storage version is not specified or not supported.
pub fn initialize(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let has_genesis_block = !Schema::new(&self.snapshot())
.block_hashes_by_height()
.is_empty();
if has_genesis_block {
self.assert_storage_version();
} else {
self.initialize_metadata();
self.create_genesis_block(cfg)?;
}
Ok(())
}
/// Initializes node-local metadata.
fn initialize_metadata(&mut self) {
let mut fork = self.db.fork();
storage::StorageMetadata::write_current(&mut fork);
if self.merge(fork.into_patch()).is_ok() {
info!(
"Storage version successfully initialized with value [{}].",
storage::StorageMetadata::read(&self.db.snapshot()).unwrap(),
)
} else {
panic!("Could not set database version.")
}
}
/// Checks if storage version is supported.
///
/// # Panics
///
/// Panics if version is not supported or is not specified.
fn assert_storage_version(&self) {
match storage::StorageMetadata::read(self.db.snapshot()) {
Ok(ver) => info!("Storage version is supported with value [{}].", ver),
Err(e) => panic!("{}", e),
}
}
/// Creates and commits the genesis block with the given genesis configuration.
fn create_genesis_block(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let mut config_propose = StoredConfiguration {
previous_cfg_hash: Hash::zero(),
actual_from: Height::zero(),
validator_keys: cfg.validator_keys,
consensus: cfg.consensus,
services: BTreeMap::new(),
};
let patch = {
let mut fork = self.fork();
// Update service tables
for (_, service) in self.service_map.iter() {
let cfg = service.initialize(&mut fork);
let name = service.service_name();
if config_propose.services.contains_key(name) {
panic!(
"Services already contain service with '{}' name, please change it",
name
);
}
config_propose.services.insert(name.into(), cfg);
}
// Commit actual configuration
{
let mut schema = Schema::new(&mut fork);
if schema.block_hash_by_height(Height::zero()).is_some() {
// TODO create genesis block for MemoryDB and compare its hash with the zero block. (ECR-1630)
return Ok(());
}
schema.commit_configuration(config_propose);
};
self.merge(fork.into_patch())?;
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Helper function to map a tuple (`u16`, `u16`) of service table coordinates
/// to a 32-byte value to be used as the `ProofMapIndex` key (it currently
/// supports only fixed size keys). The `hash` function is used to distribute
/// keys uniformly (compared to padding).
/// # Arguments
///
/// * `service_id` - `service_id` as returned by instance of type of
/// `Service` trait
/// * `table_idx` - index of service table in `Vec`, returned by the
/// `state_hash` method of instance of type of `Service` trait
// Also, using `hash` was the first approach considered here.
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
debug_assert!(table_idx <= u16::max_value() as usize);
let size = mem::size_of::<u16>();
let mut vec = vec![0; 2 * size];
LittleEndian::write_u16(&mut vec[0..size], service_id);
LittleEndian::write_u16(&mut vec[size..2 * size], table_idx as u16);
crypto::hash(&vec)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
let mut schema = Schema::new(&mut fork);
let state_hash = {
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
proposer_id,
height,
tx_hashes.len() as u32,
&last_hash,
&tx_hash,
&state_hash,
);
trace!("execute block = {:?}", block);
// Calculate block hash.
let block_hash = block.hash();
// Update height.
let mut schema = Schema::new(&mut fork);
schema.block_hashes_by_height_mut().push(block_hash);
// Save block.
schema.blocks_mut().put(&block_hash, block);
block_hash
};
(block_hash, fork.into_patch())
}
fn execute_transaction(
&self,
tx_hash: Hash,
height: Height,
index: usize,
fork: &mut Fork,
) -> Result<(), failure::Error> {
let (tx, raw, service_name) = {
let schema = Schema::new(&fork);
let raw = schema.transactions().get(&tx_hash).ok_or_else(|| {
failure::err_msg(format!(
"BUG: Cannot find transaction in database. tx: {:?}",
tx_hash
))
})?;
let service_name = self.service_map
.get(raw.service_id() as usize)
.ok_or_else(|| {
failure::err_msg(format!(
"Service not found. Service id: {}",
raw.service_id()
))
})?
.service_name();
let tx = self.tx_from_raw(raw.payload().clone()).or_else(|error| {
Err(failure::err_msg(format!(
"Service <{}>: {}, tx: {:?}",
service_name,
error.description(),
tx_hash
)))
})?;
(tx, raw, service_name)
};
fork.checkpoint();
let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let context = TransactionContext::new(&mut *fork, &raw);
tx.execute(context)
}));
let tx_result = TransactionResult(match catch_result {
Ok(execution_result) => {
match execution_result {
Ok(()) => {
fork.commit();
}
Err(ref e) => {
// Unlike panic, transaction failure isn't that rare, so logging the
// whole transaction body is overkill: it can be relatively big.
info!(
"Service <{}>: {:?} transaction execution failed: {:?}",
service_name, tx_hash, e
);
fork.rollback();
}
}
execution_result.map_err(TransactionError::from)
}
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"Service <{}>: {:?} transaction execution panicked: {:?}",
service_name, tx, err
);
Err(TransactionError::from_panic(&err))
}
});
let mut schema = Schema::new(fork);
schema.transaction_results_mut().put(&tx_hash, tx_result);
schema.commit_transaction(&tx_hash);
schema.block_transactions_mut(height).push(tx_hash);
let location = TxLocation::new(height, index as u64);
schema.transactions_locations_mut().put(&tx_hash, location);
Ok(())
}
/// Commits to the blockchain a new block with the indicated changes (patch),
/// hash and Precommit messages. After that invokes `after_commit`
/// for each service in the increasing order of their identifiers.
pub fn commit<I>(&mut self, patch: &Patch, block_hash: Hash, precommits: I) -> Result<(), Error>
where
I: Iterator<Item = Signed<Precommit>>,
{
let patch = {
let mut fork = {
let mut fork = self.db.fork();
fork.merge(patch.clone()); // FIXME: Avoid cloning here. (ECR-1631)
fork
};
{
let mut schema = Schema::new(&mut fork);
for precommit in precommits {
schema.precommits_mut(&block_hash).push(precommit.clone());
}
// Consensus messages cache is useful only during one height, so it should be
// cleared when a new height is achieved.
schema.consensus_messages_cache_mut().clear();
let txs_in_block = schema.last_block().tx_count();
let txs_count = schema.transactions_pool_len_index().get().unwrap_or(0);
debug_assert!(txs_count >= u64::from(txs_in_block));
schema
.transactions_pool_len_index_mut()
.set(txs_count - u64::from(txs_in_block));
}
fork.into_patch()
};
self.merge(patch)?;
// Invokes `after_commit` for each service in order of their identifiers
for (service_id, service) in self.service_map.iter() {
let context = ServiceContext::new(
self.service_keypair.0,
self.service_keypair.1.clone(),
self.api_sender.clone(),
self.fork(),
service_id as u16,
);
service.after_commit(&context);
}
Ok(())
}
/// Saves the `Connect` message from a peer to the cache.
pub(crate) fn save_peer(&mut self, pubkey: &PublicKey, peer: Signed<Connect>) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.peers_cache_mut().put(pubkey, peer);
}
self.merge(fork.into_patch())
.expect("Unable to save peer to the peers cache");
}
/// Removes from the cache the `Connect` message from a peer.
pub fn remove_peer_with_pubkey(&mut self, key: &PublicKey) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
let mut peers = schema.peers_cache_mut();
peers.remove(key);
}
self.merge(fork.into_patch())
.expect("Unable to remove peer from the peers cache");
}
/// Returns `Connect` messages from peers saved in the cache, if any.
pub fn get_saved_peers(&self) -> HashMap<PublicKey, Signed<Connect>> {
let schema = Schema::new(self.snapshot());
let peers_cache = schema.peers_cache();
let it = peers_cache.iter().map(|(k, v)| (k, v.clone()));
it.collect()
}
/// Saves the given raw message to the consensus messages cache.
pub(crate) fn save_message<T: ProtocolMessage>(&mut self, round: Round, raw: Signed<T>) {
self.save_messages(round, iter::once(raw.into()));
}
/// Saves a collection of `SignedMessage`s to the consensus messages cache with a single access to the
/// `Fork` instance.
pub(crate) fn save_messages<I>(&mut self, round: Round, iter: I)
where
I: IntoIterator<Item = Message>,
{
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.consensus_messages_cache_mut().extend(iter);
schema.set_consensus_round(round);
}
self.merge(fork.into_patch())
.expect("Unable to save messages to the consensus cache");
}
}
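// Editor's note (not part of the original source): a hedged sketch of how a
// node-level caller would typically chain `create_patch` and `commit` from the
// impl above to produce and persist a block. The function name is illustrative
// and the precommits are assumed to come from the consensus layer.
#[allow(dead_code)]
fn produce_block(
    blockchain: &mut Blockchain,
    proposer: ValidatorId,
    height: Height,
    tx_hashes: &[Hash],
    precommits: Vec<Signed<Precommit>>,
) -> Result<(), Error> {
    // Execute the transactions against a fork, then merge the resulting patch
    // together with the validators' Precommit messages.
    let (block_hash, patch) = blockchain.create_patch(proposer, height, tx_hashes);
    blockchain.commit(&patch, block_hash, precommits.into_iter())
}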
fn | (service: &dyn Service, fork: &mut Fork) {
fork.checkpoint();
match panic::catch_unwind(panic::AssertUnwindSafe(|| service.before_commit(fork))) {
Ok(..) => fork.commit(),
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"{} service before_commit failed with error: {:?}",
service.service_name(),
err
);
}
}
}
impl fmt::Debug for Blockchain {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Blockchain(..)")
}
}
impl Clone for Blockchain {
fn clone(&self) -> Self {
Self {
db: Arc::clone(&self.db),
service_map: Arc::clone(&self.service_map),
api_sender: self.api_sender.clone(),
service_keypair: self.service_keypair.clone(),
}
}
}
| before_commit | identifier_name |
setup.py | """
To build with coverage of Cython files
export SM_CYTHON_COVERAGE=1
python -m pip install -e .
pytest --cov=statsmodels statsmodels
coverage html
"""
from setuptools import Command, Extension, find_packages, setup
from setuptools.dist import Distribution
from collections import defaultdict
import fnmatch
import inspect
import os
from os.path import dirname, join as pjoin, relpath
from pathlib import Path
import shutil
import sys
SETUP_DIR = Path(__file__).parent.resolve()
try:
# SM_FORCE_C is a testing shim to force setup to use C source files
FORCE_C = int(os.environ.get("SM_FORCE_C", 0))
if FORCE_C:
raise ImportError("Force import error for testing")
from Cython import Tempita
from Cython.Build import cythonize
from Cython.Distutils import build_ext
HAS_CYTHON = True
except ImportError:
from setuptools.command.build_ext import build_ext
HAS_CYTHON = False
try:
import numpy # noqa: F401
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
###############################################################################
# Key Values that Change Each Release
###############################################################################
# These are strictly installation requirements. Build requirements are
# managed in pyproject.toml
INSTALL_REQUIRES = []
with open("requirements.txt", encoding="utf-8") as req:
for line in req.readlines():
INSTALL_REQUIRES.append(line.split("#")[0].strip())
DEVELOP_REQUIRES = []
with open("requirements-dev.txt", encoding="utf-8") as req:
for line in req.readlines():
DEVELOP_REQUIRES.append(line.split("#")[0].strip())
CYTHON_MIN_VER = "0.29.26" # released 2020
EXTRAS_REQUIRE = {
"build": ["cython>=" + CYTHON_MIN_VER],
"develop": ["cython>=" + CYTHON_MIN_VER] + DEVELOP_REQUIRES,
"docs": [
"sphinx",
"nbconvert",
"jupyter_client",
"ipykernel",
"matplotlib",
"nbformat",
"numpydoc",
"pandas-datareader",
],
}
###############################################################################
# Values that rarely change
###############################################################################
DISTNAME = "statsmodels"
DESCRIPTION = "Statistical computations and models for Python"
README = SETUP_DIR.joinpath("README.rst").read_text()
LONG_DESCRIPTION = README
MAINTAINER = "statsmodels Developers"
MAINTAINER_EMAIL = "[email protected]"
URL = "https://www.statsmodels.org/"
LICENSE = "BSD License"
DOWNLOAD_URL = ""
PROJECT_URLS = {
"Bug Tracker": "https://github.com/statsmodels/statsmodels/issues",
"Documentation": "https://www.statsmodels.org/stable/index.html",
"Source Code": "https://github.com/statsmodels/statsmodels",
}
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Programming Language :: Cython",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
"Topic :: Office/Business :: Financial",
"Topic :: Scientific/Engineering",
]
FILES_TO_INCLUDE_IN_PACKAGE = ["LICENSE.txt", "setup.cfg"]
FILES_COPIED_TO_PACKAGE = []
for filename in FILES_TO_INCLUDE_IN_PACKAGE:
if os.path.exists(filename):
dest = os.path.join("statsmodels", filename)
shutil.copy2(filename, dest)
FILES_COPIED_TO_PACKAGE.append(dest)
STATESPACE_RESULTS = "statsmodels.tsa.statespace.tests.results"
ADDITIONAL_PACKAGE_DATA = {
"statsmodels": FILES_TO_INCLUDE_IN_PACKAGE,
"statsmodels.datasets.tests": ["*.zip"],
"statsmodels.iolib.tests.results": ["*.dta"],
"statsmodels.stats.tests.results": ["*.json"],
"statsmodels.tsa.stl.tests.results": ["*.csv"],
"statsmodels.tsa.vector_ar.tests.results": ["*.npz", "*.dat"],
"statsmodels.stats.tests": ["*.txt"],
"statsmodels.stats.libqsturng": ["*.r", "*.txt", "*.dat"],
"statsmodels.stats.libqsturng.tests": ["*.csv", "*.dat"],
"statsmodels.sandbox.regression.tests": ["*.dta", "*.csv"],
STATESPACE_RESULTS: ["*.pkl", "*.csv"],
STATESPACE_RESULTS + ".frbny_nowcast": ["test*.mat"],
STATESPACE_RESULTS + ".frbny_nowcast.Nowcasting.data.US": ["*.csv"],
}
##############################################################################
# Extension Building
##############################################################################
CYTHON_COVERAGE = os.environ.get("SM_CYTHON_COVERAGE", False)
CYTHON_COVERAGE = CYTHON_COVERAGE in ("1", "true", '"true"')
CYTHON_TRACE_NOGIL = str(int(CYTHON_COVERAGE))
if CYTHON_COVERAGE:
print("Building with coverage for Cython code")
COMPILER_DIRECTIVES = {"linetrace": CYTHON_COVERAGE}
DEFINE_MACROS = [
("CYTHON_TRACE_NOGIL", CYTHON_TRACE_NOGIL),
("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
]
exts = dict(
_stl={"source": "statsmodels/tsa/stl/_stl.pyx"},
_exponential_smoothers={
"source": "statsmodels/tsa/holtwinters/_exponential_smoothers.pyx"
}, # noqa: E501
_ets_smooth={
"source": "statsmodels/tsa/exponential_smoothing/_ets_smooth.pyx"
}, # noqa: E501
_innovations={"source": "statsmodels/tsa/_innovations.pyx"},
_hamilton_filter={
"source": "statsmodels/tsa/regime_switching/_hamilton_filter.pyx.in"
}, # noqa: E501
_kim_smoother={
"source": "statsmodels/tsa/regime_switching/_kim_smoother.pyx.in"
}, # noqa: E501
_arma_innovations={
"source": "statsmodels/tsa/innovations/_arma_innovations.pyx.in"
}, # noqa: E501
linbin={"source": "statsmodels/nonparametric/linbin.pyx"},
_qn={"source": "statsmodels/robust/_qn.pyx"},
_smoothers_lowess={
"source": "statsmodels/nonparametric/_smoothers_lowess.pyx"
}, # noqa: E501
)
statespace_exts = [
"statsmodels/tsa/statespace/_initialization.pyx.in",
"statsmodels/tsa/statespace/_representation.pyx.in",
"statsmodels/tsa/statespace/_kalman_filter.pyx.in",
"statsmodels/tsa/statespace/_filters/_conventional.pyx.in",
"statsmodels/tsa/statespace/_filters/_inversions.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_kalman_smoother.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_alternative.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_classical.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_conventional.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_cfa_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_tools.pyx.in",
]
class CleanCommand(Command):
user_options = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> None:
msg = """
python setup.py clean is not supported.
Use one of:
* `git clean -xdf` to clean all untracked files
* `git clean -Xdf` to clean untracked files ignored by .gitignore
"""
print(msg)
sys.exit(1)
def update_extension(extension, requires_math=True):
import numpy as np
numpy_includes = [np.get_include()]
extra_incl = pjoin(dirname(inspect.getfile(np.core)), "include")
numpy_includes += [extra_incl]
numpy_includes = list(set(numpy_includes))
numpy_math_libs = {
"include_dirs": [np.get_include()],
"library_dirs": [os.path.join(np.get_include(), '..', 'lib')],
"libraries": ["npymath"]
}
if not hasattr(extension, "include_dirs"):
return
extension.include_dirs = list(set(extension.include_dirs + numpy_includes))
if requires_math:
extension.include_dirs += numpy_math_libs["include_dirs"]
extension.libraries += numpy_math_libs["libraries"]
extension.library_dirs += numpy_math_libs["library_dirs"]
class DeferredBuildExt(build_ext):
"""build_ext command for use when numpy headers are needed."""
def build_extensions(self):
self._update_extensions()
build_ext.build_extensions(self)
def _update_extensions(self):
for extension in self.extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
cmdclass = {"clean": CleanCommand}
if not HAS_NUMPY:
cmdclass["build_ext"] = DeferredBuildExt
def check_source(source_name):
"""Chooses C or pyx source files, and raises if C is needed but missing"""
source_ext = ".pyx"
if not HAS_CYTHON:
source_name = source_name.replace(".pyx.in", ".c")
source_name = source_name.replace(".pyx", ".c")
source_ext = ".c"
if not os.path.exists(source_name):
msg = (
"C source not found. You must have Cython installed to "
"build if the C source files have not been generated."
)
raise IOError(msg)
return source_name, source_ext
def | (source_name):
"""Runs pyx.in files through tempita is needed"""
if source_name.endswith("pyx.in"):
with open(source_name, "r", encoding="utf-8") as templated:
pyx_template = templated.read()
pyx = Tempita.sub(pyx_template)
pyx_filename = source_name[:-3]
with open(pyx_filename, "w", encoding="utf-8") as pyx_file:
pyx_file.write(pyx)
file_stats = os.stat(source_name)
try:
os.utime(
pyx_filename,
ns=(file_stats.st_atime_ns, file_stats.st_mtime_ns),
)
except AttributeError:
os.utime(pyx_filename, (file_stats.st_atime, file_stats.st_mtime))
source_name = pyx_filename
return source_name
EXT_REQUIRES_NUMPY_MATH_LIBS = []
extensions = []
for config in exts.values():
uses_blas = True
source, ext = check_source(config["source"])
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
include_dirs = config.get("include_dirs", [])
depends = config.get("depends", [])
libraries = config.get("libraries", [])
library_dirs = config.get("library_dirs", [])
uses_numpy_libraries = config.get("numpy_libraries", False)
if uses_blas or uses_numpy_libraries:
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=include_dirs,
depends=depends,
libraries=libraries,
library_dirs=library_dirs,
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
for source in statespace_exts:
source, ext = check_source(source)
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=["statsmodels/src"],
depends=[],
libraries=[],
library_dirs=[],
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
if HAS_NUMPY:
for extension in extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
if HAS_CYTHON:
extensions = cythonize(
extensions,
compiler_directives=COMPILER_DIRECTIVES,
language_level=3,
force=CYTHON_COVERAGE,
)
##############################################################################
# Construct package data
##############################################################################
package_data = defaultdict(list)
filetypes = ["*.csv", "*.txt", "*.dta"]
for root, _, filenames in os.walk(
pjoin(os.getcwd(), "statsmodels", "datasets")
): # noqa: E501
matches = []
for filetype in filetypes:
for filename in fnmatch.filter(filenames, filetype):
matches.append(filename)
if matches:
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for root, _, _ in os.walk(pjoin(os.getcwd(), "statsmodels")):
if root.endswith("results"):
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for path, filetypes in ADDITIONAL_PACKAGE_DATA.items():
package_data[path].extend(filetypes)
if os.path.exists("MANIFEST"):
os.unlink("MANIFEST")
class BinaryDistribution(Distribution):
def is_pure(self):
return False
setup(
name=DISTNAME,
maintainer=MAINTAINER,
ext_modules=extensions,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
project_urls=PROJECT_URLS,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms="any",
cmdclass=cmdclass,
packages=find_packages(),
package_data=package_data,
distclass=BinaryDistribution,
include_package_data=False, # True will install all files in repo
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
zip_safe=False,
python_requires=">=3.8",
)
# Clean-up copied files
for copy in FILES_COPIED_TO_PACKAGE:
os.unlink(copy)
| process_tempita | identifier_name |
setup.py | """
To build with coverage of Cython files
export SM_CYTHON_COVERAGE=1
python -m pip install -e .
pytest --cov=statsmodels statsmodels
coverage html
"""
from setuptools import Command, Extension, find_packages, setup
from setuptools.dist import Distribution
from collections import defaultdict
import fnmatch
import inspect
import os
from os.path import dirname, join as pjoin, relpath
from pathlib import Path
import shutil
import sys
SETUP_DIR = Path(__file__).parent.resolve()
try:
# SM_FORCE_C is a testing shim to force setup to use C source files
FORCE_C = int(os.environ.get("SM_FORCE_C", 0))
if FORCE_C:
raise ImportError("Force import error for testing")
from Cython import Tempita
from Cython.Build import cythonize
from Cython.Distutils import build_ext
HAS_CYTHON = True
except ImportError:
from setuptools.command.build_ext import build_ext
HAS_CYTHON = False
try:
import numpy # noqa: F401
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
###############################################################################
# Key Values that Change Each Release
###############################################################################
# These are strictly installation requirements. Build requirements are
# managed in pyproject.toml
INSTALL_REQUIRES = []
with open("requirements.txt", encoding="utf-8") as req:
for line in req.readlines():
INSTALL_REQUIRES.append(line.split("#")[0].strip())
DEVELOP_REQUIRES = []
with open("requirements-dev.txt", encoding="utf-8") as req:
for line in req.readlines():
DEVELOP_REQUIRES.append(line.split("#")[0].strip())
CYTHON_MIN_VER = "0.29.26" # released 2020
EXTRAS_REQUIRE = {
"build": ["cython>=" + CYTHON_MIN_VER],
"develop": ["cython>=" + CYTHON_MIN_VER] + DEVELOP_REQUIRES,
"docs": [
"sphinx",
"nbconvert",
"jupyter_client",
"ipykernel",
"matplotlib",
"nbformat",
"numpydoc",
"pandas-datareader",
],
}
###############################################################################
# Values that rarely change
###############################################################################
DISTNAME = "statsmodels"
DESCRIPTION = "Statistical computations and models for Python"
README = SETUP_DIR.joinpath("README.rst").read_text()
LONG_DESCRIPTION = README
MAINTAINER = "statsmodels Developers"
MAINTAINER_EMAIL = "[email protected]"
URL = "https://www.statsmodels.org/"
LICENSE = "BSD License"
DOWNLOAD_URL = ""
PROJECT_URLS = {
"Bug Tracker": "https://github.com/statsmodels/statsmodels/issues",
"Documentation": "https://www.statsmodels.org/stable/index.html",
"Source Code": "https://github.com/statsmodels/statsmodels",
}
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Programming Language :: Cython",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
"Topic :: Office/Business :: Financial",
"Topic :: Scientific/Engineering",
]
FILES_TO_INCLUDE_IN_PACKAGE = ["LICENSE.txt", "setup.cfg"]
FILES_COPIED_TO_PACKAGE = []
for filename in FILES_TO_INCLUDE_IN_PACKAGE:
if os.path.exists(filename):
dest = os.path.join("statsmodels", filename)
shutil.copy2(filename, dest)
FILES_COPIED_TO_PACKAGE.append(dest)
STATESPACE_RESULTS = "statsmodels.tsa.statespace.tests.results"
ADDITIONAL_PACKAGE_DATA = {
"statsmodels": FILES_TO_INCLUDE_IN_PACKAGE,
"statsmodels.datasets.tests": ["*.zip"],
"statsmodels.iolib.tests.results": ["*.dta"],
"statsmodels.stats.tests.results": ["*.json"],
"statsmodels.tsa.stl.tests.results": ["*.csv"],
"statsmodels.tsa.vector_ar.tests.results": ["*.npz", "*.dat"],
"statsmodels.stats.tests": ["*.txt"],
"statsmodels.stats.libqsturng": ["*.r", "*.txt", "*.dat"],
"statsmodels.stats.libqsturng.tests": ["*.csv", "*.dat"],
"statsmodels.sandbox.regression.tests": ["*.dta", "*.csv"],
STATESPACE_RESULTS: ["*.pkl", "*.csv"],
STATESPACE_RESULTS + ".frbny_nowcast": ["test*.mat"],
STATESPACE_RESULTS + ".frbny_nowcast.Nowcasting.data.US": ["*.csv"],
}
##############################################################################
# Extension Building
##############################################################################
CYTHON_COVERAGE = os.environ.get("SM_CYTHON_COVERAGE", False)
CYTHON_COVERAGE = CYTHON_COVERAGE in ("1", "true", '"true"')
CYTHON_TRACE_NOGIL = str(int(CYTHON_COVERAGE))
if CYTHON_COVERAGE:
print("Building with coverage for Cython code")
COMPILER_DIRECTIVES = {"linetrace": CYTHON_COVERAGE}
DEFINE_MACROS = [
("CYTHON_TRACE_NOGIL", CYTHON_TRACE_NOGIL),
("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
]
exts = dict(
_stl={"source": "statsmodels/tsa/stl/_stl.pyx"},
_exponential_smoothers={
"source": "statsmodels/tsa/holtwinters/_exponential_smoothers.pyx"
}, # noqa: E501
_ets_smooth={
"source": "statsmodels/tsa/exponential_smoothing/_ets_smooth.pyx"
}, # noqa: E501
_innovations={"source": "statsmodels/tsa/_innovations.pyx"},
_hamilton_filter={
"source": "statsmodels/tsa/regime_switching/_hamilton_filter.pyx.in"
}, # noqa: E501
_kim_smoother={
"source": "statsmodels/tsa/regime_switching/_kim_smoother.pyx.in"
}, # noqa: E501
_arma_innovations={
"source": "statsmodels/tsa/innovations/_arma_innovations.pyx.in"
}, # noqa: E501
linbin={"source": "statsmodels/nonparametric/linbin.pyx"},
_qn={"source": "statsmodels/robust/_qn.pyx"},
_smoothers_lowess={
"source": "statsmodels/nonparametric/_smoothers_lowess.pyx"
}, # noqa: E501
)
statespace_exts = [
"statsmodels/tsa/statespace/_initialization.pyx.in",
"statsmodels/tsa/statespace/_representation.pyx.in",
"statsmodels/tsa/statespace/_kalman_filter.pyx.in",
"statsmodels/tsa/statespace/_filters/_conventional.pyx.in",
"statsmodels/tsa/statespace/_filters/_inversions.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_kalman_smoother.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_alternative.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_classical.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_conventional.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_cfa_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_tools.pyx.in",
]
class CleanCommand(Command):
user_options = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> None:
msg = """
python setup.py clean is not supported.
Use one of:
* `git clean -xdf` to clean all untracked files
* `git clean -Xdf` to clean untracked files ignored by .gitignore
"""
print(msg)
sys.exit(1)
def update_extension(extension, requires_math=True):
import numpy as np
numpy_includes = [np.get_include()]
extra_incl = pjoin(dirname(inspect.getfile(np.core)), "include")
numpy_includes += [extra_incl]
numpy_includes = list(set(numpy_includes))
numpy_math_libs = {
"include_dirs": [np.get_include()],
"library_dirs": [os.path.join(np.get_include(), '..', 'lib')],
"libraries": ["npymath"]
}
if not hasattr(extension, "include_dirs"):
return
extension.include_dirs = list(set(extension.include_dirs + numpy_includes))
if requires_math:
extension.include_dirs += numpy_math_libs["include_dirs"]
extension.libraries += numpy_math_libs["libraries"]
extension.library_dirs += numpy_math_libs["library_dirs"]
class DeferredBuildExt(build_ext):
"""build_ext command for use when numpy headers are needed."""
def build_extensions(self):
self._update_extensions()
build_ext.build_extensions(self)
def _update_extensions(self):
for extension in self.extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
cmdclass = {"clean": CleanCommand}
if not HAS_NUMPY:
cmdclass["build_ext"] = DeferredBuildExt
def check_source(source_name):
"""Chooses C or pyx source files, and raises if C is needed but missing"""
source_ext = ".pyx"
if not HAS_CYTHON:
source_name = source_name.replace(".pyx.in", ".c")
source_name = source_name.replace(".pyx", ".c")
source_ext = ".c"
if not os.path.exists(source_name):
msg = (
"C source not found. You must have Cython installed to "
"build if the C source files have not been generated."
)
raise IOError(msg)
return source_name, source_ext
def process_tempita(source_name):
"""Runs pyx.in files through tempita is needed"""
if source_name.endswith("pyx.in"):
with open(source_name, "r", encoding="utf-8") as templated:
pyx_template = templated.read()
pyx = Tempita.sub(pyx_template)
pyx_filename = source_name[:-3]
with open(pyx_filename, "w", encoding="utf-8") as pyx_file:
pyx_file.write(pyx)
file_stats = os.stat(source_name)
try:
os.utime(
pyx_filename,
ns=(file_stats.st_atime_ns, file_stats.st_mtime_ns),
)
except AttributeError:
os.utime(pyx_filename, (file_stats.st_atime, file_stats.st_mtime))
source_name = pyx_filename
return source_name
EXT_REQUIRES_NUMPY_MATH_LIBS = []
extensions = []
for config in exts.values():
uses_blas = True
source, ext = check_source(config["source"])
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
include_dirs = config.get("include_dirs", [])
depends = config.get("depends", [])
libraries = config.get("libraries", [])
library_dirs = config.get("library_dirs", [])
uses_numpy_libraries = config.get("numpy_libraries", False)
if uses_blas or uses_numpy_libraries:
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=include_dirs,
depends=depends,
libraries=libraries,
library_dirs=library_dirs,
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
for source in statespace_exts:
source, ext = check_source(source)
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=["statsmodels/src"],
depends=[],
libraries=[],
library_dirs=[],
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
if HAS_NUMPY:
for extension in extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
if HAS_CYTHON:
extensions = cythonize(
extensions,
compiler_directives=COMPILER_DIRECTIVES,
language_level=3,
force=CYTHON_COVERAGE,
)
##############################################################################
# Construct package data
##############################################################################
package_data = defaultdict(list)
filetypes = ["*.csv", "*.txt", "*.dta"]
for root, _, filenames in os.walk(
pjoin(os.getcwd(), "statsmodels", "datasets")
): # noqa: E501
matches = []
for filetype in filetypes:
for filename in fnmatch.filter(filenames, filetype):
matches.append(filename)
if matches:
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for root, _, _ in os.walk(pjoin(os.getcwd(), "statsmodels")):
if root.endswith("results"):
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for path, filetypes in ADDITIONAL_PACKAGE_DATA.items():
package_data[path].extend(filetypes)
if os.path.exists("MANIFEST"):
os.unlink("MANIFEST")
class BinaryDistribution(Distribution):
def is_pure(self):
return False
setup(
name=DISTNAME,
maintainer=MAINTAINER,
ext_modules=extensions,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL, | long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms="any",
cmdclass=cmdclass,
packages=find_packages(),
package_data=package_data,
distclass=BinaryDistribution,
include_package_data=False, # True will install all files in repo
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
zip_safe=False,
python_requires=">=3.8",
)
# Clean-up copied files
for copy in FILES_COPIED_TO_PACKAGE:
os.unlink(copy) | project_urls=PROJECT_URLS, | random_line_split |
setup.py | """
To build with coverage of Cython files
export SM_CYTHON_COVERAGE=1
python -m pip install -e .
pytest --cov=statsmodels statsmodels
coverage html
"""
from setuptools import Command, Extension, find_packages, setup
from setuptools.dist import Distribution
from collections import defaultdict
import fnmatch
import inspect
import os
from os.path import dirname, join as pjoin, relpath
from pathlib import Path
import shutil
import sys
SETUP_DIR = Path(__file__).parent.resolve()
try:
# SM_FORCE_C is a testing shim to force setup to use C source files
FORCE_C = int(os.environ.get("SM_FORCE_C", 0))
if FORCE_C:
raise ImportError("Force import error for testing")
from Cython import Tempita
from Cython.Build import cythonize
from Cython.Distutils import build_ext
HAS_CYTHON = True
except ImportError:
from setuptools.command.build_ext import build_ext
HAS_CYTHON = False
try:
import numpy # noqa: F401
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
###############################################################################
# Key Values that Change Each Release
###############################################################################
# These are strictly installation requirements. Build requirements are
# managed in pyproject.toml
INSTALL_REQUIRES = []
with open("requirements.txt", encoding="utf-8") as req:
for line in req.readlines():
INSTALL_REQUIRES.append(line.split("#")[0].strip())
DEVELOP_REQUIRES = []
with open("requirements-dev.txt", encoding="utf-8") as req:
for line in req.readlines():
DEVELOP_REQUIRES.append(line.split("#")[0].strip())
CYTHON_MIN_VER = "0.29.26" # released 2020
EXTRAS_REQUIRE = {
"build": ["cython>=" + CYTHON_MIN_VER],
"develop": ["cython>=" + CYTHON_MIN_VER] + DEVELOP_REQUIRES,
"docs": [
"sphinx",
"nbconvert",
"jupyter_client",
"ipykernel",
"matplotlib",
"nbformat",
"numpydoc",
"pandas-datareader",
],
}
###############################################################################
# Values that rarely change
###############################################################################
DISTNAME = "statsmodels"
DESCRIPTION = "Statistical computations and models for Python"
README = SETUP_DIR.joinpath("README.rst").read_text()
LONG_DESCRIPTION = README
MAINTAINER = "statsmodels Developers"
MAINTAINER_EMAIL = "[email protected]"
URL = "https://www.statsmodels.org/"
LICENSE = "BSD License"
DOWNLOAD_URL = ""
PROJECT_URLS = {
"Bug Tracker": "https://github.com/statsmodels/statsmodels/issues",
"Documentation": "https://www.statsmodels.org/stable/index.html",
"Source Code": "https://github.com/statsmodels/statsmodels",
}
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Programming Language :: Cython",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
"Topic :: Office/Business :: Financial",
"Topic :: Scientific/Engineering",
]
FILES_TO_INCLUDE_IN_PACKAGE = ["LICENSE.txt", "setup.cfg"]
FILES_COPIED_TO_PACKAGE = []
for filename in FILES_TO_INCLUDE_IN_PACKAGE:
if os.path.exists(filename):
dest = os.path.join("statsmodels", filename)
shutil.copy2(filename, dest)
FILES_COPIED_TO_PACKAGE.append(dest)
STATESPACE_RESULTS = "statsmodels.tsa.statespace.tests.results"
ADDITIONAL_PACKAGE_DATA = {
"statsmodels": FILES_TO_INCLUDE_IN_PACKAGE,
"statsmodels.datasets.tests": ["*.zip"],
"statsmodels.iolib.tests.results": ["*.dta"],
"statsmodels.stats.tests.results": ["*.json"],
"statsmodels.tsa.stl.tests.results": ["*.csv"],
"statsmodels.tsa.vector_ar.tests.results": ["*.npz", "*.dat"],
"statsmodels.stats.tests": ["*.txt"],
"statsmodels.stats.libqsturng": ["*.r", "*.txt", "*.dat"],
"statsmodels.stats.libqsturng.tests": ["*.csv", "*.dat"],
"statsmodels.sandbox.regression.tests": ["*.dta", "*.csv"],
STATESPACE_RESULTS: ["*.pkl", "*.csv"],
STATESPACE_RESULTS + ".frbny_nowcast": ["test*.mat"],
STATESPACE_RESULTS + ".frbny_nowcast.Nowcasting.data.US": ["*.csv"],
}
##############################################################################
# Extension Building
##############################################################################
CYTHON_COVERAGE = os.environ.get("SM_CYTHON_COVERAGE", False)
CYTHON_COVERAGE = CYTHON_COVERAGE in ("1", "true", '"true"')
CYTHON_TRACE_NOGIL = str(int(CYTHON_COVERAGE))
if CYTHON_COVERAGE:
print("Building with coverage for Cython code")
COMPILER_DIRECTIVES = {"linetrace": CYTHON_COVERAGE}
DEFINE_MACROS = [
("CYTHON_TRACE_NOGIL", CYTHON_TRACE_NOGIL),
("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
]
exts = dict(
_stl={"source": "statsmodels/tsa/stl/_stl.pyx"},
_exponential_smoothers={
"source": "statsmodels/tsa/holtwinters/_exponential_smoothers.pyx"
}, # noqa: E501
_ets_smooth={
"source": "statsmodels/tsa/exponential_smoothing/_ets_smooth.pyx"
}, # noqa: E501
_innovations={"source": "statsmodels/tsa/_innovations.pyx"},
_hamilton_filter={
"source": "statsmodels/tsa/regime_switching/_hamilton_filter.pyx.in"
}, # noqa: E501
_kim_smoother={
"source": "statsmodels/tsa/regime_switching/_kim_smoother.pyx.in"
}, # noqa: E501
_arma_innovations={
"source": "statsmodels/tsa/innovations/_arma_innovations.pyx.in"
}, # noqa: E501
linbin={"source": "statsmodels/nonparametric/linbin.pyx"},
_qn={"source": "statsmodels/robust/_qn.pyx"},
_smoothers_lowess={
"source": "statsmodels/nonparametric/_smoothers_lowess.pyx"
}, # noqa: E501
)
statespace_exts = [
"statsmodels/tsa/statespace/_initialization.pyx.in",
"statsmodels/tsa/statespace/_representation.pyx.in",
"statsmodels/tsa/statespace/_kalman_filter.pyx.in",
"statsmodels/tsa/statespace/_filters/_conventional.pyx.in",
"statsmodels/tsa/statespace/_filters/_inversions.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_kalman_smoother.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_alternative.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_classical.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_conventional.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_cfa_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_tools.pyx.in",
]
class CleanCommand(Command):
user_options = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> None:
msg = """
python setup.py clean is not supported.
Use one of:
* `git clean -xdf` to clean all untracked files
* `git clean -Xdf` to clean untracked files ignored by .gitignore
"""
print(msg)
sys.exit(1)
def update_extension(extension, requires_math=True):
import numpy as np
numpy_includes = [np.get_include()]
extra_incl = pjoin(dirname(inspect.getfile(np.core)), "include")
numpy_includes += [extra_incl]
numpy_includes = list(set(numpy_includes))
numpy_math_libs = {
"include_dirs": [np.get_include()],
"library_dirs": [os.path.join(np.get_include(), '..', 'lib')],
"libraries": ["npymath"]
}
if not hasattr(extension, "include_dirs"):
return
extension.include_dirs = list(set(extension.include_dirs + numpy_includes))
if requires_math:
extension.include_dirs += numpy_math_libs["include_dirs"]
extension.libraries += numpy_math_libs["libraries"]
extension.library_dirs += numpy_math_libs["library_dirs"]
class DeferredBuildExt(build_ext):
"""build_ext command for use when numpy headers are needed."""
def build_extensions(self):
self._update_extensions()
build_ext.build_extensions(self)
def _update_extensions(self):
for extension in self.extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
cmdclass = {"clean": CleanCommand}
if not HAS_NUMPY:
cmdclass["build_ext"] = DeferredBuildExt
def check_source(source_name):
"""Chooses C or pyx source files, and raises if C is needed but missing"""
source_ext = ".pyx"
if not HAS_CYTHON:
source_name = source_name.replace(".pyx.in", ".c")
source_name = source_name.replace(".pyx", ".c")
source_ext = ".c"
if not os.path.exists(source_name):
msg = (
"C source not found. You must have Cython installed to "
"build if the C source files have not been generated."
)
raise IOError(msg)
return source_name, source_ext
def process_tempita(source_name):
"""Runs pyx.in files through tempita is needed"""
if source_name.endswith("pyx.in"):
with open(source_name, "r", encoding="utf-8") as templated:
pyx_template = templated.read()
pyx = Tempita.sub(pyx_template)
pyx_filename = source_name[:-3]
with open(pyx_filename, "w", encoding="utf-8") as pyx_file:
pyx_file.write(pyx)
file_stats = os.stat(source_name)
try:
os.utime(
pyx_filename,
ns=(file_stats.st_atime_ns, file_stats.st_mtime_ns),
)
except AttributeError:
os.utime(pyx_filename, (file_stats.st_atime, file_stats.st_mtime))
source_name = pyx_filename
return source_name
EXT_REQUIRES_NUMPY_MATH_LIBS = []
extensions = []
for config in exts.values():
uses_blas = True
source, ext = check_source(config["source"])
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
include_dirs = config.get("include_dirs", [])
depends = config.get("depends", [])
libraries = config.get("libraries", [])
library_dirs = config.get("library_dirs", [])
uses_numpy_libraries = config.get("numpy_libraries", False)
if uses_blas or uses_numpy_libraries:
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=include_dirs,
depends=depends,
libraries=libraries,
library_dirs=library_dirs,
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
for source in statespace_exts:
source, ext = check_source(source)
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=["statsmodels/src"],
depends=[],
libraries=[],
library_dirs=[],
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
if HAS_NUMPY:
for extension in extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
if HAS_CYTHON:
extensions = cythonize(
extensions,
compiler_directives=COMPILER_DIRECTIVES,
language_level=3,
force=CYTHON_COVERAGE,
)
##############################################################################
# Construct package data
##############################################################################
package_data = defaultdict(list)
filetypes = ["*.csv", "*.txt", "*.dta"]
for root, _, filenames in os.walk(
pjoin(os.getcwd(), "statsmodels", "datasets")
): # noqa: E501
matches = []
for filetype in filetypes:
for filename in fnmatch.filter(filenames, filetype):
matches.append(filename)
if matches:
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for root, _, _ in os.walk(pjoin(os.getcwd(), "statsmodels")):
if root.endswith("results"):
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for path, filetypes in ADDITIONAL_PACKAGE_DATA.items():
package_data[path].extend(filetypes)
if os.path.exists("MANIFEST"):
os.unlink("MANIFEST")
class BinaryDistribution(Distribution):
def is_pure(self):
|
setup(
name=DISTNAME,
maintainer=MAINTAINER,
ext_modules=extensions,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
project_urls=PROJECT_URLS,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms="any",
cmdclass=cmdclass,
packages=find_packages(),
package_data=package_data,
distclass=BinaryDistribution,
include_package_data=False, # True will install all files in repo
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
zip_safe=False,
python_requires=">=3.8",
)
# Clean-up copied files
for copy in FILES_COPIED_TO_PACKAGE:
os.unlink(copy)
| return False | identifier_body |
setup.py | """
To build with coverage of Cython files
export SM_CYTHON_COVERAGE=1
python -m pip install -e .
pytest --cov=statsmodels statsmodels
coverage html
"""
from setuptools import Command, Extension, find_packages, setup
from setuptools.dist import Distribution
from collections import defaultdict
import fnmatch
import inspect
import os
from os.path import dirname, join as pjoin, relpath
from pathlib import Path
import shutil
import sys
SETUP_DIR = Path(__file__).parent.resolve()
try:
# SM_FORCE_C is a testing shim to force setup to use C source files
FORCE_C = int(os.environ.get("SM_FORCE_C", 0))
if FORCE_C:
raise ImportError("Force import error for testing")
from Cython import Tempita
from Cython.Build import cythonize
from Cython.Distutils import build_ext
HAS_CYTHON = True
except ImportError:
from setuptools.command.build_ext import build_ext
HAS_CYTHON = False
try:
import numpy # noqa: F401
HAS_NUMPY = True
except ImportError:
HAS_NUMPY = False
###############################################################################
# Key Values that Change Each Release
###############################################################################
# These are strictly installation requirements. Build requirements are
# managed in pyproject.toml
INSTALL_REQUIRES = []
with open("requirements.txt", encoding="utf-8") as req:
for line in req.readlines():
INSTALL_REQUIRES.append(line.split("#")[0].strip())
DEVELOP_REQUIRES = []
with open("requirements-dev.txt", encoding="utf-8") as req:
for line in req.readlines():
DEVELOP_REQUIRES.append(line.split("#")[0].strip())
CYTHON_MIN_VER = "0.29.26" # released 2020
EXTRAS_REQUIRE = {
"build": ["cython>=" + CYTHON_MIN_VER],
"develop": ["cython>=" + CYTHON_MIN_VER] + DEVELOP_REQUIRES,
"docs": [
"sphinx",
"nbconvert",
"jupyter_client",
"ipykernel",
"matplotlib",
"nbformat",
"numpydoc",
"pandas-datareader",
],
}
###############################################################################
# Values that rarely change
###############################################################################
DISTNAME = "statsmodels"
DESCRIPTION = "Statistical computations and models for Python"
README = SETUP_DIR.joinpath("README.rst").read_text()
LONG_DESCRIPTION = README
MAINTAINER = "statsmodels Developers"
MAINTAINER_EMAIL = "[email protected]"
URL = "https://www.statsmodels.org/"
LICENSE = "BSD License"
DOWNLOAD_URL = ""
PROJECT_URLS = {
"Bug Tracker": "https://github.com/statsmodels/statsmodels/issues",
"Documentation": "https://www.statsmodels.org/stable/index.html",
"Source Code": "https://github.com/statsmodels/statsmodels",
}
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Programming Language :: Cython",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
"Topic :: Office/Business :: Financial",
"Topic :: Scientific/Engineering",
]
FILES_TO_INCLUDE_IN_PACKAGE = ["LICENSE.txt", "setup.cfg"]
FILES_COPIED_TO_PACKAGE = []
for filename in FILES_TO_INCLUDE_IN_PACKAGE:
if os.path.exists(filename):
|
STATESPACE_RESULTS = "statsmodels.tsa.statespace.tests.results"
ADDITIONAL_PACKAGE_DATA = {
"statsmodels": FILES_TO_INCLUDE_IN_PACKAGE,
"statsmodels.datasets.tests": ["*.zip"],
"statsmodels.iolib.tests.results": ["*.dta"],
"statsmodels.stats.tests.results": ["*.json"],
"statsmodels.tsa.stl.tests.results": ["*.csv"],
"statsmodels.tsa.vector_ar.tests.results": ["*.npz", "*.dat"],
"statsmodels.stats.tests": ["*.txt"],
"statsmodels.stats.libqsturng": ["*.r", "*.txt", "*.dat"],
"statsmodels.stats.libqsturng.tests": ["*.csv", "*.dat"],
"statsmodels.sandbox.regression.tests": ["*.dta", "*.csv"],
STATESPACE_RESULTS: ["*.pkl", "*.csv"],
STATESPACE_RESULTS + ".frbny_nowcast": ["test*.mat"],
STATESPACE_RESULTS + ".frbny_nowcast.Nowcasting.data.US": ["*.csv"],
}
##############################################################################
# Extension Building
##############################################################################
CYTHON_COVERAGE = os.environ.get("SM_CYTHON_COVERAGE", False)
CYTHON_COVERAGE = CYTHON_COVERAGE in ("1", "true", '"true"')
CYTHON_TRACE_NOGIL = str(int(CYTHON_COVERAGE))
if CYTHON_COVERAGE:
print("Building with coverage for Cython code")
COMPILER_DIRECTIVES = {"linetrace": CYTHON_COVERAGE}
DEFINE_MACROS = [
("CYTHON_TRACE_NOGIL", CYTHON_TRACE_NOGIL),
("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
]
exts = dict(
_stl={"source": "statsmodels/tsa/stl/_stl.pyx"},
_exponential_smoothers={
"source": "statsmodels/tsa/holtwinters/_exponential_smoothers.pyx"
}, # noqa: E501
_ets_smooth={
"source": "statsmodels/tsa/exponential_smoothing/_ets_smooth.pyx"
}, # noqa: E501
_innovations={"source": "statsmodels/tsa/_innovations.pyx"},
_hamilton_filter={
"source": "statsmodels/tsa/regime_switching/_hamilton_filter.pyx.in"
}, # noqa: E501
_kim_smoother={
"source": "statsmodels/tsa/regime_switching/_kim_smoother.pyx.in"
}, # noqa: E501
_arma_innovations={
"source": "statsmodels/tsa/innovations/_arma_innovations.pyx.in"
}, # noqa: E501
linbin={"source": "statsmodels/nonparametric/linbin.pyx"},
_qn={"source": "statsmodels/robust/_qn.pyx"},
_smoothers_lowess={
"source": "statsmodels/nonparametric/_smoothers_lowess.pyx"
}, # noqa: E501
)
statespace_exts = [
"statsmodels/tsa/statespace/_initialization.pyx.in",
"statsmodels/tsa/statespace/_representation.pyx.in",
"statsmodels/tsa/statespace/_kalman_filter.pyx.in",
"statsmodels/tsa/statespace/_filters/_conventional.pyx.in",
"statsmodels/tsa/statespace/_filters/_inversions.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate.pyx.in",
"statsmodels/tsa/statespace/_filters/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_kalman_smoother.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_alternative.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_classical.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_conventional.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate.pyx.in",
"statsmodels/tsa/statespace/_smoothers/_univariate_diffuse.pyx.in",
"statsmodels/tsa/statespace/_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_cfa_simulation_smoother.pyx.in",
"statsmodels/tsa/statespace/_tools.pyx.in",
]
class CleanCommand(Command):
user_options = []
def initialize_options(self) -> None:
pass
def finalize_options(self) -> None:
pass
def run(self) -> None:
msg = """
python setup.py clean is not supported.
Use one of:
* `git clean -xdf` to clean all untracked files
* `git clean -Xdf` to clean untracked files ignored by .gitignore
"""
print(msg)
sys.exit(1)
def update_extension(extension, requires_math=True):
import numpy as np
numpy_includes = [np.get_include()]
extra_incl = pjoin(dirname(inspect.getfile(np.core)), "include")
numpy_includes += [extra_incl]
numpy_includes = list(set(numpy_includes))
numpy_math_libs = {
"include_dirs": [np.get_include()],
"library_dirs": [os.path.join(np.get_include(), '..', 'lib')],
"libraries": ["npymath"]
}
if not hasattr(extension, "include_dirs"):
return
extension.include_dirs = list(set(extension.include_dirs + numpy_includes))
if requires_math:
extension.include_dirs += numpy_math_libs["include_dirs"]
extension.libraries += numpy_math_libs["libraries"]
extension.library_dirs += numpy_math_libs["library_dirs"]
class DeferredBuildExt(build_ext):
"""build_ext command for use when numpy headers are needed."""
def build_extensions(self):
self._update_extensions()
build_ext.build_extensions(self)
def _update_extensions(self):
for extension in self.extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
cmdclass = {"clean": CleanCommand}
if not HAS_NUMPY:
cmdclass["build_ext"] = DeferredBuildExt
def check_source(source_name):
"""Chooses C or pyx source files, and raises if C is needed but missing"""
source_ext = ".pyx"
if not HAS_CYTHON:
source_name = source_name.replace(".pyx.in", ".c")
source_name = source_name.replace(".pyx", ".c")
source_ext = ".c"
if not os.path.exists(source_name):
msg = (
"C source not found. You must have Cython installed to "
"build if the C source files have not been generated."
)
raise IOError(msg)
return source_name, source_ext
def process_tempita(source_name):
"""Runs pyx.in files through tempita is needed"""
if source_name.endswith("pyx.in"):
with open(source_name, "r", encoding="utf-8") as templated:
pyx_template = templated.read()
pyx = Tempita.sub(pyx_template)
pyx_filename = source_name[:-3]
with open(pyx_filename, "w", encoding="utf-8") as pyx_file:
pyx_file.write(pyx)
file_stats = os.stat(source_name)
try:
os.utime(
pyx_filename,
ns=(file_stats.st_atime_ns, file_stats.st_mtime_ns),
)
except AttributeError:
os.utime(pyx_filename, (file_stats.st_atime, file_stats.st_mtime))
source_name = pyx_filename
return source_name
EXT_REQUIRES_NUMPY_MATH_LIBS = []
extensions = []
for config in exts.values():
uses_blas = True
source, ext = check_source(config["source"])
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
include_dirs = config.get("include_dirs", [])
depends = config.get("depends", [])
libraries = config.get("libraries", [])
library_dirs = config.get("library_dirs", [])
uses_numpy_libraries = config.get("numpy_libraries", False)
if uses_blas or uses_numpy_libraries:
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=include_dirs,
depends=depends,
libraries=libraries,
library_dirs=library_dirs,
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
for source in statespace_exts:
source, ext = check_source(source)
source = process_tempita(source)
name = source.replace("/", ".").replace(ext, "")
EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
ext = Extension(
name,
[source],
include_dirs=["statsmodels/src"],
depends=[],
libraries=[],
library_dirs=[],
define_macros=DEFINE_MACROS,
)
extensions.append(ext)
if HAS_NUMPY:
for extension in extensions:
requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
update_extension(extension, requires_math=requires_math)
if HAS_CYTHON:
extensions = cythonize(
extensions,
compiler_directives=COMPILER_DIRECTIVES,
language_level=3,
force=CYTHON_COVERAGE,
)
##############################################################################
# Construct package data
##############################################################################
package_data = defaultdict(list)
filetypes = ["*.csv", "*.txt", "*.dta"]
for root, _, filenames in os.walk(
pjoin(os.getcwd(), "statsmodels", "datasets")
): # noqa: E501
matches = []
for filetype in filetypes:
for filename in fnmatch.filter(filenames, filetype):
matches.append(filename)
if matches:
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for root, _, _ in os.walk(pjoin(os.getcwd(), "statsmodels")):
if root.endswith("results"):
package_data[".".join(relpath(root).split(os.path.sep))] = filetypes
for path, filetypes in ADDITIONAL_PACKAGE_DATA.items():
package_data[path].extend(filetypes)
if os.path.exists("MANIFEST"):
os.unlink("MANIFEST")
class BinaryDistribution(Distribution):
def is_pure(self):
return False
setup(
name=DISTNAME,
maintainer=MAINTAINER,
ext_modules=extensions,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
project_urls=PROJECT_URLS,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms="any",
cmdclass=cmdclass,
packages=find_packages(),
package_data=package_data,
distclass=BinaryDistribution,
include_package_data=False, # True will install all files in repo
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
zip_safe=False,
python_requires=">=3.8",
)
# Clean-up copied files
for copy in FILES_COPIED_TO_PACKAGE:
os.unlink(copy)
| dest = os.path.join("statsmodels", filename)
shutil.copy2(filename, dest)
FILES_COPIED_TO_PACKAGE.append(dest) | conditional_block |
cpu.go | // Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package cpu measures CPU usage.
package cpu
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"sync"
"time"
"golang.org/x/sys/unix"
"chromiumos/tast/common/testexec"
upstartcommon "chromiumos/tast/common/upstart"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/cpu"
"chromiumos/tast/local/gtest"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// ExitOption describes how to clean up the child process upon function exit.
type ExitOption int
const (
// KillProcess option kills the child process when the function is done.
KillProcess ExitOption = iota
// WaitProcess option waits for the child process to finish.
WaitProcess
)
// raplExec is the command used to measure power consumption, only supported on Intel platforms.
const raplExec = "/usr/bin/dump_intel_rapl_consumption"
// MeasureProcessUsage starts one or more gtest processes and measures CPU usage and power consumption asynchronously
// for the given duration. A map is returned containing CPU usage (percentage in [0-100] range) with key "cpu" and power
// consumption (Watts) with key "power" if supported.
func MeasureProcessUsage(ctx context.Context, duration time.Duration,
exitOption ExitOption, ts ...*gtest.GTest) (measurements map[string]float64, retErr error) {
const (
stabilizeTime = 1 * time.Second // time to wait for CPU to stabilize after launching proc.
cleanupTime = 5 * time.Second // time reserved for cleanup after measuring.
)
for _, t := range ts {
// Start the process asynchronously by calling the provided startup function.
cmd, err := t.Start(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to run binary")
}
// Clean up the process upon exiting the function.
defer func() {
// If the exit option is 'WaitProcess' wait for the process to terminate.
if exitOption == WaitProcess {
if err := cmd.Wait(); err != nil {
retErr = err
testing.ContextLog(ctx, "Failed waiting for the command to exit: ", retErr)
}
return
}
// If the exit option is 'KillProcess' we will send a 'SIGKILL' signal
// to the process after collecting performance metrics.
if err := cmd.Kill(); err != nil {
retErr = err
testing.ContextLog(ctx, "Failed to kill process: ", retErr)
return
}
// After sending a 'SIGKILL' signal to the process we need to wait
// for the process to terminate. If Wait() doesn't return any error,
// we know the process already terminated before we explicitly killed
// it and the measured performance metrics are invalid.
err = cmd.Wait()
if err == nil {
retErr = errors.New("process did not run for entire measurement duration")
testing.ContextLog(ctx, retErr)
return
}
// Check whether the process was terminated with a 'SIGKILL' signal.
ws, ok := testexec.GetWaitStatus(err)
if !ok {
retErr = errors.Wrap(err, "failed to get wait status")
testing.ContextLog(ctx, retErr)
} else if !ws.Signaled() || ws.Signal() != unix.SIGKILL {
retErr = errors.Wrap(err, "process did not terminate with SIGKILL signal")
testing.ContextLog(ctx, retErr)
}
}()
}
// Use a shorter context to leave time for cleanup upon failure.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
if err := testing.Sleep(ctx, stabilizeTime); err != nil {
return nil, errors.Wrap(err, "failed waiting for CPU usage to stabilize")
}
testing.ContextLog(ctx, "Measuring CPU usage and power consumption for ", duration.Round(time.Second))
return MeasureUsage(ctx, duration)
}
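// Example (editor's sketch, not part of the original file): one way a Tast test
// could drive MeasureProcessUsage with a single gtest binary. The binary path
// and the bare gtest.New call are illustrative assumptions; real tests usually
// pass additional gtest options (log file, test filters, ...).
func exampleMeasureProcessUsage(ctx context.Context, s *testing.State) {
	// Hypothetical benchmark binary; substitute the binary under test.
	t := gtest.New("/usr/local/libexec/chrome-binary-tests/video_decode_accelerator_perf_tests")
	measurements, err := MeasureProcessUsage(ctx, 20*time.Second, KillProcess, t)
	if err != nil {
		s.Fatal("Failed to measure process usage: ", err)
	}
	s.Logf("CPU usage: %.1f%%", measurements["cpu"])
	if power, ok := measurements["power"]; ok {
		s.Logf("Package power: %.2f W", power)
	}
}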
// SetUpBenchmark performs setup needed for running benchmarks. It disables CPU
// frequency scaling and thermal throttling. A deferred call to the returned
// cleanUp function should be scheduled by the caller if err is nil.
func SetUpBenchmark(ctx context.Context) (cleanUp func(ctx context.Context), err error) {
const cleanupTime = 10 * time.Second // time reserved for cleanup on error.
var restoreScaling func(ctx context.Context) error
var restoreThrottling func(ctx context.Context) error
cleanUp = func(ctx context.Context) {
if restoreScaling != nil {
if err = restoreScaling(ctx); err != nil {
testing.ContextLog(ctx, "Failed to restore CPU frequency scaling to original values: ", err)
}
}
if restoreThrottling != nil {
if err = restoreThrottling(ctx); err != nil {
testing.ContextLog(ctx, "Failed to restore CPU thermal throttling to original values: ", err)
}
}
}
// Run the cleanUp function automatically if we encounter an error.
doCleanup := cleanUp
defer func() {
if doCleanup != nil {
doCleanup(ctx)
}
}()
// Run all non-cleanup operations with a shorter context. This ensures
// thermal throttling and CPU frequency scaling get re-enabled, even when
// test execution exceeds the maximum time allowed.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
// CPU frequency scaling and thermal throttling might influence our test results.
if restoreScaling, err = disableCPUFrequencyScaling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable CPU frequency scaling")
}
if restoreThrottling, err = disableThermalThrottling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable thermal throttling")
}
// Disarm running the cleanUp function now that we expect the caller to do it.
doCleanup = nil
return cleanUp, nil
}
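// Example (editor's sketch): the intended call pattern for SetUpBenchmark. The
// cleanup closure is deferred only after setup succeeds, so frequency scaling
// and thermal throttling are restored even if the test later fails.
func exampleSetUpBenchmark(ctx context.Context, s *testing.State) {
	cleanUpBenchmark, err := SetUpBenchmark(ctx)
	if err != nil {
		s.Fatal("Failed to set up benchmark mode: ", err)
	}
	defer cleanUpBenchmark(ctx)
	// Run the workload and measurements here.
}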
// MeasureUsage measures the average utilization across all CPUs and the
// average SoC 'pkg' power consumption during the specified duration. Measuring
// power consumption is currently not supported on all platforms. A map is
// returned containing CPU usage (percentage in [0-100] range) and power
// consumption (Watts) if supported.
func MeasureUsage(ctx context.Context, duration time.Duration) (map[string]float64, error) {
var cpuUsage, powerConsumption float64
var cpuErr, powerErr error
var wg sync.WaitGroup
// Start measuring CPU usage asynchronously.
wg.Add(1)
go func() {
defer wg.Done()
cpuUsage, cpuErr = cpu.MeasureUsage(ctx, duration)
}()
// Start measuring power consumption asynchronously. Power consumption
// is currently only measured on Intel devices that support the
// dump_intel_rapl_consumption command.
if _, powerErr = os.Stat(raplExec); powerErr == nil {
wg.Add(1)
go func() {
defer wg.Done()
if powerConsumption, powerErr = MeasurePowerConsumption(ctx, duration); powerErr != nil {
testing.ContextLog(ctx, "Measuring power consumption failed: ", powerErr)
}
}()
}
wg.Wait()
measurements := make(map[string]float64)
if cpuErr == nil {
measurements["cpu"] = cpuUsage
}
if powerErr == nil {
measurements["power"] = powerConsumption
}
// Ignore powerErr as not all platforms support measuring power consumption.
return measurements, cpuErr
}
// MeasurePowerConsumption measures power consumption during the specified
// duration and returns the average power consumption (in Watts). The power
// consumption is acquired by reading the RAPL 'pkg' entry, which gives a
// measure of the total SoC power consumption.
func MeasurePowerConsumption(ctx context.Context, duration time.Duration) (float64, error) {
cmd := testexec.CommandContext(ctx, raplExec, "--interval_ms="+
strconv.FormatInt(int64(duration/time.Millisecond), 10))
powerConsumptionOutput, err := cmd.CombinedOutput()
if err != nil {
return 0.0, err
}
var powerConsumptionRegex = regexp.MustCompile(`(\d+\.\d+)`)
match := powerConsumptionRegex.FindAllString(string(powerConsumptionOutput), 1)
if len(match) != 1 {
return 0.0, errors.Errorf("failed to parse output of %s", raplExec)
}
powerConsumption, err := strconv.ParseFloat(match[0], 64)
if err != nil {
return 0.0, err
}
return powerConsumption, nil
}
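// Example (editor's sketch): probing package power directly for a short
// interval, guarded by the same existence check MeasureUsage performs.
func exampleMeasurePower(ctx context.Context, s *testing.State) {
	if _, err := os.Stat(raplExec); err != nil {
		s.Log("dump_intel_rapl_consumption not available; skipping power probe")
		return
	}
	watts, err := MeasurePowerConsumption(ctx, 5*time.Second)
	if err != nil {
		s.Fatal("Failed to measure power consumption: ", err)
	}
	s.Logf("Average package power: %.2f W", watts)
}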
// cpuConfigEntry holds a single CPU config entry. If ignoreErrors is true
// failure to apply the config will result in a warning, rather than an error.
// This is needed as on some platforms we might not have the right permissions
// to disable frequency scaling.
type cpuConfigEntry struct {
path string
value string
ignoreErrors bool
}
// disableCPUFrequencyScaling disables frequency scaling. All CPU cores will be
// set to always run at their maximum frequency. A function is returned so the
// caller can restore the original CPU frequency scaling configuration.
// Depending on the platform different mechanisms are present:
// - Some Intel-based platforms (e.g. Eve and Nocturne) ignore the values set
// in the scaling_governor, and instead use the intel_pstate application to
// control CPU frequency scaling.
// - Most platforms use the scaling_governor to control CPU frequency scaling.
// - Some platforms (e.g. Dru) use a different CPU frequency scaling governor.
func disableCPUFrequencyScaling(ctx context.Context) (func(ctx context.Context) error, error) {
configPatterns := []cpuConfigEntry{
// crbug.com/938729: BIOS settings might prevent us from overwriting intel_pstate/no_turbo.
{"/sys/devices/system/cpu/intel_pstate/no_turbo", "1", true},
// Fix the intel_pstate percentage to 100 if possible. We raise the
// maximum value before the minimum value as the min cannot exceed the
// max. To restore them, the order must be inverted. Note that we set
// and save the original values for these values because changing
// scaling_governor to "performance" can change these values as well.
{"/sys/devices/system/cpu/intel_pstate/max_perf_pct", "100", false},
{"/sys/devices/system/cpu/intel_pstate/min_perf_pct", "100", false},
// crbug.com/977925: Disabled hyperthreading cores are listed but
// writing config for these disabled cores results in 'invalid argument'.
// TODO(dstaessens): Skip disabled CPU cores when setting scaling_governor.
{"/sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_governor", "performance", true},
{"/sys/class/devfreq/devfreq[0-9]*/governor", "performance", true},
}
var optimizedConfig []cpuConfigEntry
// Expands patterns in configPatterns and pack actual configs into
// optimizedConfig.
for _, config := range configPatterns {
paths, err := filepath.Glob(config.path)
if err != nil {
return nil, err
}
for _, path := range paths {
optimizedConfig = append(optimizedConfig, cpuConfigEntry{
path,
config.value,
config.ignoreErrors,
})
}
}
origConfig, err := applyConfig(ctx, optimizedConfig)
undo := func(ctx context.Context) error {
_, err := applyConfig(ctx, origConfig)
return err
}
if err != nil {
undo(ctx)
return nil, err
}
return undo, nil
}
// applyConfig applies the specified frequency scaling configuration. A slice of
// cpuConfigEntry needs to be provided and will be processed in order. A slice
// of the original cpuConfigEntry values that were successfully processed is
// returned in reverse order so the caller can restore the original config by
// passing the slice to this function as is. If ignoreErrors is true for a
// config entry we won't return an error upon failure, but will only show a
// warning. The provided context will only be used for logging, so the config
// will even be applied upon timeout.
func applyConfig(ctx context.Context, cpuConfig []cpuConfigEntry) ([]cpuConfigEntry, error) {
var origConfig []cpuConfigEntry
for _, config := range cpuConfig {
origValue, err := ioutil.ReadFile(config.path)
if err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to read %v: %v", config.path, err)
continue
}
if err = ioutil.WriteFile(config.path, []byte(config.value), 0644); err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to write to %v: %v", config.path, err)
continue
}
// Inserts a new entry at the front of origConfig.
e := cpuConfigEntry{config.path, string(origValue), false}
origConfig = append([]cpuConfigEntry{e}, origConfig...)
}
return origConfig, nil
}
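// Example (editor's sketch): applyConfig returns the overwritten entries in
// reverse order, so feeding that slice back through applyConfig restores the
// original sysfs state. The single path below is only an illustration.
func exampleApplyAndRestore(ctx context.Context) error {
	orig, err := applyConfig(ctx, []cpuConfigEntry{
		{"/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "performance", true},
	})
	if err != nil {
		return err
	}
	_, restoreErr := applyConfig(ctx, orig)
	return restoreErr
}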
// disableThermalThrottling disables thermal throttling, as it might interfere
// with test execution. A function is returned that restores the original
// settings, so the caller can re-enable thermal throttling after testing.
func disableThermalThrottling(ctx context.Context) (func(context.Context) error, error) {
job := getThermalThrottlingJob(ctx)
if job == "" {
return func(ctx context.Context) error { return nil }, nil
}
_, state, _, err := upstart.JobStatus(ctx, job)
if err != nil {
return nil, err
} else if state != upstartcommon.RunningState {
return func(ctx context.Context) error { return nil }, nil
}
if err := upstart.StopJob(ctx, job); err != nil {
return nil, err
}
undo := func(ctx context.Context) error { return upstart.EnsureJobRunning(ctx, job) }
return undo, nil
}
// getThermalThrottlingJob tries to determine the name of the thermal throttling
// job used by the current platform.
func | (ctx context.Context) string {
// List of possible thermal throttling jobs that should be disabled:
// - dptf for intel >= baytrail
// - temp_metrics for link
// - thermal for daisy, snow, pit,...
for _, job := range []string{"dptf", "temp_metrics", "thermal"} {
if upstart.JobExists(ctx, job) {
return job
}
}
return ""
}
| getThermalThrottlingJob | identifier_name |
cpu.go | // Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package cpu measures CPU usage.
package cpu
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"sync"
"time"
"golang.org/x/sys/unix"
"chromiumos/tast/common/testexec"
upstartcommon "chromiumos/tast/common/upstart"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/cpu"
"chromiumos/tast/local/gtest"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// ExitOption describes how to clean up the child process upon function exit.
type ExitOption int
const (
// KillProcess option kills the child process when the function is done.
KillProcess ExitOption = iota
// WaitProcess option waits for the child process to finish.
WaitProcess
)
// raplExec is the command used to measure power consumption, only supported on Intel platforms.
const raplExec = "/usr/bin/dump_intel_rapl_consumption"
// MeasureProcessUsage starts one or more gtest processes and measures CPU usage and power consumption asynchronously
// for the given duration. A map is returned containing CPU usage (percentage in [0-100] range) with key "cpu" and power
// consumption (Watts) with key "power" if supported.
func MeasureProcessUsage(ctx context.Context, duration time.Duration,
exitOption ExitOption, ts ...*gtest.GTest) (measurements map[string]float64, retErr error) {
const (
stabilizeTime = 1 * time.Second // time to wait for CPU to stabilize after launching proc.
cleanupTime = 5 * time.Second // time reserved for cleanup after measuring.
)
for _, t := range ts {
// Start the process asynchronously by calling the provided startup function.
cmd, err := t.Start(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to run binary")
}
// Clean up the process upon exiting the function.
defer func() {
// If the exit option is 'WaitProcess' wait for the process to terminate.
if exitOption == WaitProcess {
if err := cmd.Wait(); err != nil {
retErr = err
testing.ContextLog(ctx, "Failed waiting for the command to exit: ", retErr)
}
return
}
// If the exit option is 'KillProcess' we will send a 'SIGKILL' signal
// to the process after collecting performance metrics.
if err := cmd.Kill(); err != nil {
retErr = err
testing.ContextLog(ctx, "Failed to kill process: ", retErr)
return
}
// After sending a 'SIGKILL' signal to the process we need to wait
// for the process to terminate. If Wait() doesn't return any error,
// we know the process already terminated before we explicitly killed
// it and the measured performance metrics are invalid.
err = cmd.Wait()
if err == nil {
retErr = errors.New("process did not run for entire measurement duration")
testing.ContextLog(ctx, retErr)
return
}
// Check whether the process was terminated with a 'SIGKILL' signal.
ws, ok := testexec.GetWaitStatus(err)
if !ok {
retErr = errors.Wrap(err, "failed to get wait status")
testing.ContextLog(ctx, retErr)
} else if !ws.Signaled() || ws.Signal() != unix.SIGKILL {
retErr = errors.Wrap(err, "process did not terminate with SIGKILL signal")
testing.ContextLog(ctx, retErr)
}
}()
}
// Use a shorter context to leave time for cleanup upon failure.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
if err := testing.Sleep(ctx, stabilizeTime); err != nil {
return nil, errors.Wrap(err, "failed waiting for CPU usage to stabilize")
}
testing.ContextLog(ctx, "Measuring CPU usage and power consumption for ", duration.Round(time.Second))
return MeasureUsage(ctx, duration)
}
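// Example (editor's sketch, not part of the original file): one way a caller
// might combine SetUpBenchmark and MeasureProcessUsage. The helper name and
// the fixed 10-second duration are assumptions made for illustration only.
func exampleMeasureProcessUsage(ctx context.Context, t *gtest.GTest) (map[string]float64, error) {
	cleanUp, err := SetUpBenchmark(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to set up CPU benchmark mode")
	}
	defer cleanUp(ctx)
	// WaitProcess lets the gtest binary run to completion; KillProcess would
	// terminate it once the measurement duration has elapsed.
	return MeasureProcessUsage(ctx, 10*time.Second, WaitProcess, t)
}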
// SetUpBenchmark performs setup needed for running benchmarks. It disables CPU
// frequency scaling and thermal throttling. A deferred call to the returned
// cleanUp function should be scheduled by the caller if err is nil.
func SetUpBenchmark(ctx context.Context) (cleanUp func(ctx context.Context), err error) {
const cleanupTime = 10 * time.Second // time reserved for cleanup on error.
var restoreScaling func(ctx context.Context) error
var restoreThrottling func(ctx context.Context) error
cleanUp = func(ctx context.Context) {
if restoreScaling != nil {
if err = restoreScaling(ctx); err != nil {
testing.ContextLog(ctx, "Failed to restore CPU frequency scaling to original values: ", err)
}
}
if restoreThrottling != nil {
if err = restoreThrottling(ctx); err != nil {
testing.ContextLog(ctx, "Failed to restore CPU thermal throttling to original values: ", err)
}
}
}
// Run the cleanUp function automatically if we encounter an error.
doCleanup := cleanUp
defer func() {
if doCleanup != nil {
doCleanup(ctx)
}
}()
// Run all non-cleanup operations with a shorter context. This ensures
// thermal throttling and CPU frequency scaling get re-enabled, even when
// test execution exceeds the maximum time allowed.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
// CPU frequency scaling and thermal throttling might influence our test results.
if restoreScaling, err = disableCPUFrequencyScaling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable CPU frequency scaling")
}
if restoreThrottling, err = disableThermalThrottling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable thermal throttling")
}
// Disarm running the cleanUp function now that we expect the caller to do it.
doCleanup = nil
return cleanUp, nil
}
// MeasureUsage measures the average utilization across all CPUs and the
// average SoC 'pkg' power consumption during the specified duration. Measuring
// power consumption is currently not supported on all platforms. A map is
// returned containing CPU usage (percentage in [0-100] range) and power
// consumption (Watts) if supported.
func MeasureUsage(ctx context.Context, duration time.Duration) (map[string]float64, error) {
var cpuUsage, powerConsumption float64
var cpuErr, powerErr error
var wg sync.WaitGroup
// Start measuring CPU usage asynchronously.
wg.Add(1)
go func() {
defer wg.Done()
cpuUsage, cpuErr = cpu.MeasureUsage(ctx, duration)
}()
// Start measuring power consumption asynchronously. Power consumption
// is currently only measured on Intel devices that support the
// dump_intel_rapl_consumption command.
if _, powerErr = os.Stat(raplExec); powerErr == nil {
wg.Add(1)
go func() {
defer wg.Done()
if powerConsumption, powerErr = MeasurePowerConsumption(ctx, duration); powerErr != nil {
testing.ContextLog(ctx, "Measuring power consumption failed: ", powerErr)
}
}()
}
wg.Wait()
measurements := make(map[string]float64)
if cpuErr == nil {
measurements["cpu"] = cpuUsage
}
if powerErr == nil {
measurements["power"] = powerConsumption
}
// Ignore powerErr as not all platforms support measuring power consumption.
return measurements, cpuErr
}
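// Example (editor's sketch, not part of the original file): the "power" key is
// only present on platforms that support dump_intel_rapl_consumption, so a
// caller should treat it as optional. The helper name is an assumption.
func exampleLogUsage(ctx context.Context, duration time.Duration) error {
	m, err := MeasureUsage(ctx, duration)
	if err != nil {
		return err
	}
	testing.ContextLogf(ctx, "CPU usage: %.2f%%", m["cpu"])
	if power, ok := m["power"]; ok {
		testing.ContextLogf(ctx, "Power consumption: %.2f W", power)
	}
	return nil
}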
// MeasurePowerConsumption measures power consumption during the specified
// duration and returns the average power consumption (in Watts). The power
// consumption is acquired by reading the RAPL 'pkg' entry, which gives a
// measure of the total SoC power consumption.
func MeasurePowerConsumption(ctx context.Context, duration time.Duration) (float64, error) {
cmd := testexec.CommandContext(ctx, raplExec, "--interval_ms="+
strconv.FormatInt(int64(duration/time.Millisecond), 10))
powerConsumptionOutput, err := cmd.CombinedOutput()
if err != nil {
return 0.0, err
}
var powerConsumptionRegex = regexp.MustCompile(`(\d+\.\d+)`)
match := powerConsumptionRegex.FindAllString(string(powerConsumptionOutput), 1)
if len(match) != 1 {
return 0.0, errors.Errorf("failed to parse output of %s", raplExec)
}
powerConsumption, err := strconv.ParseFloat(match[0], 64)
if err != nil {
return 0.0, err
}
return powerConsumption, nil
}
// cpuConfigEntry holds a single CPU config entry. If ignoreErrors is true,
// failure to apply the config will result in a warning, rather than an error.
// This is needed as on some platforms we might not have the right permissions
// to disable frequency scaling.
type cpuConfigEntry struct {
path string
value string
ignoreErrors bool
}
// disableCPUFrequencyScaling disables frequency scaling. All CPU cores will be
// set to always run at their maximum frequency. A function is returned so the
// caller can restore the original CPU frequency scaling configuration.
// Depending on the platform different mechanisms are present:
// - Some Intel-based platforms (e.g. Eve and Nocturne) ignore the values set
// in the scaling_governor, and instead use the intel_pstate driver to
// control CPU frequency scaling.
// - Most platforms use the scaling_governor to control CPU frequency scaling.
// - Some platforms (e.g. Dru) use a different CPU frequency scaling governor.
func disableCPUFrequencyScaling(ctx context.Context) (func(ctx context.Context) error, error) {
configPatterns := []cpuConfigEntry{
// crbug.com/938729: BIOS settings might prevent us from overwriting intel_pstate/no_turbo.
{"/sys/devices/system/cpu/intel_pstate/no_turbo", "1", true},
// Fix the intel_pstate percentage to 100 if possible. We raise the
// maximum value before the minimum value as the min cannot exceed the
// max. To restore them, the order must be inverted. Note that we set
// and save the original values for these values because changing
// scaling_governor to "performance" can change these values as well.
{"/sys/devices/system/cpu/intel_pstate/max_perf_pct", "100", false},
{"/sys/devices/system/cpu/intel_pstate/min_perf_pct", "100", false},
// crbug.com/977925: Disabled hyperthreading cores are listed but
// writing config for these disabled cores results in 'invalid argument'.
// TODO(dstaessens): Skip disabled CPU cores when setting scaling_governor.
{"/sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_governor", "performance", true},
{"/sys/class/devfreq/devfreq[0-9]*/governor", "performance", true},
}
var optimizedConfig []cpuConfigEntry
// Expands patterns in configPatterns and packs actual configs into
// optimizedConfig.
for _, config := range configPatterns {
paths, err := filepath.Glob(config.path)
if err != nil {
return nil, err
}
for _, path := range paths {
optimizedConfig = append(optimizedConfig, cpuConfigEntry{
path,
config.value,
config.ignoreErrors,
})
}
}
origConfig, err := applyConfig(ctx, optimizedConfig)
undo := func(ctx context.Context) error {
_, err := applyConfig(ctx, origConfig)
return err
}
if err != nil {
undo(ctx)
return nil, err
}
return undo, nil
}
// applyConfig applies the specified frequency scaling configuration. A slice of
// cpuConfigEntry needs to be provided and will be processed in order. A slice
// of the original cpuConfigEntry values that were successfully processed is
// returned in reverse order so the caller can restore the original config by
// passing the slice to this function as is. If ignoreErrors is true for a
// config entry we won't return an error upon failure, but will only show a
// warning. The provided context will only be used for logging, so the config
// will even be applied upon timeout.
func applyConfig(ctx context.Context, cpuConfig []cpuConfigEntry) ([]cpuConfigEntry, error) {
var origConfig []cpuConfigEntry
for _, config := range cpuConfig {
origValue, err := ioutil.ReadFile(config.path)
if err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to read %v: %v", config.path, err)
continue
}
if err = ioutil.WriteFile(config.path, []byte(config.value), 0644); err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to write to %v: %v", config.path, err)
continue
}
// Inserts a new entry at the front of origConfig.
e := cpuConfigEntry{config.path, string(origValue), false}
origConfig = append([]cpuConfigEntry{e}, origConfig...)
}
return origConfig, nil
}
// disableThermalThrottling disables thermal throttling, as it might interfere
// with test execution. A function is returned that restores the original
// settings, so the caller can re-enable thermal throttling after testing.
func disableThermalThrottling(ctx context.Context) (func(context.Context) error, error) {
job := getThermalThrottlingJob(ctx)
if job == "" {
return func(ctx context.Context) error { return nil }, nil
}
_, state, _, err := upstart.JobStatus(ctx, job)
if err != nil {
return nil, err
} else if state != upstartcommon.RunningState {
return func(ctx context.Context) error { return nil }, nil
}
if err := upstart.StopJob(ctx, job); err != nil {
return nil, err
}
undo := func(ctx context.Context) error { return upstart.EnsureJobRunning(ctx, job) }
return undo, nil
}
// getThermalThrottlingJob tries to determine the name of the thermal throttling | // job used by the current platform.
func getThermalThrottlingJob(ctx context.Context) string {
// List of possible thermal throttling jobs that should be disabled:
// - dptf for intel >= baytrail
// - temp_metrics for link
// - thermal for daisy, snow, pit,...
for _, job := range []string{"dptf", "temp_metrics", "thermal"} {
if upstart.JobExists(ctx, job) {
return job
}
}
return ""
} | random_line_split |
|
cpu.go | // Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package cpu measures CPU usage.
package cpu
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"sync"
"time"
"golang.org/x/sys/unix"
"chromiumos/tast/common/testexec"
upstartcommon "chromiumos/tast/common/upstart"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/cpu"
"chromiumos/tast/local/gtest"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// ExitOption describes how to clean up the child process upon function exit.
type ExitOption int
const (
// KillProcess option kills the child process when the function is done.
KillProcess ExitOption = iota
// WaitProcess option waits for the child process to finish.
WaitProcess
)
// raplExec is the command used to measure power consumption, only supported on Intel platforms.
const raplExec = "/usr/bin/dump_intel_rapl_consumption"
// MeasureProcessUsage starts one or more gtest processes and measures CPU usage and power consumption asynchronously
// for the given duration. A map is returned containing CPU usage (percentage in [0-100] range) with key "cpu" and power
// consumption (Watts) with key "power" if supported.
func MeasureProcessUsage(ctx context.Context, duration time.Duration,
exitOption ExitOption, ts ...*gtest.GTest) (measurements map[string]float64, retErr error) {
const (
stabilizeTime = 1 * time.Second // time to wait for CPU to stabilize after launching proc.
cleanupTime = 5 * time.Second // time reserved for cleanup after measuring.
)
for _, t := range ts {
// Start the process asynchronously by calling the provided startup function.
cmd, err := t.Start(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to run binary")
}
// Clean up the process upon exiting the function.
defer func() {
// If the exit option is 'WaitProcess' wait for the process to terminate.
if exitOption == WaitProcess {
if err := cmd.Wait(); err != nil {
retErr = err
testing.ContextLog(ctx, "Failed waiting for the command to exit: ", retErr)
}
return
}
// If the exit option is 'KillProcess' we will send a 'SIGKILL' signal
// to the process after collecting performance metrics.
if err := cmd.Kill(); err != nil {
retErr = err
testing.ContextLog(ctx, "Failed to kill process: ", retErr)
return
}
// After sending a 'SIGKILL' signal to the process we need to wait
// for the process to terminate. If Wait() doesn't return any error,
// we know the process already terminated before we explicitly killed
// it and the measured performance metrics are invalid.
err = cmd.Wait()
if err == nil {
retErr = errors.New("process did not run for entire measurement duration")
testing.ContextLog(ctx, retErr)
return
}
// Check whether the process was terminated with a 'SIGKILL' signal.
ws, ok := testexec.GetWaitStatus(err)
if !ok {
retErr = errors.Wrap(err, "failed to get wait status")
testing.ContextLog(ctx, retErr)
} else if !ws.Signaled() || ws.Signal() != unix.SIGKILL {
retErr = errors.Wrap(err, "process did not terminate with SIGKILL signal")
testing.ContextLog(ctx, retErr)
}
}()
}
// Use a shorter context to leave time for cleanup upon failure.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
if err := testing.Sleep(ctx, stabilizeTime); err != nil {
return nil, errors.Wrap(err, "failed waiting for CPU usage to stabilize")
}
testing.ContextLog(ctx, "Measuring CPU usage and power consumption for ", duration.Round(time.Second))
return MeasureUsage(ctx, duration)
}
// SetUpBenchmark performs setup needed for running benchmarks. It disables CPU
// frequency scaling and thermal throttling. A deferred call to the returned
// cleanUp function should be scheduled by the caller if err is nil.
func SetUpBenchmark(ctx context.Context) (cleanUp func(ctx context.Context), err error) {
const cleanupTime = 10 * time.Second // time reserved for cleanup on error.
var restoreScaling func(ctx context.Context) error
var restoreThrottling func(ctx context.Context) error
cleanUp = func(ctx context.Context) {
if restoreScaling != nil {
if err = restoreScaling(ctx); err != nil {
testing.ContextLog(ctx, "Failed to restore CPU frequency scaling to original values: ", err)
}
}
if restoreThrottling != nil {
if err = restoreThrottling(ctx); err != nil {
testing.ContextLog(ctx, "Failed to restore CPU thermal throttling to original values: ", err)
}
}
}
// Run the cleanUp function automatically if we encounter an error.
doCleanup := cleanUp
defer func() {
if doCleanup != nil {
doCleanup(ctx)
}
}()
// Run all non-cleanup operations with a shorter context. This ensures
// thermal throttling and CPU frequency scaling get re-enabled, even when
// test execution exceeds the maximum time allowed.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
// CPU frequency scaling and thermal throttling might influence our test results.
if restoreScaling, err = disableCPUFrequencyScaling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable CPU frequency scaling")
}
if restoreThrottling, err = disableThermalThrottling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable thermal throttling")
}
// Disarm running the cleanUp function now that we expect the caller to do it.
doCleanup = nil
return cleanUp, nil
}
// MeasureUsage measures the average utilization across all CPUs and the
// average SoC 'pkg' power consumption during the specified duration. Measuring
// power consumption is currently not supported on all platforms. A map is
// returned containing CPU usage (percentage in [0-100] range) and power
// consumption (Watts) if supported.
func MeasureUsage(ctx context.Context, duration time.Duration) (map[string]float64, error) {
var cpuUsage, powerConsumption float64
var cpuErr, powerErr error
var wg sync.WaitGroup
// Start measuring CPU usage asynchronously.
wg.Add(1)
go func() {
defer wg.Done()
cpuUsage, cpuErr = cpu.MeasureUsage(ctx, duration)
}()
// Start measuring power consumption asynchronously. Power consumption
// is currently only measured on Intel devices that support the
// dump_intel_rapl_consumption command.
if _, powerErr = os.Stat(raplExec); powerErr == nil {
wg.Add(1)
go func() {
defer wg.Done()
if powerConsumption, powerErr = MeasurePowerConsumption(ctx, duration); powerErr != nil {
testing.ContextLog(ctx, "Measuring power consumption failed: ", powerErr)
}
}()
}
wg.Wait()
measurements := make(map[string]float64)
if cpuErr == nil {
measurements["cpu"] = cpuUsage
}
if powerErr == nil {
measurements["power"] = powerConsumption
}
// Ignore powerErr as not all platforms support measuring power consumption.
return measurements, cpuErr
}
// MeasurePowerConsumption measures power consumption during the specified
// duration and returns the average power consumption (in Watts). The power
// consumption is acquired by reading the RAPL 'pkg' entry, which gives a
// measure of the total SoC power consumption.
func MeasurePowerConsumption(ctx context.Context, duration time.Duration) (float64, error) {
cmd := testexec.CommandContext(ctx, raplExec, "--interval_ms="+
strconv.FormatInt(int64(duration/time.Millisecond), 10))
powerConsumptionOutput, err := cmd.CombinedOutput()
if err != nil {
return 0.0, err
}
var powerConsumptionRegex = regexp.MustCompile(`(\d+\.\d+)`)
match := powerConsumptionRegex.FindAllString(string(powerConsumptionOutput), 1)
if len(match) != 1 |
powerConsumption, err := strconv.ParseFloat(match[0], 64)
if err != nil {
return 0.0, err
}
return powerConsumption, nil
}
// cpuConfigEntry holds a single CPU config entry. If ignoreErrors is true,
// failure to apply the config will result in a warning, rather than an error.
// This is needed as on some platforms we might not have the right permissions
// to disable frequency scaling.
type cpuConfigEntry struct {
path string
value string
ignoreErrors bool
}
// disableCPUFrequencyScaling disables frequency scaling. All CPU cores will be
// set to always run at their maximum frequency. A function is returned so the
// caller can restore the original CPU frequency scaling configuration.
// Depending on the platform different mechanisms are present:
// - Some Intel-based platforms (e.g. Eve and Nocturne) ignore the values set
// in the scaling_governor, and instead use the intel_pstate driver to
// control CPU frequency scaling.
// - Most platforms use the scaling_governor to control CPU frequency scaling.
// - Some platforms (e.g. Dru) use a different CPU frequency scaling governor.
func disableCPUFrequencyScaling(ctx context.Context) (func(ctx context.Context) error, error) {
configPatterns := []cpuConfigEntry{
// crbug.com/938729: BIOS settings might prevent us from overwriting intel_pstate/no_turbo.
{"/sys/devices/system/cpu/intel_pstate/no_turbo", "1", true},
// Fix the intel_pstate percentage to 100 if possible. We raise the
// maximum value before the minimum value as the min cannot exceed the
// max. To restore them, the order must be inverted. Note that we set
// and save the original values for these values because changing
// scaling_governor to "performance" can change these values as well.
{"/sys/devices/system/cpu/intel_pstate/max_perf_pct", "100", false},
{"/sys/devices/system/cpu/intel_pstate/min_perf_pct", "100", false},
// crbug.com/977925: Disabled hyperthreading cores are listed but
// writing config for these disabled cores results in 'invalid argument'.
// TODO(dstaessens): Skip disabled CPU cores when setting scaling_governor.
{"/sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_governor", "performance", true},
{"/sys/class/devfreq/devfreq[0-9]*/governor", "performance", true},
}
var optimizedConfig []cpuConfigEntry
// Expands patterns in configPatterns and packs actual configs into
// optimizedConfig.
for _, config := range configPatterns {
paths, err := filepath.Glob(config.path)
if err != nil {
return nil, err
}
for _, path := range paths {
optimizedConfig = append(optimizedConfig, cpuConfigEntry{
path,
config.value,
config.ignoreErrors,
})
}
}
origConfig, err := applyConfig(ctx, optimizedConfig)
undo := func(ctx context.Context) error {
_, err := applyConfig(ctx, origConfig)
return err
}
if err != nil {
undo(ctx)
return nil, err
}
return undo, nil
}
// applyConfig applies the specified frequency scaling configuration. A slice of
// cpuConfigEntry needs to be provided and will be processed in order. A slice
// of the original cpuConfigEntry values that were successfully processed is
// returned in reverse order so the caller can restore the original config by
// passing the slice to this function as is. If ignoreErrors is true for a
// config entry we won't return an error upon failure, but will only show a
// warning. The provided context will only be used for logging, so the config
// will even be applied upon timeout.
func applyConfig(ctx context.Context, cpuConfig []cpuConfigEntry) ([]cpuConfigEntry, error) {
var origConfig []cpuConfigEntry
for _, config := range cpuConfig {
origValue, err := ioutil.ReadFile(config.path)
if err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to read %v: %v", config.path, err)
continue
}
if err = ioutil.WriteFile(config.path, []byte(config.value), 0644); err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to write to %v: %v", config.path, err)
continue
}
// Inserts a new entry at the front of origConfig.
e := cpuConfigEntry{config.path, string(origValue), false}
origConfig = append([]cpuConfigEntry{e}, origConfig...)
}
return origConfig, nil
}
// disableThermalThrottling disables thermal throttling, as it might interfere
// with test execution. A function is returned that restores the original
// settings, so the caller can re-enable thermal throttling after testing.
func disableThermalThrottling(ctx context.Context) (func(context.Context) error, error) {
job := getThermalThrottlingJob(ctx)
if job == "" {
return func(ctx context.Context) error { return nil }, nil
}
_, state, _, err := upstart.JobStatus(ctx, job)
if err != nil {
return nil, err
} else if state != upstartcommon.RunningState {
return func(ctx context.Context) error { return nil }, nil
}
if err := upstart.StopJob(ctx, job); err != nil {
return nil, err
}
undo := func(ctx context.Context) error { return upstart.EnsureJobRunning(ctx, job) }
return undo, nil
}
// getThermalThrottlingJob tries to determine the name of the thermal throttling
// job used by the current platform.
func getThermalThrottlingJob(ctx context.Context) string {
// List of possible thermal throttling jobs that should be disabled:
// - dptf for intel >= baytrail
// - temp_metrics for link
// - thermal for daisy, snow, pit,...
for _, job := range []string{"dptf", "temp_metrics", "thermal"} {
if upstart.JobExists(ctx, job) {
return job
}
}
return ""
}
| {
return 0.0, errors.Errorf("failed to parse output of %s", raplExec)
} | conditional_block |
cpu.go | // Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package cpu measures CPU usage.
package cpu
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"sync"
"time"
"golang.org/x/sys/unix"
"chromiumos/tast/common/testexec"
upstartcommon "chromiumos/tast/common/upstart"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/cpu"
"chromiumos/tast/local/gtest"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// ExitOption describes how to clean up the child process upon function exit.
type ExitOption int
const (
// KillProcess option kills the child process when the function is done.
KillProcess ExitOption = iota
// WaitProcess option waits for the child process to finish.
WaitProcess
)
// raplExec is the command used to measure power consumption, only supported on Intel platforms.
const raplExec = "/usr/bin/dump_intel_rapl_consumption"
// MeasureProcessUsage starts one or more gtest processes and measures CPU usage and power consumption asynchronously
// for the given duration. A map is returned containing CPU usage (percentage in [0-100] range) with key "cpu" and power
// consumption (Watts) with key "power" if supported.
func MeasureProcessUsage(ctx context.Context, duration time.Duration,
exitOption ExitOption, ts ...*gtest.GTest) (measurements map[string]float64, retErr error) {
const (
stabilizeTime = 1 * time.Second // time to wait for CPU to stabilize after launching proc.
cleanupTime = 5 * time.Second // time reserved for cleanup after measuring.
)
for _, t := range ts {
// Start the process asynchronously by calling the provided startup function.
cmd, err := t.Start(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to run binary")
}
// Clean up the process upon exiting the function.
defer func() {
// If the exit option is 'WaitProcess' wait for the process to terminate.
if exitOption == WaitProcess {
if err := cmd.Wait(); err != nil {
retErr = err
testing.ContextLog(ctx, "Failed waiting for the command to exit: ", retErr)
}
return
}
// If the exit option is 'KillProcess' we will send a 'SIGKILL' signal
// to the process after collecting performance metrics.
if err := cmd.Kill(); err != nil {
retErr = err
testing.ContextLog(ctx, "Failed to kill process: ", retErr)
return
}
// After sending a 'SIGKILL' signal to the process we need to wait
// for the process to terminate. If Wait() doesn't return any error,
// we know the process already terminated before we explicitly killed
// it and the measured performance metrics are invalid.
err = cmd.Wait()
if err == nil {
retErr = errors.New("process did not run for entire measurement duration")
testing.ContextLog(ctx, retErr)
return
}
// Check whether the process was terminated with a 'SIGKILL' signal.
ws, ok := testexec.GetWaitStatus(err)
if !ok {
retErr = errors.Wrap(err, "failed to get wait status")
testing.ContextLog(ctx, retErr)
} else if !ws.Signaled() || ws.Signal() != unix.SIGKILL {
retErr = errors.Wrap(err, "process did not terminate with SIGKILL signal")
testing.ContextLog(ctx, retErr)
}
}()
}
// Use a shorter context to leave time for cleanup upon failure.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
if err := testing.Sleep(ctx, stabilizeTime); err != nil {
return nil, errors.Wrap(err, "failed waiting for CPU usage to stabilize")
}
testing.ContextLog(ctx, "Measuring CPU usage and power consumption for ", duration.Round(time.Second))
return MeasureUsage(ctx, duration)
}
// SetUpBenchmark performs setup needed for running benchmarks. It disables CPU
// frequency scaling and thermal throttling. A deferred call to the returned
// cleanUp function should be scheduled by the caller if err is nil.
func SetUpBenchmark(ctx context.Context) (cleanUp func(ctx context.Context), err error) {
const cleanupTime = 10 * time.Second // time reserved for cleanup on error.
var restoreScaling func(ctx context.Context) error
var restoreThrottling func(ctx context.Context) error
cleanUp = func(ctx context.Context) {
if restoreScaling != nil {
if err = restoreScaling(ctx); err != nil {
testing.ContextLog(ctx, "Failed to restore CPU frequency scaling to original values: ", err)
}
}
if restoreThrottling != nil {
if err = restoreThrottling(ctx); err != nil {
testing.ContextLog(ctx, "Failed to restore CPU thermal throttling to original values: ", err)
}
}
}
// Run the cleanUp function automatically if we encounter an error.
doCleanup := cleanUp
defer func() {
if doCleanup != nil {
doCleanup(ctx)
}
}()
// Run all non-cleanup operations with a shorter context. This ensures
// thermal throttling and CPU frequency scaling get re-enabled, even when
// test execution exceeds the maximum time allowed.
ctx, cancel := ctxutil.Shorten(ctx, cleanupTime)
defer cancel()
// CPU frequency scaling and thermal throttling might influence our test results.
if restoreScaling, err = disableCPUFrequencyScaling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable CPU frequency scaling")
}
if restoreThrottling, err = disableThermalThrottling(ctx); err != nil {
return nil, errors.Wrap(err, "failed to disable thermal throttling")
}
// Disarm running the cleanUp function now that we expect the caller to do it.
doCleanup = nil
return cleanUp, nil
}
// MeasureUsage measures the average utilization across all CPUs and the
// average SoC 'pkg' power consumption during the specified duration. Measuring
// power consumption is currently not supported on all platforms. A map is
// returned containing CPU usage (percentage in [0-100] range) and power
// consumption (Watts) if supported.
func MeasureUsage(ctx context.Context, duration time.Duration) (map[string]float64, error) {
var cpuUsage, powerConsumption float64
var cpuErr, powerErr error
var wg sync.WaitGroup
// Start measuring CPU usage asynchronously.
wg.Add(1)
go func() {
defer wg.Done()
cpuUsage, cpuErr = cpu.MeasureUsage(ctx, duration)
}()
// Start measuring power consumption asynchronously. Power consumption
// is currently only measured on Intel devices that support the
// dump_intel_rapl_consumption command.
if _, powerErr = os.Stat(raplExec); powerErr == nil {
wg.Add(1)
go func() {
defer wg.Done()
if powerConsumption, powerErr = MeasurePowerConsumption(ctx, duration); powerErr != nil {
testing.ContextLog(ctx, "Measuring power consumption failed: ", powerErr)
}
}()
}
wg.Wait()
measurements := make(map[string]float64)
if cpuErr == nil {
measurements["cpu"] = cpuUsage
}
if powerErr == nil {
measurements["power"] = powerConsumption
}
// Ignore powerErr as not all platforms support measuring power consumption.
return measurements, cpuErr
}
// MeasurePowerConsumption measures power consumption during the specified
// duration and returns the average power consumption (in Watts). The power
// consumption is acquired by reading the RAPL 'pkg' entry, which gives a
// measure of the total SoC power consumption.
func MeasurePowerConsumption(ctx context.Context, duration time.Duration) (float64, error) {
cmd := testexec.CommandContext(ctx, raplExec, "--interval_ms="+
strconv.FormatInt(int64(duration/time.Millisecond), 10))
powerConsumptionOutput, err := cmd.CombinedOutput()
if err != nil {
return 0.0, err
}
var powerConsumptionRegex = regexp.MustCompile(`(\d+\.\d+)`)
match := powerConsumptionRegex.FindAllString(string(powerConsumptionOutput), 1)
if len(match) != 1 {
return 0.0, errors.Errorf("failed to parse output of %s", raplExec)
}
powerConsumption, err := strconv.ParseFloat(match[0], 64)
if err != nil {
return 0.0, err
}
return powerConsumption, nil
}
// cpuConfigEntry holds a single CPU config entry. If ignoreErrors is true,
// failure to apply the config will result in a warning, rather than an error.
// This is needed as on some platforms we might not have the right permissions
// to disable frequency scaling.
type cpuConfigEntry struct {
path string
value string
ignoreErrors bool
}
// disableCPUFrequencyScaling disables frequency scaling. All CPU cores will be
// set to always run at their maximum frequency. A function is returned so the
// caller can restore the original CPU frequency scaling configuration.
// Depending on the platform different mechanisms are present:
// - Some Intel-based platforms (e.g. Eve and Nocturne) ignore the values set
// in the scaling_governor, and instead use the intel_pstate driver to
// control CPU frequency scaling.
// - Most platforms use the scaling_governor to control CPU frequency scaling.
// - Some platforms (e.g. Dru) use a different CPU frequency scaling governor.
func disableCPUFrequencyScaling(ctx context.Context) (func(ctx context.Context) error, error) {
configPatterns := []cpuConfigEntry{
// crbug.com/938729: BIOS settings might prevent us from overwriting intel_pstate/no_turbo.
{"/sys/devices/system/cpu/intel_pstate/no_turbo", "1", true},
// Fix the intel_pstate percentage to 100 if possible. We raise the
// maximum value before the minimum value as the min cannot exceed the
// max. To restore them, the order must be inverted. Note that we set
// and save the original values for these values because changing
// scaling_governor to "performance" can change these values as well.
{"/sys/devices/system/cpu/intel_pstate/max_perf_pct", "100", false},
{"/sys/devices/system/cpu/intel_pstate/min_perf_pct", "100", false},
// crbug.com/977925: Disabled hyperthreading cores are listed but
// writing config for these disabled cores results in 'invalid argument'.
// TODO(dstaessens): Skip disabled CPU cores when setting scaling_governor.
{"/sys/devices/system/cpu/cpu[0-9]*/cpufreq/scaling_governor", "performance", true},
{"/sys/class/devfreq/devfreq[0-9]*/governor", "performance", true},
}
var optimizedConfig []cpuConfigEntry
// Expands patterns in configPatterns and packs actual configs into
// optimizedConfig.
for _, config := range configPatterns {
paths, err := filepath.Glob(config.path)
if err != nil {
return nil, err
}
for _, path := range paths {
optimizedConfig = append(optimizedConfig, cpuConfigEntry{
path,
config.value,
config.ignoreErrors,
})
}
}
origConfig, err := applyConfig(ctx, optimizedConfig)
undo := func(ctx context.Context) error {
_, err := applyConfig(ctx, origConfig)
return err
}
if err != nil {
undo(ctx)
return nil, err
}
return undo, nil
}
// applyConfig applies the specified frequency scaling configuration. A slice of
// cpuConfigEntry needs to be provided and will be processed in order. A slice
// of the original cpuConfigEntry values that were successfully processed is
// returned in reverse order so the caller can restore the original config by
// passing the slice to this function as is. If ignoreErrors is true for a
// config entry we won't return an error upon failure, but will only show a
// warning. The provided context will only be used for logging, so the config
// will even be applied upon timeout.
func applyConfig(ctx context.Context, cpuConfig []cpuConfigEntry) ([]cpuConfigEntry, error) {
var origConfig []cpuConfigEntry
for _, config := range cpuConfig {
origValue, err := ioutil.ReadFile(config.path)
if err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to read %v: %v", config.path, err)
continue
}
if err = ioutil.WriteFile(config.path, []byte(config.value), 0644); err != nil {
if !config.ignoreErrors {
return origConfig, err
}
testing.ContextLogf(ctx, "Failed to write to %v: %v", config.path, err)
continue
}
// Inserts a new entry at the front of origConfig.
e := cpuConfigEntry{config.path, string(origValue), false}
origConfig = append([]cpuConfigEntry{e}, origConfig...)
}
return origConfig, nil
}
// disableThermalThrottling disables thermal throttling, as it might interfere
// with test execution. A function is returned that restores the original
// settings, so the caller can re-enable thermal throttling after testing.
func disableThermalThrottling(ctx context.Context) (func(context.Context) error, error) {
job := getThermalThrottlingJob(ctx)
if job == "" {
return func(ctx context.Context) error { return nil }, nil
}
_, state, _, err := upstart.JobStatus(ctx, job)
if err != nil {
return nil, err
} else if state != upstartcommon.RunningState {
return func(ctx context.Context) error { return nil }, nil
}
if err := upstart.StopJob(ctx, job); err != nil {
return nil, err
}
undo := func(ctx context.Context) error { return upstart.EnsureJobRunning(ctx, job) }
return undo, nil
}
// getThermalThrottlingJob tries to determine the name of the thermal throttling
// job used by the current platform.
func getThermalThrottlingJob(ctx context.Context) string | {
// List of possible thermal throttling jobs that should be disabled:
// - dptf for intel >= baytrail
// - temp_metrics for link
// - thermal for daisy, snow, pit,...
for _, job := range []string{"dptf", "temp_metrics", "thermal"} {
if upstart.JobExists(ctx, job) {
return job
}
}
return ""
} | identifier_body |
|
manage.py | #!/usr/bin/env python
import os
import subprocess
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell, Server
from redis import Redis
from rq import Connection, Queue, Worker
from sqlalchemy.sql import exists
from sqlalchemy.exc import IntegrityError
from sqlalchemy import desc
from app import create_app, db, dprint
from app.models import Role, Employee, Contact, Plant, Orders, Sales, WaterLog, Species, Genus, Supplier, Product, Item
from config import Config
from datetime import datetime, date, timedelta
from random import randrange, choice
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, Employee=Employee, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('runserver', Server(host="0.0.0.0"))
# Zone A: Full shade, high humidity
# Zone B: Partial shade, high humidity
# Zone C: Partial shade, ambient humidity
# Zone D: Full sun, ambient humidity
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def recreate_db():
"""
Recreates a local database. You probably should not use this on
production.
"""
db.session.execute('SET FOREIGN_KEY_CHECKS=0;')
db.session.execute('DROP TABLE IF EXISTS logs;')
db.session.execute('DROP TABLE IF EXISTS employees;')
db.session.execute('DROP TABLE IF EXISTS sales;')
db.session.execute('DROP TABLE IF EXISTS plants;')
db.session.execute('DROP TABLE IF EXISTS products;')
db.session.execute('DROP TABLE IF EXISTS suppliers;')
db.session.execute('DROP TABLE IF EXISTS orders;')
db.session.execute('DROP TABLE IF EXISTS contacts;')
db.session.execute('DROP TABLE IF EXISTS varieties;')
db.session.execute('DROP TABLE IF EXISTS species;')
db.session.execute('DROP TABLE IF EXISTS genera;')
db.session.execute('DROP TABLE IF EXISTS families;')
db.drop_all()
db.create_all()
db.session.commit()
fakePlant = Plant(living = True)
db.session.add(fakePlant)
db.session.commit()
db.session.delete(fakePlant)
db.session.execute('SET FOREIGN_KEY_CHECKS=1;')
db.session.commit()
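# Editor's note (illustrative, not part of the original script): a typical local
# workflow with the commands defined in this script, assuming the database
# configured via the config module is reachable:
#
#   python manage.py recreate_db
#   python manage.py setup_dev
#   python manage.py add_fake_data
#   python manage.py runserver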
@manager.command
def OrderMerchandise( numItems, orderDate, dayNum ):
# Simulation
# Generate fake order and add to inventory
newOrder = Orders()
StuffWeSell = [
('Shovel', 14.30, 24.99, 'B'),
('Peat moss - 5L', 4.75, 12.99, 'D'),
('Peat moss - 10L', 6.00, 19.99, 'D'),
('Perlite - 5L', 3.50, 10.99, 'D'),
('Perlite - 10L', 5.00, 16.99, 'D'),
('Hydroton - 10L', 7.31, 14.99, 'D'),
('Vermiculite - 5L', 3.75, 9.99, 'D'),
('Vermiculite - 10L', 5.75, 13.99, 'D'),
('"Premium" dirt - 5L', 0.50, 24.99, 'D'),
('Systemic granules', 7.50, 17.99, 'A'),
('Copper Fungicide', 11.45, 19.99, 'A'),
('Spray bottle', 0.75, 2.99, 'A'),
('Nursery pot - 3in', 0.25, 1.99, 'B'),
('Nursery pot - 6in', 0.35, 2.99, 'B'),
('Nursery pot - 9in', 0.45, 3.99, 'B')
]
for item in StuffWeSell:
newItem = Product(
name = item[0],
quantity = randrange(5,10,1),
price = item[2],
location = item[3],
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = item[1],
date = orderDate,
date_received = (orderDate + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = newOrder.id
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def OrderPlants( numPlants, orderDate, dayNum ):
# Simulation
# Generate fake plant order and add to inventory
|
@manager.command
def updateLog(plantSKU, waterDate, dayNum):
water = WaterLog(
plant = plantSKU,
water = 1,
date = waterDate
)
f = randrange(0,2,1)
if f == 1:
water.feed = 1
water.notes = "General purpose fertilizer; half-strength"
else:
water.feed = 0
db.session.add(water)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: LogEntry[{}] Plant[{}]".format(dayNum, water.id, water.plant))
@manager.command
def checkStock(simDay, dayNum):
stock = Product.query.all()
reOrder = Orders()
for i in stock:
if i.quantity == 0:
newItem = Product(
name = i.name,
quantity = randrange(5,10,1),
price = i.price,
location = i.location,
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = newItem.price / 3,
date = simDay,
date_received = (simDay + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = reOrder.id
db.session.delete(i)
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def waterPlants(simDay, dayNum):
# Water simulated plants
# Group plants by watering requirements
simPlants = db.session.query(Plant).filter(Plant.quantity > 0).all()
for i in simPlants:
lastLog = db.session.query(WaterLog)\
.filter(WaterLog.plant == i.sku)\
.order_by(desc('date')).first()
if i.location == 'A':
if (simDay - lastLog.date) == timedelta(days=5):
updateLog(i.sku, simDay, dayNum)
elif i.location == 'B':
if (simDay - lastLog.date) == timedelta(days=7):
updateLog(i.sku, simDay, dayNum)
else:
if (simDay - lastLog.date) >= timedelta(days=10):
updateLog(i.sku, simDay, dayNum)
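# Editor's sketch (not part of the original script): the zone-to-interval
# mapping hard-coded in waterPlants, written out as data. Purely illustrative;
# the code above checks zones A and B for an exact interval and waters
# everything else once at least 10 days have passed.
WATERING_INTERVAL_DAYS = {'A': 5, 'B': 7, 'C': 10, 'D': 10}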
@manager.command
def add_fake_data():
# Generate fake customers
Contact.generate_fake(count=50)
# All simulated customers are returning customers
simContacts = db.session.query(Contact.id).all()
# Simulate 60 days of slow, daily business
simDay = date.today() - timedelta(days=60)
dayNum = 0
# Initial inventory
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
OrderMerchandise(5, (simDay - timedelta(days=2)), dayNum)
# Begin simulation
while (date.today() - simDay) > timedelta(days=0):
waterPlants(simDay, dayNum)
checkStock(simDay, dayNum)
if (dayNum % 7) == 0:
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
# Between one and three customers per day, chosen at random
# (It's ok, everything's overpriced)
# Customers come in and buy a handful of random items
# Sales are generated and added to DB
# Inventory updates
for i in range(1, randrange(2,5,1)):
transactionID = Sales().id
numItems = randrange(1,6,1)
shopper = choice(simContacts)
merch = []
#plantsAvailable = Plant.query.all()
itemsAvailable = Item.query.all()
for item in itemsAvailable:
merch.append(item)
alreadyPicked = []
for j in range(1, numItems):
merchChoice = choice(merch)
if merchChoice.sku in alreadyPicked:
pass
elif merchChoice.quantity > 0:
multiplier = randrange(1,5,1)
cart = Sales(
id = transactionID,
date = simDay,
customer = shopper,
item = merchChoice.sku,
salePrice = merchChoice.price
)
# Check if we have enough of that item
if multiplier <= merchChoice.quantity:
cart.qty = multiplier
else:
cart.qty = 1
# Create entry in Sales
db.session.add(cart)
# Update quantities in Inventory DB
merchChoice.updateQty( -cart.qty )
# Don't pick the same item twice
alreadyPicked.append(cart.item)
dprint("Day {}: Customer[{}] purchased Item[{}]".format(dayNum, shopper, cart.item))
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
# Advance simulation one day
simDay += timedelta(days=1)
dayNum += 1
@manager.command
def setup_dev():
"""Runs the set-up needed for local development."""
setup_general()
@manager.command
def setup_prod():
"""Runs the set-up needed for production."""
setup_general()
def setup_general():
"""Runs the set-up needed for both local development and production.
Also sets up first admin user."""
Role.insert_roles()
admin_query = Role.query.filter_by(name='Administrator')
if admin_query.first() is not None:
if Employee.query.filter_by(email=Config.ADMIN_EMAIL).first() is None:
user = Employee(first_name='Admin',
last_name='Account',
password=Config.ADMIN_PASSWORD,
email=Config.ADMIN_EMAIL)
db.session.add(user)
db.session.commit()
print('Added administrator {}'.format(user.full_name()))
@manager.command
def run_worker():
"""Initializes a slim rq task queue."""
listen = ['default']
conn = Redis(host=app.config['RQ_DEFAULT_HOST'],
port=app.config['RQ_DEFAULT_PORT'],
db=0,
password=app.config['RQ_DEFAULT_PASSWORD'])
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
@manager.command
def format():
"""Runs the yapf and isort formatters over the project."""
isort = 'isort -rc *.py app/'
yapf = 'yapf -r -i *.py app/'
print('Running {}'.format(isort))
subprocess.call(isort, shell=True)
print('Running {}'.format(yapf))
subprocess.call(yapf, shell=True)
if __name__ == '__main__':
manager.run()
| newOrder = Orders()
plants = Plant.generate_fake( numPlants )
for i in plants:
invoice = Orders(
supplier = 'TGKmf',
date = orderDate,
date_received = orderDate + timedelta(days=2),
item = i.sku,
price = i.price/randrange(2, 5, 1),
qty = i.quantity
)
invoice.id = newOrder.id
db.session.add(invoice)
# Water plants when they arrive
updateLog(i.sku, orderDate, dayNum)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format( dayNum, invoice.id, invoice.item)) | identifier_body |
manage.py | #!/usr/bin/env python
import os
import subprocess
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell, Server
from redis import Redis
from rq import Connection, Queue, Worker
from sqlalchemy.sql import exists
from sqlalchemy.exc import IntegrityError
from sqlalchemy import desc
from app import create_app, db, dprint
from app.models import Role, Employee, Contact, Plant, Orders, Sales, WaterLog, Species, Genus, Supplier, Product, Item
from config import Config
from datetime import datetime, date, timedelta
from random import randrange, choice
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, Employee=Employee, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('runserver', Server(host="0.0.0.0"))
# Zone A: Full shade, high humidity
# Zone B: Partial shade, high humidity
# Zone C: Partial shade, ambient humidity
# Zone D: Full sun, ambient humidity
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def recreate_db():
"""
Recreates a local database. You probably should not use this on
production.
"""
db.session.execute('SET FOREIGN_KEY_CHECKS=0;')
db.session.execute('DROP TABLE IF EXISTS logs;')
db.session.execute('DROP TABLE IF EXISTS employees;')
db.session.execute('DROP TABLE IF EXISTS sales;')
db.session.execute('DROP TABLE IF EXISTS plants;')
db.session.execute('DROP TABLE IF EXISTS products;')
db.session.execute('DROP TABLE IF EXISTS suppliers;')
db.session.execute('DROP TABLE IF EXISTS orders;')
db.session.execute('DROP TABLE IF EXISTS contacts;')
db.session.execute('DROP TABLE IF EXISTS varieties;')
db.session.execute('DROP TABLE IF EXISTS species;')
db.session.execute('DROP TABLE IF EXISTS genera;')
db.session.execute('DROP TABLE IF EXISTS families;')
db.drop_all()
db.create_all()
db.session.commit()
fakePlant = Plant(living = True)
db.session.add(fakePlant)
db.session.commit()
db.session.delete(fakePlant)
db.session.execute('SET FOREIGN_KEY_CHECKS=1;')
db.session.commit()
@manager.command
def OrderMerchandise( numItems, orderDate, dayNum ):
# Simulation
# Generate fake order and add to inventory
newOrder = Orders()
StuffWeSell = [
('Shovel', 14.30, 24.99, 'B'),
('Peat moss - 5L', 4.75, 12.99, 'D'),
('Peat moss - 10L', 6.00, 19.99, 'D'),
('Perlite - 5L', 3.50, 10.99, 'D'),
('Perlite - 10L', 5.00, 16.99, 'D'),
('Hydroton - 10L', 7.31, 14.99, 'D'),
('Vermiculite - 5L', 3.75, 9.99, 'D'),
('Vermiculite - 10L', 5.75, 13.99, 'D'),
('"Premium" dirt - 5L', 0.50, 24.99, 'D'),
('Systemic granules', 7.50, 17.99, 'A'),
('Copper Fungicide', 11.45, 19.99, 'A'),
('Spray bottle', 0.75, 2.99, 'A'),
('Nursery pot - 3in', 0.25, 1.99, 'B'),
('Nursery pot - 6in', 0.35, 2.99, 'B'),
('Nursery pot - 9in', 0.45, 3.99, 'B')
]
for item in StuffWeSell:
newItem = Product(
name = item[0],
quantity = randrange(5,10,1),
price = item[2],
location = item[3],
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = item[1],
date = orderDate,
date_received = (orderDate + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = newOrder.id
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def OrderPlants( numPlants, orderDate, dayNum ):
# Simulation
# Generate fake plant order and add to inventory
newOrder = Orders()
plants = Plant.generate_fake( numPlants )
for i in plants:
invoice = Orders(
supplier = 'TGKmf',
date = orderDate,
date_received = orderDate + timedelta(days=2),
item = i.sku,
price = i.price/randrange(2, 5, 1),
qty = i.quantity
)
invoice.id = newOrder.id
db.session.add(invoice)
# Water plants when they arrive
updateLog(i.sku, orderDate, dayNum)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format( dayNum, invoice.id, invoice.item))
@manager.command
def updateLog(plantSKU, waterDate, dayNum):
water = WaterLog(
plant = plantSKU,
water = 1,
date = waterDate
)
f = randrange(0,2,1)
if f == 1:
water.feed = 1
water.notes = "General purpose fertilizer; half-strength"
else:
water.feed = 0
db.session.add(water)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: LogEntry[{}] Plant[{}]".format(dayNum, water.id, water.plant))
@manager.command
def checkStock(simDay, dayNum):
stock = Product.query.all()
reOrder = Orders()
for i in stock:
if i.quantity == 0:
newItem = Product(
name = i.name,
quantity = randrange(5,10,1),
price = i.price,
location = i.location,
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = newItem.price / 3,
date = simDay,
date_received = (simDay + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = reOrder.id
db.session.delete(i)
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def waterPlants(simDay, dayNum):
# Water simulated plants
# Group plants by watering requirements
simPlants = db.session.query(Plant).filter(Plant.quantity > 0).all()
for i in simPlants:
lastLog = db.session.query(WaterLog)\
.filter(WaterLog.plant == i.sku)\
.order_by(desc('date')).first()
if i.location == 'A':
if (simDay - lastLog.date) == timedelta(days=5):
updateLog(i.sku, simDay, dayNum)
elif i.location == 'B':
if (simDay - lastLog.date) == timedelta(days=7):
updateLog(i.sku, simDay, dayNum)
else:
if (simDay - lastLog.date) >= timedelta(days=10):
updateLog(i.sku, simDay, dayNum)
@manager.command
def add_fake_data():
# Generate fake customers
Contact.generate_fake(count=50)
# All simulated customers are returning customers
simContacts = db.session.query(Contact.id).all()
# Simulate 60 days of slow, daily business
simDay = date.today() - timedelta(days=60)
dayNum = 0
# Initial inventory
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
OrderMerchandise(5, (simDay - timedelta(days=2)), dayNum)
# Begin simulation
while (date.today() - simDay) > timedelta(days=0):
waterPlants(simDay, dayNum)
checkStock(simDay, dayNum)
if (dayNum % 7) == 0:
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
# Between one and three customers per day, chosen at random
# (It's ok, everything's overpriced)
# Customers come in and buy a handful of random items
# Sales are generated and added to DB
# Inventory updates
for i in range(1, randrange(2,5,1)):
transactionID = Sales().id
numItems = randrange(1,6,1)
shopper = choice(simContacts)
merch = []
#plantsAvailable = Plant.query.all()
itemsAvailable = Item.query.all()
for item in itemsAvailable:
merch.append(item)
alreadyPicked = []
for j in range(1, numItems):
merchChoice = choice(merch)
if merchChoice.sku in alreadyPicked:
pass
elif merchChoice.quantity > 0:
multiplier = randrange(1,5,1)
cart = Sales(
id = transactionID,
date = simDay,
customer = shopper,
item = merchChoice.sku,
salePrice = merchChoice.price
)
# Check if we have enough of that item
if multiplier <= merchChoice.quantity:
cart.qty = multiplier
else:
cart.qty = 1
# Create entry in Sales
db.session.add(cart)
# Update quantities in Inventory DB
merchChoice.updateQty( -cart.qty )
# Don't pick the same item twice
alreadyPicked.append(cart.item)
dprint("Day {}: Customer[{}] purchased Item[{}]".format(dayNum, shopper, cart.item))
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
# Advance simulation one day |
@manager.command
def setup_dev():
"""Runs the set-up needed for local development."""
setup_general()
@manager.command
def setup_prod():
"""Runs the set-up needed for production."""
setup_general()
def setup_general():
"""Runs the set-up needed for both local development and production.
Also sets up first admin user."""
Role.insert_roles()
admin_query = Role.query.filter_by(name='Administrator')
if admin_query.first() is not None:
if Employee.query.filter_by(email=Config.ADMIN_EMAIL).first() is None:
user = Employee(first_name='Admin',
last_name='Account',
password=Config.ADMIN_PASSWORD,
email=Config.ADMIN_EMAIL)
db.session.add(user)
db.session.commit()
print('Added administrator {}'.format(user.full_name()))
@manager.command
def run_worker():
"""Initializes a slim rq task queue."""
listen = ['default']
conn = Redis(host=app.config['RQ_DEFAULT_HOST'],
port=app.config['RQ_DEFAULT_PORT'],
db=0,
password=app.config['RQ_DEFAULT_PASSWORD'])
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
@manager.command
def format():
"""Runs the yapf and isort formatters over the project."""
isort = 'isort -rc *.py app/'
yapf = 'yapf -r -i *.py app/'
print('Running {}'.format(isort))
subprocess.call(isort, shell=True)
print('Running {}'.format(yapf))
subprocess.call(yapf, shell=True)
if __name__ == '__main__':
manager.run() | simDay += timedelta(days=1)
dayNum += 1 | random_line_split |
manage.py | #!/usr/bin/env python
import os
import subprocess
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell, Server
from redis import Redis
from rq import Connection, Queue, Worker
from sqlalchemy.sql import exists
from sqlalchemy.exc import IntegrityError
from sqlalchemy import desc
from app import create_app, db, dprint
from app.models import Role, Employee, Contact, Plant, Orders, Sales, WaterLog, Species, Genus, Supplier, Product, Item
from config import Config
from datetime import datetime, date, timedelta
from random import randrange, choice
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, Employee=Employee, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('runserver', Server(host="0.0.0.0"))
# Zone A: Full shade, high humidity
# Zone B: Partial shade, high humidity
# Zone C: Partial shade, ambient humidity
# Zone D: Full sun, ambient humidity
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def recreate_db():
"""
Recreates a local database. You probably should not use this on
production.
"""
db.session.execute('SET FOREIGN_KEY_CHECKS=0;')
db.session.execute('DROP TABLE IF EXISTS logs;')
db.session.execute('DROP TABLE IF EXISTS employees;')
db.session.execute('DROP TABLE IF EXISTS sales;')
db.session.execute('DROP TABLE IF EXISTS plants;')
db.session.execute('DROP TABLE IF EXISTS products;')
db.session.execute('DROP TABLE IF EXISTS suppliers;')
db.session.execute('DROP TABLE IF EXISTS orders;')
db.session.execute('DROP TABLE IF EXISTS contacts;')
db.session.execute('DROP TABLE IF EXISTS varieties;')
db.session.execute('DROP TABLE IF EXISTS species;')
db.session.execute('DROP TABLE IF EXISTS genera;')
db.session.execute('DROP TABLE IF EXISTS families;')
db.drop_all()
db.create_all()
db.session.commit()
fakePlant = Plant(living = True)
db.session.add(fakePlant)
db.session.commit()
db.session.delete(fakePlant)
db.session.execute('SET FOREIGN_KEY_CHECKS=1;')
db.session.commit()
@manager.command
def OrderMerchandise( numItems, orderDate, dayNum ):
# Simulation
# Generate fake order and add to inventory
newOrder = Orders()
StuffWeSell = [
('Shovel', 14.30, 24.99, 'B'),
('Peat moss - 5L', 4.75, 12.99, 'D'),
('Peat moss - 10L', 6.00, 19.99, 'D'),
('Perlite - 5L', 3.50, 10.99, 'D'),
('Perlite - 10L', 5.00, 16.99, 'D'),
('Hydroton - 10L', 7.31, 14.99, 'D'),
('Vermiculite - 5L', 3.75, 9.99, 'D'),
('Vermiculite - 10L', 5.75, 13.99, 'D'),
('"Premium" dirt - 5L', 0.50, 24.99, 'D'),
('Systemic granules', 7.50, 17.99, 'A'),
('Copper Fungicide', 11.45, 19.99, 'A'),
('Spray bottle', 0.75, 2.99, 'A'),
('Nursery pot - 3in', 0.25, 1.99, 'B'),
('Nursery pot - 6in', 0.35, 2.99, 'B'),
('Nursery pot - 9in', 0.45, 3.99, 'B')
]
for item in StuffWeSell:
newItem = Product(
name = item[0],
quantity = randrange(5,10,1),
price = item[2],
location = item[3],
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = item[1],
date = orderDate,
date_received = (orderDate + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = newOrder.id
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def OrderPlants( numPlants, orderDate, dayNum ):
# Simulation
# Generate fake plant order and add to inventory
newOrder = Orders()
plants = Plant.generate_fake( numPlants )
for i in plants:
invoice = Orders(
supplier = 'TGKmf',
date = orderDate,
date_received = orderDate + timedelta(days=2),
item = i.sku,
price = i.price/randrange(2, 5, 1),
qty = i.quantity
)
invoice.id = newOrder.id
db.session.add(invoice)
# Water plants when they arrive
updateLog(i.sku, orderDate, dayNum)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format( dayNum, invoice.id, invoice.item))
@manager.command
def updateLog(plantSKU, waterDate, dayNum):
water = WaterLog(
plant = plantSKU,
water = 1,
date = waterDate
)
f = randrange(0,2,1)
if f == 1:
water.feed = 1
water.notes = "General purpose fertilizer; half-strength"
else:
water.feed = 0
db.session.add(water)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: LogEntry[{}] Plant[{}]".format(dayNum, water.id, water.plant))
@manager.command
def checkStock(simDay, dayNum):
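# Simulation
# Any product that has sold out is replaced with a freshly stocked row and a re-supply order is recorded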
stock = Product.query.all()
reOrder = Orders()
for i in stock:
if i.quantity == 0:
newItem = Product(
name = i.name,
quantity = randrange(5,10,1),
price = i.price,
location = i.location,
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = newItem.price / 3,
date = simDay,
date_received = (simDay + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = reOrder.id
db.session.delete(i)
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def waterPlants(simDay, dayNum):
# Water simulated plants
# Group plants by watering requirements
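# Watering schedule by zone: A every 5 days, B every 7 days, all other zones (C/D) once 10 or more days have passed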
simPlants = db.session.query(Plant).filter(Plant.quantity > 0).all()
for i in simPlants:
lastLog = db.session.query(WaterLog)\
.filter(WaterLog.plant == i.sku)\
.order_by(desc('date')).first()
if i.location == 'A':
if (simDay - lastLog.date) == timedelta(days=5):
updateLog(i.sku, simDay, dayNum)
elif i.location == 'B':
if (simDay - lastLog.date) == timedelta(days=7):
updateLog(i.sku, simDay, dayNum)
else:
if (simDay - lastLog.date) >= timedelta(days=10):
updateLog(i.sku, simDay, dayNum)
@manager.command
def add_fake_data():
# Generate fake customers
Contact.generate_fake(count=50)
# All simulated customers are returning customers
simContacts = db.session.query(Contact.id).all()
# Simulate 60 days of slow, daily business
simDay = date.today() - timedelta(days=60)
dayNum = 0
# Initial inventory
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
OrderMerchandise(5, (simDay - timedelta(days=2)), dayNum)
# Begin simulation
while (date.today() - simDay) > timedelta(days=0):
waterPlants(simDay, dayNum)
checkStock(simDay, dayNum)
if (dayNum % 7) == 0:
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
# Between 2 and 5 customers per day, chosen at random
# (It's ok, everything's overpriced)
# Customers come in and buy a handful of random items
# Sales are generated and added to DB
# Inventory updates
for i in range(1, randrange(2,5,1)):
transactionID = Sales().id
numItems = randrange(1,6,1)
shopper = choice(simContacts)
merch = []
#plantsAvailable = Plant.query.all()
itemsAvailable = Item.query.all()
for item in itemsAvailable:
merch.append(item)
alreadyPicked = []
for j in range(1, numItems):
merchChoice = choice(merch)
if merchChoice.sku in alreadyPicked:
pass
elif merchChoice.quantity > 0:
multiplier = randrange(1,5,1)
cart = Sales(
id = transactionID,
date = simDay,
customer = shopper,
item = merchChoice.sku,
salePrice = merchChoice.price
)
# Check if we have enough of that item
if multiplier <= merchChoice.quantity:
cart.qty = multiplier
else:
cart.qty = 1
# Create entry in Sales
db.session.add(cart)
# Update quantities in Inventory DB
merchChoice.updateQty( -cart.qty )
# Don't pick the same item twice
alreadyPicked.append(cart.item)
dprint("Day {}: Customer[{}] purchased Item[{}]".format(dayNum, shopper, cart.item))
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
# Advance simulation one day
simDay += timedelta(days=1)
dayNum += 1
@manager.command
def setup_dev():
"""Runs the set-up needed for local development."""
setup_general()
@manager.command
def setup_prod():
"""Runs the set-up needed for production."""
setup_general()
def setup_general():
"""Runs the set-up needed for both local development and production.
Also sets up first admin user."""
Role.insert_roles()
admin_query = Role.query.filter_by(name='Administrator')
if admin_query.first() is not None:
if Employee.query.filter_by(email=Config.ADMIN_EMAIL).first() is None:
|
@manager.command
def run_worker():
"""Initializes a slim rq task queue."""
listen = ['default']
conn = Redis(host=app.config['RQ_DEFAULT_HOST'],
port=app.config['RQ_DEFAULT_PORT'],
db=0,
password=app.config['RQ_DEFAULT_PASSWORD'])
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
@manager.command
def format():
"""Runs the yapf and isort formatters over the project."""
isort = 'isort -rc *.py app/'
yapf = 'yapf -r -i *.py app/'
print('Running {}'.format(isort))
subprocess.call(isort, shell=True)
print('Running {}'.format(yapf))
subprocess.call(yapf, shell=True)
if __name__ == '__main__':
manager.run()
| user = Employee(first_name='Admin',
last_name='Account',
password=Config.ADMIN_PASSWORD,
email=Config.ADMIN_EMAIL)
db.session.add(user)
db.session.commit()
print('Added administrator {}'.format(user.full_name())) | conditional_block |
manage.py | #!/usr/bin/env python
import os
import subprocess
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager, Shell, Server
from redis import Redis
from rq import Connection, Queue, Worker
from sqlalchemy.sql import exists
from sqlalchemy.exc import IntegrityError
from sqlalchemy import desc
from app import create_app, db, dprint
from app.models import Role, Employee, Contact, Plant, Orders, Sales, WaterLog, Species, Genus, Supplier, Product, Item
from config import Config
from datetime import datetime, date, timedelta
from random import randrange, choice
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, Employee=Employee, Role=Role)
manager.add_command('shell', Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('runserver', Server(host="0.0.0.0"))
# Zone A: Full shade, high humidity
# Zone B: Partial shade, high humidity
# Zone C: Partial shade, ambient humidity
# Zone D: Full sun, ambient humidity
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def recreate_db():
"""
Recreates a local database. You probably should not use this on
production.
"""
db.session.execute('SET FOREIGN_KEY_CHECKS=0;')
db.session.execute('DROP TABLE IF EXISTS logs;')
db.session.execute('DROP TABLE IF EXISTS employees;')
db.session.execute('DROP TABLE IF EXISTS sales;')
db.session.execute('DROP TABLE IF EXISTS plants;')
db.session.execute('DROP TABLE IF EXISTS products;')
db.session.execute('DROP TABLE IF EXISTS suppliers;')
db.session.execute('DROP TABLE IF EXISTS orders;')
db.session.execute('DROP TABLE IF EXISTS contacts;')
db.session.execute('DROP TABLE IF EXISTS varieties;')
db.session.execute('DROP TABLE IF EXISTS species;')
db.session.execute('DROP TABLE IF EXISTS genera;')
db.session.execute('DROP TABLE IF EXISTS families;')
db.drop_all()
db.create_all()
db.session.commit()
fakePlant = Plant(living = True)
db.session.add(fakePlant)
db.session.commit()
db.session.delete(fakePlant)
db.session.execute('SET FOREIGN_KEY_CHECKS=1;')
db.session.commit()
@manager.command
def | ( numItems, orderDate, dayNum ):
# Simulation
# Generate fake order and add to inventory
newOrder = Orders()
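# Each StuffWeSell entry is (name, supplier price, retail price, storage zone)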
StuffWeSell = [
('Shovel', 14.30, 24.99, 'B'),
('Peat moss - 5L', 4.75, 12.99, 'D'),
('Peat moss - 10L', 6.00, 19.99, 'D'),
('Perlite - 5L', 3.50, 10.99, 'D'),
('Perlite - 10L', 5.00, 16.99, 'D'),
('Hydroton - 10L', 7.31, 14.99, 'D'),
('Vermiculite - 5L', 3.75, 9.99, 'D'),
('Vermiculite - 10L', 5.75, 13.99, 'D'),
('"Premium" dirt - 5L', 0.50, 24.99, 'D'),
('Systemic granules', 7.50, 17.99, 'A'),
('Copper Fungicide', 11.45, 19.99, 'A'),
('Spray bottle', 0.75, 2.99, 'A'),
('Nursery pot - 3in', 0.25, 1.99, 'B'),
('Nursery pot - 6in', 0.35, 2.99, 'B'),
('Nursery pot - 9in', 0.45, 3.99, 'B')
]
for item in StuffWeSell:
newItem = Product(
name = item[0],
quantity = randrange(5,10,1),
price = item[2],
location = item[3],
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = item[1],
date = orderDate,
date_received = (orderDate + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = newOrder.id
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def OrderPlants( numPlants, orderDate, dayNum ):
# Simulation
# Generate fake plant order and add to inventory
newOrder = Orders()
plants = Plant.generate_fake( numPlants )
for i in plants:
invoice = Orders(
supplier = 'TGKmf',
date = orderDate,
date_received = orderDate + timedelta(days=2),
item = i.sku,
price = i.price/randrange(2, 5, 1),
qty = i.quantity
)
invoice.id = newOrder.id
db.session.add(invoice)
# Water plants when they arrive
updateLog(i.sku, orderDate, dayNum)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format( dayNum, invoice.id, invoice.item))
@manager.command
def updateLog(plantSKU, waterDate, dayNum):
water = WaterLog(
plant = plantSKU,
water = 1,
date = waterDate
)
f = randrange(0,2,1)
if f == 1:
water.feed = 1
water.notes = "General purpose fertilizer; half-strength"
else:
water.feed = 0
db.session.add(water)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: LogEntry[{}] Plant[{}]".format(dayNum, water.id, water.plant))
@manager.command
def checkStock(simDay, dayNum):
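# Simulation
# Any product that has sold out is replaced with a freshly stocked row and a re-supply order is recorded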
stock = Product.query.all()
reOrder = Orders()
for i in stock:
if i.quantity == 0:
newItem = Product(
name = i.name,
quantity = randrange(5,10,1),
price = i.price,
location = i.location,
)
invoice = Orders(
item = newItem.sku,
qty = newItem.quantity,
price = newItem.price / 3,
date = simDay,
date_received = (simDay + timedelta(days=2)),
supplier = 'TAGda'
)
invoice.id = reOrder.id
db.session.delete(i)
db.session.add(invoice)
db.session.add(newItem)
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
else:
dprint("Day {}: Order[{}] Item[{}]".format(dayNum, invoice.id, invoice.item))
@manager.command
def waterPlants(simDay, dayNum):
# Water simulated plants
# Group plants by watering requirements
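# Watering schedule by zone: A every 5 days, B every 7 days, all other zones (C/D) once 10 or more days have passed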
simPlants = db.session.query(Plant).filter(Plant.quantity > 0).all()
for i in simPlants:
lastLog = db.session.query(WaterLog)\
.filter(WaterLog.plant == i.sku)\
.order_by(desc('date')).first()
if i.location == 'A':
if (simDay - lastLog.date) == timedelta(days=5):
updateLog(i.sku, simDay, dayNum)
elif i.location == 'B':
if (simDay - lastLog.date) == timedelta(days=7):
updateLog(i.sku, simDay, dayNum)
else:
if (simDay - lastLog.date) >= timedelta(days=10):
updateLog(i.sku, simDay, dayNum)
@manager.command
def add_fake_data():
# Generate fake customers
Contact.generate_fake(count=50)
# All simulated customers are returning customers
simContacts = db.session.query(Contact.id).all()
# Simulate 60 days of slow, daily business
simDay = date.today() - timedelta(days=60)
dayNum = 0
# Initial inventory
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
OrderMerchandise(5, (simDay - timedelta(days=2)), dayNum)
# Begin simulation
while (date.today() - simDay) > timedelta(days=0):
waterPlants(simDay, dayNum)
checkStock(simDay, dayNum)
if (dayNum % 7) == 0:
OrderPlants(5, (simDay - timedelta(days=2)), dayNum)
# Between 2 and 5 customers per day, chosen at random
# (It's ok, everything's overpriced)
# Customers come in and buy a handful of random items
# Sales are generated and added to DB
# Inventory updates
for i in range(1, randrange(2,5,1)):
transactionID = Sales().id
numItems = randrange(1,6,1)
shopper = choice(simContacts)
merch = []
#plantsAvailable = Plant.query.all()
itemsAvailable = Item.query.all()
for item in itemsAvailable:
merch.append(item)
alreadyPicked = []
for j in range(1, numItems):
merchChoice = choice(merch)
if merchChoice.sku in alreadyPicked:
pass
elif merchChoice.quantity > 0:
multiplier = randrange(1,5,1)
cart = Sales(
id = transactionID,
date = simDay,
customer = shopper,
item = merchChoice.sku,
salePrice = merchChoice.price
)
# Check if we have enough of that item
if multiplier <= merchChoice.quantity:
cart.qty = multiplier
else:
cart.qty = 1
# Create entry in Sales
db.session.add(cart)
# Update quantities in Inventory DB
merchChoice.updateQty( -cart.qty )
# Don't pick the same item twice
alreadyPicked.append(cart.item)
dprint("Day {}: Customer[{}] purchased Item[{}]".format(dayNum, shopper, cart.item))
try:
db.session.commit()
except IntegrityError:
dprint('uh oh')
db.session.rollback()
# Advance simulation one day
simDay += timedelta(days=1)
dayNum += 1
@manager.command
def setup_dev():
"""Runs the set-up needed for local development."""
setup_general()
@manager.command
def setup_prod():
"""Runs the set-up needed for production."""
setup_general()
def setup_general():
"""Runs the set-up needed for both local development and production.
Also sets up first admin user."""
Role.insert_roles()
admin_query = Role.query.filter_by(name='Administrator')
if admin_query.first() is not None:
if Employee.query.filter_by(email=Config.ADMIN_EMAIL).first() is None:
user = Employee(first_name='Admin',
last_name='Account',
password=Config.ADMIN_PASSWORD,
email=Config.ADMIN_EMAIL)
db.session.add(user)
db.session.commit()
print('Added administrator {}'.format(user.full_name()))
@manager.command
def run_worker():
"""Initializes a slim rq task queue."""
listen = ['default']
conn = Redis(host=app.config['RQ_DEFAULT_HOST'],
port=app.config['RQ_DEFAULT_PORT'],
db=0,
password=app.config['RQ_DEFAULT_PASSWORD'])
with Connection(conn):
worker = Worker(map(Queue, listen))
worker.work()
@manager.command
def format():
"""Runs the yapf and isort formatters over the project."""
isort = 'isort -rc *.py app/'
yapf = 'yapf -r -i *.py app/'
print('Running {}'.format(isort))
subprocess.call(isort, shell=True)
print('Running {}'.format(yapf))
subprocess.call(yapf, shell=True)
if __name__ == '__main__':
manager.run()
| OrderMerchandise | identifier_name |
lambda.rs | use dotenv;
use fastspring_keygen_integration::fastspring;
use fastspring_keygen_integration::keygen;
use fastspring_keygen_integration::keygen::{generate_licenses, suspend_license};
use fastspring_keygen_integration::util;
use fastspring_keygen_integration::patreon;
use http::header::CONTENT_TYPE;
use lambda_http::{lambda, Body, Request, RequestExt, Response};
use lambda_runtime::error::HandlerError;
use lambda_runtime::Context;
use log::{debug, info, warn};
use std::collections::HashMap;
use std::error::Error;
use std::env;
use lazy_static::lazy_static;
use lettre::transport::smtp::authentication::Credentials;
use lettre::{Message, SmtpTransport, Transport};
lazy_static! {
static ref MNPRX_COMMUNITY_KEYGEN_POLICY_ID: String = env::var("MNPRX_COMMUNITY_KEYGEN_POLICY_ID").unwrap();
static ref SMTP_SERVER: String = env::var("SMTP_SERVER").unwrap();
static ref SMTP_USERNAME: String = env::var("SMTP_USERNAME").unwrap();
static ref SMTP_PASSWORD: String = env::var("SMTP_PASSWORD").unwrap();
}
fn router(req: Request, c: Context) -> Result<Response<Body>, HandlerError> {
debug!("router request={:?}", req);
debug!("path={:?}", req.uri().path());
debug!("query={:?}", req.query_string_parameters());
let client = reqwest::Client::new();
match req.uri().path() {
"/fastspring-keygen-integration-service/keygen/create" => match *req.method() {
http::Method::POST => handle_keygen_create(req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/webhooks" => match *req.method() {
http::Method::POST => handle_webhook(&client, req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/patreon" => match *req.method() {
http::Method::POST => handle_patreon_webhook(&client, req, c),
_ => not_allowed(req, c),
},
_ => not_found(req, c),
}
}
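/// Returns the key portion of a license code, i.e. the second dot-separated segment.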
fn license_key(code: &str) -> Option<&str> {
code.split('.').nth(1)
}
fn handle_patreon_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError>
{
if !patreon::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let trigger = req.headers().get("X-Patreon-Event")
.ok_or("invalid format (X-Patreon-Event)")?
.to_str().ok().ok_or("invalid format (X-Patreon-Event)")?;
debug!("X-Patreon-Event: {}", trigger);
let body = util::body_to_json(req.body())?;
if trigger == "pledges:create" {
patreon_handle_pledge_create(client, &body)?; | Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Patreon pledge create trigger
fn patreon_handle_pledge_create(
client: &reqwest::Client,
body: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_create {:?}", body);
let user_id = body["data"]["relationships"]["patron"]["data"]["id"].as_str().ok_or("invalid format (.data.relationships.patron.data.id)")?;
let mut user_email = None;
let mut user_first_name = None;
for included in body["included"].as_array().ok_or("invalid format (.included)")?.iter() {
if included["id"].as_str().ok_or("invalid format (.included.#.id)")? == user_id {
user_email = Some(included["attributes"]["email"].as_str().ok_or("invalid format (.included.#.attributes.email)")?);
user_first_name = included["attributes"]["first_name"].as_str();
}
}
let user_email = user_email.ok_or("could not find patron email")?;
debug!("patron email: {}", user_email);
let license=
keygen::generate_license(
client,
"PATREON",
MNPRX_COMMUNITY_KEYGEN_POLICY_ID.as_ref(),
None,
Some(user_id),
false)?;
let user_name = body["data"]["relationships"]["patron"]["data"]["id"].as_str().unwrap_or("");
let email_body = format!(r##"Hi,
Thank you for becoming our patron!
You can activate your Flair Community license with the following key:
{}
For more information on how to install and activate your license, please refer to the documentation: https://docs.artineering.io/flair/setup/
If you encounter any issues, please feel free to reach out to us through Discord; we are here to help.
Have fun using Flair and make sure to share your results with the community.
Cheers,
Your team at Artineering."##, license);
// send the license to the patron
let email = Message::builder()
.from("Artineering <[email protected]>".parse().unwrap())
.reply_to("Artineering <[email protected]>".parse().unwrap())
.to(user_email.parse().unwrap())
.bcc("[email protected]".parse().unwrap())
.subject("[Flair] Your Community license key")
.body(email_body)
.unwrap();
let creds = Credentials::new(SMTP_USERNAME.clone(), SMTP_PASSWORD.clone());
let mailer = SmtpTransport::relay(SMTP_SERVER.as_ref())
.unwrap()
.credentials(creds)
.build();
match mailer.send(&email) {
Ok(_) => info!("Email sent successfully"),
Err(e) => panic!("Could not send email: {:?}", e),
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Patreon pledge delete trigger
fn patreon_handle_pledge_delete(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_delete {:?}", data);
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
fn handle_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError> {
if !fastspring::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let events_json = util::body_to_json(req.body())?;
let events_json = events_json["events"].as_array().ok_or("invalid format")?;
// TODO do not reply OK every time: check each event
for e in events_json {
let ty = e["type"].as_str().ok_or("invalid format")?;
let data = &e["data"];
match ty {
"subscription.deactivated" => {
handle_subscription_deactivated(client, data)?;
}
_ => {
warn!("unhandled webhook: {}", ty);
}
};
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Handles deactivation of subscriptions.
///
/// This will suspend all licenses associated with the order.
fn handle_subscription_deactivated(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError> {
debug!("handle_subscription_deactivated {:?}", data);
let subscription_id = data["id"].as_str().ok_or("invalid format (.id)")?;
info!("subscription deactivated: {}", subscription_id);
let orders = fastspring::get_subscription_entries(client, subscription_id)?;
// find the original order
// according to the API, this is the entry whose ".reference" field does not include
// a "B" (for "billing") at the end. All the others are subscription billing orders.
let original_order = orders.as_array().ok_or("invalid format (orders)")?.iter().find(|&order| {
let order = &order["order"];
if order["reference"].is_null() { return false; }
if let Some(s) = order["reference"].as_str() {
!s.ends_with('B')
} else {
false
}
});
let original_order = original_order.ok_or("could not find original order")?;
let order_items = original_order["order"]["items"]
.as_array()
.ok_or("invalid format (.order.items)")?;
// Collect all licenses to revoke
let mut licenses_to_revoke = Vec::new();
for item in order_items.iter() {
//let product = &item["product"];
for (_k, v) in item["fulfillments"]
.as_object()
.ok_or("invalid format (.fulfillments)")?
.iter()
{
if let Some(licenses) = v.as_array() {
for l in licenses {
let code = if let Some(s) = l["license"].as_str() {
s
} else {
continue;
};
licenses_to_revoke.push(String::from(code));
}
}
}
}
// revoke all licenses
for lic in licenses_to_revoke.iter() {
let key = license_key(lic).ok_or("invalid license key")?;
keygen::revoke_license(key)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Handles license creation requests (coming from FastSpring).
fn handle_keygen_create(req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
if !fastspring::verify_license_gen(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let params: HashMap<_, _> = url::form_urlencoded::parse(match req.body() {
Body::Text(ref s) => s.as_bytes(),
_ => return Err("invalid request body".into()),
})
.collect();
//debug!("params = {:?}", params);
let subscription = params
.get("subscription")
.ok_or("invalid query parameters (no subscription)")?;
let policy_id = params
.get("policy")
.ok_or("invalid query parameters (no policy)")?;
let quantity: u32 = params
.get("quantity")
.ok_or("invalid query parameters (no quantity)")?
.parse()?;
let (codes,errors) = generate_licenses(subscription, policy_id, quantity, None, false);
if !errors.is_empty() {
Err(format!("errors encountered while generating licenses ({} successfully generated)", codes.len()).as_str())?
}
let codes = codes.join("\n");
Ok(Response::builder()
.status(http::StatusCode::OK)
.header(CONTENT_TYPE, "text/plain")
.body(codes.into())
.unwrap())
}
fn not_found(_req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
Ok(Response::builder()
.status(http::StatusCode::NOT_FOUND)
.body(Body::default())
.unwrap())
}
fn not_allowed(_req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
Ok(Response::builder()
.status(http::StatusCode::METHOD_NOT_ALLOWED)
.body(Body::default())
.unwrap())
}
fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
dotenv::dotenv().ok();
lambda!(router);
Ok(())
} | } else if trigger == "pledges:delete" {
patreon_handle_pledge_delete(client, &body)?;
}
| random_line_split |
lambda.rs | use dotenv;
use fastspring_keygen_integration::fastspring;
use fastspring_keygen_integration::keygen;
use fastspring_keygen_integration::keygen::{generate_licenses, suspend_license};
use fastspring_keygen_integration::util;
use fastspring_keygen_integration::patreon;
use http::header::CONTENT_TYPE;
use lambda_http::{lambda, Body, Request, RequestExt, Response};
use lambda_runtime::error::HandlerError;
use lambda_runtime::Context;
use log::{debug, info, warn};
use std::collections::HashMap;
use std::error::Error;
use std::env;
use lazy_static::lazy_static;
use lettre::transport::smtp::authentication::Credentials;
use lettre::{Message, SmtpTransport, Transport};
lazy_static! {
static ref MNPRX_COMMUNITY_KEYGEN_POLICY_ID: String = env::var("MNPRX_COMMUNITY_KEYGEN_POLICY_ID").unwrap();
static ref SMTP_SERVER: String = env::var("SMTP_SERVER").unwrap();
static ref SMTP_USERNAME: String = env::var("SMTP_USERNAME").unwrap();
static ref SMTP_PASSWORD: String = env::var("SMTP_PASSWORD").unwrap();
}
fn router(req: Request, c: Context) -> Result<Response<Body>, HandlerError> {
debug!("router request={:?}", req);
debug!("path={:?}", req.uri().path());
debug!("query={:?}", req.query_string_parameters());
let client = reqwest::Client::new();
match req.uri().path() {
"/fastspring-keygen-integration-service/keygen/create" => match *req.method() {
http::Method::POST => handle_keygen_create(req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/webhooks" => match *req.method() {
http::Method::POST => handle_webhook(&client, req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/patreon" => match *req.method() {
http::Method::POST => handle_patreon_webhook(&client, req, c),
_ => not_allowed(req, c),
},
_ => not_found(req, c),
}
}
fn license_key(code: &str) -> Option<&str> |
fn handle_patreon_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError>
{
if !patreon::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let trigger = req.headers().get("X-Patreon-Event")
.ok_or("invalid format (X-Patreon-Event)")?
.to_str().ok().ok_or("invalid format (X-Patreon-Event)")?;
debug!("X-Patreon-Event: {}", trigger);
let body = util::body_to_json(req.body())?;
if trigger == "pledges:create" {
patreon_handle_pledge_create(client, &body)?;
} else if trigger == "pledges:delete" {
patreon_handle_pledge_delete(client, &body)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Patreon pledge create trigger
fn patreon_handle_pledge_create(
client: &reqwest::Client,
body: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_create {:?}", body);
let user_id = body["data"]["relationships"]["patron"]["data"]["id"].as_str().ok_or("invalid format (.data.relationships.patron.data.id)")?;
let mut user_email = None;
let mut user_first_name = None;
for included in body["included"].as_array().ok_or("invalid format (.included)")?.iter() {
if included["id"].as_str().ok_or("invalid format (.included.#.id)")? == user_id {
user_email = Some(included["attributes"]["email"].as_str().ok_or("invalid format (.included.#.attributes.email)")?);
user_first_name = included["attributes"]["first_name"].as_str();
}
}
let user_email = user_email.ok_or("could not find patron email")?;
debug!("patron email: {}", user_email);
let license=
keygen::generate_license(
client,
"PATREON",
MNPRX_COMMUNITY_KEYGEN_POLICY_ID.as_ref(),
None,
Some(user_id),
false)?;
let user_name = body["data"]["relationships"]["patron"]["data"]["id"].as_str().unwrap_or("");
let email_body = format!(r##"Hi,
Thank you for becoming our patron!
You can activate your Flair Community license with the following key:
{}
For more information on how to install and activate your license, please refer to the documentation: https://docs.artineering.io/flair/setup/
If you encounter any issues, please feel free to reach out to us through Discord; we are here to help.
Have fun using Flair and make sure to share your results with the community.
Cheers,
Your team at Artineering."##, license);
// send the license to the patron
let email = Message::builder()
.from("Artineering <[email protected]>".parse().unwrap())
.reply_to("Artineering <[email protected]>".parse().unwrap())
.to(user_email.parse().unwrap())
.bcc("[email protected]".parse().unwrap())
.subject("[Flair] Your Community license key")
.body(email_body)
.unwrap();
let creds = Credentials::new(SMTP_USERNAME.clone(), SMTP_PASSWORD.clone());
let mailer = SmtpTransport::relay(SMTP_SERVER.as_ref())
.unwrap()
.credentials(creds)
.build();
match mailer.send(&email) {
Ok(_) => info!("Email sent successfully"),
Err(e) => panic!("Could not send email: {:?}", e),
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Patreon pledge delete trigger
fn patreon_handle_pledge_delete(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_delete {:?}", data);
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
fn handle_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError> {
if !fastspring::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let events_json = util::body_to_json(req.body())?;
let events_json = events_json["events"].as_array().ok_or("invalid format")?;
// TODO do not reply OK every time: check each event
for e in events_json {
let ty = e["type"].as_str().ok_or("invalid format")?;
let data = &e["data"];
match ty {
"subscription.deactivated" => {
handle_subscription_deactivated(client, data)?;
}
_ => {
warn!("unhandled webhook: {}", ty);
}
};
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Handles deactivation of subscriptions.
///
/// This will suspend all licenses associated with the order.
fn handle_subscription_deactivated(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError> {
debug!("handle_subscription_deactivated {:?}", data);
let subscription_id = data["id"].as_str().ok_or("invalid format (.id)")?;
info!("subscription deactivated: {}", subscription_id);
let orders = fastspring::get_subscription_entries(client, subscription_id)?;
// find the original order
// according to the API, this is the entry whose ".reference" field does not include
// a "B" (for "billing") at the end. All the others are subscription billing orders.
let original_order = orders.as_array().ok_or("invalid format (orders)")?.iter().find(|&order| {
let order = &order["order"];
if order["reference"].is_null() { return false; }
if let Some(s) = order["reference"].as_str() {
!s.ends_with('B')
} else {
false
}
});
let original_order = original_order.ok_or("could not find original order")?;
let order_items = original_order["order"]["items"]
.as_array()
.ok_or("invalid format (.order.items)")?;
// Collect all licenses to revoke
let mut licenses_to_revoke = Vec::new();
for item in order_items.iter() {
//let product = &item["product"];
for (_k, v) in item["fulfillments"]
.as_object()
.ok_or("invalid format (.fulfillments)")?
.iter()
{
if let Some(licenses) = v.as_array() {
for l in licenses {
let code = if let Some(s) = l["license"].as_str() {
s
} else {
continue;
};
licenses_to_revoke.push(String::from(code));
}
}
}
}
// revoke all licenses
for lic in licenses_to_revoke.iter() {
let key = license_key(lic).ok_or("invalid license key")?;
keygen::revoke_license(key)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Handles license creation requests (coming from FastSpring).
fn handle_keygen_create(req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
if !fastspring::verify_license_gen(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let params: HashMap<_, _> = url::form_urlencoded::parse(match req.body() {
Body::Text(ref s) => s.as_bytes(),
_ => return Err("invalid request body".into()),
})
.collect();
//debug!("params = {:?}", params);
let subscription = params
.get("subscription")
.ok_or("invalid query parameters (no subscription)")?;
let policy_id = params
.get("policy")
.ok_or("invalid query parameters (no policy)")?;
let quantity: u32 = params
.get("quantity")
.ok_or("invalid query parameters (no quantity)")?
.parse()?;
let (codes,errors) = generate_licenses(subscription, policy_id, quantity, None, false);
if !errors.is_empty() {
Err(format!("errors encountered while generating licenses ({} successfully generated)", codes.len()).as_str())?
}
let codes = codes.join("\n");
Ok(Response::builder()
.status(http::StatusCode::OK)
.header(CONTENT_TYPE, "text/plain")
.body(codes.into())
.unwrap())
}
fn not_found(_req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
Ok(Response::builder()
.status(http::StatusCode::NOT_FOUND)
.body(Body::default())
.unwrap())
}
fn not_allowed(_req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
Ok(Response::builder()
.status(http::StatusCode::METHOD_NOT_ALLOWED)
.body(Body::default())
.unwrap())
}
fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
dotenv::dotenv().ok();
lambda!(router);
Ok(())
}
| {
code.split('.').nth(1)
} | identifier_body |
lambda.rs | use dotenv;
use fastspring_keygen_integration::fastspring;
use fastspring_keygen_integration::keygen;
use fastspring_keygen_integration::keygen::{generate_licenses, suspend_license};
use fastspring_keygen_integration::util;
use fastspring_keygen_integration::patreon;
use http::header::CONTENT_TYPE;
use lambda_http::{lambda, Body, Request, RequestExt, Response};
use lambda_runtime::error::HandlerError;
use lambda_runtime::Context;
use log::{debug, info, warn};
use std::collections::HashMap;
use std::error::Error;
use std::env;
use lazy_static::lazy_static;
use lettre::transport::smtp::authentication::Credentials;
use lettre::{Message, SmtpTransport, Transport};
lazy_static! {
static ref MNPRX_COMMUNITY_KEYGEN_POLICY_ID: String = env::var("MNPRX_COMMUNITY_KEYGEN_POLICY_ID").unwrap();
static ref SMTP_SERVER: String = env::var("SMTP_SERVER").unwrap();
static ref SMTP_USERNAME: String = env::var("SMTP_USERNAME").unwrap();
static ref SMTP_PASSWORD: String = env::var("SMTP_PASSWORD").unwrap();
}
fn | (req: Request, c: Context) -> Result<Response<Body>, HandlerError> {
debug!("router request={:?}", req);
debug!("path={:?}", req.uri().path());
debug!("query={:?}", req.query_string_parameters());
let client = reqwest::Client::new();
match req.uri().path() {
"/fastspring-keygen-integration-service/keygen/create" => match *req.method() {
http::Method::POST => handle_keygen_create(req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/webhooks" => match *req.method() {
http::Method::POST => handle_webhook(&client, req, c),
_ => not_allowed(req, c),
},
"/fastspring-keygen-integration-service/patreon" => match *req.method() {
http::Method::POST => handle_patreon_webhook(&client, req, c),
_ => not_allowed(req, c),
},
_ => not_found(req, c),
}
}
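/// Returns the key portion of a license code, i.e. the second dot-separated segment.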
fn license_key(code: &str) -> Option<&str> {
code.split('.').nth(1)
}
fn handle_patreon_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError>
{
if !patreon::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let trigger = req.headers().get("X-Patreon-Event")
.ok_or("invalid format (X-Patreon-Event)")?
.to_str().ok().ok_or("invalid format (X-Patreon-Event)")?;
debug!("X-Patreon-Event: {}", trigger);
let body = util::body_to_json(req.body())?;
if trigger == "pledges:create" {
patreon_handle_pledge_create(client, &body)?;
} else if trigger == "pledges:delete" {
patreon_handle_pledge_delete(client, &body)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Patreon pledge create trigger
fn patreon_handle_pledge_create(
client: &reqwest::Client,
body: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_create {:?}", body);
let user_id = body["data"]["relationships"]["patron"]["data"]["id"].as_str().ok_or("invalid format (.data.relationships.patron.data.id)")?;
let mut user_email = None;
let mut user_first_name = None;
for included in body["included"].as_array().ok_or("invalid format (.included)")?.iter() {
if included["id"].as_str().ok_or("invalid format (.included.#.id)")? == user_id {
user_email = Some(included["attributes"]["email"].as_str().ok_or("invalid format (.included.#.attributes.email)")?);
user_first_name = included["attributes"]["first_name"].as_str();
}
}
let user_email = user_email.ok_or("could not find patron email")?;
debug!("patron email: {}", user_email);
let license=
keygen::generate_license(
client,
"PATREON",
MNPRX_COMMUNITY_KEYGEN_POLICY_ID.as_ref(),
None,
Some(user_id),
false)?;
let user_name = body["data"]["relationships"]["patron"]["data"]["id"].as_str().unwrap_or("");
let email_body = format!(r##"Hi,
Thank you for becoming our patron!
You can activate your Flair Community license with the following key:
{}
For more information on how to install and activate your license, please refer to the documentation: https://docs.artineering.io/flair/setup/
If you encounter any issues, please feel free to reach out to us through Discord; we are here to help.
Have fun using Flair and make sure to share your results with the community.
Cheers,
Your team at Artineering."##, license);
// send the license to the patron
let email = Message::builder()
.from("Artineering <[email protected]>".parse().unwrap())
.reply_to("Artineering <[email protected]>".parse().unwrap())
.to(user_email.parse().unwrap())
.bcc("[email protected]".parse().unwrap())
.subject("[Flair] Your Community license key")
.body(email_body)
.unwrap();
let creds = Credentials::new(SMTP_USERNAME.clone(), SMTP_PASSWORD.clone());
let mailer = SmtpTransport::relay(SMTP_SERVER.as_ref())
.unwrap()
.credentials(creds)
.build();
match mailer.send(&email) {
Ok(_) => info!("Email sent successfully"),
Err(e) => panic!("Could not send email: {:?}", e),
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Patreon pledge delete trigger
fn patreon_handle_pledge_delete(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError>
{
debug!("handle_pledge_delete {:?}", data);
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
fn handle_webhook(
client: &reqwest::Client,
req: Request,
_c: Context,
) -> Result<Response<Body>, HandlerError> {
if !fastspring::authentify_web_hook(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let events_json = util::body_to_json(req.body())?;
let events_json = events_json["events"].as_array().ok_or("invalid format")?;
// TODO do not reply OK every time: check each event
for e in events_json {
let ty = e["type"].as_str().ok_or("invalid format")?;
let data = &e["data"];
match ty {
"subscription.deactivated" => {
handle_subscription_deactivated(client, data)?;
}
_ => {
warn!("unhandled webhook: {}", ty);
}
};
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(Body::default())
.unwrap())
}
/// Handles deactivation of subscriptions.
///
/// This will suspend all licenses associated with the order.
fn handle_subscription_deactivated(
client: &reqwest::Client,
data: &serde_json::Value,
) -> Result<Response<Body>, HandlerError> {
debug!("handle_subscription_deactivated {:?}", data);
let subscription_id = data["id"].as_str().ok_or("invalid format (.id)")?;
info!("subscription deactivated: {}", subscription_id);
let orders = fastspring::get_subscription_entries(client, subscription_id)?;
// find the original order
// according to the API, this is the entry whose ".reference" field does not include
// a "B" (for "billing") at the end. All the others are subscription billing orders.
let original_order = orders.as_array().ok_or("invalid format (orders)")?.iter().find(|&order| {
let order = &order["order"];
if order["reference"].is_null() { return false; }
if let Some(s) = order["reference"].as_str() {
!s.ends_with('B')
} else {
false
}
});
let original_order = original_order.ok_or("could not find original order")?;
let order_items = original_order["order"]["items"]
.as_array()
.ok_or("invalid format (.order.items)")?;
// Collect all licenses to revoke
let mut licenses_to_revoke = Vec::new();
for item in order_items.iter() {
//let product = &item["product"];
for (_k, v) in item["fulfillments"]
.as_object()
.ok_or("invalid format (.fulfillments)")?
.iter()
{
if let Some(licenses) = v.as_array() {
for l in licenses {
let code = if let Some(s) = l["license"].as_str() {
s
} else {
continue;
};
licenses_to_revoke.push(String::from(code));
}
}
}
}
// revoke all licenses
for lic in licenses_to_revoke.iter() {
let key = license_key(lic).ok_or("invalid license key")?;
keygen::revoke_license(key)?;
}
Ok(Response::builder()
.status(http::StatusCode::OK)
.body(().into())
.unwrap())
}
/// Handles license creation requests (coming from FastSpring).
fn handle_keygen_create(req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
if !fastspring::verify_license_gen(&req) {
return Ok(Response::builder()
.status(http::StatusCode::UNAUTHORIZED)
.body(Body::default())
.unwrap());
}
let params: HashMap<_, _> = url::form_urlencoded::parse(match req.body() {
Body::Text(ref s) => s.as_bytes(),
_ => return Err("invalid request body".into()),
})
.collect();
//debug!("params = {:?}", params);
let subscription = params
.get("subscription")
.ok_or("invalid query parameters (no subscription)")?;
let policy_id = params
.get("policy")
.ok_or("invalid query parameters (no policy)")?;
let quantity: u32 = params
.get("quantity")
.ok_or("invalid query parameters (no quantity)")?
.parse()?;
let (codes,errors) = generate_licenses(subscription, policy_id, quantity, None, false);
if !errors.is_empty() {
Err(format!("errors encountered while generating licenses ({} successfully generated)", codes.len()).as_str())?
}
let codes = codes.join("\n");
Ok(Response::builder()
.status(http::StatusCode::OK)
.header(CONTENT_TYPE, "text/plain")
.body(codes.into())
.unwrap())
}
fn not_found(_req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
Ok(Response::builder()
.status(http::StatusCode::NOT_FOUND)
.body(Body::default())
.unwrap())
}
fn not_allowed(_req: Request, _c: Context) -> Result<Response<Body>, HandlerError> {
Ok(Response::builder()
.status(http::StatusCode::METHOD_NOT_ALLOWED)
.body(Body::default())
.unwrap())
}
fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
dotenv::dotenv().ok();
lambda!(router);
Ok(())
}
| router | identifier_name |