file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 values)
---|---|---|---|---|
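Each row stores one fill-in-the-middle (FIM) example: concatenating prefix, middle, and suffix in that order reconstructs the original file, and fim_type records how the held-out span was chosen (the four values seen below are conditional_block, identifier_body, identifier_name, and random_line_split). A minimal TypeScript sketch of that reconstruction, assuming rows have already been parsed into objects with the field names above (the loader itself is not part of this dataset):

// Minimal sketch: rebuild the original source file from one parsed row.
// The FimRow shape mirrors the schema above; how rows are parsed is assumed.
type FimType =
  | 'conditional_block'
  | 'identifier_body'
  | 'identifier_name'
  | 'random_line_split';

interface FimRow {
  file_name: string;
  prefix: string;
  middle: string;
  suffix: string;
  fim_type: FimType;
}

// The held-out middle slots back between prefix and suffix.
function reconstruct(row: FimRow): string {
  return row.prefix + row.middle + row.suffix;
}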
cadastro.ts
|
import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams, ToastController, Events } from 'ionic-angular';
import { AppPreferences } from '@ionic-native/app-preferences';
import { Cliente } from '../../models/cliente';
import { EnderecoPage } from '../endereco/endereco';
import { Link } from '../../models/link';
import { LogineventProvider } from '../../providers/loginevent/loginevent';
import { Http } from '@angular/http';
import 'rxjs/add/operator/map';
/**
* Generated class for the Cadastro page.
*
* See http://ionicframework.com/docs/components/#navigation for more info
* on Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-cadastro',
templateUrl: 'cadastro.html',
})
export class CadastroPage {
public data: any;
public link: Link;
nome: string = '';
email: string = '';
senha: string = '';
cliente: Cliente;
constructor(private toastCtrl: ToastController, public navCtrl: NavController, public navParams: NavParams, public http: Http, private appPreferences: AppPreferences, public loginevent: LogineventProvider) {
this.link = new Link();
}
ionViewDidLoad() {
}
usuario_add() {
if (this.validaCampos()) {
this.http.post(this.link.api_url + 'clientes/add', {'Cliente': {'nome': this.nome, 'email': this.email, 'senha': this.senha}})
.map(res => res.json())
.subscribe(data => {
if (typeof data.message == "object") {
this.cliente = data.message['0'];
this.appPreferences.store('key', this.cliente['Cliente']['id'].toString()).then((res) => {
this.loginevent.cadastro();
this.goToEndereco(0);
});
} else {
this.toast(data.message);
}
});
} else {
let toast = this.toastCtrl.create({
message: "Preencha os campos, por gentileza",
duration: 3000,
position: 'top'
});
toast.present();
}
}
toast(cod: Number) {
switch (cod) {
case -2:
let toast = this.toastCtrl.create({
message: "Usuário já existe, tente novamente",
duration: 3000,
position: 'top'
});
toast.present();
break;
case -10:
toast = this.toastCtrl.create({
message: "Ocorreu algum erro, tente novamente",
duration: 3000,
position: 'top'
});
toast.present();
break;
}
}
goToEndereco(id: number) {
if (id != 0) {
this.navCtrl.setRoot(EnderecoPage, {cliente: id});
} else {
this.navCtrl.setRoot(EnderecoPage, {cliente: this.cliente});
}
}
validaCampos() {
if (this.nome == "" || this.email == "" || this.senha == "") {
|
return true;
}
}
|
return false;
}
|
conditional_block
|
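For orientation, the response handling in usuario_add() above implies two reply shapes from the clientes/add endpoint: an object message whose element '0' carries the created client, or a numeric code routed to toast(). A hedged TypeScript sketch of that contract as read from the code (these types are inferred for illustration; they are not documented anywhere in this dump):

// Inferred (not authoritative) response contract for clientes/add.
// Success: data.message is array-like; element '0' holds the new client.
type CadastroSuccess = { message: Array<{ Cliente: { id: number } }> };
// Failure: data.message is a numeric code handled by toast():
//   -2 -> user already exists, -10 -> generic error.
type CadastroFailure = { message: -2 | -10 };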
cadastro.ts
|
import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams, ToastController, Events } from 'ionic-angular';
import { AppPreferences } from '@ionic-native/app-preferences';
import { Cliente } from '../../models/cliente';
import { EnderecoPage } from '../endereco/endereco';
import { Link } from '../../models/link';
import { LogineventProvider } from '../../providers/loginevent/loginevent';
import { Http } from '@angular/http';
import 'rxjs/add/operator/map';
/**
* Generated class for the Cadastro page.
*
* See http://ionicframework.com/docs/components/#navigation for more info
* on Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-cadastro',
templateUrl: 'cadastro.html',
})
export class CadastroPage {
public data: any;
public link: Link;
nome: string = '';
email: string = '';
senha: string = '';
cliente: Cliente;
constructor(private toastCtrl: ToastController, public navCtrl: NavController, public navParams: NavParams, public http: Http, private appPreferences: AppPreferences, public loginevent: LogineventProvider)
|
ionViewDidLoad() {
}
usuario_add() {
if (this.validaCampos()) {
this.http.post(this.link.api_url + 'clientes/add', {'Cliente': {'nome': this.nome, 'email': this.email, 'senha': this.senha}})
.map(res => res.json())
.subscribe(data => {
if (typeof data.message == "object") {
this.cliente = data.message['0'];
this.appPreferences.store('key', this.cliente['Cliente']['id'].toString()).then((res) => {
this.loginevent.cadastro();
this.goToEndereco(0);
});
} else {
this.toast(data.message);
}
});
} else {
let toast = this.toastCtrl.create({
message: "Preencha os campos, por gentileza",
duration: 3000,
position: 'top'
});
toast.present();
}
}
toast(cod: Number) {
switch (cod) {
case -2:
let toast = this.toastCtrl.create({
message: "Usuário já existe, tente novamente",
duration: 3000,
position: 'top'
});
toast.present();
break;
case -10:
toast = this.toastCtrl.create({
message: "Ocorreu algum erro, tente novamente",
duration: 3000,
position: 'top'
});
toast.present();
break;
}
}
goToEndereco(id: number) {
if (id != 0) {
this.navCtrl.setRoot(EnderecoPage, {cliente: id});
} else {
this.navCtrl.setRoot(EnderecoPage, {cliente: this.cliente});
}
}
validaCampos() {
if (this.nome == "" || this.email == "" || this.senha == "") {
return false;
}
return true;
}
}
|
{
this.link = new Link();
}
|
identifier_body
|
cadastro.ts
|
import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams, ToastController, Events } from 'ionic-angular';
import { AppPreferences } from '@ionic-native/app-preferences';
import { Cliente } from '../../models/cliente';
import { EnderecoPage } from '../endereco/endereco';
import { Link } from '../../models/link';
import { LogineventProvider } from '../../providers/loginevent/loginevent';
import { Http } from '@angular/http';
import 'rxjs/add/operator/map';
/**
* Generated class for the Cadastro page.
*
* See http://ionicframework.com/docs/components/#navigation for more info
* on Ionic pages and navigation.
*/
@IonicPage()
@Component({
selector: 'page-cadastro',
templateUrl: 'cadastro.html',
})
export class CadastroPage {
public data: any;
public link: Link;
nome: string = '';
email: string = '';
senha: string = '';
cliente: Cliente;
constructor(private toastCtrl: ToastController, public navCtrl: NavController, public navParams: NavParams, public http: Http, private appPreferences: AppPreferences, public loginevent: LogineventProvider) {
this.link = new Link();
}
ionViewDidLoad() {
}
|
() {
if (this.validaCampos()) {
this.http.post(this.link.api_url + 'clientes/add', {'Cliente': {'nome': this.nome, 'email': this.email, 'senha': this.senha}})
.map(res => res.json())
.subscribe(data => {
if (typeof data.message == "object") {
this.cliente = data.message['0'];
this.appPreferences.store('key', this.cliente['Cliente']['id'].toString()).then((res) => {
this.loginevent.cadastro();
this.goToEndereco(0);
});
} else {
this.toast(data.message);
}
});
} else {
let toast = this.toastCtrl.create({
message: "Preencha os campos, por gentileza",
duration: 3000,
position: 'top'
});
toast.present();
}
}
toast(cod: Number) {
switch (cod) {
case -2:
let toast = this.toastCtrl.create({
message: "Usuário já existe, tente novamente",
duration: 3000,
position: 'top'
});
toast.present();
break;
case -10:
toast = this.toastCtrl.create({
message: "Ocorreu algum erro, tente novamente",
duration: 3000,
position: 'top'
});
toast.present();
break;
}
}
goToEndereco(id: number) {
if (id != 0) {
this.navCtrl.setRoot(EnderecoPage, {cliente: id});
} else {
this.navCtrl.setRoot(EnderecoPage, {cliente: this.cliente});
}
}
validaCampos() {
if (this.nome == "" || this.email == "" || this.senha == "") {
return false;
}
return true;
}
}
|
usuario_add
|
identifier_name
|
biscotti-auth.service.ts
|
import { Injectable } from '@angular/core';
import {
HttpClient,
HttpHeaders,
HttpHeaderResponse,
HttpErrorResponse,
HttpEventType,
HttpResponse,
} from '@angular/common/http';
import { Observable } from 'rxjs/Observable';
import { BehaviorSubject } from 'rxjs/BehaviorSubject';
import 'rxjs/add/operator/do';
import { AuthRequest } from './biscotti-auth/auth-request.type';
@Injectable()
export class
|
{
public token = new BehaviorSubject<string>('');
constructor(
private http: HttpClient
) {
}
authorize(form: AuthRequest): Observable<string> {
return this
.http
.post('/biscotti/setup/authorize', form, {
headers: new HttpHeaders({
'Content-Type': 'application/json',
'Accept': 'application/json'
}),
responseType: 'json',
observe: 'body'
})
.map(res => res['token'])
.do(tok => this.token.next(tok));
}
}
|
BiscottiAuthService
|
identifier_name
|
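The service above returns the token from authorize() and also pushes it into the token BehaviorSubject via .do(...). A hedged usage sketch (the AuthRequest fields and the injected authService instance are assumptions; neither appears in this dump):

// Hypothetical consumer; assumes Angular DI provides authService and
// that AuthRequest carries credential fields shaped like this literal.
const form = { username: 'admin', password: 'secret' } as AuthRequest;

authService.authorize(form).subscribe(token => {
  // The same token was already pushed into authService.token by .do(...).
  console.log('authorized, token =', token);
});

// Later subscribers read the most recent token from the BehaviorSubject.
authService.token.subscribe(tok => {
  // e.g. attach tok to outgoing request headers (application-specific).
});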
biscotti-auth.service.ts
|
import { Injectable } from '@angular/core';
import {
HttpClient,
HttpHeaders,
HttpHeaderResponse,
HttpErrorResponse,
HttpEventType,
HttpResponse,
} from '@angular/common/http';
import { Observable } from 'rxjs/Observable';
import { BehaviorSubject } from 'rxjs/BehaviorSubject';
import 'rxjs/add/operator/do';
import { AuthRequest } from './biscotti-auth/auth-request.type';
@Injectable()
|
) {
}
authorize(form: AuthRequest): Observable<string> {
return this
.http
.post('/biscotti/setup/authorize', form, {
headers: new HttpHeaders({
'Content-Type': 'application/json',
'Accept': 'application/json'
}),
responseType: 'json',
observe: 'body'
})
.map(res => res['token'])
.do(tok => this.token.next(tok));
}
}
|
export class BiscottiAuthService {
public token = new BehaviorSubject<string>('');
constructor(
private http: HttpClient
|
random_line_split
|
AttachmentType_Dc.js
|
/**
* Solutii Ecommerce, Automatizare, Validare si Analiza | Seava.ro
* Copyright: 2013 Nan21 Electronics SRL. All rights reserved.
* Use is subject to license terms.
*/
Ext.define("seava.ad.ui.extjs.dc.AttachmentType_Dc", {
extend: "e4e.dc.AbstractDc",
recordModel: seava.ad.ui.extjs.ds.AttachmentType_Ds
});
/* ================= FILTER FORM: Filter ================= */
Ext.define("seava.ad.ui.extjs.dc.AttachmentType_Dc$Filter", {
extend: "e4e.dc.view.AbstractDcvFilterForm",
alias: "widget.ad_AttachmentType_Dc$Filter",
/**
|
/* =========== controls =========== */
.addTextField({ name:"name", dataIndex:"name"})
.addBooleanField({ name:"active", dataIndex:"active"})
.addCombo({ xtype:"combo", name:"category", dataIndex:"category", store:[ "link", "upload"]})
/* =========== containers =========== */
.addPanel({ name:"main", autoScroll:true, layout: {type:"hbox", align:'top', pack:'start', defaultMargins: {right:5, left:5}},
autoScroll:true, padding:"0 30 5 0"})
.addPanel({ name:"col1", width:210, layout:"form"})
.addPanel({ name:"col2", width:170, layout:"form"});
},
/**
* Combine the components
*/
_linkElements_: function() {
this._getBuilder_()
.addChildrenTo("main", ["col1", "col2"])
.addChildrenTo("col1", ["name", "category"])
.addChildrenTo("col2", ["active"]);
}
});
/* ================= EDIT-GRID: EditList ================= */
Ext.define("seava.ad.ui.extjs.dc.AttachmentType_Dc$EditList", {
extend: "e4e.dc.view.AbstractDcvEditableGrid",
alias: "widget.ad_AttachmentType_Dc$EditList",
_bulkEditFields_: ["active","description","category","uploadPath","baseUrl"],
/**
* Columns definition
*/
_defineColumns_: function() {
this._getBuilder_()
.addTextColumn({name:"name", dataIndex:"name", width:200,
editor: { xtype:"textfield"}})
.addTextColumn({name:"description", dataIndex:"description", width:200,
editor: { xtype:"textfield"}})
.addComboColumn({name:"category", dataIndex:"category", width:80,
editor:{xtype:"combo", mode: 'local', triggerAction:'all', forceSelection:true, store:[ "link", "upload"]}})
.addTextColumn({name:"uploadPath", dataIndex:"uploadPath", width:200,
editor: { xtype:"textfield"}})
.addTextColumn({name:"baseUrl", dataIndex:"baseUrl", width:100,
editor: { xtype:"textfield"}})
.addBooleanColumn({name:"active", dataIndex:"active"})
.addDefaults();
}
});
|
* Components definition
*/
_defineElements_: function() {
this._getBuilder_()
|
random_line_split
|
admin.py
|
""" Django admin pages for student app """
from django import forms
from django.contrib.auth.models import User
from ratelimitbackend import admin
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from config_models.admin import ConfigurationModelAdmin
from student.models import (
UserProfile, UserTestGroup, CourseEnrollmentAllowed, DashboardConfiguration, CourseEnrollment, Registration,
PendingNameChange, CourseAccessRole, LinkedInAddToProfileConfiguration
)
from student.roles import REGISTERED_ACCESS_ROLES
class CourseAccessRoleForm(forms.ModelForm):
"""Form for adding new Course Access Roles view the Django Admin Panel."""
class Meta(object):
model = CourseAccessRole
fields = '__all__'
email = forms.EmailField(required=True)
COURSE_ACCESS_ROLES = [(role_name, role_name) for role_name in REGISTERED_ACCESS_ROLES.keys()]
role = forms.ChoiceField(choices=COURSE_ACCESS_ROLES)
def clean_course_id(self):
"""
Check the course-id format and that the course exists in the module store.
This field can be null.
"""
if self.cleaned_data['course_id']:
course_id = self.cleaned_data['course_id']
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise forms.ValidationError(u"Invalid CourseID. Please check the format and re-try.")
if not modulestore().has_course(course_key):
raise forms.ValidationError(u"Cannot find course with id {} in the modulestore".format(course_id))
return course_key
return None
def clean_org(self):
"""If org and course-id exists then Check organization name
against the given course.
"""
if self.cleaned_data.get('course_id') and self.cleaned_data['org']:
org = self.cleaned_data['org']
org_name = self.cleaned_data.get('course_id').org
if org.lower() != org_name.lower():
raise forms.ValidationError(
u"Org name {} is not valid. Valid name is {}.".format(
org, org_name
)
)
return self.cleaned_data['org']
def clean_email(self):
"""
Checking user object against given email id.
"""
email = self.cleaned_data['email']
try:
user = User.objects.get(email=email)
except Exception:
raise forms.ValidationError(
u"Email does not exist. Could not find {email}. Please re-enter email address".format(
email=email
)
)
return user
def clean(self):
"""
Check whether an identical record already exists in the db.
"""
cleaned_data = super(CourseAccessRoleForm, self).clean()
if not self.errors:
if CourseAccessRole.objects.filter(
user=cleaned_data.get("email"),
org=cleaned_data.get("org"),
course_id=cleaned_data.get("course_id"),
role=cleaned_data.get("role")
).exists():
raise forms.ValidationError("Duplicate Record.")
return cleaned_data
def __init__(self, *args, **kwargs):
super(CourseAccessRoleForm, self).__init__(*args, **kwargs)
if self.instance.user_id:
self.fields['email'].initial = self.instance.user.email
class CourseAccessRoleAdmin(admin.ModelAdmin):
"""Admin panel for the Course Access Role. """
form = CourseAccessRoleForm
raw_id_fields = ("user",)
exclude = ("user",)
fieldsets = (
(None, {
'fields': ('email', 'course_id', 'org', 'role',)
}),
)
list_display = (
'id', 'user', 'org', 'course_id', 'role',
)
search_fields = (
'id', 'user__username', 'user__email', 'org', 'course_id', 'role',
)
def save_model(self, request, obj, form, change):
obj.user = form.cleaned_data['email']
super(CourseAccessRoleAdmin, self).save_model(request, obj, form, change)
class LinkedInAddToProfileConfigurationAdmin(admin.ModelAdmin):
"""Admin interface for the LinkedIn Add to Profile configuration. """
class Meta(object):
model = LinkedInAddToProfileConfiguration
# Exclude deprecated fields
exclude = ('dashboard_tracking_code',)
class CourseEnrollmentAdmin(admin.ModelAdmin):
""" Admin interface for the CourseEnrollment model. """
list_display = ('id', 'course_id', 'mode', 'user', 'is_active',)
list_filter = ('mode', 'is_active',)
|
return super(CourseEnrollmentAdmin, self).queryset(request).select_related('user')
class Meta(object):
model = CourseEnrollment
class UserProfileAdmin(admin.ModelAdmin):
""" Admin interface for UserProfile model. """
list_display = ('user', 'name',)
raw_id_fields = ('user',)
search_fields = ('user__username', 'user__first_name', 'user__last_name', 'user__email', 'name',)
def get_readonly_fields(self, request, obj=None):
# The user field should not be editable for an existing user profile.
if obj:
return self.readonly_fields + ('user',)
return self.readonly_fields
class Meta(object):
model = UserProfile
admin.site.register(UserTestGroup)
admin.site.register(CourseEnrollmentAllowed)
admin.site.register(Registration)
admin.site.register(PendingNameChange)
admin.site.register(CourseAccessRole, CourseAccessRoleAdmin)
admin.site.register(DashboardConfiguration, ConfigurationModelAdmin)
admin.site.register(LinkedInAddToProfileConfiguration, LinkedInAddToProfileConfigurationAdmin)
admin.site.register(CourseEnrollment, CourseEnrollmentAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
|
raw_id_fields = ('user',)
search_fields = ('course_id', 'mode', 'user__username',)
def queryset(self, request):
|
random_line_split
|
admin.py
|
""" Django admin pages for student app """
from django import forms
from django.contrib.auth.models import User
from ratelimitbackend import admin
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from config_models.admin import ConfigurationModelAdmin
from student.models import (
UserProfile, UserTestGroup, CourseEnrollmentAllowed, DashboardConfiguration, CourseEnrollment, Registration,
PendingNameChange, CourseAccessRole, LinkedInAddToProfileConfiguration
)
from student.roles import REGISTERED_ACCESS_ROLES
class CourseAccessRoleForm(forms.ModelForm):
"""Form for adding new Course Access Roles view the Django Admin Panel."""
class
|
(object):
model = CourseAccessRole
fields = '__all__'
email = forms.EmailField(required=True)
COURSE_ACCESS_ROLES = [(role_name, role_name) for role_name in REGISTERED_ACCESS_ROLES.keys()]
role = forms.ChoiceField(choices=COURSE_ACCESS_ROLES)
def clean_course_id(self):
"""
Check the course-id format and that the course exists in the module store.
This field can be null.
"""
if self.cleaned_data['course_id']:
course_id = self.cleaned_data['course_id']
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise forms.ValidationError(u"Invalid CourseID. Please check the format and re-try.")
if not modulestore().has_course(course_key):
raise forms.ValidationError(u"Cannot find course with id {} in the modulestore".format(course_id))
return course_key
return None
def clean_org(self):
"""If org and course-id exists then Check organization name
against the given course.
"""
if self.cleaned_data.get('course_id') and self.cleaned_data['org']:
org = self.cleaned_data['org']
org_name = self.cleaned_data.get('course_id').org
if org.lower() != org_name.lower():
raise forms.ValidationError(
u"Org name {} is not valid. Valid name is {}.".format(
org, org_name
)
)
return self.cleaned_data['org']
def clean_email(self):
"""
Checking user object against given email id.
"""
email = self.cleaned_data['email']
try:
user = User.objects.get(email=email)
except Exception:
raise forms.ValidationError(
u"Email does not exist. Could not find {email}. Please re-enter email address".format(
email=email
)
)
return user
def clean(self):
"""
Check whether an identical record already exists in the db.
"""
cleaned_data = super(CourseAccessRoleForm, self).clean()
if not self.errors:
if CourseAccessRole.objects.filter(
user=cleaned_data.get("email"),
org=cleaned_data.get("org"),
course_id=cleaned_data.get("course_id"),
role=cleaned_data.get("role")
).exists():
raise forms.ValidationError("Duplicate Record.")
return cleaned_data
def __init__(self, *args, **kwargs):
super(CourseAccessRoleForm, self).__init__(*args, **kwargs)
if self.instance.user_id:
self.fields['email'].initial = self.instance.user.email
class CourseAccessRoleAdmin(admin.ModelAdmin):
"""Admin panel for the Course Access Role. """
form = CourseAccessRoleForm
raw_id_fields = ("user",)
exclude = ("user",)
fieldsets = (
(None, {
'fields': ('email', 'course_id', 'org', 'role',)
}),
)
list_display = (
'id', 'user', 'org', 'course_id', 'role',
)
search_fields = (
'id', 'user__username', 'user__email', 'org', 'course_id', 'role',
)
def save_model(self, request, obj, form, change):
obj.user = form.cleaned_data['email']
super(CourseAccessRoleAdmin, self).save_model(request, obj, form, change)
class LinkedInAddToProfileConfigurationAdmin(admin.ModelAdmin):
"""Admin interface for the LinkedIn Add to Profile configuration. """
class Meta(object):
model = LinkedInAddToProfileConfiguration
# Exclude deprecated fields
exclude = ('dashboard_tracking_code',)
class CourseEnrollmentAdmin(admin.ModelAdmin):
""" Admin interface for the CourseEnrollment model. """
list_display = ('id', 'course_id', 'mode', 'user', 'is_active',)
list_filter = ('mode', 'is_active',)
raw_id_fields = ('user',)
search_fields = ('course_id', 'mode', 'user__username',)
def queryset(self, request):
return super(CourseEnrollmentAdmin, self).queryset(request).select_related('user')
class Meta(object):
model = CourseEnrollment
class UserProfileAdmin(admin.ModelAdmin):
""" Admin interface for UserProfile model. """
list_display = ('user', 'name',)
raw_id_fields = ('user',)
search_fields = ('user__username', 'user__first_name', 'user__last_name', 'user__email', 'name',)
def get_readonly_fields(self, request, obj=None):
# The user field should not be editable for an existing user profile.
if obj:
return self.readonly_fields + ('user',)
return self.readonly_fields
class Meta(object):
model = UserProfile
admin.site.register(UserTestGroup)
admin.site.register(CourseEnrollmentAllowed)
admin.site.register(Registration)
admin.site.register(PendingNameChange)
admin.site.register(CourseAccessRole, CourseAccessRoleAdmin)
admin.site.register(DashboardConfiguration, ConfigurationModelAdmin)
admin.site.register(LinkedInAddToProfileConfiguration, LinkedInAddToProfileConfigurationAdmin)
admin.site.register(CourseEnrollment, CourseEnrollmentAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
|
Meta
|
identifier_name
|
admin.py
|
""" Django admin pages for student app """
from django import forms
from django.contrib.auth.models import User
from ratelimitbackend import admin
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from config_models.admin import ConfigurationModelAdmin
from student.models import (
UserProfile, UserTestGroup, CourseEnrollmentAllowed, DashboardConfiguration, CourseEnrollment, Registration,
PendingNameChange, CourseAccessRole, LinkedInAddToProfileConfiguration
)
from student.roles import REGISTERED_ACCESS_ROLES
class CourseAccessRoleForm(forms.ModelForm):
"""Form for adding new Course Access Roles view the Django Admin Panel."""
class Meta(object):
model = CourseAccessRole
fields = '__all__'
email = forms.EmailField(required=True)
COURSE_ACCESS_ROLES = [(role_name, role_name) for role_name in REGISTERED_ACCESS_ROLES.keys()]
role = forms.ChoiceField(choices=COURSE_ACCESS_ROLES)
def clean_course_id(self):
|
def clean_org(self):
"""If org and course-id exists then Check organization name
against the given course.
"""
if self.cleaned_data.get('course_id') and self.cleaned_data['org']:
org = self.cleaned_data['org']
org_name = self.cleaned_data.get('course_id').org
if org.lower() != org_name.lower():
raise forms.ValidationError(
u"Org name {} is not valid. Valid name is {}.".format(
org, org_name
)
)
return self.cleaned_data['org']
def clean_email(self):
"""
Checking user object against given email id.
"""
email = self.cleaned_data['email']
try:
user = User.objects.get(email=email)
except Exception:
raise forms.ValidationError(
u"Email does not exist. Could not find {email}. Please re-enter email address".format(
email=email
)
)
return user
def clean(self):
"""
Check whether an identical record already exists in the db.
"""
cleaned_data = super(CourseAccessRoleForm, self).clean()
if not self.errors:
if CourseAccessRole.objects.filter(
user=cleaned_data.get("email"),
org=cleaned_data.get("org"),
course_id=cleaned_data.get("course_id"),
role=cleaned_data.get("role")
).exists():
raise forms.ValidationError("Duplicate Record.")
return cleaned_data
def __init__(self, *args, **kwargs):
super(CourseAccessRoleForm, self).__init__(*args, **kwargs)
if self.instance.user_id:
self.fields['email'].initial = self.instance.user.email
class CourseAccessRoleAdmin(admin.ModelAdmin):
"""Admin panel for the Course Access Role. """
form = CourseAccessRoleForm
raw_id_fields = ("user",)
exclude = ("user",)
fieldsets = (
(None, {
'fields': ('email', 'course_id', 'org', 'role',)
}),
)
list_display = (
'id', 'user', 'org', 'course_id', 'role',
)
search_fields = (
'id', 'user__username', 'user__email', 'org', 'course_id', 'role',
)
def save_model(self, request, obj, form, change):
obj.user = form.cleaned_data['email']
super(CourseAccessRoleAdmin, self).save_model(request, obj, form, change)
class LinkedInAddToProfileConfigurationAdmin(admin.ModelAdmin):
"""Admin interface for the LinkedIn Add to Profile configuration. """
class Meta(object):
model = LinkedInAddToProfileConfiguration
# Exclude deprecated fields
exclude = ('dashboard_tracking_code',)
class CourseEnrollmentAdmin(admin.ModelAdmin):
""" Admin interface for the CourseEnrollment model. """
list_display = ('id', 'course_id', 'mode', 'user', 'is_active',)
list_filter = ('mode', 'is_active',)
raw_id_fields = ('user',)
search_fields = ('course_id', 'mode', 'user__username',)
def queryset(self, request):
return super(CourseEnrollmentAdmin, self).queryset(request).select_related('user')
class Meta(object):
model = CourseEnrollment
class UserProfileAdmin(admin.ModelAdmin):
""" Admin interface for UserProfile model. """
list_display = ('user', 'name',)
raw_id_fields = ('user',)
search_fields = ('user__username', 'user__first_name', 'user__last_name', 'user__email', 'name',)
def get_readonly_fields(self, request, obj=None):
# The user field should not be editable for an existing user profile.
if obj:
return self.readonly_fields + ('user',)
return self.readonly_fields
class Meta(object):
model = UserProfile
admin.site.register(UserTestGroup)
admin.site.register(CourseEnrollmentAllowed)
admin.site.register(Registration)
admin.site.register(PendingNameChange)
admin.site.register(CourseAccessRole, CourseAccessRoleAdmin)
admin.site.register(DashboardConfiguration, ConfigurationModelAdmin)
admin.site.register(LinkedInAddToProfileConfiguration, LinkedInAddToProfileConfigurationAdmin)
admin.site.register(CourseEnrollment, CourseEnrollmentAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
|
"""
Check the course-id format and that the course exists in the module store.
This field can be null.
"""
if self.cleaned_data['course_id']:
course_id = self.cleaned_data['course_id']
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise forms.ValidationError(u"Invalid CourseID. Please check the format and re-try.")
if not modulestore().has_course(course_key):
raise forms.ValidationError(u"Cannot find course with id {} in the modulestore".format(course_id))
return course_key
return None
|
identifier_body
|
admin.py
|
""" Django admin pages for student app """
from django import forms
from django.contrib.auth.models import User
from ratelimitbackend import admin
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from config_models.admin import ConfigurationModelAdmin
from student.models import (
UserProfile, UserTestGroup, CourseEnrollmentAllowed, DashboardConfiguration, CourseEnrollment, Registration,
PendingNameChange, CourseAccessRole, LinkedInAddToProfileConfiguration
)
from student.roles import REGISTERED_ACCESS_ROLES
class CourseAccessRoleForm(forms.ModelForm):
"""Form for adding new Course Access Roles view the Django Admin Panel."""
class Meta(object):
model = CourseAccessRole
fields = '__all__'
email = forms.EmailField(required=True)
COURSE_ACCESS_ROLES = [(role_name, role_name) for role_name in REGISTERED_ACCESS_ROLES.keys()]
role = forms.ChoiceField(choices=COURSE_ACCESS_ROLES)
def clean_course_id(self):
"""
Check the course-id format and that the course exists in the module store.
This field can be null.
"""
if self.cleaned_data['course_id']:
course_id = self.cleaned_data['course_id']
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
raise forms.ValidationError(u"Invalid CourseID. Please check the format and re-try.")
if not modulestore().has_course(course_key):
raise forms.ValidationError(u"Cannot find course with id {} in the modulestore".format(course_id))
return course_key
return None
def clean_org(self):
"""If org and course-id exists then Check organization name
against the given course.
"""
if self.cleaned_data.get('course_id') and self.cleaned_data['org']:
org = self.cleaned_data['org']
org_name = self.cleaned_data.get('course_id').org
if org.lower() != org_name.lower():
raise forms.ValidationError(
u"Org name {} is not valid. Valid name is {}.".format(
org, org_name
)
)
return self.cleaned_data['org']
def clean_email(self):
"""
Checking user object against given email id.
"""
email = self.cleaned_data['email']
try:
user = User.objects.get(email=email)
except Exception:
raise forms.ValidationError(
u"Email does not exist. Could not find {email}. Please re-enter email address".format(
email=email
)
)
return user
def clean(self):
"""
Check whether an identical record already exists in the db.
"""
cleaned_data = super(CourseAccessRoleForm, self).clean()
if not self.errors:
if CourseAccessRole.objects.filter(
user=cleaned_data.get("email"),
org=cleaned_data.get("org"),
course_id=cleaned_data.get("course_id"),
role=cleaned_data.get("role")
).exists():
raise forms.ValidationError("Duplicate Record.")
return cleaned_data
def __init__(self, *args, **kwargs):
super(CourseAccessRoleForm, self).__init__(*args, **kwargs)
if self.instance.user_id:
|
class CourseAccessRoleAdmin(admin.ModelAdmin):
"""Admin panel for the Course Access Role. """
form = CourseAccessRoleForm
raw_id_fields = ("user",)
exclude = ("user",)
fieldsets = (
(None, {
'fields': ('email', 'course_id', 'org', 'role',)
}),
)
list_display = (
'id', 'user', 'org', 'course_id', 'role',
)
search_fields = (
'id', 'user__username', 'user__email', 'org', 'course_id', 'role',
)
def save_model(self, request, obj, form, change):
obj.user = form.cleaned_data['email']
super(CourseAccessRoleAdmin, self).save_model(request, obj, form, change)
class LinkedInAddToProfileConfigurationAdmin(admin.ModelAdmin):
"""Admin interface for the LinkedIn Add to Profile configuration. """
class Meta(object):
model = LinkedInAddToProfileConfiguration
# Exclude deprecated fields
exclude = ('dashboard_tracking_code',)
class CourseEnrollmentAdmin(admin.ModelAdmin):
""" Admin interface for the CourseEnrollment model. """
list_display = ('id', 'course_id', 'mode', 'user', 'is_active',)
list_filter = ('mode', 'is_active',)
raw_id_fields = ('user',)
search_fields = ('course_id', 'mode', 'user__username',)
def queryset(self, request):
return super(CourseEnrollmentAdmin, self).queryset(request).select_related('user')
class Meta(object):
model = CourseEnrollment
class UserProfileAdmin(admin.ModelAdmin):
""" Admin interface for UserProfile model. """
list_display = ('user', 'name',)
raw_id_fields = ('user',)
search_fields = ('user__username', 'user__first_name', 'user__last_name', 'user__email', 'name',)
def get_readonly_fields(self, request, obj=None):
# The user field should not be editable for an existing user profile.
if obj:
return self.readonly_fields + ('user',)
return self.readonly_fields
class Meta(object):
model = UserProfile
admin.site.register(UserTestGroup)
admin.site.register(CourseEnrollmentAllowed)
admin.site.register(Registration)
admin.site.register(PendingNameChange)
admin.site.register(CourseAccessRole, CourseAccessRoleAdmin)
admin.site.register(DashboardConfiguration, ConfigurationModelAdmin)
admin.site.register(LinkedInAddToProfileConfiguration, LinkedInAddToProfileConfigurationAdmin)
admin.site.register(CourseEnrollment, CourseEnrollmentAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
|
self.fields['email'].initial = self.instance.user.email
|
conditional_block
|
freqs.rs
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// NOTE: The following code was generated by "scripts/frequencies.py", do not
// edit directly
pub const BYTE_FREQUENCIES: [u8; 256] = [
55, // '\x00'
52, // '\x01'
51, // '\x02'
50, // '\x03'
49, // '\x04'
48, // '\x05'
47, // '\x06'
46, // '\x07'
45, // '\x08'
103, // '\t'
242, // '\n'
66, // '\x0b'
67, // '\x0c'
229, // '\r'
44, // '\x0e'
43, // '\x0f'
42, // '\x10'
41, // '\x11'
40, // '\x12'
39, // '\x13'
38, // '\x14'
37, // '\x15'
36, // '\x16'
35, // '\x17'
34, // '\x18'
33, // '\x19'
56, // '\x1a'
32, // '\x1b'
31, // '\x1c'
30, // '\x1d'
29, // '\x1e'
28, // '\x1f'
255, // ' '
148, // '!'
164, // '"'
149, // '#'
136, // '$'
160, // '%'
155, // '&'
173, // "'"
221, // '('
222, // ')'
134, // '*'
122, // '+'
232, // ','
202, // '-'
215, // '.'
224, // '/'
208, // '0'
220, // '1'
204, // '2'
187, // '3'
183, // '4'
179, // '5'
177, // '6'
168, // '7'
178, // '8'
200, // '9'
226, // ':'
195, // ';'
154, // '<'
184, // '='
174, // '>'
126, // '?'
120, // '@'
191, // 'A'
157, // 'B'
194, // 'C'
170, // 'D'
189, // 'E'
162, // 'F'
161, // 'G'
150, // 'H'
193, // 'I'
142, // 'J'
137, // 'K'
171, // 'L'
176, // 'M'
185, // 'N'
167, // 'O'
186, // 'P'
112, // 'Q'
175, // 'R'
192, // 'S'
188, // 'T'
156, // 'U'
140, // 'V'
143, // 'W'
123, // 'X'
133, // 'Y'
128, // 'Z'
147, // '['
138, // '\\'
146, // ']'
114, // '^'
223, // '_'
151, // '`'
249, // 'a'
216, // 'b'
238, // 'c'
236, // 'd'
253, // 'e'
227, // 'f'
218, // 'g'
230, // 'h'
247, // 'i'
135, // 'j'
180, // 'k'
241, // 'l'
233, // 'm'
246, // 'n'
244, // 'o'
|
235, // 'u'
201, // 'v'
196, // 'w'
240, // 'x'
214, // 'y'
152, // 'z'
182, // '{'
205, // '|'
181, // '}'
127, // '~'
27, // '\x7f'
212, // '\x80'
211, // '\x81'
210, // '\x82'
213, // '\x83'
228, // '\x84'
197, // '\x85'
169, // '\x86'
159, // '\x87'
131, // '\x88'
172, // '\x89'
105, // '\x8a'
80, // '\x8b'
98, // '\x8c'
96, // '\x8d'
97, // '\x8e'
81, // '\x8f'
207, // '\x90'
145, // '\x91'
116, // '\x92'
115, // '\x93'
144, // '\x94'
130, // '\x95'
153, // '\x96'
121, // '\x97'
107, // '\x98'
132, // '\x99'
109, // '\x9a'
110, // '\x9b'
124, // '\x9c'
111, // '\x9d'
82, // '\x9e'
108, // '\x9f'
118, // '\xa0'
141, // '¡'
113, // '¢'
129, // '£'
119, // '¤'
125, // '¥'
165, // '¦'
117, // '§'
92, // '¨'
106, // '©'
83, // 'ª'
72, // '«'
99, // '¬'
93, // '\xad'
65, // '®'
79, // '¯'
166, // '°'
237, // '±'
163, // '²'
199, // '³'
190, // '´'
225, // 'µ'
209, // '¶'
203, // '·'
198, // '¸'
217, // '¹'
219, // 'º'
206, // '»'
234, // '¼'
248, // '½'
158, // '¾'
239, // '¿'
255, // 'À'
255, // 'Á'
255, // 'Â'
255, // 'Ã'
255, // 'Ä'
255, // 'Å'
255, // 'Æ'
255, // 'Ç'
255, // 'È'
255, // 'É'
255, // 'Ê'
255, // 'Ë'
255, // 'Ì'
255, // 'Í'
255, // 'Î'
255, // 'Ï'
255, // 'Ð'
255, // 'Ñ'
255, // 'Ò'
255, // 'Ó'
255, // 'Ô'
255, // 'Õ'
255, // 'Ö'
255, // '×'
255, // 'Ø'
255, // 'Ù'
255, // 'Ú'
255, // 'Û'
255, // 'Ü'
255, // 'Ý'
255, // 'Þ'
255, // 'ß'
255, // 'à'
255, // 'á'
255, // 'â'
255, // 'ã'
255, // 'ä'
255, // 'å'
255, // 'æ'
255, // 'ç'
255, // 'è'
255, // 'é'
255, // 'ê'
255, // 'ë'
255, // 'ì'
255, // 'í'
255, // 'î'
255, // 'ï'
255, // 'ð'
255, // 'ñ'
255, // 'ò'
255, // 'ó'
255, // 'ô'
255, // 'õ'
255, // 'ö'
255, // '÷'
255, // 'ø'
255, // 'ù'
255, // 'ú'
255, // 'û'
255, // 'ü'
255, // 'ý'
255, // 'þ'
255, // 'ÿ'
];
|
231, // 'p'
139, // 'q'
245, // 'r'
243, // 's'
251, // 't'
|
random_line_split
|
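The table above ranks bytes by how common they are in typical haystacks (higher means more frequent, with ' ' and the high bytes pinned at 255). Tables like this are commonly used by substring searchers to pick the rarest byte of a needle to skip on; a hedged sketch of that selection, in TypeScript for consistency with the other examples here (this use case is an assumption, not stated in the file):

// Hedged sketch: choose the needle byte that is least frequent according
// to a 256-entry frequency table, so a scanner can test that byte first.
function rarestByteIndex(needle: Uint8Array, freqs: Uint8Array): number {
  let best = 0;
  for (let i = 1; i < needle.length; i++) {
    // A lower frequency rank means fewer false-positive candidate positions.
    if (freqs[needle[i]] < freqs[needle[best]]) best = i;
  }
  return best;
}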
tsxStatelessFunctionComponentOverload5.tsx
|
// @filename: file.tsx
// @jsx: preserve
// @module: amd
// @noLib: true
// @libFiles: react.d.ts,lib.d.ts
import React = require('react')
export interface ClickableProps {
children?: string;
className?: string;
}
export interface ButtonProps extends ClickableProps {
onClick: React.MouseEventHandler<any>;
}
export interface LinkProps extends ClickableProps {
to: string;
}
export interface HyphenProps extends ClickableProps {
"data-format": string;
}
let obj0 = {
to: "world"
};
let obj1 = {
children: "hi",
to: "boo"
}
let obj2 = {
onClick: ()=>{}
}
let obj3: any;
export function MainButton(buttonProps: ButtonProps): JSX.Element;
export function MainButton(linkProps: LinkProps): JSX.Element;
export function MainButton(hyphenProps: HyphenProps): JSX.Element;
export function MainButton(props: ButtonProps | LinkProps | HyphenProps): JSX.Element {
const linkProps = props as LinkProps;
if(linkProps.to) {
return this._buildMainLink(props);
}
return this._buildMainButton(props);
}
// Error
const b0 = <MainButton to='/some/path' onClick={(e)=>{}}>GO</MainButton>; // extra property;
const b1 = <MainButton onClick={(e: any)=> {}} {...obj0}>Hello world</MainButton>; // extra property;
const b2 = <MainButton {...{to: "10000"}} {...obj2} />; // extra property
const b3 = <MainButton {...{to: "10000"}} {...{onClick: (k) => {}}} />; // extra property
const b4 = <MainButton {...obj3} to />; // Should error because Incorrect type; but attributes are any so everything is allowed
const b5 = <MainButton {...{ onClick(e: any) { } }} {...obj0} />; // Spread retains method declarations (see GitHub #13365), so now there is an extra attribute
const b6 = <MainButton {...{ onClick(e: any){} }} children={10} />; // incorrect type for optional attribute
const b7 = <MainButton {...{
|
(e: any){} }} children="hello" className />; // incorrect type for optional attribute
const b8 = <MainButton data-format />; // incorrect type for specified hyphenated name
|
onClick
|
identifier_name
|
tsxStatelessFunctionComponentOverload5.tsx
|
// @filename: file.tsx
// @jsx: preserve
// @module: amd
// @noLib: true
// @libFiles: react.d.ts,lib.d.ts
import React = require('react')
export interface ClickableProps {
children?: string;
className?: string;
}
export interface ButtonProps extends ClickableProps {
onClick: React.MouseEventHandler<any>;
}
export interface LinkProps extends ClickableProps {
to: string;
}
export interface HyphenProps extends ClickableProps {
"data-format": string;
}
let obj0 = {
to: "world"
};
let obj1 = {
children: "hi",
to: "boo"
}
let obj2 = {
onClick: ()=>{}
}
let obj3: any;
export function MainButton(buttonProps: ButtonProps): JSX.Element;
export function MainButton(linkProps: LinkProps): JSX.Element;
export function MainButton(hyphenProps: HyphenProps): JSX.Element;
export function MainButton(props: ButtonProps | LinkProps | HyphenProps): JSX.Element {
const linkProps = props as LinkProps;
if(linkProps.to)
|
return this._buildMainButton(props);
}
// Error
const b0 = <MainButton to='/some/path' onClick={(e)=>{}}>GO</MainButton>; // extra property;
const b1 = <MainButton onClick={(e: any)=> {}} {...obj0}>Hello world</MainButton>; // extra property;
const b2 = <MainButton {...{to: "10000"}} {...obj2} />; // extra property
const b3 = <MainButton {...{to: "10000"}} {...{onClick: (k) => {}}} />; // extra property
const b4 = <MainButton {...obj3} to />; // Should error because Incorrect type; but attributes are any so everything is allowed
const b5 = <MainButton {...{ onClick(e: any) { } }} {...obj0} />; // Spread retains method declarations (see GitHub #13365), so now there is an extra attribute
const b6 = <MainButton {...{ onClick(e: any){} }} children={10} />; // incorrect type for optional attribute
const b7 = <MainButton {...{ onClick(e: any){} }} children="hello" className />; // incorrect type for optional attribute
const b8 = <MainButton data-format />; // incorrect type for specified hyphenated name
|
{
return this._buildMainLink(props);
}
|
conditional_block
|
tsxStatelessFunctionComponentOverload5.tsx
|
// @filename: file.tsx
// @jsx: preserve
// @module: amd
// @noLib: true
// @libFiles: react.d.ts,lib.d.ts
import React = require('react')
export interface ClickableProps {
children?: string;
className?: string;
}
export interface ButtonProps extends ClickableProps {
onClick: React.MouseEventHandler<any>;
}
export interface LinkProps extends ClickableProps {
to: string;
}
export interface HyphenProps extends ClickableProps {
"data-format": string;
}
let obj0 = {
to: "world"
};
let obj1 = {
children: "hi",
to: "boo"
}
let obj2 = {
onClick: ()=>{}
}
let obj3: any;
export function MainButton(buttonProps: ButtonProps): JSX.Element;
export function MainButton(linkProps: LinkProps): JSX.Element;
export function MainButton(hyphenProps: HyphenProps): JSX.Element;
export function MainButton(props: ButtonProps | LinkProps | HyphenProps): JSX.Element {
const linkProps = props as LinkProps;
if(linkProps.to) {
return this._buildMainLink(props);
}
return this._buildMainButton(props);
}
// Error
const b0 = <MainButton to='/some/path' onClick={(e)=>{}}>GO</MainButton>; // extra property;
const b1 = <MainButton onClick={(e: any)=> {}} {...obj0}>Hello world</MainButton>; // extra property;
const b2 = <MainButton {...{to: "10000"}} {...obj2} />; // extra property
const b3 = <MainButton {...{to: "10000"}} {...{onClick: (k) => {}}} />; // extra property
const b4 = <MainButton {...obj3} to />; // Should error because Incorrect type; but attributes are any so everything is allowed
const b5 = <MainButton {...{ onClick(e: any) { } }} {...obj0} />; // Spread retains method declarations (see GitHub #13365), so now there is an extra attribute
const b6 = <MainButton {...{ onClick(e: any){} }} children={10} />; // incorrect type for optional attribute
const b7 = <MainButton {...{ onClick(e: any)
|
}} children="hello" className />; // incorrect type for optional attribute
const b8 = <MainButton data-format />; // incorrect type for specified hyphenated name
|
{}
|
identifier_body
|
tsxStatelessFunctionComponentOverload5.tsx
|
// @filename: file.tsx
// @jsx: preserve
// @module: amd
// @noLib: true
// @libFiles: react.d.ts,lib.d.ts
import React = require('react')
|
className?: string;
}
export interface ButtonProps extends ClickableProps {
onClick: React.MouseEventHandler<any>;
}
export interface LinkProps extends ClickableProps {
to: string;
}
export interface HyphenProps extends ClickableProps {
"data-format": string;
}
let obj0 = {
to: "world"
};
let obj1 = {
children: "hi",
to: "boo"
}
let obj2 = {
onClick: ()=>{}
}
let obj3: any;
export function MainButton(buttonProps: ButtonProps): JSX.Element;
export function MainButton(linkProps: LinkProps): JSX.Element;
export function MainButton(hyphenProps: HyphenProps): JSX.Element;
export function MainButton(props: ButtonProps | LinkProps | HyphenProps): JSX.Element {
const linkProps = props as LinkProps;
if(linkProps.to) {
return this._buildMainLink(props);
}
return this._buildMainButton(props);
}
// Error
const b0 = <MainButton to='/some/path' onClick={(e)=>{}}>GO</MainButton>; // extra property;
const b1 = <MainButton onClick={(e: any)=> {}} {...obj0}>Hello world</MainButton>; // extra property;
const b2 = <MainButton {...{to: "10000"}} {...obj2} />; // extra property
const b3 = <MainButton {...{to: "10000"}} {...{onClick: (k) => {}}} />; // extra property
const b4 = <MainButton {...obj3} to />; // Should error because Incorrect type; but attributes are any so everything is allowed
const b5 = <MainButton {...{ onClick(e: any) { } }} {...obj0} />; // Spread retains method declarations (see GitHub #13365), so now there is an extra attribute
const b6 = <MainButton {...{ onClick(e: any){} }} children={10} />; // incorrect type for optional attribute
const b7 = <MainButton {...{ onClick(e: any){} }} children="hello" className />; // incorrect type for optional attribute
const b8 = <MainButton data-format />; // incorrect type for specified hyphenated name
|
export interface ClickableProps {
children?: string;
|
random_line_split
|
unzipWith.ts
|
/**
* Reverse the zip process and reconstruct the original
* arrays as nested arrays. Pass in an iterator to control
* how the final array is composed.
*
* Implemented as a wrapper over the zipWith() function.
*
* NOTE: Once an iterator is specified, the function will mostly
* produce a single-dimension array, as the iterator always combines
* various elements into one and returns only one element.
*
* @since 0.0.1
* @category Array
*
* @export
* @param {any[][]} arrays
* @returns {any[]}
*/
import { FnAny } from '../constants';
import { unzip } from './unzip';
import { zip } from './zip';
import { zipWith } from './zipWith';
export function unzipWith(arrays: any[], iterator?: FnAny): any[]
|
{
// must do some of the checking here as calling
// zipWith(...arrays) is assuming arrays is of array type
// and it will throw if data type is wrong.
if (!arrays || arrays.length <= 0) return [];
if (!Array.isArray(arrays)) return [];
// Check the passed-in parameter:
// if it contains at least one array, then it will be passed with the ...
// separator to the zipWith() function. If it is not a nested array at all,
// it will be passed as-is to zipWith(), to make itself the array
// element.
// Reason: if a single-dimension array is passed to zipWith() using the ... separator,
// the result would be: [1,2,3]
// whereas passed as a single argument to zipWith(), the output would be
// [[1],[2],[3]]. The latter is the desired behavior.
let nestedArray: boolean = false;
const len: number = arrays.length;
for (let i: number = 0; i < len; i++) {
if (Array.isArray(arrays[i])) {
nestedArray = true;
break;
}
}
// use zipWith() to transform the array back to before zip
const output: any[] = nestedArray ? zipWith(...arrays, iterator) : zipWith(arrays, iterator);
return output.map((array: any[]) => {
if (Array.isArray(array)) {
// loop thru every individual array and remove null or undefined
// at the tail end of the array as this is added by zip() due to
// non uniform size of array
let { length } = array;
do length--; while (length > 0 && array[length] == null);
array.length = length + 1;
}
return array;
});
}
|
identifier_body
|
|
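Given the behavior documented above, a short usage sketch (outputs are illustrative and assume the zipWith() semantics described in the comments; they are not taken from the library's own tests):

import { unzipWith } from './unzipWith';

// With no iterator this reverses a zip: pairs regroup into columns.
unzipWith([[1, 'a'], [2, 'b'], [3, 'c']]);
// -> [[1, 2, 3], ['a', 'b', 'c']]

// With an iterator each regrouped column collapses to one value, so the
// result is mostly one-dimensional, as the NOTE above warns.
unzipWith([[1, 10], [2, 20]], (a: number, b: number) => a + b);
// -> [3, 30]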
unzipWith.ts
|
/**
* Reverse the zip process and reconstruct the original
* arrays as nested arrays. Pass in an iterator to control
* how the final array is composed.
*
* Implemented as a wrapper over the zipWith() function.
*
* NOTE: Once an iterator is specified, the function will mostly
* produce a single-dimension array, as the iterator always combines
* various elements into one and returns only one element.
*
* @since 0.0.1
* @category Array
*
* @export
* @param {any[][]} arrays
* @returns {any[]}
*/
import { FnAny } from '../constants';
import { unzip } from './unzip';
import { zip } from './zip';
import { zipWith } from './zipWith';
export function unzipWith(arrays: any[], iterator?: FnAny): any[] {
// must do some of the checking here as calling
// zipWith(...arrays) is assuming arrays is of array type
// and it will throw if data type is wrong.
if (!arrays || arrays.length <= 0) return [];
if (!Array.isArray(arrays)) return [];
// Check the passed-in parameter:
// if it contains at least one array, then it will be passed with the ...
// separator to the zipWith() function. If it is not a nested array at all,
// it will be passed as-is to zipWith(), to make itself the array
// element.
// Reason: if a single-dimension array is passed to zipWith() using the ... separator,
// the result would be: [1,2,3]
// whereas passed as a single argument to zipWith(), the output would be
// [[1],[2],[3]]. The latter is the desired behavior.
let nestedArray: boolean = false;
const len: number = arrays.length;
for (let i: number = 0; i < len; i++) {
if (Array.isArray(arrays[i])) {
nestedArray = true;
break;
}
}
// use zipWith() to transform the array back to before zip
const output: any[] = nestedArray ? zipWith(...arrays, iterator) : zipWith(arrays, iterator);
return output.map((array: any[]) => {
if (Array.isArray(array))
|
return array;
});
}
|
{
// loop thru every individual array and remove null or undefined
// at the tail end of the array as this is added by zip() due to
// non uniform size of array
let { length } = array;
do length--; while (length > 0 && array[length] == null);
array.length = length + 1;
}
|
conditional_block
|
unzipWith.ts
|
/**
* Reverse the zip process and reconstruct the original
* arrays as nested arrays. Pass in an iterator to control
* how the final array is composed.
*
* Implemented as a wrapper over the zipWith() function.
*
* NOTE: Once an iterator is specified, the function will mostly
* produce a single-dimension array, as the iterator always combines
* various elements into one and returns only one element.
*
* @since 0.0.1
* @category Array
*
* @export
* @param {any[][]} arrays
* @returns {any[]}
*/
import { FnAny } from '../constants';
import { unzip } from './unzip';
import { zip } from './zip';
import { zipWith } from './zipWith';
export function unzipWith(arrays: any[], iterator?: FnAny): any[] {
// must do some of the checking here as calling
// zipWith(...arrays) is assuming arrays is of array type
// and it will throw if data type is wrong.
if (!arrays || arrays.length <= 0) return [];
if (!Array.isArray(arrays)) return [];
// Check the passed-in parameter:
// if it contains at least one array, then it will be passed with the ...
// separator to the zipWith() function. If it is not a nested array at all,
// it will be passed as-is to zipWith(), to make itself the array
// element.
// Reason: if a single-dimension array is passed to zipWith() using the ... separator,
// the result would be: [1,2,3]
// whereas passed as a single argument to zipWith(), the output would be
// [[1],[2],[3]]. The latter is the desired behavior.
let nestedArray: boolean = false;
const len: number = arrays.length;
for (let i: number = 0; i < len; i++) {
if (Array.isArray(arrays[i])) {
nestedArray = true;
break;
}
}
// use zipWith() to transform the array back to before zip
const output: any[] = nestedArray ? zipWith(...arrays, iterator) : zipWith(arrays, iterator);
return output.map((array: any[]) => {
if (Array.isArray(array)) {
// loop thru every individual array and remove null or undefined
// at the tail end of the array as this is added by zip() due to
// non uniform size of array
let { length } = array;
do length--; while (length > 0 && array[length] == null);
array.length = length + 1;
}
return array;
|
});
}
|
random_line_split
|
|
unzipWith.ts
|
/**
* Reverse the zip process and reconstruct the original
* arrays as nested arrays. Pass in an iterator to control
* how the final array is composed.
*
* Implemented as a wrapper over the zipWith() function.
*
* NOTE: Once an iterator is specified, the function will mostly
* produce a single-dimension array, as the iterator always combines
* various elements into one and returns only one element.
*
* @since 0.0.1
* @category Array
*
* @export
* @param {any[][]} arrays
* @returns {any[]}
*/
import { FnAny } from '../constants';
import { unzip } from './unzip';
import { zip } from './zip';
import { zipWith } from './zipWith';
export function
|
(arrays: any[], iterator?: FnAny): any[] {
// must do some of the checking here as calling
// zipWith(...arrays) is assuming arrays is of array type
// and it will throw if data type is wrong.
if (!arrays || arrays.length <= 0) return [];
if (!Array.isArray(arrays)) return [];
// Check the passed-in parameter:
// if it contains at least one array, then it will be passed with the ...
// separator to the zipWith() function. If it is not a nested array at all,
// it will be passed as-is to zipWith(), to make itself the array
// element.
// Reason: if a single-dimension array is passed to zipWith() using the ... separator,
// the result would be: [1,2,3]
// whereas passed as a single argument to zipWith(), the output would be
// [[1],[2],[3]]. The latter is the desired behavior.
let nestedArray: boolean = false;
const len: number = arrays.length;
for (let i: number = 0; i < len; i++) {
if (Array.isArray(arrays[i])) {
nestedArray = true;
break;
}
}
// use zipWith() to transform the array back to before zip
const output: any[] = nestedArray ? zipWith(...arrays, iterator) : zipWith(arrays, iterator);
return output.map((array: any[]) => {
if (Array.isArray(array)) {
// loop thru every individual array and remove null or undefined
// at the tail end of the array as this is added by zip() due to
// non uniform size of array
let { length } = array;
do length--; while (length > 0 && array[length] == null);
array.length = length + 1;
}
return array;
});
}
|
unzipWith
|
identifier_name
|
map-bind.js
|
var map = L.map('clom_map');
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token=pk.eyJ1IjoibWFwYm94IiwiYSI6IjZjNmRjNzk3ZmE2MTcwOTEwMGY0MzU3YjUzOWFmNWZhIn0.Y8bhBaUMqFiPrDRW9hieoQ', {
maxZoom: 18,
id: 'mapbox.streets'
}).addTo(map);
function onLocationFound(e) {
var radius = e.accuracy / 2;
L.marker(e.latlng).addTo(map)
.bindPopup("You are within " + radius + " meters from this point").openPopup();
|
function onLocationError(e) {
alert(e.message);
}
map.on('locationfound', onLocationFound);
map.on('locationerror', onLocationError);
map.locate({setView: true, maxZoom: 16});
|
L.circle(e.latlng, radius).addTo(map);
}
|
random_line_split
|
map-bind.js
|
var map = L.map('clom_map');
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token=pk.eyJ1IjoibWFwYm94IiwiYSI6IjZjNmRjNzk3ZmE2MTcwOTEwMGY0MzU3YjUzOWFmNWZhIn0.Y8bhBaUMqFiPrDRW9hieoQ', {
maxZoom: 18,
id: 'mapbox.streets'
}).addTo(map);
function
|
(e) {
var radius = e.accuracy / 2;
L.marker(e.latlng).addTo(map)
.bindPopup("You are within " + radius + " meters from this point").openPopup();
L.circle(e.latlng, radius).addTo(map);
}
function onLocationError(e) {
alert(e.message);
}
map.on('locationfound', onLocationFound);
map.on('locationerror', onLocationError);
map.locate({setView: true, maxZoom: 16});
|
onLocationFound
|
identifier_name
|
map-bind.js
|
var map = L.map('clom_map');
L.tileLayer('https://api.tiles.mapbox.com/v4/{id}/{z}/{x}/{y}.png?access_token=pk.eyJ1IjoibWFwYm94IiwiYSI6IjZjNmRjNzk3ZmE2MTcwOTEwMGY0MzU3YjUzOWFmNWZhIn0.Y8bhBaUMqFiPrDRW9hieoQ', {
maxZoom: 18,
id: 'mapbox.streets'
}).addTo(map);
function onLocationFound(e) {
var radius = e.accuracy / 2;
L.marker(e.latlng).addTo(map)
.bindPopup("You are within " + radius + " meters from this point").openPopup();
L.circle(e.latlng, radius).addTo(map);
}
function onLocationError(e)
|
map.on('locationfound', onLocationFound);
map.on('locationerror', onLocationError);
map.locate({setView: true, maxZoom: 16});
|
{
alert(e.message);
}
|
identifier_body
|
getFormValues.spec.js
|
import createGetFormValues from '../getFormValues';
import plain from '../../structure/plain';
import plainExpectations from '../../structure/plain/expectations';
import immutable from '../../structure/immutable';
import immutableExpectations from '../../structure/immutable/expectations';
import addExpectations from '../../__tests__/addExpectations';
var describeGetFormValues = function describeGetFormValues(name, structure, expect) {
var getFormValues = createGetFormValues(structure);
|
describe(name, function () {
it('should return a function', function () {
expect(getFormValues('foo')).toBeA('function');
});
it('should get the form values from state', function () {
expect(getFormValues('foo')(fromJS({
form: {
foo: {
values: {
dog: 'Snoopy',
cat: 'Garfield'
}
}
}
}))).toEqualMap({
dog: 'Snoopy',
cat: 'Garfield'
});
});
it('should use getFormState if provided', function () {
expect(getFormValues('foo', function (state) {
return getIn(state, 'someOtherSlice');
})(fromJS({
someOtherSlice: {
foo: {
values: {
dog: 'Snoopy',
cat: 'Garfield'
}
}
}
}))).toEqualMap({
dog: 'Snoopy',
cat: 'Garfield'
});
});
});
};
describeGetFormValues('getFormValues.plain', plain, addExpectations(plainExpectations));
describeGetFormValues('getFormValues.immutable', immutable, addExpectations(immutableExpectations));
|
var fromJS = structure.fromJS;
var getIn = structure.getIn;
|
random_line_split
|
utils.py
|
#!/usr/bin/env python
#
"""oocgcm.core.utils
Define various generic utility tools to be used in several submodules.
"""
import numpy as np
import xarray as xr
import dask.array as da
#
#=========================== General purpose ==================================
#
class _SliceGetter(object):
"""Class that returns the slice that is passed to __getitem__.
Example
-------
>>> getslice = _SliceGetter()
>>> getslice[100:150,300:340]
"""
def __init__(self):
pass
def
|
(self, index):
return index
returnslice = _SliceGetter()
#
#================= Applying numpy functions to dataarray =======================
#
def map_apply(func,scalararray):
"""Return a xarray dataarray with value func(scalararray.data)
Parameters
----------
func : function
Any function that works on numpy arrays such that input and output
arrays have the same shape.
scalararray : xarray.DataArray
Returns
-------
out : xarray.DataArray
Methods
-------
uses dask map_blocks without ghost cells (as opposed to map_overlap)
"""
data = scalararray.data
coords = scalararray.coords
dims = scalararray.dims
if is_daskarray(data):
_out = data.map_blocks(func)
else:
_out = func(data)
out = xr.DataArray(_out,coords,dims)
return out
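# Usage sketch (hypothetical dataarray `sst`, chunked or not):
#   squared = map_apply(np.square, sst)
# The result keeps the coords and dims of `sst`; only the values change.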
#
#============================ Testing types ==================================
#
def is_numpy(array):
"""Return True if array is a numpy array
Parameters
----------
array : array-like
array is either a numpy array, a masked array, a dask array or a xarray.
Returns
-------
test : bool
"""
test = bool( isinstance(array,np.ndarray)
+ isinstance(array,np.ma.masked_array) )
return test
def is_xarray(array):
"""Return True if array is a xarray.DataArray
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
return isinstance(array,xr.DataArray)
def is_daskarray(array):
"""Return True if array is a dask array
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
return isinstance(array,da.core.Array)
def has_chunks(array):
"""Return True if array is a xarray or a daskarray with chunks.
Parameters
----------
array : array-like
array is either a numpy array, a masked array, a dask array or a xarray.
Returns
-------
test : bool
"""
if is_xarray(array) or is_daskarray(array):
return not(array.chunks is None)
else:
return False
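# Example (assuming an xarray backed by dask): has_chunks(xr.DataArray(np.zeros((4, 4))))
# is False, while the same array after .chunk({'dim_0': 2}) returns True.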
#
#============================ Testing dataarrays ==================================
#
def _append_dataarray_extra_attrs(xarr,**extra_kwargs):
"""Update the dictionnary of attributes a xarray dataarray (xarr.attrs).
Parameters
----------
xarr : xarray.DataArray
The function will add extra arguments to xarr.attrs
**extra_kwargs
not used
Returns
-------
da : xarray.DataArray
"""
if not(is_xarray(xarr)):
raise TypeError('expected a xarray.DataArray')
for kwargs in extra_kwargs:
xarr.attrs[kwargs] = extra_kwargs[kwargs]
return xarr
def _grid_location_equals(xarr,grid_location=None):
"""Return True when the xarr grid_location attribute is grid_location
Parameters
----------
xarr : xarray.DataArray
xarray dataarray which attributes should be tested.
grid_location : str
string describing the grid location : eg 'u','v','t','f'...
Returns
-------
test : bool
boolean value of the test.
"""
test = True
if xarr.attrs.has_key('grid_location'):
test *= (xarr.attrs['grid_location']==grid_location)
return test
def _chunks_are_compatible(chunks1=None,chunks2=None,ndims=None):
"""Return True when two chunks are aligned over their common dimensions.
Parameters
----------
chunks1 : list-like of list-like object
chunks associated to a xarray data array
chunks2 : list-like of list-like object
chunks associated to a xarray data array
ndims : int
number of dimensions over which chunks should be compared.
Returns
-------
test : bool
boolean value of the test.
"""
# TODO : not clear whether to compare a priori description of chunks
# (dictionnaries) or a posteriori values (tuple of tuples).
test = True
if (chunks1 is None) or (chunks2 is None):
if (chunks1 is None) and (chunks2 is None):
return True
else:
return False
for idim in range(ndims):
test *= chunks1[-idim-1] == chunks2[-idim-1]
return test
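# Example (hypothetical chunk tuples): ((2, 2), (4,)) and ((3,), (2, 2), (4,))
# are compatible for ndims=2, because the comparison walks the trailing dimensions.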
def _assert_are_compatible_dataarrays(array1,array2):
"""Assert whether two arrays are dataarray with similar dimensions, shapes
and dask chunk (if relevant).
Parameters
----------
array1 : array-like
array2 : array-like
Returns
-------
test : bool
True if the two arrays are compatible
"""
assert(is_xarray(array1) and is_xarray(array2))
assert(array1.dims == array2.dims)
assert(array1.shape == array2.shape)
assert(has_chunks(array1) == has_chunks(array2))
if (has_chunks(array1) and has_chunks(array2)):
assert _chunks_are_compatible(array1.chunks, array2.chunks, ndims=len(array1.dims))
return True
def _assert_and_set_grid_location_attribute(xarr,grid_location=None):
"""Assert whether xarr holds an extra attribute 'grid_location' that
equals grid_location. If xarr does not have such extra-attribute, create
one with value grid_location.
Parameters
----------
xarr : xarray.DataArray
xarray dataarray that should be associated with a grid location
grid_location : str
string describing the grid location : eg 'u','v','t','f'...
"""
if xarr.attrs.has_key('grid_location'):
assert ( xarr.attrs['grid_location'] == grid_location )
else:
xarr.attrs['grid_location'] = grid_location
def check_input_array(xarr,shape=None,chunks=None,\
grid_location=None,ndims=None):
"""Return true if arr is a dataarray with expected shape, chunks at
grid_location attribute. Raise an error if one of the tests fails.
Parameters
----------
xarr : xarray.DataArray
xarray dataarray which attributes should be tested.
shape : tuple
expected shape of the xarray dataarray xarr
chunks : list-like of list-like object
expected chunks of the xarray dataarray xarr
grid_location : str
string describing the expected grid location : eg 'u','v','t','f'...
ndims : int
number of dimensions over which chunks should be compared.
Returns
-------
test : bool
boolean value of the test.
"""
if hasattr(xarr,'name'):
arrayname = xarr.name
else:
arrayname = 'array'
if not(is_xarray(xarr)):
raise TypeError(arrayname + ' is expected to be a xarray.DataArray')
if not(_chunks_are_compatible(xarr.chunks,chunks,ndims=ndims)):
raise ChunkError()
if not(_grid_location_equals(xarr,grid_location)):
raise GridLocationError()
return True
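# Usage sketch (hypothetical u-grid array `u10`):
#   check_input_array(u10, chunks=u10.chunks, grid_location='u', ndims=2)
# returns True, or raises ChunkError / GridLocationError / TypeError on failure.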
#
#========================== Minimal exceptions =================================
#
# TODO : should probably move to a dedicated file oocgcm.core.exceptions.py
#
class ChunkError(Exception):
"""Minimal exception for chunk incompatibility.
"""
def __init__(self):
Exception.__init__(self,"incompatible chunk size")
class GridLocationError(Exception):
"""Minimal exception for grid location incompatibility.
"""
def __init__(self):
Exception.__init__(self,"incompatible grid_location")
|
__getitem__
|
identifier_name
|
utils.py
|
#!/usr/bin/env python
#
"""oocgcm.core.utils
Define various generic utilities tools to be used in several submodules.
"""
import numpy as np
import xarray as xr
import dask.array as da
#
#=========================== General purpose ==================================
#
class _SliceGetter(object):
"""Class that returns the slice that is passed to __getitem__.
Example
-------
>>> getslice = _SliceGetter()
>>> getslice[100:150, 300:340]
"""
def __init__(self):
pass
def __getitem__(self, index):
return index
returnslice = _SliceGetter()
#
#================= Applying numpy functions to dataarray =======================
#
def map_apply(func,scalararray):
"""Return a xarray dataarray with value func(scalararray.data)
Parameters
----------
func : function
Any function that works on numpy arrays such that input and output
arrays have the same shape.
scalararray : xarray.DataArray
Returns
-------
out : xarray.DataArray
Methods
-------
uses dask map_blocks without ghost cells (as opposed to map_overlap)
"""
data = scalararray.data
coords = scalararray.coords
dims = scalararray.dims
if is_daskarray(data):
_out = data.map_blocks(func)
else:
_out = func(data)
out = xr.DataArray(_out,coords,dims)
return out
#
#============================ Testing types ==================================
#
def is_numpy(array):
"""Return True if array is a numpy array
Parameters
----------
array : array-like
array is either a numpy array, a masked array, a dask array or a xarray.
Returns
-------
test : bool
"""
test = bool( isinstance(array,np.ndarray)
+ isinstance(array,np.ma.masked_array) )
return test
def is_xarray(array):
"""Return True if array is a xarray.DataArray
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
return isinstance(array,xr.DataArray)
def is_daskarray(array):
"""Return True if array is a dask array
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
return isinstance(array,da.core.Array)
def has_chunks(array):
"""Return True if array is a xarray or a daskarray with chunks.
Parameters
----------
array : array-like
array is either a numpy array, a masked array, a dask array or a xarray.
Returns
-------
test : bool
"""
if is_xarray(array) or is_daskarray(array):
return not(array.chunks is None)
else:
return False
#
#============================ Testing dataarrays ==================================
#
def _append_dataarray_extra_attrs(xarr,**extra_kwargs):
"""Update the dictionnary of attributes a xarray dataarray (xarr.attrs).
Parameters
----------
xarr : xarray.DataArray
The function will add extra arguments to xarr.attrs
**extra_kwargs
not used
Returns
-------
da : xarray.DataArray
"""
if not(is_xarray(xarr)):
raise TypeError('expected a xarray.DataArray')
for kwargs in extra_kwargs:
xarr.attrs[kwargs] = extra_kwargs[kwargs]
return xarr
def _grid_location_equals(xarr,grid_location=None):
"""Return True when the xarr grid_location attribute is grid_location
Parameters
----------
xarr : xarray.DataArray
xarray dataarray which attributes should be tested.
grid_location : str
string describing the grid location : eg 'u','v','t','f'...
Returns
-------
test : bool
boolean value of the test.
"""
test = True
if xarr.attrs.has_key('grid_location'):
test *= (xarr.attrs['grid_location']==grid_location)
return test
def _chunks_are_compatible(chunks1=None,chunks2=None,ndims=None):
"""Return True when two chunks are aligned over their common dimensions.
Parameters
----------
chunks1 : list-like of list-like object
chunks associated to a xarray data array
chunks2 : list-like of list-like object
chunks associated to a xarray data array
ndims : int
number of dimensions over which chunks should be compared.
Returns
-------
test : bool
boolean value of the test.
"""
# TODO : not clear whether to compare a priori description of chunks
# (dictionnaries) or a posteriori values (tuple of tuples).
test = True
if (chunks1 is None) or (chunks2 is None):
if (chunks1 is None) and (chunks2 is None):
return True
else:
return False
for idim in range(ndims):
test *= chunks1[-idim-1] == chunks2[-idim-1]
return test
def _assert_are_compatible_dataarrays(array1,array2):
"""Assert whether two arrays are dataarray with similar dimensions, shapes
and dask chunk (if relevant).
Parameters
----------
array1 : array-like
array2 : array-like
Returns
-------
test : bool
True if the two arrays are compatible
"""
assert(is_xarray(array1) and is_xarray(array2))
assert(array1.dims == array2.dims)
assert(array1.shape == array2.shape)
assert(has_chunks(array1) == has_chunks(array2))
if (has_chunks(array1) and has_chunks(array2)):
assert _chunks_are_compatible(array1.chunks, array2.chunks, ndims=len(array1.dims))
return True
def _assert_and_set_grid_location_attribute(xarr,grid_location=None):
"""Assert whether xarr holds an extra attribute 'grid_location' that
equals grid_location. If xarr does not have such extra-attribute, create
one with value grid_location.
Parameters
----------
xarr : xarray.DataArray
xarray dataarray that should be associated with a grid location
grid_location : str
string describing the grid location : eg 'u','v','t','f'...
"""
if xarr.attrs.has_key('grid_location'):
assert ( xarr.attrs['grid_location'] == grid_location )
else:
xarr.attrs['grid_location'] = grid_location
def check_input_array(xarr,shape=None,chunks=None,\
grid_location=None,ndims=None):
"""Return true if arr is a dataarray with expected shape, chunks at
grid_location attribute. Raise an error if one of the tests fails.
Parameters
----------
xarr : xarray.DataArray
xarray dataarray which attributes should be tested.
shape : tuple
expected shape of the xarray dataarray xarr
chunks : list-like of list-like object
expected chunks of the xarray dataarray xarr
grid_location : str
string describing the expected grid location : eg 'u','v','t','f'...
ndims : int
|
Returns
-------
test : bool
boolean value of the test.
"""
if hasattr(xarr,'name'):
arrayname = xarr.name
else:
arrayname = 'array'
if not(is_xarray(xarr)):
raise TypeError(arrayname + ' is expected to be a xarray.DataArray')
if not(_chunks_are_compatible(xarr.chunks,chunks,ndims=ndims)):
raise ChunkError()
if not(_grid_location_equals(xarr,grid_location)):
raise GridLocationError()
return True
#
#========================== Minimal exceptions =================================
#
# TODO : should probably move to a dedicated file oocgcm.core.exceptions.py
#
class ChunkError(Exception):
"""Minimal exception for chunk incompatibility.
"""
def __init__(self):
Exception.__init__(self,"incompatible chunk size")
class GridLocationError(Exception):
"""Minimal exception for grid location incompatibility.
"""
def __init__(self):
Exception.__init__(self,"incompatible grid_location")
|
number of dimensions over which chunks should be compared.
|
random_line_split
|
utils.py
|
#!/usr/bin/env python
#
"""oocgcm.core.utils
Define various generic utilities tools to be used in several submodules.
"""
import numpy as np
import xarray as xr
import dask.array as da
#
#=========================== General purpose ==================================
#
class _SliceGetter(object):
"""Class that returns the slice that is passed to __getitem__.
Example
-------
>>> getslice = _SliceGetter()
>>> getslice[100:150, 300:340]
"""
def __init__(self):
pass
def __getitem__(self, index):
return index
returnslice = _SliceGetter()
#
#================= Applying numpy functions to dataarray =======================
#
def map_apply(func,scalararray):
"""Return a xarray dataarray with value func(scalararray.data)
Parameters
----------
func : function
Any function that works on numpy arrays such that input and output
arrays have the same shape.
scalararray : xarray.DataArray
Returns
-------
out : xarray.DataArray
Methods
-------
uses dask map_blocks without ghost cells (as opposed to map_overlap)
"""
data = scalararray.data
coords = scalararray.coords
dims = scalararray.dims
if is_daskarray(data):
_out = data.map_blocks(func)
else:
_out = func(data)
out = xr.DataArray(_out,coords,dims)
return out
#
#============================ Testing types ==================================
#
def is_numpy(array):
"""Return True if array is a numpy array
Parameters
----------
array : array-like
array is either a numpy array, a masked array, a dask array or a xarray.
Returns
-------
test : bool
"""
test = bool( isinstance(array,np.ndarray)
+ isinstance(array,np.ma.masked_array) )
return test
def is_xarray(array):
"""Return True if array is a xarray.DataArray
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
return isinstance(array,xr.DataArray)
def is_daskarray(array):
"""Return True if array is a dask array
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
return isinstance(array,da.core.Array)
def has_chunks(array):
"""Return True if array is a xarray or a daskarray with chunks.
Parameters
----------
array : array-like
array is either a numpy array, a masked array, a dask array or a xarray.
Returns
-------
test : bool
"""
if is_xarray(array) or is_daskarray(array):
return not(array.chunks is None)
else:
return False
#
#============================ Testing dataarrays ==================================
#
def _append_dataarray_extra_attrs(xarr,**extra_kwargs):
"""Update the dictionnary of attributes a xarray dataarray (xarr.attrs).
Parameters
----------
xarr : xarray.DataArray
The function will add extra arguments to xarr.attrs
**extra_kwargs
not used
Returns
-------
da : xarray.DataArray
"""
if not(is_xarray(xarr)):
raise TypeError('expected a xarray.DataArray')
for kwargs in extra_kwargs:
xarr.attrs[kwargs] = extra_kwargs[kwargs]
return xarr
def _grid_location_equals(xarr,grid_location=None):
"""Return True when the xarr grid_location attribute is grid_location
Parameters
----------
xarr : xarray.DataArray
xarray dataarray which attributes should be tested.
grid_location : str
string describing the grid location : eg 'u','v','t','f'...
Returns
-------
test : bool
boolean value of the test.
"""
test = True
if xarr.attrs.has_key('grid_location'):
test *= (xarr.attrs['grid_location']==grid_location)
return test
def _chunks_are_compatible(chunks1=None,chunks2=None,ndims=None):
"""Return True when two chunks are aligned over their common dimensions.
Parameters
----------
chunks1 : list-like of list-like object
chunks associated to a xarray data array
chunks2 : list-like of list-like object
chunks associated to a xarray data array
ndims : int
number of dimensions over which chunks should be compared.
Returns
-------
test : bool
boolean value of the test.
"""
# TODO : not clear whether to compare a priori description of chunks
# (dictionnaries) or a posteriori values (tuple of tuples).
test = True
if (chunks1 is None) or (chunks2 is None):
if (chunks1 is None) and (chunks2 is None):
return True
else:
return False
for idim in range(ndims):
test *= chunks1[-idim-1] == chunks2[-idim-1]
return test
def _assert_are_compatible_dataarrays(array1,array2):
"""Assert whether two arrays are dataarray with similar dimensions, shapes
and dask chunk (if relevant).
Parameters
----------
array1 : array-like
array2 : array-like
Returns
-------
test : bool
True if the two arrays are compatible
"""
assert(is_xarray(array1) and is_xarray(array2))
assert(array1.dims == array2.dims)
assert(array1.shape == array2.shape)
assert(has_chunks(array1) == has_chunks(array2))
if (has_chunks(array1) and has_chunks(array2)):
assert _chunks_are_compatible(array1.chunks, array2.chunks, ndims=len(array1.dims))
return True
def _assert_and_set_grid_location_attribute(xarr,grid_location=None):
"""Assert whether xarr holds an extra attribute 'grid_location' that
equals grid_location. If xarr does not have such extra-attribute, create
one with value grid_location.
Parameters
----------
xarr : xarray.DataArray
xarray dataarray that should be associated with a grid location
grid_location : str
string describing the grid location : eg 'u','v','t','f'...
"""
if xarr.attrs.has_key('grid_location'):
assert ( xarr.attrs['grid_location'] == grid_location )
else:
xarr.attrs['grid_location'] = grid_location
def check_input_array(xarr,shape=None,chunks=None,\
grid_location=None,ndims=None):
"""Return true if arr is a dataarray with expected shape, chunks at
grid_location attribute. Raise an error if one of the tests fails.
Parameters
----------
xarr : xarray.DataArray
xarray dataarray which attributes should be tested.
shape : tuple
expected shape of the xarray dataarray xarr
chunks : list-like of list-like object
expected chunks of the xarray dataarray xarr
grid_location : str
string describing the expected grid location : eg 'u','v','t','f'...
ndims : int
number of dimensions over which chunks should be compared.
Returns
-------
test : bool
boolean value of the test.
"""
if hasattr(xarr,'name'):
arrayname = xarr.name
else:
arrayname = 'array'
if not(is_xarray(xarr)):
raise TypeError(arrayname + ' is expected to be a xarray.DataArray')
if not(_chunks_are_compatible(xarr.chunks,chunks,ndims=ndims)):
raise ChunkError()
if not(_grid_location_equals(xarr,grid_location)):
raise GridLocationError()
return True
#
#========================== Minimal exceptions =================================
#
# TODO : should probably move to a dedicated file oocgcm.core.exceptions.py
#
class ChunkError(Exception):
"""Minimal exception for chunk incompatibility.
"""
def __init__(self):
Exception.__init__(self,"incompatible chunk size")
class GridLocationError(Exception):
|
"""Minimal exception for grid location incompatibility.
"""
def __init__(self):
Exception.__init__(self,"incompatible grid_location")
|
identifier_body
|
|
utils.py
|
#!/usr/bin/env python
#
"""oocgcm.core.utils
Define various generic utilities tools to be used in several submodules.
"""
import numpy as np
import xarray as xr
import dask.array as da
#
#=========================== General purpose ==================================
#
class _SliceGetter(object):
"""Class that returns the slice that is passed to __getitem__.
Example
-------
>>> getslice = _SliceGetter()
>>> getslice[100:150, 300:340]
"""
def __init__(self):
pass
def __getitem__(self, index):
return index
returnslice = _SliceGetter()
#
#================= Applying numpy functions to dataarray =======================
#
def map_apply(func,scalararray):
"""Return a xarray dataarray with value func(scalararray.data)
Parameters
----------
func : function
Any function that works on numpy arrays such that input and output
arrays have the same shape.
scalararray : xarray.DataArray
Returns
-------
out : xarray.DataArray
Methods
-------
uses dask map_blocks without ghost cells (as opposed to map_overlap)
"""
data = scalararray.data
coords = scalararray.coords
dims = scalararray.dims
if is_daskarray(data):
_out = data.map_blocks(func)
else:
_out = func(data)
out = xr.DataArray(_out,coords,dims)
return out
#
#============================ Testing types ==================================
#
def is_numpy(array):
"""Return True if array is a numpy array
Parameters
----------
array : array-like
array is either a numpy array, a masked array, a dask array or a xarray.
Returns
-------
test : bool
"""
test = bool( isinstance(array,np.ndarray)
+ isinstance(array,np.ma.masked_array) )
return test
def is_xarray(array):
"""Return True if array is a xarray.DataArray
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
return isinstance(array,xr.DataArray)
def is_daskarray(array):
"""Return True if array is a dask array
Parameters
----------
array : array-like
Returns
-------
test : bool
"""
return isinstance(array,da.core.Array)
def has_chunks(array):
"""Return True if array is a xarray or a daskarray with chunks.
Parameters
----------
array : array-like
array is either a numpy array, a masked array, a dask array or a xarray.
Returns
-------
test : bool
"""
if is_xarray(array) or is_daskarray(array):
return not(array.chunks is None)
else:
return False
#
#============================ Testing dataarrays ==================================
#
def _append_dataarray_extra_attrs(xarr,**extra_kwargs):
"""Update the dictionnary of attributes a xarray dataarray (xarr.attrs).
Parameters
----------
xarr : xarray.DataArray
The function will add extra arguments to xarr.attrs
**extra_kwargs
not used
Returns
-------
da : xarray.DataArray
"""
if not(is_xarray(xarr)):
|
for kwargs in extra_kwargs:
xarr.attrs[kwargs] = extra_kwargs[kwargs]
return xarr
def _grid_location_equals(xarr,grid_location=None):
"""Return True when the xarr grid_location attribute is grid_location
Parameters
----------
xarr : xarray.DataArray
xarray dataarray which attributes should be tested.
grid_location : str
string describing the grid location : eg 'u','v','t','f'...
Returns
-------
test : bool
boolean value of the test.
"""
test = True
if xarr.attrs.has_key('grid_location'):
test *= (xarr.attrs['grid_location']==grid_location)
return test
def _chunks_are_compatible(chunks1=None,chunks2=None,ndims=None):
"""Return True when two chunks are aligned over their common dimensions.
Parameters
----------
chunks1 : list-like of list-like object
chunks associated to a xarray data array
chunks2 : list-like of list-like object
chunks associated to a xarray data array
ndims : int
number of dimensions over which chunks should be compared.
Returns
-------
test : bool
boolean value of the test.
"""
# TODO : not clear whether to compare a priori description of chunks
# (dictionnaries) or a posteriori values (tuple of tuples).
test = True
if (chunks1 is None) or (chunks2 is None):
if (chunks1 is None) and (chunks2 is None):
return True
else:
return False
for idim in range(ndims):
test *= chunks1[-idim-1] == chunks2[-idim-1]
return test
def _assert_are_compatible_dataarrays(array1,array2):
"""Assert whether two arrays are dataarray with similar dimensions, shapes
and dask chunk (if relevant).
Parameters
----------
array1 : array-like
array2 : array-like
Returns
-------
test : bool
True if the two arrays are compatible
"""
assert(is_xarray(array1) and is_xarray(array2))
assert(array1.dims == array2.dims)
assert(array1.shape == array2.shape)
assert(has_chunks(array1) == has_chunks(array2))
if (has_chunks(array1) and has_chunks(array2)):
assert _chunks_are_compatible(array1.chunks, array2.chunks, ndims=len(array1.dims))
return True
def _assert_and_set_grid_location_attribute(xarr,grid_location=None):
"""Assert whether xarr holds an extra attribute 'grid_location' that
equals grid_location. If xarr does not have such extra-attribute, create
one with value grid_location.
Parameters
----------
xarr : xarray.DataArray
xarray dataarray that should be associated with a grid location
grid_location : str
string describing the grid location : eg 'u','v','t','f'...
"""
if xarr.attrs.has_key('grid_location'):
assert ( xarr.attrs['grid_location'] == grid_location )
else:
xarr.attrs['grid_location'] = grid_location
def check_input_array(xarr,shape=None,chunks=None,\
grid_location=None,ndims=None):
"""Return true if arr is a dataarray with expected shape, chunks at
grid_location attribute. Raise an error if one of the tests fails.
Parameters
----------
xarr : xarray.DataArray
xarray dataarray which attributes should be tested.
shape : tuple
expected shape of the xarray dataarray xarr
chunks : list-like of list-like object
expected chunks of the xarray dataarray xarr
grid_location : str
string describing the expected grid location : eg 'u','v','t','f'...
ndims : int
number of dimensions over which chunks should be compared.
Returns
-------
test : bool
boolean value of the test.
"""
if hasattr(xarr,'name'):
arrayname = xarr.name
else:
arrayname = 'array'
if not(is_xarray(xarr)):
raise TypeError(arrayname + ' is expected to be a xarray.DataArray')
if not(_chunks_are_compatible(xarr.chunks,chunks,ndims=ndims)):
raise ChunkError()
if not(_grid_location_equals(xarr,grid_location)):
raise GridLocationError()
return True
#
#========================== Minimal exceptions =================================
#
# TODO : should probably move to a dedicated file oocgcm.core.exceptions.py
#
class ChunkError(Exception):
"""Minimal exception for chunk incompatibility.
"""
def __init__(self):
Exception.__init__(self,"incompatible chunk size")
class GridLocationError(Exception):
"""Minimal exception for grid location incompatibility.
"""
def __init__(self):
Exception.__init__(self,"incompatible grid_location")
|
raise TypeError('expected a xarray.DataArray')
|
conditional_block
|
ph2.py
|
import json
import argparse
import numpy
import sys
import copy
from astropy.coordinates import SkyCoord
from astropy import units
import operator
class Program(object):
|
class Target(object):
def __init__(self, filename=None):
self.config = json.load(open(filename))
@property
def token(self):
return self.config["identifier"]["client_token"]
@property
def mag(self):
return self.config["moving_target"]["ephemeris_points"][0]["mag"]
@property
def coordinate(self):
return SkyCoord(self.config["moving_target"]["ephemeris_points"][0]["coordinate"]["ra"],
self.config["moving_target"]["ephemeris_points"][0]["coordinate"]["dec"],
unit='degree')
class ObservingBlock(object):
def __init__(self, client_token, target_token):
self.config = {"identifier": {"client_token": client_token},
"target_identifier": {"client_token": target_token},
"constraint_identifiers": [{"server_token": "C1"}],
"instrument_config_identifiers": [{"server_token": "I1"}]}
@property
def token(self):
return self.config["identifier"]["client_token"]
class ObservingGroup(object):
def __init__(self, client_token):
self.config = {"identifier": {"client_token": client_token},
"observing_block_identifiers": []}
def add_ob(self, client_token):
self.config["observing_block_identifiers"].append({"client_token": client_token})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ogname')
parser.add_argument('mjdates', nargs='+')
args = parser.parse_args()
# Break the mjdates into OBs based on their max mag of source in pointing.
cuts = numpy.array([23.0, 23.5, 24.0, 24.5, 25.0, 25.5, 26.0, 30.0])
IC_exptimes = [50, 100, 200, 300, 400, 500, 600, 700]
program = Program()
ob_tokens = []
mags = {}
ob_coordinate = {}
for filename in args.mjdates:
target = Target(filename)
program.add_target(target.config)
ob_token = "OB-{}-{}".format(target.token, target.mag)
ob = ObservingBlock(ob_token, target.token)
idx = (target.mag > cuts).sum() + 4
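# e.g. (hypothetical) mag 24.2 gives (mag > cuts).sum() == 3, so idx == 7 -> server_token "I7"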
ob.config["instrument_config_identifiers"] = [{"server_token": "I{}".format(idx)}]
program.add_observing_block(ob.config)
ob_tokens.append(ob_token)
mags[ob_token] = target.mag
ob_coordinate[ob_token] = target.coordinate
sf = lambda x, y: cmp(x.ra, y.ra)
order_tokens = sorted(ob_coordinate, cmp=sf, key=ob_coordinate.get)
total_itime = 0
ogs = {}
scheduled = {}
og_idx = 0
while len(scheduled) < len(ob_tokens):
og_idx += 1
og_token = "OG_{}_{}_{}".format(args.ogname, og_idx, 0)
sys.stdout.write("{}: ".format(og_token))
og = ObservingGroup(og_token)
og_coord = None
og_itime = 0
for ob_token in order_tokens:
if ob_token not in scheduled:
if og_coord is None:
og_coord = ob_coordinate[ob_token]
if ob_coordinate[ob_token].separation(og_coord) > 30 * units.degree:
continue
og.add_ob(ob_token)
scheduled[ob_token] = True
sys.stdout.write("{} ".format(ob_token))
sys.stdout.flush()
idx = (mags[ob_token] > cuts).sum()
print ob_token, mags[ob_token], idx + 4
og_itime += IC_exptimes[idx] + 40
if og_itime > 3000.0:
break
break
total_itime += og_itime
sys.stdout.write(" {}s \n".format(og_itime))
program.add_observing_group(og.config)
nrepeats = 0
for repeat in range(nrepeats):
total_itime += og_itime
og_token = "OG_{}_{}_{}".format(args.ogname, og_idx, repeat + 1)
og = copy.deepcopy(og)
og.config["identifier"]["client_token"] = og_token
program.add_observing_group(og.config)
print "Total I-Time: {} hrs".format(total_itime/3600.)
json.dump(program.config, open('program.json', 'w'), indent=4, sort_keys=True)
|
def __init__(self, runid="16BP06", pi_login="gladman"):
self.config = {"runid": runid,
"pi_login": pi_login,
"program_configuration": {"mjdates": [],
"observing_blocks": [],
"observing_groups": []
}}
def add_target(self, target):
self.config["program_configuration"]["mjdates"].append(target)
def add_observing_block(self, observing_block):
self.config["program_configuration"]["observing_blocks"].append(observing_block)
def add_observing_group(self, observing_group):
self.config["program_configuration"]["observing_groups"].append(observing_group)
|
identifier_body
|
ph2.py
|
import json
import argparse
import numpy
import sys
import copy
from astropy.coordinates import SkyCoord
from astropy import units
import operator
class Program(object):
def __init__(self, runid="16BP06", pi_login="gladman"):
self.config = {"runid": runid,
"pi_login": pi_login,
"program_configuration": {"mjdates": [],
"observing_blocks": [],
"observing_groups": []
}}
def
|
(self, target):
self.config["program_configuration"]["mjdates"].append(target)
def add_observing_block(self, observing_block):
self.config["program_configuration"]["observing_blocks"].append(observing_block)
def add_observing_group(self, observing_group):
self.config["program_configuration"]["observing_groups"].append(observing_group)
class Target(object):
def __init__(self, filename=None):
self.config = json.load(open(filename))
@property
def token(self):
return self.config["identifier"]["client_token"]
@property
def mag(self):
return self.config["moving_target"]["ephemeris_points"][0]["mag"]
@property
def coordinate(self):
return SkyCoord(self.config["moving_target"]["ephemeris_points"][0]["coordinate"]["ra"],
self.config["moving_target"]["ephemeris_points"][0]["coordinate"]["dec"],
unit='degree')
class ObservingBlock(object):
def __init__(self, client_token, target_token):
self.config = {"identifier": {"client_token": client_token},
"target_identifier": {"client_token": target_token},
"constraint_identifiers": [{"server_token": "C1"}],
"instrument_config_identifiers": [{"server_token": "I1"}]}
@property
def token(self):
return self.config["identifier"]["client_token"]
class ObservingGroup(object):
def __init__(self, client_token):
self.config = {"identifier": {"client_token": client_token},
"observing_block_identifiers": []}
def add_ob(self, client_token):
self.config["observing_block_identifiers"].append({"client_token": client_token})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ogname')
parser.add_argument('mjdates', nargs='+')
args = parser.parse_args()
# Break the mjdates into OBs based on their max mag of source in pointing.
cuts = numpy.array([23.0, 23.5, 24.0, 24.5, 25.0, 25.5, 26.0, 30.0])
IC_exptimes = [50, 100, 200, 300, 400, 500, 600, 700]
program = Program()
ob_tokens = []
mags = {}
ob_coordinate = {}
for filename in args.mjdates:
target = Target(filename)
program.add_target(target.config)
ob_token = "OB-{}-{}".format(target.token, target.mag)
ob = ObservingBlock(ob_token, target.token)
idx = (target.mag > cuts).sum() + 4
ob.config["instrument_config_identifiers"] = [{"server_token": "I{}".format(idx)}]
program.add_observing_block(ob.config)
ob_tokens.append(ob_token)
mags[ob_token] = target.mag
ob_coordinate[ob_token] = target.coordinate
sf = lambda x, y: cmp(x.ra, y.ra)
order_tokens = sorted(ob_coordinate, cmp=sf, key=ob_coordinate.get)
total_itime = 0
ogs = {}
scheduled = {}
og_idx = 0
while len(scheduled) < len(ob_tokens):
og_idx += 1
og_token = "OG_{}_{}_{}".format(args.ogname, og_idx, 0)
sys.stdout.write("{}: ".format(og_token))
og = ObservingGroup(og_token)
og_coord = None
og_itime = 0
for ob_token in order_tokens:
if ob_token not in scheduled:
if og_coord is None:
og_coord = ob_coordinate[ob_token]
if ob_coordinate[ob_token].separation(og_coord) > 30 * units.degree:
continue
og.add_ob(ob_token)
scheduled[ob_token] = True
sys.stdout.write("{} ".format(ob_token))
sys.stdout.flush()
idx = (mags[ob_token] > cuts).sum()
print ob_token, mags[ob_token], idx + 4
og_itime += IC_exptimes[idx] + 40
if og_itime > 3000.0:
break
break
total_itime += og_itime
sys.stdout.write(" {}s \n".format(og_itime))
program.add_observing_group(og.config)
nrepeats = 0
for repeat in range(nrepeats):
total_itime += og_itime
og_token = "OG_{}_{}_{}".format(args.ogname, og_idx, repeat + 1)
og = copy.deepcopy(og)
og.config["identifier"]["client_token"] = og_token
program.add_observing_group(og.config)
print "Total I-Time: {} hrs".format(total_itime/3600.)
json.dump(program.config, open('program.json', 'w'), indent=4, sort_keys=True)
|
add_target
|
identifier_name
|
ph2.py
|
import json
import argparse
import numpy
import sys
import copy
from astropy.coordinates import SkyCoord
from astropy import units
import operator
class Program(object):
def __init__(self, runid="16BP06", pi_login="gladman"):
self.config = {"runid": runid,
"pi_login": pi_login,
"program_configuration": {"mjdates": [],
"observing_blocks": [],
"observing_groups": []
}}
def add_target(self, target):
self.config["program_configuration"]["mjdates"].append(target)
def add_observing_block(self, observing_block):
self.config["program_configuration"]["observing_blocks"].append(observing_block)
def add_observing_group(self, observing_group):
self.config["program_configuration"]["observing_groups"].append(observing_group)
class Target(object):
def __init__(self, filename=None):
self.config = json.load(open(filename))
@property
def token(self):
return self.config["identifier"]["client_token"]
@property
def mag(self):
return self.config["moving_target"]["ephemeris_points"][0]["mag"]
@property
def coordinate(self):
return SkyCoord(self.config["moving_target"]["ephemeris_points"][0]["coordinate"]["ra"],
self.config["moving_target"]["ephemeris_points"][0]["coordinate"]["dec"],
unit='degree')
class ObservingBlock(object):
def __init__(self, client_token, target_token):
self.config = {"identifier": {"client_token": client_token},
"target_identifier": {"client_token": target_token},
"constraint_identifiers": [{"server_token": "C1"}],
"instrument_config_identifiers": [{"server_token": "I1"}]}
@property
def token(self):
return self.config["identifier"]["client_token"]
class ObservingGroup(object):
def __init__(self, client_token):
self.config = {"identifier": {"client_token": client_token},
"observing_block_identifiers": []}
def add_ob(self, client_token):
self.config["observing_block_identifiers"].append({"client_token": client_token})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ogname')
parser.add_argument('mjdates', nargs='+')
args = parser.parse_args()
# Break the mjdates into OBs based on their max mag of source in pointing.
cuts = numpy.array([23.0, 23.5, 24.0, 24.5, 25.0, 25.5, 26.0, 30.0])
IC_exptimes = [50, 100, 200, 300, 400, 500, 600, 700]
program = Program()
ob_tokens = []
mags = {}
ob_coordinate = {}
for filename in args.mjdates:
target = Target(filename)
program.add_target(target.config)
ob_token = "OB-{}-{}".format(target.token, target.mag)
ob = ObservingBlock(ob_token, target.token)
idx = (target.mag > cuts).sum() + 4
|
sf = lambda x, y: cmp(x.ra, y.ra)
order_tokens = sorted(ob_coordinate, cmp=sf, key=ob_coordinate.get)
total_itime = 0
ogs = {}
scheduled = {}
og_idx = 0
while len(scheduled) < len(ob_tokens):
og_idx += 1
og_token = "OG_{}_{}_{}".format(args.ogname, og_idx, 0)
sys.stdout.write("{}: ".format(og_token))
og = ObservingGroup(og_token)
og_coord = None
og_itime = 0
for ob_token in order_tokens:
if ob_token not in scheduled:
if og_coord is None:
og_coord = ob_coordinate[ob_token]
if ob_coordinate[ob_token].separation(og_coord) > 30 * units.degree:
continue
og.add_ob(ob_token)
scheduled[ob_token] = True
sys.stdout.write("{} ".format(ob_token))
sys.stdout.flush()
idx = (mags[ob_token] > cuts).sum()
print ob_token, mags[ob_token], idx + 4
og_itime += IC_exptimes[idx] + 40
if og_itime > 3000.0:
break
break
total_itime += og_itime
sys.stdout.write(" {}s \n".format(og_itime))
program.add_observing_group(og.config)
nrepeats = 0
for repeat in range(nrepeats):
total_itime += og_itime
og_token = "OG_{}_{}_{}".format(args.ogname, og_idx, repeat + 1)
og = copy.deepcopy(og)
og.config["identifier"]["client_token"] = og_token
program.add_observing_group(og.config)
print "Total I-Time: {} hrs".format(total_itime/3600.)
json.dump(program.config, open('program.json', 'w'), indent=4, sort_keys=True)
|
ob.config["instrument_config_identifiers"] = [{"server_token": "I{}".format(idx)}]
program.add_observing_block(ob.config)
ob_tokens.append(ob_token)
mags[ob_token] = target.mag
ob_coordinate[ob_token] = target.coordinate
|
random_line_split
|
ph2.py
|
import json
import argparse
import numpy
import sys
import copy
from astropy.coordinates import SkyCoord
from astropy import units
import operator
class Program(object):
def __init__(self, runid="16BP06", pi_login="gladman"):
self.config = {"runid": runid,
"pi_login": pi_login,
"program_configuration": {"mjdates": [],
"observing_blocks": [],
"observing_groups": []
}}
def add_target(self, target):
self.config["program_configuration"]["mjdates"].append(target)
def add_observing_block(self, observing_block):
self.config["program_configuration"]["observing_blocks"].append(observing_block)
def add_observing_group(self, observing_group):
self.config["program_configuration"]["observing_groups"].append(observing_group)
class Target(object):
def __init__(self, filename=None):
self.config = json.load(open(filename))
@property
def token(self):
return self.config["identifier"]["client_token"]
@property
def mag(self):
return self.config["moving_target"]["ephemeris_points"][0]["mag"]
@property
def coordinate(self):
return SkyCoord(self.config["moving_target"]["ephemeris_points"][0]["coordinate"]["ra"],
self.config["moving_target"]["ephemeris_points"][0]["coordinate"]["dec"],
unit='degree')
class ObservingBlock(object):
def __init__(self, client_token, target_token):
self.config = {"identifier": {"client_token": client_token},
"target_identifier": {"client_token": target_token},
"constraint_identifiers": [{"server_token": "C1"}],
"instrument_config_identifiers": [{"server_token": "I1"}]}
@property
def token(self):
return self.config["identifier"]["client_token"]
class ObservingGroup(object):
def __init__(self, client_token):
self.config = {"identifier": {"client_token": client_token},
"observing_block_identifiers": []}
def add_ob(self, client_token):
self.config["observing_block_identifiers"].append({"client_token": client_token})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ogname')
parser.add_argument('mjdates', nargs='+')
args = parser.parse_args()
# Break the mjdates into OBs based on their max mag of source in pointing.
cuts = numpy.array([23.0, 23.5, 24.0, 24.5, 25.0, 25.5, 26.0, 30.0])
IC_exptimes = [50, 100, 200, 300, 400, 500, 600, 700]
program = Program()
ob_tokens = []
mags = {}
ob_coordinate = {}
for filename in args.mjdates:
target = Target(filename)
program.add_target(target.config)
ob_token = "OB-{}-{}".format(target.token, target.mag)
ob = ObservingBlock(ob_token, target.token)
idx = (target.mag > cuts).sum() + 4
ob.config["instrument_config_identifiers"] = [{"server_token": "I{}".format(idx)}]
program.add_observing_block(ob.config)
ob_tokens.append(ob_token)
mags[ob_token] = target.mag
ob_coordinate[ob_token] = target.coordinate
sf = lambda x, y: cmp(x.ra, y.ra)
order_tokens = sorted(ob_coordinate, cmp=sf, key=ob_coordinate.get)
total_itime = 0
ogs = {}
scheduled = {}
og_idx = 0
while len(scheduled) < len(ob_tokens):
|
print "Total I-Time: {} hrs".format(total_itime/3600.)
json.dump(program.config, open('program.json', 'w'), indent=4, sort_keys=True)
|
og_idx += 1
og_token = "OG_{}_{}_{}".format(args.ogname, og_idx, 0)
sys.stdout.write("{}: ".format(og_token))
og = ObservingGroup(og_token)
og_coord = None
og_itime = 0
for ob_token in order_tokens:
if ob_token not in scheduled:
if og_coord is None:
og_coord = ob_coordinate[ob_token]
if ob_coordinate[ob_token].separation(og_coord) > 30 * units.degree:
continue
og.add_ob(ob_token)
scheduled[ob_token] = True
sys.stdout.write("{} ".format(ob_token))
sys.stdout.flush()
idx = (mags[ob_token] > cuts).sum()
print ob_token, mags[ob_token], idx + 4
og_itime += IC_exptimes[idx] + 40
if og_itime > 3000.0:
break
break
total_itime += og_itime
sys.stdout.write(" {}s \n".format(og_itime))
program.add_observing_group(og.config)
nrepeats = 0
for repeat in range(nrepeats):
total_itime += og_itime
og_token = "OG_{}_{}_{}".format(args.ogname, og_idx, repeat + 1)
og = copy.deepcopy(og)
og.config["identifier"]["client_token"] = og_token
program.add_observing_group(og.config)
|
conditional_block
|
select.rs
|
use super::{parse_index_range, Index, Range};
use std::{
iter::{empty, FromIterator},
str::FromStr,
};
/// Represents a filter on a vector-like object
#[derive(Debug, PartialEq, Clone)]
pub enum Select<K> {
/// Select all elements
All,
/// Select a single element based on its index
|
/// Select an element by mapped key
Key(K),
}
pub trait SelectWithSize {
type Item;
fn select<O, K>(&mut self, selection: &Select<K>, len: usize) -> O
where
O: FromIterator<Self::Item>;
}
impl<I, T> SelectWithSize for I
where
I: DoubleEndedIterator<Item = T>,
{
type Item = T;
fn select<O, K>(&mut self, s: &Select<K>, size: usize) -> O
where
O: FromIterator<Self::Item>,
{
match s {
Select::Key(_) => empty().collect(),
Select::All => self.collect(),
Select::Index(Index::Forward(idx)) => self.nth(*idx).into_iter().collect(),
Select::Index(Index::Backward(idx)) => self.rev().nth(*idx).into_iter().collect(),
Select::Range(range) => range
.bounds(size)
.map(|(start, length)| self.skip(start).take(length).collect())
.unwrap_or_else(|| empty().collect()),
}
}
}
impl<K: FromStr> FromStr for Select<K> {
type Err = ();
fn from_str(data: &str) -> Result<Self, ()> {
if data == ".." {
Ok(Select::All)
} else if let Ok(index) = data.parse::<isize>() {
Ok(Select::Index(Index::new(index)))
} else if let Some(range) = parse_index_range(data) {
Ok(Select::Range(range))
} else {
Ok(Select::Key(K::from_str(data).map_err(|_| ())?))
}
}
}
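// Parsing sketch (assuming K = String): "..".parse() yields Select::All, "3".parse()
// yields Select::Index(Index::new(3)), a range literal accepted by parse_index_range
// yields Select::Range, and anything else falls through to Select::Key.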
|
Index(Index),
/// Select a range of elements
Range(Range),
|
random_line_split
|
select.rs
|
use super::{parse_index_range, Index, Range};
use std::{
iter::{empty, FromIterator},
str::FromStr,
};
/// Represents a filter on a vector-like object
#[derive(Debug, PartialEq, Clone)]
pub enum Select<K> {
/// Select all elements
All,
/// Select a single element based on its index
Index(Index),
/// Select a range of elements
Range(Range),
/// Select an element by mapped key
Key(K),
}
pub trait SelectWithSize {
type Item;
fn select<O, K>(&mut self, selection: &Select<K>, len: usize) -> O
where
O: FromIterator<Self::Item>;
}
impl<I, T> SelectWithSize for I
where
I: DoubleEndedIterator<Item = T>,
{
type Item = T;
fn
|
<O, K>(&mut self, s: &Select<K>, size: usize) -> O
where
O: FromIterator<Self::Item>,
{
match s {
Select::Key(_) => empty().collect(),
Select::All => self.collect(),
Select::Index(Index::Forward(idx)) => self.nth(*idx).into_iter().collect(),
Select::Index(Index::Backward(idx)) => self.rev().nth(*idx).into_iter().collect(),
Select::Range(range) => range
.bounds(size)
.map(|(start, length)| self.skip(start).take(length).collect())
.unwrap_or_else(|| empty().collect()),
}
}
}
impl<K: FromStr> FromStr for Select<K> {
type Err = ();
fn from_str(data: &str) -> Result<Self, ()> {
if data == ".." {
Ok(Select::All)
} else if let Ok(index) = data.parse::<isize>() {
Ok(Select::Index(Index::new(index)))
} else if let Some(range) = parse_index_range(data) {
Ok(Select::Range(range))
} else {
Ok(Select::Key(K::from_str(data).map_err(|_| ())?))
}
}
}
|
select
|
identifier_name
|
select.rs
|
use super::{parse_index_range, Index, Range};
use std::{
iter::{empty, FromIterator},
str::FromStr,
};
/// Represents a filter on a vector-like object
#[derive(Debug, PartialEq, Clone)]
pub enum Select<K> {
/// Select all elements
All,
/// Select a single element based on its index
Index(Index),
/// Select a range of elements
Range(Range),
/// Select an element by mapped key
Key(K),
}
pub trait SelectWithSize {
type Item;
fn select<O, K>(&mut self, selection: &Select<K>, len: usize) -> O
where
O: FromIterator<Self::Item>;
}
impl<I, T> SelectWithSize for I
where
I: DoubleEndedIterator<Item = T>,
{
type Item = T;
fn select<O, K>(&mut self, s: &Select<K>, size: usize) -> O
where
O: FromIterator<Self::Item>,
|
}
impl<K: FromStr> FromStr for Select<K> {
type Err = ();
fn from_str(data: &str) -> Result<Self, ()> {
if data == ".." {
Ok(Select::All)
} else if let Ok(index) = data.parse::<isize>() {
Ok(Select::Index(Index::new(index)))
} else if let Some(range) = parse_index_range(data) {
Ok(Select::Range(range))
} else {
Ok(Select::Key(K::from_str(data).map_err(|_| ())?))
}
}
}
|
{
match s {
Select::Key(_) => empty().collect(),
Select::All => self.collect(),
Select::Index(Index::Forward(idx)) => self.nth(*idx).into_iter().collect(),
Select::Index(Index::Backward(idx)) => self.rev().nth(*idx).into_iter().collect(),
Select::Range(range) => range
.bounds(size)
.map(|(start, length)| self.skip(start).take(length).collect())
.unwrap_or_else(|| empty().collect()),
}
}
|
identifier_body
|
t2t_trainer_test.py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
"""Tests for t2t_trainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.bin import t2t_trainer
from tensor2tensor.utils import trainer_lib_test
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class TrainerTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
trainer_lib_test.TrainerLibTest.setUpClass()
def testTrain(self):
FLAGS.problem = "tiny_algo"
FLAGS.model = "transformer"
FLAGS.hparams_set = "transformer_tiny"
FLAGS.train_steps = 1
FLAGS.eval_steps = 1
FLAGS.output_dir = tf.test.get_temp_dir()
FLAGS.data_dir = tf.test.get_temp_dir()
t2t_trainer.main(None)
if __name__ == "__main__":
tf.test.main()
|
# See the License for the specific language governing permissions and
# limitations under the License.
|
random_line_split
|
t2t_trainer_test.py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t2t_trainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.bin import t2t_trainer
from tensor2tensor.utils import trainer_lib_test
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class TrainerTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
trainer_lib_test.TrainerLibTest.setUpClass()
def testTrain(self):
|
if __name__ == "__main__":
tf.test.main()
|
FLAGS.problem = "tiny_algo"
FLAGS.model = "transformer"
FLAGS.hparams_set = "transformer_tiny"
FLAGS.train_steps = 1
FLAGS.eval_steps = 1
FLAGS.output_dir = tf.test.get_temp_dir()
FLAGS.data_dir = tf.test.get_temp_dir()
t2t_trainer.main(None)
|
identifier_body
|
t2t_trainer_test.py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t2t_trainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.bin import t2t_trainer
from tensor2tensor.utils import trainer_lib_test
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class TrainerTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
trainer_lib_test.TrainerLibTest.setUpClass()
def testTrain(self):
FLAGS.problem = "tiny_algo"
FLAGS.model = "transformer"
FLAGS.hparams_set = "transformer_tiny"
FLAGS.train_steps = 1
FLAGS.eval_steps = 1
FLAGS.output_dir = tf.test.get_temp_dir()
FLAGS.data_dir = tf.test.get_temp_dir()
t2t_trainer.main(None)
if __name__ == "__main__":
|
tf.test.main()
|
conditional_block
|
|
t2t_trainer_test.py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t2t_trainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.bin import t2t_trainer
from tensor2tensor.utils import trainer_lib_test
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class
|
(tf.test.TestCase):
@classmethod
def setUpClass(cls):
trainer_lib_test.TrainerLibTest.setUpClass()
def testTrain(self):
FLAGS.problem = "tiny_algo"
FLAGS.model = "transformer"
FLAGS.hparams_set = "transformer_tiny"
FLAGS.train_steps = 1
FLAGS.eval_steps = 1
FLAGS.output_dir = tf.test.get_temp_dir()
FLAGS.data_dir = tf.test.get_temp_dir()
t2t_trainer.main(None)
if __name__ == "__main__":
tf.test.main()
|
TrainerTest
|
identifier_name
|
Point.js
|
function Point(x, y) {
this.x = x;
this.y = y;
}
function
|
(x, y) { return new Point(x, y); } // shorthand
Point.ORIGIN = P(0, 0);
Point.DIRECTIONS = [P(0, 1), P(1, 0), P(-1, 0), P(0, -1)];
Point.prototype.plus = function(point) {
return P(this.x + point.x, this.y + point.y);
};
Point.prototype.minus = function(point) {
return P(this.x - point.x, this.y - point.y);
};
Point.prototype.times = function(scalar) {
return P(this.x * scalar, this.y * scalar);
};
Point.prototype.norm1 = function() {
return Math.abs(this.x) + Math.abs(this.y);
};
Point.prototype.equals = function(point) {
return this.x === point.x && this.y === point.y;
};
Point.prototype.neighbors = function() {
var _this = this;
return Point.DIRECTIONS.map(function(p) { return p.plus(_this); });
};
Point.prototype.toString = function() {
return '(' + this.x + ', ' + this.y + ')';
};
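// Usage sketch: P(1, 2).plus(P(3, 4)) is (4, 6); P(-1, 2).norm1() is 3;
// Point.ORIGIN.neighbors() returns the four axis-aligned unit offsets.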
|
P
|
identifier_name
|
Point.js
|
function Point(x, y)
|
function P(x, y) { return new Point(x, y); } // shorthand
Point.ORIGIN = P(0, 0);
Point.DIRECTIONS = [P(0, 1), P(1, 0), P(-1, 0), P(0, -1)];
Point.prototype.plus = function(point) {
return P(this.x + point.x, this.y + point.y);
};
Point.prototype.minus = function(point) {
return P(this.x - point.x, this.y - point.y);
};
Point.prototype.times = function(scalar) {
return P(this.x * scalar, this.y * scalar);
};
Point.prototype.norm1 = function() {
return Math.abs(this.x) + Math.abs(this.y);
};
Point.prototype.equals = function(point) {
return this.x === point.x && this.y === point.y;
};
Point.prototype.neighbors = function() {
var _this = this;
return Point.DIRECTIONS.map(function(p) { return p.plus(_this); });
};
Point.prototype.toString = function() {
return '(' + this.x + ', ' + this.y + ')';
};
|
{
this.x = x;
this.y = y;
}
|
identifier_body
|
Point.js
|
function Point(x, y) {
this.x = x;
this.y = y;
}
function P(x, y) { return new Point(x, y); } // shorthand
Point.ORIGIN = P(0, 0);
Point.DIRECTIONS = [P(0, 1), P(1, 0), P(-1, 0), P(0, -1)];
Point.prototype.plus = function(point) {
|
};
Point.prototype.minus = function(point) {
return P(this.x - point.x, this.y - point.y);
};
Point.prototype.times = function(scalar) {
return P(this.x * scalar, this.y * scalar);
};
Point.prototype.norm1 = function() {
return Math.abs(this.x) + Math.abs(this.y);
};
Point.prototype.equals = function(point) {
return this.x === point.x && this.y === point.y;
};
Point.prototype.neighbors = function() {
var _this = this;
return Point.DIRECTIONS.map(function(p) { return p.plus(_this); });
};
Point.prototype.toString = function() {
return '(' + this.x + ', ' + this.y + ')';
};
|
return P(this.x + point.x, this.y + point.y);
|
random_line_split
|
view_compiler.d.ts
|
import { AnimationEntryCompileResult } from '../animation/animation_compiler';
import { CompileDirectiveMetadata, CompilePipeMetadata } from '../compile_metadata';
import { CompilerConfig } from '../config';
import * as o from '../output/output_ast';
import { TemplateAst } from '../template_parser/template_ast';
import { ComponentFactoryDependency, ViewFactoryDependency } from './view_builder';
export { ComponentFactoryDependency, ViewFactoryDependency } from './view_builder';
export declare class
|
{
statements: o.Statement[];
viewFactoryVar: string;
dependencies: Array<ViewFactoryDependency | ComponentFactoryDependency>;
constructor(statements: o.Statement[], viewFactoryVar: string, dependencies: Array<ViewFactoryDependency | ComponentFactoryDependency>);
}
export declare class ViewCompiler {
private _genConfig;
private _animationCompiler;
constructor(_genConfig: CompilerConfig);
compileComponent(component: CompileDirectiveMetadata, template: TemplateAst[], styles: o.Expression, pipes: CompilePipeMetadata[], compiledAnimations: AnimationEntryCompileResult[]): ViewCompileResult;
}
|
ViewCompileResult
|
identifier_name
|
view_compiler.d.ts
|
import { AnimationEntryCompileResult } from '../animation/animation_compiler';
import { CompileDirectiveMetadata, CompilePipeMetadata } from '../compile_metadata';
import { CompilerConfig } from '../config';
import * as o from '../output/output_ast';
import { TemplateAst } from '../template_parser/template_ast';
import { ComponentFactoryDependency, ViewFactoryDependency } from './view_builder';
export { ComponentFactoryDependency, ViewFactoryDependency } from './view_builder';
export declare class ViewCompileResult {
statements: o.Statement[];
viewFactoryVar: string;
dependencies: Array<ViewFactoryDependency | ComponentFactoryDependency>;
constructor(statements: o.Statement[], viewFactoryVar: string, dependencies: Array<ViewFactoryDependency | ComponentFactoryDependency>);
}
export declare class ViewCompiler {
private _genConfig;
|
private _animationCompiler;
constructor(_genConfig: CompilerConfig);
compileComponent(component: CompileDirectiveMetadata, template: TemplateAst[], styles: o.Expression, pipes: CompilePipeMetadata[], compiledAnimations: AnimationEntryCompileResult[]): ViewCompileResult;
}
|
random_line_split
|
|
local_actions.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{
data::{await_next_session, get_session},
db,
error::{self, ActionRunnerError},
Sender, Sessions, Shared,
};
use futures::{channel::oneshot, future::BoxFuture, Future, FutureExt, TryFutureExt};
use iml_postgres::sqlx;
use iml_wire_types::{Action, ActionId, ToJsonValue};
use serde_json::value::Value;
use std::{collections::HashMap, fmt::Display, ops::Deref, sync::Arc};
pub type LocalActionsInFlight = HashMap<ActionId, Sender>;
pub type SharedLocalActionsInFlight = Shared<LocalActionsInFlight>;
/// Adds an action id to the in-flight list.
/// A tx handle is stored internally, and the rx side is returned.
///
/// The rx will resolve once the plugin has completed or is cancelled.
async fn add_in_flight(
in_flight: SharedLocalActionsInFlight,
id: ActionId,
) -> oneshot::Receiver<Result<Value, String>> {
let (tx, rx) = oneshot::channel();
let mut in_flight = in_flight.lock().await;
in_flight.insert(id.clone(), tx);
rx
}
/// Removes an action id from the in-flight list.
///
/// Returns the tx handle which can then be used to cancel the action if needed.
async fn remove_in_flight(
in_flight: SharedLocalActionsInFlight,
id: &ActionId,
) -> Option<oneshot::Sender<Result<Value, String>>> {
let mut in_flight = in_flight.lock().await;
in_flight.remove(id).or_else(|| {
tracing::info!(
"Local action {:?} not found, perhaps it was already cancelled.",
id
);
None
})
}
/// Spawn the plugin within a new task.
///
/// When the plugin completes or is cancelled, it will notify the rx
/// handle associated with the action id.
pub fn spawn_plugin(
fut: impl Future<Output = Result<Value, String>> + Send + 'static,
in_flight: SharedLocalActionsInFlight,
id: ActionId,
) {
tokio::spawn(fut.then(move |result| async move {
let _ = remove_in_flight(in_flight, &id)
.await
.map(|tx| tx.send(result));
}));
}
/// Wraps a `FnOnce` so it will be called with a deserialized value and return a serialized value.
///
/// This is subtly different from a usual action plugin in that it's meant to be used with closures.
fn wrap_plugin<T, R, E: Display, Fut>(
v: Value,
f: impl FnOnce(T) -> Fut + Send + 'static,
) -> BoxFuture<'static, Result<Value, String>>
where
T: serde::de::DeserializeOwned + Send,
R: serde::Serialize + Send,
Fut: Future<Output = Result<R, E>> + Send,
{
Box::pin(async {
let x = serde_json::from_value(v).map_err(|e| format!("{}", e))?;
let x = f(x).await.map_err(|e| format!("{}", e))?;
x.to_json_value()
})
}
async fn run_plugin(
id: ActionId,
in_flight: SharedLocalActionsInFlight,
fut: impl Future<Output = Result<Value, String>> + Send + 'static,
) -> Result<Result<serde_json::value::Value, String>, ActionRunnerError> {
let rx = add_in_flight(Arc::clone(&in_flight), id.clone()).await;
spawn_plugin(fut, in_flight, id);
rx.err_into().await
}
/// Try to locate and start or cancel a local action.
pub async fn handle_local_action(
action: Action,
in_flight: SharedLocalActionsInFlight,
sessions: Shared<Sessions>,
db_pool: sqlx::PgPool,
) -> Result<Result<serde_json::value::Value, String>, ActionRunnerError>
|
{
match action {
Action::ActionCancel { id } => {
let _ = remove_in_flight(in_flight, &id)
.await
.map(|tx| tx.send(Ok(serde_json::Value::Null)));
Ok(Ok(serde_json::Value::Null))
}
Action::ActionStart { id, action, args } => {
let plugin = match action.deref() {
"get_session" => wrap_plugin(args, move |fqdn| get_session(fqdn, sessions)),
"await_next_session" => {
wrap_plugin(args, move |(fqdn, last_session, wait_secs)| {
await_next_session(fqdn, last_session, wait_secs, sessions)
})
}
"get_fqdn_by_id" => {
wrap_plugin(args, move |id: i32| db::get_host_fqdn_by_id(id, db_pool))
}
_ => {
return Err(ActionRunnerError::RequiredError(error::RequiredError(
format!("Could not find action {} in local registry", action),
)))
}
};
run_plugin(id, in_flight, plugin).await
}
}
}
|
identifier_body
|
|
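The in-flight bookkeeping in local_actions.rs is a reusable pattern: store the sender half of a one-shot channel keyed by ActionId, hand the receiver to the caller, and let whichever path removes the entry (completion or cancellation) be the only one allowed to settle it. A rough TypeScript analogue of that shape, assuming string ids and promises in place of oneshot channels:
type Settle = (r: { ok?: unknown; err?: string }) => void;
const inFlight = new Map<string, Settle>();
// Mirrors add_in_flight: register the id, return a promise for the eventual result.
function addInFlight(id: string): Promise<{ ok?: unknown; err?: string }> {
  return new Promise((resolve) => inFlight.set(id, resolve));
}
// Mirrors remove_in_flight: removal and settlement are one step, so the
// completion and cancellation paths can never both fire for the same id.
function removeInFlight(id: string): Settle | undefined {
  const tx = inFlight.get(id);
  inFlight.delete(id);
  return tx;
}
// Mirrors spawn_plugin: run the work, then settle whoever is still waiting.
async function spawnPlugin(work: Promise<unknown>, id: string): Promise<void> {
  try { removeInFlight(id)?.({ ok: await work }); }
  catch (e) { removeInFlight(id)?.({ err: String(e) }); }
}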
local_actions.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{
data::{await_next_session, get_session},
db,
error::{self, ActionRunnerError},
Sender, Sessions, Shared,
};
use futures::{channel::oneshot, future::BoxFuture, Future, FutureExt, TryFutureExt};
use iml_postgres::sqlx;
use iml_wire_types::{Action, ActionId, ToJsonValue};
use serde_json::value::Value;
use std::{collections::HashMap, fmt::Display, ops::Deref, sync::Arc};
pub type LocalActionsInFlight = HashMap<ActionId, Sender>;
pub type SharedLocalActionsInFlight = Shared<LocalActionsInFlight>;
/// Adds an action id to the in-flight list.
/// A tx handle is stored internally, and the rx side is returned.
///
/// The rx will resolve once the plugin has completed or is cancelled.
async fn add_in_flight(
in_flight: SharedLocalActionsInFlight,
id: ActionId,
) -> oneshot::Receiver<Result<Value, String>> {
let (tx, rx) = oneshot::channel();
let mut in_flight = in_flight.lock().await;
in_flight.insert(id.clone(), tx);
rx
}
/// Removes an action id from the in-flight list.
///
/// Returns the tx handle which can then be used to cancel the action if needed.
async fn remove_in_flight(
in_flight: SharedLocalActionsInFlight,
id: &ActionId,
) -> Option<oneshot::Sender<Result<Value, String>>> {
let mut in_flight = in_flight.lock().await;
in_flight.remove(id).or_else(|| {
tracing::info!(
"Local action {:?} not found, perhaps it was already cancelled.",
id
);
None
})
}
/// Spawn the plugin within a new task.
///
/// When the plugin completes or is cancelled, it will notify the rx
/// handle associated with the action id.
pub fn spawn_plugin(
fut: impl Future<Output = Result<Value, String>> + Send + 'static,
in_flight: SharedLocalActionsInFlight,
id: ActionId,
) {
tokio::spawn(fut.then(move |result| async move {
let _ = remove_in_flight(in_flight, &id)
.await
.map(|tx| tx.send(result));
}));
}
/// Wraps a `FnOnce` so it will be called with a deserialized value and return a serialized value.
///
/// This is subtly different from a usual action plugin in that it's meant to be used with closures.
fn wrap_plugin<T, R, E: Display, Fut>(
v: Value,
f: impl FnOnce(T) -> Fut + Send + 'static,
) -> BoxFuture<'static, Result<Value, String>>
where
T: serde::de::DeserializeOwned + Send,
R: serde::Serialize + Send,
Fut: Future<Output = Result<R, E>> + Send,
{
Box::pin(async {
let x = serde_json::from_value(v).map_err(|e| format!("{}", e))?;
let x = f(x).await.map_err(|e| format!("{}", e))?;
x.to_json_value()
})
}
async fn
|
(
id: ActionId,
in_flight: SharedLocalActionsInFlight,
fut: impl Future<Output = Result<Value, String>> + Send + 'static,
) -> Result<Result<serde_json::value::Value, String>, ActionRunnerError> {
let rx = add_in_flight(Arc::clone(&in_flight), id.clone()).await;
spawn_plugin(fut, in_flight, id);
rx.err_into().await
}
/// Try to locate and start or cancel a local action.
pub async fn handle_local_action(
action: Action,
in_flight: SharedLocalActionsInFlight,
sessions: Shared<Sessions>,
db_pool: sqlx::PgPool,
) -> Result<Result<serde_json::value::Value, String>, ActionRunnerError> {
match action {
Action::ActionCancel { id } => {
let _ = remove_in_flight(in_flight, &id)
.await
.map(|tx| tx.send(Ok(serde_json::Value::Null)));
Ok(Ok(serde_json::Value::Null))
}
Action::ActionStart { id, action, args } => {
let plugin = match action.deref() {
"get_session" => wrap_plugin(args, move |fqdn| get_session(fqdn, sessions)),
"await_next_session" => {
wrap_plugin(args, move |(fqdn, last_session, wait_secs)| {
await_next_session(fqdn, last_session, wait_secs, sessions)
})
}
"get_fqdn_by_id" => {
wrap_plugin(args, move |id: i32| db::get_host_fqdn_by_id(id, db_pool))
}
_ => {
return Err(ActionRunnerError::RequiredError(error::RequiredError(
format!("Could not find action {} in local registry", action),
)))
}
};
run_plugin(id, in_flight, plugin).await
}
}
}
|
run_plugin
|
identifier_name
|
local_actions.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{
data::{await_next_session, get_session},
db,
error::{self, ActionRunnerError},
Sender, Sessions, Shared,
};
use futures::{channel::oneshot, future::BoxFuture, Future, FutureExt, TryFutureExt};
use iml_postgres::sqlx;
use iml_wire_types::{Action, ActionId, ToJsonValue};
use serde_json::value::Value;
use std::{collections::HashMap, fmt::Display, ops::Deref, sync::Arc};
pub type LocalActionsInFlight = HashMap<ActionId, Sender>;
pub type SharedLocalActionsInFlight = Shared<LocalActionsInFlight>;
/// Adds an action id to the in-flight list.
/// A tx handle is stored internally, and the rx side is returned.
///
/// The rx will resolve once the plugin has completed or is cancelled.
async fn add_in_flight(
in_flight: SharedLocalActionsInFlight,
id: ActionId,
) -> oneshot::Receiver<Result<Value, String>> {
let (tx, rx) = oneshot::channel();
let mut in_flight = in_flight.lock().await;
in_flight.insert(id.clone(), tx);
rx
|
/// Removes an action id from the in-flight list.
///
/// Returns the tx handle which can then be used to cancel the action if needed.
async fn remove_in_flight(
in_flight: SharedLocalActionsInFlight,
id: &ActionId,
) -> Option<oneshot::Sender<Result<Value, String>>> {
let mut in_flight = in_flight.lock().await;
in_flight.remove(id).or_else(|| {
tracing::info!(
"Local action {:?} not found, perhaps it was already cancelled.",
id
);
None
})
}
/// Spawn the plugin within a new task.
///
/// When the plugin completes or is cancelled, it will notify the rx
/// handle associated with the action id.
pub fn spawn_plugin(
fut: impl Future<Output = Result<Value, String>> + Send + 'static,
in_flight: SharedLocalActionsInFlight,
id: ActionId,
) {
tokio::spawn(fut.then(move |result| async move {
let _ = remove_in_flight(in_flight, &id)
.await
.map(|tx| tx.send(result));
}));
}
/// Wraps a `FnOnce` so it will be called with a deserialized value and return a serialized value.
///
/// This is subtly different from a usual action plugin in that it's meant to be used with closures.
fn wrap_plugin<T, R, E: Display, Fut>(
v: Value,
f: impl FnOnce(T) -> Fut + Send + 'static,
) -> BoxFuture<'static, Result<Value, String>>
where
T: serde::de::DeserializeOwned + Send,
R: serde::Serialize + Send,
Fut: Future<Output = Result<R, E>> + Send,
{
Box::pin(async {
let x = serde_json::from_value(v).map_err(|e| format!("{}", e))?;
let x = f(x).await.map_err(|e| format!("{}", e))?;
x.to_json_value()
})
}
async fn run_plugin(
id: ActionId,
in_flight: SharedLocalActionsInFlight,
fut: impl Future<Output = Result<Value, String>> + Send + 'static,
) -> Result<Result<serde_json::value::Value, String>, ActionRunnerError> {
let rx = add_in_flight(Arc::clone(&in_flight), id.clone()).await;
spawn_plugin(fut, in_flight, id);
rx.err_into().await
}
/// Try to locate and start or cancel a local action.
pub async fn handle_local_action(
action: Action,
in_flight: SharedLocalActionsInFlight,
sessions: Shared<Sessions>,
db_pool: sqlx::PgPool,
) -> Result<Result<serde_json::value::Value, String>, ActionRunnerError> {
match action {
Action::ActionCancel { id } => {
let _ = remove_in_flight(in_flight, &id)
.await
.map(|tx| tx.send(Ok(serde_json::Value::Null)));
Ok(Ok(serde_json::Value::Null))
}
Action::ActionStart { id, action, args } => {
let plugin = match action.deref() {
"get_session" => wrap_plugin(args, move |fqdn| get_session(fqdn, sessions)),
"await_next_session" => {
wrap_plugin(args, move |(fqdn, last_session, wait_secs)| {
await_next_session(fqdn, last_session, wait_secs, sessions)
})
}
"get_fqdn_by_id" => {
wrap_plugin(args, move |id: i32| db::get_host_fqdn_by_id(id, db_pool))
}
_ => {
return Err(ActionRunnerError::RequiredError(error::RequiredError(
format!("Could not find action {} in local registry", action),
)))
}
};
run_plugin(id, in_flight, plugin).await
}
}
}
|
}
|
random_line_split
|
local_actions.rs
|
// Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use crate::{
data::{await_next_session, get_session},
db,
error::{self, ActionRunnerError},
Sender, Sessions, Shared,
};
use futures::{channel::oneshot, future::BoxFuture, Future, FutureExt, TryFutureExt};
use iml_postgres::sqlx;
use iml_wire_types::{Action, ActionId, ToJsonValue};
use serde_json::value::Value;
use std::{collections::HashMap, fmt::Display, ops::Deref, sync::Arc};
pub type LocalActionsInFlight = HashMap<ActionId, Sender>;
pub type SharedLocalActionsInFlight = Shared<LocalActionsInFlight>;
/// Adds an action id to the in-flight list.
/// A tx handle is stored internally, and the rx side is returned.
///
/// The rx will resolve once the plugin has completed or is cancelled.
async fn add_in_flight(
in_flight: SharedLocalActionsInFlight,
id: ActionId,
) -> oneshot::Receiver<Result<Value, String>> {
let (tx, rx) = oneshot::channel();
let mut in_flight = in_flight.lock().await;
in_flight.insert(id.clone(), tx);
rx
}
/// Removes an action id from the in-flight list.
///
/// Returns the tx handle which can then be used to cancel the action if needed.
async fn remove_in_flight(
in_flight: SharedLocalActionsInFlight,
id: &ActionId,
) -> Option<oneshot::Sender<Result<Value, String>>> {
let mut in_flight = in_flight.lock().await;
in_flight.remove(id).or_else(|| {
tracing::info!(
"Local action {:?} not found, perhaps it was already cancelled.",
id
);
None
})
}
/// Spawn the plugin within a new task.
///
/// When the plugin completes or is cancelled, it will notify the rx
/// handle associated with the action id.
pub fn spawn_plugin(
fut: impl Future<Output = Result<Value, String>> + Send + 'static,
in_flight: SharedLocalActionsInFlight,
id: ActionId,
) {
tokio::spawn(fut.then(move |result| async move {
let _ = remove_in_flight(in_flight, &id)
.await
.map(|tx| tx.send(result));
}));
}
/// Wraps a `FnOnce` so it will be called with a deserialized value and return a serialized value.
///
/// This is subtly different from a usual action plugin in that it's meant to be used with closures.
fn wrap_plugin<T, R, E: Display, Fut>(
v: Value,
f: impl FnOnce(T) -> Fut + Send + 'static,
) -> BoxFuture<'static, Result<Value, String>>
where
T: serde::de::DeserializeOwned + Send,
R: serde::Serialize + Send,
Fut: Future<Output = Result<R, E>> + Send,
{
Box::pin(async {
let x = serde_json::from_value(v).map_err(|e| format!("{}", e))?;
let x = f(x).await.map_err(|e| format!("{}", e))?;
x.to_json_value()
})
}
async fn run_plugin(
id: ActionId,
in_flight: SharedLocalActionsInFlight,
fut: impl Future<Output = Result<Value, String>> + Send + 'static,
) -> Result<Result<serde_json::value::Value, String>, ActionRunnerError> {
let rx = add_in_flight(Arc::clone(&in_flight), id.clone()).await;
spawn_plugin(fut, in_flight, id);
rx.err_into().await
}
/// Try to locate and start or cancel a local action.
pub async fn handle_local_action(
action: Action,
in_flight: SharedLocalActionsInFlight,
sessions: Shared<Sessions>,
db_pool: sqlx::PgPool,
) -> Result<Result<serde_json::value::Value, String>, ActionRunnerError> {
match action {
Action::ActionCancel { id } => {
let _ = remove_in_flight(in_flight, &id)
.await
.map(|tx| tx.send(Ok(serde_json::Value::Null)));
Ok(Ok(serde_json::Value::Null))
}
Action::ActionStart { id, action, args } => {
let plugin = match action.deref() {
"get_session" => wrap_plugin(args, move |fqdn| get_session(fqdn, sessions)),
"await_next_session" => {
wrap_plugin(args, move |(fqdn, last_session, wait_secs)| {
await_next_session(fqdn, last_session, wait_secs, sessions)
})
}
"get_fqdn_by_id" => {
wrap_plugin(args, move |id: i32| db::get_host_fqdn_by_id(id, db_pool))
}
_ =>
|
};
run_plugin(id, in_flight, plugin).await
}
}
}
|
{
return Err(ActionRunnerError::RequiredError(error::RequiredError(
format!("Could not find action {} in local registry", action),
)))
}
|
conditional_block
|
addComment.js
|
Template.addComment.events({
// press enter on input
'keyup #addcomment': function(event) {
if (event.which === 13) {
event.stopPropagation();
const comment = event.target.value;
if (comment) {
event.target.value = '';
const userName = Meteor.users.findOne().username;
const pageId = Pages.findOne().slug;
Meteor.call('addComment', comment, userName, pageId, function(err, res) {
if (err) {
alert(err);
}
});
}
return false;
}
},
'click #submitcomment': function() {
const commentBox = document.querySelector('#addcomment');
if (commentBox.value) {
const comment = commentBox.value;
commentBox.value = '';
const userName = Meteor.users.findOne().username;
const pageId = Pages.findOne().slug;
Meteor.call('addComment', comment, userName, pageId, function(err, res) {
if (err) {
alert(err);
}
});
}
return false;
}
|
});
|
random_line_split
|
|
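Both addComment.js handlers repeat the same submit sequence: read the box, clear it, look up the user and page, and call the addComment method. A small TypeScript sketch of how that duplication could be factored into one helper (submitComment is hypothetical; the Meteor and Pages globals are assumed, as in the original):
declare const Meteor: any, Pages: any;
// Hypothetical shared helper; both the Enter-key and the click handler would call this.
function submitComment(box: HTMLTextAreaElement | HTMLInputElement): void {
  const comment = box.value;
  if (!comment) return;
  box.value = '';
  const userName = Meteor.users.findOne().username;
  const pageId = Pages.findOne().slug;
  Meteor.call('addComment', comment, userName, pageId, (err: unknown) => {
    if (err) alert(err);
  });
}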
addComment.js
|
Template.addComment.events({
// press enter on input
'keyup #addcomment': function(event) {
if (event.which === 13) {
event.stopPropagation();
const comment = event.target.value;
if (comment) {
event.target.value = '';
const userName = Meteor.users.findOne().username;
const pageId = Pages.findOne().slug;
Meteor.call('addComment', comment, userName, pageId, function(err, res) {
if (err)
|
});
}
return false;
}
},
'click #submitcomment': function() {
const commentBox = document.querySelector('#addcomment');
if (commentBox.value) {
const comment = commentBox.value;
commentBox.value = '';
const userName = Meteor.users.findOne().username;
const pageId = Pages.findOne().slug;
Meteor.call('addComment', comment, userName, pageId, function(err, res) {
if (err) {
alert(err);
}
});
}
return false;
}
});
|
{
alert(err);
}
|
conditional_block
|
SiteMapClient.py
|
""" Client-side transfer class for monitoring system
"""
import time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import S_OK
class SiteMapClient:
###########################################################################
def __init__( self, getRPCClient = None ):
self.getRPCClient = getRPCClient
self.lastDataRetrievalTime = 0
self.sitesData = {}
def __getRPCClient( self ):
if self.getRPCClient:
return self.getRPCClient( "Framework/SiteMap" )
return RPCClient( "Framework/SiteMap" )
###########################################################################
def getSitesData( self ):
|
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
""" Retrieves a single file and puts it in the output directory
"""
if self.lastDataRetrievalTime - time.time() < 300:
result = self.__getRPCClient().getSitesData()
if 'rpcStub' in result:
del( result[ 'rpcStub' ] )
if not result[ 'OK' ]:
return result
self.sitesData = result[ 'Value' ]
if self.sitesData:
self.lastDataRetrievalTime = time.time()
return S_OK( self.sitesData )
|
identifier_body
|
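getSitesData in SiteMapClient.py is a time-based cache: refresh from the RPC service only when the cached copy is older than 300 seconds, otherwise return sitesData as-is. The same staleness check in a minimal TypeScript sketch (fetchSitesData is a stand-in for the RPC call; the 300-second TTL comes from the code above):
const TTL_SECONDS = 300;
let lastFetch = 0;            // epoch seconds of the last successful fetch
let sitesData: unknown = null;
async function getSitesData(fetchSitesData: () => Promise<unknown>): Promise<unknown> {
  const now = Date.now() / 1000;
  if (now - lastFetch > TTL_SECONDS) {   // only hit the service when stale
    sitesData = await fetchSitesData();
    if (sitesData) lastFetch = now;      // mirror the Python: only mark fresh on real data
  }
  return sitesData;
}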
SiteMapClient.py
|
""" Client-side transfer class for monitoring system
"""
import time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import S_OK
class SiteMapClient:
###########################################################################
def __init__( self, getRPCClient = None ):
self.getRPCClient = getRPCClient
self.lastDataRetrievalTime = 0
self.sitesData = {}
def __getRPCClient( self ):
if self.getRPCClient:
return self.getRPCClient( "Framework/SiteMap" )
return RPCClient( "Framework/SiteMap" )
###########################################################################
def getSitesData( self ):
""" Retrieves a single file and puts it in the output directory
"""
if self.lastDataRetrievalTime - time.time() < 300:
result = self.__getRPCClient().getSitesData()
if 'rpcStub' in result:
|
if not result[ 'OK' ]:
return result
self.sitesData = result[ 'Value' ]
if self.sitesData:
self.lastDataRetrievalTime = time.time()
return S_OK( self.sitesData )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
del( result[ 'rpcStub' ] )
|
conditional_block
|
SiteMapClient.py
|
""" Client-side transfer class for monitoring system
"""
import time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import S_OK
class SiteMapClient:
###########################################################################
def
|
( self, getRPCClient = None ):
self.getRPCClient = getRPCClient
self.lastDataRetrievalTime = 0
self.sitesData = {}
def __getRPCClient( self ):
if self.getRPCClient:
return self.getRPCClient( "Framework/SiteMap" )
return RPCClient( "Framework/SiteMap" )
###########################################################################
def getSitesData( self ):
""" Retrieves a single file and puts it in the output directory
"""
if self.lastDataRetrievalTime - time.time() < 300:
result = self.__getRPCClient().getSitesData()
if 'rpcStub' in result:
del( result[ 'rpcStub' ] )
if not result[ 'OK' ]:
return result
self.sitesData = result[ 'Value' ]
if self.sitesData:
self.lastDataRetrievalTime = time.time()
return S_OK( self.sitesData )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
__init__
|
identifier_name
|
SiteMapClient.py
|
""" Client-side transfer class for monitoring system
"""
import time
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import S_OK
class SiteMapClient:
###########################################################################
def __init__( self, getRPCClient = None ):
self.getRPCClient = getRPCClient
self.lastDataRetrievalTime = 0
self.sitesData = {}
def __getRPCClient( self ):
if self.getRPCClient:
return self.getRPCClient( "Framework/SiteMap" )
return RPCClient( "Framework/SiteMap" )
###########################################################################
def getSitesData( self ):
""" Retrieves a single file and puts it in the output directory
"""
if self.lastDataRetrievalTime - time.time() < 300:
result = self.__getRPCClient().getSitesData()
|
if not result[ 'OK' ]:
return result
self.sitesData = result[ 'Value' ]
if self.sitesData:
self.lastDataRetrievalTime = time.time()
return S_OK( self.sitesData )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
if 'rpcStub' in result:
del( result[ 'rpcStub' ] )
|
random_line_split
|
fallback.rs
|
/*
This module implements a "fallback" prefilter that only relies on memchr to
function. While memchr works best when it's explicitly vectorized, its
fallback implementations are fast enough to make a prefilter like this
worthwhile.
The essence of this implementation is to identify two rare bytes in a needle
based on a background frequency distribution of bytes. We then run memchr on the
rarer byte. For each match, we use the second rare byte as a guard to quickly
check if a match is possible. If the position passes the guard test, then we do
a naive memcmp to confirm the match.
In practice, this formulation works amazingly well, primarily because of the
heuristic use of a background frequency distribution. However, it does have a
number of weaknesses where it can get quite slow when its background frequency
distribution doesn't line up with the haystack being searched. This is why we
have specialized vector routines that essentially take this idea and move the
guard check into vectorized code. (Those specialized vector routines do still
make use of the background frequency distribution of bytes though.)
This fallback implementation was originally formulated in regex many moons ago:
https://github.com/rust-lang/regex/blob/3db8722d0b204a85380fe2a65e13d7065d7dd968/src/literal/imp.rs#L370-L501
Prior to that, I'm not aware of anyone using this technique in any prominent
substring search implementation. Although, I'm sure folks have had this same
insight long before me.
Another version of this also appeared in bstr:
https://github.com/BurntSushi/bstr/blob/a444256ca7407fe180ee32534688549655b7a38e/src/search/prefilter.rs#L83-L340
*/
use crate::memmem::{
prefilter::{PrefilterFnTy, PrefilterState},
NeedleInfo,
};
// Check that the functions below satisfy the Prefilter function type.
const _: PrefilterFnTy = find;
/// Look for a possible occurrence of needle. The position returned
/// corresponds to the beginning of the occurrence, if one exists.
///
/// Callers may assume that this never returns false negatives (i.e., it
/// never misses an actual occurrence), but must check that the returned
/// position corresponds to a match. That is, it can return false
/// positives.
///
/// This should only be used when Freqy is constructed for forward
/// searching.
pub(crate) fn find(
prestate: &mut PrefilterState,
ninfo: &NeedleInfo,
haystack: &[u8],
needle: &[u8],
) -> Option<usize>
|
#[cfg(all(test, feature = "std"))]
mod tests {
use super::*;
fn freqy_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
let ninfo = NeedleInfo::new(needle);
let mut prestate = PrefilterState::new();
find(&mut prestate, &ninfo, haystack, needle)
}
#[test]
fn freqy_forward() {
assert_eq!(Some(0), freqy_find(b"BARFOO", b"BAR"));
assert_eq!(Some(3), freqy_find(b"FOOBAR", b"BAR"));
assert_eq!(Some(0), freqy_find(b"zyzz", b"zyzy"));
assert_eq!(Some(2), freqy_find(b"zzzy", b"zyzy"));
assert_eq!(None, freqy_find(b"zazb", b"zyzy"));
assert_eq!(Some(0), freqy_find(b"yzyy", b"yzyz"));
assert_eq!(Some(2), freqy_find(b"yyyz", b"yzyz"));
assert_eq!(None, freqy_find(b"yayb", b"yzyz"));
}
#[test]
#[cfg(not(miri))]
fn prefilter_permutations() {
use crate::memmem::prefilter::tests::PrefilterTest;
// SAFETY: super::find is safe to call for all inputs and on all
// platforms.
unsafe { PrefilterTest::run_all_tests(super::find) };
}
}
|
{
let mut i = 0;
let (rare1i, rare2i) = ninfo.rarebytes.as_rare_usize();
let (rare1, rare2) = ninfo.rarebytes.as_rare_bytes(needle);
while prestate.is_effective() {
// Use a fast vectorized implementation to skip to the next
// occurrence of the rarest byte (heuristically chosen) in the
// needle.
let found = crate::memchr(rare1, &haystack[i..])?;
prestate.update(found);
i += found;
// If we can't align our first match with the haystack, then a
// match is impossible.
if i < rare1i {
i += 1;
continue;
}
// Align our rare2 byte with the haystack. A mismatch means that
// a match is impossible.
let aligned_rare2i = i - rare1i + rare2i;
if haystack.get(aligned_rare2i) != Some(&rare2) {
i += 1;
continue;
}
// We've done what we can. There might be a match here.
return Some(i - rare1i);
}
// The only way we get here is if we believe our skipping heuristic
// has become ineffective. We're allowed to return false positives,
// so return the position at which we advanced to, aligned to the
// haystack.
Some(i.saturating_sub(rare1i))
}
|
identifier_body
|
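The module comment in fallback.rs describes the whole algorithm: memchr for the rarest needle byte, a second rare byte as a cheap guard, and only then a full comparison. A compact TypeScript sketch of that candidate loop, with indexOf standing in for memchr and the PrefilterState effectiveness accounting omitted (rare-byte selection is assumed to have happened already):
// Returns a position where the needle *might* start; the caller still confirms
// with a full compare, exactly as the Rust prefilter contract requires.
function prefilterFind(
  haystack: Uint8Array, needle: Uint8Array,
  rare1i: number, rare2i: number,          // offsets of the two rare bytes in the needle
): number | null {
  const rare1 = needle[rare1i], rare2 = needle[rare2i];
  let i = 0;
  while (i < haystack.length) {
    const found = haystack.indexOf(rare1, i);        // "memchr" on the rarest byte
    if (found < 0) return null;
    i = found;
    if (i < rare1i) { i++; continue; }               // needle start would fall before the haystack
    if (haystack[i - rare1i + rare2i] !== rare2) {   // guard byte mismatch => no match here
      i++; continue;
    }
    return i - rare1i;                               // plausible candidate
  }
  return null;
}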
fallback.rs
|
/*
This module implements a "fallback" prefilter that only relies on memchr to
function. While memchr works best when it's explicitly vectorized, its
fallback implementations are fast enough to make a prefilter like this
worthwhile.
The essence of this implementation is to identify two rare bytes in a needle
based on a background frequency distribution of bytes. We then run memchr on the
rarer byte. For each match, we use the second rare byte as a guard to quickly
check if a match is possible. If the position passes the guard test, then we do
a naive memcmp to confirm the match.
In practice, this formulation works amazingly well, primarily because of the
heuristic use of a background frequency distribution. However, it does have a
number of weaknesses where it can get quite slow when its background frequency
distribution doesn't line up with the haystack being searched. This is why we
have specialized vector routines that essentially take this idea and move the
guard check into vectorized code. (Those specialized vector routines do still
make use of the background frequency distribution of bytes though.)
This fallback implementation was originally formulated in regex many moons ago:
https://github.com/rust-lang/regex/blob/3db8722d0b204a85380fe2a65e13d7065d7dd968/src/literal/imp.rs#L370-L501
Prior to that, I'm not aware of anyone using this technique in any prominent
substring search implementation. Although, I'm sure folks have had this same
insight long before me.
Another version of this also appeared in bstr:
https://github.com/BurntSushi/bstr/blob/a444256ca7407fe180ee32534688549655b7a38e/src/search/prefilter.rs#L83-L340
*/
use crate::memmem::{
prefilter::{PrefilterFnTy, PrefilterState},
NeedleInfo,
};
// Check that the functions below satisfy the Prefilter function type.
const _: PrefilterFnTy = find;
/// Look for a possible occurrence of needle. The position returned
/// corresponds to the beginning of the occurrence, if one exists.
///
/// Callers may assume that this never returns false negatives (i.e., it
/// never misses an actual occurrence), but must check that the returned
/// position corresponds to a match. That is, it can return false
/// positives.
///
/// This should only be used when Freqy is constructed for forward
/// searching.
pub(crate) fn find(
prestate: &mut PrefilterState,
ninfo: &NeedleInfo,
haystack: &[u8],
needle: &[u8],
) -> Option<usize> {
let mut i = 0;
let (rare1i, rare2i) = ninfo.rarebytes.as_rare_usize();
let (rare1, rare2) = ninfo.rarebytes.as_rare_bytes(needle);
while prestate.is_effective() {
// Use a fast vectorized implementation to skip to the next
// occurrence of the rarest byte (heuristically chosen) in the
// needle.
let found = crate::memchr(rare1, &haystack[i..])?;
prestate.update(found);
i += found;
// If we can't align our first match with the haystack, then a
// match is impossible.
if i < rare1i {
i += 1;
continue;
}
// Align our rare2 byte with the haystack. A mismatch means that
// a match is impossible.
let aligned_rare2i = i - rare1i + rare2i;
if haystack.get(aligned_rare2i) != Some(&rare2)
|
// We've done what we can. There might be a match here.
return Some(i - rare1i);
}
// The only way we get here is if we believe our skipping heuristic
// has become ineffective. We're allowed to return false positives,
// so return the position at which we advanced to, aligned to the
// haystack.
Some(i.saturating_sub(rare1i))
}
#[cfg(all(test, feature = "std"))]
mod tests {
use super::*;
fn freqy_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
let ninfo = NeedleInfo::new(needle);
let mut prestate = PrefilterState::new();
find(&mut prestate, &ninfo, haystack, needle)
}
#[test]
fn freqy_forward() {
assert_eq!(Some(0), freqy_find(b"BARFOO", b"BAR"));
assert_eq!(Some(3), freqy_find(b"FOOBAR", b"BAR"));
assert_eq!(Some(0), freqy_find(b"zyzz", b"zyzy"));
assert_eq!(Some(2), freqy_find(b"zzzy", b"zyzy"));
assert_eq!(None, freqy_find(b"zazb", b"zyzy"));
assert_eq!(Some(0), freqy_find(b"yzyy", b"yzyz"));
assert_eq!(Some(2), freqy_find(b"yyyz", b"yzyz"));
assert_eq!(None, freqy_find(b"yayb", b"yzyz"));
}
#[test]
#[cfg(not(miri))]
fn prefilter_permutations() {
use crate::memmem::prefilter::tests::PrefilterTest;
// SAFETY: super::find is safe to call for all inputs and on all
// platforms.
unsafe { PrefilterTest::run_all_tests(super::find) };
}
}
|
{
i += 1;
continue;
}
|
conditional_block
|
fallback.rs
|
/*
This module implements a "fallback" prefilter that only relies on memchr to
function. While memchr works best when it's explicitly vectorized, its
fallback implementations are fast enough to make a prefilter like this
worthwhile.
The essence of this implementation is to identify two rare bytes in a needle
based on a background frequency distribution of bytes. We then run memchr on the
rarer byte. For each match, we use the second rare byte as a guard to quickly
check if a match is possible. If the position passes the guard test, then we do
a naive memcmp to confirm the match.
In practice, this formulation works amazingly well, primarily because of the
heuristic use of a background frequency distribution. However, it does have a
number of weaknesses where it can get quite slow when its background frequency
distribution doesn't line up with the haystack being searched. This is why we
have specialized vector routines that essentially take this idea and move the
guard check into vectorized code. (Those specialized vector routines do still
make use of the background frequency distribution of bytes though.)
This fallback implementation was originally formulated in regex many moons ago:
https://github.com/rust-lang/regex/blob/3db8722d0b204a85380fe2a65e13d7065d7dd968/src/literal/imp.rs#L370-L501
Prior to that, I'm not aware of anyone using this technique in any prominent
substring search implementation. Although, I'm sure folks have had this same
insight long before me.
Another version of this also appeared in bstr:
https://github.com/BurntSushi/bstr/blob/a444256ca7407fe180ee32534688549655b7a38e/src/search/prefilter.rs#L83-L340
*/
|
// Check that the functions below satisfy the Prefilter function type.
const _: PrefilterFnTy = find;
/// Look for a possible occurrence of needle. The position returned
/// corresponds to the beginning of the occurrence, if one exists.
///
/// Callers may assume that this never returns false negatives (i.e., it
/// never misses an actual occurrence), but must check that the returned
/// position corresponds to a match. That is, it can return false
/// positives.
///
/// This should only be used when Freqy is constructed for forward
/// searching.
pub(crate) fn find(
prestate: &mut PrefilterState,
ninfo: &NeedleInfo,
haystack: &[u8],
needle: &[u8],
) -> Option<usize> {
let mut i = 0;
let (rare1i, rare2i) = ninfo.rarebytes.as_rare_usize();
let (rare1, rare2) = ninfo.rarebytes.as_rare_bytes(needle);
while prestate.is_effective() {
// Use a fast vectorized implementation to skip to the next
// occurrence of the rarest byte (heuristically chosen) in the
// needle.
let found = crate::memchr(rare1, &haystack[i..])?;
prestate.update(found);
i += found;
// If we can't align our first match with the haystack, then a
// match is impossible.
if i < rare1i {
i += 1;
continue;
}
// Align our rare2 byte with the haystack. A mismatch means that
// a match is impossible.
let aligned_rare2i = i - rare1i + rare2i;
if haystack.get(aligned_rare2i) != Some(&rare2) {
i += 1;
continue;
}
// We've done what we can. There might be a match here.
return Some(i - rare1i);
}
// The only way we get here is if we believe our skipping heuristic
// has become ineffective. We're allowed to return false positives,
// so return the position at which we advanced to, aligned to the
// haystack.
Some(i.saturating_sub(rare1i))
}
#[cfg(all(test, feature = "std"))]
mod tests {
use super::*;
fn freqy_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
let ninfo = NeedleInfo::new(needle);
let mut prestate = PrefilterState::new();
find(&mut prestate, &ninfo, haystack, needle)
}
#[test]
fn freqy_forward() {
assert_eq!(Some(0), freqy_find(b"BARFOO", b"BAR"));
assert_eq!(Some(3), freqy_find(b"FOOBAR", b"BAR"));
assert_eq!(Some(0), freqy_find(b"zyzz", b"zyzy"));
assert_eq!(Some(2), freqy_find(b"zzzy", b"zyzy"));
assert_eq!(None, freqy_find(b"zazb", b"zyzy"));
assert_eq!(Some(0), freqy_find(b"yzyy", b"yzyz"));
assert_eq!(Some(2), freqy_find(b"yyyz", b"yzyz"));
assert_eq!(None, freqy_find(b"yayb", b"yzyz"));
}
#[test]
#[cfg(not(miri))]
fn prefilter_permutations() {
use crate::memmem::prefilter::tests::PrefilterTest;
// SAFETY: super::find is safe to call for all inputs and on all
// platforms.
unsafe { PrefilterTest::run_all_tests(super::find) };
}
}
|
use crate::memmem::{
prefilter::{PrefilterFnTy, PrefilterState},
NeedleInfo,
};
|
random_line_split
|
fallback.rs
|
/*
This module implements a "fallback" prefilter that only relies on memchr to
function. While memchr works best when it's explicitly vectorized, its
fallback implementations are fast enough to make a prefilter like this
worthwhile.
The essence of this implementation is to identify two rare bytes in a needle
based on a background frequency distribution of bytes. We then run memchr on the
rarer byte. For each match, we use the second rare byte as a guard to quickly
check if a match is possible. If the position passes the guard test, then we do
a naive memcmp to confirm the match.
In practice, this formulation works amazingly well, primarily because of the
heuristic use of a background frequency distribution. However, it does have a
number of weaknesses where it can get quite slow when its background frequency
distribution doesn't line up with the haystack being searched. This is why we
have specialized vector routines that essentially take this idea and move the
guard check into vectorized code. (Those specialized vector routines do still
make use of the background frequency distribution of bytes though.)
This fallback implementation was originally formulated in regex many moons ago:
https://github.com/rust-lang/regex/blob/3db8722d0b204a85380fe2a65e13d7065d7dd968/src/literal/imp.rs#L370-L501
Prior to that, I'm not aware of anyone using this technique in any prominent
substring search implementation. Although, I'm sure folks have had this same
insight long before me.
Another version of this also appeared in bstr:
https://github.com/BurntSushi/bstr/blob/a444256ca7407fe180ee32534688549655b7a38e/src/search/prefilter.rs#L83-L340
*/
use crate::memmem::{
prefilter::{PrefilterFnTy, PrefilterState},
NeedleInfo,
};
// Check that the functions below satisfy the Prefilter function type.
const _: PrefilterFnTy = find;
/// Look for a possible occurrence of needle. The position returned
/// corresponds to the beginning of the occurrence, if one exists.
///
/// Callers may assume that this never returns false negatives (i.e., it
/// never misses an actual occurrence), but must check that the returned
/// position corresponds to a match. That is, it can return false
/// positives.
///
/// This should only be used when Freqy is constructed for forward
/// searching.
pub(crate) fn
|
(
prestate: &mut PrefilterState,
ninfo: &NeedleInfo,
haystack: &[u8],
needle: &[u8],
) -> Option<usize> {
let mut i = 0;
let (rare1i, rare2i) = ninfo.rarebytes.as_rare_usize();
let (rare1, rare2) = ninfo.rarebytes.as_rare_bytes(needle);
while prestate.is_effective() {
// Use a fast vectorized implementation to skip to the next
// occurrence of the rarest byte (heuristically chosen) in the
// needle.
let found = crate::memchr(rare1, &haystack[i..])?;
prestate.update(found);
i += found;
// If we can't align our first match with the haystack, then a
// match is impossible.
if i < rare1i {
i += 1;
continue;
}
// Align our rare2 byte with the haystack. A mismatch means that
// a match is impossible.
let aligned_rare2i = i - rare1i + rare2i;
if haystack.get(aligned_rare2i) != Some(&rare2) {
i += 1;
continue;
}
// We've done what we can. There might be a match here.
return Some(i - rare1i);
}
// The only way we get here is if we believe our skipping heuristic
// has become ineffective. We're allowed to return false positives,
// so return the position at which we advanced to, aligned to the
// haystack.
Some(i.saturating_sub(rare1i))
}
#[cfg(all(test, feature = "std"))]
mod tests {
use super::*;
fn freqy_find(haystack: &[u8], needle: &[u8]) -> Option<usize> {
let ninfo = NeedleInfo::new(needle);
let mut prestate = PrefilterState::new();
find(&mut prestate, &ninfo, haystack, needle)
}
#[test]
fn freqy_forward() {
assert_eq!(Some(0), freqy_find(b"BARFOO", b"BAR"));
assert_eq!(Some(3), freqy_find(b"FOOBAR", b"BAR"));
assert_eq!(Some(0), freqy_find(b"zyzz", b"zyzy"));
assert_eq!(Some(2), freqy_find(b"zzzy", b"zyzy"));
assert_eq!(None, freqy_find(b"zazb", b"zyzy"));
assert_eq!(Some(0), freqy_find(b"yzyy", b"yzyz"));
assert_eq!(Some(2), freqy_find(b"yyyz", b"yzyz"));
assert_eq!(None, freqy_find(b"yayb", b"yzyz"));
}
#[test]
#[cfg(not(miri))]
fn prefilter_permutations() {
use crate::memmem::prefilter::tests::PrefilterTest;
// SAFETY: super::find is safe to call for all inputs and on all
// platforms.
unsafe { PrefilterTest::run_all_tests(super::find) };
}
}
|
find
|
identifier_name
|
branch_implpermissions.rs
|
/*
* Swaggy Jenkins
*
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
pub struct BranchImplpermissions {
#[serde(rename = "create", skip_serializing_if = "Option::is_none")]
pub create: Option<bool>,
#[serde(rename = "read", skip_serializing_if = "Option::is_none")]
pub read: Option<bool>,
#[serde(rename = "start", skip_serializing_if = "Option::is_none")]
pub start: Option<bool>,
#[serde(rename = "stop", skip_serializing_if = "Option::is_none")]
pub stop: Option<bool>,
#[serde(rename = "_class", skip_serializing_if = "Option::is_none")]
pub _class: Option<String>,
}
impl BranchImplpermissions {
pub fn
|
() -> BranchImplpermissions {
BranchImplpermissions {
create: None,
read: None,
start: None,
stop: None,
_class: None,
}
}
}
|
new
|
identifier_name
|
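The serde attributes on BranchImplpermissions rename _class on the wire and skip None fields entirely, so the JSON is a flat object with all-optional keys. A TypeScript mirror of that wire shape, for illustration:
// JSON shape produced and consumed by BranchImplpermissions (every field optional).
interface BranchImplPermissions {
  create?: boolean;
  read?: boolean;
  start?: boolean;
  stop?: boolean;
  _class?: string;   // serialized under "_class", per the serde rename
}
const perms: BranchImplPermissions = JSON.parse('{"read": true, "start": false}');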
branch_implpermissions.rs
|
/*
* Swaggy Jenkins
|
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)]
pub struct BranchImplpermissions {
#[serde(rename = "create", skip_serializing_if = "Option::is_none")]
pub create: Option<bool>,
#[serde(rename = "read", skip_serializing_if = "Option::is_none")]
pub read: Option<bool>,
#[serde(rename = "start", skip_serializing_if = "Option::is_none")]
pub start: Option<bool>,
#[serde(rename = "stop", skip_serializing_if = "Option::is_none")]
pub stop: Option<bool>,
#[serde(rename = "_class", skip_serializing_if = "Option::is_none")]
pub _class: Option<String>,
}
impl BranchImplpermissions {
pub fn new() -> BranchImplpermissions {
BranchImplpermissions {
create: None,
read: None,
start: None,
stop: None,
_class: None,
}
}
}
|
*
|
random_line_split
|
TextArea.tsx
|
import * as React from 'react';
interface TextAreaProps extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {
innerRef?: React.Ref<HTMLTextAreaElement>;
autoResize?: boolean;
minHeight?: string
}
export default function TextArea(p: TextAreaProps) {
function handleResize(ta: HTMLTextAreaElement) {
ta.style.height = "0";
ta.style.height = ta.scrollHeight + 'px';
ta.style.minHeight = p.minHeight!;
ta.scrollTop = ta.scrollHeight;
}
|
const handleRef = React.useCallback((a: HTMLTextAreaElement | null) => {
a && handleResize(a);
innerRef && (typeof innerRef == "function" ? innerRef(a) : (innerRef as any).current = a);
}, [innerRef, minHeight]);
return (
<textarea onInput={autoResize ? (e => handleResize(e.currentTarget)) : undefined} style={
{
...(autoResize ? { display: "block", overflow: "hidden", resize: "none" } : {}),
...props.style
}
} {...props} ref={handleRef} />
);
}
TextArea.defaultProps = { autoResize: true, minHeight: "50px" };
|
const { autoResize, innerRef, minHeight, ...props } = p;
|
random_line_split
|
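handleResize in TextArea.tsx uses the standard auto-grow trick: collapse the height to 0, then set it to scrollHeight so the textarea always fits its content. A minimal usage sketch, assuming the component above is the default export of './TextArea':
import * as React from 'react';
import TextArea from './TextArea';  // the component defined above
function CommentBox() {
  const ref = React.useRef<HTMLTextAreaElement>(null);
  // Grows with its content because autoResize defaults to true; minHeight caps the collapse.
  return <TextArea innerRef={ref} minHeight="80px" placeholder="Write a comment" />;
}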
TextArea.tsx
|
import * as React from 'react';
interface TextAreaProps extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {
innerRef?: React.Ref<HTMLTextAreaElement>;
autoResize?: boolean;
minHeight?: string
}
export default function TextArea(p: TextAreaProps)
|
TextArea.defaultProps = { autoResize: true, minHeight: "50px" };
|
{
function handleResize(ta: HTMLTextAreaElement) {
ta.style.height = "0";
ta.style.height = ta.scrollHeight + 'px';
ta.style.minHeight = p.minHeight!;
ta.scrollTop = ta.scrollHeight;
}
const { autoResize, innerRef, minHeight, ...props } = p;
const handleRef = React.useCallback((a: HTMLTextAreaElement | null) => {
a && handleResize(a);
innerRef && (typeof innerRef == "function" ? innerRef(a) : (innerRef as any).current = a);
}, [innerRef, minHeight]);
return (
<textarea onInput={autoResize ? (e => handleResize(e.currentTarget)) : undefined} style={
{
...(autoResize ? { display: "block", overflow: "hidden", resize: "none" } : {}),
...props.style
}
} {...props} ref={handleRef} />
);
}
|
identifier_body
|
TextArea.tsx
|
import * as React from 'react';
interface TextAreaProps extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {
innerRef?: React.Ref<HTMLTextAreaElement>;
autoResize?: boolean;
minHeight?: string
}
export default function TextArea(p: TextAreaProps) {
function
|
(ta: HTMLTextAreaElement) {
ta.style.height = "0";
ta.style.height = ta.scrollHeight + 'px';
ta.style.minHeight = p.minHeight!;
ta.scrollTop = ta.scrollHeight;
}
const { autoResize, innerRef, minHeight, ...props } = p;
const handleRef = React.useCallback((a: HTMLTextAreaElement | null) => {
a && handleResize(a);
innerRef && (typeof innerRef == "function" ? innerRef(a) : (innerRef as any).current = a);
}, [innerRef, minHeight]);
return (
<textarea onInput={autoResize ? (e => handleResize(e.currentTarget)) : undefined} style={
{
...(autoResize ? { display: "block", overflow: "hidden", resize: "none" } : {}),
...props.style
}
} {...props} ref={handleRef} />
);
}
TextArea.defaultProps = { autoResize: true, minHeight: "50px" };
|
handleResize
|
identifier_name
|
_health.service.ts
|
<%#
Copyright 2013-2018 the original author or authors from the JHipster project.
This file is part of the JHipster project, see http://www.jhipster.tech/
for more information.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-%>
import { Injectable } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { Observable } from 'rxjs/Observable';
import { SERVER_API_URL } from '../../app.constants';
@Injectable()
export class <%=jhiPrefixCapitalized%>HealthService {
separator: string;
constructor(private http: HttpClient) {
this.separator = '.';
}
checkHealth(): Observable<any> {
return this.http.get(SERVER_API_URL + 'management/health');
}
transformHealthData(data): any {
const response = [];
this.flattenHealthData(response, null, data);
return response;
}
getBaseName(name): string {
if (name) {
const split = name.split('.');
return split[0];
}
|
getSubSystemName(name): string {
if (name) {
const split = name.split('.');
split.splice(0, 1);
const remainder = split.join('.');
return remainder ? ' - ' + remainder : '';
}
}
/* private methods */
private addHealthObject(result, isLeaf, healthObject, name): any {
const healthData: any = {
name
};
const details = {};
let hasDetails = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
const value = healthObject[key];
if (key === 'status' || key === 'error') {
healthData[key] = value;
} else {
if (!this.isHealthObject(value)) {
details[key] = value;
hasDetails = true;
}
}
}
}
// Add the details
if (hasDetails) {
healthData.details = details;
}
// Only add nodes if they provide additional information
if (isLeaf || hasDetails || healthData.error) {
result.push(healthData);
}
return healthData;
}
private flattenHealthData(result, path, data): any {
for (const key in data) {
if (data.hasOwnProperty(key)) {
const value = data[key];
if (this.isHealthObject(value)) {
if (this.hasSubSystem(value)) {
this.addHealthObject(result, false, value, this.getModuleName(path, key));
this.flattenHealthData(result, this.getModuleName(path, key), value);
} else {
this.addHealthObject(result, true, value, this.getModuleName(path, key));
}
}
}
}
return result;
}
private getModuleName(path, name): string {
let result;
if (path && name) {
result = path + this.separator + name;
} else if (path) {
result = path;
} else if (name) {
result = name;
} else {
result = '';
}
return result;
}
private hasSubSystem(healthObject): boolean {
let result = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
const value = healthObject[key];
if (value && value.status) {
result = true;
}
}
}
return result;
}
private isHealthObject(healthObject): boolean {
let result = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
if (key === 'status') {
result = true;
}
}
}
return result;
}
}
|
}
|
random_line_split
|
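transformHealthData in _health.service.ts walks the nested health payload and emits one row per subsystem, treating anything with a status key as a health object and joining names with dots. A stripped-down TypeScript sketch of that recursion (it keeps every node and its plain-value details, leaving out the original's leaf/error filtering):
interface HealthRow { name: string; status?: string; details: Record<string, unknown>; }
const isHealth = (v: any): boolean => v !== null && typeof v === 'object' && 'status' in v;
function flatten(data: Record<string, any>, path = '', out: HealthRow[] = []): HealthRow[] {
  for (const [key, value] of Object.entries(data)) {
    if (!isHealth(value)) continue;
    const name = path ? `${path}.${key}` : key;           // '.' separator, as in getModuleName
    const details: Record<string, unknown> = {};
    for (const [k, v] of Object.entries(value)) {
      if (k !== 'status' && !isHealth(v)) details[k] = v; // plain values only, as in addHealthObject
    }
    out.push({ name, status: value.status, details });
    flatten(value, name, out);                            // descend into nested subsystems
  }
  return out;
}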
_health.service.ts
|
<%#
Copyright 2013-2018 the original author or authors from the JHipster project.
This file is part of the JHipster project, see http://www.jhipster.tech/
for more information.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-%>
import { Injectable } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { Observable } from 'rxjs/Observable';
import { SERVER_API_URL } from '../../app.constants';
@Injectable()
export class <%=jhiPrefixCapitalized%>HealthService {
separator: string;
constructor(private http: HttpClient) {
this.separator = '.';
}
checkHealth(): Observable<any> {
return this.http.get(SERVER_API_URL + 'management/health');
}
transformHealthData(data): any {
const response = [];
this.flattenHealthData(response, null, data);
return response;
}
getBaseName(name): string {
if (name) {
const split = name.split('.');
return split[0];
}
}
getSubSystemName(name): string {
if (name) {
const split = name.split('.');
split.splice(0, 1);
const remainder = split.join('.');
return remainder ? ' - ' + remainder : '';
}
}
/* private methods */
private addHealthObject(result, isLeaf, healthObject, name): any {
const healthData: any = {
name
};
const details = {};
let hasDetails = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
const value = healthObject[key];
if (key === 'status' || key === 'error') {
healthData[key] = value;
} else {
if (!this.isHealthObject(value))
|
}
}
}
// Add the details
if (hasDetails) {
healthData.details = details;
}
// Only add nodes if they provide additional information
if (isLeaf || hasDetails || healthData.error) {
result.push(healthData);
}
return healthData;
}
private flattenHealthData(result, path, data): any {
for (const key in data) {
if (data.hasOwnProperty(key)) {
const value = data[key];
if (this.isHealthObject(value)) {
if (this.hasSubSystem(value)) {
this.addHealthObject(result, false, value, this.getModuleName(path, key));
this.flattenHealthData(result, this.getModuleName(path, key), value);
} else {
this.addHealthObject(result, true, value, this.getModuleName(path, key));
}
}
}
}
return result;
}
private getModuleName(path, name): string {
let result;
if (path && name) {
result = path + this.separator + name;
} else if (path) {
result = path;
} else if (name) {
result = name;
} else {
result = '';
}
return result;
}
private hasSubSystem(healthObject): boolean {
let result = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
const value = healthObject[key];
if (value && value.status) {
result = true;
}
}
}
return result;
}
private isHealthObject(healthObject): boolean {
let result = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
if (key === 'status') {
result = true;
}
}
}
return result;
}
}
|
{
details[key] = value;
hasDetails = true;
}
|
conditional_block
|
_health.service.ts
|
<%#
Copyright 2013-2018 the original author or authors from the JHipster project.
This file is part of the JHipster project, see http://www.jhipster.tech/
for more information.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-%>
import { Injectable } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { Observable } from 'rxjs/Observable';
import { SERVER_API_URL } from '../../app.constants';
@Injectable()
export class <%=jhiPrefixCapitalized%>HealthService {
separator: string;
constructor(private http: HttpClient) {
this.separator = '.';
}
checkHealth(): Observable<any> {
return this.http.get(SERVER_API_URL + 'management/health');
}
transformHealthData(data): any {
const response = [];
this.flattenHealthData(response, null, data);
return response;
}
getBaseName(name): string {
if (name) {
const split = name.split('.');
return split[0];
}
}
getSubSystemName(name): string
|
/* private methods */
private addHealthObject(result, isLeaf, healthObject, name): any {
const healthData: any = {
name
};
const details = {};
let hasDetails = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
const value = healthObject[key];
if (key === 'status' || key === 'error') {
healthData[key] = value;
} else {
if (!this.isHealthObject(value)) {
details[key] = value;
hasDetails = true;
}
}
}
}
// Add the details
if (hasDetails) {
healthData.details = details;
}
// Only add nodes if they provide additional information
if (isLeaf || hasDetails || healthData.error) {
result.push(healthData);
}
return healthData;
}
private flattenHealthData(result, path, data): any {
for (const key in data) {
if (data.hasOwnProperty(key)) {
const value = data[key];
if (this.isHealthObject(value)) {
if (this.hasSubSystem(value)) {
this.addHealthObject(result, false, value, this.getModuleName(path, key));
this.flattenHealthData(result, this.getModuleName(path, key), value);
} else {
this.addHealthObject(result, true, value, this.getModuleName(path, key));
}
}
}
}
return result;
}
private getModuleName(path, name): string {
let result;
if (path && name) {
result = path + this.separator + name;
} else if (path) {
result = path;
} else if (name) {
result = name;
} else {
result = '';
}
return result;
}
private hasSubSystem(healthObject): boolean {
let result = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
const value = healthObject[key];
if (value && value.status) {
result = true;
}
}
}
return result;
}
private isHealthObject(healthObject): boolean {
let result = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
if (key === 'status') {
result = true;
}
}
}
return result;
}
}
|
{
if (name) {
const split = name.split('.');
split.splice(0, 1);
const remainder = split.join('.');
return remainder ? ' - ' + remainder : '';
}
}
|
identifier_body
|
_health.service.ts
|
<%#
Copyright 2013-2018 the original author or authors from the JHipster project.
This file is part of the JHipster project, see http://www.jhipster.tech/
for more information.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-%>
import { Injectable } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { Observable } from 'rxjs/Observable';
import { SERVER_API_URL } from '../../app.constants';
@Injectable()
export class <%=jhiPrefixCapitalized%>HealthService {
separator: string;
constructor(private http: HttpClient) {
this.separator = '.';
}
checkHealth(): Observable<any> {
return this.http.get(SERVER_API_URL + 'management/health');
}
transformHealthData(data): any {
const response = [];
this.flattenHealthData(response, null, data);
return response;
}
getBaseName(name): string {
if (name) {
const split = name.split('.');
return split[0];
}
}
|
(name): string {
if (name) {
const split = name.split('.');
split.splice(0, 1);
const remainder = split.join('.');
return remainder ? ' - ' + remainder : '';
}
}
/* private methods */
private addHealthObject(result, isLeaf, healthObject, name): any {
const healthData: any = {
name
};
const details = {};
let hasDetails = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
const value = healthObject[key];
if (key === 'status' || key === 'error') {
healthData[key] = value;
} else {
if (!this.isHealthObject(value)) {
details[key] = value;
hasDetails = true;
}
}
}
}
// Add the details
if (hasDetails) {
healthData.details = details;
}
// Only add nodes if they provide additional information
if (isLeaf || hasDetails || healthData.error) {
result.push(healthData);
}
return healthData;
}
private flattenHealthData(result, path, data): any {
for (const key in data) {
if (data.hasOwnProperty(key)) {
const value = data[key];
if (this.isHealthObject(value)) {
if (this.hasSubSystem(value)) {
this.addHealthObject(result, false, value, this.getModuleName(path, key));
this.flattenHealthData(result, this.getModuleName(path, key), value);
} else {
this.addHealthObject(result, true, value, this.getModuleName(path, key));
}
}
}
}
return result;
}
private getModuleName(path, name): string {
let result;
if (path && name) {
result = path + this.separator + name;
} else if (path) {
result = path;
} else if (name) {
result = name;
} else {
result = '';
}
return result;
}
private hasSubSystem(healthObject): boolean {
let result = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
const value = healthObject[key];
if (value && value.status) {
result = true;
}
}
}
return result;
}
private isHealthObject(healthObject): boolean {
let result = false;
for (const key in healthObject) {
if (healthObject.hasOwnProperty(key)) {
if (key === 'status') {
result = true;
}
}
}
return result;
}
}
|
getSubSystemName
|
identifier_name
|
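The two _health.service.ts splits above reassemble into one JHipster health service; the flattening it performs is easier to follow outside the Angular plumbing. A minimal Python re-expression of the same algorithm follows (not part of the dataset; the sample payload is invented for illustration):

# Python sketch of the flattenHealthData/addHealthObject logic above.
SEPARATOR = "."

def is_health_object(obj):
    # A node is a health object iff it carries a "status" key.
    return isinstance(obj, dict) and "status" in obj

def has_sub_system(obj):
    # A node has subsystems iff some child value itself carries a status.
    return any(isinstance(v, dict) and v.get("status") for v in obj.values())

def add_health_object(result, is_leaf, obj, name):
    data = {"name": name}
    details = {}
    for key, value in obj.items():
        if key in ("status", "error"):
            data[key] = value
        elif not is_health_object(value):
            details[key] = value
    if details:
        data["details"] = details
    # Only keep nodes that add information, mirroring the TS version.
    if is_leaf or details or data.get("error"):
        result.append(data)

def flatten(result, path, data):
    for key, value in data.items():
        if is_health_object(value):
            name = f"{path}{SEPARATOR}{key}" if path else key
            if has_sub_system(value):
                add_health_object(result, False, value, name)
                flatten(result, name, value)
            else:
                add_health_object(result, True, value, name)
    return result

payload = {"db": {"status": "UP", "database": "H2"},          # invented sample
           "diskSpace": {"status": "UP", "free": 12345}}
print(flatten([], None, payload))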
log.rs
|
/* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// written by Pierre Chifflier <[email protected]>
use json::*;
use ikev2::ikev2::{IKEV2State,IKEV2Transaction};
use ikev2::ipsec_parser::IKEV2_FLAG_INITIATOR;
#[no_mangle]
pub extern "C" fn rs_ikev2_log_json_response(state: &mut IKEV2State, tx: &mut IKEV2Transaction) -> *mut JsonT
{
let js = Json::object();
js.set_integer("version_major", tx.hdr.maj_ver as u64);
js.set_integer("version_minor", tx.hdr.min_ver as u64);
js.set_integer("exchange_type", tx.hdr.exch_type.0 as u64);
js.set_integer("message_id", tx.hdr.msg_id as u64);
js.set_string("init_spi", &format!("{:016x}", tx.hdr.init_spi));
js.set_string("resp_spi", &format!("{:016x}", tx.hdr.resp_spi));
if tx.hdr.flags & IKEV2_FLAG_INITIATOR != 0 {
js.set_string("role", &"initiator");
} else
|
js.set_integer("errors", tx.errors as u64);
let jsa = Json::array();
for payload in tx.payload_types.iter() {
jsa.array_append_string(&format!("{:?}", payload));
}
js.set("payload", jsa);
let jsa = Json::array();
for notify in tx.notify_types.iter() {
jsa.array_append_string(&format!("{:?}", notify));
}
js.set("notify", jsa);
return js.unwrap();
}
|
{
js.set_string("role", &"responder");
js.set_string("alg_enc", &format!("{:?}", state.alg_enc));
js.set_string("alg_auth", &format!("{:?}", state.alg_auth));
js.set_string("alg_prf", &format!("{:?}", state.alg_prf));
js.set_string("alg_dh", &format!("{:?}", state.alg_dh));
js.set_string("alg_esn", &format!("{:?}", state.alg_esn));
}
|
conditional_block
|
log.rs
|
/* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// written by Pierre Chifflier <[email protected]>
use json::*;
use ikev2::ikev2::{IKEV2State,IKEV2Transaction};
use ikev2::ipsec_parser::IKEV2_FLAG_INITIATOR;
#[no_mangle]
pub extern "C" fn rs_ikev2_log_json_response(state: &mut IKEV2State, tx: &mut IKEV2Transaction) -> *mut JsonT
{
let js = Json::object();
js.set_integer("version_major", tx.hdr.maj_ver as u64);
js.set_integer("version_minor", tx.hdr.min_ver as u64);
js.set_integer("exchange_type", tx.hdr.exch_type.0 as u64);
js.set_integer("message_id", tx.hdr.msg_id as u64);
js.set_string("init_spi", &format!("{:016x}", tx.hdr.init_spi));
js.set_string("resp_spi", &format!("{:016x}", tx.hdr.resp_spi));
if tx.hdr.flags & IKEV2_FLAG_INITIATOR != 0 {
js.set_string("role", &"initiator");
} else {
js.set_string("role", &"responder");
js.set_string("alg_enc", &format!("{:?}", state.alg_enc));
js.set_string("alg_auth", &format!("{:?}", state.alg_auth));
js.set_string("alg_prf", &format!("{:?}", state.alg_prf));
js.set_string("alg_dh", &format!("{:?}", state.alg_dh));
js.set_string("alg_esn", &format!("{:?}", state.alg_esn));
}
js.set_integer("errors", tx.errors as u64);
let jsa = Json::array();
for payload in tx.payload_types.iter() {
jsa.array_append_string(&format!("{:?}", payload));
}
js.set("payload", jsa);
let jsa = Json::array();
for notify in tx.notify_types.iter() {
jsa.array_append_string(&format!("{:?}", notify));
}
js.set("notify", jsa);
|
}
|
return js.unwrap();
|
random_line_split
|
log.rs
|
/* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// written by Pierre Chifflier <[email protected]>
use json::*;
use ikev2::ikev2::{IKEV2State,IKEV2Transaction};
use ikev2::ipsec_parser::IKEV2_FLAG_INITIATOR;
#[no_mangle]
pub extern "C" fn rs_ikev2_log_json_response(state: &mut IKEV2State, tx: &mut IKEV2Transaction) -> *mut JsonT
|
{
let js = Json::object();
js.set_integer("version_major", tx.hdr.maj_ver as u64);
js.set_integer("version_minor", tx.hdr.min_ver as u64);
js.set_integer("exchange_type", tx.hdr.exch_type.0 as u64);
js.set_integer("message_id", tx.hdr.msg_id as u64);
js.set_string("init_spi", &format!("{:016x}", tx.hdr.init_spi));
js.set_string("resp_spi", &format!("{:016x}", tx.hdr.resp_spi));
if tx.hdr.flags & IKEV2_FLAG_INITIATOR != 0 {
js.set_string("role", &"initiator");
} else {
js.set_string("role", &"responder");
js.set_string("alg_enc", &format!("{:?}", state.alg_enc));
js.set_string("alg_auth", &format!("{:?}", state.alg_auth));
js.set_string("alg_prf", &format!("{:?}", state.alg_prf));
js.set_string("alg_dh", &format!("{:?}", state.alg_dh));
js.set_string("alg_esn", &format!("{:?}", state.alg_esn));
}
js.set_integer("errors", tx.errors as u64);
let jsa = Json::array();
for payload in tx.payload_types.iter() {
jsa.array_append_string(&format!("{:?}", payload));
}
js.set("payload", jsa);
let jsa = Json::array();
for notify in tx.notify_types.iter() {
jsa.array_append_string(&format!("{:?}", notify));
}
js.set("notify", jsa);
return js.unwrap();
}
|
identifier_body
|
|
log.rs
|
/* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
// written by Pierre Chifflier <[email protected]>
use json::*;
use ikev2::ikev2::{IKEV2State,IKEV2Transaction};
use ikev2::ipsec_parser::IKEV2_FLAG_INITIATOR;
#[no_mangle]
pub extern "C" fn
|
(state: &mut IKEV2State, tx: &mut IKEV2Transaction) -> *mut JsonT
{
let js = Json::object();
js.set_integer("version_major", tx.hdr.maj_ver as u64);
js.set_integer("version_minor", tx.hdr.min_ver as u64);
js.set_integer("exchange_type", tx.hdr.exch_type.0 as u64);
js.set_integer("message_id", tx.hdr.msg_id as u64);
js.set_string("init_spi", &format!("{:016x}", tx.hdr.init_spi));
js.set_string("resp_spi", &format!("{:016x}", tx.hdr.resp_spi));
if tx.hdr.flags & IKEV2_FLAG_INITIATOR != 0 {
js.set_string("role", &"initiator");
} else {
js.set_string("role", &"responder");
js.set_string("alg_enc", &format!("{:?}", state.alg_enc));
js.set_string("alg_auth", &format!("{:?}", state.alg_auth));
js.set_string("alg_prf", &format!("{:?}", state.alg_prf));
js.set_string("alg_dh", &format!("{:?}", state.alg_dh));
js.set_string("alg_esn", &format!("{:?}", state.alg_esn));
}
js.set_integer("errors", tx.errors as u64);
let jsa = Json::array();
for payload in tx.payload_types.iter() {
jsa.array_append_string(&format!("{:?}", payload));
}
js.set("payload", jsa);
let jsa = Json::array();
for notify in tx.notify_types.iter() {
jsa.array_append_string(&format!("{:?}", notify));
}
js.set("notify", jsa);
return js.unwrap();
}
|
rs_ikev2_log_json_response
|
identifier_name
|
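All four log.rs splits above carve up the same Suricata IKEv2 JSON logger. For orientation, a record assembled by rs_ikev2_log_json_response has roughly the shape below, shown as a Python literal with invented field values (initiator case, so the responder-only alg_* fields are absent):

# Hypothetical example of the JSON object the Rust logger builds.
record = {
    "version_major": 2,
    "version_minor": 0,
    "exchange_type": 34,              # e.g. IKE_SA_INIT
    "message_id": 0,
    "init_spi": "0011223344556677",   # formatted with {:016x}
    "resp_spi": "0000000000000000",
    "role": "initiator",              # IKEV2_FLAG_INITIATOR was set
    "errors": 0,
    "payload": ["SecurityAssociation", "KeyExchange", "Nonce"],
    "notify": [],
}
print(record)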
twilio-mnc-mcc-getter.py
|
import logging
import os
from twilio.rest import Client
class TwilioClient(object):
def __init__(self):
self.logger = logging.getLogger("botosan.logger")
self.account_sid = os.environ["TWILIO_SID"]
self.account_token = os.environ["TWILIO_TOKEN"]
|
:param phone_number: The phone number, containing the +CC Number, ex: +12345678901 for the US.
:return: a tuple containing the mcc and mnc
"""
number = self.client.lookups.phone_numbers(phone_number).fetch(type="carrier")
self.logger.info(number.carrier['mobile_country_code'])
self.logger.info(number.carrier['mobile_network_code'])
return number.carrier['mobile_country_code'], number.carrier['mobile_network_code']
def get_available_numbers(self):
numbers = self.client.available_phone_numbers("GB").local.list(exclude_local_address_required=True)
print(len(numbers))
phone_numbers = []
for number in numbers:
phone_numbers.append(number.phone_number)
return phone_numbers
|
self.client = Client(self.account_sid, self.account_token)
def get_mcc_and_mnc(self, phone_number):
"""
Gets the Mobile Country Code and Mobile Network code for a given Twilio Number
|
random_line_split
|
twilio-mnc-mcc-getter.py
|
import logging
import os
from twilio.rest import Client
class TwilioClient(object):
def __init__(self):
self.logger = logging.getLogger("botosan.logger")
self.account_sid = os.environ["TWILIO_SID"]
self.account_token = os.environ["TWILIO_TOKEN"]
self.client = Client(self.account_sid, self.account_token)
def get_mcc_and_mnc(self, phone_number):
"""
Gets the Mobile Country Code and Mobile Network code for a given Twilio Number
:param phone_number: The phone number, containing the +CC Number, ex: +12345678901 for the US.
:return: a tuple containing the mcc and mnc
"""
number = self.client.lookups.phone_numbers(phone_number).fetch(type="carrier")
self.logger.info(number.carrier['mobile_country_code'])
self.logger.info(number.carrier['mobile_network_code'])
return number.carrier['mobile_country_code'], number.carrier['mobile_network_code']
def get_available_numbers(self):
numbers = self.client.available_phone_numbers("GB").local.list(exclude_local_address_required=True)
print(len(numbers))
phone_numbers = []
for number in numbers:
|
return phone_numbers
|
phone_numbers.append(number.phone_number)
|
conditional_block
|
twilio-mnc-mcc-getter.py
|
import logging
import os
from twilio.rest import Client
class TwilioClient(object):
def __init__(self):
self.logger = logging.getLogger("botosan.logger")
self.account_sid = os.environ["TWILIO_SID"]
self.account_token = os.environ["TWILIO_TOKEN"]
self.client = Client(self.account_sid, self.account_token)
def
|
(self, phone_number):
"""
Gets the Mobile Country Code and Mobile Network code for a given Twilio Number
:param phone_number: The phone number, containing the +CC Number, ex: +12345678901 for the US.
:return: a tuple containing the mcc and mnc
"""
number = self.client.lookups.phone_numbers(phone_number).fetch(type="carrier")
self.logger.info(number.carrier['mobile_country_code'])
self.logger.info(number.carrier['mobile_network_code'])
return number.carrier['mobile_country_code'], number.carrier['mobile_network_code']
def get_available_numbers(self):
numbers = self.client.available_phone_numbers("GB").local.list(exclude_local_address_required=True)
print(len(numbers))
phone_numbers = []
for number in numbers:
phone_numbers.append(number.phone_number)
return phone_numbers
|
get_mcc_and_mnc
|
identifier_name
|
twilio-mnc-mcc-getter.py
|
import logging
import os
from twilio.rest import Client
class TwilioClient(object):
def __init__(self):
self.logger = logging.getLogger("botosan.logger")
self.account_sid = os.environ["TWILIO_SID"]
self.account_token = os.environ["TWILIO_TOKEN"]
self.client = Client(self.account_sid, self.account_token)
def get_mcc_and_mnc(self, phone_number):
|
def get_available_numbers(self):
numbers = self.client.available_phone_numbers("GB").local.list(exclude_local_address_required=True)
print(len(numbers))
phone_numbers = []
for number in numbers:
phone_numbers.append(number.phone_number)
return phone_numbers
|
"""
Gets the Mobile Country Code and Mobile Network code for a given Twilio Number
:param phone_number: The phone number, containing the +CC Number, ex: +12345678901 for the US.
:return: a tuple containing the mcc and mnc
"""
number = self.client.lookups.phone_numbers(phone_number).fetch(type="carrier")
self.logger.info(number.carrier['mobile_country_code'])
self.logger.info(number.carrier['mobile_network_code'])
return number.carrier['mobile_country_code'], number.carrier['mobile_network_code']
|
identifier_body
|
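The four twilio-mnc-mcc-getter.py splits above reconstruct one small client. A minimal usage sketch, assuming TWILIO_SID and TWILIO_TOKEN are exported, the twilio package is installed, and the file has been renamed to an importable module (the module name and phone number below are placeholders):

import logging
from twilio_mnc_mcc_getter import TwilioClient  # hypothetical module name for the file above

logging.basicConfig(level=logging.INFO)          # surfaces the client's botosan.logger output
client = TwilioClient()                          # reads TWILIO_SID / TWILIO_TOKEN from the env
mcc, mnc = client.get_mcc_and_mnc("+12345678901")  # placeholder number from the docstring
print("mcc=%s mnc=%s" % (mcc, mnc))
print(client.get_available_numbers())            # GB numbers without local-address requirement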
vtkExporter.py
|
from yade import export,polyhedra_utils
mat = PolyhedraMat()
O.bodies.append([
sphere((0,0,0),1),
sphere((0,3,0),1),
sphere((0,2,4),2),
sphere((0,5,2),1.5),
facet([Vector3(0,-3,-1),Vector3(0,-2,5),Vector3(5,4,0)]),
facet([Vector3(0,-3,-1),Vector3(0,-2,5),Vector3(-5,4,0)]),
polyhedra_utils.polyhedra(mat,(1,2,3),0),
polyhedra_utils.polyhedralBall(2,20,mat,(-2,-2,4)),
])
O.bodies[-1].state.pos = (-2,-2,-2)
O.bodies[-1].state.ori = Quaternion((1,1,2),1)
O.bodies[-2].state.pos = (-2,-2,3)
O.bodies[-2].state.ori = Quaternion((1,2,0),1)
createInteraction(0,1)
createInteraction(0,2)
createInteraction(0,3)
createInteraction(1,2)
createInteraction(1,3)
createInteraction(2,3)
|
O.step()
vtkExporter = export.VTKExporter('vtkExporterTesting')
vtkExporter.exportSpheres(what=[('dist','b.state.pos.norm()')])
vtkExporter.exportFacets(what=[('pos','b.state.pos')])
vtkExporter.exportInteractions(what=[('kn','i.phys.kn')])
vtkExporter.exportPolyhedra(what=[('n','b.id')])
|
random_line_split
|
|
lib.rs
|
#![cfg_attr(feature = "nightly-testing", feature(plugin))]
#![cfg_attr(feature = "nightly-testing", plugin(clippy))]
#![cfg_attr(not(feature = "unstable"), deny(warnings))]
//! Types for loading and managing AWS access credentials for API requests.
extern crate chrono;
extern crate reqwest;
extern crate regex;
extern crate serde_json;
pub use environment::EnvironmentProvider;
pub use container::ContainerProvider;
pub use instance_metadata::InstanceMetadataProvider;
pub use profile::ProfileProvider;
mod container;
mod environment;
mod instance_metadata;
mod profile;
pub mod claims;
use std::fmt;
use std::error::Error;
use std::io::Error as IoError;
use std::sync::Mutex;
use std::cell::RefCell;
use std::collections::BTreeMap;
use chrono::{Duration, UTC, DateTime, ParseError};
use serde_json::Value;
/// AWS API access credentials, including access key, secret key, token (for IAM profiles),
/// expiration timestamp, and claims from federated login.
#[derive(Clone, Debug)]
pub struct AwsCredentials {
key: String,
secret: String,
token: Option<String>,
expires_at: DateTime<UTC>,
claims: BTreeMap<String, String>,
}
impl AwsCredentials {
/// Create a new `AwsCredentials` from a key ID, secret key, optional access token, and expiry
/// time.
pub fn new<K, S>(key:K, secret:S, token:Option<String>, expires_at:DateTime<UTC>)
-> AwsCredentials where K:Into<String>, S:Into<String> {
AwsCredentials {
key: key.into(),
secret: secret.into(),
token: token,
expires_at: expires_at,
claims: BTreeMap::new(),
}
}
/// Get a reference to the access key ID.
pub fn aws_access_key_id(&self) -> &str {
&self.key
}
/// Get a reference to the secret access key.
pub fn aws_secret_access_key(&self) -> &str {
&self.secret
}
/// Get a reference to the expiry time.
pub fn expires_at(&self) -> &DateTime<UTC> {
&self.expires_at
}
/// Get a reference to the access token.
pub fn token(&self) -> &Option<String> {
&self.token
}
/// Determine whether or not the credentials are expired.
fn credentials_are_expired(&self) -> bool {
// This is a rough hack to hopefully avoid someone requesting creds then sitting on them
// before issuing the request:
self.expires_at < UTC::now() + Duration::seconds(20)
}
/// Get the token claims
pub fn claims(&self) -> &BTreeMap<String, String> {
&self.claims
}
/// Get the mutable token claims
pub fn claims_mut(&mut self) -> &mut BTreeMap<String, String> {
&mut self.claims
}
}
#[derive(Debug, PartialEq)]
pub struct CredentialsError {
pub message: String
}
impl CredentialsError {
pub fn new<S>(message: S) -> CredentialsError where S: Into<String> {
CredentialsError {
message: message.into()
}
}
}
impl fmt::Display for CredentialsError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CredentialsError {
fn description(&self) -> &str {
&self.message
}
}
impl From<ParseError> for CredentialsError {
fn from(err: ParseError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
impl From<IoError> for CredentialsError {
fn from(err: IoError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
/// A trait for types that produce `AwsCredentials`.
pub trait ProvideAwsCredentials {
/// Produce a new `AwsCredentials`.
fn credentials(&self) -> Result<AwsCredentials, CredentialsError>;
}
impl ProvideAwsCredentials for AwsCredentials {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
Ok(self.clone())
}
}
/// Wrapper for `ProvideAwsCredentials` that caches the credentials returned by the
/// wrapped provider. Each time the credentials are accessed, they are checked to see if
/// they have expired, in which case they are retrieved from the wrapped provider again.
#[derive(Debug)]
pub struct BaseAutoRefreshingProvider<P, T> {
credentials_provider: P,
cached_credentials: T
}
/// Threadsafe `AutoRefreshingProvider` that locks cached credentials with a `Mutex`
pub type AutoRefreshingProviderSync<P> = BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProviderSync<P> {
pub fn
|
(provider: P) -> Result<AutoRefreshingProviderSync<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: Mutex::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.lock().expect("Failed to lock the cached credentials Mutex");
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// `!Sync` `AutoRefreshingProvider` that caches credentials in a `RefCell`
pub type AutoRefreshingProvider<P> = BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProvider<P> {
pub fn with_refcell(provider: P) -> Result<AutoRefreshingProvider<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: RefCell::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.borrow_mut();
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// The credentials provider you probably want to use if you don't require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `RefCell` to cache credentials
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `RefCell` allows this caching to happen
/// without the overhead of a `Mutex`, but is `!Sync`.
///
/// For a `Sync` implementation of the same, see `DefaultCredentialsProviderSync`
pub type DefaultCredentialsProvider = AutoRefreshingProvider<ChainProvider>;
impl DefaultCredentialsProvider {
pub fn new() -> Result<DefaultCredentialsProvider, CredentialsError> {
Ok(try!(AutoRefreshingProvider::with_refcell(ChainProvider::new())))
}
}
/// The credentials provider you probably want to use if you do require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `Mutex` to lock credentials in a
/// threadsafe manner.
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `Mutex` allows this caching to happen
/// in a Sync manner, incurring the overhead of a Mutex when credentials expire and need to be refreshed.
///
/// For a `!Sync` implementation of the same, see `DefaultCredentialsProvider`
pub type DefaultCredentialsProviderSync = AutoRefreshingProviderSync<ChainProvider>;
impl DefaultCredentialsProviderSync {
pub fn new() -> Result<DefaultCredentialsProviderSync, CredentialsError> {
Ok(try!(AutoRefreshingProviderSync::with_mutex(ChainProvider::new())))
}
}
/// Provides AWS credentials from multiple possible sources using a priority order.
///
/// The following sources are checked in order for credentials when calling `credentials`:
///
/// 1. Environment variables: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
/// 2. AWS credentials file. Usually located at `~/.aws/credentials`.
/// 3. IAM instance profile. Will only work if running on an EC2 instance with an instance profile/role.
///
/// If the sources are exhausted without finding credentials, an error is returned.
#[derive(Debug, Default, Clone)]
pub struct ChainProvider {
profile_provider: Option<ProfileProvider>,
}
impl ProvideAwsCredentials for ChainProvider {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
EnvironmentProvider.credentials()
.or_else(|_| {
match self.profile_provider {
Some(ref provider) => provider.credentials(),
None => Err(CredentialsError::new(""))
}
})
.or_else(|_| ContainerProvider.credentials())
.or_else(|_| InstanceMetadataProvider.credentials())
.or_else(|_| Err(CredentialsError::new("Couldn't find AWS credentials in environment, credentials file, or IAM role.")))
}
}
impl ChainProvider {
/// Create a new `ChainProvider` using a `ProfileProvider` with the default settings.
pub fn new() -> ChainProvider {
ChainProvider {
profile_provider: ProfileProvider::new().ok(),
}
}
/// Create a new `ChainProvider` using the provided `ProfileProvider`.
pub fn with_profile_provider(profile_provider: ProfileProvider)
-> ChainProvider {
ChainProvider {
profile_provider: Some(profile_provider),
}
}
}
fn in_ten_minutes() -> DateTime<UTC> {
UTC::now() + Duration::seconds(600)
}
fn extract_string_value_from_json(json_object: &Value, key: &str) -> Result<String, CredentialsError> {
match json_object.get(key) {
Some(v) => Ok(v.as_str().expect(&format!("{} value was not a string", key)).to_owned()),
None => Err(CredentialsError::new(format!("Couldn't find {} in response.", key))),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn credential_chain_explicit_profile_provider() {
let profile_provider = ProfileProvider::with_configuration(
"tests/sample-data/multiple_profile_credentials",
"foo",
);
let chain = ChainProvider::with_profile_provider(profile_provider);
let credentials = chain.credentials().expect(
"Failed to get credentials from default provider chain with manual profile",
);
assert_eq!(credentials.aws_access_key_id(), "foo_access_key");
assert_eq!(credentials.aws_secret_access_key(), "foo_secret_key");
}
}
|
with_mutex
|
identifier_name
|
lib.rs
|
#![cfg_attr(feature = "nightly-testing", feature(plugin))]
#![cfg_attr(feature = "nightly-testing", plugin(clippy))]
#![cfg_attr(not(feature = "unstable"), deny(warnings))]
//! Types for loading and managing AWS access credentials for API requests.
extern crate chrono;
extern crate reqwest;
extern crate regex;
extern crate serde_json;
pub use environment::EnvironmentProvider;
pub use container::ContainerProvider;
pub use instance_metadata::InstanceMetadataProvider;
pub use profile::ProfileProvider;
mod container;
mod environment;
mod instance_metadata;
mod profile;
pub mod claims;
use std::fmt;
use std::error::Error;
use std::io::Error as IoError;
use std::sync::Mutex;
use std::cell::RefCell;
use std::collections::BTreeMap;
use chrono::{Duration, UTC, DateTime, ParseError};
use serde_json::Value;
/// AWS API access credentials, including access key, secret key, token (for IAM profiles),
/// expiration timestamp, and claims from federated login.
#[derive(Clone, Debug)]
pub struct AwsCredentials {
key: String,
secret: String,
token: Option<String>,
expires_at: DateTime<UTC>,
claims: BTreeMap<String, String>,
}
impl AwsCredentials {
/// Create a new `AwsCredentials` from a key ID, secret key, optional access token, and expiry
/// time.
pub fn new<K, S>(key:K, secret:S, token:Option<String>, expires_at:DateTime<UTC>)
-> AwsCredentials where K:Into<String>, S:Into<String> {
AwsCredentials {
key: key.into(),
secret: secret.into(),
token: token,
expires_at: expires_at,
claims: BTreeMap::new(),
}
}
/// Get a reference to the access key ID.
pub fn aws_access_key_id(&self) -> &str {
&self.key
}
/// Get a reference to the secret access key.
pub fn aws_secret_access_key(&self) -> &str {
&self.secret
}
/// Get a reference to the expiry time.
pub fn expires_at(&self) -> &DateTime<UTC> {
&self.expires_at
}
/// Get a reference to the access token.
pub fn token(&self) -> &Option<String> {
&self.token
}
/// Determine whether or not the credentials are expired.
fn credentials_are_expired(&self) -> bool {
// This is a rough hack to hopefully avoid someone requesting creds then sitting on them
// before issuing the request:
self.expires_at < UTC::now() + Duration::seconds(20)
}
/// Get the token claims
pub fn claims(&self) -> &BTreeMap<String, String> {
&self.claims
}
/// Get the mutable token claims
pub fn claims_mut(&mut self) -> &mut BTreeMap<String, String> {
&mut self.claims
}
}
#[derive(Debug, PartialEq)]
pub struct CredentialsError {
pub message: String
}
impl CredentialsError {
pub fn new<S>(message: S) -> CredentialsError where S: Into<String> {
CredentialsError {
message: message.into()
}
}
}
impl fmt::Display for CredentialsError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CredentialsError {
fn description(&self) -> &str {
&self.message
}
}
impl From<ParseError> for CredentialsError {
fn from(err: ParseError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
impl From<IoError> for CredentialsError {
fn from(err: IoError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
/// A trait for types that produce `AwsCredentials`.
pub trait ProvideAwsCredentials {
/// Produce a new `AwsCredentials`.
fn credentials(&self) -> Result<AwsCredentials, CredentialsError>;
}
impl ProvideAwsCredentials for AwsCredentials {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
Ok(self.clone())
}
}
/// Wrapper for `ProvideAwsCredentials` that caches the credentials returned by the
/// wrapped provider. Each time the credentials are accessed, they are checked to see if
/// they have expired, in which case they are retrieved from the wrapped provider again.
#[derive(Debug)]
pub struct BaseAutoRefreshingProvider<P, T> {
credentials_provider: P,
cached_credentials: T
}
/// Threadsafe `AutoRefreshingProvider` that locks cached credentials with a `Mutex`
pub type AutoRefreshingProviderSync<P> = BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProviderSync<P> {
pub fn with_mutex(provider: P) -> Result<AutoRefreshingProviderSync<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: Mutex::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.lock().expect("Failed to lock the cached credentials Mutex");
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// `!Sync` `AutoRefreshingProvider` that caches credentials in a `RefCell`
pub type AutoRefreshingProvider<P> = BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProvider<P> {
pub fn with_refcell(provider: P) -> Result<AutoRefreshingProvider<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: RefCell::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.borrow_mut();
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// The credentials provider you probably want to use if you don't require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `RefCell` to cache credentials
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `RefCell` allows this caching to happen
/// without the overhead of a `Mutex`, but is `!Sync`.
///
/// For a `Sync` implementation of the same, see `DefaultCredentialsProviderSync`
pub type DefaultCredentialsProvider = AutoRefreshingProvider<ChainProvider>;
impl DefaultCredentialsProvider {
pub fn new() -> Result<DefaultCredentialsProvider, CredentialsError> {
Ok(try!(AutoRefreshingProvider::with_refcell(ChainProvider::new())))
}
}
/// The credentials provider you probably want to use if you do require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `Mutex` to lock credentials in a
/// threadsafe manner.
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `Mutex` allows this caching to happen
/// in a Sync manner, incurring the overhead of a Mutex when credentials expire and need to be refreshed.
///
/// For a `!Sync` implementation of the same, see `DefaultCredentialsProvider`
pub type DefaultCredentialsProviderSync = AutoRefreshingProviderSync<ChainProvider>;
impl DefaultCredentialsProviderSync {
pub fn new() -> Result<DefaultCredentialsProviderSync, CredentialsError> {
Ok(try!(AutoRefreshingProviderSync::with_mutex(ChainProvider::new())))
}
}
/// Provides AWS credentials from multiple possible sources using a priority order.
///
/// The following sources are checked in order for credentials when calling `credentials`:
///
/// 1. Environment variables: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
/// 2. AWS credentials file. Usually located at `~/.aws/credentials`.
/// 3. IAM instance profile. Will only work if running on an EC2 instance with an instance profile/role.
///
/// If the sources are exhausted without finding credentials, an error is returned.
#[derive(Debug, Default, Clone)]
pub struct ChainProvider {
profile_provider: Option<ProfileProvider>,
}
impl ProvideAwsCredentials for ChainProvider {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
EnvironmentProvider.credentials()
.or_else(|_| {
match self.profile_provider {
Some(ref provider) => provider.credentials(),
None => Err(CredentialsError::new(""))
}
})
.or_else(|_| ContainerProvider.credentials())
.or_else(|_| InstanceMetadataProvider.credentials())
.or_else(|_| Err(CredentialsError::new("Couldn't find AWS credentials in environment, credentials file, or IAM role.")))
}
}
impl ChainProvider {
/// Create a new `ChainProvider` using a `ProfileProvider` with the default settings.
pub fn new() -> ChainProvider {
ChainProvider {
profile_provider: ProfileProvider::new().ok(),
}
}
/// Create a new `ChainProvider` using the provided `ProfileProvider`.
pub fn with_profile_provider(profile_provider: ProfileProvider)
-> ChainProvider {
ChainProvider {
profile_provider: Some(profile_provider),
}
}
}
fn in_ten_minutes() -> DateTime<UTC> {
UTC::now() + Duration::seconds(600)
}
fn extract_string_value_from_json(json_object: &Value, key: &str) -> Result<String, CredentialsError>
|
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn credential_chain_explicit_profile_provider() {
let profile_provider = ProfileProvider::with_configuration(
"tests/sample-data/multiple_profile_credentials",
"foo",
);
let chain = ChainProvider::with_profile_provider(profile_provider);
let credentials = chain.credentials().expect(
"Failed to get credentials from default provider chain with manual profile",
);
assert_eq!(credentials.aws_access_key_id(), "foo_access_key");
assert_eq!(credentials.aws_secret_access_key(), "foo_secret_key");
}
}
|
{
match json_object.get(key) {
Some(v) => Ok(v.as_str().expect(&format!("{} value was not a string", key)).to_owned()),
None => Err(CredentialsError::new(format!("Couldn't find {} in response.", key))),
}
}
|
identifier_body
|
lib.rs
|
#![cfg_attr(feature = "nightly-testing", feature(plugin))]
#![cfg_attr(feature = "nightly-testing", plugin(clippy))]
#![cfg_attr(not(feature = "unstable"), deny(warnings))]
//! Types for loading and managing AWS access credentials for API requests.
extern crate chrono;
extern crate reqwest;
extern crate regex;
extern crate serde_json;
pub use environment::EnvironmentProvider;
pub use container::ContainerProvider;
pub use instance_metadata::InstanceMetadataProvider;
pub use profile::ProfileProvider;
mod container;
mod environment;
mod instance_metadata;
mod profile;
pub mod claims;
use std::fmt;
use std::error::Error;
use std::io::Error as IoError;
use std::sync::Mutex;
use std::cell::RefCell;
use std::collections::BTreeMap;
use chrono::{Duration, UTC, DateTime, ParseError};
use serde_json::Value;
/// AWS API access credentials, including access key, secret key, token (for IAM profiles),
/// expiration timestamp, and claims from federated login.
#[derive(Clone, Debug)]
pub struct AwsCredentials {
key: String,
secret: String,
token: Option<String>,
expires_at: DateTime<UTC>,
claims: BTreeMap<String, String>,
}
impl AwsCredentials {
/// Create a new `AwsCredentials` from a key ID, secret key, optional access token, and expiry
/// time.
pub fn new<K, S>(key:K, secret:S, token:Option<String>, expires_at:DateTime<UTC>)
-> AwsCredentials where K:Into<String>, S:Into<String> {
AwsCredentials {
key: key.into(),
secret: secret.into(),
token: token,
expires_at: expires_at,
claims: BTreeMap::new(),
}
}
/// Get a reference to the access key ID.
pub fn aws_access_key_id(&self) -> &str {
&self.key
}
/// Get a reference to the secret access key.
pub fn aws_secret_access_key(&self) -> &str {
&self.secret
}
/// Get a reference to the expiry time.
pub fn expires_at(&self) -> &DateTime<UTC> {
&self.expires_at
}
|
&self.token
}
/// Determine whether or not the credentials are expired.
fn credentials_are_expired(&self) -> bool {
// This is a rough hack to hopefully avoid someone requesting creds then sitting on them
// before issuing the request:
self.expires_at < UTC::now() + Duration::seconds(20)
}
/// Get the token claims
pub fn claims(&self) -> &BTreeMap<String, String> {
&self.claims
}
/// Get the mutable token claims
pub fn claims_mut(&mut self) -> &mut BTreeMap<String, String> {
&mut self.claims
}
}
#[derive(Debug, PartialEq)]
pub struct CredentialsError {
pub message: String
}
impl CredentialsError {
pub fn new<S>(message: S) -> CredentialsError where S: Into<String> {
CredentialsError {
message: message.into()
}
}
}
impl fmt::Display for CredentialsError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CredentialsError {
fn description(&self) -> &str {
&self.message
}
}
impl From<ParseError> for CredentialsError {
fn from(err: ParseError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
impl From<IoError> for CredentialsError {
fn from(err: IoError) -> CredentialsError {
CredentialsError::new(err.description())
}
}
/// A trait for types that produce `AwsCredentials`.
pub trait ProvideAwsCredentials {
/// Produce a new `AwsCredentials`.
fn credentials(&self) -> Result<AwsCredentials, CredentialsError>;
}
impl ProvideAwsCredentials for AwsCredentials {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
Ok(self.clone())
}
}
/// Wrapper for `ProvideAwsCredentials` that caches the credentials returned by the
/// wrapped provider. Each time the credentials are accessed, they are checked to see if
/// they have expired, in which case they are retrieved from the wrapped provider again.
#[derive(Debug)]
pub struct BaseAutoRefreshingProvider<P, T> {
credentials_provider: P,
cached_credentials: T
}
/// Threadsafe `AutoRefreshingProvider` that locks cached credentials with a `Mutex`
pub type AutoRefreshingProviderSync<P> = BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProviderSync<P> {
pub fn with_mutex(provider: P) -> Result<AutoRefreshingProviderSync<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: Mutex::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, Mutex<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.lock().expect("Failed to lock the cached credentials Mutex");
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// `!Sync` `AutoRefreshingProvider` that caches credentials in a `RefCell`
pub type AutoRefreshingProvider<P> = BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>>;
impl <P: ProvideAwsCredentials> AutoRefreshingProvider<P> {
pub fn with_refcell(provider: P) -> Result<AutoRefreshingProvider<P>, CredentialsError> {
let creds = try!(provider.credentials());
Ok(BaseAutoRefreshingProvider {
credentials_provider: provider,
cached_credentials: RefCell::new(creds)
})
}
}
impl <P: ProvideAwsCredentials> ProvideAwsCredentials for BaseAutoRefreshingProvider<P, RefCell<AwsCredentials>> {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
let mut creds = self.cached_credentials.borrow_mut();
if creds.credentials_are_expired() {
*creds = try!(self.credentials_provider.credentials());
}
Ok(creds.clone())
}
}
/// The credentials provider you probably want to use if you don't require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `RefCell` to cache credentials
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `RefCell` allows this caching to happen
/// without the overhead of a `Mutex`, but is `!Sync`.
///
/// For a `Sync` implementation of the same, see `DefaultCredentialsProviderSync`
pub type DefaultCredentialsProvider = AutoRefreshingProvider<ChainProvider>;
impl DefaultCredentialsProvider {
pub fn new() -> Result<DefaultCredentialsProvider, CredentialsError> {
Ok(try!(AutoRefreshingProvider::with_refcell(ChainProvider::new())))
}
}
/// The credentials provider you probably want to use if you do require Sync for your AWS services.
/// Wraps a `ChainProvider` in an `AutoRefreshingProvider` that uses a `Mutex` to lock credentials in a
/// threadsafe manner.
///
/// The underlying `ChainProvider` checks multiple sources for credentials, and the `AutoRefreshingProvider`
/// refreshes the credentials automatically when they expire. The `Mutex` allows this caching to happen
/// in a Sync manner, incurring the overhead of a Mutex when credentials expire and need to be refreshed.
///
/// For a `!Sync` implementation of the same, see `DefaultCredentialsProvider`
pub type DefaultCredentialsProviderSync = AutoRefreshingProviderSync<ChainProvider>;
impl DefaultCredentialsProviderSync {
pub fn new() -> Result<DefaultCredentialsProviderSync, CredentialsError> {
Ok(try!(AutoRefreshingProviderSync::with_mutex(ChainProvider::new())))
}
}
/// Provides AWS credentials from multiple possible sources using a priority order.
///
/// The following sources are checked in order for credentials when calling `credentials`:
///
/// 1. Environment variables: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
/// 2. AWS credentials file. Usually located at `~/.aws/credentials`.
/// 3. IAM instance profile. Will only work if running on an EC2 instance with an instance profile/role.
///
/// If the sources are exhausted without finding credentials, an error is returned.
#[derive(Debug, Default, Clone)]
pub struct ChainProvider {
profile_provider: Option<ProfileProvider>,
}
impl ProvideAwsCredentials for ChainProvider {
fn credentials(&self) -> Result<AwsCredentials, CredentialsError> {
EnvironmentProvider.credentials()
.or_else(|_| {
match self.profile_provider {
Some(ref provider) => provider.credentials(),
None => Err(CredentialsError::new(""))
}
})
.or_else(|_| ContainerProvider.credentials())
.or_else(|_| InstanceMetadataProvider.credentials())
.or_else(|_| Err(CredentialsError::new("Couldn't find AWS credentials in environment, credentials file, or IAM role.")))
}
}
impl ChainProvider {
/// Create a new `ChainProvider` using a `ProfileProvider` with the default settings.
pub fn new() -> ChainProvider {
ChainProvider {
profile_provider: ProfileProvider::new().ok(),
}
}
/// Create a new `ChainProvider` using the provided `ProfileProvider`.
pub fn with_profile_provider(profile_provider: ProfileProvider)
-> ChainProvider {
ChainProvider {
profile_provider: Some(profile_provider),
}
}
}
fn in_ten_minutes() -> DateTime<UTC> {
UTC::now() + Duration::seconds(600)
}
fn extract_string_value_from_json(json_object: &Value, key: &str) -> Result<String, CredentialsError> {
match json_object.get(key) {
Some(v) => Ok(v.as_str().expect(&format!("{} value was not a string", key)).to_owned()),
None => Err(CredentialsError::new(format!("Couldn't find {} in response.", key))),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn credential_chain_explicit_profile_provider() {
let profile_provider = ProfileProvider::with_configuration(
"tests/sample-data/multiple_profile_credentials",
"foo",
);
let chain = ChainProvider::with_profile_provider(profile_provider);
let credentials = chain.credentials().expect(
"Failed to get credentials from default provider chain with manual profile",
);
assert_eq!(credentials.aws_access_key_id(), "foo_access_key");
assert_eq!(credentials.aws_secret_access_key(), "foo_secret_key");
}
}
|
/// Get a reference to the access token.
pub fn token(&self) -> &Option<String> {
|
random_line_split
|
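The three lib.rs splits above come from the rusoto credentials module. Its central pattern is the provider chain with fallback; a hedged Python sketch of that pattern follows (the provider here is a stand-in for illustration, not the real API):

class CredentialsError(Exception):
    pass

def env_provider():
    # Counterpart of EnvironmentProvider: read the two variables or fail.
    import os
    try:
        return os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"]
    except KeyError:
        raise CredentialsError("not found in environment")

def chain_credentials(providers):
    # Counterpart of ChainProvider::credentials: the chained or_else calls
    # try each source in priority order and keep the first success.
    for provider in providers:
        try:
            return provider()
        except CredentialsError:
            continue
    raise CredentialsError("Couldn't find AWS credentials in environment, "
                           "credentials file, or IAM role.")

# Profile, container and instance-metadata providers would slot in after env_provider.
try:
    print(chain_credentials([env_provider]))
except CredentialsError as err:
    print("no credentials:", err)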
testmethods.py
|
import zmq
class JRPC:
def __init__(self):
self.id = 0
def make_noti(self, method, params=None):
noti = {"jsonrpc":"2.0", "method":method}
if params is not None:
noti["params"] = params
return noti
def make_req(self, method, params=None):
req = self.make_noti(method, params)
req["id"] = self.id
self.id += 1
return req
zctx = zmq.Context.instance()
zsock = zctx.socket(zmq.REQ)
zsock.connect("tcp://127.0.0.1:10000")
jrpc = JRPC()
# test "echo" method
req = jrpc.make_req("echo", [10, 5])
zsock.send_json(req)
rep = zsock.recv_json()
assert(rep['result']==req['params'])
# test "counter" method and batch
req = jrpc.make_req("counter")
zsock.send_json([req]*10)
batchrep = zsock.recv_json()
counts = [rep['result'] for rep in batchrep]
for k in range(1,len(counts)):
assert counts[k] - counts[k-1] == 1
# test "sum" method and batch
batchreq = []
for k in range(10):
batchreq.append(jrpc.make_req("sum", range(1+k)))
zsock.send_json(batchreq)
batchrep = zsock.recv_json()
for k in range(10):
assert(batchrep[k]['result']==sum(range(1+k)))
a = range(3)
o = {1:1, 2:2, 3:3}
d = { "one": "un", "two": 2, "three": 3.0, "four": True, "five": False, "six": None, "seven":a, "eight":o }
req = jrpc.make_noti("iterate", d)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("iterate", a)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
|
req = jrpc.make_noti("foreach", a)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
|
req = jrpc.make_noti("foreach", d)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
|
random_line_split
|
testmethods.py
|
import zmq
class JRPC:
def __init__(self):
|
def make_noti(self, method, params=None):
noti = {"jsonrpc":"2.0", "method":method}
if params is not None:
noti["params"] = params
return noti
def make_req(self, method, params=None):
req = self.make_noti(method, params)
req["id"] = self.id
self.id += 1
return req
zctx = zmq.Context.instance()
zsock = zctx.socket(zmq.REQ)
zsock.connect("tcp://127.0.0.1:10000")
jrpc = JRPC()
# test "echo" method
req = jrpc.make_req("echo", [10, 5])
zsock.send_json(req)
rep = zsock.recv_json()
assert(rep['result']==req['params'])
# test "counter" method and batch
req = jrpc.make_req("counter")
zsock.send_json([req]*10)
batchrep = zsock.recv_json()
counts = [rep['result'] for rep in batchrep]
for k in range(1,len(counts)):
assert counts[k] - counts[k-1] == 1
# test "sum" method and batch
batchreq = []
for k in range(10):
batchreq.append(jrpc.make_req("sum", range(1+k)))
zsock.send_json(batchreq)
batchrep = zsock.recv_json()
for k in range(10):
assert(batchrep[k]['result']==sum(range(1+k)))
a = range(3)
o = {1:1, 2:2, 3:3}
d = { "one": "un", "two": 2, "three": 3.0, "four": True, "five": False, "six": None, "seven":a, "eight":o }
req = jrpc.make_noti("iterate", d)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("iterate", a)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("foreach", d)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("foreach", a)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
|
self.id = 0
|
identifier_body
|
testmethods.py
|
import zmq
class JRPC:
def __init__(self):
self.id = 0
def make_noti(self, method, params=None):
noti = {"jsonrpc":"2.0", "method":method}
if params is not None:
noti["params"] = params
return noti
def make_req(self, method, params=None):
req = self.make_noti(method, params)
req["id"] = self.id
self.id += 1
return req
zctx = zmq.Context.instance()
zsock = zctx.socket(zmq.REQ)
zsock.connect("tcp://127.0.0.1:10000")
jrpc = JRPC()
# test "echo" method
req = jrpc.make_req("echo", [10, 5])
zsock.send_json(req)
rep = zsock.recv_json()
assert(rep['result']==req['params'])
# test "counter" method and batch
req = jrpc.make_req("counter")
zsock.send_json([req]*10)
batchrep = zsock.recv_json()
counts = [rep['result'] for rep in batchrep]
for k in range(1,len(counts)):
assert counts[k] - counts[k-1] == 1
# test "sum" method and batch
batchreq = []
for k in range(10):
|
zsock.send_json(batchreq)
batchrep = zsock.recv_json()
for k in range(10):
assert(batchrep[k]['result']==sum(range(1+k)))
a = range(3)
o = {1:1, 2:2, 3:3}
d = { "one": "un", "two": 2, "three": 3.0, "four": True, "five": False, "six": None, "seven":a, "eight":o }
req = jrpc.make_noti("iterate", d)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("iterate", a)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("foreach", d)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("foreach", a)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
|
batchreq.append(jrpc.make_req("sum", range(1+k)))
|
conditional_block
|
testmethods.py
|
import zmq
class JRPC:
def
|
(self):
self.id = 0
def make_noti(self, method, params=None):
noti = {"jsonrpc":"2.0", "method":method}
if params is not None:
noti["params"] = params
return noti
def make_req(self, method, params=None):
req = self.make_noti(method, params)
req["id"] = self.id
self.id += 1
return req
zctx = zmq.Context.instance()
zsock = zctx.socket(zmq.REQ)
zsock.connect("tcp://127.0.0.1:10000")
jrpc = JRPC()
# test "echo" method
req = jrpc.make_req("echo", [10, 5])
zsock.send_json(req)
rep = zsock.recv_json()
assert(rep['result']==req['params'])
# test "counter" method and batch
req = jrpc.make_req("counter")
zsock.send_json([req]*10)
batchrep = zsock.recv_json()
counts = [rep['result'] for rep in batchrep]
for k in range(1,len(counts)):
assert counts[k] - counts[k-1] == 1
# test "sum" method and batch
batchreq = []
for k in range(10):
batchreq.append(jrpc.make_req("sum", range(1+k)))
zsock.send_json(batchreq)
batchrep = zsock.recv_json()
for k in range(10):
assert(batchrep[k]['result']==sum(range(1+k)))
a = range(3)
o = {1:1, 2:2, 3:3}
d = { "one": "un", "two": 2, "three": 3.0, "four": True, "five": False, "six": None, "seven":a, "eight":o }
req = jrpc.make_noti("iterate", d)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("iterate", a)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("foreach", d)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("foreach", a)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
|
__init__
|
identifier_name
|
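The four testmethods.py splits above exercise a ZeroMQ JSON-RPC server. The framing they rely on is small enough to restate; a minimal sketch of the request/notification builders (requests carry an id and get a result back, notifications do not and draw an empty reply frame):

import json

def make_noti(method, params=None):
    # A notification has no "id" field.
    noti = {"jsonrpc": "2.0", "method": method}
    if params is not None:
        noti["params"] = params
    return noti

def make_req(method, rpc_id, params=None):
    # A request is a notification plus an "id".
    req = make_noti(method, params)
    req["id"] = rpc_id
    return req

print(json.dumps(make_req("echo", 0, [10, 5])))
print(json.dumps(make_noti("iterate", [0, 1, 2])))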
product.gears.js
|
var script = new Script();
function Product () {
this.showProductResult = function (){
var that = this;
$("#excelDataTable").find("tbody").remove();
|
method: "POST",
url: "/product/result",
data: data,
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
console.log(data);
script.buildHtmlTable(data);
}
},
error: function (data){
alert("ERROR " + data);
}
});
}
this.showAllProductCount = function (){
var that = this;
$.ajax({
method: "GET",
url: "/product/count",
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
var template = "{{#.}}" +
"<option>{{кількість одиниць виробу}}</option>" +
"{{/.}}";
var rendered = Mustache.render(template, data);
$('#ProductCount').append(rendered);
}
}
});
}
this.showAllProductCountry = function (){
var that = this;
$.ajax({
method: "GET",
url: "/product/country",
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
var template = "{{#.}}" +
"<option>{{країна виробника}}</option>" +
"{{/.}}";
var rendered = Mustache.render(template, data);
$('#ProductCountry').append(rendered);
}
}
});
}
}
|
var data = {
ProductCount: $('#ProductCount').val(),
ProductCountry: $('#ProductCountry').val()
};
$.ajax({
|
random_line_split
|
product.gears.js
|
var script = new Script();
function
|
() {
this.showProductResult = function (){
var that = this;
$("#excelDataTable").find("tbody").remove();
var data = {
ProductCount: $('#ProductCount').val(),
ProductCountry: $('#ProductCountry').val()
};
$.ajax({
method: "POST",
url: "/product/result",
data: data,
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
console.log(data);
script.buildHtmlTable(data);
}
},
error: function (data){
alert("ERROR " + data);
}
});
}
this.showAllProductCount = function (){
var that = this;
$.ajax({
method: "GET",
url: "/product/count",
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
var template = "{{#.}}" +
"<option>{{кількість одиниць виробу}}</option>" +
"{{/.}}";
var rendered = Mustache.render(template, data);
$('#ProductCount').append(rendered);
}
}
});
}
this.showAllProductCountry = function (){
var that = this;
$.ajax({
method: "GET",
url: "/product/country",
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
var template = "{{#.}}" +
"<option>{{країна виробника}}</option>" +
"{{/.}}";
var rendered = Mustache.render(template, data);
$('#ProductCountry').append(rendered);
}
}
});
}
}
|
Product
|
identifier_name
|
product.gears.js
|
var script = new Script();
function Product ()
|
{
this.showProductResult = function (){
var that = this;
$("#excelDataTable").find("tbody").remove();
var data = {
ProductCount: $('#ProductCount').val(),
ProductCountry: $('#ProductCountry').val()
};
$.ajax({
method: "POST",
url: "/product/result",
data: data,
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
console.log(data);
script.buildHtmlTable(data);
}
},
error: function (data){
alert("ERROR " + data);
}
});
}
this.showAllProductCount = function (){
var that = this;
$.ajax({
method: "GET",
url: "/product/count",
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
var template = "{{#.}}" +
"<option>{{кількість одиниць виробу}}</option>" +
"{{/.}}";
var rendered = Mustache.render(template, data);
$('#ProductCount').append(rendered);
}
}
});
}
this.showAllProductCountry = function (){
var that = this;
$.ajax({
method: "GET",
url: "/product/country",
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
var template = "{{#.}}" +
"<option>{{країна виробника}}</option>" +
"{{/.}}";
var rendered = Mustache.render(template, data);
$('#ProductCountry').append(rendered);
}
}
});
}
}
|
identifier_body
|
|
product.gears.js
|
var script = new Script();
function Product () {
this.showProductResult = function (){
var that = this;
$("#excelDataTable").find("tbody").remove();
var data = {
ProductCount: $('#ProductCount').val(),
ProductCountry: $('#ProductCountry').val()
};
$.ajax({
method: "POST",
url: "/product/result",
data: data,
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
console.log(data);
script.buildHtmlTable(data);
}
},
error: function (data){
alert("ERROR " + data);
}
});
}
this.showAllProductCount = function (){
var that = this;
$.ajax({
method: "GET",
url: "/product/count",
complete: function(data){
if(data.status !== 500)
|
});
}
this.showAllProductCountry = function (){
var that = this;
$.ajax({
method: "GET",
url: "/product/country",
complete: function(data){
if(data.status !== 500){
data = data.responseJSON;
data = JSON.parse(data);
var template = "{{#.}}" +
"<option>{{країна виробника}}</option>" +
"{{/.}}";
var rendered = Mustache.render(template, data);
$('#ProductCountry').append(rendered);
}
}
});
}
}
|
{
data = data.responseJSON;
data = JSON.parse(data);
var template = "{{#.}}" +
"<option>{{кількість одиниць виробу}}</option>" +
"{{/.}}";
var rendered = Mustache.render(template, data);
$('#ProductCount').append(rendered);
}
}
|
conditional_block
|
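Note: in the conditional_block record above, the held-out middle is the guarded branch that runs only when the AJAX response did not come back with status 500. A rough Rust analogue of that guard; Response is a stand-in type invented for this sketch:

struct Response {
    status: u16,
    body: String,
}

// Hand the body on only when the server did not report an internal
// error, mirroring the `if (data.status !== 500)` check above (the JS
// then JSON-parses the body and renders it into the page).
fn handle(resp: &Response) -> Option<&str> {
    if resp.status != 500 {
        Some(resp.body.as_str())
    } else {
        None
    }
}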
cache_repair.rs
|
use anyhow::Result;
mod common;
use common::cache::*;
use common::common_args::*;
use common::input_arg::*;
use common::output_option::*;
use common::program::*;
use common::target::*;
use common::test_dir::*;
//------------------------------------------
const USAGE: &str = concat!(
"cache_repair ",
include_str!("../VERSION"),
"Repair binary cache metadata, and write it to a different device or file
USAGE:
cache_repair [OPTIONS] --input <FILE> --output <FILE>
OPTIONS:
-h, --help Print help information
-i, --input <FILE> Specify the input device
-o, --output <FILE> Specify the output device
-q, --quiet Suppress output messages, return only exit code.
-V, --version Print version information"
);
//-----------------------------------------
struct CacheRepair;
impl<'a> Program<'a> for CacheRepair {
fn name() -> &'a str {
"cache_repair"
}
fn cmd<I>(args: I) -> Command
where
I: IntoIterator,
I::Item: Into<std::ffi::OsString>,
{
cache_repair_cmd(args)
}
fn usage() -> &'a str {
USAGE
}
fn arg_type() -> ArgType {
ArgType::IoOptions
}
fn bad_option_hint(option: &str) -> String {
msg::bad_option_hint(option)
}
}
impl<'a> InputProgram<'a> for CacheRepair {
fn mk_valid_input(td: &mut TestDir) -> Result<std::path::PathBuf> {
mk_valid_md(td)
}
fn file_not_found() -> &'a str {
msg::FILE_NOT_FOUND
}
fn missing_input_arg() -> &'a str {
msg::MISSING_INPUT_ARG
}
fn corrupted_input() -> &'a str
|
}
impl<'a> OutputProgram<'a> for CacheRepair {
fn missing_output_arg() -> &'a str {
msg::MISSING_OUTPUT_ARG
}
}
impl<'a> MetadataWriter<'a> for CacheRepair {
fn file_not_found() -> &'a str {
msg::FILE_NOT_FOUND
}
}
//-----------------------------------------
test_accepts_help!(CacheRepair);
test_accepts_version!(CacheRepair);
test_rejects_bad_option!(CacheRepair);
test_input_file_not_found!(CacheRepair);
test_input_cannot_be_a_directory!(CacheRepair);
test_corrupted_input_data!(CacheRepair);
test_missing_output_option!(CacheRepair);
//-----------------------------------------
|
{
"bad checksum in superblock"
}
|
identifier_body
|
cache_repair.rs
|
use anyhow::Result;
mod common;
use common::cache::*;
use common::common_args::*;
use common::input_arg::*;
use common::output_option::*;
use common::program::*;
use common::target::*;
use common::test_dir::*;
//------------------------------------------
const USAGE: &str = concat!(
"cache_repair ",
include_str!("../VERSION"),
"Repair binary cache metadata, and write it to a different device or file
USAGE:
cache_repair [OPTIONS] --input <FILE> --output <FILE>
OPTIONS:
-h, --help Print help information
-i, --input <FILE> Specify the input device
-o, --output <FILE> Specify the output device
-q, --quiet Suppress output messages, return only exit code.
-V, --version Print version information"
);
//-----------------------------------------
struct CacheRepair;
impl<'a> Program<'a> for CacheRepair {
fn name() -> &'a str {
"cache_repair"
}
fn cmd<I>(args: I) -> Command
where
I: IntoIterator,
I::Item: Into<std::ffi::OsString>,
{
cache_repair_cmd(args)
}
fn usage() -> &'a str {
USAGE
}
fn arg_type() -> ArgType {
ArgType::IoOptions
}
fn bad_option_hint(option: &str) -> String {
msg::bad_option_hint(option)
}
}
impl<'a> InputProgram<'a> for CacheRepair {
fn mk_valid_input(td: &mut TestDir) -> Result<std::path::PathBuf> {
mk_valid_md(td)
}
fn file_not_found() -> &'a str {
msg::FILE_NOT_FOUND
}
fn missing_input_arg() -> &'a str {
msg::MISSING_INPUT_ARG
}
fn corrupted_input() -> &'a str {
|
impl<'a> OutputProgram<'a> for CacheRepair {
fn missing_output_arg() -> &'a str {
msg::MISSING_OUTPUT_ARG
}
}
impl<'a> MetadataWriter<'a> for CacheRepair {
fn file_not_found() -> &'a str {
msg::FILE_NOT_FOUND
}
}
//-----------------------------------------
test_accepts_help!(CacheRepair);
test_accepts_version!(CacheRepair);
test_rejects_bad_option!(CacheRepair);
test_input_file_not_found!(CacheRepair);
test_input_cannot_be_a_directory!(CacheRepair);
test_corrupted_input_data!(CacheRepair);
test_missing_output_option!(CacheRepair);
//-----------------------------------------
|
"bad checksum in superblock"
}
}
|
random_line_split
|
cache_repair.rs
|
use anyhow::Result;
mod common;
use common::cache::*;
use common::common_args::*;
use common::input_arg::*;
use common::output_option::*;
use common::program::*;
use common::target::*;
use common::test_dir::*;
//------------------------------------------
const USAGE: &str = concat!(
"cache_repair ",
include_str!("../VERSION"),
"Repair binary cache metadata, and write it to a different device or file
USAGE:
cache_repair [OPTIONS] --input <FILE> --output <FILE>
OPTIONS:
-h, --help Print help information
-i, --input <FILE> Specify the input device
-o, --output <FILE> Specify the output device
-q, --quiet Suppress output messages, return only exit code.
-V, --version Print version information"
);
//-----------------------------------------
struct CacheRepair;
impl<'a> Program<'a> for CacheRepair {
fn name() -> &'a str {
"cache_repair"
}
fn cmd<I>(args: I) -> Command
where
I: IntoIterator,
I::Item: Into<std::ffi::OsString>,
{
cache_repair_cmd(args)
}
fn usage() -> &'a str {
USAGE
}
fn arg_type() -> ArgType {
ArgType::IoOptions
}
fn bad_option_hint(option: &str) -> String {
msg::bad_option_hint(option)
}
}
impl<'a> InputProgram<'a> for CacheRepair {
fn mk_valid_input(td: &mut TestDir) -> Result<std::path::PathBuf> {
mk_valid_md(td)
}
fn
|
() -> &'a str {
msg::FILE_NOT_FOUND
}
fn missing_input_arg() -> &'a str {
msg::MISSING_INPUT_ARG
}
fn corrupted_input() -> &'a str {
"bad checksum in superblock"
}
}
impl<'a> OutputProgram<'a> for CacheRepair {
fn missing_output_arg() -> &'a str {
msg::MISSING_OUTPUT_ARG
}
}
impl<'a> MetadataWriter<'a> for CacheRepair {
fn file_not_found() -> &'a str {
msg::FILE_NOT_FOUND
}
}
//-----------------------------------------
test_accepts_help!(CacheRepair);
test_accepts_version!(CacheRepair);
test_rejects_bad_option!(CacheRepair);
test_input_file_not_found!(CacheRepair);
test_input_cannot_be_a_directory!(CacheRepair);
test_corrupted_input_data!(CacheRepair);
test_missing_output_option!(CacheRepair);
//-----------------------------------------
|
file_not_found
|
identifier_name
|
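Note: the cache_repair.rs records exercise a trait-per-tool test harness: each tool implements Program (name, usage, command builder) and macros such as test_accepts_help! stamp out the shared tests. A stripped-down sketch of the same pattern; the trait and macro bodies here are simplified stand-ins, not the crate's real definitions:

trait Program {
    fn name() -> &'static str;
    fn usage() -> &'static str;
}

// Expands to one #[test] per tool; a real harness would spawn the
// binary with --help and compare its stdout against the usage text.
macro_rules! test_accepts_help {
    ($prog:ty) => {
        #[test]
        fn accepts_help() {
            assert!(!<$prog as Program>::usage().is_empty());
        }
    };
}

struct CacheRepair;

impl Program for CacheRepair {
    fn name() -> &'static str {
        "cache_repair"
    }
    fn usage() -> &'static str {
        "cache_repair [OPTIONS] --input <FILE> --output <FILE>"
    }
}

test_accepts_help!(CacheRepair);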
arena.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
#[allow(missing_doc)];
use list::{List, Cons, Nil};
use list;
use std::at_vec;
use std::cast::{transmute, transmute_mut, transmute_mut_region};
use std::cast;
use std::cell::{Cell, RefCell};
use std::num;
use std::ptr;
use std::mem;
use std::rt::global_heap;
use std::uint;
use std::unstable::intrinsics::{TyDesc, get_tydesc};
use std::unstable::intrinsics;
use std::util;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone)]
struct Chunk {
data: RefCell<@[u8]>,
fill: Cell<uint>,
is_pod: Cell<bool>,
}
// Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses ~[u8] vectors as a backing store to
// allocate objects from. For each allocated object, the arena stores
// a pointer to the type descriptor followed by the
// object. (Potentially with alignment padding after each of them.)
// When the arena is destroyed, it iterates through all of its chunks,
// and uses the tydesc information to trace through the objects,
// calling the destructors on them.
// One subtle point that needs to be addressed is how to handle
// failures while running the user provided initializer function. It
// is important to not run the destructor on uninitialized objects, but
// how to detect them is somewhat subtle. Since alloc() can be invoked
// recursively, it is not sufficient to simply exclude the most recent
// object. To solve this without requiring extra space, we use the low
// order bit of the tydesc pointer to encode whether the object it
// describes has been fully initialized.
// As an optimization, objects with destructors are stored in
// different chunks than objects without destructors. This reduces
// overhead when initializing plain-old-data and means we don't need
// to waste time running the destructors of POD.
#[no_freeze]
pub struct Arena {
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to
// access the head.
priv head: Chunk,
priv pod_head: Chunk,
priv chunks: RefCell<@List<Chunk>>,
}
impl Arena {
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
pod_head: chunk(initial_size, true),
chunks: RefCell::new(@Nil),
}
}
}
fn chunk(size: uint, is_pod: bool) -> Chunk {
let mut v: @[u8] = @[];
unsafe { at_vec::raw::reserve(&mut v, size); }
Chunk {
data: RefCell::new(unsafe { cast::transmute(v) }),
fill: Cell::new(0u),
is_pod: Cell::new(is_pod),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
list::each(self.chunks.get(), |chunk| {
if !chunk.is_pod.get() {
destroy_chunk(chunk);
}
true
});
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() & !(align - 1)
}
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = {
let data = chunk.data.borrow();
data.get().as_ptr()
};
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = transmute(ptr::offset(buf, idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(ptr::offset(buf, start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
unsafe fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
let p_bits: uint = transmute(p);
p_bits | (is_done as uint)
}
#[inline]
unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
(transmute(p & !1), p & 1 == 1)
}
impl Arena {
// Functions for the POD part of the arena
fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.pod_head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.pod_head.clone(), self.chunks.get()));
self.pod_head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_pod_inner(n_bytes, align);
}
#[inline]
fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let this = transmute_mut_region(self);
let start = round_up(this.pod_head.fill.get(), align);
let end = start + n_bytes;
if end > at_vec::capacity(this.pod_head.data.get()) {
return this.alloc_pod_grow(n_bytes, align);
}
this.pod_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
ptr::offset(this.pod_head.data.get().as_ptr(), start as int)
}
}
#[inline]
fn alloc_pod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
let ptr: *mut T = transmute(ptr);
intrinsics::move_val_init(&mut (*ptr), op());
return transmute(ptr);
}
}
// Functions for the non-POD part of the arena
fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.head.clone(), self.chunks.get()));
self.head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_nonpod_inner(n_bytes, align);
}
#[inline]
fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let start;
let end;
let tydesc_start;
let after_tydesc;
{
let head = transmute_mut_region(&mut self.head);
tydesc_start = head.fill.get();
after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
start = round_up(after_tydesc, align);
end = start + n_bytes;
}
if end > at_vec::capacity(self.head.data.get()) {
return self.alloc_nonpod_grow(n_bytes, align);
}
let head = transmute_mut_region(&mut self.head);
head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.data.get().as_ptr();
return (ptr::offset(buf, tydesc_start as int), ptr::offset(buf, start as int));
}
}
#[inline]
fn alloc_nonpod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align);
let ty_ptr: *mut uint = transmute(ty_ptr);
let ptr: *mut T = transmute(ptr);
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = transmute(tydesc);
// Actually initialize it
intrinsics::move_val_init(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return transmute(ptr);
}
}
// The external interface
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// XXX: Borrow check
let this = transmute_mut(self);
if intrinsics::needs_drop::<T>() {
this.alloc_nonpod(op)
} else {
this.alloc_pod(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { @i });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<@int>(|| {
// Now fail.
fail!();
});
}
/// An arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
priv ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
priv end: *T,
/// The type descriptor of the objects in the arena. This should not be
/// necessary, but is until generic destructors are supported.
priv tydesc: *TyDesc,
/// A pointer to the first arena segment.
priv first: Option<~TypedArenaChunk>,
}
struct TypedArenaChunk {
/// Pointer to the next arena segment.
next: Option<~TypedArenaChunk>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl TypedArenaChunk {
#[inline]
fn new<T>(next: Option<~TypedArenaChunk>, capacity: uint)
-> ~TypedArenaChunk {
let mut size = mem::size_of::<TypedArenaChunk>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = global_heap::exchange_malloc(size);
let mut chunk: ~TypedArenaChunk = cast::transmute(chunk);
intrinsics::move_val_init(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint, opt_tydesc: Option<*TyDesc>) {
// Destroy all the allocated objects.
match opt_tydesc {
None => {}
Some(tydesc) => {
let mut start = self.start(tydesc);
for _ in range(0, len) {
((*tydesc).drop_glue)(start as *i8);
start = start.offset((*tydesc).size as int)
}
}
}
// Destroy the next chunk.
let next_opt = util::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity, opt_tydesc)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self, tydesc: *TyDesc) -> *u8 {
let this: *TypedArenaChunk = self;
unsafe {
cast::transmute(round_up(this.offset(1) as uint, (*tydesc).align))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self, tydesc: *TyDesc) -> *u8 {
unsafe {
let size = (*tydesc).size.checked_mul(&self.capacity).unwrap();
self.start(tydesc).offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new arena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new arena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::new::<T>(None, capacity);
let tydesc = unsafe {
intrinsics::get_tydesc::<T>()
};
TypedArena {
ptr: chunk.start(tydesc) as *T,
end: chunk.end(tydesc) as *T,
tydesc: tydesc,
first: Some(chunk),
}
}
/// Allocates an object into this arena.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
let this = cast::transmute_mut(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = cast::transmute(this.ptr);
intrinsics::move_val_init(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::new::<T>(Some(chunk), new_capacity);
self.ptr = chunk.start(self.tydesc) as *T;
self.end = chunk.end(self.tydesc) as *T;
self.first = Some(chunk)
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self)
|
}
#[cfg(test)]
mod test {
use super::{Arena, TypedArena};
use test::BenchHarness;
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_pod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_pod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
})
}
#[bench]
pub fn bench_pod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Point {
x: 1,
y: 2,
z: 3,
};
})
}
#[bench]
pub fn bench_pod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
});
})
}
struct Nonpod {
string: ~str,
array: ~[int],
}
#[test]
pub fn test_nonpod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
}
}
#[bench]
pub fn bench_nonpod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
#[bench]
pub fn bench_nonpod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
};
})
}
#[bench]
pub fn bench_nonpod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
let _ = arena.alloc(|| Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
}
|
{
// Determine how much was filled.
let start = self.first.get_ref().start(self.tydesc) as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
let opt_tydesc = if intrinsics::needs_drop::<T>() {
Some(self.tydesc)
} else {
None
};
self.first.get_mut_ref().destroy(diff, opt_tydesc)
}
}
|
identifier_body
|
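Note: the arena.rs records encode an "initialized" flag in the low bit of the type-descriptor pointer (bitpack_tydesc_ptr / un_bitpack_tydesc_ptr); this is sound because the pointee's alignment keeps that bit zero. The same trick restated in modern Rust as a standalone sketch:

// Tag bit 0 of an aligned pointer; any alignment >= 2 keeps it free.
fn pack(ptr: *const u64, done: bool) -> usize {
    let bits = ptr as usize;
    debug_assert_eq!(bits & 1, 0, "pointer must be at least 2-byte aligned");
    bits | (done as usize)
}

fn unpack(bits: usize) -> (*const u64, bool) {
    ((bits & !1) as *const u64, bits & 1 == 1)
}

fn main() {
    let x: u64 = 42;
    let (ptr, done) = unpack(pack(&x, true));
    assert!(done);
    assert_eq!(unsafe { *ptr }, 42); // still the original pointer after untagging
}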
arena.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
#[allow(missing_doc)];
use list::{List, Cons, Nil};
use list;
use std::at_vec;
use std::cast::{transmute, transmute_mut, transmute_mut_region};
use std::cast;
use std::cell::{Cell, RefCell};
use std::num;
use std::ptr;
use std::mem;
use std::rt::global_heap;
use std::uint;
use std::unstable::intrinsics::{TyDesc, get_tydesc};
use std::unstable::intrinsics;
use std::util;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone)]
struct Chunk {
data: RefCell<@[u8]>,
fill: Cell<uint>,
is_pod: Cell<bool>,
}
// Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses ~[u8] vectors as a backing store to
// allocate objects from. For each allocated object, the arena stores
// a pointer to the type descriptor followed by the
// object. (Potentially with alignment padding after each of them.)
// When the arena is destroyed, it iterates through all of its chunks,
// and uses the tydesc information to trace through the objects,
// calling the destructors on them.
// One subtle point that needs to be addressed is how to handle
// failures while running the user provided initializer function. It
// is important to not run the destructor on uninitialized objects, but
// how to detect them is somewhat subtle. Since alloc() can be invoked
// recursively, it is not sufficient to simply exclude the most recent
// object. To solve this without requiring extra space, we use the low
// order bit of the tydesc pointer to encode whether the object it
// describes has been fully initialized.
// As an optimization, objects with destructors are stored in
// different chunks than objects without destructors. This reduces
// overhead when initializing plain-old-data and means we don't need
// to waste time running the destructors of POD.
#[no_freeze]
pub struct Arena {
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to
// access the head.
priv head: Chunk,
priv pod_head: Chunk,
priv chunks: RefCell<@List<Chunk>>,
}
impl Arena {
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
pod_head: chunk(initial_size, true),
chunks: RefCell::new(@Nil),
}
}
}
fn chunk(size: uint, is_pod: bool) -> Chunk {
let mut v: @[u8] = @[];
unsafe { at_vec::raw::reserve(&mut v, size); }
Chunk {
data: RefCell::new(unsafe { cast::transmute(v) }),
fill: Cell::new(0u),
is_pod: Cell::new(is_pod),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
list::each(self.chunks.get(), |chunk| {
if !chunk.is_pod.get() {
destroy_chunk(chunk);
}
true
});
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() & !(align - 1)
|
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = {
let data = chunk.data.borrow();
data.get().as_ptr()
};
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = transmute(ptr::offset(buf, idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(ptr::offset(buf, start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
unsafe fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
let p_bits: uint = transmute(p);
p_bits | (is_done as uint)
}
#[inline]
unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
(transmute(p & !1), p & 1 == 1)
}
impl Arena {
// Functions for the POD part of the arena
fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.pod_head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.pod_head.clone(), self.chunks.get()));
self.pod_head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_pod_inner(n_bytes, align);
}
#[inline]
fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let this = transmute_mut_region(self);
let start = round_up(this.pod_head.fill.get(), align);
let end = start + n_bytes;
if end > at_vec::capacity(this.pod_head.data.get()) {
return this.alloc_pod_grow(n_bytes, align);
}
this.pod_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
ptr::offset(this.pod_head.data.get().as_ptr(), start as int)
}
}
#[inline]
fn alloc_pod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
let ptr: *mut T = transmute(ptr);
intrinsics::move_val_init(&mut (*ptr), op());
return transmute(ptr);
}
}
// Functions for the non-POD part of the arena
fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.head.clone(), self.chunks.get()));
self.head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_nonpod_inner(n_bytes, align);
}
#[inline]
fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let start;
let end;
let tydesc_start;
let after_tydesc;
{
let head = transmute_mut_region(&mut self.head);
tydesc_start = head.fill.get();
after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
start = round_up(after_tydesc, align);
end = start + n_bytes;
}
if end > at_vec::capacity(self.head.data.get()) {
return self.alloc_nonpod_grow(n_bytes, align);
}
let head = transmute_mut_region(&mut self.head);
head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.data.get().as_ptr();
return (ptr::offset(buf, tydesc_start as int), ptr::offset(buf, start as int));
}
}
#[inline]
fn alloc_nonpod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align);
let ty_ptr: *mut uint = transmute(ty_ptr);
let ptr: *mut T = transmute(ptr);
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = transmute(tydesc);
// Actually initialize it
intrinsics::move_val_init(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return transmute(ptr);
}
}
// The external interface
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// XXX: Borrow check
let this = transmute_mut(self);
if intrinsics::needs_drop::<T>() {
this.alloc_nonpod(op)
} else {
this.alloc_pod(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { @i });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<@int>(|| {
// Now fail.
fail!();
});
}
/// An arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
priv ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
priv end: *T,
/// The type descriptor of the objects in the arena. This should not be
/// necessary, but is until generic destructors are supported.
priv tydesc: *TyDesc,
/// A pointer to the first arena segment.
priv first: Option<~TypedArenaChunk>,
}
struct TypedArenaChunk {
/// Pointer to the next arena segment.
next: Option<~TypedArenaChunk>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl TypedArenaChunk {
#[inline]
fn new<T>(next: Option<~TypedArenaChunk>, capacity: uint)
-> ~TypedArenaChunk {
let mut size = mem::size_of::<TypedArenaChunk>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = global_heap::exchange_malloc(size);
let mut chunk: ~TypedArenaChunk = cast::transmute(chunk);
intrinsics::move_val_init(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint, opt_tydesc: Option<*TyDesc>) {
// Destroy all the allocated objects.
match opt_tydesc {
None => {}
Some(tydesc) => {
let mut start = self.start(tydesc);
for _ in range(0, len) {
((*tydesc).drop_glue)(start as *i8);
start = start.offset((*tydesc).size as int)
}
}
}
// Destroy the next chunk.
let next_opt = util::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity, opt_tydesc)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self, tydesc: *TyDesc) -> *u8 {
let this: *TypedArenaChunk = self;
unsafe {
cast::transmute(round_up(this.offset(1) as uint, (*tydesc).align))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self, tydesc: *TyDesc) -> *u8 {
unsafe {
let size = (*tydesc).size.checked_mul(&self.capacity).unwrap();
self.start(tydesc).offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new arena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new arena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::new::<T>(None, capacity);
let tydesc = unsafe {
intrinsics::get_tydesc::<T>()
};
TypedArena {
ptr: chunk.start(tydesc) as *T,
end: chunk.end(tydesc) as *T,
tydesc: tydesc,
first: Some(chunk),
}
}
/// Allocates an object into this arena.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
let this = cast::transmute_mut(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = cast::transmute(this.ptr);
intrinsics::move_val_init(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::new::<T>(Some(chunk), new_capacity);
self.ptr = chunk.start(self.tydesc) as *T;
self.end = chunk.end(self.tydesc) as *T;
self.first = Some(chunk)
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start(self.tydesc) as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
let opt_tydesc = if intrinsics::needs_drop::<T>() {
Some(self.tydesc)
} else {
None
};
self.first.get_mut_ref().destroy(diff, opt_tydesc)
}
}
}
#[cfg(test)]
mod test {
use super::{Arena, TypedArena};
use test::BenchHarness;
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_pod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_pod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
})
}
#[bench]
pub fn bench_pod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Point {
x: 1,
y: 2,
z: 3,
};
})
}
#[bench]
pub fn bench_pod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
});
})
}
struct Nonpod {
string: ~str,
array: ~[int],
}
#[test]
pub fn test_nonpod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
}
}
#[bench]
pub fn bench_nonpod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
#[bench]
pub fn bench_nonpod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
};
})
}
#[bench]
pub fn bench_nonpod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
let _ = arena.alloc(|| Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
}
|
}
|
random_line_split
|
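Note: round_up in the arena code computes the next multiple of a power-of-two alignment as (base + align - 1) & !(align - 1), using checked_add so an overflowing base fails loudly instead of wrapping. A modern-Rust restatement of the same formula:

// `align` must be a power of two; overflow returns None rather than
// panicking, which is slightly gentler than the .unwrap() above.
fn round_up(base: usize, align: usize) -> Option<usize> {
    debug_assert!(align.is_power_of_two());
    Some(base.checked_add(align - 1)? & !(align - 1))
}

fn main() {
    assert_eq!(round_up(13, 8), Some(16));
    assert_eq!(round_up(16, 8), Some(16));
    assert_eq!(round_up(usize::MAX, 8), None);
}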
arena.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
#[allow(missing_doc)];
use list::{List, Cons, Nil};
use list;
use std::at_vec;
use std::cast::{transmute, transmute_mut, transmute_mut_region};
use std::cast;
use std::cell::{Cell, RefCell};
use std::num;
use std::ptr;
use std::mem;
use std::rt::global_heap;
use std::uint;
use std::unstable::intrinsics::{TyDesc, get_tydesc};
use std::unstable::intrinsics;
use std::util;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone)]
struct Chunk {
data: RefCell<@[u8]>,
fill: Cell<uint>,
is_pod: Cell<bool>,
}
// Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses ~[u8] vectors as a backing store to
// allocate objects from. For each allocated object, the arena stores
// a pointer to the type descriptor followed by the
// object. (Potentially with alignment padding after each of them.)
// When the arena is destroyed, it iterates through all of its chunks,
// and uses the tydesc information to trace through the objects,
// calling the destructors on them.
// One subtle point that needs to be addressed is how to handle
// failures while running the user provided initializer function. It
// is important to not run the destructor on uninitialized objects, but
// how to detect them is somewhat subtle. Since alloc() can be invoked
// recursively, it is not sufficient to simply exclude the most recent
// object. To solve this without requiring extra space, we use the low
// order bit of the tydesc pointer to encode whether the object it
// describes has been fully initialized.
// As an optimization, objects with destructors are stored in
// different chunks than objects without destructors. This reduces
// overhead when initializing plain-old-data and means we don't need
// to waste time running the destructors of POD.
#[no_freeze]
pub struct Arena {
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to
// access the head.
priv head: Chunk,
priv pod_head: Chunk,
priv chunks: RefCell<@List<Chunk>>,
}
impl Arena {
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
pod_head: chunk(initial_size, true),
chunks: RefCell::new(@Nil),
}
}
}
fn chunk(size: uint, is_pod: bool) -> Chunk {
let mut v: @[u8] = @[];
unsafe { at_vec::raw::reserve(&mut v, size); }
Chunk {
data: RefCell::new(unsafe { cast::transmute(v) }),
fill: Cell::new(0u),
is_pod: Cell::new(is_pod),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
list::each(self.chunks.get(), |chunk| {
if !chunk.is_pod.get() {
destroy_chunk(chunk);
}
true
});
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() & !(align - 1)
}
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = {
let data = chunk.data.borrow();
data.get().as_ptr()
};
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = transmute(ptr::offset(buf, idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(ptr::offset(buf, start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
unsafe fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
let p_bits: uint = transmute(p);
p_bits | (is_done as uint)
}
#[inline]
unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
(transmute(p & !1), p & 1 == 1)
}
impl Arena {
// Functions for the POD part of the arena
fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.pod_head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.pod_head.clone(), self.chunks.get()));
self.pod_head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_pod_inner(n_bytes, align);
}
#[inline]
fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let this = transmute_mut_region(self);
let start = round_up(this.pod_head.fill.get(), align);
let end = start + n_bytes;
if end > at_vec::capacity(this.pod_head.data.get()) {
return this.alloc_pod_grow(n_bytes, align);
}
this.pod_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
ptr::offset(this.pod_head.data.get().as_ptr(), start as int)
}
}
#[inline]
fn alloc_pod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
let ptr: *mut T = transmute(ptr);
intrinsics::move_val_init(&mut (*ptr), op());
return transmute(ptr);
}
}
// Functions for the non-POD part of the arena
fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.head.clone(), self.chunks.get()));
self.head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_nonpod_inner(n_bytes, align);
}
#[inline]
fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let start;
let end;
let tydesc_start;
let after_tydesc;
{
let head = transmute_mut_region(&mut self.head);
tydesc_start = head.fill.get();
after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
start = round_up(after_tydesc, align);
end = start + n_bytes;
}
if end > at_vec::capacity(self.head.data.get()) {
return self.alloc_nonpod_grow(n_bytes, align);
}
let head = transmute_mut_region(&mut self.head);
head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.data.get().as_ptr();
return (ptr::offset(buf, tydesc_start as int), ptr::offset(buf, start as int));
}
}
#[inline]
fn alloc_nonpod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align);
let ty_ptr: *mut uint = transmute(ty_ptr);
let ptr: *mut T = transmute(ptr);
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = transmute(tydesc);
// Actually initialize it
intrinsics::move_val_init(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return transmute(ptr);
}
}
// The external interface
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// XXX: Borrow check
let this = transmute_mut(self);
if intrinsics::needs_drop::<T>() {
this.alloc_nonpod(op)
} else {
this.alloc_pod(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { @i });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<@int>(|| {
// Now fail.
fail!();
});
}
/// An arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
priv ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
priv end: *T,
/// The type descriptor of the objects in the arena. This should not be
/// necessary, but is until generic destructors are supported.
priv tydesc: *TyDesc,
/// A pointer to the first arena segment.
priv first: Option<~TypedArenaChunk>,
}
struct TypedArenaChunk {
/// Pointer to the next arena segment.
next: Option<~TypedArenaChunk>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl TypedArenaChunk {
#[inline]
fn new<T>(next: Option<~TypedArenaChunk>, capacity: uint)
-> ~TypedArenaChunk {
let mut size = mem::size_of::<TypedArenaChunk>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = global_heap::exchange_malloc(size);
let mut chunk: ~TypedArenaChunk = cast::transmute(chunk);
intrinsics::move_val_init(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint, opt_tydesc: Option<*TyDesc>) {
// Destroy all the allocated objects.
match opt_tydesc {
None => {}
Some(tydesc) => {
let mut start = self.start(tydesc);
for _ in range(0, len) {
((*tydesc).drop_glue)(start as *i8);
start = start.offset((*tydesc).size as int)
}
}
}
// Destroy the next chunk.
let next_opt = util::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity, opt_tydesc)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self, tydesc: *TyDesc) -> *u8 {
let this: *TypedArenaChunk = self;
unsafe {
cast::transmute(round_up(this.offset(1) as uint, (*tydesc).align))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self, tydesc: *TyDesc) -> *u8 {
unsafe {
let size = (*tydesc).size.checked_mul(&self.capacity).unwrap();
self.start(tydesc).offset(size as int)
}
}
}
impl<T> TypedArena<T> {
/// Creates a new arena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new arena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::new::<T>(None, capacity);
let tydesc = unsafe {
intrinsics::get_tydesc::<T>()
};
TypedArena {
ptr: chunk.start(tydesc) as *T,
end: chunk.end(tydesc) as *T,
tydesc: tydesc,
first: Some(chunk),
}
}
/// Allocates an object into this arena.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
let this = cast::transmute_mut(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = cast::transmute(this.ptr);
intrinsics::move_val_init(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::new::<T>(Some(chunk), new_capacity);
self.ptr = chunk.start(self.tydesc) as *T;
self.end = chunk.end(self.tydesc) as *T;
self.first = Some(chunk)
}
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start(self.tydesc) as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
let opt_tydesc = if intrinsics::needs_drop::<T>() {
Some(self.tydesc)
} else {
None
};
self.first.get_mut_ref().destroy(diff, opt_tydesc)
}
}
}
#[cfg(test)]
mod test {
use super::{Arena, TypedArena};
use test::BenchHarness;
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn
|
() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_pod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
})
}
#[bench]
pub fn bench_pod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Point {
x: 1,
y: 2,
z: 3,
};
})
}
#[bench]
pub fn bench_pod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
});
})
}
struct Nonpod {
string: ~str,
array: ~[int],
}
#[test]
pub fn test_nonpod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
}
}
#[bench]
pub fn bench_nonpod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
#[bench]
pub fn bench_nonpod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
};
})
}
#[bench]
pub fn bench_nonpod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
let _ = arena.alloc(|| Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
}
|
test_pod
|
identifier_name
|
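Note: Arena::alloc in the records above dispatches on whether T has drop glue, steering destructor-bearing types to tracked chunks and plain-old-data to a fast path that is never scanned at teardown. std::mem::needs_drop exposes the same compile-time fact in modern Rust:

use std::mem::needs_drop;

// Same dispatch as Arena::alloc: drop-glue types take the tracked
// ("nonpod") path, trivially destructible types the fast ("pod") path.
fn pick_chunk<T>() -> &'static str {
    if needs_drop::<T>() {
        "nonpod"
    } else {
        "pod"
    }
}

fn main() {
    assert_eq!(pick_chunk::<u32>(), "pod");
    assert_eq!(pick_chunk::<String>(), "nonpod"); // String owns heap memory
}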
arena.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
//! The arena, a fast but limited type of allocator.
//!
//! Arenas are a type of allocator that destroy the objects within, all at
//! once, once the arena itself is destroyed. They do not support deallocation
//! of individual objects while the arena itself is still alive. The benefit
//! of an arena is very fast allocation; just a pointer bump.
#[allow(missing_doc)];
use list::{List, Cons, Nil};
use list;
use std::at_vec;
use std::cast::{transmute, transmute_mut, transmute_mut_region};
use std::cast;
use std::cell::{Cell, RefCell};
use std::num;
use std::ptr;
use std::mem;
use std::rt::global_heap;
use std::uint;
use std::unstable::intrinsics::{TyDesc, get_tydesc};
use std::unstable::intrinsics;
use std::util;
// The way arena uses arrays is really deeply awful. The arrays are
// allocated, and have capacities reserved, but the fill for the array
// will always stay at 0.
#[deriving(Clone)]
struct Chunk {
data: RefCell<@[u8]>,
fill: Cell<uint>,
is_pod: Cell<bool>,
}
// Arenas are used to quickly allocate objects that share a
// lifetime. The arena uses ~[u8] vectors as a backing store to
// allocate objects from. For each allocated object, the arena stores
// a pointer to the type descriptor followed by the
// object. (Potentially with alignment padding after each of them.)
// When the arena is destroyed, it iterates through all of its chunks,
// and uses the tydesc information to trace through the objects,
// calling the destructors on them.
// One subtle point that needs to be addressed is how to handle
// failures while running the user provided initializer function. It
// is important to not run the destructor on uninitialized objects, but
// how to detect them is somewhat subtle. Since alloc() can be invoked
// recursively, it is not sufficient to simply exclude the most recent
// object. To solve this without requiring extra space, we use the low
// order bit of the tydesc pointer to encode whether the object it
// describes has been fully initialized.
// As an optimization, objects with destructors are stored in
// different chunks than objects without destructors. This reduces
// overhead when initializing plain-old-data and means we don't need
// to waste time running the destructors of POD.
#[no_freeze]
pub struct Arena {
// The head is separated out from the list as an unbenchmarked
// microoptimization, to avoid needing to case on the list to
// access the head.
priv head: Chunk,
priv pod_head: Chunk,
priv chunks: RefCell<@List<Chunk>>,
}
impl Arena {
pub fn new() -> Arena {
Arena::new_with_size(32u)
}
pub fn new_with_size(initial_size: uint) -> Arena {
Arena {
head: chunk(initial_size, false),
pod_head: chunk(initial_size, true),
chunks: RefCell::new(@Nil),
}
}
}
fn chunk(size: uint, is_pod: bool) -> Chunk {
let mut v: @[u8] = @[];
unsafe { at_vec::raw::reserve(&mut v, size); }
Chunk {
data: RefCell::new(unsafe { cast::transmute(v) }),
fill: Cell::new(0u),
is_pod: Cell::new(is_pod),
}
}
#[unsafe_destructor]
impl Drop for Arena {
fn drop(&mut self) {
unsafe {
destroy_chunk(&self.head);
list::each(self.chunks.get(), |chunk| {
if !chunk.is_pod.get() {
destroy_chunk(chunk);
}
true
});
}
}
}
#[inline]
fn round_up(base: uint, align: uint) -> uint {
(base.checked_add(&(align - 1))).unwrap() & !(align - 1)
}
// Walk down a chunk, running the destructors for any objects stored
// in it.
unsafe fn destroy_chunk(chunk: &Chunk) {
let mut idx = 0;
let buf = {
let data = chunk.data.borrow();
data.get().as_ptr()
};
let fill = chunk.fill.get();
while idx < fill {
let tydesc_data: *uint = transmute(ptr::offset(buf, idx as int));
let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data);
let (size, align) = ((*tydesc).size, (*tydesc).align);
let after_tydesc = idx + mem::size_of::<*TyDesc>();
let start = round_up(after_tydesc, align);
//debug!("freeing object: idx = {}, size = {}, align = {}, done = {}",
// start, size, align, is_done);
if is_done {
((*tydesc).drop_glue)(ptr::offset(buf, start as int) as *i8);
}
// Find where the next tydesc lives
idx = round_up(start + size, mem::pref_align_of::<*TyDesc>());
}
}
// We encode whether the object a tydesc describes has been
// initialized in the arena in the low bit of the tydesc pointer. This
// is necessary in order to properly do cleanup if a failure occurs
// during an initializer.
#[inline]
unsafe fn bitpack_tydesc_ptr(p: *TyDesc, is_done: bool) -> uint {
let p_bits: uint = transmute(p);
p_bits | (is_done as uint)
}
#[inline]
unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TyDesc, bool) {
(transmute(p & !1), p & 1 == 1)
}
impl Arena {
// Functions for the POD part of the arena
fn alloc_pod_grow(&mut self, n_bytes: uint, align: uint) -> *u8 {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.pod_head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.pod_head.clone(), self.chunks.get()));
self.pod_head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true);
return self.alloc_pod_inner(n_bytes, align);
}
#[inline]
fn alloc_pod_inner(&mut self, n_bytes: uint, align: uint) -> *u8 {
unsafe {
let this = transmute_mut_region(self);
let start = round_up(this.pod_head.fill.get(), align);
let end = start + n_bytes;
if end > at_vec::capacity(this.pod_head.data.get())
|
this.pod_head.fill.set(end);
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill.get());
ptr::offset(this.pod_head.data.get().as_ptr(), start as int)
}
}
#[inline]
fn alloc_pod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
let ptr: *mut T = transmute(ptr);
intrinsics::move_val_init(&mut (*ptr), op());
return transmute(ptr);
}
}
// Functions for the non-POD part of the arena
fn alloc_nonpod_grow(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
// Allocate a new chunk.
let chunk_size = at_vec::capacity(self.head.data.get());
let new_min_chunk_size = num::max(n_bytes, chunk_size);
self.chunks.set(@Cons(self.head.clone(), self.chunks.get()));
self.head =
chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false);
return self.alloc_nonpod_inner(n_bytes, align);
}
#[inline]
fn alloc_nonpod_inner(&mut self, n_bytes: uint, align: uint)
-> (*u8, *u8) {
unsafe {
let start;
let end;
let tydesc_start;
let after_tydesc;
{
let head = transmute_mut_region(&mut self.head);
tydesc_start = head.fill.get();
after_tydesc = head.fill.get() + mem::size_of::<*TyDesc>();
start = round_up(after_tydesc, align);
end = start + n_bytes;
}
if end > at_vec::capacity(self.head.data.get()) {
return self.alloc_nonpod_grow(n_bytes, align);
}
let head = transmute_mut_region(&mut self.head);
head.fill.set(round_up(end, mem::pref_align_of::<*TyDesc>()));
//debug!("idx = {}, size = {}, align = {}, fill = {}",
// start, n_bytes, align, head.fill);
let buf = self.head.data.get().as_ptr();
return (ptr::offset(buf, tydesc_start as int), ptr::offset(buf, start as int));
}
}
#[inline]
fn alloc_nonpod<'a, T>(&'a mut self, op: || -> T) -> &'a T {
unsafe {
let tydesc = get_tydesc::<T>();
let (ty_ptr, ptr) =
self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align);
let ty_ptr: *mut uint = transmute(ty_ptr);
let ptr: *mut T = transmute(ptr);
// Write in our tydesc along with a bit indicating that it
// has *not* been initialized yet.
*ty_ptr = transmute(tydesc);
// Actually initialize it
intrinsics::move_val_init(&mut(*ptr), op());
// Now that we are done, update the tydesc to indicate that
// the object is there.
*ty_ptr = bitpack_tydesc_ptr(tydesc, true);
return transmute(ptr);
}
}
// The external interface
#[inline]
pub fn alloc<'a, T>(&'a self, op: || -> T) -> &'a T {
unsafe {
// XXX: Borrow check
let this = transmute_mut(self);
if intrinsics::needs_drop::<T>() {
this.alloc_nonpod(op)
} else {
this.alloc_pod(op)
}
}
}
}
#[test]
fn test_arena_destructors() {
let arena = Arena::new();
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| @i);
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| [0u8, 1u8, 2u8]);
}
}
#[test]
#[should_fail]
fn test_arena_destructors_fail() {
let arena = Arena::new();
// Put some stuff in the arena.
for i in range(0u, 10) {
// Arena allocate something with drop glue to make sure it
// doesn't leak.
arena.alloc(|| { @i });
// Allocate something with funny size and alignment, to keep
// things interesting.
arena.alloc(|| { [0u8, 1u8, 2u8] });
}
// Now, fail while allocating
arena.alloc::<@int>(|| {
// Now fail.
fail!();
});
}
/// An arena that can hold objects of only one type.
///
/// Safety note: Modifying objects in the arena that have already had their
/// `drop` destructors run can cause leaks, because the destructor will not
/// run again for these objects.
pub struct TypedArena<T> {
/// A pointer to the next object to be allocated.
priv ptr: *T,
/// A pointer to the end of the allocated area. When this pointer is
/// reached, a new chunk is allocated.
priv end: *T,
/// The type descriptor of the objects in the arena. This should not be
/// necessary, but is until generic destructors are supported.
priv tydesc: *TyDesc,
/// A pointer to the first arena segment.
priv first: Option<~TypedArenaChunk>,
}
struct TypedArenaChunk {
/// Pointer to the next arena segment.
next: Option<~TypedArenaChunk>,
/// The number of elements that this chunk can hold.
capacity: uint,
// Objects follow here, suitably aligned.
}
impl TypedArenaChunk {
#[inline]
fn new<T>(next: Option<~TypedArenaChunk>, capacity: uint)
-> ~TypedArenaChunk {
let mut size = mem::size_of::<TypedArenaChunk>();
size = round_up(size, mem::min_align_of::<T>());
let elem_size = mem::size_of::<T>();
let elems_size = elem_size.checked_mul(&capacity).unwrap();
size = size.checked_add(&elems_size).unwrap();
let mut chunk = unsafe {
let chunk = global_heap::exchange_malloc(size);
let mut chunk: ~TypedArenaChunk = cast::transmute(chunk);
intrinsics::move_val_init(&mut chunk.next, next);
chunk
};
chunk.capacity = capacity;
chunk
}
/// Destroys this arena chunk. If the type descriptor is supplied, the
/// drop glue is called; otherwise, drop glue is not called.
#[inline]
unsafe fn destroy(&mut self, len: uint, opt_tydesc: Option<*TyDesc>) {
// Destroy all the allocated objects.
match opt_tydesc {
None => {}
Some(tydesc) => {
let mut start = self.start(tydesc);
for _ in range(0, len) {
((*tydesc).drop_glue)(start as *i8);
start = start.offset((*tydesc).size as int)
}
}
}
// Destroy the next chunk.
let next_opt = util::replace(&mut self.next, None);
match next_opt {
None => {}
Some(mut next) => {
// We assume that the next chunk is completely filled.
next.destroy(next.capacity, opt_tydesc)
}
}
}
// Returns a pointer to the first allocated object.
#[inline]
fn start(&self, tydesc: *TyDesc) -> *u8 {
let this: *TypedArenaChunk = self;
unsafe {
cast::transmute(round_up(this.offset(1) as uint, (*tydesc).align))
}
}
// Returns a pointer to the end of the allocated space.
#[inline]
fn end(&self, tydesc: *TyDesc) -> *u8 {
unsafe {
let size = (*tydesc).size.checked_mul(&self.capacity).unwrap();
self.start(tydesc).offset(size as int)
}
}
}
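// Memory layout sketch (illustrative): a chunk of capacity 4 holding `T` is
//
//     [ next | capacity | padding to T's alignment | T0 | T1 | T2 | T3 ]
//
// with `start()` pointing at T0 and `end()` one element past T3.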
impl<T> TypedArena<T> {
/// Creates a new arena with preallocated space for 8 objects.
#[inline]
pub fn new() -> TypedArena<T> {
TypedArena::with_capacity(8)
}
/// Creates a new arena with preallocated space for the given number of
/// objects.
#[inline]
pub fn with_capacity(capacity: uint) -> TypedArena<T> {
let chunk = TypedArenaChunk::new::<T>(None, capacity);
let tydesc = unsafe {
intrinsics::get_tydesc::<T>()
};
TypedArena {
ptr: chunk.start(tydesc) as *T,
end: chunk.end(tydesc) as *T,
tydesc: tydesc,
first: Some(chunk),
}
}
/// Allocates an object into this arena.
#[inline]
pub fn alloc<'a>(&'a self, object: T) -> &'a T {
unsafe {
let this = cast::transmute_mut(self);
if this.ptr == this.end {
this.grow()
}
let ptr: &'a mut T = cast::transmute(this.ptr);
intrinsics::move_val_init(ptr, object);
this.ptr = this.ptr.offset(1);
let ptr: &'a T = ptr;
ptr
}
}
/// Grows the arena.
#[inline(never)]
fn grow(&mut self) {
let chunk = self.first.take_unwrap();
let new_capacity = chunk.capacity.checked_mul(&2).unwrap();
let chunk = TypedArenaChunk::new::<T>(Some(chunk), new_capacity);
self.ptr = chunk.start(self.tydesc) as *T;
self.end = chunk.end(self.tydesc) as *T;
self.first = Some(chunk)
}
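// Growth sketch (illustrative): capacity doubles on every `grow`, so an
// arena built with `TypedArena::new()` (capacity 8) fills chunks of
// 8, 16, 32, ... objects, giving amortized-constant allocation cost.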
}
#[unsafe_destructor]
impl<T> Drop for TypedArena<T> {
fn drop(&mut self) {
// Determine how much was filled.
let start = self.first.get_ref().start(self.tydesc) as uint;
let end = self.ptr as uint;
let diff = (end - start) / mem::size_of::<T>();
// Pass that to the `destroy` method.
unsafe {
let opt_tydesc = if intrinsics::needs_drop::<T>() {
Some(self.tydesc)
} else {
None
};
self.first.get_mut_ref().destroy(diff, opt_tydesc)
}
}
}
#[cfg(test)]
mod test {
use super::{Arena, TypedArena};
use test::BenchHarness;
struct Point {
x: int,
y: int,
z: int,
}
#[test]
pub fn test_pod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
}
}
#[bench]
pub fn bench_pod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Point {
x: 1,
y: 2,
z: 3,
});
})
}
#[bench]
pub fn bench_pod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Point {
x: 1,
y: 2,
z: 3,
};
})
}
#[bench]
pub fn bench_pod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
arena.alloc(|| {
Point {
x: 1,
y: 2,
z: 3,
}
});
})
}
struct Nonpod {
string: ~str,
array: ~[int],
}
#[test]
pub fn test_nonpod() {
let arena = TypedArena::new();
for _ in range(0, 1000000) {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
}
}
#[bench]
pub fn bench_nonpod(bh: &mut BenchHarness) {
let arena = TypedArena::new();
bh.iter(|| {
arena.alloc(Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
#[bench]
pub fn bench_nonpod_nonarena(bh: &mut BenchHarness) {
bh.iter(|| {
let _ = ~Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
};
})
}
#[bench]
pub fn bench_nonpod_old_arena(bh: &mut BenchHarness) {
let arena = Arena::new();
bh.iter(|| {
let _ = arena.alloc(|| Nonpod {
string: ~"hello world",
array: ~[ 1, 2, 3, 4, 5 ],
});
})
}
}
|
{
return this.alloc_pod_grow(n_bytes, align);
}
|
conditional_block
|
test__helpers.py
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test_PropertyMixin(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage._helpers import _PropertyMixin
return _PropertyMixin
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _derivedClass(self, path=None):
class Derived(self._get_target_class()):
client = None
@property
def path(self):
return path
return Derived
def test_path_is_abstract(self):
mixin = self._make_one()
self.assertRaises(NotImplementedError, lambda: mixin.path)
def test_client_is_abstract(self):
mixin = self._make_one()
self.assertRaises(NotImplementedError, lambda: mixin.client)
def test_reload(self):
connection = _Connection({'foo': 'Foo'})
client = _Client(connection)
derived = self._derivedClass('/path')()
# Make sure changes is not a set, so we can observe a change.
derived._changes = object()
derived.reload(client=client)
self.assertEqual(derived._properties, {'foo': 'Foo'})
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/path')
self.assertEqual(kw[0]['query_params'], {'projection': 'noAcl'})
# Make sure changes get reset by reload.
self.assertEqual(derived._changes, set())
def test__set_properties(self):
mixin = self._make_one()
self.assertEqual(mixin._properties, {})
VALUE = object()
mixin._set_properties(VALUE)
self.assertEqual(mixin._properties, VALUE)
def test__patch_property(self):
derived = self._derivedClass()()
derived._patch_property('foo', 'Foo')
self.assertEqual(derived._properties, {'foo': 'Foo'})
def test_patch(self):
connection = _Connection({'foo': 'Foo'})
client = _Client(connection)
derived = self._derivedClass('/path')()
# Make sure changes is non-empty, so we can observe a change.
BAR = object()
BAZ = object()
derived._properties = {'bar': BAR, 'baz': BAZ}
derived._changes = set(['bar']) # Ignore baz.
derived.patch(client=client)
self.assertEqual(derived._properties, {'foo': 'Foo'})
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/path')
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
# Since changes does not include `baz`, we don't see it sent.
self.assertEqual(kw[0]['data'], {'bar': BAR})
# Make sure changes get reset by patch().
self.assertEqual(derived._changes, set())
class Test__scalar_property(unittest.TestCase):
def _call_fut(self, fieldName):
from google.cloud.storage._helpers import _scalar_property
return _scalar_property(fieldName)
def test_getter(self):
class Test(object):
def __init__(self, **kw):
self._properties = kw.copy()
do_re_mi = self._call_fut('solfege')
test = Test(solfege='Latido')
self.assertEqual(test.do_re_mi, 'Latido')
def test_setter(self):
class Test(object):
def _patch_property(self, name, value):
self._patched = (name, value)
do_re_mi = self._call_fut('solfege')
test = Test()
test.do_re_mi = 'Latido'
self.assertEqual(test._patched, ('solfege', 'Latido'))
class Test__base64_md5hash(unittest.TestCase):
|
from google.cloud.storage._helpers import _base64_md5hash
return _base64_md5hash(bytes_to_sign)
def test_it(self):
from io import BytesIO
BYTES_TO_SIGN = b'FOO'
BUFFER = BytesIO()
BUFFER.write(BYTES_TO_SIGN)
BUFFER.seek(0)
SIGNED_CONTENT = self._call_fut(BUFFER)
self.assertEqual(SIGNED_CONTENT, b'kBiQqOnIz21aGlQrIp/r/w==')
def test_it_with_stubs(self):
import mock
class _Buffer(object):
def __init__(self, return_vals):
self.return_vals = return_vals
self._block_sizes = []
def read(self, block_size):
self._block_sizes.append(block_size)
return self.return_vals.pop()
BASE64 = _Base64()
DIGEST_VAL = object()
BYTES_TO_SIGN = b'BYTES_TO_SIGN'
BUFFER = _Buffer([b'', BYTES_TO_SIGN])
MD5 = _MD5(DIGEST_VAL)
patch = mock.patch.multiple(
'google.cloud.storage._helpers',
base64=BASE64, md5=MD5)
with patch:
SIGNED_CONTENT = self._call_fut(BUFFER)
self.assertEqual(BUFFER._block_sizes, [8192, 8192])
self.assertIs(SIGNED_CONTENT, DIGEST_VAL)
self.assertEqual(BASE64._called_b64encode, [DIGEST_VAL])
self.assertEqual(MD5._called, [None])
self.assertEqual(MD5.hash_obj.num_digest_calls, 1)
self.assertEqual(MD5.hash_obj._blocks, [BYTES_TO_SIGN])
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
class _MD5Hash(object):
def __init__(self, digest_val):
self.digest_val = digest_val
self.num_digest_calls = 0
self._blocks = []
def update(self, block):
self._blocks.append(block)
def digest(self):
self.num_digest_calls += 1
return self.digest_val
class _MD5(object):
def __init__(self, digest_val):
self.hash_obj = _MD5Hash(digest_val)
self._called = []
def __call__(self, data=None):
self._called.append(data)
return self.hash_obj
class _Base64(object):
def __init__(self):
self._called_b64encode = []
def b64encode(self, value):
self._called_b64encode.append(value)
return value
class _Client(object):
def __init__(self, connection):
self._connection = connection
|
def _call_fut(self, bytes_to_sign):
|
random_line_split
|
test__helpers.py
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test_PropertyMixin(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage._helpers import _PropertyMixin
return _PropertyMixin
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _derivedClass(self, path=None):
class Derived(self._get_target_class()):
client = None
@property
def path(self):
return path
return Derived
def test_path_is_abstract(self):
mixin = self._make_one()
self.assertRaises(NotImplementedError, lambda: mixin.path)
def test_client_is_abstract(self):
mixin = self._make_one()
self.assertRaises(NotImplementedError, lambda: mixin.client)
def test_reload(self):
connection = _Connection({'foo': 'Foo'})
client = _Client(connection)
derived = self._derivedClass('/path')()
# Make sure changes is not a set, so we can observe a change.
derived._changes = object()
derived.reload(client=client)
self.assertEqual(derived._properties, {'foo': 'Foo'})
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/path')
self.assertEqual(kw[0]['query_params'], {'projection': 'noAcl'})
# Make sure changes get reset by reload.
self.assertEqual(derived._changes, set())
def test__set_properties(self):
mixin = self._make_one()
self.assertEqual(mixin._properties, {})
VALUE = object()
mixin._set_properties(VALUE)
self.assertEqual(mixin._properties, VALUE)
def test__patch_property(self):
derived = self._derivedClass()()
derived._patch_property('foo', 'Foo')
self.assertEqual(derived._properties, {'foo': 'Foo'})
def test_patch(self):
connection = _Connection({'foo': 'Foo'})
client = _Client(connection)
derived = self._derivedClass('/path')()
# Make sure changes is non-empty, so we can observe a change.
BAR = object()
BAZ = object()
derived._properties = {'bar': BAR, 'baz': BAZ}
derived._changes = set(['bar']) # Ignore baz.
derived.patch(client=client)
self.assertEqual(derived._properties, {'foo': 'Foo'})
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/path')
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
# Since changes does not include `baz`, we don't see it sent.
self.assertEqual(kw[0]['data'], {'bar': BAR})
# Make sure changes get reset by patch().
self.assertEqual(derived._changes, set())
class Test__scalar_property(unittest.TestCase):
def _call_fut(self, fieldName):
from google.cloud.storage._helpers import _scalar_property
return _scalar_property(fieldName)
def test_getter(self):
class Test(object):
def __init__(self, **kw):
self._properties = kw.copy()
do_re_mi = self._call_fut('solfege')
test = Test(solfege='Latido')
self.assertEqual(test.do_re_mi, 'Latido')
def test_setter(self):
class
|
(object):
def _patch_property(self, name, value):
self._patched = (name, value)
do_re_mi = self._call_fut('solfege')
test = Test()
test.do_re_mi = 'Latido'
self.assertEqual(test._patched, ('solfege', 'Latido'))
class Test__base64_md5hash(unittest.TestCase):
def _call_fut(self, bytes_to_sign):
from google.cloud.storage._helpers import _base64_md5hash
return _base64_md5hash(bytes_to_sign)
def test_it(self):
from io import BytesIO
BYTES_TO_SIGN = b'FOO'
BUFFER = BytesIO()
BUFFER.write(BYTES_TO_SIGN)
BUFFER.seek(0)
SIGNED_CONTENT = self._call_fut(BUFFER)
self.assertEqual(SIGNED_CONTENT, b'kBiQqOnIz21aGlQrIp/r/w==')
def test_it_with_stubs(self):
import mock
class _Buffer(object):
def __init__(self, return_vals):
self.return_vals = return_vals
self._block_sizes = []
def read(self, block_size):
self._block_sizes.append(block_size)
return self.return_vals.pop()
BASE64 = _Base64()
DIGEST_VAL = object()
BYTES_TO_SIGN = b'BYTES_TO_SIGN'
BUFFER = _Buffer([b'', BYTES_TO_SIGN])
MD5 = _MD5(DIGEST_VAL)
patch = mock.patch.multiple(
'google.cloud.storage._helpers',
base64=BASE64, md5=MD5)
with patch:
SIGNED_CONTENT = self._call_fut(BUFFER)
self.assertEqual(BUFFER._block_sizes, [8192, 8192])
self.assertIs(SIGNED_CONTENT, DIGEST_VAL)
self.assertEqual(BASE64._called_b64encode, [DIGEST_VAL])
self.assertEqual(MD5._called, [None])
self.assertEqual(MD5.hash_obj.num_digest_calls, 1)
self.assertEqual(MD5.hash_obj._blocks, [BYTES_TO_SIGN])
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
class _MD5Hash(object):
def __init__(self, digest_val):
self.digest_val = digest_val
self.num_digest_calls = 0
self._blocks = []
def update(self, block):
self._blocks.append(block)
def digest(self):
self.num_digest_calls += 1
return self.digest_val
class _MD5(object):
def __init__(self, digest_val):
self.hash_obj = _MD5Hash(digest_val)
self._called = []
def __call__(self, data=None):
self._called.append(data)
return self.hash_obj
class _Base64(object):
def __init__(self):
self._called_b64encode = []
def b64encode(self, value):
self._called_b64encode.append(value)
return value
class _Client(object):
def __init__(self, connection):
self._connection = connection
|
Test
|
identifier_name
|
test__helpers.py
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test_PropertyMixin(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage._helpers import _PropertyMixin
return _PropertyMixin
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _derivedClass(self, path=None):
class Derived(self._get_target_class()):
client = None
@property
def path(self):
return path
return Derived
def test_path_is_abstract(self):
mixin = self._make_one()
self.assertRaises(NotImplementedError, lambda: mixin.path)
def test_client_is_abstract(self):
mixin = self._make_one()
self.assertRaises(NotImplementedError, lambda: mixin.client)
def test_reload(self):
connection = _Connection({'foo': 'Foo'})
client = _Client(connection)
derived = self._derivedClass('/path')()
# Make sure changes is not a set, so we can observe a change.
derived._changes = object()
derived.reload(client=client)
self.assertEqual(derived._properties, {'foo': 'Foo'})
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/path')
self.assertEqual(kw[0]['query_params'], {'projection': 'noAcl'})
# Make sure changes get reset by reload.
self.assertEqual(derived._changes, set())
def test__set_properties(self):
mixin = self._make_one()
self.assertEqual(mixin._properties, {})
VALUE = object()
mixin._set_properties(VALUE)
self.assertEqual(mixin._properties, VALUE)
def test__patch_property(self):
derived = self._derivedClass()()
derived._patch_property('foo', 'Foo')
self.assertEqual(derived._properties, {'foo': 'Foo'})
def test_patch(self):
connection = _Connection({'foo': 'Foo'})
client = _Client(connection)
derived = self._derivedClass('/path')()
# Make sure changes is non-empty, so we can observe a change.
BAR = object()
BAZ = object()
derived._properties = {'bar': BAR, 'baz': BAZ}
derived._changes = set(['bar']) # Ignore baz.
derived.patch(client=client)
self.assertEqual(derived._properties, {'foo': 'Foo'})
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/path')
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
# Since changes does not include `baz`, we don't see it sent.
self.assertEqual(kw[0]['data'], {'bar': BAR})
# Make sure changes get reset by patch().
self.assertEqual(derived._changes, set())
class Test__scalar_property(unittest.TestCase):
def _call_fut(self, fieldName):
from google.cloud.storage._helpers import _scalar_property
return _scalar_property(fieldName)
def test_getter(self):
class Test(object):
|
test = Test(solfege='Latido')
self.assertEqual(test.do_re_mi, 'Latido')
def test_setter(self):
class Test(object):
def _patch_property(self, name, value):
self._patched = (name, value)
do_re_mi = self._call_fut('solfege')
test = Test()
test.do_re_mi = 'Latido'
self.assertEqual(test._patched, ('solfege', 'Latido'))
class Test__base64_md5hash(unittest.TestCase):
def _call_fut(self, bytes_to_sign):
from google.cloud.storage._helpers import _base64_md5hash
return _base64_md5hash(bytes_to_sign)
def test_it(self):
from io import BytesIO
BYTES_TO_SIGN = b'FOO'
BUFFER = BytesIO()
BUFFER.write(BYTES_TO_SIGN)
BUFFER.seek(0)
SIGNED_CONTENT = self._call_fut(BUFFER)
self.assertEqual(SIGNED_CONTENT, b'kBiQqOnIz21aGlQrIp/r/w==')
def test_it_with_stubs(self):
import mock
class _Buffer(object):
def __init__(self, return_vals):
self.return_vals = return_vals
self._block_sizes = []
def read(self, block_size):
self._block_sizes.append(block_size)
return self.return_vals.pop()
BASE64 = _Base64()
DIGEST_VAL = object()
BYTES_TO_SIGN = b'BYTES_TO_SIGN'
BUFFER = _Buffer([b'', BYTES_TO_SIGN])
MD5 = _MD5(DIGEST_VAL)
patch = mock.patch.multiple(
'google.cloud.storage._helpers',
base64=BASE64, md5=MD5)
with patch:
SIGNED_CONTENT = self._call_fut(BUFFER)
self.assertEqual(BUFFER._block_sizes, [8192, 8192])
self.assertIs(SIGNED_CONTENT, DIGEST_VAL)
self.assertEqual(BASE64._called_b64encode, [DIGEST_VAL])
self.assertEqual(MD5._called, [None])
self.assertEqual(MD5.hash_obj.num_digest_calls, 1)
self.assertEqual(MD5.hash_obj._blocks, [BYTES_TO_SIGN])
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
class _MD5Hash(object):
def __init__(self, digest_val):
self.digest_val = digest_val
self.num_digest_calls = 0
self._blocks = []
def update(self, block):
self._blocks.append(block)
def digest(self):
self.num_digest_calls += 1
return self.digest_val
class _MD5(object):
def __init__(self, digest_val):
self.hash_obj = _MD5Hash(digest_val)
self._called = []
def __call__(self, data=None):
self._called.append(data)
return self.hash_obj
class _Base64(object):
def __init__(self):
self._called_b64encode = []
def b64encode(self, value):
self._called_b64encode.append(value)
return value
class _Client(object):
def __init__(self, connection):
self._connection = connection
|
def __init__(self, **kw):
self._properties = kw.copy()
do_re_mi = self._call_fut('solfege')
|
identifier_body
|
db_logger.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import logging
from openerp.modules.registry import RegistryManager
from .misc import add_timing, add_trace
class SmileDBLogger:
def __init__(self, dbname, model_name, res_id, uid=0):
assert isinstance(uid, (int, long)), 'uid should be an integer'
self._logger = logging.getLogger('smile_log')
db = RegistryManager.get(dbname)._db
pid = 0
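# Draw a per-logger id from a Postgres sequence (created on first use)
# so each logger instance gets a distinct `pid` in its log records.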
cr = db.cursor()
try:
cr.autocommit(True)
cr.execute("select relname from pg_class where relname='smile_log_seq'")
if not cr.rowcount:
cr.execute("create sequence smile_log_seq")
cr.execute("select nextval('smile_log_seq')")
res = cr.fetchone()
pid = res and res[0] or 0
finally:
cr.close()
self._logger_start = datetime.datetime.now()
self._logger_args = {'dbname': dbname, 'model_name': model_name, 'res_id': res_id, 'uid': uid, 'pid': pid}
@property
def pid(self):
return self._logger_args['pid']
def setLevel(self, level):
self._logger.setLevel(level)
def getEffectiveLevel(self):
return self._logger.getEffectiveLevel()
def debug(self, msg):
self._logger.debug(msg, self._logger_args)
def info(self, msg):
self._logger.info(msg, self._logger_args)
def warning(self, msg):
self._logger.warning(msg, self._logger_args)
def log(self, level, msg):
# logging.Logger.log() takes the level first; forward it along with msg.
self._logger.log(level, msg, self._logger_args)
@add_trace
def error(self, msg):
self._logger.error(msg, self._logger_args)
@add_trace
def critical(self, msg):
self._logger.critical(msg, self._logger_args)
@add_trace
def exception(self, msg):
self._logger.exception(msg, self._logger_args)
@add_timing
def time_info(self, msg):
|
@add_timing
def time_debug(self, msg):
self._logger.debug(msg, self._logger_args)
|
self._logger.info(msg, self._logger_args)
|
identifier_body
|
db_logger.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import logging
from openerp.modules.registry import RegistryManager
from .misc import add_timing, add_trace
class SmileDBLogger:
def __init__(self, dbname, model_name, res_id, uid=0):
assert isinstance(uid, (int, long)), 'uid should be an integer'
self._logger = logging.getLogger('smile_log')
db = RegistryManager.get(dbname)._db
pid = 0
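# Draw a per-logger id from a Postgres sequence (created on first use)
# so each logger instance gets a distinct `pid` in its log records.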
cr = db.cursor()
try:
cr.autocommit(True)
cr.execute("select relname from pg_class where relname='smile_log_seq'")
if not cr.rowcount:
cr.execute("create sequence smile_log_seq")
cr.execute("select nextval('smile_log_seq')")
res = cr.fetchone()
pid = res and res[0] or 0
finally:
cr.close()
self._logger_start = datetime.datetime.now()
self._logger_args = {'dbname': dbname, 'model_name': model_name, 'res_id': res_id, 'uid': uid, 'pid': pid}
@property
def pid(self):
return self._logger_args['pid']
def setLevel(self, level):
self._logger.setLevel(level)
def getEffectiveLevel(self):
return self._logger.getEffectiveLevel()
def debug(self, msg):
self._logger.debug(msg, self._logger_args)
def info(self, msg):
self._logger.info(msg, self._logger_args)
def warning(self, msg):
self._logger.warning(msg, self._logger_args)
def log(self, level, msg):
# logging.Logger.log() takes the level first; forward it along with msg.
self._logger.log(level, msg, self._logger_args)
@add_trace
def
|
(self, msg):
self._logger.error(msg, self._logger_args)
@add_trace
def critical(self, msg):
self._logger.critical(msg, self._logger_args)
@add_trace
def exception(self, msg):
self._logger.exception(msg, self._logger_args)
@add_timing
def time_info(self, msg):
self._logger.info(msg, self._logger_args)
@add_timing
def time_debug(self, msg):
self._logger.debug(msg, self._logger_args)
|
error
|
identifier_name
|
db_logger.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import logging
from openerp.modules.registry import RegistryManager
from .misc import add_timing, add_trace
class SmileDBLogger:
def __init__(self, dbname, model_name, res_id, uid=0):
assert isinstance(uid, (int, long)), 'uid should be an integer'
self._logger = logging.getLogger('smile_log')
db = RegistryManager.get(dbname)._db
pid = 0
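# Draw a per-logger id from a Postgres sequence (created on first use)
# so each logger instance gets a distinct `pid` in its log records.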
cr = db.cursor()
try:
cr.autocommit(True)
cr.execute("select relname from pg_class where relname='smile_log_seq'")
if not cr.rowcount:
cr.execute("create sequence smile_log_seq")
cr.execute("select nextval('smile_log_seq')")
res = cr.fetchone()
pid = res and res[0] or 0
finally:
cr.close()
self._logger_start = datetime.datetime.now()
self._logger_args = {'dbname': dbname, 'model_name': model_name, 'res_id': res_id, 'uid': uid, 'pid': pid}
@property
def pid(self):
return self._logger_args['pid']
def setLevel(self, level):
self._logger.setLevel(level)
def getEffectiveLevel(self):
return self._logger.getEffectiveLevel()
def debug(self, msg):
self._logger.debug(msg, self._logger_args)
def info(self, msg):
self._logger.info(msg, self._logger_args)
def warning(self, msg):
self._logger.warning(msg, self._logger_args)
def log(self, level, msg):
# logging.Logger.log() takes the level first; forward it along with msg.
self._logger.log(level, msg, self._logger_args)
@add_trace
|
@add_trace
def critical(self, msg):
self._logger.critical(msg, self._logger_args)
@add_trace
def exception(self, msg):
self._logger.exception(msg, self._logger_args)
@add_timing
def time_info(self, msg):
self._logger.info(msg, self._logger_args)
@add_timing
def time_debug(self, msg):
self._logger.debug(msg, self._logger_args)
|
def error(self, msg):
self._logger.error(msg, self._logger_args)
|
random_line_split
|
db_logger.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import logging
from openerp.modules.registry import RegistryManager
from .misc import add_timing, add_trace
class SmileDBLogger:
def __init__(self, dbname, model_name, res_id, uid=0):
assert isinstance(uid, (int, long)), 'uid should be an integer'
self._logger = logging.getLogger('smile_log')
db = RegistryManager.get(dbname)._db
pid = 0
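# Draw a per-logger id from a Postgres sequence (created on first use)
# so each logger instance gets a distinct `pid` in its log records.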
cr = db.cursor()
try:
cr.autocommit(True)
cr.execute("select relname from pg_class where relname='smile_log_seq'")
if not cr.rowcount:
|
cr.execute("select nextval('smile_log_seq')")
res = cr.fetchone()
pid = res and res[0] or 0
finally:
cr.close()
self._logger_start = datetime.datetime.now()
self._logger_args = {'dbname': dbname, 'model_name': model_name, 'res_id': res_id, 'uid': uid, 'pid': pid}
@property
def pid(self):
return self._logger_args['pid']
def setLevel(self, level):
self._logger.setLevel(level)
def getEffectiveLevel(self):
return self._logger.getEffectiveLevel()
def debug(self, msg):
self._logger.debug(msg, self._logger_args)
def info(self, msg):
self._logger.info(msg, self._logger_args)
def warning(self, msg):
self._logger.warning(msg, self._logger_args)
def log(self, level, msg):
# logging.Logger.log() takes the level first; forward it along with msg.
self._logger.log(level, msg, self._logger_args)
@add_trace
def error(self, msg):
self._logger.error(msg, self._logger_args)
@add_trace
def critical(self, msg):
self._logger.critical(msg, self._logger_args)
@add_trace
def exception(self, msg):
self._logger.exception(msg, self._logger_args)
@add_timing
def time_info(self, msg):
self._logger.info(msg, self._logger_args)
@add_timing
def time_debug(self, msg):
self._logger.debug(msg, self._logger_args)
|
cr.execute("create sequence smile_log_seq")
|
conditional_block
|