file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
39k
| suffix
large_stringlengths 0
36.1k
| middle
large_stringlengths 0
29.4k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
template_installer.rs
|
// Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use crate::elastic::client::Client;
use crate::prelude::*;
use anyhow::anyhow;
use anyhow::Result;
pub async fn install_template(client: &Client, template: &str) -> Result<()>
|
{
debug!("Checking for template \"{}\"", template);
match client.get_template(template).await {
Err(err) => {
warn!("Failed to check if template {} exists: {}", template, err);
}
Ok(None) => {
debug!("Did not find template for \"{}\", will install", template);
}
Ok(Some(_)) => {
debug!("Found template for \"{}\"", template);
return Ok(());
}
};
let version = client.get_version().await?;
if version.major < 7 {
return Err(anyhow!(
"Elasticsearch version {} not supported",
version.version
));
}
let template_string = {
if version.major >= 7 {
crate::resource::get_string("elasticsearch/template-es7x.json").ok_or_else(|| {
anyhow!(
"Failed to find template for Elasticsearch version {}",
version.version
)
})?
} else {
return Err(anyhow!(
"Elasticsearch version {} not supported",
version.version
));
}
};
info!("Installing template {}", &template);
let mut templatejs: serde_json::Value = serde_json::from_str(&template_string)?;
templatejs["index_patterns"] = format!("{}-*", template).into();
client
.put_template(template, serde_json::to_string(&templatejs)?)
.await?;
Ok(())
}
|
identifier_body
|
|
template_installer.rs
|
// Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use crate::elastic::client::Client;
use crate::prelude::*;
use anyhow::anyhow;
use anyhow::Result;
pub async fn
|
(client: &Client, template: &str) -> Result<()> {
debug!("Checking for template \"{}\"", template);
match client.get_template(template).await {
Err(err) => {
warn!("Failed to check if template {} exists: {}", template, err);
}
Ok(None) => {
debug!("Did not find template for \"{}\", will install", template);
}
Ok(Some(_)) => {
debug!("Found template for \"{}\"", template);
return Ok(());
}
};
let version = client.get_version().await?;
if version.major < 7 {
return Err(anyhow!(
"Elasticsearch version {} not supported",
version.version
));
}
let template_string = {
if version.major >= 7 {
crate::resource::get_string("elasticsearch/template-es7x.json").ok_or_else(|| {
anyhow!(
"Failed to find template for Elasticsearch version {}",
version.version
)
})?
} else {
return Err(anyhow!(
"Elasticsearch version {} not supported",
version.version
));
}
};
info!("Installing template {}", &template);
let mut templatejs: serde_json::Value = serde_json::from_str(&template_string)?;
templatejs["index_patterns"] = format!("{}-*", template).into();
client
.put_template(template, serde_json::to_string(&templatejs)?)
.await?;
Ok(())
}
|
install_template
|
identifier_name
|
template_installer.rs
|
// Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use crate::elastic::client::Client;
use crate::prelude::*;
|
debug!("Checking for template \"{}\"", template);
match client.get_template(template).await {
Err(err) => {
warn!("Failed to check if template {} exists: {}", template, err);
}
Ok(None) => {
debug!("Did not find template for \"{}\", will install", template);
}
Ok(Some(_)) => {
debug!("Found template for \"{}\"", template);
return Ok(());
}
};
let version = client.get_version().await?;
if version.major < 7 {
return Err(anyhow!(
"Elasticsearch version {} not supported",
version.version
));
}
let template_string = {
if version.major >= 7 {
crate::resource::get_string("elasticsearch/template-es7x.json").ok_or_else(|| {
anyhow!(
"Failed to find template for Elasticsearch version {}",
version.version
)
})?
} else {
return Err(anyhow!(
"Elasticsearch version {} not supported",
version.version
));
}
};
info!("Installing template {}", &template);
let mut templatejs: serde_json::Value = serde_json::from_str(&template_string)?;
templatejs["index_patterns"] = format!("{}-*", template).into();
client
.put_template(template, serde_json::to_string(&templatejs)?)
.await?;
Ok(())
}
|
use anyhow::anyhow;
use anyhow::Result;
pub async fn install_template(client: &Client, template: &str) -> Result<()> {
|
random_line_split
|
template_installer.rs
|
// Copyright (C) 2020 Jason Ish
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use crate::elastic::client::Client;
use crate::prelude::*;
use anyhow::anyhow;
use anyhow::Result;
pub async fn install_template(client: &Client, template: &str) -> Result<()> {
debug!("Checking for template \"{}\"", template);
match client.get_template(template).await {
Err(err) => {
warn!("Failed to check if template {} exists: {}", template, err);
}
Ok(None) => {
debug!("Did not find template for \"{}\", will install", template);
}
Ok(Some(_)) =>
|
};
let version = client.get_version().await?;
if version.major < 7 {
return Err(anyhow!(
"Elasticsearch version {} not supported",
version.version
));
}
let template_string = {
if version.major >= 7 {
crate::resource::get_string("elasticsearch/template-es7x.json").ok_or_else(|| {
anyhow!(
"Failed to find template for Elasticsearch version {}",
version.version
)
})?
} else {
return Err(anyhow!(
"Elasticsearch version {} not supported",
version.version
));
}
};
info!("Installing template {}", &template);
let mut templatejs: serde_json::Value = serde_json::from_str(&template_string)?;
templatejs["index_patterns"] = format!("{}-*", template).into();
client
.put_template(template, serde_json::to_string(&templatejs)?)
.await?;
Ok(())
}
|
{
debug!("Found template for \"{}\"", template);
return Ok(());
}
|
conditional_block
|
index.d.ts
|
// Type definitions for tough-cookie 2.3
// Project: https://github.com/salesforce/tough-cookie
// Definitions by: Leonard Thieu <https://github.com/leonard-thieu>
// LiJinyao <https://github.com/LiJinyao>
// Michael Wei <https://github.com/no2chem>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.2
/**
* Parse a cookie date string into a Date.
* Parses according to RFC6265 Section 5.1.1, not Date.parse().
*/
export function parseDate(string: string): Date;
/**
* Format a Date into a RFC1123 string (the RFC6265-recommended format).
*/
export function formatDate(date: Date): string;
/**
* Transforms a domain-name into a canonical domain-name.
* The canonical domain-name is a trimmed, lowercased, stripped-of-leading-dot
* and optionally punycode-encoded domain-name (Section 5.1.2 of RFC6265).
* For the most part, this function is idempotent (can be run again on its output without ill effects).
*/
export function canonicalDomain(str: string): string;
/**
* Answers "does this real domain match the domain in a cookie?".
* The str is the "current" domain-name and the domStr is the "cookie" domain-name.
* Matches according to RFC6265 Section 5.1.3, but it helps to think of it as a "suffix match".
*
* The canonicalize parameter will run the other two paramters through canonicalDomain or not.
*/
export function domainMatch(str: string, domStr: string, canonicalize?: boolean): boolean;
/**
* Given a current request/response path, gives the Path apropriate for storing in a cookie.
* This is basically the "directory" of a "file" in the path, but is specified by Section 5.1.4 of the RFC.
*
* The path parameter MUST be only the pathname part of a URI (i.e. excludes the hostname, query, fragment, etc.).
* This is the .pathname property of node's uri.parse() output.
*/
export function defaultPath(path: string): string;
/**
* Answers "does the request-path path-match a given cookie-path?" as per RFC6265 Section 5.1.4.
* Returns a boolean.
*
* This is essentially a prefix-match where cookiePath is a prefix of reqPath.
*/
export function pathMatch(reqPath: string, cookiePath: string): boolean;
/**
* alias for Cookie.fromJSON(string)
*/
export function fromJSON(string: string): Cookie;
export function getPublicSuffix(hostname: string): string | null;
export function cookieCompare(a: Cookie, b: Cookie): number;
export function permuteDomain(domain: string): string[];
export function permutePath(path: string): string[];
// region Cookie
export class
|
{
static parse(cookieString: string, options?: Cookie.ParseOptions): Cookie | undefined;
static fromJSON(strOrObj: string | object): Cookie | null;
constructor(properties?: Cookie.Properties);
// TODO: Some of the following properties might actually be nullable.
key: string;
value: string;
expires: Date;
maxAge: number | 'Infinity' | '-Infinity';
domain: string;
path: string;
secure: boolean;
httpOnly: boolean;
extensions: string[];
creation: Date;
creationIndex: number;
hostOnly: boolean | null;
pathIsDefault: boolean | null;
lastAccessed: Date | null;
toString(): string;
cookieString(): string;
setExpires(String: string): void;
setMaxAge(number: number): void;
expiryTime(now?: number): number | typeof Infinity;
expiryDate(now?: number): Date;
TTL(now?: Date): number | typeof Infinity;
canonicalizedDomain(): string;
cdomain(): string;
toJSON(): { [key: string]: any; };
clone(): Cookie;
validate(): boolean | string;
}
export namespace Cookie {
interface ParseOptions {
loose?: boolean;
}
interface Properties {
key?: string;
value?: string;
expires?: Date;
maxAge?: number | 'Infinity' | '-Infinity';
domain?: string;
path?: string;
secure?: boolean;
httpOnly?: boolean;
extensions?: string[];
creation?: Date;
creationIndex?: number;
hostOnly?: boolean;
pathIsDefault?: boolean;
lastAccessed?: Date;
}
interface Serialized {
[key: string]: any;
}
}
// endregion
// region CookieJar
export class CookieJar {
static deserialize(serialized: CookieJar.Serialized | string, store: Store, cb: (err: Error | null, object: CookieJar) => void): void;
static deserialize(serialized: CookieJar.Serialized | string, cb: (err: Error | null, object: CookieJar) => void): void;
static deserializeSync(serialized: CookieJar.Serialized | string, store?: Store): CookieJar;
static fromJSON(string: string): CookieJar;
constructor(store?: Store, options?: CookieJar.Options);
setCookie(cookieOrString: Cookie | string, currentUrl: string, options: CookieJar.SetCookieOptions, cb: (err: Error | null, cookie: Cookie) => void): void;
setCookie(cookieOrString: Cookie | string, currentUrl: string, cb: (err: Error, cookie: Cookie) => void): void;
setCookieSync(cookieOrString: Cookie | string, currentUrl: string, options?: CookieJar.SetCookieOptions): void;
getCookies(currentUrl: string, options: CookieJar.GetCookiesOptions, cb: (err: Error | null, cookies: Cookie[]) => void): void;
getCookies(currentUrl: string, cb: (err: Error | null, cookies: Cookie[]) => void): void;
getCookiesSync(currentUrl: string, options?: CookieJar.GetCookiesOptions): Cookie[];
getCookieString(currentUrl: string, options: CookieJar.GetCookiesOptions, cb: (err: Error | null, cookies: string) => void): void;
getCookieString(currentUrl: string, cb: (err: Error | null, cookies: string) => void): void;
getCookieStringSync(currentUrl: string, options?: CookieJar.GetCookiesOptions): string;
getSetCookieStrings(currentUrl: string, options: CookieJar.GetCookiesOptions, cb: (err: Error | null, cookies: string) => void): void;
getSetCookieStrings(currentUrl: string, cb: (err: Error | null, cookies: string) => void): void;
getSetCookieStringsSync(currentUrl: string, options?: CookieJar.GetCookiesOptions): string;
serialize(cb: (err: Error | null, serializedObject: CookieJar.Serialized) => void): void;
serializeSync(): CookieJar.Serialized;
toJSON(): CookieJar.Serialized;
clone(store: Store, cb: (err: Error | null, newJar: CookieJar) => void): void;
clone(cb: (err: Error | null, newJar: CookieJar) => void): void;
cloneSync(store: Store): CookieJar;
}
export namespace CookieJar {
interface Options {
rejectPublicSuffixes?: boolean;
looseMode?: boolean;
}
interface SetCookieOptions {
http?: boolean;
secure?: boolean;
now?: Date;
ignoreError?: boolean;
}
interface GetCookiesOptions {
http?: boolean;
secure?: boolean;
date?: Date;
expire?: boolean;
allPoints?: boolean;
}
interface Serialized {
version: string;
storeType: string;
rejectPublicSuffixes: boolean;
cookies: Cookie.Serialized[];
}
}
// endregion
// region Store
export abstract class Store {
findCookie(domain: string, path: string, key: string, cb: (err: Error | null, cookie: Cookie | null) => void): void;
findCookies(domain: string, path: string, cb: (err: Error | null, cookie: Cookie[]) => void): void;
putCookie(cookie: Cookie, cb: (err: Error | null) => void): void;
updateCookie(oldCookie: Cookie, newCookie: Cookie, cb: (err: Error | null) => void): void;
removeCookie(domain: string, path: string, key: string, cb: (err: Error | null) => void): void;
removeCookies(domain: string, path: string, cb: (err: Error | null) => void): void;
getAllCookies(cb: (err: Error | null, cookie: Cookie[]) => void): void;
}
export class MemoryCookieStore extends Store { }
// endregion
|
Cookie
|
identifier_name
|
index.d.ts
|
// Type definitions for tough-cookie 2.3
// Project: https://github.com/salesforce/tough-cookie
// Definitions by: Leonard Thieu <https://github.com/leonard-thieu>
// LiJinyao <https://github.com/LiJinyao>
// Michael Wei <https://github.com/no2chem>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.2
/**
* Parse a cookie date string into a Date.
* Parses according to RFC6265 Section 5.1.1, not Date.parse().
*/
export function parseDate(string: string): Date;
/**
* Format a Date into a RFC1123 string (the RFC6265-recommended format).
*/
export function formatDate(date: Date): string;
/**
* Transforms a domain-name into a canonical domain-name.
* The canonical domain-name is a trimmed, lowercased, stripped-of-leading-dot
* and optionally punycode-encoded domain-name (Section 5.1.2 of RFC6265).
* For the most part, this function is idempotent (can be run again on its output without ill effects).
*/
export function canonicalDomain(str: string): string;
/**
* Answers "does this real domain match the domain in a cookie?".
* The str is the "current" domain-name and the domStr is the "cookie" domain-name.
* Matches according to RFC6265 Section 5.1.3, but it helps to think of it as a "suffix match".
*
* The canonicalize parameter will run the other two paramters through canonicalDomain or not.
*/
export function domainMatch(str: string, domStr: string, canonicalize?: boolean): boolean;
/**
* Given a current request/response path, gives the Path apropriate for storing in a cookie.
* This is basically the "directory" of a "file" in the path, but is specified by Section 5.1.4 of the RFC.
*
* The path parameter MUST be only the pathname part of a URI (i.e. excludes the hostname, query, fragment, etc.).
* This is the .pathname property of node's uri.parse() output.
*/
export function defaultPath(path: string): string;
/**
* Answers "does the request-path path-match a given cookie-path?" as per RFC6265 Section 5.1.4.
* Returns a boolean.
*
* This is essentially a prefix-match where cookiePath is a prefix of reqPath.
*/
export function pathMatch(reqPath: string, cookiePath: string): boolean;
/**
* alias for Cookie.fromJSON(string)
|
export function cookieCompare(a: Cookie, b: Cookie): number;
export function permuteDomain(domain: string): string[];
export function permutePath(path: string): string[];
// region Cookie
export class Cookie {
static parse(cookieString: string, options?: Cookie.ParseOptions): Cookie | undefined;
static fromJSON(strOrObj: string | object): Cookie | null;
constructor(properties?: Cookie.Properties);
// TODO: Some of the following properties might actually be nullable.
key: string;
value: string;
expires: Date;
maxAge: number | 'Infinity' | '-Infinity';
domain: string;
path: string;
secure: boolean;
httpOnly: boolean;
extensions: string[];
creation: Date;
creationIndex: number;
hostOnly: boolean | null;
pathIsDefault: boolean | null;
lastAccessed: Date | null;
toString(): string;
cookieString(): string;
setExpires(String: string): void;
setMaxAge(number: number): void;
expiryTime(now?: number): number | typeof Infinity;
expiryDate(now?: number): Date;
TTL(now?: Date): number | typeof Infinity;
canonicalizedDomain(): string;
cdomain(): string;
toJSON(): { [key: string]: any; };
clone(): Cookie;
validate(): boolean | string;
}
export namespace Cookie {
interface ParseOptions {
loose?: boolean;
}
interface Properties {
key?: string;
value?: string;
expires?: Date;
maxAge?: number | 'Infinity' | '-Infinity';
domain?: string;
path?: string;
secure?: boolean;
httpOnly?: boolean;
extensions?: string[];
creation?: Date;
creationIndex?: number;
hostOnly?: boolean;
pathIsDefault?: boolean;
lastAccessed?: Date;
}
interface Serialized {
[key: string]: any;
}
}
// endregion
// region CookieJar
export class CookieJar {
static deserialize(serialized: CookieJar.Serialized | string, store: Store, cb: (err: Error | null, object: CookieJar) => void): void;
static deserialize(serialized: CookieJar.Serialized | string, cb: (err: Error | null, object: CookieJar) => void): void;
static deserializeSync(serialized: CookieJar.Serialized | string, store?: Store): CookieJar;
static fromJSON(string: string): CookieJar;
constructor(store?: Store, options?: CookieJar.Options);
setCookie(cookieOrString: Cookie | string, currentUrl: string, options: CookieJar.SetCookieOptions, cb: (err: Error | null, cookie: Cookie) => void): void;
setCookie(cookieOrString: Cookie | string, currentUrl: string, cb: (err: Error, cookie: Cookie) => void): void;
setCookieSync(cookieOrString: Cookie | string, currentUrl: string, options?: CookieJar.SetCookieOptions): void;
getCookies(currentUrl: string, options: CookieJar.GetCookiesOptions, cb: (err: Error | null, cookies: Cookie[]) => void): void;
getCookies(currentUrl: string, cb: (err: Error | null, cookies: Cookie[]) => void): void;
getCookiesSync(currentUrl: string, options?: CookieJar.GetCookiesOptions): Cookie[];
getCookieString(currentUrl: string, options: CookieJar.GetCookiesOptions, cb: (err: Error | null, cookies: string) => void): void;
getCookieString(currentUrl: string, cb: (err: Error | null, cookies: string) => void): void;
getCookieStringSync(currentUrl: string, options?: CookieJar.GetCookiesOptions): string;
getSetCookieStrings(currentUrl: string, options: CookieJar.GetCookiesOptions, cb: (err: Error | null, cookies: string) => void): void;
getSetCookieStrings(currentUrl: string, cb: (err: Error | null, cookies: string) => void): void;
getSetCookieStringsSync(currentUrl: string, options?: CookieJar.GetCookiesOptions): string;
serialize(cb: (err: Error | null, serializedObject: CookieJar.Serialized) => void): void;
serializeSync(): CookieJar.Serialized;
toJSON(): CookieJar.Serialized;
clone(store: Store, cb: (err: Error | null, newJar: CookieJar) => void): void;
clone(cb: (err: Error | null, newJar: CookieJar) => void): void;
cloneSync(store: Store): CookieJar;
}
export namespace CookieJar {
interface Options {
rejectPublicSuffixes?: boolean;
looseMode?: boolean;
}
interface SetCookieOptions {
http?: boolean;
secure?: boolean;
now?: Date;
ignoreError?: boolean;
}
interface GetCookiesOptions {
http?: boolean;
secure?: boolean;
date?: Date;
expire?: boolean;
allPoints?: boolean;
}
interface Serialized {
version: string;
storeType: string;
rejectPublicSuffixes: boolean;
cookies: Cookie.Serialized[];
}
}
// endregion
// region Store
export abstract class Store {
findCookie(domain: string, path: string, key: string, cb: (err: Error | null, cookie: Cookie | null) => void): void;
findCookies(domain: string, path: string, cb: (err: Error | null, cookie: Cookie[]) => void): void;
putCookie(cookie: Cookie, cb: (err: Error | null) => void): void;
updateCookie(oldCookie: Cookie, newCookie: Cookie, cb: (err: Error | null) => void): void;
removeCookie(domain: string, path: string, key: string, cb: (err: Error | null) => void): void;
removeCookies(domain: string, path: string, cb: (err: Error | null) => void): void;
getAllCookies(cb: (err: Error | null, cookie: Cookie[]) => void): void;
}
export class MemoryCookieStore extends Store { }
// endregion
|
*/
export function fromJSON(string: string): Cookie;
export function getPublicSuffix(hostname: string): string | null;
|
random_line_split
|
panel.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import horizon
from horizon.dashboards.nova import dashboard
class
|
(horizon.Panel):
name = "Instances & Volumes"
slug = 'instances_and_volumes'
dashboard.Nova.register(InstancesAndVolumes)
|
InstancesAndVolumes
|
identifier_name
|
panel.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import horizon
from horizon.dashboards.nova import dashboard
class InstancesAndVolumes(horizon.Panel):
name = "Instances & Volumes"
slug = 'instances_and_volumes'
|
dashboard.Nova.register(InstancesAndVolumes)
|
random_line_split
|
|
panel.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import horizon
from horizon.dashboards.nova import dashboard
class InstancesAndVolumes(horizon.Panel):
|
dashboard.Nova.register(InstancesAndVolumes)
|
name = "Instances & Volumes"
slug = 'instances_and_volumes'
|
identifier_body
|
netbeans8.js
|
(function(root, factory) {
if (typeof define === 'function' && define.amd)
|
else if (typeof exports === 'object') {
module.exports = factory();
} else {
var lastName = root,
namespace = 'allColors.ideaColorThemes.netbeans8'.split('.');
for (var i = 0; i < namespace.length; i++) {
if (lastName[namespace[i]] === undefined) {
lastName = lastName[namespace[i]] = {};
}
}
root.allColors.ideaColorThemes.netbeans8 = factory();
}
}(this, function() {
return [
'#a1f2ac',
'#eeeeee',
'#ffffff',
'#cccccc',
'#a0a9f9',
'#ffc8c8',
'#b7b7b7',
'#ce7b00',
'#d25252',
'#868686',
'#808080',
'#ffded8',
'#ffc8bd',
'#d6d6d6',
'#cbcbcb',
'#c8f2c8',
'#baeeba',
'#bccff9',
'#f5f7f0',
'#ff0000',
'#e9e9e9',
'#f4e803',
'#eceba3',
'#ffffcc',
'#660e7a',
'#ccffcc',
'#ffcccc',
'#99ccff',
'#f49810',
'#d8d8d8',
'#ffdcdc',
'#f6ebbc',
'#ffff00'
];
}));
|
{
define([], factory);
}
|
conditional_block
|
netbeans8.js
|
(function(root, factory) {
if (typeof define === 'function' && define.amd) {
define([], factory);
} else if (typeof exports === 'object') {
module.exports = factory();
} else {
var lastName = root,
namespace = 'allColors.ideaColorThemes.netbeans8'.split('.');
for (var i = 0; i < namespace.length; i++) {
if (lastName[namespace[i]] === undefined) {
lastName = lastName[namespace[i]] = {};
}
}
root.allColors.ideaColorThemes.netbeans8 = factory();
}
}(this, function() {
|
'#a0a9f9',
'#ffc8c8',
'#b7b7b7',
'#ce7b00',
'#d25252',
'#868686',
'#808080',
'#ffded8',
'#ffc8bd',
'#d6d6d6',
'#cbcbcb',
'#c8f2c8',
'#baeeba',
'#bccff9',
'#f5f7f0',
'#ff0000',
'#e9e9e9',
'#f4e803',
'#eceba3',
'#ffffcc',
'#660e7a',
'#ccffcc',
'#ffcccc',
'#99ccff',
'#f49810',
'#d8d8d8',
'#ffdcdc',
'#f6ebbc',
'#ffff00'
];
}));
|
return [
'#a1f2ac',
'#eeeeee',
'#ffffff',
'#cccccc',
|
random_line_split
|
Users.js
|
/**
* Users collection.
* Initializes Users collection and provides methods
* for accessing the collection.
* */
users = "Users";
Users = new Mongo.Collection(users);
/**
* Schema for Users
*/
Users.attachSchema(new SimpleSchema({
userName:{
label: "Username",
type: String,
optional: false,
autoform:{
group: users,
placeholder: "Username"
}
},
firstName:{
|
group: users,
placeholder: "First Name"
}
},
lastName:{
label: "Last Name",
type: String,
optional: true,
autoform:{
group: users,
placeholder: "Last Name"
}
},
email:{
label: "Email",
type: String,
optional: false,
unique: true,
autoform:{
group: users,
placeholder: "Email"
}
},
roles: {
type: Object,
optional: true,
blackbox: true
}
}));
|
label: "First Name",
type: String,
optional: true,
autoform:{
|
random_line_split
|
filter.rs
|
use crate::fns::FnMut1;
use core::fmt;
use core::pin::Pin;
use futures_core::future::Future;
use futures_core::ready;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
#[cfg(feature = "sink")]
use futures_sink::Sink;
use pin_project_lite::pin_project;
pin_project! {
/// Stream for the [`filter`](super::StreamExt::filter) method.
#[must_use = "streams do nothing unless polled"]
pub struct Filter<St, Fut, F>
where St: Stream,
{
#[pin]
stream: St,
f: F,
#[pin]
pending_fut: Option<Fut>,
pending_item: Option<St::Item>,
}
}
impl<St, Fut, F> fmt::Debug for Filter<St, Fut, F>
where
St: Stream + fmt::Debug,
St::Item: fmt::Debug,
Fut: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Filter")
.field("stream", &self.stream)
.field("pending_fut", &self.pending_fut)
.field("pending_item", &self.pending_item)
.finish()
}
}
#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
impl<St, Fut, F> Filter<St, Fut, F>
where
St: Stream,
F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
Fut: Future<Output = bool>,
{
pub(super) fn new(stream: St, f: F) -> Self {
Self { stream, f, pending_fut: None, pending_item: None }
}
delegate_access_inner!(stream, St, ());
}
impl<St, Fut, F> FusedStream for Filter<St, Fut, F>
where
St: Stream + FusedStream,
F: FnMut(&St::Item) -> Fut,
Fut: Future<Output = bool>,
{
fn is_terminated(&self) -> bool {
self.pending_fut.is_none() && self.stream.is_terminated()
}
}
#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
impl<St, Fut, F> Stream for Filter<St, Fut, F>
where
St: Stream,
F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
Fut: Future<Output = bool>,
{
type Item = St::Item;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
let mut this = self.project();
Poll::Ready(loop {
if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
let res = ready!(fut.poll(cx));
this.pending_fut.set(None);
if res {
break this.pending_item.take();
}
*this.pending_item = None;
} else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
this.pending_fut.set(Some(this.f.call_mut(&item)));
*this.pending_item = Some(item);
} else {
break None;
}
})
}
fn
|
(&self) -> (usize, Option<usize>) {
let pending_len = if self.pending_item.is_some() { 1 } else { 0 };
let (_, upper) = self.stream.size_hint();
let upper = match upper {
Some(x) => x.checked_add(pending_len),
None => None,
};
(0, upper) // can't know a lower bound, due to the predicate
}
}
// Forwarding impl of Sink from the underlying stream
#[cfg(feature = "sink")]
impl<S, Fut, F, Item> Sink<Item> for Filter<S, Fut, F>
where
S: Stream + Sink<Item>,
F: FnMut(&S::Item) -> Fut,
Fut: Future<Output = bool>,
{
type Error = S::Error;
delegate_sink!(stream, Item);
}
|
size_hint
|
identifier_name
|
filter.rs
|
use crate::fns::FnMut1;
use core::fmt;
use core::pin::Pin;
use futures_core::future::Future;
use futures_core::ready;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
#[cfg(feature = "sink")]
use futures_sink::Sink;
use pin_project_lite::pin_project;
pin_project! {
/// Stream for the [`filter`](super::StreamExt::filter) method.
#[must_use = "streams do nothing unless polled"]
pub struct Filter<St, Fut, F>
where St: Stream,
{
#[pin]
stream: St,
f: F,
#[pin]
pending_fut: Option<Fut>,
pending_item: Option<St::Item>,
}
}
impl<St, Fut, F> fmt::Debug for Filter<St, Fut, F>
where
St: Stream + fmt::Debug,
St::Item: fmt::Debug,
Fut: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Filter")
.field("stream", &self.stream)
.field("pending_fut", &self.pending_fut)
.field("pending_item", &self.pending_item)
.finish()
}
}
#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
impl<St, Fut, F> Filter<St, Fut, F>
where
St: Stream,
F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
Fut: Future<Output = bool>,
{
pub(super) fn new(stream: St, f: F) -> Self {
Self { stream, f, pending_fut: None, pending_item: None }
}
delegate_access_inner!(stream, St, ());
}
impl<St, Fut, F> FusedStream for Filter<St, Fut, F>
where
St: Stream + FusedStream,
F: FnMut(&St::Item) -> Fut,
Fut: Future<Output = bool>,
{
fn is_terminated(&self) -> bool {
self.pending_fut.is_none() && self.stream.is_terminated()
}
}
#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
impl<St, Fut, F> Stream for Filter<St, Fut, F>
where
St: Stream,
F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
Fut: Future<Output = bool>,
{
type Item = St::Item;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
let mut this = self.project();
Poll::Ready(loop {
if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
let res = ready!(fut.poll(cx));
this.pending_fut.set(None);
if res {
break this.pending_item.take();
}
*this.pending_item = None;
} else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
this.pending_fut.set(Some(this.f.call_mut(&item)));
*this.pending_item = Some(item);
} else {
break None;
}
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
let pending_len = if self.pending_item.is_some() { 1 } else
|
;
let (_, upper) = self.stream.size_hint();
let upper = match upper {
Some(x) => x.checked_add(pending_len),
None => None,
};
(0, upper) // can't know a lower bound, due to the predicate
}
}
// Forwarding impl of Sink from the underlying stream
#[cfg(feature = "sink")]
impl<S, Fut, F, Item> Sink<Item> for Filter<S, Fut, F>
where
S: Stream + Sink<Item>,
F: FnMut(&S::Item) -> Fut,
Fut: Future<Output = bool>,
{
type Error = S::Error;
delegate_sink!(stream, Item);
}
|
{ 0 }
|
conditional_block
|
filter.rs
|
use crate::fns::FnMut1;
use core::fmt;
use core::pin::Pin;
use futures_core::future::Future;
use futures_core::ready;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
#[cfg(feature = "sink")]
use futures_sink::Sink;
use pin_project_lite::pin_project;
pin_project! {
/// Stream for the [`filter`](super::StreamExt::filter) method.
#[must_use = "streams do nothing unless polled"]
pub struct Filter<St, Fut, F>
where St: Stream,
{
#[pin]
stream: St,
f: F,
#[pin]
pending_fut: Option<Fut>,
pending_item: Option<St::Item>,
}
}
impl<St, Fut, F> fmt::Debug for Filter<St, Fut, F>
where
St: Stream + fmt::Debug,
St::Item: fmt::Debug,
Fut: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Filter")
.field("stream", &self.stream)
.field("pending_fut", &self.pending_fut)
.field("pending_item", &self.pending_item)
.finish()
}
}
#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
impl<St, Fut, F> Filter<St, Fut, F>
where
St: Stream,
F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
Fut: Future<Output = bool>,
{
pub(super) fn new(stream: St, f: F) -> Self {
Self { stream, f, pending_fut: None, pending_item: None }
}
delegate_access_inner!(stream, St, ());
}
impl<St, Fut, F> FusedStream for Filter<St, Fut, F>
where
St: Stream + FusedStream,
F: FnMut(&St::Item) -> Fut,
Fut: Future<Output = bool>,
{
fn is_terminated(&self) -> bool {
self.pending_fut.is_none() && self.stream.is_terminated()
}
}
#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
impl<St, Fut, F> Stream for Filter<St, Fut, F>
where
St: Stream,
|
{
type Item = St::Item;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
let mut this = self.project();
Poll::Ready(loop {
if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
let res = ready!(fut.poll(cx));
this.pending_fut.set(None);
if res {
break this.pending_item.take();
}
*this.pending_item = None;
} else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
this.pending_fut.set(Some(this.f.call_mut(&item)));
*this.pending_item = Some(item);
} else {
break None;
}
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
let pending_len = if self.pending_item.is_some() { 1 } else { 0 };
let (_, upper) = self.stream.size_hint();
let upper = match upper {
Some(x) => x.checked_add(pending_len),
None => None,
};
(0, upper) // can't know a lower bound, due to the predicate
}
}
// Forwarding impl of Sink from the underlying stream
#[cfg(feature = "sink")]
impl<S, Fut, F, Item> Sink<Item> for Filter<S, Fut, F>
where
S: Stream + Sink<Item>,
F: FnMut(&S::Item) -> Fut,
Fut: Future<Output = bool>,
{
type Error = S::Error;
delegate_sink!(stream, Item);
}
|
F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
Fut: Future<Output = bool>,
|
random_line_split
|
filter.rs
|
use crate::fns::FnMut1;
use core::fmt;
use core::pin::Pin;
use futures_core::future::Future;
use futures_core::ready;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
#[cfg(feature = "sink")]
use futures_sink::Sink;
use pin_project_lite::pin_project;
pin_project! {
/// Stream for the [`filter`](super::StreamExt::filter) method.
#[must_use = "streams do nothing unless polled"]
pub struct Filter<St, Fut, F>
where St: Stream,
{
#[pin]
stream: St,
f: F,
#[pin]
pending_fut: Option<Fut>,
pending_item: Option<St::Item>,
}
}
impl<St, Fut, F> fmt::Debug for Filter<St, Fut, F>
where
St: Stream + fmt::Debug,
St::Item: fmt::Debug,
Fut: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Filter")
.field("stream", &self.stream)
.field("pending_fut", &self.pending_fut)
.field("pending_item", &self.pending_item)
.finish()
}
}
#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
impl<St, Fut, F> Filter<St, Fut, F>
where
St: Stream,
F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
Fut: Future<Output = bool>,
{
pub(super) fn new(stream: St, f: F) -> Self {
Self { stream, f, pending_fut: None, pending_item: None }
}
delegate_access_inner!(stream, St, ());
}
impl<St, Fut, F> FusedStream for Filter<St, Fut, F>
where
St: Stream + FusedStream,
F: FnMut(&St::Item) -> Fut,
Fut: Future<Output = bool>,
{
fn is_terminated(&self) -> bool {
self.pending_fut.is_none() && self.stream.is_terminated()
}
}
#[allow(single_use_lifetimes)] // https://github.com/rust-lang/rust/issues/55058
impl<St, Fut, F> Stream for Filter<St, Fut, F>
where
St: Stream,
F: for<'a> FnMut1<&'a St::Item, Output = Fut>,
Fut: Future<Output = bool>,
{
type Item = St::Item;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<St::Item>> {
let mut this = self.project();
Poll::Ready(loop {
if let Some(fut) = this.pending_fut.as_mut().as_pin_mut() {
let res = ready!(fut.poll(cx));
this.pending_fut.set(None);
if res {
break this.pending_item.take();
}
*this.pending_item = None;
} else if let Some(item) = ready!(this.stream.as_mut().poll_next(cx)) {
this.pending_fut.set(Some(this.f.call_mut(&item)));
*this.pending_item = Some(item);
} else {
break None;
}
})
}
fn size_hint(&self) -> (usize, Option<usize>)
|
}
// Forwarding impl of Sink from the underlying stream
#[cfg(feature = "sink")]
impl<S, Fut, F, Item> Sink<Item> for Filter<S, Fut, F>
where
S: Stream + Sink<Item>,
F: FnMut(&S::Item) -> Fut,
Fut: Future<Output = bool>,
{
type Error = S::Error;
delegate_sink!(stream, Item);
}
|
{
let pending_len = if self.pending_item.is_some() { 1 } else { 0 };
let (_, upper) = self.stream.size_hint();
let upper = match upper {
Some(x) => x.checked_add(pending_len),
None => None,
};
(0, upper) // can't know a lower bound, due to the predicate
}
|
identifier_body
|
package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Vasp(MakefilePackage):
"""
The Vienna Ab initio Simulation Package (VASP)
is a computer program for atomic scale materials modelling,
e.g. electronic structure calculations
and quantum-mechanical molecular dynamics, from first principles.
"""
homepage = "https://vasp.at"
url = "file://{0}/vasp.5.4.4.pl2.tgz".format(os.getcwd())
manual_download = True
version('6.2.0', sha256='49e7ba351bd634bc5f5f67a8ef1e38e64e772857a1c02f602828898a84197e25')
version('6.1.1', sha256='e37a4dfad09d3ad0410833bcd55af6b599179a085299026992c2d8e319bf6927')
version('5.4.4.pl2', sha256='98f75fd75399a23d76d060a6155f4416b340a1704f256a00146f89024035bc8e')
version('5.4.4', sha256='5bd2449462386f01e575f9adf629c08cb03a13142806ffb6a71309ca4431cfb3')
resource(name='vaspsol',
git='https://github.com/henniggroup/VASPsol.git',
tag='V1.0',
when='+vaspsol')
variant('openmp', default=False,
description='Enable openmp build')
variant('scalapack', default=False,
description='Enables build with SCALAPACK')
variant('cuda', default=False,
description='Enables running on Nvidia GPUs')
variant('vaspsol', default=False,
description='Enable VASPsol implicit solvation model\n'
'https://github.com/henniggroup/VASPsol')
depends_on('rsync', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('fftw-api')
depends_on('mpi', type=('build', 'link', 'run'))
depends_on('scalapack', when='+scalapack')
depends_on('cuda', when='+cuda')
depends_on('qd', when='%nvhpc')
conflicts('%gcc@:8', msg='GFortran before 9.x does not support all features needed to build VASP')
conflicts('+vaspsol', when='+cuda', msg='+vaspsol only available for CPU')
conflicts('+openmp', when='@:6.1.1', msg='openmp support started from 6.2')
parallel = False
def edit(self, spec, prefix):
if '%gcc' in spec:
if '+openmp' in spec:
make_include = join_path('arch', 'makefile.include.linux_gnu_omp')
else:
make_include = join_path('arch', 'makefile.include.linux_gnu')
elif '%nvhpc' in spec:
make_include = join_path('arch', 'makefile.include.linux_pgi')
filter_file('-pgc++libs', '-c++libs', make_include, string=True)
filter_file('pgcc', spack_cc, make_include)
filter_file('pgc++', spack_cxx, make_include, string=True)
filter_file('pgfortran', spack_fc, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/include',
spec['qd'].prefix.include, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/lib',
spec['qd'].prefix.lib, make_include)
elif '%aocc' in spec:
if '+openmp' in spec:
copy(
join_path('arch', 'makefile.include.linux_gnu_omp'),
join_path('arch', 'makefile.include.linux_aocc_omp')
)
make_include = join_path('arch', 'makefile.include.linux_aocc_omp')
else:
copy(
join_path('arch', 'makefile.include.linux_gnu'),
join_path('arch', 'makefile.include.linux_aocc')
)
make_include = join_path('arch', 'makefile.include.linux_aocc')
filter_file(
'gcc', '{0} {1}'.format(spack_cc, '-Mfree'),
make_include, string=True
)
filter_file('g++', spack_cxx, make_include, string=True)
filter_file('^CFLAGS_LIB[ ]{0,}=.*$',
'CFLAGS_LIB = -O3', make_include)
filter_file('^FFLAGS_LIB[ ]{0,}=.*$',
'FFLAGS_LIB = -O2', make_include)
filter_file('^OFLAG[ ]{0,}=.*$',
'OFLAG = -O3', make_include)
filter_file('^FC[ ]{0,}=.*$',
'FC = {0}'.format(spec['mpi'].mpifc),
make_include, string=True)
filter_file('^FCL[ ]{0,}=.*$',
'FCL = {0}'.format(spec['mpi'].mpifc),
make_include, string=True)
else:
if '+openmp' in spec:
make_include = join_path('arch',
'makefile.include.linux_{0}_omp'.
format(spec.compiler.name))
else:
make_include = join_path('arch',
'makefile.include.linux_' +
spec.compiler.name)
os.rename(make_include, 'makefile.include')
# This bunch of 'filter_file()' is to make these options settable
# as environment variables
filter_file('^CPP_OPTIONS[ ]{0,}=[ ]{0,}',
'CPP_OPTIONS ?= ',
'makefile.include')
filter_file('^FFLAGS[ ]{0,}=[ ]{0,}',
'FFLAGS ?= ',
'makefile.include')
filter_file('^LIBDIR[ ]{0,}=.*$', '', 'makefile.include')
filter_file('^BLAS[ ]{0,}=.*$', 'BLAS ?=', 'makefile.include')
filter_file('^LAPACK[ ]{0,}=.*$', 'LAPACK ?=', 'makefile.include')
filter_file('^FFTW[ ]{0,}?=.*$', 'FFTW ?=', 'makefile.include')
filter_file('^MPI_INC[ ]{0,}=.*$', 'MPI_INC ?=', 'makefile.include')
filter_file('-DscaLAPACK.*$\n', '', 'makefile.include')
filter_file('^SCALAPACK[ ]{0,}=.*$', 'SCALAPACK ?=', 'makefile.include')
if '+cuda' in spec:
filter_file('^OBJECTS_GPU[ ]{0,}=.*$',
'OBJECTS_GPU ?=',
'makefile.include')
filter_file('^CPP_GPU[ ]{0,}=.*$',
'CPP_GPU ?=',
'makefile.include')
filter_file('^CFLAGS[ ]{0,}=.*$',
'CFLAGS ?=',
'makefile.include')
if '+vaspsol' in spec:
copy('VASPsol/src/solvation.F', 'src/')
def setup_build_environment(self, spack_env):
|
def build(self, spec, prefix):
if '+cuda' in self.spec:
make('gpu', 'gpu_ncl')
else:
make('std', 'gam', 'ncl')
def install(self, spec, prefix):
install_tree('bin/', prefix.bin)
|
spec = self.spec
cpp_options = ['-DMPI -DMPI_BLOCK=8000',
'-Duse_collective', '-DCACHE_SIZE=4000',
'-Davoidalloc', '-Duse_bse_te',
'-Dtbdyn', '-Duse_shmem']
if '%nvhpc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxPGI\\"', '-DPGI16',
'-Dqd_emulate'])
elif '%aocc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxGNU\\"',
'-Dfock_dblbuf'])
if '+openmp' in self.spec:
cpp_options.extend(['-D_OPENMP'])
else:
cpp_options.append('-DHOST=\\"LinuxGNU\\"')
if self.spec.satisfies('@6:'):
cpp_options.append('-Dvasp6')
cflags = ['-fPIC', '-DADD_']
fflags = []
if '%gcc' in spec or '%intel' in spec:
fflags.append('-w')
elif '%nvhpc' in spec:
fflags.extend(['-Mnoupcase', '-Mbackslash', '-Mlarge_arrays'])
elif '%aocc' in spec:
fflags.extend(['-fno-fortran-main', '-Mbackslash', '-ffast-math'])
spack_env.set('BLAS', spec['blas'].libs.ld_flags)
spack_env.set('LAPACK', spec['lapack'].libs.ld_flags)
spack_env.set('FFTW', spec['fftw-api'].prefix)
spack_env.set('MPI_INC', spec['mpi'].prefix.include)
if '%nvhpc' in spec:
spack_env.set('QD', spec['qd'].prefix)
if '+scalapack' in spec:
cpp_options.append('-DscaLAPACK')
spack_env.set('SCALAPACK', spec['scalapack'].libs.ld_flags)
if '+cuda' in spec:
cpp_gpu = ['-DCUDA_GPU', '-DRPROMU_CPROJ_OVERLAP',
'-DCUFFT_MIN=28', '-DUSE_PINNED_MEMORY']
objects_gpu = ['fftmpiw.o', 'fftmpi_map.o', 'fft3dlib.o',
'fftw3d_gpu.o', 'fftmpiw_gpu.o']
cflags.extend(['-DGPUSHMEM=300', '-DHAVE_CUBLAS'])
spack_env.set('CUDA_ROOT', spec['cuda'].prefix)
spack_env.set('CPP_GPU', ' '.join(cpp_gpu))
spack_env.set('OBJECTS_GPU', ' '.join(objects_gpu))
if '+vaspsol' in spec:
cpp_options.append('-Dsol_compat')
if spec.satisfies('%gcc@10:'):
fflags.append('-fallow-argument-mismatch')
# Finally
spack_env.set('CPP_OPTIONS', ' '.join(cpp_options))
spack_env.set('CFLAGS', ' '.join(cflags))
spack_env.set('FFLAGS', ' '.join(fflags))
|
identifier_body
|
package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Vasp(MakefilePackage):
"""
The Vienna Ab initio Simulation Package (VASP)
is a computer program for atomic scale materials modelling,
e.g. electronic structure calculations
and quantum-mechanical molecular dynamics, from first principles.
"""
homepage = "https://vasp.at"
url = "file://{0}/vasp.5.4.4.pl2.tgz".format(os.getcwd())
manual_download = True
version('6.2.0', sha256='49e7ba351bd634bc5f5f67a8ef1e38e64e772857a1c02f602828898a84197e25')
version('6.1.1', sha256='e37a4dfad09d3ad0410833bcd55af6b599179a085299026992c2d8e319bf6927')
version('5.4.4.pl2', sha256='98f75fd75399a23d76d060a6155f4416b340a1704f256a00146f89024035bc8e')
version('5.4.4', sha256='5bd2449462386f01e575f9adf629c08cb03a13142806ffb6a71309ca4431cfb3')
resource(name='vaspsol',
git='https://github.com/henniggroup/VASPsol.git',
tag='V1.0',
when='+vaspsol')
variant('openmp', default=False,
description='Enable openmp build')
variant('scalapack', default=False,
description='Enables build with SCALAPACK')
variant('cuda', default=False,
description='Enables running on Nvidia GPUs')
variant('vaspsol', default=False,
description='Enable VASPsol implicit solvation model\n'
'https://github.com/henniggroup/VASPsol')
depends_on('rsync', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('fftw-api')
depends_on('mpi', type=('build', 'link', 'run'))
depends_on('scalapack', when='+scalapack')
depends_on('cuda', when='+cuda')
depends_on('qd', when='%nvhpc')
conflicts('%gcc@:8', msg='GFortran before 9.x does not support all features needed to build VASP')
conflicts('+vaspsol', when='+cuda', msg='+vaspsol only available for CPU')
conflicts('+openmp', when='@:6.1.1', msg='openmp support started from 6.2')
parallel = False
def edit(self, spec, prefix):
if '%gcc' in spec:
if '+openmp' in spec:
make_include = join_path('arch', 'makefile.include.linux_gnu_omp')
else:
make_include = join_path('arch', 'makefile.include.linux_gnu')
elif '%nvhpc' in spec:
make_include = join_path('arch', 'makefile.include.linux_pgi')
filter_file('-pgc++libs', '-c++libs', make_include, string=True)
filter_file('pgcc', spack_cc, make_include)
filter_file('pgc++', spack_cxx, make_include, string=True)
filter_file('pgfortran', spack_fc, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/include',
spec['qd'].prefix.include, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/lib',
spec['qd'].prefix.lib, make_include)
elif '%aocc' in spec:
if '+openmp' in spec:
copy(
join_path('arch', 'makefile.include.linux_gnu_omp'),
join_path('arch', 'makefile.include.linux_aocc_omp')
)
make_include = join_path('arch', 'makefile.include.linux_aocc_omp')
else:
copy(
join_path('arch', 'makefile.include.linux_gnu'),
join_path('arch', 'makefile.include.linux_aocc')
)
make_include = join_path('arch', 'makefile.include.linux_aocc')
filter_file(
'gcc', '{0} {1}'.format(spack_cc, '-Mfree'),
make_include, string=True
)
filter_file('g++', spack_cxx, make_include, string=True)
filter_file('^CFLAGS_LIB[ ]{0,}=.*$',
'CFLAGS_LIB = -O3', make_include)
filter_file('^FFLAGS_LIB[ ]{0,}=.*$',
'FFLAGS_LIB = -O2', make_include)
filter_file('^OFLAG[ ]{0,}=.*$',
'OFLAG = -O3', make_include)
filter_file('^FC[ ]{0,}=.*$',
'FC = {0}'.format(spec['mpi'].mpifc),
make_include, string=True)
filter_file('^FCL[ ]{0,}=.*$',
'FCL = {0}'.format(spec['mpi'].mpifc),
make_include, string=True)
else:
if '+openmp' in spec:
make_include = join_path('arch',
'makefile.include.linux_{0}_omp'.
format(spec.compiler.name))
else:
make_include = join_path('arch',
'makefile.include.linux_' +
spec.compiler.name)
os.rename(make_include, 'makefile.include')
# This bunch of 'filter_file()' is to make these options settable
# as environment variables
filter_file('^CPP_OPTIONS[ ]{0,}=[ ]{0,}',
'CPP_OPTIONS ?= ',
'makefile.include')
filter_file('^FFLAGS[ ]{0,}=[ ]{0,}',
'FFLAGS ?= ',
'makefile.include')
filter_file('^LIBDIR[ ]{0,}=.*$', '', 'makefile.include')
filter_file('^BLAS[ ]{0,}=.*$', 'BLAS ?=', 'makefile.include')
filter_file('^LAPACK[ ]{0,}=.*$', 'LAPACK ?=', 'makefile.include')
filter_file('^FFTW[ ]{0,}?=.*$', 'FFTW ?=', 'makefile.include')
filter_file('^MPI_INC[ ]{0,}=.*$', 'MPI_INC ?=', 'makefile.include')
filter_file('-DscaLAPACK.*$\n', '', 'makefile.include')
filter_file('^SCALAPACK[ ]{0,}=.*$', 'SCALAPACK ?=', 'makefile.include')
if '+cuda' in spec:
filter_file('^OBJECTS_GPU[ ]{0,}=.*$',
'OBJECTS_GPU ?=',
'makefile.include')
filter_file('^CPP_GPU[ ]{0,}=.*$',
'CPP_GPU ?=',
'makefile.include')
filter_file('^CFLAGS[ ]{0,}=.*$',
'CFLAGS ?=',
'makefile.include')
if '+vaspsol' in spec:
copy('VASPsol/src/solvation.F', 'src/')
def
|
(self, spack_env):
spec = self.spec
cpp_options = ['-DMPI -DMPI_BLOCK=8000',
'-Duse_collective', '-DCACHE_SIZE=4000',
'-Davoidalloc', '-Duse_bse_te',
'-Dtbdyn', '-Duse_shmem']
if '%nvhpc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxPGI\\"', '-DPGI16',
'-Dqd_emulate'])
elif '%aocc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxGNU\\"',
'-Dfock_dblbuf'])
if '+openmp' in self.spec:
cpp_options.extend(['-D_OPENMP'])
else:
cpp_options.append('-DHOST=\\"LinuxGNU\\"')
if self.spec.satisfies('@6:'):
cpp_options.append('-Dvasp6')
cflags = ['-fPIC', '-DADD_']
fflags = []
if '%gcc' in spec or '%intel' in spec:
fflags.append('-w')
elif '%nvhpc' in spec:
fflags.extend(['-Mnoupcase', '-Mbackslash', '-Mlarge_arrays'])
elif '%aocc' in spec:
fflags.extend(['-fno-fortran-main', '-Mbackslash', '-ffast-math'])
spack_env.set('BLAS', spec['blas'].libs.ld_flags)
spack_env.set('LAPACK', spec['lapack'].libs.ld_flags)
spack_env.set('FFTW', spec['fftw-api'].prefix)
spack_env.set('MPI_INC', spec['mpi'].prefix.include)
if '%nvhpc' in spec:
spack_env.set('QD', spec['qd'].prefix)
if '+scalapack' in spec:
cpp_options.append('-DscaLAPACK')
spack_env.set('SCALAPACK', spec['scalapack'].libs.ld_flags)
if '+cuda' in spec:
cpp_gpu = ['-DCUDA_GPU', '-DRPROMU_CPROJ_OVERLAP',
'-DCUFFT_MIN=28', '-DUSE_PINNED_MEMORY']
objects_gpu = ['fftmpiw.o', 'fftmpi_map.o', 'fft3dlib.o',
'fftw3d_gpu.o', 'fftmpiw_gpu.o']
cflags.extend(['-DGPUSHMEM=300', '-DHAVE_CUBLAS'])
spack_env.set('CUDA_ROOT', spec['cuda'].prefix)
spack_env.set('CPP_GPU', ' '.join(cpp_gpu))
spack_env.set('OBJECTS_GPU', ' '.join(objects_gpu))
if '+vaspsol' in spec:
cpp_options.append('-Dsol_compat')
if spec.satisfies('%gcc@10:'):
fflags.append('-fallow-argument-mismatch')
# Finally
spack_env.set('CPP_OPTIONS', ' '.join(cpp_options))
spack_env.set('CFLAGS', ' '.join(cflags))
spack_env.set('FFLAGS', ' '.join(fflags))
def build(self, spec, prefix):
if '+cuda' in self.spec:
make('gpu', 'gpu_ncl')
else:
make('std', 'gam', 'ncl')
def install(self, spec, prefix):
install_tree('bin/', prefix.bin)
|
setup_build_environment
|
identifier_name
|
package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Vasp(MakefilePackage):
"""
The Vienna Ab initio Simulation Package (VASP)
is a computer program for atomic scale materials modelling,
e.g. electronic structure calculations
and quantum-mechanical molecular dynamics, from first principles.
"""
homepage = "https://vasp.at"
url = "file://{0}/vasp.5.4.4.pl2.tgz".format(os.getcwd())
manual_download = True
version('6.2.0', sha256='49e7ba351bd634bc5f5f67a8ef1e38e64e772857a1c02f602828898a84197e25')
version('6.1.1', sha256='e37a4dfad09d3ad0410833bcd55af6b599179a085299026992c2d8e319bf6927')
version('5.4.4.pl2', sha256='98f75fd75399a23d76d060a6155f4416b340a1704f256a00146f89024035bc8e')
version('5.4.4', sha256='5bd2449462386f01e575f9adf629c08cb03a13142806ffb6a71309ca4431cfb3')
resource(name='vaspsol',
git='https://github.com/henniggroup/VASPsol.git',
tag='V1.0',
when='+vaspsol')
variant('openmp', default=False,
description='Enable openmp build')
variant('scalapack', default=False,
description='Enables build with SCALAPACK')
variant('cuda', default=False,
description='Enables running on Nvidia GPUs')
variant('vaspsol', default=False,
description='Enable VASPsol implicit solvation model\n'
'https://github.com/henniggroup/VASPsol')
depends_on('rsync', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('fftw-api')
depends_on('mpi', type=('build', 'link', 'run'))
depends_on('scalapack', when='+scalapack')
depends_on('cuda', when='+cuda')
depends_on('qd', when='%nvhpc')
|
conflicts('%gcc@:8', msg='GFortran before 9.x does not support all features needed to build VASP')
conflicts('+vaspsol', when='+cuda', msg='+vaspsol only available for CPU')
conflicts('+openmp', when='@:6.1.1', msg='openmp support started from 6.2')
parallel = False
def edit(self, spec, prefix):
if '%gcc' in spec:
if '+openmp' in spec:
make_include = join_path('arch', 'makefile.include.linux_gnu_omp')
else:
make_include = join_path('arch', 'makefile.include.linux_gnu')
elif '%nvhpc' in spec:
make_include = join_path('arch', 'makefile.include.linux_pgi')
filter_file('-pgc++libs', '-c++libs', make_include, string=True)
filter_file('pgcc', spack_cc, make_include)
filter_file('pgc++', spack_cxx, make_include, string=True)
filter_file('pgfortran', spack_fc, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/include',
spec['qd'].prefix.include, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/lib',
spec['qd'].prefix.lib, make_include)
elif '%aocc' in spec:
if '+openmp' in spec:
copy(
join_path('arch', 'makefile.include.linux_gnu_omp'),
join_path('arch', 'makefile.include.linux_aocc_omp')
)
make_include = join_path('arch', 'makefile.include.linux_aocc_omp')
else:
copy(
join_path('arch', 'makefile.include.linux_gnu'),
join_path('arch', 'makefile.include.linux_aocc')
)
make_include = join_path('arch', 'makefile.include.linux_aocc')
filter_file(
'gcc', '{0} {1}'.format(spack_cc, '-Mfree'),
make_include, string=True
)
filter_file('g++', spack_cxx, make_include, string=True)
filter_file('^CFLAGS_LIB[ ]{0,}=.*$',
'CFLAGS_LIB = -O3', make_include)
filter_file('^FFLAGS_LIB[ ]{0,}=.*$',
'FFLAGS_LIB = -O2', make_include)
filter_file('^OFLAG[ ]{0,}=.*$',
'OFLAG = -O3', make_include)
filter_file('^FC[ ]{0,}=.*$',
'FC = {0}'.format(spec['mpi'].mpifc),
make_include, string=True)
filter_file('^FCL[ ]{0,}=.*$',
'FCL = {0}'.format(spec['mpi'].mpifc),
make_include, string=True)
else:
if '+openmp' in spec:
make_include = join_path('arch',
'makefile.include.linux_{0}_omp'.
format(spec.compiler.name))
else:
make_include = join_path('arch',
'makefile.include.linux_' +
spec.compiler.name)
os.rename(make_include, 'makefile.include')
# This bunch of 'filter_file()' is to make these options settable
# as environment variables
filter_file('^CPP_OPTIONS[ ]{0,}=[ ]{0,}',
'CPP_OPTIONS ?= ',
'makefile.include')
filter_file('^FFLAGS[ ]{0,}=[ ]{0,}',
'FFLAGS ?= ',
'makefile.include')
filter_file('^LIBDIR[ ]{0,}=.*$', '', 'makefile.include')
filter_file('^BLAS[ ]{0,}=.*$', 'BLAS ?=', 'makefile.include')
filter_file('^LAPACK[ ]{0,}=.*$', 'LAPACK ?=', 'makefile.include')
filter_file('^FFTW[ ]{0,}?=.*$', 'FFTW ?=', 'makefile.include')
filter_file('^MPI_INC[ ]{0,}=.*$', 'MPI_INC ?=', 'makefile.include')
filter_file('-DscaLAPACK.*$\n', '', 'makefile.include')
filter_file('^SCALAPACK[ ]{0,}=.*$', 'SCALAPACK ?=', 'makefile.include')
if '+cuda' in spec:
filter_file('^OBJECTS_GPU[ ]{0,}=.*$',
'OBJECTS_GPU ?=',
'makefile.include')
filter_file('^CPP_GPU[ ]{0,}=.*$',
'CPP_GPU ?=',
'makefile.include')
filter_file('^CFLAGS[ ]{0,}=.*$',
'CFLAGS ?=',
'makefile.include')
if '+vaspsol' in spec:
copy('VASPsol/src/solvation.F', 'src/')
def setup_build_environment(self, spack_env):
spec = self.spec
cpp_options = ['-DMPI -DMPI_BLOCK=8000',
'-Duse_collective', '-DCACHE_SIZE=4000',
'-Davoidalloc', '-Duse_bse_te',
'-Dtbdyn', '-Duse_shmem']
if '%nvhpc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxPGI\\"', '-DPGI16',
'-Dqd_emulate'])
elif '%aocc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxGNU\\"',
'-Dfock_dblbuf'])
if '+openmp' in self.spec:
cpp_options.extend(['-D_OPENMP'])
else:
cpp_options.append('-DHOST=\\"LinuxGNU\\"')
if self.spec.satisfies('@6:'):
cpp_options.append('-Dvasp6')
cflags = ['-fPIC', '-DADD_']
fflags = []
if '%gcc' in spec or '%intel' in spec:
fflags.append('-w')
elif '%nvhpc' in spec:
fflags.extend(['-Mnoupcase', '-Mbackslash', '-Mlarge_arrays'])
elif '%aocc' in spec:
fflags.extend(['-fno-fortran-main', '-Mbackslash', '-ffast-math'])
spack_env.set('BLAS', spec['blas'].libs.ld_flags)
spack_env.set('LAPACK', spec['lapack'].libs.ld_flags)
spack_env.set('FFTW', spec['fftw-api'].prefix)
spack_env.set('MPI_INC', spec['mpi'].prefix.include)
if '%nvhpc' in spec:
spack_env.set('QD', spec['qd'].prefix)
if '+scalapack' in spec:
cpp_options.append('-DscaLAPACK')
spack_env.set('SCALAPACK', spec['scalapack'].libs.ld_flags)
if '+cuda' in spec:
cpp_gpu = ['-DCUDA_GPU', '-DRPROMU_CPROJ_OVERLAP',
'-DCUFFT_MIN=28', '-DUSE_PINNED_MEMORY']
objects_gpu = ['fftmpiw.o', 'fftmpi_map.o', 'fft3dlib.o',
'fftw3d_gpu.o', 'fftmpiw_gpu.o']
cflags.extend(['-DGPUSHMEM=300', '-DHAVE_CUBLAS'])
spack_env.set('CUDA_ROOT', spec['cuda'].prefix)
spack_env.set('CPP_GPU', ' '.join(cpp_gpu))
spack_env.set('OBJECTS_GPU', ' '.join(objects_gpu))
if '+vaspsol' in spec:
cpp_options.append('-Dsol_compat')
if spec.satisfies('%gcc@10:'):
fflags.append('-fallow-argument-mismatch')
# Finally
spack_env.set('CPP_OPTIONS', ' '.join(cpp_options))
spack_env.set('CFLAGS', ' '.join(cflags))
spack_env.set('FFLAGS', ' '.join(fflags))
def build(self, spec, prefix):
if '+cuda' in self.spec:
make('gpu', 'gpu_ncl')
else:
make('std', 'gam', 'ncl')
def install(self, spec, prefix):
install_tree('bin/', prefix.bin)
|
random_line_split
|
|
package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Vasp(MakefilePackage):
"""
The Vienna Ab initio Simulation Package (VASP)
is a computer program for atomic scale materials modelling,
e.g. electronic structure calculations
and quantum-mechanical molecular dynamics, from first principles.
"""
homepage = "https://vasp.at"
url = "file://{0}/vasp.5.4.4.pl2.tgz".format(os.getcwd())
manual_download = True
version('6.2.0', sha256='49e7ba351bd634bc5f5f67a8ef1e38e64e772857a1c02f602828898a84197e25')
version('6.1.1', sha256='e37a4dfad09d3ad0410833bcd55af6b599179a085299026992c2d8e319bf6927')
version('5.4.4.pl2', sha256='98f75fd75399a23d76d060a6155f4416b340a1704f256a00146f89024035bc8e')
version('5.4.4', sha256='5bd2449462386f01e575f9adf629c08cb03a13142806ffb6a71309ca4431cfb3')
resource(name='vaspsol',
git='https://github.com/henniggroup/VASPsol.git',
tag='V1.0',
when='+vaspsol')
variant('openmp', default=False,
description='Enable openmp build')
variant('scalapack', default=False,
description='Enables build with SCALAPACK')
variant('cuda', default=False,
description='Enables running on Nvidia GPUs')
variant('vaspsol', default=False,
description='Enable VASPsol implicit solvation model\n'
'https://github.com/henniggroup/VASPsol')
depends_on('rsync', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('fftw-api')
depends_on('mpi', type=('build', 'link', 'run'))
depends_on('scalapack', when='+scalapack')
depends_on('cuda', when='+cuda')
depends_on('qd', when='%nvhpc')
conflicts('%gcc@:8', msg='GFortran before 9.x does not support all features needed to build VASP')
conflicts('+vaspsol', when='+cuda', msg='+vaspsol only available for CPU')
conflicts('+openmp', when='@:6.1.1', msg='openmp support started from 6.2')
parallel = False
def edit(self, spec, prefix):
if '%gcc' in spec:
if '+openmp' in spec:
make_include = join_path('arch', 'makefile.include.linux_gnu_omp')
else:
make_include = join_path('arch', 'makefile.include.linux_gnu')
elif '%nvhpc' in spec:
make_include = join_path('arch', 'makefile.include.linux_pgi')
filter_file('-pgc++libs', '-c++libs', make_include, string=True)
filter_file('pgcc', spack_cc, make_include)
filter_file('pgc++', spack_cxx, make_include, string=True)
filter_file('pgfortran', spack_fc, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/include',
spec['qd'].prefix.include, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/lib',
spec['qd'].prefix.lib, make_include)
elif '%aocc' in spec:
if '+openmp' in spec:
copy(
join_path('arch', 'makefile.include.linux_gnu_omp'),
join_path('arch', 'makefile.include.linux_aocc_omp')
)
make_include = join_path('arch', 'makefile.include.linux_aocc_omp')
else:
copy(
join_path('arch', 'makefile.include.linux_gnu'),
join_path('arch', 'makefile.include.linux_aocc')
)
make_include = join_path('arch', 'makefile.include.linux_aocc')
filter_file(
'gcc', '{0} {1}'.format(spack_cc, '-Mfree'),
make_include, string=True
)
filter_file('g++', spack_cxx, make_include, string=True)
filter_file('^CFLAGS_LIB[ ]{0,}=.*$',
'CFLAGS_LIB = -O3', make_include)
filter_file('^FFLAGS_LIB[ ]{0,}=.*$',
'FFLAGS_LIB = -O2', make_include)
filter_file('^OFLAG[ ]{0,}=.*$',
'OFLAG = -O3', make_include)
filter_file('^FC[ ]{0,}=.*$',
'FC = {0}'.format(spec['mpi'].mpifc),
make_include, string=True)
filter_file('^FCL[ ]{0,}=.*$',
'FCL = {0}'.format(spec['mpi'].mpifc),
make_include, string=True)
else:
if '+openmp' in spec:
make_include = join_path('arch',
'makefile.include.linux_{0}_omp'.
format(spec.compiler.name))
else:
make_include = join_path('arch',
'makefile.include.linux_' +
spec.compiler.name)
os.rename(make_include, 'makefile.include')
# This bunch of 'filter_file()' is to make these options settable
# as environment variables
filter_file('^CPP_OPTIONS[ ]{0,}=[ ]{0,}',
'CPP_OPTIONS ?= ',
'makefile.include')
filter_file('^FFLAGS[ ]{0,}=[ ]{0,}',
'FFLAGS ?= ',
'makefile.include')
filter_file('^LIBDIR[ ]{0,}=.*$', '', 'makefile.include')
filter_file('^BLAS[ ]{0,}=.*$', 'BLAS ?=', 'makefile.include')
filter_file('^LAPACK[ ]{0,}=.*$', 'LAPACK ?=', 'makefile.include')
filter_file('^FFTW[ ]{0,}?=.*$', 'FFTW ?=', 'makefile.include')
filter_file('^MPI_INC[ ]{0,}=.*$', 'MPI_INC ?=', 'makefile.include')
filter_file('-DscaLAPACK.*$\n', '', 'makefile.include')
filter_file('^SCALAPACK[ ]{0,}=.*$', 'SCALAPACK ?=', 'makefile.include')
if '+cuda' in spec:
filter_file('^OBJECTS_GPU[ ]{0,}=.*$',
'OBJECTS_GPU ?=',
'makefile.include')
filter_file('^CPP_GPU[ ]{0,}=.*$',
'CPP_GPU ?=',
'makefile.include')
filter_file('^CFLAGS[ ]{0,}=.*$',
'CFLAGS ?=',
'makefile.include')
if '+vaspsol' in spec:
copy('VASPsol/src/solvation.F', 'src/')
def setup_build_environment(self, spack_env):
spec = self.spec
cpp_options = ['-DMPI -DMPI_BLOCK=8000',
'-Duse_collective', '-DCACHE_SIZE=4000',
'-Davoidalloc', '-Duse_bse_te',
'-Dtbdyn', '-Duse_shmem']
if '%nvhpc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxPGI\\"', '-DPGI16',
'-Dqd_emulate'])
elif '%aocc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxGNU\\"',
'-Dfock_dblbuf'])
if '+openmp' in self.spec:
cpp_options.extend(['-D_OPENMP'])
else:
cpp_options.append('-DHOST=\\"LinuxGNU\\"')
if self.spec.satisfies('@6:'):
cpp_options.append('-Dvasp6')
cflags = ['-fPIC', '-DADD_']
fflags = []
if '%gcc' in spec or '%intel' in spec:
fflags.append('-w')
elif '%nvhpc' in spec:
fflags.extend(['-Mnoupcase', '-Mbackslash', '-Mlarge_arrays'])
elif '%aocc' in spec:
fflags.extend(['-fno-fortran-main', '-Mbackslash', '-ffast-math'])
spack_env.set('BLAS', spec['blas'].libs.ld_flags)
spack_env.set('LAPACK', spec['lapack'].libs.ld_flags)
spack_env.set('FFTW', spec['fftw-api'].prefix)
spack_env.set('MPI_INC', spec['mpi'].prefix.include)
if '%nvhpc' in spec:
spack_env.set('QD', spec['qd'].prefix)
if '+scalapack' in spec:
cpp_options.append('-DscaLAPACK')
spack_env.set('SCALAPACK', spec['scalapack'].libs.ld_flags)
if '+cuda' in spec:
cpp_gpu = ['-DCUDA_GPU', '-DRPROMU_CPROJ_OVERLAP',
'-DCUFFT_MIN=28', '-DUSE_PINNED_MEMORY']
objects_gpu = ['fftmpiw.o', 'fftmpi_map.o', 'fft3dlib.o',
'fftw3d_gpu.o', 'fftmpiw_gpu.o']
cflags.extend(['-DGPUSHMEM=300', '-DHAVE_CUBLAS'])
spack_env.set('CUDA_ROOT', spec['cuda'].prefix)
spack_env.set('CPP_GPU', ' '.join(cpp_gpu))
spack_env.set('OBJECTS_GPU', ' '.join(objects_gpu))
if '+vaspsol' in spec:
|
if spec.satisfies('%gcc@10:'):
fflags.append('-fallow-argument-mismatch')
# Finally
spack_env.set('CPP_OPTIONS', ' '.join(cpp_options))
spack_env.set('CFLAGS', ' '.join(cflags))
spack_env.set('FFLAGS', ' '.join(fflags))
def build(self, spec, prefix):
if '+cuda' in self.spec:
make('gpu', 'gpu_ncl')
else:
make('std', 'gam', 'ncl')
def install(self, spec, prefix):
install_tree('bin/', prefix.bin)
|
cpp_options.append('-Dsol_compat')
|
conditional_block
|
htmloptionscollection.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLCollectionBinding::HTMLCollectionMethods;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding::HTMLOptionsCollectionMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeBinding::NodeMethods;
use dom::bindings::codegen::UnionTypes::{HTMLOptionElementOrHTMLOptGroupElement, HTMLElementOrLong};
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootedReference};
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::element::Element;
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmloptionelement::HTMLOptionElement;
use dom::node::{document_from_node, Node};
use dom::window::Window;
#[dom_struct]
pub struct HTMLOptionsCollection {
collection: HTMLCollection,
}
impl HTMLOptionsCollection {
fn new_inherited(root: &Node, filter: Box<CollectionFilter + 'static>) -> HTMLOptionsCollection {
HTMLOptionsCollection {
collection: HTMLCollection::new_inherited(root, filter),
}
}
pub fn new(window: &Window, root: &Node, filter: Box<CollectionFilter + 'static>)
-> Root<HTMLOptionsCollection>
{
reflect_dom_object(box HTMLOptionsCollection::new_inherited(root, filter),
window,
HTMLOptionsCollectionBinding::Wrap)
}
fn add_new_elements(&self, count: u32) -> ErrorResult {
let root = self.upcast().root_node();
let document = document_from_node(root.r());
for _ in 0..count {
let element = HTMLOptionElement::new(atom!("option"), None, document.r());
let node = element.upcast::<Node>();
try!(root.AppendChild(node));
};
Ok(())
}
}
impl HTMLOptionsCollectionMethods for HTMLOptionsCollection {
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements NamedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-nameditem
fn NamedGetter(&self, name: DOMString) -> Option<Root<Element>> {
self.upcast().NamedItem(name)
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
self.upcast().SupportedPropertyNames()
}
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements IndexedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-item
fn IndexedGetter(&self, index: u32) -> Option<Root<Element>> {
self.upcast().IndexedGetter(index)
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-setter
fn IndexedSetter(&self, index: u32, value: Option<&HTMLOptionElement>) -> ErrorResult {
if let Some(value) = value {
// Step 2
let length = self.upcast().Length();
// Step 3
let n = index as i32 - length as i32;
// Step 4
if n > 0 {
try!(self.add_new_elements(n as u32));
}
// Step 5
let node = value.upcast::<Node>();
let root = self.upcast().root_node();
if n >= 0 {
Node::pre_insert(node, root.r(), None).map(|_| ())
} else {
let child = self.upcast().IndexedGetter(index).unwrap();
let child_node = child.r().upcast::<Node>();
root.r().ReplaceChild(node, child_node).map(|_| ())
}
} else {
// Step 1
self.Remove(index as i32);
Ok(())
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn Length(&self) -> u32 {
self.upcast().Length()
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn SetLength(&self, length: u32) {
let current_length = self.upcast().Length();
let delta = length as i32 - current_length as i32;
if delta < 0 {
// new length is lower - deleting last option elements
for index in (length..current_length).rev() {
self.Remove(index as i32)
}
} else if delta > 0 {
// new length is higher - adding new option elements
self.add_new_elements(delta as u32).unwrap();
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-add
fn Add(&self, element: HTMLOptionElementOrHTMLOptGroupElement, before: Option<HTMLElementOrLong>) -> ErrorResult {
let root = self.upcast().root_node();
let node: &Node = match element {
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptionElement(ref element) => element.upcast(),
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptGroupElement(ref element) => element.upcast(),
};
// Step 1
if node.is_ancestor_of(root.r()) {
return Err(Error::HierarchyRequest);
}
if let Some(HTMLElementOrLong::HTMLElement(ref before_element)) = before {
// Step 2
let before_node = before_element.upcast::<Node>();
if !root.r().is_ancestor_of(before_node)
|
// Step 3
if node == before_node {
return Ok(());
}
}
// Step 4
let reference_node = before.and_then(|before| {
match before {
HTMLElementOrLong::HTMLElement(element) => Some(Root::upcast::<Node>(element)),
HTMLElementOrLong::Long(index) => {
self.upcast().IndexedGetter(index as u32).map(Root::upcast::<Node>)
}
}
});
// Step 5
let parent = if let Some(reference_node) = reference_node.r() {
reference_node.GetParentNode().unwrap()
} else {
root
};
// Step 6
Node::pre_insert(node, parent.r(), reference_node.r()).map(|_| ())
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-remove
fn Remove(&self, index: i32) {
if let Some(element) = self.upcast().IndexedGetter(index as u32) {
element.r().Remove();
}
}
}
|
{
return Err(Error::NotFound);
}
|
conditional_block
|
htmloptionscollection.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLCollectionBinding::HTMLCollectionMethods;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding::HTMLOptionsCollectionMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeBinding::NodeMethods;
use dom::bindings::codegen::UnionTypes::{HTMLOptionElementOrHTMLOptGroupElement, HTMLElementOrLong};
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootedReference};
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::element::Element;
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmloptionelement::HTMLOptionElement;
use dom::node::{document_from_node, Node};
use dom::window::Window;
#[dom_struct]
pub struct HTMLOptionsCollection {
collection: HTMLCollection,
}
impl HTMLOptionsCollection {
fn new_inherited(root: &Node, filter: Box<CollectionFilter + 'static>) -> HTMLOptionsCollection {
HTMLOptionsCollection {
collection: HTMLCollection::new_inherited(root, filter),
}
}
pub fn new(window: &Window, root: &Node, filter: Box<CollectionFilter + 'static>)
-> Root<HTMLOptionsCollection>
{
reflect_dom_object(box HTMLOptionsCollection::new_inherited(root, filter),
window,
HTMLOptionsCollectionBinding::Wrap)
}
fn add_new_elements(&self, count: u32) -> ErrorResult {
let root = self.upcast().root_node();
let document = document_from_node(root.r());
for _ in 0..count {
let element = HTMLOptionElement::new(atom!("option"), None, document.r());
let node = element.upcast::<Node>();
try!(root.AppendChild(node));
};
Ok(())
}
}
impl HTMLOptionsCollectionMethods for HTMLOptionsCollection {
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements NamedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-nameditem
fn NamedGetter(&self, name: DOMString) -> Option<Root<Element>> {
self.upcast().NamedItem(name)
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
self.upcast().SupportedPropertyNames()
}
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements IndexedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-item
fn IndexedGetter(&self, index: u32) -> Option<Root<Element>> {
self.upcast().IndexedGetter(index)
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-setter
fn IndexedSetter(&self, index: u32, value: Option<&HTMLOptionElement>) -> ErrorResult {
if let Some(value) = value {
// Step 2
let length = self.upcast().Length();
// Step 3
let n = index as i32 - length as i32;
// Step 4
if n > 0 {
try!(self.add_new_elements(n as u32));
}
// Step 5
let node = value.upcast::<Node>();
let root = self.upcast().root_node();
if n >= 0 {
Node::pre_insert(node, root.r(), None).map(|_| ())
} else {
let child = self.upcast().IndexedGetter(index).unwrap();
let child_node = child.r().upcast::<Node>();
root.r().ReplaceChild(node, child_node).map(|_| ())
}
} else {
// Step 1
self.Remove(index as i32);
Ok(())
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn Length(&self) -> u32 {
self.upcast().Length()
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn SetLength(&self, length: u32) {
let current_length = self.upcast().Length();
let delta = length as i32 - current_length as i32;
if delta < 0 {
// new length is lower - deleting last option elements
for index in (length..current_length).rev() {
self.Remove(index as i32)
}
} else if delta > 0 {
// new length is higher - adding new option elements
self.add_new_elements(delta as u32).unwrap();
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-add
fn Add(&self, element: HTMLOptionElementOrHTMLOptGroupElement, before: Option<HTMLElementOrLong>) -> ErrorResult {
let root = self.upcast().root_node();
let node: &Node = match element {
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptionElement(ref element) => element.upcast(),
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptGroupElement(ref element) => element.upcast(),
};
// Step 1
if node.is_ancestor_of(root.r()) {
return Err(Error::HierarchyRequest);
}
if let Some(HTMLElementOrLong::HTMLElement(ref before_element)) = before {
// Step 2
let before_node = before_element.upcast::<Node>();
if !root.r().is_ancestor_of(before_node) {
return Err(Error::NotFound);
}
// Step 3
if node == before_node {
return Ok(());
}
}
// Step 4
let reference_node = before.and_then(|before| {
match before {
HTMLElementOrLong::HTMLElement(element) => Some(Root::upcast::<Node>(element)),
HTMLElementOrLong::Long(index) => {
|
self.upcast().IndexedGetter(index as u32).map(Root::upcast::<Node>)
}
}
});
// Step 5
let parent = if let Some(reference_node) = reference_node.r() {
reference_node.GetParentNode().unwrap()
} else {
root
};
// Step 6
Node::pre_insert(node, parent.r(), reference_node.r()).map(|_| ())
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-remove
fn Remove(&self, index: i32) {
if let Some(element) = self.upcast().IndexedGetter(index as u32) {
element.r().Remove();
}
}
}
|
random_line_split
|
|
htmloptionscollection.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLCollectionBinding::HTMLCollectionMethods;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding::HTMLOptionsCollectionMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeBinding::NodeMethods;
use dom::bindings::codegen::UnionTypes::{HTMLOptionElementOrHTMLOptGroupElement, HTMLElementOrLong};
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootedReference};
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::element::Element;
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmloptionelement::HTMLOptionElement;
use dom::node::{document_from_node, Node};
use dom::window::Window;
#[dom_struct]
pub struct HTMLOptionsCollection {
collection: HTMLCollection,
}
impl HTMLOptionsCollection {
fn new_inherited(root: &Node, filter: Box<CollectionFilter + 'static>) -> HTMLOptionsCollection {
HTMLOptionsCollection {
collection: HTMLCollection::new_inherited(root, filter),
}
}
pub fn new(window: &Window, root: &Node, filter: Box<CollectionFilter + 'static>)
-> Root<HTMLOptionsCollection>
{
reflect_dom_object(box HTMLOptionsCollection::new_inherited(root, filter),
window,
HTMLOptionsCollectionBinding::Wrap)
}
fn add_new_elements(&self, count: u32) -> ErrorResult {
let root = self.upcast().root_node();
let document = document_from_node(root.r());
for _ in 0..count {
let element = HTMLOptionElement::new(atom!("option"), None, document.r());
let node = element.upcast::<Node>();
try!(root.AppendChild(node));
};
Ok(())
}
}
impl HTMLOptionsCollectionMethods for HTMLOptionsCollection {
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements NamedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-nameditem
fn NamedGetter(&self, name: DOMString) -> Option<Root<Element>> {
self.upcast().NamedItem(name)
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn
|
(&self) -> Vec<DOMString> {
self.upcast().SupportedPropertyNames()
}
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements IndexedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-item
fn IndexedGetter(&self, index: u32) -> Option<Root<Element>> {
self.upcast().IndexedGetter(index)
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-setter
fn IndexedSetter(&self, index: u32, value: Option<&HTMLOptionElement>) -> ErrorResult {
if let Some(value) = value {
// Step 2
let length = self.upcast().Length();
// Step 3
let n = index as i32 - length as i32;
// Step 4
if n > 0 {
try!(self.add_new_elements(n as u32));
}
// Step 5
let node = value.upcast::<Node>();
let root = self.upcast().root_node();
if n >= 0 {
Node::pre_insert(node, root.r(), None).map(|_| ())
} else {
let child = self.upcast().IndexedGetter(index).unwrap();
let child_node = child.r().upcast::<Node>();
root.r().ReplaceChild(node, child_node).map(|_| ())
}
} else {
// Step 1
self.Remove(index as i32);
Ok(())
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn Length(&self) -> u32 {
self.upcast().Length()
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn SetLength(&self, length: u32) {
let current_length = self.upcast().Length();
let delta = length as i32 - current_length as i32;
if delta < 0 {
// new length is lower - deleting last option elements
for index in (length..current_length).rev() {
self.Remove(index as i32)
}
} else if delta > 0 {
// new length is higher - adding new option elements
self.add_new_elements(delta as u32).unwrap();
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-add
fn Add(&self, element: HTMLOptionElementOrHTMLOptGroupElement, before: Option<HTMLElementOrLong>) -> ErrorResult {
let root = self.upcast().root_node();
let node: &Node = match element {
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptionElement(ref element) => element.upcast(),
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptGroupElement(ref element) => element.upcast(),
};
// Step 1
if node.is_ancestor_of(root.r()) {
return Err(Error::HierarchyRequest);
}
if let Some(HTMLElementOrLong::HTMLElement(ref before_element)) = before {
// Step 2
let before_node = before_element.upcast::<Node>();
if !root.r().is_ancestor_of(before_node) {
return Err(Error::NotFound);
}
// Step 3
if node == before_node {
return Ok(());
}
}
// Step 4
let reference_node = before.and_then(|before| {
match before {
HTMLElementOrLong::HTMLElement(element) => Some(Root::upcast::<Node>(element)),
HTMLElementOrLong::Long(index) => {
self.upcast().IndexedGetter(index as u32).map(Root::upcast::<Node>)
}
}
});
// Step 5
let parent = if let Some(reference_node) = reference_node.r() {
reference_node.GetParentNode().unwrap()
} else {
root
};
// Step 6
Node::pre_insert(node, parent.r(), reference_node.r()).map(|_| ())
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-remove
fn Remove(&self, index: i32) {
if let Some(element) = self.upcast().IndexedGetter(index as u32) {
element.r().Remove();
}
}
}
|
SupportedPropertyNames
|
identifier_name
|
htmloptionscollection.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ElementBinding::ElementMethods;
use dom::bindings::codegen::Bindings::HTMLCollectionBinding::HTMLCollectionMethods;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding;
use dom::bindings::codegen::Bindings::HTMLOptionsCollectionBinding::HTMLOptionsCollectionMethods;
use dom::bindings::codegen::Bindings::NodeBinding::NodeBinding::NodeMethods;
use dom::bindings::codegen::UnionTypes::{HTMLOptionElementOrHTMLOptGroupElement, HTMLElementOrLong};
use dom::bindings::error::{Error, ErrorResult};
use dom::bindings::inheritance::Castable;
use dom::bindings::js::{Root, RootedReference};
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::element::Element;
use dom::htmlcollection::{CollectionFilter, HTMLCollection};
use dom::htmloptionelement::HTMLOptionElement;
use dom::node::{document_from_node, Node};
use dom::window::Window;
#[dom_struct]
pub struct HTMLOptionsCollection {
collection: HTMLCollection,
}
impl HTMLOptionsCollection {
fn new_inherited(root: &Node, filter: Box<CollectionFilter + 'static>) -> HTMLOptionsCollection {
HTMLOptionsCollection {
collection: HTMLCollection::new_inherited(root, filter),
}
}
pub fn new(window: &Window, root: &Node, filter: Box<CollectionFilter + 'static>)
-> Root<HTMLOptionsCollection>
{
reflect_dom_object(box HTMLOptionsCollection::new_inherited(root, filter),
window,
HTMLOptionsCollectionBinding::Wrap)
}
fn add_new_elements(&self, count: u32) -> ErrorResult {
let root = self.upcast().root_node();
let document = document_from_node(root.r());
for _ in 0..count {
let element = HTMLOptionElement::new(atom!("option"), None, document.r());
let node = element.upcast::<Node>();
try!(root.AppendChild(node));
};
Ok(())
}
}
impl HTMLOptionsCollectionMethods for HTMLOptionsCollection {
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements NamedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-nameditem
fn NamedGetter(&self, name: DOMString) -> Option<Root<Element>> {
self.upcast().NamedItem(name)
}
// https://heycam.github.io/webidl/#dfn-supported-property-names
fn SupportedPropertyNames(&self) -> Vec<DOMString> {
self.upcast().SupportedPropertyNames()
}
// FIXME: This shouldn't need to be implemented here since HTMLCollection (the parent of
// HTMLOptionsCollection) implements IndexedGetter.
// https://github.com/servo/servo/issues/5875
//
// https://dom.spec.whatwg.org/#dom-htmlcollection-item
fn IndexedGetter(&self, index: u32) -> Option<Root<Element>>
|
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-setter
fn IndexedSetter(&self, index: u32, value: Option<&HTMLOptionElement>) -> ErrorResult {
if let Some(value) = value {
// Step 2
let length = self.upcast().Length();
// Step 3
let n = index as i32 - length as i32;
// Step 4
if n > 0 {
try!(self.add_new_elements(n as u32));
}
// Step 5
let node = value.upcast::<Node>();
let root = self.upcast().root_node();
if n >= 0 {
Node::pre_insert(node, root.r(), None).map(|_| ())
} else {
let child = self.upcast().IndexedGetter(index).unwrap();
let child_node = child.r().upcast::<Node>();
root.r().ReplaceChild(node, child_node).map(|_| ())
}
} else {
// Step 1
self.Remove(index as i32);
Ok(())
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn Length(&self) -> u32 {
self.upcast().Length()
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-length
fn SetLength(&self, length: u32) {
let current_length = self.upcast().Length();
let delta = length as i32 - current_length as i32;
if delta < 0 {
// new length is lower - deleting last option elements
for index in (length..current_length).rev() {
self.Remove(index as i32)
}
} else if delta > 0 {
// new length is higher - adding new option elements
self.add_new_elements(delta as u32).unwrap();
}
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-add
fn Add(&self, element: HTMLOptionElementOrHTMLOptGroupElement, before: Option<HTMLElementOrLong>) -> ErrorResult {
let root = self.upcast().root_node();
let node: &Node = match element {
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptionElement(ref element) => element.upcast(),
HTMLOptionElementOrHTMLOptGroupElement::HTMLOptGroupElement(ref element) => element.upcast(),
};
// Step 1
if node.is_ancestor_of(root.r()) {
return Err(Error::HierarchyRequest);
}
if let Some(HTMLElementOrLong::HTMLElement(ref before_element)) = before {
// Step 2
let before_node = before_element.upcast::<Node>();
if !root.r().is_ancestor_of(before_node) {
return Err(Error::NotFound);
}
// Step 3
if node == before_node {
return Ok(());
}
}
// Step 4
let reference_node = before.and_then(|before| {
match before {
HTMLElementOrLong::HTMLElement(element) => Some(Root::upcast::<Node>(element)),
HTMLElementOrLong::Long(index) => {
self.upcast().IndexedGetter(index as u32).map(Root::upcast::<Node>)
}
}
});
// Step 5
let parent = if let Some(reference_node) = reference_node.r() {
reference_node.GetParentNode().unwrap()
} else {
root
};
// Step 6
Node::pre_insert(node, parent.r(), reference_node.r()).map(|_| ())
}
// https://html.spec.whatwg.org/multipage/#dom-htmloptionscollection-remove
fn Remove(&self, index: i32) {
if let Some(element) = self.upcast().IndexedGetter(index as u32) {
element.r().Remove();
}
}
}
|
{
self.upcast().IndexedGetter(index)
}
|
identifier_body
|
mem.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Memory profiling functions.
use ipc_channel::ipc::{self, IpcReceiver};
use ipc_channel::router::ROUTER;
use profile_traits::mem::ReportsChan;
use profile_traits::mem::{ProfilerChan, ProfilerMsg, ReportKind, Reporter, ReporterRequest};
use std::borrow::ToOwned;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::thread;
use util::task::spawn_named;
use util::time::duration_from_seconds;
pub struct Profiler {
/// The port through which messages are received.
pub port: IpcReceiver<ProfilerMsg>,
/// Registered memory reporters.
reporters: HashMap<String, Reporter>,
}
const JEMALLOC_HEAP_ALLOCATED_STR: &'static str = "jemalloc-heap-allocated";
const SYSTEM_HEAP_ALLOCATED_STR: &'static str = "system-heap-allocated";
impl Profiler {
pub fn create(period: Option<f64>) -> ProfilerChan {
let (chan, port) = ipc::channel().unwrap();
// Create the timer thread if a period was provided.
if let Some(period) = period {
let chan = chan.clone();
spawn_named("Memory profiler timer".to_owned(), move || {
loop {
thread::sleep(duration_from_seconds(period));
if chan.send(ProfilerMsg::Print).is_err() {
break;
}
}
});
}
// Always spawn the memory profiler. If there is no timer thread it won't receive regular
// `Print` events, but it will still receive the other events.
spawn_named("Memory profiler".to_owned(), move || {
let mut mem_profiler = Profiler::new(port);
mem_profiler.start();
});
let mem_profiler_chan = ProfilerChan(chan);
// Register the system memory reporter, which will run on its own thread. It never needs to
// be unregistered, because as long as the memory profiler is running the system memory
// reporter can make measurements.
let (system_reporter_sender, system_reporter_receiver) = ipc::channel().unwrap();
ROUTER.add_route(system_reporter_receiver.to_opaque(), box |message| {
let request: ReporterRequest = message.to().unwrap();
system_reporter::collect_reports(request)
});
mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(),
Reporter(system_reporter_sender)));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
}
}
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
if !self.handle_msg(msg) {
break
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use",
name_clone)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None =>
panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false
}
}
fn handle_print_msg(&self) {
println!("Begin memory reports");
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize =>
report.path.insert(0, String::from("explicit")),
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize =>
jemalloc_heap_reported_size += report.size,
ReportKind::ExplicitSystemHeapSize =>
system_heap_reported_size += report.size,
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interor nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![]
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &str) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if !self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size != 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) {
if !self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 { format!(" [{}]", self.count) } else { "".to_owned() };
println!("|{}{:8.2} MiB -- {}{}",
indent_str, (self.size as f64) / mebi, self.path_seg, count_str);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if !self.trees.contains_key(head) {
self.trees.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() && !b.children.is_empty() {
Ordering::Greater
} else if !a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if !tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use libc::{c_char, c_int, c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
use std::borrow::ToOwned;
use std::ffi::CString;
use std::mem::size_of;
use std::ptr::null_mut;
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{virtual_size, resident_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(path![JEMALLOC_HEAP_ALLOCATED_STR], jemalloc_stat("stats.allocated"));
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> {
None
}
extern {
fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
newp: *mut c_void, newlen: size_t) -> c_int;
}
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
epoch_len)
};
if rv != 0 {
return None;
}
let rv = unsafe {
je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0)
};
if rv != 0 {
return None;
}
Some(value as usize)
}
// Like std::macros::try!, but for Option<>.
macro_rules! option_try(
|
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe {
::libc::sysconf(::libc::_SC_PAGESIZE) as usize
}
}
#[cfg(target_os = "linux")]
fn proc_self_statm_field(field: usize) -> Option<usize> {
use std::fs::File;
use std::io::Read;
let mut f = option_try!(File::open("/proc/self/statm").ok());
let mut contents = String::new();
option_try!(f.read_to_string(&mut contents).ok());
let s = option_try!(contents.split_whitespace().nth(field));
let npages = option_try!(s.parse::<usize>().ok());
Some(npages * page_size())
}
#[cfg(target_os = "linux")]
fn vsize() -> Option<usize> {
proc_self_statm_field(0)
}
#[cfg(target_os = "linux")]
fn resident() -> Option<usize> {
proc_self_statm_field(1)
}
#[cfg(target_os = "macos")]
fn vsize() -> Option<usize> {
virtual_size()
}
#[cfg(target_os = "macos")]
fn resident() -> Option<usize> {
resident_size()
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn vsize() -> Option<usize> {
None
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn resident() -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn resident_segments() -> Vec<(String, usize)> {
use regex::Regex;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::fs::File;
use std::io::{BufReader, BufRead};
// The first line of an entry in /proc/<pid>/smaps looks just like an entry
// in /proc/<pid>/maps:
//
// address perms offset dev inode pathname
// 02366000-025d8000 rw-p 00000000 00:00 0 [heap]
//
// Each of the following lines contains a key and a value, separated
// by ": ", where the key does not contain either of those characters.
// For example:
//
// Rss: 132 kB
let f = match File::open("/proc/self/smaps") {
Ok(f) => BufReader::new(f),
Err(_) => return vec![],
};
let seg_re = Regex::new(
r"^[:xdigit:]+-[:xdigit:]+ (....) [:xdigit:]+ [:xdigit:]+:[:xdigit:]+ \d+ +(.*)").unwrap();
let rss_re = Regex::new(r"^Rss: +(\d+) kB").unwrap();
// We record each segment's resident size.
let mut seg_map: HashMap<String, usize> = HashMap::new();
#[derive(PartialEq)]
enum LookingFor { Segment, Rss }
let mut looking_for = LookingFor::Segment;
let mut curr_seg_name = String::new();
// Parse the file.
for line in f.lines() {
let line = match line {
Ok(line) => line,
Err(_) => continue,
};
if looking_for == LookingFor::Segment {
// Look for a segment info line.
let cap = match seg_re.captures(&line) {
Some(cap) => cap,
None => continue,
};
let perms = cap.at(1).unwrap();
let pathname = cap.at(2).unwrap();
// Construct the segment name from its pathname and permissions.
curr_seg_name.clear();
if pathname == "" || pathname.starts_with("[stack:") {
// Anonymous memory. Entries marked with "[stack:nnn]"
// look like thread stacks but they may include other
// anonymous mappings, so we can't trust them and just
// treat them as entirely anonymous.
curr_seg_name.push_str("anonymous");
} else {
curr_seg_name.push_str(pathname);
}
curr_seg_name.push_str(" (");
curr_seg_name.push_str(perms);
curr_seg_name.push_str(")");
looking_for = LookingFor::Rss;
} else {
// Look for an "Rss:" line.
let cap = match rss_re.captures(&line) {
Some(cap) => cap,
None => continue,
};
let rss = cap.at(1).unwrap().parse::<usize>().unwrap() * 1024;
if rss > 0 {
// Aggregate small segments into "other".
let seg_name = if rss < 512 * 1024 {
"other".to_owned()
} else {
curr_seg_name.clone()
};
match seg_map.entry(seg_name) {
Entry::Vacant(entry) => { entry.insert(rss); },
Entry::Occupied(mut entry) => *entry.get_mut() += rss,
}
}
looking_for = LookingFor::Segment;
}
}
// Note that the sum of all these segments' RSS values differs from the "resident"
// measurement obtained via /proc/<pid>/statm in resident(). It's unclear why this
// difference occurs; for some processes the measurements match, but for Servo they do not.
let segs: Vec<(String, usize)> = seg_map.into_iter().collect();
segs
}
#[cfg(not(target_os = "linux"))]
fn resident_segments() -> Vec<(String, usize)> {
vec![]
}
}
|
($e:expr) => (match $e { Some(e) => e, None => return None })
);
|
random_line_split
|
mem.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Memory profiling functions.
use ipc_channel::ipc::{self, IpcReceiver};
use ipc_channel::router::ROUTER;
use profile_traits::mem::ReportsChan;
use profile_traits::mem::{ProfilerChan, ProfilerMsg, ReportKind, Reporter, ReporterRequest};
use std::borrow::ToOwned;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::thread;
use util::task::spawn_named;
use util::time::duration_from_seconds;
pub struct Profiler {
/// The port through which messages are received.
pub port: IpcReceiver<ProfilerMsg>,
/// Registered memory reporters.
reporters: HashMap<String, Reporter>,
}
const JEMALLOC_HEAP_ALLOCATED_STR: &'static str = "jemalloc-heap-allocated";
const SYSTEM_HEAP_ALLOCATED_STR: &'static str = "system-heap-allocated";
impl Profiler {
pub fn create(period: Option<f64>) -> ProfilerChan {
let (chan, port) = ipc::channel().unwrap();
// Create the timer thread if a period was provided.
if let Some(period) = period {
let chan = chan.clone();
spawn_named("Memory profiler timer".to_owned(), move || {
loop {
thread::sleep(duration_from_seconds(period));
if chan.send(ProfilerMsg::Print).is_err() {
break;
}
}
});
}
// Always spawn the memory profiler. If there is no timer thread it won't receive regular
// `Print` events, but it will still receive the other events.
spawn_named("Memory profiler".to_owned(), move || {
let mut mem_profiler = Profiler::new(port);
mem_profiler.start();
});
let mem_profiler_chan = ProfilerChan(chan);
// Register the system memory reporter, which will run on its own thread. It never needs to
// be unregistered, because as long as the memory profiler is running the system memory
// reporter can make measurements.
let (system_reporter_sender, system_reporter_receiver) = ipc::channel().unwrap();
ROUTER.add_route(system_reporter_receiver.to_opaque(), box |message| {
let request: ReporterRequest = message.to().unwrap();
system_reporter::collect_reports(request)
});
mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(),
Reporter(system_reporter_sender)));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
}
}
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
if !self.handle_msg(msg) {
break
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use",
name_clone)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None =>
panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false
}
}
fn handle_print_msg(&self) {
println!("Begin memory reports");
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize =>
report.path.insert(0, String::from("explicit")),
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize =>
jemalloc_heap_reported_size += report.size,
ReportKind::ExplicitSystemHeapSize =>
system_heap_reported_size += report.size,
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interor nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![]
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &str) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if !self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size != 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) {
if !self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 { format!(" [{}]", self.count) } else { "".to_owned() };
println!("|{}{:8.2} MiB -- {}{}",
indent_str, (self.size as f64) / mebi, self.path_seg, count_str);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if !self.trees.contains_key(head) {
self.trees.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() && !b.children.is_empty() {
Ordering::Greater
} else if !a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if !tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use libc::{c_char, c_int, c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
use std::borrow::ToOwned;
use std::ffi::CString;
use std::mem::size_of;
use std::ptr::null_mut;
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{virtual_size, resident_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(path![JEMALLOC_HEAP_ALLOCATED_STR], jemalloc_stat("stats.allocated"));
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize>
|
extern {
fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
newp: *mut c_void, newlen: size_t) -> c_int;
}
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
epoch_len)
};
if rv != 0 {
return None;
}
let rv = unsafe {
je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0)
};
if rv != 0 {
return None;
}
Some(value as usize)
}
// Like std::macros::try!, but for Option<>.
macro_rules! option_try(
($e:expr) => (match $e { Some(e) => e, None => return None })
);
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe {
::libc::sysconf(::libc::_SC_PAGESIZE) as usize
}
}
#[cfg(target_os = "linux")]
fn proc_self_statm_field(field: usize) -> Option<usize> {
use std::fs::File;
use std::io::Read;
let mut f = option_try!(File::open("/proc/self/statm").ok());
let mut contents = String::new();
option_try!(f.read_to_string(&mut contents).ok());
let s = option_try!(contents.split_whitespace().nth(field));
let npages = option_try!(s.parse::<usize>().ok());
Some(npages * page_size())
}
#[cfg(target_os = "linux")]
fn vsize() -> Option<usize> {
proc_self_statm_field(0)
}
#[cfg(target_os = "linux")]
fn resident() -> Option<usize> {
proc_self_statm_field(1)
}
#[cfg(target_os = "macos")]
fn vsize() -> Option<usize> {
virtual_size()
}
#[cfg(target_os = "macos")]
fn resident() -> Option<usize> {
resident_size()
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn vsize() -> Option<usize> {
None
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn resident() -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn resident_segments() -> Vec<(String, usize)> {
use regex::Regex;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::fs::File;
use std::io::{BufReader, BufRead};
// The first line of an entry in /proc/<pid>/smaps looks just like an entry
// in /proc/<pid>/maps:
//
// address perms offset dev inode pathname
// 02366000-025d8000 rw-p 00000000 00:00 0 [heap]
//
// Each of the following lines contains a key and a value, separated
// by ": ", where the key does not contain either of those characters.
// For example:
//
// Rss: 132 kB
let f = match File::open("/proc/self/smaps") {
Ok(f) => BufReader::new(f),
Err(_) => return vec![],
};
let seg_re = Regex::new(
r"^[:xdigit:]+-[:xdigit:]+ (....) [:xdigit:]+ [:xdigit:]+:[:xdigit:]+ \d+ +(.*)").unwrap();
let rss_re = Regex::new(r"^Rss: +(\d+) kB").unwrap();
// We record each segment's resident size.
let mut seg_map: HashMap<String, usize> = HashMap::new();
#[derive(PartialEq)]
enum LookingFor { Segment, Rss }
let mut looking_for = LookingFor::Segment;
let mut curr_seg_name = String::new();
// Parse the file.
for line in f.lines() {
let line = match line {
Ok(line) => line,
Err(_) => continue,
};
if looking_for == LookingFor::Segment {
// Look for a segment info line.
let cap = match seg_re.captures(&line) {
Some(cap) => cap,
None => continue,
};
let perms = cap.at(1).unwrap();
let pathname = cap.at(2).unwrap();
// Construct the segment name from its pathname and permissions.
curr_seg_name.clear();
if pathname == "" || pathname.starts_with("[stack:") {
// Anonymous memory. Entries marked with "[stack:nnn]"
// look like thread stacks but they may include other
// anonymous mappings, so we can't trust them and just
// treat them as entirely anonymous.
curr_seg_name.push_str("anonymous");
} else {
curr_seg_name.push_str(pathname);
}
curr_seg_name.push_str(" (");
curr_seg_name.push_str(perms);
curr_seg_name.push_str(")");
looking_for = LookingFor::Rss;
} else {
// Look for an "Rss:" line.
let cap = match rss_re.captures(&line) {
Some(cap) => cap,
None => continue,
};
let rss = cap.at(1).unwrap().parse::<usize>().unwrap() * 1024;
if rss > 0 {
// Aggregate small segments into "other".
let seg_name = if rss < 512 * 1024 {
"other".to_owned()
} else {
curr_seg_name.clone()
};
match seg_map.entry(seg_name) {
Entry::Vacant(entry) => { entry.insert(rss); },
Entry::Occupied(mut entry) => *entry.get_mut() += rss,
}
}
looking_for = LookingFor::Segment;
}
}
// Note that the sum of all these segments' RSS values differs from the "resident"
// measurement obtained via /proc/<pid>/statm in resident(). It's unclear why this
// difference occurs; for some processes the measurements match, but for Servo they do not.
let segs: Vec<(String, usize)> = seg_map.into_iter().collect();
segs
}
#[cfg(not(target_os = "linux"))]
fn resident_segments() -> Vec<(String, usize)> {
vec![]
}
}
|
{
None
}
|
identifier_body
|
mem.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Memory profiling functions.
use ipc_channel::ipc::{self, IpcReceiver};
use ipc_channel::router::ROUTER;
use profile_traits::mem::ReportsChan;
use profile_traits::mem::{ProfilerChan, ProfilerMsg, ReportKind, Reporter, ReporterRequest};
use std::borrow::ToOwned;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::thread;
use util::task::spawn_named;
use util::time::duration_from_seconds;
pub struct Profiler {
/// The port through which messages are received.
pub port: IpcReceiver<ProfilerMsg>,
/// Registered memory reporters.
reporters: HashMap<String, Reporter>,
}
const JEMALLOC_HEAP_ALLOCATED_STR: &'static str = "jemalloc-heap-allocated";
const SYSTEM_HEAP_ALLOCATED_STR: &'static str = "system-heap-allocated";
impl Profiler {
pub fn
|
(period: Option<f64>) -> ProfilerChan {
let (chan, port) = ipc::channel().unwrap();
// Create the timer thread if a period was provided.
if let Some(period) = period {
let chan = chan.clone();
spawn_named("Memory profiler timer".to_owned(), move || {
loop {
thread::sleep(duration_from_seconds(period));
if chan.send(ProfilerMsg::Print).is_err() {
break;
}
}
});
}
// Always spawn the memory profiler. If there is no timer thread it won't receive regular
// `Print` events, but it will still receive the other events.
spawn_named("Memory profiler".to_owned(), move || {
let mut mem_profiler = Profiler::new(port);
mem_profiler.start();
});
let mem_profiler_chan = ProfilerChan(chan);
// Register the system memory reporter, which will run on its own thread. It never needs to
// be unregistered, because as long as the memory profiler is running the system memory
// reporter can make measurements.
let (system_reporter_sender, system_reporter_receiver) = ipc::channel().unwrap();
ROUTER.add_route(system_reporter_receiver.to_opaque(), box |message| {
let request: ReporterRequest = message.to().unwrap();
system_reporter::collect_reports(request)
});
mem_profiler_chan.send(ProfilerMsg::RegisterReporter("system".to_owned(),
Reporter(system_reporter_sender)));
mem_profiler_chan
}
pub fn new(port: IpcReceiver<ProfilerMsg>) -> Profiler {
Profiler {
port: port,
reporters: HashMap::new(),
}
}
pub fn start(&mut self) {
while let Ok(msg) = self.port.recv() {
if !self.handle_msg(msg) {
break
}
}
}
fn handle_msg(&mut self, msg: ProfilerMsg) -> bool {
match msg {
ProfilerMsg::RegisterReporter(name, reporter) => {
// Panic if it has already been registered.
let name_clone = name.clone();
match self.reporters.insert(name, reporter) {
None => true,
Some(_) => panic!(format!("RegisterReporter: '{}' name is already in use",
name_clone)),
}
},
ProfilerMsg::UnregisterReporter(name) => {
// Panic if it hasn't previously been registered.
match self.reporters.remove(&name) {
Some(_) => true,
None =>
panic!(format!("UnregisterReporter: '{}' name is unknown", &name)),
}
},
ProfilerMsg::Print => {
self.handle_print_msg();
true
},
ProfilerMsg::Exit => false
}
}
fn handle_print_msg(&self) {
println!("Begin memory reports");
println!("|");
// Collect reports from memory reporters.
//
// This serializes the report-gathering. It might be worth creating a new scoped thread for
// each reporter once we have enough of them.
//
// If anything goes wrong with a reporter, we just skip it.
//
// We also track the total memory reported on the jemalloc heap and the system heap, and
// use that to compute the special "jemalloc-heap-unclassified" and
// "system-heap-unclassified" values.
let mut forest = ReportsForest::new();
let mut jemalloc_heap_reported_size = 0;
let mut system_heap_reported_size = 0;
let mut jemalloc_heap_allocated_size: Option<usize> = None;
let mut system_heap_allocated_size: Option<usize> = None;
for reporter in self.reporters.values() {
let (chan, port) = ipc::channel().unwrap();
reporter.collect_reports(ReportsChan(chan));
if let Ok(mut reports) = port.recv() {
for report in &mut reports {
// Add "explicit" to the start of the path, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize |
ReportKind::ExplicitSystemHeapSize |
ReportKind::ExplicitNonHeapSize |
ReportKind::ExplicitUnknownLocationSize =>
report.path.insert(0, String::from("explicit")),
ReportKind::NonExplicitSize => {},
}
// Update the reported fractions of the heaps, when appropriate.
match report.kind {
ReportKind::ExplicitJemallocHeapSize =>
jemalloc_heap_reported_size += report.size,
ReportKind::ExplicitSystemHeapSize =>
system_heap_reported_size += report.size,
_ => {},
}
// Record total size of the heaps, when we see them.
if report.path.len() == 1 {
if report.path[0] == JEMALLOC_HEAP_ALLOCATED_STR {
assert!(jemalloc_heap_allocated_size.is_none());
jemalloc_heap_allocated_size = Some(report.size);
} else if report.path[0] == SYSTEM_HEAP_ALLOCATED_STR {
assert!(system_heap_allocated_size.is_none());
system_heap_allocated_size = Some(report.size);
}
}
// Insert the report.
forest.insert(&report.path, report.size);
}
}
}
// Compute and insert the heap-unclassified values.
if let Some(jemalloc_heap_allocated_size) = jemalloc_heap_allocated_size {
forest.insert(&path!["explicit", "jemalloc-heap-unclassified"],
jemalloc_heap_allocated_size - jemalloc_heap_reported_size);
}
if let Some(system_heap_allocated_size) = system_heap_allocated_size {
forest.insert(&path!["explicit", "system-heap-unclassified"],
system_heap_allocated_size - system_heap_reported_size);
}
forest.print();
println!("|");
println!("End memory reports");
println!("");
}
}
/// A collection of one or more reports with the same initial path segment. A ReportsTree
/// containing a single node is described as "degenerate".
struct ReportsTree {
/// For leaf nodes, this is the sum of the sizes of all reports that mapped to this location.
/// For interior nodes, this is the sum of the sizes of all its child nodes.
size: usize,
/// For leaf nodes, this is the count of all reports that mapped to this location.
/// For interor nodes, this is always zero.
count: u32,
/// The segment from the report path that maps to this node.
path_seg: String,
/// Child nodes.
children: Vec<ReportsTree>,
}
impl ReportsTree {
fn new(path_seg: String) -> ReportsTree {
ReportsTree {
size: 0,
count: 0,
path_seg: path_seg,
children: vec![]
}
}
// Searches the tree's children for a path_seg match, and returns the index if there is a
// match.
fn find_child(&self, path_seg: &str) -> Option<usize> {
for (i, child) in self.children.iter().enumerate() {
if child.path_seg == *path_seg {
return Some(i);
}
}
None
}
// Insert the path and size into the tree, adding any nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let mut t: &mut ReportsTree = self;
for path_seg in path {
let i = match t.find_child(&path_seg) {
Some(i) => i,
None => {
let new_t = ReportsTree::new(path_seg.clone());
t.children.push(new_t);
t.children.len() - 1
},
};
let tmp = t; // this temporary is needed to satisfy the borrow checker
t = &mut tmp.children[i];
}
t.size += size;
t.count += 1;
}
// Fill in sizes for interior nodes and sort sub-trees accordingly. Should only be done once
// all the reports have been inserted.
fn compute_interior_node_sizes_and_sort(&mut self) -> usize {
if !self.children.is_empty() {
// Interior node. Derive its size from its children.
if self.size != 0 {
// This will occur if e.g. we have paths ["a", "b"] and ["a", "b", "c"].
panic!("one report's path is a sub-path of another report's path");
}
for child in &mut self.children {
self.size += child.compute_interior_node_sizes_and_sort();
}
// Now that child sizes have been computed, we can sort the children.
self.children.sort_by(|t1, t2| t2.size.cmp(&t1.size));
}
self.size
}
fn print(&self, depth: i32) {
if !self.children.is_empty() {
assert_eq!(self.count, 0);
}
let mut indent_str = String::new();
for _ in 0..depth {
indent_str.push_str(" ");
}
let mebi = 1024f64 * 1024f64;
let count_str = if self.count > 1 { format!(" [{}]", self.count) } else { "".to_owned() };
println!("|{}{:8.2} MiB -- {}{}",
indent_str, (self.size as f64) / mebi, self.path_seg, count_str);
for child in &self.children {
child.print(depth + 1);
}
}
}
/// A collection of ReportsTrees. It represents the data from multiple memory reports in a form
/// that's good to print.
struct ReportsForest {
trees: HashMap<String, ReportsTree>,
}
impl ReportsForest {
fn new() -> ReportsForest {
ReportsForest {
trees: HashMap::new(),
}
}
// Insert the path and size into the forest, adding any trees and nodes as necessary.
fn insert(&mut self, path: &[String], size: usize) {
let (head, tail) = path.split_first().unwrap();
// Get the right tree, creating it if necessary.
if !self.trees.contains_key(head) {
self.trees.insert(head.clone(), ReportsTree::new(head.clone()));
}
let t = self.trees.get_mut(head).unwrap();
// Use tail because the 0th path segment was used to find the right tree in the forest.
t.insert(tail, size);
}
fn print(&mut self) {
// Fill in sizes of interior nodes, and recursively sort the sub-trees.
for (_, tree) in &mut self.trees {
tree.compute_interior_node_sizes_and_sort();
}
// Put the trees into a sorted vector. Primary sort: degenerate trees (those containing a
// single node) come after non-degenerate trees. Secondary sort: alphabetical order of the
// root node's path_seg.
let mut v = vec![];
for (_, tree) in &self.trees {
v.push(tree);
}
v.sort_by(|a, b| {
if a.children.is_empty() && !b.children.is_empty() {
Ordering::Greater
} else if !a.children.is_empty() && b.children.is_empty() {
Ordering::Less
} else {
a.path_seg.cmp(&b.path_seg)
}
});
// Print the forest.
for tree in &v {
tree.print(0);
// Print a blank line after non-degenerate trees.
if !tree.children.is_empty() {
println!("|");
}
}
}
}
//---------------------------------------------------------------------------
mod system_reporter {
use libc::{c_char, c_int, c_void, size_t};
use profile_traits::mem::{Report, ReportKind, ReporterRequest};
use std::borrow::ToOwned;
use std::ffi::CString;
use std::mem::size_of;
use std::ptr::null_mut;
use super::{JEMALLOC_HEAP_ALLOCATED_STR, SYSTEM_HEAP_ALLOCATED_STR};
#[cfg(target_os = "macos")]
use task_info::task_basic_info::{virtual_size, resident_size};
/// Collects global measurements from the OS and heap allocators.
pub fn collect_reports(request: ReporterRequest) {
let mut reports = vec![];
{
let mut report = |path, size| {
if let Some(size) = size {
reports.push(Report {
path: path,
kind: ReportKind::NonExplicitSize,
size: size,
});
}
};
// Virtual and physical memory usage, as reported by the OS.
report(path!["vsize"], vsize());
report(path!["resident"], resident());
// Memory segments, as reported by the OS.
for seg in resident_segments() {
report(path!["resident-according-to-smaps", seg.0], Some(seg.1));
}
// Total number of bytes allocated by the application on the system
// heap.
report(path![SYSTEM_HEAP_ALLOCATED_STR], system_heap_allocated());
// The descriptions of the following jemalloc measurements are taken
// directly from the jemalloc documentation.
// "Total number of bytes allocated by the application."
report(path![JEMALLOC_HEAP_ALLOCATED_STR], jemalloc_stat("stats.allocated"));
// "Total number of bytes in active pages allocated by the application.
// This is a multiple of the page size, and greater than or equal to
// |stats.allocated|."
report(path!["jemalloc-heap-active"], jemalloc_stat("stats.active"));
// "Total number of bytes in chunks mapped on behalf of the application.
// This is a multiple of the chunk size, and is at least as large as
// |stats.active|. This does not include inactive chunks."
report(path!["jemalloc-heap-mapped"], jemalloc_stat("stats.mapped"));
}
request.reports_channel.send(reports);
}
#[cfg(target_os = "linux")]
extern {
fn mallinfo() -> struct_mallinfo;
}
#[cfg(target_os = "linux")]
#[repr(C)]
pub struct struct_mallinfo {
arena: c_int,
ordblks: c_int,
smblks: c_int,
hblks: c_int,
hblkhd: c_int,
usmblks: c_int,
fsmblks: c_int,
uordblks: c_int,
fordblks: c_int,
keepcost: c_int,
}
#[cfg(target_os = "linux")]
fn system_heap_allocated() -> Option<usize> {
let info: struct_mallinfo = unsafe { mallinfo() };
// The documentation in the glibc man page makes it sound like |uordblks| would suffice,
// but that only gets the small allocations that are put in the brk heap. We need |hblkhd|
// as well to get the larger allocations that are mmapped.
//
// These fields are unfortunately |int| and so can overflow (becoming negative) if memory
// usage gets high enough. So don't report anything in that case. In the non-overflow case
// we cast the two values to usize before adding them to make sure the sum also doesn't
// overflow.
if info.hblkhd < 0 || info.uordblks < 0 {
None
} else {
Some(info.hblkhd as usize + info.uordblks as usize)
}
}
#[cfg(not(target_os = "linux"))]
fn system_heap_allocated() -> Option<usize> {
None
}
extern {
fn je_mallctl(name: *const c_char, oldp: *mut c_void, oldlenp: *mut size_t,
newp: *mut c_void, newlen: size_t) -> c_int;
}
fn jemalloc_stat(value_name: &str) -> Option<usize> {
// Before we request the measurement of interest, we first send an "epoch"
// request. Without that jemalloc gives cached statistics(!) which can be
// highly inaccurate.
let epoch_name = "epoch";
let epoch_c_name = CString::new(epoch_name).unwrap();
let mut epoch: u64 = 0;
let epoch_ptr = &mut epoch as *mut _ as *mut c_void;
let mut epoch_len = size_of::<u64>() as size_t;
let value_c_name = CString::new(value_name).unwrap();
let mut value: size_t = 0;
let value_ptr = &mut value as *mut _ as *mut c_void;
let mut value_len = size_of::<size_t>() as size_t;
// Using the same values for the `old` and `new` parameters is enough
// to get the statistics updated.
let rv = unsafe {
je_mallctl(epoch_c_name.as_ptr(), epoch_ptr, &mut epoch_len, epoch_ptr,
epoch_len)
};
if rv != 0 {
return None;
}
let rv = unsafe {
je_mallctl(value_c_name.as_ptr(), value_ptr, &mut value_len, null_mut(), 0)
};
if rv != 0 {
return None;
}
Some(value as usize)
}
// Like std::macros::try!, but for Option<>.
macro_rules! option_try(
($e:expr) => (match $e { Some(e) => e, None => return None })
);
#[cfg(target_os = "linux")]
fn page_size() -> usize {
unsafe {
::libc::sysconf(::libc::_SC_PAGESIZE) as usize
}
}
#[cfg(target_os = "linux")]
fn proc_self_statm_field(field: usize) -> Option<usize> {
use std::fs::File;
use std::io::Read;
let mut f = option_try!(File::open("/proc/self/statm").ok());
let mut contents = String::new();
option_try!(f.read_to_string(&mut contents).ok());
let s = option_try!(contents.split_whitespace().nth(field));
let npages = option_try!(s.parse::<usize>().ok());
Some(npages * page_size())
}
#[cfg(target_os = "linux")]
fn vsize() -> Option<usize> {
proc_self_statm_field(0)
}
#[cfg(target_os = "linux")]
fn resident() -> Option<usize> {
proc_self_statm_field(1)
}
#[cfg(target_os = "macos")]
fn vsize() -> Option<usize> {
virtual_size()
}
#[cfg(target_os = "macos")]
fn resident() -> Option<usize> {
resident_size()
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn vsize() -> Option<usize> {
None
}
#[cfg(not(any(target_os = "linux", target_os = "macos")))]
fn resident() -> Option<usize> {
None
}
#[cfg(target_os = "linux")]
fn resident_segments() -> Vec<(String, usize)> {
use regex::Regex;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::fs::File;
use std::io::{BufReader, BufRead};
// The first line of an entry in /proc/<pid>/smaps looks just like an entry
// in /proc/<pid>/maps:
//
// address perms offset dev inode pathname
// 02366000-025d8000 rw-p 00000000 00:00 0 [heap]
//
// Each of the following lines contains a key and a value, separated
// by ": ", where the key does not contain either of those characters.
// For example:
//
// Rss: 132 kB
let f = match File::open("/proc/self/smaps") {
Ok(f) => BufReader::new(f),
Err(_) => return vec![],
};
let seg_re = Regex::new(
r"^[:xdigit:]+-[:xdigit:]+ (....) [:xdigit:]+ [:xdigit:]+:[:xdigit:]+ \d+ +(.*)").unwrap();
let rss_re = Regex::new(r"^Rss: +(\d+) kB").unwrap();
// We record each segment's resident size.
let mut seg_map: HashMap<String, usize> = HashMap::new();
#[derive(PartialEq)]
enum LookingFor { Segment, Rss }
let mut looking_for = LookingFor::Segment;
let mut curr_seg_name = String::new();
// Parse the file.
for line in f.lines() {
let line = match line {
Ok(line) => line,
Err(_) => continue,
};
if looking_for == LookingFor::Segment {
// Look for a segment info line.
let cap = match seg_re.captures(&line) {
Some(cap) => cap,
None => continue,
};
let perms = cap.at(1).unwrap();
let pathname = cap.at(2).unwrap();
// Construct the segment name from its pathname and permissions.
curr_seg_name.clear();
if pathname == "" || pathname.starts_with("[stack:") {
// Anonymous memory. Entries marked with "[stack:nnn]"
// look like thread stacks but they may include other
// anonymous mappings, so we can't trust them and just
// treat them as entirely anonymous.
curr_seg_name.push_str("anonymous");
} else {
curr_seg_name.push_str(pathname);
}
curr_seg_name.push_str(" (");
curr_seg_name.push_str(perms);
curr_seg_name.push_str(")");
looking_for = LookingFor::Rss;
} else {
// Look for an "Rss:" line.
let cap = match rss_re.captures(&line) {
Some(cap) => cap,
None => continue,
};
let rss = cap.at(1).unwrap().parse::<usize>().unwrap() * 1024;
if rss > 0 {
// Aggregate small segments into "other".
let seg_name = if rss < 512 * 1024 {
"other".to_owned()
} else {
curr_seg_name.clone()
};
match seg_map.entry(seg_name) {
Entry::Vacant(entry) => { entry.insert(rss); },
Entry::Occupied(mut entry) => *entry.get_mut() += rss,
}
}
looking_for = LookingFor::Segment;
}
}
// Note that the sum of all these segments' RSS values differs from the "resident"
// measurement obtained via /proc/<pid>/statm in resident(). It's unclear why this
// difference occurs; for some processes the measurements match, but for Servo they do not.
let segs: Vec<(String, usize)> = seg_map.into_iter().collect();
segs
}
#[cfg(not(target_os = "linux"))]
fn resident_segments() -> Vec<(String, usize)> {
vec![]
}
}
|
create
|
identifier_name
|
StackedBarChart.stories.tsx
|
import React from "react"
import { StackedBarChart } from "./StackedBarChart.js"
import {
SampleColumnSlugs,
SynthesizeFruitTable,
SynthesizeGDPTable,
} from "../../coreTable/OwidTableSynthesizers.js"
export default {
title: "StackedBarChart",
component: StackedBarChart,
}
export const ColumnsAsSeries = (): JSX.Element => {
|
return (
<svg width={600} height={600}>
<StackedBarChart
manager={{ table, selection: table.sampleEntityName(1) }}
/>
</svg>
)
}
export const EntitiesAsSeries = (): JSX.Element => {
const table = SynthesizeGDPTable({ entityCount: 5 })
const manager = {
table,
selection: table.availableEntityNames,
yColumnSlugs: [SampleColumnSlugs.Population],
}
return (
<svg width={600} height={600}>
<StackedBarChart manager={manager} />
</svg>
)
}
export const EntitiesAsSeriesWithMissingRows = (): JSX.Element => {
const table = SynthesizeGDPTable({ entityCount: 5 }).dropRandomRows(30)
const manager = {
table,
selection: table.availableEntityNames,
yColumnSlugs: [SampleColumnSlugs.Population],
}
return (
<svg width={600} height={600}>
<StackedBarChart manager={manager} />
</svg>
)
}
|
const table = SynthesizeFruitTable()
|
random_line_split
|
google.js
|
// TODO: Add tests
import passport from 'passport';
import { OAuth2Strategy as GoogleStrategy } from 'passport-google-oauth';
import authConfig from '../credentials.json';
import init from '../init';
import { upsert } from '../../lib/util';
function
|
() {
// serialize user into the session
init();
passport.use(new GoogleStrategy(
authConfig.google,
(accessToken, refreshToken, profile, done) => {
const params = {
email: profile.emails[0].value,
external_auth_type: 'google',
};
const data = {
first_name: profile.name.givenName,
last_name: profile.name.familyName,
email: profile.emails.length && profile.emails[0].value,
photo_url: profile.photos.length && profile.photos[0].value,
external_auth_type: 'google',
external_auth_id: profile.id,
};
upsert('/users', params, data)
.then(resp => done(null, resp))
.catch(err => done(err));
},
));
}
passportInit();
export default passport;
|
passportInit
|
identifier_name
|
google.js
|
// TODO: Add tests
import passport from 'passport';
import { OAuth2Strategy as GoogleStrategy } from 'passport-google-oauth';
import authConfig from '../credentials.json';
import init from '../init';
import { upsert } from '../../lib/util';
function passportInit()
|
passportInit();
export default passport;
|
{
// serialize user into the session
init();
passport.use(new GoogleStrategy(
authConfig.google,
(accessToken, refreshToken, profile, done) => {
const params = {
email: profile.emails[0].value,
external_auth_type: 'google',
};
const data = {
first_name: profile.name.givenName,
last_name: profile.name.familyName,
email: profile.emails.length && profile.emails[0].value,
photo_url: profile.photos.length && profile.photos[0].value,
external_auth_type: 'google',
external_auth_id: profile.id,
};
upsert('/users', params, data)
.then(resp => done(null, resp))
.catch(err => done(err));
},
));
}
|
identifier_body
|
google.js
|
// TODO: Add tests
import passport from 'passport';
import { OAuth2Strategy as GoogleStrategy } from 'passport-google-oauth';
import authConfig from '../credentials.json';
import init from '../init';
import { upsert } from '../../lib/util';
function passportInit() {
// serialize user into the session
init();
passport.use(new GoogleStrategy(
|
const params = {
email: profile.emails[0].value,
external_auth_type: 'google',
};
const data = {
first_name: profile.name.givenName,
last_name: profile.name.familyName,
email: profile.emails.length && profile.emails[0].value,
photo_url: profile.photos.length && profile.photos[0].value,
external_auth_type: 'google',
external_auth_id: profile.id,
};
upsert('/users', params, data)
.then(resp => done(null, resp))
.catch(err => done(err));
},
));
}
passportInit();
export default passport;
|
authConfig.google,
(accessToken, refreshToken, profile, done) => {
|
random_line_split
|
bug-2470-bounds-check-overflow-2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
fn main() {
let x = ~[1u,2u,3u];
// This should cause a bounds-check failure, but may not if we do our
// bounds checking by comparing a scaled index value to the vector's
// length (in bytes), because the scaling of the index will cause it to
// wrap around to a small number.
let idx = uint::max_value & !(uint::max_value >> 1u);
error!("ov2 idx = 0x%x", idx);
// This should fail.
error!("ov2 0x%x", x[idx]);
}
|
// except according to those terms.
// xfail-test
// error-pattern:index out of bounds
|
random_line_split
|
bug-2470-bounds-check-overflow-2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test
// error-pattern:index out of bounds
fn
|
() {
let x = ~[1u,2u,3u];
// This should cause a bounds-check failure, but may not if we do our
// bounds checking by comparing a scaled index value to the vector's
// length (in bytes), because the scaling of the index will cause it to
// wrap around to a small number.
let idx = uint::max_value & !(uint::max_value >> 1u);
error!("ov2 idx = 0x%x", idx);
// This should fail.
error!("ov2 0x%x", x[idx]);
}
|
main
|
identifier_name
|
bug-2470-bounds-check-overflow-2.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-test
// error-pattern:index out of bounds
fn main()
|
{
let x = ~[1u,2u,3u];
// This should cause a bounds-check failure, but may not if we do our
// bounds checking by comparing a scaled index value to the vector's
// length (in bytes), because the scaling of the index will cause it to
// wrap around to a small number.
let idx = uint::max_value & !(uint::max_value >> 1u);
error!("ov2 idx = 0x%x", idx);
// This should fail.
error!("ov2 0x%x", x[idx]);
}
|
identifier_body
|
|
world.rs
|
use crate::{
ai, animations, components, desc, flags::Flags, item, spatial::Spatial, spec::EntitySpawn,
stats, world_cache::WorldCache, Distribution, ExternalEntity, Location, Rng, WorldSkeleton,
};
use calx::seeded_rng;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
pub const GAME_VERSION: &str = "0.1.0";
calx_ecs::build_ecs! {
anim: animations::Anim,
brain: ai::Brain,
desc: desc::Desc,
health: stats::Health,
item: item::Item,
map_memory: components::MapMemory,
stacking: item::Stacking,
stats: stats::StatsComponent,
status: stats::Statuses,
}
#[derive(Serialize, Deserialize)]
pub struct WorldSeed {
pub rng_seed: u32,
pub world_skeleton: WorldSkeleton,
pub player_character: ExternalEntity,
}
/// Toplevel game state object.
#[derive(Serialize, Deserialize)]
pub struct World {
/// Game version. Not mutable in the slightest, but the simplest way to
/// get versioned save files is to just drop it here.
pub(crate) version: String,
/// Entity component system.
pub(crate) ecs: Ecs,
/// Static startup game world
pub(crate) world_cache: WorldCache,
/// Spawns from worldgen that have been generated in world.
generated_spawns: HashSet<(Location, EntitySpawn)>,
/// Spatial index for game entities.
pub(crate) spatial: Spatial,
/// Global gamestate flags.
pub(crate) flags: Flags,
/// Persistent random number generator.
pub(crate) rng: Rng,
}
impl World {
pub fn new(world_seed: &WorldSeed) -> World {
let mut ret = World {
version: GAME_VERSION.to_string(),
ecs: Default::default(),
world_cache: WorldCache::new(world_seed.rng_seed, world_seed.world_skeleton.clone()),
generated_spawns: Default::default(),
spatial: Default::default(),
flags: Default::default(),
rng: seeded_rng(&world_seed.rng_seed),
};
ret.spawn_player(
ret.world_cache.player_entrance(),
&world_seed.player_character,
);
ret.generate_world_spawns();
ret
}
pub(crate) fn
|
(&mut self) {
let mut spawns = self.world_cache.drain_spawns();
spawns.retain(|s| !self.generated_spawns.contains(s));
let seed = self.rng_seed();
for (loc, s) in &spawns {
// Create one-off RNG from just the spawn info, will always run the same for same info.
let mut rng = calx::seeded_rng(&(seed, loc, s));
// Construct loadout from the spawn info and generate it in world.
self.spawn(&s.sample(&mut rng), *loc);
self.generated_spawns.insert((*loc, s.clone()));
}
}
}
|
generate_world_spawns
|
identifier_name
|
world.rs
|
use crate::{
ai, animations, components, desc, flags::Flags, item, spatial::Spatial, spec::EntitySpawn,
stats, world_cache::WorldCache, Distribution, ExternalEntity, Location, Rng, WorldSkeleton,
};
use calx::seeded_rng;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
pub const GAME_VERSION: &str = "0.1.0";
calx_ecs::build_ecs! {
anim: animations::Anim,
brain: ai::Brain,
desc: desc::Desc,
health: stats::Health,
item: item::Item,
map_memory: components::MapMemory,
stacking: item::Stacking,
stats: stats::StatsComponent,
status: stats::Statuses,
}
#[derive(Serialize, Deserialize)]
pub struct WorldSeed {
pub rng_seed: u32,
pub world_skeleton: WorldSkeleton,
pub player_character: ExternalEntity,
}
/// Toplevel game state object.
#[derive(Serialize, Deserialize)]
pub struct World {
/// Game version. Not mutable in the slightest, but the simplest way to
/// get versioned save files is to just drop it here.
pub(crate) version: String,
/// Entity component system.
pub(crate) ecs: Ecs,
/// Static startup game world
pub(crate) world_cache: WorldCache,
/// Spawns from worldgen that have been generated in world.
generated_spawns: HashSet<(Location, EntitySpawn)>,
/// Spatial index for game entities.
pub(crate) spatial: Spatial,
/// Global gamestate flags.
pub(crate) flags: Flags,
/// Persistent random number generator.
|
impl World {
pub fn new(world_seed: &WorldSeed) -> World {
let mut ret = World {
version: GAME_VERSION.to_string(),
ecs: Default::default(),
world_cache: WorldCache::new(world_seed.rng_seed, world_seed.world_skeleton.clone()),
generated_spawns: Default::default(),
spatial: Default::default(),
flags: Default::default(),
rng: seeded_rng(&world_seed.rng_seed),
};
ret.spawn_player(
ret.world_cache.player_entrance(),
&world_seed.player_character,
);
ret.generate_world_spawns();
ret
}
pub(crate) fn generate_world_spawns(&mut self) {
let mut spawns = self.world_cache.drain_spawns();
spawns.retain(|s| !self.generated_spawns.contains(s));
let seed = self.rng_seed();
for (loc, s) in &spawns {
// Create one-off RNG from just the spawn info, will always run the same for same info.
let mut rng = calx::seeded_rng(&(seed, loc, s));
// Construct loadout from the spawn info and generate it in world.
self.spawn(&s.sample(&mut rng), *loc);
self.generated_spawns.insert((*loc, s.clone()));
}
}
}
|
pub(crate) rng: Rng,
}
|
random_line_split
|
cabi_x86_64.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute};
use llvm::{StructRetAttribute, ByValAttribute, ZExtAttribute};
use middle::trans::cabi::{ArgType, FnType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use std::cmp;
#[deriving(Clone, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt,
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv => true,
_ => false
}
}
}
trait ClassList {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl<'a> ClassList for &'a [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.len() == 0 { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.len() == 0 { return false; }
self[0] == Memory
}
}
fn classify_ty(ty: Type) -> Vec<RegClass> {
fn align(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return (off + a - 1u) / a * a;
}
fn ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => fail!("ty_size: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls.iter_mut() {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: uint,
newv: RegClass) {
if cls[i] == newv {
return;
} else if cls[i] == NoClass {
cls[i] = newv;
} else if newv == NoClass {
return;
} else if cls[i] == Memory || newv == Memory {
cls[i] = Memory;
} else if cls[i] == Int || newv == Int {
cls[i] = Int;
} else if cls[i] == X87 ||
cls[i] == X87Up ||
cls[i] == ComplexX87 ||
newv == X87 ||
newv == X87Up ||
newv == ComplexX87 {
cls[i] = Memory;
} else {
cls[i] = newv;
}
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: uint,
off: uint,
packed: bool) {
let mut field_off = off;
for ty in tys.iter() {
if !packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: uint,
off: uint) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0u {
let mut i = off / 8u;
let e = (off + t_size + 7u) / 8u;
while i < e {
unify(cls, ix + i, Memory);
i += 1u;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8u, Int);
}
Float => {
if off % 8u == 4u {
unify(cls, ix + off / 8u, SSEFv);
} else {
unify(cls, ix + off / 8u, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8u, SSEDs);
}
Struct => {
classify_struct(ty.field_types().as_slice(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0u;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1u;
}
}
_ => fail!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0u;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2u && (ty_kind == Struct || ty_kind == Array) {
if cls[i].is_sse() {
i += 1u;
while i < e {
if cls[i] != SSEUp {
all_mem(cls);
return;
}
i += 1u;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i != e && cls[i] == SSEUp { i += 1u; }
} else if cls[i] == X87 {
i += 1;
while i != e && cls[i] == X87Up { i += 1u; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = Vec::from_elem(words, NoClass);
if words > 4 {
all_mem(cls.as_mut_slice());
return cls;
}
classify(ty, cls.as_mut_slice(), 0, 0);
fixup(ty, cls.as_mut_slice());
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type
|
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty(ccx: &CrateContext,
ty: Type,
is_mem_cls: |cls: &[RegClass]| -> bool,
ind_attr: Attribute)
-> ArgType {
if !ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(cls.as_slice()) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, cls.as_slice())),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys.iter() {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), ByValAttribute);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), StructRetAttribute)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
|
{
fn llvec_len(cls: &[RegClass]) -> uint {
let mut len = 1u;
for c in cls.iter() {
if *c != SSEUp {
break;
}
len += 1u;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0u;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
}
SSEFv => {
let vec_len = llvec_len(cls.tailn(i + 1u));
let vec_ty = Type::vector(&Type::f32(ccx), (vec_len * 2u) as u64);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => {
tys.push(Type::f64(ccx));
}
_ => fail!("llregtype: unhandled class")
}
i += 1u;
}
return Type::struct_(ccx, tys.as_slice(), false);
}
|
identifier_body
|
cabi_x86_64.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute};
use llvm::{StructRetAttribute, ByValAttribute, ZExtAttribute};
use middle::trans::cabi::{ArgType, FnType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use std::cmp;
#[deriving(Clone, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt,
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv => true,
_ => false
}
}
}
trait ClassList {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl<'a> ClassList for &'a [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.len() == 0 { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.len() == 0 { return false; }
self[0] == Memory
|
}
}
fn classify_ty(ty: Type) -> Vec<RegClass> {
fn align(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return (off + a - 1u) / a * a;
}
fn ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => fail!("ty_size: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls.iter_mut() {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: uint,
newv: RegClass) {
if cls[i] == newv {
return;
} else if cls[i] == NoClass {
cls[i] = newv;
} else if newv == NoClass {
return;
} else if cls[i] == Memory || newv == Memory {
cls[i] = Memory;
} else if cls[i] == Int || newv == Int {
cls[i] = Int;
} else if cls[i] == X87 ||
cls[i] == X87Up ||
cls[i] == ComplexX87 ||
newv == X87 ||
newv == X87Up ||
newv == ComplexX87 {
cls[i] = Memory;
} else {
cls[i] = newv;
}
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: uint,
off: uint,
packed: bool) {
let mut field_off = off;
for ty in tys.iter() {
if !packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: uint,
off: uint) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0u {
let mut i = off / 8u;
let e = (off + t_size + 7u) / 8u;
while i < e {
unify(cls, ix + i, Memory);
i += 1u;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8u, Int);
}
Float => {
if off % 8u == 4u {
unify(cls, ix + off / 8u, SSEFv);
} else {
unify(cls, ix + off / 8u, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8u, SSEDs);
}
Struct => {
classify_struct(ty.field_types().as_slice(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0u;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1u;
}
}
_ => fail!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0u;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2u && (ty_kind == Struct || ty_kind == Array) {
if cls[i].is_sse() {
i += 1u;
while i < e {
if cls[i] != SSEUp {
all_mem(cls);
return;
}
i += 1u;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i != e && cls[i] == SSEUp { i += 1u; }
} else if cls[i] == X87 {
i += 1;
while i != e && cls[i] == X87Up { i += 1u; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = Vec::from_elem(words, NoClass);
if words > 4 {
all_mem(cls.as_mut_slice());
return cls;
}
classify(ty, cls.as_mut_slice(), 0, 0);
fixup(ty, cls.as_mut_slice());
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
fn llvec_len(cls: &[RegClass]) -> uint {
let mut len = 1u;
for c in cls.iter() {
if *c != SSEUp {
break;
}
len += 1u;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0u;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
}
SSEFv => {
let vec_len = llvec_len(cls.tailn(i + 1u));
let vec_ty = Type::vector(&Type::f32(ccx), (vec_len * 2u) as u64);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => {
tys.push(Type::f64(ccx));
}
_ => fail!("llregtype: unhandled class")
}
i += 1u;
}
return Type::struct_(ccx, tys.as_slice(), false);
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty(ccx: &CrateContext,
ty: Type,
is_mem_cls: |cls: &[RegClass]| -> bool,
ind_attr: Attribute)
-> ArgType {
if !ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(cls.as_slice()) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, cls.as_slice())),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys.iter() {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), ByValAttribute);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), StructRetAttribute)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
|
random_line_split
|
|
cabi_x86_64.rs
|
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_uppercase_statics)]
use llvm;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute};
use llvm::{StructRetAttribute, ByValAttribute, ZExtAttribute};
use middle::trans::cabi::{ArgType, FnType};
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use std::cmp;
#[deriving(Clone, PartialEq)]
enum RegClass {
NoClass,
Int,
SSEFs,
SSEFv,
SSEDs,
SSEDv,
SSEInt,
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
}
trait TypeMethods {
fn is_reg_ty(&self) -> bool;
}
impl TypeMethods for Type {
fn is_reg_ty(&self) -> bool {
match self.kind() {
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass {
fn is_sse(&self) -> bool {
match *self {
SSEFs | SSEFv | SSEDs | SSEDv => true,
_ => false
}
}
}
trait ClassList {
fn is_pass_byval(&self) -> bool;
fn is_ret_bysret(&self) -> bool;
}
impl<'a> ClassList for &'a [RegClass] {
fn is_pass_byval(&self) -> bool {
if self.len() == 0 { return false; }
let class = self[0];
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.len() == 0 { return false; }
self[0] == Memory
}
}
fn
|
(ty: Type) -> Vec<RegClass> {
fn align(off: uint, ty: Type) -> uint {
let a = ty_align(ty);
return (off + a - 1u) / a * a;
}
fn ty_align(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
if ty.is_packed() {
1
} else {
let str_tys = ty.field_types();
str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
}
}
Array => {
let elt = ty.element_type();
ty_align(elt)
}
_ => fail!("ty_size: unhandled type")
}
}
fn ty_size(ty: Type) -> uint {
match ty.kind() {
Integer => {
unsafe {
((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as uint) + 7) / 8
}
}
Pointer => 8,
Float => 4,
Double => 8,
Struct => {
let str_tys = ty.field_types();
if ty.is_packed() {
str_tys.iter().fold(0, |s, t| s + ty_size(*t))
} else {
let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
align(size, ty)
}
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
len * eltsz
}
_ => fail!("ty_size: unhandled type")
}
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls.iter_mut() {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: uint,
newv: RegClass) {
if cls[i] == newv {
return;
} else if cls[i] == NoClass {
cls[i] = newv;
} else if newv == NoClass {
return;
} else if cls[i] == Memory || newv == Memory {
cls[i] = Memory;
} else if cls[i] == Int || newv == Int {
cls[i] = Int;
} else if cls[i] == X87 ||
cls[i] == X87Up ||
cls[i] == ComplexX87 ||
newv == X87 ||
newv == X87Up ||
newv == ComplexX87 {
cls[i] = Memory;
} else {
cls[i] = newv;
}
}
fn classify_struct(tys: &[Type],
cls: &mut [RegClass],
i: uint,
off: uint,
packed: bool) {
let mut field_off = off;
for ty in tys.iter() {
if !packed {
field_off = align(field_off, *ty);
}
classify(*ty, cls, i, field_off);
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: uint,
off: uint) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0u {
let mut i = off / 8u;
let e = (off + t_size + 7u) / 8u;
while i < e {
unify(cls, ix + i, Memory);
i += 1u;
}
return;
}
match ty.kind() {
Integer |
Pointer => {
unify(cls, ix + off / 8u, Int);
}
Float => {
if off % 8u == 4u {
unify(cls, ix + off / 8u, SSEFv);
} else {
unify(cls, ix + off / 8u, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8u, SSEDs);
}
Struct => {
classify_struct(ty.field_types().as_slice(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0u;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1u;
}
}
_ => fail!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) {
let mut i = 0u;
let ty_kind = ty.kind();
let e = cls.len();
if cls.len() > 2u && (ty_kind == Struct || ty_kind == Array) {
if cls[i].is_sse() {
i += 1u;
while i < e {
if cls[i] != SSEUp {
all_mem(cls);
return;
}
i += 1u;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i != e && cls[i] == SSEUp { i += 1u; }
} else if cls[i] == X87 {
i += 1;
while i != e && cls[i] == X87Up { i += 1u; }
} else {
i += 1;
}
}
}
}
let words = (ty_size(ty) + 7) / 8;
let mut cls = Vec::from_elem(words, NoClass);
if words > 4 {
all_mem(cls.as_mut_slice());
return cls;
}
classify(ty, cls.as_mut_slice(), 0, 0);
fixup(ty, cls.as_mut_slice());
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
fn llvec_len(cls: &[RegClass]) -> uint {
let mut len = 1u;
for c in cls.iter() {
if *c != SSEUp {
break;
}
len += 1u;
}
return len;
}
let mut tys = Vec::new();
let mut i = 0u;
let e = cls.len();
while i < e {
match cls[i] {
Int => {
tys.push(Type::i64(ccx));
}
SSEFv => {
let vec_len = llvec_len(cls.tailn(i + 1u));
let vec_ty = Type::vector(&Type::f32(ccx), (vec_len * 2u) as u64);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
tys.push(Type::f32(ccx));
}
SSEDs => {
tys.push(Type::f64(ccx));
}
_ => fail!("llregtype: unhandled class")
}
i += 1u;
}
return Type::struct_(ccx, tys.as_slice(), false);
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty(ccx: &CrateContext,
ty: Type,
is_mem_cls: |cls: &[RegClass]| -> bool,
ind_attr: Attribute)
-> ArgType {
if !ty.is_reg_ty() {
let cls = classify_ty(ty);
if is_mem_cls(cls.as_slice()) {
ArgType::indirect(ty, Some(ind_attr))
} else {
ArgType::direct(ty,
Some(llreg_ty(ccx, cls.as_slice())),
None,
None)
}
} else {
let attr = if ty == Type::i1(ccx) { Some(ZExtAttribute) } else { None };
ArgType::direct(ty, None, None, attr)
}
}
let mut arg_tys = Vec::new();
for t in atys.iter() {
let ty = x86_64_ty(ccx, *t, |cls| cls.is_pass_byval(), ByValAttribute);
arg_tys.push(ty);
}
let ret_ty = if ret_def {
x86_64_ty(ccx, rty, |cls| cls.is_ret_bysret(), StructRetAttribute)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
}
|
classify_ty
|
identifier_name
|
ListVariable.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the ListVariable canned Variable type.
"""
import os
import TestSCons
test = TestSCons.TestSCons()
SConstruct_path = test.workpath('SConstruct')
def check(expect):
|
test.write(SConstruct_path, """\
from SCons.Variables.ListVariable import ListVariable
LV = ListVariable
from SCons.Variables import ListVariable
list_of_libs = Split('x11 gl qt ical')
optsfile = 'scons.variables'
opts = Variables(optsfile, args=ARGUMENTS)
opts.AddVariables(
ListVariable('shared',
'libraries to build as shared libraries',
'all',
names = list_of_libs,
map = {'GL':'gl', 'QT':'qt'}),
LV('listvariable', 'listvariable help', 'all', names=['l1', 'l2', 'l3'])
)
env = Environment(variables=opts)
opts.Save(optsfile, env)
Help(opts.GenerateHelpText(env))
print(env['shared'])
if 'ical' in env['shared']:
print('1')
else:
print('0')
print(" ".join(env['shared']))
print(env.subst('$shared'))
# Test subst_path() because it's used in $CPPDEFINES expansions.
print(env.subst_path('$shared'))
Default(env.Alias('dummy', None))
""")
test.run()
check(['all', '1', 'gl ical qt x11', 'gl ical qt x11',
"['gl ical qt x11']"])
expect = "shared = 'all'"+os.linesep+"listvariable = 'all'"+os.linesep
test.must_match(test.workpath('scons.variables'), expect)
check(['all', '1', 'gl ical qt x11', 'gl ical qt x11',
"['gl ical qt x11']"])
test.run(arguments='shared=none')
check(['none', '0', '', '', "['']"])
test.run(arguments='shared=')
check(['none', '0', '', '', "['']"])
test.run(arguments='shared=x11,ical')
check(['ical,x11', '1', 'ical x11', 'ical x11',
"['ical x11']"])
test.run(arguments='shared=x11,,ical,,')
check(['ical,x11', '1', 'ical x11', 'ical x11',
"['ical x11']"])
test.run(arguments='shared=GL')
check(['gl', '0', 'gl', 'gl'])
test.run(arguments='shared=QT,GL')
check(['gl,qt', '0', 'gl qt', 'gl qt', "['gl qt']"])
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo', stderr=expect_stderr, status=2)
# be paranoid in testing some more combinations
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo,ical', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=ical,foo', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=ical,foo,x11', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo,bar
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo,x11,,,bar', stderr=expect_stderr, status=2)
test.write('SConstruct', """
from SCons.Variables import ListVariable
opts = Variables(args=ARGUMENTS)
opts.AddVariables(
ListVariable('gpib',
'comment',
['ENET', 'GPIB'],
names = ['ENET', 'GPIB', 'LINUX_GPIB', 'NO_GPIB']),
)
env = Environment(variables=opts)
Help(opts.GenerateHelpText(env))
print(env['gpib'])
Default(env.Alias('dummy', None))
""")
test.run(stdout=test.wrap_stdout(read_str="ENET,GPIB\n", build_str="""\
scons: Nothing to be done for `dummy'.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
result = test.stdout().split('\n')
r = result[1:len(expect)+1]
assert r == expect, (r, expect)
|
identifier_body
|
ListVariable.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the ListVariable canned Variable type.
"""
import os
import TestSCons
test = TestSCons.TestSCons()
SConstruct_path = test.workpath('SConstruct')
def
|
(expect):
result = test.stdout().split('\n')
r = result[1:len(expect)+1]
assert r == expect, (r, expect)
test.write(SConstruct_path, """\
from SCons.Variables.ListVariable import ListVariable
LV = ListVariable
from SCons.Variables import ListVariable
list_of_libs = Split('x11 gl qt ical')
optsfile = 'scons.variables'
opts = Variables(optsfile, args=ARGUMENTS)
opts.AddVariables(
ListVariable('shared',
'libraries to build as shared libraries',
'all',
names = list_of_libs,
map = {'GL':'gl', 'QT':'qt'}),
LV('listvariable', 'listvariable help', 'all', names=['l1', 'l2', 'l3'])
)
env = Environment(variables=opts)
opts.Save(optsfile, env)
Help(opts.GenerateHelpText(env))
print(env['shared'])
if 'ical' in env['shared']:
print('1')
else:
print('0')
print(" ".join(env['shared']))
print(env.subst('$shared'))
# Test subst_path() because it's used in $CPPDEFINES expansions.
print(env.subst_path('$shared'))
Default(env.Alias('dummy', None))
""")
test.run()
check(['all', '1', 'gl ical qt x11', 'gl ical qt x11',
"['gl ical qt x11']"])
expect = "shared = 'all'"+os.linesep+"listvariable = 'all'"+os.linesep
test.must_match(test.workpath('scons.variables'), expect)
check(['all', '1', 'gl ical qt x11', 'gl ical qt x11',
"['gl ical qt x11']"])
test.run(arguments='shared=none')
check(['none', '0', '', '', "['']"])
test.run(arguments='shared=')
check(['none', '0', '', '', "['']"])
test.run(arguments='shared=x11,ical')
check(['ical,x11', '1', 'ical x11', 'ical x11',
"['ical x11']"])
test.run(arguments='shared=x11,,ical,,')
check(['ical,x11', '1', 'ical x11', 'ical x11',
"['ical x11']"])
test.run(arguments='shared=GL')
check(['gl', '0', 'gl', 'gl'])
test.run(arguments='shared=QT,GL')
check(['gl,qt', '0', 'gl qt', 'gl qt', "['gl qt']"])
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo', stderr=expect_stderr, status=2)
# be paranoid in testing some more combinations
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo,ical', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=ical,foo', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=ical,foo,x11', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo,bar
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo,x11,,,bar', stderr=expect_stderr, status=2)
test.write('SConstruct', """
from SCons.Variables import ListVariable
opts = Variables(args=ARGUMENTS)
opts.AddVariables(
ListVariable('gpib',
'comment',
['ENET', 'GPIB'],
names = ['ENET', 'GPIB', 'LINUX_GPIB', 'NO_GPIB']),
)
env = Environment(variables=opts)
Help(opts.GenerateHelpText(env))
print(env['gpib'])
Default(env.Alias('dummy', None))
""")
test.run(stdout=test.wrap_stdout(read_str="ENET,GPIB\n", build_str="""\
scons: Nothing to be done for `dummy'.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
check
|
identifier_name
|
ListVariable.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test the ListVariable canned Variable type.
"""
import os
import TestSCons
test = TestSCons.TestSCons()
SConstruct_path = test.workpath('SConstruct')
def check(expect):
result = test.stdout().split('\n')
r = result[1:len(expect)+1]
assert r == expect, (r, expect)
test.write(SConstruct_path, """\
from SCons.Variables.ListVariable import ListVariable
LV = ListVariable
from SCons.Variables import ListVariable
list_of_libs = Split('x11 gl qt ical')
optsfile = 'scons.variables'
opts = Variables(optsfile, args=ARGUMENTS)
opts.AddVariables(
ListVariable('shared',
'libraries to build as shared libraries',
'all',
names = list_of_libs,
map = {'GL':'gl', 'QT':'qt'}),
LV('listvariable', 'listvariable help', 'all', names=['l1', 'l2', 'l3'])
)
env = Environment(variables=opts)
opts.Save(optsfile, env)
Help(opts.GenerateHelpText(env))
print(env['shared'])
if 'ical' in env['shared']:
print('1')
else:
print('0')
print(" ".join(env['shared']))
print(env.subst('$shared'))
# Test subst_path() because it's used in $CPPDEFINES expansions.
print(env.subst_path('$shared'))
Default(env.Alias('dummy', None))
""")
test.run()
check(['all', '1', 'gl ical qt x11', 'gl ical qt x11',
"['gl ical qt x11']"])
expect = "shared = 'all'"+os.linesep+"listvariable = 'all'"+os.linesep
test.must_match(test.workpath('scons.variables'), expect)
check(['all', '1', 'gl ical qt x11', 'gl ical qt x11',
"['gl ical qt x11']"])
test.run(arguments='shared=none')
check(['none', '0', '', '', "['']"])
test.run(arguments='shared=')
check(['none', '0', '', '', "['']"])
test.run(arguments='shared=x11,ical')
check(['ical,x11', '1', 'ical x11', 'ical x11',
"['ical x11']"])
test.run(arguments='shared=x11,,ical,,')
check(['ical,x11', '1', 'ical x11', 'ical x11',
"['ical x11']"])
test.run(arguments='shared=GL')
check(['gl', '0', 'gl', 'gl'])
test.run(arguments='shared=QT,GL')
check(['gl,qt', '0', 'gl qt', 'gl qt', "['gl qt']"])
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo', stderr=expect_stderr, status=2)
# be paranoid in testing some more combinations
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo,ical', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=ical,foo', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=ical,foo,x11', stderr=expect_stderr, status=2)
expect_stderr = """
scons: *** Error converting option: shared
Invalid value(s) for option: foo,bar
""" + test.python_file_line(SConstruct_path, 19)
test.run(arguments='shared=foo,x11,,,bar', stderr=expect_stderr, status=2)
test.write('SConstruct', """
from SCons.Variables import ListVariable
opts = Variables(args=ARGUMENTS)
opts.AddVariables(
ListVariable('gpib',
'comment',
['ENET', 'GPIB'],
names = ['ENET', 'GPIB', 'LINUX_GPIB', 'NO_GPIB']),
)
env = Environment(variables=opts)
Help(opts.GenerateHelpText(env))
print(env['gpib'])
Default(env.Alias('dummy', None))
""")
test.run(stdout=test.wrap_stdout(read_str="ENET,GPIB\n", build_str="""\
scons: Nothing to be done for `dummy'.
"""))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
|
# vim: set expandtab tabstop=4 shiftwidth=4:
|
# End:
|
random_line_split
|
SimplePopover.js
|
import React, { Component } from 'react';
import OnClickOutside from 'react-onclickoutside';
import './SimplePopover.scss';
|
position: 'absolute',
top: `${position.bottom + TOOLTIP_MARGIN}px`,
left: `${(position.left + (position.width / 2)) - (popoverWidth / 2)}px`,
});
const getPopoverOnBottomLeftStyle = (position, popoverWidth) => ({
position: 'absolute',
top: `${position.bottom + TOOLTIP_MARGIN}px`,
left: `${(position.left - popoverWidth) + position.width}px`,
});
const getPopoverOnRightStyle = (position, popoverWidth, popoverHeight) => ({
position: 'absolute',
top: `${position.bottom - (popoverHeight / 2)}px`,
left: `${position.right + TOOLTIP_MARGIN}px`,
});
@OnClickOutside
export default class SimplePopover extends Component {
constructor(props) {
super(props);
this.state = {
el: null,
};
}
handleRef = (e) => {
if (!this.state.el) {
this.setState({ el: e });
}
};
// Will be triggered by OnClickOutside HoC
handleClickOutside() {
this.props.removePopover();
}
render() {
const { pos, className, title, content, appearOn } = this.props;
const popoverWidth = this.state.el ? this.state.el.clientWidth : 0;
const popoverHeight = this.state.el ? this.state.el.clientHeight : 0;
let style;
if (appearOn === 'right') {
style = getPopoverOnRightStyle(pos, popoverWidth, popoverHeight);
} else if (appearOn === 'bottom-left') {
style = getPopoverOnBottomLeftStyle(pos, popoverWidth);
} else if(appearOn === 'bottom') {
style = getPopoverOnBottomStyle(pos, popoverWidth);
}
return (
<div className={className} style={style} ref={this.handleRef}>
<p className={`${className}--title`}>{title}</p>
{content}
</div>
);
}
}
|
const TOOLTIP_MARGIN = 10;
const getPopoverOnBottomStyle = (position, popoverWidth) => ({
|
random_line_split
|
SimplePopover.js
|
import React, { Component } from 'react';
import OnClickOutside from 'react-onclickoutside';
import './SimplePopover.scss';
const TOOLTIP_MARGIN = 10;
const getPopoverOnBottomStyle = (position, popoverWidth) => ({
position: 'absolute',
top: `${position.bottom + TOOLTIP_MARGIN}px`,
left: `${(position.left + (position.width / 2)) - (popoverWidth / 2)}px`,
});
const getPopoverOnBottomLeftStyle = (position, popoverWidth) => ({
position: 'absolute',
top: `${position.bottom + TOOLTIP_MARGIN}px`,
left: `${(position.left - popoverWidth) + position.width}px`,
});
const getPopoverOnRightStyle = (position, popoverWidth, popoverHeight) => ({
position: 'absolute',
top: `${position.bottom - (popoverHeight / 2)}px`,
left: `${position.right + TOOLTIP_MARGIN}px`,
});
@OnClickOutside
export default class SimplePopover extends Component {
constructor(props) {
super(props);
this.state = {
el: null,
};
}
handleRef = (e) => {
if (!this.state.el) {
this.setState({ el: e });
}
};
// Will be triggered by OnClickOutside HoC
handleClickOutside() {
this.props.removePopover();
}
render() {
const { pos, className, title, content, appearOn } = this.props;
const popoverWidth = this.state.el ? this.state.el.clientWidth : 0;
const popoverHeight = this.state.el ? this.state.el.clientHeight : 0;
let style;
if (appearOn === 'right')
|
else if (appearOn === 'bottom-left') {
style = getPopoverOnBottomLeftStyle(pos, popoverWidth);
} else if(appearOn === 'bottom') {
style = getPopoverOnBottomStyle(pos, popoverWidth);
}
return (
<div className={className} style={style} ref={this.handleRef}>
<p className={`${className}--title`}>{title}</p>
{content}
</div>
);
}
}
|
{
style = getPopoverOnRightStyle(pos, popoverWidth, popoverHeight);
}
|
conditional_block
|
SimplePopover.js
|
import React, { Component } from 'react';
import OnClickOutside from 'react-onclickoutside';
import './SimplePopover.scss';
const TOOLTIP_MARGIN = 10;
const getPopoverOnBottomStyle = (position, popoverWidth) => ({
position: 'absolute',
top: `${position.bottom + TOOLTIP_MARGIN}px`,
left: `${(position.left + (position.width / 2)) - (popoverWidth / 2)}px`,
});
const getPopoverOnBottomLeftStyle = (position, popoverWidth) => ({
position: 'absolute',
top: `${position.bottom + TOOLTIP_MARGIN}px`,
left: `${(position.left - popoverWidth) + position.width}px`,
});
const getPopoverOnRightStyle = (position, popoverWidth, popoverHeight) => ({
position: 'absolute',
top: `${position.bottom - (popoverHeight / 2)}px`,
left: `${position.right + TOOLTIP_MARGIN}px`,
});
@OnClickOutside
export default class SimplePopover extends Component {
constructor(props) {
super(props);
this.state = {
el: null,
};
}
handleRef = (e) => {
if (!this.state.el) {
this.setState({ el: e });
}
};
// Will be triggered by OnClickOutside HoC
handleClickOutside() {
this.props.removePopover();
}
|
() {
const { pos, className, title, content, appearOn } = this.props;
const popoverWidth = this.state.el ? this.state.el.clientWidth : 0;
const popoverHeight = this.state.el ? this.state.el.clientHeight : 0;
let style;
if (appearOn === 'right') {
style = getPopoverOnRightStyle(pos, popoverWidth, popoverHeight);
} else if (appearOn === 'bottom-left') {
style = getPopoverOnBottomLeftStyle(pos, popoverWidth);
} else if(appearOn === 'bottom') {
style = getPopoverOnBottomStyle(pos, popoverWidth);
}
return (
<div className={className} style={style} ref={this.handleRef}>
<p className={`${className}--title`}>{title}</p>
{content}
</div>
);
}
}
|
render
|
identifier_name
|
user.ts
|
import * as express from 'express';
import { default as User } from '../../models/User';
import * as jwt from 'jsonwebtoken';
const Token = require('../util/token');
const userRouter = express.Router();
//when user clicks "login button"
userRouter.post('/users', (req, res) => {
const user = new User();
user.firstName = req.body.firstName;
user.lastName = req.body.lastName;
user.email = req.body.email;
user.password = req.body.password;
user.github = req.body.github;
user.profile = {};
user.profile.github = req.body.github;
user.profile.location = req.body.location;
user.profile.firstName = req.body.firstName;
user.profile.lastName = req.body.lastName;
user.profile.picture = req.body.picture;
user.profile.website = req.body.website;
user.save((err) => {
if (err)
|
return User.findById(user._id, (err, user) => {
res.status(201).json(user);
});
});
});
module.exports = userRouter;
|
{ return res.status(400).send(err); }
|
conditional_block
|
user.ts
|
import * as express from 'express';
import { default as User } from '../../models/User';
import * as jwt from 'jsonwebtoken';
const Token = require('../util/token');
const userRouter = express.Router();
//when user clicks "login button"
userRouter.post('/users', (req, res) => {
const user = new User();
user.firstName = req.body.firstName;
user.lastName = req.body.lastName;
user.email = req.body.email;
user.password = req.body.password;
user.github = req.body.github;
user.profile = {};
user.profile.github = req.body.github;
user.profile.location = req.body.location;
user.profile.firstName = req.body.firstName;
user.profile.lastName = req.body.lastName;
user.profile.picture = req.body.picture;
|
return User.findById(user._id, (err, user) => {
res.status(201).json(user);
});
});
});
module.exports = userRouter;
|
user.profile.website = req.body.website;
user.save((err) => {
if (err) { return res.status(400).send(err); }
|
random_line_split
|
app.js
|
require('./modules/config');
require('./modules/controllers');
require('./modules/services');
let initialized = false;
function
|
(path, query, attribute){
var value = path.split("/").slice(-1).pop();
query.equalTo(attribute, value);
return query
}
const app = angular.module('Kinvey', [
'ionic',
'kinvey',
'config',
'controllers',
'services'
]);
app.config(function($logProvider) {
'ngInject';
// Enable log
$logProvider.debugEnabled(true);
});
app.config(function($stateProvider) {
'ngInject';
// Setup the states
$stateProvider
.state('welcome', {
url: '',
templateUrl: 'views/welcome.html',
data: {
requiresAuthorization: false
},
controller: 'WelcomeCtrl as vm'
})
.state('logout', {
url: '/logout',
data: {
requiresAuthorization: true
},
controller: function($state, Auth) {
'ngInject';
Auth.logout().then(function() {
$state.go('welcome');
});
}
})
.state('app', {
url: '/app',
abstract: true,
templateUrl: 'views/menu.html',
controller: 'MenuCtrl as vm'
})
.state('books', {
parent: 'app',
url: '/library',
views: {
content: {
templateUrl: 'views/library.html',
controller: 'BooksCtrl as vm',
resolve: {
books: function(DataStore) {
'ngInject';
return DataStore.find('books');
}
}
}
},
data: {
requiresAuthorization: true
}
})
.state('pages', {
parent: 'app',
url: '/read/:bookId',
views: {
content: {
templateUrl: 'views/read.html',
controller: 'PagesCtrl as vm',
resolve: {
pages: function(DataStore, $kinvey, $location) {
'ngInject';
return DataStore.find('pages', constructQuery($location.path(), new $kinvey.Query(), 'bookId'));
},
book: function(DataStore, $kinvey, $location) {
return DataStore.find('books', constructQuery($location.path(), new $kinvey.Query(), '_id'));
}
}
}
},
data: {
requiresAuthorization: true
}
});
});
app.run(function($ionicPlatform, $kinvey, $rootScope, $state, KinveyConfig, Auth) {
'ngInject';
$rootScope.$on('$stateChangeStart', function(event, toState, toParams) {
toState.data = toState.data || {};
if (!initialized) {
event.preventDefault();
// Initialize Kinvey
$kinvey.init(KinveyConfig).then(function() {
initialized = true;
$state.go(toState.name, toParams);
});
} else if (toState.data.requiresAuthorization && !$kinvey.getActiveUser()) {
event.preventDefault();
// Login
Auth.login().then(function() {
$state.go(toState.name, toParams);
});
}
});
$ionicPlatform.ready(function() {
const cordova = window.cordova;
const StatusBar = window.StatusBar;
// Hide the accessory bar by default (remove this to show the accessory bar above the keyboard
// for form inputs)
if (cordova && cordova.plugins.Keyboard) {
cordova.plugins.Keyboard.hideKeyboardAccessoryBar(true);
cordova.plugins.Keyboard.disableScroll(true);
}
if (StatusBar) {
StatusBar.styleDefault();
}
});
$state.go('welcome');
});
|
constructQuery
|
identifier_name
|
app.js
|
require('./modules/config');
require('./modules/controllers');
require('./modules/services');
let initialized = false;
function constructQuery(path, query, attribute){
var value = path.split("/").slice(-1).pop();
query.equalTo(attribute, value);
return query
}
const app = angular.module('Kinvey', [
'ionic',
'kinvey',
'config',
'controllers',
'services'
]);
app.config(function($logProvider) {
'ngInject';
// Enable log
$logProvider.debugEnabled(true);
});
app.config(function($stateProvider) {
'ngInject';
// Setup the states
$stateProvider
.state('welcome', {
url: '',
templateUrl: 'views/welcome.html',
data: {
requiresAuthorization: false
},
controller: 'WelcomeCtrl as vm'
})
.state('logout', {
url: '/logout',
data: {
requiresAuthorization: true
},
controller: function($state, Auth) {
'ngInject';
Auth.logout().then(function() {
$state.go('welcome');
});
}
})
.state('app', {
url: '/app',
abstract: true,
templateUrl: 'views/menu.html',
controller: 'MenuCtrl as vm'
})
.state('books', {
parent: 'app',
url: '/library',
views: {
content: {
templateUrl: 'views/library.html',
controller: 'BooksCtrl as vm',
resolve: {
books: function(DataStore) {
'ngInject';
return DataStore.find('books');
}
}
}
},
data: {
requiresAuthorization: true
}
})
.state('pages', {
parent: 'app',
url: '/read/:bookId',
views: {
content: {
templateUrl: 'views/read.html',
controller: 'PagesCtrl as vm',
resolve: {
pages: function(DataStore, $kinvey, $location) {
'ngInject';
return DataStore.find('pages', constructQuery($location.path(), new $kinvey.Query(), 'bookId'));
},
book: function(DataStore, $kinvey, $location) {
return DataStore.find('books', constructQuery($location.path(), new $kinvey.Query(), '_id'));
}
}
}
},
data: {
requiresAuthorization: true
}
});
});
app.run(function($ionicPlatform, $kinvey, $rootScope, $state, KinveyConfig, Auth) {
'ngInject';
$rootScope.$on('$stateChangeStart', function(event, toState, toParams) {
toState.data = toState.data || {};
if (!initialized) {
event.preventDefault();
// Initialize Kinvey
$kinvey.init(KinveyConfig).then(function() {
initialized = true;
$state.go(toState.name, toParams);
});
} else if (toState.data.requiresAuthorization && !$kinvey.getActiveUser())
|
});
$ionicPlatform.ready(function() {
const cordova = window.cordova;
const StatusBar = window.StatusBar;
// Hide the accessory bar by default (remove this to show the accessory bar above the keyboard
// for form inputs)
if (cordova && cordova.plugins.Keyboard) {
cordova.plugins.Keyboard.hideKeyboardAccessoryBar(true);
cordova.plugins.Keyboard.disableScroll(true);
}
if (StatusBar) {
StatusBar.styleDefault();
}
});
$state.go('welcome');
});
|
{
event.preventDefault();
// Login
Auth.login().then(function() {
$state.go(toState.name, toParams);
});
}
|
conditional_block
|
app.js
|
require('./modules/config');
require('./modules/controllers');
require('./modules/services');
let initialized = false;
function constructQuery(path, query, attribute)
|
const app = angular.module('Kinvey', [
'ionic',
'kinvey',
'config',
'controllers',
'services'
]);
app.config(function($logProvider) {
'ngInject';
// Enable log
$logProvider.debugEnabled(true);
});
app.config(function($stateProvider) {
'ngInject';
// Setup the states
$stateProvider
.state('welcome', {
url: '',
templateUrl: 'views/welcome.html',
data: {
requiresAuthorization: false
},
controller: 'WelcomeCtrl as vm'
})
.state('logout', {
url: '/logout',
data: {
requiresAuthorization: true
},
controller: function($state, Auth) {
'ngInject';
Auth.logout().then(function() {
$state.go('welcome');
});
}
})
.state('app', {
url: '/app',
abstract: true,
templateUrl: 'views/menu.html',
controller: 'MenuCtrl as vm'
})
.state('books', {
parent: 'app',
url: '/library',
views: {
content: {
templateUrl: 'views/library.html',
controller: 'BooksCtrl as vm',
resolve: {
books: function(DataStore) {
'ngInject';
return DataStore.find('books');
}
}
}
},
data: {
requiresAuthorization: true
}
})
.state('pages', {
parent: 'app',
url: '/read/:bookId',
views: {
content: {
templateUrl: 'views/read.html',
controller: 'PagesCtrl as vm',
resolve: {
pages: function(DataStore, $kinvey, $location) {
'ngInject';
return DataStore.find('pages', constructQuery($location.path(), new $kinvey.Query(), 'bookId'));
},
book: function(DataStore, $kinvey, $location) {
return DataStore.find('books', constructQuery($location.path(), new $kinvey.Query(), '_id'));
}
}
}
},
data: {
requiresAuthorization: true
}
});
});
app.run(function($ionicPlatform, $kinvey, $rootScope, $state, KinveyConfig, Auth) {
'ngInject';
$rootScope.$on('$stateChangeStart', function(event, toState, toParams) {
toState.data = toState.data || {};
if (!initialized) {
event.preventDefault();
// Initialize Kinvey
$kinvey.init(KinveyConfig).then(function() {
initialized = true;
$state.go(toState.name, toParams);
});
} else if (toState.data.requiresAuthorization && !$kinvey.getActiveUser()) {
event.preventDefault();
// Login
Auth.login().then(function() {
$state.go(toState.name, toParams);
});
}
});
$ionicPlatform.ready(function() {
const cordova = window.cordova;
const StatusBar = window.StatusBar;
// Hide the accessory bar by default (remove this to show the accessory bar above the keyboard
// for form inputs)
if (cordova && cordova.plugins.Keyboard) {
cordova.plugins.Keyboard.hideKeyboardAccessoryBar(true);
cordova.plugins.Keyboard.disableScroll(true);
}
if (StatusBar) {
StatusBar.styleDefault();
}
});
$state.go('welcome');
});
|
{
var value = path.split("/").slice(-1).pop();
query.equalTo(attribute, value);
return query
}
|
identifier_body
|
app.js
|
require('./modules/config');
require('./modules/controllers');
require('./modules/services');
let initialized = false;
function constructQuery(path, query, attribute){
var value = path.split("/").slice(-1).pop();
query.equalTo(attribute, value);
return query
}
const app = angular.module('Kinvey', [
'ionic',
'kinvey',
'config',
'controllers',
'services'
]);
app.config(function($logProvider) {
'ngInject';
// Enable log
$logProvider.debugEnabled(true);
});
app.config(function($stateProvider) {
'ngInject';
// Setup the states
$stateProvider
.state('welcome', {
url: '',
templateUrl: 'views/welcome.html',
data: {
requiresAuthorization: false
},
controller: 'WelcomeCtrl as vm'
})
.state('logout', {
url: '/logout',
data: {
requiresAuthorization: true
},
controller: function($state, Auth) {
'ngInject';
Auth.logout().then(function() {
$state.go('welcome');
});
}
})
.state('app', {
url: '/app',
abstract: true,
templateUrl: 'views/menu.html',
controller: 'MenuCtrl as vm'
})
.state('books', {
parent: 'app',
url: '/library',
views: {
content: {
templateUrl: 'views/library.html',
controller: 'BooksCtrl as vm',
resolve: {
books: function(DataStore) {
'ngInject';
return DataStore.find('books');
}
}
}
},
data: {
requiresAuthorization: true
}
})
.state('pages', {
parent: 'app',
url: '/read/:bookId',
views: {
content: {
templateUrl: 'views/read.html',
controller: 'PagesCtrl as vm',
resolve: {
pages: function(DataStore, $kinvey, $location) {
'ngInject';
return DataStore.find('pages', constructQuery($location.path(), new $kinvey.Query(), 'bookId'));
},
book: function(DataStore, $kinvey, $location) {
return DataStore.find('books', constructQuery($location.path(), new $kinvey.Query(), '_id'));
}
}
}
},
data: {
requiresAuthorization: true
}
});
});
app.run(function($ionicPlatform, $kinvey, $rootScope, $state, KinveyConfig, Auth) {
'ngInject';
$rootScope.$on('$stateChangeStart', function(event, toState, toParams) {
toState.data = toState.data || {};
if (!initialized) {
event.preventDefault();
// Initialize Kinvey
$kinvey.init(KinveyConfig).then(function() {
initialized = true;
$state.go(toState.name, toParams);
});
} else if (toState.data.requiresAuthorization && !$kinvey.getActiveUser()) {
event.preventDefault();
// Login
Auth.login().then(function() {
$state.go(toState.name, toParams);
});
}
});
|
// Hide the accessory bar by default (remove this to show the accessory bar above the keyboard
// for form inputs)
if (cordova && cordova.plugins.Keyboard) {
cordova.plugins.Keyboard.hideKeyboardAccessoryBar(true);
cordova.plugins.Keyboard.disableScroll(true);
}
if (StatusBar) {
StatusBar.styleDefault();
}
});
$state.go('welcome');
});
|
$ionicPlatform.ready(function() {
const cordova = window.cordova;
const StatusBar = window.StatusBar;
|
random_line_split
|
notebook_utils.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python functions which run only within a Jupyter notebook."""
# internal imports
import IPython
from magenta.music import midi_synth
_DEFAULT_SAMPLE_RATE = 44100
def
|
(sequence,
synth=midi_synth.synthesize,
sample_rate=_DEFAULT_SAMPLE_RATE,
**synth_args):
"""Creates an interactive player for a synthesized note sequence.
This function should only be called from a Jupyter notebook.
Args:
sequence: A music_pb2.NoteSequence to synthesize and play.
synth: A synthesis function that takes a sequence and sample rate as input.
sample_rate: The sample rate at which to synthesize.
**synth_args: Additional keyword arguments to pass to the synth function.
"""
array_of_floats = synth(sequence, sample_rate=sample_rate, **synth_args)
IPython.display.display(IPython.display.Audio(array_of_floats,
rate=sample_rate))
|
play_sequence
|
identifier_name
|
notebook_utils.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python functions which run only within a Jupyter notebook."""
# internal imports
import IPython
from magenta.music import midi_synth
_DEFAULT_SAMPLE_RATE = 44100
def play_sequence(sequence,
synth=midi_synth.synthesize,
sample_rate=_DEFAULT_SAMPLE_RATE,
**synth_args):
|
"""Creates an interactive player for a synthesized note sequence.
This function should only be called from a Jupyter notebook.
Args:
sequence: A music_pb2.NoteSequence to synthesize and play.
synth: A synthesis function that takes a sequence and sample rate as input.
sample_rate: The sample rate at which to synthesize.
**synth_args: Additional keyword arguments to pass to the synth function.
"""
array_of_floats = synth(sequence, sample_rate=sample_rate, **synth_args)
IPython.display.display(IPython.display.Audio(array_of_floats,
rate=sample_rate))
|
identifier_body
|
|
notebook_utils.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python functions which run only within a Jupyter notebook."""
# internal imports
import IPython
from magenta.music import midi_synth
|
def play_sequence(sequence,
synth=midi_synth.synthesize,
sample_rate=_DEFAULT_SAMPLE_RATE,
**synth_args):
"""Creates an interactive player for a synthesized note sequence.
This function should only be called from a Jupyter notebook.
Args:
sequence: A music_pb2.NoteSequence to synthesize and play.
synth: A synthesis function that takes a sequence and sample rate as input.
sample_rate: The sample rate at which to synthesize.
**synth_args: Additional keyword arguments to pass to the synth function.
"""
array_of_floats = synth(sequence, sample_rate=sample_rate, **synth_args)
IPython.display.display(IPython.display.Audio(array_of_floats,
rate=sample_rate))
|
_DEFAULT_SAMPLE_RATE = 44100
|
random_line_split
|
plugin.js
|
tinymce.PluginManager.add('tabpanel', function(editor, url) {
var walker = tinymce.dom.TreeWalker;
editor.ui.registry.addNestedMenuItem('tabpanel', {
//icon: 'tabpanel',
text: "Tabs",
tooltip: "Tabs",
getSubmenuItems: function () {
return [
{
type: 'menuitem',
//icon: 'tab',
text: "New panel",
tooltip: "New panel",
onAction: function () {
let el = editor.selection.getNode();
let parent = el.parentNode;
let tabpanels = editor.dom.getParents(el,'.tabpanels')[0];
if(tabpanels !== undefined && tabpanels !== null) {
let tabs = editor.dom.select('.nav-tabs',tabpanels)[0],
panels = editor.dom.select('.tab-content',tabpanels)[0],
|
editor.dom.add(panels,'section',{id:'tab'+nb,class:'tab-pane',role:'tabpanel'},'<p>color'+nb+'</p>');
}
}
},
{
type: 'menuitem',
//icon: 'tab',
text: "Remove panel",
tooltip: "Remove panel",
onAction: function () {
let el = editor.selection.getNode();
let parent = el.parentNode;
let tabpanels = editor.dom.getParents(el,'.tabpanels')[0];
if(tabpanels !== undefined && tabpanels !== null) {
let tabs = editor.dom.select('.nav-tabs',tabpanels)[0],
panels = editor.dom.select('.tab-content',tabpanels)[0];
tinymce.activeEditor.dom.remove(tinymce.activeEditor.dom.select('li:last-child',tabs));
tinymce.activeEditor.dom.remove(tinymce.activeEditor.dom.select('section:last-child',panels));
}
}
}
];
}
});
});
// Load the required translation files
tinymce.PluginManager.requireLangPack('tabpanel', 'en_EN,fr_FR');
|
nb = tabs.children.length;
nb++;
editor.dom.add(tabs,'li',false,'<a role="tab" href="#tab'+nb+'" aria-controls="tab'+nb+'" data-toggle="tab"><img class="img-responsive" src="#" alt="color'+nb+'" width="250" height="250" /><span>Color'+nb+'</span></a>');
|
random_line_split
|
plugin.js
|
tinymce.PluginManager.add('tabpanel', function(editor, url) {
var walker = tinymce.dom.TreeWalker;
editor.ui.registry.addNestedMenuItem('tabpanel', {
//icon: 'tabpanel',
text: "Tabs",
tooltip: "Tabs",
getSubmenuItems: function () {
return [
{
type: 'menuitem',
//icon: 'tab',
text: "New panel",
tooltip: "New panel",
onAction: function () {
let el = editor.selection.getNode();
let parent = el.parentNode;
let tabpanels = editor.dom.getParents(el,'.tabpanels')[0];
if(tabpanels !== undefined && tabpanels !== null) {
let tabs = editor.dom.select('.nav-tabs',tabpanels)[0],
panels = editor.dom.select('.tab-content',tabpanels)[0],
nb = tabs.children.length;
nb++;
editor.dom.add(tabs,'li',false,'<a role="tab" href="#tab'+nb+'" aria-controls="tab'+nb+'" data-toggle="tab"><img class="img-responsive" src="#" alt="color'+nb+'" width="250" height="250" /><span>Color'+nb+'</span></a>');
editor.dom.add(panels,'section',{id:'tab'+nb,class:'tab-pane',role:'tabpanel'},'<p>color'+nb+'</p>');
}
}
},
{
type: 'menuitem',
//icon: 'tab',
text: "Remove panel",
tooltip: "Remove panel",
onAction: function () {
let el = editor.selection.getNode();
let parent = el.parentNode;
let tabpanels = editor.dom.getParents(el,'.tabpanels')[0];
if(tabpanels !== undefined && tabpanels !== null)
|
}
}
];
}
});
});
// Load the required translation files
tinymce.PluginManager.requireLangPack('tabpanel', 'en_EN,fr_FR');
|
{
let tabs = editor.dom.select('.nav-tabs',tabpanels)[0],
panels = editor.dom.select('.tab-content',tabpanels)[0];
tinymce.activeEditor.dom.remove(tinymce.activeEditor.dom.select('li:last-child',tabs));
tinymce.activeEditor.dom.remove(tinymce.activeEditor.dom.select('section:last-child',panels));
}
|
conditional_block
|
__openerp__.py
|
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
|
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'XMLRPC Operation Product',
'version': '0.1',
'category': 'ETL',
'description': '''
XMLRPC Import product
''',
'author': 'Micronaet S.r.l. - Nicola Riolini',
'website': 'http://www.micronaet.it',
'license': 'AGPL-3',
'depends': [
'base',
'xmlrpc_base',
'product',
'sql_product', # for statistic category
'base_accounting_program', # q x pack
#'sql_partner', # for fields to update
#'l10n_it_private', # private info
#'mx_partner_zone', # zone
# 'l10n_it_iban_cin'
],
'init_xml': [],
'demo': [],
'data': [
#'operation.xml',
'product_view.xml',
'data/operation.xml',
],
'active': False,
'installable': True,
'auto_install': False,
}
|
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
random_line_split
|
app.module.ts
|
import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { HttpModule } from '@angular/http';
// used to create fake backend
import { fakeBackendProvider } from './_helpers/index';
import { MockBackend, MockConnection } from '@angular/http/testing';
import { BaseRequestOptions } from '@angular/http';
import { AppComponent } from './app.component';
import { AlertComponent } from './_directives/index';
import { AuthGuard } from './_guards/index';
import { AlertService, AuthenticationService, UserService } from './_services/index';
import { LoggerService } from '../app/shared/logger.service';
import { HomeComponent } from './home/home.component';
import { AboutComponent } from './about/about.component';
import { LoginComponent } from './login/login.component';
import { RegisterComponent } from './register/register.component';
import { DashboardComponent } from './dashboard/dashboard.component';
import { AppRoutingModule, routedComponents } from './app-routing.module';
@NgModule({
declarations: [
AppComponent,
|
HomeComponent,
AboutComponent,
LoginComponent,
routedComponents,
RegisterComponent,
DashboardComponent
],
imports: [
BrowserModule,
FormsModule,
HttpModule,
AppRoutingModule,
],
providers: [
AuthGuard,
AlertService,
AuthenticationService,
LoggerService,
UserService,
// providers used to create fake backend
fakeBackendProvider,
MockBackend,
BaseRequestOptions
],
bootstrap: [AppComponent]
})
export class AppModule { }
|
AlertComponent,
|
random_line_split
|
app.module.ts
|
import { BrowserModule } from '@angular/platform-browser';
import { NgModule } from '@angular/core';
import { FormsModule } from '@angular/forms';
import { HttpModule } from '@angular/http';
// used to create fake backend
import { fakeBackendProvider } from './_helpers/index';
import { MockBackend, MockConnection } from '@angular/http/testing';
import { BaseRequestOptions } from '@angular/http';
import { AppComponent } from './app.component';
import { AlertComponent } from './_directives/index';
import { AuthGuard } from './_guards/index';
import { AlertService, AuthenticationService, UserService } from './_services/index';
import { LoggerService } from '../app/shared/logger.service';
import { HomeComponent } from './home/home.component';
import { AboutComponent } from './about/about.component';
import { LoginComponent } from './login/login.component';
import { RegisterComponent } from './register/register.component';
import { DashboardComponent } from './dashboard/dashboard.component';
import { AppRoutingModule, routedComponents } from './app-routing.module';
@NgModule({
declarations: [
AppComponent,
AlertComponent,
HomeComponent,
AboutComponent,
LoginComponent,
routedComponents,
RegisterComponent,
DashboardComponent
],
imports: [
BrowserModule,
FormsModule,
HttpModule,
AppRoutingModule,
],
providers: [
AuthGuard,
AlertService,
AuthenticationService,
LoggerService,
UserService,
// providers used to create fake backend
fakeBackendProvider,
MockBackend,
BaseRequestOptions
],
bootstrap: [AppComponent]
})
export class
|
{ }
|
AppModule
|
identifier_name
|
possible_browser.py
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.app import possible_app
class PossibleBrowser(possible_app.PossibleApp):
"""A browser that can be controlled.
Call Create() to launch the browser and begin manipulating it..
"""
def __init__(self, browser_type, target_os, supports_tab_control):
super(PossibleBrowser, self).__init__(app_type=browser_type,
target_os=target_os)
self._supports_tab_control = supports_tab_control
self._credentials_path = None
def __repr__(self):
return 'PossibleBrowser(app_type=%s)' % self.app_type
@property
def browser_type(self):
return self.app_type
@property
def supports_tab_control(self):
return self._supports_tab_control
def _InitPlatformIfNeeded(self):
raise NotImplementedError()
def Create(self, finder_options):
raise NotImplementedError()
def SupportsOptions(self, browser_options):
"""Tests for extension support."""
raise NotImplementedError()
def IsRemote(self):
return False
def RunRemote(self):
pass
def UpdateExecutableIfNeeded(self):
pass
def last_modification_time(self):
return -1
def
|
(self, credentials_path):
self._credentials_path = credentials_path
|
SetCredentialsPath
|
identifier_name
|
possible_browser.py
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.app import possible_app
class PossibleBrowser(possible_app.PossibleApp):
"""A browser that can be controlled.
Call Create() to launch the browser and begin manipulating it..
"""
def __init__(self, browser_type, target_os, supports_tab_control):
super(PossibleBrowser, self).__init__(app_type=browser_type,
target_os=target_os)
self._supports_tab_control = supports_tab_control
self._credentials_path = None
def __repr__(self):
return 'PossibleBrowser(app_type=%s)' % self.app_type
@property
def browser_type(self):
return self.app_type
@property
def supports_tab_control(self):
|
def _InitPlatformIfNeeded(self):
raise NotImplementedError()
def Create(self, finder_options):
raise NotImplementedError()
def SupportsOptions(self, browser_options):
"""Tests for extension support."""
raise NotImplementedError()
def IsRemote(self):
return False
def RunRemote(self):
pass
def UpdateExecutableIfNeeded(self):
pass
def last_modification_time(self):
return -1
def SetCredentialsPath(self, credentials_path):
self._credentials_path = credentials_path
|
return self._supports_tab_control
|
identifier_body
|
possible_browser.py
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.internal.app import possible_app
class PossibleBrowser(possible_app.PossibleApp):
"""A browser that can be controlled.
Call Create() to launch the browser and begin manipulating it..
"""
def __init__(self, browser_type, target_os, supports_tab_control):
super(PossibleBrowser, self).__init__(app_type=browser_type,
target_os=target_os)
self._supports_tab_control = supports_tab_control
self._credentials_path = None
def __repr__(self):
return 'PossibleBrowser(app_type=%s)' % self.app_type
@property
def browser_type(self):
return self.app_type
@property
def supports_tab_control(self):
return self._supports_tab_control
def _InitPlatformIfNeeded(self):
raise NotImplementedError()
def Create(self, finder_options):
raise NotImplementedError()
def SupportsOptions(self, browser_options):
"""Tests for extension support."""
raise NotImplementedError()
def IsRemote(self):
return False
def RunRemote(self):
pass
def UpdateExecutableIfNeeded(self):
pass
def last_modification_time(self):
return -1
|
def SetCredentialsPath(self, credentials_path):
self._credentials_path = credentials_path
|
random_line_split
|
|
customBlockHandler.ts
|
import { Process, BlockHandler } from '../blockHandlers';
import { isSpaceOrTab, peek } from '../blockHelper';
import { unescapeString } from '../common';
import { CustomBlockNode, BlockNode } from '../node';
const reClosingCustomBlock = /^\$\$$/;
export const customBlock: BlockHandler = {
|
(parser, container: CustomBlockNode) {
const line = parser.currentLine;
const match = line.match(reClosingCustomBlock);
if (match) {
// closing custom block
parser.lastLineLength = match[0].length;
parser.finalize(container as BlockNode, parser.lineNumber);
return Process.Finished;
}
// skip optional spaces of custom block offset
let i = container.offset;
while (i > 0 && isSpaceOrTab(peek(line, parser.offset))) {
parser.advanceOffset(1, true);
i--;
}
return Process.Go;
},
finalize(_, block: CustomBlockNode) {
if (block.stringContent === null) {
return;
}
// first line becomes info string
const content = block.stringContent;
const newlinePos = content.indexOf('\n');
const firstLine = content.slice(0, newlinePos);
const rest = content.slice(newlinePos + 1);
const infoString = firstLine.match(/^(\s*)(.*)/);
block.info = unescapeString(infoString![2].trim());
block.literal = rest;
block.stringContent = null;
},
canContain() {
return false;
},
acceptsLines: true,
};
|
continue
|
identifier_name
|
customBlockHandler.ts
|
import { Process, BlockHandler } from '../blockHandlers';
import { isSpaceOrTab, peek } from '../blockHelper';
import { unescapeString } from '../common';
import { CustomBlockNode, BlockNode } from '../node';
const reClosingCustomBlock = /^\$\$$/;
export const customBlock: BlockHandler = {
continue(parser, container: CustomBlockNode) {
const line = parser.currentLine;
const match = line.match(reClosingCustomBlock);
if (match)
|
// skip optional spaces of custom block offset
let i = container.offset;
while (i > 0 && isSpaceOrTab(peek(line, parser.offset))) {
parser.advanceOffset(1, true);
i--;
}
return Process.Go;
},
finalize(_, block: CustomBlockNode) {
if (block.stringContent === null) {
return;
}
// first line becomes info string
const content = block.stringContent;
const newlinePos = content.indexOf('\n');
const firstLine = content.slice(0, newlinePos);
const rest = content.slice(newlinePos + 1);
const infoString = firstLine.match(/^(\s*)(.*)/);
block.info = unescapeString(infoString![2].trim());
block.literal = rest;
block.stringContent = null;
},
canContain() {
return false;
},
acceptsLines: true,
};
|
{
// closing custom block
parser.lastLineLength = match[0].length;
parser.finalize(container as BlockNode, parser.lineNumber);
return Process.Finished;
}
|
conditional_block
|
customBlockHandler.ts
|
import { Process, BlockHandler } from '../blockHandlers';
import { isSpaceOrTab, peek } from '../blockHelper';
import { unescapeString } from '../common';
import { CustomBlockNode, BlockNode } from '../node';
const reClosingCustomBlock = /^\$\$$/;
export const customBlock: BlockHandler = {
continue(parser, container: CustomBlockNode)
|
,
finalize(_, block: CustomBlockNode) {
if (block.stringContent === null) {
return;
}
// first line becomes info string
const content = block.stringContent;
const newlinePos = content.indexOf('\n');
const firstLine = content.slice(0, newlinePos);
const rest = content.slice(newlinePos + 1);
const infoString = firstLine.match(/^(\s*)(.*)/);
block.info = unescapeString(infoString![2].trim());
block.literal = rest;
block.stringContent = null;
},
canContain() {
return false;
},
acceptsLines: true,
};
|
{
const line = parser.currentLine;
const match = line.match(reClosingCustomBlock);
if (match) {
// closing custom block
parser.lastLineLength = match[0].length;
parser.finalize(container as BlockNode, parser.lineNumber);
return Process.Finished;
}
// skip optional spaces of custom block offset
let i = container.offset;
while (i > 0 && isSpaceOrTab(peek(line, parser.offset))) {
parser.advanceOffset(1, true);
i--;
}
return Process.Go;
}
|
identifier_body
|
customBlockHandler.ts
|
import { Process, BlockHandler } from '../blockHandlers';
import { isSpaceOrTab, peek } from '../blockHelper';
import { unescapeString } from '../common';
import { CustomBlockNode, BlockNode } from '../node';
const reClosingCustomBlock = /^\$\$$/;
export const customBlock: BlockHandler = {
continue(parser, container: CustomBlockNode) {
const line = parser.currentLine;
const match = line.match(reClosingCustomBlock);
if (match) {
// closing custom block
parser.lastLineLength = match[0].length;
parser.finalize(container as BlockNode, parser.lineNumber);
return Process.Finished;
}
// skip optional spaces of custom block offset
let i = container.offset;
while (i > 0 && isSpaceOrTab(peek(line, parser.offset))) {
parser.advanceOffset(1, true);
i--;
}
return Process.Go;
},
finalize(_, block: CustomBlockNode) {
if (block.stringContent === null) {
return;
}
// first line becomes info string
const content = block.stringContent;
const newlinePos = content.indexOf('\n');
const firstLine = content.slice(0, newlinePos);
const rest = content.slice(newlinePos + 1);
const infoString = firstLine.match(/^(\s*)(.*)/);
block.info = unescapeString(infoString![2].trim());
block.literal = rest;
block.stringContent = null;
},
canContain() {
return false;
|
},
acceptsLines: true,
};
|
random_line_split
|
|
pe598-split-divisibilities.py
|
#!/usr/bin/env python
# coding=utf-8
"""598. Split Divisibilities
https://projecteuler.net/problem=598
Consider the number 48.
There are five pairs of integers $a$ and $b$ ($a \leq b$) such that $a \times
b=48$: (1,48), (2,24), (3,16), (4,12) and (6,8).
It can be seen that both 6 and 8 have 4 divisors.
So of those five pairs one consists of two integers with the same number of
divisors.
In general:
Let $C(n)$ be the number of pairs of positive integers $a \times b=n$, ($a
\leq b$) such that $a$ and $b$ have the same number of divisors;
so $C(48)=1$.
|
You are given $C(10!)=3$: (1680, 2160), (1800, 2016) and (1890,1920).
Find $C(100!)$
"""
|
random_line_split
|
|
test.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import functools
import uuid
import unittest
import mox
import nose.plugins.skip
import stubout
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.openstack.common import timeutils
from cinder import service
from cinder import tests
from cinder.tests import fake_flags
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
cfg.BoolOpt('fake_tests',
default=True,
help='should we use everything for testing'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)
LOG = logging.getLogger(__name__)
class skip_test(object):
"""Decorator that skips a test."""
# TODO(tr3buchet): remember forever what comstud did here
def __init__(self, msg):
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
raise nose.SkipTest(self.message)
return _skipper
class skip_if(object):
"""Decorator that skips a test if condition is true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
class skip_unless(object):
"""Decorator that skips a test if condition is not true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if not self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
def skip_if_fake(func):
"""Decorator that skips a test if running in fake mode."""
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if FLAGS.fake_tests:
raise unittest.SkipTest('Test cannot be run in fake mode')
else:
return func(*args, **kw)
return _skipper
|
pass
class TestCase(unittest.TestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
fake_flags.set_defaults(FLAGS)
flags.parse_args([], default_config_files=[])
# NOTE(vish): We need a better method for creating fixtures for tests
# now that we have some required db setup for the system
# to work properly.
self.start = timeutils.utcnow()
tests.reset_db()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.injected = []
self._services = []
def tearDown(self):
"""Runs after each test method to tear down test environment."""
try:
self.mox.UnsetStubs()
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
super(TestCase, self).tearDown()
finally:
# Reset any overridden flags
FLAGS.reset()
# Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
# Kill any services
for x in self._services:
try:
x.kill()
except Exception:
pass
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
for k, v in kw.iteritems():
FLAGS.set_override(k, v)
def start_service(self, name, host=None, **kwargs):
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'cinder-%s' % name)
svc = service.Service.create(**kwargs)
svc.start()
self._services.append(svc)
return svc
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = str(d1)
d2str = str(d2)
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' % locals())
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' % locals())
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
# If both values aren't convertable to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" % locals())
def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
"""Assert a list of dicts are equivalent."""
def raise_assertion(msg):
L1str = str(L1)
L2str = str(L2)
base_msg = ('List of dictionaries do not match: %(msg)s '
'L1: %(L1str)s L2: %(L2str)s' % locals())
raise AssertionError(base_msg)
L1count = len(L1)
L2count = len(L2)
if L1count != L2count:
raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
'len(L2)=%(L2count)d' % locals())
for d1, d2 in zip(L1, L2):
self.assertDictMatch(d1, d2, approx_equal=approx_equal,
tolerance=tolerance)
def assertSubDictMatch(self, sub_dict, super_dict):
"""Assert a sub_dict is subset of super_dict."""
self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys())))
for k, sub_value in sub_dict.items():
super_value = super_dict[k]
if isinstance(sub_value, dict):
self.assertSubDictMatch(sub_value, super_value)
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
self.assertEqual(sub_value, super_value)
def assertIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' in 'b'"""
try:
f = super(TestCase, self).assertIn
except AttributeError:
self.assertTrue(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
def assertNotIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' NOT in 'b'"""
try:
f = super(TestCase, self).assertNotIn
except AttributeError:
self.assertFalse(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
|
class TestingException(Exception):
|
random_line_split
|
test.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import functools
import uuid
import unittest
import mox
import nose.plugins.skip
import stubout
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.openstack.common import timeutils
from cinder import service
from cinder import tests
from cinder.tests import fake_flags
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
cfg.BoolOpt('fake_tests',
default=True,
help='should we use everything for testing'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)
LOG = logging.getLogger(__name__)
class skip_test(object):
"""Decorator that skips a test."""
# TODO(tr3buchet): remember forever what comstud did here
def __init__(self, msg):
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
raise nose.SkipTest(self.message)
return _skipper
class skip_if(object):
"""Decorator that skips a test if condition is true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
class skip_unless(object):
"""Decorator that skips a test if condition is not true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if not self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
def skip_if_fake(func):
"""Decorator that skips a test if running in fake mode."""
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if FLAGS.fake_tests:
raise unittest.SkipTest('Test cannot be run in fake mode')
else:
return func(*args, **kw)
return _skipper
class TestingException(Exception):
pass
class TestCase(unittest.TestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
fake_flags.set_defaults(FLAGS)
flags.parse_args([], default_config_files=[])
# NOTE(vish): We need a better method for creating fixtures for tests
# now that we have some required db setup for the system
# to work properly.
self.start = timeutils.utcnow()
tests.reset_db()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.injected = []
self._services = []
def tearDown(self):
|
def flags(self, **kw):
"""Override flag variables for a test."""
for k, v in kw.iteritems():
FLAGS.set_override(k, v)
def start_service(self, name, host=None, **kwargs):
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'cinder-%s' % name)
svc = service.Service.create(**kwargs)
svc.start()
self._services.append(svc)
return svc
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = str(d1)
d2str = str(d2)
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' % locals())
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' % locals())
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
# If both values aren't convertable to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" % locals())
def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
"""Assert a list of dicts are equivalent."""
def raise_assertion(msg):
L1str = str(L1)
L2str = str(L2)
base_msg = ('List of dictionaries do not match: %(msg)s '
'L1: %(L1str)s L2: %(L2str)s' % locals())
raise AssertionError(base_msg)
L1count = len(L1)
L2count = len(L2)
if L1count != L2count:
raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
'len(L2)=%(L2count)d' % locals())
for d1, d2 in zip(L1, L2):
self.assertDictMatch(d1, d2, approx_equal=approx_equal,
tolerance=tolerance)
def assertSubDictMatch(self, sub_dict, super_dict):
"""Assert a sub_dict is subset of super_dict."""
self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys())))
for k, sub_value in sub_dict.items():
super_value = super_dict[k]
if isinstance(sub_value, dict):
self.assertSubDictMatch(sub_value, super_value)
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
self.assertEqual(sub_value, super_value)
def assertIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' in 'b'"""
try:
f = super(TestCase, self).assertIn
except AttributeError:
self.assertTrue(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
def assertNotIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' NOT in 'b'"""
try:
f = super(TestCase, self).assertNotIn
except AttributeError:
self.assertFalse(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
|
"""Runs after each test method to tear down test environment."""
try:
self.mox.UnsetStubs()
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
super(TestCase, self).tearDown()
finally:
# Reset any overridden flags
FLAGS.reset()
# Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
# Kill any services
for x in self._services:
try:
x.kill()
except Exception:
pass
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
|
identifier_body
|
test.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import functools
import uuid
import unittest
import mox
import nose.plugins.skip
import stubout
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.openstack.common import timeutils
from cinder import service
from cinder import tests
from cinder.tests import fake_flags
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
cfg.BoolOpt('fake_tests',
default=True,
help='should we use everything for testing'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)
LOG = logging.getLogger(__name__)
class skip_test(object):
"""Decorator that skips a test."""
# TODO(tr3buchet): remember forever what comstud did here
def __init__(self, msg):
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
raise nose.SkipTest(self.message)
return _skipper
class skip_if(object):
"""Decorator that skips a test if condition is true."""
def
|
(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
class skip_unless(object):
"""Decorator that skips a test if condition is not true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if not self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
def skip_if_fake(func):
"""Decorator that skips a test if running in fake mode."""
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if FLAGS.fake_tests:
raise unittest.SkipTest('Test cannot be run in fake mode')
else:
return func(*args, **kw)
return _skipper
class TestingException(Exception):
pass
class TestCase(unittest.TestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
fake_flags.set_defaults(FLAGS)
flags.parse_args([], default_config_files=[])
# NOTE(vish): We need a better method for creating fixtures for tests
# now that we have some required db setup for the system
# to work properly.
self.start = timeutils.utcnow()
tests.reset_db()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.injected = []
self._services = []
def tearDown(self):
"""Runs after each test method to tear down test environment."""
try:
self.mox.UnsetStubs()
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
super(TestCase, self).tearDown()
finally:
# Reset any overridden flags
FLAGS.reset()
# Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
# Kill any services
for x in self._services:
try:
x.kill()
except Exception:
pass
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
for k, v in kw.iteritems():
FLAGS.set_override(k, v)
def start_service(self, name, host=None, **kwargs):
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'cinder-%s' % name)
svc = service.Service.create(**kwargs)
svc.start()
self._services.append(svc)
return svc
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = str(d1)
d2str = str(d2)
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' % locals())
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' % locals())
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
# If both values aren't convertable to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" % locals())
def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
"""Assert a list of dicts are equivalent."""
def raise_assertion(msg):
L1str = str(L1)
L2str = str(L2)
base_msg = ('List of dictionaries do not match: %(msg)s '
'L1: %(L1str)s L2: %(L2str)s' % locals())
raise AssertionError(base_msg)
L1count = len(L1)
L2count = len(L2)
if L1count != L2count:
raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
'len(L2)=%(L2count)d' % locals())
for d1, d2 in zip(L1, L2):
self.assertDictMatch(d1, d2, approx_equal=approx_equal,
tolerance=tolerance)
def assertSubDictMatch(self, sub_dict, super_dict):
"""Assert a sub_dict is subset of super_dict."""
self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys())))
for k, sub_value in sub_dict.items():
super_value = super_dict[k]
if isinstance(sub_value, dict):
self.assertSubDictMatch(sub_value, super_value)
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
self.assertEqual(sub_value, super_value)
def assertIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' in 'b'"""
try:
f = super(TestCase, self).assertIn
except AttributeError:
self.assertTrue(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
def assertNotIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' NOT in 'b'"""
try:
f = super(TestCase, self).assertNotIn
except AttributeError:
self.assertFalse(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
|
__init__
|
identifier_name
|
test.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import functools
import uuid
import unittest
import mox
import nose.plugins.skip
import stubout
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import cfg
from cinder.openstack.common import timeutils
from cinder import service
from cinder import tests
from cinder.tests import fake_flags
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
cfg.BoolOpt('fake_tests',
default=True,
help='should we use everything for testing'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)
LOG = logging.getLogger(__name__)
class skip_test(object):
"""Decorator that skips a test."""
# TODO(tr3buchet): remember forever what comstud did here
def __init__(self, msg):
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
raise nose.SkipTest(self.message)
return _skipper
class skip_if(object):
"""Decorator that skips a test if condition is true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
class skip_unless(object):
"""Decorator that skips a test if condition is not true."""
def __init__(self, condition, msg):
self.condition = condition
self.message = msg
def __call__(self, func):
@functools.wraps(func)
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if not self.condition:
raise nose.SkipTest(self.message)
func(*args, **kw)
return _skipper
def skip_if_fake(func):
"""Decorator that skips a test if running in fake mode."""
def _skipper(*args, **kw):
"""Wrapped skipper function."""
if FLAGS.fake_tests:
raise unittest.SkipTest('Test cannot be run in fake mode')
else:
return func(*args, **kw)
return _skipper
class TestingException(Exception):
pass
class TestCase(unittest.TestCase):
"""Test case base class for all unit tests."""
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
fake_flags.set_defaults(FLAGS)
flags.parse_args([], default_config_files=[])
# NOTE(vish): We need a better method for creating fixtures for tests
# now that we have some required db setup for the system
# to work properly.
self.start = timeutils.utcnow()
tests.reset_db()
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.injected = []
self._services = []
def tearDown(self):
"""Runs after each test method to tear down test environment."""
try:
self.mox.UnsetStubs()
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
super(TestCase, self).tearDown()
finally:
# Reset any overridden flags
FLAGS.reset()
# Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
# Kill any services
for x in self._services:
try:
x.kill()
except Exception:
pass
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
|
def flags(self, **kw):
"""Override flag variables for a test."""
for k, v in kw.iteritems():
FLAGS.set_override(k, v)
def start_service(self, name, host=None, **kwargs):
host = host and host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'cinder-%s' % name)
svc = service.Service.create(**kwargs)
svc.start()
self._services.append(svc)
return svc
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
def raise_assertion(msg):
d1str = str(d1)
d2str = str(d2)
base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
'd2: %(d2str)s' % locals())
raise AssertionError(base_msg)
d1keys = set(d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = d1keys - d2keys
d2only = d2keys - d1keys
raise_assertion('Keys in d1 and not d2: %(d1only)s. '
'Keys in d2 and not d1: %(d2only)s' % locals())
for key in d1keys:
d1value = d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= tolerance
except (ValueError, TypeError):
# If both values aren't convertable to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
self.assertDictMatch(d1value, d2value)
elif 'DONTCARE' in (d1value, d2value):
continue
elif approx_equal and within_tolerance:
continue
elif d1value != d2value:
raise_assertion("d1['%(key)s']=%(d1value)s != "
"d2['%(key)s']=%(d2value)s" % locals())
def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
"""Assert a list of dicts are equivalent."""
def raise_assertion(msg):
L1str = str(L1)
L2str = str(L2)
base_msg = ('List of dictionaries do not match: %(msg)s '
'L1: %(L1str)s L2: %(L2str)s' % locals())
raise AssertionError(base_msg)
L1count = len(L1)
L2count = len(L2)
if L1count != L2count:
raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
'len(L2)=%(L2count)d' % locals())
for d1, d2 in zip(L1, L2):
self.assertDictMatch(d1, d2, approx_equal=approx_equal,
tolerance=tolerance)
def assertSubDictMatch(self, sub_dict, super_dict):
"""Assert a sub_dict is subset of super_dict."""
self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys())))
for k, sub_value in sub_dict.items():
super_value = super_dict[k]
if isinstance(sub_value, dict):
self.assertSubDictMatch(sub_value, super_value)
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
self.assertEqual(sub_value, super_value)
def assertIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' in 'b'"""
try:
f = super(TestCase, self).assertIn
except AttributeError:
self.assertTrue(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
def assertNotIn(self, a, b, *args, **kwargs):
"""Python < v2.7 compatibility. Assert 'a' NOT in 'b'"""
try:
f = super(TestCase, self).assertNotIn
except AttributeError:
self.assertFalse(a in b, *args, **kwargs)
else:
f(a, b, *args, **kwargs)
|
del self.__dict__[key]
|
conditional_block
|
web-storage.js
|
function encode (value) {
if (Object.prototype.toString.call(value) === '[object Date]') {
return '__q_date|' + value.toUTCString()
}
if (Object.prototype.toString.call(value) === '[object RegExp]') {
return '__q_expr|' + value.source
}
if (typeof value === 'number') {
return '__q_numb|' + value
}
if (typeof value === 'boolean') {
return '__q_bool|' + (value ? '1' : '0')
}
if (typeof value === 'string') {
return '__q_strn|' + value
}
if (typeof value === 'function') {
return '__q_strn|' + value.toString()
}
if (value === Object(value)) {
return '__q_objt|' + JSON.stringify(value)
}
// hmm, we don't know what to do with it,
// so just return it as is
return value
}
function decode (value) {
let type, length, source
length = value.length
if (length < 10) {
// then it wasn't encoded by us
return value
}
type = value.substr(0, 8)
source = value.substring(9)
switch (type) {
case '__q_date':
return new Date(source)
case '__q_expr':
return new RegExp(source)
case '__q_numb':
return Number(source)
case '__q_bool':
return Boolean(source === '1')
case '__q_strn':
return '' + source
case '__q_objt':
return JSON.parse(source)
default:
// hmm, we reached here, we don't know the type,
// then it means it wasn't encoded by us, so just
// return whatever value it is
return value
}
}
function generateFunctions (fn) {
return {
local: fn('local'),
session: fn('session')
}
}
let
hasStorageItem = generateFunctions(
(type) => (key) => window[type + 'Storage'].getItem(key) !== null
),
getStorageLength = generateFunctions(
(type) => () => window[type + 'Storage'].length
),
getStorageItem = generateFunctions((type) => {
let
hasFn = hasStorageItem[type],
storage = window[type + 'Storage']
return (key) => {
if (hasFn(key)) {
return decode(storage.getItem(key))
}
return null
}
}),
getStorageAtIndex = generateFunctions((type) => {
let
lengthFn = getStorageLength[type],
getItemFn = getStorageItem[type],
storage = window[type + 'Storage']
return (index) => {
if (index < lengthFn())
|
}
}),
getAllStorageItems = generateFunctions((type) => {
let
lengthFn = getStorageLength[type],
storage = window[type + 'Storage'],
getItemFn = getStorageItem[type]
return () => {
let
result = {},
key,
length = lengthFn()
for (let i = 0; i < length; i++) {
key = storage.key(i)
result[key] = getItemFn(key)
}
return result
}
}),
setStorageItem = generateFunctions((type) => {
let storage = window[type + 'Storage']
return (key, value) => { storage.setItem(key, encode(value)) }
}),
removeStorageItem = generateFunctions((type) => {
let storage = window[type + 'Storage']
return (key) => { storage.removeItem(key) }
}),
clearStorage = generateFunctions((type) => {
let storage = window[type + 'Storage']
return () => { storage.clear() }
}),
storageIsEmpty = generateFunctions((type) => {
let getLengthFn = getStorageLength[type]
return () => getLengthFn() === 0
})
export var LocalStorage = {
has: hasStorageItem.local,
get: {
length: getStorageLength.local,
item: getStorageItem.local,
index: getStorageAtIndex.local,
all: getAllStorageItems.local
},
set: setStorageItem.local,
remove: removeStorageItem.local,
clear: clearStorage.local,
isEmpty: storageIsEmpty.local
}
export var SessionStorage = { // eslint-disable-line one-var
has: hasStorageItem.session,
get: {
length: getStorageLength.session,
item: getStorageItem.session,
index: getStorageAtIndex.session,
all: getAllStorageItems.session
},
set: setStorageItem.session,
remove: removeStorageItem.session,
clear: clearStorage.session,
isEmpty: storageIsEmpty.session
}
|
{
return getItemFn(storage.key(index))
}
|
conditional_block
|
web-storage.js
|
function encode (value) {
if (Object.prototype.toString.call(value) === '[object Date]') {
return '__q_date|' + value.toUTCString()
}
if (Object.prototype.toString.call(value) === '[object RegExp]') {
return '__q_expr|' + value.source
}
if (typeof value === 'number') {
return '__q_numb|' + value
}
if (typeof value === 'boolean') {
return '__q_bool|' + (value ? '1' : '0')
}
if (typeof value === 'string') {
return '__q_strn|' + value
}
if (typeof value === 'function') {
return '__q_strn|' + value.toString()
}
if (value === Object(value)) {
return '__q_objt|' + JSON.stringify(value)
}
// hmm, we don't know what to do with it,
// so just return it as is
return value
}
function decode (value) {
let type, length, source
length = value.length
if (length < 10) {
// then it wasn't encoded by us
return value
}
type = value.substr(0, 8)
source = value.substring(9)
switch (type) {
case '__q_date':
return new Date(source)
case '__q_expr':
return new RegExp(source)
case '__q_numb':
return Number(source)
case '__q_bool':
return Boolean(source === '1')
case '__q_strn':
return '' + source
case '__q_objt':
return JSON.parse(source)
default:
// hmm, we reached here, we don't know the type,
// then it means it wasn't encoded by us, so just
// return whatever value it is
return value
}
}
function generateFunctions (fn)
|
let
hasStorageItem = generateFunctions(
(type) => (key) => window[type + 'Storage'].getItem(key) !== null
),
getStorageLength = generateFunctions(
(type) => () => window[type + 'Storage'].length
),
getStorageItem = generateFunctions((type) => {
let
hasFn = hasStorageItem[type],
storage = window[type + 'Storage']
return (key) => {
if (hasFn(key)) {
return decode(storage.getItem(key))
}
return null
}
}),
getStorageAtIndex = generateFunctions((type) => {
let
lengthFn = getStorageLength[type],
getItemFn = getStorageItem[type],
storage = window[type + 'Storage']
return (index) => {
if (index < lengthFn()) {
return getItemFn(storage.key(index))
}
}
}),
getAllStorageItems = generateFunctions((type) => {
let
lengthFn = getStorageLength[type],
storage = window[type + 'Storage'],
getItemFn = getStorageItem[type]
return () => {
let
result = {},
key,
length = lengthFn()
for (let i = 0; i < length; i++) {
key = storage.key(i)
result[key] = getItemFn(key)
}
return result
}
}),
setStorageItem = generateFunctions((type) => {
let storage = window[type + 'Storage']
return (key, value) => { storage.setItem(key, encode(value)) }
}),
removeStorageItem = generateFunctions((type) => {
let storage = window[type + 'Storage']
return (key) => { storage.removeItem(key) }
}),
clearStorage = generateFunctions((type) => {
let storage = window[type + 'Storage']
return () => { storage.clear() }
}),
storageIsEmpty = generateFunctions((type) => {
let getLengthFn = getStorageLength[type]
return () => getLengthFn() === 0
})
export var LocalStorage = {
has: hasStorageItem.local,
get: {
length: getStorageLength.local,
item: getStorageItem.local,
index: getStorageAtIndex.local,
all: getAllStorageItems.local
},
set: setStorageItem.local,
remove: removeStorageItem.local,
clear: clearStorage.local,
isEmpty: storageIsEmpty.local
}
export var SessionStorage = { // eslint-disable-line one-var
has: hasStorageItem.session,
get: {
length: getStorageLength.session,
item: getStorageItem.session,
index: getStorageAtIndex.session,
all: getAllStorageItems.session
},
set: setStorageItem.session,
remove: removeStorageItem.session,
clear: clearStorage.session,
isEmpty: storageIsEmpty.session
}
|
{
return {
local: fn('local'),
session: fn('session')
}
}
|
identifier_body
|
web-storage.js
|
function encode (value) {
if (Object.prototype.toString.call(value) === '[object Date]') {
return '__q_date|' + value.toUTCString()
}
if (Object.prototype.toString.call(value) === '[object RegExp]') {
return '__q_expr|' + value.source
}
if (typeof value === 'number') {
return '__q_numb|' + value
}
if (typeof value === 'boolean') {
return '__q_bool|' + (value ? '1' : '0')
}
if (typeof value === 'string') {
return '__q_strn|' + value
}
if (typeof value === 'function') {
return '__q_strn|' + value.toString()
}
if (value === Object(value)) {
return '__q_objt|' + JSON.stringify(value)
}
// hmm, we don't know what to do with it,
// so just return it as is
return value
}
function decode (value) {
let type, length, source
length = value.length
if (length < 10) {
// then it wasn't encoded by us
return value
}
type = value.substr(0, 8)
source = value.substring(9)
switch (type) {
case '__q_date':
return new Date(source)
case '__q_expr':
return new RegExp(source)
case '__q_numb':
return Number(source)
case '__q_bool':
return Boolean(source === '1')
case '__q_strn':
return '' + source
case '__q_objt':
return JSON.parse(source)
default:
// hmm, we reached here, we don't know the type,
// then it means it wasn't encoded by us, so just
// return whatever value it is
return value
}
}
function
|
(fn) {
return {
local: fn('local'),
session: fn('session')
}
}
let
hasStorageItem = generateFunctions(
(type) => (key) => window[type + 'Storage'].getItem(key) !== null
),
getStorageLength = generateFunctions(
(type) => () => window[type + 'Storage'].length
),
getStorageItem = generateFunctions((type) => {
let
hasFn = hasStorageItem[type],
storage = window[type + 'Storage']
return (key) => {
if (hasFn(key)) {
return decode(storage.getItem(key))
}
return null
}
}),
getStorageAtIndex = generateFunctions((type) => {
let
lengthFn = getStorageLength[type],
getItemFn = getStorageItem[type],
storage = window[type + 'Storage']
return (index) => {
if (index < lengthFn()) {
return getItemFn(storage.key(index))
}
}
}),
getAllStorageItems = generateFunctions((type) => {
let
lengthFn = getStorageLength[type],
storage = window[type + 'Storage'],
getItemFn = getStorageItem[type]
return () => {
let
result = {},
key,
length = lengthFn()
for (let i = 0; i < length; i++) {
key = storage.key(i)
result[key] = getItemFn(key)
}
return result
}
}),
setStorageItem = generateFunctions((type) => {
let storage = window[type + 'Storage']
return (key, value) => { storage.setItem(key, encode(value)) }
}),
removeStorageItem = generateFunctions((type) => {
let storage = window[type + 'Storage']
return (key) => { storage.removeItem(key) }
}),
clearStorage = generateFunctions((type) => {
let storage = window[type + 'Storage']
return () => { storage.clear() }
}),
storageIsEmpty = generateFunctions((type) => {
let getLengthFn = getStorageLength[type]
return () => getLengthFn() === 0
})
export var LocalStorage = {
has: hasStorageItem.local,
get: {
length: getStorageLength.local,
item: getStorageItem.local,
index: getStorageAtIndex.local,
all: getAllStorageItems.local
},
set: setStorageItem.local,
remove: removeStorageItem.local,
clear: clearStorage.local,
isEmpty: storageIsEmpty.local
}
export var SessionStorage = { // eslint-disable-line one-var
has: hasStorageItem.session,
get: {
length: getStorageLength.session,
item: getStorageItem.session,
index: getStorageAtIndex.session,
all: getAllStorageItems.session
},
set: setStorageItem.session,
remove: removeStorageItem.session,
clear: clearStorage.session,
isEmpty: storageIsEmpty.session
}
|
generateFunctions
|
identifier_name
|
web-storage.js
|
function encode (value) {
if (Object.prototype.toString.call(value) === '[object Date]') {
return '__q_date|' + value.toUTCString()
}
if (Object.prototype.toString.call(value) === '[object RegExp]') {
return '__q_expr|' + value.source
}
if (typeof value === 'number') {
return '__q_numb|' + value
}
if (typeof value === 'boolean') {
return '__q_bool|' + (value ? '1' : '0')
}
if (typeof value === 'string') {
return '__q_strn|' + value
}
if (typeof value === 'function') {
return '__q_strn|' + value.toString()
}
if (value === Object(value)) {
return '__q_objt|' + JSON.stringify(value)
}
// hmm, we don't know what to do with it,
// so just return it as is
return value
}
function decode (value) {
let type, length, source
length = value.length
if (length < 10) {
// then it wasn't encoded by us
return value
}
type = value.substr(0, 8)
source = value.substring(9)
switch (type) {
case '__q_date':
return new Date(source)
case '__q_expr':
return new RegExp(source)
case '__q_numb':
return Number(source)
case '__q_bool':
return Boolean(source === '1')
case '__q_strn':
return '' + source
case '__q_objt':
return JSON.parse(source)
default:
// hmm, we reached here, we don't know the type,
// then it means it wasn't encoded by us, so just
// return whatever value it is
return value
}
}
function generateFunctions (fn) {
return {
local: fn('local'),
session: fn('session')
}
}
let
hasStorageItem = generateFunctions(
(type) => (key) => window[type + 'Storage'].getItem(key) !== null
),
getStorageLength = generateFunctions(
(type) => () => window[type + 'Storage'].length
),
getStorageItem = generateFunctions((type) => {
let
hasFn = hasStorageItem[type],
storage = window[type + 'Storage']
return (key) => {
if (hasFn(key)) {
return decode(storage.getItem(key))
}
return null
}
}),
getStorageAtIndex = generateFunctions((type) => {
let
lengthFn = getStorageLength[type],
getItemFn = getStorageItem[type],
storage = window[type + 'Storage']
return (index) => {
if (index < lengthFn()) {
return getItemFn(storage.key(index))
}
}
}),
getAllStorageItems = generateFunctions((type) => {
let
lengthFn = getStorageLength[type],
storage = window[type + 'Storage'],
getItemFn = getStorageItem[type]
return () => {
let
|
for (let i = 0; i < length; i++) {
key = storage.key(i)
result[key] = getItemFn(key)
}
return result
}
}),
setStorageItem = generateFunctions((type) => {
let storage = window[type + 'Storage']
return (key, value) => { storage.setItem(key, encode(value)) }
}),
removeStorageItem = generateFunctions((type) => {
let storage = window[type + 'Storage']
return (key) => { storage.removeItem(key) }
}),
clearStorage = generateFunctions((type) => {
let storage = window[type + 'Storage']
return () => { storage.clear() }
}),
storageIsEmpty = generateFunctions((type) => {
let getLengthFn = getStorageLength[type]
return () => getLengthFn() === 0
})
export var LocalStorage = {
has: hasStorageItem.local,
get: {
length: getStorageLength.local,
item: getStorageItem.local,
index: getStorageAtIndex.local,
all: getAllStorageItems.local
},
set: setStorageItem.local,
remove: removeStorageItem.local,
clear: clearStorage.local,
isEmpty: storageIsEmpty.local
}
export var SessionStorage = { // eslint-disable-line one-var
has: hasStorageItem.session,
get: {
length: getStorageLength.session,
item: getStorageItem.session,
index: getStorageAtIndex.session,
all: getAllStorageItems.session
},
set: setStorageItem.session,
remove: removeStorageItem.session,
clear: clearStorage.session,
isEmpty: storageIsEmpty.session
}
|
result = {},
key,
length = lengthFn()
|
random_line_split
|
viewport.rs
|
//! Provides a utility method for calculating native viewport size when the window is resized.
use ppu::{SCREEN_WIDTH, SCREEN_HEIGHT};
/// A simple rectangle
pub struct
|
{
pub x: u32,
pub y: u32,
pub w: u32,
pub h: u32,
}
impl Viewport {
/// Calculates a viewport to use for a window of the given size.
///
/// The returned viewport will have the native SNES aspect ratio and still fill the window on at
/// least one axis. Basically, this calculates the black bars to apply to the window to make the
/// center have the native SNES ratio.
pub fn for_window_size(w: u32, h: u32) -> Self {
// FIXME Not sure if floats are a good idea here
let w = w as f32;
let h = h as f32;
const NATIVE_RATIO: f32 = SCREEN_WIDTH as f32 / SCREEN_HEIGHT as f32;
let ratio = w / h;
let view_w;
let view_h;
if ratio > NATIVE_RATIO {
// Too wide
view_h = h;
view_w = h * NATIVE_RATIO;
} else {
// Too high
view_w = w;
view_h = w / NATIVE_RATIO;
}
let border_x = (w - view_w).round() as u32 / 2;
let border_y = (h - view_h).round() as u32 / 2;
let view_w = view_w.round() as u32;
let view_h = view_h.round() as u32;
Viewport {
x: border_x as u32,
y: border_y as u32,
w: view_w,
h: view_h,
}
}
}
|
Viewport
|
identifier_name
|
viewport.rs
|
//! Provides a utility method for calculating native viewport size when the window is resized.
use ppu::{SCREEN_WIDTH, SCREEN_HEIGHT};
/// A simple rectangle
pub struct Viewport {
pub x: u32,
pub y: u32,
pub w: u32,
pub h: u32,
}
impl Viewport {
/// Calculates a viewport to use for a window of the given size.
///
/// The returned viewport will have the native SNES aspect ratio and still fill the window on at
/// least one axis. Basically, this calculates the black bars to apply to the window to make the
/// center have the native SNES ratio.
pub fn for_window_size(w: u32, h: u32) -> Self {
// FIXME Not sure if floats are a good idea here
let w = w as f32;
let h = h as f32;
const NATIVE_RATIO: f32 = SCREEN_WIDTH as f32 / SCREEN_HEIGHT as f32;
let ratio = w / h;
let view_w;
let view_h;
if ratio > NATIVE_RATIO
|
else {
// Too high
view_w = w;
view_h = w / NATIVE_RATIO;
}
let border_x = (w - view_w).round() as u32 / 2;
let border_y = (h - view_h).round() as u32 / 2;
let view_w = view_w.round() as u32;
let view_h = view_h.round() as u32;
Viewport {
x: border_x as u32,
y: border_y as u32,
w: view_w,
h: view_h,
}
}
}
|
{
// Too wide
view_h = h;
view_w = h * NATIVE_RATIO;
}
|
conditional_block
|
viewport.rs
|
//! Provides a utility method for calculating native viewport size when the window is resized.
use ppu::{SCREEN_WIDTH, SCREEN_HEIGHT};
/// A simple rectangle
pub struct Viewport {
pub x: u32,
pub y: u32,
pub w: u32,
|
}
impl Viewport {
/// Calculates a viewport to use for a window of the given size.
///
/// The returned viewport will have the native SNES aspect ratio and still fill the window on at
/// least one axis. Basically, this calculates the black bars to apply to the window to make the
/// center have the native SNES ratio.
pub fn for_window_size(w: u32, h: u32) -> Self {
// FIXME Not sure if floats are a good idea here
let w = w as f32;
let h = h as f32;
const NATIVE_RATIO: f32 = SCREEN_WIDTH as f32 / SCREEN_HEIGHT as f32;
let ratio = w / h;
let view_w;
let view_h;
if ratio > NATIVE_RATIO {
// Too wide
view_h = h;
view_w = h * NATIVE_RATIO;
} else {
// Too high
view_w = w;
view_h = w / NATIVE_RATIO;
}
let border_x = (w - view_w).round() as u32 / 2;
let border_y = (h - view_h).round() as u32 / 2;
let view_w = view_w.round() as u32;
let view_h = view_h.round() as u32;
Viewport {
x: border_x as u32,
y: border_y as u32,
w: view_w,
h: view_h,
}
}
}
|
pub h: u32,
|
random_line_split
|
config.rs
|
use toml;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
#[derive(Debug, Deserialize)]
pub struct Config {
pub uplink: Uplink,
pub plugins: Option<Vec<Plugin>>,
}
#[derive(Debug, Deserialize)]
pub struct Uplink {
pub ip: String,
pub port: i32,
pub protocol: String,
pub hostname: String,
pub description: String,
pub send_pass: String,
pub recv_pass: String,
pub numeric: Option<String>,
}
#[derive(Debug, Deserialize)]
pub struct Plugin {
pub file: String,
pub load: Option<bool>,
}
pub fn get_protocol() -> Result<String, Box<::std::error::Error>> {
let file = File::open("etc/nero.toml")?;
let mut buf_reader = BufReader::new(file);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents)?;
let cfg: Config = toml::from_str(&contents)?;
Ok(cfg.uplink.protocol)
}
pub fn
|
() -> Result<Result<Config, toml::de::Error>, ::std::io::Error> {
let file = File::open("etc/nero.toml")?;
let mut buf_reader = BufReader::new(file);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents)?;
Ok(toml::from_str(&contents))
}
|
load
|
identifier_name
|
config.rs
|
use toml;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
#[derive(Debug, Deserialize)]
pub struct Config {
pub uplink: Uplink,
pub plugins: Option<Vec<Plugin>>,
}
#[derive(Debug, Deserialize)]
pub struct Uplink {
pub ip: String,
pub port: i32,
pub protocol: String,
pub hostname: String,
pub description: String,
pub send_pass: String,
pub recv_pass: String,
pub numeric: Option<String>,
}
#[derive(Debug, Deserialize)]
pub struct Plugin {
pub file: String,
pub load: Option<bool>,
}
pub fn get_protocol() -> Result<String, Box<::std::error::Error>> {
let file = File::open("etc/nero.toml")?;
|
let cfg: Config = toml::from_str(&contents)?;
Ok(cfg.uplink.protocol)
}
pub fn load() -> Result<Result<Config, toml::de::Error>, ::std::io::Error> {
let file = File::open("etc/nero.toml")?;
let mut buf_reader = BufReader::new(file);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents)?;
Ok(toml::from_str(&contents))
}
|
let mut buf_reader = BufReader::new(file);
let mut contents = String::new();
buf_reader.read_to_string(&mut contents)?;
|
random_line_split
|
alunos.component.ts
|
import { Component, OnInit } from '@angular/core';
import { Aluno } from './shared/aluno.model';
import { AlunoService } from './shared/aluno.service';
@Component({
selector: 'app-alunos',
templateUrl: './alunos.component.html',
styleUrls: ['./alunos.component.css'],
})
export class AlunosComponent implements OnInit {
public aluno = new Aluno();
public alunos: Aluno[];
public cpfDuplicado = false;
public constructor(private alunoService: AlunoService) {
}
public criarAluno(a: Aluno): void {
this.alunoService.cadastrar(a)
.then(value => {
this.alunos.push(value);
this.aluno = new Aluno();
})
.catch(reason => {
const res = reason.json();
if (res.failure) {
this.cpfDuplicado = true;
setTimeout(() => this.cpfDuplicado = false, 4000);
} else
|
});
}
public ngOnInit(): void {
this.alunoService.getAlunos()
.then(value => this.alunos = value)
.catch(reason => alert(reason.json().failure || reason));
}
}
|
{
alert(reason);
}
|
conditional_block
|
alunos.component.ts
|
import { Component, OnInit } from '@angular/core';
import { Aluno } from './shared/aluno.model';
import { AlunoService } from './shared/aluno.service';
@Component({
selector: 'app-alunos',
templateUrl: './alunos.component.html',
styleUrls: ['./alunos.component.css'],
})
export class AlunosComponent implements OnInit {
public aluno = new Aluno();
public alunos: Aluno[];
public cpfDuplicado = false;
public constructor(private alunoService: AlunoService) {
}
public criarAluno(a: Aluno): void {
this.alunoService.cadastrar(a)
.then(value => {
this.alunos.push(value);
this.aluno = new Aluno();
})
.catch(reason => {
const res = reason.json();
if (res.failure) {
this.cpfDuplicado = true;
setTimeout(() => this.cpfDuplicado = false, 4000);
} else {
alert(reason);
}
});
}
public
|
(): void {
this.alunoService.getAlunos()
.then(value => this.alunos = value)
.catch(reason => alert(reason.json().failure || reason));
}
}
|
ngOnInit
|
identifier_name
|
alunos.component.ts
|
import { Component, OnInit } from '@angular/core';
import { Aluno } from './shared/aluno.model';
import { AlunoService } from './shared/aluno.service';
@Component({
selector: 'app-alunos',
templateUrl: './alunos.component.html',
styleUrls: ['./alunos.component.css'],
})
export class AlunosComponent implements OnInit {
public aluno = new Aluno();
public alunos: Aluno[];
public cpfDuplicado = false;
public constructor(private alunoService: AlunoService) {
}
public criarAluno(a: Aluno): void {
this.alunoService.cadastrar(a)
.then(value => {
this.alunos.push(value);
this.aluno = new Aluno();
})
.catch(reason => {
const res = reason.json();
if (res.failure) {
this.cpfDuplicado = true;
setTimeout(() => this.cpfDuplicado = false, 4000);
} else {
alert(reason);
}
});
}
public ngOnInit(): void
|
}
|
{
this.alunoService.getAlunos()
.then(value => this.alunos = value)
.catch(reason => alert(reason.json().failure || reason));
}
|
identifier_body
|
alunos.component.ts
|
import { Component, OnInit } from '@angular/core';
import { Aluno } from './shared/aluno.model';
import { AlunoService } from './shared/aluno.service';
@Component({
selector: 'app-alunos',
templateUrl: './alunos.component.html',
styleUrls: ['./alunos.component.css'],
})
export class AlunosComponent implements OnInit {
public aluno = new Aluno();
public alunos: Aluno[];
public cpfDuplicado = false;
public constructor(private alunoService: AlunoService) {
}
public criarAluno(a: Aluno): void {
this.alunoService.cadastrar(a)
.then(value => {
this.alunos.push(value);
this.aluno = new Aluno();
})
.catch(reason => {
const res = reason.json();
if (res.failure) {
this.cpfDuplicado = true;
setTimeout(() => this.cpfDuplicado = false, 4000);
} else {
alert(reason);
}
});
}
public ngOnInit(): void {
this.alunoService.getAlunos()
.then(value => this.alunos = value)
.catch(reason => alert(reason.json().failure || reason));
|
}
}
|
random_line_split
|
|
synonyms.py
|
#Synonyms experiment. Pass a string to see its "synonyms"
from pyspark.sql import SparkSession, Row
from pyspark.ml.feature import Word2Vec, Tokenizer, StopWordsRemover, Word2VecModel
import sys;
from string import punctuation
def strip_punctuation(arr):
return [''.join(c for c in s if c not in punctuation) for s in arr]
def main():
spark = SparkSession.builder \
.appName("Spark CV-job ad matching") \
.config("spark.some.config.option", "some-value") \
.master("local[*]") \
.getOrCreate()
df_categories = spark.read.json("allcategories4rdd/allcategories.jsonl")
tokenizer = Tokenizer(inputCol="skillText", outputCol="words")
tokenized = tokenizer.transform(df_categories)
remover = StopWordsRemover(inputCol="words", outputCol="filtered")
removed = remover.transform(tokenized)
stripped = removed.select('filtered').rdd.map(lambda x: strip_punctuation(x[0]))\
.map(lambda x: Row(filtered=x)).toDF(['filtered'])
# word2vec = Word2Vec(vectorSize=100, inputCol="filtered", outputCol="result")
# model = word2vec.fit(stripped)
#model.save("word2vec-model")
model = Word2VecModel.load("word2vec-model")
synonyms = model.findSynonyms(sys.argv[1], 10)
synonyms.show(truncate=False)
# for word, cosine_distance in synonyms:
# print("{}: {}".format(word, cosine_distance))
if __name__ == '__main__':
|
main()
|
conditional_block
|
|
synonyms.py
|
#Synonyms experiment. Pass a string to see its "synonyms"
from pyspark.sql import SparkSession, Row
from pyspark.ml.feature import Word2Vec, Tokenizer, StopWordsRemover, Word2VecModel
import sys;
from string import punctuation
def strip_punctuation(arr):
return [''.join(c for c in s if c not in punctuation) for s in arr]
def main():
spark = SparkSession.builder \
.appName("Spark CV-job ad matching") \
.config("spark.some.config.option", "some-value") \
.master("local[*]") \
.getOrCreate()
df_categories = spark.read.json("allcategories4rdd/allcategories.jsonl")
tokenizer = Tokenizer(inputCol="skillText", outputCol="words")
tokenized = tokenizer.transform(df_categories)
remover = StopWordsRemover(inputCol="words", outputCol="filtered")
removed = remover.transform(tokenized)
stripped = removed.select('filtered').rdd.map(lambda x: strip_punctuation(x[0]))\
.map(lambda x: Row(filtered=x)).toDF(['filtered'])
# word2vec = Word2Vec(vectorSize=100, inputCol="filtered", outputCol="result")
# model = word2vec.fit(stripped)
|
model = Word2VecModel.load("word2vec-model")
synonyms = model.findSynonyms(sys.argv[1], 10)
synonyms.show(truncate=False)
# for word, cosine_distance in synonyms:
# print("{}: {}".format(word, cosine_distance))
if __name__ == '__main__':
main()
|
#model.save("word2vec-model")
|
random_line_split
|
synonyms.py
|
#Synonyms experiment. Pass a string to see its "synonyms"
from pyspark.sql import SparkSession, Row
from pyspark.ml.feature import Word2Vec, Tokenizer, StopWordsRemover, Word2VecModel
import sys;
from string import punctuation
def strip_punctuation(arr):
return [''.join(c for c in s if c not in punctuation) for s in arr]
def
|
():
spark = SparkSession.builder \
.appName("Spark CV-job ad matching") \
.config("spark.some.config.option", "some-value") \
.master("local[*]") \
.getOrCreate()
df_categories = spark.read.json("allcategories4rdd/allcategories.jsonl")
tokenizer = Tokenizer(inputCol="skillText", outputCol="words")
tokenized = tokenizer.transform(df_categories)
remover = StopWordsRemover(inputCol="words", outputCol="filtered")
removed = remover.transform(tokenized)
stripped = removed.select('filtered').rdd.map(lambda x: strip_punctuation(x[0]))\
.map(lambda x: Row(filtered=x)).toDF(['filtered'])
# word2vec = Word2Vec(vectorSize=100, inputCol="filtered", outputCol="result")
# model = word2vec.fit(stripped)
#model.save("word2vec-model")
model = Word2VecModel.load("word2vec-model")
synonyms = model.findSynonyms(sys.argv[1], 10)
synonyms.show(truncate=False)
# for word, cosine_distance in synonyms:
# print("{}: {}".format(word, cosine_distance))
if __name__ == '__main__':
main()
|
main
|
identifier_name
|
synonyms.py
|
#Synonyms experiment. Pass a string to see its "synonyms"
from pyspark.sql import SparkSession, Row
from pyspark.ml.feature import Word2Vec, Tokenizer, StopWordsRemover, Word2VecModel
import sys;
from string import punctuation
def strip_punctuation(arr):
return [''.join(c for c in s if c not in punctuation) for s in arr]
def main():
|
if __name__ == '__main__':
main()
|
spark = SparkSession.builder \
.appName("Spark CV-job ad matching") \
.config("spark.some.config.option", "some-value") \
.master("local[*]") \
.getOrCreate()
df_categories = spark.read.json("allcategories4rdd/allcategories.jsonl")
tokenizer = Tokenizer(inputCol="skillText", outputCol="words")
tokenized = tokenizer.transform(df_categories)
remover = StopWordsRemover(inputCol="words", outputCol="filtered")
removed = remover.transform(tokenized)
stripped = removed.select('filtered').rdd.map(lambda x: strip_punctuation(x[0]))\
.map(lambda x: Row(filtered=x)).toDF(['filtered'])
# word2vec = Word2Vec(vectorSize=100, inputCol="filtered", outputCol="result")
# model = word2vec.fit(stripped)
#model.save("word2vec-model")
model = Word2VecModel.load("word2vec-model")
synonyms = model.findSynonyms(sys.argv[1], 10)
synonyms.show(truncate=False)
# for word, cosine_distance in synonyms:
# print("{}: {}".format(word, cosine_distance))
|
identifier_body
|
profiles.js
|
import React, {Component} from 'react';
import PropTypes from 'prop-types';
import Loader from 'jsx/Loader';
import FilterableDataTable from 'jsx/FilterableDataTable';
/**
* Profiles Component.
*
* @description Genomic Browser Profiles tab.
*
* @author Alizée Wickenheiser
* @version 1.0.0
*
*/
class Profiles extends Component {
/**
* Constructor of component
* @param {object} props - the component properties.
*/
constructor(props) {
super(props);
this.state = {
data: {},
fieldOptions: {},
error: false,
isLoaded: false,
};
this.fetchData = this.fetchData.bind(this);
this.formatColumn = this.formatColumn.bind(this);
}
/**
* Fetch data when component mounts.
*/
componentDidMount() {
this.fetchData();
}
/**
* Retrieve data from the provided URL and save it in state.
*/
fetchData() {
fetch(
`${this.props.baseURL}/genomic_browser/Profiles`,
{
method: 'GET',
credentials: 'same-origin',
headers: {
'Content-Type': 'application/json',
},
}
).then((resp) => {
if (resp.ok) {
resp.json().then((json) => {
const data = {
fieldOptions: json.fieldOptions,
Data: json.data.map((e) => Object.values(e)),
subprojects: json.subprojects,
};
this.setState({
data,
isLoaded: true,
});
});
} else {
this.setState({error: true});
console.error(resp.statusText);
}
}).catch((error) => {
this.setState({error: true});
console.error(error);
});
}
/**
* Modify behaviour of specified column cells in the Data Table component
*
* @param {string} column - column name
* @param {string} cell - cell content
* @param {array} rowData - array of cell contents for a specific row
* @param {array} rowHeaders - array of table headers (column names)
*
* @return {*} a formatted table cell for a given column
*/
formatColumn(column, cell, rowData, rowHeaders) {
let reactElement;
switch (column) {
case 'PSCID': {
const url = `${this.props.baseURL}/${rowData.DCCID}/`;
reactElement = <td><a href={url}>{rowData.PSCID}</a></td>;
break;
}
case 'File':
if (cell === 'Y') {
reactElement = <td>
<a href="#" onClick={loris.loadFilteredMenuClickHandler(
'genomic_browser/viewGenomicFile/',
{candID: rowData[1]}
)}>{cell}</a>
</td>;
} else {
reactElement = <td>{cell}</td>;
}
break;
case 'CNV':
case 'CPG':
case 'SNP':
if (cell === 'Y') {
reactElement = <td>
<span
style={{cursor: 'pointer'}}
onClick={loris.loadFilteredMenuClickHandler(
'genomic_browser/' + column.toLowerCase() + '_browser/',
{DCCID: rowData[1]}
)}
>
{cell}
</span>
</td>;
} else {
reactElement = <td>{cell}</td>;
}
break;
default:
reactElement = <td>{cell}</td>;
}
return reactElement;
}
/**
* @return {DOMRect}
*/
render() {
// Waiting for async data to load.
if (!this.state.isLoaded) {
return <Loader/>;
}
// The filter options
const options = this.state.data.fieldOptions;
// The fields configured for display/hide.
let fields = [
{
label: 'Site',
show: false,
filter: {
name: 'Site',
type: 'select',
options: options.Sites,
},
},
{
label: 'DCCID',
show: false,
filter: {
name: 'DCCID',
type: 'text',
},
|
label: 'PSCID',
show: true,
filter: {
name: 'PSCID',
type: 'text',
},
},
{
label: 'Sex',
show: true,
filter: {
name: 'Sex',
type: 'select',
options: {
Male: 'Male',
Female: 'Female',
},
},
},
{
label: 'Subproject',
show: true,
filter: {
name: 'Subproject',
type: 'select',
options: options.Subproject,
},
},
{
label: 'Date of Birth',
show: false,
},
{
label: 'External ID',
show: false,
filter: {
name: 'External ID',
type: 'text',
},
},
{
label: 'File',
show: true,
filter: {
name: 'File',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
{
label: 'SNPs found',
show: true,
filter: {
name: 'SNPs found',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
{
label: 'CNVs found',
show: true,
filter: {
name: 'CNVs found',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
{
label: 'CPGs found',
show: true,
filter: {
name: 'CPGs found',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
];
return (
<FilterableDataTable
name={'filterableDataTableProfiles'}
data={this.state.data.Data}
fields={fields}
getFormattedCell={this.formatColumn}
/>
);
}
}
Profiles.defaultProps = {
display: false,
data: null,
};
Profiles.propTypes = {
display: PropTypes.bool,
data: PropTypes.object,
baseURL: PropTypes.string.isRequired,
};
export default Profiles;
|
},
{
|
random_line_split
|
profiles.js
|
import React, {Component} from 'react';
import PropTypes from 'prop-types';
import Loader from 'jsx/Loader';
import FilterableDataTable from 'jsx/FilterableDataTable';
/**
* Profiles Component.
*
* @description Genomic Browser Profiles tab.
*
* @author Alizée Wickenheiser
* @version 1.0.0
*
*/
class Profiles extends Component {
/**
* Constructor of component
* @param {object} props - the component properties.
*/
c
|
props) {
super(props);
this.state = {
data: {},
fieldOptions: {},
error: false,
isLoaded: false,
};
this.fetchData = this.fetchData.bind(this);
this.formatColumn = this.formatColumn.bind(this);
}
/**
* Fetch data when component mounts.
*/
componentDidMount() {
this.fetchData();
}
/**
* Retrieve data from the provided URL and save it in state.
*/
fetchData() {
fetch(
`${this.props.baseURL}/genomic_browser/Profiles`,
{
method: 'GET',
credentials: 'same-origin',
headers: {
'Content-Type': 'application/json',
},
}
).then((resp) => {
if (resp.ok) {
resp.json().then((json) => {
const data = {
fieldOptions: json.fieldOptions,
Data: json.data.map((e) => Object.values(e)),
subprojects: json.subprojects,
};
this.setState({
data,
isLoaded: true,
});
});
} else {
this.setState({error: true});
console.error(resp.statusText);
}
}).catch((error) => {
this.setState({error: true});
console.error(error);
});
}
/**
* Modify behaviour of specified column cells in the Data Table component
*
* @param {string} column - column name
* @param {string} cell - cell content
* @param {array} rowData - array of cell contents for a specific row
* @param {array} rowHeaders - array of table headers (column names)
*
* @return {*} a formatted table cell for a given column
*/
formatColumn(column, cell, rowData, rowHeaders) {
let reactElement;
switch (column) {
case 'PSCID': {
const url = `${this.props.baseURL}/${rowData.DCCID}/`;
reactElement = <td><a href={url}>{rowData.PSCID}</a></td>;
break;
}
case 'File':
if (cell === 'Y') {
reactElement = <td>
<a href="#" onClick={loris.loadFilteredMenuClickHandler(
'genomic_browser/viewGenomicFile/',
{candID: rowData[1]}
)}>{cell}</a>
</td>;
} else {
reactElement = <td>{cell}</td>;
}
break;
case 'CNV':
case 'CPG':
case 'SNP':
if (cell === 'Y') {
reactElement = <td>
<span
style={{cursor: 'pointer'}}
onClick={loris.loadFilteredMenuClickHandler(
'genomic_browser/' + column.toLowerCase() + '_browser/',
{DCCID: rowData[1]}
)}
>
{cell}
</span>
</td>;
} else {
reactElement = <td>{cell}</td>;
}
break;
default:
reactElement = <td>{cell}</td>;
}
return reactElement;
}
/**
* @return {DOMRect}
*/
render() {
// Waiting for async data to load.
if (!this.state.isLoaded) {
return <Loader/>;
}
// The filter options
const options = this.state.data.fieldOptions;
// The fields configured for display/hide.
let fields = [
{
label: 'Site',
show: false,
filter: {
name: 'Site',
type: 'select',
options: options.Sites,
},
},
{
label: 'DCCID',
show: false,
filter: {
name: 'DCCID',
type: 'text',
},
},
{
label: 'PSCID',
show: true,
filter: {
name: 'PSCID',
type: 'text',
},
},
{
label: 'Sex',
show: true,
filter: {
name: 'Sex',
type: 'select',
options: {
Male: 'Male',
Female: 'Female',
},
},
},
{
label: 'Subproject',
show: true,
filter: {
name: 'Subproject',
type: 'select',
options: options.Subproject,
},
},
{
label: 'Date of Birth',
show: false,
},
{
label: 'External ID',
show: false,
filter: {
name: 'External ID',
type: 'text',
},
},
{
label: 'File',
show: true,
filter: {
name: 'File',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
{
label: 'SNPs found',
show: true,
filter: {
name: 'SNPs found',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
{
label: 'CNVs found',
show: true,
filter: {
name: 'CNVs found',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
{
label: 'CPGs found',
show: true,
filter: {
name: 'CPGs found',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
];
return (
<FilterableDataTable
name={'filterableDataTableProfiles'}
data={this.state.data.Data}
fields={fields}
getFormattedCell={this.formatColumn}
/>
);
}
}
Profiles.defaultProps = {
display: false,
data: null,
};
Profiles.propTypes = {
display: PropTypes.bool,
data: PropTypes.object,
baseURL: PropTypes.string.isRequired,
};
export default Profiles;
|
onstructor(
|
identifier_name
|
profiles.js
|
import React, {Component} from 'react';
import PropTypes from 'prop-types';
import Loader from 'jsx/Loader';
import FilterableDataTable from 'jsx/FilterableDataTable';
/**
* Profiles Component.
*
* @description Genomic Browser Profiles tab.
*
* @author Alizée Wickenheiser
* @version 1.0.0
*
*/
class Profiles extends Component {
/**
* Constructor of component
* @param {object} props - the component properties.
*/
constructor(props) {
|
/**
* Fetch data when component mounts.
*/
componentDidMount() {
this.fetchData();
}
/**
* Retrieve data from the provided URL and save it in state.
*/
fetchData() {
fetch(
`${this.props.baseURL}/genomic_browser/Profiles`,
{
method: 'GET',
credentials: 'same-origin',
headers: {
'Content-Type': 'application/json',
},
}
).then((resp) => {
if (resp.ok) {
resp.json().then((json) => {
const data = {
fieldOptions: json.fieldOptions,
Data: json.data.map((e) => Object.values(e)),
subprojects: json.subprojects,
};
this.setState({
data,
isLoaded: true,
});
});
} else {
this.setState({error: true});
console.error(resp.statusText);
}
}).catch((error) => {
this.setState({error: true});
console.error(error);
});
}
/**
* Modify behaviour of specified column cells in the Data Table component
*
* @param {string} column - column name
* @param {string} cell - cell content
* @param {array} rowData - array of cell contents for a specific row
* @param {array} rowHeaders - array of table headers (column names)
*
* @return {*} a formatted table cell for a given column
*/
formatColumn(column, cell, rowData, rowHeaders) {
let reactElement;
switch (column) {
case 'PSCID': {
const url = `${this.props.baseURL}/${rowData.DCCID}/`;
reactElement = <td><a href={url}>{rowData.PSCID}</a></td>;
break;
}
case 'File':
if (cell === 'Y') {
reactElement = <td>
<a href="#" onClick={loris.loadFilteredMenuClickHandler(
'genomic_browser/viewGenomicFile/',
{candID: rowData[1]}
)}>{cell}</a>
</td>;
} else {
reactElement = <td>{cell}</td>;
}
break;
case 'CNV':
case 'CPG':
case 'SNP':
if (cell === 'Y') {
reactElement = <td>
<span
style={{cursor: 'pointer'}}
onClick={loris.loadFilteredMenuClickHandler(
'genomic_browser/' + column.toLowerCase() + '_browser/',
{DCCID: rowData[1]}
)}
>
{cell}
</span>
</td>;
} else {
reactElement = <td>{cell}</td>;
}
break;
default:
reactElement = <td>{cell}</td>;
}
return reactElement;
}
/**
* @return {DOMRect}
*/
render() {
// Waiting for async data to load.
if (!this.state.isLoaded) {
return <Loader/>;
}
// The filter options
const options = this.state.data.fieldOptions;
// The fields configured for display/hide.
let fields = [
{
label: 'Site',
show: false,
filter: {
name: 'Site',
type: 'select',
options: options.Sites,
},
},
{
label: 'DCCID',
show: false,
filter: {
name: 'DCCID',
type: 'text',
},
},
{
label: 'PSCID',
show: true,
filter: {
name: 'PSCID',
type: 'text',
},
},
{
label: 'Sex',
show: true,
filter: {
name: 'Sex',
type: 'select',
options: {
Male: 'Male',
Female: 'Female',
},
},
},
{
label: 'Subproject',
show: true,
filter: {
name: 'Subproject',
type: 'select',
options: options.Subproject,
},
},
{
label: 'Date of Birth',
show: false,
},
{
label: 'External ID',
show: false,
filter: {
name: 'External ID',
type: 'text',
},
},
{
label: 'File',
show: true,
filter: {
name: 'File',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
{
label: 'SNPs found',
show: true,
filter: {
name: 'SNPs found',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
{
label: 'CNVs found',
show: true,
filter: {
name: 'CNVs found',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
{
label: 'CPGs found',
show: true,
filter: {
name: 'CPGs found',
type: 'select',
options: {
Y: 'Yes',
N: 'No',
},
},
},
];
return (
<FilterableDataTable
name={'filterableDataTableProfiles'}
data={this.state.data.Data}
fields={fields}
getFormattedCell={this.formatColumn}
/>
);
}
}
Profiles.defaultProps = {
display: false,
data: null,
};
Profiles.propTypes = {
display: PropTypes.bool,
data: PropTypes.object,
baseURL: PropTypes.string.isRequired,
};
export default Profiles;
|
super(props);
this.state = {
data: {},
fieldOptions: {},
error: false,
isLoaded: false,
};
this.fetchData = this.fetchData.bind(this);
this.formatColumn = this.formatColumn.bind(this);
}
|
identifier_body
|
assignment5.py
|
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import tree
from subprocess import call
# https://archive.ics.uci.edu/ml/machine-learning-databases/mushroom/agaricus-lepiota.names
#
# TODO: Load up the mushroom dataset into dataframe 'X'
# Verify you did it properly.
# Indices shouldn't be doubled.
# Header information is on the dataset's website at the UCI ML Repo
# Check NA Encoding
X = pd.read_csv('Datasets/agaricus-lepiota.data', names=['label', 'cap-shape', 'cap-surface', 'cap-color',
'bruises', 'odor', 'gill-attachment',
'gill-spacing', 'gill-size', 'gill-color',
'stalk-shape', 'stalk-root',
'stalk-surface-above-ring',
'stalk-surface-below-ring', 'stalk-color-above-ring',
'stalk-color-below-ring', ' veil-type', 'veil-color',
'ring-number', 'ring-type', 'spore-print-colo', 'population',
'habitat'], header=None)
# INFO: An easy way to show which rows have nans in them
# print X[pd.isnull(X).any(axis=1)]
#
# TODO: Go ahead and drop any row with a nan
X.replace(to_replace='?', value=np.NaN, inplace=True)
X.dropna(axis=0, inplace=True)
print(X.shape)
#
# TODO: Copy the labels out of the dset into variable 'y' then Remove
# them from X. Encode the labels, using the .map() trick we showed
# you in Module 5 -- canadian:0, kama:1, and rosa:2
X['label'] = X['label'].map({'e': 1, 'p': 0})
y = X['label'].copy()
X.drop(labels=['label'], axis=1, inplace=True)
#
# TODO: Encode the entire dataset using dummies
X = pd.get_dummies(X)
#
# TODO: Split your data into test / train sets
|
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)
#
# TODO: Create an DT classifier. No need to set any parameters
model = tree.DecisionTreeClassifier()
#
# TODO: train the classifier on the training data / labels:
# TODO: score the classifier on the testing data / labels:
model.fit(X_train, y_train)
score = model.score(X_test, y_test)
print('High-Dimensionality Score: %f' % round((score * 100), 3))
#
# TODO: Use the code on the courses SciKit-Learn page to output a .DOT file
# Then render the .DOT to .PNGs. Ensure you have graphviz installed.
# If not, `brew install graphviz. If you can't, use: http://webgraphviz.com/
tree.export_graphviz(model.tree_, out_file='tree.dot', feature_names=X.columns)
|
# Your test size can be 30% with random_state 7
# Use variable names: X_train, X_test, y_train, y_test
|
random_line_split
|
task.js
|
/** @jsx html */
import { html } from '../../../snabbdom-jsx';
import Type from 'union-type';
import { bind, pipe, isBoolean, targetValue, targetChecked } from './helpers';
import { KEY_ENTER } from './constants';
// model : {id: Number, title: String, done: Boolean, editing: Boolean, editingValue: String }
const Action = Type({
SetTitle : [String],
Toggle : [isBoolean],
StartEdit : [],
CommitEdit : [String],
CancelEdit : []
});
function
|
(handler, e) {
if(e.keyCode === KEY_ENTER)
handler(Action.CommitEdit(e.target.value))
}
const view = ({model, handler, onRemove}) =>
<li
key={model.id}
class-completed={!!model.done && !model.editing}
class-editing={model.editing}>
<div selector=".view">
<input
selector=".toggle"
type="checkbox"
checked={!!model.done}
on-click={ pipe(targetChecked, Action.Toggle, handler) } />
<label
on-dblclick={ bind(handler, Action.StartEdit()) }>{model.title}</label>
<button
selector=".destroy"
on-click={onRemove} />
</div>
<input
selector=".edit"
value={model.title}
on-blur={ bind(handler, Action.CancelEdit()) }
on-keydown={ bind(onInput, handler) } />
</li>
function init(id, title) {
return { id, title, done: false, editing: false, editingValue: '' };
}
function update(task, action) {
return Action.case({
Toggle : done => ({...task, done}),
StartEdit : () => ({...task, editing: true, editingValue: task.title}),
CommitEdit : title => ({...task, title, editing: false, editingValue: ''}),
CancelEdit : title => ({...task, editing: false, editingValue: ''})
}, action);
}
export default { view, init, update, Action }
|
onInput
|
identifier_name
|
task.js
|
/** @jsx html */
import { html } from '../../../snabbdom-jsx';
import Type from 'union-type';
import { bind, pipe, isBoolean, targetValue, targetChecked } from './helpers';
import { KEY_ENTER } from './constants';
// model : {id: Number, title: String, done: Boolean, editing: Boolean, editingValue: String }
const Action = Type({
SetTitle : [String],
Toggle : [isBoolean],
StartEdit : [],
CommitEdit : [String],
CancelEdit : []
});
function onInput(handler, e) {
if(e.keyCode === KEY_ENTER)
handler(Action.CommitEdit(e.target.value))
}
const view = ({model, handler, onRemove}) =>
<li
key={model.id}
class-completed={!!model.done && !model.editing}
class-editing={model.editing}>
<div selector=".view">
<input
selector=".toggle"
type="checkbox"
checked={!!model.done}
on-click={ pipe(targetChecked, Action.Toggle, handler) } />
<label
on-dblclick={ bind(handler, Action.StartEdit()) }>{model.title}</label>
<button
selector=".destroy"
on-click={onRemove} />
</div>
<input
selector=".edit"
value={model.title}
on-blur={ bind(handler, Action.CancelEdit()) }
on-keydown={ bind(onInput, handler) } />
</li>
function init(id, title) {
return { id, title, done: false, editing: false, editingValue: '' };
}
function update(task, action)
|
export default { view, init, update, Action }
|
{
return Action.case({
Toggle : done => ({...task, done}),
StartEdit : () => ({...task, editing: true, editingValue: task.title}),
CommitEdit : title => ({...task, title, editing: false, editingValue: ''}),
CancelEdit : title => ({...task, editing: false, editingValue: ''})
}, action);
}
|
identifier_body
|
task.js
|
/** @jsx html */
import { html } from '../../../snabbdom-jsx';
import Type from 'union-type';
import { bind, pipe, isBoolean, targetValue, targetChecked } from './helpers';
import { KEY_ENTER } from './constants';
// model : {id: Number, title: String, done: Boolean, editing: Boolean, editingValue: String }
const Action = Type({
SetTitle : [String],
Toggle : [isBoolean],
StartEdit : [],
CommitEdit : [String],
CancelEdit : []
});
function onInput(handler, e) {
if(e.keyCode === KEY_ENTER)
handler(Action.CommitEdit(e.target.value))
}
const view = ({model, handler, onRemove}) =>
<li
key={model.id}
class-completed={!!model.done && !model.editing}
class-editing={model.editing}>
<div selector=".view">
<input
selector=".toggle"
type="checkbox"
checked={!!model.done}
on-click={ pipe(targetChecked, Action.Toggle, handler) } />
<label
on-dblclick={ bind(handler, Action.StartEdit()) }>{model.title}</label>
<button
selector=".destroy"
on-click={onRemove} />
</div>
<input
selector=".edit"
value={model.title}
on-blur={ bind(handler, Action.CancelEdit()) }
on-keydown={ bind(onInput, handler) } />
</li>
function init(id, title) {
|
}
function update(task, action) {
return Action.case({
Toggle : done => ({...task, done}),
StartEdit : () => ({...task, editing: true, editingValue: task.title}),
CommitEdit : title => ({...task, title, editing: false, editingValue: ''}),
CancelEdit : title => ({...task, editing: false, editingValue: ''})
}, action);
}
export default { view, init, update, Action }
|
return { id, title, done: false, editing: false, editingValue: '' };
|
random_line_split
|
userinfo.js
|
const { Command } = require("klasa");
const { MessageEmbed } = require("discord.js");
const statusList = {
online: "online",
idle: "idle",
dnd: "in do not disturb"
};
module.exports = class extends Command {
|
(...args) {
super(...args, {
name: "userinfo",
enabled: true,
runIn: ["text"],
aliases: ["user"],
description: "Get a user's information",
usage: "<user:usersearch>",
extendedHelp: "Need Discord info on a specific user? I got you covered!"
});
this.humanUse = "<user>";
}
async run(msg, [user]) {
user = msg.guild.members.cache.get(user.id);
var userActivity = null;
// If presence intent is enabled, grab presence activity for display.
if (user.presence.clientStatus != null) {
var status = statusList[user.presence.status] || "offline";
var activity = user.presence.activities[0];
if (user.presence.activity === null) { userActivity += " "; }
else {
switch (activity.type) { //All cases covered
case "PLAYING":
userActivity = " while playing ";
break;
case "LISTENING":
userActivity = " while listening to ";
break;
case "WATCHING":
userActivity = " while watching ";
break;
case "STREAMING":
userActivity = " while streaming ";
break;
}
userActivity += activity.name;
}
}
var lastMsgTime;
if (user.lastMessageChannelID) {
var lastMsg = user.guild.channels.cache.get(user.lastMessageChannelID)
.messages.cache.get(user.lastMessageID);
lastMsgTime = this.client.util.dateDisplay(new Date(lastMsg.createdTimestamp));
}
else {
lastMsgTime = "No message found...";
}
const embed = new MessageEmbed()
.setTimestamp()
.setFooter(msg.guild.name, msg.guild.iconURL())
.setThumbnail(user.user.displayAvatarURL())
.setAuthor(user.user.tag);
if (userActivity != null) {
embed.setDescription(`Currently ${status}${userActivity}`);
}
embed.addField("ID", user.id, true);
if (user.nickname) {
embed.addField("Nickname", user.nickname, true);
}
embed.addField("User Type", user.user.bot ? "Bot": "Human", true)
.addField("Last Guild Message", lastMsgTime)
.addField("Created", this.client.util.dateDisplay(user.user.createdAt), true)
.addField("Joined", this.client.util.dateDisplay(user.joinedAt), true)
.setColor(0x04d5fd);
msg.channel.send({embed});
}
};
|
constructor
|
identifier_name
|
userinfo.js
|
const { Command } = require("klasa");
const { MessageEmbed } = require("discord.js");
const statusList = {
online: "online",
idle: "idle",
dnd: "in do not disturb"
};
module.exports = class extends Command {
constructor(...args) {
super(...args, {
name: "userinfo",
enabled: true,
runIn: ["text"],
aliases: ["user"],
description: "Get a user's information",
usage: "<user:usersearch>",
extendedHelp: "Need Discord info on a specific user? I got you covered!"
});
this.humanUse = "<user>";
}
async run(msg, [user]) {
user = msg.guild.members.cache.get(user.id);
var userActivity = null;
// If presence intent is enabled, grab presence activity for display.
if (user.presence.clientStatus != null) {
var status = statusList[user.presence.status] || "offline";
var activity = user.presence.activities[0];
if (user.presence.activity === null) { userActivity += " "; }
else {
switch (activity.type) { //All cases covered
case "PLAYING":
userActivity = " while playing ";
break;
case "LISTENING":
userActivity = " while listening to ";
break;
case "WATCHING":
userActivity = " while watching ";
break;
case "STREAMING":
userActivity = " while streaming ";
break;
}
userActivity += activity.name;
}
}
var lastMsgTime;
if (user.lastMessageChannelID) {
var lastMsg = user.guild.channels.cache.get(user.lastMessageChannelID)
.messages.cache.get(user.lastMessageID);
lastMsgTime = this.client.util.dateDisplay(new Date(lastMsg.createdTimestamp));
}
else {
lastMsgTime = "No message found...";
}
|
.setFooter(msg.guild.name, msg.guild.iconURL())
.setThumbnail(user.user.displayAvatarURL())
.setAuthor(user.user.tag);
if (userActivity != null) {
embed.setDescription(`Currently ${status}${userActivity}`);
}
embed.addField("ID", user.id, true);
if (user.nickname) {
embed.addField("Nickname", user.nickname, true);
}
embed.addField("User Type", user.user.bot ? "Bot": "Human", true)
.addField("Last Guild Message", lastMsgTime)
.addField("Created", this.client.util.dateDisplay(user.user.createdAt), true)
.addField("Joined", this.client.util.dateDisplay(user.joinedAt), true)
.setColor(0x04d5fd);
msg.channel.send({embed});
}
};
|
const embed = new MessageEmbed()
.setTimestamp()
|
random_line_split
|
userinfo.js
|
const { Command } = require("klasa");
const { MessageEmbed } = require("discord.js");
const statusList = {
online: "online",
idle: "idle",
dnd: "in do not disturb"
};
module.exports = class extends Command {
constructor(...args) {
super(...args, {
name: "userinfo",
enabled: true,
runIn: ["text"],
aliases: ["user"],
description: "Get a user's information",
usage: "<user:usersearch>",
extendedHelp: "Need Discord info on a specific user? I got you covered!"
});
this.humanUse = "<user>";
}
async run(msg, [user]) {
user = msg.guild.members.cache.get(user.id);
var userActivity = null;
// If presence intent is enabled, grab presence activity for display.
if (user.presence.clientStatus != null) {
var status = statusList[user.presence.status] || "offline";
var activity = user.presence.activities[0];
if (user.presence.activity === null) { userActivity += " "; }
else {
switch (activity.type) { //All cases covered
case "PLAYING":
userActivity = " while playing ";
break;
case "LISTENING":
userActivity = " while listening to ";
break;
case "WATCHING":
userActivity = " while watching ";
break;
case "STREAMING":
userActivity = " while streaming ";
break;
}
userActivity += activity.name;
}
}
var lastMsgTime;
if (user.lastMessageChannelID) {
var lastMsg = user.guild.channels.cache.get(user.lastMessageChannelID)
.messages.cache.get(user.lastMessageID);
lastMsgTime = this.client.util.dateDisplay(new Date(lastMsg.createdTimestamp));
}
else
|
const embed = new MessageEmbed()
.setTimestamp()
.setFooter(msg.guild.name, msg.guild.iconURL())
.setThumbnail(user.user.displayAvatarURL())
.setAuthor(user.user.tag);
if (userActivity != null) {
embed.setDescription(`Currently ${status}${userActivity}`);
}
embed.addField("ID", user.id, true);
if (user.nickname) {
embed.addField("Nickname", user.nickname, true);
}
embed.addField("User Type", user.user.bot ? "Bot": "Human", true)
.addField("Last Guild Message", lastMsgTime)
.addField("Created", this.client.util.dateDisplay(user.user.createdAt), true)
.addField("Joined", this.client.util.dateDisplay(user.joinedAt), true)
.setColor(0x04d5fd);
msg.channel.send({embed});
}
};
|
{
lastMsgTime = "No message found...";
}
|
conditional_block
|
userinfo.js
|
// userinfo command — built on the Klasa command framework for discord.js.
const { Command } = require("klasa");
const { MessageEmbed } = require("discord.js");
// Maps discord.js presence status keys to the human-readable phrasing used in
// the embed description ("Currently <status>...").
// NOTE(review): "offline" is intentionally absent; lookups fall back to the
// literal "offline" via `|| "offline"` at the call site.
const statusList = {
    online: "online",
    idle: "idle",
    dnd: "in do not disturb"
};
module.exports = class extends Command {
// Registers the command's metadata with Klasa (name, aliases, usage, help text).
constructor(...args) {
    super(...args, {
        name: "userinfo",
        enabled: true,
        runIn: ["text"],            // usable in guild text channels only
        aliases: ["user"],
        description: "Get a user's information",
        usage: "<user:usersearch>", // "usersearch" is a custom resolver — presumably bot-supplied; verify
        extendedHelp: "Need Discord info on a specific user? I got you covered!"
    });
    // Human-friendly usage string consumed by the bot's own help display.
    this.humanUse = "<user>";
}
async run(msg, [user])
|
};
|
{
    // Resolve the searched user to a GuildMember so guild-specific data
    // (nickname, join date, presence) is available.
    user = msg.guild.members.cache.get(user.id);
    var userActivity = null;
    var status;
    // If the presence intent is enabled, build an activity blurb for display.
    if (user.presence.clientStatus != null) {
        status = statusList[user.presence.status] || "offline";
        var activity = user.presence.activities[0];
        // BUGFIX: the old check (`user.presence.activity === null`) read a
        // property that is always undefined in discord.js v12, so a member
        // with no activity fell through and crashed on `activity.type`.
        // Test the resolved activity itself instead.
        if (activity == null) {
            // No activity: show just the status ("Currently online").
            // (Previously `userActivity += " "` concatenated onto null,
            // yielding the literal string "null ".)
            userActivity = "";
        } else {
            switch (activity.type) {
                case "PLAYING":
                    userActivity = " while playing ";
                    break;
                case "LISTENING":
                    userActivity = " while listening to ";
                    break;
                case "WATCHING":
                    userActivity = " while watching ";
                    break;
                case "STREAMING":
                    userActivity = " while streaming ";
                    break;
                default:
                    // BUGFIX: unhandled types (e.g. CUSTOM_STATUS) previously
                    // left userActivity null and produced "null<name>".
                    userActivity = " while doing ";
                    break;
            }
            userActivity += activity.name;
        }
    }
    var lastMsgTime;
    // lastMessage* fields are only populated if the member has spoken since
    // the bot started; the message itself may also have been evicted from the
    // cache, so guard the lookup end-to-end.
    var lastMsg = user.lastMessageChannelID
        ? user.guild.channels.cache.get(user.lastMessageChannelID)
            .messages.cache.get(user.lastMessageID)
        : null;
    if (lastMsg) {
        lastMsgTime = this.client.util.dateDisplay(new Date(lastMsg.createdTimestamp));
    } else {
        // BUGFIX: previously a cache miss crashed on `lastMsg.createdTimestamp`.
        lastMsgTime = "No message found...";
    }
    const embed = new MessageEmbed()
        .setTimestamp()
        .setFooter(msg.guild.name, msg.guild.iconURL())
        .setThumbnail(user.user.displayAvatarURL())
        .setAuthor(user.user.tag);
    if (userActivity != null) {
        embed.setDescription(`Currently ${status}${userActivity}`);
    }
    embed.addField("ID", user.id, true);
    if (user.nickname) {
        embed.addField("Nickname", user.nickname, true);
    }
    embed.addField("User Type", user.user.bot ? "Bot": "Human", true)
        .addField("Last Guild Message", lastMsgTime)
        .addField("Created", this.client.util.dateDisplay(user.user.createdAt), true)
        .addField("Joined", this.client.util.dateDisplay(user.joinedAt), true)
        .setColor(0x04d5fd);
    msg.channel.send({embed});
}
|
identifier_body
|
ND280Transform_CSVEvtList.py
|
from GangaCore.GPIDev.Schema import *
from GangaCore.GPIDev.Lib.Tasks.common import *
from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform
from GangaCore.GPIDev.Lib.Job.Job import JobError
from GangaCore.GPIDev.Lib.Registry.JobRegistry import JobRegistrySlice, JobRegistrySliceProxy
from GangaCore.Core.exceptions import ApplicationConfigurationError
from GangaCore.GPIDev.Lib.Tasks.ITransform import ITransform
from GangaCore.GPIDev.Lib.Tasks.TaskLocalCopy import TaskLocalCopy
from GangaCore.Utility.logging import getLogger
from .ND280Unit_CSVEvtList import ND280Unit_CSVEvtList
from GangaND280.ND280Dataset.ND280Dataset import ND280LocalDataset
from GangaND280.ND280Splitter import splitCSVFile
import GangaCore.GPI as GPI
import os
logger = getLogger()
class ND280Transform_CSVEvtList(ITransform):
    # Transform that splits a CSV event list into units of `nbevents` events each.
    # Schema: base ITransform schema extended with the per-unit event count;
    # -1 (the default) presumably means "no split / all events" — confirm in splitCSVFile.
    _schema = Schema(Version(1,0), dict(list(ITransform._schema.datadict.items()) + list({
        'nbevents' : SimpleItem(defvalue=-1,doc='The number of events for each unit'),
    }.items())))
    _category = 'transforms'
    _name = 'ND280Transform_CSVEvtList'
    # No extra exported methods beyond those of the base transform.
    _exportmethods = ITransform._exportmethods + [ ]

    def __init__(self):
        super(ND280Transform_CSVEvtList,self).__init__()
def
|
(self):
"""Create new units if required given the inputdata"""
# call parent for chaining
super(ND280Transform_CSVEvtList,self).createUnits()
# Look at the application schema and check if there is a csvfile variable
try:
csvfile = self.application.csvfile
except AttributeError:
logger.error('This application doesn\'t contain a csvfile variable. Use another Transform !')
return
subsets = splitCSVFile(self.application.csvfile, self.nbevents)
for s,sub in enumerate(subsets):
# check if this data is being run over by checking all the names listed
ok = False
for unit in self.units:
if unit.subpartid == s:
ok = True
if ok:
continue
# new unit required for this dataset
unit = ND280Unit_CSVEvtList()
unit.name = "Unit %d" % len(self.units)
unit.subpartid = s
unit.eventswanted = sub
unit.inputdata = self.inputdata[0]
self.addUnitToTRF( unit )
def createChainUnit( self, parent_units, use_copy_output = True ):
    """Build a chained ND280Unit_CSVEvtList fed by the output of parent_units.

    Returns None while any parent is still running (output files are only
    reliable once every parent has completed). Depending on use_copy_output
    and whether every parent keeps a local copy, the new unit's dataset is
    filled either from the parents' job output directories or from their
    local copy_output locations.
    """
    # The copied-output path is only usable if every parent keeps a copy.
    copy_output_ok = all(parent.copy_output for parent in parent_units)

    # Every parent must be finished so its outputfiles list is populated.
    if any(parent.status != "completed" for parent in parent_units):
        return None

    unit = ND280Unit_CSVEvtList()
    unit.inputdata = ND280LocalDataset()

    if not use_copy_output or not copy_output_ok:
        # Pull file names straight from each parent job's output directory.
        # NOTE(review): no filtering by file type yet — TaskChainInput's
        # include/exclude_file_mask could be applied here.
        for parent in parent_units:
            job = GPI.jobs(parent.active_job_ids[0])
            for f in job.outputfiles:
                unit.inputdata.names.append( os.path.join( job.outputdir, f.namePattern ) )
    else:
        for parent in parent_units:
            # Parent must be completed and downloaded before listing files.
            if parent.status != "completed":
                return None
            # Copy every locally-staged output file into the dataset.
            for f in parent.copy_output.files:
                unit.inputdata.names.append( os.path.join( parent.copy_output.local_location, f ) )

    return unit
|
createUnits
|
identifier_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.