file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
39k
| suffix
large_stringlengths 0
36.1k
| middle
large_stringlengths 0
29.4k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
issue-15381.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn
|
() {
let values: Vec<u8> = vec![1,2,3,4,5,6,7,8];
for
[x,y,z]
//~^ ERROR refutable pattern in `for` loop binding: `[]` not covered
in values.as_slice().chunks(3).filter(|&xs| xs.len() == 3) {
println!("y={}", y);
}
}
|
main
|
identifier_name
|
issue-15381.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {
let values: Vec<u8> = vec![1,2,3,4,5,6,7,8];
|
println!("y={}", y);
}
}
|
for
[x,y,z]
//~^ ERROR refutable pattern in `for` loop binding: `[]` not covered
in values.as_slice().chunks(3).filter(|&xs| xs.len() == 3) {
|
random_line_split
|
__init__.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main command group for gcloud bigquery.
"""
import urlparse
from googlecloudsdk.api_lib.bigquery import bigquery
from googlecloudsdk.calliope import base
from googlecloudsdk.core import apis
from googlecloudsdk.core import cli
from googlecloudsdk.core import properties
from googlecloudsdk.core import resolvers
from googlecloudsdk.core import resources
from googlecloudsdk.core.credentials import store as c_store
SERVICE_NAME = 'bigquery'
BIGQUERY_MESSAGES_MODULE_KEY = 'bigquery-messages-module'
APITOOLS_CLIENT_KEY = 'bigquery-apitools-client'
BIGQUERY_REGISTRY_KEY = 'bigquery-registry'
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Bigquery(base.Group):
"""A group of commands for using BigQuery.
"""
def Filter(self, context, args):
"""Initialize context for bigquery commands.
Args:
context: The current context.
args: The argparse namespace that was specified on the CLI or API.
Returns:
The updated context.
"""
resources.SetParamDefault(
api='bigquery', collection=None, param='projectId',
|
resolver=resolvers.FromProperty(properties.VALUES.core.project))
# TODO(user): remove command dependence on these.
context[BIGQUERY_MESSAGES_MODULE_KEY] = apis.GetMessagesModule(
'bigquery', 'v2')
context[APITOOLS_CLIENT_KEY] = apis.GetClientInstance(
'bigquery', 'v2', http=self.Http())
context[BIGQUERY_REGISTRY_KEY] = resources.REGISTRY
# Inject bigquery backend params.
bigquery.Bigquery.SetResourceParser(resources.REGISTRY)
bigquery.Bigquery.SetApiEndpoint(
self.Http(), properties.VALUES.api_endpoint_overrides.bigquery.Get())
@staticmethod
def Args(parser):
parser.add_argument(
'--fingerprint-job-id',
action='store_true',
help='Whether to use a job id that is derived from a fingerprint of '
'the job configuration.')
|
random_line_split
|
|
__init__.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main command group for gcloud bigquery.
"""
import urlparse
from googlecloudsdk.api_lib.bigquery import bigquery
from googlecloudsdk.calliope import base
from googlecloudsdk.core import apis
from googlecloudsdk.core import cli
from googlecloudsdk.core import properties
from googlecloudsdk.core import resolvers
from googlecloudsdk.core import resources
from googlecloudsdk.core.credentials import store as c_store
SERVICE_NAME = 'bigquery'
BIGQUERY_MESSAGES_MODULE_KEY = 'bigquery-messages-module'
APITOOLS_CLIENT_KEY = 'bigquery-apitools-client'
BIGQUERY_REGISTRY_KEY = 'bigquery-registry'
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Bigquery(base.Group):
"""A group of commands for using BigQuery.
"""
def Filter(self, context, args):
"""Initialize context for bigquery commands.
Args:
context: The current context.
args: The argparse namespace that was specified on the CLI or API.
Returns:
The updated context.
"""
resources.SetParamDefault(
api='bigquery', collection=None, param='projectId',
resolver=resolvers.FromProperty(properties.VALUES.core.project))
# TODO(user): remove command dependence on these.
context[BIGQUERY_MESSAGES_MODULE_KEY] = apis.GetMessagesModule(
'bigquery', 'v2')
context[APITOOLS_CLIENT_KEY] = apis.GetClientInstance(
'bigquery', 'v2', http=self.Http())
context[BIGQUERY_REGISTRY_KEY] = resources.REGISTRY
# Inject bigquery backend params.
bigquery.Bigquery.SetResourceParser(resources.REGISTRY)
bigquery.Bigquery.SetApiEndpoint(
self.Http(), properties.VALUES.api_endpoint_overrides.bigquery.Get())
@staticmethod
def
|
(parser):
parser.add_argument(
'--fingerprint-job-id',
action='store_true',
help='Whether to use a job id that is derived from a fingerprint of '
'the job configuration.')
|
Args
|
identifier_name
|
__init__.py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main command group for gcloud bigquery.
"""
import urlparse
from googlecloudsdk.api_lib.bigquery import bigquery
from googlecloudsdk.calliope import base
from googlecloudsdk.core import apis
from googlecloudsdk.core import cli
from googlecloudsdk.core import properties
from googlecloudsdk.core import resolvers
from googlecloudsdk.core import resources
from googlecloudsdk.core.credentials import store as c_store
SERVICE_NAME = 'bigquery'
BIGQUERY_MESSAGES_MODULE_KEY = 'bigquery-messages-module'
APITOOLS_CLIENT_KEY = 'bigquery-apitools-client'
BIGQUERY_REGISTRY_KEY = 'bigquery-registry'
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Bigquery(base.Group):
"""A group of commands for using BigQuery.
"""
def Filter(self, context, args):
|
@staticmethod
def Args(parser):
parser.add_argument(
'--fingerprint-job-id',
action='store_true',
help='Whether to use a job id that is derived from a fingerprint of '
'the job configuration.')
|
"""Initialize context for bigquery commands.
Args:
context: The current context.
args: The argparse namespace that was specified on the CLI or API.
Returns:
The updated context.
"""
resources.SetParamDefault(
api='bigquery', collection=None, param='projectId',
resolver=resolvers.FromProperty(properties.VALUES.core.project))
# TODO(user): remove command dependence on these.
context[BIGQUERY_MESSAGES_MODULE_KEY] = apis.GetMessagesModule(
'bigquery', 'v2')
context[APITOOLS_CLIENT_KEY] = apis.GetClientInstance(
'bigquery', 'v2', http=self.Http())
context[BIGQUERY_REGISTRY_KEY] = resources.REGISTRY
# Inject bigquery backend params.
bigquery.Bigquery.SetResourceParser(resources.REGISTRY)
bigquery.Bigquery.SetApiEndpoint(
self.Http(), properties.VALUES.api_endpoint_overrides.bigquery.Get())
|
identifier_body
|
jquery.bbq.d.ts
|
// Type definitions for jquery.bbq 1.2
// Project: http://benalman.com/projects/jquery-bbq-plugin/
// Definitions by: Adam R. Smith <https://github.com/sunetos>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
/// <reference path="../jquery/jquery.d.ts" />
declare namespace JQueryBbq {
interface JQuery {
/**
* Adds a 'state' into the browser history at the current position, setting
* location.hash and triggering any bound <hashchange event> callbacks
* (provided the new state is different than the previous state).
*
* @name params A serialized params string or a hash string beginning with # to merge into location.hash.
* @name merge_mode Merge behavior defaults to 0 if merge_mode is not specified (unless a hash string beginning with # is specified, in which case merge behavior defaults to 2)
*/
pushState(params?: string, merge_mode?: number): void;
pushState(params?: any, merge_mode?: number): void;
/**
* Retrieves the current 'state' from the browser history, parsing
* location.hash for a specific key or returning an object containing the
|
* @name key An optional state key for which to return a value.
* @name coerce If true, coerces any numbers or true, false, null, and undefined to their actual value. Defaults to false
*/
getState(key?: string, coerce?: boolean): any;
getState(coerce?: boolean): any;
/**
* Remove one or more keys from the current browser history 'state', creating
* a new state, setting location.hash and triggering any bound
* <hashchange event> callbacks (provided the new state is different than
* the previous state).
*
* @name key One or more key values to remove from the current state.
*/
removeState(...key: any[]): void;
}
interface ParamFragment {
(url?: string): string;
(url: string, params: any, merge_mode?: number): string;
/**
* Specify characters that will be left unescaped when fragments are created
* or merged using <jQuery.param.fragment>, or when the fragment is modified
* using <jQuery.bbq.pushState>. This option only applies to serialized data
* object fragments, and not set-as-string fragments. Does not affect the
* query string. Defaults to ",/" (comma, forward slash).
*
* @name chars The characters to not escape in the fragment. If unspecified, defaults to empty string (escape all characters).
*/
noEscape: (chars?: string) => void;
/**
* TODO: DESCRIBE
*
* @name state TODO: DESCRIBE
*/
ajaxCrawlable(state?: boolean): boolean;
}
interface JQueryDeparam {
/**
* Deserialize a params string into an object, optionally coercing numbers,
* booleans, null and undefined values; this method is the counterpart to the
* internal jQuery.param method.
*
* @name params A params string to be parsed.
* @name coerce If true, coerces any numbers or true, false, null, and undefined to their actual value. Defaults to false if omitted.
*/
(params: string, coerce?: boolean): any;
/**
* Parse the query string from a URL or the current window.location.href,
* deserializing it into an object, optionally coercing numbers, booleans,
* null and undefined values.
*
* @name url An optional params string or URL containing query string params to be parsed. If url is omitted, the current window.location.href is used.
* @name coerce If true, coerces any numbers or true, false, null, and undefined to their actual value. Defaults to false if omitted.
*/
querystring(url?: string, coerce?: boolean): any;
/**
* Parse the fragment (hash) from a URL or the current window.location.href,
* deserializing it into an object, optionally coercing numbers, booleans,
* null and undefined values.
*
* @name url An optional params string or URL containing fragment (hash) params to be parsed. If url is omitted, the current window.location.href is used.
* @name coerce If true, coerces any numbers or true, false, null, and undefined to their actual value. Defaults to false if omitted.
*/
fragment(url?: string, coerce?: boolean): any;
}
interface EventObject extends JQueryEventObject {
fragment: string;
getState(key?: string, coerce?: boolean);
}
}
interface JQueryParam {
/**
* Parse the query string from a URL or the current window.location.href,
* deserializing it into an object, optionally coercing numbers, booleans,
* null and undefined values.
*
* @name url An optional params string or URL containing query string params to be parsed. If url is omitted, the current window.location.href is used.
* @name coerce (Boolean) If true, coerces any numbers or true, false, null, and undefined to their actual value. Defaults to false if omitted.
* @name merge_mode An object representing the deserialized params string.
*/
querystring(url?: string, coerce?: boolean, merge_mode?: number): string;
querystring(url?: string, coerce?: any, merge_mode?: number): string;
fragment: JQueryBbq.ParamFragment;
/**
* Returns a params string equivalent to that returned by the internal
* jQuery.param method, but sorted, which makes it suitable for use as a
* cache key.
*
* @name obj An object to be serialized.
* @name traditional Params deep/shallow serialization mode. See the documentation at http://api.jquery.com/jQuery.param/ for more detail.
*/
sorted(obj: any, traditional?: boolean): string;
}
interface JQueryStatic {
bbq: JQueryBbq.JQuery;
deparam: JQueryBbq.JQueryDeparam;
/**
* Get the internal "Default URL attribute per tag" list, or augment the list
* with additional tag-attribute pairs, in case the defaults are insufficient.
*
* @name tag_attr An object containing a list of tag names and their associated default attribute names in the format { tag: 'attr', ... } to be merged into the internal tag-attribute list.
*/
elemUrlAttr(tag_attr?: any): any;
}
interface JQuery {
querystring(attr?: any, params?: any, merge_mode?: number): JQuery;
fragment(attr?: any, params?: any, merge_mode?: number): JQuery;
hashchange(eventData?: any, handler?: (eventObject: JQueryBbq.EventObject) => any): JQuery;
hashchange(handler: (eventObject: JQueryBbq.EventObject) => any): JQuery;
}
|
* entire state, optionally coercing numbers, booleans, null and undefined
* values.
*
|
random_line_split
|
mod.rs
|
pub use self::manager::ContextManager;
pub use self::manager::ViewContext;
pub use self::manager::ViewContextMut;
mod manager;
//mod proxies;
//use mopa;
use std::collections::HashMap;
use store::StoreValueStatic;
use {
Store,
StoreValue,
AttributeGetResult,
AttributeMutResult,
AttributeSetResult
};
use lookup::PropertyAccessor;
/// This trait is used to provide a possible interface for Context
/// objects managed by the `ContextManager`. It is implemented by
/// the `AmbientModel` to give an example of such a `Context`.
/// **Note:**
/// If the "Context" type for the `ContextManager` implement this trait,
/// then those function can be used also on the `ContextManager`.
pub trait Context {
/// Register a single value at the key
fn register_value<V: Into<StoreValueStatic>>(&mut self, key: String, value: V);
/// Register a store:
fn register_store<S: Store>(&mut self, key: String, store: S);
/// Return a previously registered store:
/// This can be useful when you want to modify an existing store but without
/// retaining a reference to it.
fn get_store_mut(&mut self, key: String) -> Option<&mut Box<Store + 'static>>;
}
/// Default version of the `ContextManager` where the template
/// parameter is set to `AmbientModel`.
pub type DefaultContextManager = ContextManager<AmbientModel, AmbientModel>;
/// An `AmbientModel` instance is a root object that is used
/// by the `DefaultContextManager`.
/// Internally it use a HashMap for single `StoreValue`s
/// and an other HashMap for boxed type implementing the trait `Store`.
#[derive(Default)]
pub struct AmbientModel {
values: HashMap<String, StoreValueStatic>,
stores: HashMap<String, Box<Store>>,
}
/// Minimal contraint to be used in a `ContextManager`:
/// implement the trait `Store`.
impl Store for AmbientModel {
fn get_attribute<'a>(&'a self, k: PropertyAccessor) -> AttributeGetResult<'a> {
let value = self.stores.get_attribute(k.clone());
if value.is_found() {
value
} else {
self.values.get_attribute(k)
}
}
fn get_attribute_mut<'a>(&'a mut self, k: PropertyAccessor) -> AttributeMutResult<'a> {
let value = self.stores.get_attribute_mut(k.clone());
if value.is_found() {
value
} else {
self.values.get_attribute_mut(k)
}
}
fn set_attribute<'a>(&mut self, k: PropertyAccessor, value: StoreValue<'a>) -> AttributeSetResult<'a> {
match self.stores.set_attribute(k.clone(), value) {
AttributeSetResult::NoSuchProperty(v) => {
self.values.set_attribute(k, v)
}
_ => AttributeSetResult::Stored
}
}
}
// Context implementation
impl Context for AmbientModel {
fn register_value<V: Into<StoreValueStatic>>(&mut self, key: String, value: V) {
self.values.insert(key, value.into());
}
fn register_store<S: Store + 'static>(&mut self, key: String, store: S) {
self.stores.insert(key, Box::new(store) as Box<Store>);
}
|
}
}
|
fn get_store_mut(&mut self, key: String) -> Option<&mut Box<Store + 'static>> {
self.stores.get_mut(&key)
|
random_line_split
|
mod.rs
|
pub use self::manager::ContextManager;
pub use self::manager::ViewContext;
pub use self::manager::ViewContextMut;
mod manager;
//mod proxies;
//use mopa;
use std::collections::HashMap;
use store::StoreValueStatic;
use {
Store,
StoreValue,
AttributeGetResult,
AttributeMutResult,
AttributeSetResult
};
use lookup::PropertyAccessor;
/// This trait is used to provide a possible interface for Context
/// objects managed by the `ContextManager`. It is implemented by
/// the `AmbientModel` to give an example of such a `Context`.
/// **Note:**
/// If the "Context" type for the `ContextManager` implement this trait,
/// then those function can be used also on the `ContextManager`.
pub trait Context {
/// Register a single value at the key
fn register_value<V: Into<StoreValueStatic>>(&mut self, key: String, value: V);
/// Register a store:
fn register_store<S: Store>(&mut self, key: String, store: S);
/// Return a previously registered store:
/// This can be useful when you want to modify an existing store but without
/// retaining a reference to it.
fn get_store_mut(&mut self, key: String) -> Option<&mut Box<Store + 'static>>;
}
/// Default version of the `ContextManager` where the template
/// parameter is set to `AmbientModel`.
pub type DefaultContextManager = ContextManager<AmbientModel, AmbientModel>;
/// An `AmbientModel` instance is a root object that is used
/// by the `DefaultContextManager`.
/// Internally it use a HashMap for single `StoreValue`s
/// and an other HashMap for boxed type implementing the trait `Store`.
#[derive(Default)]
pub struct AmbientModel {
values: HashMap<String, StoreValueStatic>,
stores: HashMap<String, Box<Store>>,
}
/// Minimal contraint to be used in a `ContextManager`:
/// implement the trait `Store`.
impl Store for AmbientModel {
fn get_attribute<'a>(&'a self, k: PropertyAccessor) -> AttributeGetResult<'a> {
let value = self.stores.get_attribute(k.clone());
if value.is_found() {
value
} else {
self.values.get_attribute(k)
}
}
fn get_attribute_mut<'a>(&'a mut self, k: PropertyAccessor) -> AttributeMutResult<'a> {
let value = self.stores.get_attribute_mut(k.clone());
if value.is_found() {
value
} else {
self.values.get_attribute_mut(k)
}
}
fn set_attribute<'a>(&mut self, k: PropertyAccessor, value: StoreValue<'a>) -> AttributeSetResult<'a>
|
}
// Context implementation
impl Context for AmbientModel {
fn register_value<V: Into<StoreValueStatic>>(&mut self, key: String, value: V) {
self.values.insert(key, value.into());
}
fn register_store<S: Store + 'static>(&mut self, key: String, store: S) {
self.stores.insert(key, Box::new(store) as Box<Store>);
}
fn get_store_mut(&mut self, key: String) -> Option<&mut Box<Store + 'static>> {
self.stores.get_mut(&key)
}
}
|
{
match self.stores.set_attribute(k.clone(), value) {
AttributeSetResult::NoSuchProperty(v) => {
self.values.set_attribute(k, v)
}
_ => AttributeSetResult::Stored
}
}
|
identifier_body
|
mod.rs
|
pub use self::manager::ContextManager;
pub use self::manager::ViewContext;
pub use self::manager::ViewContextMut;
mod manager;
//mod proxies;
//use mopa;
use std::collections::HashMap;
use store::StoreValueStatic;
use {
Store,
StoreValue,
AttributeGetResult,
AttributeMutResult,
AttributeSetResult
};
use lookup::PropertyAccessor;
/// This trait is used to provide a possible interface for Context
/// objects managed by the `ContextManager`. It is implemented by
/// the `AmbientModel` to give an example of such a `Context`.
/// **Note:**
/// If the "Context" type for the `ContextManager` implement this trait,
/// then those function can be used also on the `ContextManager`.
pub trait Context {
/// Register a single value at the key
fn register_value<V: Into<StoreValueStatic>>(&mut self, key: String, value: V);
/// Register a store:
fn register_store<S: Store>(&mut self, key: String, store: S);
/// Return a previously registered store:
/// This can be useful when you want to modify an existing store but without
/// retaining a reference to it.
fn get_store_mut(&mut self, key: String) -> Option<&mut Box<Store + 'static>>;
}
/// Default version of the `ContextManager` where the template
/// parameter is set to `AmbientModel`.
pub type DefaultContextManager = ContextManager<AmbientModel, AmbientModel>;
/// An `AmbientModel` instance is a root object that is used
/// by the `DefaultContextManager`.
/// Internally it use a HashMap for single `StoreValue`s
/// and an other HashMap for boxed type implementing the trait `Store`.
#[derive(Default)]
pub struct AmbientModel {
values: HashMap<String, StoreValueStatic>,
stores: HashMap<String, Box<Store>>,
}
/// Minimal contraint to be used in a `ContextManager`:
/// implement the trait `Store`.
impl Store for AmbientModel {
fn get_attribute<'a>(&'a self, k: PropertyAccessor) -> AttributeGetResult<'a> {
let value = self.stores.get_attribute(k.clone());
if value.is_found() {
value
} else {
self.values.get_attribute(k)
}
}
fn get_attribute_mut<'a>(&'a mut self, k: PropertyAccessor) -> AttributeMutResult<'a> {
let value = self.stores.get_attribute_mut(k.clone());
if value.is_found() {
value
} else {
self.values.get_attribute_mut(k)
}
}
fn set_attribute<'a>(&mut self, k: PropertyAccessor, value: StoreValue<'a>) -> AttributeSetResult<'a> {
match self.stores.set_attribute(k.clone(), value) {
AttributeSetResult::NoSuchProperty(v) => {
self.values.set_attribute(k, v)
}
_ => AttributeSetResult::Stored
}
}
}
// Context implementation
impl Context for AmbientModel {
fn register_value<V: Into<StoreValueStatic>>(&mut self, key: String, value: V) {
self.values.insert(key, value.into());
}
fn
|
<S: Store + 'static>(&mut self, key: String, store: S) {
self.stores.insert(key, Box::new(store) as Box<Store>);
}
fn get_store_mut(&mut self, key: String) -> Option<&mut Box<Store + 'static>> {
self.stores.get_mut(&key)
}
}
|
register_store
|
identifier_name
|
ext-lang-pt_PT.js
|
/*!
* Extensible 1.6.0-rc.1
* Copyright(c) 2010-2013 Extensible, LLC
* [email protected]
* http://ext.ensible.com
*/
/**
* Portuguese/Portugal (pt_PT) Translation
* by Nuno Franco da Costa - francodacosta.com
* translated from ext-lang-en.js
*/
Ext.onReady(function() {
var cm = Ext.ClassManager,
exists = Ext.Function.bind(cm.get, cm);
if(Ext.Updater) {
Ext.Updater.defaults.indicatorText = '<div class="loading-indicator">A carregar...</div>';
}
if(exists('Ext.view.View')){
Ext.view.View.prototype.emptyText = "";
}
if(exists('Ext.grid.Panel')){
Ext.grid.Panel.prototype.ddText = "{0} linha(s) seleccionada(s)";
}
if(Ext.TabPanelItem){
Ext.TabPanelItem.prototype.closeText = "Fechar aba";
}
if(Ext.LoadMask){
Ext.LoadMask.prototype.msg = "A carregar...";
}
if(Ext.Date) {
Ext.Date.monthNames = [
"Janeiro",
|
"Abril",
"Maio",
"Junho",
"Julho",
"Agosto",
"Setembro",
"Outubro",
"Novembro",
"Dezembro"
];
Ext.Date.getShortMonthName = function(month) {
return Ext.Date.monthNames[month].substring(0, 3);
};
Ext.Date.monthNumbers = {
Jan : 0,
Feb : 1,
Mar : 2,
Apr : 3,
May : 4,
Jun : 5,
Jul : 6,
Aug : 7,
Sep : 8,
Oct : 9,
Nov : 10,
Dec : 11
};
Ext.Date.getMonthNumber = function(name) {
return Ext.Date.monthNumbers[name.substring(0, 1).toUpperCase() + name.substring(1, 3).toLowerCase()];
};
Ext.Date.dayNames = [
"Domingo",
"Segunda",
"Terça",
"Quarta",
"Quinta",
"Sexta",
"Sabado"
];
Ext.Date.getShortDayName = function(day) {
return Ext.Date.dayNames[day].substring(0, 3);
};
}
if(Ext.MessageBox){
Ext.MessageBox.buttonText = {
ok : "OK",
cancel : "Cancelar",
yes : "Sim",
no : "Não"
};
}
if(exists('Ext.util.Format')){
Ext.apply(Ext.util.Format, {
thousandSeparator: '.',
decimalSeparator: ',',
currencySign: '\u20ac', // Portugese Euro
dateFormat: 'Y/m/d'
});
}
if(exists('Ext.picker.Date')){
Ext.apply(Ext.picker.Date.prototype, {
todayText : "Hoje",
minText : "A data é anterior ao mínimo definido",
maxText : "A data é posterior ao máximo definido",
disabledDaysText : "",
disabledDatesText : "",
monthNames : Ext.Date.monthNames,
dayNames : Ext.Date.dayNames,
nextText : 'Mês Seguinte (Control+Right)',
prevText : 'Mês Anterior (Control+Left)',
monthYearText : 'Escolha um mês (Control+Up/Down avaçar/recuar anos)',
todayTip : "{0} (barra de espaço)",
format : "y/m/d",
startDay : 0
});
}
if(exists('Ext.picker.Month')) {
Ext.apply(Ext.picker.Month.prototype, {
okText : " OK ",
cancelText : "Cancelar"
});
}
if(exists('Ext.toolbar.Paging')){
Ext.apply(Ext.PagingToolbar.prototype, {
beforePageText : "Página",
afterPageText : "de {0}",
firstText : "Primeira Página",
prevText : "Página Anterior",
nextText : "Pr%oacute;xima Página",
lastText : "Última Página",
refreshText : "Recaregar",
displayMsg : "A mostrar {0} - {1} de {2}",
emptyMsg : 'Sem dados para mostrar'
});
}
if(exists('Ext.form.field.Base')){
Ext.form.field.Base.prototype.invalidText = "O valor deste campo é inválido";
}
if(exists('Ext.form.field.Text')){
Ext.apply(Ext.form.field.Text.prototype, {
minLengthText : "O comprimento mínimo deste campo &eaute; {0}",
maxLengthText : "O comprimento máximo deste campo &eaute; {0}",
blankText : "Este campo é de preenchimento obrigatório",
regexText : "",
emptyText : null
});
}
if(exists('Ext.form.field.Number')){
Ext.apply(Ext.form.field.Number.prototype, {
minText : "O valor mínimo deste campo &eaute; {0}",
maxText : "O valor máximo deste campo &eaute; {0}",
nanText : "{0} não é um numero"
});
}
if(exists('Ext.form.field.Date')){
Ext.apply(Ext.form.field.Date.prototype, {
disabledDaysText : "Desabilitado",
disabledDatesText : "Desabilitado",
minText : "A data deste campo deve ser posterior a {0}",
maxText : "A data deste campo deve ser anterior a {0}",
invalidText : "{0} não é uma data válida - deve estar no seguinte formato{1}",
format : "y/m/d",
altFormats : "m/d/Y|m-d-y|m-d-Y|m/d|m-d|md|mdy|mdY|d|Y-m-d"
});
}
if(exists('Ext.form.field.ComboBox')){
Ext.apply(Ext.form.field.ComboBox.prototype, {
valueNotFoundText : undefined
});
Ext.apply(Ext.form.field.ComboBox.prototype.defaultListConfig, {
loadingText : "A Carregar..."
});
}
if(exists('Ext.form.field.VTypes')){
Ext.apply(Ext.form.field.VTypes, {
emailText : 'Este campo deve ser um endereço de email no formato "[email protected]"',
urlText : 'Este campo deve ser um URL no formato "http:/'+'/www.dominio.com"',
alphaText : 'Este campo deve conter apenas letras e _',
alphanumText : 'Este campo deve conter apenas letras, números e _'
});
}
if(exists('Ext.form.field.HtmlEditor')){
Ext.apply(Ext.form.field.HtmlEditor.prototype, {
createLinkText : 'Indique o endereço do link:',
buttonTips : {
bold : {
title: 'Negrito (Ctrl+B)',
text: 'Transforma o texto em Negrito.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
italic : {
title: 'Itálico (Ctrl+I)',
text: 'Transforma o texto em itálico.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
underline : {
title: 'Sublinhar (Ctrl+U)',
text: 'Sublinha o texto.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
increasefontsize : {
title: 'Aumentar texto',
text: 'Aumenta o tamanho da fonte.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
decreasefontsize : {
title: 'Encolher texto',
text: 'Diminui o tamanho da fonte.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
backcolor : {
title: 'Côr de fundo do texto',
text: 'Altera a côr de fundo do texto.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
forecolor : {
title: 'Côr do texo',
text: 'Altera a aôr do texo.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
justifyleft : {
title: 'ALinhar à esquerda',
text: 'ALinha o texto à esquerda.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
justifycenter : {
title: 'Centrar',
text: 'Centra o texto.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
justifyright : {
title: 'ALinhar à direita',
text: 'ALinha o texto &agravce; direita.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
insertunorderedlist : {
title: 'Lista',
text: 'Inicia uma lista.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
insertorderedlist : {
title: 'Lista Numerada',
text: 'Inicia uma lista numerada.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
createlink : {
title: 'Hyperlink',
text: 'Transforma o texto num hyperlink.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
},
sourceedit : {
title: 'Editar código',
text: 'Alterar para o modo de edição de código.',
cls: Ext.baseCSSPrefix + 'html-editor-tip'
}
}
});
}
if(exists('Ext.form.Basic')){
Ext.form.Basic.prototype.waitTitle = "Por favor espere...";
}
if(exists('Ext.grid.header.Container')){
Ext.apply(Ext.grid.header.Container.prototype, {
sortAscText : "Ordenação Crescente",
sortDescText : "Ordenação Decrescente",
lockText : "Fixar Coluna",
unlockText : "Libertar Coluna",
columnsText : "Colunas"
});
}
if(exists('Ext.grid.GroupingFeature')){
Ext.apply(Ext.grid.GroupingFeature.prototype, {
emptyGroupText : '(Nenhum)',
groupByText : 'Agrupar por este campo',
showGroupsText : 'Mostrar nos Grupos'
});
}
if(exists('Ext.grid.PropertyColumnModel')){
Ext.apply(Ext.grid.PropertyColumnModel.prototype, {
nameText : "Nome",
valueText : "Valor",
dateFormat : "Y/j/m"
});
}
});
|
"Fevereiro",
"Março",
|
random_line_split
|
ext-lang-pt_PT.js
|
/*!
* Extensible 1.6.0-rc.1
* Copyright(c) 2010-2013 Extensible, LLC
* [email protected]
* http://ext.ensible.com
*/
/**
* Portuguese/Portugal (pt_PT) Translation
* by Nuno Franco da Costa - francodacosta.com
* translated from ext-lang-en.js
*/
Ext.onReady(function() {
var cm = Ext.ClassManager,
exists = Ext.Function.bind(cm.get, cm);
if(Ext.Updater) {
Ext.Updater.defaults.indicatorText = '<div class="loading-indicator">A carregar...</div>';
}
if(exists('Ext.view.View')){
Ext.view.View.prototype.emptyText = "";
}
if(exists('Ext.grid.Panel')){
Ext.grid.Panel.prototype.ddText = "{0} linha(s) seleccionada(s)";
}
if(Ext.TabPanelItem){
Ext.TabPanelItem.prototype.closeText = "Fechar aba";
}
if(Ext.LoadMask)
|
if(Ext.Date) {
Ext.Date.monthNames = [
"Janeiro",
"Fevereiro",
"Março",
"Abril",
"Maio",
"Junho",
"Julho",
"Agosto",
"Setembro",
"Outubro",
"Novembro",
"Dezembro"
];
Ext.Date.getShortMonthName = function(month) {
return Ext.Date.monthNames[month].substring(0, 3);
};
Ext.Date.monthNumbers = {
Jan : 0,
Feb : 1,
Mar : 2,
Apr : 3,
May : 4,
Jun : 5,
Jul : 6,
Aug : 7,
Sep : 8,
Oct : 9,
Nov : 10,
Dec : 11
};
Ext.Date.getMonthNumber = function(name) {
return Ext.Date.monthNumbers[name.substring(0, 1).toUpperCase() + name.substring(1, 3).toLowerCase()];
};
// Day names, Sunday first (index 0 matches Date.getDay()).
// Fixed pt_PT spelling: "Sábado" (was unaccented "Sabado", inconsistent
// with the accented names elsewhere in this array, e.g. "Terça").
Ext.Date.dayNames = [
    "Domingo",
    "Segunda",
    "Terça",
    "Quarta",
    "Quinta",
    "Sexta",
    "Sábado"
];
Ext.Date.getShortDayName = function(day) {
return Ext.Date.dayNames[day].substring(0, 3);
};
}
if(Ext.MessageBox){
Ext.MessageBox.buttonText = {
ok : "OK",
cancel : "Cancelar",
yes : "Sim",
no : "Não"
};
}
if(exists('Ext.util.Format')){
Ext.apply(Ext.util.Format, {
thousandSeparator: '.',
decimalSeparator: ',',
currencySign: '\u20ac', // Portugese Euro
dateFormat: 'Y/m/d'
});
}
// Date picker locale overrides.
// Fixed typo "avaçar" -> "avançar" in monthYearText.
if(exists('Ext.picker.Date')){
    Ext.apply(Ext.picker.Date.prototype, {
        todayText : "Hoje",
        minText : "A data é anterior ao mínimo definido",
        maxText : "A data é posterior ao máximo definido",
        disabledDaysText : "",
        disabledDatesText : "",
        monthNames : Ext.Date.monthNames,
        dayNames : Ext.Date.dayNames,
        nextText : 'Mês Seguinte (Control+Right)',
        prevText : 'Mês Anterior (Control+Left)',
        monthYearText : 'Escolha um mês (Control+Up/Down avançar/recuar anos)',
        todayTip : "{0} (barra de espaço)",
        format : "y/m/d",
        startDay : 0
    });
}
if(exists('Ext.picker.Month')) {
Ext.apply(Ext.picker.Month.prototype, {
okText : " OK ",
cancelText : "Cancelar"
});
}
// Paging toolbar locale overrides.
// Fixed the mangled HTML entity "Pr%oacute;xima" -> literal "Próxima"
// (this file uses literal accented characters, e.g. "Última Página"),
// and the typo "Recaregar" -> "Recarregar".
if(exists('Ext.toolbar.Paging')){
    Ext.apply(Ext.PagingToolbar.prototype, {
        beforePageText : "Página",
        afterPageText : "de {0}",
        firstText : "Primeira Página",
        prevText : "Página Anterior",
        nextText : "Próxima Página",
        lastText : "Última Página",
        refreshText : "Recarregar",
        displayMsg : "A mostrar {0} - {1} de {2}",
        emptyMsg : 'Sem dados para mostrar'
    });
}
if(exists('Ext.form.field.Base')){
Ext.form.field.Base.prototype.invalidText = "O valor deste campo é inválido";
}
// Text field validation messages.
// Fixed the broken entity "&eaute;" (malformed "&eacute;") -> literal "é",
// matching blankText which already uses the literal character.
if(exists('Ext.form.field.Text')){
    Ext.apply(Ext.form.field.Text.prototype, {
        minLengthText : "O comprimento mínimo deste campo é {0}",
        maxLengthText : "O comprimento máximo deste campo é {0}",
        blankText : "Este campo é de preenchimento obrigatório",
        regexText : "",
        emptyText : null
    });
}
// Number field validation messages.
// Fixed the broken entity "&eaute;" -> "é" and the missing accent in
// "numero" -> "número".
if(exists('Ext.form.field.Number')){
    Ext.apply(Ext.form.field.Number.prototype, {
        minText : "O valor mínimo deste campo é {0}",
        maxText : "O valor máximo deste campo é {0}",
        nanText : "{0} não é um número"
    });
}
// Date field validation messages.
// Fixed invalidText, which ran the format placeholder into the preceding
// word ("formato{1}" -> "formato {1}").
if(exists('Ext.form.field.Date')){
    Ext.apply(Ext.form.field.Date.prototype, {
        disabledDaysText : "Desabilitado",
        disabledDatesText : "Desabilitado",
        minText : "A data deste campo deve ser posterior a {0}",
        maxText : "A data deste campo deve ser anterior a {0}",
        invalidText : "{0} não é uma data válida - deve estar no seguinte formato {1}",
        format : "y/m/d",
        altFormats : "m/d/Y|m-d-y|m-d-Y|m/d|m-d|md|mdy|mdY|d|Y-m-d"
    });
}
if(exists('Ext.form.field.ComboBox')){
Ext.apply(Ext.form.field.ComboBox.prototype, {
valueNotFoundText : undefined
});
Ext.apply(Ext.form.field.ComboBox.prototype.defaultListConfig, {
loadingText : "A Carregar..."
});
}
if(exists('Ext.form.field.VTypes')){
Ext.apply(Ext.form.field.VTypes, {
emailText : 'Este campo deve ser um endereço de email no formato "[email protected]"',
urlText : 'Este campo deve ser um URL no formato "http:/'+'/www.dominio.com"',
alphaText : 'Este campo deve conter apenas letras e _',
alphanumText : 'Este campo deve conter apenas letras, números e _'
});
}
// HTML editor button tooltips.
// Fixes: forecolor "Côr do texo"/"Altera a aôr do texo." -> "Côr do
// texto"/"Altera a côr do texto." (matching backcolor's wording; "côr" is
// the spelling used consistently in this file); the broken entity
// "&agravce;" (malformed "&agrave;") -> literal "à", as used by
// justifyleft; and the mid-word capitals "ALinhar"/"ALinha" -> "Alinhar"/"Alinha".
if(exists('Ext.form.field.HtmlEditor')){
    Ext.apply(Ext.form.field.HtmlEditor.prototype, {
        createLinkText : 'Indique o endereço do link:',
        buttonTips : {
            bold : {
                title: 'Negrito (Ctrl+B)',
                text: 'Transforma o texto em Negrito.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            italic : {
                title: 'Itálico (Ctrl+I)',
                text: 'Transforma o texto em itálico.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            underline : {
                title: 'Sublinhar (Ctrl+U)',
                text: 'Sublinha o texto.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            increasefontsize : {
                title: 'Aumentar texto',
                text: 'Aumenta o tamanho da fonte.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            decreasefontsize : {
                title: 'Encolher texto',
                text: 'Diminui o tamanho da fonte.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            backcolor : {
                title: 'Côr de fundo do texto',
                text: 'Altera a côr de fundo do texto.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            forecolor : {
                title: 'Côr do texto',
                text: 'Altera a côr do texto.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            justifyleft : {
                title: 'Alinhar à esquerda',
                text: 'Alinha o texto à esquerda.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            justifycenter : {
                title: 'Centrar',
                text: 'Centra o texto.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            justifyright : {
                title: 'Alinhar à direita',
                text: 'Alinha o texto à direita.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            insertunorderedlist : {
                title: 'Lista',
                text: 'Inicia uma lista.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            insertorderedlist : {
                title: 'Lista Numerada',
                text: 'Inicia uma lista numerada.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            createlink : {
                title: 'Hyperlink',
                text: 'Transforma o texto num hyperlink.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            },
            sourceedit : {
                title: 'Editar código',
                text: 'Alterar para o modo de edição de código.',
                cls: Ext.baseCSSPrefix + 'html-editor-tip'
            }
        }
    });
}
if(exists('Ext.form.Basic')){
Ext.form.Basic.prototype.waitTitle = "Por favor espere...";
}
if(exists('Ext.grid.header.Container')){
Ext.apply(Ext.grid.header.Container.prototype, {
sortAscText : "Ordenação Crescente",
sortDescText : "Ordenação Decrescente",
lockText : "Fixar Coluna",
unlockText : "Libertar Coluna",
columnsText : "Colunas"
});
}
if(exists('Ext.grid.GroupingFeature')){
Ext.apply(Ext.grid.GroupingFeature.prototype, {
emptyGroupText : '(Nenhum)',
groupByText : 'Agrupar por este campo',
showGroupsText : 'Mostrar nos Grupos'
});
}
if(exists('Ext.grid.PropertyColumnModel')){
Ext.apply(Ext.grid.PropertyColumnModel.prototype, {
nameText : "Nome",
valueText : "Valor",
dateFormat : "Y/j/m"
});
}
});
|
{
Ext.LoadMask.prototype.msg = "A carregar...";
}
|
conditional_block
|
ConvergencePlot.py
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import matplotlib.pyplot as plt
import glob
import collections
import pandas
import numpy as np
class ConvergencePlot(object):
"""
A tool for making convergence plots.
Args:
x[np.array]: The x data of the graph (e.g., dofs)
y[np.array]: The y data of the graph (e.g., L2_error)
Key, value Options:
xlabel[str]: The label for the x-axis
ylabel[str]: The label for the y-axis
"""
Line = collections.namedtuple('Line', 'x y label')
def __init__(self, xlabel='x', ylabel='y', fontsize=12, fit=True):
self._figure = plt.figure(figsize=(10,6), facecolor='w')
self._axes = plt.gca()
self._axes.set_yscale('log')
self._axes.set_xscale('log')
# Add axis labels
plt.xlabel(xlabel, fontsize=fontsize)
plt.ylabel(ylabel, fontsize=fontsize)
# Adjust tick mark fonts
for tick in self._axes.xaxis.get_major_ticks() + self._axes.yaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
# Apply grid marks
plt.grid(True, which='both', color=[0.8]*3)
self.label_to_slope = {}
self.label_to_intercept = {}
def plot(self, df, label=None, title=None, num_fitted_points=None, slope_precision=3, **kwargs):
num_y_columns = len(df.columns) - 1
if label:
if num_y_columns > 1:
if not isinstance(label, list):
raise TypeError("For multiple y-data label must be a list")
if isinstance(label, list) and num_y_columns != len(label):
raise IOError("The length of the label and the number of y columns must be the same")
if not isinstance(label, list):
label = [label]
x = df[df.columns[0]]
lines = []
for i in range(1,len(df.columns)):
y = df[df.columns[i]]
if label is None:
this_label = 'line-{}'.format(len(lines))
else:
this_label = label[i-1]
if num_fitted_points is not None:
coeffs = self._fit(x[-num_fitted_points:], y[-num_fitted_points:])
else:
coeffs = self._fit(x, y)
slope = coeffs[0]
intercept = coeffs[1]
self.label_to_slope.update({this_label:slope})
self.label_to_intercept.update({this_label:intercept})
this_label = '{}: {:.{precision}f}'.format(this_label, slope, precision=slope_precision)
lines.append(self._axes.plot(x, y, label=this_label, **kwargs)[0])
if title:
self._axes.set_title(title)
self._axes.legend()
return lines
def
|
(self, title):
self._axes.set_title(title)
def _fit(self, x, y):
"""
Apply the fit and report the slope.
Key, value Options:
x[float]: The x-position in data coordinates.
y[float]: The y-position in data coordinates.
"""
# Perform fit
coefficients = np.polyfit(np.log10(x), np.log10(y), 1)
return coefficients
def save(self, filename):
"""
Save figure to a file.
Args:
filename[str]: The destination file.
"""
plt.savefig(filename)
def show(self):
    """
    Display the plot.

    Delegates to matplotlib's pyplot.show(); with non-interactive backends
    this typically blocks until the figure window is closed.
    """
    plt.show()
|
set_title
|
identifier_name
|
ConvergencePlot.py
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import matplotlib.pyplot as plt
import glob
import collections
import pandas
import numpy as np
class ConvergencePlot(object):
"""
A tool for making convergence plots.
Args:
x[np.array]: The x data of the graph (e.g., dofs)
y[np.array]: The y data of the graph (e.g., L2_error)
Key, value Options:
xlabel[str]: The label for the x-axis
ylabel[str]: The label for the y-axis
"""
Line = collections.namedtuple('Line', 'x y label')
def __init__(self, xlabel='x', ylabel='y', fontsize=12, fit=True):
|
def plot(self, df, label=None, title=None, num_fitted_points=None, slope_precision=3, **kwargs):
num_y_columns = len(df.columns) - 1
if label:
if num_y_columns > 1:
if not isinstance(label, list):
raise TypeError("For multiple y-data label must be a list")
if isinstance(label, list) and num_y_columns != len(label):
raise IOError("The length of the label and the number of y columns must be the same")
if not isinstance(label, list):
label = [label]
x = df[df.columns[0]]
lines = []
for i in range(1,len(df.columns)):
y = df[df.columns[i]]
if label is None:
this_label = 'line-{}'.format(len(lines))
else:
this_label = label[i-1]
if num_fitted_points is not None:
coeffs = self._fit(x[-num_fitted_points:], y[-num_fitted_points:])
else:
coeffs = self._fit(x, y)
slope = coeffs[0]
intercept = coeffs[1]
self.label_to_slope.update({this_label:slope})
self.label_to_intercept.update({this_label:intercept})
this_label = '{}: {:.{precision}f}'.format(this_label, slope, precision=slope_precision)
lines.append(self._axes.plot(x, y, label=this_label, **kwargs)[0])
if title:
self._axes.set_title(title)
self._axes.legend()
return lines
def set_title(self, title):
self._axes.set_title(title)
def _fit(self, x, y):
"""
Apply the fit and report the slope.
Key, value Options:
x[float]: The x-position in data coordinates.
y[float]: The y-position in data coordinates.
"""
# Perform fit
coefficients = np.polyfit(np.log10(x), np.log10(y), 1)
return coefficients
def save(self, filename):
"""
Save figure to a file.
Args:
filename[str]: The destination file.
"""
plt.savefig(filename)
def show(self):
"""
Display the plot.
"""
plt.show()
|
self._figure = plt.figure(figsize=(10,6), facecolor='w')
self._axes = plt.gca()
self._axes.set_yscale('log')
self._axes.set_xscale('log')
# Add axis labels
plt.xlabel(xlabel, fontsize=fontsize)
plt.ylabel(ylabel, fontsize=fontsize)
# Adjust tick mark fonts
for tick in self._axes.xaxis.get_major_ticks() + self._axes.yaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
# Apply grid marks
plt.grid(True, which='both', color=[0.8]*3)
self.label_to_slope = {}
self.label_to_intercept = {}
|
identifier_body
|
ConvergencePlot.py
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import matplotlib.pyplot as plt
import glob
import collections
import pandas
import numpy as np
class ConvergencePlot(object):
"""
A tool for making convergence plots.
Args:
x[np.array]: The x data of the graph (e.g., dofs)
y[np.array]: The y data of the graph (e.g., L2_error)
Key, value Options:
xlabel[str]: The label for the x-axis
ylabel[str]: The label for the y-axis
"""
Line = collections.namedtuple('Line', 'x y label')
def __init__(self, xlabel='x', ylabel='y', fontsize=12, fit=True):
self._figure = plt.figure(figsize=(10,6), facecolor='w')
self._axes = plt.gca()
self._axes.set_yscale('log')
self._axes.set_xscale('log')
# Add axis labels
plt.xlabel(xlabel, fontsize=fontsize)
plt.ylabel(ylabel, fontsize=fontsize)
# Adjust tick mark fonts
for tick in self._axes.xaxis.get_major_ticks() + self._axes.yaxis.get_major_ticks():
|
# Apply grid marks
plt.grid(True, which='both', color=[0.8]*3)
self.label_to_slope = {}
self.label_to_intercept = {}
def plot(self, df, label=None, title=None, num_fitted_points=None, slope_precision=3, **kwargs):
num_y_columns = len(df.columns) - 1
if label:
if num_y_columns > 1:
if not isinstance(label, list):
raise TypeError("For multiple y-data label must be a list")
if isinstance(label, list) and num_y_columns != len(label):
raise IOError("The length of the label and the number of y columns must be the same")
if not isinstance(label, list):
label = [label]
x = df[df.columns[0]]
lines = []
for i in range(1,len(df.columns)):
y = df[df.columns[i]]
if label is None:
this_label = 'line-{}'.format(len(lines))
else:
this_label = label[i-1]
if num_fitted_points is not None:
coeffs = self._fit(x[-num_fitted_points:], y[-num_fitted_points:])
else:
coeffs = self._fit(x, y)
slope = coeffs[0]
intercept = coeffs[1]
self.label_to_slope.update({this_label:slope})
self.label_to_intercept.update({this_label:intercept})
this_label = '{}: {:.{precision}f}'.format(this_label, slope, precision=slope_precision)
lines.append(self._axes.plot(x, y, label=this_label, **kwargs)[0])
if title:
self._axes.set_title(title)
self._axes.legend()
return lines
def set_title(self, title):
self._axes.set_title(title)
def _fit(self, x, y):
"""
Apply the fit and report the slope.
Key, value Options:
x[float]: The x-position in data coordinates.
y[float]: The y-position in data coordinates.
"""
# Perform fit
coefficients = np.polyfit(np.log10(x), np.log10(y), 1)
return coefficients
def save(self, filename):
"""
Save figure to a file.
Args:
filename[str]: The destination file.
"""
plt.savefig(filename)
def show(self):
"""
Display the plot.
"""
plt.show()
|
tick.label.set_fontsize(fontsize)
|
conditional_block
|
ConvergencePlot.py
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import matplotlib.pyplot as plt
import glob
import collections
import pandas
import numpy as np
class ConvergencePlot(object):
"""
A tool for making convergence plots.
Args:
x[np.array]: The x data of the graph (e.g., dofs)
y[np.array]: The y data of the graph (e.g., L2_error)
Key, value Options:
xlabel[str]: The label for the x-axis
ylabel[str]: The label for the y-axis
"""
Line = collections.namedtuple('Line', 'x y label')
def __init__(self, xlabel='x', ylabel='y', fontsize=12, fit=True):
self._figure = plt.figure(figsize=(10,6), facecolor='w')
self._axes = plt.gca()
self._axes.set_yscale('log')
self._axes.set_xscale('log')
# Add axis labels
plt.xlabel(xlabel, fontsize=fontsize)
plt.ylabel(ylabel, fontsize=fontsize)
# Adjust tick mark fonts
for tick in self._axes.xaxis.get_major_ticks() + self._axes.yaxis.get_major_ticks():
tick.label.set_fontsize(fontsize)
# Apply grid marks
plt.grid(True, which='both', color=[0.8]*3)
self.label_to_slope = {}
self.label_to_intercept = {}
def plot(self, df, label=None, title=None, num_fitted_points=None, slope_precision=3, **kwargs):
    """
    Plot each y column of *df* (all columns after the first) against the
    first column, annotating every legend entry with its fitted log-log slope.

    Args:
        df[pandas.DataFrame]: First column is x data; remaining columns are y series.
        label[str|list]: Legend label(s), one per y column.
        title[str]: Optional axes title.
        num_fitted_points[int]: If given, fit only the last n points of each series.
        slope_precision[int]: Decimal places of the slope shown in the legend.

    Returns:
        list: The matplotlib line objects that were added.
    """
    n_series = len(df.columns) - 1
    if label:
        if n_series > 1 and not isinstance(label, list):
            raise TypeError("For multiple y-data label must be a list")
        if isinstance(label, list) and n_series != len(label):
            raise IOError("The length of the label and the number of y columns must be the same")
        if not isinstance(label, list):
            label = [label]

    x = df[df.columns[0]]
    lines = []
    for idx, column in enumerate(df.columns[1:]):
        y = df[column]
        this_label = 'line-{}'.format(idx) if label is None else label[idx]

        # Fit either the full series or just its tail.
        if num_fitted_points is None:
            coeffs = self._fit(x, y)
        else:
            coeffs = self._fit(x[-num_fitted_points:], y[-num_fitted_points:])
        slope, intercept = coeffs[0], coeffs[1]

        self.label_to_slope[this_label] = slope
        self.label_to_intercept[this_label] = intercept

        legend_text = '{}: {:.{precision}f}'.format(this_label, slope, precision=slope_precision)
        lines.append(self._axes.plot(x, y, label=legend_text, **kwargs)[0])

    if title:
        self._axes.set_title(title)
    self._axes.legend()
    return lines
def set_title(self, title):
    """Set the title displayed above the axes."""
    self._axes.set_title(label=title)
def _fit(self, x, y):
    """
    Least-squares linear fit in log10 space.

    Args:
        x[array-like]: The x data (e.g., dofs); values must be positive.
        y[array-like]: The y data (e.g., L2 error); values must be positive.

    Returns:
        numpy.ndarray: [slope, intercept] of log10(y) versus log10(x).
    """
    return np.polyfit(np.log10(x), np.log10(y), 1)
def save(self, filename, **kwargs):
    """
    Save figure to a file.

    Args:
        filename[str]: The destination file.
        **kwargs: Additional options forwarded to matplotlib.pyplot.savefig
                  (e.g., dpi, bbox_inches).  Backward compatible: existing
                  single-argument callers are unaffected.
    """
    plt.savefig(filename, **kwargs)
def show(self):
"""
|
Display the plot.
"""
plt.show()
|
random_line_split
|
|
Gruntfile.js
|
/**
* @file
*/
module.exports = function(grunt) {
// This is where we configure each task that we'd like to run.
grunt.initConfig({
pkg: grunt.file.readJSON('package.json'),
watch: {
// This is where we set up all the tasks we'd like grunt to watch for changes.
images: {
files: ['**/*.{png,jpg,gif}'],
tasks: ['imagemin'],
options: {
spawn: false,
}
},
vector: {
files: ['**/*.svg'],
tasks: ['svgmin'],
options: {
spawn: false,
}
},
css: {
files: ['**/*.scss'],
tasks: ['sass'],
options: {
interrupt: true
}
},
twig: {
files: ['**/*.html.twig'],
tasks: ['svgmin', 'imagemin', 'sass', 'drush:ccall']
}
},
imagemin: {
// This will optimize all of our images for the web.
dynamic: {
files: [{
expand: true,
cwd: 'img/source/',
src: ['{,*/}*.{png,jpg,gif}' ],
dest: 'img/optimized/'
}]
}
},
svgmin: {
options: {
plugins: [{
removeViewBox: false
}, {
removeUselessStrokeAndFill: false
}]
},
dist: {
files: [{
expand: true,
cwd: 'img/source/',
src: ['{,*/}*.svg' ],
dest: 'img/optimized/'
}]
}
},
sass: {
// This will compile all of our sass files
// Additional configuration options can be found at https://github.com/sindresorhus/grunt-sass
options: {
includePaths: [
"node_modules/bourbon/core",
"node_modules/bourbon-neat/core",
"node_modules/neat-omega",
"node_modules"
],
sourceMap: true,
// This controls the compiled css and can be changed to nested, compact or compressed.
outputStyle: 'expanded',
precision: 10
},
|
'css/layout.css': 'scss/layout/layout.scss',
'css/states.css': 'scss/states/states.scss',
'css/theme.css': 'scss/theme/theme.scss',
'css/print.css': 'scss/theme/print.scss'
}
}
},
drush: {
ccall: {
args: ['cache-rebuild', 'all']
}
},
browserSync: {
dev: {
bsFiles: {
src : [
'css/**/*.css',
'templates/**/*.twig',
'img/optimized/**/*.{png,jpg,gif,svg}',
'js/build/**/*.js',
'*.theme'
]
},
options: {
watchTask: true,
// reloadDelay: 1000,
// reloadDebounce: 500,
reloadOnRestart: true,
logConnections: true,
injectChanges: false // Depends on enabling the link_css module
}
}
},
availabletasks: {
tasks: {
options: {
filter: "include",
tasks: [
'browserSync', 'imagemin', 'sass', 'svgmin', 'watch', 'devmode'
]
}
}
}
});
// Load every Grunt plug-in used by the configuration above.
grunt.loadNpmTasks('grunt-contrib-uglify');
grunt.loadNpmTasks('grunt-contrib-imagemin');
grunt.loadNpmTasks('grunt-svgmin');
grunt.loadNpmTasks('grunt-sass');
grunt.loadNpmTasks('grunt-contrib-watch');
grunt.loadNpmTasks('grunt-browser-sync');
grunt.loadNpmTasks('grunt-available-tasks');
grunt.loadNpmTasks('grunt-drush');
// My tasks.
// 'devmode' starts BrowserSync and then watches sources for changes.
grunt.registerTask('devmode', "Watch and BrowserSync all in one.", ['browserSync', 'watch']);
// This is where we tell Grunt what to do when we type "grunt" into the terminal.
// Note: if you'd like to run any of the tasks individually you can do so by typing 'grunt mytaskname'; alternatively
// you can type 'grunt watch' to automatically track your files for changes.
grunt.registerTask('default', ['availabletasks']);
};
|
dist: {
files: {
// Compiled styles.
'css/components.css': 'scss/components/components.scss',
|
random_line_split
|
__init__.py
|
# Copyright (C) 2021 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import functools
import os
import sys
import time
from contextlib import contextmanager
from ycmd.tests.test_utils import ( BuildRequest,
ClearCompletionsCache,
IgnoreExtraConfOutsideTestsFolder,
IsolatedApp,
WaitUntilCompleterServerReady,
StopCompleterServer,
SetUpApp )
shared_app = None
# map of 'app' to filepaths
shared_filepaths = {}
shared_log_indexes = {}
def PathToTestFile( *args ):
  """Return an absolute path under this package's 'testdata' directory."""
  this_dir = os.path.dirname( os.path.abspath( __file__ ) )
  return os.path.join( this_dir, 'testdata', *args )
def setUpModule():
  # Build the shared webtest application once for the whole test module;
  # tests reuse it via the SharedYcmd decorator.
  global shared_app
  shared_app = SetUpApp()
def tearDownModule():
  # Stop every OmniSharp server that was started for the shared app;
  # shared_filepaths maps an app to the solution files it opened.
  global shared_app, shared_filepaths
  for filepath in shared_filepaths.get( shared_app, [] ):
    StopCompleterServer( shared_app, 'cs', filepath )
def SharedYcmd( test ):
global shared_app
@functools.wraps( test )
def Wrapper( test_case_instance, *args, **kwargs ):
|
return Wrapper
def IsolatedYcmd( custom_options = None ):
  """Decorator for tests that need their own ycmd application instance.

  The wrapped test receives a fresh IsolatedApp as its second argument, and
  any OmniSharp servers started for that app are stopped afterwards.

  The default is None rather than a mutable ``{}``: a shared default dict is
  a single object reused by every decorated test, so any mutation would leak
  between tests.  An empty dict is substituted per use instead.
  """
  def Decorator( test ):
    @functools.wraps( test )
    def Wrapper( test_case_instance, *args, **kwargs ):
      with IsolatedApp( custom_options or {} ) as app:
        try:
          test( test_case_instance, app, *args, **kwargs )
        finally:
          global shared_filepaths
          for filepath in shared_filepaths.get( app, [] ):
            StopCompleterServer( app, 'cs', filepath )
    return Wrapper
  return Decorator
def GetDebugInfo( app, filepath ):
  """Return the /debug_info response for |filepath| opened as a C# buffer."""
  return app.post_json(
    '/debug_info',
    BuildRequest( filetype = 'cs', filepath = filepath ) ).json
def ReadFile( filepath, fileposition ):
  """Read |filepath| (UTF-8) from |fileposition| to EOF.

  Returns a tuple of ( contents, end position ); the end position can be
  fed back in later to read only newly appended text."""
  f = open( filepath, encoding = 'utf8' )
  try:
    if fileposition:
      f.seek( fileposition )
    contents = f.read()
    return contents, f.tell()
  finally:
    f.close()
def GetDiagnostics( app, filepath ):
  """Send a FileReadyToParse event for |filepath| and return the response.

  This also causes the C# completer to start an OmniSharp server for the
  file's solution if one is not already running."""
  contents, _ = ReadFile( filepath, 0 )
  event = BuildRequest( filepath = filepath,
                        event_name = 'FileReadyToParse',
                        filetype = 'cs',
                        contents = contents )
  return app.post_json( '/event_notification', event ).json
@contextmanager
def WrapOmniSharpServer( app, filepath ):
  """Context manager ensuring an OmniSharp server is running for |filepath|.

  On first use of |filepath| with |app|, sends a FileReadyToParse event
  (which starts the server) and waits until the completer is responsive.
  On exit, any output appended to the server's log files during the block
  is echoed to stdout to aid debugging of failed tests."""
  global shared_filepaths
  global shared_log_indexes
  if filepath not in shared_filepaths.setdefault( app, [] ):
    GetDiagnostics( app, filepath )
    shared_filepaths[ app ].append( filepath )
    WaitUntilCsCompleterIsReady( app, filepath )
  logfiles = []
  response = GetDebugInfo( app, filepath )
  for server in response[ 'completer' ][ 'servers' ]:
    logfiles.extend( server[ 'logfiles' ] )
  try:
    yield
  finally:
    # Print only the portion of each log written since it was last read;
    # shared_log_indexes remembers the previous end-of-file position.
    for logfile in logfiles:
      if os.path.isfile( logfile ):
        log_content, log_end_position = ReadFile(
          logfile, shared_log_indexes.get( logfile, 0 ) )
        shared_log_indexes[ logfile ] = log_end_position
        sys.stdout.write( f'Logfile { logfile }:\n\n' )
        sys.stdout.write( log_content )
        sys.stdout.write( '\n' )
def WaitUntilCsCompleterIsReady( app, filepath ):
  """Block until the C# completer for |filepath| reliably returns diagnostics.

  Polls up to 40 times, 0.5 s apart, requiring three consecutive successful
  diagnostic responses before declaring the completer ready."""
  WaitUntilCompleterServerReady( app, 'cs' )
  # Omnisharp isn't ready when it says it is, so wait until Omnisharp returns
  # at least one diagnostic multiple times.
  success_count = 0
  for reraise_error in [ False ] * 39 + [ True ]:
    try:
      if len( GetDiagnostics( app, filepath ) ) == 0:
        raise RuntimeError( "No diagnostic" )
      success_count += 1
      if success_count > 2:
        break
    except Exception:
      success_count = 0
      # On the final (40th) attempt, propagate the failure instead of retrying.
      if reraise_error:
        raise
      time.sleep( .5 )
  else:
    # for/else: the loop ran out of attempts without ever breaking (i.e. the
    # last attempt succeeded but three-in-a-row was never reached).
    raise RuntimeError( "Never was ready" )
|
ClearCompletionsCache()
with IgnoreExtraConfOutsideTestsFolder():
return test( test_case_instance, shared_app, *args, **kwargs )
|
identifier_body
|
__init__.py
|
# Copyright (C) 2021 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import functools
import os
import sys
import time
from contextlib import contextmanager
from ycmd.tests.test_utils import ( BuildRequest,
ClearCompletionsCache,
IgnoreExtraConfOutsideTestsFolder,
IsolatedApp,
WaitUntilCompleterServerReady,
StopCompleterServer,
SetUpApp )
shared_app = None
# map of 'app' to filepaths
shared_filepaths = {}
shared_log_indexes = {}
def PathToTestFile( *args ):
dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
return os.path.join( dir_of_current_script, 'testdata', *args )
def setUpModule():
global shared_app
shared_app = SetUpApp()
def tearDownModule():
global shared_app, shared_filepaths
for filepath in shared_filepaths.get( shared_app, [] ):
StopCompleterServer( shared_app, 'cs', filepath )
def SharedYcmd( test ):
  """Decorator for tests that run against the module-wide shared ycmd app.

  The wrapped test receives the shared application as its second argument.
  The completions cache is cleared before each test so results from one
  test cannot leak into the next."""
  global shared_app
  @functools.wraps( test )
  def Wrapper( test_case_instance, *args, **kwargs ):
    ClearCompletionsCache()
    with IgnoreExtraConfOutsideTestsFolder():
      return test( test_case_instance, shared_app, *args, **kwargs )
  return Wrapper
def IsolatedYcmd( custom_options = {} ):
def Decorator( test ):
@functools.wraps( test )
def Wrapper( test_case_instance, *args, **kwargs ):
with IsolatedApp( custom_options ) as app:
try:
test( test_case_instance, app, *args, **kwargs )
finally:
global shared_filepaths
for filepath in shared_filepaths.get( app, [] ):
StopCompleterServer( app, 'cs', filepath )
return Wrapper
return Decorator
def
|
( app, filepath ):
request_data = BuildRequest( filetype = 'cs', filepath = filepath )
return app.post_json( '/debug_info', request_data ).json
def ReadFile( filepath, fileposition ):
with open( filepath, encoding = 'utf8' ) as f:
if fileposition:
f.seek( fileposition )
return f.read(), f.tell()
def GetDiagnostics( app, filepath ):
contents, _ = ReadFile( filepath, 0 )
event_data = BuildRequest( filepath = filepath,
event_name = 'FileReadyToParse',
filetype = 'cs',
contents = contents )
return app.post_json( '/event_notification', event_data ).json
@contextmanager
def WrapOmniSharpServer( app, filepath ):
global shared_filepaths
global shared_log_indexes
if filepath not in shared_filepaths.setdefault( app, [] ):
GetDiagnostics( app, filepath )
shared_filepaths[ app ].append( filepath )
WaitUntilCsCompleterIsReady( app, filepath )
logfiles = []
response = GetDebugInfo( app, filepath )
for server in response[ 'completer' ][ 'servers' ]:
logfiles.extend( server[ 'logfiles' ] )
try:
yield
finally:
for logfile in logfiles:
if os.path.isfile( logfile ):
log_content, log_end_position = ReadFile(
logfile, shared_log_indexes.get( logfile, 0 ) )
shared_log_indexes[ logfile ] = log_end_position
sys.stdout.write( f'Logfile { logfile }:\n\n' )
sys.stdout.write( log_content )
sys.stdout.write( '\n' )
def WaitUntilCsCompleterIsReady( app, filepath ):
WaitUntilCompleterServerReady( app, 'cs' )
# Omnisharp isn't ready when it says it is, so wait until Omnisharp returns
# at least one diagnostic multiple times.
success_count = 0
for reraise_error in [ False ] * 39 + [ True ]:
try:
if len( GetDiagnostics( app, filepath ) ) == 0:
raise RuntimeError( "No diagnostic" )
success_count += 1
if success_count > 2:
break
except Exception:
success_count = 0
if reraise_error:
raise
time.sleep( .5 )
else:
raise RuntimeError( "Never was ready" )
|
GetDebugInfo
|
identifier_name
|
__init__.py
|
# Copyright (C) 2021 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import functools
import os
import sys
import time
from contextlib import contextmanager
from ycmd.tests.test_utils import ( BuildRequest,
ClearCompletionsCache,
IgnoreExtraConfOutsideTestsFolder,
IsolatedApp,
WaitUntilCompleterServerReady,
StopCompleterServer,
SetUpApp )
shared_app = None
# map of 'app' to filepaths
shared_filepaths = {}
shared_log_indexes = {}
def PathToTestFile( *args ):
dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
return os.path.join( dir_of_current_script, 'testdata', *args )
def setUpModule():
global shared_app
shared_app = SetUpApp()
def tearDownModule():
global shared_app, shared_filepaths
for filepath in shared_filepaths.get( shared_app, [] ):
StopCompleterServer( shared_app, 'cs', filepath )
def SharedYcmd( test ):
global shared_app
@functools.wraps( test )
def Wrapper( test_case_instance, *args, **kwargs ):
ClearCompletionsCache()
with IgnoreExtraConfOutsideTestsFolder():
return test( test_case_instance, shared_app, *args, **kwargs )
return Wrapper
def IsolatedYcmd( custom_options = {} ):
def Decorator( test ):
@functools.wraps( test )
def Wrapper( test_case_instance, *args, **kwargs ):
with IsolatedApp( custom_options ) as app:
try:
test( test_case_instance, app, *args, **kwargs )
finally:
global shared_filepaths
for filepath in shared_filepaths.get( app, [] ):
|
return Wrapper
return Decorator
def GetDebugInfo( app, filepath ):
request_data = BuildRequest( filetype = 'cs', filepath = filepath )
return app.post_json( '/debug_info', request_data ).json
def ReadFile( filepath, fileposition ):
with open( filepath, encoding = 'utf8' ) as f:
if fileposition:
f.seek( fileposition )
return f.read(), f.tell()
def GetDiagnostics( app, filepath ):
contents, _ = ReadFile( filepath, 0 )
event_data = BuildRequest( filepath = filepath,
event_name = 'FileReadyToParse',
filetype = 'cs',
contents = contents )
return app.post_json( '/event_notification', event_data ).json
@contextmanager
def WrapOmniSharpServer( app, filepath ):
global shared_filepaths
global shared_log_indexes
if filepath not in shared_filepaths.setdefault( app, [] ):
GetDiagnostics( app, filepath )
shared_filepaths[ app ].append( filepath )
WaitUntilCsCompleterIsReady( app, filepath )
logfiles = []
response = GetDebugInfo( app, filepath )
for server in response[ 'completer' ][ 'servers' ]:
logfiles.extend( server[ 'logfiles' ] )
try:
yield
finally:
for logfile in logfiles:
if os.path.isfile( logfile ):
log_content, log_end_position = ReadFile(
logfile, shared_log_indexes.get( logfile, 0 ) )
shared_log_indexes[ logfile ] = log_end_position
sys.stdout.write( f'Logfile { logfile }:\n\n' )
sys.stdout.write( log_content )
sys.stdout.write( '\n' )
def WaitUntilCsCompleterIsReady( app, filepath ):
WaitUntilCompleterServerReady( app, 'cs' )
# Omnisharp isn't ready when it says it is, so wait until Omnisharp returns
# at least one diagnostic multiple times.
success_count = 0
for reraise_error in [ False ] * 39 + [ True ]:
try:
if len( GetDiagnostics( app, filepath ) ) == 0:
raise RuntimeError( "No diagnostic" )
success_count += 1
if success_count > 2:
break
except Exception:
success_count = 0
if reraise_error:
raise
time.sleep( .5 )
else:
raise RuntimeError( "Never was ready" )
|
StopCompleterServer( app, 'cs', filepath )
|
conditional_block
|
__init__.py
|
# Copyright (C) 2021 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
import functools
import os
import sys
import time
from contextlib import contextmanager
from ycmd.tests.test_utils import ( BuildRequest,
ClearCompletionsCache,
IgnoreExtraConfOutsideTestsFolder,
IsolatedApp,
WaitUntilCompleterServerReady,
StopCompleterServer,
SetUpApp )
shared_app = None
# map of 'app' to filepaths
shared_filepaths = {}
shared_log_indexes = {}
def PathToTestFile( *args ):
dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
return os.path.join( dir_of_current_script, 'testdata', *args )
def setUpModule():
global shared_app
shared_app = SetUpApp()
def tearDownModule():
global shared_app, shared_filepaths
for filepath in shared_filepaths.get( shared_app, [] ):
StopCompleterServer( shared_app, 'cs', filepath )
def SharedYcmd( test ):
global shared_app
@functools.wraps( test )
def Wrapper( test_case_instance, *args, **kwargs ):
ClearCompletionsCache()
with IgnoreExtraConfOutsideTestsFolder():
return test( test_case_instance, shared_app, *args, **kwargs )
return Wrapper
def IsolatedYcmd( custom_options = {} ):
def Decorator( test ):
@functools.wraps( test )
def Wrapper( test_case_instance, *args, **kwargs ):
with IsolatedApp( custom_options ) as app:
try:
test( test_case_instance, app, *args, **kwargs )
finally:
global shared_filepaths
for filepath in shared_filepaths.get( app, [] ):
StopCompleterServer( app, 'cs', filepath )
return Wrapper
return Decorator
def GetDebugInfo( app, filepath ):
request_data = BuildRequest( filetype = 'cs', filepath = filepath )
return app.post_json( '/debug_info', request_data ).json
def ReadFile( filepath, fileposition ):
with open( filepath, encoding = 'utf8' ) as f:
if fileposition:
f.seek( fileposition )
return f.read(), f.tell()
def GetDiagnostics( app, filepath ):
contents, _ = ReadFile( filepath, 0 )
event_data = BuildRequest( filepath = filepath,
event_name = 'FileReadyToParse',
filetype = 'cs',
contents = contents )
return app.post_json( '/event_notification', event_data ).json
@contextmanager
def WrapOmniSharpServer( app, filepath ):
global shared_filepaths
global shared_log_indexes
if filepath not in shared_filepaths.setdefault( app, [] ):
GetDiagnostics( app, filepath )
shared_filepaths[ app ].append( filepath )
WaitUntilCsCompleterIsReady( app, filepath )
logfiles = []
response = GetDebugInfo( app, filepath )
for server in response[ 'completer' ][ 'servers' ]:
logfiles.extend( server[ 'logfiles' ] )
try:
yield
finally:
for logfile in logfiles:
if os.path.isfile( logfile ):
log_content, log_end_position = ReadFile(
logfile, shared_log_indexes.get( logfile, 0 ) )
shared_log_indexes[ logfile ] = log_end_position
sys.stdout.write( f'Logfile { logfile }:\n\n' )
sys.stdout.write( log_content )
sys.stdout.write( '\n' )
def WaitUntilCsCompleterIsReady( app, filepath ):
WaitUntilCompleterServerReady( app, 'cs' )
# Omnisharp isn't ready when it says it is, so wait until Omnisharp returns
# at least one diagnostic multiple times.
success_count = 0
for reraise_error in [ False ] * 39 + [ True ]:
try:
if len( GetDiagnostics( app, filepath ) ) == 0:
raise RuntimeError( "No diagnostic" )
success_count += 1
if success_count > 2:
break
except Exception:
success_count = 0
if reraise_error:
raise
time.sleep( .5 )
else:
raise RuntimeError( "Never was ready" )
|
random_line_split
|
|
object.rs
|
use std::sync::Arc;
use ::types::{Vec3f, Mat4f};
use ::Material;
use ::ray::Ray;
pub struct
|
{
ray: Ray, // Ray that intersected
pub time: f64,
normal: Vec3f,
object: Arc<Object>
}
pub struct ObjectData {
// texture
transformation: Mat4f,
inv_trans: Mat4f
}
pub trait Object {
/// Return whether ray intersected object
fn intersection(&self, ray: &Ray) -> Option<Intersection> {
let internal = self.internal();
// 1. Transform the ray by the inverse transformation
let transformed = ray.transform(internal.inv_trans);
// if let Some(intersection) = self.inters
None
}
/// Return whether point is inside object
fn is_inside(&self, point: &Vec3f) -> bool;
fn internal(&self) -> &ObjectData;
// A world coordinate bounding box computed for the object
// fn bounding_box(&self, transformation: Mat4f) -> AABB;
// fn texture
// fn interior
// fn transformation
// fn children
// pre computed
// trans_inverse
// aabb
}
|
Intersection
|
identifier_name
|
object.rs
|
use std::sync::Arc;
use ::types::{Vec3f, Mat4f};
use ::Material;
use ::ray::Ray;
pub struct Intersection {
ray: Ray, // Ray that intersected
pub time: f64,
normal: Vec3f,
object: Arc<Object>
}
pub struct ObjectData {
// texture
transformation: Mat4f,
inv_trans: Mat4f
}
pub trait Object {
/// Return whether ray intersected object
fn intersection(&self, ray: &Ray) -> Option<Intersection> {
let internal = self.internal();
// 1. Transform the ray by the inverse transformation
let transformed = ray.transform(internal.inv_trans);
// if let Some(intersection) = self.inters
None
}
/// Return whether point is inside object
fn is_inside(&self, point: &Vec3f) -> bool;
fn internal(&self) -> &ObjectData;
// A world coordinate bounding box computed for the object
// fn bounding_box(&self, transformation: Mat4f) -> AABB;
// fn texture
// fn interior
// fn transformation
|
// fn children
// pre computed
// trans_inverse
// aabb
}
|
random_line_split
|
|
BaseBuildTriggerTemplate.tsx
|
import React from 'react';
import { capitalize, get } from 'lodash';
import { Option } from 'react-select';
import { $q } from 'ngimport';
import { IPromise } from 'angular';
import { Observable, Subject } from 'rxjs';
import { IBuild, IBuildInfo, IBuildTrigger, IPipelineCommand } from 'core/domain';
import { ITriggerTemplateComponentProps } from 'core/pipeline/manualExecution/TriggerTemplate';
import { IgorService, BuildServiceType } from 'core/ci';
import { Spinner } from 'core/widgets/spinners/Spinner';
import { buildDisplayName } from 'core/pipeline/executionBuild/buildDisplayName.filter';
import { timestamp } from 'core/utils/timeFormatters';
import { TetheredSelect } from 'core/presentation/TetheredSelect';
import { TextInput } from 'core/presentation';
export interface IBaseBuildTriggerTemplateProps extends ITriggerTemplateComponentProps {
buildTriggerType: BuildServiceType;
optionRenderer?: (build: Option) => JSX.Element;
}
export interface IBaseBuildTriggerTemplateState {
builds?: IBuild[];
buildsLoading?: boolean;
loadError?: boolean;
selectedBuild?: number;
explicitBuild?: boolean;
}
export class BaseBuildTriggerTemplate extends React.Component<
IBaseBuildTriggerTemplateProps,
IBaseBuildTriggerTemplateState
> {
private destroy$ = new Subject();
public static formatLabel(trigger: IBuildTrigger): IPromise<string> {
return $q.when(`(${capitalize(trigger.type)}) ${trigger.master}: ${trigger.job}`);
}
public constructor(props: IBaseBuildTriggerTemplateProps) {
super(props);
this.state = {
builds: [],
buildsLoading: false,
loadError: false,
selectedBuild: 0,
explicitBuild: false,
};
}
private buildLoadSuccess = (allBuilds: IBuild[]) => {
const newState: Partial<IBaseBuildTriggerTemplateState> = {
buildsLoading: false,
};
const trigger = this.props.command.trigger as IBuildTrigger;
newState.builds = (allBuilds || [])
.filter(build => !build.building && build.result === 'SUCCESS')
.sort((a, b) => b.number - a.number);
if (newState.builds.length) {
// default to what is supplied by the trigger if possible; otherwise, use the latest
const defaultSelection = newState.builds.find(b => b.number === trigger.buildNumber) || newState.builds[0];
newState.selectedBuild = defaultSelection.number;
this.updateSelectedBuild(defaultSelection);
}
this.setState(newState);
};
private buildLoadFailure = () => {
this.setState({
buildsLoading: false,
loadError: true,
});
};
private updateSelectedBuild = (item: any) => {
const { updateCommand } = this.props;
updateCommand('extraFields.buildNumber', item.number);
this.setState({ selectedBuild: item.number });
};
private initialize = (command: IPipelineCommand) => {
this.props.updateCommand('triggerInvalid', true);
const trigger = command.trigger as IBuildTrigger;
// These fields will be added to the trigger when the form is submitted
this.props.updateCommand('extraFields', { buildNumber: get(command, 'extraFields.buildNumber', '') });
this.setState({
buildsLoading: true,
loadError: false,
});
if (trigger.buildNumber)
|
// do not re-initialize if the trigger has changed to some other type
if (trigger.type !== this.props.buildTriggerType) {
return;
}
Observable.fromPromise(IgorService.listBuildsForJob(trigger.master, trigger.job))
.takeUntil(this.destroy$)
.subscribe(this.buildLoadSuccess, this.buildLoadFailure);
};
private manuallySpecify = () => {
this.setState({
explicitBuild: true,
});
};
private explicitlyUpdateBuildNumber = (event: React.ChangeEvent<HTMLInputElement>) => {
this.updateSelectedBuild({ number: event.target.value });
};
public componentDidMount() {
this.initialize(this.props.command);
}
public componentWillUnmount(): void {
this.destroy$.next();
}
private handleBuildChanged = (option: Option): void => {
this.updateSelectedBuild({ number: option.number });
};
private optionRenderer = (build: Option) => {
return (
<span style={{ fontSize: '13px' }}>
<strong>Build {build.number} </strong>
{buildDisplayName(build as IBuildInfo)}({timestamp(build.timestamp)})
</span>
);
};
public render() {
const { builds, buildsLoading, loadError, selectedBuild, explicitBuild } = this.state;
const loadingBuilds = (
<div className="form-control-static text-center">
<Spinner size={'small'} />
</div>
);
const errorLoadingBuilds = <div className="col-md-6">Error loading builds!</div>;
const noBuildsFound = (
<div>
<p className="form-control-static">No builds found</p>
</div>
);
return (
<div className="form-group">
<label className="col-md-4 sm-label-right">Build</label>
<div className="col-md-6">
<div>
{explicitBuild ? (
<TextInput
inputClassName="input-sm"
value={this.props.command.extraFields.buildNumber}
onChange={this.explicitlyUpdateBuildNumber}
/>
) : buildsLoading ? (
loadingBuilds
) : loadError ? (
errorLoadingBuilds
) : builds.length <= 0 ? (
noBuildsFound
) : (
<TetheredSelect
options={builds}
valueKey="number"
optionRenderer={this.props.optionRenderer ? this.props.optionRenderer : this.optionRenderer}
clearable={false}
value={selectedBuild}
valueRenderer={this.props.optionRenderer ? this.props.optionRenderer : this.optionRenderer}
onChange={this.handleBuildChanged}
/>
)}
</div>
{!explicitBuild && (
<div className="small" style={{ marginTop: '5px' }}>
<a className="clickable" onClick={this.manuallySpecify}>
Manually specify build
</a>
</div>
)}
</div>
</div>
);
}
}
|
{
this.updateSelectedBuild(trigger.buildInfo);
}
|
conditional_block
|
BaseBuildTriggerTemplate.tsx
|
import React from 'react';
import { capitalize, get } from 'lodash';
import { Option } from 'react-select';
import { $q } from 'ngimport';
import { IPromise } from 'angular';
import { Observable, Subject } from 'rxjs';
import { IBuild, IBuildInfo, IBuildTrigger, IPipelineCommand } from 'core/domain';
import { ITriggerTemplateComponentProps } from 'core/pipeline/manualExecution/TriggerTemplate';
import { IgorService, BuildServiceType } from 'core/ci';
import { Spinner } from 'core/widgets/spinners/Spinner';
import { buildDisplayName } from 'core/pipeline/executionBuild/buildDisplayName.filter';
import { timestamp } from 'core/utils/timeFormatters';
import { TetheredSelect } from 'core/presentation/TetheredSelect';
import { TextInput } from 'core/presentation';
export interface IBaseBuildTriggerTemplateProps extends ITriggerTemplateComponentProps {
buildTriggerType: BuildServiceType;
optionRenderer?: (build: Option) => JSX.Element;
}
export interface IBaseBuildTriggerTemplateState {
builds?: IBuild[];
buildsLoading?: boolean;
loadError?: boolean;
selectedBuild?: number;
explicitBuild?: boolean;
}
export class BaseBuildTriggerTemplate extends React.Component<
IBaseBuildTriggerTemplateProps,
IBaseBuildTriggerTemplateState
> {
private destroy$ = new Subject();
public static formatLabel(trigger: IBuildTrigger): IPromise<string> {
return $q.when(`(${capitalize(trigger.type)}) ${trigger.master}: ${trigger.job}`);
}
public constructor(props: IBaseBuildTriggerTemplateProps) {
super(props);
this.state = {
builds: [],
buildsLoading: false,
loadError: false,
selectedBuild: 0,
explicitBuild: false,
};
}
private buildLoadSuccess = (allBuilds: IBuild[]) => {
const newState: Partial<IBaseBuildTriggerTemplateState> = {
buildsLoading: false,
};
const trigger = this.props.command.trigger as IBuildTrigger;
newState.builds = (allBuilds || [])
.filter(build => !build.building && build.result === 'SUCCESS')
.sort((a, b) => b.number - a.number);
if (newState.builds.length) {
// default to what is supplied by the trigger if possible; otherwise, use the latest
const defaultSelection = newState.builds.find(b => b.number === trigger.buildNumber) || newState.builds[0];
newState.selectedBuild = defaultSelection.number;
this.updateSelectedBuild(defaultSelection);
}
this.setState(newState);
};
private buildLoadFailure = () => {
this.setState({
|
});
};
private updateSelectedBuild = (item: any) => {
const { updateCommand } = this.props;
updateCommand('extraFields.buildNumber', item.number);
this.setState({ selectedBuild: item.number });
};
private initialize = (command: IPipelineCommand) => {
this.props.updateCommand('triggerInvalid', true);
const trigger = command.trigger as IBuildTrigger;
// These fields will be added to the trigger when the form is submitted
this.props.updateCommand('extraFields', { buildNumber: get(command, 'extraFields.buildNumber', '') });
this.setState({
buildsLoading: true,
loadError: false,
});
if (trigger.buildNumber) {
this.updateSelectedBuild(trigger.buildInfo);
}
// do not re-initialize if the trigger has changed to some other type
if (trigger.type !== this.props.buildTriggerType) {
return;
}
Observable.fromPromise(IgorService.listBuildsForJob(trigger.master, trigger.job))
.takeUntil(this.destroy$)
.subscribe(this.buildLoadSuccess, this.buildLoadFailure);
};
private manuallySpecify = () => {
this.setState({
explicitBuild: true,
});
};
private explicitlyUpdateBuildNumber = (event: React.ChangeEvent<HTMLInputElement>) => {
this.updateSelectedBuild({ number: event.target.value });
};
public componentDidMount() {
this.initialize(this.props.command);
}
public componentWillUnmount(): void {
this.destroy$.next();
}
private handleBuildChanged = (option: Option): void => {
this.updateSelectedBuild({ number: option.number });
};
private optionRenderer = (build: Option) => {
return (
<span style={{ fontSize: '13px' }}>
<strong>Build {build.number} </strong>
{buildDisplayName(build as IBuildInfo)}({timestamp(build.timestamp)})
</span>
);
};
public render() {
const { builds, buildsLoading, loadError, selectedBuild, explicitBuild } = this.state;
const loadingBuilds = (
<div className="form-control-static text-center">
<Spinner size={'small'} />
</div>
);
const errorLoadingBuilds = <div className="col-md-6">Error loading builds!</div>;
const noBuildsFound = (
<div>
<p className="form-control-static">No builds found</p>
</div>
);
return (
<div className="form-group">
<label className="col-md-4 sm-label-right">Build</label>
<div className="col-md-6">
<div>
{explicitBuild ? (
<TextInput
inputClassName="input-sm"
value={this.props.command.extraFields.buildNumber}
onChange={this.explicitlyUpdateBuildNumber}
/>
) : buildsLoading ? (
loadingBuilds
) : loadError ? (
errorLoadingBuilds
) : builds.length <= 0 ? (
noBuildsFound
) : (
<TetheredSelect
options={builds}
valueKey="number"
optionRenderer={this.props.optionRenderer ? this.props.optionRenderer : this.optionRenderer}
clearable={false}
value={selectedBuild}
valueRenderer={this.props.optionRenderer ? this.props.optionRenderer : this.optionRenderer}
onChange={this.handleBuildChanged}
/>
)}
</div>
{!explicitBuild && (
<div className="small" style={{ marginTop: '5px' }}>
<a className="clickable" onClick={this.manuallySpecify}>
Manually specify build
</a>
</div>
)}
</div>
</div>
);
}
}
|
buildsLoading: false,
loadError: true,
|
random_line_split
|
BaseBuildTriggerTemplate.tsx
|
import React from 'react';
import { capitalize, get } from 'lodash';
import { Option } from 'react-select';
import { $q } from 'ngimport';
import { IPromise } from 'angular';
import { Observable, Subject } from 'rxjs';
import { IBuild, IBuildInfo, IBuildTrigger, IPipelineCommand } from 'core/domain';
import { ITriggerTemplateComponentProps } from 'core/pipeline/manualExecution/TriggerTemplate';
import { IgorService, BuildServiceType } from 'core/ci';
import { Spinner } from 'core/widgets/spinners/Spinner';
import { buildDisplayName } from 'core/pipeline/executionBuild/buildDisplayName.filter';
import { timestamp } from 'core/utils/timeFormatters';
import { TetheredSelect } from 'core/presentation/TetheredSelect';
import { TextInput } from 'core/presentation';
export interface IBaseBuildTriggerTemplateProps extends ITriggerTemplateComponentProps {
buildTriggerType: BuildServiceType;
optionRenderer?: (build: Option) => JSX.Element;
}
export interface IBaseBuildTriggerTemplateState {
builds?: IBuild[];
buildsLoading?: boolean;
loadError?: boolean;
selectedBuild?: number;
explicitBuild?: boolean;
}
export class BaseBuildTriggerTemplate extends React.Component<
IBaseBuildTriggerTemplateProps,
IBaseBuildTriggerTemplateState
> {
private destroy$ = new Subject();
public static formatLabel(trigger: IBuildTrigger): IPromise<string> {
return $q.when(`(${capitalize(trigger.type)}) ${trigger.master}: ${trigger.job}`);
}
public constructor(props: IBaseBuildTriggerTemplateProps) {
super(props);
this.state = {
builds: [],
buildsLoading: false,
loadError: false,
selectedBuild: 0,
explicitBuild: false,
};
}
private buildLoadSuccess = (allBuilds: IBuild[]) => {
const newState: Partial<IBaseBuildTriggerTemplateState> = {
buildsLoading: false,
};
const trigger = this.props.command.trigger as IBuildTrigger;
newState.builds = (allBuilds || [])
.filter(build => !build.building && build.result === 'SUCCESS')
.sort((a, b) => b.number - a.number);
if (newState.builds.length) {
// default to what is supplied by the trigger if possible; otherwise, use the latest
const defaultSelection = newState.builds.find(b => b.number === trigger.buildNumber) || newState.builds[0];
newState.selectedBuild = defaultSelection.number;
this.updateSelectedBuild(defaultSelection);
}
this.setState(newState);
};
private buildLoadFailure = () => {
this.setState({
buildsLoading: false,
loadError: true,
});
};
private updateSelectedBuild = (item: any) => {
const { updateCommand } = this.props;
updateCommand('extraFields.buildNumber', item.number);
this.setState({ selectedBuild: item.number });
};
private initialize = (command: IPipelineCommand) => {
this.props.updateCommand('triggerInvalid', true);
const trigger = command.trigger as IBuildTrigger;
// These fields will be added to the trigger when the form is submitted
this.props.updateCommand('extraFields', { buildNumber: get(command, 'extraFields.buildNumber', '') });
this.setState({
buildsLoading: true,
loadError: false,
});
if (trigger.buildNumber) {
this.updateSelectedBuild(trigger.buildInfo);
}
// do not re-initialize if the trigger has changed to some other type
if (trigger.type !== this.props.buildTriggerType) {
return;
}
Observable.fromPromise(IgorService.listBuildsForJob(trigger.master, trigger.job))
.takeUntil(this.destroy$)
.subscribe(this.buildLoadSuccess, this.buildLoadFailure);
};
private manuallySpecify = () => {
this.setState({
explicitBuild: true,
});
};
private explicitlyUpdateBuildNumber = (event: React.ChangeEvent<HTMLInputElement>) => {
this.updateSelectedBuild({ number: event.target.value });
};
public componentDidMount()
|
public componentWillUnmount(): void {
this.destroy$.next();
}
private handleBuildChanged = (option: Option): void => {
this.updateSelectedBuild({ number: option.number });
};
private optionRenderer = (build: Option) => {
return (
<span style={{ fontSize: '13px' }}>
<strong>Build {build.number} </strong>
{buildDisplayName(build as IBuildInfo)}({timestamp(build.timestamp)})
</span>
);
};
public render() {
const { builds, buildsLoading, loadError, selectedBuild, explicitBuild } = this.state;
const loadingBuilds = (
<div className="form-control-static text-center">
<Spinner size={'small'} />
</div>
);
const errorLoadingBuilds = <div className="col-md-6">Error loading builds!</div>;
const noBuildsFound = (
<div>
<p className="form-control-static">No builds found</p>
</div>
);
return (
<div className="form-group">
<label className="col-md-4 sm-label-right">Build</label>
<div className="col-md-6">
<div>
{explicitBuild ? (
<TextInput
inputClassName="input-sm"
value={this.props.command.extraFields.buildNumber}
onChange={this.explicitlyUpdateBuildNumber}
/>
) : buildsLoading ? (
loadingBuilds
) : loadError ? (
errorLoadingBuilds
) : builds.length <= 0 ? (
noBuildsFound
) : (
<TetheredSelect
options={builds}
valueKey="number"
optionRenderer={this.props.optionRenderer ? this.props.optionRenderer : this.optionRenderer}
clearable={false}
value={selectedBuild}
valueRenderer={this.props.optionRenderer ? this.props.optionRenderer : this.optionRenderer}
onChange={this.handleBuildChanged}
/>
)}
</div>
{!explicitBuild && (
<div className="small" style={{ marginTop: '5px' }}>
<a className="clickable" onClick={this.manuallySpecify}>
Manually specify build
</a>
</div>
)}
</div>
</div>
);
}
}
|
{
this.initialize(this.props.command);
}
|
identifier_body
|
BaseBuildTriggerTemplate.tsx
|
import React from 'react';
import { capitalize, get } from 'lodash';
import { Option } from 'react-select';
import { $q } from 'ngimport';
import { IPromise } from 'angular';
import { Observable, Subject } from 'rxjs';
import { IBuild, IBuildInfo, IBuildTrigger, IPipelineCommand } from 'core/domain';
import { ITriggerTemplateComponentProps } from 'core/pipeline/manualExecution/TriggerTemplate';
import { IgorService, BuildServiceType } from 'core/ci';
import { Spinner } from 'core/widgets/spinners/Spinner';
import { buildDisplayName } from 'core/pipeline/executionBuild/buildDisplayName.filter';
import { timestamp } from 'core/utils/timeFormatters';
import { TetheredSelect } from 'core/presentation/TetheredSelect';
import { TextInput } from 'core/presentation';
export interface IBaseBuildTriggerTemplateProps extends ITriggerTemplateComponentProps {
buildTriggerType: BuildServiceType;
optionRenderer?: (build: Option) => JSX.Element;
}
export interface IBaseBuildTriggerTemplateState {
builds?: IBuild[];
buildsLoading?: boolean;
loadError?: boolean;
selectedBuild?: number;
explicitBuild?: boolean;
}
export class BaseBuildTriggerTemplate extends React.Component<
IBaseBuildTriggerTemplateProps,
IBaseBuildTriggerTemplateState
> {
private destroy$ = new Subject();
public static formatLabel(trigger: IBuildTrigger): IPromise<string> {
return $q.when(`(${capitalize(trigger.type)}) ${trigger.master}: ${trigger.job}`);
}
public constructor(props: IBaseBuildTriggerTemplateProps) {
super(props);
this.state = {
builds: [],
buildsLoading: false,
loadError: false,
selectedBuild: 0,
explicitBuild: false,
};
}
private buildLoadSuccess = (allBuilds: IBuild[]) => {
const newState: Partial<IBaseBuildTriggerTemplateState> = {
buildsLoading: false,
};
const trigger = this.props.command.trigger as IBuildTrigger;
newState.builds = (allBuilds || [])
.filter(build => !build.building && build.result === 'SUCCESS')
.sort((a, b) => b.number - a.number);
if (newState.builds.length) {
// default to what is supplied by the trigger if possible; otherwise, use the latest
const defaultSelection = newState.builds.find(b => b.number === trigger.buildNumber) || newState.builds[0];
newState.selectedBuild = defaultSelection.number;
this.updateSelectedBuild(defaultSelection);
}
this.setState(newState);
};
private buildLoadFailure = () => {
this.setState({
buildsLoading: false,
loadError: true,
});
};
private updateSelectedBuild = (item: any) => {
const { updateCommand } = this.props;
updateCommand('extraFields.buildNumber', item.number);
this.setState({ selectedBuild: item.number });
};
private initialize = (command: IPipelineCommand) => {
this.props.updateCommand('triggerInvalid', true);
const trigger = command.trigger as IBuildTrigger;
// These fields will be added to the trigger when the form is submitted
this.props.updateCommand('extraFields', { buildNumber: get(command, 'extraFields.buildNumber', '') });
this.setState({
buildsLoading: true,
loadError: false,
});
if (trigger.buildNumber) {
this.updateSelectedBuild(trigger.buildInfo);
}
// do not re-initialize if the trigger has changed to some other type
if (trigger.type !== this.props.buildTriggerType) {
return;
}
Observable.fromPromise(IgorService.listBuildsForJob(trigger.master, trigger.job))
.takeUntil(this.destroy$)
.subscribe(this.buildLoadSuccess, this.buildLoadFailure);
};
private manuallySpecify = () => {
this.setState({
explicitBuild: true,
});
};
private explicitlyUpdateBuildNumber = (event: React.ChangeEvent<HTMLInputElement>) => {
this.updateSelectedBuild({ number: event.target.value });
};
public
|
() {
this.initialize(this.props.command);
}
public componentWillUnmount(): void {
this.destroy$.next();
}
private handleBuildChanged = (option: Option): void => {
this.updateSelectedBuild({ number: option.number });
};
private optionRenderer = (build: Option) => {
return (
<span style={{ fontSize: '13px' }}>
<strong>Build {build.number} </strong>
{buildDisplayName(build as IBuildInfo)}({timestamp(build.timestamp)})
</span>
);
};
public render() {
const { builds, buildsLoading, loadError, selectedBuild, explicitBuild } = this.state;
const loadingBuilds = (
<div className="form-control-static text-center">
<Spinner size={'small'} />
</div>
);
const errorLoadingBuilds = <div className="col-md-6">Error loading builds!</div>;
const noBuildsFound = (
<div>
<p className="form-control-static">No builds found</p>
</div>
);
return (
<div className="form-group">
<label className="col-md-4 sm-label-right">Build</label>
<div className="col-md-6">
<div>
{explicitBuild ? (
<TextInput
inputClassName="input-sm"
value={this.props.command.extraFields.buildNumber}
onChange={this.explicitlyUpdateBuildNumber}
/>
) : buildsLoading ? (
loadingBuilds
) : loadError ? (
errorLoadingBuilds
) : builds.length <= 0 ? (
noBuildsFound
) : (
<TetheredSelect
options={builds}
valueKey="number"
optionRenderer={this.props.optionRenderer ? this.props.optionRenderer : this.optionRenderer}
clearable={false}
value={selectedBuild}
valueRenderer={this.props.optionRenderer ? this.props.optionRenderer : this.optionRenderer}
onChange={this.handleBuildChanged}
/>
)}
</div>
{!explicitBuild && (
<div className="small" style={{ marginTop: '5px' }}>
<a className="clickable" onClick={this.manuallySpecify}>
Manually specify build
</a>
</div>
)}
</div>
</div>
);
}
}
|
componentDidMount
|
identifier_name
|
settings.py
|
# Django settings for mcjsms project.
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
ADMINS = []
MANAGERS = ADMINS
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, 'data/dev.sqlite'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'mcj_sms', # Or path to database file if using sqlite3.
'USER': 'mcj_sms', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '127.0.0.1', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306', # Set to empty string for default. Not used with sqlite3.
'OPTIONS': {
'init_command': 'SET storage_engine=INNODB',
}
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Montreal'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
if DEBUG:
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media/')
else:
MEDIA_ROOT = '/home/ramisayar/public/mcj/mcj2011/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
|
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
if DEBUG:
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
else:
STATIC_ROOT = '/home/ramisayar/public/mcj/mcj2011/static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(SITE_ROOT, 'global_static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '3+pefpl6rsg&#smr*4$f(18nasrr0u)wp_4q=lkn50n-qz0rjt'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'sms.urls'
TEMPLATE_DIRS = (os.path.join(SITE_ROOT, 'templates'),)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#'django.contrib.admin',
'django.contrib.localflavor',
'django_twilio',
'sms.twilio_sms'
)
TWILIO_ACCOUNT_SID = ''
TWILIO_AUTH_TOKEN = ''
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/site_media/'
# Absolute path to the directory static files should be collected to.
|
random_line_split
|
settings.py
|
# Django settings for mcjsms project.
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
ADMINS = []
MANAGERS = ADMINS
if DEBUG:
|
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'mcj_sms', # Or path to database file if using sqlite3.
'USER': 'mcj_sms', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '127.0.0.1', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '3306', # Set to empty string for default. Not used with sqlite3.
'OPTIONS': {
'init_command': 'SET storage_engine=INNODB',
}
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Montreal'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
if DEBUG:
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media/')
else:
MEDIA_ROOT = '/home/ramisayar/public/mcj/mcj2011/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/site_media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
if DEBUG:
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
else:
STATIC_ROOT = '/home/ramisayar/public/mcj/mcj2011/static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(SITE_ROOT, 'global_static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '3+pefpl6rsg&#smr*4$f(18nasrr0u)wp_4q=lkn50n-qz0rjt'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'sms.urls'
TEMPLATE_DIRS = (os.path.join(SITE_ROOT, 'templates'),)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
#'django.contrib.admin',
'django.contrib.localflavor',
'django_twilio',
'sms.twilio_sms'
)
TWILIO_ACCOUNT_SID = ''
TWILIO_AUTH_TOKEN = ''
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, 'data/dev.sqlite'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
|
conditional_block
|
test_notifications.py
|
"""Integration test for Notifications."""
import github3
from .helper import IntegrationHelper
class TestThread(IntegrationHelper):
"""Integration test for methods on Test class"""
def test_subscription(self):
"""Show that a user can retrieve notifications for repository"""
self.token_login()
cassette_name = self.cassette_name("subscription")
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
threads = list(repository.notifications(all=True))
assert len(threads) > 0
thread = threads[0]
assert isinstance(thread, github3.notifications.Thread)
assert isinstance(thread.subscription(),
github3.notifications.Subscription)
class TestSubscription(IntegrationHelper):
"""Integration test for methods on Test class"""
def
|
(self):
"""Show that user can successful set subscription"""
self.token_login()
cassette_name = self.cassette_name("set")
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
threads = list(repository.notifications(all='true'))
assert len(threads) > 0
subscription = threads[0].subscription()
assert subscription.set(True, False) is None
assert isinstance(subscription, github3.notifications.Subscription)
|
test_set
|
identifier_name
|
test_notifications.py
|
"""Integration test for Notifications."""
import github3
from .helper import IntegrationHelper
class TestThread(IntegrationHelper):
"""Integration test for methods on Test class"""
def test_subscription(self):
"""Show that a user can retrieve notifications for repository"""
self.token_login()
cassette_name = self.cassette_name("subscription")
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
threads = list(repository.notifications(all=True))
assert len(threads) > 0
thread = threads[0]
assert isinstance(thread, github3.notifications.Thread)
assert isinstance(thread.subscription(),
github3.notifications.Subscription)
class TestSubscription(IntegrationHelper):
"""Integration test for methods on Test class"""
def test_set(self):
|
"""Show that user can successful set subscription"""
self.token_login()
cassette_name = self.cassette_name("set")
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
threads = list(repository.notifications(all='true'))
assert len(threads) > 0
subscription = threads[0].subscription()
assert subscription.set(True, False) is None
assert isinstance(subscription, github3.notifications.Subscription)
|
identifier_body
|
|
test_notifications.py
|
"""Integration test for Notifications."""
import github3
from .helper import IntegrationHelper
class TestThread(IntegrationHelper):
"""Integration test for methods on Test class"""
def test_subscription(self):
"""Show that a user can retrieve notifications for repository"""
self.token_login()
|
assert len(threads) > 0
thread = threads[0]
assert isinstance(thread, github3.notifications.Thread)
assert isinstance(thread.subscription(),
github3.notifications.Subscription)
class TestSubscription(IntegrationHelper):
"""Integration test for methods on Test class"""
def test_set(self):
"""Show that user can successful set subscription"""
self.token_login()
cassette_name = self.cassette_name("set")
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
threads = list(repository.notifications(all='true'))
assert len(threads) > 0
subscription = threads[0].subscription()
assert subscription.set(True, False) is None
assert isinstance(subscription, github3.notifications.Subscription)
|
cassette_name = self.cassette_name("subscription")
with self.recorder.use_cassette(cassette_name):
repository = self.gh.repository('sigmavirus24', 'github3.py')
threads = list(repository.notifications(all=True))
|
random_line_split
|
unit-test.js
|
var page = require('webpage').create();
var url;
if (phantom.args) {
url = phantom.args[0];
} else {
url = require('system').args[1];
}
page.onConsoleMessage = function (message) {
console.log(message);
};
function exit(code)
|
console.log("Loading URL: " + url);
page.open(url, function (status) {
if (status != "success") {
console.log('Failed to open ' + url);
phantom.exit(1);
}
console.log("Running test.");
var result = page.evaluate(function() {
return chess_game.test_runner.runner();
});
if (result != 0) {
console.log("*** Test failed! ***");
exit(1);
}
else {
console.log("Test succeeded.");
exit(0);
}
});
|
{
setTimeout(function(){ phantom.exit(code); }, 0);
phantom.onError = function(){};
}
|
identifier_body
|
unit-test.js
|
var page = require('webpage').create();
var url;
if (phantom.args) {
url = phantom.args[0];
} else {
url = require('system').args[1];
}
page.onConsoleMessage = function (message) {
console.log(message);
};
function
|
(code) {
setTimeout(function(){ phantom.exit(code); }, 0);
phantom.onError = function(){};
}
console.log("Loading URL: " + url);
page.open(url, function (status) {
if (status != "success") {
console.log('Failed to open ' + url);
phantom.exit(1);
}
console.log("Running test.");
var result = page.evaluate(function() {
return chess_game.test_runner.runner();
});
if (result != 0) {
console.log("*** Test failed! ***");
exit(1);
}
else {
console.log("Test succeeded.");
exit(0);
}
});
|
exit
|
identifier_name
|
unit-test.js
|
var page = require('webpage').create();
var url;
if (phantom.args) {
url = phantom.args[0];
} else {
url = require('system').args[1];
}
page.onConsoleMessage = function (message) {
console.log(message);
};
function exit(code) {
setTimeout(function(){ phantom.exit(code); }, 0);
phantom.onError = function(){};
}
console.log("Loading URL: " + url);
page.open(url, function (status) {
if (status != "success") {
console.log('Failed to open ' + url);
phantom.exit(1);
}
console.log("Running test.");
var result = page.evaluate(function() {
return chess_game.test_runner.runner();
});
if (result != 0)
|
else {
console.log("Test succeeded.");
exit(0);
}
});
|
{
console.log("*** Test failed! ***");
exit(1);
}
|
conditional_block
|
unit-test.js
|
var page = require('webpage').create();
var url;
if (phantom.args) {
url = phantom.args[0];
} else {
url = require('system').args[1];
}
page.onConsoleMessage = function (message) {
console.log(message);
};
function exit(code) {
|
console.log("Loading URL: " + url);
page.open(url, function (status) {
if (status != "success") {
console.log('Failed to open ' + url);
phantom.exit(1);
}
console.log("Running test.");
var result = page.evaluate(function() {
return chess_game.test_runner.runner();
});
if (result != 0) {
console.log("*** Test failed! ***");
exit(1);
}
else {
console.log("Test succeeded.");
exit(0);
}
});
|
setTimeout(function(){ phantom.exit(code); }, 0);
phantom.onError = function(){};
}
|
random_line_split
|
render.ts
|
import global from 'global';
import dedent from 'ts-dedent';
import { RenderContext, ElementArgs, OptionsArgs } from './types';
const { window: globalWindow, document } = global;
declare let Ember: any;
const rootEl = document.getElementById('root');
const config = globalWindow.require(`${globalWindow.STORYBOOK_NAME}/config/environment`);
const app = globalWindow.require(`${globalWindow.STORYBOOK_NAME}/app`).default.create({
autoboot: false,
rootElement: rootEl,
...config.APP,
});
let lastPromise = app.boot();
let hasRendered = false;
let isRendering = false;
function
|
(options: OptionsArgs, el: ElementArgs) {
if (isRendering) return;
isRendering = true;
const { template, context = {}, element } = options;
if (hasRendered) {
lastPromise = lastPromise.then((instance: any) => instance.destroy());
}
lastPromise = lastPromise
.then(() => {
const appInstancePrivate = app.buildInstance();
return appInstancePrivate.boot().then(() => appInstancePrivate);
})
.then((instance: any) => {
instance.register(
'component:story-mode',
Ember.Component.extend({
layout: template || options,
...context,
})
);
const component = instance.lookup('component:story-mode');
if (element) {
component.appendTo(element);
element.appendTo(el);
} else {
component.appendTo(el);
}
hasRendered = true;
isRendering = false;
return instance;
});
}
export default function renderMain({ storyFn, kind, name, showMain, showError }: RenderContext) {
const element = storyFn();
if (!element) {
showError({
title: `Expecting a Ember element from the story: "${name}" of "${kind}".`,
description: dedent`
Did you forget to return the Ember element from the story?
Use "() => hbs('{{component}}')" or "() => { return {
template: hbs\`{{component}}\`
} }" when defining the story.
`,
});
return;
}
showMain();
render(element, rootEl);
}
|
render
|
identifier_name
|
render.ts
|
import global from 'global';
import dedent from 'ts-dedent';
import { RenderContext, ElementArgs, OptionsArgs } from './types';
const { window: globalWindow, document } = global;
declare let Ember: any;
const rootEl = document.getElementById('root');
const config = globalWindow.require(`${globalWindow.STORYBOOK_NAME}/config/environment`);
const app = globalWindow.require(`${globalWindow.STORYBOOK_NAME}/app`).default.create({
autoboot: false,
rootElement: rootEl,
...config.APP,
});
let lastPromise = app.boot();
let hasRendered = false;
let isRendering = false;
function render(options: OptionsArgs, el: ElementArgs) {
if (isRendering) return;
isRendering = true;
const { template, context = {}, element } = options;
if (hasRendered)
|
lastPromise = lastPromise
.then(() => {
const appInstancePrivate = app.buildInstance();
return appInstancePrivate.boot().then(() => appInstancePrivate);
})
.then((instance: any) => {
instance.register(
'component:story-mode',
Ember.Component.extend({
layout: template || options,
...context,
})
);
const component = instance.lookup('component:story-mode');
if (element) {
component.appendTo(element);
element.appendTo(el);
} else {
component.appendTo(el);
}
hasRendered = true;
isRendering = false;
return instance;
});
}
export default function renderMain({ storyFn, kind, name, showMain, showError }: RenderContext) {
const element = storyFn();
if (!element) {
showError({
title: `Expecting a Ember element from the story: "${name}" of "${kind}".`,
description: dedent`
Did you forget to return the Ember element from the story?
Use "() => hbs('{{component}}')" or "() => { return {
template: hbs\`{{component}}\`
} }" when defining the story.
`,
});
return;
}
showMain();
render(element, rootEl);
}
|
{
lastPromise = lastPromise.then((instance: any) => instance.destroy());
}
|
conditional_block
|
render.ts
|
import global from 'global';
import dedent from 'ts-dedent';
import { RenderContext, ElementArgs, OptionsArgs } from './types';
const { window: globalWindow, document } = global;
declare let Ember: any;
const rootEl = document.getElementById('root');
const config = globalWindow.require(`${globalWindow.STORYBOOK_NAME}/config/environment`);
const app = globalWindow.require(`${globalWindow.STORYBOOK_NAME}/app`).default.create({
autoboot: false,
rootElement: rootEl,
...config.APP,
});
let lastPromise = app.boot();
let hasRendered = false;
let isRendering = false;
function render(options: OptionsArgs, el: ElementArgs) {
if (isRendering) return;
isRendering = true;
const { template, context = {}, element } = options;
if (hasRendered) {
lastPromise = lastPromise.then((instance: any) => instance.destroy());
}
lastPromise = lastPromise
.then(() => {
const appInstancePrivate = app.buildInstance();
return appInstancePrivate.boot().then(() => appInstancePrivate);
})
.then((instance: any) => {
instance.register(
'component:story-mode',
Ember.Component.extend({
layout: template || options,
...context,
})
|
if (element) {
component.appendTo(element);
element.appendTo(el);
} else {
component.appendTo(el);
}
hasRendered = true;
isRendering = false;
return instance;
});
}
export default function renderMain({ storyFn, kind, name, showMain, showError }: RenderContext) {
const element = storyFn();
if (!element) {
showError({
title: `Expecting a Ember element from the story: "${name}" of "${kind}".`,
description: dedent`
Did you forget to return the Ember element from the story?
Use "() => hbs('{{component}}')" or "() => { return {
template: hbs\`{{component}}\`
} }" when defining the story.
`,
});
return;
}
showMain();
render(element, rootEl);
}
|
);
const component = instance.lookup('component:story-mode');
|
random_line_split
|
render.ts
|
import global from 'global';
import dedent from 'ts-dedent';
import { RenderContext, ElementArgs, OptionsArgs } from './types';
const { window: globalWindow, document } = global;
declare let Ember: any;
const rootEl = document.getElementById('root');
const config = globalWindow.require(`${globalWindow.STORYBOOK_NAME}/config/environment`);
const app = globalWindow.require(`${globalWindow.STORYBOOK_NAME}/app`).default.create({
autoboot: false,
rootElement: rootEl,
...config.APP,
});
let lastPromise = app.boot();
let hasRendered = false;
let isRendering = false;
function render(options: OptionsArgs, el: ElementArgs) {
if (isRendering) return;
isRendering = true;
const { template, context = {}, element } = options;
if (hasRendered) {
lastPromise = lastPromise.then((instance: any) => instance.destroy());
}
lastPromise = lastPromise
.then(() => {
const appInstancePrivate = app.buildInstance();
return appInstancePrivate.boot().then(() => appInstancePrivate);
})
.then((instance: any) => {
instance.register(
'component:story-mode',
Ember.Component.extend({
layout: template || options,
...context,
})
);
const component = instance.lookup('component:story-mode');
if (element) {
component.appendTo(element);
element.appendTo(el);
} else {
component.appendTo(el);
}
hasRendered = true;
isRendering = false;
return instance;
});
}
export default function renderMain({ storyFn, kind, name, showMain, showError }: RenderContext)
|
{
const element = storyFn();
if (!element) {
showError({
title: `Expecting a Ember element from the story: "${name}" of "${kind}".`,
description: dedent`
Did you forget to return the Ember element from the story?
Use "() => hbs('{{component}}')" or "() => { return {
template: hbs\`{{component}}\`
} }" when defining the story.
`,
});
return;
}
showMain();
render(element, rootEl);
}
|
identifier_body
|
|
grabbing.rs
|
#[cfg(target_os = "android")]
#[macro_use]
extern crate android_glue;
extern crate glutin;
use glutin::{Event, ElementState};
mod support;
#[cfg(target_os = "android")]
android_start!(main);
fn main() {
let window = glutin::WindowBuilder::new().build().unwrap();
window.set_title("glutin - Cursor grabbing test");
let _ = unsafe { window.make_current() };
let context = support::load(&window);
let mut grabbed = false;
for event in window.wait_events() {
match event {
Event::KeyboardInput(ElementState::Pressed, _, _) => {
if grabbed
|
else {
grabbed = true;
window.set_cursor_state(glutin::CursorState::Grab)
.ok().expect("could not grab mouse cursor");
}
},
Event::Closed => break,
a @ Event::MouseMoved(_, _) => {
println!("{:?}", a);
},
_ => (),
}
context.draw_frame((0.0, 1.0, 0.0, 1.0));
let _ = window.swap_buffers();
}
}
|
{
grabbed = false;
window.set_cursor_state(glutin::CursorState::Normal)
.ok().expect("could not ungrab mouse cursor");
}
|
conditional_block
|
grabbing.rs
|
#[cfg(target_os = "android")]
#[macro_use]
extern crate android_glue;
extern crate glutin;
use glutin::{Event, ElementState};
mod support;
#[cfg(target_os = "android")]
|
window.set_title("glutin - Cursor grabbing test");
let _ = unsafe { window.make_current() };
let context = support::load(&window);
let mut grabbed = false;
for event in window.wait_events() {
match event {
Event::KeyboardInput(ElementState::Pressed, _, _) => {
if grabbed {
grabbed = false;
window.set_cursor_state(glutin::CursorState::Normal)
.ok().expect("could not ungrab mouse cursor");
} else {
grabbed = true;
window.set_cursor_state(glutin::CursorState::Grab)
.ok().expect("could not grab mouse cursor");
}
},
Event::Closed => break,
a @ Event::MouseMoved(_, _) => {
println!("{:?}", a);
},
_ => (),
}
context.draw_frame((0.0, 1.0, 0.0, 1.0));
let _ = window.swap_buffers();
}
}
|
android_start!(main);
fn main() {
let window = glutin::WindowBuilder::new().build().unwrap();
|
random_line_split
|
grabbing.rs
|
#[cfg(target_os = "android")]
#[macro_use]
extern crate android_glue;
extern crate glutin;
use glutin::{Event, ElementState};
mod support;
#[cfg(target_os = "android")]
android_start!(main);
fn main()
|
{
let window = glutin::WindowBuilder::new().build().unwrap();
window.set_title("glutin - Cursor grabbing test");
let _ = unsafe { window.make_current() };
let context = support::load(&window);
let mut grabbed = false;
for event in window.wait_events() {
match event {
Event::KeyboardInput(ElementState::Pressed, _, _) => {
if grabbed {
grabbed = false;
window.set_cursor_state(glutin::CursorState::Normal)
.ok().expect("could not ungrab mouse cursor");
} else {
grabbed = true;
window.set_cursor_state(glutin::CursorState::Grab)
.ok().expect("could not grab mouse cursor");
}
},
Event::Closed => break,
a @ Event::MouseMoved(_, _) => {
println!("{:?}", a);
},
_ => (),
}
context.draw_frame((0.0, 1.0, 0.0, 1.0));
let _ = window.swap_buffers();
}
}
|
identifier_body
|
|
grabbing.rs
|
#[cfg(target_os = "android")]
#[macro_use]
extern crate android_glue;
extern crate glutin;
use glutin::{Event, ElementState};
mod support;
#[cfg(target_os = "android")]
android_start!(main);
fn
|
() {
let window = glutin::WindowBuilder::new().build().unwrap();
window.set_title("glutin - Cursor grabbing test");
let _ = unsafe { window.make_current() };
let context = support::load(&window);
let mut grabbed = false;
for event in window.wait_events() {
match event {
Event::KeyboardInput(ElementState::Pressed, _, _) => {
if grabbed {
grabbed = false;
window.set_cursor_state(glutin::CursorState::Normal)
.ok().expect("could not ungrab mouse cursor");
} else {
grabbed = true;
window.set_cursor_state(glutin::CursorState::Grab)
.ok().expect("could not grab mouse cursor");
}
},
Event::Closed => break,
a @ Event::MouseMoved(_, _) => {
println!("{:?}", a);
},
_ => (),
}
context.draw_frame((0.0, 1.0, 0.0, 1.0));
let _ = window.swap_buffers();
}
}
|
main
|
identifier_name
|
Util.py
|
# encoding: utf-8
import sys
import os
import signal
from openpyxl.utils import get_column_letter
from openpyxl import Workbook,load_workbook
ItemList=[]
## {{{ http://code.activestate.com/recipes/410692/ (r8)
# This class provides the functionality we want. You only need to look at
# this if you want to know how this works. It only needs to be defined
# once, no need to muck around with its internals.
class switch(object):
def __init__(self, value):
|
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
raise StopIteration
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
class Items:
def __init__(self, BallName,GPIO):
self.BallName = BallName
self.GPIO = GPIO
self.NetName = None
self.Direction = None
self.Data = None
self.Position = None
def set_NetName(self, NetName):
self.NetName=NetName
def set_Direction(self, Direction):
self.Direction=Direction
def set_Data(self, Data):
self.Data=Data
def set_Position(self, Position):
self.Position=Position
def GetCellValue(ws,index_row,column_letter):
return ws[column_letter+str(index_row)].value
def GetColumnNameAndChangeValue(ws,string_row,changelist,index_row):
string=[]
for i in range(0,len(changelist)):
column_letter = changelist[i]
string.append(str(GetCellValue(ws,string_row,column_letter))+' to ['+str(GetCellValue(ws,index_row,column_letter))+'] ')
return "".join(string)
def GetNumAndName(ws,index_row):
return '['+GetCellValue(ws,index_row,'D')+'] '+GetCellValue(ws,index_row,'C')+' : '
def GetColumnLetter(ws,string_row,string_value):
for column in range(1,40):
column_letter = get_column_letter(column)
if ws[column_letter+str(string_row)].value==string_value:
return column_letter
return None
def Get_Bit(byteval,idx):
return ((byteval&(1<<idx))!=0);
def AppendBit(data_L,data_M):
output_str=""
if data_L != 0:
for i in range(0, 8):
if Get_Bit(int(data_L,16),i) == True:
output_str=output_str+str(i)+"/"
if data_M != 0:
for i in range(0, 8):
if Get_Bit(int(data_M,16),i) == True:
output_str=output_str+str(i+8)+"/"
if data_L != 0 or data_M != 0:
output_str=output_str+"\n"
return output_str
def StringToSignint(string,len):
x = int(string,16)
if x > ((1<<(8*len))/2)-1:
x -= 1<<(8*len)
return x
def ExcelToStruct(filename):
try:
wb = load_workbook(filename)
except IOError:
print ("Can't open file exit")
sys.exit(0)
ws = wb.active
index_row=2
print ("clear All data in excel")
tmp_row=index_row
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(tmp_row)].value
if BallName==None:
break;
for row in ws['C'+str(tmp_row)+':G'+str(tmp_row)]:
for cell in row:
cell.value = None
tmp_row = tmp_row+1;
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
if BallName==None:
break;
GPIOPPin=ws[GetColumnLetter(ws,1,'GPIO')+str(index_row)].value
if GPIOPPin!=None:
ItemList.append(Items(BallName,GPIOPPin))
index_row = index_row+1;
wb.save(filename)
def StructToExcel(filename):
try:
wb = load_workbook(filename)
except IOError:
print ("Can't open file exit")
sys.exit(0)
ws = wb.active
index_row=2
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
if BallName==None:
break;
for item in ItemList:
if item.BallName!=None and item.NetName !=None and BallName.strip() == item.BallName.strip():
ws[GetColumnLetter(ws,1,'NetName')+str(index_row)] = item.NetName
index_row = index_row+1;
wb.save(filename)
def FindBallNameAppend(BallName,Position):
for item in ItemList:
if BallName.strip() == item.BallName.strip():
item.set_Position(Position)
def FindPositionAppend(Position,SIG_NAME):
if SIG_NAME.find("\g")!=-1:
return
for item in ItemList:
if xstr(Position).strip() == xstr(item.Position).strip():
item.set_NetName(SIG_NAME)
def xstr(s):
if s is None:
return ''
return str(s)
def CheckEmptyNetName():
for item in ItemList:
if item.NetName is None:
item.set_NetName("NOT_CONNECT_"+item.GPIO[4:])
def PrintItemList():
for item in ItemList:
print (xstr(item.BallName)+" "+xstr(item.GPIO)+" "+xstr(item.Position)+" "+xstr(item.NetName))
|
self.value = value
self.fall = False
|
random_line_split
|
Util.py
|
# encoding: utf-8
import sys
import os
import signal
from openpyxl.utils import get_column_letter
from openpyxl import Workbook,load_workbook
ItemList=[]
## {{{ http://code.activestate.com/recipes/410692/ (r8)
# This class provides the functionality we want. You only need to look at
# this if you want to know how this works. It only needs to be defined
# once, no need to muck around with its internals.
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
raise StopIteration
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
|
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
class Items:
def __init__(self, BallName,GPIO):
self.BallName = BallName
self.GPIO = GPIO
self.NetName = None
self.Direction = None
self.Data = None
self.Position = None
def set_NetName(self, NetName):
self.NetName=NetName
def set_Direction(self, Direction):
self.Direction=Direction
def set_Data(self, Data):
self.Data=Data
def set_Position(self, Position):
self.Position=Position
def GetCellValue(ws,index_row,column_letter):
return ws[column_letter+str(index_row)].value
def GetColumnNameAndChangeValue(ws,string_row,changelist,index_row):
string=[]
for i in range(0,len(changelist)):
column_letter = changelist[i]
string.append(str(GetCellValue(ws,string_row,column_letter))+' to ['+str(GetCellValue(ws,index_row,column_letter))+'] ')
return "".join(string)
def GetNumAndName(ws,index_row):
return '['+GetCellValue(ws,index_row,'D')+'] '+GetCellValue(ws,index_row,'C')+' : '
def GetColumnLetter(ws,string_row,string_value):
for column in range(1,40):
column_letter = get_column_letter(column)
if ws[column_letter+str(string_row)].value==string_value:
return column_letter
return None
def Get_Bit(byteval,idx):
return ((byteval&(1<<idx))!=0);
def AppendBit(data_L,data_M):
output_str=""
if data_L != 0:
for i in range(0, 8):
if Get_Bit(int(data_L,16),i) == True:
output_str=output_str+str(i)+"/"
if data_M != 0:
for i in range(0, 8):
if Get_Bit(int(data_M,16),i) == True:
output_str=output_str+str(i+8)+"/"
if data_L != 0 or data_M != 0:
output_str=output_str+"\n"
return output_str
def StringToSignint(string,len):
x = int(string,16)
if x > ((1<<(8*len))/2)-1:
x -= 1<<(8*len)
return x
def ExcelToStruct(filename):
try:
wb = load_workbook(filename)
except IOError:
print ("Can't open file exit")
sys.exit(0)
ws = wb.active
index_row=2
print ("clear All data in excel")
tmp_row=index_row
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(tmp_row)].value
if BallName==None:
break;
for row in ws['C'+str(tmp_row)+':G'+str(tmp_row)]:
for cell in row:
cell.value = None
tmp_row = tmp_row+1;
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
if BallName==None:
break;
GPIOPPin=ws[GetColumnLetter(ws,1,'GPIO')+str(index_row)].value
if GPIOPPin!=None:
ItemList.append(Items(BallName,GPIOPPin))
index_row = index_row+1;
wb.save(filename)
def StructToExcel(filename):
try:
wb = load_workbook(filename)
except IOError:
print ("Can't open file exit")
sys.exit(0)
ws = wb.active
index_row=2
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
if BallName==None:
break;
for item in ItemList:
if item.BallName!=None and item.NetName !=None and BallName.strip() == item.BallName.strip():
ws[GetColumnLetter(ws,1,'NetName')+str(index_row)] = item.NetName
index_row = index_row+1;
wb.save(filename)
def FindBallNameAppend(BallName,Position):
for item in ItemList:
if BallName.strip() == item.BallName.strip():
item.set_Position(Position)
def FindPositionAppend(Position,SIG_NAME):
if SIG_NAME.find("\g")!=-1:
return
for item in ItemList:
if xstr(Position).strip() == xstr(item.Position).strip():
item.set_NetName(SIG_NAME)
def xstr(s):
if s is None:
return ''
return str(s)
def CheckEmptyNetName():
for item in ItemList:
if item.NetName is None:
item.set_NetName("NOT_CONNECT_"+item.GPIO[4:])
def PrintItemList():
for item in ItemList:
print (xstr(item.BallName)+" "+xstr(item.GPIO)+" "+xstr(item.Position)+" "+xstr(item.NetName))
|
return True
|
conditional_block
|
Util.py
|
# encoding: utf-8
import sys
import os
import signal
from openpyxl.utils import get_column_letter
from openpyxl import Workbook,load_workbook
ItemList=[]
## {{{ http://code.activestate.com/recipes/410692/ (r8)
# This class provides the functionality we want. You only need to look at
# this if you want to know how this works. It only needs to be defined
# once, no need to muck around with its internals.
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
|
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
class Items:
def __init__(self, BallName,GPIO):
self.BallName = BallName
self.GPIO = GPIO
self.NetName = None
self.Direction = None
self.Data = None
self.Position = None
def set_NetName(self, NetName):
self.NetName=NetName
def set_Direction(self, Direction):
self.Direction=Direction
def set_Data(self, Data):
self.Data=Data
def set_Position(self, Position):
self.Position=Position
def GetCellValue(ws,index_row,column_letter):
return ws[column_letter+str(index_row)].value
def GetColumnNameAndChangeValue(ws,string_row,changelist,index_row):
string=[]
for i in range(0,len(changelist)):
column_letter = changelist[i]
string.append(str(GetCellValue(ws,string_row,column_letter))+' to ['+str(GetCellValue(ws,index_row,column_letter))+'] ')
return "".join(string)
def GetNumAndName(ws,index_row):
return '['+GetCellValue(ws,index_row,'D')+'] '+GetCellValue(ws,index_row,'C')+' : '
def GetColumnLetter(ws,string_row,string_value):
for column in range(1,40):
column_letter = get_column_letter(column)
if ws[column_letter+str(string_row)].value==string_value:
return column_letter
return None
def Get_Bit(byteval,idx):
return ((byteval&(1<<idx))!=0);
def AppendBit(data_L,data_M):
output_str=""
if data_L != 0:
for i in range(0, 8):
if Get_Bit(int(data_L,16),i) == True:
output_str=output_str+str(i)+"/"
if data_M != 0:
for i in range(0, 8):
if Get_Bit(int(data_M,16),i) == True:
output_str=output_str+str(i+8)+"/"
if data_L != 0 or data_M != 0:
output_str=output_str+"\n"
return output_str
def StringToSignint(string,len):
x = int(string,16)
if x > ((1<<(8*len))/2)-1:
x -= 1<<(8*len)
return x
def ExcelToStruct(filename):
try:
wb = load_workbook(filename)
except IOError:
print ("Can't open file exit")
sys.exit(0)
ws = wb.active
index_row=2
print ("clear All data in excel")
tmp_row=index_row
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(tmp_row)].value
if BallName==None:
break;
for row in ws['C'+str(tmp_row)+':G'+str(tmp_row)]:
for cell in row:
cell.value = None
tmp_row = tmp_row+1;
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
if BallName==None:
break;
GPIOPPin=ws[GetColumnLetter(ws,1,'GPIO')+str(index_row)].value
if GPIOPPin!=None:
ItemList.append(Items(BallName,GPIOPPin))
index_row = index_row+1;
wb.save(filename)
def StructToExcel(filename):
try:
wb = load_workbook(filename)
except IOError:
print ("Can't open file exit")
sys.exit(0)
ws = wb.active
index_row=2
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
if BallName==None:
break;
for item in ItemList:
if item.BallName!=None and item.NetName !=None and BallName.strip() == item.BallName.strip():
ws[GetColumnLetter(ws,1,'NetName')+str(index_row)] = item.NetName
index_row = index_row+1;
wb.save(filename)
def FindBallNameAppend(BallName,Position):
for item in ItemList:
if BallName.strip() == item.BallName.strip():
item.set_Position(Position)
def FindPositionAppend(Position,SIG_NAME):
if SIG_NAME.find("\g")!=-1:
return
for item in ItemList:
if xstr(Position).strip() == xstr(item.Position).strip():
item.set_NetName(SIG_NAME)
def xstr(s):
if s is None:
return ''
return str(s)
def CheckEmptyNetName():
for item in ItemList:
if item.NetName is None:
item.set_NetName("NOT_CONNECT_"+item.GPIO[4:])
def PrintItemList():
for item in ItemList:
print (xstr(item.BallName)+" "+xstr(item.GPIO)+" "+xstr(item.Position)+" "+xstr(item.NetName))
|
"""Return the match method once, then stop"""
yield self.match
raise StopIteration
|
identifier_body
|
Util.py
|
# encoding: utf-8
import sys
import os
import signal
from openpyxl.utils import get_column_letter
from openpyxl import Workbook,load_workbook
ItemList=[]
## {{{ http://code.activestate.com/recipes/410692/ (r8)
# This class provides the functionality we want. You only need to look at
# this if you want to know how this works. It only needs to be defined
# once, no need to muck around with its internals.
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
raise StopIteration
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
class Items:
def __init__(self, BallName,GPIO):
self.BallName = BallName
self.GPIO = GPIO
self.NetName = None
self.Direction = None
self.Data = None
self.Position = None
def set_NetName(self, NetName):
self.NetName=NetName
def set_Direction(self, Direction):
self.Direction=Direction
def set_Data(self, Data):
self.Data=Data
def set_Position(self, Position):
self.Position=Position
def GetCellValue(ws,index_row,column_letter):
return ws[column_letter+str(index_row)].value
def GetColumnNameAndChangeValue(ws,string_row,changelist,index_row):
string=[]
for i in range(0,len(changelist)):
column_letter = changelist[i]
string.append(str(GetCellValue(ws,string_row,column_letter))+' to ['+str(GetCellValue(ws,index_row,column_letter))+'] ')
return "".join(string)
def GetNumAndName(ws,index_row):
return '['+GetCellValue(ws,index_row,'D')+'] '+GetCellValue(ws,index_row,'C')+' : '
def GetColumnLetter(ws,string_row,string_value):
for column in range(1,40):
column_letter = get_column_letter(column)
if ws[column_letter+str(string_row)].value==string_value:
return column_letter
return None
def Get_Bit(byteval,idx):
return ((byteval&(1<<idx))!=0);
def AppendBit(data_L,data_M):
output_str=""
if data_L != 0:
for i in range(0, 8):
if Get_Bit(int(data_L,16),i) == True:
output_str=output_str+str(i)+"/"
if data_M != 0:
for i in range(0, 8):
if Get_Bit(int(data_M,16),i) == True:
output_str=output_str+str(i+8)+"/"
if data_L != 0 or data_M != 0:
output_str=output_str+"\n"
return output_str
def StringToSignint(string,len):
x = int(string,16)
if x > ((1<<(8*len))/2)-1:
x -= 1<<(8*len)
return x
def ExcelToStruct(filename):
try:
wb = load_workbook(filename)
except IOError:
print ("Can't open file exit")
sys.exit(0)
ws = wb.active
index_row=2
print ("clear All data in excel")
tmp_row=index_row
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(tmp_row)].value
if BallName==None:
break;
for row in ws['C'+str(tmp_row)+':G'+str(tmp_row)]:
for cell in row:
cell.value = None
tmp_row = tmp_row+1;
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
if BallName==None:
break;
GPIOPPin=ws[GetColumnLetter(ws,1,'GPIO')+str(index_row)].value
if GPIOPPin!=None:
ItemList.append(Items(BallName,GPIOPPin))
index_row = index_row+1;
wb.save(filename)
def StructToExcel(filename):
try:
wb = load_workbook(filename)
except IOError:
print ("Can't open file exit")
sys.exit(0)
ws = wb.active
index_row=2
while True:
BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
if BallName==None:
break;
for item in ItemList:
if item.BallName!=None and item.NetName !=None and BallName.strip() == item.BallName.strip():
ws[GetColumnLetter(ws,1,'NetName')+str(index_row)] = item.NetName
index_row = index_row+1;
wb.save(filename)
def FindBallNameAppend(BallName,Position):
for item in ItemList:
if BallName.strip() == item.BallName.strip():
item.set_Position(Position)
def FindPositionAppend(Position,SIG_NAME):
if SIG_NAME.find("\g")!=-1:
return
for item in ItemList:
if xstr(Position).strip() == xstr(item.Position).strip():
item.set_NetName(SIG_NAME)
def xstr(s):
if s is None:
return ''
return str(s)
def
|
():
for item in ItemList:
if item.NetName is None:
item.set_NetName("NOT_CONNECT_"+item.GPIO[4:])
def PrintItemList():
for item in ItemList:
print (xstr(item.BallName)+" "+xstr(item.GPIO)+" "+xstr(item.Position)+" "+xstr(item.NetName))
|
CheckEmptyNetName
|
identifier_name
|
CargadePanos.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Carga de paños.py
#
# Copyright 2013 Akoharowen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#
"""
¡¡¡¡MODULO PARA CARGAR POR CONSOLA!!!
MODULO de CARGA DE DATOS DE PANELES (EJEMPLO)
El objetivo de este módulo es cargar en una matriz los datos básicos de
los paneles y luego guardarlos en un archivo:
------------------------------
"""
def CargadePanos():
#Carga de Datos de cabecera de Red
nombre=
|
""
autor=""
descred=""
while nombre=="":
nombre=raw_input("Nombre de la Red: ") #Alfa
while autor=="":
autor=raw_input("Autor: ") #Alfa
while descred=="":
descred=raw_input("Descripcion de la Red: ") #Alfa
archivo=open(nombre+".red","w")
datosred=str([nombre,autor,descred])
#print datosred
archivo.write(datosred)
archivo.write('\n')
seguir=1
#Carga de paños de la Red
Red=""
PreRed=""
iteraciones=0
Panel=1
while seguir==1:
iteraciones=iteraciones+1
print "------------"
print "Panel:"+str(Panel) #n+1 ó b+letra
print "------------"
while 1: #Comprobacion de Enteros
try:
NPaneles=input("Cantidad de Paneles: ") #Entero positivo
break
except:
print "Valor inesperado"
Descripcion=raw_input("Descripción: ") #Alfanumérico
Material=raw_input("Material: ") #Alfanumérico
while 1: #Comprobacion de Enteros
try:
Runnage=input("Runnage: ") #Flotante Positivo (m/kg)
break
except:
print "Valor Inesperado"
while 1:
try:
mL=input("mL: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
dHilo=input("dHilo: ") #mm
break
except:
print "Valor Inesperado"
while 1:
try:
T1=input("T1: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
T2=input("T2: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
N2=input("N2: ") #Entero Positivo
break
except:
print "Valor Inesperado"
panel=[Panel, "Descripcion",mat,NPaneles,Runnage,mL,Dhilo,T1,T2,N2]
seguir=raw_input("¿Cargar otro panel?")
if seguir=="0" or seguir=="n":
PreRed=PreRed+","+str(panel)
Red="["+str(PreRed)+"]"
print datosred
print Red
print "-"*len("Se guardó "+ nombre+".red "+ "en forma Exitosa")
print "Se guardó "+ nombre+".red "+ "en forma Exitosa"
print "-"*len("Se guardó "+ nombre+".red "+ "en forma Exitosa")
archivo.write(Red)
archivo.close()
else:
if iteraciones==1:
PreRed=str(panel)
else:
PreRed=PreRed+","+str(panel)
seguir=1
Panel=Panel+1
|
identifier_body
|
|
CargadePanos.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Carga de paños.py
#
# Copyright 2013 Akoharowen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#
"""
¡¡¡¡MODULO PARA CARGAR POR CONSOLA!!!
MODULO de CARGA DE DATOS DE PANELES (EJEMPLO)
El objetivo de este módulo es cargar en una matriz los datos básicos de
los paneles y luego guardarlos en un archivo:
------------------------------
"""
def Cargade
|
arga de Datos de cabecera de Red
nombre=""
autor=""
descred=""
while nombre=="":
nombre=raw_input("Nombre de la Red: ") #Alfa
while autor=="":
autor=raw_input("Autor: ") #Alfa
while descred=="":
descred=raw_input("Descripcion de la Red: ") #Alfa
archivo=open(nombre+".red","w")
datosred=str([nombre,autor,descred])
#print datosred
archivo.write(datosred)
archivo.write('\n')
seguir=1
#Carga de paños de la Red
Red=""
PreRed=""
iteraciones=0
Panel=1
while seguir==1:
iteraciones=iteraciones+1
print "------------"
print "Panel:"+str(Panel) #n+1 ó b+letra
print "------------"
while 1: #Comprobacion de Enteros
try:
NPaneles=input("Cantidad de Paneles: ") #Entero positivo
break
except:
print "Valor inesperado"
Descripcion=raw_input("Descripción: ") #Alfanumérico
Material=raw_input("Material: ") #Alfanumérico
while 1: #Comprobacion de Enteros
try:
Runnage=input("Runnage: ") #Flotante Positivo (m/kg)
break
except:
print "Valor Inesperado"
while 1:
try:
mL=input("mL: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
dHilo=input("dHilo: ") #mm
break
except:
print "Valor Inesperado"
while 1:
try:
T1=input("T1: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
T2=input("T2: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
N2=input("N2: ") #Entero Positivo
break
except:
print "Valor Inesperado"
panel=[Panel, "Descripcion",mat,NPaneles,Runnage,mL,Dhilo,T1,T2,N2]
seguir=raw_input("¿Cargar otro panel?")
if seguir=="0" or seguir=="n":
PreRed=PreRed+","+str(panel)
Red="["+str(PreRed)+"]"
print datosred
print Red
print "-"*len("Se guardó "+ nombre+".red "+ "en forma Exitosa")
print "Se guardó "+ nombre+".red "+ "en forma Exitosa"
print "-"*len("Se guardó "+ nombre+".red "+ "en forma Exitosa")
archivo.write(Red)
archivo.close()
else:
if iteraciones==1:
PreRed=str(panel)
else:
PreRed=PreRed+","+str(panel)
seguir=1
Panel=Panel+1
|
Panos():
#C
|
identifier_name
|
CargadePanos.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Carga de paños.py
#
# Copyright 2013 Akoharowen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#
"""
¡¡¡¡MODULO PARA CARGAR POR CONSOLA!!!
MODULO de CARGA DE DATOS DE PANELES (EJEMPLO)
El objetivo de este módulo es cargar en una matriz los datos básicos de
los paneles y luego guardarlos en un archivo:
------------------------------
"""
def CargadePanos():
#Carga de Datos de cabecera de Red
nombre=""
autor=""
descred=""
while nombre=="":
nombre=raw_input("Nombre de la Red: ") #Alfa
while autor=="":
autor=raw_input("Autor: ") #Alfa
while descred=="":
descred
|
vo=open(nombre+".red","w")
datosred=str([nombre,autor,descred])
#print datosred
archivo.write(datosred)
archivo.write('\n')
seguir=1
#Carga de paños de la Red
Red=""
PreRed=""
iteraciones=0
Panel=1
while seguir==1:
iteraciones=iteraciones+1
print "------------"
print "Panel:"+str(Panel) #n+1 ó b+letra
print "------------"
while 1: #Comprobacion de Enteros
try:
NPaneles=input("Cantidad de Paneles: ") #Entero positivo
break
except:
print "Valor inesperado"
Descripcion=raw_input("Descripción: ") #Alfanumérico
Material=raw_input("Material: ") #Alfanumérico
while 1: #Comprobacion de Enteros
try:
Runnage=input("Runnage: ") #Flotante Positivo (m/kg)
break
except:
print "Valor Inesperado"
while 1:
try:
mL=input("mL: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
dHilo=input("dHilo: ") #mm
break
except:
print "Valor Inesperado"
while 1:
try:
T1=input("T1: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
T2=input("T2: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
N2=input("N2: ") #Entero Positivo
break
except:
print "Valor Inesperado"
panel=[Panel, "Descripcion",mat,NPaneles,Runnage,mL,Dhilo,T1,T2,N2]
seguir=raw_input("¿Cargar otro panel?")
if seguir=="0" or seguir=="n":
PreRed=PreRed+","+str(panel)
Red="["+str(PreRed)+"]"
print datosred
print Red
print "-"*len("Se guardó "+ nombre+".red "+ "en forma Exitosa")
print "Se guardó "+ nombre+".red "+ "en forma Exitosa"
print "-"*len("Se guardó "+ nombre+".red "+ "en forma Exitosa")
archivo.write(Red)
archivo.close()
else:
if iteraciones==1:
PreRed=str(panel)
else:
PreRed=PreRed+","+str(panel)
seguir=1
Panel=Panel+1
|
=raw_input("Descripcion de la Red: ") #Alfa
archi
|
conditional_block
|
CargadePanos.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Carga de paños.py
#
# Copyright 2013 Akoharowen <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
#
"""
¡¡¡¡MODULO PARA CARGAR POR CONSOLA!!!
MODULO de CARGA DE DATOS DE PANELES (EJEMPLO)
El objetivo de este módulo es cargar en una matriz los datos básicos de
los paneles y luego guardarlos en un archivo:
------------------------------
"""
def CargadePanos():
#Carga de Datos de cabecera de Red
nombre=""
autor=""
descred=""
while nombre=="":
nombre=raw_input("Nombre de la Red: ") #Alfa
while autor=="":
autor=raw_input("Autor: ") #Alfa
while descred=="":
descred=raw_input("Descripcion de la Red: ") #Alfa
archivo=open(nombre+".red","w")
datosred=str([nombre,autor,descred])
#print datosred
archivo.write(datosred)
archivo.write('\n')
seguir=1
#Carga de paños de la Red
Red=""
PreRed=""
iteraciones=0
Panel=1
while seguir==1:
iteraciones=iteraciones+1
print "------------"
print "Panel:"+str(Panel) #n+1 ó b+letra
print "------------"
while 1: #Comprobacion de Enteros
try:
NPaneles=input("Cantidad de Paneles: ") #Entero positivo
break
except:
print "Valor inesperado"
Descripcion=raw_input("Descripción: ") #Alfanumérico
Material=raw_input("Material: ") #Alfanumérico
while 1: #Comprobacion de Enteros
try:
Runnage=input("Runnage: ") #Flotante Positivo (m/kg)
break
except:
print "Valor Inesperado"
while 1:
try:
mL=input("mL: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
dHilo=input("dHilo: ") #mm
break
except:
print "Valor Inesperado"
while 1:
try:
T1=input("T1: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
T2=input("T2: ") #Entero Positivo
break
except:
print "Valor Inesperado"
while 1:
try:
N2=input("N2: ") #Entero Positivo
|
except:
print "Valor Inesperado"
panel=[Panel, "Descripcion",mat,NPaneles,Runnage,mL,Dhilo,T1,T2,N2]
seguir=raw_input("¿Cargar otro panel?")
if seguir=="0" or seguir=="n":
PreRed=PreRed+","+str(panel)
Red="["+str(PreRed)+"]"
print datosred
print Red
print "-"*len("Se guardó "+ nombre+".red "+ "en forma Exitosa")
print "Se guardó "+ nombre+".red "+ "en forma Exitosa"
print "-"*len("Se guardó "+ nombre+".red "+ "en forma Exitosa")
archivo.write(Red)
archivo.close()
else:
if iteraciones==1:
PreRed=str(panel)
else:
PreRed=PreRed+","+str(panel)
seguir=1
Panel=Panel+1
|
break
|
random_line_split
|
events.ts
|
import { DadosVeiculo } from '../../../../browser/src/app/services/veiculo.service';
import { DadosLogin, DadosRegistro, RegistroResultado } from '../../../../browser/src/interfaces/login.interface';
import { BrazucasServer } from '../../../../common/brazucas-server';
import { Jogador } from '../../../../common/database/models/Jogador';
import { environment } from '../../../../common/environment';
import { BrazucasEventos } from '../../interfaces/brazucas-eventos';
import { VoiceChatProvider } from '../../providers/voice-chat.provider';
import { Rpg } from '../../rpg';
import { playerEvent } from '../functions/player';
export class Events {
protected brazucasServer: BrazucasServer;
constructor(brazucasServer: BrazucasServer) {
this.brazucasServer = brazucasServer;
}
public async [BrazucasEventos.AUTENTICAR_JOGADOR](player: PlayerMp, dados: DadosLogin) {
try {
const jogador: Jogador = await this.brazucasServer.autenticarJogador(player.name, dados.senha);
if (jogador) {
player.spawn(environment.posicaoLogin);
await Rpg.playerProvider.update(player, jogador.toJSON());
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: false,
autenticado: true,
};
} else {
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: true,
autenticado: false,
};
}
} catch (err) {
console.error(err.toString());
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: false,
autenticado: false,
};
}
}
public async [BrazucasEventos.REGISTRAR_JOGADOR](player: PlayerMp, dados: DadosRegistro) {
try {
const jogador: Jogador = await this.brazucasServer.registrarJogador(player, dados);
if (jogador) {
player.spawn(environment.posicaoLogin);
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
erro: false,
jogador: jogador,
registrado: true,
});
} else {
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
registrado: false,
erro: true,
});
}
} catch (err) {
console.debug(`[REGISTRO] Um erro ocorreu ao criar o jogador ${player.name}`);
console.error(err.toString());
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
registrado: false,
erro: true,
mensagem: err.toString() || 'Erro interno ao cadastrar',
});
}
}
public async [BrazucasEventos.CRIAR_VEICULO](player: PlayerMp, dados: DadosVeiculo) {
try {
await this.brazucasServer.criarVeiculo(player, dados);
} catch (err) {
console.debug(`[VEÍCULOS] Um erro ocorreu ao criar o veículo`);
console.error(err.toString());
return false;
}
}
public async [BrazucasEventos.HABILITAR_VOICE_CHAT](player: PlayerMp, dados: any) {
console.log(`[VOICE CHAT] Ativando voice chat para ${player.name} com os dados: ${JSON.stringify(dados)}`);
const target = mp.players.at(dados.targetId);
if (!target) {
return {
erro: true,
mensagem: 'Jogador não encontrado',
};
}
VoiceChatProvider.habilitar(player, target);
}
public async [BrazucasEventos.DESABILITAR_VOICE_CHAT](player: PlayerMp, dados: any) {
console.log(`[VOICE CHAT] Desativando voice chat para ${player.name} com os dados: ${JSON.stringify(dados)}`);
const target = mp.players.at(dados.targetId);
if (!target) {
return {
erro: true,
mensagem: 'Jogador não encontrado',
};
}
VoiceChatProvider.desabilitar(player, target);
}
public async [BrazucasEventos.ANIMACAO_VOICE_CHAT](player: PlayerMp) {
console.log(`[VOICE CHAT] Aplicando animação para ${player.name}`);
player.playAnimation('special_ped@baygor@monologue_3@monologue_3e', 'trees_can_talk_4', 1, 0);
}
public async [Brazu
|
r: PlayerMp, dados: {
pacote: string,
nome: string;
}) {
player.stopAnimation();
player.playAnimation(dados.pacote, dados.nome, 1, 0);
}
}
|
casEventos.VISUALIZAR_ANIMACAO](playe
|
identifier_name
|
events.ts
|
import { DadosVeiculo } from '../../../../browser/src/app/services/veiculo.service';
import { DadosLogin, DadosRegistro, RegistroResultado } from '../../../../browser/src/interfaces/login.interface';
import { BrazucasServer } from '../../../../common/brazucas-server';
import { Jogador } from '../../../../common/database/models/Jogador';
import { environment } from '../../../../common/environment';
import { BrazucasEventos } from '../../interfaces/brazucas-eventos';
import { VoiceChatProvider } from '../../providers/voice-chat.provider';
import { Rpg } from '../../rpg';
import { playerEvent } from '../functions/player';
export class Events {
protected brazucasServer: BrazucasServer;
constructor(brazucasServer: BrazucasServer) {
this.brazucasServer = brazucasServer;
}
public async [BrazucasEventos.AUTENTICAR_JOGADOR](player: PlayerMp, dados: DadosLogin) {
try {
const jogador: Jogador = await this.brazucasServer.autenticarJogador(player.name, dados.senha);
if (jogador) {
player.spawn(environment.posicaoLogin);
await Rpg.playerProvider.update(player, jogador.toJSON());
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: false,
autenticado: true,
};
} else {
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: true,
autenticado: false,
};
}
} catch (err) {
console.error(err.toString());
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: false,
autenticado: false,
};
}
}
public async [BrazucasEventos.REGISTRAR_JOGADOR](player: PlayerMp, dados: DadosRegistro) {
try {
const jogador: Jogador = await this.brazucasServer.registrarJogador(player, dados);
if (jogador) {
player.spawn(environment.posicaoLogin);
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
erro: false,
jogador: jogador,
registrado: true,
});
} else {
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
registrado: false,
erro: true,
});
}
} catch (err) {
console.debug(`[REGISTRO] Um erro ocorreu ao criar o jogador ${player.name}`);
console.error(err.toString());
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
registrado: false,
erro: true,
mensagem: err.toString() || 'Erro interno ao cadastrar',
});
}
}
public async [BrazucasEventos.CRIAR_VEICULO](player: PlayerMp, dados: DadosVeiculo) {
try {
await this.brazucasServer.criarVeiculo(player, dados);
} catch (err) {
console.debug(`[VEÍCULOS] Um erro ocorreu ao criar o veículo`);
console.error(err.toString());
return false;
}
}
public async [BrazucasEventos.HABILITAR_VOICE_CHAT](player: PlayerMp, dados: any) {
console.log(`[VOICE CHAT] Ativando voice chat para ${player.name} com os dados: ${JSON.stringify(dados)}`);
const target = mp.players.at(dados.targetId);
if (!target) {
|
VoiceChatProvider.habilitar(player, target);
}
public async [BrazucasEventos.DESABILITAR_VOICE_CHAT](player: PlayerMp, dados: any) {
console.log(`[VOICE CHAT] Desativando voice chat para ${player.name} com os dados: ${JSON.stringify(dados)}`);
const target = mp.players.at(dados.targetId);
if (!target) {
return {
erro: true,
mensagem: 'Jogador não encontrado',
};
}
VoiceChatProvider.desabilitar(player, target);
}
public async [BrazucasEventos.ANIMACAO_VOICE_CHAT](player: PlayerMp) {
console.log(`[VOICE CHAT] Aplicando animação para ${player.name}`);
player.playAnimation('special_ped@baygor@monologue_3@monologue_3e', 'trees_can_talk_4', 1, 0);
}
public async [BrazucasEventos.VISUALIZAR_ANIMACAO](player: PlayerMp, dados: {
pacote: string,
nome: string;
}) {
player.stopAnimation();
player.playAnimation(dados.pacote, dados.nome, 1, 0);
}
}
|
return {
erro: true,
mensagem: 'Jogador não encontrado',
};
}
|
conditional_block
|
events.ts
|
import { DadosVeiculo } from '../../../../browser/src/app/services/veiculo.service';
import { DadosLogin, DadosRegistro, RegistroResultado } from '../../../../browser/src/interfaces/login.interface';
import { BrazucasServer } from '../../../../common/brazucas-server';
import { Jogador } from '../../../../common/database/models/Jogador';
import { environment } from '../../../../common/environment';
import { BrazucasEventos } from '../../interfaces/brazucas-eventos';
import { VoiceChatProvider } from '../../providers/voice-chat.provider';
import { Rpg } from '../../rpg';
import { playerEvent } from '../functions/player';
export class Events {
|
public async [BrazucasEventos.AUTENTICAR_JOGADOR](player: PlayerMp, dados: DadosLogin) {
try {
const jogador: Jogador = await this.brazucasServer.autenticarJogador(player.name, dados.senha);
if (jogador) {
player.spawn(environment.posicaoLogin);
await Rpg.playerProvider.update(player, jogador.toJSON());
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: false,
autenticado: true,
};
} else {
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: true,
autenticado: false,
};
}
} catch (err) {
console.error(err.toString());
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: false,
autenticado: false,
};
}
}
public async [BrazucasEventos.REGISTRAR_JOGADOR](player: PlayerMp, dados: DadosRegistro) {
try {
const jogador: Jogador = await this.brazucasServer.registrarJogador(player, dados);
if (jogador) {
player.spawn(environment.posicaoLogin);
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
erro: false,
jogador: jogador,
registrado: true,
});
} else {
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
registrado: false,
erro: true,
});
}
} catch (err) {
console.debug(`[REGISTRO] Um erro ocorreu ao criar o jogador ${player.name}`);
console.error(err.toString());
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
registrado: false,
erro: true,
mensagem: err.toString() || 'Erro interno ao cadastrar',
});
}
}
public async [BrazucasEventos.CRIAR_VEICULO](player: PlayerMp, dados: DadosVeiculo) {
try {
await this.brazucasServer.criarVeiculo(player, dados);
} catch (err) {
console.debug(`[VEÍCULOS] Um erro ocorreu ao criar o veículo`);
console.error(err.toString());
return false;
}
}
public async [BrazucasEventos.HABILITAR_VOICE_CHAT](player: PlayerMp, dados: any) {
console.log(`[VOICE CHAT] Ativando voice chat para ${player.name} com os dados: ${JSON.stringify(dados)}`);
const target = mp.players.at(dados.targetId);
if (!target) {
return {
erro: true,
mensagem: 'Jogador não encontrado',
};
}
VoiceChatProvider.habilitar(player, target);
}
public async [BrazucasEventos.DESABILITAR_VOICE_CHAT](player: PlayerMp, dados: any) {
console.log(`[VOICE CHAT] Desativando voice chat para ${player.name} com os dados: ${JSON.stringify(dados)}`);
const target = mp.players.at(dados.targetId);
if (!target) {
return {
erro: true,
mensagem: 'Jogador não encontrado',
};
}
VoiceChatProvider.desabilitar(player, target);
}
public async [BrazucasEventos.ANIMACAO_VOICE_CHAT](player: PlayerMp) {
console.log(`[VOICE CHAT] Aplicando animação para ${player.name}`);
player.playAnimation('special_ped@baygor@monologue_3@monologue_3e', 'trees_can_talk_4', 1, 0);
}
public async [BrazucasEventos.VISUALIZAR_ANIMACAO](player: PlayerMp, dados: {
pacote: string,
nome: string;
}) {
player.stopAnimation();
player.playAnimation(dados.pacote, dados.nome, 1, 0);
}
}
|
protected brazucasServer: BrazucasServer;
constructor(brazucasServer: BrazucasServer) {
this.brazucasServer = brazucasServer;
}
|
random_line_split
|
events.ts
|
import { DadosVeiculo } from '../../../../browser/src/app/services/veiculo.service';
import { DadosLogin, DadosRegistro, RegistroResultado } from '../../../../browser/src/interfaces/login.interface';
import { BrazucasServer } from '../../../../common/brazucas-server';
import { Jogador } from '../../../../common/database/models/Jogador';
import { environment } from '../../../../common/environment';
import { BrazucasEventos } from '../../interfaces/brazucas-eventos';
import { VoiceChatProvider } from '../../providers/voice-chat.provider';
import { Rpg } from '../../rpg';
import { playerEvent } from '../functions/player';
export class Events {
protected brazucasServer: BrazucasServer;
constructor(brazucasServer: BrazucasServer) {
this.brazucasServer = brazucasServer;
}
public async [BrazucasEventos.AUTENTICAR_JOGADOR](player: PlayerMp, dados: DadosLogin) {
try {
const jogador: Jogador = await this.brazucasServer.autenticarJogador(player.name, dados.senha);
if (jogador) {
player.spawn(environment.posicaoLogin);
await Rpg.playerProvider.update(player, jogador.toJSON());
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: false,
autenticado: true,
};
} else {
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: true,
autenticado: false,
};
}
} catch (err) {
console.error(err.toString());
return {
eventoResposta: 'AutenticacaoResultado',
credenciaisInvalidas: false,
autenticado: false,
};
}
}
public async [BrazucasEventos.REGISTRAR_JOGADOR](player: PlayerMp, dados: DadosRegistro) {
try {
const jogador: Jogador = await this.brazucasServer.registrarJogador(player, dados);
if (jogador) {
player.spawn(environment.posicaoLogin);
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
erro: false,
jogador: jogador,
registrado: true,
});
} else {
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
registrado: false,
erro: true,
});
}
} catch (err) {
console.debug(`[REGISTRO] Um erro ocorreu ao criar o jogador ${player.name}`);
console.error(err.toString());
playerEvent<RegistroResultado>(player, BrazucasEventos.REGISTRO_RESULTADO, {
registrado: false,
erro: true,
mensagem: err.toString() || 'Erro interno ao cadastrar',
});
}
}
public async [BrazucasEventos.CRIAR_VEICULO](player: PlayerMp, dados: DadosVeiculo) {
try {
await this.brazucasServer.criarVeiculo(player, dados);
} catch (err) {
console.debug(`[VEÍCULOS] Um erro ocorreu ao criar o veículo`);
console.error(err.toString());
return false;
}
}
public async [BrazucasEventos.HABILITAR_VOICE_CHAT](player: PlayerMp, dados: any) {
console.log(`[VOICE CHAT] Ativando voice chat para ${player.name} com os dados: ${JSON.stringify(dados)}`);
const target = mp.players.at(dados.targetId);
if (!target) {
return {
erro: true,
mensagem: 'Jogador não encontrado',
};
}
VoiceChatProvider.habilitar(player, target);
}
public async [BrazucasEventos.DESABILITAR_VOICE_CHAT](player: PlayerMp, dados: any) {
console.log(`[VOICE CHAT] Desativando voice chat para ${player.name} com os dados: ${JSON.stringify(dados)}`);
const target = mp.players.at(dados.targetId);
if (!target) {
return {
erro: true,
mensagem: 'Jogador não encontrado',
};
}
VoiceChatProvider.desabilitar(player, target);
}
public async [BrazucasEventos.ANIMACAO_VOICE_CHAT](player: PlayerMp) {
|
blic async [BrazucasEventos.VISUALIZAR_ANIMACAO](player: PlayerMp, dados: {
pacote: string,
nome: string;
}) {
player.stopAnimation();
player.playAnimation(dados.pacote, dados.nome, 1, 0);
}
}
|
console.log(`[VOICE CHAT] Aplicando animação para ${player.name}`);
player.playAnimation('special_ped@baygor@monologue_3@monologue_3e', 'trees_can_talk_4', 1, 0);
}
pu
|
identifier_body
|
struct_lits_visual.rs
|
// rustfmt-normalize_comments: true
// rustfmt-wrap_comments: true
// rustfmt-struct_lit_style: Visual
// rustfmt-error_on_line_overflow: false
// Struct literal expressions.
fn
|
() {
let x = Bar;
// Comment
let y = Foo { a: x };
Foo { a: foo(), // comment
// comment
b: bar(),
..something };
Fooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo { a: f(), b: b() };
Foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo { // Comment
a: foo(), /* Comment */
// Comment
b: bar(), /* Comment */ };
Foo { a: Bar, b: f() };
Quux { x: if cond {
bar();
},
y: baz(), };
Baz { x: yxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
z: zzzzz, /* test */ };
A { // Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit
// amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante
// hendrerit. Donec et mollis dolor.
first: item(),
// Praesent et diam eget libero egestas mattis sit amet vitae augue.
// Nam tincidunt congue enim, ut porta lorem lacinia consectetur.
second: Item, };
Diagram { // o This graph demonstrates how
// / \ significant whitespace is
// o o preserved.
// /|\ \
// o o o o
graph: G, }
}
|
main
|
identifier_name
|
struct_lits_visual.rs
|
// rustfmt-normalize_comments: true
// rustfmt-wrap_comments: true
// rustfmt-struct_lit_style: Visual
// rustfmt-error_on_line_overflow: false
// Struct literal expressions.
fn main() {
let x = Bar;
// Comment
let y = Foo { a: x };
Foo { a: foo(), // comment
// comment
b: bar(),
..something };
Fooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo { a: f(), b: b() };
Foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo { // Comment
a: foo(), /* Comment */
// Comment
b: bar(), /* Comment */ };
Foo { a: Bar, b: f() };
Quux { x: if cond
|
,
y: baz(), };
Baz { x: yxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
z: zzzzz, /* test */ };
A { // Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit
// amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante
// hendrerit. Donec et mollis dolor.
first: item(),
// Praesent et diam eget libero egestas mattis sit amet vitae augue.
// Nam tincidunt congue enim, ut porta lorem lacinia consectetur.
second: Item, };
Diagram { // o This graph demonstrates how
// / \ significant whitespace is
// o o preserved.
// /|\ \
// o o o o
graph: G, }
}
|
{
bar();
}
|
conditional_block
|
struct_lits_visual.rs
|
// rustfmt-normalize_comments: true
// rustfmt-wrap_comments: true
// rustfmt-struct_lit_style: Visual
// rustfmt-error_on_line_overflow: false
// Struct literal expressions.
fn main() {
let x = Bar;
// Comment
let y = Foo { a: x };
Foo { a: foo(), // comment
// comment
b: bar(),
..something };
Fooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo { a: f(), b: b() };
Foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo { // Comment
|
// Comment
b: bar(), /* Comment */ };
Foo { a: Bar, b: f() };
Quux { x: if cond {
bar();
},
y: baz(), };
Baz { x: yxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
z: zzzzz, /* test */ };
A { // Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit
// amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante
// hendrerit. Donec et mollis dolor.
first: item(),
// Praesent et diam eget libero egestas mattis sit amet vitae augue.
// Nam tincidunt congue enim, ut porta lorem lacinia consectetur.
second: Item, };
Diagram { // o This graph demonstrates how
// / \ significant whitespace is
// o o preserved.
// /|\ \
// o o o o
graph: G, }
}
|
a: foo(), /* Comment */
|
random_line_split
|
struct_lits_visual.rs
|
// rustfmt-normalize_comments: true
// rustfmt-wrap_comments: true
// rustfmt-struct_lit_style: Visual
// rustfmt-error_on_line_overflow: false
// Struct literal expressions.
fn main()
|
{
let x = Bar;
// Comment
let y = Foo { a: x };
Foo { a: foo(), // comment
// comment
b: bar(),
..something };
Fooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo { a: f(), b: b() };
Foooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo { // Comment
a: foo(), /* Comment */
// Comment
b: bar(), /* Comment */ };
Foo { a: Bar, b: f() };
Quux { x: if cond {
bar();
},
y: baz(), };
Baz { x: yxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,
z: zzzzz, /* test */ };
A { // Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. Sed sit
// amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur ante
// hendrerit. Donec et mollis dolor.
first: item(),
// Praesent et diam eget libero egestas mattis sit amet vitae augue.
// Nam tincidunt congue enim, ut porta lorem lacinia consectetur.
second: Item, };
Diagram { // o This graph demonstrates how
// / \ significant whitespace is
// o o preserved.
// /|\ \
// o o o o
graph: G, }
}
|
identifier_body
|
|
index.js
|
const GraphQL = require('graphql');
const {
GraphQLSchema,
GraphQLObjectType,
GraphQLString,
graphql
} = GraphQL;
const graphqlMiddleware = require('graphql-middleware').default;
const ExampleType = new GraphQLObjectType({
name: 'Example',
fields: {
id: {
type: GraphQLString
},
name: {
type: GraphQLString
}
}
})
const querySchema = graphqlMiddleware({
name: 'RootQueryType',
fields: {
hello: {
beforeResolve (root, args, ctx) {
throw new Error('Test');
},
type: GraphQLString,
resolve (root, args, ctx) {
return 'world';
}
},
haha: {
type: ExampleType,
resolve () {
return {
id: 1
|
}
}
}, [
(root, args, ctx) => {
// throw new Error('Applies to all');
},
(root, args, ctx) => {
throw new Error('Applies to all after first');
},
]);
const schema = new GraphQLSchema({
query: new GraphQLObjectType(querySchema)
});
const query = '{ hello }';
graphql(schema, query).then((res) => {
console.log(res);
}).catch((err) => {
console.error(err.stack);
});
const queryTwo = '{ haha { id } }';
graphql(schema, queryTwo).then((res) => {
console.log(res);
}).catch((err) => {
console.error(err.stack);
});
|
};
}
|
random_line_split
|
index.js
|
const GraphQL = require('graphql');
const {
GraphQLSchema,
GraphQLObjectType,
GraphQLString,
graphql
} = GraphQL;
const graphqlMiddleware = require('graphql-middleware').default;
const ExampleType = new GraphQLObjectType({
name: 'Example',
fields: {
id: {
type: GraphQLString
},
name: {
type: GraphQLString
}
}
})
const querySchema = graphqlMiddleware({
name: 'RootQueryType',
fields: {
hello: {
beforeResolve (root, args, ctx) {
throw new Error('Test');
},
type: GraphQLString,
resolve (root, args, ctx)
|
},
haha: {
type: ExampleType,
resolve () {
return {
id: 1
};
}
}
}
}, [
(root, args, ctx) => {
// throw new Error('Applies to all');
},
(root, args, ctx) => {
throw new Error('Applies to all after first');
},
]);
const schema = new GraphQLSchema({
query: new GraphQLObjectType(querySchema)
});
const query = '{ hello }';
graphql(schema, query).then((res) => {
console.log(res);
}).catch((err) => {
console.error(err.stack);
});
const queryTwo = '{ haha { id } }';
graphql(schema, queryTwo).then((res) => {
console.log(res);
}).catch((err) => {
console.error(err.stack);
});
|
{
return 'world';
}
|
identifier_body
|
index.js
|
const GraphQL = require('graphql');
const {
GraphQLSchema,
GraphQLObjectType,
GraphQLString,
graphql
} = GraphQL;
const graphqlMiddleware = require('graphql-middleware').default;
const ExampleType = new GraphQLObjectType({
name: 'Example',
fields: {
id: {
type: GraphQLString
},
name: {
type: GraphQLString
}
}
})
const querySchema = graphqlMiddleware({
name: 'RootQueryType',
fields: {
hello: {
beforeResolve (root, args, ctx) {
throw new Error('Test');
},
type: GraphQLString,
resolve (root, args, ctx) {
return 'world';
}
},
haha: {
type: ExampleType,
|
() {
return {
id: 1
};
}
}
}
}, [
(root, args, ctx) => {
// throw new Error('Applies to all');
},
(root, args, ctx) => {
throw new Error('Applies to all after first');
},
]);
const schema = new GraphQLSchema({
query: new GraphQLObjectType(querySchema)
});
const query = '{ hello }';
graphql(schema, query).then((res) => {
console.log(res);
}).catch((err) => {
console.error(err.stack);
});
const queryTwo = '{ haha { id } }';
graphql(schema, queryTwo).then((res) => {
console.log(res);
}).catch((err) => {
console.error(err.stack);
});
|
resolve
|
identifier_name
|
serializers.py
|
from django.contrib.auth import get_user_model
from rest_framework.fields import CharField
from rest_framework.serializers import ModelSerializer
from grandchallenge.challenges.models import Challenge
from grandchallenge.components.serializers import (
ComponentInterfaceValueSerializer,
)
from grandchallenge.evaluation.models import (
Evaluation,
Phase,
Submission,
)
class UserSerializer(ModelSerializer):
class Meta:
model = get_user_model()
fields = ("username",)
class ChallengeSerializer(ModelSerializer):
class Meta:
model = Challenge
fields = (
"title",
"short_name",
)
class PhaseSerializer(ModelSerializer):
challenge = ChallengeSerializer()
class Meta:
model = Phase
fields = (
"challenge",
"title",
"slug",
)
class SubmissionSerializer(ModelSerializer):
phase = PhaseSerializer()
creator = UserSerializer()
class Meta:
|
class EvaluationSerializer(ModelSerializer):
submission = SubmissionSerializer()
outputs = ComponentInterfaceValueSerializer(many=True)
status = CharField(source="get_status_display", read_only=True)
title = CharField(read_only=True)
class Meta:
model = Evaluation
fields = (
"pk",
"method",
"submission",
"created",
"published",
"outputs",
"rank",
"rank_score",
"rank_per_metric",
"status",
"title",
)
|
model = Submission
fields = (
"pk",
"phase",
"created",
"creator",
"comment",
"predictions_file",
"supplementary_file",
"supplementary_url",
)
|
identifier_body
|
serializers.py
|
from django.contrib.auth import get_user_model
from rest_framework.fields import CharField
from rest_framework.serializers import ModelSerializer
from grandchallenge.challenges.models import Challenge
from grandchallenge.components.serializers import (
ComponentInterfaceValueSerializer,
)
from grandchallenge.evaluation.models import (
Evaluation,
Phase,
Submission,
)
class UserSerializer(ModelSerializer):
class Meta:
model = get_user_model()
fields = ("username",)
class ChallengeSerializer(ModelSerializer):
class Meta:
model = Challenge
fields = (
"title",
"short_name",
)
class PhaseSerializer(ModelSerializer):
challenge = ChallengeSerializer()
class Meta:
model = Phase
fields = (
"challenge",
"title",
"slug",
)
class SubmissionSerializer(ModelSerializer):
phase = PhaseSerializer()
creator = UserSerializer()
class Meta:
model = Submission
fields = (
"pk",
"phase",
"created",
"creator",
"comment",
"predictions_file",
"supplementary_file",
"supplementary_url",
)
class EvaluationSerializer(ModelSerializer):
submission = SubmissionSerializer()
outputs = ComponentInterfaceValueSerializer(many=True)
status = CharField(source="get_status_display", read_only=True)
|
class Meta:
model = Evaluation
fields = (
"pk",
"method",
"submission",
"created",
"published",
"outputs",
"rank",
"rank_score",
"rank_per_metric",
"status",
"title",
)
|
title = CharField(read_only=True)
|
random_line_split
|
serializers.py
|
from django.contrib.auth import get_user_model
from rest_framework.fields import CharField
from rest_framework.serializers import ModelSerializer
from grandchallenge.challenges.models import Challenge
from grandchallenge.components.serializers import (
ComponentInterfaceValueSerializer,
)
from grandchallenge.evaluation.models import (
Evaluation,
Phase,
Submission,
)
class UserSerializer(ModelSerializer):
class Meta:
model = get_user_model()
fields = ("username",)
class ChallengeSerializer(ModelSerializer):
class Meta:
model = Challenge
fields = (
"title",
"short_name",
)
class PhaseSerializer(ModelSerializer):
challenge = ChallengeSerializer()
class
|
:
model = Phase
fields = (
"challenge",
"title",
"slug",
)
class SubmissionSerializer(ModelSerializer):
phase = PhaseSerializer()
creator = UserSerializer()
class Meta:
model = Submission
fields = (
"pk",
"phase",
"created",
"creator",
"comment",
"predictions_file",
"supplementary_file",
"supplementary_url",
)
class EvaluationSerializer(ModelSerializer):
submission = SubmissionSerializer()
outputs = ComponentInterfaceValueSerializer(many=True)
status = CharField(source="get_status_display", read_only=True)
title = CharField(read_only=True)
class Meta:
model = Evaluation
fields = (
"pk",
"method",
"submission",
"created",
"published",
"outputs",
"rank",
"rank_score",
"rank_per_metric",
"status",
"title",
)
|
Meta
|
identifier_name
|
nl.js
|
export default {
// Generic
"generic.add": "Toevoegen",
"generic.cancel": "Annuleren",
// BlockType
"components.controls.blocktype.h1": "H1",
"components.controls.blocktype.h2": "H2",
"components.controls.blocktype.h3": "H3",
"components.controls.blocktype.h4": "H4",
"components.controls.blocktype.h5": "H5",
"components.controls.blocktype.h6": "H6",
"components.controls.blocktype.blockquote": "Blockquote",
"components.controls.blocktype.code": "Code",
"components.controls.blocktype.blocktype": "Blocktype",
"components.controls.blocktype.normal": "Normaal",
// Color Picker
"components.controls.colorpicker.colorpicker": "Kleurkiezer",
"components.controls.colorpicker.text": "Tekst",
"components.controls.colorpicker.background": "Achtergrond",
// Embedded
"components.controls.embedded.embedded": "Ingevoegd",
"components.controls.embedded.embeddedlink": "Ingevoegde link",
"components.controls.embedded.enterlink": "Voeg link toe",
// Emoji
"components.controls.emoji.emoji": "Emoji",
// FontFamily
"components.controls.fontfamily.fontfamily": "Lettertype",
// FontSize
"components.controls.fontsize.fontsize": "Lettergrootte",
// History
"components.controls.history.history": "Geschiedenis",
"components.controls.history.undo": "Ongedaan maken",
"components.controls.history.redo": "Opnieuw",
// Image
"components.controls.image.image": "Afbeelding",
"components.controls.image.fileUpload": "Bestand uploaden",
"components.controls.image.byURL": "URL",
"components.controls.image.dropFileText":
"Drop het bestand hier of klik om te uploaden",
// Inline
"components.controls.inline.bold": "Dikgedrukt",
"components.controls.inline.italic": "Schuingedrukt",
"components.controls.inline.underline": "Onderstrepen",
"components.controls.inline.strikethrough": "Doorstrepen",
"components.controls.inline.monospace": "Monospace",
"components.controls.inline.superscript": "Superscript",
"components.controls.inline.subscript": "Subscript",
// Link
"components.controls.link.linkTitle": "Linktitel",
"components.controls.link.linkTarget": "Link bestemming",
|
// List
"components.controls.list.list": "Lijst",
"components.controls.list.unordered": "Ongeordend",
"components.controls.list.ordered": "Geordend",
"components.controls.list.indent": "Inspringen",
"components.controls.list.outdent": "Inspringen verkleinen",
// Remove
"components.controls.remove.remove": "Verwijderen",
// TextAlign
"components.controls.textalign.textalign": "Tekst uitlijnen",
"components.controls.textalign.left": "Links",
"components.controls.textalign.center": "Gecentreerd",
"components.controls.textalign.right": "Rechts",
"components.controls.textalign.justify": "Uitgelijnd"
};
|
"components.controls.link.linkTargetOption": "Open link in een nieuw venster",
"components.controls.link.link": "Link",
"components.controls.link.unlink": "Unlink",
|
random_line_split
|
385. Mini Parser.py
|
"""
Given a nested list of integers represented as a string, implement a parser to deserialize it.
Each element is either an integer, or a list -- whose elements may also be integers or other lists.
Note: You may assume that the string is well-formed:
String is non-empty.
String does not contain white spaces.
String contains only digits 0-9, [, - ,, ].
Example 1:
Given s = "324",
You should return a NestedInteger object which contains a single integer 324.
Example 2:
Given s = "[123,[456,[789]]]",
Return a NestedInteger object containing a nested list with 2 elements:
1. An integer containing value 123.
2. A nested list containing two elements:
i. An integer containing value 456.
ii. A nested list with one element:
a. An integer containing value 789.
"""
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution(object):
def deserialize(self, s):
"""
:type s: str
:rtype: NestedInteger
"""
def parse():
num = ''
while s[0] in '-0123456789':
num += s.pop(0)
if num:
return NestedInteger(int(num))
s.pop(0)
result = NestedInteger()
while s[0] != ']':
|
s.pop(0)
return result
s = list(s + ' ')
return parse()
|
result.add(parse())
if s[0] == ',':
s.pop(0)
|
conditional_block
|
385. Mini Parser.py
|
"""
Given a nested list of integers represented as a string, implement a parser to deserialize it.
Each element is either an integer, or a list -- whose elements may also be integers or other lists.
Note: You may assume that the string is well-formed:
String is non-empty.
String does not contain white spaces.
String contains only digits 0-9, [, - ,, ].
Example 1:
Given s = "324",
You should return a NestedInteger object which contains a single integer 324.
Example 2:
Given s = "[123,[456,[789]]]",
|
i. An integer containing value 456.
ii. A nested list with one element:
a. An integer containing value 789.
"""
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution(object):
def deserialize(self, s):
"""
:type s: str
:rtype: NestedInteger
"""
def parse():
num = ''
while s[0] in '-0123456789':
num += s.pop(0)
if num:
return NestedInteger(int(num))
s.pop(0)
result = NestedInteger()
while s[0] != ']':
result.add(parse())
if s[0] == ',':
s.pop(0)
s.pop(0)
return result
s = list(s + ' ')
return parse()
|
Return a NestedInteger object containing a nested list with 2 elements:
1. An integer containing value 123.
2. A nested list containing two elements:
|
random_line_split
|
385. Mini Parser.py
|
"""
Given a nested list of integers represented as a string, implement a parser to deserialize it.
Each element is either an integer, or a list -- whose elements may also be integers or other lists.
Note: You may assume that the string is well-formed:
String is non-empty.
String does not contain white spaces.
String contains only digits 0-9, [, - ,, ].
Example 1:
Given s = "324",
You should return a NestedInteger object which contains a single integer 324.
Example 2:
Given s = "[123,[456,[789]]]",
Return a NestedInteger object containing a nested list with 2 elements:
1. An integer containing value 123.
2. A nested list containing two elements:
i. An integer containing value 456.
ii. A nested list with one element:
a. An integer containing value 789.
"""
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class Solution(object):
def deserialize(self, s):
|
"""
:type s: str
:rtype: NestedInteger
"""
def parse():
num = ''
while s[0] in '-0123456789':
num += s.pop(0)
if num:
return NestedInteger(int(num))
s.pop(0)
result = NestedInteger()
while s[0] != ']':
result.add(parse())
if s[0] == ',':
s.pop(0)
s.pop(0)
return result
s = list(s + ' ')
return parse()
|
identifier_body
|
|
385. Mini Parser.py
|
"""
Given a nested list of integers represented as a string, implement a parser to deserialize it.
Each element is either an integer, or a list -- whose elements may also be integers or other lists.
Note: You may assume that the string is well-formed:
String is non-empty.
String does not contain white spaces.
String contains only digits 0-9, [, - ,, ].
Example 1:
Given s = "324",
You should return a NestedInteger object which contains a single integer 324.
Example 2:
Given s = "[123,[456,[789]]]",
Return a NestedInteger object containing a nested list with 2 elements:
1. An integer containing value 123.
2. A nested list containing two elements:
i. An integer containing value 456.
ii. A nested list with one element:
a. An integer containing value 789.
"""
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class
|
(object):
def deserialize(self, s):
"""
:type s: str
:rtype: NestedInteger
"""
def parse():
num = ''
while s[0] in '-0123456789':
num += s.pop(0)
if num:
return NestedInteger(int(num))
s.pop(0)
result = NestedInteger()
while s[0] != ']':
result.add(parse())
if s[0] == ',':
s.pop(0)
s.pop(0)
return result
s = list(s + ' ')
return parse()
|
Solution
|
identifier_name
|
python_comparator_code.py
|
## Script that takes in a list of fastas and generates summaries of their differences
# this is python implementation to which bugMat was compared in our publication.
# code by Drs Madeleine Cule and David Eyre, Oxford University
import sys, re, os, gzip
from Bio import SeqIO
from itertools import izip
bases = "ACGT"
def get_distance( seq1, seq2 ):
|
if __name__=="__main__":
## Parse command line arguments
listoffasta, outname_prefix = sys.argv[1:]
outname = outname_prefix
## Read in all the sequences, and replace the id with the required nicename
seqlist = []
with open( listoffasta ) as fp:
for line in fp:
nicename, fapath = line.strip().split()
if (os.path.exists( fapath)):
f = gzip.open( fapath )
fa = SeqIO.read( f, "fasta" )
fa.id = nicename
seqlist.append( fa )
else:
sys.stderr.write(fapath+" does not exist, skipping...")
## Find nonshared positions
seq_generator = izip( *seqlist )
nonshared_pos =[ i for ( i, a ) in enumerate( seq_generator )
if len( set( [ ai for ai in a if ai in bases ] ) ) > 1 ]
sys.stderr.write("Successfully obtained nonshared_diffs; there are %s of them.\n"%len( nonshared_pos ) )
## Sort out the SNPs
for seq in seqlist:
nonshared_bases = "".join( seq.seq[ i ] for i in nonshared_pos )
seq.seq._data = nonshared_bases
SeqIO.write( seqlist , "%s_snps.fa"%outname, "fasta" )
sys.stderr.write("Successfully wrote snps fasta file.\n")
## Write the positions
with open("%s_positions.txt"%outname, "w" ) as out:
out.write( "\n".join( [ str( n+1 ) for n in nonshared_pos ] ) )
out.write("\n")
sys.stderr.write("Successfully wrote nonshared positions.\n")
## Do the data matrix
mfasta = "%s_snps.fa"%outname
listofsamples = []
strings = dict()
for seq_record in SeqIO.parse( mfasta, "fasta" ):
strings[ seq_record.id ] = str( seq_record.seq )
listofsamples.append( seq_record.id )
listofsamples2 = list(listofsamples)
with open("%s.dat"%outname, "w" ) as fp:
fp.write("%s\n"%("\t".join( listofsamples ) ) ),
for s1 in listofsamples:
fp.write( "%s\t"%s1 )
for s2 in listofsamples2:
fp.write( "%s\t"%get_distance( strings[ s1 ], strings[ s2 ] ) )
fp.write("\n")
fp.close()
sys.stderr.write("Successfully wrote pairwise distance matrix.\n")
sys.stderr.write("Done.")
|
'''Function to calculate the [Hamming] distance between two sequences'''
return sum(True for c1, c2 in izip( seq1, seq2 ) if c1!=c2 and c1 in bases and c2 in bases )
|
identifier_body
|
python_comparator_code.py
|
## Script that takes in a list of fastas and generates summaries of their differences
# this is python implementation to which bugMat was compared in our publication.
# code by Drs Madeleine Cule and David Eyre, Oxford University
import sys, re, os, gzip
from Bio import SeqIO
from itertools import izip
bases = "ACGT"
def get_distance( seq1, seq2 ):
'''Function to calculate the [Hamming] distance between two sequences'''
return sum(True for c1, c2 in izip( seq1, seq2 ) if c1!=c2 and c1 in bases and c2 in bases )
if __name__=="__main__":
## Parse command line arguments
|
listoffasta, outname_prefix = sys.argv[1:]
outname = outname_prefix
## Read in all the sequences, and replace the id with the required nicename
seqlist = []
with open( listoffasta ) as fp:
for line in fp:
nicename, fapath = line.strip().split()
if (os.path.exists( fapath)):
f = gzip.open( fapath )
fa = SeqIO.read( f, "fasta" )
fa.id = nicename
seqlist.append( fa )
else:
sys.stderr.write(fapath+" does not exist, skipping...")
## Find nonshared positions
seq_generator = izip( *seqlist )
nonshared_pos =[ i for ( i, a ) in enumerate( seq_generator )
if len( set( [ ai for ai in a if ai in bases ] ) ) > 1 ]
sys.stderr.write("Successfully obtained nonshared_diffs; there are %s of them.\n"%len( nonshared_pos ) )
## Sort out the SNPs
for seq in seqlist:
nonshared_bases = "".join( seq.seq[ i ] for i in nonshared_pos )
seq.seq._data = nonshared_bases
SeqIO.write( seqlist , "%s_snps.fa"%outname, "fasta" )
sys.stderr.write("Successfully wrote snps fasta file.\n")
## Write the positions
with open("%s_positions.txt"%outname, "w" ) as out:
out.write( "\n".join( [ str( n+1 ) for n in nonshared_pos ] ) )
out.write("\n")
sys.stderr.write("Successfully wrote nonshared positions.\n")
## Do the data matrix
mfasta = "%s_snps.fa"%outname
listofsamples = []
strings = dict()
for seq_record in SeqIO.parse( mfasta, "fasta" ):
strings[ seq_record.id ] = str( seq_record.seq )
listofsamples.append( seq_record.id )
listofsamples2 = list(listofsamples)
with open("%s.dat"%outname, "w" ) as fp:
fp.write("%s\n"%("\t".join( listofsamples ) ) ),
for s1 in listofsamples:
fp.write( "%s\t"%s1 )
for s2 in listofsamples2:
fp.write( "%s\t"%get_distance( strings[ s1 ], strings[ s2 ] ) )
fp.write("\n")
fp.close()
sys.stderr.write("Successfully wrote pairwise distance matrix.\n")
sys.stderr.write("Done.")
|
conditional_block
|
|
python_comparator_code.py
|
## Script that takes in a list of fastas and generates summaries of their differences
# this is python implementation to which bugMat was compared in our publication.
# code by Drs Madeleine Cule and David Eyre, Oxford University
import sys, re, os, gzip
from Bio import SeqIO
from itertools import izip
bases = "ACGT"
def
|
( seq1, seq2 ):
'''Function to calculate the [Hamming] distance between two sequences'''
return sum(True for c1, c2 in izip( seq1, seq2 ) if c1!=c2 and c1 in bases and c2 in bases )
if __name__=="__main__":
## Parse command line arguments
listoffasta, outname_prefix = sys.argv[1:]
outname = outname_prefix
## Read in all the sequences, and replace the id with the required nicename
seqlist = []
with open( listoffasta ) as fp:
for line in fp:
nicename, fapath = line.strip().split()
if (os.path.exists( fapath)):
f = gzip.open( fapath )
fa = SeqIO.read( f, "fasta" )
fa.id = nicename
seqlist.append( fa )
else:
sys.stderr.write(fapath+" does not exist, skipping...")
## Find nonshared positions
seq_generator = izip( *seqlist )
nonshared_pos =[ i for ( i, a ) in enumerate( seq_generator )
if len( set( [ ai for ai in a if ai in bases ] ) ) > 1 ]
sys.stderr.write("Successfully obtained nonshared_diffs; there are %s of them.\n"%len( nonshared_pos ) )
## Sort out the SNPs
for seq in seqlist:
nonshared_bases = "".join( seq.seq[ i ] for i in nonshared_pos )
seq.seq._data = nonshared_bases
SeqIO.write( seqlist , "%s_snps.fa"%outname, "fasta" )
sys.stderr.write("Successfully wrote snps fasta file.\n")
## Write the positions
with open("%s_positions.txt"%outname, "w" ) as out:
out.write( "\n".join( [ str( n+1 ) for n in nonshared_pos ] ) )
out.write("\n")
sys.stderr.write("Successfully wrote nonshared positions.\n")
## Do the data matrix
mfasta = "%s_snps.fa"%outname
listofsamples = []
strings = dict()
for seq_record in SeqIO.parse( mfasta, "fasta" ):
strings[ seq_record.id ] = str( seq_record.seq )
listofsamples.append( seq_record.id )
listofsamples2 = list(listofsamples)
with open("%s.dat"%outname, "w" ) as fp:
fp.write("%s\n"%("\t".join( listofsamples ) ) ),
for s1 in listofsamples:
fp.write( "%s\t"%s1 )
for s2 in listofsamples2:
fp.write( "%s\t"%get_distance( strings[ s1 ], strings[ s2 ] ) )
fp.write("\n")
fp.close()
sys.stderr.write("Successfully wrote pairwise distance matrix.\n")
sys.stderr.write("Done.")
|
get_distance
|
identifier_name
|
python_comparator_code.py
|
## Script that takes in a list of fastas and generates summaries of their differences
# this is python implementation to which bugMat was compared in our publication.
# code by Drs Madeleine Cule and David Eyre, Oxford University
import sys, re, os, gzip
from Bio import SeqIO
from itertools import izip
bases = "ACGT"
def get_distance( seq1, seq2 ):
'''Function to calculate the [Hamming] distance between two sequences'''
return sum(True for c1, c2 in izip( seq1, seq2 ) if c1!=c2 and c1 in bases and c2 in bases )
if __name__=="__main__":
## Parse command line arguments
listoffasta, outname_prefix = sys.argv[1:]
outname = outname_prefix
## Read in all the sequences, and replace the id with the required nicename
seqlist = []
|
for line in fp:
nicename, fapath = line.strip().split()
if (os.path.exists( fapath)):
f = gzip.open( fapath )
fa = SeqIO.read( f, "fasta" )
fa.id = nicename
seqlist.append( fa )
else:
sys.stderr.write(fapath+" does not exist, skipping...")
## Find nonshared positions
seq_generator = izip( *seqlist )
nonshared_pos =[ i for ( i, a ) in enumerate( seq_generator )
if len( set( [ ai for ai in a if ai in bases ] ) ) > 1 ]
sys.stderr.write("Successfully obtained nonshared_diffs; there are %s of them.\n"%len( nonshared_pos ) )
## Sort out the SNPs
for seq in seqlist:
nonshared_bases = "".join( seq.seq[ i ] for i in nonshared_pos )
seq.seq._data = nonshared_bases
SeqIO.write( seqlist , "%s_snps.fa"%outname, "fasta" )
sys.stderr.write("Successfully wrote snps fasta file.\n")
## Write the positions
with open("%s_positions.txt"%outname, "w" ) as out:
out.write( "\n".join( [ str( n+1 ) for n in nonshared_pos ] ) )
out.write("\n")
sys.stderr.write("Successfully wrote nonshared positions.\n")
## Do the data matrix
mfasta = "%s_snps.fa"%outname
listofsamples = []
strings = dict()
for seq_record in SeqIO.parse( mfasta, "fasta" ):
strings[ seq_record.id ] = str( seq_record.seq )
listofsamples.append( seq_record.id )
listofsamples2 = list(listofsamples)
with open("%s.dat"%outname, "w" ) as fp:
fp.write("%s\n"%("\t".join( listofsamples ) ) ),
for s1 in listofsamples:
fp.write( "%s\t"%s1 )
for s2 in listofsamples2:
fp.write( "%s\t"%get_distance( strings[ s1 ], strings[ s2 ] ) )
fp.write("\n")
fp.close()
sys.stderr.write("Successfully wrote pairwise distance matrix.\n")
sys.stderr.write("Done.")
|
with open( listoffasta ) as fp:
|
random_line_split
|
tr.js
|
/**
* @license Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
* For licensing, see LICENSE.md or http://ckeditor.com/license
*/
/**
* @fileOverview
*/
/**#@+
@type String
@example
*/
/**
|
*/
CKEDITOR.lang[ 'tr' ] = {
// ARIA description.
editor: 'Zengin Metin Editörü',
// Common messages and labels.
common: {
// Screenreader titles. Please note that screenreaders are not always capable
// of reading non-English words. So be careful while translating it.
editorHelp: 'Yardım için ALT 0 tuşuna basın',
browseServer: 'Sunucuyu Gez',
url: 'URL',
protocol: 'Protokol',
upload: 'Karşıya Yükle',
uploadSubmit: 'Sunucuya Yolla',
image: 'Resim',
flash: 'Flash',
form: 'Form',
checkbox: 'Onay Kutusu',
radio: 'Seçenek Düğmesi',
textField: 'Metin Girişi',
textarea: 'Çok Satırlı Metin',
hiddenField: 'Gizli Veri',
button: 'Düğme',
select: 'Seçim Menüsü',
imageButton: 'Resimli Düğme',
notSet: '<tanımlanmamış>',
id: 'Kimlik',
name: 'Ad',
langDir: 'Dil Yönü',
langDirLtr: 'Soldan Sağa (LTR)',
langDirRtl: 'Sağdan Sola (RTL)',
langCode: 'Dil Kodlaması',
longDescr: 'Uzun Tanımlı URL',
cssClass: 'Biçem Sayfası Sınıfları',
advisoryTitle: 'Danışma Başlığı',
cssStyle: 'Biçem',
ok: 'Tamam',
cancel: 'İptal',
close: 'Kapat',
preview: 'Ön gösterim',
resize: 'Boyutlandırmak için sürükle',
generalTab: 'Genel',
advancedTab: 'Gelişmiş',
validateNumberFailed: 'Bu değer sayı değildir.',
confirmNewPage: 'İceriğiniz kayıt edilmediğinden dolayı kaybolacaktır. Yeni bir sayfa yüklemek istediğinize eminsiniz?',
confirmCancel: 'Bazı seçenekler değişmiştir. Dialog penceresini kapatmak istediğinize eminmisiniz?',
options: 'Seçenekler',
target: 'Hedef',
targetNew: 'Yeni Pencere (_blank)',
targetTop: 'Enüst Pencere (_top)',
targetSelf: 'Aynı Pencere (_self)',
targetParent: 'Ana Pencere (_parent)',
langDirLTR: 'Soldan Sağa (LTR)',
langDirRTL: 'Sağdan Sola (RTL)',
styles: 'Stil',
cssClasses: 'Stil sayfası Sınıfı',
width: 'Genişlik',
height: 'Yükseklik',
align: 'Hizalama',
alignLeft: 'Sol',
alignRight: 'Sağ',
alignCenter: 'Merkez',
alignTop: 'Tepe',
alignMiddle: 'Orta',
alignBottom: 'Alt',
invalidValue : 'Geçersiz değer.',
invalidHeight: 'Yükseklik sayı olmalıdır.',
invalidWidth: 'Genişlik bir sayı olmalıdır.',
invalidCssLength: 'Belirttiğiniz sayı "%1" alanı için pozitif bir sayı CSS birim değeri olmalıdır (px, %, in, cm, mm, em, ex, pt, veya pc).',
invalidHtmlLength: 'Belirttiğiniz sayı "%1" alanı için pozitif bir sayı HTML birim değeri olmalıdır (px veya %).',
invalidInlineStyle: 'Noktalı virgülle ayrılmış: "değer adı," inline stil için belirtilen değer biçiminde bir veya daha fazla dizilerden oluşmalıdır.',
cssLengthTooltip: 'Pikseller için bir numara girin veya geçerli bir CSS numarası (px, %, in, cm, mm, em, ex, pt, veya pc).',
// Put the voice-only part of the label in the span.
unavailable: '%1<span class="cke_accessibility">, hazır değildir</span>'
}
};
|
* Contains the dictionary of language entries.
* @namespace
|
random_line_split
|
toolkit.py
|
import datetime
import os
import subprocess
import sys
import warnings
from typing import Optional, Union
import click
from ruamel.yaml import YAML
from ruamel.yaml.compat import StringIO
from great_expectations import exceptions as ge_exceptions
from great_expectations.checkpoint import Checkpoint, LegacyCheckpoint
from great_expectations.cli.v012.cli_messages import SECTION_SEPARATOR
from great_expectations.cli.v012.datasource import get_batch_kwargs
from great_expectations.cli.v012.docs import build_docs
from great_expectations.cli.v012.upgrade_helpers import GE_UPGRADE_HELPER_VERSION_MAP
from great_expectations.cli.v012.util import cli_colorize_string, cli_message
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.id_dict import BatchKwargs
from great_expectations.core.usage_statistics.util import send_usage_message
from great_expectations.data_asset import DataAsset
from great_expectations.data_context.data_context import DataContext
from great_expectations.data_context.types.base import CURRENT_GE_CONFIG_VERSION
from great_expectations.data_context.types.resource_identifiers import (
ExpectationSuiteIdentifier,
RunIdentifier,
ValidationResultIdentifier,
)
from great_expectations.datasource import Datasource
from great_expectations.profile import BasicSuiteBuilderProfiler
EXIT_UPGRADE_CONTINUATION_MESSAGE = (
"\nOk, exiting now. To upgrade at a later time, use the following command: "
"<cyan>great_expectations project upgrade</cyan>\n\nTo learn more about the upgrade "
"process, visit "
"<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html"
"</cyan>.\n"
)
class MyYAML(YAML):
# copied from https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
def dump(self, data, stream=None, **kw):
inefficient = False
if stream is None:
inefficient = True
stream = StringIO()
YAML.dump(self, data, stream, **kw)
if inefficient:
return stream.getvalue()
yaml = MyYAML() # or typ='safe'/'unsafe' etc
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.default_flow_style = False
def create_expectation_suite(
context,
datasource_name=None,
batch_kwargs_generator_name=None,
generator_asset=None,
batch_kwargs=None,
expectation_suite_name=None,
additional_batch_kwargs=None,
empty_suite=False,
show_intro_message=False,
flag_build_docs=True,
open_docs=False,
profiler_configuration="demo",
data_asset_name=None,
):
"""
Create a new expectation suite.
WARNING: the flow and name of this method and its interaction with _profile_to_create_a_suite
require a serious revisiting.
:return: a tuple: (success, suite name, profiling_results)
"""
if generator_asset:
warnings.warn(
"The 'generator_asset' argument will be deprecated and renamed to 'data_asset_name'. "
"Please update code accordingly.",
DeprecationWarning,
)
data_asset_name = generator_asset
if show_intro_message and not empty_suite:
cli_message(
"\n<cyan>========== Create sample Expectations ==========</cyan>\n\n"
)
data_source = select_datasource(context, datasource_name=datasource_name)
if data_source is None:
# select_datasource takes care of displaying an error message, so all is left here is to exit.
sys.exit(1)
datasource_name = data_source.name
if expectation_suite_name in context.list_expectation_suite_names():
tell_user_suite_exists(expectation_suite_name)
sys.exit(1)
if (
batch_kwargs_generator_name is None
or data_asset_name is None
or batch_kwargs is None
):
(
datasource_name,
batch_kwargs_generator_name,
data_asset_name,
batch_kwargs,
) = get_batch_kwargs(
context,
datasource_name=datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_asset_name=data_asset_name,
additional_batch_kwargs=additional_batch_kwargs,
)
# In this case, we have "consumed" the additional_batch_kwargs
additional_batch_kwargs = {}
if expectation_suite_name is None:
default_expectation_suite_name = _get_default_expectation_suite_name(
batch_kwargs, data_asset_name
)
while True:
expectation_suite_name = click.prompt(
"\nName the new Expectation Suite",
default=default_expectation_suite_name,
)
if expectation_suite_name in context.list_expectation_suite_names():
tell_user_suite_exists(expectation_suite_name)
else:
break
if empty_suite:
create_empty_suite(context, expectation_suite_name, batch_kwargs)
return True, expectation_suite_name, None
profiling_results = _profile_to_create_a_suite(
additional_batch_kwargs,
batch_kwargs,
batch_kwargs_generator_name,
context,
datasource_name,
expectation_suite_name,
data_asset_name,
profiler_configuration,
)
if flag_build_docs:
build_docs(context, view=False)
if open_docs:
attempt_to_open_validation_results_in_data_docs(context, profiling_results)
return True, expectation_suite_name, profiling_results
def _profile_to_create_a_suite(
additional_batch_kwargs,
batch_kwargs,
batch_kwargs_generator_name,
context,
datasource_name,
expectation_suite_name,
data_asset_name,
profiler_configuration,
):
cli_message(
"""
Great Expectations will choose a couple of columns and generate expectations about them
to demonstrate some examples of assertions you can make about your data.
Great Expectations will store these expectations in a new Expectation Suite '{:s}' here:
{:s}
""".format(
expectation_suite_name,
context.stores[
context.expectations_store_name
].store_backend.get_url_for_key(
ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
).to_tuple()
),
)
)
confirm_proceed_or_exit()
# TODO this may not apply
cli_message("\nGenerating example Expectation Suite...")
run_id = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%dT%H%M%S.%fZ")
profiling_results = context.profile_data_asset(
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_asset_name=data_asset_name,
batch_kwargs=batch_kwargs,
profiler=BasicSuiteBuilderProfiler,
profiler_configuration=profiler_configuration,
expectation_suite_name=expectation_suite_name,
run_id=RunIdentifier(run_name=run_id),
additional_batch_kwargs=additional_batch_kwargs,
)
if not profiling_results["success"]:
_raise_profiling_errors(profiling_results)
cli_message("\nDone generating example Expectation Suite")
return profiling_results
def _raise_profiling_errors(profiling_results):
if (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_SPECIFIED_DATA_ASSETS_NOT_FOUND
):
raise ge_exceptions.DataContextError(
"""Some of the data assets you specified were not found: {:s}
""".format(
",".join(profiling_results["error"]["not_found_data_assets"])
)
)
raise ge_exceptions.DataContextError(
f"Unknown profiling error code: {profiling_results['error']['code']}"
)
def attempt_to_open_validation_results_in_data_docs(context, profiling_results):
try:
# TODO this is really brittle and not covered in tests
validation_result = profiling_results["results"][0][1]
validation_result_identifier = ValidationResultIdentifier.from_object(
validation_result
)
context.open_data_docs(resource_identifier=validation_result_identifier)
except (KeyError, IndexError):
context.open_data_docs()
def _get_default_expectation_suite_name(batch_kwargs, data_asset_name):
if data_asset_name:
suite_name = f"{data_asset_name}.warning"
elif "query" in batch_kwargs:
suite_name = "query.warning"
elif "path" in batch_kwargs:
try:
# Try guessing a filename
filename = os.path.split(os.path.normpath(batch_kwargs["path"]))[1]
# Take all but the last part after the period
filename = ".".join(filename.split(".")[:-1])
suite_name = f"{str(filename)}.warning"
except (OSError, IndexError):
suite_name = "warning"
else:
suite_name = "warning"
return suite_name
def tell_user_suite_exists(suite_name: str) -> None:
cli_message(
f"""<red>An expectation suite named `{suite_name}` already exists.</red>
- If you intend to edit the suite please use `great_expectations suite edit {suite_name}`."""
)
def create_empty_suite(
context: DataContext, expectation_suite_name: str, batch_kwargs
) -> None:
cli_message(
"""
Great Expectations will create a new Expectation Suite '{:s}' and store it here:
{:s}
""".format(
expectation_suite_name,
context.stores[
context.expectations_store_name
].store_backend.get_url_for_key(
ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
).to_tuple()
),
)
)
|
suite = context.create_expectation_suite(expectation_suite_name)
suite.add_citation(comment="New suite added via CLI", batch_kwargs=batch_kwargs)
context.save_expectation_suite(suite, expectation_suite_name)
def launch_jupyter_notebook(notebook_path: str) -> None:
jupyter_command_override = os.getenv("GE_JUPYTER_CMD", None)
if jupyter_command_override:
subprocess.call(f"{jupyter_command_override} {notebook_path}", shell=True)
else:
subprocess.call(["jupyter", "notebook", notebook_path])
def load_batch(
context: DataContext,
suite: Union[str, ExpectationSuite],
batch_kwargs: Union[dict, BatchKwargs],
) -> Union[Batch, DataAsset]:
batch: Union[Batch, DataAsset] = context.get_batch(batch_kwargs, suite)
assert isinstance(batch, DataAsset) or isinstance(
batch, Batch
), "Batch failed to load. Please check your batch_kwargs"
return batch
def load_expectation_suite(
# TODO consolidate all the myriad CLI tests into this
context: DataContext,
suite_name: str,
usage_event: str,
) -> ExpectationSuite:
"""
Load an expectation suite from a given context.
Handles a suite name with or without `.json`
:param usage_event:
"""
if suite_name.endswith(".json"):
suite_name = suite_name[:-5]
try:
suite = context.get_expectation_suite(suite_name)
return suite
except ge_exceptions.DataContextError:
exit_with_failure_message_and_stats(
context,
usage_event,
f"<red>Could not find a suite named `{suite_name}`.</red> Please check "
"the name by running `great_expectations suite list` and try again.",
)
def exit_with_failure_message_and_stats(
context: DataContext, usage_event: str, message: str
) -> None:
cli_message(message)
send_usage_message(
data_context=context,
event=usage_event,
api_version="v2",
success=False,
)
sys.exit(1)
def load_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
) -> Union[Checkpoint, LegacyCheckpoint]:
"""Load a checkpoint or raise helpful errors."""
try:
checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(
name=checkpoint_name
)
return checkpoint
except (
ge_exceptions.CheckpointNotFoundError,
ge_exceptions.InvalidCheckpointConfigError,
):
exit_with_failure_message_and_stats(
context,
usage_event,
f"""\
<red>Could not find checkpoint `{checkpoint_name}`.</red> Try running:
- `<green>great_expectations checkpoint list</green>` to verify your checkpoint exists
- `<green>great_expectations checkpoint new</green>` to configure a new checkpoint""",
)
except ge_exceptions.CheckpointError as e:
exit_with_failure_message_and_stats(context, usage_event, f"<red>{e}</red>")
def select_datasource(context: DataContext, datasource_name: str = None) -> Datasource:
"""Select a datasource interactively."""
# TODO consolidate all the myriad CLI tests into this
data_source = None
if datasource_name is None:
data_sources = sorted(context.list_datasources(), key=lambda x: x["name"])
if len(data_sources) == 0:
cli_message(
"<red>No datasources found in the context. To add a datasource, run `great_expectations datasource new`</red>"
)
elif len(data_sources) == 1:
datasource_name = data_sources[0]["name"]
else:
choices = "\n".join(
[
f" {i}. {data_source['name']}"
for i, data_source in enumerate(data_sources, 1)
]
)
option_selection = click.prompt(
f"Select a datasource\n{choices}\n",
type=click.Choice(
[str(i) for i, data_source in enumerate(data_sources, 1)]
),
show_choices=False,
)
datasource_name = data_sources[int(option_selection) - 1]["name"]
if datasource_name is not None:
data_source = context.get_datasource(datasource_name)
return data_source
def load_data_context_with_error_handling(
directory: str, from_cli_upgrade_command: bool = False
) -> DataContext:
"""Return a DataContext with good error handling and exit codes."""
try:
context: DataContext = DataContext(context_root_dir=directory)
ge_config_version: int = context.get_config().config_version
if (
from_cli_upgrade_command
and int(ge_config_version) < CURRENT_GE_CONFIG_VERSION
):
directory = directory or context.root_directory
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=directory,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if not exception_occurred and increment_version:
context = DataContext(context_root_dir=directory)
return context
except ge_exceptions.UnsupportedConfigVersionError as err:
directory = directory or DataContext.find_context_root_dir()
ge_config_version = DataContext.get_ge_config_version(
context_root_dir=directory
)
upgrade_helper_class = (
GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if ge_config_version
else None
)
if upgrade_helper_class and ge_config_version < CURRENT_GE_CONFIG_VERSION:
upgrade_project(
context_root_dir=directory,
ge_config_version=ge_config_version,
from_cli_upgrade_command=from_cli_upgrade_command,
)
else:
cli_message(f"<red>{err.message}</red>")
sys.exit(1)
except (
ge_exceptions.ConfigNotFoundError,
ge_exceptions.InvalidConfigError,
) as err:
cli_message(f"<red>{err.message}</red>")
sys.exit(1)
except ge_exceptions.PluginModuleNotFoundError as err:
cli_message(err.cli.v012_colored_message)
sys.exit(1)
except ge_exceptions.PluginClassNotFoundError as err:
cli_message(err.cli.v012_colored_message)
sys.exit(1)
except ge_exceptions.InvalidConfigurationYamlError as err:
cli_message(f"<red>{str(err)}</red>")
sys.exit(1)
def upgrade_project(
context_root_dir, ge_config_version, from_cli_upgrade_command=False
):
if from_cli_upgrade_command:
message = (
f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
f"the version "
f"number must be at least {CURRENT_GE_CONFIG_VERSION}.</red>"
)
else:
message = (
f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
f"the version "
f"number must be at least {CURRENT_GE_CONFIG_VERSION}.\nIn order to proceed, "
f"your project must be upgraded.</red>"
)
cli_message(message)
upgrade_prompt = (
"\nWould you like to run the Upgrade Helper to bring your project up-to-date?"
)
confirm_proceed_or_exit(
confirm_prompt=upgrade_prompt,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
)
cli_message(SECTION_SEPARATOR)
# use loop in case multiple upgrades need to take place
while ge_config_version < CURRENT_GE_CONFIG_VERSION:
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=context_root_dir,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if exception_occurred or not increment_version:
break
ge_config_version += 1
cli_message(SECTION_SEPARATOR)
upgrade_success_message = "<green>Upgrade complete. Exiting...</green>\n"
upgrade_incomplete_message = f"""\
<red>The Upgrade Helper was unable to perform a complete project upgrade. Next steps:</red>
- Please perform any manual steps outlined in the Upgrade Overview and/or Upgrade Report above
- When complete, increment the config_version key in your <cyan>great_expectations.yml</cyan> to <cyan>{
ge_config_version + 1}</cyan>\n
To learn more about the upgrade process, visit \
<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html</cyan>
"""
if ge_config_version < CURRENT_GE_CONFIG_VERSION:
cli_message(upgrade_incomplete_message)
else:
cli_message(upgrade_success_message)
sys.exit(0)
def upgrade_project_up_to_one_version_increment(
context_root_dir: str,
ge_config_version: float,
continuation_message: str,
from_cli_upgrade_command: bool = False,
) -> [bool, bool]: # Returns increment_version, exception_occurred
upgrade_helper_class = GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if not upgrade_helper_class:
return False, False
target_ge_config_version = int(ge_config_version) + 1
# set version temporarily to CURRENT_GE_CONFIG_VERSION to get functional DataContext
DataContext.set_ge_config_version(
config_version=CURRENT_GE_CONFIG_VERSION,
context_root_dir=context_root_dir,
)
upgrade_helper = upgrade_helper_class(context_root_dir=context_root_dir)
upgrade_overview, confirmation_required = upgrade_helper.get_upgrade_overview()
if confirmation_required or from_cli_upgrade_command:
upgrade_confirmed = confirm_proceed_or_exit(
confirm_prompt=upgrade_overview,
continuation_message=continuation_message,
exit_on_no=False,
)
else:
upgrade_confirmed = True
if upgrade_confirmed:
cli_message("\nUpgrading project...")
cli_message(SECTION_SEPARATOR)
# run upgrade and get report of what was done, if version number should be incremented
(
upgrade_report,
increment_version,
exception_occurred,
) = upgrade_helper.upgrade_project()
# display report to user
cli_message(upgrade_report)
if exception_occurred:
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
# display report to user
return False, True
# set config version to target version
if increment_version:
DataContext.set_ge_config_version(
target_ge_config_version,
context_root_dir,
validate_config_version=False,
)
return True, False
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
return False, False
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
cli_message(continuation_message)
sys.exit(0)
def confirm_proceed_or_exit(
    confirm_prompt: str = "Would you like to proceed?",
    continuation_message: str = "Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !",
    exit_on_no: bool = True,
    exit_code: int = 0,
) -> Optional[bool]:
    """
    Every CLI command that starts a potentially lengthy (>1 sec) computation
    or modifies some resources (e.g., edits the config file, adds objects
    to the stores) must follow this pattern:
    1. Explain which resources will be created/modified/deleted
    2. Use this method to ask for user's confirmation

    The goal of this standardization is for the users to expect consistency -
    if you saw one command, you know what to expect from all others.

    If the user does not confirm, the program should exit. The purpose of the
    exit_on_no parameter is to provide the option to perform cleanup actions
    before exiting outside of the function.
    """
    colorized_prompt = cli_colorize_string(confirm_prompt)
    colorized_message = cli_colorize_string(continuation_message)
    # click.confirm returns the user's yes/no answer; default is "yes".
    if click.confirm(colorized_prompt, default=True):
        return True
    if not exit_on_no:
        # Caller wants to handle the refusal (e.g. run cleanup) itself.
        return False
    cli_message(colorized_message)
    sys.exit(exit_code)
|
random_line_split
|
|
toolkit.py
|
import datetime
import os
import subprocess
import sys
import warnings
from typing import Optional, Union
import click
from ruamel.yaml import YAML
from ruamel.yaml.compat import StringIO
from great_expectations import exceptions as ge_exceptions
from great_expectations.checkpoint import Checkpoint, LegacyCheckpoint
from great_expectations.cli.v012.cli_messages import SECTION_SEPARATOR
from great_expectations.cli.v012.datasource import get_batch_kwargs
from great_expectations.cli.v012.docs import build_docs
from great_expectations.cli.v012.upgrade_helpers import GE_UPGRADE_HELPER_VERSION_MAP
from great_expectations.cli.v012.util import cli_colorize_string, cli_message
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.id_dict import BatchKwargs
from great_expectations.core.usage_statistics.util import send_usage_message
from great_expectations.data_asset import DataAsset
from great_expectations.data_context.data_context import DataContext
from great_expectations.data_context.types.base import CURRENT_GE_CONFIG_VERSION
from great_expectations.data_context.types.resource_identifiers import (
ExpectationSuiteIdentifier,
RunIdentifier,
ValidationResultIdentifier,
)
from great_expectations.datasource import Datasource
from great_expectations.profile import BasicSuiteBuilderProfiler
EXIT_UPGRADE_CONTINUATION_MESSAGE = (
"\nOk, exiting now. To upgrade at a later time, use the following command: "
"<cyan>great_expectations project upgrade</cyan>\n\nTo learn more about the upgrade "
"process, visit "
"<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html"
"</cyan>.\n"
)
class MyYAML(YAML):
    # copied from https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
    def dump(self, data, stream=None, **kw):
        """Dump ``data``; when no stream is given, return the YAML text as a str."""
        target = stream if stream is not None else StringIO()
        YAML.dump(self, data, target, **kw)
        if stream is None:
            # We created the buffer ourselves, so hand its contents back.
            return target.getvalue()
yaml = MyYAML() # or typ='safe'/'unsafe' etc
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.default_flow_style = False
def create_expectation_suite(
    context,
    datasource_name=None,
    batch_kwargs_generator_name=None,
    generator_asset=None,
    batch_kwargs=None,
    expectation_suite_name=None,
    additional_batch_kwargs=None,
    empty_suite=False,
    show_intro_message=False,
    flag_build_docs=True,
    open_docs=False,
    profiler_configuration="demo",
    data_asset_name=None,
):
    """
    Create a new expectation suite, interactively prompting for any missing
    datasource / batch / name inputs, then (unless ``empty_suite``) profiling
    a sample batch to seed the suite with example expectations.

    WARNING: the flow and name of this method and its interaction with
    _profile_to_create_a_suite require a serious revisiting.

    :return: a tuple: (success, suite name, profiling_results)
    """
    # 'generator_asset' is the deprecated spelling of 'data_asset_name'.
    if generator_asset:
        warnings.warn(
            "The 'generator_asset' argument will be deprecated and renamed to 'data_asset_name'. "
            "Please update code accordingly.",
            DeprecationWarning,
        )
        data_asset_name = generator_asset
    if show_intro_message and not empty_suite:
        cli_message(
            "\n<cyan>========== Create sample Expectations ==========</cyan>\n\n"
        )
    data_source = select_datasource(context, datasource_name=datasource_name)
    if data_source is None:
        # select_datasource takes care of displaying an error message, so all is left here is to exit.
        sys.exit(1)
    datasource_name = data_source.name
    # Refuse to clobber an existing suite with the same name.
    if expectation_suite_name in context.list_expectation_suite_names():
        tell_user_suite_exists(expectation_suite_name)
        sys.exit(1)
    # If any batch-selection detail is missing, gather it interactively.
    if (
        batch_kwargs_generator_name is None
        or data_asset_name is None
        or batch_kwargs is None
    ):
        (
            datasource_name,
            batch_kwargs_generator_name,
            data_asset_name,
            batch_kwargs,
        ) = get_batch_kwargs(
            context,
            datasource_name=datasource_name,
            batch_kwargs_generator_name=batch_kwargs_generator_name,
            data_asset_name=data_asset_name,
            additional_batch_kwargs=additional_batch_kwargs,
        )
        # In this case, we have "consumed" the additional_batch_kwargs
        additional_batch_kwargs = {}
    if expectation_suite_name is None:
        default_expectation_suite_name = _get_default_expectation_suite_name(
            batch_kwargs, data_asset_name
        )
        # Keep prompting until the user picks a name that is not already taken.
        while True:
            expectation_suite_name = click.prompt(
                "\nName the new Expectation Suite",
                default=default_expectation_suite_name,
            )
            if expectation_suite_name in context.list_expectation_suite_names():
                tell_user_suite_exists(expectation_suite_name)
            else:
                break
    if empty_suite:
        # No profiling requested: persist a bare suite and return early.
        create_empty_suite(context, expectation_suite_name, batch_kwargs)
        return True, expectation_suite_name, None
    profiling_results = _profile_to_create_a_suite(
        additional_batch_kwargs,
        batch_kwargs,
        batch_kwargs_generator_name,
        context,
        datasource_name,
        expectation_suite_name,
        data_asset_name,
        profiler_configuration,
    )
    if flag_build_docs:
        build_docs(context, view=False)
    if open_docs:
        attempt_to_open_validation_results_in_data_docs(context, profiling_results)
    return True, expectation_suite_name, profiling_results
def _profile_to_create_a_suite(
    additional_batch_kwargs,
    batch_kwargs,
    batch_kwargs_generator_name,
    context,
    datasource_name,
    expectation_suite_name,
    data_asset_name,
    profiler_configuration,
):
    # Profile one batch with BasicSuiteBuilderProfiler to seed the new suite
    # with example expectations; raises (via _raise_profiling_errors) on failure.
    # First explain what is about to happen and where the suite will be stored.
    cli_message(
        """
Great Expectations will choose a couple of columns and generate expectations about them
to demonstrate some examples of assertions you can make about your data.

Great Expectations will store these expectations in a new Expectation Suite '{:s}' here:

{:s}
""".format(
            expectation_suite_name,
            context.stores[
                context.expectations_store_name
            ].store_backend.get_url_for_key(
                ExpectationSuiteIdentifier(
                    expectation_suite_name=expectation_suite_name
                ).to_tuple()
            ),
        )
    )
    confirm_proceed_or_exit()
    # TODO this may not apply
    cli_message("\nGenerating example Expectation Suite...")
    # UTC timestamp used as the run name for this profiling run.
    run_id = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%dT%H%M%S.%fZ")
    profiling_results = context.profile_data_asset(
        datasource_name,
        batch_kwargs_generator_name=batch_kwargs_generator_name,
        data_asset_name=data_asset_name,
        batch_kwargs=batch_kwargs,
        profiler=BasicSuiteBuilderProfiler,
        profiler_configuration=profiler_configuration,
        expectation_suite_name=expectation_suite_name,
        run_id=RunIdentifier(run_name=run_id),
        additional_batch_kwargs=additional_batch_kwargs,
    )
    if not profiling_results["success"]:
        _raise_profiling_errors(profiling_results)
    cli_message("\nDone generating example Expectation Suite")
    return profiling_results
def _raise_profiling_errors(profiling_results):
    """Translate a failed profiling result payload into a DataContextError."""
    error_code = profiling_results["error"]["code"]
    if error_code == DataContext.PROFILING_ERROR_CODE_SPECIFIED_DATA_ASSETS_NOT_FOUND:
        missing_assets = ",".join(profiling_results["error"]["not_found_data_assets"])
        raise ge_exceptions.DataContextError(
            """Some of the data assets you specified were not found: {:s}
""".format(missing_assets)
        )
    # Anything else is an error code this CLI does not know how to explain.
    raise ge_exceptions.DataContextError(
        f"Unknown profiling error code: {error_code}"
    )
def attempt_to_open_validation_results_in_data_docs(context, profiling_results):
    """Open Data Docs at the first profiling validation result, falling back to the index."""
    try:
        # TODO this is really brittle and not covered in tests
        first_result = profiling_results["results"][0][1]
        identifier = ValidationResultIdentifier.from_object(first_result)
    except (KeyError, IndexError):
        # Results payload did not have the expected shape; open the docs index instead.
        context.open_data_docs()
    else:
        context.open_data_docs(resource_identifier=identifier)
def _get_default_expectation_suite_name(batch_kwargs, data_asset_name):
if data_asset_name:
suite_name = f"{data_asset_name}.warning"
elif "query" in batch_kwargs:
suite_name = "query.warning"
elif "path" in batch_kwargs:
try:
# Try guessing a filename
filename = os.path.split(os.path.normpath(batch_kwargs["path"]))[1]
# Take all but the last part after the period
filename = ".".join(filename.split(".")[:-1])
suite_name = f"{str(filename)}.warning"
except (OSError, IndexError):
suite_name = "warning"
else:
suite_name = "warning"
return suite_name
def tell_user_suite_exists(suite_name: str) -> None:
    """Tell the user that ``suite_name`` is already taken and how to edit it."""
    message = f"""<red>An expectation suite named `{suite_name}` already exists.</red>
- If you intend to edit the suite please use `great_expectations suite edit {suite_name}`."""
    cli_message(message)
def create_empty_suite(
    context: DataContext, expectation_suite_name: str, batch_kwargs
) -> None:
    # Create a new, empty expectation suite, record the batch_kwargs used at
    # creation time as a citation, and persist it to the expectations store.
    cli_message(
        """
Great Expectations will create a new Expectation Suite '{:s}' and store it here:

{:s}
""".format(
            expectation_suite_name,
            context.stores[
                context.expectations_store_name
            ].store_backend.get_url_for_key(
                ExpectationSuiteIdentifier(
                    expectation_suite_name=expectation_suite_name
                ).to_tuple()
            ),
        )
    )
    suite = context.create_expectation_suite(expectation_suite_name)
    # The citation preserves provenance: which batch the suite was created against.
    suite.add_citation(comment="New suite added via CLI", batch_kwargs=batch_kwargs)
    context.save_expectation_suite(suite, expectation_suite_name)
def launch_jupyter_notebook(notebook_path: str) -> None:
    """Open ``notebook_path`` in Jupyter, honoring the GE_JUPYTER_CMD env override."""
    custom_command = os.getenv("GE_JUPYTER_CMD", None)
    if custom_command is None:
        subprocess.call(["jupyter", "notebook", notebook_path])
    else:
        # NOTE(review): the override runs through the shell, so a notebook path
        # containing spaces must be pre-quoted by the caller — confirm intended.
        subprocess.call(f"{custom_command} {notebook_path}", shell=True)
def load_batch(
    context: DataContext,
    suite: Union[str, ExpectationSuite],
    batch_kwargs: Union[dict, BatchKwargs],
) -> Union[Batch, DataAsset]:
    """Fetch a batch for ``suite`` and assert that loading actually succeeded."""
    batch = context.get_batch(batch_kwargs, suite)
    assert isinstance(
        batch, (DataAsset, Batch)
    ), "Batch failed to load. Please check your batch_kwargs"
    return batch
def
|
(
# TODO consolidate all the myriad CLI tests into this
context: DataContext,
suite_name: str,
usage_event: str,
) -> ExpectationSuite:
"""
Load an expectation suite from a given context.
Handles a suite name with or without `.json`
:param usage_event:
"""
if suite_name.endswith(".json"):
suite_name = suite_name[:-5]
try:
suite = context.get_expectation_suite(suite_name)
return suite
except ge_exceptions.DataContextError:
exit_with_failure_message_and_stats(
context,
usage_event,
f"<red>Could not find a suite named `{suite_name}`.</red> Please check "
"the name by running `great_expectations suite list` and try again.",
)
def exit_with_failure_message_and_stats(
    context: DataContext, usage_event: str, message: str
) -> None:
    """Print ``message``, record a failed usage-stats event, and exit(1)."""
    cli_message(message)
    # Report the failure before terminating so the event is not lost.
    send_usage_message(
        data_context=context,
        event=usage_event,
        success=False,
        api_version="v2",
    )
    sys.exit(1)
def load_checkpoint(
    context: DataContext,
    checkpoint_name: str,
    usage_event: str,
) -> Union[Checkpoint, LegacyCheckpoint]:
    """Load a checkpoint or raise helpful errors."""
    try:
        checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(
            name=checkpoint_name
        )
        return checkpoint
    except (
        ge_exceptions.CheckpointNotFoundError,
        ge_exceptions.InvalidCheckpointConfigError,
    ):
        # Missing or malformed checkpoint: report usage failure and exit(1)
        # with guidance on how to list/create checkpoints.
        exit_with_failure_message_and_stats(
            context,
            usage_event,
            f"""\
<red>Could not find checkpoint `{checkpoint_name}`.</red> Try running:
  - `<green>great_expectations checkpoint list</green>` to verify your checkpoint exists
  - `<green>great_expectations checkpoint new</green>` to configure a new checkpoint""",
        )
    except ge_exceptions.CheckpointError as e:
        # Any other checkpoint problem: surface its message verbatim and exit(1).
        exit_with_failure_message_and_stats(context, usage_event, f"<red>{e}</red>")
def select_datasource(context: DataContext, datasource_name: str = None) -> Datasource:
    """Select a datasource interactively.

    Returns None when the context has no datasources (an error message is
    printed); otherwise returns the chosen (or single/named) Datasource.
    """
    # TODO consolidate all the myriad CLI tests into this
    data_source = None
    if datasource_name is None:
        data_sources = sorted(context.list_datasources(), key=lambda x: x["name"])
        if len(data_sources) == 0:
            cli_message(
                "<red>No datasources found in the context. To add a datasource, run `great_expectations datasource new`</red>"
            )
        elif len(data_sources) == 1:
            # Exactly one datasource: pick it without prompting.
            datasource_name = data_sources[0]["name"]
        else:
            # Multiple datasources: show a numbered menu (1-based).
            choices = "\n".join(
                [
                    f"    {i}. {data_source['name']}"
                    for i, data_source in enumerate(data_sources, 1)
                ]
            )
            option_selection = click.prompt(
                f"Select a datasource\n{choices}\n",
                type=click.Choice(
                    [str(i) for i, data_source in enumerate(data_sources, 1)]
                ),
                show_choices=False,
            )
            datasource_name = data_sources[int(option_selection) - 1]["name"]
    if datasource_name is not None:
        data_source = context.get_datasource(datasource_name)
    return data_source
def load_data_context_with_error_handling(
    directory: str, from_cli_upgrade_command: bool = False
) -> DataContext:
    """Return a DataContext with good error handling and exit codes.

    When invoked from `great_expectations project upgrade`, an out-of-date but
    loadable config triggers one upgrade increment before the context is
    returned; an unloadable (too old) config routes through upgrade_project.
    All other configuration errors print a red message and exit(1).
    """
    try:
        context: DataContext = DataContext(context_root_dir=directory)
        ge_config_version: int = context.get_config().config_version
        if (
            from_cli_upgrade_command
            and int(ge_config_version) < CURRENT_GE_CONFIG_VERSION
        ):
            directory = directory or context.root_directory
            (
                increment_version,
                exception_occurred,
            ) = upgrade_project_up_to_one_version_increment(
                context_root_dir=directory,
                ge_config_version=ge_config_version,
                continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
                from_cli_upgrade_command=from_cli_upgrade_command,
            )
            if not exception_occurred and increment_version:
                # Reload so the returned context reflects the upgraded config.
                context = DataContext(context_root_dir=directory)
        return context
    except ge_exceptions.UnsupportedConfigVersionError as err:
        # Config is too old for DataContext to load at all: offer the upgrade
        # helper when one is registered for this version, otherwise fail.
        directory = directory or DataContext.find_context_root_dir()
        ge_config_version = DataContext.get_ge_config_version(
            context_root_dir=directory
        )
        upgrade_helper_class = (
            GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
            if ge_config_version
            else None
        )
        if upgrade_helper_class and ge_config_version < CURRENT_GE_CONFIG_VERSION:
            upgrade_project(
                context_root_dir=directory,
                ge_config_version=ge_config_version,
                from_cli_upgrade_command=from_cli_upgrade_command,
            )
        else:
            cli_message(f"<red>{err.message}</red>")
            sys.exit(1)
    except (
        ge_exceptions.ConfigNotFoundError,
        ge_exceptions.InvalidConfigError,
    ) as err:
        cli_message(f"<red>{err.message}</red>")
        sys.exit(1)
    except ge_exceptions.PluginModuleNotFoundError as err:
        # NOTE(review): `err.cli.v012_colored_message` looks like a search/replace
        # artifact of the v012 package move (likely meant `err.cli_colored_message`)
        # — confirm against the exceptions module.
        cli_message(err.cli.v012_colored_message)
        sys.exit(1)
    except ge_exceptions.PluginClassNotFoundError as err:
        cli_message(err.cli.v012_colored_message)
        sys.exit(1)
    except ge_exceptions.InvalidConfigurationYamlError as err:
        cli_message(f"<red>{str(err)}</red>")
        sys.exit(1)
def upgrade_project(
    context_root_dir, ge_config_version, from_cli_upgrade_command=False
):
    # Drive the interactive upgrade loop one version increment at a time until
    # the config reaches CURRENT_GE_CONFIG_VERSION (or a step fails/declines).
    # This function always terminates the process via sys.exit(0).
    if from_cli_upgrade_command:
        message = (
            f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
            f"the version "
            f"number must be at least {CURRENT_GE_CONFIG_VERSION}.</red>"
        )
    else:
        message = (
            f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
            f"the version "
            f"number must be at least {CURRENT_GE_CONFIG_VERSION}.\nIn order to proceed, "
            f"your project must be upgraded.</red>"
        )
    cli_message(message)
    upgrade_prompt = (
        "\nWould you like to run the Upgrade Helper to bring your project up-to-date?"
    )
    # Exits with the continuation message if the user declines.
    confirm_proceed_or_exit(
        confirm_prompt=upgrade_prompt,
        continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
    )
    cli_message(SECTION_SEPARATOR)
    # use loop in case multiple upgrades need to take place
    while ge_config_version < CURRENT_GE_CONFIG_VERSION:
        (
            increment_version,
            exception_occurred,
        ) = upgrade_project_up_to_one_version_increment(
            context_root_dir=context_root_dir,
            ge_config_version=ge_config_version,
            continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
            from_cli_upgrade_command=from_cli_upgrade_command,
        )
        if exception_occurred or not increment_version:
            break
        ge_config_version += 1
    cli_message(SECTION_SEPARATOR)
    upgrade_success_message = "<green>Upgrade complete. Exiting...</green>\n"
    upgrade_incomplete_message = f"""\
<red>The Upgrade Helper was unable to perform a complete project upgrade. Next steps:</red>

    - Please perform any manual steps outlined in the Upgrade Overview and/or Upgrade Report above
    - When complete, increment the config_version key in your <cyan>great_expectations.yml</cyan> to <cyan>{
ge_config_version + 1}</cyan>\n
To learn more about the upgrade process, visit \
<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html</cyan>
"""
    if ge_config_version < CURRENT_GE_CONFIG_VERSION:
        cli_message(upgrade_incomplete_message)
    else:
        cli_message(upgrade_success_message)
    sys.exit(0)
def upgrade_project_up_to_one_version_increment(
    context_root_dir: str,
    ge_config_version: float,
    continuation_message: str,
    from_cli_upgrade_command: bool = False,
) -> [bool, bool]:  # Returns increment_version, exception_occurred
    """Run a single upgrade-helper step (version N -> N+1) for the project.

    NOTE(review): the `[bool, bool]` annotation presumably means
    Tuple[bool, bool]; as written it is not a valid typing construct — confirm.
    If the user declines the upgrade, the config version is restored and the
    process exits(0) instead of returning.
    """
    upgrade_helper_class = GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
    if not upgrade_helper_class:
        # No helper registered for this version: nothing to do.
        return False, False
    target_ge_config_version = int(ge_config_version) + 1
    # set version temporarily to CURRENT_GE_CONFIG_VERSION to get functional DataContext
    DataContext.set_ge_config_version(
        config_version=CURRENT_GE_CONFIG_VERSION,
        context_root_dir=context_root_dir,
    )
    upgrade_helper = upgrade_helper_class(context_root_dir=context_root_dir)
    upgrade_overview, confirmation_required = upgrade_helper.get_upgrade_overview()
    if confirmation_required or from_cli_upgrade_command:
        upgrade_confirmed = confirm_proceed_or_exit(
            confirm_prompt=upgrade_overview,
            continuation_message=continuation_message,
            exit_on_no=False,
        )
    else:
        upgrade_confirmed = True
    if upgrade_confirmed:
        cli_message("\nUpgrading project...")
        cli_message(SECTION_SEPARATOR)
        # run upgrade and get report of what was done, if version number should be incremented
        (
            upgrade_report,
            increment_version,
            exception_occurred,
        ) = upgrade_helper.upgrade_project()
        # display report to user
        cli_message(upgrade_report)
        if exception_occurred:
            # restore version number to current number
            DataContext.set_ge_config_version(
                ge_config_version, context_root_dir, validate_config_version=False
            )
            # display report to user
            return False, True
        # set config version to target version
        if increment_version:
            DataContext.set_ge_config_version(
                target_ge_config_version,
                context_root_dir,
                validate_config_version=False,
            )
            return True, False
        # restore version number to current number
        DataContext.set_ge_config_version(
            ge_config_version, context_root_dir, validate_config_version=False
        )
        return False, False
    # User declined: restore version number to current number and exit cleanly.
    DataContext.set_ge_config_version(
        ge_config_version, context_root_dir, validate_config_version=False
    )
    cli_message(continuation_message)
    sys.exit(0)
def confirm_proceed_or_exit(
    confirm_prompt: str = "Would you like to proceed?",
    continuation_message: str = "Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !",
    exit_on_no: bool = True,
    exit_code: int = 0,
) -> Optional[bool]:
    """
    Every CLI command that starts a potentially lengthy (>1 sec) computation
    or modifies some resources (e.g., edits the config file, adds objects
    to the stores) must follow this pattern:
    1. Explain which resources will be created/modified/deleted
    2. Use this method to ask for user's confirmation

    The goal of this standardization is for the users to expect consistency -
    if you saw one command, you know what to expect from all others.

    If the user does not confirm, the program should exit. The purpose of the
    exit_on_no parameter is to provide the option to perform cleanup actions
    before exiting outside of the function.
    """
    colorized_prompt = cli_colorize_string(confirm_prompt)
    colorized_message = cli_colorize_string(continuation_message)
    # click.confirm returns the user's yes/no answer; default is "yes".
    if click.confirm(colorized_prompt, default=True):
        return True
    if not exit_on_no:
        # Caller wants to handle the refusal (e.g. run cleanup) itself.
        return False
    cli_message(colorized_message)
    sys.exit(exit_code)
|
load_expectation_suite
|
identifier_name
|
toolkit.py
|
import datetime
import os
import subprocess
import sys
import warnings
from typing import Optional, Union
import click
from ruamel.yaml import YAML
from ruamel.yaml.compat import StringIO
from great_expectations import exceptions as ge_exceptions
from great_expectations.checkpoint import Checkpoint, LegacyCheckpoint
from great_expectations.cli.v012.cli_messages import SECTION_SEPARATOR
from great_expectations.cli.v012.datasource import get_batch_kwargs
from great_expectations.cli.v012.docs import build_docs
from great_expectations.cli.v012.upgrade_helpers import GE_UPGRADE_HELPER_VERSION_MAP
from great_expectations.cli.v012.util import cli_colorize_string, cli_message
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.id_dict import BatchKwargs
from great_expectations.core.usage_statistics.util import send_usage_message
from great_expectations.data_asset import DataAsset
from great_expectations.data_context.data_context import DataContext
from great_expectations.data_context.types.base import CURRENT_GE_CONFIG_VERSION
from great_expectations.data_context.types.resource_identifiers import (
ExpectationSuiteIdentifier,
RunIdentifier,
ValidationResultIdentifier,
)
from great_expectations.datasource import Datasource
from great_expectations.profile import BasicSuiteBuilderProfiler
EXIT_UPGRADE_CONTINUATION_MESSAGE = (
"\nOk, exiting now. To upgrade at a later time, use the following command: "
"<cyan>great_expectations project upgrade</cyan>\n\nTo learn more about the upgrade "
"process, visit "
"<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html"
"</cyan>.\n"
)
class MyYAML(YAML):
# copied from https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
def dump(self, data, stream=None, **kw):
inefficient = False
if stream is None:
inefficient = True
stream = StringIO()
YAML.dump(self, data, stream, **kw)
if inefficient:
return stream.getvalue()
yaml = MyYAML() # or typ='safe'/'unsafe' etc
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.default_flow_style = False
def create_expectation_suite(
context,
datasource_name=None,
batch_kwargs_generator_name=None,
generator_asset=None,
batch_kwargs=None,
expectation_suite_name=None,
additional_batch_kwargs=None,
empty_suite=False,
show_intro_message=False,
flag_build_docs=True,
open_docs=False,
profiler_configuration="demo",
data_asset_name=None,
):
"""
Create a new expectation suite.
WARNING: the flow and name of this method and its interaction with _profile_to_create_a_suite
require a serious revisiting.
:return: a tuple: (success, suite name, profiling_results)
"""
if generator_asset:
warnings.warn(
"The 'generator_asset' argument will be deprecated and renamed to 'data_asset_name'. "
"Please update code accordingly.",
DeprecationWarning,
)
data_asset_name = generator_asset
if show_intro_message and not empty_suite:
cli_message(
"\n<cyan>========== Create sample Expectations ==========</cyan>\n\n"
)
data_source = select_datasource(context, datasource_name=datasource_name)
if data_source is None:
# select_datasource takes care of displaying an error message, so all is left here is to exit.
sys.exit(1)
datasource_name = data_source.name
if expectation_suite_name in context.list_expectation_suite_names():
tell_user_suite_exists(expectation_suite_name)
sys.exit(1)
if (
batch_kwargs_generator_name is None
or data_asset_name is None
or batch_kwargs is None
):
(
datasource_name,
batch_kwargs_generator_name,
data_asset_name,
batch_kwargs,
) = get_batch_kwargs(
context,
datasource_name=datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_asset_name=data_asset_name,
additional_batch_kwargs=additional_batch_kwargs,
)
# In this case, we have "consumed" the additional_batch_kwargs
additional_batch_kwargs = {}
if expectation_suite_name is None:
default_expectation_suite_name = _get_default_expectation_suite_name(
batch_kwargs, data_asset_name
)
while True:
expectation_suite_name = click.prompt(
"\nName the new Expectation Suite",
default=default_expectation_suite_name,
)
if expectation_suite_name in context.list_expectation_suite_names():
tell_user_suite_exists(expectation_suite_name)
else:
break
if empty_suite:
create_empty_suite(context, expectation_suite_name, batch_kwargs)
return True, expectation_suite_name, None
profiling_results = _profile_to_create_a_suite(
additional_batch_kwargs,
batch_kwargs,
batch_kwargs_generator_name,
context,
datasource_name,
expectation_suite_name,
data_asset_name,
profiler_configuration,
)
if flag_build_docs:
build_docs(context, view=False)
if open_docs:
attempt_to_open_validation_results_in_data_docs(context, profiling_results)
return True, expectation_suite_name, profiling_results
def _profile_to_create_a_suite(
additional_batch_kwargs,
batch_kwargs,
batch_kwargs_generator_name,
context,
datasource_name,
expectation_suite_name,
data_asset_name,
profiler_configuration,
):
cli_message(
"""
Great Expectations will choose a couple of columns and generate expectations about them
to demonstrate some examples of assertions you can make about your data.
Great Expectations will store these expectations in a new Expectation Suite '{:s}' here:
{:s}
""".format(
expectation_suite_name,
context.stores[
context.expectations_store_name
].store_backend.get_url_for_key(
ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
).to_tuple()
),
)
)
confirm_proceed_or_exit()
# TODO this may not apply
cli_message("\nGenerating example Expectation Suite...")
run_id = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%dT%H%M%S.%fZ")
profiling_results = context.profile_data_asset(
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_asset_name=data_asset_name,
batch_kwargs=batch_kwargs,
profiler=BasicSuiteBuilderProfiler,
profiler_configuration=profiler_configuration,
expectation_suite_name=expectation_suite_name,
run_id=RunIdentifier(run_name=run_id),
additional_batch_kwargs=additional_batch_kwargs,
)
if not profiling_results["success"]:
_raise_profiling_errors(profiling_results)
cli_message("\nDone generating example Expectation Suite")
return profiling_results
def _raise_profiling_errors(profiling_results):
if (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_SPECIFIED_DATA_ASSETS_NOT_FOUND
):
raise ge_exceptions.DataContextError(
"""Some of the data assets you specified were not found: {:s}
""".format(
",".join(profiling_results["error"]["not_found_data_assets"])
)
)
raise ge_exceptions.DataContextError(
f"Unknown profiling error code: {profiling_results['error']['code']}"
)
def attempt_to_open_validation_results_in_data_docs(context, profiling_results):
try:
# TODO this is really brittle and not covered in tests
validation_result = profiling_results["results"][0][1]
validation_result_identifier = ValidationResultIdentifier.from_object(
validation_result
)
context.open_data_docs(resource_identifier=validation_result_identifier)
except (KeyError, IndexError):
context.open_data_docs()
def _get_default_expectation_suite_name(batch_kwargs, data_asset_name):
if data_asset_name:
suite_name = f"{data_asset_name}.warning"
elif "query" in batch_kwargs:
suite_name = "query.warning"
elif "path" in batch_kwargs:
try:
# Try guessing a filename
filename = os.path.split(os.path.normpath(batch_kwargs["path"]))[1]
# Take all but the last part after the period
filename = ".".join(filename.split(".")[:-1])
suite_name = f"{str(filename)}.warning"
except (OSError, IndexError):
suite_name = "warning"
else:
suite_name = "warning"
return suite_name
def tell_user_suite_exists(suite_name: str) -> None:
cli_message(
f"""<red>An expectation suite named `{suite_name}` already exists.</red>
- If you intend to edit the suite please use `great_expectations suite edit {suite_name}`."""
)
def create_empty_suite(
context: DataContext, expectation_suite_name: str, batch_kwargs
) -> None:
|
def launch_jupyter_notebook(notebook_path: str) -> None:
jupyter_command_override = os.getenv("GE_JUPYTER_CMD", None)
if jupyter_command_override:
subprocess.call(f"{jupyter_command_override} {notebook_path}", shell=True)
else:
subprocess.call(["jupyter", "notebook", notebook_path])
def load_batch(
context: DataContext,
suite: Union[str, ExpectationSuite],
batch_kwargs: Union[dict, BatchKwargs],
) -> Union[Batch, DataAsset]:
batch: Union[Batch, DataAsset] = context.get_batch(batch_kwargs, suite)
assert isinstance(batch, DataAsset) or isinstance(
batch, Batch
), "Batch failed to load. Please check your batch_kwargs"
return batch
def load_expectation_suite(
    # TODO consolidate all the myriad CLI tests into this
    context: DataContext,
    suite_name: str,
    usage_event: str,
) -> ExpectationSuite:
    """
    Load an expectation suite from a given context.

    Handles a suite name with or without `.json`
    :param usage_event:
    """
    # Tolerate a trailing ".json" (5 chars) in the user-supplied name.
    if suite_name.endswith(".json"):
        suite_name = suite_name[:-5]
    try:
        return context.get_expectation_suite(suite_name)
    except ge_exceptions.DataContextError:
        # Unknown suite: record the failed usage event and exit(1).
        exit_with_failure_message_and_stats(
            context,
            usage_event,
            f"<red>Could not find a suite named `{suite_name}`.</red> Please check "
            "the name by running `great_expectations suite list` and try again.",
        )
def exit_with_failure_message_and_stats(
context: DataContext, usage_event: str, message: str
) -> None:
cli_message(message)
send_usage_message(
data_context=context,
event=usage_event,
api_version="v2",
success=False,
)
sys.exit(1)
def load_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
) -> Union[Checkpoint, LegacyCheckpoint]:
"""Load a checkpoint or raise helpful errors."""
try:
checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(
name=checkpoint_name
)
return checkpoint
except (
ge_exceptions.CheckpointNotFoundError,
ge_exceptions.InvalidCheckpointConfigError,
):
exit_with_failure_message_and_stats(
context,
usage_event,
f"""\
<red>Could not find checkpoint `{checkpoint_name}`.</red> Try running:
- `<green>great_expectations checkpoint list</green>` to verify your checkpoint exists
- `<green>great_expectations checkpoint new</green>` to configure a new checkpoint""",
)
except ge_exceptions.CheckpointError as e:
exit_with_failure_message_and_stats(context, usage_event, f"<red>{e}</red>")
def select_datasource(context: DataContext, datasource_name: str = None) -> Datasource:
"""Select a datasource interactively."""
# TODO consolidate all the myriad CLI tests into this
data_source = None
if datasource_name is None:
data_sources = sorted(context.list_datasources(), key=lambda x: x["name"])
if len(data_sources) == 0:
cli_message(
"<red>No datasources found in the context. To add a datasource, run `great_expectations datasource new`</red>"
)
elif len(data_sources) == 1:
datasource_name = data_sources[0]["name"]
else:
choices = "\n".join(
[
f" {i}. {data_source['name']}"
for i, data_source in enumerate(data_sources, 1)
]
)
option_selection = click.prompt(
f"Select a datasource\n{choices}\n",
type=click.Choice(
[str(i) for i, data_source in enumerate(data_sources, 1)]
),
show_choices=False,
)
datasource_name = data_sources[int(option_selection) - 1]["name"]
if datasource_name is not None:
data_source = context.get_datasource(datasource_name)
return data_source
def load_data_context_with_error_handling(
directory: str, from_cli_upgrade_command: bool = False
) -> DataContext:
"""Return a DataContext with good error handling and exit codes."""
try:
context: DataContext = DataContext(context_root_dir=directory)
ge_config_version: int = context.get_config().config_version
if (
from_cli_upgrade_command
and int(ge_config_version) < CURRENT_GE_CONFIG_VERSION
):
directory = directory or context.root_directory
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=directory,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if not exception_occurred and increment_version:
context = DataContext(context_root_dir=directory)
return context
except ge_exceptions.UnsupportedConfigVersionError as err:
directory = directory or DataContext.find_context_root_dir()
ge_config_version = DataContext.get_ge_config_version(
context_root_dir=directory
)
upgrade_helper_class = (
GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if ge_config_version
else None
)
if upgrade_helper_class and ge_config_version < CURRENT_GE_CONFIG_VERSION:
upgrade_project(
context_root_dir=directory,
ge_config_version=ge_config_version,
from_cli_upgrade_command=from_cli_upgrade_command,
)
else:
cli_message(f"<red>{err.message}</red>")
sys.exit(1)
except (
ge_exceptions.ConfigNotFoundError,
ge_exceptions.InvalidConfigError,
) as err:
cli_message(f"<red>{err.message}</red>")
sys.exit(1)
except ge_exceptions.PluginModuleNotFoundError as err:
cli_message(err.cli.v012_colored_message)
sys.exit(1)
except ge_exceptions.PluginClassNotFoundError as err:
cli_message(err.cli.v012_colored_message)
sys.exit(1)
except ge_exceptions.InvalidConfigurationYamlError as err:
cli_message(f"<red>{str(err)}</red>")
sys.exit(1)
def upgrade_project(
context_root_dir, ge_config_version, from_cli_upgrade_command=False
):
if from_cli_upgrade_command:
message = (
f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
f"the version "
f"number must be at least {CURRENT_GE_CONFIG_VERSION}.</red>"
)
else:
message = (
f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
f"the version "
f"number must be at least {CURRENT_GE_CONFIG_VERSION}.\nIn order to proceed, "
f"your project must be upgraded.</red>"
)
cli_message(message)
upgrade_prompt = (
"\nWould you like to run the Upgrade Helper to bring your project up-to-date?"
)
confirm_proceed_or_exit(
confirm_prompt=upgrade_prompt,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
)
cli_message(SECTION_SEPARATOR)
# use loop in case multiple upgrades need to take place
while ge_config_version < CURRENT_GE_CONFIG_VERSION:
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=context_root_dir,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if exception_occurred or not increment_version:
break
ge_config_version += 1
cli_message(SECTION_SEPARATOR)
upgrade_success_message = "<green>Upgrade complete. Exiting...</green>\n"
upgrade_incomplete_message = f"""\
<red>The Upgrade Helper was unable to perform a complete project upgrade. Next steps:</red>
- Please perform any manual steps outlined in the Upgrade Overview and/or Upgrade Report above
- When complete, increment the config_version key in your <cyan>great_expectations.yml</cyan> to <cyan>{
ge_config_version + 1}</cyan>\n
To learn more about the upgrade process, visit \
<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html</cyan>
"""
if ge_config_version < CURRENT_GE_CONFIG_VERSION:
cli_message(upgrade_incomplete_message)
else:
cli_message(upgrade_success_message)
sys.exit(0)
def upgrade_project_up_to_one_version_increment(
context_root_dir: str,
ge_config_version: float,
continuation_message: str,
from_cli_upgrade_command: bool = False,
) -> [bool, bool]: # Returns increment_version, exception_occurred
upgrade_helper_class = GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if not upgrade_helper_class:
return False, False
target_ge_config_version = int(ge_config_version) + 1
# set version temporarily to CURRENT_GE_CONFIG_VERSION to get functional DataContext
DataContext.set_ge_config_version(
config_version=CURRENT_GE_CONFIG_VERSION,
context_root_dir=context_root_dir,
)
upgrade_helper = upgrade_helper_class(context_root_dir=context_root_dir)
upgrade_overview, confirmation_required = upgrade_helper.get_upgrade_overview()
if confirmation_required or from_cli_upgrade_command:
upgrade_confirmed = confirm_proceed_or_exit(
confirm_prompt=upgrade_overview,
continuation_message=continuation_message,
exit_on_no=False,
)
else:
upgrade_confirmed = True
if upgrade_confirmed:
cli_message("\nUpgrading project...")
cli_message(SECTION_SEPARATOR)
# run upgrade and get report of what was done, if version number should be incremented
(
upgrade_report,
increment_version,
exception_occurred,
) = upgrade_helper.upgrade_project()
# display report to user
cli_message(upgrade_report)
if exception_occurred:
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
# display report to user
return False, True
# set config version to target version
if increment_version:
DataContext.set_ge_config_version(
target_ge_config_version,
context_root_dir,
validate_config_version=False,
)
return True, False
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
return False, False
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
cli_message(continuation_message)
sys.exit(0)
def confirm_proceed_or_exit(
confirm_prompt: str = "Would you like to proceed?",
continuation_message: str = "Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !",
exit_on_no: bool = True,
exit_code: int = 0,
) -> Optional[bool]:
"""
Every CLI command that starts a potentially lengthy (>1 sec) computation
or modifies some resources (e.g., edits the config file, adds objects
to the stores) must follow this pattern:
1. Explain which resources will be created/modified/deleted
2. Use this method to ask for user's confirmation
The goal of this standardization is for the users to expect consistency -
if you saw one command, you know what to expect from all others.
If the user does not confirm, the program should exit. The purpose of the exit_on_no parameter is to provide
the option to perform cleanup actions before exiting outside of the function.
"""
confirm_prompt_colorized = cli_colorize_string(confirm_prompt)
continuation_message_colorized = cli_colorize_string(continuation_message)
if not click.confirm(confirm_prompt_colorized, default=True):
if exit_on_no:
cli_message(continuation_message_colorized)
sys.exit(exit_code)
else:
return False
return True
|
cli_message(
"""
Great Expectations will create a new Expectation Suite '{:s}' and store it here:
{:s}
""".format(
expectation_suite_name,
context.stores[
context.expectations_store_name
].store_backend.get_url_for_key(
ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
).to_tuple()
),
)
)
suite = context.create_expectation_suite(expectation_suite_name)
suite.add_citation(comment="New suite added via CLI", batch_kwargs=batch_kwargs)
context.save_expectation_suite(suite, expectation_suite_name)
|
identifier_body
|
toolkit.py
|
import datetime
import os
import subprocess
import sys
import warnings
from typing import Optional, Union
import click
from ruamel.yaml import YAML
from ruamel.yaml.compat import StringIO
from great_expectations import exceptions as ge_exceptions
from great_expectations.checkpoint import Checkpoint, LegacyCheckpoint
from great_expectations.cli.v012.cli_messages import SECTION_SEPARATOR
from great_expectations.cli.v012.datasource import get_batch_kwargs
from great_expectations.cli.v012.docs import build_docs
from great_expectations.cli.v012.upgrade_helpers import GE_UPGRADE_HELPER_VERSION_MAP
from great_expectations.cli.v012.util import cli_colorize_string, cli_message
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.id_dict import BatchKwargs
from great_expectations.core.usage_statistics.util import send_usage_message
from great_expectations.data_asset import DataAsset
from great_expectations.data_context.data_context import DataContext
from great_expectations.data_context.types.base import CURRENT_GE_CONFIG_VERSION
from great_expectations.data_context.types.resource_identifiers import (
ExpectationSuiteIdentifier,
RunIdentifier,
ValidationResultIdentifier,
)
from great_expectations.datasource import Datasource
from great_expectations.profile import BasicSuiteBuilderProfiler
EXIT_UPGRADE_CONTINUATION_MESSAGE = (
"\nOk, exiting now. To upgrade at a later time, use the following command: "
"<cyan>great_expectations project upgrade</cyan>\n\nTo learn more about the upgrade "
"process, visit "
"<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html"
"</cyan>.\n"
)
class MyYAML(YAML):
# copied from https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
def dump(self, data, stream=None, **kw):
inefficient = False
if stream is None:
inefficient = True
stream = StringIO()
YAML.dump(self, data, stream, **kw)
if inefficient:
return stream.getvalue()
yaml = MyYAML() # or typ='safe'/'unsafe' etc
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.default_flow_style = False
def create_expectation_suite(
context,
datasource_name=None,
batch_kwargs_generator_name=None,
generator_asset=None,
batch_kwargs=None,
expectation_suite_name=None,
additional_batch_kwargs=None,
empty_suite=False,
show_intro_message=False,
flag_build_docs=True,
open_docs=False,
profiler_configuration="demo",
data_asset_name=None,
):
"""
Create a new expectation suite.
WARNING: the flow and name of this method and its interaction with _profile_to_create_a_suite
require a serious revisiting.
:return: a tuple: (success, suite name, profiling_results)
"""
if generator_asset:
warnings.warn(
"The 'generator_asset' argument will be deprecated and renamed to 'data_asset_name'. "
"Please update code accordingly.",
DeprecationWarning,
)
data_asset_name = generator_asset
if show_intro_message and not empty_suite:
cli_message(
"\n<cyan>========== Create sample Expectations ==========</cyan>\n\n"
)
data_source = select_datasource(context, datasource_name=datasource_name)
if data_source is None:
# select_datasource takes care of displaying an error message, so all is left here is to exit.
sys.exit(1)
datasource_name = data_source.name
if expectation_suite_name in context.list_expectation_suite_names():
tell_user_suite_exists(expectation_suite_name)
sys.exit(1)
if (
batch_kwargs_generator_name is None
or data_asset_name is None
or batch_kwargs is None
):
(
datasource_name,
batch_kwargs_generator_name,
data_asset_name,
batch_kwargs,
) = get_batch_kwargs(
context,
datasource_name=datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_asset_name=data_asset_name,
additional_batch_kwargs=additional_batch_kwargs,
)
# In this case, we have "consumed" the additional_batch_kwargs
additional_batch_kwargs = {}
if expectation_suite_name is None:
default_expectation_suite_name = _get_default_expectation_suite_name(
batch_kwargs, data_asset_name
)
while True:
expectation_suite_name = click.prompt(
"\nName the new Expectation Suite",
default=default_expectation_suite_name,
)
if expectation_suite_name in context.list_expectation_suite_names():
tell_user_suite_exists(expectation_suite_name)
else:
break
if empty_suite:
create_empty_suite(context, expectation_suite_name, batch_kwargs)
return True, expectation_suite_name, None
profiling_results = _profile_to_create_a_suite(
additional_batch_kwargs,
batch_kwargs,
batch_kwargs_generator_name,
context,
datasource_name,
expectation_suite_name,
data_asset_name,
profiler_configuration,
)
if flag_build_docs:
build_docs(context, view=False)
if open_docs:
attempt_to_open_validation_results_in_data_docs(context, profiling_results)
return True, expectation_suite_name, profiling_results
def _profile_to_create_a_suite(
additional_batch_kwargs,
batch_kwargs,
batch_kwargs_generator_name,
context,
datasource_name,
expectation_suite_name,
data_asset_name,
profiler_configuration,
):
cli_message(
"""
Great Expectations will choose a couple of columns and generate expectations about them
to demonstrate some examples of assertions you can make about your data.
Great Expectations will store these expectations in a new Expectation Suite '{:s}' here:
{:s}
""".format(
expectation_suite_name,
context.stores[
context.expectations_store_name
].store_backend.get_url_for_key(
ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
).to_tuple()
),
)
)
confirm_proceed_or_exit()
# TODO this may not apply
cli_message("\nGenerating example Expectation Suite...")
run_id = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%dT%H%M%S.%fZ")
profiling_results = context.profile_data_asset(
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_asset_name=data_asset_name,
batch_kwargs=batch_kwargs,
profiler=BasicSuiteBuilderProfiler,
profiler_configuration=profiler_configuration,
expectation_suite_name=expectation_suite_name,
run_id=RunIdentifier(run_name=run_id),
additional_batch_kwargs=additional_batch_kwargs,
)
if not profiling_results["success"]:
_raise_profiling_errors(profiling_results)
cli_message("\nDone generating example Expectation Suite")
return profiling_results
def _raise_profiling_errors(profiling_results):
if (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_SPECIFIED_DATA_ASSETS_NOT_FOUND
):
raise ge_exceptions.DataContextError(
"""Some of the data assets you specified were not found: {:s}
""".format(
",".join(profiling_results["error"]["not_found_data_assets"])
)
)
raise ge_exceptions.DataContextError(
f"Unknown profiling error code: {profiling_results['error']['code']}"
)
def attempt_to_open_validation_results_in_data_docs(context, profiling_results):
try:
# TODO this is really brittle and not covered in tests
validation_result = profiling_results["results"][0][1]
validation_result_identifier = ValidationResultIdentifier.from_object(
validation_result
)
context.open_data_docs(resource_identifier=validation_result_identifier)
except (KeyError, IndexError):
context.open_data_docs()
def _get_default_expectation_suite_name(batch_kwargs, data_asset_name):
if data_asset_name:
suite_name = f"{data_asset_name}.warning"
elif "query" in batch_kwargs:
suite_name = "query.warning"
elif "path" in batch_kwargs:
try:
# Try guessing a filename
filename = os.path.split(os.path.normpath(batch_kwargs["path"]))[1]
# Take all but the last part after the period
filename = ".".join(filename.split(".")[:-1])
suite_name = f"{str(filename)}.warning"
except (OSError, IndexError):
suite_name = "warning"
else:
suite_name = "warning"
return suite_name
def tell_user_suite_exists(suite_name: str) -> None:
cli_message(
f"""<red>An expectation suite named `{suite_name}` already exists.</red>
- If you intend to edit the suite please use `great_expectations suite edit {suite_name}`."""
)
def create_empty_suite(
context: DataContext, expectation_suite_name: str, batch_kwargs
) -> None:
cli_message(
"""
Great Expectations will create a new Expectation Suite '{:s}' and store it here:
{:s}
""".format(
expectation_suite_name,
context.stores[
context.expectations_store_name
].store_backend.get_url_for_key(
ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
).to_tuple()
),
)
)
suite = context.create_expectation_suite(expectation_suite_name)
suite.add_citation(comment="New suite added via CLI", batch_kwargs=batch_kwargs)
context.save_expectation_suite(suite, expectation_suite_name)
def launch_jupyter_notebook(notebook_path: str) -> None:
jupyter_command_override = os.getenv("GE_JUPYTER_CMD", None)
if jupyter_command_override:
|
else:
subprocess.call(["jupyter", "notebook", notebook_path])
def load_batch(
context: DataContext,
suite: Union[str, ExpectationSuite],
batch_kwargs: Union[dict, BatchKwargs],
) -> Union[Batch, DataAsset]:
batch: Union[Batch, DataAsset] = context.get_batch(batch_kwargs, suite)
assert isinstance(batch, DataAsset) or isinstance(
batch, Batch
), "Batch failed to load. Please check your batch_kwargs"
return batch
def load_expectation_suite(
# TODO consolidate all the myriad CLI tests into this
context: DataContext,
suite_name: str,
usage_event: str,
) -> ExpectationSuite:
"""
Load an expectation suite from a given context.
Handles a suite name with or without `.json`
:param usage_event:
"""
if suite_name.endswith(".json"):
suite_name = suite_name[:-5]
try:
suite = context.get_expectation_suite(suite_name)
return suite
except ge_exceptions.DataContextError:
exit_with_failure_message_and_stats(
context,
usage_event,
f"<red>Could not find a suite named `{suite_name}`.</red> Please check "
"the name by running `great_expectations suite list` and try again.",
)
def exit_with_failure_message_and_stats(
context: DataContext, usage_event: str, message: str
) -> None:
cli_message(message)
send_usage_message(
data_context=context,
event=usage_event,
api_version="v2",
success=False,
)
sys.exit(1)
def load_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
) -> Union[Checkpoint, LegacyCheckpoint]:
"""Load a checkpoint or raise helpful errors."""
try:
checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(
name=checkpoint_name
)
return checkpoint
except (
ge_exceptions.CheckpointNotFoundError,
ge_exceptions.InvalidCheckpointConfigError,
):
exit_with_failure_message_and_stats(
context,
usage_event,
f"""\
<red>Could not find checkpoint `{checkpoint_name}`.</red> Try running:
- `<green>great_expectations checkpoint list</green>` to verify your checkpoint exists
- `<green>great_expectations checkpoint new</green>` to configure a new checkpoint""",
)
except ge_exceptions.CheckpointError as e:
exit_with_failure_message_and_stats(context, usage_event, f"<red>{e}</red>")
def select_datasource(context: DataContext, datasource_name: str = None) -> Datasource:
"""Select a datasource interactively."""
# TODO consolidate all the myriad CLI tests into this
data_source = None
if datasource_name is None:
data_sources = sorted(context.list_datasources(), key=lambda x: x["name"])
if len(data_sources) == 0:
cli_message(
"<red>No datasources found in the context. To add a datasource, run `great_expectations datasource new`</red>"
)
elif len(data_sources) == 1:
datasource_name = data_sources[0]["name"]
else:
choices = "\n".join(
[
f" {i}. {data_source['name']}"
for i, data_source in enumerate(data_sources, 1)
]
)
option_selection = click.prompt(
f"Select a datasource\n{choices}\n",
type=click.Choice(
[str(i) for i, data_source in enumerate(data_sources, 1)]
),
show_choices=False,
)
datasource_name = data_sources[int(option_selection) - 1]["name"]
if datasource_name is not None:
data_source = context.get_datasource(datasource_name)
return data_source
def load_data_context_with_error_handling(
directory: str, from_cli_upgrade_command: bool = False
) -> DataContext:
"""Return a DataContext with good error handling and exit codes."""
try:
context: DataContext = DataContext(context_root_dir=directory)
ge_config_version: int = context.get_config().config_version
if (
from_cli_upgrade_command
and int(ge_config_version) < CURRENT_GE_CONFIG_VERSION
):
directory = directory or context.root_directory
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=directory,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if not exception_occurred and increment_version:
context = DataContext(context_root_dir=directory)
return context
except ge_exceptions.UnsupportedConfigVersionError as err:
directory = directory or DataContext.find_context_root_dir()
ge_config_version = DataContext.get_ge_config_version(
context_root_dir=directory
)
upgrade_helper_class = (
GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if ge_config_version
else None
)
if upgrade_helper_class and ge_config_version < CURRENT_GE_CONFIG_VERSION:
upgrade_project(
context_root_dir=directory,
ge_config_version=ge_config_version,
from_cli_upgrade_command=from_cli_upgrade_command,
)
else:
cli_message(f"<red>{err.message}</red>")
sys.exit(1)
except (
ge_exceptions.ConfigNotFoundError,
ge_exceptions.InvalidConfigError,
) as err:
cli_message(f"<red>{err.message}</red>")
sys.exit(1)
except ge_exceptions.PluginModuleNotFoundError as err:
cli_message(err.cli.v012_colored_message)
sys.exit(1)
except ge_exceptions.PluginClassNotFoundError as err:
cli_message(err.cli.v012_colored_message)
sys.exit(1)
except ge_exceptions.InvalidConfigurationYamlError as err:
cli_message(f"<red>{str(err)}</red>")
sys.exit(1)
def upgrade_project(
context_root_dir, ge_config_version, from_cli_upgrade_command=False
):
if from_cli_upgrade_command:
message = (
f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
f"the version "
f"number must be at least {CURRENT_GE_CONFIG_VERSION}.</red>"
)
else:
message = (
f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
f"the version "
f"number must be at least {CURRENT_GE_CONFIG_VERSION}.\nIn order to proceed, "
f"your project must be upgraded.</red>"
)
cli_message(message)
upgrade_prompt = (
"\nWould you like to run the Upgrade Helper to bring your project up-to-date?"
)
confirm_proceed_or_exit(
confirm_prompt=upgrade_prompt,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
)
cli_message(SECTION_SEPARATOR)
# use loop in case multiple upgrades need to take place
while ge_config_version < CURRENT_GE_CONFIG_VERSION:
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=context_root_dir,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if exception_occurred or not increment_version:
break
ge_config_version += 1
cli_message(SECTION_SEPARATOR)
upgrade_success_message = "<green>Upgrade complete. Exiting...</green>\n"
upgrade_incomplete_message = f"""\
<red>The Upgrade Helper was unable to perform a complete project upgrade. Next steps:</red>
- Please perform any manual steps outlined in the Upgrade Overview and/or Upgrade Report above
- When complete, increment the config_version key in your <cyan>great_expectations.yml</cyan> to <cyan>{
ge_config_version + 1}</cyan>\n
To learn more about the upgrade process, visit \
<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html</cyan>
"""
if ge_config_version < CURRENT_GE_CONFIG_VERSION:
cli_message(upgrade_incomplete_message)
else:
cli_message(upgrade_success_message)
sys.exit(0)
def upgrade_project_up_to_one_version_increment(
context_root_dir: str,
ge_config_version: float,
continuation_message: str,
from_cli_upgrade_command: bool = False,
) -> [bool, bool]: # Returns increment_version, exception_occurred
upgrade_helper_class = GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if not upgrade_helper_class:
return False, False
target_ge_config_version = int(ge_config_version) + 1
# set version temporarily to CURRENT_GE_CONFIG_VERSION to get functional DataContext
DataContext.set_ge_config_version(
config_version=CURRENT_GE_CONFIG_VERSION,
context_root_dir=context_root_dir,
)
upgrade_helper = upgrade_helper_class(context_root_dir=context_root_dir)
upgrade_overview, confirmation_required = upgrade_helper.get_upgrade_overview()
if confirmation_required or from_cli_upgrade_command:
upgrade_confirmed = confirm_proceed_or_exit(
confirm_prompt=upgrade_overview,
continuation_message=continuation_message,
exit_on_no=False,
)
else:
upgrade_confirmed = True
if upgrade_confirmed:
cli_message("\nUpgrading project...")
cli_message(SECTION_SEPARATOR)
# run upgrade and get report of what was done, if version number should be incremented
(
upgrade_report,
increment_version,
exception_occurred,
) = upgrade_helper.upgrade_project()
# display report to user
cli_message(upgrade_report)
if exception_occurred:
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
# display report to user
return False, True
# set config version to target version
if increment_version:
DataContext.set_ge_config_version(
target_ge_config_version,
context_root_dir,
validate_config_version=False,
)
return True, False
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
return False, False
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
cli_message(continuation_message)
sys.exit(0)
def confirm_proceed_or_exit(
confirm_prompt: str = "Would you like to proceed?",
continuation_message: str = "Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !",
exit_on_no: bool = True,
exit_code: int = 0,
) -> Optional[bool]:
"""
Every CLI command that starts a potentially lengthy (>1 sec) computation
or modifies some resources (e.g., edits the config file, adds objects
to the stores) must follow this pattern:
1. Explain which resources will be created/modified/deleted
2. Use this method to ask for user's confirmation
The goal of this standardization is for the users to expect consistency -
if you saw one command, you know what to expect from all others.
If the user does not confirm, the program should exit. The purpose of the exit_on_no parameter is to provide
the option to perform cleanup actions before exiting outside of the function.
"""
confirm_prompt_colorized = cli_colorize_string(confirm_prompt)
continuation_message_colorized = cli_colorize_string(continuation_message)
if not click.confirm(confirm_prompt_colorized, default=True):
if exit_on_no:
cli_message(continuation_message_colorized)
sys.exit(exit_code)
else:
return False
return True
|
subprocess.call(f"{jupyter_command_override} {notebook_path}", shell=True)
|
conditional_block
|
code_interpreter.py
|
#!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Interpret various computer languages using installed interpreters.
@file code_interpreter.py
@package pybooster.code_interpreter
@version 2019.07.14
@author Devyn Collier Johnson <[email protected]>
@copyright LGPLv3
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from subprocess import getoutput # nosec
from sys import stdout
__all__: list = [
# CLISP #
r'execclispfile',
# COFFEESCRIPT #
r'execcoffeescript',
# JAVASCRIPT #
r'execjs',
r'execjsfile',
# LUA #
r'execlua',
r'execluafile',
# PERL #
r'execperl',
r'execperlfile',
r'initperl',
# PHP #
r'execphp',
r'execphpfile',
# RUBY #
r'execruby',
r'execrubyfile',
# SCALA #
r'execscala',
r'execscala',
# SHELL #
r'execsh',
r'execshfile',
r'initsh'
]
# CLISP #
def execclispfile(_filename: str) -> str:
"""Execute a CLisp file given as a str and return the output as a str."""
return getoutput(r'clisp ' + _filename)
# COFFEESCRIPT #
def execcoffeescript(_code: str) -> str:
"""Execute Coffeescript code given as a str and return the output as a str."""
return getoutput('coffeescript --eval \'' + _code.replace('\'', '\\\'') + '\'')
# JAVASCRIPT #
def execjs(_code: str) -> str:
"""Execute JavaScript code given as a str and return the output as a str."""
return getoutput('jsc -e \'' + _code.replace('\'', '\\\'') + '\'')
def execjsfile(_filename: str) -> str:
"""Execute a JavaScript file given as a str and return the output as a str."""
return getoutput(r'jsc -e ' + _filename)
# LUA #
def execlua(_code: str) -> str:
"""Execute Lua code given as a str and return the output as a str."""
return getoutput('lua -e \'' + _code.replace('\'', '\\\'') + '\'')
def execluafile(_filename: str) -> str:
"""Execute a Lua script given as a str and return the output as a str."""
return getoutput(r'lua ' + _filename)
# PERL #
def execperl(_code: str) -> str:
"""Execute Perl code given as a str and return the output as a str."""
return getoutput('perl -e \'' + _code.replace('\'', '\\\'') + '\'')
def execperlfile(_filename: str) -> str:
"""Execute a Perl script given as a str and return the output as a str."""
return getoutput(r'perl ' + _filename)
def initperl() -> None:
"""Run a Perl REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Perl > ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('perl -e \'' + _input + '\'') + '\n')
# PHP #
def execphp(_code: str) -> str:
"""Execute PHP code given as a str and return the output as a str."""
return getoutput('php -r \'' + _code.replace('\'', '\\\'') + '\'')
def execphpfile(_filename: str) -> str:
"""Execute a PHP script given as a str and return the output as a str."""
return getoutput(r'php -f ' + _filename)
# RUBY #
def execruby(_code: str) -> str:
"""Execute Ruby code given as a str and return the output as a str."""
return getoutput('ruby -e \'' + _code.replace('\'', '\\\'') + '\'')
def execrubyfile(_filename: str) -> str:
"""Execute a Ruby script given as a str and return the output as a str."""
return getoutput(r'ruby ' + _filename)
# SCALA #
def execscala(_code: str) -> str:
"""Execute Scala code given as a str and return the output as a str."""
return getoutput('scala -e \'' + _code.replace('\'', '\\\'') + '\'')
def execscalafile(_filename: str) -> str:
"""Execute a Scala file given as a str and return the output as a str."""
return getoutput(r'scala ' + _filename)
# SHELL #
def execsh(_code: str) -> str:
"""Execute Shell code given as a str and return the output as a str."""
return getoutput('sh -c \'' + _code.replace('\'', '\\\'') + '\'')
def execshfile(_filename: str) -> str:
"""Execute a Shell script given as a str and return the output as a str."""
return getoutput(r'sh ' + _filename)
def initsh() -> None:
"""Run a shell REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
|
_input = input(r'Shell: $ ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('sh -c \'' + _input + '\'') + '\n')
|
conditional_block
|
|
code_interpreter.py
|
#!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Interpret various computer languages using installed interpreters.
@file code_interpreter.py
@package pybooster.code_interpreter
@version 2019.07.14
@author Devyn Collier Johnson <[email protected]>
@copyright LGPLv3
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from subprocess import getoutput # nosec
from sys import stdout
__all__: list = [
# CLISP #
r'execclispfile',
# COFFEESCRIPT #
r'execcoffeescript',
# JAVASCRIPT #
r'execjs',
r'execjsfile',
# LUA #
r'execlua',
r'execluafile',
# PERL #
r'execperl',
r'execperlfile',
r'initperl',
# PHP #
r'execphp',
r'execphpfile',
# RUBY #
r'execruby',
r'execrubyfile',
# SCALA #
r'execscala',
r'execscala',
# SHELL #
r'execsh',
r'execshfile',
r'initsh'
]
# CLISP #
def execclispfile(_filename: str) -> str:
"""Execute a CLisp file given as a str and return the output as a str."""
return getoutput(r'clisp ' + _filename)
# COFFEESCRIPT #
def execcoffeescript(_code: str) -> str:
"""Execute Coffeescript code given as a str and return the output as a str."""
return getoutput('coffeescript --eval \'' + _code.replace('\'', '\\\'') + '\'')
# JAVASCRIPT #
def execjs(_code: str) -> str:
"""Execute JavaScript code given as a str and return the output as a str."""
return getoutput('jsc -e \'' + _code.replace('\'', '\\\'') + '\'')
def execjsfile(_filename: str) -> str:
"""Execute a JavaScript file given as a str and return the output as a str."""
return getoutput(r'jsc -e ' + _filename)
# LUA #
def execlua(_code: str) -> str:
"""Execute Lua code given as a str and return the output as a str."""
return getoutput('lua -e \'' + _code.replace('\'', '\\\'') + '\'')
def execluafile(_filename: str) -> str:
"""Execute a Lua script given as a str and return the output as a str."""
return getoutput(r'lua ' + _filename)
# PERL #
|
"""Execute Perl code given as a str and return the output as a str."""
return getoutput('perl -e \'' + _code.replace('\'', '\\\'') + '\'')
def execperlfile(_filename: str) -> str:
"""Execute a Perl script given as a str and return the output as a str."""
return getoutput(r'perl ' + _filename)
def initperl() -> None:
"""Run a Perl REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Perl > ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('perl -e \'' + _input + '\'') + '\n')
# PHP #
def execphp(_code: str) -> str:
"""Execute PHP code given as a str and return the output as a str."""
return getoutput('php -r \'' + _code.replace('\'', '\\\'') + '\'')
def execphpfile(_filename: str) -> str:
"""Execute a PHP script given as a str and return the output as a str."""
return getoutput(r'php -f ' + _filename)
# RUBY #
def execruby(_code: str) -> str:
"""Execute Ruby code given as a str and return the output as a str."""
return getoutput('ruby -e \'' + _code.replace('\'', '\\\'') + '\'')
def execrubyfile(_filename: str) -> str:
"""Execute a Ruby script given as a str and return the output as a str."""
return getoutput(r'ruby ' + _filename)
# SCALA #
def execscala(_code: str) -> str:
"""Execute Scala code given as a str and return the output as a str."""
return getoutput('scala -e \'' + _code.replace('\'', '\\\'') + '\'')
def execscalafile(_filename: str) -> str:
"""Execute a Scala file given as a str and return the output as a str."""
return getoutput(r'scala ' + _filename)
# SHELL #
def execsh(_code: str) -> str:
"""Execute Shell code given as a str and return the output as a str."""
return getoutput('sh -c \'' + _code.replace('\'', '\\\'') + '\'')
def execshfile(_filename: str) -> str:
"""Execute a Shell script given as a str and return the output as a str."""
return getoutput(r'sh ' + _filename)
def initsh() -> None:
"""Run a shell REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Shell: $ ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('sh -c \'' + _input + '\'') + '\n')
|
def execperl(_code: str) -> str:
|
random_line_split
|
code_interpreter.py
|
#!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Interpret various computer languages using installed interpreters.
@file code_interpreter.py
@package pybooster.code_interpreter
@version 2019.07.14
@author Devyn Collier Johnson <[email protected]>
@copyright LGPLv3
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from subprocess import getoutput # nosec
from sys import stdout
__all__: list = [
# CLISP #
r'execclispfile',
# COFFEESCRIPT #
r'execcoffeescript',
# JAVASCRIPT #
r'execjs',
r'execjsfile',
# LUA #
r'execlua',
r'execluafile',
# PERL #
r'execperl',
r'execperlfile',
r'initperl',
# PHP #
r'execphp',
r'execphpfile',
# RUBY #
r'execruby',
r'execrubyfile',
# SCALA #
r'execscala',
r'execscala',
# SHELL #
r'execsh',
r'execshfile',
r'initsh'
]
# CLISP #
def execclispfile(_filename: str) -> str:
"""Execute a CLisp file given as a str and return the output as a str."""
return getoutput(r'clisp ' + _filename)
# COFFEESCRIPT #
def execcoffeescript(_code: str) -> str:
"""Execute Coffeescript code given as a str and return the output as a str."""
return getoutput('coffeescript --eval \'' + _code.replace('\'', '\\\'') + '\'')
# JAVASCRIPT #
def execjs(_code: str) -> str:
"""Execute JavaScript code given as a str and return the output as a str."""
return getoutput('jsc -e \'' + _code.replace('\'', '\\\'') + '\'')
def execjsfile(_filename: str) -> str:
"""Execute a JavaScript file given as a str and return the output as a str."""
return getoutput(r'jsc -e ' + _filename)
# LUA #
def
|
(_code: str) -> str:
"""Execute Lua code given as a str and return the output as a str."""
return getoutput('lua -e \'' + _code.replace('\'', '\\\'') + '\'')
def execluafile(_filename: str) -> str:
"""Execute a Lua script given as a str and return the output as a str."""
return getoutput(r'lua ' + _filename)
# PERL #
def execperl(_code: str) -> str:
"""Execute Perl code given as a str and return the output as a str."""
return getoutput('perl -e \'' + _code.replace('\'', '\\\'') + '\'')
def execperlfile(_filename: str) -> str:
"""Execute a Perl script given as a str and return the output as a str."""
return getoutput(r'perl ' + _filename)
def initperl() -> None:
"""Run a Perl REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Perl > ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('perl -e \'' + _input + '\'') + '\n')
# PHP #
def execphp(_code: str) -> str:
"""Execute PHP code given as a str and return the output as a str."""
return getoutput('php -r \'' + _code.replace('\'', '\\\'') + '\'')
def execphpfile(_filename: str) -> str:
"""Execute a PHP script given as a str and return the output as a str."""
return getoutput(r'php -f ' + _filename)
# RUBY #
def execruby(_code: str) -> str:
"""Execute Ruby code given as a str and return the output as a str."""
return getoutput('ruby -e \'' + _code.replace('\'', '\\\'') + '\'')
def execrubyfile(_filename: str) -> str:
"""Execute a Ruby script given as a str and return the output as a str."""
return getoutput(r'ruby ' + _filename)
# SCALA #
def execscala(_code: str) -> str:
"""Execute Scala code given as a str and return the output as a str."""
return getoutput('scala -e \'' + _code.replace('\'', '\\\'') + '\'')
def execscalafile(_filename: str) -> str:
"""Execute a Scala file given as a str and return the output as a str."""
return getoutput(r'scala ' + _filename)
# SHELL #
def execsh(_code: str) -> str:
"""Execute Shell code given as a str and return the output as a str."""
return getoutput('sh -c \'' + _code.replace('\'', '\\\'') + '\'')
def execshfile(_filename: str) -> str:
"""Execute a Shell script given as a str and return the output as a str."""
return getoutput(r'sh ' + _filename)
def initsh() -> None:
"""Run a shell REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Shell: $ ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('sh -c \'' + _input + '\'') + '\n')
|
execlua
|
identifier_name
|
code_interpreter.py
|
#!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Interpret various computer languages using installed interpreters.
@file code_interpreter.py
@package pybooster.code_interpreter
@version 2019.07.14
@author Devyn Collier Johnson <[email protected]>
@copyright LGPLv3
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from subprocess import getoutput # nosec
from sys import stdout
__all__: list = [
# CLISP #
r'execclispfile',
# COFFEESCRIPT #
r'execcoffeescript',
# JAVASCRIPT #
r'execjs',
r'execjsfile',
# LUA #
r'execlua',
r'execluafile',
# PERL #
r'execperl',
r'execperlfile',
r'initperl',
# PHP #
r'execphp',
r'execphpfile',
# RUBY #
r'execruby',
r'execrubyfile',
# SCALA #
r'execscala',
r'execscala',
# SHELL #
r'execsh',
r'execshfile',
r'initsh'
]
# CLISP #
def execclispfile(_filename: str) -> str:
"""Execute a CLisp file given as a str and return the output as a str."""
return getoutput(r'clisp ' + _filename)
# COFFEESCRIPT #
def execcoffeescript(_code: str) -> str:
"""Execute Coffeescript code given as a str and return the output as a str."""
return getoutput('coffeescript --eval \'' + _code.replace('\'', '\\\'') + '\'')
# JAVASCRIPT #
def execjs(_code: str) -> str:
"""Execute JavaScript code given as a str and return the output as a str."""
return getoutput('jsc -e \'' + _code.replace('\'', '\\\'') + '\'')
def execjsfile(_filename: str) -> str:
"""Execute a JavaScript file given as a str and return the output as a str."""
return getoutput(r'jsc -e ' + _filename)
# LUA #
def execlua(_code: str) -> str:
"""Execute Lua code given as a str and return the output as a str."""
return getoutput('lua -e \'' + _code.replace('\'', '\\\'') + '\'')
def execluafile(_filename: str) -> str:
"""Execute a Lua script given as a str and return the output as a str."""
return getoutput(r'lua ' + _filename)
# PERL #
def execperl(_code: str) -> str:
"""Execute Perl code given as a str and return the output as a str."""
return getoutput('perl -e \'' + _code.replace('\'', '\\\'') + '\'')
def execperlfile(_filename: str) -> str:
"""Execute a Perl script given as a str and return the output as a str."""
return getoutput(r'perl ' + _filename)
def initperl() -> None:
"""Run a Perl REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Perl > ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('perl -e \'' + _input + '\'') + '\n')
# PHP #
def execphp(_code: str) -> str:
"""Execute PHP code given as a str and return the output as a str."""
return getoutput('php -r \'' + _code.replace('\'', '\\\'') + '\'')
def execphpfile(_filename: str) -> str:
"""Execute a PHP script given as a str and return the output as a str."""
return getoutput(r'php -f ' + _filename)
# RUBY #
def execruby(_code: str) -> str:
"""Execute Ruby code given as a str and return the output as a str."""
return getoutput('ruby -e \'' + _code.replace('\'', '\\\'') + '\'')
def execrubyfile(_filename: str) -> str:
"""Execute a Ruby script given as a str and return the output as a str."""
return getoutput(r'ruby ' + _filename)
# SCALA #
def execscala(_code: str) -> str:
"""Execute Scala code given as a str and return the output as a str."""
return getoutput('scala -e \'' + _code.replace('\'', '\\\'') + '\'')
def execscalafile(_filename: str) -> str:
|
# SHELL #
def execsh(_code: str) -> str:
"""Execute Shell code given as a str and return the output as a str."""
return getoutput('sh -c \'' + _code.replace('\'', '\\\'') + '\'')
def execshfile(_filename: str) -> str:
"""Execute a Shell script given as a str and return the output as a str."""
return getoutput(r'sh ' + _filename)
def initsh() -> None:
"""Run a shell REP-Loop (Read-Evaluate-Print-Loop)."""
_input: str = r''
while 1:
_input = input(r'Shell: $ ').replace('\'', '\\\'') # nosec
if _input in {r'exit', r'quit'}:
break
stdout.write(getoutput('sh -c \'' + _input + '\'') + '\n')
|
"""Execute a Scala file given as a str and return the output as a str."""
return getoutput(r'scala ' + _filename)
|
identifier_body
|
dev-data.js
|
// For development only!!
// Add sample data to window object that used for initializing appendGrid.
window.myAppendGridInitData = [{
"uid": "d4c74a61-a24e-429f-9db0-3cf3aaa22425",
"name": "Monique Zebedee",
"company": "Welch LLC",
"country": "Japan",
"memberSince": "2012-02-18",
"orderPlaced": 111,
"level": "Bronze",
"isNPO": true
}, {
"uid": "afdf285d-da5c-4fa8-9225-201c858a173d",
"name": "Daryle McLaren",
"company": "Bogisich Group",
"country": "United States",
"memberSince": "2016-10-08",
"orderPlaced": 261,
"level": "Diamond",
"isNPO": false
}, {
"uid": "202a8afb-130b-476b-b415-c659f21a73e7",
"name": "Glori Spellecy",
"company": "Grady and Sons",
"country": "Germany",
"memberSince": "2014-07-28",
"orderPlaced": 282,
"level": "Gold",
"isNPO": false
}, {
"uid": "08c9adee-abdd-43d5-866d-ce540be19be8",
"name": "Blondy Boggis",
"company": "Eichmann, Parker and Herzog",
"country": "Malaysia",
"memberSince": "2010-08-17",
"orderPlaced": 308,
"level": "Platinum",
"isNPO": true
}, {
"uid": "57644023-cd0c-47ec-a556-fd8d4e21a4e7",
"name": "Batholomew Zecchii",
"company": "Corwin-Fahey",
"country": "Malaysia",
"memberSince": "2016-09-20",
"orderPlaced": 881,
"level": "Gold",
|
"isNPO": true
}, {
"uid": "38e08e8a-c7eb-41eb-9191-6bb2df1fd39b",
"name": "Paulie Poel",
"company": "MacGyver, Rohan and West",
"country": "United Kingdom",
"memberSince": "2016-12-26",
"orderPlaced": 387,
"level": "Silver",
"isNPO": false
}, {
"uid": "d7bf56d4-f955-4dca-b3db-b30eab590028",
"name": "Jessica Levett",
"company": "Lind, O'Kon and Hamill",
"country": "United States",
"memberSince": "2015-04-26",
"orderPlaced": 984,
"level": "Gold",
"isNPO": false
}, {
"uid": "b9075764-5228-4ca7-9435-7c362ce097e5",
"name": "Fonsie Spring",
"company": "McKenzie, Block and Wiegand",
"country": "Japan",
"memberSince": "2013-11-08",
"orderPlaced": 875,
"level": "Silver",
"isNPO": false
}];
|
random_line_split
|
|
mod.rs
|
//! A 'simple' ARM emulator.
//!
//! At the moment the emulator only has support for a handful of THUMB-2 instructions
/// and no ARM-mode support.
use super::*;
pub use self::memory_tree::MemoryTree;
pub use self::ram::RAM;
pub use self::emu::SimpleEmulator;
pub use self::system::SimpleSystem;
pub mod memory_tree;
pub mod ram;
pub mod emu;
pub mod system;
/// Copy as much memory as possible from `src` to `dest`.
pub fn copy_memory(src: &[u8], dest: &mut [u8]) {
for x in 0.. {
if (x >= src.len()) || (x >= dest.len()) {
break
}
dest[x] = src[x]
}
}
fn swap_word(src: Word) -> Word {
let src = src as u32;
let src = (src >> 24)
| ((src >> 8) & 0xff00)
| ((src << 8) & 0xff0000)
| ((src << 24) & 0xff000000);
src as Word
}
fn
|
(a: Word, b: Word, c: Word) -> (Word, bool, bool) {
let sa = a as i64;
let sb = b as i64;
let sc = c as i64;
let ua = (a as u32) as u64;
let ub = (b as u32) as u64;
let uc = (c as u32) as u64;
let us = ua.wrapping_add(ub).wrapping_add(uc);
let ss = sa.wrapping_add(sb).wrapping_add(sc);
let result = us as u32;
(result as i32,
(result as u64) != us,
((result as i32) as i64) != ss)
}
pub trait Memory {
fn read(&self, _addr: u64, _dest: &mut [u8]) -> Result<()> { Err(Error::Unknown(format!("not implemented"))) }
fn write(&self, _addr: u64, _src: &[u8]) -> Result<()> { Err(Error::Unknown(format!("not implemented"))) }
fn read_u8(&self, addr: u64) -> Result<u8> {
let mut data = [0u8];
try!(self.read(addr, &mut data));
Ok(data[0])
}
fn read_u16(&self, addr: u64) -> Result<u16> {
let mut data = [0u8;2];
try!(self.read(addr, &mut data));
Ok((data[0] as u16) | ((data[1] as u16) << 8))
}
fn read_u32(&self, addr: u64) -> Result<u32> {
let mut data = [0u8;4];
try!(self.read(addr, &mut data));
Ok((data[0] as u32)
| ((data[1] as u32) << 8)
| ((data[2] as u32) << 16)
| ((data[3] as u32) << 24))
}
fn write_u8(&self, addr: u64, val: u8) -> Result<()> {
self.write(addr, &[val])
}
fn write_u16(&self, addr: u64, val: u16) -> Result<()> {
self.write(addr, &[(val & 0xff) as u8,
((val >> 8) & 0xff) as u8])
}
fn write_u32(&self, addr: u64, val: u32) -> Result<()> {
self.write(addr, &[(val & 0xff) as u8,
((val >> 8) & 0xff) as u8,
((val >> 16) & 0xff) as u8,
((val >> 24) & 0xff) as u8])
}
}
pub trait System {
type Memory: Memory;
fn memory(&self) -> &Self::Memory;
}
|
adc32
|
identifier_name
|
mod.rs
|
//! A 'simple' ARM emulator.
//!
//! At the moment the emulator only has support for a handful of THUMB-2 instructions
/// and no ARM-mode support.
use super::*;
pub use self::memory_tree::MemoryTree;
pub use self::ram::RAM;
pub use self::emu::SimpleEmulator;
pub use self::system::SimpleSystem;
pub mod memory_tree;
pub mod ram;
pub mod emu;
pub mod system;
/// Copy as much memory as possible from `src` to `dest`.
pub fn copy_memory(src: &[u8], dest: &mut [u8]) {
for x in 0.. {
if (x >= src.len()) || (x >= dest.len()) {
break
}
dest[x] = src[x]
}
}
fn swap_word(src: Word) -> Word {
let src = src as u32;
let src = (src >> 24)
| ((src >> 8) & 0xff00)
| ((src << 8) & 0xff0000)
| ((src << 24) & 0xff000000);
src as Word
}
fn adc32(a: Word, b: Word, c: Word) -> (Word, bool, bool) {
let sa = a as i64;
let sb = b as i64;
let sc = c as i64;
let ua = (a as u32) as u64;
let ub = (b as u32) as u64;
let uc = (c as u32) as u64;
let us = ua.wrapping_add(ub).wrapping_add(uc);
let ss = sa.wrapping_add(sb).wrapping_add(sc);
let result = us as u32;
(result as i32,
(result as u64) != us,
((result as i32) as i64) != ss)
}
pub trait Memory {
fn read(&self, _addr: u64, _dest: &mut [u8]) -> Result<()> { Err(Error::Unknown(format!("not implemented"))) }
fn write(&self, _addr: u64, _src: &[u8]) -> Result<()> { Err(Error::Unknown(format!("not implemented"))) }
fn read_u8(&self, addr: u64) -> Result<u8> {
let mut data = [0u8];
try!(self.read(addr, &mut data));
Ok(data[0])
}
fn read_u16(&self, addr: u64) -> Result<u16> {
let mut data = [0u8;2];
try!(self.read(addr, &mut data));
Ok((data[0] as u16) | ((data[1] as u16) << 8))
}
fn read_u32(&self, addr: u64) -> Result<u32> {
let mut data = [0u8;4];
try!(self.read(addr, &mut data));
Ok((data[0] as u32)
| ((data[1] as u32) << 8)
| ((data[2] as u32) << 16)
| ((data[3] as u32) << 24))
}
fn write_u8(&self, addr: u64, val: u8) -> Result<()> {
self.write(addr, &[val])
}
fn write_u16(&self, addr: u64, val: u16) -> Result<()>
|
fn write_u32(&self, addr: u64, val: u32) -> Result<()> {
self.write(addr, &[(val & 0xff) as u8,
((val >> 8) & 0xff) as u8,
((val >> 16) & 0xff) as u8,
((val >> 24) & 0xff) as u8])
}
}
pub trait System {
type Memory: Memory;
fn memory(&self) -> &Self::Memory;
}
|
{
self.write(addr, &[(val & 0xff) as u8,
((val >> 8) & 0xff) as u8])
}
|
identifier_body
|
mod.rs
|
//! A 'simple' ARM emulator.
//!
//! At the moment the emulator only has support for a handful of THUMB-2 instructions
/// and no ARM-mode support.
use super::*;
pub use self::memory_tree::MemoryTree;
pub use self::ram::RAM;
pub use self::emu::SimpleEmulator;
pub use self::system::SimpleSystem;
pub mod memory_tree;
pub mod ram;
pub mod emu;
pub mod system;
/// Copy as much memory as possible from `src` to `dest`.
pub fn copy_memory(src: &[u8], dest: &mut [u8]) {
for x in 0.. {
if (x >= src.len()) || (x >= dest.len())
|
dest[x] = src[x]
}
}
fn swap_word(src: Word) -> Word {
let src = src as u32;
let src = (src >> 24)
| ((src >> 8) & 0xff00)
| ((src << 8) & 0xff0000)
| ((src << 24) & 0xff000000);
src as Word
}
fn adc32(a: Word, b: Word, c: Word) -> (Word, bool, bool) {
let sa = a as i64;
let sb = b as i64;
let sc = c as i64;
let ua = (a as u32) as u64;
let ub = (b as u32) as u64;
let uc = (c as u32) as u64;
let us = ua.wrapping_add(ub).wrapping_add(uc);
let ss = sa.wrapping_add(sb).wrapping_add(sc);
let result = us as u32;
(result as i32,
(result as u64) != us,
((result as i32) as i64) != ss)
}
pub trait Memory {
fn read(&self, _addr: u64, _dest: &mut [u8]) -> Result<()> { Err(Error::Unknown(format!("not implemented"))) }
fn write(&self, _addr: u64, _src: &[u8]) -> Result<()> { Err(Error::Unknown(format!("not implemented"))) }
fn read_u8(&self, addr: u64) -> Result<u8> {
let mut data = [0u8];
try!(self.read(addr, &mut data));
Ok(data[0])
}
fn read_u16(&self, addr: u64) -> Result<u16> {
let mut data = [0u8;2];
try!(self.read(addr, &mut data));
Ok((data[0] as u16) | ((data[1] as u16) << 8))
}
fn read_u32(&self, addr: u64) -> Result<u32> {
let mut data = [0u8;4];
try!(self.read(addr, &mut data));
Ok((data[0] as u32)
| ((data[1] as u32) << 8)
| ((data[2] as u32) << 16)
| ((data[3] as u32) << 24))
}
fn write_u8(&self, addr: u64, val: u8) -> Result<()> {
self.write(addr, &[val])
}
fn write_u16(&self, addr: u64, val: u16) -> Result<()> {
self.write(addr, &[(val & 0xff) as u8,
((val >> 8) & 0xff) as u8])
}
fn write_u32(&self, addr: u64, val: u32) -> Result<()> {
self.write(addr, &[(val & 0xff) as u8,
((val >> 8) & 0xff) as u8,
((val >> 16) & 0xff) as u8,
((val >> 24) & 0xff) as u8])
}
}
pub trait System {
type Memory: Memory;
fn memory(&self) -> &Self::Memory;
}
|
{
break
}
|
conditional_block
|
mod.rs
|
//! A 'simple' ARM emulator.
//!
//! At the moment the emulator only has support for a handful of THUMB-2 instructions
/// and no ARM-mode support.
use super::*;
pub use self::memory_tree::MemoryTree;
pub use self::ram::RAM;
pub use self::emu::SimpleEmulator;
pub use self::system::SimpleSystem;
pub mod memory_tree;
pub mod ram;
pub mod emu;
pub mod system;
/// Copy as much memory as possible from `src` to `dest`.
pub fn copy_memory(src: &[u8], dest: &mut [u8]) {
for x in 0.. {
if (x >= src.len()) || (x >= dest.len()) {
break
}
dest[x] = src[x]
}
}
fn swap_word(src: Word) -> Word {
let src = src as u32;
let src = (src >> 24)
| ((src >> 8) & 0xff00)
| ((src << 8) & 0xff0000)
| ((src << 24) & 0xff000000);
src as Word
}
fn adc32(a: Word, b: Word, c: Word) -> (Word, bool, bool) {
let sa = a as i64;
let sb = b as i64;
let sc = c as i64;
let ua = (a as u32) as u64;
let ub = (b as u32) as u64;
let uc = (c as u32) as u64;
let us = ua.wrapping_add(ub).wrapping_add(uc);
let ss = sa.wrapping_add(sb).wrapping_add(sc);
let result = us as u32;
(result as i32,
(result as u64) != us,
((result as i32) as i64) != ss)
}
pub trait Memory {
fn read(&self, _addr: u64, _dest: &mut [u8]) -> Result<()> { Err(Error::Unknown(format!("not implemented"))) }
fn write(&self, _addr: u64, _src: &[u8]) -> Result<()> { Err(Error::Unknown(format!("not implemented"))) }
fn read_u8(&self, addr: u64) -> Result<u8> {
let mut data = [0u8];
try!(self.read(addr, &mut data));
Ok(data[0])
}
fn read_u16(&self, addr: u64) -> Result<u16> {
|
try!(self.read(addr, &mut data));
Ok((data[0] as u16) | ((data[1] as u16) << 8))
}
fn read_u32(&self, addr: u64) -> Result<u32> {
let mut data = [0u8;4];
try!(self.read(addr, &mut data));
Ok((data[0] as u32)
| ((data[1] as u32) << 8)
| ((data[2] as u32) << 16)
| ((data[3] as u32) << 24))
}
fn write_u8(&self, addr: u64, val: u8) -> Result<()> {
self.write(addr, &[val])
}
fn write_u16(&self, addr: u64, val: u16) -> Result<()> {
self.write(addr, &[(val & 0xff) as u8,
((val >> 8) & 0xff) as u8])
}
fn write_u32(&self, addr: u64, val: u32) -> Result<()> {
self.write(addr, &[(val & 0xff) as u8,
((val >> 8) & 0xff) as u8,
((val >> 16) & 0xff) as u8,
((val >> 24) & 0xff) as u8])
}
}
pub trait System {
type Memory: Memory;
fn memory(&self) -> &Self::Memory;
}
|
let mut data = [0u8;2];
|
random_line_split
|
result_try.rs
|
use std::io::prelude::*;
use std::fs::File;
type Result<T> = std::result::Result<T, String>;
// Setup to make this work. Create two files with some info. Ignore the
// return values because we don't care about them here.
fn setup() {
File::create("a")
.and_then(|mut file| file.write_all(b"grape"))
.unwrap();
File::create("b")
.and_then(|mut file| file.write_all(b"fruit"))
.unwrap();
}
// Get the data from each file with the data stored in a `Result`.
fn get_data(path: &str) -> Result<String> {
File::open(path)
.map_err(|err| err.to_string())
.and_then(|mut file| {
let mut contents = String::new();
// Read the data into `contents`.
file.read_to_string(&mut contents)
.map_err(|err| err.to_string())
// Ignore the output `read_to_string` returns and return `contents`.
|
// Concat the contents of the two files together into a new `Result`.
fn concat(a: &str, b: &str) -> Result<String> {
let (data_a, data_b) = (get_data(a), get_data(b));
data_a.and_then(|a|
// Return `Ok` when both `a` and `b` are `Ok`. Otherwise return
// whichever has the first `Err`.
data_b.and_then(|b| Ok(a + &b))
)
}
fn main() {
setup();
match concat("a", "b") {
Ok(n) => println!("{}", n),
Err(e) => println!("Error: {}", e),
}
}
|
.map(|_| contents)
})
}
|
random_line_split
|
result_try.rs
|
use std::io::prelude::*;
use std::fs::File;
type Result<T> = std::result::Result<T, String>;
// Setup to make this work. Create two files with some info. Ignore the
// return values because we don't care about them here.
fn setup() {
File::create("a")
.and_then(|mut file| file.write_all(b"grape"))
.unwrap();
File::create("b")
.and_then(|mut file| file.write_all(b"fruit"))
.unwrap();
}
// Get the data from each file with the data stored in a `Result`.
fn get_data(path: &str) -> Result<String> {
File::open(path)
.map_err(|err| err.to_string())
.and_then(|mut file| {
let mut contents = String::new();
// Read the data into `contents`.
file.read_to_string(&mut contents)
.map_err(|err| err.to_string())
// Ignore the output `read_to_string` returns and return `contents`.
.map(|_| contents)
})
}
// Concat the contents of the two files together into a new `Result`.
fn concat(a: &str, b: &str) -> Result<String> {
let (data_a, data_b) = (get_data(a), get_data(b));
data_a.and_then(|a|
// Return `Ok` when both `a` and `b` are `Ok`. Otherwise return
// whichever has the first `Err`.
data_b.and_then(|b| Ok(a + &b))
)
}
fn main()
|
{
setup();
match concat("a", "b") {
Ok(n) => println!("{}", n),
Err(e) => println!("Error: {}", e),
}
}
|
identifier_body
|
|
result_try.rs
|
use std::io::prelude::*;
use std::fs::File;
type Result<T> = std::result::Result<T, String>;
// Setup to make this work. Create two files with some info. Ignore the
// return values because we don't care about them here.
fn setup() {
File::create("a")
.and_then(|mut file| file.write_all(b"grape"))
.unwrap();
File::create("b")
.and_then(|mut file| file.write_all(b"fruit"))
.unwrap();
}
// Get the data from each file with the data stored in a `Result`.
fn get_data(path: &str) -> Result<String> {
File::open(path)
.map_err(|err| err.to_string())
.and_then(|mut file| {
let mut contents = String::new();
// Read the data into `contents`.
file.read_to_string(&mut contents)
.map_err(|err| err.to_string())
// Ignore the output `read_to_string` returns and return `contents`.
.map(|_| contents)
})
}
// Concat the contents of the two files together into a new `Result`.
fn concat(a: &str, b: &str) -> Result<String> {
let (data_a, data_b) = (get_data(a), get_data(b));
data_a.and_then(|a|
// Return `Ok` when both `a` and `b` are `Ok`. Otherwise return
// whichever has the first `Err`.
data_b.and_then(|b| Ok(a + &b))
)
}
fn
|
() {
setup();
match concat("a", "b") {
Ok(n) => println!("{}", n),
Err(e) => println!("Error: {}", e),
}
}
|
main
|
identifier_name
|
logger.service.spec.ts
|
import "test-setup";
import { consts } from "../core.consts";
import { LoggerService } from "./logger.service";
import { LogType } from "./logger.model";
beforeEach(JasminePromiseMatchers.install);
afterEach(JasminePromiseMatchers.uninstall);
describe("LoggerServiceSpecs", () => {
let SUT: LoggerService;
let $log: ng.ILogService;
beforeEach(angular.mock.module(consts.moduleName));
beforeEach(inject((
_loggerService_: LoggerService,
_$log_: ng.ILogService
) => {
SUT = _loggerService_;
$log = _$log_;
}));
|
it("should be sucessful", () => {
expect(1 + 1).toBe(2);
});
});
describe("given logType warn", () => {
it("should invoke warn method", () => {
spyOn($log, "warn");
SUT.log(LogType.Warning, "yo querro");
expect($log.warn).toHaveBeenCalled();
});
});
describe("given logType error", () => {
it("should invoke error method", () => {
spyOn($log, "error");
SUT.log(LogType.Error, "yo querro");
expect($log.error).toHaveBeenCalled();
});
});
});
|
describe("given a simple task", () => {
it("should fail", () => {
expect(1 + 1).toBe(3);
});
|
random_line_split
|
ThemesDialog.ts
|
namespace phasereditor2d.ide.ui.dialogs {
import controls = colibri.ui.controls;
export class ThemesDialog extends controls.dialogs.ViewerDialog {
constructor() {
super(new ThemeViewer());
this.setSize(200, 300);
}
|
this.addButton("Close", () => this.close());
}
}
class ThemeViewer extends controls.viewers.TreeViewer {
constructor() {
super("ThemeViewer");
this.setLabelProvider(new ThemeLabelProvider());
this.setContentProvider(new controls.viewers.ArrayTreeContentProvider());
this.setCellRendererProvider(
new controls.viewers.EmptyCellRendererProvider(
e => new controls.viewers.IconImageCellRenderer(
IDEPlugin.getInstance().getIcon(ICON_THEME)
)
)
);
this.setInput(
colibri.Platform
.getExtensions<colibri.ui.ide.themes.ThemeExtension>(colibri.ui.ide.themes.ThemeExtension.POINT_ID)
.map(ext => ext.getTheme())
.sort((a, b) => a.displayName.localeCompare(b.displayName))
);
}
}
class ThemeLabelProvider extends controls.viewers.LabelProvider {
getLabel(theme: controls.Theme) {
return theme.displayName;
}
}
}
|
create() {
super.create();
this.setTitle("Themes");
|
random_line_split
|
ThemesDialog.ts
|
namespace phasereditor2d.ide.ui.dialogs {
import controls = colibri.ui.controls;
export class ThemesDialog extends controls.dialogs.ViewerDialog {
|
() {
super(new ThemeViewer());
this.setSize(200, 300);
}
create() {
super.create();
this.setTitle("Themes");
this.addButton("Close", () => this.close());
}
}
class ThemeViewer extends controls.viewers.TreeViewer {
constructor() {
super("ThemeViewer");
this.setLabelProvider(new ThemeLabelProvider());
this.setContentProvider(new controls.viewers.ArrayTreeContentProvider());
this.setCellRendererProvider(
new controls.viewers.EmptyCellRendererProvider(
e => new controls.viewers.IconImageCellRenderer(
IDEPlugin.getInstance().getIcon(ICON_THEME)
)
)
);
this.setInput(
colibri.Platform
.getExtensions<colibri.ui.ide.themes.ThemeExtension>(colibri.ui.ide.themes.ThemeExtension.POINT_ID)
.map(ext => ext.getTheme())
.sort((a, b) => a.displayName.localeCompare(b.displayName))
);
}
}
class ThemeLabelProvider extends controls.viewers.LabelProvider {
getLabel(theme: controls.Theme) {
return theme.displayName;
}
}
}
|
constructor
|
identifier_name
|
lib.rs
|
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![deny(rust_2018_idioms)]
#![allow(clippy::type_complexity, clippy::new_without_default)]
#![recursion_limit = "256"]
#[macro_use]
extern crate async_trait;
#[macro_use]
extern crate clap;
#[macro_use]
extern crate counted_array;
#[cfg(feature = "jsonwebtoken")]
use jsonwebtoken as jwt;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[cfg(feature = "rouille")]
#[macro_use(router)]
extern crate rouille;
#[macro_use]
extern crate serde_derive;
// To get macros in scope, this has to be first.
#[cfg(test)]
#[macro_use]
mod test;
#[macro_use]
pub mod errors;
#[cfg(feature = "azure")]
mod azure;
mod cache;
mod client;
mod cmdline;
mod commands;
mod compiler;
pub mod config;
pub mod dist;
mod jobserver;
pub mod lru_disk_cache;
mod mock_command;
mod protocol;
pub mod server;
#[cfg(feature = "simple-s3")]
mod simples3;
#[doc(hidden)]
pub mod util;
use std::env;
const LOGGING_ENV: &str = "SCCACHE_LOG";
pub fn main() {
init_logging();
std::process::exit(match cmdline::parse() {
Ok(cmd) => match commands::run_command(cmd) {
Ok(s) => s,
Err(e) => {
eprintln!("sccache: error: {}", e);
for e in e.chain().skip(1) {
eprintln!("sccache: caused by: {}", e);
}
2
}
},
Err(e) => {
println!("sccache: {}", e);
for e in e.chain().skip(1) {
|
println!();
1
}
});
}
fn init_logging() {
if env::var(LOGGING_ENV).is_ok() {
match env_logger::Builder::from_env(LOGGING_ENV).try_init() {
Ok(_) => (),
Err(e) => panic!("Failed to initalize logging: {:?}", e),
}
}
}
|
println!("sccache: caused by: {}", e);
}
cmdline::get_app().print_help().unwrap();
|
random_line_split
|
lib.rs
|
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![deny(rust_2018_idioms)]
#![allow(clippy::type_complexity, clippy::new_without_default)]
#![recursion_limit = "256"]
#[macro_use]
extern crate async_trait;
#[macro_use]
extern crate clap;
#[macro_use]
extern crate counted_array;
#[cfg(feature = "jsonwebtoken")]
use jsonwebtoken as jwt;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[cfg(feature = "rouille")]
#[macro_use(router)]
extern crate rouille;
#[macro_use]
extern crate serde_derive;
// To get macros in scope, this has to be first.
#[cfg(test)]
#[macro_use]
mod test;
#[macro_use]
pub mod errors;
#[cfg(feature = "azure")]
mod azure;
mod cache;
mod client;
mod cmdline;
mod commands;
mod compiler;
pub mod config;
pub mod dist;
mod jobserver;
pub mod lru_disk_cache;
mod mock_command;
mod protocol;
pub mod server;
#[cfg(feature = "simple-s3")]
mod simples3;
#[doc(hidden)]
pub mod util;
use std::env;
const LOGGING_ENV: &str = "SCCACHE_LOG";
pub fn main() {
init_logging();
std::process::exit(match cmdline::parse() {
Ok(cmd) => match commands::run_command(cmd) {
Ok(s) => s,
Err(e) => {
eprintln!("sccache: error: {}", e);
for e in e.chain().skip(1) {
eprintln!("sccache: caused by: {}", e);
}
2
}
},
Err(e) => {
println!("sccache: {}", e);
for e in e.chain().skip(1) {
println!("sccache: caused by: {}", e);
}
cmdline::get_app().print_help().unwrap();
println!();
1
}
});
}
fn
|
() {
if env::var(LOGGING_ENV).is_ok() {
match env_logger::Builder::from_env(LOGGING_ENV).try_init() {
Ok(_) => (),
Err(e) => panic!("Failed to initalize logging: {:?}", e),
}
}
}
|
init_logging
|
identifier_name
|
lib.rs
|
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![deny(rust_2018_idioms)]
#![allow(clippy::type_complexity, clippy::new_without_default)]
#![recursion_limit = "256"]
#[macro_use]
extern crate async_trait;
#[macro_use]
extern crate clap;
#[macro_use]
extern crate counted_array;
#[cfg(feature = "jsonwebtoken")]
use jsonwebtoken as jwt;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[cfg(feature = "rouille")]
#[macro_use(router)]
extern crate rouille;
#[macro_use]
extern crate serde_derive;
// To get macros in scope, this has to be first.
#[cfg(test)]
#[macro_use]
mod test;
#[macro_use]
pub mod errors;
#[cfg(feature = "azure")]
mod azure;
mod cache;
mod client;
mod cmdline;
mod commands;
mod compiler;
pub mod config;
pub mod dist;
mod jobserver;
pub mod lru_disk_cache;
mod mock_command;
mod protocol;
pub mod server;
#[cfg(feature = "simple-s3")]
mod simples3;
#[doc(hidden)]
pub mod util;
use std::env;
const LOGGING_ENV: &str = "SCCACHE_LOG";
pub fn main() {
init_logging();
std::process::exit(match cmdline::parse() {
Ok(cmd) => match commands::run_command(cmd) {
Ok(s) => s,
Err(e) =>
|
},
Err(e) => {
println!("sccache: {}", e);
for e in e.chain().skip(1) {
println!("sccache: caused by: {}", e);
}
cmdline::get_app().print_help().unwrap();
println!();
1
}
});
}
fn init_logging() {
if env::var(LOGGING_ENV).is_ok() {
match env_logger::Builder::from_env(LOGGING_ENV).try_init() {
Ok(_) => (),
Err(e) => panic!("Failed to initalize logging: {:?}", e),
}
}
}
|
{
eprintln!("sccache: error: {}", e);
for e in e.chain().skip(1) {
eprintln!("sccache: caused by: {}", e);
}
2
}
|
conditional_block
|
lib.rs
|
// Copyright 2016 Mozilla Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![deny(rust_2018_idioms)]
#![allow(clippy::type_complexity, clippy::new_without_default)]
#![recursion_limit = "256"]
#[macro_use]
extern crate async_trait;
#[macro_use]
extern crate clap;
#[macro_use]
extern crate counted_array;
#[cfg(feature = "jsonwebtoken")]
use jsonwebtoken as jwt;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
#[cfg(feature = "rouille")]
#[macro_use(router)]
extern crate rouille;
#[macro_use]
extern crate serde_derive;
// To get macros in scope, this has to be first.
#[cfg(test)]
#[macro_use]
mod test;
#[macro_use]
pub mod errors;
#[cfg(feature = "azure")]
mod azure;
mod cache;
mod client;
mod cmdline;
mod commands;
mod compiler;
pub mod config;
pub mod dist;
mod jobserver;
pub mod lru_disk_cache;
mod mock_command;
mod protocol;
pub mod server;
#[cfg(feature = "simple-s3")]
mod simples3;
#[doc(hidden)]
pub mod util;
use std::env;
const LOGGING_ENV: &str = "SCCACHE_LOG";
pub fn main() {
init_logging();
std::process::exit(match cmdline::parse() {
Ok(cmd) => match commands::run_command(cmd) {
Ok(s) => s,
Err(e) => {
eprintln!("sccache: error: {}", e);
for e in e.chain().skip(1) {
eprintln!("sccache: caused by: {}", e);
}
2
}
},
Err(e) => {
println!("sccache: {}", e);
for e in e.chain().skip(1) {
println!("sccache: caused by: {}", e);
}
cmdline::get_app().print_help().unwrap();
println!();
1
}
});
}
fn init_logging()
|
{
if env::var(LOGGING_ENV).is_ok() {
match env_logger::Builder::from_env(LOGGING_ENV).try_init() {
Ok(_) => (),
Err(e) => panic!("Failed to initalize logging: {:?}", e),
}
}
}
|
identifier_body
|
|
pt.ts
|
import type { Translations, TranslationsContext } from '.';
export const translations: Translations = {
|
site: {
description: 'Website de Luiz Felipe Gonçalves (lffg)',
nav: {
home: 'Início',
about: 'Sobre',
contact: 'Contato'
},
footer: {
openSource:
'O código fonte deste website é <a href="https://github.com/lffg/luizfelipe.dev" rel="noopener noreferrer" target="_blank">open-source</a>.'
}
},
index: {
greeting:
'Olá e seja bem-vindo(a) ao meu (ainda não finalizado) website. Enquanto estou trabalhando nele, deixo abaixo algumas informações para contato:',
latestArticles: 'Últimos artigos',
aboutMe: 'Sobre mim.'
},
about: {
title: 'Sobre mim',
desc: 'Todo (pt)'
},
contact: {
title: 'Contato'
},
article: {}
};
export const context: TranslationsContext = {
dateFmt: 'DD [de] MMMM [de] YYYY'
};
|
random_line_split
|
|
test_issue_162.py
|
def compute_rir(order):
fromPos = np.zeros((3))
toPos = np.ones((3, 1))
roomSize = np.array([3, 3, 3])
room = pra.ShoeBox(roomSize, fs=1000, absorption=0.95, max_order=order)
room.add_source(fromPos)
mics = pra.MicrophoneArray(toPos, room.fs)
room.add_microphone_array(mics)
room.compute_rir()
def test_issue_162_max_order_15():
compute_rir(15)
def test_issue_162_max_order_31():
compute_rir(31)
def test_issue_162_max_order_32():
compute_rir(32)
def test_issue_162_max_order_50():
compute_rir(50)
def test_issue_162_max_order_75():
compute_rir(75)
|
import numpy as np
import pyroomacoustics as pra
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.