file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
call.rs | let fn_name = methods.iter().map(|method| &method.name).collect::<Vec<_>>();
let call_index = methods.iter().map(|method| method.call_index).collect::<Vec<_>>();
let new_call_variant_fn_name = fn_name
.iter()
.map(|fn_name| quote::format_ident!("new_call_variant_{}", fn_name))
.collect::<Vec<_>>();
let new_call_variant_doc = fn_name
.iter()
.map(|fn_name| format!("Create a call with the variant `{}`.", fn_name))
.collect::<Vec<_>>();
let mut call_index_warnings = Vec::new();
// Emit a warning for each call that is missing `call_index` when not in dev-mode.
for method in &methods {
if method.explicit_call_index || def.dev_mode {
continue
}
let warning = proc_macro_warning::Warning::new_deprecated("ImplicitCallIndex")
.index(call_index_warnings.len())
.old("use implicit call indices")
.new("ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode")
.help_links(&[
"https://github.com/paritytech/substrate/pull/12891",
"https://github.com/paritytech/substrate/pull/11381"
])
.span(method.name.span())
.build();
call_index_warnings.push(warning);
}
let mut fn_weight = Vec::<TokenStream2>::new();
let mut weight_warnings = Vec::new();
for method in &methods {
match &method.weight {
CallWeightDef::DevModeDefault => fn_weight.push(syn::parse_quote!(0)),
CallWeightDef::Immediate(e @ syn::Expr::Lit(lit)) if !def.dev_mode => {
let warning = proc_macro_warning::Warning::new_deprecated("ConstantWeight")
.index(weight_warnings.len())
.old("use hard-coded constant as call weight")
.new("benchmark all calls or put the pallet into `dev` mode")
.help_link("https://github.com/paritytech/substrate/pull/13798")
.span(lit.span())
.build();
weight_warnings.push(warning);
fn_weight.push(e.into_token_stream());
},
CallWeightDef::Immediate(e) => fn_weight.push(e.into_token_stream()),
CallWeightDef::Inherited => {
let pallet_weight = def
.call
.as_ref()
.expect("we have methods; we have calls; qed")
.inherited_call_weight
.as_ref()
.expect("the parser prevents this");
// Expand `<<T as Config>::WeightInfo>::call_name()`.
let t = &pallet_weight.typename;
let n = &method.name;
fn_weight.push(quote!({ < #t > :: #n () }));
},
}
}
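// Editor's note (hedged sketch, not part of the upstream source): for a call named
// `transfer` with an inherited weight type, the tokens pushed above render roughly as
// `{ <<T as Config>::WeightInfo>::transfer() }`, matching the comment in the
// `CallWeightDef::Inherited` arm.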
debug_assert_eq!(fn_weight.len(), methods.len());
let map_fn_docs = if !def.dev_mode {
// Emit the [`Pallet::method`] documentation only for non-dev modes.
|method: &CallVariantDef| {
let reference = format!("See [`Pallet::{}`].", method.name);
quote!(#reference)
}
} else {
// For dev-mode, do not provide a documentation link as it will break the
// `cargo doc` if the pallet is private inside a test.
|method: &CallVariantDef| {
let reference = format!("See `Pallet::{}`.", method.name);
quote!(#reference)
}
};
let fn_doc = methods.iter().map(map_fn_docs).collect::<Vec<_>>();
let args_name = methods
.iter()
.map(|method| method.args.iter().map(|(_, name, _)| name.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_name_stripped = methods
.iter()
.map(|method| {
method
.args
.iter()
.map(|(_, name, _)| {
syn::Ident::new(name.to_string().trim_start_matches('_'), name.span())
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
let make_args_name_pattern = |ref_tok| {
args_name
.iter()
.zip(args_name_stripped.iter())
.map(|(args_name, args_name_stripped)| {
args_name
.iter()
.zip(args_name_stripped)
.map(|(args_name, args_name_stripped)| {
if args_name == args_name_stripped {
quote::quote!( #ref_tok #args_name )
} else {
quote::quote!( #args_name_stripped: #ref_tok #args_name )
}
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
};
let args_name_pattern = make_args_name_pattern(None);
let args_name_pattern_ref = make_args_name_pattern(Some(quote::quote!(ref)));
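// Editor's note (hedged sketch): given an argument named `_amount`, the stripped ident is
// `amount`, so the `ref` pattern above renders as `amount: ref _amount`, while an argument
// already named `amount` simply becomes `ref amount`.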
let args_type = methods
.iter()
.map(|method| method.args.iter().map(|(_, _, type_)| type_.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_compact_attr = methods.iter().map(|method| {
method
.args
.iter()
.map(|(is_compact, _, type_)| {
if *is_compact {
quote::quote_spanned!(type_.span() => #[codec(compact)] )
} else {
quote::quote!()
}
})
.collect::<Vec<_>>()
});
let default_docs =
[syn::parse_quote!(r"Contains a variant per dispatchable extrinsic that this pallet has.")];
let docs = if docs.is_empty() { &default_docs[..] } else { &docs[..] };
let maybe_compile_error = if def.call.is_none() {
quote::quote! {
compile_error!(concat!(
"`",
stringify!($pallet_name),
"` does not have #[pallet::call] defined, perhaps you should remove `Call` from \
construct_runtime?",
));
}
} else {
proc_macro2::TokenStream::new()
};
let count = COUNTER.with(|counter| counter.borrow_mut().inc());
let macro_ident = syn::Ident::new(&format!("__is_call_part_defined_{}", count), span);
let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" };
// Wrap all calls inside of storage layers
if let Some(syn::Item::Impl(item_impl)) = def
.call
.as_ref()
.map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index])
{
item_impl.items.iter_mut().for_each(|i| {
if let syn::ImplItem::Fn(method) = i {
let block = &method.block;
method.block = syn::parse_quote! {{
// We execute every dispatchable in a new storage layer, allowing it to
// return an error at any point, in which case any storage changes are undone.
#frame_support::storage::with_storage_layer(|| #block)
}};
}
});
}
// Extracts #[allow] attributes, necessary so that we don't run into compiler warnings
let maybe_allow_attrs = methods
.iter()
.map(|method| {
method
.attrs
.iter()
.find(|attr| attr.path().is_ident("allow"))
.map_or(proc_macro2::TokenStream::new(), |attr| attr.to_token_stream())
})
.collect::<Vec<_>>();
quote::quote_spanned!(span =>
mod warnings {
#(
#call_index_warnings
)*
#(
#weight_warnings
)*
}
#[doc(hidden)]
pub mod __substrate_call_check {
#[macro_export]
#[doc(hidden)]
macro_rules! #macro_ident {
($pallet_name:ident) => {
#maybe_compile_error
};
}
#[doc(hidden)]
pub use #macro_ident as is_call_part_defined;
}
#( #[doc = #docs] )*
#[ | {
let (span, where_clause, methods, docs) = match def.call.as_ref() {
Some(call) => {
let span = call.attr_span;
let where_clause = call.where_clause.clone();
let methods = call.methods.clone();
let docs = call.docs.clone();
(span, where_clause, methods, docs)
},
None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()),
};
let frame_support = &def.frame_support;
let frame_system = &def.frame_system;
let type_impl_gen = &def.type_impl_generics(span);
let type_decl_bounded_gen = &def.type_decl_bounded_generics(span);
let type_use_gen = &def.type_use_generics(span);
let call_ident = syn::Ident::new("Call", span);
let pallet_ident = &def.pallet_struct.pallet;
| identifier_body |
|
call.rs | (def: &mut Def) -> proc_macro2::TokenStream {
let (span, where_clause, methods, docs) = match def.call.as_ref() {
Some(call) => {
let span = call.attr_span;
let where_clause = call.where_clause.clone();
let methods = call.methods.clone();
let docs = call.docs.clone();
(span, where_clause, methods, docs)
},
None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()),
};
let frame_support = &def.frame_support;
let frame_system = &def.frame_system;
let type_impl_gen = &def.type_impl_generics(span);
let type_decl_bounded_gen = &def.type_decl_bounded_generics(span);
let type_use_gen = &def.type_use_generics(span);
let call_ident = syn::Ident::new("Call", span);
let pallet_ident = &def.pallet_struct.pallet;
let fn_name = methods.iter().map(|method| &method.name).collect::<Vec<_>>();
let call_index = methods.iter().map(|method| method.call_index).collect::<Vec<_>>();
let new_call_variant_fn_name = fn_name
.iter()
.map(|fn_name| quote::format_ident!("new_call_variant_{}", fn_name))
.collect::<Vec<_>>();
let new_call_variant_doc = fn_name
.iter()
.map(|fn_name| format!("Create a call with the variant `{}`.", fn_name))
.collect::<Vec<_>>();
let mut call_index_warnings = Vec::new();
// Emit a warning for each call that is missing `call_index` when not in dev-mode.
for method in &methods {
if method.explicit_call_index || def.dev_mode {
continue
}
let warning = proc_macro_warning::Warning::new_deprecated("ImplicitCallIndex")
.index(call_index_warnings.len())
.old("use implicit call indices")
.new("ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode")
.help_links(&[
"https://github.com/paritytech/substrate/pull/12891",
"https://github.com/paritytech/substrate/pull/11381"
])
.span(method.name.span())
.build();
call_index_warnings.push(warning);
}
let mut fn_weight = Vec::<TokenStream2>::new();
let mut weight_warnings = Vec::new();
for method in &methods {
match &method.weight {
CallWeightDef::DevModeDefault => fn_weight.push(syn::parse_quote!(0)),
CallWeightDef::Immediate(e @ syn::Expr::Lit(lit)) if !def.dev_mode => {
let warning = proc_macro_warning::Warning::new_deprecated("ConstantWeight")
.index(weight_warnings.len())
.old("use hard-coded constant as call weight")
.new("benchmark all calls or put the pallet into `dev` mode")
.help_link("https://github.com/paritytech/substrate/pull/13798")
.span(lit.span())
.build();
weight_warnings.push(warning);
fn_weight.push(e.into_token_stream());
},
CallWeightDef::Immediate(e) => fn_weight.push(e.into_token_stream()),
CallWeightDef::Inherited => {
let pallet_weight = def
.call
.as_ref()
.expect("we have methods; we have calls; qed")
.inherited_call_weight
.as_ref()
.expect("the parser prevents this");
// Expand `<<T as Config>::WeightInfo>::call_name()`.
let t = &pallet_weight.typename;
let n = &method.name;
fn_weight.push(quote!({ < #t > :: #n () }));
},
}
}
debug_assert_eq!(fn_weight.len(), methods.len());
let map_fn_docs = if !def.dev_mode {
// Emit the [`Pallet::method`] documentation only for non-dev modes.
|method: &CallVariantDef| {
let reference = format!("See [`Pallet::{}`].", method.name);
quote!(#reference)
}
} else {
// For dev-mode, do not provide a documentation link as it will break the
// `cargo doc` if the pallet is private inside a test.
|method: &CallVariantDef| {
let reference = format!("See `Pallet::{}`.", method.name);
quote!(#reference)
}
};
let fn_doc = methods.iter().map(map_fn_docs).collect::<Vec<_>>();
let args_name = methods
.iter()
.map(|method| method.args.iter().map(|(_, name, _)| name.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_name_stripped = methods
.iter()
.map(|method| {
method
.args
.iter()
.map(|(_, name, _)| {
syn::Ident::new(name.to_string().trim_start_matches('_'), name.span())
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
let make_args_name_pattern = |ref_tok| {
args_name
.iter()
.zip(args_name_stripped.iter())
.map(|(args_name, args_name_stripped)| {
args_name
.iter()
.zip(args_name_stripped)
.map(|(args_name, args_name_stripped)| {
if args_name == args_name_stripped {
quote::quote!( #ref_tok #args_name )
} else {
quote::quote!( #args_name_stripped: #ref_tok #args_name )
}
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
};
let args_name_pattern = make_args_name_pattern(None);
let args_name_pattern_ref = make_args_name_pattern(Some(quote::quote!(ref)));
let args_type = methods
.iter()
.map(|method| method.args.iter().map(|(_, _, type_)| type_.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_compact_attr = methods.iter().map(|method| {
method
.args
.iter()
.map(|(is_compact, _, type_)| {
if *is_compact {
quote::quote_spanned!(type_.span() => #[codec(compact)] )
} else {
quote::quote!()
}
})
.collect::<Vec<_>>()
});
let default_docs =
[syn::parse_quote!(r"Contains a variant per dispatchable extrinsic that this pallet has.")];
let docs = if docs.is_empty() { &default_docs[..] } else { &docs[..] };
let maybe_compile_error = if def.call.is_none() {
quote::quote! {
compile_error!(concat!(
"`",
stringify!($pallet_name),
"` does not have #[pallet::call] defined, perhaps you should remove `Call` from \
construct_runtime?",
));
}
} else {
proc_macro2::TokenStream::new()
};
let count = COUNTER.with(|counter| counter.borrow_mut().inc());
let macro_ident = syn::Ident::new(&format!("__is_call_part_defined_{}", count), span);
let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" };
// Wrap all calls inside of storage layers
if let Some(syn::Item::Impl(item_impl)) = def
.call
.as_ref()
.map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index])
{
item_impl.items.iter_mut().for_each(|i| {
if let syn::ImplItem::Fn(method) = i {
let block = &method.block;
method.block = syn::parse_quote! {{
// We execute every dispatchable in a new storage layer, allowing it to
// return an error at any point, in which case any storage changes are undone.
#frame_support::storage::with_storage_layer(|| #block)
}};
}
});
}
// Extracts #[allow] attributes, necessary so that we don't run into compiler warnings
let maybe_allow_attrs = methods
.iter()
.map(|method| {
method
.attrs
.iter()
.find(|attr| attr.path().is_ident("allow"))
.map_or(proc_macro2::TokenStream::new(), |attr| attr.to_token_stream())
})
.collect::<Vec<_>>();
quote::quote_spanned!(span =>
mod warnings {
#(
#call_index_warnings
)*
#(
#weight_warnings
)*
}
#[doc(hidden)]
pub mod __substrate_call_check {
#[macro_export]
#[doc(hidden)]
macro_rules! #macro_ident {
($pallet_name:ident) => {
#maybe_compile_error
};
}
#[doc(hidden)]
pub use #macro_ident as is_call_part_defined;
}
| expand_call | identifier_name |
|
call.rs | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
pallet::{
parse::call::{CallVariantDef, CallWeightDef},
Def,
},
COUNTER,
};
use proc_macro2::TokenStream as TokenStream2;
use quote::{quote, ToTokens};
use syn::spanned::Spanned;
///
/// * Generate enum call and implement various trait on it.
/// * Implement Callable and call_function on `Pallet`
pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream {
let (span, where_clause, methods, docs) = match def.call.as_ref() {
Some(call) => {
let span = call.attr_span;
let where_clause = call.where_clause.clone();
let methods = call.methods.clone();
let docs = call.docs.clone();
(span, where_clause, methods, docs)
},
None => (def.item.span(), def.config.where_clause.clone(), Vec::new(), Vec::new()),
};
let frame_support = &def.frame_support;
let frame_system = &def.frame_system;
let type_impl_gen = &def.type_impl_generics(span);
let type_decl_bounded_gen = &def.type_decl_bounded_generics(span);
let type_use_gen = &def.type_use_generics(span);
let call_ident = syn::Ident::new("Call", span);
let pallet_ident = &def.pallet_struct.pallet;
let fn_name = methods.iter().map(|method| &method.name).collect::<Vec<_>>();
let call_index = methods.iter().map(|method| method.call_index).collect::<Vec<_>>();
let new_call_variant_fn_name = fn_name
.iter()
.map(|fn_name| quote::format_ident!("new_call_variant_{}", fn_name))
.collect::<Vec<_>>();
let new_call_variant_doc = fn_name
.iter()
.map(|fn_name| format!("Create a call with the variant `{}`.", fn_name))
.collect::<Vec<_>>();
let mut call_index_warnings = Vec::new();
// Emit a warning for each call that is missing `call_index` when not in dev-mode.
for method in &methods {
if method.explicit_call_index || def.dev_mode {
continue
}
let warning = proc_macro_warning::Warning::new_deprecated("ImplicitCallIndex")
.index(call_index_warnings.len())
.old("use implicit call indices")
.new("ensure that all calls have a `pallet::call_index` attribute or put the pallet into `dev` mode")
.help_links(&[
"https://github.com/paritytech/substrate/pull/12891",
"https://github.com/paritytech/substrate/pull/11381"
])
.span(method.name.span())
.build();
call_index_warnings.push(warning);
}
let mut fn_weight = Vec::<TokenStream2>::new();
let mut weight_warnings = Vec::new();
for method in &methods {
match &method.weight {
CallWeightDef::DevModeDefault => fn_weight.push(syn::parse_quote!(0)),
CallWeightDef::Immediate(e @ syn::Expr::Lit(lit)) if !def.dev_mode => {
let warning = proc_macro_warning::Warning::new_deprecated("ConstantWeight")
.index(weight_warnings.len())
.old("use hard-coded constant as call weight")
.new("benchmark all calls or put the pallet into `dev` mode")
.help_link("https://github.com/paritytech/substrate/pull/13798")
.span(lit.span())
.build();
weight_warnings.push(warning);
fn_weight.push(e.into_token_stream());
},
CallWeightDef::Immediate(e) => fn_weight.push(e.into_token_stream()),
CallWeightDef::Inherited => {
let pallet_weight = def
.call
.as_ref()
.expect("we have methods; we have calls; qed")
.inherited_call_weight
.as_ref()
.expect("the parser prevents this");
// Expand `<<T as Config>::WeightInfo>::call_name()`.
let t = &pallet_weight.typename;
let n = &method.name;
fn_weight.push(quote!({ < #t > :: #n () }));
},
}
}
debug_assert_eq!(fn_weight.len(), methods.len());
let map_fn_docs = if !def.dev_mode {
// Emit the [`Pallet::method`] documentation only for non-dev modes.
|method: &CallVariantDef| {
let reference = format!("See [`Pallet::{}`].", method.name);
quote!(#reference)
}
} else {
// For dev-mode, do not provide a documentation link as it will break the
// `cargo doc` if the pallet is private inside a test.
|method: &CallVariantDef| {
let reference = format!("See `Pallet::{}`.", method.name);
quote!(#reference)
}
};
let fn_doc = methods.iter().map(map_fn_docs).collect::<Vec<_>>();
let args_name = methods
.iter()
.map(|method| method.args.iter().map(|(_, name, _)| name.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_name_stripped = methods
.iter()
.map(|method| {
method
.args
.iter()
.map(|(_, name, _)| {
syn::Ident::new(name.to_string().trim_start_matches('_'), name.span())
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
let make_args_name_pattern = |ref_tok| {
args_name
.iter()
.zip(args_name_stripped.iter())
.map(|(args_name, args_name_stripped)| {
args_name
.iter()
.zip(args_name_stripped)
.map(|(args_name, args_name_stripped)| {
if args_name == args_name_stripped {
quote::quote!( #ref_tok #args_name )
} else {
quote::quote!( #args_name_stripped: #ref_tok #args_name )
}
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
};
let args_name_pattern = make_args_name_pattern(None);
let args_name_pattern_ref = make_args_name_pattern(Some(quote::quote!(ref)));
let args_type = methods
.iter()
.map(|method| method.args.iter().map(|(_, _, type_)| type_.clone()).collect::<Vec<_>>())
.collect::<Vec<_>>();
let args_compact_attr = methods.iter().map(|method| {
method
.args
.iter()
.map(|(is_compact, _, type_)| {
if *is_compact {
quote::quote_spanned!(type_.span() => #[codec(compact)] )
} else {
quote::quote!()
}
})
.collect::<Vec<_>>()
});
let default_docs =
[syn::parse_quote!(r"Contains a variant per dispatchable extrinsic that this pallet has.")];
let docs = if docs.is_empty() { &default_docs[..] } else { &docs[..] };
let maybe_compile_error = if def.call.is_none() {
quote::quote! {
compile_error!(concat!(
"`",
stringify!($pallet_name),
"` does not have #[pallet::call] defined, perhaps you should remove `Call` from \
construct_runtime?",
));
}
} else {
proc_macro2::TokenStream::new()
};
let count = COUNTER.with(|counter| counter.borrow_mut().inc());
let macro_ident = syn::Ident::new(&format!("__is_call_part_defined_{}", count), span);
let capture_docs = if cfg!(feature = "no-metadata-docs") { "never" } else { "always" };
// Wrap all calls inside of storage layers
if let Some(syn::Item::Impl(item_impl)) = def
.call
.as_ref()
.map(|c| &mut def.item.content.as_mut().expect("Checked by def parser").1[c.index])
{
item_impl.items.iter_mut().for_each(|i| {
if let syn::ImplItem::Fn(method) = i {
let block = &method.block;
method.block = syn::parse_quote! {{
// We execute every dispatchable in a new storage layer, allowing it to
// return an error at any point, in which case any storage changes are undone.
#frame_support::storage::with_storage_layer(|| #block)
}};
}
});
}
// Extracts #[allow] attributes, necessary so that we don't run into compiler warnings
let maybe_allow_attrs = methods
.iter()
.map(|method| {
method
.attrs
.iter()
.find(|attr| attr.path().is_ident("allow"))
.map_or(proc_macro2::TokenStream:: | // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, | random_line_split |
|
server.js | and set the default language and text dir for arabic
const languages = require('./languages.js');
let language = 'arabic';
let text = languages[language];
let dir = 'rtl';
// object for the redirects pages
const langRedirects = {
'/addvolunteer': 'list',
'/orgform': 'login',
'/addrole': 'login'
};
// set the port
app.set('port', process.env.PORT || 8080);
// set the local variables of the language
app.locals.dir = dir;
app.locals.text = text;
// parse application/x-www-form-urlencoded
app.use(bodyParser.urlencoded({ extended: false }));
// serve the favicon
app.use(favicon(path.join(__dirname, '../public/assets/', 'favicon.ico')));
// parse application/json
app.use(bodyParser.json());
// set up expressValidator
app.use(expressValidator({
errorFormatter: (param, msg, value) => {
let namespace = param.split('.');
let root = namespace.shift();
let formParam = root;
while (namespace.length) {
formParam += '[' + namespace.shift() + ']';
}
return {
param: formParam,
msg: msg,
value: value
};
}
}));
// set up handlebars
app.engine('.hbs', exphbs({
defaultLayout: 'main',
extname: '.hbs',
helpers: {
// turn the id into an anchor link with href as querylink to form page
link: function (id) {
return '<a class="altbg shadow-2 w3 mw4 tc brown link grow f5 ba br3 pa2 bg-leave" href="form?id=' + id + '">' + text.applyButton + '</a>';
}
}
}));
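// Editor's note (hedged sketch): a template would use this helper along the lines of
// {{{link _id}}} (triple braces so the generated anchor HTML is not escaped); the exact
// field name passed in is an assumption, the helper itself only needs the role id.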
app.set('view engine', '.hbs');
// serve static files
const options = {
dotfiles: 'ignore',
extensions: ['htm', 'html'],
index: false
};
app.use(express.static(path.join(__dirname, '../public'), options));
// set the response locals to the same as the app locals
app.use((req, res, next) => {
res.locals = app.locals;
next();
});
app.get('/', (req, res) => {
res.render('home');
});
// Handler for the language change radio button
app.post('/langChange', (req, res) => {
// Get the language selected
language = req.body.language;
// set text to the language selected
text = languages[language];
// change the text direction for the language
if (language === 'english') {
dir = 'ltr';
} else {
dir = 'rtl';
}
// change the locals
app.locals.dir = dir;
app.locals.text = text;
// redirect back to the page the post request came from unless from 2 specific pages
// req.headers.referer is the page where the language change attempt occurred, req.headers.origin is the name of the host
// get the path from which the request came by removing the host part
const refererLong = req.headers.referer;
const referer = refererLong.replace(req.headers.origin, '');
// check if that path is a key in the redirect object (defined above), and if so, redirect to the value of that
// if not, just redirect back to the referer
if (langRedirects[referer]) {
res.redirect(langRedirects[referer]);
} else {
res.redirect(refererLong);
}
});
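// Editor's note (hedged sketch): with the langRedirects map above, a language switch posted
// from e.g. http://host/orgform has its origin stripped, matches the '/orgform' key and is
// redirected to 'login', while a POST from a page not in the map falls through to the full
// referer URL.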
app.get('/form', (req, res) => {
MongoClient.connect(url, (err, db) => {
if (err) return ('err: ', err);
else {
const collection = db.collection('vol_roles');
// find collection document where id is equal to the role id
// make result an array to read easily, take the first element of array
collection.find({
'_id': ObjectId(req.query.id)
}).toArray((err, docs) => {
if (err) return err;
const data = docs[0];
const goodSDate = functions.convertDate(data.start_date);
data.start_date = goodSDate;
const goodEDate = functions.convertDate(data.end_date);
data.end_date = goodEDate;
res.render('form', {
// make object with role as a key and data as value to pass to view
role: data,
headline: text.formHeader
});
db.close();
});
}
});
});
app.get('/list', (req, res) => {
MongoClient.connect(url, (err, db) => {
if (err) return ('err: ', err);
else {
console.log('connection made');
const collection = db.collection('vol_roles');
// Find the volunteer roles, sorted by start date
collection.find({}).sort({'start_date': 1}).toArray((err, result) => {
if (err) res.send(err);
else if (result.length) {
// loop through the dates to make them look the same
result.forEach((item, index) => {
const goodSDate = functions.convertDate(item.start_date);
result[index].start_date = goodSDate;
const goodEDate = functions.convertDate(item.end_date);
result[index].end_date = goodEDate;
});
res.render('list', {
'roleList': result,
'headline': text.listHeader
});
} else {
res.send('No roles found');
}
db.close();
});
}
});
});
// addrole - handles the orgform submission and validates the orgform fields
app.post('/addrole', (req, res) => {
req.checkBody(schema.orgForm(req, text));
req.getValidationResult().then((result) => {
const errors = result.useFirstErrorOnly().array();
// if the errors array is non-empty, the form failed validation and we have to handle those errors
if (errors.length) {
// take the information we filled and put in array
const prefilled = [req.body];
res.render('orgform', {
error: errors,
prefilled: prefilled,
headline: text.orgFormHeader
});
} else {
MongoClient.connect(url, (err, db) => {
if (err) return ('Error connection to DB: ', err);
else {
console.log('connection made');
// build an object with the data submitted from the html page
const role = {
'org_name': req.body.org_name,
'org_desc': req.body.org_desc,
'phone_num': req.body.user_phone,
'email': req.body.user_mail,
'role_name': req.body.role_name,
'role_desc': req.body.role_desc,
'num_vlntr_req': req.body.num_vol,
'start_date': new Date(req.body.start_date),
'end_date': new Date(req.body.end_date),
// add the date on which the client filled in the form
'date_added': new Date()
};
// connect to the table called vol_roles
const collection = db.collection('vol_roles');
// insert the data in db
collection.insert(role, {w: 1}, (err, result) => {
if (err) return ('Error inserting to DB: ', err);
db.close();
// redirect the information to the list page also
res.redirect('/list');
});
}
});
}
});
});
app.post('/addvolunteer', (req, res) => {
// validate the form
req.checkBody(schema.volForm(req, text));
// get the result asynchonously
req.getValidationResult().then((result) => {
// only look at first error
const errors = result.useFirstErrorOnly().array();
// do something with the validation result
// errors comes as an array, [] returns as true
if (errors.length) {
MongoClient.connect(url, (err, db) => {
if (err) return ('err: ', err);
else {
const collection = db.collection('vol_roles');
// find collection document where id is equal to the role id
// make result an array to read easily, take the first element of array
collection.find({
'_id': ObjectId(req.body.role_id)
}).toArray((err, docs) => {
if (err) return err;
const data = docs[0];
// must send as an array to handlebars | // render form with error data and already filled in inputs
res.render('form', {
role: data,
error: errors,
prefilled: prefilled,
headline: text.formHeader
});
db.close();
});
}
});
} else {
MongoClient.connect(url, (err, db) => {
if (err) return ('Error connection to DB: ', err);
else {
console.log('connection made');
// build an object with the data submitted from the html page
const role = {
'user_fname': req.body.user_fname,
'user_lname': req.body.user_lname,
'user_age': req.body.user_age,
'user_message': req.body.user_message,
'user_phone': req.body.user_phone,
'user_mail': req.body.user_mail,
'role_id': req.body.role_id
};
// connect to the table called vol_volunteer
| const prefilled = [req.body]; | random_line_split |
crawler_sina.go | }
func getSinaFailedQueue(value string) string {
v, _ := global.RD.GetSETRandStringRm(SINA_SPIDER_FAILED)
return v
}
func checkSinaFailedQueue(value string) bool {
x, _ := global.RD.CheckSETString(SINA_SPIDER_FAILED, value)
return x > 0
}
func addSinaDataQueue(value map[string]interface{}) {
b, err := json.Marshal(value)
if err != nil {
global.Log.Info(err.Error())
return
}
x, err := global.RD.SetSETString(SINA_SPIDER_DATA, string(b))
if err != nil {
global.Log.Info(err.Error())
}
global.Log.Info("添加数据到Redis结果%v", x)
}
func getSinaDataQueue() (string, error) {
return global.RD.GetSETRandStringRm(SINA_SPIDER_DATA)
}
func getSinaData() map[string]interface{} {
d, err := getSinaDataQueue()
if err != nil {
global.Log.Warning("无数据", err.Error())
return nil
}
if d != "" {
m := map[string]interface{}{}
err = json.Unmarshal([]byte(d), &m)
if err == nil {
return m
}
}
return nil
}
func main() {
go saveSinaData()
driver := agouti.ChromeDriver(agouti.ChromeOptions("args", []string{
"--start-maximized",
"--disable-infobars",
"--app=https://weibo.com/",
"--webkit-text-size-adjust"}))
driver.Start()
var err error
global.Page.Page, err = driver.NewPage()
if err != nil {
global.Log.Info(err.Error())
} else {
flog, ce := SetSinaCookieLogin("sinaWeibo" + global.SinaWeiboUName)
if ce != nil {
global.Log.Info(ce.Error())
return
}
if flog {
//global.Page.Navigate("https://weibo.com")
//time.Sleep(time.Second * 3)
name, ei := global.Page.Find(".nameBox").Text()
if ei != nil {
global.Log.Info(ei.Error())
}
if name != "" {
global.Log.Info("头像", name)
} else {
flog = false
}
}
if !flog {
//global.Page.Navigate("http://weibo.com/login.php")
time.Sleep(time.Second * 3)
global.Page.FindByID("loginname").Fill(global.SinaWeiboUName)
time.Sleep(time.Second * 1)
global.Page.Find("input[type=\"password\"]").Fill(global.SinaWeiboPwd)
time.Sleep(time.Second * 2)
global.Page.FindByXPath("//*[@id=\"pl_login_form\"]/div/div[3]/div[6]/a/span").Click()
time.Sleep(time.Second * 2)
c, err5 := global.Page.GetCookies()
if err5 != nil {
global.Log.Info("登陆失败", err5.Error())
return
}
cookieJson, _ := json.Marshal(c)
//global.Log.Info("cookie", string(cookieJson[:]))
redis.Cache.SetString("sinaWeibo"+global.SinaWeiboUName, string(cookieJson[:]))
if getSinaQueueCount() <= 0 {
/* collect the hrefs of followed users and fans */
ClikFans()
/* visit the user's Weibo page to collect their info */
newSina := getSinaQueue()
if newSina != "" {
getUserInfo(newSina)
}
} else {
newSina := getSinaQueue()
if newSina != "" {
getUserInfo(newSina)
}
}
}
}
}
func ClikFans() {
eles, err := global.Page.Page.Find(".user_atten").All("li").Elements()
if err != nil {
return
}
if eles != nil && len(eles) > 1 {
a, err := eles[0].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href, _ := a.GetAttribute("href")
a2, err := eles[1].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href2, _ := a2.GetAttribute("href")
if href != "" {
getUserHref(href)
}
if href2 != "" {
getUserHref(href2)
}
}
}
func ClikFrendsFans() {
eles, err := global.Page.Page.Find(".PCD_counter").All("td.S_line1").Elements()
if err != nil {
println("获取微博关注/粉丝节点失败,", err.Error())
} else {
if eles != nil && len(eles) > 1 {
a, err := eles[0].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href, _ := a.GetAttribute("href")
a2, err := eles[1].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href2, _ := a2.GetAttribute("href")
if href != "" {
getUserHref(href)
}
if href2 != "" {
getUserHref(href2)
}
}
}
}
func getUserHref(href string) {
if href != "" {
println("待抓url:", href)
global.Page.Navigate(href)
} else {
println("点击操作项")
}
eles, err := global.Page.Find(".follow_list").All("li.follow_item").Elements()
if err != nil {
eles, err = global.Page.Find(".member_ul").All("li.member_li").Elements()
if err != nil {
println("未找到微博用户列表标签")
}
}
if err != nil {
global.Log.Info("获取列表href错误", err.Error())
return
}
if eles != nil {
for i := range eles {
a, err := eles[i].GetElement(api.Selector{"css selector", " .mod_pic a"})
if err != nil {
println("未找到列表中的微博用户节点标签")
continue
}
//push into the to-crawl queue
href, err := a.GetAttribute("href")
if err != nil {
global.Log.Info("获取粉丝用户详情href失败", err.Error())
} else {
newHref := strings.Split(href, "?")
addSinaQueue(newHref[0])
}
}
}
//next page
next := global.Page.Page.Find(".W_pages .next")
cls, err := next.Attribute("class")
if err != nil {
global.Log.Info("获取下一页按钮样式失败", err.Error())
return
}
if !strings.Contains(cls, "page_dis") {
global.Page.RunScript("document.documentElement.scrollTop=document.body.clientHeight;", nil, nil)
time.Sleep(time.Second * 2)
next.Click()
time.Sleep(time.Second * 5)
html, _ := global.Page.HTML()
if !strings.Contains(html, "由于系统限制,你无法查看") {
getUserHref("")
}
}
}
func getUserInfo(href string) {
global.Page.Page.Navigate(href)
addSinaDoneQueue(href)
m := map[string]interface{}{}
m["url"] = href
m["nickname"], _ = global.Page.Page.FindByClass("username").Text()
m["head_img"], _ = global.Page.Page.FindByClass("photo").Attribute("src")
m["desc"], _ = global.Page.Page.FindByClass("pf_intro").Text()
eles, err := global.Page.Page.Find(".PCD_counter").All("strong").Elements()
if err != nil {
println("获取微博关注/粉丝/微博失败,", err.Error())
} else {
m["tweets"], _ = eles[0].GetText()
m["follows"], _ = eles[1].GetText()
m["fans"], _ = eles[2].GetText()
}
//collect the user's Weibo posts
m["weibos"] = getUserWeibo()
//collect detailed user info
m = getUserInfoDetail(m)
addSinaDataQueue(m)
/* collect the hrefs of followed users and fans */
ClikFrendsFans()
newSina := getSinaQueue()
if newSina != "" {
getUserInfo(newSina)
}
}
func Try(fun func(), handler func(interface{})) {
defer func() {
if err := recover(); err != nil {
handler(err)
}
}()
fun()
}
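// Editor's note (hedged usage sketch): Try wraps a call with panic recovery, e.g.
// Try(func() { panic("boom") }, func(e interface{}) { fmt.Println("recovered:", e) })
// logs the recovered value instead of letting the panic kill the crawler loop.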
func saveSinaData() {
for {
Try(func() {
global.Log.Info("执行Sina数据保存服务")
m := getSinaData()
if m != nil {
weibos := m["weibos"]
delete(m, "weibos")
sinaBaseId :=utils.ID()
| return x > 0
}
func addSinaFailedQueue(value string) {
global.RD.SetSETString(SINA_SPIDER_FAILED, value) | random_line_split |
|
crawler_sina.go | global.RD.GetSETRandStringRm(SINA_SPIDER_DATA)
}
func getSinaData() map[string]interface{} {
d, err := getSinaDataQueue()
if err != nil {
global.Log.Warning("无数据", err.Error())
return nil
}
if d != "" {
m := map[string]interface{}{}
err = json.Unmarshal([]byte(d), &m)
if err == nil {
return m
}
}
return nil
}
func main() {
go saveSinaData()
driver := agouti.ChromeDriver(agouti.ChromeOptions("args", []string{
"--start-maximized",
"--disable-infobars",
"--app=https://weibo.com/",
"--webkit-text-size-adjust"}))
driver.Start()
var err error
global.Page.Page, err = driver.NewPage()
if err != nil {
global.Log.Info(err.Error())
} else {
flog, ce := SetSinaCookieLogin("sinaWeibo" + global.SinaWeiboUName)
if ce != nil {
global.Log.Info(ce.Error())
return
}
if flog {
//global.Page.Navigate("https://weibo.com")
//time.Sleep(time.Second * 3)
name, ei := global.Page.Find(".nameBox").Text()
if ei != nil {
global.Log.Info(ei.Error())
}
if name != "" {
global.Log.Info("头像", name)
} else {
flog = false
}
}
if !flog {
//global.Page.Navigate("http://weibo.com/login.php")
time.Sleep(time.Second * 3)
global.Page.FindByID("loginname").Fill(global.SinaWeiboUName)
time.Sleep(time.Second * 1)
global.Page.Find("input[type=\"password\"]").Fill(global.SinaWeiboPwd)
time.Sleep(time.Second * 2)
global.Page.FindByXPath("//*[@id=\"pl_login_form\"]/div/div[3]/div[6]/a/span").Click()
time.Sleep(time.Second * 2)
c, err5 := global.Page.GetCookies()
if err5 != nil {
global.Log.Info("登陆失败", err5.Error())
return
}
cookieJson, _ := json.Marshal(c)
//global.Log.Info("cookie", string(cookieJson[:]))
redis.Cache.SetString("sinaWeibo"+global.SinaWeiboUName, string(cookieJson[:]))
if getSinaQueueCount() <= 0 {
/* collect the hrefs of followed users and fans */
ClikFans()
/* visit the user's Weibo page to collect their info */
newSina := getSinaQueue()
if newSina != "" {
getUserInfo(newSina)
}
} else {
newSina := getSinaQueue()
if newSina != "" {
getUserInfo(newSina)
}
}
}
}
}
func ClikFans() {
eles, err := global.Page.Page.Find(".user_atten").All("li").Elements()
if err != nil {
return
}
if eles != nil && len(eles) > 1 {
a, err := eles[0].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href, _ := a.GetAttribute("href")
a2, err := eles[1].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href2, _ := a2.GetAttribute("href")
if href != "" {
getUserHref(href)
}
if href2 != "" {
getUserHref(href2)
}
}
}
func ClikFrendsFans() {
eles, err := global.Page.Page.Find(".PCD_counter").All("td.S_line1").Elements()
if err != nil {
println("获取微博关注/粉丝节点失败,", err.Error())
} else {
if eles != nil && len(eles) > 1 {
a, err := eles[0].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href, _ := a.GetAttribute("href")
a2, err := eles[1].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href2, _ := a2.GetAttribute("href")
if href != "" {
getUserHref(href)
}
if href2 != "" {
getUserHref(href2)
}
}
}
}
func getUserHref(href string) {
if href != "" {
println("待抓url:", href)
global.Page.Navigate(href)
} else {
println("点击操作项")
}
eles, err := global.Page.Find(".follow_list").All("li.follow_item").Elements()
if err != nil {
eles, err = global.Page.Find(".member_ul").All("li.member_li").Elements()
if err != nil {
println("未找到微博用户列表标签")
}
}
if err != nil {
global.Log.Info("获取列表href错误", err.Error())
return
}
if eles != nil {
for i := range eles {
a, err := eles[i].GetElement(api.Selector{"css selector", " .mod_pic a"})
if err != nil {
println("未找到列表中的微博用户节点标签")
continue
}
//push into the to-crawl queue
href, err := a.GetAttribute("href")
if err != nil {
global.Log.Info("获取粉丝用户详情href失败", err.Error())
} else {
newHref := strings.Split(href, "?")
addSinaQueue(newHref[0])
}
}
}
//next page
next := global.Page.Page.Find(".W_pages .next")
cls, err := next.Attribute("class")
if err != nil {
global.Log.Info("获取下一页按钮样式失败", err.Error())
return
}
if !strings.Contains(cls, "page_dis") {
global.Page.RunScript("document.documentElement.scrollTop=document.body.clientHeight;", nil, nil)
time.Sleep(time.Second * 2)
next.Click()
time.Sleep(time.Second * 5)
html, _ := global.Page.HTML()
if !strings.Contains(html, "由于系统限制,你无法查看") {
getUserHref("")
}
}
}
func getUserInfo(href string) {
global.Page.Page.Navigate(href)
addSinaDoneQueue(href)
m := map[string]interface{}{}
m["url"] = href
m["nickname"], _ = global.Page.Page.FindByClass("username").Text()
m["head_img"], _ = global.Page.Page.FindByClass("photo").Attribute("src")
m["desc"], _ = global.Page.Page.FindByClass("pf_intro").Text()
eles, err := global.Page.Page.Find(".PCD_counter").All("strong").Elements()
if err != nil {
println("获取微博关注/粉丝/微博失败,", err.Error())
} else {
m["tweets"], _ = eles[0].GetText()
m["follows"], _ = eles[1].GetText()
m["fans"], _ = eles[2].GetText()
}
//collect the user's Weibo posts
m["weibos"] = getUserWeibo()
//collect detailed user info
m = getUserInfoDetail(m)
addSinaDataQueue(m)
/* collect the hrefs of followed users and fans */
ClikFrendsFans()
newSina := getSinaQueue()
if newSina != "" {
getUserInfo(newSina)
}
}
func Try(fun func(), handler func(interface{})) {
defer func() {
if err := recover(); err != nil {
handler(err)
}
}()
fun()
}
func saveSinaData() {
for {
Try(func() {
global.Log.Info("执行Sina数据保存服务")
m := getSinaData()
if m != nil {
weibos := m["weibos"]
delete(m, "weibos")
sinaBaseId :=utils.ID()
m["id"] = sinaBaseId
m["ct_time"] = utils.CurrentTime()
m["ut_time"] = utils.CurrentTime()
if conve |
_, err := global.DB.InsertMap("sina_user_base_info", m)
if err != nil {
global.Log.Error("sina_user_base_info 数据保存失败:", err.Error())
} else {
if weibos != nil {
wb, err := convert.Obj2ListMap(weibos)
if err != nil {
global.Log.Error("weibo 数据转换[]map失败:", err.Error())
} else {
for i := 0; i < len(wb); i++ {
wb[i]["id"] =utils.ID()
wb[i]["sina_id"] = | rt.ToString(m["desc"]) != "" {
m["mydesc"] = m["desc"]
}
delete(m, "desc") | identifier_body |
crawler_sina.go | 2, _ := a2.GetAttribute("href")
if href != "" {
getUserHref(href)
}
if href2 != "" {
getUserHref(href2)
}
}
}
}
func getUserHref(href string) {
if href != "" {
println("待抓url:", href)
global.Page.Navigate(href)
} else {
println("点击操作项")
}
eles, err := global.Page.Find(".follow_list").All("li.follow_item").Elements()
if err != nil {
eles, err = global.Page.Find(".member_ul").All("li.member_li").Elements()
if err != nil {
println("未找到微博用户列表标签")
}
}
if err != nil {
global.Log.Info("获取列表href错误", err.Error())
return
}
if eles != nil {
for i := range eles {
a, err := eles[i].GetElement(api.Selector{"css selector", " .mod_pic a"})
if err != nil {
println("未找到列表中的微博用户节点标签")
continue
}
//push into the to-crawl queue
href, err := a.GetAttribute("href")
if err != nil {
global.Log.Info("获取粉丝用户详情href失败", err.Error())
} else {
newHref := strings.Split(href, "?")
addSinaQueue(newHref[0])
}
}
}
//next page
next := global.Page.Page.Find(".W_pages .next")
cls, err := next.Attribute("class")
if err != nil {
global.Log.Info("获取下一页按钮样式失败", err.Error())
return
}
if !strings.Contains(cls, "page_dis") {
global.Page.RunScript("document.documentElement.scrollTop=document.body.clientHeight;", nil, nil)
time.Sleep(time.Second * 2)
next.Click()
time.Sleep(time.Second * 5)
html, _ := global.Page.HTML()
if !strings.Contains(html, "由于系统限制,你无法查看") {
getUserHref("")
}
}
}
func getUserInfo(href string) {
global.Page.Page.Navigate(href)
addSinaDoneQueue(href)
m := map[string]interface{}{}
m["url"] = href
m["nickname"], _ = global.Page.Page.FindByClass("username").Text()
m["head_img"], _ = global.Page.Page.FindByClass("photo").Attribute("src")
m["desc"], _ = global.Page.Page.FindByClass("pf_intro").Text()
eles, err := global.Page.Page.Find(".PCD_counter").All("strong").Elements()
if err != nil {
println("获取微博关注/粉丝/微博失败,", err.Error())
} else {
m["tweets"], _ = eles[0].GetText()
m["follows"], _ = eles[1].GetText()
m["fans"], _ = eles[2].GetText()
}
//collect the user's Weibo posts
m["weibos"] = getUserWeibo()
//collect detailed user info
m = getUserInfoDetail(m)
addSinaDataQueue(m)
/* collect the hrefs of followed users and fans */
ClikFrendsFans()
newSina := getSinaQueue()
if newSina != "" {
getUserInfo(newSina)
}
}
func Try(fun func(), handler func(interface{})) {
defer func() {
if err := recover(); err != nil {
handler(err)
}
}()
fun()
}
func saveSinaData() {
for {
Try(func() {
global.Log.Info("执行Sina数据保存服务")
m := getSinaData()
if m != nil {
weibos := m["weibos"]
delete(m, "weibos")
sinaBaseId :=utils.ID()
m["id"] = sinaBaseId
m["ct_time"] = utils.CurrentTime()
m["ut_time"] = utils.CurrentTime()
if convert.ToString(m["desc"]) != "" {
m["mydesc"] = m["desc"]
}
delete(m, "desc")
_, err := global.DB.InsertMap("sina_user_base_info", m)
if err != nil {
global.Log.Error("sina_user_base_info 数据保存失败:", err.Error())
} else {
if weibos != nil {
wb, err := convert.Obj2ListMap(weibos)
if err != nil {
global.Log.Error("weibo 数据转换[]map失败:", err.Error())
} else {
for i := 0; i < len(wb); i++ {
wb[i]["id"] =utils.ID()
wb[i]["sina_id"] = sinaBaseId
if convert.ToString(wb[i]["like"]) != "" {
wb[i]["likes"] = wb[i]["like"]
}
delete(wb[i], "like")
if convert.ToString(wb[i]["like"]) != "" {
wb[i]["source"] = wb[i]["from"]
}
delete(wb[i], "from")
}
_, err = global.DB.InsertMapList("weibo", wb)
if err != nil {
global.Log.Error("weibo 数据保存失败:", err.Error())
}
}
}
}
}
time.Sleep(time.Second * 10)
}, func(e interface{}) {
print(e)
})
}
}
func getUserWeibo() []map[string]interface{} {
eles, err := global.Page.Page.Find(".WB_feed").All(".WB_feed_like").Elements()
if err != nil {
println("获取微博说说失败,", err.Error())
return nil
}
if eles != nil {
weiboMap := []map[string]interface{}{}
for i := range eles {
m := map[string]interface{}{}
ele, _ := eles[i].GetElement(api.Selector{"css selector", ".WB_text"})
m["content"] = nil
if ele != nil {
m["content"], _ = ele.GetText()
}
ele, _ = eles[i].GetElement(api.Selector{"css selector", ".WB_feed_expand"})
m["transfer_content"] = nil
if ele != nil {
m["transfer_content"], _ = ele.GetText()
}
eles2, _ := eles[i].GetElements(api.Selector{"css selector", ".WB_from a"})
m["say_time"] = nil
m["source"] = nil
if eles2 != nil && len(eles2) > 0 {
m["say_time"], _ = eles2[0].GetText()
if len(eles2) > 1 {
m["source"], _ = eles2[1].GetText()
}
}
//likes
eles2, _ = eles[i].GetElements(api.Selector{"css selector", "span[node-type=like_status] em"})
m["likes"] = nil
if ele != nil && len(eles2) > 1 {
like, _ := eles2[1].GetText()
if utils.IsValidNumber(like) {
m["likes"] = like
}
}
//comments
eles2, _ = eles[i].GetElements(api.Selector{"css selector", "span[node-type=comment_btn_text] em"})
m["comment"] = nil
if ele != nil && len(eles2) > 1 {
comment, _ := eles2[1].GetText()
if utils.IsValidNumber(comment) {
m["comment"] = comment
}
}
//reposts
eles2, _ = eles[i].GetElements(api.Selector{"css selector", "span[node-type=forward_btn_text] em"})
m["transfer"] = nil
if ele != nil && len(eles2) > 1 {
transfer, _ := eles2[1].GetText()
if utils.IsValidNumber(transfer) {
m["transfer"] = transfer
}
}
weiboMap = append(weiboMap, m)
}
return weiboMap
}
return nil
}
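// Editor's note (hedged sketch): each map appended above carries the keys content,
// transfer_content, say_time, source, likes, comment and transfer, with nil values
// whenever the corresponding node or a valid number could not be found.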
func getUserInfoDetail(m map[string]interface{}) map[string]interface{} {
err := global.Page.Page.FindByID("Pl_Core_UserInfo__6").FindByClass("WB_cardmore").Click()
if err != nil {
println("点击用户更多资料信息失败")
} else {
time.Sleep(time.Second * 3)
//basic profile info
eles, err := global.Page.Page.Find(".WB_frame_c").All("li").Elements()
if err != nil {
println("获取微博用户基本信息失败")
} else {
if eles != nil {
for i := range eles {
tit, _ := eles[i].GetElement(api.Selector{"css selector", ".pt_title"})
det, _ := eles[i].GetElement(api.Selector{"css selector", ".pt_detail"})
title, _ := tit.GetText()
detail, _ := det.GetText()
if strings.Contains(title, "昵称") {
m["nickname"] = detail
} else if strings.Contains(title, "真实姓名") {
m["name"] = detail
} else if strings.Contains(title, "所在地") {
m["place"] = de | tail
} else if strings.Contains(title, "性别") {
| conditional_block |
|
crawler_sina.go | global.RD.GetSETRandStringRm(SINA_SPIDER_DATA)
}
func getSinaData() map[string]interface{} {
d, err := getSinaDataQueue()
if err != nil {
global.Log.Warning("无数据", err.Error())
return nil
}
if d != "" {
m := map[string]interface{}{}
err = json.Unmarshal([]byte(d), &m)
if err == nil {
return m
}
}
return nil
}
func main() {
go saveSinaD | )
driver := agouti.ChromeDriver(agouti.ChromeOptions("args", []string{
"--start-maximized",
"--disable-infobars",
"--app=https://weibo.com/",
"--webkit-text-size-adjust"}))
driver.Start()
var err error
global.Page.Page, err = driver.NewPage()
if err != nil {
global.Log.Info(err.Error())
} else {
flog, ce := SetSinaCookieLogin("sinaWeibo" + global.SinaWeiboUName)
if ce != nil {
global.Log.Info(ce.Error())
return
}
if flog {
//global.Page.Navigate("https://weibo.com")
//time.Sleep(time.Second * 3)
name, ei := global.Page.Find(".nameBox").Text()
if ei != nil {
global.Log.Info(ei.Error())
}
if name != "" {
global.Log.Info("头像", name)
} else {
flog = false
}
}
if !flog {
//global.Page.Navigate("http://weibo.com/login.php")
time.Sleep(time.Second * 3)
global.Page.FindByID("loginname").Fill(global.SinaWeiboUName)
time.Sleep(time.Second * 1)
global.Page.Find("input[type=\"password\"]").Fill(global.SinaWeiboPwd)
time.Sleep(time.Second * 2)
global.Page.FindByXPath("//*[@id=\"pl_login_form\"]/div/div[3]/div[6]/a/span").Click()
time.Sleep(time.Second * 2)
c, err5 := global.Page.GetCookies()
if err5 != nil {
global.Log.Info("登陆失败", err5.Error())
return
}
cookieJson, _ := json.Marshal(c)
//global.Log.Info("cookie", string(cookieJson[:]))
redis.Cache.SetString("sinaWeibo"+global.SinaWeiboUName, string(cookieJson[:]))
if getSinaQueueCount() <= 0 {
/* collect the hrefs of followed users and fans */
ClikFans()
/* visit the user's Weibo page to collect their info */
newSina := getSinaQueue()
if newSina != "" {
getUserInfo(newSina)
}
} else {
newSina := getSinaQueue()
if newSina != "" {
getUserInfo(newSina)
}
}
}
}
}
func ClikFans() {
eles, err := global.Page.Page.Find(".user_atten").All("li").Elements()
if err != nil {
return
}
if eles != nil && len(eles) > 1 {
a, err := eles[0].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href, _ := a.GetAttribute("href")
a2, err := eles[1].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href2, _ := a2.GetAttribute("href")
if href != "" {
getUserHref(href)
}
if href2 != "" {
getUserHref(href2)
}
}
}
func ClikFrendsFans() {
eles, err := global.Page.Page.Find(".PCD_counter").All("td.S_line1").Elements()
if err != nil {
println("获取微博关注/粉丝节点失败,", err.Error())
} else {
if eles != nil && len(eles) > 1 {
a, err := eles[0].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href, _ := a.GetAttribute("href")
a2, err := eles[1].GetElement(api.Selector{"css selector", "a"})
if err != nil {
global.Log.Info("获取关注节点失败", err.Error())
return
}
href2, _ := a2.GetAttribute("href")
if href != "" {
getUserHref(href)
}
if href2 != "" {
getUserHref(href2)
}
}
}
}
func getUserHref(href string) {
if href != "" {
println("待抓url:", href)
global.Page.Navigate(href)
} else {
println("点击操作项")
}
eles, err := global.Page.Find(".follow_list").All("li.follow_item").Elements()
if err != nil {
eles, err = global.Page.Find(".member_ul").All("li.member_li").Elements()
if err != nil {
println("未找到微博用户列表标签")
}
}
if err != nil {
global.Log.Info("获取列表href错误", err.Error())
return
}
if eles != nil {
for i := range eles {
a, err := eles[i].GetElement(api.Selector{"css selector", " .mod_pic a"})
if err != nil {
println("未找到列表中的微博用户节点标签")
continue
}
//push into the to-crawl queue
href, err := a.GetAttribute("href")
if err != nil {
global.Log.Info("获取粉丝用户详情href失败", err.Error())
} else {
newHref := strings.Split(href, "?")
addSinaQueue(newHref[0])
}
}
}
//next page
next := global.Page.Page.Find(".W_pages .next")
cls, err := next.Attribute("class")
if err != nil {
global.Log.Info("获取下一页按钮样式失败", err.Error())
return
}
if !strings.Contains(cls, "page_dis") {
global.Page.RunScript("document.documentElement.scrollTop=document.body.clientHeight;", nil, nil)
time.Sleep(time.Second * 2)
next.Click()
time.Sleep(time.Second * 5)
html, _ := global.Page.HTML()
if !strings.Contains(html, "由于系统限制,你无法查看") {
getUserHref("")
}
}
}
func getUserInfo(href string) {
global.Page.Page.Navigate(href)
addSinaDoneQueue(href)
m := map[string]interface{}{}
m["url"] = href
m["nickname"], _ = global.Page.Page.FindByClass("username").Text()
m["head_img"], _ = global.Page.Page.FindByClass("photo").Attribute("src")
m["desc"], _ = global.Page.Page.FindByClass("pf_intro").Text()
eles, err := global.Page.Page.Find(".PCD_counter").All("strong").Elements()
if err != nil {
println("获取微博关注/粉丝/微博失败,", err.Error())
} else {
m["tweets"], _ = eles[0].GetText()
m["follows"], _ = eles[1].GetText()
m["fans"], _ = eles[2].GetText()
}
//collect the user's Weibo posts
m["weibos"] = getUserWeibo()
//collect detailed user info
m = getUserInfoDetail(m)
addSinaDataQueue(m)
/* collect the hrefs of followed users and fans */
ClikFrendsFans()
newSina := getSinaQueue()
if newSina != "" {
getUserInfo(newSina)
}
}
func Try(fun func(), handler func(interface{})) {
defer func() {
if err := recover(); err != nil {
handler(err)
}
}()
fun()
}
func saveSinaData() {
for {
Try(func() {
global.Log.Info("执行Sina数据保存服务")
m := getSinaData()
if m != nil {
weibos := m["weibos"]
delete(m, "weibos")
sinaBaseId :=utils.ID()
m["id"] = sinaBaseId
m["ct_time"] = utils.CurrentTime()
m["ut_time"] = utils.CurrentTime()
if convert.ToString(m["desc"]) != "" {
m["mydesc"] = m["desc"]
}
delete(m, "desc")
_, err := global.DB.InsertMap("sina_user_base_info", m)
if err != nil {
global.Log.Error("sina_user_base_info 数据保存失败:", err.Error())
} else {
if weibos != nil {
wb, err := convert.Obj2ListMap(weibos)
if err != nil {
global.Log.Error("weibo 数据转换[]map失败:", err.Error())
} else {
for i := 0; i < len(wb); i++ {
wb[i]["id"] =utils.ID()
wb[i]["sina_id"] = sina | ata( | identifier_name |
encoding.py | u"ed25519 signature"),
(b"spsig", 99, tb([13, 115, 101, 19, 63]), 64, u"secp256k1 signature"),
(b"p2sig", 98, tb([54, 240, 44, 52]), 64, u"p256 signature"),
(b"sig", 96, tb([4, 130, 43]), 64, u"generic signature"),
(b'Net', 15, tb([87, 82, 0]), 4, u"chain id"),
]
operation_tags = {
'endorsement': 0,
'seed_nonce_revelation': 1,
'double_endorsement_evidence': 2,
'double_baking_evidence': 3,
'account_activation': 4,
'proposal': 5,
'ballot': 6,
'reveal': 7,
'transaction': 8,
'origination': 9,
'delegation': 10
}
def scrub_input(v) -> bytes:
if isinstance(v, str) and not isinstance(v, bytes):
try:
_ = int(v, 16)
except ValueError:
v = v.encode('ascii')
else:
if v.startswith('0x'):
v = v[2:]
v = bytes.fromhex(v)
if not isinstance(v, bytes):
raise TypeError(
"a bytes-like object is required (also str), not '%s'" %
type(v).__name__)
return v
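# Editor's note (hedged sketch): scrub_input('0xdead') yields b'\xde\xad' (hex string path),
# scrub_input('tz1abc') yields b'tz1abc' (ascii fallback), and bytes pass through unchanged.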
def base58_decode(v: bytes) -> bytes:
""" Decode data using Base58 with checksum + validate binary prefix against known kinds and cut in the end.
:param v: Array of bytes (use string.encode())
:returns: bytes
"""
try:
prefix_len = next(
len(encoding[2])
for encoding in base58_encodings
if len(v) == encoding[1] and v.startswith(encoding[0])
)
except StopIteration:
raise ValueError('Invalid encoding, prefix or length mismatch.')
return base58.b58decode_check(v)[prefix_len:]
def base58_encode(v: bytes, prefix: bytes) -> bytes:
""" Encode data using Base58 with checksum and add an according binary prefix in the end.
:param v: Array of bytes
:param prefix: Human-readable prefix (use b'') e.g. b'tz', b'KT', etc
:returns: bytes (use string.decode())
"""
try:
encoding = next(
encoding
for encoding in base58_encodings
if len(v) == encoding[3] and prefix == encoding[0]
)
except StopIteration:
raise ValueError('Invalid encoding, prefix or length mismatch.')
return base58.b58encode_check(encoding[2] + v)
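# Editor's note (hedged round-trip sketch): a 20-byte payload matches the b'tz1' entry of
# base58_encodings, so base58_decode(base58_encode(b'\x00' * 20, b'tz1')) should give back
# b'\x00' * 20.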
def _validate(v, prefixes: list):
v = scrub_input(v)
if any(map(lambda x: v.startswith(x), prefixes)):
base58_decode(v)
else:
raise ValueError('Unknown prefix.')
def validate_pkh(v):
""" Ensure parameter is a public key hash (starts with b'tz1', b'tz2', b'tz3')
:param v: string or bytes
:raises ValueError: if parameter is not a public key hash
"""
return _validate(v, prefixes=[b'tz1', b'tz2', b'tz3'])
def validate_sig(v):
""" Ensure parameter is a signature (starts with b'edsig', b'spsig', b'p2sig', b'sig')
:param v: string or bytes
:raises ValueError: if parameter is not a signature
"""
return _validate(v, prefixes=[b'edsig', b'spsig', b'p2sig', b'sig'])
def is_pkh(v) -> bool:
""" Check if value is a public key hash.
"""
try:
validate_pkh(v)
except (ValueError, TypeError):
return False
return True
def is_sig(v) -> bool:
""" Check if value is a signature.
"""
try:
validate_sig(v)
except (ValueError, TypeError):
return False
return True
def is_bh(v) -> bool:
""" Check if value is a block hash.
"""
try:
_validate(v, prefixes=[b'B'])
except (ValueError, TypeError):
return False
return True
def is_ogh(v) -> bool:
""" Check if value is an operation group hash.
"""
try:
_validate(v, prefixes=[b'o'])
except (ValueError, TypeError):
return False
return True
def is_kt(v) -> bool:
""" Check if value is a KT address.
"""
try:
_validate(v, prefixes=[b'KT1'])
except (ValueError, TypeError):
return False
return True
def is_key(v) -> bool:
""" Check if value is a public key.
"""
try:
_validate(v, prefixes=[b"edsk", b"edpk", b"spsk", b"p2sk", b"sppk", b"p2pk"])
except (ValueError, TypeError):
return False
return True
def is_chain_id(v) -> bool:
""" Check if value is a chain id.
"""
try:
_validate(v, prefixes=[b'Net'])
except (ValueError, TypeError):
return False
return True
def forge_nat(value) -> bytes:
""" Encode a number using LEB128 encoding (Zarith).
:param int value: the value to encode
:returns: encoded value
:rtype: bytes
"""
if value < 0:
raise ValueError('Value cannot be negative.')
buf = bytearray()
more = True
while more:
byte = value & 0x7f
value >>= 7
if value:
byte |= 0x80
else:
more = False
buf.append(byte)
return bytes(buf)
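# Editor's note (hedged sketch): forge_nat is unsigned LEB128 (Zarith), so forge_nat(0) is
# b'\x00', forge_nat(127) is b'\x7f', forge_nat(128) is b'\x80\x01', and negative values
# raise ValueError.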
def forge_public_key(value) -> bytes:
""" Encode public key into bytes.
:param value: public key in in base58 form
"""
prefix = value[:4]
res = base58.b58decode_check(value)[4:]
if prefix == 'edpk':
return b'\x00' + res
elif prefix == 'sppk':
return b'\x01' + res
elif prefix == 'p2pk':
return b'\x02' + res
raise ValueError(f'Unrecognized key type: #{prefix}')
def parse_public_key(data: bytes) -> str:
""" Decode public key from byte form.
:param data: encoded public key.
:returns: base58 encoded public key
"""
key_prefix = {
b'\x00': b'edpk',
b'\x01': b'sppk',
b'\x02': b'p2pk'
}
return base58_encode(data[1:], key_prefix[data[:1]]).decode()
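# Editor's note (hedged sketch): parse_public_key is intended as the inverse of
# forge_public_key, i.e. parse_public_key(forge_public_key(pk)) should return the original
# base58 'edpk'/'sppk'/'p2pk' string.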
def parse_chain_id(data: bytes):
""" Decode chain id from byte form.
:param data: encoded chain id.
:returns: base58 encoded chain id
"""
return base58_encode(data, b'Net').decode()
def parse_signature(data: bytes):
""" Decode signature from byte form.
:param data: encoded signature.
:returns: base58 encoded signature (generic)
"""
return base58_encode(data, b'sig').decode()
def forge_address(value: str, tz_only=False) -> bytes:
""" Encode address or key hash into bytes.
:param value: base58 encoded address or key_hash
:param tz_only: True indicates that it's a key_hash (will be encoded in a more compact form)
"""
prefix = value[:3]
address = base58.b58decode_check(value)[3:]
if prefix == 'tz1':
res = b'\x00\x00' + address
elif prefix == 'tz2':
res = b'\x00\x01' + address
elif prefix == 'tz3':
res = b'\x00\x02' + address
elif prefix == 'KT1':
res = b'\x01' + address + b'\x00'
else:
raise ValueError(value)
return res[1:] if tz_only else res
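# Illustrative note (editor's addition): resulting byte layouts, assuming the standard
# 20-byte payload of a Tezos key hash / contract hash.
#   tz1 -> b'\x00\x00' + hash, tz2 -> b'\x00\x01' + hash, tz3 -> b'\x00\x02' + hash  (22 bytes)
#   KT1 -> b'\x01' + hash + b'\x00' padding byte                                     (22 bytes)
# With tz_only=True the leading tag byte is dropped, leaving the 21-byte
# curve-byte + hash form of a bare key_hash.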
def parse_address(data: bytes):
""" Decode address or key_hash from bytes.
:param data: encoded address or key_hash
:returns: base58 encoded address
"""
tz_prefixes = {
b'\x00\x00': b'tz1',
b'\x00\x01': b'tz2',
b'\x00\x02': b'tz3'
} |
for bin_prefix, tz_prefix in tz_prefixes.items():
if data.startswith(bin_prefix): | random_line_split |
|
encoding.py | 240, 44, 52]), 64, u"p256 signature"),
(b"sig", 96, tb([4, 130, 43]), 64, u"generic signature"),
(b'Net', 15, tb([87, 82, 0]), 4, u"chain id"),
]
operation_tags = {
'endorsement': 0,
'seed_nonce_revelation': 1,
'double_endorsement_evidence': 2,
'double_baking_evidence': 3,
'account_activation': 4,
'proposal': 5,
'ballot': 6,
'reveal': 7,
'transaction': 8,
'origination': 9,
'delegation': 10
}
def scrub_input(v) -> bytes:
if isinstance(v, str) and not isinstance(v, bytes):
try:
_ = int(v, 16)
except ValueError:
v = v.encode('ascii')
else:
if v.startswith('0x'):
v = v[2:]
v = bytes.fromhex(v)
if not isinstance(v, bytes):
raise TypeError(
"a bytes-like object is required (also str), not '%s'" %
type(v).__name__)
return v
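# Illustrative note (editor's addition): how the helper above treats string input.
# A string that parses as hex is interpreted as hex, everything else as ASCII:
# >>> scrub_input('0xdeadbeef')
# b'\xde\xad\xbe\xef'
# >>> scrub_input('deadbeef')   # also valid hex, so NOT kept as ASCII
# b'\xde\xad\xbe\xef'
# >>> scrub_input('tz1abc')     # int('tz1abc', 16) fails -> ASCII bytes
# b'tz1abc'
# >>> scrub_input(b'\x01\x02')  # bytes pass through untouched
# b'\x01\x02'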
def base58_decode(v: bytes) -> bytes:
""" Decode data using Base58 with checksum + validate binary prefix against known kinds and cut in the end.
:param v: Array of bytes (use string.encode())
:returns: bytes
"""
try:
prefix_len = next(
len(encoding[2])
for encoding in base58_encodings
if len(v) == encoding[1] and v.startswith(encoding[0])
)
except StopIteration:
raise ValueError('Invalid encoding, prefix or length mismatch.')
return base58.b58decode_check(v)[prefix_len:]
def base58_encode(v: bytes, prefix: bytes) -> bytes:
""" Encode data using Base58 with checksum and add an according binary prefix in the end.
:param v: Array of bytes
:param prefix: Human-readable prefix (use b'') e.g. b'tz', b'KT', etc
:returns: bytes (use string.decode())
"""
try:
encoding = next(
encoding
for encoding in base58_encodings
if len(v) == encoding[3] and prefix == encoding[0]
)
except StopIteration:
raise ValueError('Invalid encoding, prefix or length mismatch.')
return base58.b58encode_check(encoding[2] + v)
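# Illustrative note (editor's addition): encode/decode are inverses for any payload whose
# length matches the table above, e.g. a 20-byte key hash with the b'tz1' prefix:
# >>> pkh = bytes(20)  # hypothetical all-zero hash, for illustration only
# >>> base58_decode(base58_encode(pkh, prefix=b'tz1')) == pkh
# True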
def _validate(v, prefixes: list):
v = scrub_input(v)
if any(map(lambda x: v.startswith(x), prefixes)):
base58_decode(v)
else:
raise ValueError('Unknown prefix.')
def validate_pkh(v):
""" Ensure parameter is a public key hash (starts with b'tz1', b'tz2', b'tz3')
:param v: string or bytes
:raises ValueError: if parameter is not a public key hash
"""
return _validate(v, prefixes=[b'tz1', b'tz2', b'tz3'])
def validate_sig(v):
""" Ensure parameter is a signature (starts with b'edsig', b'spsig', b'p2sig', b'sig')
:param v: string or bytes
:raises ValueError: if parameter is not a signature
"""
return _validate(v, prefixes=[b'edsig', b'spsig', b'p2sig', b'sig'])
def is_pkh(v) -> bool:
""" Check if value is a public key hash.
"""
try:
validate_pkh(v)
except (ValueError, TypeError):
return False
return True
def is_sig(v) -> bool:
""" Check if value is a signature.
"""
try:
validate_sig(v)
except (ValueError, TypeError):
return False
return True
def is_bh(v) -> bool:
""" Check if value is a block hash.
"""
try:
_validate(v, prefixes=[b'B'])
except (ValueError, TypeError):
return False
return True
def is_ogh(v) -> bool:
""" Check if value is an operation group hash.
"""
try:
_validate(v, prefixes=[b'o'])
except (ValueError, TypeError):
return False
return True
def is_kt(v) -> bool:
""" Check if value is a KT address.
"""
try:
_validate(v, prefixes=[b'KT1'])
except (ValueError, TypeError):
return False
return True
def is_key(v) -> bool:
""" Check if value is a public key.
"""
try:
_validate(v, prefixes=[b"edsk", b"edpk", b"spsk", b"p2sk", b"sppk", b"p2pk"])
except (ValueError, TypeError):
return False
return True
def is_chain_id(v) -> bool:
""" Check if value is a chain id.
"""
try:
_validate(v, prefixes=[b'Net'])
except (ValueError, TypeError):
return False
return True
def forge_nat(value) -> bytes:
""" Encode a number using LEB128 encoding (Zarith).
:param int value: the value to encode
:returns: encoded value
:rtype: bytes
"""
if value < 0:
raise ValueError('Value cannot be negative.')
buf = bytearray()
more = True
while more:
byte = value & 0x7f
value >>= 7
if value:
byte |= 0x80
else:
more = False
buf.append(byte)
return bytes(buf)
def forge_public_key(value) -> bytes:
""" Encode public key into bytes.
    :param value: public key in base58 form
"""
prefix = value[:4]
res = base58.b58decode_check(value)[4:]
if prefix == 'edpk':
return b'\x00' + res
elif prefix == 'sppk':
return b'\x01' + res
elif prefix == 'p2pk':
return b'\x02' + res
    raise ValueError(f'Unrecognized key type: {prefix}')
def parse_public_key(data: bytes) -> str:
""" Decode public key from byte form.
:param data: encoded public key.
:returns: base58 encoded public key
"""
key_prefix = {
b'\x00': b'edpk',
b'\x01': b'sppk',
b'\x02': b'p2pk'
}
return base58_encode(data[1:], key_prefix[data[:1]]).decode()
def parse_chain_id(data: bytes):
""" Decode chain id from byte form.
:param data: encoded chain id.
:returns: base58 encoded chain id
"""
return base58_encode(data, b'Net').decode()
def parse_signature(data: bytes):
""" Decode signature from byte form.
:param data: encoded signature.
:returns: base58 encoded signature (generic)
"""
return base58_encode(data, b'sig').decode()
def forge_address(value: str, tz_only=False) -> bytes:
""" Encode address or key hash into bytes.
:param value: base58 encoded address or key_hash
:param tz_only: True indicates that it's a key_hash (will be encoded in a more compact form)
"""
prefix = value[:3]
address = base58.b58decode_check(value)[3:]
if prefix == 'tz1':
res = b'\x00\x00' + address
elif prefix == 'tz2':
res = b'\x00\x01' + address
elif prefix == 'tz3':
res = b'\x00\x02' + address
elif prefix == 'KT1':
res = b'\x01' + address + b'\x00'
else:
raise ValueError(value)
return res[1:] if tz_only else res
def parse_address(data: bytes):
| """ Decode address or key_hash from bytes.
:param data: encoded address or key_hash
:returns: base58 encoded address
"""
tz_prefixes = {
b'\x00\x00': b'tz1',
b'\x00\x01': b'tz2',
b'\x00\x02': b'tz3'
}
for bin_prefix, tz_prefix in tz_prefixes.items():
if data.startswith(bin_prefix):
return base58_encode(data[2:], tz_prefix).decode()
if data.startswith(b'\x01') and data.endswith(b'\x00'):
return base58_encode(data[1:-1], b'KT1').decode()
else:
return base58_encode(data[1:], tz_prefixes[b'\x00' + data[:1]]).decode() | identifier_body |
|
encoding.py | Base58 with checksum + validate binary prefix against known kinds and cut in the end.
:param v: Array of bytes (use string.encode())
:returns: bytes
"""
try:
prefix_len = next(
len(encoding[2])
for encoding in base58_encodings
if len(v) == encoding[1] and v.startswith(encoding[0])
)
except StopIteration:
raise ValueError('Invalid encoding, prefix or length mismatch.')
return base58.b58decode_check(v)[prefix_len:]
def base58_encode(v: bytes, prefix: bytes) -> bytes:
""" Encode data using Base58 with checksum and add an according binary prefix in the end.
:param v: Array of bytes
:param prefix: Human-readable prefix (use b'') e.g. b'tz', b'KT', etc
:returns: bytes (use string.decode())
"""
try:
encoding = next(
encoding
for encoding in base58_encodings
if len(v) == encoding[3] and prefix == encoding[0]
)
except StopIteration:
raise ValueError('Invalid encoding, prefix or length mismatch.')
return base58.b58encode_check(encoding[2] + v)
def _validate(v, prefixes: list):
v = scrub_input(v)
if any(map(lambda x: v.startswith(x), prefixes)):
base58_decode(v)
else:
raise ValueError('Unknown prefix.')
def validate_pkh(v):
""" Ensure parameter is a public key hash (starts with b'tz1', b'tz2', b'tz3')
:param v: string or bytes
:raises ValueError: if parameter is not a public key hash
"""
return _validate(v, prefixes=[b'tz1', b'tz2', b'tz3'])
def validate_sig(v):
""" Ensure parameter is a signature (starts with b'edsig', b'spsig', b'p2sig', b'sig')
:param v: string or bytes
:raises ValueError: if parameter is not a signature
"""
return _validate(v, prefixes=[b'edsig', b'spsig', b'p2sig', b'sig'])
def is_pkh(v) -> bool:
""" Check if value is a public key hash.
"""
try:
validate_pkh(v)
except (ValueError, TypeError):
return False
return True
def is_sig(v) -> bool:
""" Check if value is a signature.
"""
try:
validate_sig(v)
except (ValueError, TypeError):
return False
return True
def is_bh(v) -> bool:
""" Check if value is a block hash.
"""
try:
_validate(v, prefixes=[b'B'])
except (ValueError, TypeError):
return False
return True
def is_ogh(v) -> bool:
""" Check if value is an operation group hash.
"""
try:
_validate(v, prefixes=[b'o'])
except (ValueError, TypeError):
return False
return True
def is_kt(v) -> bool:
""" Check if value is a KT address.
"""
try:
_validate(v, prefixes=[b'KT1'])
except (ValueError, TypeError):
return False
return True
def is_key(v) -> bool:
""" Check if value is a public key.
"""
try:
_validate(v, prefixes=[b"edsk", b"edpk", b"spsk", b"p2sk", b"sppk", b"p2pk"])
except (ValueError, TypeError):
return False
return True
def is_chain_id(v) -> bool:
""" Check if value is a chain id.
"""
try:
_validate(v, prefixes=[b'Net'])
except (ValueError, TypeError):
return False
return True
def forge_nat(value) -> bytes:
""" Encode a number using LEB128 encoding (Zarith).
:param int value: the value to encode
:returns: encoded value
:rtype: bytes
"""
if value < 0:
raise ValueError('Value cannot be negative.')
buf = bytearray()
more = True
while more:
byte = value & 0x7f
value >>= 7
if value:
byte |= 0x80
else:
more = False
buf.append(byte)
return bytes(buf)
def forge_public_key(value) -> bytes:
""" Encode public key into bytes.
    :param value: public key in base58 form
"""
prefix = value[:4]
res = base58.b58decode_check(value)[4:]
if prefix == 'edpk':
return b'\x00' + res
elif prefix == 'sppk':
return b'\x01' + res
elif prefix == 'p2pk':
return b'\x02' + res
    raise ValueError(f'Unrecognized key type: {prefix}')
def parse_public_key(data: bytes) -> str:
""" Decode public key from byte form.
:param data: encoded public key.
:returns: base58 encoded public key
"""
key_prefix = {
b'\x00': b'edpk',
b'\x01': b'sppk',
b'\x02': b'p2pk'
}
return base58_encode(data[1:], key_prefix[data[:1]]).decode()
def parse_chain_id(data: bytes):
""" Decode chain id from byte form.
:param data: encoded chain id.
:returns: base58 encoded chain id
"""
return base58_encode(data, b'Net').decode()
def parse_signature(data: bytes):
""" Decode signature from byte form.
:param data: encoded signature.
:returns: base58 encoded signature (generic)
"""
return base58_encode(data, b'sig').decode()
def forge_address(value: str, tz_only=False) -> bytes:
""" Encode address or key hash into bytes.
:param value: base58 encoded address or key_hash
:param tz_only: True indicates that it's a key_hash (will be encoded in a more compact form)
"""
prefix = value[:3]
address = base58.b58decode_check(value)[3:]
if prefix == 'tz1':
res = b'\x00\x00' + address
elif prefix == 'tz2':
res = b'\x00\x01' + address
elif prefix == 'tz3':
res = b'\x00\x02' + address
elif prefix == 'KT1':
res = b'\x01' + address + b'\x00'
else:
raise ValueError(value)
return res[1:] if tz_only else res
def parse_address(data: bytes):
""" Decode address or key_hash from bytes.
:param data: encoded address or key_hash
:returns: base58 encoded address
"""
tz_prefixes = {
b'\x00\x00': b'tz1',
b'\x00\x01': b'tz2',
b'\x00\x02': b'tz3'
}
for bin_prefix, tz_prefix in tz_prefixes.items():
if data.startswith(bin_prefix):
return base58_encode(data[2:], tz_prefix).decode()
if data.startswith(b'\x01') and data.endswith(b'\x00'):
return base58_encode(data[1:-1], b'KT1').decode()
else:
return base58_encode(data[1:], tz_prefixes[b'\x00' + data[:1]]).decode()
def parse_contract(data: bytes):
""" Decode contract (address + optional entrypoint) from bytes
:param data: encoded contract
:returns: base58 encoded address and entrypoint (if exists) separated by `%`
"""
res = parse_address(data[:22])
if len(data) > 22:
res += f'%{data[22:].decode()}'
return res
def forge_bool(value: bool) -> bytes:
""" Encode boolean value into bytes.
"""
return b'\xff' if value else b'\x00'
def forge_array(data, len_bytes=4) -> bytes:
""" Encode array of bytes (prepend length).
:param data: list of bytes
:param len_bytes: number of bytes to store array length
"""
return len(data).to_bytes(len_bytes, 'big') + data
def parse_array(data, len_bytes=4) -> tuple:
""" Decode array of bytes.
:param data: encoded array
:param len_bytes: number of bytes to store array length
:returns: Tuple[list of bytes, array length]
"""
assert len(data) >= len_bytes, f'not enough bytes to parse array length, wanted {len_bytes}'
length = int.from_bytes(data[:len_bytes], 'big')
assert len(data) >= len_bytes + length, f'not enough bytes to parse array body, wanted {length}'
return data[len_bytes:len_bytes+length], len_bytes+length
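# Illustrative note (editor's addition): forge_array/parse_array round trip, assuming the
# default 4-byte big-endian length header used above.
# >>> forge_array(b'abc')
# b'\x00\x00\x00\x03abc'
# >>> parse_array(b'\x00\x00\x00\x03abc')
# (b'abc', 7)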
def | forge_base58 | identifier_name |
|
encoding.py | 256k1 scalar"),
(b"GSp", 53, tb([5, 92, 0]), 33, u"secp256k1 element"),
(b"edsk", 98, tb([43, 246, 78, 7]), 64, u"ed25519 secret key"),
(b"edsig", 99, tb([9, 245, 205, 134, 18]), 64, u"ed25519 signature"),
(b"spsig", 99, tb([13, 115, 101, 19, 63]), 64, u"secp256k1 signature"),
(b"p2sig", 98, tb([54, 240, 44, 52]), 64, u"p256 signature"),
(b"sig", 96, tb([4, 130, 43]), 64, u"generic signature"),
(b'Net', 15, tb([87, 82, 0]), 4, u"chain id"),
]
operation_tags = {
'endorsement': 0,
'seed_nonce_revelation': 1,
'double_endorsement_evidence': 2,
'double_baking_evidence': 3,
'account_activation': 4,
'proposal': 5,
'ballot': 6,
'reveal': 7,
'transaction': 8,
'origination': 9,
'delegation': 10
}
def scrub_input(v) -> bytes:
if isinstance(v, str) and not isinstance(v, bytes):
try:
_ = int(v, 16)
except ValueError:
v = v.encode('ascii')
else:
if v.startswith('0x'):
v = v[2:]
v = bytes.fromhex(v)
if not isinstance(v, bytes):
raise TypeError(
"a bytes-like object is required (also str), not '%s'" %
type(v).__name__)
return v
def base58_decode(v: bytes) -> bytes:
""" Decode data using Base58 with checksum + validate binary prefix against known kinds and cut in the end.
:param v: Array of bytes (use string.encode())
:returns: bytes
"""
try:
prefix_len = next(
len(encoding[2])
for encoding in base58_encodings
if len(v) == encoding[1] and v.startswith(encoding[0])
)
except StopIteration:
raise ValueError('Invalid encoding, prefix or length mismatch.')
return base58.b58decode_check(v)[prefix_len:]
def base58_encode(v: bytes, prefix: bytes) -> bytes:
""" Encode data using Base58 with checksum and add an according binary prefix in the end.
:param v: Array of bytes
:param prefix: Human-readable prefix (use b'') e.g. b'tz', b'KT', etc
:returns: bytes (use string.decode())
"""
try:
encoding = next(
encoding
for encoding in base58_encodings
if len(v) == encoding[3] and prefix == encoding[0]
)
except StopIteration:
raise ValueError('Invalid encoding, prefix or length mismatch.')
return base58.b58encode_check(encoding[2] + v)
def _validate(v, prefixes: list):
v = scrub_input(v)
if any(map(lambda x: v.startswith(x), prefixes)):
base58_decode(v)
else:
raise ValueError('Unknown prefix.')
def validate_pkh(v):
""" Ensure parameter is a public key hash (starts with b'tz1', b'tz2', b'tz3')
:param v: string or bytes
:raises ValueError: if parameter is not a public key hash
"""
return _validate(v, prefixes=[b'tz1', b'tz2', b'tz3'])
def validate_sig(v):
""" Ensure parameter is a signature (starts with b'edsig', b'spsig', b'p2sig', b'sig')
:param v: string or bytes
:raises ValueError: if parameter is not a signature
"""
return _validate(v, prefixes=[b'edsig', b'spsig', b'p2sig', b'sig'])
def is_pkh(v) -> bool:
""" Check if value is a public key hash.
"""
try:
validate_pkh(v)
except (ValueError, TypeError):
return False
return True
def is_sig(v) -> bool:
""" Check if value is a signature.
"""
try:
validate_sig(v)
except (ValueError, TypeError):
return False
return True
def is_bh(v) -> bool:
""" Check if value is a block hash.
"""
try:
_validate(v, prefixes=[b'B'])
except (ValueError, TypeError):
return False
return True
def is_ogh(v) -> bool:
""" Check if value is an operation group hash.
"""
try:
_validate(v, prefixes=[b'o'])
except (ValueError, TypeError):
return False
return True
def is_kt(v) -> bool:
""" Check if value is a KT address.
"""
try:
_validate(v, prefixes=[b'KT1'])
except (ValueError, TypeError):
return False
return True
def is_key(v) -> bool:
""" Check if value is a public key.
"""
try:
_validate(v, prefixes=[b"edsk", b"edpk", b"spsk", b"p2sk", b"sppk", b"p2pk"])
except (ValueError, TypeError):
return False
return True
def is_chain_id(v) -> bool:
""" Check if value is a chain id.
"""
try:
_validate(v, prefixes=[b'Net'])
except (ValueError, TypeError):
return False
return True
def forge_nat(value) -> bytes:
""" Encode a number using LEB128 encoding (Zarith).
:param int value: the value to encode
:returns: encoded value
:rtype: bytes
"""
if value < 0:
raise ValueError('Value cannot be negative.')
buf = bytearray()
more = True
while more:
byte = value & 0x7f
value >>= 7
if value:
byte |= 0x80
else:
more = False
buf.append(byte)
return bytes(buf)
def forge_public_key(value) -> bytes:
""" Encode public key into bytes.
    :param value: public key in base58 form
"""
prefix = value[:4]
res = base58.b58decode_check(value)[4:]
if prefix == 'edpk':
return b'\x00' + res
elif prefix == 'sppk':
return b'\x01' + res
elif prefix == 'p2pk':
return b'\x02' + res
    raise ValueError(f'Unrecognized key type: {prefix}')
def parse_public_key(data: bytes) -> str:
""" Decode public key from byte form.
:param data: encoded public key.
:returns: base58 encoded public key
"""
key_prefix = {
b'\x00': b'edpk',
b'\x01': b'sppk',
b'\x02': b'p2pk'
}
return base58_encode(data[1:], key_prefix[data[:1]]).decode()
def parse_chain_id(data: bytes):
""" Decode chain id from byte form.
:param data: encoded chain id.
:returns: base58 encoded chain id
"""
return base58_encode(data, b'Net').decode()
def parse_signature(data: bytes):
""" Decode signature from byte form.
:param data: encoded signature.
:returns: base58 encoded signature (generic)
"""
return base58_encode(data, b'sig').decode()
def forge_address(value: str, tz_only=False) -> bytes:
""" Encode address or key hash into bytes.
:param value: base58 encoded address or key_hash
:param tz_only: True indicates that it's a key_hash (will be encoded in a more compact form)
"""
prefix = value[:3]
address = base58.b58decode_check(value)[3:]
if prefix == 'tz1':
res = b'\x00\x00' + address
elif prefix == 'tz2':
res = b'\x00\x01' + address
elif prefix == 'tz3':
res = b'\x00\x02' + address
elif prefix == 'KT1':
| res = b'\x01' + address + b'\x00' | conditional_block |
|
blockSelection.ts |
*
* @private
*/
private anyBlockSelectedCache: boolean | null = null;
/**
* Sanitizer Config
*
* @returns {SanitizerConfig}
*/
private get | (): SanitizerConfig {
return {
p: {},
h1: {},
h2: {},
h3: {},
h4: {},
h5: {},
h6: {},
ol: {},
ul: {},
li: {},
br: true,
img: {
src: true,
width: true,
height: true,
},
a: {
href: true,
},
b: {},
i: {},
u: {},
};
}
/**
* Flag that identifies all Blocks selection
*
* @returns {boolean}
*/
public get allBlocksSelected(): boolean {
const { BlockManager } = this.Editor;
return BlockManager.blocks.every((block) => block.selected === true);
}
/**
* Set selected all blocks
*
* @param {boolean} state - state to set
*/
public set allBlocksSelected(state: boolean) {
const { BlockManager } = this.Editor;
BlockManager.blocks.forEach((block) => {
block.selected = state;
});
this.clearCache();
}
/**
* Flag that identifies any Block selection
*
* @returns {boolean}
*/
public get anyBlockSelected(): boolean {
const { BlockManager } = this.Editor;
if (this.anyBlockSelectedCache === null) {
this.anyBlockSelectedCache = BlockManager.blocks.some((block) => block.selected === true);
}
return this.anyBlockSelectedCache;
}
/**
* Return selected Blocks array
*
* @returns {Block[]}
*/
public get selectedBlocks(): Block[] {
return this.Editor.BlockManager.blocks.filter((block: Block) => block.selected);
}
/**
* Flag used to define block selection
* First CMD+A defines it as true and then second CMD+A selects all Blocks
*
* @type {boolean}
*/
private needToSelectAll = false;
/**
* Flag used to define native input selection
* In this case we allow double CMD+A to select Block
*
* @type {boolean}
*/
private nativeInputSelected = false;
/**
* Flag identifies any input selection
* That means we can select whole Block
*
* @type {boolean}
*/
private readyToBlockSelection = false;
/**
* SelectionUtils instance
*
* @type {SelectionUtils}
*/
private selection: SelectionUtils;
/**
* Module Preparation
* Registers Shortcuts CMD+A and CMD+C
* to select all and copy them
*/
public prepare(): void {
this.selection = new SelectionUtils();
/**
* CMD/CTRL+A selection shortcut
*/
Shortcuts.add({
name: 'CMD+A',
handler: (event) => {
const { BlockManager, ReadOnly } = this.Editor;
/**
* We use Editor's Block selection on CMD+A ShortCut instead of Browsers
*/
if (ReadOnly.isEnabled) {
event.preventDefault();
this.selectAllBlocks();
return;
}
/**
         * When one page consists of two or more EditorJS instances,
         * the Shortcut module tries to handle all events.
         * That's why the Editor's selection works inside the target Editor, but
         * an error occurs for the other instances because there is nothing to select.
*
* Prevent such actions if focus is not inside the Editor
*/
if (!BlockManager.currentBlock) {
return;
}
this.handleCommandA(event);
},
on: this.Editor.UI.nodes.redactor,
});
}
/**
* Toggle read-only state
*
* - Remove all ranges
* - Unselect all Blocks
*/
public toggleReadOnly(): void {
SelectionUtils.get()
.removeAllRanges();
this.allBlocksSelected = false;
}
/**
* Remove selection of Block
*
* @param {number?} index - Block index according to the BlockManager's indexes
*/
public unSelectBlockByIndex(index?): void {
const { BlockManager } = this.Editor;
let block;
if (isNaN(index)) {
block = BlockManager.currentBlock;
} else {
block = BlockManager.getBlockByIndex(index);
}
block.selected = false;
this.clearCache();
}
/**
* Clear selection from Blocks
*
* @param {Event} reason - event caused clear of selection
* @param {boolean} restoreSelection - if true, restore saved selection
*/
public clearSelection(reason?: Event, restoreSelection = false): void {
const { BlockManager, Caret, RectangleSelection } = this.Editor;
this.needToSelectAll = false;
this.nativeInputSelected = false;
this.readyToBlockSelection = false;
const isKeyboard = reason && (reason instanceof KeyboardEvent);
const isPrintableKey = isKeyboard && _.isPrintableKey((reason as KeyboardEvent).keyCode);
/**
     * If the selection was cleared by a printable key press and any block is selected,
     * remove the selected blocks and insert the pressed key
*/
if (this.anyBlockSelected && isKeyboard && isPrintableKey && !SelectionUtils.isSelectionExists) {
const indexToInsert = BlockManager.removeSelectedBlocks();
BlockManager.insertDefaultBlockAtIndex(indexToInsert, true);
Caret.setToBlock(BlockManager.currentBlock);
_.delay(() => {
const eventKey = (reason as KeyboardEvent).key;
/**
* If event.key length >1 that means key is special (e.g. Enter or Dead or Unidentified).
* So we use empty string
*
* @see https://developer.mozilla.org/ru/docs/Web/API/KeyboardEvent/key
*/
Caret.insertContentAtCaretPosition(eventKey.length > 1 ? '' : eventKey);
// eslint-disable-next-line @typescript-eslint/no-magic-numbers
}, 20)();
}
this.Editor.CrossBlockSelection.clear(reason);
if (!this.anyBlockSelected || RectangleSelection.isRectActivated()) {
this.Editor.RectangleSelection.clearSelection();
return;
}
/**
* Restore selection when Block is already selected
* but someone tries to write something.
*/
if (restoreSelection) {
this.selection.restore();
}
/** Now all blocks cleared */
this.allBlocksSelected = false;
}
/**
* Reduce each Block and copy its content
*
* @param {ClipboardEvent} e - copy/cut event
* @returns {Promise<void>}
*/
public copySelectedBlocks(e: ClipboardEvent): Promise<void> {
/**
* Prevent default copy
*/
e.preventDefault();
const fakeClipboard = $.make('div');
this.selectedBlocks.forEach((block) => {
/**
* Make <p> tag that holds clean HTML
*/
const cleanHTML = clean(block.holder.innerHTML, this.sanitizerConfig);
const fragment = $.make('p');
fragment.innerHTML = cleanHTML;
fakeClipboard.appendChild(fragment);
});
const textPlain = Array.from(fakeClipboard.childNodes).map((node) => node.textContent)
.join('\n\n');
const textHTML = fakeClipboard.innerHTML;
e.clipboardData.setData('text/plain', textPlain);
e.clipboardData.setData('text/html', textHTML);
return Promise
.all(this.selectedBlocks.map((block) => block.save()))
.then(savedData => {
try {
e.clipboardData.setData(this.Editor.Paste.MIME_TYPE, JSON.stringify(savedData));
} catch (err) {
// In Firefox we can't set data in async function
}
});
}
/**
* select Block
*
* @param {number?} index - Block index according to the BlockManager's indexes
*/
public selectBlockByIndex(index?): void {
const { BlockManager } = this.Editor;
/**
* Remove previous focused Block's state
*/
BlockManager.clearFocused();
let block;
if (isNaN(index)) {
block = BlockManager.currentBlock;
} else {
block = BlockManager.getBlockByIndex(index);
}
/** Save selection */
this.selection.save();
SelectionUtils.get()
.removeAllRanges();
block.selected = true;
this.clearCache();
/** close InlineToolbar when we selected any Block */
this.Editor.InlineToolbar.close();
}
/**
* Clear anyBlockSelected cache
*/
public clearCache(): void {
this.anyBlockSelectedCache = null;
}
/**
* Module destruction
* De-registers Shortcut CMD+A
*/
public destroy(): void {
/** Selection shortcut */
Shortcuts.remove(this.Editor.UI.nodes.redactor, 'CMD+A');
}
/**
* First CMD+A selects all input content by native behaviour,
* next CMD+A keypress selects all blocks
*
* @param {KeyboardEvent} event - keyboard event
*/
private handleCommandA(event: KeyboardEvent): void {
this.Editor.RectangleSelection.clearSelection();
/** allow default selection on native inputs */
if ($.isNativeInput(event.target) && !this.readyToBlockSelection) {
this.readyToBlockSelection = true;
return;
}
const workingBlock = this.Editor.BlockManager.getBlock | sanitizerConfig | identifier_name |
blockSelection.ts | @returns {SanitizerConfig}
*/
private get sanitizerConfig(): SanitizerConfig {
return {
p: {},
h1: {},
h2: {},
h3: {},
h4: {},
h5: {},
h6: {},
ol: {},
ul: {},
li: {},
br: true,
img: {
src: true,
width: true,
height: true,
},
a: {
href: true,
},
b: {},
i: {},
u: {},
};
}
/**
* Flag that identifies all Blocks selection
*
* @returns {boolean}
*/
public get allBlocksSelected(): boolean {
const { BlockManager } = this.Editor;
return BlockManager.blocks.every((block) => block.selected === true);
}
/**
* Set selected all blocks
*
* @param {boolean} state - state to set
*/
public set allBlocksSelected(state: boolean) {
const { BlockManager } = this.Editor;
BlockManager.blocks.forEach((block) => {
block.selected = state;
});
this.clearCache();
}
/**
* Flag that identifies any Block selection
*
* @returns {boolean}
*/
public get anyBlockSelected(): boolean {
const { BlockManager } = this.Editor;
if (this.anyBlockSelectedCache === null) {
this.anyBlockSelectedCache = BlockManager.blocks.some((block) => block.selected === true);
}
return this.anyBlockSelectedCache;
}
/**
* Return selected Blocks array
*
* @returns {Block[]}
*/
public get selectedBlocks(): Block[] {
return this.Editor.BlockManager.blocks.filter((block: Block) => block.selected);
}
/**
* Flag used to define block selection
* First CMD+A defines it as true and then second CMD+A selects all Blocks
*
* @type {boolean}
*/
private needToSelectAll = false;
/**
* Flag used to define native input selection
* In this case we allow double CMD+A to select Block
*
* @type {boolean}
*/
private nativeInputSelected = false;
/**
* Flag identifies any input selection
* That means we can select whole Block
*
* @type {boolean}
*/
private readyToBlockSelection = false;
/**
* SelectionUtils instance
*
* @type {SelectionUtils}
*/
private selection: SelectionUtils;
/**
* Module Preparation
* Registers Shortcuts CMD+A and CMD+C
* to select all and copy them
*/
public prepare(): void {
this.selection = new SelectionUtils();
/**
* CMD/CTRL+A selection shortcut
*/
Shortcuts.add({
name: 'CMD+A',
handler: (event) => {
const { BlockManager, ReadOnly } = this.Editor;
/**
* We use Editor's Block selection on CMD+A ShortCut instead of Browsers
*/
if (ReadOnly.isEnabled) {
event.preventDefault();
this.selectAllBlocks();
return;
}
/**
         * When one page consists of two or more EditorJS instances,
         * the Shortcut module tries to handle all events.
         * That's why the Editor's selection works inside the target Editor, but
         * an error occurs for the other instances because there is nothing to select.
*
* Prevent such actions if focus is not inside the Editor
*/
if (!BlockManager.currentBlock) {
return;
}
this.handleCommandA(event);
},
on: this.Editor.UI.nodes.redactor,
});
}
/**
* Toggle read-only state
*
* - Remove all ranges
* - Unselect all Blocks
*/
public toggleReadOnly(): void {
SelectionUtils.get()
.removeAllRanges();
this.allBlocksSelected = false;
}
/**
* Remove selection of Block
*
* @param {number?} index - Block index according to the BlockManager's indexes
*/
public unSelectBlockByIndex(index?): void {
const { BlockManager } = this.Editor;
let block;
if (isNaN(index)) {
block = BlockManager.currentBlock;
} else {
block = BlockManager.getBlockByIndex(index);
}
block.selected = false;
this.clearCache();
}
/**
* Clear selection from Blocks
*
* @param {Event} reason - event caused clear of selection
* @param {boolean} restoreSelection - if true, restore saved selection
*/
public clearSelection(reason?: Event, restoreSelection = false): void {
const { BlockManager, Caret, RectangleSelection } = this.Editor;
this.needToSelectAll = false;
this.nativeInputSelected = false;
this.readyToBlockSelection = false;
const isKeyboard = reason && (reason instanceof KeyboardEvent);
const isPrintableKey = isKeyboard && _.isPrintableKey((reason as KeyboardEvent).keyCode);
/**
     * If the selection was cleared by a printable key press and any block is selected,
     * remove the selected blocks and insert the pressed key
*/
if (this.anyBlockSelected && isKeyboard && isPrintableKey && !SelectionUtils.isSelectionExists) {
const indexToInsert = BlockManager.removeSelectedBlocks();
BlockManager.insertDefaultBlockAtIndex(indexToInsert, true);
Caret.setToBlock(BlockManager.currentBlock);
_.delay(() => {
const eventKey = (reason as KeyboardEvent).key;
/**
* If event.key length >1 that means key is special (e.g. Enter or Dead or Unidentified).
* So we use empty string
*
* @see https://developer.mozilla.org/ru/docs/Web/API/KeyboardEvent/key
*/
Caret.insertContentAtCaretPosition(eventKey.length > 1 ? '' : eventKey);
// eslint-disable-next-line @typescript-eslint/no-magic-numbers
}, 20)();
}
this.Editor.CrossBlockSelection.clear(reason);
if (!this.anyBlockSelected || RectangleSelection.isRectActivated()) {
this.Editor.RectangleSelection.clearSelection();
return;
}
/**
* Restore selection when Block is already selected
* but someone tries to write something.
*/
if (restoreSelection) {
this.selection.restore();
}
/** Now all blocks cleared */
this.allBlocksSelected = false;
}
/**
* Reduce each Block and copy its content
*
* @param {ClipboardEvent} e - copy/cut event
* @returns {Promise<void>}
*/
public copySelectedBlocks(e: ClipboardEvent): Promise<void> {
/**
* Prevent default copy
*/
e.preventDefault();
const fakeClipboard = $.make('div');
this.selectedBlocks.forEach((block) => {
/**
* Make <p> tag that holds clean HTML
*/
const cleanHTML = clean(block.holder.innerHTML, this.sanitizerConfig);
const fragment = $.make('p');
fragment.innerHTML = cleanHTML;
fakeClipboard.appendChild(fragment);
});
const textPlain = Array.from(fakeClipboard.childNodes).map((node) => node.textContent)
.join('\n\n');
const textHTML = fakeClipboard.innerHTML;
e.clipboardData.setData('text/plain', textPlain);
e.clipboardData.setData('text/html', textHTML);
return Promise
.all(this.selectedBlocks.map((block) => block.save()))
.then(savedData => {
try {
e.clipboardData.setData(this.Editor.Paste.MIME_TYPE, JSON.stringify(savedData));
} catch (err) {
// In Firefox we can't set data in async function
}
});
}
/**
* select Block
*
* @param {number?} index - Block index according to the BlockManager's indexes
*/
public selectBlockByIndex(index?): void {
const { BlockManager } = this.Editor;
/**
* Remove previous focused Block's state
*/
BlockManager.clearFocused();
let block;
if (isNaN(index)) {
block = BlockManager.currentBlock;
} else {
block = BlockManager.getBlockByIndex(index);
}
/** Save selection */
this.selection.save();
SelectionUtils.get()
.removeAllRanges();
block.selected = true;
this.clearCache();
/** close InlineToolbar when we selected any Block */
this.Editor.InlineToolbar.close();
}
/**
* Clear anyBlockSelected cache
*/
public clearCache(): void {
this.anyBlockSelectedCache = null;
}
/**
* Module destruction
* De-registers Shortcut CMD+A
*/
public destroy(): void {
/** Selection shortcut */
Shortcuts.remove(this.Editor.UI.nodes.redactor, 'CMD+A');
}
/**
* First CMD+A selects all input content by native behaviour,
* next CMD+A keypress selects all blocks
*
* @param {KeyboardEvent} event - keyboard event
*/
private handleCommandA(event: KeyboardEvent): void {
this.Editor.RectangleSelection.clearSelection();
/** allow default selection on native inputs */
if ($.isNativeInput(event.target) && !this.readyToBlockSelection) {
this.readyToBlockSelection = true;
return;
}
const workingBlock = this.Editor.BlockManager.getBlock(event.target as HTMLElement);
const inputs = workingBlock.inputs;
/**
* If Block has more than one editable element allow native selection | * Second cmd+a will select whole Block | random_line_split |
|
blockSelection.ts |
*
* @private
*/
private anyBlockSelectedCache: boolean | null = null;
/**
* Sanitizer Config
*
* @returns {SanitizerConfig}
*/
private get sanitizerConfig(): SanitizerConfig {
return {
p: {},
h1: {},
h2: {},
h3: {},
h4: {},
h5: {},
h6: {},
ol: {},
ul: {},
li: {},
br: true,
img: {
src: true,
width: true,
height: true,
},
a: {
href: true,
},
b: {},
i: {},
u: {},
};
}
/**
* Flag that identifies all Blocks selection
*
* @returns {boolean}
*/
public get allBlocksSelected(): boolean {
const { BlockManager } = this.Editor;
return BlockManager.blocks.every((block) => block.selected === true);
}
/**
* Set selected all blocks
*
* @param {boolean} state - state to set
*/
public set allBlocksSelected(state: boolean) {
const { BlockManager } = this.Editor;
BlockManager.blocks.forEach((block) => {
block.selected = state;
});
this.clearCache();
}
/**
* Flag that identifies any Block selection
*
* @returns {boolean}
*/
public get anyBlockSelected(): boolean {
const { BlockManager } = this.Editor;
if (this.anyBlockSelectedCache === null) {
this.anyBlockSelectedCache = BlockManager.blocks.some((block) => block.selected === true);
}
return this.anyBlockSelectedCache;
}
/**
* Return selected Blocks array
*
* @returns {Block[]}
*/
public get selectedBlocks(): Block[] {
return this.Editor.BlockManager.blocks.filter((block: Block) => block.selected);
}
/**
* Flag used to define block selection
* First CMD+A defines it as true and then second CMD+A selects all Blocks
*
* @type {boolean}
*/
private needToSelectAll = false;
/**
* Flag used to define native input selection
* In this case we allow double CMD+A to select Block
*
* @type {boolean}
*/
private nativeInputSelected = false;
/**
* Flag identifies any input selection
* That means we can select whole Block
*
* @type {boolean}
*/
private readyToBlockSelection = false;
/**
* SelectionUtils instance
*
* @type {SelectionUtils}
*/
private selection: SelectionUtils;
/**
* Module Preparation
* Registers Shortcuts CMD+A and CMD+C
* to select all and copy them
*/
public prepare(): void {
this.selection = new SelectionUtils();
/**
* CMD/CTRL+A selection shortcut
*/
Shortcuts.add({
name: 'CMD+A',
handler: (event) => {
const { BlockManager, ReadOnly } = this.Editor;
/**
* We use Editor's Block selection on CMD+A ShortCut instead of Browsers
*/
if (ReadOnly.isEnabled) {
event.preventDefault();
this.selectAllBlocks();
return;
}
/**
         * When one page consists of two or more EditorJS instances,
         * the Shortcut module tries to handle all events.
         * That's why the Editor's selection works inside the target Editor, but
         * an error occurs for the other instances because there is nothing to select.
*
* Prevent such actions if focus is not inside the Editor
*/
if (!BlockManager.currentBlock) {
return;
}
this.handleCommandA(event);
},
on: this.Editor.UI.nodes.redactor,
});
}
/**
* Toggle read-only state
*
* - Remove all ranges
* - Unselect all Blocks
*/
public toggleReadOnly(): void {
SelectionUtils.get()
.removeAllRanges();
this.allBlocksSelected = false;
}
/**
* Remove selection of Block
*
* @param {number?} index - Block index according to the BlockManager's indexes
*/
public unSelectBlockByIndex(index?): void {
const { BlockManager } = this.Editor;
let block;
if (isNaN(index)) {
block = BlockManager.currentBlock;
} else {
block = BlockManager.getBlockByIndex(index);
}
block.selected = false;
this.clearCache();
}
/**
* Clear selection from Blocks
*
* @param {Event} reason - event caused clear of selection
* @param {boolean} restoreSelection - if true, restore saved selection
*/
public clearSelection(reason?: Event, restoreSelection = false): void {
const { BlockManager, Caret, RectangleSelection } = this.Editor;
this.needToSelectAll = false;
this.nativeInputSelected = false;
this.readyToBlockSelection = false;
const isKeyboard = reason && (reason instanceof KeyboardEvent);
const isPrintableKey = isKeyboard && _.isPrintableKey((reason as KeyboardEvent).keyCode);
/**
     * If the selection was cleared by a printable key press and any block is selected,
     * remove the selected blocks and insert the pressed key
*/
if (this.anyBlockSelected && isKeyboard && isPrintableKey && !SelectionUtils.isSelectionExists) {
const indexToInsert = BlockManager.removeSelectedBlocks();
BlockManager.insertDefaultBlockAtIndex(indexToInsert, true);
Caret.setToBlock(BlockManager.currentBlock);
_.delay(() => {
const eventKey = (reason as KeyboardEvent).key;
/**
* If event.key length >1 that means key is special (e.g. Enter or Dead or Unidentified).
* So we use empty string
*
* @see https://developer.mozilla.org/ru/docs/Web/API/KeyboardEvent/key
*/
Caret.insertContentAtCaretPosition(eventKey.length > 1 ? '' : eventKey);
// eslint-disable-next-line @typescript-eslint/no-magic-numbers
}, 20)();
}
this.Editor.CrossBlockSelection.clear(reason);
if (!this.anyBlockSelected || RectangleSelection.isRectActivated()) {
this.Editor.RectangleSelection.clearSelection();
return;
}
/**
* Restore selection when Block is already selected
* but someone tries to write something.
*/
if (restoreSelection) {
this.selection.restore();
}
/** Now all blocks cleared */
this.allBlocksSelected = false;
}
/**
* Reduce each Block and copy its content
*
* @param {ClipboardEvent} e - copy/cut event
* @returns {Promise<void>}
*/
public copySelectedBlocks(e: ClipboardEvent): Promise<void> | .join('\n\n');
const textHTML = fakeClipboard.innerHTML;
e.clipboardData.setData('text/plain', textPlain);
e.clipboardData.setData('text/html', textHTML);
return Promise
.all(this.selectedBlocks.map((block) => block.save()))
.then(savedData => {
try {
e.clipboardData.setData(this.Editor.Paste.MIME_TYPE, JSON.stringify(savedData));
} catch (err) {
// In Firefox we can't set data in async function
}
});
}
/**
* select Block
*
* @param {number?} index - Block index according to the BlockManager's indexes
*/
public selectBlockByIndex(index?): void {
const { BlockManager } = this.Editor;
/**
* Remove previous focused Block's state
*/
BlockManager.clearFocused();
let block;
if (isNaN(index)) {
block = BlockManager.currentBlock;
} else {
block = BlockManager.getBlockByIndex(index);
}
/** Save selection */
this.selection.save();
SelectionUtils.get()
.removeAllRanges();
block.selected = true;
this.clearCache();
/** close InlineToolbar when we selected any Block */
this.Editor.InlineToolbar.close();
}
/**
* Clear anyBlockSelected cache
*/
public clearCache(): void {
this.anyBlockSelectedCache = null;
}
/**
* Module destruction
* De-registers Shortcut CMD+A
*/
public destroy(): void {
/** Selection shortcut */
Shortcuts.remove(this.Editor.UI.nodes.redactor, 'CMD+A');
}
/**
* First CMD+A selects all input content by native behaviour,
* next CMD+A keypress selects all blocks
*
* @param {KeyboardEvent} event - keyboard event
*/
private handleCommandA(event: KeyboardEvent): void {
this.Editor.RectangleSelection.clearSelection();
/** allow default selection on native inputs */
if ($.isNativeInput(event.target) && !this.readyToBlockSelection) {
this.readyToBlockSelection = true;
return;
}
const workingBlock = this.Editor.BlockManager.getBlock(event | {
/**
* Prevent default copy
*/
e.preventDefault();
const fakeClipboard = $.make('div');
this.selectedBlocks.forEach((block) => {
/**
* Make <p> tag that holds clean HTML
*/
const cleanHTML = clean(block.holder.innerHTML, this.sanitizerConfig);
const fragment = $.make('p');
fragment.innerHTML = cleanHTML;
fakeClipboard.appendChild(fragment);
});
const textPlain = Array.from(fakeClipboard.childNodes).map((node) => node.textContent) | identifier_body |
blockSelection.ts | br: true,
img: {
src: true,
width: true,
height: true,
},
a: {
href: true,
},
b: {},
i: {},
u: {},
};
}
/**
* Flag that identifies all Blocks selection
*
* @returns {boolean}
*/
public get allBlocksSelected(): boolean {
const { BlockManager } = this.Editor;
return BlockManager.blocks.every((block) => block.selected === true);
}
/**
* Set selected all blocks
*
* @param {boolean} state - state to set
*/
public set allBlocksSelected(state: boolean) {
const { BlockManager } = this.Editor;
BlockManager.blocks.forEach((block) => {
block.selected = state;
});
this.clearCache();
}
/**
* Flag that identifies any Block selection
*
* @returns {boolean}
*/
public get anyBlockSelected(): boolean {
const { BlockManager } = this.Editor;
if (this.anyBlockSelectedCache === null) {
this.anyBlockSelectedCache = BlockManager.blocks.some((block) => block.selected === true);
}
return this.anyBlockSelectedCache;
}
/**
* Return selected Blocks array
*
* @returns {Block[]}
*/
public get selectedBlocks(): Block[] {
return this.Editor.BlockManager.blocks.filter((block: Block) => block.selected);
}
/**
* Flag used to define block selection
* First CMD+A defines it as true and then second CMD+A selects all Blocks
*
* @type {boolean}
*/
private needToSelectAll = false;
/**
* Flag used to define native input selection
* In this case we allow double CMD+A to select Block
*
* @type {boolean}
*/
private nativeInputSelected = false;
/**
* Flag identifies any input selection
* That means we can select whole Block
*
* @type {boolean}
*/
private readyToBlockSelection = false;
/**
* SelectionUtils instance
*
* @type {SelectionUtils}
*/
private selection: SelectionUtils;
/**
* Module Preparation
* Registers Shortcuts CMD+A and CMD+C
* to select all and copy them
*/
public prepare(): void {
this.selection = new SelectionUtils();
/**
* CMD/CTRL+A selection shortcut
*/
Shortcuts.add({
name: 'CMD+A',
handler: (event) => {
const { BlockManager, ReadOnly } = this.Editor;
/**
* We use Editor's Block selection on CMD+A ShortCut instead of Browsers
*/
if (ReadOnly.isEnabled) {
event.preventDefault();
this.selectAllBlocks();
return;
}
/**
         * When one page consists of two or more EditorJS instances,
         * the Shortcut module tries to handle all events.
         * That's why the Editor's selection works inside the target Editor, but
         * an error occurs for the other instances because there is nothing to select.
*
* Prevent such actions if focus is not inside the Editor
*/
if (!BlockManager.currentBlock) {
return;
}
this.handleCommandA(event);
},
on: this.Editor.UI.nodes.redactor,
});
}
/**
* Toggle read-only state
*
* - Remove all ranges
* - Unselect all Blocks
*/
public toggleReadOnly(): void {
SelectionUtils.get()
.removeAllRanges();
this.allBlocksSelected = false;
}
/**
* Remove selection of Block
*
* @param {number?} index - Block index according to the BlockManager's indexes
*/
public unSelectBlockByIndex(index?): void {
const { BlockManager } = this.Editor;
let block;
if (isNaN(index)) {
block = BlockManager.currentBlock;
} else {
block = BlockManager.getBlockByIndex(index);
}
block.selected = false;
this.clearCache();
}
/**
* Clear selection from Blocks
*
* @param {Event} reason - event caused clear of selection
* @param {boolean} restoreSelection - if true, restore saved selection
*/
public clearSelection(reason?: Event, restoreSelection = false): void {
const { BlockManager, Caret, RectangleSelection } = this.Editor;
this.needToSelectAll = false;
this.nativeInputSelected = false;
this.readyToBlockSelection = false;
const isKeyboard = reason && (reason instanceof KeyboardEvent);
const isPrintableKey = isKeyboard && _.isPrintableKey((reason as KeyboardEvent).keyCode);
/**
     * If the selection was cleared by a printable key press and any block is selected,
     * remove the selected blocks and insert the pressed key
*/
if (this.anyBlockSelected && isKeyboard && isPrintableKey && !SelectionUtils.isSelectionExists) {
const indexToInsert = BlockManager.removeSelectedBlocks();
BlockManager.insertDefaultBlockAtIndex(indexToInsert, true);
Caret.setToBlock(BlockManager.currentBlock);
_.delay(() => {
const eventKey = (reason as KeyboardEvent).key;
/**
* If event.key length >1 that means key is special (e.g. Enter or Dead or Unidentified).
* So we use empty string
*
* @see https://developer.mozilla.org/ru/docs/Web/API/KeyboardEvent/key
*/
Caret.insertContentAtCaretPosition(eventKey.length > 1 ? '' : eventKey);
// eslint-disable-next-line @typescript-eslint/no-magic-numbers
}, 20)();
}
this.Editor.CrossBlockSelection.clear(reason);
if (!this.anyBlockSelected || RectangleSelection.isRectActivated()) {
this.Editor.RectangleSelection.clearSelection();
return;
}
/**
* Restore selection when Block is already selected
* but someone tries to write something.
*/
if (restoreSelection) {
this.selection.restore();
}
/** Now all blocks cleared */
this.allBlocksSelected = false;
}
/**
* Reduce each Block and copy its content
*
* @param {ClipboardEvent} e - copy/cut event
* @returns {Promise<void>}
*/
public copySelectedBlocks(e: ClipboardEvent): Promise<void> {
/**
* Prevent default copy
*/
e.preventDefault();
const fakeClipboard = $.make('div');
this.selectedBlocks.forEach((block) => {
/**
* Make <p> tag that holds clean HTML
*/
const cleanHTML = clean(block.holder.innerHTML, this.sanitizerConfig);
const fragment = $.make('p');
fragment.innerHTML = cleanHTML;
fakeClipboard.appendChild(fragment);
});
const textPlain = Array.from(fakeClipboard.childNodes).map((node) => node.textContent)
.join('\n\n');
const textHTML = fakeClipboard.innerHTML;
e.clipboardData.setData('text/plain', textPlain);
e.clipboardData.setData('text/html', textHTML);
return Promise
.all(this.selectedBlocks.map((block) => block.save()))
.then(savedData => {
try {
e.clipboardData.setData(this.Editor.Paste.MIME_TYPE, JSON.stringify(savedData));
} catch (err) {
// In Firefox we can't set data in async function
}
});
}
/**
* select Block
*
* @param {number?} index - Block index according to the BlockManager's indexes
*/
public selectBlockByIndex(index?): void {
const { BlockManager } = this.Editor;
/**
* Remove previous focused Block's state
*/
BlockManager.clearFocused();
let block;
if (isNaN(index)) {
block = BlockManager.currentBlock;
} else {
block = BlockManager.getBlockByIndex(index);
}
/** Save selection */
this.selection.save();
SelectionUtils.get()
.removeAllRanges();
block.selected = true;
this.clearCache();
/** close InlineToolbar when we selected any Block */
this.Editor.InlineToolbar.close();
}
/**
* Clear anyBlockSelected cache
*/
public clearCache(): void {
this.anyBlockSelectedCache = null;
}
/**
* Module destruction
* De-registers Shortcut CMD+A
*/
public destroy(): void {
/** Selection shortcut */
Shortcuts.remove(this.Editor.UI.nodes.redactor, 'CMD+A');
}
/**
* First CMD+A selects all input content by native behaviour,
* next CMD+A keypress selects all blocks
*
* @param {KeyboardEvent} event - keyboard event
*/
private handleCommandA(event: KeyboardEvent): void {
this.Editor.RectangleSelection.clearSelection();
/** allow default selection on native inputs */
if ($.isNativeInput(event.target) && !this.readyToBlockSelection) {
this.readyToBlockSelection = true;
return;
}
const workingBlock = this.Editor.BlockManager.getBlock(event.target as HTMLElement);
const inputs = workingBlock.inputs;
/**
* If Block has more than one editable element allow native selection
* Second cmd+a will select whole Block
*/
if (inputs.length > 1 && !this.readyToBlockSelection) {
this.readyToBlockSelection = true;
return;
}
if (inputs.length === 1 && !this.needToSelectAll) | {
this.needToSelectAll = true;
return;
} | conditional_block |
|
day13.rs | //! Your notes (your puzzle input) consist of two lines. The first line is your
//! estimate of **the earliest timestamp you could depart on a bus**. The second
//! line lists the bus IDs that are in service according to the shuttle company;
//! entries that show `x` must be out of service, so you decide to ignore them.
//!
//! To save time once you arrive, your goal is to figure out **the earliest bus
//! you can take to the airport**. (There will be exactly one such bus.)
//!
//! For example, suppose you have the following notes:
//!
//! ```
//! 939
//! 7,13,x,x,59,x,31,19
//! ```
//!
//! Here, the earliest timestamp you could depart is `939`, and the bus IDs in
//! service are `7`, `13`, `59`, `31`, and `19`. Near timestamp `939`, these bus
//! IDs depart at the times marked `D`:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 929 . . . . .
//! 930 . . . D .
//! 931 D . . . D
//! 932 . . . . .
//! 933 . . . . .
//! 934 . . . . .
//! 935 . . . . .
//! 936 . D . . .
//! 937 . . . . .
//! 938 D . . . .
//! 939 . . . . .
//! 940 . . . . .
//! 941 . . . . .
//! 942 . . . . .
//! 943 . . . . .
//! 944 . . D . .
//! 945 D . . . .
//! 946 . . . . .
//! 947 . . . . .
//! 948 . . . . .
//! 949 . D . . .
//! ```
//!
//! The earliest bus you could take is bus ID `59`. It doesn't depart until
//! timestamp `944`, so you would need to wait `944 - 939 = 5` minutes before it
//! departs. Multiplying the bus ID by the number of minutes you'd need to wait
//! gives `295`.
//!
//! **What is the ID of the earliest bus you can take to the airport multiplied
//! by the number of minutes you'll need to wait for that bus?**
//!
//! ## --- Part Two ---
//!
//! The shuttle company is running a contest: one gold coin for anyone that can
//! find the earliest timestamp such that the first bus ID departs at that time
//! and each subsequent listed bus ID departs at that subsequent minute. (The
//! first line in your input is no longer relevant.)
//!
//! For example, suppose you have the same list of bus IDs as above:
//!
//! `7,13,x,x,59,x,31,19`
//!
//! An `x` in the schedule means there are no constraints on what bus IDs must
//! depart at that time.
//!
//! This means you are looking for the earliest timestamp (called `t`) such
//! that:
//!
//! - Bus ID `7` departs at timestamp `t`.
//! - Bus ID `13` departs one minute after timestamp `t`.
//! - There are no requirements or restrictions on departures at two or three
//! minutes after timestamp `t`.
//! - Bus ID `59` departs four minutes after timestamp `t`.
//! - There are no requirements or restrictions on departures at five minutes
//! after timestamp `t`.
//! - Bus ID `31` departs six minutes after timestamp `t`.
//! - Bus ID `19` departs seven minutes after timestamp `t`.
//!
//! The only bus departures that matter are the listed bus IDs at their specific
//! offsets from `t`. Those bus IDs can depart at other times, and other bus IDs
//! can depart at those times. For example, in the list above, because bus ID
//! `19` must depart seven minutes after the timestamp at which bus ID `7`
//! departs, bus ID `7` will always **also** be departing with bus ID `19` at
//! seven minutes after timestamp `t`.
//!
//! In this example, the earliest timestamp at which this occurs is
//! **`1068781`**:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 1068773 . . . . .
//! 1068774 D . . . .
//! 1068775 . . . . .
//! 1068776 . . . . .
//! 1068777 . . . . .
//! 1068778 . . . . .
//! 1068779 . . . . .
//! 1068780 . . . . .
//! 1068781 D . . . .
//! 1068782 . D . . .
//! 1068783 . . . . .
//! 1068784 . . . . .
//! 1068785 . . D . .
//! 1068786 . . . . .
//! 1068787 . . . D .
//! 1068788 D . . . D
//! 1068789 . . . . .
//! 1068790 . . . . .
//! 1068791 . . . . .
//! 1068792 . . . . .
//! 1068793 . . . . .
//! 1068794 . . . . .
//! 1068795 D D . . .
//! 1068796 . . . . .
//! 1068797 . . . . .
//! ```
//!
//! In the above example, bus ID `7` departs at timestamp `1068788` (seven
//! minutes after `t`). This is fine; the only requirement on that minute is
//! that bus ID `19` departs then, and it does.
//!
//! Here are some other examples:
//!
//! - The earliest timestamp that matches the list `17,x,13,19` is `3417`.
//! - `67,7,59,61` first occurs at timestamp `754018`.
//! - `67,x,7,59,61` first occurs at timestamp `779210`.
//! - `67,7,x,59,61` first occurs at timestamp `1261476`.
//! - `1789,37,47,1889` first occurs at timestamp `1202161486`.
//!
//! However, with so many bus IDs in your list, surely the actual earliest
//! timestamp will be larger than `100000000000000`!
//!
//! **What is the earliest timestamp such that all of the listed bus IDs depart
//! at offsets matching their positions in the list?**
use std::{env, fs};
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
struct Bus {
serial: usize,
id: usize,
}
fn | (input: &str) -> (usize, Vec<Bus>) {
let mut it = input.lines();
let first_line = it.next().unwrap();
let second_line = it.next().unwrap();
let depart_time = first_line.parse::<usize>().unwrap();
let shuttles = second_line
.split(',')
.enumerate()
.filter_map(|(serial, x)| {
if let Ok(id) = x.parse::<usize>() {
Some(Bus { serial, id })
} else {
None
}
})
. | parse_input | identifier_name |
day13.rs | . . . .
//! 930 . . . D .
//! 931 D . . . D
//! 932 . . . . .
//! 933 . . . . .
//! 934 . . . . .
//! 935 . . . . .
//! 936 . D . . .
//! 937 . . . . .
//! 938 D . . . .
//! 939 . . . . .
//! 940 . . . . .
//! 941 . . . . .
//! 942 . . . . .
//! 943 . . . . .
//! 944 . . D . .
//! 945 D . . . .
//! 946 . . . . .
//! 947 . . . . .
//! 948 . . . . .
//! 949 . D . . .
//! ```
//!
//! The earliest bus you could take is bus ID `59`. It doesn't depart until
//! timestamp `944`, so you would need to wait `944 - 939 = 5` minutes before it
//! departs. Multiplying the bus ID by the number of minutes you'd need to wait
//! gives `295`.
//!
//! **What is the ID of the earliest bus you can take to the airport multiplied
//! by the number of minutes you'll need to wait for that bus?**
//!
//! ## --- Part Two ---
//!
//! The shuttle company is running a contest: one gold coin for anyone that can
//! find the earliest timestamp such that the first bus ID departs at that time
//! and each subsequent listed bus ID departs at that subsequent minute. (The
//! first line in your input is no longer relevant.)
//!
//! For example, suppose you have the same list of bus IDs as above:
//!
//! `7,13,x,x,59,x,31,19`
//!
//! An `x` in the schedule means there are no constraints on what bus IDs must
//! depart at that time.
//!
//! This means you are looking for the earliest timestamp (called `t`) such
//! that:
//!
//! - Bus ID `7` departs at timestamp `t`.
//! - Bus ID `13` departs one minute after timestamp `t`.
//! - There are no requirements or restrictions on departures at two or three
//! minutes after timestamp `t`.
//! - Bus ID `59` departs four minutes after timestamp `t`.
//! - There are no requirements or restrictions on departures at five minutes
//! after timestamp `t`.
//! - Bus ID `31` departs six minutes after timestamp `t`.
//! - Bus ID `19` departs seven minutes after timestamp `t`.
//!
//! The only bus departures that matter are the listed bus IDs at their specific
//! offsets from `t`. Those bus IDs can depart at other times, and other bus IDs
//! can depart at those times. For example, in the list above, because bus ID
//! `19` must depart seven minutes after the timestamp at which bus ID `7`
//! departs, bus ID `7` will always **also** be departing with bus ID `19` at
//! seven minutes after timestamp `t`.
//!
//! In this example, the earliest timestamp at which this occurs is
//! **`1068781`**:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 1068773 . . . . .
//! 1068774 D . . . .
//! 1068775 . . . . .
//! 1068776 . . . . .
//! 1068777 . . . . .
//! 1068778 . . . . .
//! 1068779 . . . . .
//! 1068780 . . . . .
//! 1068781 D . . . .
//! 1068782 . D . . .
//! 1068783 . . . . .
//! 1068784 . . . . .
//! 1068785 . . D . .
//! 1068786 . . . . .
//! 1068787 . . . D .
//! 1068788 D . . . D
//! 1068789 . . . . .
//! 1068790 . . . . .
//! 1068791 . . . . .
//! 1068792 . . . . .
//! 1068793 . . . . .
//! 1068794 . . . . .
//! 1068795 D D . . .
//! 1068796 . . . . .
//! 1068797 . . . . .
//! ```
//!
//! In the above example, bus ID `7` departs at timestamp `1068788` (seven
//! minutes after `t`). This is fine; the only requirement on that minute is
//! that bus ID `19` departs then, and it does.
//!
//! Here are some other examples:
//!
//! - The earliest timestamp that matches the list `17,x,13,19` is `3417`.
//! - `67,7,59,61` first occurs at timestamp `754018`.
//! - `67,x,7,59,61` first occurs at timestamp `779210`.
//! - `67,7,x,59,61` first occurs at timestamp `1261476`.
//! - `1789,37,47,1889` first occurs at timestamp `1202161486`.
//!
//! However, with so many bus IDs in your list, surely the actual earliest
//! timestamp will be larger than `100000000000000`!
//!
//! **What is the earliest timestamp such that all of the listed bus IDs depart
//! at offsets matching their positions in the list?**
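// Note: requiring bus `id` to depart `offset` minutes after `t` is the congruence
// `t + offset ≡ 0 (mod id)`, so Part Two is a Chinese-Remainder-Theorem style
// problem; the IDs in the examples above are all prime, so the moduli are coprime.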
use std::{env, fs};
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
struct Bus {
serial: usize,
id: usize,
}
fn parse_input(input: &str) -> (usize, Vec<Bus>) {
let mut it = input.lines();
let first_line = it.next().unwrap();
let second_line = it.next().unwrap();
let depart_time = first_line.parse::<usize>().unwrap();
let shuttles = second_line
.split(',')
.enumerate()
.filter_map(|(serial, x)| {
if let Ok(id) = x.parse::<usize>() {
Some(Bus { serial, id })
} else {
None
}
})
.collect::<Vec<Bus>>();
(depart_time, shuttles)
}
fn calc_wait_time(depart_time: usize, shuttles: &[Bus]) -> usize {
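// For each bus, `id - depart_time % id` is the wait until its next departure at or
// after `depart_time`; note this yields `id` rather than 0 when `depart_time` is an
// exact multiple of `id`, a case the puzzle input is assumed not to hit.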
let buses: Vec<usize> = shuttles.iter().map(|x| x.id).collect();
let (idx, min_wait) = buses
.iter()
.map(|&x| x - depart_time % x)
.enumerate()
.min_by_key(|&x| x.1)
.unwrap();
let bus_id = buses[idx];
bus_id * min_wait
}
/// Implements the approach described at <https://github.com/mstksg/advent-of-code-2020/blob/master/reflections.md#day-13>.
fn clac_contest(shuttles: &[Bus]) -> usize {
let mut earliest = 0;
let mut period = 1;
for Bus { serial, id } in shuttles {
let mut n = 0;
loop {
let candidate = earliest + period * n;
if (candidate + serial) % id == 0 {
earliest = candidate;
period *= id;
break;
} else | {
n += 1;
} | conditional_block |
|
day13.rs | //!
//! Your notes (your puzzle input) consist of two lines. The first line is your
//! estimate of **the earliest timestamp you could depart on a bus**. The second
//! line lists the bus IDs that are in service according to the shuttle company;
//! entries that show `x` must be out of service, so you decide to ignore them.
//!
//! To save time once you arrive, your goal is to figure out **the earliest bus
//! you can take to the airport**. (There will be exactly one such bus.)
//!
//! For example, suppose you have the following notes:
//!
//! ```
//! 939
//! 7,13,x,x,59,x,31,19
//! ``` | //!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 929 . . . . .
//! 930 . . . D .
//! 931 D . . . D
//! 932 . . . . .
//! 933 . . . . .
//! 934 . . . . .
//! 935 . . . . .
//! 936 . D . . .
//! 937 . . . . .
//! 938 D . . . .
//! 939 . . . . .
//! 940 . . . . .
//! 941 . . . . .
//! 942 . . . . .
//! 943 . . . . .
//! 944 . . D . .
//! 945 D . . . .
//! 946 . . . . .
//! 947 . . . . .
//! 948 . . . . .
//! 949 . D . . .
//! ```
//!
//! The earliest bus you could take is bus ID `59`. It doesn't depart until
//! timestamp `944`, so you would need to wait `944 - 939 = 5` minutes before it
//! departs. Multiplying the bus ID by the number of minutes you'd need to wait
//! gives `295`.
//!
//! **What is the ID of the earliest bus you can take to the airport multiplied
//! by the number of minutes you'll need to wait for that bus?**
//!
//! ## --- Part Two ---
//!
//! The shuttle company is running a contest: one gold coin for anyone that can
//! find the earliest timestamp such that the first bus ID departs at that time
//! and each subsequent listed bus ID departs at that subsequent minute. (The
//! first line in your input is no longer relevant.)
//!
//! For example, suppose you have the same list of bus IDs as above:
//!
//! `7,13,x,x,59,x,31,19`
//!
//! An `x` in the schedule means there are no constraints on what bus IDs must
//! depart at that time.
//!
//! This means you are looking for the earliest timestamp (called `t`) such
//! that:
//!
//! - Bus ID `7` departs at timestamp `t`.
//! - Bus ID `13` departs one minute after timestamp `t`.
//! - There are no requirements or restrictions on departures at two or three
//! minutes after timestamp `t`.
//! - Bus ID `59` departs four minutes after timestamp `t`.
//! - There are no requirements or restrictions on departures at five minutes
//! after timestamp `t`.
//! - Bus ID `31` departs six minutes after timestamp `t`.
//! - Bus ID `19` departs seven minutes after timestamp `t`.
//!
//! The only bus departures that matter are the listed bus IDs at their specific
//! offsets from `t`. Those bus IDs can depart at other times, and other bus IDs
//! can depart at those times. For example, in the list above, because bus ID
//! `19` must depart seven minutes after the timestamp at which bus ID `7`
//! departs, bus ID `7` will always **also** be departing with bus ID `19` at
//! seven minutes after timestamp `t`.
//!
//! In this example, the earliest timestamp at which this occurs is
//! **`1068781`**:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 1068773 . . . . .
//! 1068774 D . . . .
//! 1068775 . . . . .
//! 1068776 . . . . .
//! 1068777 . . . . .
//! 1068778 . . . . .
//! 1068779 . . . . .
//! 1068780 . . . . .
//! 1068781 D . . . .
//! 1068782 . D . . .
//! 1068783 . . . . .
//! 1068784 . . . . .
//! 1068785 . . D . .
//! 1068786 . . . . .
//! 1068787 . . . D .
//! 1068788 D . . . D
//! 1068789 . . . . .
//! 1068790 . . . . .
//! 1068791 . . . . .
//! 1068792 . . . . .
//! 1068793 . . . . .
//! 1068794 . . . . .
//! 1068795 D D . . .
//! 1068796 . . . . .
//! 1068797 . . . . .
//! ```
//!
//! In the above example, bus ID `7` departs at timestamp `1068788` (seven
//! minutes after `t`). This is fine; the only requirement on that minute is
//! that bus ID `19` departs then, and it does.
//!
//! Here are some other examples:
//!
//! - The earliest timestamp that matches the list `17,x,13,19` is `3417`.
//! - `67,7,59,61` first occurs at timestamp `754018`.
//! - `67,x,7,59,61` first occurs at timestamp `779210`.
//! - `67,7,x,59,61` first occurs at timestamp `1261476`.
//! - `1789,37,47,1889` first occurs at timestamp `1202161486`.
//!
//! However, with so many bus IDs in your list, surely the actual earliest
//! timestamp will be larger than `100000000000000`!
//!
//! **What is the earliest timestamp such that all of the listed bus IDs depart
//! at offsets matching their positions in the list?**
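// Quick check of the first example above: for `17,x,13,19`, t = 3417 works because
// 3417 = 17 * 201, 3417 + 2 = 3419 = 13 * 263, and 3417 + 3 = 3420 = 19 * 180.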
use std::{env, fs};
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
struct Bus {
serial: usize,
id: usize,
}
fn parse_input(input: &str) -> (usize, Vec<Bus>) {
let mut it = input.lines();
let first_line = it.next().unwrap();
let second_line = it.next().unwrap();
let depart_time = first_line.parse::<usize>().unwrap();
let shuttles = second_line
.split(',')
.enumerate()
.filter_map(|(serial, x)| {
if let Ok(id) = x.parse::<usize>() {
Some(Bus { serial, id })
} else {
None
}
})
.collect | //!
//! Here, the earliest timestamp you could depart is `939`, and the bus IDs in
//! service are `7`, `13`, `59`, `31`, and `19`. Near timestamp `939`, these bus
//! IDs depart at the times marked `D`: | random_line_split |
day13.rs | for anyone that can
//! find the earliest timestamp such that the first bus ID departs at that time
//! and each subsequent listed bus ID departs at that subsequent minute. (The
//! first line in your input is no longer relevant.)
//!
//! For example, suppose you have the same list of bus IDs as above:
//!
//! `7,13,x,x,59,x,31,19`
//!
//! An `x` in the schedule means there are no constraints on what bus IDs must
//! depart at that time.
//!
//! This means you are looking for the earliest timestamp (called `t`) such
//! that:
//!
//! - Bus ID `7` departs at timestamp `t`.
//! - Bus ID `13` departs one minute after timestamp `t`.
//! - There are no requirements or restrictions on departures at two or three
//! minutes after timestamp `t`.
//! - Bus ID `59` departs four minutes after timestamp `t`.
//! - There are no requirements or restrictions on departures at five minutes
//! after timestamp `t`.
//! - Bus ID `31` departs six minutes after timestamp `t`.
//! - Bus ID `19` departs seven minutes after timestamp `t`.
//!
//! The only bus departures that matter are the listed bus IDs at their specific
//! offsets from `t`. Those bus IDs can depart at other times, and other bus IDs
//! can depart at those times. For example, in the list above, because bus ID
//! `19` must depart seven minutes after the timestamp at which bus ID `7`
//! departs, bus ID `7` will always **also** be departing with bus ID `19` at
//! seven minutes after timestamp `t`.
//!
//! In this example, the earliest timestamp at which this occurs is
//! **`1068781`**:
//!
//! ```
//! time bus 7 bus 13 bus 59 bus 31 bus 19
//! 1068773 . . . . .
//! 1068774 D . . . .
//! 1068775 . . . . .
//! 1068776 . . . . .
//! 1068777 . . . . .
//! 1068778 . . . . .
//! 1068779 . . . . .
//! 1068780 . . . . .
//! 1068781 D . . . .
//! 1068782 . D . . .
//! 1068783 . . . . .
//! 1068784 . . . . .
//! 1068785 . . D . .
//! 1068786 . . . . .
//! 1068787 . . . D .
//! 1068788 D . . . D
//! 1068789 . . . . .
//! 1068790 . . . . .
//! 1068791 . . . . .
//! 1068792 . . . . .
//! 1068793 . . . . .
//! 1068794 . . . . .
//! 1068795 D D . . .
//! 1068796 . . . . .
//! 1068797 . . . . .
//! ```
//!
//! In the above example, bus ID `7` departs at timestamp `1068788` (seven
//! minutes after `t`). This is fine; the only requirement on that minute is
//! that bus ID `19` departs then, and it does.
//!
//! Here are some other examples:
//!
//! - The earliest timestamp that matches the list `17,x,13,19` is `3417`.
//! - `67,7,59,61` first occurs at timestamp `754018`.
//! - `67,x,7,59,61` first occurs at timestamp `779210`.
//! - `67,7,x,59,61` first occurs at timestamp `1261476`.
//! - `1789,37,47,1889` first occurs at timestamp `1202161486`.
//!
//! However, with so many bus IDs in your list, surely the actual earliest
//! timestamp will be larger than `100000000000000`!
//!
//! **What is the earliest timestamp such that all of the listed bus IDs depart
//! at offsets matching their positions in the list?**
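// Sanity check against the worked example: 1068781 = 7 * 152683 and
// 1068782 = 13 * 82214, matching the first two constraints; with pairwise
// coprime IDs the earliest solution is unique modulo the product of all IDs.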
use std::{env, fs};
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
struct Bus {
serial: usize,
id: usize,
}
fn parse_input(input: &str) -> (usize, Vec<Bus>) {
let mut it = input.lines();
let first_line = it.next().unwrap();
let second_line = it.next().unwrap();
let depart_time = first_line.parse::<usize>().unwrap();
let shuttles = second_line
.split(',')
.enumerate()
.filter_map(|(serial, x)| {
if let Ok(id) = x.parse::<usize>() {
Some(Bus { serial, id })
} else {
None
}
})
.collect::<Vec<Bus>>();
(depart_time, shuttles)
}
fn calc_wait_time(depart_time: usize, shuttles: &[Bus]) -> usize {
let buses: Vec<usize> = shuttles.iter().map(|x| x.id).collect();
let (idx, min_wait) = buses
.iter()
.map(|&x| x - depart_time % x)
.enumerate()
.min_by_key(|&x| x.1)
.unwrap();
let bus_id = buses[idx];
bus_id * min_wait
}
/// Implements the approach described at <https://github.com/mstksg/advent-of-code-2020/blob/master/reflections.md#day-13>.
fn clac_contest(shuttles: &[Bus]) -> usize {
let mut earliest = 0;
let mut period = 1;
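// Invariant: `earliest` satisfies every constraint processed so far and `period` is
// the product of the IDs seen so far (assumed pairwise coprime), so only timestamps
// of the form `earliest + n * period` can still satisfy all of them.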
for Bus { serial, id } in shuttles {
let mut n = 0;
loop {
let candidate = earliest + period * n;
if (candidate + serial) % id == 0 {
earliest = candidate;
period *= id;
break;
} else {
n += 1;
}
}
}
earliest
}
fn main() -> Result<(), &'static str> {
let args: Vec<String> = env::args().collect();
if args.len() < 2 {
return Err("not enough arguments");
}
let filename = &args[1];
println!("Load input file {}.", filename);
let input = fs::read_to_string(filename).expect("Something went wrong reading the file.");
let (depart_time, shuttles) = parse_input(&input);
let prod = calc_wait_time(depart_time, &shuttles);
println!(
"The ID of the earliest bus multiplied by the wait minutes is {}",
prod
);
let earliest = clac_contest(&shuttles);
println!(
"The earliest timestamp listed bus IDs depart match with offsets is {}",
earliest
);
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
const TEST_ARRAY: [Bus; 5] = [
Bus { serial: 0, id: 7 },
Bus { serial: 1, id: 13 },
Bus { serial: 4, id: 59 },
Bus { serial: 6, id: 31 },
Bus { serial: 7, id: 19 },
];
#[test]
fn test_parse_input() {
let input = "939
7,13,x,x,59,x,31,19
";
let (depart_time, shuttles) = parse_input(input);
assert_eq!(depart_time, 939);
assert_eq!(shuttles, TEST_ARRAY.to_vec());
}
#[test]
fn test_wait_time() {
let prod = calc_wait_time(939, &TEST_ARRAY);
assert_eq!(prod, 295);
}
#[test]
fn test_contest() | {
let earliest = clac_contest(&TEST_ARRAY);
assert_eq!(earliest, 1068781);
} | identifier_body |
|
parser.go | екс"`
Alias string `xml:"Псевдоним"`
CryptoClass string `xml:"КлассСредствЭП"`
PakAddress CenterAddress `xml:"Адрес"`
CryptoVersion string `xml:"СредстваУЦ"`
Keys []Key `xml:"КлючиУполномоченныхЛиц>Ключ"`
}
//Key is a structure containing information about an SKZI (crypto facility) key
type Key struct {
XMLName xml.Name `xml:"Ключ"`
KeyID []byte `xml:"ИдентификаторКлюча"`
Crls []string `xml:"АдресаСписковОтзыва>Адрес"`
Certs []Cert `xml:"Сертификаты>ДанныеСертификата"`
}
//Cert is a structure containing information about a certificate issued by a PAK
type Cert struct {
XMLName xml.Name `xml:"ДанныеСертификата"`
Footprint string `xml:"Отпечаток"`
Issuer string `xml:"КемВыдан"`
Subject string `xml:"КомуВыдан"`
Serial []byte `xml:"СерийныйНомер"`
ValidFrom customTime `xml:"ПериодДействияС"`
ValidThru customTime `xml:"ПериодДействияДо"`
CertData []byte `xml:"Данные"`
}
func createFileIfNotExists(path string) (*os.File, error) {
if _, err := os.Stat(path); os.IsNotExist(err) {
file, err := os.Create(path)
if err != nil {
return nil, err
}
return file, nil
}
file, err := os.OpenFile(path, os.O_RDWR, 0755)
if err != nil {
return nil, err
}
return file, nil
}
func init() {
cfg := &tls.Config{
InsecureSkipVerify: true,
}
http.DefaultClient.Transport = &http.Transport{
TLSClientConfig: cfg,
}
}
/*
func findAndInstallCertByName(ucName string, root *UcRoot, fingerFile *os.File) {
for _, uc := range root.Centers {
if strings.Compare(ucName, strings.TrimSpace(uc.FullName)) == 0 {
uc.installCrls()
uc.installCerts(fingerFile)
} else {
// fmt.Printf("debug: not equal: %s != %s\n", ucName, uc.FullName)
// fmt.Printf("debug: not equal: %x != %x\n", []byte(ucName), []byte(uc.FullName))
}
}
}
*/
func installAllCert(root *UcRoot, fingerFile *os.File) {
for _, uc := range root.Centers {
uc.installCrls()
uc.installCerts(fingerFile)
}
}
/*
func installCertByUcFile(listfile string, root *UcRoot, fingerFile *os.File) {
if file, err := os.Open(listfile); err != nil {
panic("error: Cannor open list of UC CNs")
} else {
bufScanner := bufio.NewScanner(file)
for bufScanner.Scan() {
fmt.Println("----------------------------------------------------------------------------------------------------------------------------")
fmt.Printf("%s\n", bufScanner.Text())
fmt.Println("----------------------------------------------------------------------------------------------------------------------------")
findAndInstallCertByName(bufScanner.Text(), root, fingerFile)
}
}
}
*/
func (center *Center) installCrls() {
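// Each key may list several CRL distribution points; try them in order and stop at
// the first one that downloads and installs successfully.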
for _, pak := range center.PAKs {
for _, key := range pak.Keys {
for _, crl := range key.Crls {
err := installCrlToContainer(&crl)
if err == nil {
fmt.Printf("%-90sinstalled\n", crl)
break
} else {
fmt.Printf("error:%s (try next revocation distributor)\n", err)
}
}
}
}
}
func (center *Center) installCerts(fingerFile *os.File) {
fileContent, err := ioutil.ReadFile(fingerFile.Name())
if err != nil {
fmt.Println("Cannot read file" + err.Error())
}
for _, pak := range center.PAKs {
for _, key := range pak.Keys {
for _, cert := range key.Certs {
if strings.Contains(string(fileContent), cert.Footprint) {
fmt.Println("Сертификат уже есть: SHA1 " + cert.Footprint)
} else {
fmt.Println("Новый сертификат: SHA1 " + cert.Footprint)
if err := installCertToContainer(&cert.CertData); err != nil {
panic(err)
}
fingerFile.WriteString(string(cert.Footprint) + "\n")
fmt.Printf("%-90sinstalled\n", string(cert.Serial))
}
}
}
}
}
func installCertToContainer(cert *[]byte) error {
file, _ := makeTemp(cert)
cmd := exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mRoot", "--file="+file)
if err := cmd.Run(); err != nil {
panic(err)
}
cmd = exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mCA", "--file="+file)
if err := cmd.Run(); err != nil {
panic(err)
}
defer os.Remove(file)
return nil
}
func get_hash(path *string) string {
f, err := os.Open(*path)
if err != nil {
log.Fatal(err)
}
defer f.Close()
hasher := sha256.New()
if _, err := io.Copy(hasher, f); err != nil {
log.Fatal(err)
}
return hex.EncodeToString(hasher.Sum(nil))
}
var CRL_hash_list string
func installCrlToContainer(cert *string) error {
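// Download the CRL, skip it if a file with the same SHA-256 hash was already
// installed during this run (CRL_hash_list), otherwise install it into the mCA
// store; certmgr exit status 45 is reported as an invalid CRL.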
content, err := getCrlByURL(cert)
if err != nil {
return err
}
file, _ := makeTemp(&content)
hs := get_hash(&file)
if strings.Contains(CRL_hash_list, hs) {
//nothing to do
} else {
cmd := exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mCA", "-crl", "--file="+file)
if err := cmd.Run(); err != nil {
if err.Error() == "exit status 45" {
fmt.Printf("error:%3scrl not valid:%s\n", " ", *cert)
return errors.New("CRLNVAL")
}
}
CRL_hash_list = CRL_hash_list + ";" + hs
}
defer os.Remove(file)
return nil
}
func installCrlToContainerLocal(cert *[]byte) error {
file, _ := makeTemp(cert)
cmd := exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mCA", "-crl", "--file="+file)
if err := cmd.Run(); err != nil {
if e | () == "exit status 45" {
fmt.Printf("error:%3scrl not valid:%s\n", " ", *cert)
return errors.New("CRLNVAL")
}
}
defer os.Remove(file)
return nil
}
/* func dumpUcsFingerptints(root *UcRoot, fingerFile *os.File) {
for _, uc := range root.Centers {
for _, pak := range uc.PAKs {
for _, key := range pak.Keys {
for _, cert := range key.Certs {
fingerFile.WriteString(string(cert.Footprint) + "\n")
}
}
}
}
fingerFile.Close()
}
*/
func makeListOfUCS(root *UcRoot) {
ucFile, err := os.Create("./ucs_grabbed.list")
if err != nil {
log.Fatal("Cannot create file :", err)
}
defer ucFile.Close()
for _, uc := range root.Centers {
ucFile.WriteString(uc.FullName + "\n")
}
}
func testUserCert(certPath string) {
cmd := exec.Command("/opt/cprocsp/bin/amd64/cryptcp", "-verify", "-errchain", "-f", certPath, certPath)
//var stderr bytes.Buffer
var stdout bytes.Buffer
//cmd.Stderr = &stderr
cmd.Stdout = &stdout
cmd.Run()
/*if err != nil {
log.Fatal(stderr.String())
return
}*/
fmt.Println(stdout.String())
}
func makeListInstalledCerts(listCaPath *string) {
fmt.Println("--------------- создаем список установленных сертификатов -----------------------")
cmd := exec.Command("/bin/bash", "-c", "/opt/cprocsp/bin/amd64/certmgr -list -store root |grep 'Serial' |cut -d':' -f2| sed -e 's/^[[:space:]]//' > "+*listCaPath)
var stdout bytes.Buffer
cmd.Stdout = &stdout
cmd.Run()
//fmt.Println(stdout.String())
}
func getCrlByURL(crl *string) ([]byte, error) {
supportedProtos := map[string]bool{"http": true, "ftp": false}
if supportedProtos[strings | rr.Error | identifier_name |
parser.go | + cert.Footprint)
if err := installCertToContainer(&cert.CertData); err != nil {
panic(err)
}
fingerFile.WriteString(string(cert.Footprint) + "\n")
fmt.Printf("%-90sinstalled\n", string(cert.Serial))
}
}
}
}
}
func installCertToContainer(cert *[]byte) error {
file, _ := makeTemp(cert)
cmd := exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mRoot", "--file="+file)
if err := cmd.Run(); err != nil {
panic(err)
}
cmd = exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mCA", "--file="+file)
if err := cmd.Run(); err != nil {
panic(err)
}
defer os.Remove(file)
return nil
}
func get_hash(path *string) string {
f, err := os.Open(*path)
if err != nil {
log.Fatal(err)
}
defer f.Close()
hasher := sha256.New()
if _, err := io.Copy(hasher, f); err != nil {
log.Fatal(err)
}
return hex.EncodeToString(hasher.Sum(nil))
}
var CRL_hash_list string
func installCrlToContainer(cert *string) error {
content, err := getCrlByURL(cert)
if err != nil {
return err
}
file, _ := makeTemp(&content)
hs := get_hash(&file)
if strings.Contains(CRL_hash_list, hs) {
//nothing to do
} else {
cmd := exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mCA", "-crl", "--file="+file)
if err := cmd.Run(); err != nil {
if err.Error() == "exit status 45" {
fmt.Printf("error:%3scrl not valid:%s\n", " ", *cert)
return errors.New("CRLNVAL")
}
}
CRL_hash_list = CRL_hash_list + ";" + hs
}
defer os.Remove(file)
return nil
}
func installCrlToContainerLocal(cert *[]byte) error {
file, _ := makeTemp(cert)
cmd := exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mCA", "-crl", "--file="+file)
if err := cmd.Run(); err != nil {
if err.Error() == "exit status 45" {
fmt.Printf("error:%3scrl not valid:%s\n", " ", *cert)
return errors.New("CRLNVAL")
}
}
defer os.Remove(file)
return nil
}
/* func dumpUcsFingerptints(root *UcRoot, fingerFile *os.File) {
for _, uc := range root.Centers {
for _, pak := range uc.PAKs {
for _, key := range pak.Keys {
for _, cert := range key.Certs {
fingerFile.WriteString(string(cert.Footprint) + "\n")
}
}
}
}
fingerFile.Close()
}
*/
func makeListOfUCS(root *UcRoot) {
ucFile, err := os.Create("./ucs_grabbed.list")
if err != nil {
log.Fatal("Cannot create file :", err)
}
defer ucFile.Close()
for _, uc := range root.Centers {
ucFile.WriteString(uc.FullName + "\n")
}
}
func testUserCert(certPath string) {
cmd := exec.Command("/opt/cprocsp/bin/amd64/cryptcp", "-verify", "-errchain", "-f", certPath, certPath)
//var stderr bytes.Buffer
var stdout bytes.Buffer
//cmd.Stderr = &stderr
cmd.Stdout = &stdout
cmd.Run()
/*if err != nil {
log.Fatal(stderr.String())
return
}*/
fmt.Println(stdout.String())
}
func makeListInstalledCerts(listCaPath *string) {
fmt.Println("--------------- создаем список установленных сертификатов -----------------------")
cmd := exec.Command("/bin/bash", "-c", "/opt/cprocsp/bin/amd64/certmgr -list -store root |grep 'Serial' |cut -d':' -f2| sed -e 's/^[[:space:]]//' > "+*listCaPath)
var stdout bytes.Buffer
cmd.Stdout = &stdout
cmd.Run()
//fmt.Println(stdout.String())
}
func getCrlByURL(crl *string) ([]byte, error) {
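// Only plain "http" distribution points are fetched here (anything else, including
// ftp, is rejected as unsupported); the 2-second timeout keeps unreachable CA hosts
// from stalling the whole run.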
supportedProtos := map[string]bool{"http": true, "ftp": false}
if supportedProtos[strings.Split(*crl, ":")[0]] == false {
//loadOverFTP(strings.Split(*crl, ":")[0])
return nil, errors.New("unsupported proto")
}
timeout := time.Duration(2 * time.Second)
client := http.Client{
Timeout: timeout,
}
response, err := client.Get(*crl)
if err != nil {
return nil, err
}
fileContent, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
return fileContent, nil
}
func loadOverFTP(url string) error {
client, err := ftp.Dial(url)
if err != nil {
fmt.Println(err)
return err
}
if err := client.Login("anonymous", "anonimous"); err != nil {
fmt.Println(err)
return err
}
ftpWalk(client)
return nil
}
func ftpWalk(client *ftp.ServerConn) error {
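// Recursively walk the FTP tree from the current directory: file names containing
// "crt" are installed as certificates, names containing "crl" as revocation lists,
// each read with a 10-second deadline.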
currentDir, _ := client.CurrentDir()
entries, err := client.List(currentDir)
if err != nil {
fmt.Println(err)
return err
}
for _, entry := range entries {
if entry.Type == ftp.EntryTypeFolder {
client.ChangeDir(entry.Name)
ftpWalk(client)
} else {
ucFile, err := client.Retr(currentDir + "/" + entry.Name)
if err != nil {
fmt.Println(err)
return err
}
ucFile.SetDeadline(time.Now().Add(time.Second * 10))
buf, err := ioutil.ReadAll(ucFile)
ucFile.Close()
if err != nil {
fmt.Println(err)
return err
}
if strings.Contains(entry.Name, "crt") {
installCertToContainer(&buf)
fmt.Println("[Костыль для Выбора]: CRT ", currentDir, "/", entry.Name, " -> installed")
} else if strings.Contains(entry.Name, "crl") {
installCrlToContainerLocal(&buf)
fmt.Println("[Костыль для Выбора]: CRL ", currentDir, "/", entry.Name, " -> installed")
}
}
}
client.ChangeDirToParent()
return nil
}
func getRosreestrXML(url string) {
response, err := http.Get(url)
if err != nil {
panic("can't download rosreestr XML" + err.Error())
}
fileContent, err := ioutil.ReadAll(response.Body)
if err != nil {
panic("cannot download rosreestr XML")
}
if err := ioutil.WriteFile("./uc.xml", fileContent, 0600); err != nil {
panic("can not save rosreestr XML to uc.xml")
}
}
func makeTemp(bytes *[]byte) (string, error) {
file, err := ioutil.TempFile("/tmp/", "__certParserTmp__")
defer file.Close()
if err != nil {
panic("Cannot create TempFile")
}
if err := ioutil.WriteFile(file.Name(), *bytes, 0600); err != nil {
panic(err)
}
return file.Name(), nil
}
func checkXMLVersion(newRoot *UcRoot, oldRoot *UcRoot) bool {
return newRoot.Version > oldRoot.Version
}
func killOnTimeout(lock *lockfile.Lockfile, timeout int64) {
time.Sleep(time.Minute * time.Duration(timeout))
lock.Unlock()
log.Panic("Чето пощло не так")
}
/*
func detectUCListLocation(list *string) string {
var fileContent []byte
ucListFile := *list
match, _ := regexp.MatchString("http[s]?://[a-zA-Z0-9]*", *list)
if match {
response, err := http.Get(*list)
if err != nil {
log.Panic("Не можем загрузить список УЦ аккредитованных на плщадке" + err.Error())
}
fileContent, err = ioutil.ReadAll(response.Body)
if err != nil {
log.Panic("Загрузили список УЦ аккредитованных на площадке, но не можем его прочитать" + err.Error())
}
os.Remove("/tmp/__certParserTmp__uc_list")
if err := ioutil.WriteFile("/tmp/__certParserTmp__uc_list", fileContent, 0600); err != nil {
log.Panic("Не смо | гли сохранить временный файл со списком УЦ", err.Error())
}
ucListFile = "/tmp/__certParserTmp__uc_list"
*list = ucListFile
return ucListFile
}
*list = ucListFile
return ucListFile
}
*/
func main() {
runtime.GOMAXPROCS(2)
//loadOver | identifier_body |
|
parser.go | екс"`
Alias string `xml:"Псевдоним"`
CryptoClass string `xml:"КлассСредствЭП"`
PakAddress CenterAddress `xml:"Адрес"`
CryptoVersion string `xml:"СредстваУЦ"`
Keys []Key `xml:"КлючиУполномоченныхЛиц>Ключ"`
}
//Key структура содержащая информацию о Ключе СКЗИ
type Key struct {
XMLName xml.Name `xml:"Ключ"`
KeyID []byte `xml:"ИдентификаторКлюча"`
Crls []string `xml:"АдресаСписковОтзыва>Адрес"`
Certs []Cert `xml:"Сертификаты>ДанныеСертификата"`
}
//Cert структура содержащая информацию о Сертифике выданном ПАК
type Cert struct {
XMLName xml.Name `xml:"ДанныеСертификата"`
Footprint string `xml:"Отпечаток"`
Issuer string `xml:"КемВыдан"`
Subject string `xml:"КомуВыдан"`
Serial []byte `xml:"СерийныйНомер"`
ValidFrom customTime `xml:"ПериодДействияС"`
ValidThru customTime `xml:"ПериодДействияДо"`
CertData []byte `xml:"Данные"`
}
func createFileIfNotExists(path string) (*os.File, error) {
if _, err := os.Stat(path); os.IsNotExist(err) {
file, err := os.Create(path)
if err != nil {
return nil, err
}
return file, nil
}
file, err := os.OpenFile(path, os.O_RDWR, 0755)
if err != nil {
return nil, err
}
return file, nil
}
func init() {
cfg := &tls.Config{
InsecureSkipVerify: true,
}
http.DefaultClient.Transport = &http.Transport{
TLSClientConfig: cfg,
}
}
/*
func findAndInstallCertByName(ucName string, root *UcRoot, fingerFile *os.File) {
for _, uc := range root.Centers {
if strings.Compare(ucName, strings.TrimSpace(uc.FullName)) == 0 {
uc.installCrls()
uc.installCerts(fingerFile)
} else {
// fmt.Printf("debug: not equal: %s != %s\n", ucName, uc.FullName)
// fmt.Printf("debug: not equal: %x != %x\n", []byte(ucName), []byte(uc.FullName))
}
}
}
*/
func installAllCert(root *UcRoot, fingerFile *os.File) {
for _, uc := range root.Centers {
uc.installCrls()
uc.installCerts(fingerFile)
}
}
/*
func installCertByUcFile(listfile string, root *UcRoot, fingerFile *os.File) {
if file, err := os.Open(listfile); err != nil {
panic("error: Cannor open list of UC CNs")
} else {
bufScanner := bufio.NewScanner(file)
for bufScanner.Scan() {
fmt.Println("----------------------------------------------------------------------------------------------------------------------------")
fmt.Printf("%s\n", bufScanner.Text())
fmt.Println("----------------------------------------------------------------------------------------------------------------------------")
findAndInstallCertByName(bufScanner.Text(), root, fingerFile)
}
}
}
*/
func (center *Center) installCrls() {
for _, pak := range center.PAKs {
for _, key := range pak.Keys {
for _, crl := range key.Crls {
err := installCrlToContainer(&crl)
if err == nil {
fmt.Printf("%-90sinstalled\n", crl)
break
} else {
fmt.Printf("error:%s (try next revocation distributor)\n", err)
}
}
}
}
}
func (center *Center) installCerts(fingerFile *os.File) {
fileContent, err := ioutil.ReadFile(fingerFile.Name())
if err != nil {
fmt.Println("Cannot read file" + err.Error())
}
for _, pak := range center.PAKs {
for _, key := range pak.Keys {
for _, cert := range key.Certs {
if strings.Contains(string(fileContent), cert.Footprint) {
fmt.Println("Сертификат уже есть: SHA1 " + cert.Footprint)
} else {
fmt.Println("Новый сертификат: SHA1 " + cert.Footprint)
if err := installCertToContainer(&cert.CertData); err != nil {
panic(err)
}
fingerFile.WriteString(string(cert.Footprint) + "\n")
fmt.Printf("%-90sinstalled\n", string(cert.Serial))
} |
}
func installCertToContainer(cert *[]byte) error {
file, _ := makeTemp(cert)
cmd := exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mRoot", "--file="+file)
if err := cmd.Run(); err != nil {
panic(err)
}
cmd = exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mCA", "--file="+file)
if err := cmd.Run(); err != nil {
panic(err)
}
defer os.Remove(file)
return nil
}
func get_hash(path *string) string {
f, err := os.Open(*path)
if err != nil {
log.Fatal(err)
}
defer f.Close()
hasher := sha256.New()
if _, err := io.Copy(hasher, f); err != nil {
log.Fatal(err)
}
return hex.EncodeToString(hasher.Sum(nil))
}
var CRL_hash_list string
func installCrlToContainer(cert *string) error {
content, err := getCrlByURL(cert)
if err != nil {
return err
}
file, _ := makeTemp(&content)
hs := get_hash(&file)
if strings.Contains(CRL_hash_list, hs) {
//nothing to do
} else {
cmd := exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mCA", "-crl", "--file="+file)
if err := cmd.Run(); err != nil {
if err.Error() == "exit status 45" {
fmt.Printf("error:%3scrl not valid:%s\n", " ", *cert)
return errors.New("CRLNVAL")
}
}
CRL_hash_list = CRL_hash_list + ";" + hs
}
defer os.Remove(file)
return nil
}
func installCrlToContainerLocal(cert *[]byte) error {
file, _ := makeTemp(cert)
cmd := exec.Command("/opt/cprocsp/bin/amd64/certmgr", "-inst", "-store=mCA", "-crl", "--file="+file)
if err := cmd.Run(); err != nil {
if err.Error() == "exit status 45" {
fmt.Printf("error:%3scrl not valid:%s\n", " ", *cert)
return errors.New("CRLNVAL")
}
}
defer os.Remove(file)
return nil
}
/* func dumpUcsFingerptints(root *UcRoot, fingerFile *os.File) {
for _, uc := range root.Centers {
for _, pak := range uc.PAKs {
for _, key := range pak.Keys {
for _, cert := range key.Certs {
fingerFile.WriteString(string(cert.Footprint) + "\n")
}
}
}
}
fingerFile.Close()
}
*/
func makeListOfUCS(root *UcRoot) {
ucFile, err := os.Create("./ucs_grabbed.list")
if err != nil {
log.Fatal("Cannot create file :", err)
}
defer ucFile.Close()
for _, uc := range root.Centers {
ucFile.WriteString(uc.FullName + "\n")
}
}
func testUserCert(certPath string) {
cmd := exec.Command("/opt/cprocsp/bin/amd64/cryptcp", "-verify", "-errchain", "-f", certPath, certPath)
//var stderr bytes.Buffer
var stdout bytes.Buffer
//cmd.Stderr = &stderr
cmd.Stdout = &stdout
cmd.Run()
/*if err != nil {
log.Fatal(stderr.String())
return
}*/
fmt.Println(stdout.String())
}
func makeListInstalledCerts(listCaPath *string) {
fmt.Println("--------------- создаем список установленных сертификатов -----------------------")
cmd := exec.Command("/bin/bash", "-c", "/opt/cprocsp/bin/amd64/certmgr -list -store root |grep 'Serial' |cut -d':' -f2| sed -e 's/^[[:space:]]//' > "+*listCaPath)
var stdout bytes.Buffer
cmd.Stdout = &stdout
cmd.Run()
//fmt.Println(stdout.String())
}
func getCrlByURL(crl *string) ([]byte, error) {
supportedProtos := map[string]bool{"http": true, "ftp": false}
if supportedProtos[strings | }
}
} | random_line_split |
parser.go | procsp/bin/amd64/cryptcp", "-verify", "-errchain", "-f", certPath, certPath)
//var stderr bytes.Buffer
var stdout bytes.Buffer
//cmd.Stderr = &stderr
cmd.Stdout = &stdout
cmd.Run()
/*if err != nil {
log.Fatal(stderr.String())
return
}*/
fmt.Println(stdout.String())
}
func makeListInstalledCerts(listCaPath *string) {
fmt.Println("--------------- создаем список установленных сертификатов -----------------------")
cmd := exec.Command("/bin/bash", "-c", "/opt/cprocsp/bin/amd64/certmgr -list -store root |grep 'Serial' |cut -d':' -f2| sed -e 's/^[[:space:]]//' > "+*listCaPath)
var stdout bytes.Buffer
cmd.Stdout = &stdout
cmd.Run()
//fmt.Println(stdout.String())
}
func getCrlByURL(crl *string) ([]byte, error) {
supportedProtos := map[string]bool{"http": true, "ftp": false}
if supportedProtos[strings.Split(*crl, ":")[0]] == false {
//loadOverFTP(strings.Split(*crl, ":")[0])
return nil, errors.New("unsupported proto")
}
timeout := time.Duration(2 * time.Second)
client := http.Client{
Timeout: timeout,
}
response, err := client.Get(*crl)
if err != nil {
return nil, err
}
fileContent, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, err
}
return fileContent, nil
}
func loadOverFTP(url string) error {
client, err := ftp.Dial(url)
if err != nil {
fmt.Println(err)
return err
}
if err := client.Login("anonymous", "anonimous"); err != nil {
fmt.Println(err)
return err
}
ftpWalk(client)
return nil
}
func ftpWalk(client *ftp.ServerConn) error {
currentDir, _ := client.CurrentDir()
entries, err := client.List(currentDir)
if err != nil {
fmt.Println(err)
return err
}
for _, entry := range entries {
if entry.Type == ftp.EntryTypeFolder {
client.ChangeDir(entry.Name)
ftpWalk(client)
} else {
ucFile, err := client.Retr(currentDir + "/" + entry.Name)
if err != nil {
fmt.Println(err)
return err
}
ucFile.SetDeadline(time.Now().Add(time.Second * 10))
buf, err := ioutil.ReadAll(ucFile)
ucFile.Close()
if err != nil {
fmt.Println(err)
return err
}
if strings.Contains(entry.Name, "crt") {
installCertToContainer(&buf)
fmt.Println("[Костыль для Выбора]: CRT ", currentDir, "/", entry.Name, " -> installed")
} else if strings.Contains(entry.Name, "crl") {
installCrlToContainerLocal(&buf)
fmt.Println("[Костыль для Выбора]: CRL ", currentDir, "/", entry.Name, " -> installed")
}
}
}
client.ChangeDirToParent()
return nil
}
func getRosreestrXML(url string) {
response, err := http.Get(url)
if err != nil {
panic("can't download rosreestr XML" + err.Error())
}
fileContent, err := ioutil.ReadAll(response.Body)
if err != nil {
panic("cannot download rosreestr XML")
}
if err := ioutil.WriteFile("./uc.xml", fileContent, 0600); err != nil {
panic("can not save rosreestr XML to uc.xml")
}
}
func makeTemp(bytes *[]byte) (string, error) {
file, err := ioutil.TempFile("/tmp/", "__certParserTmp__")
defer file.Close()
if err != nil {
panic("Cannot create TempFile")
}
if err := ioutil.WriteFile(file.Name(), *bytes, 0600); err != nil {
panic(err)
}
return file.Name(), nil
}
func checkXMLVersion(newRoot *UcRoot, oldRoot *UcRoot) bool {
return newRoot.Version > oldRoot.Version
}
func killOnTimeout(lock *lockfile.Lockfile, timeout int64) {
time.Sleep(time.Minute * time.Duration(timeout))
lock.Unlock()
log.Panic("Чето пощло не так")
}
/*
func detectUCListLocation(list *string) string {
var fileContent []byte
ucListFile := *list
match, _ := regexp.MatchString("http[s]?://[a-zA-Z0-9]*", *list)
if match {
response, err := http.Get(*list)
if err != nil {
log.Panic("Не можем загрузить список УЦ аккредитованных на плщадке" + err.Error())
}
fileContent, err = ioutil.ReadAll(response.Body)
if err != nil {
log.Panic("Загрузили список УЦ аккредитованных на площадке, но не можем его прочитать" + err.Error())
}
os.Remove("/tmp/__certParserTmp__uc_list")
if err := ioutil.WriteFile("/tmp/__certParserTmp__uc_list", fileContent, 0600); err != nil {
log.Panic("Не смогли сохранить временный файл со списком УЦ", err.Error())
}
ucListFile = "/tmp/__certParserTmp__uc_list"
*list = ucListFile
return ucListFile
}
*list = ucListFile
return ucListFile
}
*/
func main() {
runtime.GOMAXPROCS(2)
//loadOverFTP("ftp.icvibor.ru:21")
//os.Exit(0)
var certPath = flag.String("certpath", "None", "путь до сертификата который проверяем (работаете только совместно c --testcert)")
var testCert = flag.Bool("testcert", false, "флаг указывающий на режим проверки сертификата")
var forceUpdate = flag.Bool("forceupdate", false, "флаг указывающий на игнорирование проверки версии xml")
var version = flag.Bool("version", false, "версия дистрибутива")
var daemon = flag.Bool("daemon", false, "запустить в режиме демона, в этом режиме интерактив недоступен")
var listCa = flag.Bool("listca", false, "выводит список установленный корневых сертификатов в файл installed.list")
var listCaPath = flag.String("listcapath", "installed.list", "путь куда записать список сертификатов")
//var uclist = flag.String("list", "", "путь до файла со списком аккредитованых УЦ")
flag.Parse()
if flag.NFlag() == 0 {
flag.Usage()
return
}
if *version {
fmt.Println(VERSION)
return
}
//detectUCListLocation(uclist)
lock, err := lockfile.New(filepath.Join(os.TempDir(), "certbot.lock"))
if err != nil {
log.Fatalf("Cannot init lock. reason: %v", err)
}
err = lock.TryLock()
go killOnTimeout(&lock, 60)
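// Watchdog: if this process still holds the lock after 60 minutes, killOnTimeout
// releases it and panics so a hung run cannot block future invocations.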
if err != nil {
log.Fatalf("Cannot lock %q, reason: %v", lock, err)
}
defer lock.Unlock()
if *testCert {
fmt.Println("------------ режим тестирования ------------------")
if *certPath == "None" {
flag.Usage()
return
}
testUserCert(*certPath)
return
}
if *listCa {
makeListInstalledCerts(listCaPath)
return
}
fmt.Printf("----------------------Запуск %s -----------------------\n", time.Now().String())
logwriter, e := syslog.New(syslog.LOG_NOTICE, "certparser")
if e == nil {
log.SetOutput(logwriter)
}
oldRoot := UcRoot{}
oldXMLFile, err := ioutil.ReadFile("./uc.xml")
if err == nil {
err = xml.Unmarshal(oldXMLFile, &oldRoot)
if err != nil {
panic(err.Error())
}
} else {
oldRoot.Version = 0
fmt.Println("Похоже что это свежая установка или вы грохнули старую XML-ку")
}
for do := true; do; do = *daemon {
fmt.Println("daemon: ", *daemon)
getRosreestrXML("https://e-trust.gosuslugi.ru/CA/DownloadTSL?schemaVersion=0")
root := UcRoot{}
xmlFile, err := ioutil.ReadFile("./uc.xml")
if err != nil {
panic(err.Error())
}
err = xml.Unmarshal(xmlFile, &root)
if err != nil {
panic(err.Error())
}
if *forceUpdate {
root.Version = 9999999999999
}
// fingerFile, err := os.Create("./fingers.list")
fingerFile, err := createFileIfNotExists("./fingers.list")
if err != nil {
| log.Fatal("Cannot create fil | conditional_block |
|
samplesheet.rs | entries: Vec::new(),
}
}
pub fn from_xlsx(xlsx: &str, db: &PgConnection) -> Result<Self> {
// open Excel workbook
let mut ss: Xlsx<_> = open_workbook(xlsx)?;
let sheetname = ss.sheet_names()[0].clone();
let sheet = ss.worksheet_range(&sheetname).unwrap()?;
let header_row: Vec<String> = sheet.rows().next().unwrap().iter().map(|d| d.to_string()).collect();
let col_dna_nr = header_row.iter().position(|c| *c == "DNA nr");
let col_lims_id = header_row.iter().position(|c| *c == "LIMS ID");
let col_sample = header_row.iter().position(|c| *c == "Sample");
let col_primer_set = header_row.iter().position(|c| *c == "primer set");
let col_run = header_row.iter().position(|c| *c == "run").ok_or_else(|| Box::<dyn Error>::from("Could not find required column 'run'"))?;
let mut result = SampleSheet::new();
for (row_idx, row) in sheet.rows().skip(1).enumerate() {
let run = row[col_run].to_string();
let name = col_sample.map(|col| row[col].to_string());
let primer_set = col_primer_set.map(|col| row[col].to_string());
let lims_id = col_lims_id.map(|col| row[col].to_string().parse::<i64>().ok()).flatten();
let dna_nr = col_dna_nr.map(|col| row[col].to_string());
let mut entry: SampleSheetEntry = match crate::vaultdb::match_samples(db, lims_id, dna_nr, primer_set, name, run)? {
MatchStatus::None(reason) => { warn!("Cannot find match for sample in row {}. Skipping. Reason: {}", row_idx+2, reason); continue }
MatchStatus::One(sample) => sample.into(),
MatchStatus::Multiple(v) => { warn!("Found {} matches for sample in row {}. Skipping.", row_idx+2, v.len()); continue }
};
// put all sample sheet columns as extra columns. During export, the user may select which one to use.
// Defaults to what the DB already knows
entry.extra_cols = header_row.iter().cloned().zip(row).map(|(header,data)| (header, data.to_string())).collect();
result.entries.push(entry);
}
Ok(result)
}
pub fn has_multiple_runs(&self) -> bool {
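// Collecting into a HashMap keyed by run name de-duplicates the runs; more than one
// key means the sheet spans several runs.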
self.entries.iter().map(|e| (e.model.run.clone(), true)).collect::<HashMap<String,bool>>().into_keys().count() > 1
}
pub fn extract_fastqs(&self, db: &PgConnection, targetpath: &Path) -> Result<()> {
// Make a list of paths that correspond to the runs so we can aggregate the ZIP extractions by ZIP file/run path
let mut runs: Vec<&str> = self.entries.iter().map( |e| e.model.run.as_ref()).collect();
runs.sort_unstable();
runs.dedup();
// Discover actual run path for runs
let runpaths: HashMap<String,String> = {
use crate::schema::run;
run::table
.select((run::name, run::path))
.filter(run::name.eq_any(&runs))
.load(db)
.expect("Could not get run")
}.into_iter().collect();
// Collect run paths before we go into parallel extraction
let files: Vec<Vec<String>> = self.entries.iter().map(|e| e.fastq_paths(db)).collect::<Result<_>>()?;
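// FASTQ paths are resolved up-front so the parallel loop below only touches plain
// strings and paths; presumably the database connection cannot be shared across the
// rayon worker threads.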
// Extract FASTQs from runs sample-wise in parallel, adding a sample prefix on-the-fly
self.entries.par_iter().enumerate().for_each(|(idx, entry)| {
let runpath = PathBuf::from(runpaths.get(&entry.model.run).unwrap());
let fastqs = &files[idx];
let prefix = if runs.len() > 1 { Some( format!("{}-", entry.get_unique_run_id()) ) } else { None };
if let Some(ext) = runpath.extension() {
if ext.to_ascii_lowercase() == "zip" {
extract_from_zip(&runpath, fastqs.as_ref(), targetpath, prefix).unwrap_or_else(|e| {
error!("Cannot extract from zip file {}: {}", runpath.display(), e)
});
} else {
warn!(
"Run path {} has weird extension. Don't know what to do, skipping.",
entry.model.run
);
}
} else {
extract_from_dir(&runpath, fastqs.as_ref(), targetpath, prefix)
.unwrap_or_else(|e| error!("Cannot copy from run folder: {}", e));
}
});
Ok(())
}
pub fn write_csv<T: AsRef<str> + PartialEq> (&self, separator: &str, overrides: &[T], outfile: &Path) -> Result<()> {
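// `overrides` lists the basic columns whose values should be taken from the sample
// sheet's extra columns instead of the database record; all other basic columns are
// filled from the matched Sample model.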
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h| !basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// write header
let mut csv = basic_header.join(separator);
if !all_sans_basic.is_empty() {
csv += separator;
csv += &all_sans_basic.join(separator);
}
csv += "\n";
let has_multiple_runs = self.has_multiple_runs();
for e in &self.entries {
// write basic data points
for (col_idx, col) in basic_header.iter().enumerate() {
let last = col_idx+1 == basic_header.len();
if overrides.iter().any(|x| &x.as_ref() == col) {
csv += e.extra_cols.get(*col).unwrap_or(&String::from(""));
} else {
match *col {
"Sample" => {
if has_multiple_runs {
csv += &format!("{}-{}", e.get_unique_run_id(), e.model.name);
} else {
csv += &e.model.name;
}
},
"run" => { csv += &e.model.run; },
"DNA nr" => { csv += &e.model.dna_nr.as_ref().unwrap_or(&String::from("")); },
"primer set" => { csv += e.model.primer_set.as_ref().unwrap_or(&String::from("")); },
"project" => { csv += &e.model.project.as_ref().unwrap_or(&String::from("")); },
"LIMS ID" => { csv += &e.model.lims_id.map(|i| i.to_string()).unwrap_or_else(|| String::from("")); },
"cells" => {
if let Some(cells) = e.model.cells.as_ref() {
csv += &cells.to_string()
} else if let Some(cells) = e.extra_cols.get(*col) {
csv += cells
}
},
s=> { error!("Unknown header: {}", s); panic!("Matching unknown basic header?!") },
}
};
if !last {
csv += separator;
}
}
if !all_sans_basic.is_empty() {
csv += separator;
}
// write non-basic columns (extra cols from sample sheet)
for (col_idx, col) in all_sans_basic.iter().enumerate() {
csv += e.extra_cols.get(*col).unwrap_or(&String::from(""));
if col_idx+1 < all_sans_basic.len() {
csv += separator;
}
}
csv += "\n";
}
File::create(outfile)?.write_all(csv.as_bytes())?;
Ok(())
}
pub fn write_xlsx<T: AsRef<str> + PartialEq> (&self, overrides: &[T], outfile: &Path) -> Result<()> { |
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h| !basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// set up an empty file
let workbook = xlsxwriter::Workbook::new(outfile.to_str().unwrap());
let mut sheet = workbook.add_worksheet(None)?;
| identifier_body |
|
samplesheet.rs | r.split('-').collect();
if parts.len() != 2 {
return None;
}
Some(format!(
"{:02}-{:05}",
parts[0].parse::<u32>().unwrap(),
parts[1].parse::<u32>().unwrap()
))
}
impl SampleSheetEntry {
pub fn _run_path(&self, db: &PgConnection) -> Result<PathBuf> {
use crate::schema::run;
let p: String = run::table.select(run::path).filter(run::name.eq(&self.model.run)).get_result(db)?;
Ok(PathBuf::from(p))
}
pub fn fastq_paths(&self, db: &PgConnection) -> Result<Vec<String>> {
use crate::schema::fastq;
Ok(fastq::table.select(fastq::filename).filter(fastq::sample_id.eq(self.model.id)).load(db)?)
}
// generate a short but unique string representation of the run
// to keep samples with same characteristics in different runs apart
fn get_unique_run_id(&self) -> String {
let underscore_parts: Vec<&str> = self.model.run.split('_').collect();
let dash_parts: Vec<&str> = self.model.run.split('-').collect();
format!("{}-{}", underscore_parts[0], dash_parts[dash_parts.len()-1])
}
}
impl From<models::Sample> for SampleSheetEntry {
fn from(s: models::Sample) -> Self {
SampleSheetEntry {
model: s,
extra_cols: HashMap::new()
}
}
}
impl From<Vec<models::Sample>> for SampleSheet {
fn from(ss: Vec<models::Sample>) -> Self { | }
}
fn extract_from_zip(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
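// When several runs are exported together the caller passes a `sample_prefix`
// (short run ID plus a dash) that is prepended to each extracted file name, so
// FASTQs from different runs cannot collide in the target directory.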
let zipfile = std::fs::File::open(path)?;
let mut zip = zip::ZipArchive::new(zipfile)?;
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut fastq = zip.by_name(f)?;
let target = PathBuf::from(fastq.name());
let mut local_path = PathBuf::from(targetdir);
local_path.push(prefix.clone() + &target.file_name().unwrap().to_string_lossy().to_string());
let mut targetfile = std::fs::File::create(local_path)?;
std::io::copy(&mut fastq, &mut targetfile)?;
}
Ok(())
}
fn extract_from_dir(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut src = path.to_path_buf();
src.push(f);
let mut target = PathBuf::from(targetdir);
target.push(prefix.clone() + &PathBuf::from(f).file_name().unwrap().to_string_lossy().to_string());
std::fs::copy(&src, &target)?;
}
Ok(())
}
impl SampleSheet {
pub fn new() -> Self {
SampleSheet {
entries: Vec::new(),
}
}
pub fn from_xlsx(xlsx: &str, db: &PgConnection) -> Result<Self> {
// open Excel workbook
let mut ss: Xlsx<_> = open_workbook(xlsx)?;
let sheetname = ss.sheet_names()[0].clone();
let sheet = ss.worksheet_range(&sheetname).unwrap()?;
let header_row: Vec<String> = sheet.rows().next().unwrap().iter().map(|d| d.to_string()).collect();
let col_dna_nr = header_row.iter().position(|c| *c == "DNA nr");
let col_lims_id = header_row.iter().position(|c| *c == "LIMS ID");
let col_sample = header_row.iter().position(|c| *c == "Sample");
let col_primer_set = header_row.iter().position(|c| *c == "primer set");
let col_run = header_row.iter().position(|c| *c == "run").ok_or_else(|| Box::<dyn Error>::from("Could not find required column 'run'"))?;
let mut result = SampleSheet::new();
for (row_idx, row) in sheet.rows().skip(1).enumerate() {
let run = row[col_run].to_string();
let name = col_sample.map(|col| row[col].to_string());
let primer_set = col_primer_set.map(|col| row[col].to_string());
let lims_id = col_lims_id.map(|col| row[col].to_string().parse::<i64>().ok()).flatten();
let dna_nr = col_dna_nr.map(|col| row[col].to_string());
let mut entry: SampleSheetEntry = match crate::vaultdb::match_samples(db, lims_id, dna_nr, primer_set, name, run)? {
MatchStatus::None(reason) => { warn!("Cannot find match for sample in row {}. Skipping. Reason: {}", row_idx+2, reason); continue }
MatchStatus::One(sample) => sample.into(),
MatchStatus::Multiple(v) => { warn!("Found {} matches for sample in row {}. Skipping.", row_idx+2, v.len()); continue }
};
// put all sample sheet columns as extra columns. During export, the user may select which one to use.
// Defaults to what the DB already knows
entry.extra_cols = header_row.iter().cloned().zip(row).map(|(header,data)| (header, data.to_string())).collect();
result.entries.push(entry);
}
Ok(result)
}
pub fn has_multiple_runs(&self) -> bool {
self.entries.iter().map(|e| (e.model.run.clone(), true)).collect::<HashMap<String,bool>>().into_keys().count() > 1
}
pub fn extract_fastqs(&self, db: &PgConnection, targetpath: &Path) -> Result<()> {
// Make a list of paths that correspond to the runs so we can aggregate the ZIP extractions by ZIP file/run path
let mut runs: Vec<&str> = self.entries.iter().map( |e| e.model.run.as_ref()).collect();
runs.sort_unstable();
runs.dedup();
// Discover actual run path for runs
let runpaths: HashMap<String,String> = {
use crate::schema::run;
run::table
.select((run::name, run::path))
.filter(run::name.eq_any(&runs))
.load(db)
.expect("Could not get run")
}.into_iter().collect();
// Collect run paths before we go into parallel extraction
let files: Vec<Vec<String>> = self.entries.iter().map(|e| e.fastq_paths(db)).collect::<Result<_>>()?;
// Extract FASTQs from runs sample-wise in parallel, adding a sample prefix on-the-fly
self.entries.par_iter().enumerate().for_each(|(idx, entry)| {
let runpath = PathBuf::from(runpaths.get(&entry.model.run).unwrap());
let fastqs = &files[idx];
let prefix = if runs.len() > 1 { Some( format!("{}-", entry.get_unique_run_id()) ) } else { None };
if let Some(ext) = runpath.extension() {
if ext.to_ascii_lowercase() == "zip" {
extract_from_zip(&runpath, fastqs.as_ref(), targetpath, prefix).unwrap_or_else(|e| {
error!("Cannot extract from zip file {}: {}", runpath.display(), e)
});
} else {
warn!(
"Run path {} has weird extension. Don't know what to do, skipping.",
entry.model.run
);
}
} else {
extract_from_dir(&runpath, fastqs.as_ref(), targetpath, prefix)
.unwrap_or_else(|e| error!("Cannot copy from run folder: {}", e));
}
});
Ok(())
}
pub fn write_csv<T: AsRef<str> + PartialEq> (&self, separator: &str, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h| !basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// write header
let mut csv = basic_header.join(separator);
if !all_sans_basic.is_empty() {
csv += separator;
csv += &all_sans_basic.join(separator);
}
csv += "\n";
let has_multiple_runs = self.has_multiple_runs();
for e | SampleSheet {
entries: ss.into_iter().map(|s| s.into()).collect()
} | random_line_split |
samplesheet.rs | r.split('-').collect();
if parts.len() != 2 {
return None;
}
Some(format!(
"{:02}-{:05}",
parts[0].parse::<u32>().unwrap(),
parts[1].parse::<u32>().unwrap()
))
}
impl SampleSheetEntry {
pub fn _run_path(&self, db: &PgConnection) -> Result<PathBuf> {
use crate::schema::run;
let p: String = run::table.select(run::path).filter(run::name.eq(&self.model.run)).get_result(db)?;
Ok(PathBuf::from(p))
}
pub fn f | &self, db: &PgConnection) -> Result<Vec<String>> {
use crate::schema::fastq;
Ok(fastq::table.select(fastq::filename).filter(fastq::sample_id.eq(self.model.id)).load(db)?)
}
// generate a short but unique string representation of the run
// to keep samples with same characteristics in different runs apart
fn get_unique_run_id(&self) -> String {
let underscore_parts: Vec<&str> = self.model.run.split('_').collect();
let dash_parts: Vec<&str> = self.model.run.split('-').collect();
format!("{}-{}", underscore_parts[0], dash_parts[dash_parts.len()-1])
}
}
impl From<models::Sample> for SampleSheetEntry {
fn from(s: models::Sample) -> Self {
SampleSheetEntry {
model: s,
extra_cols: HashMap::new()
}
}
}
impl From<Vec<models::Sample>> for SampleSheet {
fn from(ss: Vec<models::Sample>) -> Self {
SampleSheet {
entries: ss.into_iter().map(|s| s.into()).collect()
}
}
}
fn extract_from_zip(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let zipfile = std::fs::File::open(path)?;
let mut zip = zip::ZipArchive::new(zipfile)?;
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut fastq = zip.by_name(f)?;
let target = PathBuf::from(fastq.name());
let mut local_path = PathBuf::from(targetdir);
local_path.push(prefix.clone() + &target.file_name().unwrap().to_string_lossy().to_string());
let mut targetfile = std::fs::File::create(local_path)?;
std::io::copy(&mut fastq, &mut targetfile)?;
}
Ok(())
}
fn extract_from_dir(path: &Path, fastqs: &[String], targetdir: &Path, sample_prefix: Option<String>) -> Result<()> {
let prefix = sample_prefix.unwrap_or_else(|| String::from(""));
for f in fastqs {
let mut src = path.to_path_buf();
src.push(f);
let mut target = PathBuf::from(targetdir);
target.push(prefix.clone() + &PathBuf::from(f).file_name().unwrap().to_string_lossy().to_string());
std::fs::copy(&src, &target)?;
}
Ok(())
}
impl SampleSheet {
pub fn new() -> Self {
SampleSheet {
entries: Vec::new(),
}
}
pub fn from_xlsx(xlsx: &str, db: &PgConnection) -> Result<Self> {
// open Excel workbook
let mut ss: Xlsx<_> = open_workbook(xlsx)?;
let sheetname = ss.sheet_names()[0].clone();
let sheet = ss.worksheet_range(&sheetname).unwrap()?;
let header_row: Vec<String> = sheet.rows().next().unwrap().iter().map(|d| d.to_string()).collect();
let col_dna_nr = header_row.iter().position(|c| *c == "DNA nr");
let col_lims_id = header_row.iter().position(|c| *c == "LIMS ID");
let col_sample = header_row.iter().position(|c| *c == "Sample");
let col_primer_set = header_row.iter().position(|c| *c == "primer set");
let col_run = header_row.iter().position(|c| *c == "run").ok_or_else(|| Box::<dyn Error>::from("Could not find required column 'run'"))?;
let mut result = SampleSheet::new();
for (row_idx, row) in sheet.rows().skip(1).enumerate() {
let run = row[col_run].to_string();
let name = col_sample.map(|col| row[col].to_string());
let primer_set = col_primer_set.map(|col| row[col].to_string());
let lims_id = col_lims_id.map(|col| row[col].to_string().parse::<i64>().ok()).flatten();
let dna_nr = col_dna_nr.map(|col| row[col].to_string());
let mut entry: SampleSheetEntry = match crate::vaultdb::match_samples(db, lims_id, dna_nr, primer_set, name, run)? {
MatchStatus::None(reason) => { warn!("Cannot find match for sample in row {}. Skipping. Reason: {}", row_idx+2, reason); continue }
MatchStatus::One(sample) => sample.into(),
MatchStatus::Multiple(v) => { warn!("Found {} matches for sample in row {}. Skipping.", row_idx+2, v.len()); continue }
};
// put all sample sheet columns as extra columns. During export, the user may select which one to use.
// Defaults to what the DB already knows
entry.extra_cols = header_row.iter().cloned().zip(row).map(|(header,data)| (header, data.to_string())).collect();
result.entries.push(entry);
}
Ok(result)
}
pub fn has_multiple_runs(&self) -> bool {
self.entries.iter().map(|e| (e.model.run.clone(), true)).collect::<HashMap<String,bool>>().into_keys().count() > 1
}
pub fn extract_fastqs(&self, db: &PgConnection, targetpath: &Path) -> Result<()> {
// Make a list of paths that correspond to the runs so we can aggregate the ZIP extractions by ZIP file/run path
let mut runs: Vec<&str> = self.entries.iter().map( |e| e.model.run.as_ref()).collect();
runs.sort_unstable();
runs.dedup();
// Discover actual run path for runs
let runpaths: HashMap<String,String> = {
use crate::schema::run;
run::table
.select((run::name, run::path))
.filter(run::name.eq_any(&runs))
.load(db)
.expect("Could not get run")
}.into_iter().collect();
// Collect run paths before we go into parallel extraction
let files: Vec<Vec<String>> = self.entries.iter().map(|e| e.fastq_paths(db)).collect::<Result<_>>()?;
// Extract FASTQs from runs sample-wise in parallel, adding a sample prefix on-the-fly
self.entries.par_iter().enumerate().for_each(|(idx, entry)| {
let runpath = PathBuf::from(runpaths.get(&entry.model.run).unwrap());
let fastqs = &files[idx];
let prefix = if runs.len() > 1 { Some( format!("{}-", entry.get_unique_run_id()) ) } else { None };
if let Some(ext) = runpath.extension() {
if ext.to_ascii_lowercase() == "zip" {
extract_from_zip(&runpath, fastqs.as_ref(), targetpath, prefix).unwrap_or_else(|e| {
error!("Cannot extract from zip file {}: {}", runpath.display(), e)
});
} else {
warn!(
"Run path {} has weird extension. Don't know what to do, skipping.",
entry.model.run
);
}
} else {
extract_from_dir(&runpath, fastqs.as_ref(), targetpath, prefix)
.unwrap_or_else(|e| error!("Cannot copy from run folder: {}", e));
}
});
Ok(())
}
pub fn write_csv<T: AsRef<str> + PartialEq> (&self, separator: &str, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h| !basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// write header
let mut csv = basic_header.join(separator);
if !all_sans_basic.is_empty() {
csv += separator;
csv += &all_sans_basic.join(separator);
}
csv += "\n";
let has_multiple_runs = self.has_multiple_runs();
| astq_paths( | identifier_name |
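For reference, the identifier_name row above reassembles by straight concatenation: the prefix tail "pub fn f", the middle "astq_paths(", and the suffix head "&self, db: &PgConnection) -> Result<Vec<String>> {" join into the completed signature of fastq_paths, the method later called as e.fastq_paths(db). Below is a minimal, self-contained Rust illustration of that reassembly, using string literals copied from the row; it is not part of samplesheet.rs.

fn main() {
    // Column pieces exactly as displayed in the row above (prefix | suffix | middle | fim_type).
    let prefix_tail = "pub fn f";
    let middle = "astq_paths(";
    let suffix_head = "&self, db: &PgConnection) -> Result<Vec<String>> {";

    // prefix + middle + suffix restores the original source line.
    let restored = format!("{prefix_tail}{middle}{suffix_head}");
    assert_eq!(
        restored,
        "pub fn fastq_paths(&self, db: &PgConnection) -> Result<Vec<String>> {"
    );
    println!("{restored}");
}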
samplesheet.rs | = col_sample.map(|col| row[col].to_string());
let primer_set = col_primer_set.map(|col| row[col].to_string());
let lims_id = col_lims_id.map(|col| row[col].to_string().parse::<i64>().ok()).flatten();
let dna_nr = col_dna_nr.map(|col| row[col].to_string());
let mut entry: SampleSheetEntry = match crate::vaultdb::match_samples(db, lims_id, dna_nr, primer_set, name, run)? {
MatchStatus::None(reason) => { warn!("Cannot find match for sample in row {}. Skipping. Reason: {}", row_idx+2, reason); continue }
MatchStatus::One(sample) => sample.into(),
MatchStatus::Multiple(v) => { warn!("Found {} matches for sample in row {}. Skipping.", row_idx+2, v.len()); continue }
};
// put all sample sheet columns as extra columns. During export, the user may select which one to use.
// Defaults to what the DB already knows
entry.extra_cols = header_row.iter().cloned().zip(row).map(|(header,data)| (header, data.to_string())).collect();
result.entries.push(entry);
}
Ok(result)
}
pub fn has_multiple_runs(&self) -> bool {
self.entries.iter().map(|e| (e.model.run.clone(), true)).collect::<HashMap<String,bool>>().into_keys().count() > 1
}
pub fn extract_fastqs(&self, db: &PgConnection, targetpath: &Path) -> Result<()> {
// Make a list of paths that correspond to the runs so we can aggregate the ZIP extractions by ZIP file/run path
let mut runs: Vec<&str> = self.entries.iter().map( |e| e.model.run.as_ref()).collect();
runs.sort_unstable();
runs.dedup();
// Discover actual run path for runs
let runpaths: HashMap<String,String> = {
use crate::schema::run;
run::table
.select((run::name, run::path))
.filter(run::name.eq_any(&runs))
.load(db)
.expect("Could not get run")
}.into_iter().collect();
// Collect run paths before we go into parallel extraction
let files: Vec<Vec<String>> = self.entries.iter().map(|e| e.fastq_paths(db)).collect::<Result<_>>()?;
// Extract FASTQs from runs sample-wise in parallel, adding a sample prefix on-the-fly
self.entries.par_iter().enumerate().for_each(|(idx, entry)| {
let runpath = PathBuf::from(runpaths.get(&entry.model.run).unwrap());
let fastqs = &files[idx];
let prefix = if runs.len() > 1 { Some( format!("{}-", entry.get_unique_run_id()) ) } else { None };
if let Some(ext) = runpath.extension() {
if ext.to_ascii_lowercase() == "zip" {
extract_from_zip(&runpath, fastqs.as_ref(), targetpath, prefix).unwrap_or_else(|e| {
error!("Cannot extract from zip file {}: {}", runpath.display(), e)
});
} else {
warn!(
"Run path {} has weird extension. Don't know what to do, skipping.",
entry.model.run
);
}
} else {
extract_from_dir(&runpath, fastqs.as_ref(), targetpath, prefix)
.unwrap_or_else(|e| error!("Cannot copy from run folder: {}", e));
}
});
Ok(())
}
pub fn write_csv<T: AsRef<str> + PartialEq> (&self, separator: &str, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h| !basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// write header
let mut csv = basic_header.join(separator);
if !all_sans_basic.is_empty() {
csv += separator;
csv += &all_sans_basic.join(separator);
}
csv += "\n";
let has_multiple_runs = self.has_multiple_runs();
for e in &self.entries {
// write basic data points
for (col_idx, col) in basic_header.iter().enumerate() {
let last = col_idx+1 == basic_header.len();
if overrides.iter().any(|x| &x.as_ref() == col) {
csv += e.extra_cols.get(*col).unwrap_or(&String::from(""));
} else {
match *col {
"Sample" => {
if has_multiple_runs {
csv += &format!("{}-{}", e.get_unique_run_id(), e.model.name);
} else {
csv += &e.model.name;
}
},
"run" => { csv += &e.model.run; },
"DNA nr" => { csv += &e.model.dna_nr.as_ref().unwrap_or(&String::from("")); },
"primer set" => { csv += e.model.primer_set.as_ref().unwrap_or(&String::from("")); },
"project" => { csv += &e.model.project.as_ref().unwrap_or(&String::from("")); },
"LIMS ID" => { csv += &e.model.lims_id.map(|i| i.to_string()).unwrap_or_else(|| String::from("")); },
"cells" => {
if let Some(cells) = e.model.cells.as_ref() {
csv += &cells.to_string()
} else if let Some(cells) = e.extra_cols.get(*col) {
csv += cells
}
},
s=> { error!("Unknown header: {}", s); panic!("Matching unknown basic header?!") },
}
};
if !last {
csv += separator;
}
}
if !all_sans_basic.is_empty() {
csv += separator;
}
// write non-basic columns (extra cols from sample sheet)
for (col_idx, col) in all_sans_basic.iter().enumerate() {
csv += e.extra_cols.get(*col).unwrap_or(&String::from(""));
if col_idx+1 < all_sans_basic.len() {
csv += separator;
}
}
csv += "\n";
}
File::create(outfile)?.write_all(csv.as_bytes())?;
Ok(())
}
pub fn write_xlsx<T: AsRef<str> + PartialEq> (&self, overrides: &[T], outfile: &Path) -> Result<()> {
let basic_header = vec!["Sample", "run", "DNA nr", "primer set", "project", "LIMS ID", "cells"];
// extra_cols hashmap is not necessarily fully populated for every sample, so check all
let mut all_headers: Vec<String> = self.entries
.iter()
.map::<Vec<String>,_>(|e| e.extra_cols.keys().cloned().collect())
.flatten()
.collect();
all_headers.sort_unstable();
all_headers.dedup();
//...to not have duplicates in the header lines where extra_cols and the basic headers would overlap
let all_sans_basic: Vec<&str> = all_headers.iter().filter(|&h| !basic_header.contains(&(**h).as_ref())).map(|s| s.as_ref()).collect();
// set up an empty file
let workbook = xlsxwriter::Workbook::new(outfile.to_str().unwrap());
let mut sheet = workbook.add_worksheet(None)?;
// write header
for (col, title) in basic_header.iter().chain(all_sans_basic.iter()).enumerate() {
sheet.write_string(0, col.clamp(0, u16::MAX.into()) as u16, title, None)?;
}
let has_multiple_runs = self.has_multiple_runs();
for (row, e) in self.entries.iter().enumerate() {
let row: u32 = (row + 1).try_into().unwrap();
// write basic data points
for (col_idx, colname) in basic_header.iter().enumerate() {
let col_idx: u16 = col_idx.try_into().unwrap();
let val = if overrides.iter().any(|x| &x.as_ref() == colname) {
e.extra_cols.get(*colname).unwrap_or(&String::from("")).to_string()
} else {
match *colname {
"Sample" => {
if has_multiple_runs {
format!("{}-{}", e.get_unique_run_id(), e.model.name)
} else {
e.model.name.to_string()
}
},
"run" => { e.model.run.to_string() },
"DNA nr" => { | e.model.dna_nr.as_ref().map(|s| s.clone()).unwrap_or(String::from("")) }, | conditional_block |
|
mainwindow.py |
from .ui import resources
from .phynx import FileModel, FileView, ExportRawCSV, ExportCorrectedCSV
from praxes.io import phynx
#logger = logging.getLogger(__file__)
class MainWindow(QtGui.QMainWindow):
"""
"""
def __init__(self, log_level=logging.CRITICAL, parent=None):
super(MainWindow, self).__init__(parent)
uic.loadUi(resources['mainwindow.ui'], self)
self.setCorner(QtCore.Qt.TopLeftCorner, QtCore.Qt.LeftDockWidgetArea)
self.setCorner(QtCore.Qt.BottomLeftCorner, QtCore.Qt.LeftDockWidgetArea)
self.setCorner(QtCore.Qt.TopRightCorner, QtCore.Qt.RightDockWidgetArea)
self.setCorner(QtCore.Qt.BottomRightCorner, QtCore.Qt.BottomDockWidgetArea)
self.setDockNestingEnabled(True)
self._specFileRegistry = {}
self.fileModel = FileModel(self)
self.fileView = FileView(self.fileModel, self)
self.setCentralWidget(self.fileView)
# TODO: will acquisition work on other platforms?
if sys.platform != 'linux2':
self.menuAcquisition.setEnabled(False)
self.expInterface = None
self.statusBar.showMessage('Ready', 2000)
self._currentItem = None
self._toolActions = {}
self._setupToolActions()
settings = QtCore.QSettings()
settings.beginGroup('MainWindow')
self.restoreGeometry(settings.value('Geometry').toByteArray())
self.restoreState(settings.value('State').toByteArray())
import praxes
# TODO: this should be a factory function, not a method of the main win:
praxes.application.registerService('ScanView', self.newScanWindow)
praxes.application.registerService('FileInterface', self)
def _createToolAction(
self, name, target, helptext=None, icon=None
):
assert hasattr(target, 'offersService')
action = QtGui.QAction(name, self)
action.setVisible(False)
self._toolActions[action] = target
action.triggered.connect(self.toolActionTriggered)
return action
def _setupToolActions(self):
try:
from ..fluorescence.mcaanalysiswindow import McaAnalysisWindow
self.menuTools.addAction(
self._createToolAction("Analyze MCA", McaAnalysisWindow)
)
except ImportError:
pass
self.menuExport.addAction(
self._createToolAction("Raw data", ExportRawCSV)
)
self.menuExport.addAction(
self._createToolAction("Corrected data", ExportCorrectedCSV)
)
@QtCore.pyqtSignature("")
def on_actionAboutQt_triggered(self):
QtGui.qApp.aboutQt()
@QtCore.pyqtSignature("")
def on_actionAboutPraxes_triggered(self):
QtGui.QMessageBox.about(self, self.tr("About Praxes"),
self.tr("Praxes Application, version %s\n\n"
"Praxes is a user interface for controlling synchrotron "
"experiments and analyzing data.\n\n"
"Praxes depends on several programs and libraries:\n\n"
" spec: for controlling hardware and data acquisition\n"
" SpecClient: a python interface to the spec server\n"
" PyMca: a set of programs and libraries for analyzing "
"X-ray fluorescence spectra"%praxes.__version__))
@QtCore.pyqtSignature("")
def on_actionImportSpecFile_triggered(self, force=False):
f = '%s'% QtGui.QFileDialog.getOpenFileName(
self,
'Open File',
os.getcwd(),
"Spec datafiles (*.dat *.mca);;All files (*)"
)
if f:
while 1:
h5_filename = str(
QtGui.QFileDialog.getSaveFileName(
self,
'Save HDF5 File',
os.path.join(os.getcwd(), f+'.h5'),
'HDF5 files (*.h5 *.hdf5 *.hdf *.nxs)'
)
)
if h5_filename and os.path.isfile(h5_filename):
res = QtGui.QMessageBox.question(
self,
'overwrite?',
'Do you want to overwrite the existing file?',
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No
)
if res == QtGui.QMessageBox.Yes:
os.remove(h5_filename)
else:
continue
break
if h5_filename:
self.statusBar.showMessage('Converting spec data...')
#QtGui.qApp.processEvents()
from praxes.io.phynx.migration.spec import convert_to_phynx
h5file = convert_to_phynx(
f, h5_filename=h5_filename, force=True, report=True
)
h5file.close()
self.statusBar.clearMessage()
self.openFile(h5_filename)
@QtCore.pyqtSignature("")
def on_menuTools_aboutToShow(self):
index = self.fileView.currentIndex()
self._currentItem = self.fileModel.getNodeFromIndex(index)
if self._currentItem is not None:
for action, tool in self._toolActions.items():
action.setVisible(tool.offersService(self._currentItem))
@QtCore.pyqtSignature("")
def on_actionOffline_triggered(self):
if self.expInterface is None: return
if self.expInterface.name == 'spec':
self.connectToSpec(False)
@QtCore.pyqtSignature("")
def on_actionOpen_triggered(self):
self.openFile()
@QtCore.pyqtSignature("bool")
def on_actionSpec_toggled(self, bool):
self.connectToSpec(bool)
def connectToSpec(self, bool):
if bool:
from praxes.instrumentation.spec.specinterface import ConnectionAborted
try:
from praxes.instrumentation.spec.specinterface import SpecInterface
self.expInterface = SpecInterface(self)
except ConnectionAborted:
return
if self.expInterface:
self.actionConfigure.setEnabled(True)
for key, (item, area, action) in \
self.expInterface.dockWidgets.items():
self.menuView.addAction(action)
self.addDockWidget(area, item)
else:
self.actionOffline.setChecked(True)
else:
if self.expInterface:
self.actionConfigure.setEnabled(False)
for key, (item, area, action) in \
self.expInterface.dockWidgets.items():
self.removeDockWidget(item)
self.menuView.removeAction(action)
self.expInterface.close()
self.expInterface = None
def closeEvent(self, event):
for view in praxes.application.openViews:
view.close()
if praxes.application.openViews:
event.ignore()
return
self.connectToSpec(False)
settings = QtCore.QSettings()
settings.beginGroup("MainWindow")
settings.setValue('Geometry', QtCore.QVariant(self.saveGeometry()))
settings.setValue('State', QtCore.QVariant(self.saveState()))
#self.fileModel.close()
return event.accept()
def getH5FileFromKey(self, key):
h5File = self._specFileRegistry.get(key, None)
if not h5File:
default = key + '.h5'
h5File = self.saveFile(default)
if h5File:
self._specFileRegistry[key] = h5File
return h5File
    ## TODO: The following two methods need to be generalized
## given a scan, offer analyses options
def getScanView(self, scan):
# this is a shortcut for now, in the future the view would be
# an overview of the entry with ability to open different analyses
if isinstance(scan, phynx.registry['Entry']):
from ..fluorescence.mcaanalysiswindow import McaAnalysisWindow
if len(scan['measurement'].mcas) > 0:
return McaAnalysisWindow(scan, self)
else:
msg = QtGui.QErrorMessage(self)
msg.showMessage(
'The entry you selected has no MCA data to process'
)
def newScanWindow(self, scan):
self.statusBar.showMessage('Configuring New Analysis Window ...')
scanView = self.getScanView(scan)
if scanView is None:
self.statusBar.clearMessage()
return
scanView.show()
self.statusBar.clearMessage()
return scanView
def openFile(self, filename=None):
if filename is None:
filename = QtGui.QFileDialog.getOpenFileName(
self,
'Open File',
os.getcwd(),
"hdf5 files (*.h5 *.hdf5 *.hdf *.nxs)"
)
if filename:
self.fileModel.openFile(str(filename))
def saveFile(self, filename=None):
if os.path.isfile(filename):
return self.fileModel.openFile(filename)
else:
newfilename = QtGui.QFileDialog.getSaveFileName(
self,
"Save File",
os.path.join(os.getcwd(), filename),
"hdf5 files (*.h5 *.hdf5 *.hdf *.nxs)"
)
if newfilename:
newfilename = str(newfilename)
if os.path.splitext(newfilename)[-1] not in (
'.h5', '.hdf5', '.hdf', '.nxs'
):
|
return self.fileModel.openFile(newfilename)
def toolActionTriggered(self):
self.statusBar.showMessage('Configuring...')
action = self.sender()
if action is not None and isinstance(action, QtGui.QAction):
tool = self._toolActions[action](self._currentItem, self)
if isinstance(tool, QtGui.QWidget):
tool.show()
self.statusBar.clearMessage()
#def main():
# | newfilename = newfilename + '.h5' | conditional_block |
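The mainwindow.py row above ends the same way as the Rust rows: after the truncated suffix come the middle (newfilename = newfilename + '.h5') and the fim_type tag, here conditional_block. Reassembly is language-independent. The sketch below is a generic helper for that step; the function name and the example strings are illustrative and do not come from any of the listed files.

/// Rebuild the original source text from one row's three code columns.
/// prefix, middle and suffix follow the column layout shown in the rows above.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    let mut out = String::with_capacity(prefix.len() + middle.len() + suffix.len());
    out.push_str(prefix);
    out.push_str(middle);
    out.push_str(suffix);
    out
}

fn main() {
    // Works the same for the Python rows as for the Rust ones.
    assert_eq!(reassemble("a = ", "1 + 2", "  # sum"), "a = 1 + 2  # sum");
}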
mainwindow.py |
from .ui import resources
from .phynx import FileModel, FileView, ExportRawCSV, ExportCorrectedCSV
from praxes.io import phynx
#logger = logging.getLogger(__file__)
class MainWindow(QtGui.QMainWindow):
"""
"""
def __init__(self, log_level=logging.CRITICAL, parent=None):
super(MainWindow, self).__init__(parent)
uic.loadUi(resources['mainwindow.ui'], self)
self.setCorner(QtCore.Qt.TopLeftCorner, QtCore.Qt.LeftDockWidgetArea)
self.setCorner(QtCore.Qt.BottomLeftCorner, QtCore.Qt.LeftDockWidgetArea)
self.setCorner(QtCore.Qt.TopRightCorner, QtCore.Qt.RightDockWidgetArea)
self.setCorner(QtCore.Qt.BottomRightCorner, QtCore.Qt.BottomDockWidgetArea)
self.setDockNestingEnabled(True)
self._specFileRegistry = {}
self.fileModel = FileModel(self)
self.fileView = FileView(self.fileModel, self)
self.setCentralWidget(self.fileView)
# TODO: will acquisition work on other platforms?
if sys.platform != 'linux2':
self.menuAcquisition.setEnabled(False)
self.expInterface = None
self.statusBar.showMessage('Ready', 2000)
self._currentItem = None
self._toolActions = {}
self._setupToolActions()
settings = QtCore.QSettings()
settings.beginGroup('MainWindow')
self.restoreGeometry(settings.value('Geometry').toByteArray())
self.restoreState(settings.value('State').toByteArray())
import praxes
# TODO: this should be a factory function, not a method of the main win:
praxes.application.registerService('ScanView', self.newScanWindow)
praxes.application.registerService('FileInterface', self)
def _createToolAction(
self, name, target, helptext=None, icon=None
):
assert hasattr(target, 'offersService')
action = QtGui.QAction(name, self)
action.setVisible(False)
self._toolActions[action] = target
action.triggered.connect(self.toolActionTriggered)
return action
def _setupToolActions(self):
try:
from ..fluorescence.mcaanalysiswindow import McaAnalysisWindow
self.menuTools.addAction(
self._createToolAction("Analyze MCA", McaAnalysisWindow)
)
except ImportError:
pass
self.menuExport.addAction(
self._createToolAction("Raw data", ExportRawCSV)
)
self.menuExport.addAction(
self._createToolAction("Corrected data", ExportCorrectedCSV)
)
@QtCore.pyqtSignature("")
def on_actionAboutQt_triggered(self):
QtGui.qApp.aboutQt()
@QtCore.pyqtSignature("")
def on_actionAboutPraxes_triggered(self):
QtGui.QMessageBox.about(self, self.tr("About Praxes"),
self.tr("Praxes Application, version %s\n\n"
"Praxes is a user interface for controlling synchrotron "
"experiments and analyzing data.\n\n"
"Praxes depends on several programs and libraries:\n\n"
" spec: for controlling hardware and data acquisition\n"
" SpecClient: a python interface to the spec server\n"
" PyMca: a set of programs and libraries for analyzing "
"X-ray fluorescence spectra"%praxes.__version__))
@QtCore.pyqtSignature("")
def on_actionImportSpecFile_triggered(self, force=False):
f = '%s'% QtGui.QFileDialog.getOpenFileName(
self,
'Open File',
os.getcwd(),
"Spec datafiles (*.dat *.mca);;All files (*)"
)
if f:
while 1:
h5_filename = str(
QtGui.QFileDialog.getSaveFileName(
self,
'Save HDF5 File',
os.path.join(os.getcwd(), f+'.h5'),
'HDF5 files (*.h5 *.hdf5 *.hdf *.nxs)'
)
)
if h5_filename and os.path.isfile(h5_filename):
res = QtGui.QMessageBox.question(
self,
'overwrite?',
'Do you want to overwrite the existing file?',
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No
)
if res == QtGui.QMessageBox.Yes:
os.remove(h5_filename)
else:
continue
break
if h5_filename:
self.statusBar.showMessage('Converting spec data...')
#QtGui.qApp.processEvents()
from praxes.io.phynx.migration.spec import convert_to_phynx
h5file = convert_to_phynx(
f, h5_filename=h5_filename, force=True, report=True
)
h5file.close()
self.statusBar.clearMessage()
self.openFile(h5_filename)
@QtCore.pyqtSignature("")
def on_menuTools_aboutToShow(self):
index = self.fileView.currentIndex()
self._currentItem = self.fileModel.getNodeFromIndex(index)
if self._currentItem is not None:
for action, tool in self._toolActions.items():
action.setVisible(tool.offersService(self._currentItem))
@QtCore.pyqtSignature("")
def on_actionOffline_triggered(self):
if self.expInterface is None: return
if self.expInterface.name == 'spec':
self.connectToSpec(False)
@QtCore.pyqtSignature("")
def | (self):
self.openFile()
@QtCore.pyqtSignature("bool")
def on_actionSpec_toggled(self, bool):
self.connectToSpec(bool)
def connectToSpec(self, bool):
if bool:
from praxes.instrumentation.spec.specinterface import ConnectionAborted
try:
from praxes.instrumentation.spec.specinterface import SpecInterface
self.expInterface = SpecInterface(self)
except ConnectionAborted:
return
if self.expInterface:
self.actionConfigure.setEnabled(True)
for key, (item, area, action) in \
self.expInterface.dockWidgets.items():
self.menuView.addAction(action)
self.addDockWidget(area, item)
else:
self.actionOffline.setChecked(True)
else:
if self.expInterface:
self.actionConfigure.setEnabled(False)
for key, (item, area, action) in \
self.expInterface.dockWidgets.items():
self.removeDockWidget(item)
self.menuView.removeAction(action)
self.expInterface.close()
self.expInterface = None
def closeEvent(self, event):
for view in praxes.application.openViews:
view.close()
if praxes.application.openViews:
event.ignore()
return
self.connectToSpec(False)
settings = QtCore.QSettings()
settings.beginGroup("MainWindow")
settings.setValue('Geometry', QtCore.QVariant(self.saveGeometry()))
settings.setValue('State', QtCore.QVariant(self.saveState()))
#self.fileModel.close()
return event.accept()
def getH5FileFromKey(self, key):
h5File = self._specFileRegistry.get(key, None)
if not h5File:
default = key + '.h5'
h5File = self.saveFile(default)
if h5File:
self._specFileRegistry[key] = h5File
return h5File
    ## TODO: The following two methods need to be generalized
## given a scan, offer analyses options
def getScanView(self, scan):
# this is a shortcut for now, in the future the view would be
# an overview of the entry with ability to open different analyses
if isinstance(scan, phynx.registry['Entry']):
from ..fluorescence.mcaanalysiswindow import McaAnalysisWindow
if len(scan['measurement'].mcas) > 0:
return McaAnalysisWindow(scan, self)
else:
msg = QtGui.QErrorMessage(self)
msg.showMessage(
'The entry you selected has no MCA data to process'
)
def newScanWindow(self, scan):
self.statusBar.showMessage('Configuring New Analysis Window ...')
scanView = self.getScanView(scan)
if scanView is None:
self.statusBar.clearMessage()
return
scanView.show()
self.statusBar.clearMessage()
return scanView
def openFile(self, filename=None):
if filename is None:
filename = QtGui.QFileDialog.getOpenFileName(
self,
'Open File',
os.getcwd(),
"hdf5 files (*.h5 *.hdf5 *.hdf *.nxs)"
)
if filename:
self.fileModel.openFile(str(filename))
def saveFile(self, filename=None):
if os.path.isfile(filename):
return self.fileModel.openFile(filename)
else:
newfilename = QtGui.QFileDialog.getSaveFileName(
self,
"Save File",
os.path.join(os.getcwd(), filename),
"hdf5 files (*.h5 *.hdf5 *.hdf *.nxs)"
)
if newfilename:
newfilename = str(newfilename)
if os.path.splitext(newfilename)[-1] not in (
'.h5', '.hdf5', '.hdf', '.nxs'
):
newfilename = newfilename + '.h5'
return self.fileModel.openFile(newfilename)
def toolActionTriggered(self):
self.statusBar.showMessage('Configuring...')
action = self.sender()
if action is not None and isinstance(action, QtGui.QAction):
tool = self._toolActions[action](self._currentItem, self)
if isinstance(tool, QtGui.QWidget):
tool.show()
self.statusBar.clearMessage()
#def main():
# | on_actionOpen_triggered | identifier_name |
mainwindow.py |
from .ui import resources
from .phynx import FileModel, FileView, ExportRawCSV, ExportCorrectedCSV
from praxes.io import phynx
#logger = logging.getLogger(__file__)
class MainWindow(QtGui.QMainWindow):
"""
"""
def __init__(self, log_level=logging.CRITICAL, parent=None):
super(MainWindow, self).__init__(parent)
uic.loadUi(resources['mainwindow.ui'], self)
self.setCorner(QtCore.Qt.TopLeftCorner, QtCore.Qt.LeftDockWidgetArea)
self.setCorner(QtCore.Qt.BottomLeftCorner, QtCore.Qt.LeftDockWidgetArea)
self.setCorner(QtCore.Qt.TopRightCorner, QtCore.Qt.RightDockWidgetArea)
self.setCorner(QtCore.Qt.BottomRightCorner, QtCore.Qt.BottomDockWidgetArea)
self.setDockNestingEnabled(True)
self._specFileRegistry = {}
self.fileModel = FileModel(self)
self.fileView = FileView(self.fileModel, self)
self.setCentralWidget(self.fileView)
# TODO: will acquisition work on other platforms?
if sys.platform != 'linux2':
self.menuAcquisition.setEnabled(False)
self.expInterface = None
self.statusBar.showMessage('Ready', 2000)
self._currentItem = None
self._toolActions = {}
self._setupToolActions()
settings = QtCore.QSettings()
settings.beginGroup('MainWindow')
self.restoreGeometry(settings.value('Geometry').toByteArray())
self.restoreState(settings.value('State').toByteArray())
import praxes
# TODO: this should be a factory function, not a method of the main win:
praxes.application.registerService('ScanView', self.newScanWindow)
praxes.application.registerService('FileInterface', self)
def _createToolAction(
self, name, target, helptext=None, icon=None
):
assert hasattr(target, 'offersService')
action = QtGui.QAction(name, self)
action.setVisible(False)
self._toolActions[action] = target
action.triggered.connect(self.toolActionTriggered)
return action
def _setupToolActions(self):
try:
from ..fluorescence.mcaanalysiswindow import McaAnalysisWindow
self.menuTools.addAction(
self._createToolAction("Analyze MCA", McaAnalysisWindow)
)
except ImportError:
pass
self.menuExport.addAction(
self._createToolAction("Raw data", ExportRawCSV)
)
self.menuExport.addAction(
self._createToolAction("Corrected data", ExportCorrectedCSV)
)
@QtCore.pyqtSignature("")
def on_actionAboutQt_triggered(self):
QtGui.qApp.aboutQt()
@QtCore.pyqtSignature("")
def on_actionAboutPraxes_triggered(self):
QtGui.QMessageBox.about(self, self.tr("About Praxes"),
self.tr("Praxes Application, version %s\n\n"
"Praxes is a user interface for controlling synchrotron "
"experiments and analyzing data.\n\n"
"Praxes depends on several programs and libraries:\n\n"
" spec: for controlling hardware and data acquisition\n"
" SpecClient: a python interface to the spec server\n"
" PyMca: a set of programs and libraries for analyzing "
"X-ray fluorescence spectra"%praxes.__version__))
@QtCore.pyqtSignature("")
def on_actionImportSpecFile_triggered(self, force=False):
f = '%s'% QtGui.QFileDialog.getOpenFileName(
self,
'Open File',
os.getcwd(),
"Spec datafiles (*.dat *.mca);;All files (*)"
)
if f:
while 1:
h5_filename = str(
QtGui.QFileDialog.getSaveFileName(
self,
'Save HDF5 File',
os.path.join(os.getcwd(), f+'.h5'),
'HDF5 files (*.h5 *.hdf5 *.hdf *.nxs)'
)
)
if h5_filename and os.path.isfile(h5_filename):
res = QtGui.QMessageBox.question(
self,
'overwrite?',
'Do you want to overwrite the existing file?',
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No
)
if res == QtGui.QMessageBox.Yes:
os.remove(h5_filename)
else:
continue
break
if h5_filename:
self.statusBar.showMessage('Converting spec data...')
#QtGui.qApp.processEvents()
from praxes.io.phynx.migration.spec import convert_to_phynx
h5file = convert_to_phynx(
f, h5_filename=h5_filename, force=True, report=True
)
h5file.close()
self.statusBar.clearMessage()
self.openFile(h5_filename)
@QtCore.pyqtSignature("")
def on_menuTools_aboutToShow(self):
index = self.fileView.currentIndex()
self._currentItem = self.fileModel.getNodeFromIndex(index)
if self._currentItem is not None:
for action, tool in self._toolActions.items():
action.setVisible(tool.offersService(self._currentItem))
@QtCore.pyqtSignature("")
def on_actionOffline_triggered(self):
|
@QtCore.pyqtSignature("")
def on_actionOpen_triggered(self):
self.openFile()
@QtCore.pyqtSignature("bool")
def on_actionSpec_toggled(self, bool):
self.connectToSpec(bool)
def connectToSpec(self, bool):
if bool:
from praxes.instrumentation.spec.specinterface import ConnectionAborted
try:
from praxes.instrumentation.spec.specinterface import SpecInterface
self.expInterface = SpecInterface(self)
except ConnectionAborted:
return
if self.expInterface:
self.actionConfigure.setEnabled(True)
for key, (item, area, action) in \
self.expInterface.dockWidgets.items():
self.menuView.addAction(action)
self.addDockWidget(area, item)
else:
self.actionOffline.setChecked(True)
else:
if self.expInterface:
self.actionConfigure.setEnabled(False)
for key, (item, area, action) in \
self.expInterface.dockWidgets.items():
self.removeDockWidget(item)
self.menuView.removeAction(action)
self.expInterface.close()
self.expInterface = None
def closeEvent(self, event):
for view in praxes.application.openViews:
view.close()
if praxes.application.openViews:
event.ignore()
return
self.connectToSpec(False)
settings = QtCore.QSettings()
settings.beginGroup("MainWindow")
settings.setValue('Geometry', QtCore.QVariant(self.saveGeometry()))
settings.setValue('State', QtCore.QVariant(self.saveState()))
#self.fileModel.close()
return event.accept()
def getH5FileFromKey(self, key):
h5File = self._specFileRegistry.get(key, None)
if not h5File:
default = key + '.h5'
h5File = self.saveFile(default)
if h5File:
self._specFileRegistry[key] = h5File
return h5File
    ## TODO: The following two methods need to be generalized
## given a scan, offer analyses options
def getScanView(self, scan):
# this is a shortcut for now, in the future the view would be
# an overview of the entry with ability to open different analyses
if isinstance(scan, phynx.registry['Entry']):
from ..fluorescence.mcaanalysiswindow import McaAnalysisWindow
if len(scan['measurement'].mcas) > 0:
return McaAnalysisWindow(scan, self)
else:
msg = QtGui.QErrorMessage(self)
msg.showMessage(
'The entry you selected has no MCA data to process'
)
def newScanWindow(self, scan):
self.statusBar.showMessage('Configuring New Analysis Window ...')
scanView = self.getScanView(scan)
if scanView is None:
self.statusBar.clearMessage()
return
scanView.show()
self.statusBar.clearMessage()
return scanView
def openFile(self, filename=None):
if filename is None:
filename = QtGui.QFileDialog.getOpenFileName(
self,
'Open File',
os.getcwd(),
"hdf5 files (*.h5 *.hdf5 *.hdf *.nxs)"
)
if filename:
self.fileModel.openFile(str(filename))
def saveFile(self, filename=None):
if os.path.isfile(filename):
return self.fileModel.openFile(filename)
else:
newfilename = QtGui.QFileDialog.getSaveFileName(
self,
"Save File",
os.path.join(os.getcwd(), filename),
"hdf5 files (*.h5 *.hdf5 *.hdf *.nxs)"
)
if newfilename:
newfilename = str(newfilename)
if os.path.splitext(newfilename)[-1] not in (
'.h5', '.hdf5', '.hdf', '.nxs'
):
newfilename = newfilename + '.h5'
return self.fileModel.openFile(newfilename)
def toolActionTriggered(self):
self.statusBar.showMessage('Configuring...')
action = self.sender()
if action is not None and isinstance(action, QtGui.QAction):
tool = self._toolActions[action](self._currentItem, self)
if isinstance(tool, QtGui.QWidget):
tool.show()
self.statusBar.clearMessage()
#def main():
# | if self.expInterface is None: return
if self.expInterface.name == 'spec':
self.connectToSpec(False) | identifier_body |
mainwindow.py |
from .ui import resources
from .phynx import FileModel, FileView, ExportRawCSV, ExportCorrectedCSV
from praxes.io import phynx
#logger = logging.getLogger(__file__)
class MainWindow(QtGui.QMainWindow):
"""
"""
def __init__(self, log_level=logging.CRITICAL, parent=None):
super(MainWindow, self).__init__(parent)
uic.loadUi(resources['mainwindow.ui'], self)
self.setCorner(QtCore.Qt.TopLeftCorner, QtCore.Qt.LeftDockWidgetArea)
self.setCorner(QtCore.Qt.BottomLeftCorner, QtCore.Qt.LeftDockWidgetArea)
self.setCorner(QtCore.Qt.TopRightCorner, QtCore.Qt.RightDockWidgetArea)
self.setCorner(QtCore.Qt.BottomRightCorner, QtCore.Qt.BottomDockWidgetArea)
self.setDockNestingEnabled(True)
self._specFileRegistry = {}
self.fileModel = FileModel(self)
self.fileView = FileView(self.fileModel, self)
self.setCentralWidget(self.fileView)
# TODO: will acquisition work on other platforms?
if sys.platform != 'linux2':
self.menuAcquisition.setEnabled(False)
self.expInterface = None
self.statusBar.showMessage('Ready', 2000)
self._currentItem = None
self._toolActions = {}
self._setupToolActions()
settings = QtCore.QSettings()
settings.beginGroup('MainWindow')
self.restoreGeometry(settings.value('Geometry').toByteArray())
self.restoreState(settings.value('State').toByteArray())
import praxes
# TODO: this should be a factory function, not a method of the main win:
praxes.application.registerService('ScanView', self.newScanWindow)
praxes.application.registerService('FileInterface', self)
def _createToolAction(
self, name, target, helptext=None, icon=None
):
assert hasattr(target, 'offersService')
action = QtGui.QAction(name, self)
action.setVisible(False)
self._toolActions[action] = target
action.triggered.connect(self.toolActionTriggered)
return action
def _setupToolActions(self):
try:
from ..fluorescence.mcaanalysiswindow import McaAnalysisWindow
self.menuTools.addAction(
self._createToolAction("Analyze MCA", McaAnalysisWindow)
)
except ImportError:
pass
self.menuExport.addAction(
self._createToolAction("Raw data", ExportRawCSV)
)
self.menuExport.addAction(
self._createToolAction("Corrected data", ExportCorrectedCSV)
)
@QtCore.pyqtSignature("")
def on_actionAboutQt_triggered(self):
QtGui.qApp.aboutQt()
@QtCore.pyqtSignature("")
def on_actionAboutPraxes_triggered(self):
QtGui.QMessageBox.about(self, self.tr("About Praxes"),
self.tr("Praxes Application, version %s\n\n"
"Praxes is a user interface for controlling synchrotron "
"experiments and analyzing data.\n\n"
"Praxes depends on several programs and libraries:\n\n"
" spec: for controlling hardware and data acquisition\n"
" SpecClient: a python interface to the spec server\n"
" PyMca: a set of programs and libraries for analyzing "
"X-ray fluorescence spectra"%praxes.__version__))
@QtCore.pyqtSignature("")
def on_actionImportSpecFile_triggered(self, force=False):
f = '%s'% QtGui.QFileDialog.getOpenFileName(
self,
'Open File',
os.getcwd(),
"Spec datafiles (*.dat *.mca);;All files (*)"
)
if f:
while 1: | QtGui.QFileDialog.getSaveFileName(
self,
'Save HDF5 File',
os.path.join(os.getcwd(), f+'.h5'),
'HDF5 files (*.h5 *.hdf5 *.hdf *.nxs)'
)
)
if h5_filename and os.path.isfile(h5_filename):
res = QtGui.QMessageBox.question(
self,
'overwrite?',
'Do you want to overwrite the existing file?',
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No
)
if res == QtGui.QMessageBox.Yes:
os.remove(h5_filename)
else:
continue
break
if h5_filename:
self.statusBar.showMessage('Converting spec data...')
#QtGui.qApp.processEvents()
from praxes.io.phynx.migration.spec import convert_to_phynx
h5file = convert_to_phynx(
f, h5_filename=h5_filename, force=True, report=True
)
h5file.close()
self.statusBar.clearMessage()
self.openFile(h5_filename)
@QtCore.pyqtSignature("")
def on_menuTools_aboutToShow(self):
index = self.fileView.currentIndex()
self._currentItem = self.fileModel.getNodeFromIndex(index)
if self._currentItem is not None:
for action, tool in self._toolActions.items():
action.setVisible(tool.offersService(self._currentItem))
@QtCore.pyqtSignature("")
def on_actionOffline_triggered(self):
if self.expInterface is None: return
if self.expInterface.name == 'spec':
self.connectToSpec(False)
@QtCore.pyqtSignature("")
def on_actionOpen_triggered(self):
self.openFile()
@QtCore.pyqtSignature("bool")
def on_actionSpec_toggled(self, bool):
self.connectToSpec(bool)
def connectToSpec(self, bool):
if bool:
from praxes.instrumentation.spec.specinterface import ConnectionAborted
try:
from praxes.instrumentation.spec.specinterface import SpecInterface
self.expInterface = SpecInterface(self)
except ConnectionAborted:
return
if self.expInterface:
self.actionConfigure.setEnabled(True)
for key, (item, area, action) in \
self.expInterface.dockWidgets.items():
self.menuView.addAction(action)
self.addDockWidget(area, item)
else:
self.actionOffline.setChecked(True)
else:
if self.expInterface:
self.actionConfigure.setEnabled(False)
for key, (item, area, action) in \
self.expInterface.dockWidgets.items():
self.removeDockWidget(item)
self.menuView.removeAction(action)
self.expInterface.close()
self.expInterface = None
def closeEvent(self, event):
for view in praxes.application.openViews:
view.close()
if praxes.application.openViews:
event.ignore()
return
self.connectToSpec(False)
settings = QtCore.QSettings()
settings.beginGroup("MainWindow")
settings.setValue('Geometry', QtCore.QVariant(self.saveGeometry()))
settings.setValue('State', QtCore.QVariant(self.saveState()))
#self.fileModel.close()
return event.accept()
def getH5FileFromKey(self, key):
h5File = self._specFileRegistry.get(key, None)
if not h5File:
default = key + '.h5'
h5File = self.saveFile(default)
if h5File:
self._specFileRegistry[key] = h5File
return h5File
    ## TODO: The following two methods need to be generalized
## given a scan, offer analyses options
def getScanView(self, scan):
# this is a shortcut for now, in the future the view would be
# an overview of the entry with ability to open different analyses
if isinstance(scan, phynx.registry['Entry']):
from ..fluorescence.mcaanalysiswindow import McaAnalysisWindow
if len(scan['measurement'].mcas) > 0:
return McaAnalysisWindow(scan, self)
else:
msg = QtGui.QErrorMessage(self)
msg.showMessage(
'The entry you selected has no MCA data to process'
)
def newScanWindow(self, scan):
self.statusBar.showMessage('Configuring New Analysis Window ...')
scanView = self.getScanView(scan)
if scanView is None:
self.statusBar.clearMessage()
return
scanView.show()
self.statusBar.clearMessage()
return scanView
def openFile(self, filename=None):
if filename is None:
filename = QtGui.QFileDialog.getOpenFileName(
self,
'Open File',
os.getcwd(),
"hdf5 files (*.h5 *.hdf5 *.hdf *.nxs)"
)
if filename:
self.fileModel.openFile(str(filename))
def saveFile(self, filename=None):
if os.path.isfile(filename):
return self.fileModel.openFile(filename)
else:
newfilename = QtGui.QFileDialog.getSaveFileName(
self,
"Save File",
os.path.join(os.getcwd(), filename),
"hdf5 files (*.h5 *.hdf5 *.hdf *.nxs)"
)
if newfilename:
newfilename = str(newfilename)
if os.path.splitext(newfilename)[-1] not in (
'.h5', '.hdf5', '.hdf', '.nxs'
):
newfilename = newfilename + '.h5'
return self.fileModel.openFile(newfilename)
def toolActionTriggered(self):
self.statusBar.showMessage('Configuring...')
action = self.sender()
if action is not None and isinstance(action, QtGui.QAction):
tool = self._toolActions[action](self._currentItem, self)
if isinstance(tool, QtGui.QWidget):
tool.show()
self.statusBar.clearMessage()
#def main():
# import | h5_filename = str( | random_line_split |
lib.rs | ::{error, info, warn};
use rocksdb::{checkpoint::Checkpoint, DB};
use tokio::time::{sleep, Duration};
use std::collections::HashSet;
use std::net::ToSocketAddrs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
const KEEP_NUM: u64 = 100;
const PRUNE_INTERVAL: u64 = 1000;
const GENESIS_NUMBER: u64 = 0;
// Adapted from https://github.com/nervosnetwork/ckb-indexer/blob/290ae55a2d2acfc3d466a69675a1a58fcade7f5d/src/service.rs#L25
// with extensions for more indexing features.
pub struct Service {
store: RocksdbStore,
ckb_client: CkbRpcClient,
poll_interval: Duration,
listen_address: String,
rpc_thread_num: usize,
network_type: NetworkType,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: PathBuf,
cellbase_maturity: RationalU256,
cheque_since: U256,
}
impl Service {
pub fn new(
store_path: &str,
listen_address: &str,
poll_interval: Duration,
rpc_thread_num: usize,
network_ty: &str,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: &str,
cellbase_maturity: u64,
ckb_uri: String,
cheque_since: u64,
) -> Self {
let store = RocksdbStore::new(store_path);
let ckb_client = CkbRpcClient::new(ckb_uri);
let network_type = NetworkType::from_raw_str(network_ty).expect("invalid network type");
let listen_address = listen_address.to_string();
let snapshot_path = Path::new(snapshot_path).to_path_buf();
let cellbase_maturity = RationalU256::from_u256(U256::from(cellbase_maturity));
let cheque_since: U256 = cheque_since.into();
info!("Mercury running in CKB {:?}", network_type);
Service {
store,
ckb_client,
poll_interval,
listen_address,
rpc_thread_num,
network_type,
extensions_config,
snapshot_interval,
snapshot_path,
cellbase_maturity,
cheque_since,
}
}
pub fn init(&self) -> Server {
let mut io_handler = IoHandler::new();
let mercury_rpc_impl = MercuryRpcImpl::new(
self.store.clone(),
self.network_type,
self.ckb_client.clone(),
self.cheque_since.clone(),
self.extensions_config.to_rpc_config(),
);
let indexer_rpc_impl = IndexerRpcImpl {
version: "0.2.1".to_string(),
store: self.store.clone(),
};
io_handler.extend_with(indexer_rpc_impl.to_delegate());
io_handler.extend_with(mercury_rpc_impl.to_delegate());
info!("Running!");
ServerBuilder::new(io_handler)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Null,
AccessControlAllowOrigin::Any,
]))
.threads(self.rpc_thread_num)
.health_api(("/ping", "ping"))
.start_http(
&self
.listen_address
.to_socket_addrs()
.expect("config listen_address parsed")
.next()
.expect("listen_address parsed"),
)
.expect("Start Jsonrpc HTTP service")
}
#[allow(clippy::cmp_owned)]
pub async fn start(&self) {
// 0.37.0 and above supports hex format
let use_hex_format = loop {
match self.ckb_client.local_node_info().await {
Ok(local_node_info) => {
break local_node_info.version > "0.36".to_owned();
}
Err(err) => {
// < 0.32.0 compatibility
if format!("#{}", err).contains("missing field") {
break false;
}
error!("cannot get local_node_info from ckb node: {}", err);
std::thread::sleep(self.poll_interval);
}
}
};
USE_HEX_FORMAT.swap(Arc::new(use_hex_format));
let use_hex = use_hex_format;
let client_clone = self.ckb_client.clone();
tokio::spawn(async move {
update_tx_pool_cache(client_clone, use_hex).await;
});
self.run(use_hex_format).await;
}
async fn run(&self, use_hex_format: bool) {
let mut tip = 0;
loop {
let batch_store =
BatchStore::create(self.store.clone()).expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(batch_store.clone(), KEEP_NUM, u64::MAX));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
batch_store.clone(),
)
.expect("extension building failure");
let append_block_func = |block: BlockView| {
extensions.iter().for_each(|extension| {
extension
.append(&block)
.unwrap_or_else(|e| panic!("append block error {:?}", e))
});
indexer.append(&block).expect("append block should be OK");
};
// TODO: load tip first so extensions do not need to store their
// own tip?
let rollback_func = |tip_number: BlockNumber, tip_hash: packed::Byte32| {
indexer.rollback().expect("rollback block should be OK");
extensions.iter().for_each(|extension| {
extension
.rollback(tip_number, &tip_hash)
.unwrap_or_else(|e| panic!("rollback error {:?}", e))
});
}; | match self
.get_block_by_number(tip_number + 1, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
if block.parent_hash() == tip_hash {
info!("append {}, {}", block.number(), block.hash());
append_block_func(block.clone());
prune = (block.number() % PRUNE_INTERVAL) == 0;
} else {
info!("rollback {}, {}", tip_number, tip_hash);
rollback_func(tip_number, tip_hash);
}
}
Ok(None) => {
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
} else {
match self
.get_block_by_number(GENESIS_NUMBER, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
append_block_func(block);
}
Ok(None) => {
error!("ckb node returns an empty genesis block");
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get genesis block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
}
batch_store.commit().expect("commit should be OK");
let _ = *CURRENT_BLOCK_NUMBER.swap(Arc::new(tip));
if prune {
let store = BatchStore::create(self.store.clone())
.expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(store.clone(), KEEP_NUM, PRUNE_INTERVAL));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
store.clone(),
)
.expect("extension building failure");
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
indexer.prune().expect("indexer prune should be OK");
for extension in extensions.iter() {
extension
.prune(tip_number, &tip_hash, KEEP_NUM)
.expect("extension prune should be OK");
}
}
store.commit().expect("commit should be OK");
}
self.snapshot(tip);
}
}
async fn get_block_by_number(
&self,
block_number: BlockNumber,
use_hex_format: bool,
) -> Result<Option<BlockView>> {
self.ckb_client
.get_block_by_number(block_number, use_hex_format)
.await
.map(|res| res.map(Into::into))
}
fn snapshot(&self, height: u64) {
if height % self.snapshot_interval != 0 {
return;
}
let mut path = self.snapshot_path.clone();
path.push(height.to_string());
let store = self.store.clone();
tokio::spawn(async move {
if let Err(e) = create_checkpoint(store.inner(), path) {
error!("build {} checkpoint failed: {:?}", height, e);
}
});
}
fn change_current_epoch(&self, current_epoch: RationalU256) {
self.change_maturity_threshold(current_epoch.clone());
let mut epoch = CURRENT_EPOCH.write();
*epoch = current_epoch;
}
fn change_m |
let mut prune = false;
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
tip = tip_number;
| random_line_split |
lib.rs | error, info, warn};
use rocksdb::{checkpoint::Checkpoint, DB};
use tokio::time::{sleep, Duration};
use std::collections::HashSet;
use std::net::ToSocketAddrs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
const KEEP_NUM: u64 = 100;
const PRUNE_INTERVAL: u64 = 1000;
const GENESIS_NUMBER: u64 = 0;
// Adapted from https://github.com/nervosnetwork/ckb-indexer/blob/290ae55a2d2acfc3d466a69675a1a58fcade7f5d/src/service.rs#L25
// with extensions for more indexing features.
pub struct Service {
store: RocksdbStore,
ckb_client: CkbRpcClient,
poll_interval: Duration,
listen_address: String,
rpc_thread_num: usize,
network_type: NetworkType,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: PathBuf,
cellbase_maturity: RationalU256,
cheque_since: U256,
}
impl Service {
pub fn new(
store_path: &str,
listen_address: &str,
poll_interval: Duration,
rpc_thread_num: usize,
network_ty: &str,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: &str,
cellbase_maturity: u64,
ckb_uri: String,
cheque_since: u64,
) -> Self {
let store = RocksdbStore::new(store_path);
let ckb_client = CkbRpcClient::new(ckb_uri);
let network_type = NetworkType::from_raw_str(network_ty).expect("invalid network type");
let listen_address = listen_address.to_string();
let snapshot_path = Path::new(snapshot_path).to_path_buf();
let cellbase_maturity = RationalU256::from_u256(U256::from(cellbase_maturity));
let cheque_since: U256 = cheque_since.into();
info!("Mercury running in CKB {:?}", network_type);
Service {
store,
ckb_client,
poll_interval,
listen_address,
rpc_thread_num,
network_type,
extensions_config,
snapshot_interval,
snapshot_path,
cellbase_maturity,
cheque_since,
}
}
pub fn init(&self) -> Server {
let mut io_handler = IoHandler::new();
let mercury_rpc_impl = MercuryRpcImpl::new(
self.store.clone(),
self.network_type,
self.ckb_client.clone(),
self.cheque_since.clone(),
self.extensions_config.to_rpc_config(),
);
let indexer_rpc_impl = IndexerRpcImpl {
version: "0.2.1".to_string(),
store: self.store.clone(),
};
io_handler.extend_with(indexer_rpc_impl.to_delegate());
io_handler.extend_with(mercury_rpc_impl.to_delegate());
info!("Running!");
ServerBuilder::new(io_handler)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Null,
AccessControlAllowOrigin::Any,
]))
.threads(self.rpc_thread_num)
.health_api(("/ping", "ping"))
.start_http(
&self
.listen_address
.to_socket_addrs()
.expect("config listen_address parsed")
.next()
.expect("listen_address parsed"),
)
.expect("Start Jsonrpc HTTP service")
}
#[allow(clippy::cmp_owned)]
pub async fn start(&self) {
// 0.37.0 and above supports hex format
let use_hex_format = loop {
match self.ckb_client.local_node_info().await {
Ok(local_node_info) => {
break local_node_info.version > "0.36".to_owned();
}
Err(err) => {
// < 0.32.0 compatibility
if format!("#{}", err).contains("missing field") {
break false;
}
error!("cannot get local_node_info from ckb node: {}", err);
std::thread::sleep(self.poll_interval);
}
}
};
USE_HEX_FORMAT.swap(Arc::new(use_hex_format));
let use_hex = use_hex_format;
let client_clone = self.ckb_client.clone();
tokio::spawn(async move {
update_tx_pool_cache(client_clone, use_hex).await;
});
self.run(use_hex_format).await;
}
async fn run(&self, use_hex_format: bool) {
let mut tip = 0;
loop {
let batch_store =
BatchStore::create(self.store.clone()).expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(batch_store.clone(), KEEP_NUM, u64::MAX));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
batch_store.clone(),
)
.expect("extension building failure");
let append_block_func = |block: BlockView| {
extensions.iter().for_each(|extension| {
extension
.append(&block)
.unwrap_or_else(|e| panic!("append block error {:?}", e))
});
indexer.append(&block).expect("append block should be OK");
};
// TODO: load tip first so extensions do not need to store their
// own tip?
let rollback_func = |tip_number: BlockNumber, tip_hash: packed::Byte32| {
indexer.rollback().expect("rollback block should be OK");
extensions.iter().for_each(|extension| {
extension
.rollback(tip_number, &tip_hash)
.unwrap_or_else(|e| panic!("rollback error {:?}", e))
});
};
let mut prune = false;
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
tip = tip_number;
match self
.get_block_by_number(tip_number + 1, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
if block.parent_hash() == tip_hash {
info!("append {}, {}", block.number(), block.hash());
append_block_func(block.clone());
prune = (block.number() % PRUNE_INTERVAL) == 0;
} else {
info!("rollback {}, {}", tip_number, tip_hash);
rollback_func(tip_number, tip_hash);
}
}
Ok(None) => {
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
} else {
match self
.get_block_by_number(GENESIS_NUMBER, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
append_block_func(block);
}
Ok(None) => {
error!("ckb node returns an empty genesis block");
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get genesis block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
}
batch_store.commit().expect("commit should be OK");
let _ = *CURRENT_BLOCK_NUMBER.swap(Arc::new(tip));
if prune {
let store = BatchStore::create(self.store.clone())
.expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(store.clone(), KEEP_NUM, PRUNE_INTERVAL));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
store.clone(),
)
.expect("extension building failure");
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
indexer.prune().expect("indexer prune should be OK");
for extension in extensions.iter() {
extension
.prune(tip_number, &tip_hash, KEEP_NUM)
.expect("extension prune should be OK");
}
}
store.commit().expect("commit should be OK");
}
self.snapshot(tip);
}
}
async fn get_block_by_number(
&self,
block_number: BlockNumber,
use_hex_format: bool,
) -> Result<Option<BlockView>> {
self.ckb_client
.get_block_by_number(block_number, use_hex_format)
.await
.map(|res| res.map(Into::into))
}
fn snapshot(&self, height: u64) |
fn change_current_epoch(&self, current_epoch: RationalU256) {
self.change_maturity_threshold(current_epoch.clone());
let mut epoch = CURRENT_EPOCH.write();
*epoch = current_epoch;
}
fn change_m | {
if height % self.snapshot_interval != 0 {
return;
}
let mut path = self.snapshot_path.clone();
path.push(height.to_string());
let store = self.store.clone();
tokio::spawn(async move {
if let Err(e) = create_checkpoint(store.inner(), path) {
error!("build {} checkpoint failed: {:?}", height, e);
}
});
} | identifier_body |
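Both lib.rs rows above hard-code the same cadence checks: pruning is scheduled when block.number() % PRUNE_INTERVAL == 0, and snapshot returns early unless height % snapshot_interval == 0. Below is a small, self-contained sketch of just those two predicates; the constant is copied from the rows, and snapshot_interval is assumed to be non-zero since it comes from configuration.

// Constant as declared in the rows above.
const PRUNE_INTERVAL: u64 = 1000;

// Mirrors `prune = (block.number() % PRUNE_INTERVAL) == 0` in the indexing loop.
fn should_prune(block_number: u64) -> bool {
    block_number % PRUNE_INTERVAL == 0
}

// Mirrors the early-return guard at the top of snapshot(); snapshot_interval is assumed non-zero.
fn should_snapshot(height: u64, snapshot_interval: u64) -> bool {
    height % snapshot_interval == 0
}

fn main() {
    assert!(should_prune(3000));
    assert!(!should_prune(3001));
    assert!(should_snapshot(500, 100));
    assert!(!should_snapshot(550, 100));
}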
lib.rs | listen_address: String,
rpc_thread_num: usize,
network_type: NetworkType,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: PathBuf,
cellbase_maturity: RationalU256,
cheque_since: U256,
}
impl Service {
pub fn new(
store_path: &str,
listen_address: &str,
poll_interval: Duration,
rpc_thread_num: usize,
network_ty: &str,
extensions_config: ExtensionsConfig,
snapshot_interval: u64,
snapshot_path: &str,
cellbase_maturity: u64,
ckb_uri: String,
cheque_since: u64,
) -> Self {
let store = RocksdbStore::new(store_path);
let ckb_client = CkbRpcClient::new(ckb_uri);
let network_type = NetworkType::from_raw_str(network_ty).expect("invalid network type");
let listen_address = listen_address.to_string();
let snapshot_path = Path::new(snapshot_path).to_path_buf();
let cellbase_maturity = RationalU256::from_u256(U256::from(cellbase_maturity));
let cheque_since: U256 = cheque_since.into();
info!("Mercury running in CKB {:?}", network_type);
Service {
store,
ckb_client,
poll_interval,
listen_address,
rpc_thread_num,
network_type,
extensions_config,
snapshot_interval,
snapshot_path,
cellbase_maturity,
cheque_since,
}
}
pub fn init(&self) -> Server {
let mut io_handler = IoHandler::new();
let mercury_rpc_impl = MercuryRpcImpl::new(
self.store.clone(),
self.network_type,
self.ckb_client.clone(),
self.cheque_since.clone(),
self.extensions_config.to_rpc_config(),
);
let indexer_rpc_impl = IndexerRpcImpl {
version: "0.2.1".to_string(),
store: self.store.clone(),
};
io_handler.extend_with(indexer_rpc_impl.to_delegate());
io_handler.extend_with(mercury_rpc_impl.to_delegate());
info!("Running!");
ServerBuilder::new(io_handler)
.cors(DomainsValidation::AllowOnly(vec![
AccessControlAllowOrigin::Null,
AccessControlAllowOrigin::Any,
]))
.threads(self.rpc_thread_num)
.health_api(("/ping", "ping"))
.start_http(
&self
.listen_address
.to_socket_addrs()
.expect("config listen_address parsed")
.next()
.expect("listen_address parsed"),
)
.expect("Start Jsonrpc HTTP service")
}
#[allow(clippy::cmp_owned)]
pub async fn start(&self) {
// 0.37.0 and above supports hex format
let use_hex_format = loop {
match self.ckb_client.local_node_info().await {
Ok(local_node_info) => {
break local_node_info.version > "0.36".to_owned();
}
Err(err) => {
// < 0.32.0 compatibility
if format!("#{}", err).contains("missing field") {
break false;
}
error!("cannot get local_node_info from ckb node: {}", err);
std::thread::sleep(self.poll_interval);
}
}
};
USE_HEX_FORMAT.swap(Arc::new(use_hex_format));
let use_hex = use_hex_format;
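// Keep the tx-pool cache refreshed on a background task while the loop below indexes blocks.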
let client_clone = self.ckb_client.clone();
tokio::spawn(async move {
update_tx_pool_cache(client_clone, use_hex).await;
});
self.run(use_hex_format).await;
}
async fn run(&self, use_hex_format: bool) {
let mut tip = 0;
loop {
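// Each pass stages writes from the indexer and all extensions in a batch store, then commits them together after the block has been processed.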
let batch_store =
BatchStore::create(self.store.clone()).expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(batch_store.clone(), KEEP_NUM, u64::MAX));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
batch_store.clone(),
)
.expect("extension building failure");
let append_block_func = |block: BlockView| {
extensions.iter().for_each(|extension| {
extension
.append(&block)
.unwrap_or_else(|e| panic!("append block error {:?}", e))
});
indexer.append(&block).expect("append block should be OK");
};
// TODO: load tip first so extensions do not need to store their
// own tip?
let rollback_func = |tip_number: BlockNumber, tip_hash: packed::Byte32| {
indexer.rollback().expect("rollback block should be OK");
extensions.iter().for_each(|extension| {
extension
.rollback(tip_number, &tip_hash)
.unwrap_or_else(|e| panic!("rollback error {:?}", e))
});
};
let mut prune = false;
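// A prune pass is scheduled whenever the appended block number lands on a PRUNE_INTERVAL boundary.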
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
tip = tip_number;
match self
.get_block_by_number(tip_number + 1, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
if block.parent_hash() == tip_hash {
info!("append {}, {}", block.number(), block.hash());
append_block_func(block.clone());
prune = (block.number() % PRUNE_INTERVAL) == 0;
} else {
info!("rollback {}, {}", tip_number, tip_hash);
rollback_func(tip_number, tip_hash);
}
}
Ok(None) => {
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
} else {
match self
.get_block_by_number(GENESIS_NUMBER, use_hex_format)
.await
{
Ok(Some(block)) => {
self.change_current_epoch(block.epoch().to_rational());
append_block_func(block);
}
Ok(None) => {
error!("ckb node returns an empty genesis block");
sleep(self.poll_interval).await;
}
Err(err) => {
error!("cannot get genesis block from ckb node, error: {}", err);
sleep(self.poll_interval).await;
}
}
}
batch_store.commit().expect("commit should be OK");
let _ = *CURRENT_BLOCK_NUMBER.swap(Arc::new(tip));
if prune {
let store = BatchStore::create(self.store.clone())
.expect("batch store creation should be OK");
let indexer = Arc::new(Indexer::new(store.clone(), KEEP_NUM, PRUNE_INTERVAL));
let extensions = build_extensions(
self.network_type,
&self.extensions_config,
Arc::clone(&indexer),
store.clone(),
)
.expect("extension building failure");
if let Some((tip_number, tip_hash)) = indexer.tip().expect("get tip should be OK") {
indexer.prune().expect("indexer prune should be OK");
for extension in extensions.iter() {
extension
.prune(tip_number, &tip_hash, KEEP_NUM)
.expect("extension prune should be OK");
}
}
store.commit().expect("commit should be OK");
}
self.snapshot(tip);
}
}
async fn get_block_by_number(
&self,
block_number: BlockNumber,
use_hex_format: bool,
) -> Result<Option<BlockView>> {
self.ckb_client
.get_block_by_number(block_number, use_hex_format)
.await
.map(|res| res.map(Into::into))
}
fn snapshot(&self, height: u64) {
if height % self.snapshot_interval != 0 {
return;
}
let mut path = self.snapshot_path.clone();
path.push(height.to_string());
let store = self.store.clone();
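// Create the RocksDB checkpoint on a spawned task so snapshotting does not stall the indexing loop.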
tokio::spawn(async move {
if let Err(e) = create_checkpoint(store.inner(), path) {
error!("build {} checkpoint failed: {:?}", height, e);
}
});
}
fn change_current_epoch(&self, current_epoch: RationalU256) {
self.change_maturity_threshold(current_epoch.clone());
let mut epoch = CURRENT_EPOCH.write();
*epoch = current_epoch;
}
fn change_maturity_threshold(&self, current_epoch: RationalU256) {
if current_epoch < self.cellbase_maturity {
return;
}
let new = current_epoch - self.cellbase_maturity.clone();
let mut threshold = MATURE_THRESHOLD.write();
*threshold = new;
}
}
fn create_checkpoint(db: &DB, path: PathBuf) -> Result<()> {
Checkpoint::new(db)?.create_checkpoint(path)?;
Ok(())
}
async fn update_tx_pool_cache(ckb_client: CkbRpcClient, use_hex_format: bool) {
loop {
match ckb_client.get_raw_tx_pool(Some(use_hex_format)).await {
Ok(raw_pool) => handle_raw_tx_pool(&ckb_client, raw_pool).await,
Err(e) => error!("get raw tx pool error {:?}", e),
}
sleep(Duration::from_millis(350)).await;
}
}
async fn | handle_raw_tx_pool | identifier_name |
|
raw.go | for i, mo := range m.bufferedChunk.Values {
if ascending && mo.Time > timeBoundary {
ind = i
break
} else if !ascending && mo.Time < timeBoundary {
ind = i
break
}
}
// Add up to the index to the values
if chunkedOutput == nil {
chunkedOutput = &MapperOutput{
Name: m.bufferedChunk.Name,
Tags: m.bufferedChunk.Tags,
CursorKey: m.bufferedChunk.key(),
}
chunkedOutput.Values = m.bufferedChunk.Values[:ind]
} else {
chunkedOutput.Values = append(chunkedOutput.Values, m.bufferedChunk.Values[:ind]...)
}
// Clear out the values being sent out, keep the remainder.
m.bufferedChunk.Values = m.bufferedChunk.Values[ind:]
// If we emptied out all the values, clear the mapper's buffered chunk.
if len(m.bufferedChunk.Values) == 0 {
m.bufferedChunk = nil
}
}
// Protect against none of the mappers producing any output.
if chunkedOutput == nil {
continue
}
if ascending {
// Sort the values by time first so we can then handle offset and limit
sort.Sort(MapperValues(chunkedOutput.Values))
} else {
sort.Sort(sort.Reverse(MapperValues(chunkedOutput.Values)))
}
// Now that we have full name and tag details, initialize the rowWriter.
// The Name and Tags will be the same for all mappers.
if rowWriter == nil {
rowWriter = &limitedRowWriter{
limit: e.stmt.Limit,
offset: e.stmt.Offset,
chunkSize: e.chunkSize,
name: chunkedOutput.Name,
tags: chunkedOutput.Tags,
selectNames: selectFields,
aliasNames: aliasFields,
fields: e.stmt.Fields,
c: out,
}
}
if e.stmt.HasDerivative() {
interval, err := derivativeInterval(e.stmt)
if err != nil {
out <- &models.Row{Err: err}
return
}
rowWriter.transformer = &RawQueryDerivativeProcessor{
IsNonNegative: e.stmt.FunctionCalls()[0].Name == "non_negative_derivative",
DerivativeInterval: interval,
}
}
// Emit the data via the limiter.
if limited := rowWriter.Add(chunkedOutput.Values); limited {
// Limit for this tagset was reached, mark it and start draining a new tagset.
e.limitTagSet(chunkedOutput.key())
continue
}
// Check to see if our client disconnected, or it has been too long since
// we were asked for data...
select {
case <-closing:
out <- &models.Row{Err: fmt.Errorf("execute was closed by caller")}
break
default:
// do nothing
}
}
close(out)
}
// mappersDrained returns whether all the executor's Mappers have been drained of data.
func (e *RawExecutor) mappersDrained() bool {
for _, m := range e.mappers {
if !m.drained {
return false
}
}
return true
}
// nextMapperTagSet returns the alphabetically lowest tagset across all Mappers.
func (e *RawExecutor) nextMapperTagSet() string {
tagset := ""
for _, m := range e.mappers {
if m.bufferedChunk != nil {
if tagset == "" {
tagset = m.bufferedChunk.key()
} else if m.bufferedChunk.key() < tagset {
tagset = m.bufferedChunk.key()
}
}
}
return tagset
}
// nextMapperLowestTime returns the lowest minimum time across all Mappers, for the given tagset.
func (e *RawExecutor) nextMapperLowestTime(tagset string) int64 {
minTime := int64(math.MaxInt64)
for _, m := range e.mappers {
if !m.drained && m.bufferedChunk != nil {
if m.bufferedChunk.key() != tagset {
continue
}
t := m.bufferedChunk.Values[len(m.bufferedChunk.Values)-1].Time
if t < minTime {
minTime = t
}
}
}
return minTime
}
// nextMapperHighestTime returns the highest time across all Mappers, for the given tagset.
func (e *RawExecutor) nextMapperHighestTime(tagset string) int64 {
maxTime := int64(math.MinInt64)
for _, m := range e.mappers {
if !m.drained && m.bufferedChunk != nil {
if m.bufferedChunk.key() != tagset {
continue
}
t := m.bufferedChunk.Values[0].Time
if t > maxTime {
maxTime = t
}
}
}
return maxTime
}
// tagSetIsLimited returns whether data for the given tagset has been LIMITed.
func (e *RawExecutor) tagSetIsLimited(tagset string) bool {
_, ok := e.limitedTagSets[tagset]
return ok
}
// limitTagSet marks the given tagset as LIMITed.
func (e *RawExecutor) limitTagSet(tagset string) {
e.limitedTagSets[tagset] = struct{}{}
}
// limitedRowWriter accepts raw mapper values, and will emit those values as rows in chunks
// of the given size. If the chunk size is 0, no chunking will be performed. In addition if
// limit is reached, outstanding values will be emitted. If limit is zero, no limit is enforced.
type limitedRowWriter struct {
chunkSize int
limit int
offset int
name string
tags map[string]string
fields influxql.Fields
selectNames []string
aliasNames []string
c chan *models.Row
currValues []*MapperValue
totalOffSet int
totalSent int
transformer interface {
Process(input []*MapperValue) []*MapperValue
}
}
// Add accepts a slice of values, and will emit those values as per chunking requirements.
// If limited is returned as true, the limit was also reached and no more values should be
// added. In that case only up to the limit of values are emitted.
func (r *limitedRowWriter) Add(values []*MapperValue) (limited bool) {
if r.currValues == nil {
r.currValues = make([]*MapperValue, 0, r.chunkSize)
}
// Enforce offset.
if r.totalOffSet < r.offset {
// Still some offsetting to do.
offsetRequired := r.offset - r.totalOffSet
if offsetRequired >= len(values) {
r.totalOffSet += len(values)
return false
} else {
// Drop leading values and keep going.
values = values[offsetRequired:]
r.totalOffSet += offsetRequired
}
}
r.currValues = append(r.currValues, values...)
// Check limit.
limitReached := r.limit > 0 && r.totalSent+len(r.currValues) >= r.limit
if limitReached {
// Limit will be satisfied with current values. Truncate 'em.
r.currValues = r.currValues[:r.limit-r.totalSent]
}
// Is chunking in effect?
if r.chunkSize != IgnoredChunkSize {
// Chunking level reached?
for len(r.currValues) >= r.chunkSize {
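// index works out to exactly r.chunkSize here, so each loop iteration emits one full chunk.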
index := len(r.currValues) - (len(r.currValues) - r.chunkSize)
r.c <- r.processValues(r.currValues[:index])
r.currValues = r.currValues[index:]
}
// After values have been sent out by chunking, there may still be some
// values left, if the remainder is less than the chunk size. But if the
// limit has been reached, kick them out.
if len(r.currValues) > 0 && limitReached {
r.c <- r.processValues(r.currValues)
r.currValues = nil
}
} else if limitReached {
// No chunking in effect, but the limit has been reached.
r.c <- r.processValues(r.currValues)
r.currValues = nil
}
return limitReached
}
// Flush instructs the limitedRowWriter to emit any pending values as a single row,
// adhering to any | {
if m.drained {
continue
}
chunkBoundary := false
if ascending {
chunkBoundary = m.bufferedChunk.Values[0].Time > timeBoundary
} else {
chunkBoundary = m.bufferedChunk.Values[0].Time < timeBoundary
}
// This mapper's next chunk is not for the next tagset, or the very first value of
// the chunk is at a higher acceptable timestamp. Skip it.
if m.bufferedChunk.key() != tagset || chunkBoundary {
continue
}
// Find the index of the point up to the min.
ind := len(m.bufferedChunk.Values) | conditional_block |
|
raw.go | // Add up to the index to the values
if chunkedOutput == nil {
chunkedOutput = &MapperOutput{ | chunkedOutput.Values = m.bufferedChunk.Values[:ind]
} else {
chunkedOutput.Values = append(chunkedOutput.Values, m.bufferedChunk.Values[:ind]...)
}
// Clear out the values being sent out, keep the remainder.
m.bufferedChunk.Values = m.bufferedChunk.Values[ind:]
// If we emptied out all the values, clear the mapper's buffered chunk.
if len(m.bufferedChunk.Values) == 0 {
m.bufferedChunk = nil
}
}
// Protect against none of the mappers producing any output.
if chunkedOutput == nil {
continue
}
if ascending {
// Sort the values by time first so we can then handle offset and limit
sort.Sort(MapperValues(chunkedOutput.Values))
} else {
sort.Sort(sort.Reverse(MapperValues(chunkedOutput.Values)))
}
// Now that we have full name and tag details, initialize the rowWriter.
// The Name and Tags will be the same for all mappers.
if rowWriter == nil {
rowWriter = &limitedRowWriter{
limit: e.stmt.Limit,
offset: e.stmt.Offset,
chunkSize: e.chunkSize,
name: chunkedOutput.Name,
tags: chunkedOutput.Tags,
selectNames: selectFields,
aliasNames: aliasFields,
fields: e.stmt.Fields,
c: out,
}
}
if e.stmt.HasDerivative() {
interval, err := derivativeInterval(e.stmt)
if err != nil {
out <- &models.Row{Err: err}
return
}
rowWriter.transformer = &RawQueryDerivativeProcessor{
IsNonNegative: e.stmt.FunctionCalls()[0].Name == "non_negative_derivative",
DerivativeInterval: interval,
}
}
// Emit the data via the limiter.
if limited := rowWriter.Add(chunkedOutput.Values); limited {
// Limit for this tagset was reached, mark it and start draining a new tagset.
e.limitTagSet(chunkedOutput.key())
continue
}
// Check to see if our client disconnected, or it has been too long since
// we were asked for data...
select {
case <-closing:
out <- &models.Row{Err: fmt.Errorf("execute was closed by caller")}
break
default:
// do nothing
}
}
close(out)
}
// mappersDrained returns whether all the executor's Mappers have been drained of data.
func (e *RawExecutor) mappersDrained() bool {
for _, m := range e.mappers {
if !m.drained {
return false
}
}
return true
}
// nextMapperTagSet returns the alphabetically lowest tagset across all Mappers.
func (e *RawExecutor) nextMapperTagSet() string {
tagset := ""
for _, m := range e.mappers {
if m.bufferedChunk != nil {
if tagset == "" {
tagset = m.bufferedChunk.key()
} else if m.bufferedChunk.key() < tagset {
tagset = m.bufferedChunk.key()
}
}
}
return tagset
}
// nextMapperLowestTime returns the lowest minimum time across all Mappers, for the given tagset.
func (e *RawExecutor) nextMapperLowestTime(tagset string) int64 {
minTime := int64(math.MaxInt64)
for _, m := range e.mappers {
if !m.drained && m.bufferedChunk != nil {
if m.bufferedChunk.key() != tagset {
continue
}
t := m.bufferedChunk.Values[len(m.bufferedChunk.Values)-1].Time
if t < minTime {
minTime = t
}
}
}
return minTime
}
// nextMapperHighestTime returns the highest time across all Mappers, for the given tagset.
func (e *RawExecutor) nextMapperHighestTime(tagset string) int64 {
maxTime := int64(math.MinInt64)
for _, m := range e.mappers {
if !m.drained && m.bufferedChunk != nil {
if m.bufferedChunk.key() != tagset {
continue
}
t := m.bufferedChunk.Values[0].Time
if t > maxTime {
maxTime = t
}
}
}
return maxTime
}
// tagSetIsLimited returns whether data for the given tagset has been LIMITed.
func (e *RawExecutor) tagSetIsLimited(tagset string) bool {
_, ok := e.limitedTagSets[tagset]
return ok
}
// limitTagSet marks the given tagset as LIMITed.
func (e *RawExecutor) limitTagSet(tagset string) {
e.limitedTagSets[tagset] = struct{}{}
}
// limitedRowWriter accepts raw mapper values, and will emit those values as rows in chunks
// of the given size. If the chunk size is 0, no chunking will be performed. In addition if
// limit is reached, outstanding values will be emitted. If limit is zero, no limit is enforced.
type limitedRowWriter struct {
chunkSize int
limit int
offset int
name string
tags map[string]string
fields influxql.Fields
selectNames []string
aliasNames []string
c chan *models.Row
currValues []*MapperValue
totalOffSet int
totalSent int
transformer interface {
Process(input []*MapperValue) []*MapperValue
}
}
// Add accepts a slice of values, and will emit those values as per chunking requirements.
// If limited is returned as true, the limit was also reached and no more values should be
// added. In that case only up to the limit of values are emitted.
func (r *limitedRowWriter) Add(values []*MapperValue) (limited bool) {
if r.currValues == nil {
r.currValues = make([]*MapperValue, 0, r.chunkSize)
}
// Enforce offset.
if r.totalOffSet < r.offset {
// Still some offsetting to do.
offsetRequired := r.offset - r.totalOffSet
if offsetRequired >= len(values) {
r.totalOffSet += len(values)
return false
} else {
// Drop leading values and keep going.
values = values[offsetRequired:]
r.totalOffSet += offsetRequired
}
}
r.currValues = append(r.currValues, values...)
// Check limit.
limitReached := r.limit > 0 && r.totalSent+len(r.currValues) >= r.limit
if limitReached {
// Limit will be satisfied with current values. Truncate 'em.
r.currValues = r.currValues[:r.limit-r.totalSent]
}
// Is chunking in effect?
if r.chunkSize != IgnoredChunkSize {
// Chunking level reached?
for len(r.currValues) >= r.chunkSize {
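// index works out to exactly r.chunkSize here, so each loop iteration emits one full chunk.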
index := len(r.currValues) - (len(r.currValues) - r.chunkSize)
r.c <- r.processValues(r.currValues[:index])
r.currValues = r.currValues[index:]
}
// After values have been sent out by chunking, there may still be some
// values left, if the remainder is less than the chunk size. But if the
// limit has been reached, kick them out.
if len(r.currValues) > 0 && limitReached {
r.c <- r.processValues(r.currValues)
r.currValues = nil
}
} else if limitReached {
// No chunking in effect, but the limit has been reached.
r.c <- r.processValues(r.currValues)
r.currValues = nil
}
return limitReached
}
// Flush instructs the limitedRowWriter to emit any pending values as a single row,
// adhering to any limits. Chunking is not enforced.
func (r *limitedRowWriter) Flush() {
if r == nil {
return
}
// If at least some rows were sent, and no values are pending, then don't
// emit anything, since at least 1 row was previously emitted. This ensures
// that if no rows were ever sent, at least 1 will be emitted, even an empty row.
if r.totalSent != 0 && len(r.currValues) == 0 {
return
}
if r.limit > 0 && len(r.currValues) > r.limit {
r.currValues = r.currValues[:r.limit]
}
r.c <- r.processValues(r.currValues)
r.currValues = nil
}
// processValues emits the given values in a single row.
func (r *limitedRowWriter) processValues(values []*MapperValue) *models.Row {
defer func() {
r.totalSent += len(values)
}()
selectNames := r.selectNames
aliasNames | Name: m.bufferedChunk.Name,
Tags: m.bufferedChunk.Tags,
CursorKey: m.bufferedChunk.key(),
} | random_line_split |
raw.go | .Expr.(*influxql.BinaryExpr); ok {
hasMath = true
} else if _, ok := f.Expr.(*influxql.ParenExpr); ok {
hasMath = true
}
}
if !hasMath {
return results
}
processors := make([]influxql.Processor, len(fields))
startIndex := 1
for i, f := range fields {
processors[i], startIndex = influxql.GetProcessor(f.Expr, startIndex)
}
mathResults := make([][]interface{}, len(results))
for i := range mathResults {
mathResults[i] = make([]interface{}, len(fields)+1)
// put the time in
mathResults[i][0] = results[i][0]
for j, p := range processors {
mathResults[i][j+1] = p(results[i])
}
}
return mathResults
}
// ProcessAggregateDerivative returns the derivatives of an aggregate result set
func ProcessAggregateDerivative(results [][]interface{}, isNonNegative bool, interval time.Duration) [][]interface{} {
// Return early if we can't calculate derivatives
if len(results) == 0 {
return results
}
// If we only have 1 value, then the value did not change, so return
// a single row w/ 0.0
if len(results) == 1 {
return [][]interface{}{
[]interface{}{results[0][0], 0.0},
}
}
// Otherwise calculate the derivatives as the difference between consecutive
// points divided by the elapsed time. Then normalize to the requested
// interval.
derivatives := [][]interface{}{}
for i := 1; i < len(results); i++ {
prev := results[i-1]
cur := results[i]
// If current value is nil, append nil for the value
if prev[1] == nil || cur[1] == nil {
derivatives = append(derivatives, []interface{}{
cur[0], nil,
})
continue
}
// Check the value's type to ensure it's numeric; if not, return a nil result. We only check the first value
// because derivatives cannot be combined with other aggregates currently.
prevValue, prevOK := toFloat64(prev[1])
curValue, curOK := toFloat64(cur[1])
if !prevOK || !curOK {
derivatives = append(derivatives, []interface{}{
cur[0], nil,
})
continue
}
elapsed := cur[0].(time.Time).Sub(prev[0].(time.Time))
diff := curValue - prevValue
value := 0.0
if elapsed > 0 {
value = float64(diff) / (float64(elapsed) / float64(interval))
}
// Drop negative values for non-negative derivatives
if isNonNegative && diff < 0 {
continue
}
val := []interface{}{
cur[0],
value,
}
derivatives = append(derivatives, val)
}
return derivatives
}
// derivativeInterval returns the time interval for the one (and only) derivative func
func derivativeInterval(stmt *influxql.SelectStatement) (time.Duration, error) {
if len(stmt.FunctionCalls()[0].Args) == 2 {
return stmt.FunctionCalls()[0].Args[1].(*influxql.DurationLiteral).Val, nil
}
interval, err := stmt.GroupByInterval()
if err != nil {
return 0, err
}
if interval > 0 {
return interval, nil
}
return time.Second, nil
}
// resultsEmpty will return true if all the result values are empty or contain only nulls
func resultsEmpty(resultValues [][]interface{}) bool {
for _, vals := range resultValues {
// start the loop at 1 because we want to skip over the time value
for i := 1; i < len(vals); i++ {
if vals[i] != nil {
return false
}
}
}
return true
}
// Convert commonly understood types to a float64
// Valid types are int64, float64 or PositionPoint with a Value of int64 or float64
// The second returned boolean indicates if the conversion was successful.
func toFloat64(v interface{}) (float64, bool) {
switch value := v.(type) {
case int64:
return float64(value), true
case float64:
return value, true
case PositionPoint:
return toFloat64(value.Value)
}
return 0, false
}
func int64toFloat64(v interface{}) float64 {
switch value := v.(type) {
case int64:
return float64(value)
case float64:
return value
}
panic(fmt.Sprintf("expected either int64 or float64, got %T", v))
}
// RawMapper runs the map phase for non-aggregate, raw SELECT queries.
type RawMapper struct {
shard *Shard
stmt *influxql.SelectStatement
qmin, qmax int64 // query time range
tx Tx
cursors []*TagSetCursor
cursorIndex int
selectFields []string
selectTags []string
whereFields []string
ChunkSize int
}
// NewRawMapper returns a new instance of RawMapper.
func NewRawMapper(sh *Shard, stmt *influxql.SelectStatement) *RawMapper {
return &RawMapper{
shard: sh,
stmt: stmt,
}
}
// Open opens and initializes the mapper.
func (m *RawMapper) Open() error {
// Ignore if node has the shard but hasn't written to it yet.
if m.shard == nil {
return nil
}
// Rewrite statement.
stmt, err := m.shard.index.RewriteSelectStatement(m.stmt)
if err != nil {
return err
}
m.stmt = stmt
// Set all time-related parameters on the mapper.
m.qmin, m.qmax = influxql.TimeRangeAsEpochNano(m.stmt.Condition)
// Get a read-only transaction.
tx, err := m.shard.engine.Begin(false)
if err != nil {
return err
}
m.tx = tx
// Collect measurements.
mms := Measurements(m.shard.index.MeasurementsByName(m.stmt.SourceNames()))
m.selectFields = mms.SelectFields(m.stmt)
m.selectTags = mms.SelectTags(m.stmt)
m.whereFields = mms.WhereFields(m.stmt)
// Open cursors for each measurement.
for _, mm := range mms {
if err := m.openMeasurement(mm); err != nil {
return err
}
}
// Remove cursors if there are no SELECT fields.
if len(m.selectFields) == 0 {
m.cursors = nil
}
return nil
}
func (m *RawMapper) openMeasurement(mm *Measurement) error {
// Validate that ANY GROUP BY is not a field for the measurement.
if err := mm.ValidateGroupBy(m.stmt); err != nil {
return err
}
// Validate the fields and tags asked for exist and keep track of which are in the select vs the where
selectFields := mm.SelectFields(m.stmt)
selectTags := mm.SelectTags(m.stmt)
fields := uniqueStrings(m.selectFields, m.whereFields)
// If we only have tags in our select clause we just return
if len(selectFields) == 0 && len(selectTags) > 0 {
return fmt.Errorf("statement must have at least one field in select clause")
}
// Calculate tag sets and apply SLIMIT/SOFFSET.
tagSets, err := mm.DimensionTagSets(m.stmt)
if err != nil {
return err
}
tagSets = m.stmt.LimitTagSets(tagSets)
// Create all cursors for reading the data from this shard.
ascending := m.stmt.TimeAscending()
for _, t := range tagSets {
cursors := []*TagsCursor{}
for i, key := range t.SeriesKeys {
c := m.tx.Cursor(key, fields, m.shard.FieldCodec(mm.Name), ascending)
if c == nil {
continue
}
seriesTags := m.shard.index.TagsForSeries(key)
cm := NewTagsCursor(c, t.Filters[i], seriesTags)
cursors = append(cursors, cm)
}
tsc := NewTagSetCursor(mm.Name, t.Tags, cursors, ascending)
tsc.SelectFields = m.selectFields
if ascending {
tsc.Init(m.qmin)
} else {
tsc.Init(m.qmax)
}
m.cursors = append(m.cursors, tsc)
}
sort.Sort(TagSetCursors(m.cursors))
return nil
}
// Close closes the mapper.
func (m *RawMapper) Close() {
if m != nil && m.tx != nil {
m.tx.Rollback()
}
}
// TagSets returns the list of tag sets for which this mapper has data.
func (m *RawMapper) TagSets() []string { return TagSetCursors(m.cursors).Keys() }
// Fields returns all SELECT fields.
func (m *RawMapper) Fields() []string | { return append(m.selectFields, m.selectTags...) } | identifier_body |
|
raw.go | (closing <-chan struct{}) <-chan *models.Row {
out := make(chan *models.Row, 0)
go e.execute(out, closing)
return out
}
func (e *RawExecutor) execute(out chan *models.Row, closing <-chan struct{}) {
// It's important that all resources are released when execution completes.
defer e.close()
// Open the mappers.
for _, m := range e.mappers {
if err := m.Open(); err != nil {
out <- &models.Row{Err: err}
return
}
}
// Get the distinct fields across all mappers.
var selectFields, aliasFields []string
if e.stmt.HasWildcard() {
sf := newStringSet()
for _, m := range e.mappers {
sf.add(m.Fields()...)
}
selectFields = sf.list()
aliasFields = selectFields
} else {
selectFields = e.stmt.Fields.Names()
aliasFields = e.stmt.Fields.AliasNames()
}
// Used to read ahead chunks from mappers.
var rowWriter *limitedRowWriter
var currTagset string
// Keep looping until all mappers drained.
var err error
for {
// Get the next chunk from each Mapper.
for _, m := range e.mappers {
if m.drained {
continue
}
// Set the next buffered chunk on the mapper, or mark it drained.
for {
if m.bufferedChunk == nil {
m.bufferedChunk, err = m.NextChunk()
if err != nil {
out <- &models.Row{Err: err}
return
}
if m.bufferedChunk == nil {
// Mapper can do no more for us.
m.drained = true
break
}
// If the SELECT query is on more than 1 field, but the chunks values from the Mappers
// only contain a single value, create k-v pairs using the field name of the chunk
// and the value of the chunk. If there is only 1 SELECT field across all mappers then
// there is no need to create k-v pairs, and there is no need to distinguish field data,
// as it is all for the *same* field.
if len(selectFields) > 1 && len(m.bufferedChunk.Fields) == 1 {
fieldKey := m.bufferedChunk.Fields[0]
for i := range m.bufferedChunk.Values {
field := map[string]interface{}{fieldKey: m.bufferedChunk.Values[i].Value}
m.bufferedChunk.Values[i].Value = field
}
}
}
if e.tagSetIsLimited(m.bufferedChunk.Name) {
// chunk's tagset is limited, so no good. Try again.
m.bufferedChunk = nil
continue
}
// This mapper has a chunk available, and it is not limited.
break
}
}
// All Mappers done?
if e.mappersDrained() {
rowWriter.Flush()
break
}
// Send out data for the next alphabetically-lowest tagset. All Mappers emit data in this order,
// so by always continuing with the lowest tagset until it is finished, we process all data in
// the required order, and don't "miss" any.
tagset := e.nextMapperTagSet()
if tagset != currTagset {
currTagset = tagset
// Tagset has changed, time for a new rowWriter. Be sure to kick out any residual values.
rowWriter.Flush()
rowWriter = nil
}
ascending := true
if len(e.stmt.SortFields) > 0 {
ascending = e.stmt.SortFields[0].Ascending
}
var timeBoundary int64
if ascending {
// Process the mapper outputs. We can send out everything up to the min of the last time
// of the chunks for the next tagset.
timeBoundary = e.nextMapperLowestTime(tagset)
} else {
timeBoundary = e.nextMapperHighestTime(tagset)
}
// Now empty out all the chunks up to the min time. Create new output struct for this data.
var chunkedOutput *MapperOutput
for _, m := range e.mappers {
if m.drained {
continue
}
chunkBoundary := false
if ascending {
chunkBoundary = m.bufferedChunk.Values[0].Time > timeBoundary
} else {
chunkBoundary = m.bufferedChunk.Values[0].Time < timeBoundary
}
// This mapper's next chunk is not for the next tagset, or the very first value of
// the chunk is at a higher acceptable timestamp. Skip it.
if m.bufferedChunk.key() != tagset || chunkBoundary {
continue
}
// Find the index of the point up to the min.
ind := len(m.bufferedChunk.Values)
for i, mo := range m.bufferedChunk.Values {
if ascending && mo.Time > timeBoundary {
ind = i
break
} else if !ascending && mo.Time < timeBoundary {
ind = i
break
}
}
// Add up to the index to the values
if chunkedOutput == nil {
chunkedOutput = &MapperOutput{
Name: m.bufferedChunk.Name,
Tags: m.bufferedChunk.Tags,
CursorKey: m.bufferedChunk.key(),
}
chunkedOutput.Values = m.bufferedChunk.Values[:ind]
} else {
chunkedOutput.Values = append(chunkedOutput.Values, m.bufferedChunk.Values[:ind]...)
}
// Clear out the values being sent out, keep the remainder.
m.bufferedChunk.Values = m.bufferedChunk.Values[ind:]
// If we emptied out all the values, clear the mapper's buffered chunk.
if len(m.bufferedChunk.Values) == 0 {
m.bufferedChunk = nil
}
}
// Protect against none of the mappers producing any output.
if chunkedOutput == nil {
continue
}
if ascending {
// Sort the values by time first so we can then handle offset and limit
sort.Sort(MapperValues(chunkedOutput.Values))
} else {
sort.Sort(sort.Reverse(MapperValues(chunkedOutput.Values)))
}
// Now that we have full name and tag details, initialize the rowWriter.
// The Name and Tags will be the same for all mappers.
if rowWriter == nil {
rowWriter = &limitedRowWriter{
limit: e.stmt.Limit,
offset: e.stmt.Offset,
chunkSize: e.chunkSize,
name: chunkedOutput.Name,
tags: chunkedOutput.Tags,
selectNames: selectFields,
aliasNames: aliasFields,
fields: e.stmt.Fields,
c: out,
}
}
if e.stmt.HasDerivative() {
interval, err := derivativeInterval(e.stmt)
if err != nil {
out <- &models.Row{Err: err}
return
}
rowWriter.transformer = &RawQueryDerivativeProcessor{
IsNonNegative: e.stmt.FunctionCalls()[0].Name == "non_negative_derivative",
DerivativeInterval: interval,
}
}
// Emit the data via the limiter.
if limited := rowWriter.Add(chunkedOutput.Values); limited {
// Limit for this tagset was reached, mark it and start draining a new tagset.
e.limitTagSet(chunkedOutput.key())
continue
}
// Check to see if our client disconnected, or it has been too long since
// we were asked for data...
select {
case <-closing:
out <- &models.Row{Err: fmt.Errorf("execute was closed by caller")}
break
default:
// do nothing
}
}
close(out)
}
// mappersDrained returns whether all the executor's Mappers have been drained of data.
func (e *RawExecutor) mappersDrained() bool {
for _, m := range e.mappers {
if !m.drained {
return false
}
}
return true
}
// nextMapperTagSet returns the alphabetically lowest tagset across all Mappers.
func (e *RawExecutor) nextMapperTagSet() string {
tagset := ""
for _, m := range e.mappers {
if m.bufferedChunk != nil {
if tagset == "" {
tagset = m.bufferedChunk.key()
} else if m.bufferedChunk.key() < tagset {
tagset = m.bufferedChunk.key()
}
}
}
return tagset
}
// nextMapperLowestTime returns the lowest minimum time across all Mappers, for the given tagset.
func (e *RawExecutor) nextMapperLowestTime(tagset string) int64 {
minTime := int64(math.MaxInt64)
for _, m := range e.mappers {
if !m.drained && | Execute | identifier_name |
|
hcrml_parser.py | _filename()
# For output type 'hcr', write the binary repository file
if self.output_obj.type == 'hcr':
self.logger.info("Generating binary repository to '%s'" % outputfile)
writer = HcrWriter()
repo = self.output_obj.get_hcr_repository()
data = writer.get_repository_bindata(repo)
f = context.create_file(outputfile, mode='wb')
#f = open(outputfile,'wb')
try: f.write(data)
finally: f.close()
elif self.output_obj.type == 'header':
self.logger.info("Generating header file to '%s'" % outputfile)
writer = HeaderWriter(outputfile, self.output_obj)
writer.write(context)
elif self.output_obj.type == None:
# The HCRML file contains no <output> element, so no output should
# be generated
pass
def get_refs(self):
return self.refs
def list_output_files(self):
"""
Return a list of output files as an array.
"""
fname = self.__get_output_filename()
return [fname] if fname else []
def __get_output_filename(self):
if self.output_obj.file != None:
return os.path.normpath(os.path.join(self.output, self.output_obj.file))
else:
return None
class HcrmlReader(plugin.ReaderBase):
NAMESPACE = 'http://www.symbianfoundation.org/xml/hcrml/1'
NAMESPACE_ID = 'hcrml'
ROOT_ELEMENT_NAME = 'hcr'
FILE_EXTENSIONS = ['hcrml']
def __init__(self, resource_ref, configuration):
self.configuration = configuration
self.hcrml_file = resource_ref
self.refs = []
self.namespaces = [self.NAMESPACE]
self.doc = None
@classmethod
def read_impl(cls, resource_ref, configuration, etree):
reader = HcrmlReader(resource_ref, configuration)
reader.doc = etree
impl = HcrmlImpl(resource_ref, configuration)
impl.output_obj = reader.read_hcrml_output()
impl.refs = reader.refs
return impl
@classmethod
def get_schema_data(cls):
return pkg_resources.resource_string('hcrplugin', 'xsd/hcrml.xsd')
def read_hcrml_output(self, ignore_includes=False):
output = Output()
# There should only be one <output> element, so use find()
out_elem = self.doc.find("{%s}output" % self.namespaces[0])
if out_elem != None:
version = out_elem.get('version')
read_only = out_elem.get('readOnly')
file = out_elem.get('file')
type = out_elem.get('type')
if type == None or type == '':
raise NoTypeDefinedInOutPutTagError("Type attribute missing in hcrml file")
if type not in ('hcr', 'header'):
raise InvalidTypeDefinedInOutPutTagError("Type attribute invalid in hcrml file: %s" % type)
output.version = version
output.read_only = read_only
output.file = file
output.type = type
# An <output> element may contain <include> elements for including other
# HCRML files, so read and include categories from those
if not ignore_includes:
included_files = self.read_hcrml_includes(out_elem)
read_categories = self.read_categories_from_hcrml_files(included_files)
output.categories.extend(read_categories)
""" output tag is not mandatory, but there should be some categories included """
for cat_elem in self.doc.getiterator("{%s}category" % self.namespaces[0]):
category = self.read_hrcml_category(cat_elem)
output.categories.append(category)
return output
def read_hcrml_includes(self, output_elem):
"""
Read all <include> elements under an <output> element.
@return: List of other HCRML files to include.
"""
result = []
include_refs = []
for include_elem in output_elem.findall("{%s}include" % self.namespaces[0]):
ref = include_elem.get('ref')
if ref != None: include_refs.append(ref)
if include_refs:
# There are include refs, construct the list of files that should
# be included
all_files = self.configuration.list_resources()
included_files = []
for ref in include_refs:
files_by_ref = self.filter_file_list_by_include_ref(all_files, ref)
result.extend(files_by_ref)
# Make sure that no file is in the list more than once
result = list(set(result))
return result
def read_categories_from_hcrml_files(self, files):
"""
Read all categories from the list of the given HCRML files.
"""
categories = []
for file in files:
# Skip the current file
if os.path.normpath(file) == os.path.normpath(self.hcrml_file):
continue
# Read the <output> element and append its categories to the result list
reader = HcrmlReader(file, self.configuration)
reader.doc = self._read_xml_doc_from_resource(file, self.configuration)
# Read the output element, but ignore includes, since we are
# currently reading from inside an include
output_obj = reader.read_hcrml_output(ignore_includes=True)
categories.extend(output_obj.categories)
return categories
def read_hrcml_category(self,cat_elem):
|
def read_hcrml_setting(self,setting_elem):
ref = setting_elem.get('ref')
if ref == None or ref == '':
raise NoRefInHcrmlFileError("No ref in setting tag attribute implemented in hcrml file!")
else:
self.refs.append(ref)
type = setting_elem.get('type')
if type == None or type == '':
raise NoTypeAttributeInSettingHcrmlFileError("No type in setting tag attribute implemented in hcrml file ref: %s" % ref )
name = setting_elem.get('name')
if name == None or name == '':
raise NoNameAttributeInSettingHcrmlFileError("No name in setting tag attribute implemented in hcrml file ref: %s" % ref )
id = setting_elem.get('id')
if id == None or id == '':
raise NoIdAttributeInSettingHcrmlFileError("No id in setting tag attribute implemented in hcrml file ref: %s" % ref )
comment = setting_elem.get('comment')
if comment == None:
comment = ''
setting = Setting(self.configuration)
setting.comment = comment
setting.name = name
setting.ref = ref
try:
setting.id = long(id)
except ValueError:
setting.id = long(id, 16)
setting.type = type
setting.xml_elem = setting_elem
for flag_elem in setting_elem.getiterator("{%s}flags" % self.namespaces[0]):
flag = self.read_hrcml_flags(setting_elem)
setting.flag = flag
return setting
def read_hrcml_flags(self,flag_elem):
Uninitialised = flag_elem.get('Uninitialised')
Modifiable = flag_elem.get('Modifiable')
Persistent = flag_elem.get('Persistent')
flag = Flag()
flag.Uninitialised = Uninitialised
flag.Modifiable = Modifiable
flag.Persistent = Persistent
return flag
def filter_file_list_by_include_ref(self, files, ref):
pattern = ref + '$'
pattern = pattern.replace('.', r'\.')
pattern = pattern.replace('*', '.*')
pattern = '(^|.*/)' + pattern
result = []
for file in files:
if re.match(pattern, file.replace('\\', '/')) != None:
result.append(file)
return result
class Flag(object):
def __init__(self):
self.Uninitialised = 0
self.Modifiable = 0
self.Persistent = 0
class Setting(object):
def __init__(self,configuration):
self.name = None
self.ref = None
self.type = None
self.id = None
self.flag = None
self.comment = ''
self.configuration = configuration
@property
def value(self):
dview = self.configuration.get_default_view()
feature = dview.get_feature(self.ref)
value = feature.get_value()
if self.type in (HcrRecord.VALTYPE_ARRAY_INT32, HcrRecord.VAL | category_uid = cat_elem.get('uid')
if category_uid == None or category_uid == '':
raise NoCategoryUIDInHcrmlFileError("No category uid attribute implemented in hcrml file!")
name = cat_elem.get('name')
if name == None or name == '':
raise NoCategoryNameInHcrmlFileError("No category name attribute implemented in hcrml file!")
category = Category()
category.name = name
try:
category.category_uid = long(category_uid)
except ValueError:
category.category_uid = long(category_uid, 16)
category.xml_elem = cat_elem
for setting_elem in cat_elem.getiterator("{%s}setting" % self.namespaces[0]):
setting = self.read_hcrml_setting(setting_elem)
category.settings.append(setting)
return category | identifier_body |
hcrml_parser.py | def read_hcrml_output(self, ignore_includes=False):
output = Output()
# There should only be one <output> element, so use find()
out_elem = self.doc.find("{%s}output" % self.namespaces[0])
if out_elem != None:
version = out_elem.get('version')
read_only = out_elem.get('readOnly')
file = out_elem.get('file')
type = out_elem.get('type')
if type == None or type == '':
raise NoTypeDefinedInOutPutTagError("Type attribute missing in hcrml file")
if type not in ('hcr', 'header'):
raise InvalidTypeDefinedInOutPutTagError("Type attribute invalid in hcrml file: %s" % type)
output.version = version
output.read_only = read_only
output.file = file
output.type = type
# An <output> element may contain <include> elements for including other
# HCRML files, so read and include categories from those
if not ignore_includes:
included_files = self.read_hcrml_includes(out_elem)
read_categories = self.read_categories_from_hcrml_files(included_files)
output.categories.extend(read_categories)
""" output tag is not mandatory, but there should be some categories included """
for cat_elem in self.doc.getiterator("{%s}category" % self.namespaces[0]):
category = self.read_hrcml_category(cat_elem)
output.categories.append(category)
return output
def read_hcrml_includes(self, output_elem):
"""
Read all <include> elements under an <output> element.
@return: List of other HCRML files to include.
"""
result = []
include_refs = []
for include_elem in output_elem.findall("{%s}include" % self.namespaces[0]):
ref = include_elem.get('ref')
if ref != None: include_refs.append(ref)
if include_refs:
# There are include refs, construct the list of files that should
# be included
all_files = self.configuration.list_resources()
included_files = []
for ref in include_refs:
files_by_ref = self.filter_file_list_by_include_ref(all_files, ref)
result.extend(files_by_ref)
# Make sure that no file is in the list more than once
result = list(set(result))
return result
def read_categories_from_hcrml_files(self, files):
"""
Read all categories from the list of the given HCRML files.
"""
categories = []
for file in files:
# Skip the current file
if os.path.normpath(file) == os.path.normpath(self.hcrml_file):
continue
# Read the <output> element and append its categories to the result list
reader = HcrmlReader(file, self.configuration)
reader.doc = self._read_xml_doc_from_resource(file, self.configuration)
# Read the output element, but ignore includes, since we are
# currently reading from inside an include
output_obj = reader.read_hcrml_output(ignore_includes=True)
categories.extend(output_obj.categories)
return categories
def read_hrcml_category(self,cat_elem):
category_uid = cat_elem.get('uid')
if category_uid == None or category_uid == '':
raise NoCategoryUIDInHcrmlFileError("No category uid attribute implemented in hcrml file!")
name = cat_elem.get('name')
if name == None or name == '':
raise NoCategoryNameInHcrmlFileError("No category name attribute implemented in hcrml file!")
category = Category()
category.name = name
try:
category.category_uid = long(category_uid)
except ValueError:
category.category_uid = long(category_uid, 16)
category.xml_elem = cat_elem
for setting_elem in cat_elem.getiterator("{%s}setting" % self.namespaces[0]):
setting = self.read_hcrml_setting(setting_elem)
category.settings.append(setting)
return category
def read_hcrml_setting(self,setting_elem):
ref = setting_elem.get('ref')
if ref == None or ref == '':
raise NoRefInHcrmlFileError("No ref in setting tag attribute implemented in hcrml file!")
else:
self.refs.append(ref)
type = setting_elem.get('type')
if type == None or type == '':
raise NoTypeAttributeInSettingHcrmlFileError("No type in setting tag attribute implemented in hcrml file ref: %s" % ref )
name = setting_elem.get('name')
if name == None or name == '':
raise NoNameAttributeInSettingHcrmlFileError("No name in setting tag attribute implemented in hcrml file ref: %s" % ref )
id = setting_elem.get('id')
if id == None or id == '':
raise NoIdAttributeInSettingHcrmlFileError("No id in setting tag attribute implemented in hcrml file ref: %s" % ref )
comment = setting_elem.get('comment')
if comment == None:
comment = ''
setting = Setting(self.configuration)
setting.comment = comment
setting.name = name
setting.ref = ref
try:
setting.id = long(id)
except ValueError:
setting.id = long(id, 16)
setting.type = type
setting.xml_elem = setting_elem
for flag_elem in setting_elem.getiterator("{%s}flags" % self.namespaces[0]):
flag = self.read_hrcml_flags(setting_elem)
setting.flag = flag
return setting
def read_hrcml_flags(self,flag_elem):
Uninitialised = flag_elem.get('Uninitialised')
Modifiable = flag_elem.get('Modifiable')
Persistent = flag_elem.get('Persistent')
flag = Flag()
flag.Uninitialised = Uninitialised
flag.Modifiable = Modifiable
flag.Persistent = Persistent
return flag
def filter_file_list_by_include_ref(self, files, ref):
pattern = ref + '$'
pattern = pattern.replace('.', r'\.')
pattern = pattern.replace('*', '.*')
pattern = '(^|.*/)' + pattern
result = []
for file in files:
if re.match(pattern, file.replace('\\', '/')) != None:
result.append(file)
return result
class Flag(object):
def __init__(self):
self.Uninitialised = 0
self.Modifiable = 0
self.Persistent = 0
class Setting(object):
def __init__(self,configuration):
self.name = None
self.ref = None
self.type = None
self.id = None
self.flag = None
self.comment = ''
self.configuration = configuration
@property
def value(self):
dview = self.configuration.get_default_view()
feature = dview.get_feature(self.ref)
value = feature.get_value()
if self.type in (HcrRecord.VALTYPE_ARRAY_INT32, HcrRecord.VALTYPE_ARRAY_UINT32):
# Convert string values to numbers
value = map(lambda x: self.__str_to_long(x), value)
elif self.type == HcrRecord.VALTYPE_BIN_DATA and feature.get_type() == 'string':
value = self.__hex_to_bindata(value)
return value
def __str_to_long(self, str_value):
try:
return long(str_value)
except ValueError:
return long(str_value, 16)
def __hex_to_bindata(self, hexdata):
orig_hexdata = hexdata
hexdata = hexdata.replace(' ', '').replace('\r', '').replace('\n', '').replace('\t', '')
if len(hexdata) % 2 != 0:
raise ValueError("Failed to convert %r into binary data: String length %d (whitespace stripped) is not divisible by 2", orig_hexdata, len(hexdata))
for c in hexdata:
if c not in "0123456789abcdefABCDEF":
raise ValueError("Failed to convert %r into binary data: Not a valid hex string", hexdata)
temp = []
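# Decode each pair of hex characters into a single byte.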
for i in xrange(len(hexdata) / 2):
start = i * 2
end = start + 2
temp.append(chr(int(hexdata[start:end], 16)))
return ''.join(temp)
class Category(object):
def __init__(self):
self.name = None
self.category_uid = None
self.settings = []
def get_hcr_records(self):
"""
Return a list of HcrRecord objects created based on this category's settings.
"""
result = []
for setting in self.settings:
record = HcrRecord(setting.type, setting.value, self.category_uid, setting.id)
flag = setting.flag
if flag:
record.flags = 0
if flag.Uninitialised == '1': record.flags |= HcrRecord.FLAG_UNINITIALIZED
if flag.Modifiable == '1': record.flags |= HcrRecord.FLAG_MODIFIABLE
if flag.Persistent == '1': record.flags |= HcrRecord.FLAG_PERSISTENT
result.append(record)
return result
class Output(object):
def | __init__ | identifier_name |
|
hcrml_parser.py | raise NoTypeDefinedInOutPutTagError("Type attribute missing in hcrml file")
if type not in ('hcr', 'header'):
raise InvalidTypeDefinedInOutPutTagError("Type attribute invalid in hcrml file: %s" % type)
output.version = version
output.read_only = read_only
output.file = file
output.type = type
# An <output> element may contain <include> elements for including other
# HCRML files, so read and include categories from those
if not ignore_includes:
included_files = self.read_hcrml_includes(out_elem)
read_categories = self.read_categories_from_hcrml_files(included_files)
output.categories.extend(read_categories)
""" output tag is not mandatory, but there should be some categories included """
for cat_elem in self.doc.getiterator("{%s}category" % self.namespaces[0]):
category = self.read_hrcml_category(cat_elem)
output.categories.append(category)
return output
def read_hcrml_includes(self, output_elem):
"""
Read all <include> elements under an <output> element.
@return: List of other HCRML files to include.
"""
result = []
include_refs = []
for include_elem in output_elem.findall("{%s}include" % self.namespaces[0]):
ref = include_elem.get('ref')
if ref != None: include_refs.append(ref)
if include_refs:
# There are include refs, construct the list of files that should
# be included
all_files = self.configuration.list_resources()
included_files = []
for ref in include_refs:
files_by_ref = self.filter_file_list_by_include_ref(all_files, ref)
result.extend(files_by_ref)
# Make sure that no file is in the list more than once
result = list(set(result))
return result
def read_categories_from_hcrml_files(self, files):
"""
Read all categories from the list of the given HCRML files.
"""
categories = []
for file in files:
# Skip the current file
if os.path.normpath(file) == os.path.normpath(self.hcrml_file):
continue
# Read the <output> element and append its categories to the result list
reader = HcrmlReader(file, self.configuration)
reader.doc = self._read_xml_doc_from_resource(file, self.configuration)
# Read the output element, but ignore includes, since we are
# currently reading from inside an include
output_obj = reader.read_hcrml_output(ignore_includes=True)
categories.extend(output_obj.categories)
return categories
def read_hrcml_category(self,cat_elem):
category_uid = cat_elem.get('uid')
if category_uid == None or category_uid == '':
raise NoCategoryUIDInHcrmlFileError("No category uid attribute implemented in hcrml file!")
name = cat_elem.get('name')
if name == None or name == '':
raise NoCategoryNameInHcrmlFileError("No category name attribute implemented in hcrml file!")
category = Category()
category.name = name
try:
category.category_uid = long(category_uid)
except ValueError:
category.category_uid = long(category_uid, 16)
category.xml_elem = cat_elem
for setting_elem in cat_elem.getiterator("{%s}setting" % self.namespaces[0]):
setting = self.read_hcrml_setting(setting_elem)
category.settings.append(setting)
return category
def read_hcrml_setting(self,setting_elem):
ref = setting_elem.get('ref')
if ref == None or ref == '':
raise NoRefInHcrmlFileError("No ref in setting tag attribute implemented in hcrml file!")
else:
self.refs.append(ref)
type = setting_elem.get('type')
if type == None or type == '':
raise NoTypeAttributeInSettingHcrmlFileError("No type in setting tag attribute implemented in hcrml file ref: %s" % ref )
name = setting_elem.get('name')
if name == None or name == '':
raise NoNameAttributeInSettingHcrmlFileError("No name in setting tag attribute implemented in hcrml file ref: %s" % ref )
id = setting_elem.get('id')
if id == None or id == '':
raise NoIdAttributeInSettingHcrmlFileError("No id in setting tag attribute implemented in hcrml file ref: %s" % ref )
comment = setting_elem.get('comment')
if comment == None:
comment = ''
setting = Setting(self.configuration)
setting.comment = comment
setting.name = name
setting.ref = ref
try:
setting.id = long(id)
except ValueError:
setting.id = long(id, 16)
setting.type = type
setting.xml_elem = setting_elem
for flag_elem in setting_elem.getiterator("{%s}flags" % self.namespaces[0]):
flag = self.read_hrcml_flags(setting_elem)
setting.flag = flag
return setting
def read_hrcml_flags(self,flag_elem):
Uninitialised = flag_elem.get('Uninitialised')
Modifiable = flag_elem.get('Modifiable')
Persistent = flag_elem.get('Persistent')
flag = Flag()
flag.Uninitialised = Uninitialised
flag.Modifiable = Modifiable
flag.Persistent = Persistent
return flag
def filter_file_list_by_include_ref(self, files, ref):
pattern = ref + '$'
pattern = pattern.replace('.', r'\.')
pattern = pattern.replace('*', '.*')
pattern = '(^|.*/)' + pattern
result = []
for file in files:
if re.match(pattern, file.replace('\\', '/')) != None:
result.append(file)
return result
class Flag(object):
def __init__(self):
self.Uninitialised = 0
self.Modifiable = 0
self.Persistent = 0
class Setting(object):
def __init__(self,configuration):
self.name = None
self.ref = None
self.type = None
self.id = None
self.flag = None
self.comment = ''
self.configuration = configuration
@property
def value(self):
dview = self.configuration.get_default_view()
feature = dview.get_feature(self.ref)
value = feature.get_value()
if self.type in (HcrRecord.VALTYPE_ARRAY_INT32, HcrRecord.VALTYPE_ARRAY_UINT32):
# Convert string values to numbers
value = map(lambda x: self.__str_to_long(x), value)
elif self.type == HcrRecord.VALTYPE_BIN_DATA and feature.get_type() == 'string':
value = self.__hex_to_bindata(value)
return value
def __str_to_long(self, str_value):
try:
return long(str_value)
except ValueError:
return long(str_value, 16)
def __hex_to_bindata(self, hexdata):
orig_hexdata = hexdata
hexdata = hexdata.replace(' ', '').replace('\r', '').replace('\n', '').replace('\t', '')
if len(hexdata) % 2 != 0:
raise ValueError("Failed to convert %r into binary data: String length %d (whitespace stripped) is not divisible by 2", orig_hexdata, len(hexdata))
for c in hexdata:
if c not in "0123456789abcdefABCDEF":
raise ValueError("Failed to convert %r into binary data: Not a valid hex string", hexdata)
temp = []
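# Decode each pair of hex characters into a single byte.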
for i in xrange(len(hexdata) / 2):
start = i * 2
end = start + 2
temp.append(chr(int(hexdata[start:end], 16)))
return ''.join(temp)
class Category(object):
def __init__(self):
self.name = None
self.category_uid = None
self.settings = []
def get_hcr_records(self):
"""
Return a list of HcrRecord objects created based on this category's settings.
"""
result = []
for setting in self.settings:
record = HcrRecord(setting.type, setting.value, self.category_uid, setting.id)
flag = setting.flag
if flag:
record.flags = 0
if flag.Uninitialised == '1': record.flags |= HcrRecord.FLAG_UNINITIALIZED
if flag.Modifiable == '1': record.flags |= HcrRecord.FLAG_MODIFIABLE
if flag.Persistent == '1': record.flags |= HcrRecord.FLAG_PERSISTENT
result.append(record)
return result
class Output(object):
def __init__(self):
self.file = None
self.type = None
self.version = None
self.read_only = None
self.categories = []
def get_hcr_records(self):
"""
Return a list of HcrRecord objects created based on this output object's categories.
"""
result = []
for category in self.categories:
result.extend(category.get_hcr_records())
| return result
def get_hcr_repository(self):
"""
Return a HcrRepository object created based on this output.
| random_line_split |
|
hcrml_parser.py | mlImpl(resource_ref, configuration)
impl.output_obj = reader.read_hcrml_output()
impl.refs = reader.refs
return impl
@classmethod
def get_schema_data(cls):
return pkg_resources.resource_string('hcrplugin', 'xsd/hcrml.xsd')
def read_hcrml_output(self, ignore_includes=False):
output = Output()
# There should only be one <output> element, so use find()
out_elem = self.doc.find("{%s}output" % self.namespaces[0])
if out_elem != None:
version = out_elem.get('version')
read_only = out_elem.get('readOnly')
file = out_elem.get('file')
type = out_elem.get('type')
if type == None or type == '':
raise NoTypeDefinedInOutPutTagError("Type attribute missing in hcrml file")
if type not in ('hcr', 'header'):
raise InvalidTypeDefinedInOutPutTagError("Type attribute invalid in hcrml file: %s" % type)
output.version = version
output.read_only = read_only
output.file = file
output.type = type
# An <output> element may contain <include> elements for including other
# HCRML files, so read and include categories from those
if not ignore_includes:
included_files = self.read_hcrml_includes(out_elem)
read_categories = self.read_categories_from_hcrml_files(included_files)
output.categories.extend(read_categories)
""" output tag is not mandatory, but there should be some categories included """
for cat_elem in self.doc.getiterator("{%s}category" % self.namespaces[0]):
category = self.read_hrcml_category(cat_elem)
output.categories.append(category)
return output
def read_hcrml_includes(self, output_elem):
"""
Read all <include> elements under an <output> element.
@return: List of other HCRML files to include.
"""
result = []
include_refs = []
for include_elem in output_elem.findall("{%s}include" % self.namespaces[0]):
ref = include_elem.get('ref')
if ref != None: include_refs.append(ref)
if include_refs:
# There are include refs, construct the list of files that should
# be included
all_files = self.configuration.list_resources()
included_files = []
for ref in include_refs:
files_by_ref = self.filter_file_list_by_include_ref(all_files, ref)
result.extend(files_by_ref)
# Make sure that no file is in the list more than once
result = list(set(result))
return result
def read_categories_from_hcrml_files(self, files):
"""
Read all categories from the list of the given HCRML files.
"""
categories = []
for file in files:
# Skip the current file
if os.path.normpath(file) == os.path.normpath(self.hcrml_file):
continue
# Read the <output> element and append its categories to the result list
reader = HcrmlReader(file, self.configuration)
reader.doc = self._read_xml_doc_from_resource(file, self.configuration)
# Read the output element, but ignore includes, since we are
# currently reading from inside an include
output_obj = reader.read_hcrml_output(ignore_includes=True)
categories.extend(output_obj.categories)
return categories
def read_hrcml_category(self,cat_elem):
category_uid = cat_elem.get('uid')
if category_uid == None or category_uid == '':
raise NoCategoryUIDInHcrmlFileError("No category uid attribute implemented in hcrml file!")
name = cat_elem.get('name')
if name == None or name == '':
raise NoCategoryNameInHcrmlFileError("No category name attribute implemented in hcrml file!")
category = Category()
category.name = name
try:
category.category_uid = long(category_uid)
except ValueError:
category.category_uid = long(category_uid, 16)
category.xml_elem = cat_elem
for setting_elem in cat_elem.getiterator("{%s}setting" % self.namespaces[0]):
setting = self.read_hcrml_setting(setting_elem)
category.settings.append(setting)
return category
def read_hcrml_setting(self,setting_elem):
ref = setting_elem.get('ref')
if ref == None or ref == '':
raise NoRefInHcrmlFileError("No ref in setting tag attribute implemented in hcrml file!")
else:
self.refs.append(ref)
type = setting_elem.get('type')
if type == None or type == '':
raise NoTypeAttributeInSettingHcrmlFileError("No type in setting tag attribute implemented in hcrml file ref: %s" % ref )
name = setting_elem.get('name')
if name == None or name == '':
raise NoNameAttributeInSettingHcrmlFileError("No name in setting tag attribute implemented in hcrml file ref: %s" % ref)
id = setting_elem.get('id')
if id == None or id == '':
raise NoIdAttributeInSettingHcrmlFileError("No id in setting tag attribute implemented in hcrml file ref: %s" % ref )
comment = setting_elem.get('comment')
if comment == None:
comment = ''
setting = Setting(self.configuration)
setting.comment = comment
setting.name = name
setting.ref = ref
try:
setting.id = long(id)
except ValueError:
setting.id = long(id, 16)
setting.type = type
setting.xml_elem = setting_elem
for flag_elem in setting_elem.getiterator("{%s}flags" % self.namespaces[0]):
flag = self.read_hrcml_flags(flag_elem)
setting.flag = flag
return setting
def read_hrcml_flags(self,flag_elem):
Uninitialised = flag_elem.get('Uninitialised')
Modifiable = flag_elem.get('Modifiable')
Persistent = flag_elem.get('Persistent')
flag = Flag()
flag.Uninitialised = Uninitialised
flag.Modifiable = Modifiable
flag.Persistent = Persistent
return flag
def filter_file_list_by_include_ref(self, files, ref):
pattern = ref + '$'
pattern = pattern.replace('.', r'\.')
pattern = pattern.replace('*', '.*')
pattern = '(^|.*/)' + pattern
result = []
for file in files:
if re.match(pattern, file.replace('\\', '/')) != None:
result.append(file)
return result
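# Illustrative note (not part of the original parser): filter_file_list_by_include_ref
# turns an <include> ref glob into a regular expression and matches it against
# slash-normalized resource paths. The ref and file names below are made up.
#
#   ref 'include/*.hcrml'  ->  pattern '(^|.*/)include/.*\.hcrml$'
#   matches:        'assets/include/extra.hcrml'
#   does not match: 'assets/include/extra.hcrml.bak'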
class Flag(object):
def __init__(self):
self.Uninitialised = 0
self.Modifiable = 0
self.Persistent = 0
class Setting(object):
def __init__(self,configuration):
self.name = None
self.ref = None
self.type = None
self.id = None
self.flag = None
self.comment = ''
self.configuration = configuration
@property
def value(self):
dview = self.configuration.get_default_view()
feature = dview.get_feature(self.ref)
value = feature.get_value()
if self.type in (HcrRecord.VALTYPE_ARRAY_INT32, HcrRecord.VALTYPE_ARRAY_UINT32):
# Convert string values to numbers
value = map(lambda x: self.__str_to_long(x), value)
elif self.type == HcrRecord.VALTYPE_BIN_DATA and feature.get_type() == 'string':
value = self.__hex_to_bindata(value)
return value
def __str_to_long(self, str_value):
try:
return long(str_value)
except ValueError:
return long(str_value, 16)
def __hex_to_bindata(self, hexdata):
orig_hexdata = hexdata
hexdata = hexdata.replace(' ', '').replace('\r', '').replace('\n', '').replace('\t', '')
if len(hexdata) % 2 != 0:
raise ValueError("Failed to convert %r into binary data: String length %d (whitespace stripped) is not divisible by 2", orig_hexdata, len(hexdata))
for c in hexdata:
if c not in "0123456789abcdefABCDEF":
raise ValueError("Failed to convert %r into binary data: Not a valid hex string", hexdata)
temp = []
for i in xrange(len(hexdata) / 2):
start = i * 2
end = start + 2
temp.append(chr(int(hexdata[start:end], 16)))
return ''.join(temp)
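# Illustrative example (not part of the original source): __hex_to_bindata accepts a
# whitespace-tolerant hex string and returns the corresponding raw byte string, e.g.
#   '00 1A ff'  ->  '\x00\x1a\xff'
# Odd-length or non-hex input such as '0AB' or '0G' raises ValueError.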
class Category(object):
def __init__(self):
self.name = None
self.category_uid = None
self.settings = []
def get_hcr_records(self):
"""
Return a list of HcrRecord objects created based on this category's settings.
"""
result = []
for setting in self.settings:
record = HcrRecord(setting.type, setting.value, self.category_uid, setting.id)
flag = setting.flag
if flag:
record.flags = 0
if flag.Uninitialised == '1': | record.flags |= HcrRecord.FLAG_UNINITIALIZED | conditional_block |
|
beerData.js | ['4.5 out of 7', 'Oatmeal Stout', '6%', '★ ★', 'beer4', 'A full-bodied oatmeal milk stout that starts off with a mild roasty flavor and smoothly transitions into sweet, cream and luscious.'],
// ['(512) Pecan Porter', 'Brown Porter', '6.8%', 30, 'coldsunny4'],
['(512) Pecan Porter', 'Brown Porter', '6.8%', '★ ★', 'beer3', "Nearly black in color, (512) Pecan Porter is made with Organic US 2-row and Crystal malts along with Baird's Chocolate and Black malts. Its full body and malty sweetness are balanced with subtle pecan aroma and flavor from locally grown pecans. Yet another true Austin original!"],
// ["Anatevka", 'British Imperial Stout', '6.8%', 49, 'coldsunny5'],
["Anatevka", 'British Imperial Stout', '6.8%', '★ ★ ★', 'beer4', 'Jet black richness smoothed with flaked barley & complementary hops.'],
],
rainy: [
// ['10 Ton', 'Oatmeal Stout', '7%', 30, 'coldrainy1'],
['10 Ton', 'Oatmeal Stout', '7%', '★ ★', 'beer4', 'The Industrial Revolution that forged Dayton, O was built on the backs of many. Where sweat, dirt, and pride were part of the everyday diet.'],
// ['Barrel-Aged Imperial Smoked Porter', 'Smoke Porter', '8.5%', 50, 'coldcloudy2'],
['Barrel-Aged Imperial Smoked Porter', 'Smoke Porter', '8.5%', '★ ★ ★', 'beer4', 'Bold and robust with rich chocolate and coffee aromas, a subtle smoky flavor and a sweet finish. Aged in bourbon barrels.'],
// ['Colossal Two', 'Smoke Porter', '9%', 50, 'coldcloudy4'],
['Colossal Two', 'Smoke Porter', '9%', '★ ★ ★', 'beer4', 'This Porter is a stronger, smoked version of our flagship Porter. It is dark brown, almost black in color, and has a smoky, roasted aroma and smooth, rich, complex smoke and chocolate flavors.'],
// ['Big Ass Barrel Lapsang Tea Porter', 'Smoke Porter', '10.2%', 24.5, 'coldcloudy3'],
['Big Ass Barrel Lapsang Tea Porter', 'Smoke Porter', '10.2%', '★ ★', 'beer4', 'Imperial robust porter with pine smoked Lapsang Souchon tea leaves'],
// ["2X Most Stuffed", 'American Imperial Stout', '12%', 40, 'coldsunny5'],
["2X Most Stuffed", 'American Imperial Stout', '12%', '★ ★ ★', 'beer4', 'Thick, rich and excessively decadent, this beer aims to bring back childhood. Stuffed with an obscene 800 oreo cookies per barrel. Mic drop!'],
]
},
moderate: { // IPA, Kolsh, Dubel, Hazy IPA, Red Ale, Amber Ale
sunny: [
// ["8 Second Kölsch", 'Kölsch', '5%', 15, 'moderatemoderate'],
["8 Second Kölsch", 'Kölsch', '5%', '★', 'beer1', 'Head Brewer Christian Koch fell in love with this style of beer while visiting Cologne, Germany. Our 8 Second Kölsch is brewed to be a true representation of this easy drinking crisp ale.'],
// ["1055 Kolsch", 'Kölsch', '5%', 25, 'moderatemoderate'],
["1055 Kolsch", 'Kölsch', '5%', '★ ★', 'beer1', '1055 Kolsch is our collaboration with the local fire union benefiting burn related charities. This is our third release with the fire union and first in cans.'],
// ["A Kolsch Day in Helles", 'Kölsch', '5.4%', 19, 'moderatemoderate'],
["A Kolsch Day in Helles", 'Kölsch', '5.4%', '★ ★', 'beer1', 'Ale meets lager with a cherry on top! A malt driven German ale combined with a lager brewing style and English yeast results in this bright, light and easy beer with a hint of cherry.'],
// ["Ash Cloud Amber", 'American Amber Lager', '5.5%', 20, 'moderatemoderate'],
["Ash Cloud Amber", 'American Amber Lager', '5.5%', '★ ★', 'beer1', 'Ash Cloud Amber is an American style amber ale that is coppery brown in color with some reddish hues.'],
// ['1/2 Shot IPA', 'American IPA', '6.25%', 70, 'moderatemoderate'],
['1/2 Shot IPA', 'American IPA', '6.25%', '★ ★ ★ ★', 'beer2', 'Half of the robust grain bill that is found in the Shot Tower DIPA. 4 Different hops giving it lots of citrus floral notes'],
],
cloudy: [
// ["America's Gold", 'Kölsch', '4.6%', 21, 'moderatemoderate'],
// ["1980 Kolsch", 'Kölsch', '5.25%', 22, 'moderatemoderate'],
// ["Absent Landlord", 'Kölsch', '5.3%', 18, 'moderatemoderate'],
// ["67 Alaska", 'Kölsch', '5.5%', 21, 'moderatemoderate'],
// ['10 @ 2 Hazy', 'American IPA', '6.3%', 41, 'moderatemoderate'],
['10 @ 2 Hazy', 'American IPA', '6.3%', '★ ★ ★', 'beer2', 'No Description Provided'],
// ['"Zeppelin" IPA', 'American IPA', '6.5%', 75, 'moderatemoderate'],
['"Zeppelin" IPA', 'American IPA', '6.5%', '★ ★ ★ ★', 'beer2', 'American style India Pale Ale with a punchy hop finish'],
// ['32958', 'Juicy or Hazy IPA', '6.7%', 61, 'moderatemoderate'],
['32958', 'Juicy or Hazy IPA', '6.7%', '★ ★ ★ ★', 'beer2', 'Hazy IPA with late addition Citra and Mosaic hops.'],
// ['2-Headed Monster N.E. IPA', 'Juicy or Hazy IPA', '6.8%', 50, 'moderatemoderate'],
['2-Headed Monster N.E. IPA', 'Juicy or Hazy IPA', '6.8%', '★ ★ ★', 'beer2', `Brewed w/ a monstrous amount of Strata & Amarillo, this hazy IPA was brewed exclusively for the comedy duo Cory 'n Chad "The Smash Bothers"!`],
// ['Abandonment Issues', 'Juicy or Hazy IPA', '6.8%', 70, 'moderatemoderate'],
['Abandonment Issues', 'Juicy or Hazy IPA', '6.8%', '★ ★ ★ ★', 'beer2', 'Juicy IPA loaded with Citra and Idaho 7 hops.'],
],
rainy: [
// ["Amber Lager", 'American Amber Lager', '5.03%', 20, 'moderatemoderate'],
// ["1865 Amber", 'American Amber Lager', '4.8%', 32.2, 'moderatemoderate'],
// ["Bestside Lager", 'American Amber Lager', '4.9%', 20, 'moderatemoderate'],
// ['1-800-Tropics', 'American IPA', '6.9%', 50, 'moderatemoderate'],
['1-800-Tropics', 'American IPA', '6.9%', | ['"Tractor Beam" Oatmeal Stout', 'Oatmeal Stout', '5.8%', '★ ★', 'beer4', 'No Description Provided'],
// ["48er's Porter", 'Brown Porter', '5.9%', 31, 'coldsunny5'],
["48er's Porter", 'Brown Porter', '5.9%', '★ ★', 'beer3', 'Flavor/Balance: Named after a group of Germans who settled in the greater Boerne area in the 1840’s. This is a robust Porter w/ bits of English and American style. Notes of chocolate, coffee, and toffee. Finishes sweeter than our other beers. Just in time for Fall!'],
// ['4.5 out of 7', 'Oatmeal Stout', '6%', 25, 'coldrainy4'], | random_line_split |
|
stream_mock_test.go | .Itoa(id),
DiskHeartBeatInfo: blobnode.DiskHeartBeatInfo{DiskID: proto.DiskID(id)},
}
}
for _, id := range idcOtherID {
dataDisks[proto.DiskID(id)] = blobnode.DiskInfo{
ClusterID: clusterID, Idc: idcOther, Host: strconv.Itoa(id),
DiskHeartBeatInfo: blobnode.DiskHeartBeatInfo{DiskID: proto.DiskID(id)},
}
}
dataShards = &shardsData{
data: make(map[shardKey][]byte, len(allID)),
}
dataShards.clean()
ctr := gomock.NewController(&testing.T{})
cli := mocks.NewMockClientAPI(ctr)
cli.EXPECT().GetService(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, args clustermgr.GetServiceArgs) (clustermgr.ServiceInfo, error) {
if val, ok := dataNodes[args.Name]; ok {
return val, nil
}
return clustermgr.ServiceInfo{}, errNotFound
})
cmcli = cli
clusterInfo = &clustermgr.ClusterInfo{
Region: "test-region",
ClusterID: clusterID,
Nodes: []string{"node-1", "node-2", "node-3"},
}
ctr = gomock.NewController(&testing.T{})
proxycli := mocks.NewMockProxyClient(ctr)
proxycli.EXPECT().GetCacheVolume(gomock.Any(), gomock.Any(), gomock.Any()).
AnyTimes().Return(dataVolume, nil)
proxycli.EXPECT().GetCacheDisk(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(_ context.Context, _ string, args *proxy.CacheDiskArgs) (*blobnode.DiskInfo, error) {
if val, ok := dataDisks[args.DiskID]; ok {
return &val, nil
}
return nil, errNotFound
})
serviceController, _ = controller.NewServiceController(
controller.ServiceConfig{
ClusterID: clusterID,
IDC: idc,
ReloadSec: 1000,
}, cmcli, proxycli, nil)
volumeGetter, _ = controller.NewVolumeGetter(clusterID, serviceController, proxycli, 0)
ctr = gomock.NewController(&testing.T{})
c := NewMockClusterController(ctr)
c.EXPECT().Region().AnyTimes().Return("test-region")
c.EXPECT().ChooseOne().AnyTimes().Return(clusterInfo, nil)
c.EXPECT().GetServiceController(gomock.Any()).AnyTimes().Return(serviceController, nil)
c.EXPECT().GetVolumeGetter(gomock.Any()).AnyTimes().Return(volumeGetter, nil)
c.EXPECT().ChangeChooseAlg(gomock.Any()).AnyTimes().DoAndReturn(
func(alg controller.AlgChoose) error {
if alg < 10 {
return nil
}
return controller.ErrInvalidChooseAlg
})
cc = c
ctr = gomock.NewController(&testing.T{})
allocCli := mocks.NewMockProxyClient(ctr)
allocCli.EXPECT().SendDeleteMsg(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
allocCli.EXPECT().SendShardRepairMsg(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
allocCli.EXPECT().VolumeAlloc(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, host string, args *proxy.AllocVolsArgs) ([]proxy.AllocRet, error) {
if args.Fsize > allocTimeoutSize {
return nil, errAllocTimeout
}
return dataAllocs, nil
})
proxyClient = allocCli
}
func initPool() {
memPool = resourcepool.NewMemPool(getDefaultMempoolSize())
}
func initEncoder() {
coderEC6P6, _ := ec.NewEncoder(ec.Config{
CodeMode: codemode.EC6P6.Tactic(),
EnableVerify: true,
})
coderEC6P10L2, _ := ec.NewEncoder(ec.Config{
CodeMode: codemode.EC6P10L2.Tactic(),
EnableVerify: true,
})
coderEC15P12, _ := ec.NewEncoder(ec.Config{
CodeMode: codemode.EC15P12.Tactic(),
EnableVerify: true,
})
coderEC16P20L2, _ := ec.NewEncoder(ec.Config{
CodeMode: codemode.EC16P20L2.Tactic(),
EnableVerify: true,
})
encoder = map[codemode.CodeMode]ec.Encoder{
codemode.EC6P6: coderEC6P6,
codemode.EC6P10L2: coderEC6P10L2,
codemode.EC15P12: coderEC15P12,
codemode.EC16P20L2: coderEC16P20L2,
}
}
func initEC() {
allCodeModes = CodeModePairs{
codemode.EC6P6: CodeModePair{
Policy: codemode.Policy{
ModeName: codemode.EC6P6.Name(),
MaxSize: math.MaxInt64,
Enable: true,
},
Tactic: codemode.EC6P6.Tactic(),
},
codemode.EC6P10L2: CodeModePair{
Policy: codemode.Policy{
ModeName: codemode.EC6P10L2.Name(),
MaxSize: -1,
},
Tactic: codemode.EC6P10L2.Tactic(),
},
codemode.EC15P12: CodeModePair{
Policy: codemode.Policy{
ModeName: codemode.EC15P12.Name(),
MaxSize: -1,
},
Tactic: codemode.EC15P12.Tactic(),
},
codemode.EC16P20L2: CodeModePair{
Policy: codemode.Policy{
ModeName: codemode.EC16P20L2.Name(),
MaxSize: -1,
},
Tactic: codemode.EC16P20L2.Tactic(),
},
}
}
func initController() {
vuidController = &vuidControl{
broken: make(map[proto.Vuid]bool),
blocked: make(map[proto.Vuid]bool),
block: func() {
time.Sleep(200 * time.Millisecond)
},
duration: 200 * time.Millisecond,
isBNRealError: false,
}
// initialize vuid 1005 as broken by default
vuidController.Break(1005)
}
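// Illustrative sketch (not part of the original test file): how a test case can use
// vuidController to inject failures into the mocked blobnode storage API. All calls
// below exist in this file; the vuid values are arbitrary examples.
//
//	vuidController.Break(1006)          // Put/Get on vuid 1006 now fail
//	defer vuidController.Unbreak(1006)
//	vuidController.SetBNRealError(true) // fail with realistic blobnode error codes
//	vuidController.Block(1001)          // vuid 1001 hangs for the configured duration
//	defer vuidController.Unblock(1001)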
func newMockStorageAPI() blobnode.StorageAPI {
ctr := gomock.NewController(&testing.T{})
api := mocks.NewMockStorageAPI(ctr)
api.EXPECT().RangeGetShard(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().
DoAndReturn(storageAPIRangeGetShard)
api.EXPECT().PutShard(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().
DoAndReturn(storageAPIPutShard)
return api
}
func init() {
rand.Seed(int64(time.Now().Nanosecond()))
hystrix.ConfigureCommand(rwCommand, hystrix.CommandConfig{
Timeout: 9000,
MaxConcurrentRequests: 9000,
ErrorPercentThreshold: 90,
})
initPool()
initEncoder()
initEC()
initMockData()
initController()
streamer = &Handler{
memPool: memPool,
encoder: encoder,
clusterController: cc,
blobnodeClient: newMockStorageAPI(),
proxyClient: proxyClient,
allCodeModes: allCodeModes,
maxObjectSize: defaultMaxObjectSize,
StreamConfig: StreamConfig{
IDC: idc,
MaxBlobSize: uint32(blobSize), // 4M
DiskPunishIntervalS: punishServiceS,
ServicePunishIntervalS: punishServiceS,
AllocRetryTimes: 3,
AllocRetryIntervalMS: 3000,
MinReadShardsX: minReadShardsX,
},
discardVidChan: make(chan discardVid, 8),
stopCh: make(chan struct{}),
}
streamer.loopDiscardVids()
}
func ctxWithName(funcName string) func() context.Context {
return func() context.Context {
_, ctx := trace.StartSpanFromContextWithTraceID(context.Background(), funcName, funcName)
return ctx
}
}
func | getBufSizes | identifier_name |
|
stream_mock_test.go | )
blobSize = 1 << 22
streamer *Handler
memPool *resourcepool.MemPool
encoder map[codemode.CodeMode]ec.Encoder
proxyClient proxy.Client
allCodeModes CodeModePairs
cmcli clustermgr.APIAccess
volumeGetter controller.VolumeGetter
serviceController controller.ServiceController
cc controller.ClusterController
clusterInfo *clustermgr.ClusterInfo
dataVolume *proxy.VersionVolume
dataAllocs []proxy.AllocRet
dataNodes map[string]clustermgr.ServiceInfo
dataDisks map[proto.DiskID]blobnode.DiskInfo
dataShards *shardsData
vuidController *vuidControl
putErrors = []errcode.Error{
errcode.ErrDiskBroken, errcode.ErrReadonlyVUID,
errcode.ErrChunkNoSpace,
errcode.ErrNoSuchDisk, errcode.ErrNoSuchVuid,
}
getErrors = []errcode.Error{
errcode.ErrOverload,
errcode.ErrDiskBroken, errcode.ErrReadonlyVUID,
errcode.ErrNoSuchDisk, errcode.ErrNoSuchVuid,
}
)
type shardKey struct {
Vuid proto.Vuid
Bid proto.BlobID
}
type shardsData struct {
mutex sync.RWMutex
data map[shardKey][]byte
}
func (d *shardsData) clean() {
d.mutex.Lock()
for key := range d.data {
d.data[key] = d.data[key][:0]
}
d.mutex.Unlock()
}
func (d *shardsData) get(vuid proto.Vuid, bid proto.BlobID) []byte {
key := shardKey{Vuid: vuid, Bid: bid}
d.mutex.RLock()
data := d.data[key]
buff := make([]byte, len(data))
copy(buff, data)
d.mutex.RUnlock()
return buff
}
func (d *shardsData) set(vuid proto.Vuid, bid proto.BlobID, b []byte) {
key := shardKey{Vuid: vuid, Bid: bid}
d.mutex.Lock()
old := d.data[key]
if cap(old) <= len(b) {
d.data[key] = make([]byte, len(b))
} else {
d.data[key] = old[:len(b)]
}
copy(d.data[key], b)
d.mutex.Unlock()
}
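// Illustrative sketch (not part of the original test file): shardsData is the in-memory
// shard store backing the mocked storage API; get returns a copy, so callers may modify
// the returned buffer freely. The values below are arbitrary examples.
//
//	store := &shardsData{data: make(map[shardKey][]byte)}
//	store.set(proto.Vuid(1001), proto.BlobID(10000), []byte("payload"))
//	buff := store.get(proto.Vuid(1001), proto.BlobID(10000)) // independent copy
//	store.clean()                                            // truncates every shard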
type vuidControl struct {
mutex sync.Mutex
broken map[proto.Vuid]bool
blocked map[proto.Vuid]bool
block func()
duration time.Duration
isBNRealError bool // whether to return real blobnode error codes
}
func (c *vuidControl) Break(id proto.Vuid) {
c.mutex.Lock()
c.broken[id] = true
c.mutex.Unlock()
}
func (c *vuidControl) Unbreak(id proto.Vuid) {
c.mutex.Lock()
delete(c.broken, id)
c.mutex.Unlock()
}
func (c *vuidControl) Isbroken(id proto.Vuid) bool {
c.mutex.Lock()
v, ok := c.broken[id]
c.mutex.Unlock()
return ok && v
}
func (c *vuidControl) Block(id proto.Vuid) {
c.mutex.Lock()
c.blocked[id] = true
c.mutex.Unlock()
}
func (c *vuidControl) Unblock(id proto.Vuid) {
c.mutex.Lock()
delete(c.blocked, id)
c.mutex.Unlock()
}
func (c *vuidControl) Isblocked(id proto.Vuid) bool {
c.mutex.Lock()
v, ok := c.blocked[id]
c.mutex.Unlock()
return ok && v
}
func (c *vuidControl) SetBNRealError(b bool) {
c.mutex.Lock()
c.isBNRealError = b
c.mutex.Unlock()
}
func (c *vuidControl) IsBNRealError() bool {
c.mutex.Lock()
defer c.mutex.Unlock()
return c.isBNRealError
}
func randBlobnodeRealError(errors []errcode.Error) error {
n := rand.Intn(1024) % len(errors)
return errors[n]
}
var storageAPIRangeGetShard = func(ctx context.Context, host string, args *blobnode.RangeGetShardArgs) (
body io.ReadCloser, shardCrc uint32, err error) {
if vuidController.Isbroken(args.Vuid) |
if vuidController.Isblocked(args.Vuid) {
vuidController.block()
if rand.Intn(2) == 0 {
err = errors.New("get shard timeout")
} else {
err = errors.New("get shard Timeout")
}
return
}
buff := dataShards.get(args.Vuid, args.Bid)
if len(buff) == 0 {
return nil, 0, errNotFound
}
if len(buff) < int(args.Offset+args.Size) {
err = errors.New("get shard concurrently")
return
}
buff = buff[int(args.Offset):int(args.Offset+args.Size)]
shardCrc = crc32.ChecksumIEEE(buff)
body = ioutil.NopCloser(bytes.NewReader(buff))
return
}
var storageAPIPutShard = func(ctx context.Context, host string, args *blobnode.PutShardArgs) (
crc uint32, err error) {
if vuidController.Isbroken(args.Vuid) {
err = errors.New("put shard fake error")
if vuidController.IsBNRealError() {
err = randBlobnodeRealError(putErrors)
}
return
}
if vuidController.Isblocked(args.Vuid) {
vuidController.block()
err = errors.New("put shard timeout")
return
}
buffer, _ := memPool.Alloc(int(args.Size))
defer memPool.Put(buffer)
buffer = buffer[:int(args.Size)]
_, err = io.ReadFull(args.Body, buffer)
if err != nil {
return
}
crc = crc32.ChecksumIEEE(buffer)
dataShards.set(args.Vuid, args.Bid, buffer)
return
}
func initMockData() {
dataAllocs = make([]proxy.AllocRet, 2)
dataAllocs[0] = proxy.AllocRet{
BidStart: 10000,
BidEnd: 10000,
Vid: volumeID,
}
dataAllocs[1] = proxy.AllocRet{
BidStart: 20000,
BidEnd: 50000,
Vid: volumeID,
}
dataVolume = &proxy.VersionVolume{VolumeInfo: clustermgr.VolumeInfo{
VolumeInfoBase: clustermgr.VolumeInfoBase{
Vid: volumeID,
CodeMode: codemode.EC6P6,
},
Units: func() (units []clustermgr.Unit) {
for _, id := range allID {
units = append(units, clustermgr.Unit{
Vuid: proto.Vuid(id),
DiskID: proto.DiskID(id),
Host: strconv.Itoa(id),
})
}
return
}(),
}}
proxyNodes := make([]clustermgr.ServiceNode, 32)
for idx := range proxyNodes {
proxyNodes[idx] = clustermgr.ServiceNode{
ClusterID: 1,
Name: serviceProxy,
Host: fmt.Sprintf("proxy-%d", idx),
Idc: idc,
}
}
dataNodes = make(map[string]clustermgr.ServiceInfo)
dataNodes[serviceProxy] = clustermgr.ServiceInfo{
Nodes: proxyNodes,
}
dataDisks = make(map[proto.DiskID]blobnode.DiskInfo)
for _, id := range idcID {
dataDisks[proto.DiskID(id)] = blobnode.DiskInfo{
ClusterID: clusterID, Idc: idc, Host: strconv.Itoa(id),
DiskHeartBeatInfo: blobnode.DiskHeartBeatInfo{DiskID: proto.DiskID(id)},
}
}
for _, id := range idcOtherID {
dataDisks[proto.DiskID(id)] = blobnode.DiskInfo{
ClusterID: clusterID, Idc: idcOther, Host: strconv.Itoa(id),
DiskHeartBeatInfo: blobnode.DiskHeartBeatInfo{DiskID: proto.DiskID(id)},
}
}
dataShards = &shardsData{
data: make(map[shardKey][]byte, len(allID)),
}
dataShards.clean()
ctr := gomock.NewController(&testing.T{})
cli := mocks.NewMockClientAPI(ctr)
cli.EXPECT().GetService(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, args clustermgr.GetServiceArgs) (clustermgr.ServiceInfo, error) {
if val, ok := dataNodes[args.Name]; ok {
return val, nil
}
return clustermgr.ServiceInfo{}, errNotFound
})
cmcli = cli
clusterInfo = &clustermgr.ClusterInfo{
Region: "test-region",
ClusterID: clusterID,
Nodes | {
err = errors.New("get shard fake error")
if vuidController.IsBNRealError() {
err = randBlobnodeRealError(getErrors)
}
return
} | conditional_block |
stream_mock_test.go | )
blobSize = 1 << 22
streamer *Handler
memPool *resourcepool.MemPool
encoder map[codemode.CodeMode]ec.Encoder
proxyClient proxy.Client
allCodeModes CodeModePairs
cmcli clustermgr.APIAccess
volumeGetter controller.VolumeGetter
serviceController controller.ServiceController
cc controller.ClusterController
clusterInfo *clustermgr.ClusterInfo
dataVolume *proxy.VersionVolume
dataAllocs []proxy.AllocRet
dataNodes map[string]clustermgr.ServiceInfo
dataDisks map[proto.DiskID]blobnode.DiskInfo
dataShards *shardsData
vuidController *vuidControl
putErrors = []errcode.Error{
errcode.ErrDiskBroken, errcode.ErrReadonlyVUID,
errcode.ErrChunkNoSpace,
errcode.ErrNoSuchDisk, errcode.ErrNoSuchVuid,
}
getErrors = []errcode.Error{
errcode.ErrOverload,
errcode.ErrDiskBroken, errcode.ErrReadonlyVUID,
errcode.ErrNoSuchDisk, errcode.ErrNoSuchVuid,
}
)
type shardKey struct {
Vuid proto.Vuid
Bid proto.BlobID
}
type shardsData struct {
mutex sync.RWMutex
data map[shardKey][]byte
}
func (d *shardsData) clean() {
d.mutex.Lock()
for key := range d.data {
d.data[key] = d.data[key][:0]
}
d.mutex.Unlock()
}
func (d *shardsData) get(vuid proto.Vuid, bid proto.BlobID) []byte {
key := shardKey{Vuid: vuid, Bid: bid}
d.mutex.RLock()
data := d.data[key]
buff := make([]byte, len(data))
copy(buff, data)
d.mutex.RUnlock()
return buff
}
func (d *shardsData) set(vuid proto.Vuid, bid proto.BlobID, b []byte) {
key := shardKey{Vuid: vuid, Bid: bid}
d.mutex.Lock()
old := d.data[key]
if cap(old) <= len(b) {
d.data[key] = make([]byte, len(b))
} else {
d.data[key] = old[:len(b)]
}
copy(d.data[key], b)
d.mutex.Unlock()
}
type vuidControl struct {
mutex sync.Mutex
broken map[proto.Vuid]bool
blocked map[proto.Vuid]bool
block func()
duration time.Duration
isBNRealError bool // whether to return real blobnode error codes
}
func (c *vuidControl) Break(id proto.Vuid) {
c.mutex.Lock()
c.broken[id] = true
c.mutex.Unlock()
}
func (c *vuidControl) Unbreak(id proto.Vuid) {
c.mutex.Lock()
delete(c.broken, id)
c.mutex.Unlock()
}
func (c *vuidControl) Isbroken(id proto.Vuid) bool {
c.mutex.Lock()
v, ok := c.broken[id]
c.mutex.Unlock()
return ok && v
}
func (c *vuidControl) Block(id proto.Vuid) {
c.mutex.Lock()
c.blocked[id] = true
c.mutex.Unlock()
}
func (c *vuidControl) Unblock(id proto.Vuid) {
c.mutex.Lock()
delete(c.blocked, id)
c.mutex.Unlock()
}
func (c *vuidControl) Isblocked(id proto.Vuid) bool {
c.mutex.Lock()
v, ok := c.blocked[id]
c.mutex.Unlock()
return ok && v
}
func (c *vuidControl) SetBNRealError(b bool) {
c.mutex.Lock()
c.isBNRealError = b
c.mutex.Unlock()
}
func (c *vuidControl) IsBNRealError() bool {
c.mutex.Lock()
defer c.mutex.Unlock()
return c.isBNRealError
}
func randBlobnodeRealError(errors []errcode.Error) error {
n := rand.Intn(1024) % len(errors)
return errors[n]
}
var storageAPIRangeGetShard = func(ctx context.Context, host string, args *blobnode.RangeGetShardArgs) (
body io.ReadCloser, shardCrc uint32, err error) {
if vuidController.Isbroken(args.Vuid) {
err = errors.New("get shard fake error")
if vuidController.IsBNRealError() {
err = randBlobnodeRealError(getErrors)
}
return
}
if vuidController.Isblocked(args.Vuid) {
vuidController.block()
if rand.Intn(2) == 0 {
err = errors.New("get shard timeout")
} else {
err = errors.New("get shard Timeout")
}
return
}
buff := dataShards.get(args.Vuid, args.Bid)
if len(buff) == 0 {
return nil, 0, errNotFound
}
if len(buff) < int(args.Offset+args.Size) {
err = errors.New("get shard concurrently")
return
}
buff = buff[int(args.Offset):int(args.Offset+args.Size)]
shardCrc = crc32.ChecksumIEEE(buff)
body = ioutil.NopCloser(bytes.NewReader(buff))
return
}
var storageAPIPutShard = func(ctx context.Context, host string, args *blobnode.PutShardArgs) (
crc uint32, err error) {
if vuidController.Isbroken(args.Vuid) {
err = errors.New("put shard fake error")
if vuidController.IsBNRealError() {
err = randBlobnodeRealError(putErrors)
}
return
}
if vuidController.Isblocked(args.Vuid) {
vuidController.block()
err = errors.New("put shard timeout")
return
}
buffer, _ := memPool.Alloc(int(args.Size))
defer memPool.Put(buffer)
buffer = buffer[:int(args.Size)]
_, err = io.ReadFull(args.Body, buffer)
if err != nil {
return
}
crc = crc32.ChecksumIEEE(buffer)
dataShards.set(args.Vuid, args.Bid, buffer)
return
}
func initMockData() | units = append(units, clustermgr.Unit{
Vuid: proto.Vuid(id),
DiskID: proto.DiskID(id),
Host: strconv.Itoa(id),
})
}
return
}(),
}}
proxyNodes := make([]clustermgr.ServiceNode, 32)
for idx := range proxyNodes {
proxyNodes[idx] = clustermgr.ServiceNode{
ClusterID: 1,
Name: serviceProxy,
Host: fmt.Sprintf("proxy-%d", idx),
Idc: idc,
}
}
dataNodes = make(map[string]clustermgr.ServiceInfo)
dataNodes[serviceProxy] = clustermgr.ServiceInfo{
Nodes: proxyNodes,
}
dataDisks = make(map[proto.DiskID]blobnode.DiskInfo)
for _, id := range idcID {
dataDisks[proto.DiskID(id)] = blobnode.DiskInfo{
ClusterID: clusterID, Idc: idc, Host: strconv.Itoa(id),
DiskHeartBeatInfo: blobnode.DiskHeartBeatInfo{DiskID: proto.DiskID(id)},
}
}
for _, id := range idcOtherID {
dataDisks[proto.DiskID(id)] = blobnode.DiskInfo{
ClusterID: clusterID, Idc: idcOther, Host: strconv.Itoa(id),
DiskHeartBeatInfo: blobnode.DiskHeartBeatInfo{DiskID: proto.DiskID(id)},
}
}
dataShards = &shardsData{
data: make(map[shardKey][]byte, len(allID)),
}
dataShards.clean()
ctr := gomock.NewController(&testing.T{})
cli := mocks.NewMockClientAPI(ctr)
cli.EXPECT().GetService(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(
func(ctx context.Context, args clustermgr.GetServiceArgs) (clustermgr.ServiceInfo, error) {
if val, ok := dataNodes[args.Name]; ok {
return val, nil
}
return clustermgr.ServiceInfo{}, errNotFound
})
cmcli = cli
clusterInfo = &clustermgr.ClusterInfo{
Region: "test-region",
ClusterID: clusterID,
Nodes: | {
dataAllocs = make([]proxy.AllocRet, 2)
dataAllocs[0] = proxy.AllocRet{
BidStart: 10000,
BidEnd: 10000,
Vid: volumeID,
}
dataAllocs[1] = proxy.AllocRet{
BidStart: 20000,
BidEnd: 50000,
Vid: volumeID,
}
dataVolume = &proxy.VersionVolume{VolumeInfo: clustermgr.VolumeInfo{
VolumeInfoBase: clustermgr.VolumeInfoBase{
Vid: volumeID,
CodeMode: codemode.EC6P6,
},
Units: func() (units []clustermgr.Unit) {
for _, id := range allID { | identifier_body |
stream_mock_test.go | idc = "test-idc"
idcOther = "test-idc-other"
allID = []int{1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012}
idcID = []int{1001, 1002, 1003, 1007, 1008, 1009}
idcOtherID = []int{1004, 1005, 1006, 1010, 1011, 1012}
clusterID = proto.ClusterID(1)
volumeID = proto.Vid(1)
blobSize = 1 << 22
streamer *Handler
memPool *resourcepool.MemPool
encoder map[codemode.CodeMode]ec.Encoder
proxyClient proxy.Client
allCodeModes CodeModePairs
cmcli clustermgr.APIAccess
volumeGetter controller.VolumeGetter
serviceController controller.ServiceController
cc controller.ClusterController
clusterInfo *clustermgr.ClusterInfo
dataVolume *proxy.VersionVolume
dataAllocs []proxy.AllocRet
dataNodes map[string]clustermgr.ServiceInfo
dataDisks map[proto.DiskID]blobnode.DiskInfo
dataShards *shardsData
vuidController *vuidControl
putErrors = []errcode.Error{
errcode.ErrDiskBroken, errcode.ErrReadonlyVUID,
errcode.ErrChunkNoSpace,
errcode.ErrNoSuchDisk, errcode.ErrNoSuchVuid,
}
getErrors = []errcode.Error{
errcode.ErrOverload,
errcode.ErrDiskBroken, errcode.ErrReadonlyVUID,
errcode.ErrNoSuchDisk, errcode.ErrNoSuchVuid,
}
)
type shardKey struct {
Vuid proto.Vuid
Bid proto.BlobID
}
type shardsData struct {
mutex sync.RWMutex
data map[shardKey][]byte
}
func (d *shardsData) clean() {
d.mutex.Lock()
for key := range d.data {
d.data[key] = d.data[key][:0]
}
d.mutex.Unlock()
}
func (d *shardsData) get(vuid proto.Vuid, bid proto.BlobID) []byte {
key := shardKey{Vuid: vuid, Bid: bid}
d.mutex.RLock()
data := d.data[key]
buff := make([]byte, len(data))
copy(buff, data)
d.mutex.RUnlock()
return buff
}
func (d *shardsData) set(vuid proto.Vuid, bid proto.BlobID, b []byte) {
key := shardKey{Vuid: vuid, Bid: bid}
d.mutex.Lock()
old := d.data[key]
if cap(old) <= len(b) {
d.data[key] = make([]byte, len(b))
} else {
d.data[key] = old[:len(b)]
}
copy(d.data[key], b)
d.mutex.Unlock()
}
type vuidControl struct {
mutex sync.Mutex
broken map[proto.Vuid]bool
blocked map[proto.Vuid]bool
block func()
duration time.Duration
isBNRealError bool // whether to return real blobnode error codes
}
func (c *vuidControl) Break(id proto.Vuid) {
c.mutex.Lock()
c.broken[id] = true
c.mutex.Unlock()
}
func (c *vuidControl) Unbreak(id proto.Vuid) {
c.mutex.Lock()
delete(c.broken, id)
c.mutex.Unlock()
}
func (c *vuidControl) Isbroken(id proto.Vuid) bool {
c.mutex.Lock()
v, ok := c.broken[id]
c.mutex.Unlock()
return ok && v
}
func (c *vuidControl) Block(id proto.Vuid) {
c.mutex.Lock()
c.blocked[id] = true
c.mutex.Unlock()
}
func (c *vuidControl) Unblock(id proto.Vuid) {
c.mutex.Lock()
delete(c.blocked, id)
c.mutex.Unlock()
}
func (c *vuidControl) Isblocked(id proto.Vuid) bool {
c.mutex.Lock()
v, ok := c.blocked[id]
c.mutex.Unlock()
return ok && v
}
func (c *vuidControl) SetBNRealError(b bool) {
c.mutex.Lock()
c.isBNRealError = b
c.mutex.Unlock()
}
func (c *vuidControl) IsBNRealError() bool {
c.mutex.Lock()
defer c.mutex.Unlock()
return c.isBNRealError
}
func randBlobnodeRealError(errors []errcode.Error) error {
n := rand.Intn(1024) % len(errors)
return errors[n]
}
var storageAPIRangeGetShard = func(ctx context.Context, host string, args *blobnode.RangeGetShardArgs) (
body io.ReadCloser, shardCrc uint32, err error) {
if vuidController.Isbroken(args.Vuid) {
err = errors.New("get shard fake error")
if vuidController.IsBNRealError() {
err = randBlobnodeRealError(getErrors)
}
return
}
if vuidController.Isblocked(args.Vuid) {
vuidController.block()
if rand.Intn(2) == 0 {
err = errors.New("get shard timeout")
} else {
err = errors.New("get shard Timeout")
}
return
}
buff := dataShards.get(args.Vuid, args.Bid)
if len(buff) == 0 {
return nil, 0, errNotFound
}
if len(buff) < int(args.Offset+args.Size) {
err = errors.New("get shard concurrently")
return
}
buff = buff[int(args.Offset):int(args.Offset+args.Size)]
shardCrc = crc32.ChecksumIEEE(buff)
body = ioutil.NopCloser(bytes.NewReader(buff))
return
}
var storageAPIPutShard = func(ctx context.Context, host string, args *blobnode.PutShardArgs) (
crc uint32, err error) {
if vuidController.Isbroken(args.Vuid) {
err = errors.New("put shard fake error")
if vuidController.IsBNRealError() {
err = randBlobnodeRealError(putErrors)
}
return
}
if vuidController.Isblocked(args.Vuid) {
vuidController.block()
err = errors.New("put shard timeout")
return
}
buffer, _ := memPool.Alloc(int(args.Size))
defer memPool.Put(buffer)
buffer = buffer[:int(args.Size)]
_, err = io.ReadFull(args.Body, buffer)
if err != nil {
return
}
crc = crc32.ChecksumIEEE(buffer)
dataShards.set(args.Vuid, args.Bid, buffer)
return
}
func initMockData() {
dataAllocs = make([]proxy.AllocRet, 2)
dataAllocs[0] = proxy.AllocRet{
BidStart: 10000,
BidEnd: 10000,
Vid: volumeID,
}
dataAllocs[1] = proxy.AllocRet{
BidStart: 20000,
BidEnd: 50000,
Vid: volumeID,
}
dataVolume = &proxy.VersionVolume{VolumeInfo: clustermgr.VolumeInfo{
VolumeInfoBase: clustermgr.VolumeInfoBase{
Vid: volumeID,
CodeMode: codemode.EC6P6,
},
Units: func() (units []clustermgr.Unit) {
for _, id := range allID {
units = append(units, clustermgr.Unit{
Vuid: proto.Vuid(id),
DiskID: proto.DiskID(id),
Host: strconv.Itoa(id),
})
}
return
}(),
}}
proxyNodes := make([]clustermgr.ServiceNode, 32)
for idx := range proxyNodes {
proxyNodes[idx] = clustermgr.ServiceNode{
ClusterID: 1,
Name: serviceProxy,
Host: fmt.Sprintf("proxy-%d", idx),
Idc: idc,
}
}
dataNodes = make(map[string]clustermgr.ServiceInfo)
dataNodes[serviceProxy] = clustermgr.ServiceInfo{
Nodes: proxyNodes,
}
dataDisks = make(map[proto.DiskID]blobnode.DiskInfo)
for _, id := range idcID {
dataDisks[proto.DiskID(id)] = blobnode.DiskInfo{
ClusterID: clusterID, Idc: idc, Host: strconv.Itoa(id),
DiskHeartBeatInfo: blobnode.DiskHeartBeatInfo{DiskID: proto.DiskID(id)},
}
}
for _, id := range idcOtherID {
dataDisks[proto |
allocTimeoutSize uint64 = 1 << 40
punishServiceS = 1
minReadShardsX = 5
| random_line_split |
|
stopper.go |
// the provided panic handler.
//
// When Stop() is invoked during stack unwinding, OnPanic is also invoked, but
// Stop() may not have carried out its duties.
func OnPanic(handler func(interface{})) Option {
return optionPanicHandler(handler)
}
// NewStopper returns an instance of Stopper.
func NewStopper(options ...Option) *Stopper {
s := &Stopper{
quiescer: make(chan struct{}),
stopper: make(chan struct{}),
stopped: make(chan struct{}),
}
s.mu.tasks = TaskMap{}
s.mu.qCancels = map[int]func(){}
s.mu.sCancels = map[int]func(){}
for _, opt := range options {
opt.apply(s)
}
s.mu.quiesce = sync.NewCond(&s.mu)
register(s)
return s
}
// Recover is used internally by Stopper to provide a hook for recovery of
// panics on goroutines started by the Stopper. It can also be invoked
// explicitly (via "defer s.Recover()") on goroutines that are created outside
// of Stopper.
func (s *Stopper) Recover(ctx context.Context) {
if r := recover(); r != nil {
if s.onPanic != nil {
s.onPanic(r)
return
}
if sv := settings.TODO(); sv != nil {
log.ReportPanic(ctx, sv, r, 1)
}
panic(r)
}
}
// RunWorker runs the supplied function as a "worker" to be stopped
// by the stopper. The function <f> is run in a goroutine.
func (s *Stopper) RunWorker(ctx context.Context, f func(context.Context)) {
s.stop.Add(1)
go func() {
// Remove any associated span; we need to ensure this because the
// worker may run longer than the caller which presumably closes
// any spans it has created.
ctx = opentracing.ContextWithSpan(ctx, nil)
defer s.Recover(ctx)
defer s.stop.Done()
f(ctx)
}()
}
// AddCloser adds an object to close after the stopper has been stopped.
//
// WARNING: memory resources acquired by this method will stay around for
// the lifetime of the Stopper. Use with care to avoid leaking memory.
func (s *Stopper) AddCloser(c Closer) {
s.mu.Lock()
defer s.mu.Unlock()
select {
case <-s.stopper:
// Close immediately.
c.Close()
default:
s.mu.closers = append(s.mu.closers, c)
}
}
// WithCancelOnQuiesce returns a child context which is canceled when the
// returned cancel function is called or when the Stopper begins to quiesce,
// whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func (s *Stopper) WithCancelOnQuiesce(ctx context.Context) (context.Context, func()) {
return s.withCancel(ctx, s.mu.qCancels, s.quiescer)
}
// WithCancelOnStop returns a child context which is canceled when the
// returned cancel function is called or when the Stopper begins to stop,
// whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func (s *Stopper) WithCancelOnStop(ctx context.Context) (context.Context, func()) {
return s.withCancel(ctx, s.mu.sCancels, s.stopper)
}
func (s *Stopper) withCancel(
ctx context.Context, cancels map[int]func(), cancelCh chan struct{},
) (context.Context, func()) {
var cancel func()
ctx, cancel = context.WithCancel(ctx)
s.mu.Lock()
defer s.mu.Unlock()
select {
case <-cancelCh:
// Cancel immediately.
cancel()
return ctx, func() {}
default:
id := s.mu.idAlloc
s.mu.idAlloc++
cancels[id] = cancel
return ctx, func() {
cancel()
s.mu.Lock()
defer s.mu.Unlock()
delete(cancels, id)
}
}
}
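// Illustrative sketch (not part of the original file): tying request work to quiescence.
// doWork is a made-up placeholder.
//
//	ctx, cancel := s.WithCancelOnQuiesce(context.Background())
//	defer cancel() // releases the stopper's bookkeeping entry even on early return
//	if err := doWork(ctx); err != nil {
//		// ctx is canceled once the stopper starts quiescing
//	}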
// RunTask adds one to the count of tasks left to quiesce in the system.
// Any worker which is a "first mover" when starting tasks must call this method
// before starting work on a new task. First movers include goroutines launched
// to do periodic work and the kv/db.go gateway which accepts external client
// requests.
//
// taskName is used as the "operation" field of the span opened for this task
// and is visible in traces. It's also part of reports printed by stoppers
// waiting to stop. The convention is
// <package name>.<struct name>: <succinct description of the task's action>
//
// Returns an error to indicate that the system is currently quiescing and
// function f was not called.
func (s *Stopper) RunTask(ctx context.Context, taskName string, f func(context.Context)) error {
if !s.runPrelude(taskName) {
return errUnavailable
}
// Call f.
defer s.Recover(ctx)
defer s.runPostlude(taskName)
f(ctx)
return nil
}
// RunTaskWithErr is like RunTask(), but takes in a callback that can return an
// error. The error is returned to the caller.
func (s *Stopper) RunTaskWithErr(
ctx context.Context, taskName string, f func(context.Context) error,
) error {
if !s.runPrelude(taskName) {
return errUnavailable
}
// Call f.
defer s.Recover(ctx)
defer s.runPostlude(taskName)
return f(ctx)
}
// RunAsyncTask is like RunTask, except the callback is run in a goroutine. The
// method doesn't block for the callback to finish execution.
func (s *Stopper) RunAsyncTask(
ctx context.Context, taskName string, f func(context.Context),
) error {
taskName = asyncTaskNamePrefix + taskName
if !s.runPrelude(taskName) {
return errUnavailable
}
ctx, span := tracing.ForkCtxSpan(ctx, taskName)
// Call f.
go func() {
defer s.Recover(ctx)
defer s.runPostlude(taskName)
defer tracing.FinishSpan(span)
f(ctx)
}()
return nil
}
// RunLimitedAsyncTask runs function f in a goroutine, using the given
// channel as a semaphore to limit the number of tasks that are run
// concurrently to the channel's capacity. If wait is true, blocks
// until the semaphore is available in order to push back on callers
// that may be trying to create many tasks. If wait is false, returns
// immediately with an error if the semaphore is not
// available. Returns an error if the Stopper is quiescing, in which
// case the function is not executed.
func (s *Stopper) RunLimitedAsyncTask(
ctx context.Context, taskName string, sem chan struct{}, wait bool, f func(context.Context),
) error {
// Wait for permission to run from the semaphore.
select {
case sem <- struct{}{}:
case <-ctx.Done():
return ctx.Err()
case <-s.ShouldQuiesce():
return errUnavailable
default:
if !wait {
return ErrThrottled
}
log.Eventf(ctx, "stopper throttling task from %s due to semaphore", taskName)
// Retry the select without the default.
select {
case sem <- struct{}{}:
case <-ctx.Done():
return ctx.Err()
case <-s.ShouldQuiesce():
return errUnavailable
}
}
// Check for canceled context: it's possible to get the semaphore even
// if the context is canceled.
select {
case <-ctx.Done():
<-sem
return ctx.Err()
default:
}
if !s.runPrelude(taskName) {
<-sem
return errUnavailable
}
ctx, span := tracing.ForkCtxSpan(ctx, taskName)
go func() {
defer s.Recover(ctx)
defer s.runPostlude(taskName)
defer func() { <-sem }()
defer tracing.FinishSpan(span)
f(ctx)
}()
return nil
}
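// Illustrative sketch (not part of the original file): bounding concurrency with a
// semaphore channel. scanRange and ranges are made-up placeholders.
//
//	sem := make(chan struct{}, 8) // at most 8 tasks in flight
//	for _, r := range ranges {
//		r := r
//		if err := s.RunLimitedAsyncTask(ctx, "scan range", sem, true /* wait */, func(ctx context.Context) {
//			scanRange(ctx, r)
//		}); err != nil {
//			break // quiescing or ctx canceled
//		}
//	}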
func (s *Stopper) runPrelude(taskName string) bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.mu.quiescing {
return false
}
s.mu.numTasks++
s.mu.tasks[taskName]++
return true
}
func (s *Stopper) runPostlude(taskName string) {
s.mu.Lock()
defer s.mu.Unlock()
s.mu.numTasks--
s.mu.tasks[taskName]--
s.mu.quiesce.Broadcast()
}
// NumTasks returns the number of active tasks.
func (s *Stopper) NumTasks() int {
s.mu.Lock()
defer s.mu.Unlock()
return s.mu.numTasks
}
// A TaskMap is returned by RunningTasks().
type TaskMap map[string]int
// String implements fmt.Stringer and returns a sorted multi-line listing of
// the TaskMap.
func (tm TaskMap) String() string | {
var lines []string
for location, num := range tm {
lines = append(lines, fmt.Sprintf("%-6d %s", num, location))
}
sort.Sort(sort.Reverse(sort.StringSlice(lines)))
return strings.Join(lines, "\n")
} | identifier_body |
|
stopper.go | .mu.Unlock()
}
}
// Closer is an interface for objects to attach to the stopper to
// be closed once the stopper completes.
type Closer interface {
Close()
}
// CloserFn is type that allows any function to be a Closer.
type CloserFn func()
// Close implements the Closer interface.
func (f CloserFn) Close() {
f()
}
// A Stopper provides a channel-based mechanism to stop an arbitrary
// array of workers. Each worker is registered with the stopper via
// the RunWorker() method. The system further allows execution of functions
// through RunTask() and RunAsyncTask().
//
// Stopping occurs in two phases: the first is the request to stop, which moves
// the stopper into a quiescing phase. While quiescing, calls to RunTask() &
// RunAsyncTask() don't execute the function passed in and return errUnavailable.
// When all outstanding tasks have been completed, the stopper
// closes its stopper channel, which signals all live workers that it's safe to
// shut down. When all workers have shutdown, the stopper is complete.
//
// An arbitrary list of objects implementing the Closer interface may
// be added to the stopper via AddCloser(), to be closed after the
// stopper has stopped.
type Stopper struct {
quiescer chan struct{} // Closed when quiescing
stopper chan struct{} // Closed when stopping
stopped chan struct{} // Closed when stopped completely
onPanic func(interface{}) // called with recover() on panic on any goroutine
stop sync.WaitGroup // Incremented for outstanding workers
mu struct {
syncutil.Mutex
quiesce *sync.Cond // Conditional variable to wait for outstanding tasks
quiescing bool // true when Stop() has been called
numTasks int // number of outstanding tasks
tasks TaskMap
closers []Closer
idAlloc int
qCancels map[int]func()
sCancels map[int]func()
}
}
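// Illustrative sketch (not part of the original file): a typical Stopper lifecycle.
// Stop and ShouldStop belong to the full stopper package and are not shown in this
// excerpt; they are assumed here, and workCh is a made-up channel.
//
//	s := NewStopper()
//	defer s.Stop(context.Background())
//	s.RunWorker(ctx, func(ctx context.Context) {
//		for {
//			select {
//			case <-workCh:
//				// handle work
//			case <-s.ShouldStop():
//				return
//			}
//		}
//	})
//	_ = s.RunAsyncTask(ctx, "server: handle request", func(ctx context.Context) { /* ... */ })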
// An Option can be passed to NewStopper.
type Option interface {
apply(*Stopper)
}
type optionPanicHandler func(interface{})
func (oph optionPanicHandler) apply(stopper *Stopper) {
stopper.onPanic = oph
}
// OnPanic is an option which lets the Stopper recover from all panics using
// the provided panic handler.
//
// When Stop() is invoked during stack unwinding, OnPanic is also invoked, but
// Stop() may not have carried out its duties.
func OnPanic(handler func(interface{})) Option {
return optionPanicHandler(handler)
}
// NewStopper returns an instance of Stopper.
func NewStopper(options ...Option) *Stopper {
s := &Stopper{
quiescer: make(chan struct{}),
stopper: make(chan struct{}),
stopped: make(chan struct{}),
}
s.mu.tasks = TaskMap{}
s.mu.qCancels = map[int]func(){}
s.mu.sCancels = map[int]func(){}
for _, opt := range options {
opt.apply(s)
}
s.mu.quiesce = sync.NewCond(&s.mu)
register(s)
return s
}
// Recover is used internally by Stopper to provide a hook for recovery of
// panics on goroutines started by the Stopper. It can also be invoked
// explicitly (via "defer s.Recover()") on goroutines that are created outside
// of Stopper.
func (s *Stopper) Recover(ctx context.Context) {
if r := recover(); r != nil {
if s.onPanic != nil {
s.onPanic(r)
return
}
if sv := settings.TODO(); sv != nil {
log.ReportPanic(ctx, sv, r, 1)
}
panic(r)
}
}
// RunWorker runs the supplied function as a "worker" to be stopped
// by the stopper. The function <f> is run in a goroutine.
func (s *Stopper) RunWorker(ctx context.Context, f func(context.Context)) {
s.stop.Add(1)
go func() {
// Remove any associated span; we need to ensure this because the
// worker may run longer than the caller which presumably closes
// any spans it has created.
ctx = opentracing.ContextWithSpan(ctx, nil)
defer s.Recover(ctx)
defer s.stop.Done()
f(ctx)
}()
}
// AddCloser adds an object to close after the stopper has been stopped.
//
// WARNING: memory resources acquired by this method will stay around for
// the lifetime of the Stopper. Use with care to avoid leaking memory.
func (s *Stopper) AddCloser(c Closer) {
s.mu.Lock()
defer s.mu.Unlock()
select {
case <-s.stopper:
// Close immediately.
c.Close()
default:
s.mu.closers = append(s.mu.closers, c)
}
}
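// Illustrative sketch (not part of the original file): releasing resources at shutdown.
// The listener variable is a made-up placeholder; CloserFn is defined earlier in this file.
//
//	s.AddCloser(CloserFn(func() {
//		listener.Close()
//	}))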
// WithCancelOnQuiesce returns a child context which is canceled when the
// returned cancel function is called or when the Stopper begins to quiesce,
// whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func (s *Stopper) WithCancelOnQuiesce(ctx context.Context) (context.Context, func()) {
return s.withCancel(ctx, s.mu.qCancels, s.quiescer)
}
// WithCancelOnStop returns a child context which is canceled when the
// returned cancel function is called or when the Stopper begins to stop,
// whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func (s *Stopper) WithCancelOnStop(ctx context.Context) (context.Context, func()) {
return s.withCancel(ctx, s.mu.sCancels, s.stopper)
}
func (s *Stopper) withCancel(
ctx context.Context, cancels map[int]func(), cancelCh chan struct{},
) (context.Context, func()) {
var cancel func()
ctx, cancel = context.WithCancel(ctx)
s.mu.Lock()
defer s.mu.Unlock()
select {
case <-cancelCh:
// Cancel immediately.
cancel()
return ctx, func() {}
default:
id := s.mu.idAlloc
s.mu.idAlloc++
cancels[id] = cancel
return ctx, func() {
cancel()
s.mu.Lock()
defer s.mu.Unlock()
delete(cancels, id)
}
}
}
// RunTask adds one to the count of tasks left to quiesce in the system.
// Any worker which is a "first mover" when starting tasks must call this method
// before starting work on a new task. First movers include goroutines launched
// to do periodic work and the kv/db.go gateway which accepts external client
// requests.
//
// taskName is used as the "operation" field of the span opened for this task
// and is visible in traces. It's also part of reports printed by stoppers
// waiting to stop. The convention is
// <package name>.<struct name>: <succinct description of the task's action>
//
// Returns an error to indicate that the system is currently quiescing and
// function f was not called.
func (s *Stopper) RunTask(ctx context.Context, taskName string, f func(context.Context)) error {
if !s.runPrelude(taskName) {
return errUnavailable
}
// Call f.
defer s.Recover(ctx)
defer s.runPostlude(taskName)
f(ctx)
return nil
}
// RunTaskWithErr is like RunTask(), but takes in a callback that can return an
// error. The error is returned to the caller.
func (s *Stopper) RunTaskWithErr(
ctx context.Context, taskName string, f func(context.Context) error,
) error {
if !s.runPrelude(taskName) |
// Call f.
defer s.Recover(ctx)
defer s.runPostlude(taskName)
return f(ctx)
}
// RunAsyncTask is like RunTask, except the callback is run in a goroutine. The
// method doesn't block for the callback to finish execution.
func (s *Stopper) RunAsyncTask(
ctx context.Context, taskName string, f func(context.Context),
) error {
taskName = asyncTaskNamePrefix + taskName
if !s.runPrelude(taskName) {
return errUnavailable
}
ctx, span := tracing.ForkCtxSpan(ctx, taskName)
// Call f.
go func() {
defer s.Recover(ctx)
defer s.runPostlude(taskName)
defer tracing.FinishSpan(span)
f(ctx)
}()
return nil
}
// RunLimitedAsyncTask runs function f in a goroutine, using the given
// channel as a semaphore to limit the number of tasks that are run
// concurrently to the channel's capacity. If wait is true, blocks
// until the semaphore is available in order to push back on callers
// that may be trying to create many tasks. If wait is false, returns
// immediately with an error if the semaphore is not
// available. Returns an error if the Stopper is quiescing, in which
// case the | {
return errUnavailable
} | conditional_block |
stopper.go | "github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/util/caller"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/opentracing/opentracing-go"
)
const asyncTaskNamePrefix = "[async] "
// ErrThrottled is returned from RunLimitedAsyncTask in the event that there
// is no more capacity for async tasks, as limited by the semaphore.
var ErrThrottled = errors.New("throttled on async limiting semaphore")
var errUnavailable = &roachpb.NodeUnavailableError{}
func register(s *Stopper) {
trackedStoppers.Lock()
trackedStoppers.stoppers = append(trackedStoppers.stoppers, s)
trackedStoppers.Unlock()
}
func unregister(s *Stopper) {
trackedStoppers.Lock()
defer trackedStoppers.Unlock()
sl := trackedStoppers.stoppers
for i, tracked := range sl {
if tracked == s {
trackedStoppers.stoppers = sl[:i+copy(sl[i:], sl[i+1:])]
return
}
}
panic("attempt to unregister untracked stopper")
}
var trackedStoppers struct {
syncutil.Mutex
stoppers []*Stopper
}
// HandleDebug responds with the list of stopper tasks actively running.
func HandleDebug(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
trackedStoppers.Lock()
defer trackedStoppers.Unlock()
for _, s := range trackedStoppers.stoppers {
s.mu.Lock()
fmt.Fprintf(w, "%p: %d tasks\n%s", s, s.mu.numTasks, s.runningTasksLocked())
s.mu.Unlock()
}
}
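// Illustrative sketch (not part of the original file): exposing the running-task report
// over HTTP. The route path is an arbitrary choice for the example.
//
//	mux := http.NewServeMux()
//	mux.HandleFunc("/debug/stopper", HandleDebug)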
// Closer is an interface for objects to attach to the stopper to
// be closed once the stopper completes.
type Closer interface {
Close()
}
// CloserFn is type that allows any function to be a Closer.
type CloserFn func()
// Close implements the Closer interface.
func (f CloserFn) Close() {
f()
}
// A Stopper provides a channel-based mechanism to stop an arbitrary
// array of workers. Each worker is registered with the stopper via
// the RunWorker() method. The system further allows execution of functions
// through RunTask() and RunAsyncTask().
//
// Stopping occurs in two phases: the first is the request to stop, which moves
// the stopper into a quiescing phase. While quiescing, calls to RunTask() &
// RunAsyncTask() don't execute the function passed in and return errUnavailable.
// When all outstanding tasks have been completed, the stopper
// closes its stopper channel, which signals all live workers that it's safe to
// shut down. When all workers have shutdown, the stopper is complete.
//
// An arbitrary list of objects implementing the Closer interface may
// be added to the stopper via AddCloser(), to be closed after the
// stopper has stopped.
type Stopper struct {
quiescer chan struct{} // Closed when quiescing
stopper chan struct{} // Closed when stopping
stopped chan struct{} // Closed when stopped completely
onPanic func(interface{}) // called with recover() on panic on any goroutine
stop sync.WaitGroup // Incremented for outstanding workers
mu struct {
syncutil.Mutex
quiesce *sync.Cond // Conditional variable to wait for outstanding tasks
quiescing bool // true when Stop() has been called
numTasks int // number of outstanding tasks
tasks TaskMap
closers []Closer
idAlloc int
qCancels map[int]func()
sCancels map[int]func()
}
}
// An Option can be passed to NewStopper.
type Option interface {
apply(*Stopper)
}
type optionPanicHandler func(interface{})
func (oph optionPanicHandler) apply(stopper *Stopper) {
stopper.onPanic = oph
}
// OnPanic is an option which lets the Stopper recover from all panics using
// the provided panic handler.
//
// When Stop() is invoked during stack unwinding, OnPanic is also invoked, but
// Stop() may not have carried out its duties.
func OnPanic(handler func(interface{})) Option {
return optionPanicHandler(handler)
}
// NewStopper returns an instance of Stopper.
func NewStopper(options ...Option) *Stopper {
s := &Stopper{
quiescer: make(chan struct{}),
stopper: make(chan struct{}),
stopped: make(chan struct{}),
}
s.mu.tasks = TaskMap{}
s.mu.qCancels = map[int]func(){}
s.mu.sCancels = map[int]func(){}
for _, opt := range options {
opt.apply(s)
}
s.mu.quiesce = sync.NewCond(&s.mu)
register(s)
return s
}
// Recover is used internally by Stopper to provide a hook for recovery of
// panics on goroutines started by the Stopper. It can also be invoked
// explicitly (via "defer s.Recover()") on goroutines that are created outside
// of Stopper.
func (s *Stopper) Recover(ctx context.Context) {
if r := recover(); r != nil {
if s.onPanic != nil {
s.onPanic(r)
return
}
if sv := settings.TODO(); sv != nil {
log.ReportPanic(ctx, sv, r, 1)
}
panic(r)
}
}
// RunWorker runs the supplied function as a "worker" to be stopped
// by the stopper. The function <f> is run in a goroutine.
func (s *Stopper) RunWorker(ctx context.Context, f func(context.Context)) {
s.stop.Add(1)
go func() {
// Remove any associated span; we need to ensure this because the
// worker may run longer than the caller which presumably closes
// any spans it has created.
ctx = opentracing.ContextWithSpan(ctx, nil)
defer s.Recover(ctx)
defer s.stop.Done()
f(ctx)
}()
}
// AddCloser adds an object to close after the stopper has been stopped.
//
// WARNING: memory resources acquired by this method will stay around for
// the lifetime of the Stopper. Use with care to avoid leaking memory.
func (s *Stopper) AddCloser(c Closer) {
s.mu.Lock()
defer s.mu.Unlock()
select {
case <-s.stopper:
// Close immediately.
c.Close()
default:
s.mu.closers = append(s.mu.closers, c)
}
}
// WithCancelOnQuiesce returns a child context which is canceled when the
// returned cancel function is called or when the Stopper begins to quiesce,
// whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func (s *Stopper) WithCancelOnQuiesce(ctx context.Context) (context.Context, func()) {
return s.withCancel(ctx, s.mu.qCancels, s.quiescer)
}
// WithCancelOnStop returns a child context which is canceled when the
// returned cancel function is called or when the Stopper begins to stop,
// whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func (s *Stopper) WithCancelOnStop(ctx context.Context) (context.Context, func()) {
return s.withCancel(ctx, s.mu.sCancels, s.stopper)
}
func (s *Stopper) withCancel(
ctx context.Context, cancels map[int]func(), cancelCh chan struct{},
) (context.Context, func()) {
var cancel func()
ctx, cancel = context.WithCancel(ctx)
s.mu.Lock()
defer s.mu.Unlock()
select {
case <-cancelCh:
// Cancel immediately.
cancel()
return ctx, func() {}
default:
id := s.mu.idAlloc
s.mu.idAlloc++
cancels[id] = cancel
return ctx, func() {
cancel()
s.mu.Lock()
defer s.mu.Unlock()
delete(cancels, id)
}
}
}
// RunTask adds one to the count of tasks left to quiesce in the system.
// Any worker which is a "first mover" when starting tasks must call this method
// before starting work on a new task. First movers include goroutines launched
// to do periodic work and the kv/db.go gateway which accepts external client
// requests.
//
// taskName is used as the "operation" field of the span opened for this task
// and is visible in traces. It's also part of reports printed by stoppers
// waiting to stop. The convention is
// <package name>.<struct name>: <succinct description of the task's action>
//
// Returns an error to indicate that the system is currently quiescing and
// function f was not called.
func (s *Stopper) RunTask(ctx context.Context, taskName |
"github.com/cockroachdb/cockroach/pkg/roachpb" | random_line_split |
|
stopper.go | .mu.Unlock()
}
}
// Closer is an interface for objects to attach to the stopper to
// be closed once the stopper completes.
type Closer interface {
Close()
}
// CloserFn is type that allows any function to be a Closer.
type CloserFn func()
// Close implements the Closer interface.
func (f CloserFn) Close() {
f()
}
// A Stopper provides a channel-based mechanism to stop an arbitrary
// array of workers. Each worker is registered with the stopper via
// the RunWorker() method. The system further allows execution of functions
// through RunTask() and RunAsyncTask().
//
// Stopping occurs in two phases: the first is the request to stop, which moves
// the stopper into a quiescing phase. While quiescing, calls to RunTask() &
// RunAsyncTask() don't execute the function passed in and return errUnavailable.
// When all outstanding tasks have been completed, the stopper
// closes its stopper channel, which signals all live workers that it's safe to
// shut down. When all workers have shut down, the stopper is complete.
//
// An arbitrary list of objects implementing the Closer interface may
// be added to the stopper via AddCloser(), to be closed after the
// stopper has stopped.
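//
// Example (sketch only, not from the original source; assumes the package is
// imported as "stop" and that Stop() is defined elsewhere in this package):
//
//	s := stop.NewStopper()
//	s.RunWorker(ctx, func(ctx context.Context) { /* long-lived background work */ })
//	if err := s.RunTask(ctx, "mypkg.Server: handle request", func(ctx context.Context) {
//		// short-lived work; counted towards quiescence
//	}); err != nil {
//		// the stopper is quiescing, the callback was not run
//	}
//	s.Stop(ctx)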
type Stopper struct {
quiescer chan struct{} // Closed when quiescing
stopper chan struct{} // Closed when stopping
stopped chan struct{} // Closed when stopped completely
onPanic func(interface{}) // called with recover() on panic on any goroutine
stop sync.WaitGroup // Incremented for outstanding workers
mu struct {
syncutil.Mutex
quiesce *sync.Cond // Conditional variable to wait for outstanding tasks
quiescing bool // true when Stop() has been called
numTasks int // number of outstanding tasks
tasks TaskMap
closers []Closer
idAlloc int
qCancels map[int]func()
sCancels map[int]func()
}
}
// An Option can be passed to NewStopper.
type Option interface {
apply(*Stopper)
}
type optionPanicHandler func(interface{})
func (oph optionPanicHandler) apply(stopper *Stopper) {
stopper.onPanic = oph
}
// OnPanic is an option which lets the Stopper recover from all panics using
// the provided panic handler.
//
// When Stop() is invoked during stack unwinding, OnPanic is also invoked, but
// Stop() may not have carried out its duties.
func OnPanic(handler func(interface{})) Option {
return optionPanicHandler(handler)
}
// NewStopper returns an instance of Stopper.
func NewStopper(options ...Option) *Stopper {
s := &Stopper{
quiescer: make(chan struct{}),
stopper: make(chan struct{}),
stopped: make(chan struct{}),
}
s.mu.tasks = TaskMap{}
s.mu.qCancels = map[int]func(){}
s.mu.sCancels = map[int]func(){}
for _, opt := range options {
opt.apply(s)
}
s.mu.quiesce = sync.NewCond(&s.mu)
register(s)
return s
}
// Recover is used internally by Stopper to provide a hook for recovery of
// panics on goroutines started by the Stopper. It can also be invoked
// explicitly (via "defer s.Recover()") on goroutines that are created outside
// of Stopper.
func (s *Stopper) Recover(ctx context.Context) {
if r := recover(); r != nil {
if s.onPanic != nil {
s.onPanic(r)
return
}
if sv := settings.TODO(); sv != nil {
log.ReportPanic(ctx, sv, r, 1)
}
panic(r)
}
}
// RunWorker runs the supplied function as a "worker" to be stopped
// by the stopper. The function <f> is run in a goroutine.
func (s *Stopper) RunWorker(ctx context.Context, f func(context.Context)) {
s.stop.Add(1)
go func() {
// Remove any associated span; we need to ensure this because the
// worker may run longer than the caller which presumably closes
// any spans it has created.
ctx = opentracing.ContextWithSpan(ctx, nil)
defer s.Recover(ctx)
defer s.stop.Done()
f(ctx)
}()
}
// AddCloser adds an object to close after the stopper has been stopped.
//
// WARNING: memory resources acquired by this method will stay around for
// the lifetime of the Stopper. Use with care to avoid leaking memory.
func (s *Stopper) AddCloser(c Closer) {
s.mu.Lock()
defer s.mu.Unlock()
select {
case <-s.stopper:
// Close immediately.
c.Close()
default:
s.mu.closers = append(s.mu.closers, c)
}
}
// WithCancelOnQuiesce returns a child context which is canceled when the
// returned cancel function is called or when the Stopper begins to quiesce,
// whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func (s *Stopper) WithCancelOnQuiesce(ctx context.Context) (context.Context, func()) {
return s.withCancel(ctx, s.mu.qCancels, s.quiescer)
}
// WithCancelOnStop returns a child context which is canceled when the
// returned cancel function is called or when the Stopper begins to stop,
// whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func (s *Stopper) WithCancelOnStop(ctx context.Context) (context.Context, func()) {
return s.withCancel(ctx, s.mu.sCancels, s.stopper)
}
func (s *Stopper) | (
ctx context.Context, cancels map[int]func(), cancelCh chan struct{},
) (context.Context, func()) {
var cancel func()
ctx, cancel = context.WithCancel(ctx)
s.mu.Lock()
defer s.mu.Unlock()
select {
case <-cancelCh:
// Cancel immediately.
cancel()
return ctx, func() {}
default:
id := s.mu.idAlloc
s.mu.idAlloc++
cancels[id] = cancel
return ctx, func() {
cancel()
s.mu.Lock()
defer s.mu.Unlock()
delete(cancels, id)
}
}
}
// RunTask adds one to the count of tasks left to quiesce in the system.
// Any worker which is a "first mover" when starting tasks must call this method
// before starting work on a new task. First movers include goroutines launched
// to do periodic work and the kv/db.go gateway which accepts external client
// requests.
//
// taskName is used as the "operation" field of the span opened for this task
// and is visible in traces. It's also part of reports printed by stoppers
// waiting to stop. The convention is
// <package name>.<struct name>: <succinct description of the task's action>
//
// Returns an error to indicate that the system is currently quiescing and
// function f was not called.
func (s *Stopper) RunTask(ctx context.Context, taskName string, f func(context.Context)) error {
if !s.runPrelude(taskName) {
return errUnavailable
}
// Call f.
defer s.Recover(ctx)
defer s.runPostlude(taskName)
f(ctx)
return nil
}
// RunTaskWithErr is like RunTask(), but takes in a callback that can return an
// error. The error is returned to the caller.
func (s *Stopper) RunTaskWithErr(
ctx context.Context, taskName string, f func(context.Context) error,
) error {
if !s.runPrelude(taskName) {
return errUnavailable
}
// Call f.
defer s.Recover(ctx)
defer s.runPostlude(taskName)
return f(ctx)
}
// RunAsyncTask is like RunTask, except the callback is run in a goroutine. The
// method doesn't block for the callback to finish execution.
func (s *Stopper) RunAsyncTask(
ctx context.Context, taskName string, f func(context.Context),
) error {
taskName = asyncTaskNamePrefix + taskName
if !s.runPrelude(taskName) {
return errUnavailable
}
ctx, span := tracing.ForkCtxSpan(ctx, taskName)
// Call f.
go func() {
defer s.Recover(ctx)
defer s.runPostlude(taskName)
defer tracing.FinishSpan(span)
f(ctx)
}()
return nil
}
// RunLimitedAsyncTask runs function f in a goroutine, using the given
// channel as a semaphore to limit the number of tasks that are run
// concurrently to the channel's capacity. If wait is true, blocks
// until the semaphore is available in order to push back on callers
// that may be trying to create many tasks. If wait is false, returns
// immediately with an error if the semaphore is not
// available. Returns an error if the Stopper is quiescing, in which
// case the function | withCancel | identifier_name |
pdd.go | // panic(err)
//}
//return p
}
func (self *Pdd)addSign(u *url.Values){
u.Set("client_id",self.Info.Client_id)
u.Set("timestamp",fmt.Sprintf("%d",time.Now().Unix()))
var li []string
for k,_ := range *u {
li = append(li,k)
}
sort.Strings(li)
sign := self.Info.Client_secret
for _,k :=range li {
sign+=k+u.Get(k)
}
sign+=self.Info.Client_secret
//fmt.Println(sign)
u.Set("sign",fmt.Sprintf("%X", md5.Sum([]byte(sign))))
}
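// Example of the string addSign hashes (illustrative values only): with
// client_id=abc, timestamp=1600000000 and type=pdd.ddk.goods.search, the keys
// are sorted and concatenated as key+value, wrapped in the client secret on
// both sides:
//
//	<secret> + "client_idabc" + "timestamp1600000000" + "typepdd.ddk.goods.search" + <secret>
//
// and the "sign" parameter is set to the uppercase hex MD5 of that string.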
func (self *Pdd) ClientHttp(u *url.Values)( out interface{}){
self.addSign(u)
//ht := http.Header{}
//ht.Add("Content-Type","application/json")
var err error
err = request.ClientHttp_(
PddUrl+"?"+u.Encode(),
"GET",nil,
nil,
func(body io.Reader,start int)error{
if start != 200 {
db,err := ioutil.ReadAll(body)
if err!= nil {
return err
}
return fmt.Errorf("%s",db)
}
return json.NewDecoder(body).Decode(&out)
})
if err != nil {
fmt.Println(err,out)
out = err
//time.Sleep(time.Second*1)
//return self.ClientHttp(u)
//panic(err)
}
return
}
func (self *Pdd) getPid()error {
req := self.pidQuery()
switch r := req.(type){
case error:
return r
}
pid := (req.(map[string]interface{})["p_id_query_response"]).(map[string]interface{})
if pid["total_count"].(float64) >0 {
for _,p_ := range pid["p_id_list"].([]interface{}){
self.PddPid = append(self.PddPid,(p_.(map[string]interface{})["p_id"]).(string))
}
return nil
}
req = self.pidGenerate(1)
switch r := req.(type){
case error:
return r
}
_pid := ((req.(map[string]interface{})["p_id_generate_response"]).(map[string]interface{})["p_id_list"]).([]interface{})
for _,p_ := range _pid{
self.PddPid = append(self.PddPid,(p_.(map[string]interface{})["p_id"]).(string))
}
return nil
}
//pdd.ddk.goods.pid.generate
func (self *Pdd) pidGenerate(n int) interface{}{
u := &url.Values{}
u.Add("type","pdd.ddk.goods.pid.generate")
u.Add("number",fmt.Sprintf("%d",n))
return self.ClientHttp(u)
}
//pdd.ddk.goods.pid.query
func (self *Pdd) pidQuery() interface{}{
u := &url.Values{}
u.Add("type","pdd.ddk.goods.pid.query")
return self.ClientHttp(u)
}
func (self *Pdd)GoodsAppMini(words ...string)interface{}{
goodsid := words[0]
if len(self.PddPid) == 0 {
err := self.getPid()
if err != nil {
return err
}
}
u := &url.Values{}
u.Add("type","pdd.ddk.goods.promotion.url.generate")
u.Add("goods_id_list","["+goodsid+"]")
u.Add("p_id",self.PddPid[0])
u.Add("generate_short_url","true")
//if words[len(words)-1] =="mini"{
// return self.GoodsAppMini(words[:len(words)-1]...)
u.Add("generate_we_app","true")
//}
//if multi{
u.Add("multi_group","true")
if len(words)>1 {
u.Add("custom_parameters",words[1])
}
//}
db := self.ClientHttp(u)
res := db.(map[string]interface{})["goods_promotion_url_generate_response"]
if res == nil {
return nil
}
res_ := res.(map[string]interface{})["goods_promotion_url_list"]
	if res_ == nil {
return nil
}
res__ := res_.([]interface{})
if len(res__)== 0 {
return nil
}
res___ := res__[0].(map[string]interface{})
if res___ == nil {
return nil
}
if res___["we_app_info"] == nil {
return nil
}
app := res___["we_app_info"].(map[string]interface{})
return map[string]interface{}{
"appid":app["app_id"].(string),
"url":app["page_path"].(string),
}
}
//pdd.ddk.goods.promotion.url.generate
func (self *Pdd) GoodsUrl(words ...string) interface{}{
goodsid := words[0]
if len(self.PddPid) == 0 {
err := self.getPid()
if err != nil {
return err
}
}
u := &url.Values{}
u.Add("type","pdd.ddk.goods.promotion.url.generate")
u.Add("goods_id_list","["+goodsid+"]")
u.Add("p_id",self.PddPid[0])
u.Add("generate_short_url","true")
//if words[len(words)-1] =="mini"{
// return self.GoodsAppMini(words[:len(words)-1]...)
// u.Add("generate_we_app","true")
//}
//if multi{
u.Add("multi_group","true")
if len(words)>1 {
u.Add("custom_parameters",words[1])
}
//}
db := self.ClientHttp(u)
res := db.(map[string]interface{})["goods_promotion_url_generate_response"]
if res == nil {
return nil
}
res_ := res.(map[string]interface{})["goods_promotion_url_list"]
	if res_ == nil {
return nil
}
res__ := res_.([]interface{})
if len(res__)== 0 {
return nil
}
res___ := res__[0].(map[string]interface{})
if res___ == nil {
return nil
}
//if res___["we_app_info"] != nil {
// app := res___["we_app_info"].(map[string]interface{})
// return map[string]interface{}{
// "appid":app["app_id"].(string),
// "url":app["page_path"].(string),
// }
//}
return res___
//return res___["short_url"].(string)
}
func (self *Pdd)OrderMsg(_db interface{}) (str string){
db := _db.(map[string]interface{})
res := db["order_detail_response"].(map[string]interface{})
fee := res["promotion_amount"].(float64)/100
str = fmt.Sprintf("%s\n¥%.2f\n佣金¥%.2f \n技术服务费¥%.2f\n",
res["goods_name"].(string),
res["order_amount"].(float64)/100,
fee,fee*0.1,
)
if res["order_status"].(float64) == 2{
finishTime :=time.Unix(int64(db["order_receive_time"].(float64)/1000),0).Add(time.Hour*24*15)
str += fmt.Sprintf("%s\n返¥%.2f\n预计%s到帐\n",
iMsg,
fee*0.9,
finishTime.Format("1月2日"),
)
}else{
str +=iMsg+"订单完成15日后返利\n"
}
return str
}
func (self *Pdd) stuctured(data interface{}) (g Goods){
d_ := data.(map[string]interface{})
p:= d_["min_group_price"].(float64)/100
return Goods{
Id:fmt.Sprintf("%.0f",d_["goods_id"].(float64)),
Img:[]string{d_["goods_thumbnail_url"].(string)},
Name:d_["goods_name"].(string),
Tag:d_["mall_name"].(string),
Price:p,
Fprice:fmt.Sprintf("%.2f",d_["promotion_rate"].(float64)/1000.0*p*Rate),
Coupon:d_["coupon_discount"].(float64)>0,
//Show:d_["goods_desc"].(string),
}
}
func (self *Pdd) SearchGoods(words ...string)interface{}{
db := self.searchGoods(words...)
if db == nil {
return nil
}
res := db.(map[string]interface{})["goods_search_response"]
if res == nil {
return nil
}
var li []interface{}
for _,d := range res.(map[string]interface{})["goods_list"].([]interface{}){
li = append(li,self.stuctured(d))
}
return li
//return res.(map[string]interface{})["goods_list"].([]interface{})
}
func (self | {
j :=&Pdd{Info:sh}
//go func(){
// for _ = range j.DownChan{
// j.OrderDownSelf(func(db interface{}){
// err := OrderUpdate(db.(map[string]interface{})["order_id"].(string),db)
// if err != nil {
// fmt.Println(err)
// }
// })
// }
//}()
return j
//if !o {
// return p
//}
//var err error
//p.OrderDB,err = bolt.Open("pddOrder",0600,nil)
//if err != nil { | identifier_body |
|
pdd.go | var err error
err = request.ClientHttp_(
PddUrl+"?"+u.Encode(),
"GET",nil,
nil,
func(body io.Reader,start int)error{
if start != 200 {
db,err := ioutil.ReadAll(body)
if err!= nil {
return err
}
return fmt.Errorf("%s",db)
}
return json.NewDecoder(body).Decode(&out)
})
if err != nil {
fmt.Println(err,out)
out = err
//time.Sleep(time.Second*1)
//return self.ClientHttp(u)
//panic(err)
}
return
}
func (self *Pdd) getPid()error {
req := self.pidQuery() | }
pid := (req.(map[string]interface{})["p_id_query_response"]).(map[string]interface{})
if pid["total_count"].(float64) >0 {
for _,p_ := range pid["p_id_list"].([]interface{}){
self.PddPid = append(self.PddPid,(p_.(map[string]interface{})["p_id"]).(string))
}
return nil
}
req = self.pidGenerate(1)
switch r := req.(type){
case error:
return r
}
_pid := ((req.(map[string]interface{})["p_id_generate_response"]).(map[string]interface{})["p_id_list"]).([]interface{})
for _,p_ := range _pid{
self.PddPid = append(self.PddPid,(p_.(map[string]interface{})["p_id"]).(string))
}
return nil
}
//pdd.ddk.goods.pid.generate
func (self *Pdd) pidGenerate(n int) interface{}{
u := &url.Values{}
u.Add("type","pdd.ddk.goods.pid.generate")
u.Add("number",fmt.Sprintf("%d",n))
return self.ClientHttp(u)
}
//pdd.ddk.goods.pid.query
func (self *Pdd) pidQuery() interface{}{
u := &url.Values{}
u.Add("type","pdd.ddk.goods.pid.query")
return self.ClientHttp(u)
}
func (self *Pdd)GoodsAppMini(words ...string)interface{}{
goodsid := words[0]
if len(self.PddPid) == 0 {
err := self.getPid()
if err != nil {
return err
}
}
u := &url.Values{}
u.Add("type","pdd.ddk.goods.promotion.url.generate")
u.Add("goods_id_list","["+goodsid+"]")
u.Add("p_id",self.PddPid[0])
u.Add("generate_short_url","true")
//if words[len(words)-1] =="mini"{
// return self.GoodsAppMini(words[:len(words)-1]...)
u.Add("generate_we_app","true")
//}
//if multi{
u.Add("multi_group","true")
if len(words)>1 {
u.Add("custom_parameters",words[1])
}
//}
db := self.ClientHttp(u)
res := db.(map[string]interface{})["goods_promotion_url_generate_response"]
if res == nil {
return nil
}
res_ := res.(map[string]interface{})["goods_promotion_url_list"]
	if res_ == nil {
return nil
}
res__ := res_.([]interface{})
if len(res__)== 0 {
return nil
}
res___ := res__[0].(map[string]interface{})
if res___ == nil {
return nil
}
if res___["we_app_info"] == nil {
return nil
}
app := res___["we_app_info"].(map[string]interface{})
return map[string]interface{}{
"appid":app["app_id"].(string),
"url":app["page_path"].(string),
}
}
//pdd.ddk.goods.promotion.url.generate
func (self *Pdd) GoodsUrl(words ...string) interface{}{
goodsid := words[0]
if len(self.PddPid) == 0 {
err := self.getPid()
if err != nil {
return err
}
}
u := &url.Values{}
u.Add("type","pdd.ddk.goods.promotion.url.generate")
u.Add("goods_id_list","["+goodsid+"]")
u.Add("p_id",self.PddPid[0])
u.Add("generate_short_url","true")
//if words[len(words)-1] =="mini"{
// return self.GoodsAppMini(words[:len(words)-1]...)
// u.Add("generate_we_app","true")
//}
//if multi{
u.Add("multi_group","true")
if len(words)>1 {
u.Add("custom_parameters",words[1])
}
//}
db := self.ClientHttp(u)
res := db.(map[string]interface{})["goods_promotion_url_generate_response"]
if res == nil {
return nil
}
res_ := res.(map[string]interface{})["goods_promotion_url_list"]
	if res_ == nil {
return nil
}
res__ := res_.([]interface{})
if len(res__)== 0 {
return nil
}
res___ := res__[0].(map[string]interface{})
if res___ == nil {
return nil
}
//if res___["we_app_info"] != nil {
// app := res___["we_app_info"].(map[string]interface{})
// return map[string]interface{}{
// "appid":app["app_id"].(string),
// "url":app["page_path"].(string),
// }
//}
return res___
//return res___["short_url"].(string)
}
func (self *Pdd)OrderMsg(_db interface{}) (str string){
db := _db.(map[string]interface{})
res := db["order_detail_response"].(map[string]interface{})
fee := res["promotion_amount"].(float64)/100
str = fmt.Sprintf("%s\n¥%.2f\n佣金¥%.2f \n技术服务费¥%.2f\n",
res["goods_name"].(string),
res["order_amount"].(float64)/100,
fee,fee*0.1,
)
if res["order_status"].(float64) == 2{
finishTime :=time.Unix(int64(db["order_receive_time"].(float64)/1000),0).Add(time.Hour*24*15)
str += fmt.Sprintf("%s\n返¥%.2f\n预计%s到帐\n",
iMsg,
fee*0.9,
finishTime.Format("1月2日"),
)
}else{
str +=iMsg+"订单完成15日后返利\n"
}
return str
}
func (self *Pdd) stuctured(data interface{}) (g Goods){
d_ := data.(map[string]interface{})
p:= d_["min_group_price"].(float64)/100
return Goods{
Id:fmt.Sprintf("%.0f",d_["goods_id"].(float64)),
Img:[]string{d_["goods_thumbnail_url"].(string)},
Name:d_["goods_name"].(string),
Tag:d_["mall_name"].(string),
Price:p,
Fprice:fmt.Sprintf("%.2f",d_["promotion_rate"].(float64)/1000.0*p*Rate),
Coupon:d_["coupon_discount"].(float64)>0,
//Show:d_["goods_desc"].(string),
}
}
func (self *Pdd) SearchGoods(words ...string)interface{}{
db := self.searchGoods(words...)
if db == nil {
return nil
}
res := db.(map[string]interface{})["goods_search_response"]
if res == nil {
return nil
}
var li []interface{}
for _,d := range res.(map[string]interface{})["goods_list"].([]interface{}){
li = append(li,self.stuctured(d))
}
return li
//return res.(map[string]interface{})["goods_list"].([]interface{})
}
func (self *Pdd) searchGoods(words ...string)interface{}{
u := &url.Values{}
u.Add("type","pdd.ddk.goods.search")
u.Add("keyword",words[0])
u.Add("page_size","30")
//u.Add("custom_parameters",words[1])
return self.ClientHttp(u)
}
//pdd.ddk.goods.detail
func (self *Pdd) goodsDetail(words ...string)interface{}{
goodsid := words[0]
u := &url.Values{}
u.Add("type","pdd.ddk.goods.detail")
u.Add("goods_id_list","["+goodsid+"]")
return self.ClientHttp(u)
}
func (self *Pdd) GoodsDetail(words ...string)interface{}{
db := self.goodsDetail(words...)
if db == nil {
return nil
}
res := db.(map[string]interface{})["goods_detail_response"]
if res == nil {
return nil
}
var li []interface{}
for _,d := range res.(map[string]interface{})["goods_details"].([]interface{}){
li = append(li,self.stuctured(d))
}
return li
//return res.(map[string]interface{})["goods_details"]
//db_.goods_detail_response.goods_details
}
//func (self *Pdd)OrderSearch(keys ...string)(d interface{}){
// //pdd.ddk.order.detail.get
// if len(keys)<2 {
// return nil
// }
// err := orderGet(keys[0],keys[1],func(db interface{} | switch r := req.(type){
case error:
return r | random_line_split |
pdd.go | err error
err = request.ClientHttp_(
PddUrl+"?"+u.Encode(),
"GET",nil,
nil,
func(body io.Reader,start int)error{
if start != 200 {
db,err := ioutil.ReadAll(body)
if err!= nil {
return err
}
return fmt.Errorf("%s",db)
}
return json.NewDecoder(body).Decode(&out)
})
if err != nil {
fmt.Println(err,out)
out = err
//time.Sleep(time.Second*1)
//return self.ClientHttp(u)
//panic(err)
}
return
}
func (self *Pdd) getPid()error {
req := self.pidQuery()
switch r := req.(type){
case error:
return r
}
pid := (req.(map[string]interface{})["p_id_query_response"]).(map[string]interface{})
if pid["total_count"].(float64) >0 {
for _,p_ := range pid["p_id_list"].([]interface{}){
self.PddPid = append(self.PddPid,(p_.(map[string]interface{})["p_id"]).(string))
}
return nil
}
req = self.pidGenerate(1)
switch r := req.(type){
case error:
return r
}
_pid := ((req.(map[string]interface{})["p_id_generate_response"]).(map[string]interface{})["p_id_list"]).([]interface{})
for _,p_ := range _pid{
self.PddPid = append(self.PddPid,(p_.(map[string]interface{})["p_id"]).(string))
}
return nil
}
//pdd.ddk.goods.pid.generate
func (self *Pdd) pidGenerate(n int) interface{}{
u := &url.Values{}
u.Add("type","pdd.ddk.goods.pid.generate")
u.Add("number",fmt.Sprintf("%d",n))
return self.ClientHttp(u)
}
//pdd.ddk.goods.pid.query
func (self *Pdd) | () interface{}{
u := &url.Values{}
u.Add("type","pdd.ddk.goods.pid.query")
return self.ClientHttp(u)
}
func (self *Pdd)GoodsAppMini(words ...string)interface{}{
goodsid := words[0]
if len(self.PddPid) == 0 {
err := self.getPid()
if err != nil {
return err
}
}
u := &url.Values{}
u.Add("type","pdd.ddk.goods.promotion.url.generate")
u.Add("goods_id_list","["+goodsid+"]")
u.Add("p_id",self.PddPid[0])
u.Add("generate_short_url","true")
//if words[len(words)-1] =="mini"{
// return self.GoodsAppMini(words[:len(words)-1]...)
u.Add("generate_we_app","true")
//}
//if multi{
u.Add("multi_group","true")
if len(words)>1 {
u.Add("custom_parameters",words[1])
}
//}
db := self.ClientHttp(u)
res := db.(map[string]interface{})["goods_promotion_url_generate_response"]
if res == nil {
return nil
}
res_ := res.(map[string]interface{})["goods_promotion_url_list"]
	if res_ == nil {
return nil
}
res__ := res_.([]interface{})
if len(res__)== 0 {
return nil
}
res___ := res__[0].(map[string]interface{})
if res___ == nil {
return nil
}
if res___["we_app_info"] == nil {
return nil
}
app := res___["we_app_info"].(map[string]interface{})
return map[string]interface{}{
"appid":app["app_id"].(string),
"url":app["page_path"].(string),
}
}
//pdd.ddk.goods.promotion.url.generate
func (self *Pdd) GoodsUrl(words ...string) interface{}{
goodsid := words[0]
if len(self.PddPid) == 0 {
err := self.getPid()
if err != nil {
return err
}
}
u := &url.Values{}
u.Add("type","pdd.ddk.goods.promotion.url.generate")
u.Add("goods_id_list","["+goodsid+"]")
u.Add("p_id",self.PddPid[0])
u.Add("generate_short_url","true")
//if words[len(words)-1] =="mini"{
// return self.GoodsAppMini(words[:len(words)-1]...)
// u.Add("generate_we_app","true")
//}
//if multi{
u.Add("multi_group","true")
if len(words)>1 {
u.Add("custom_parameters",words[1])
}
//}
db := self.ClientHttp(u)
res := db.(map[string]interface{})["goods_promotion_url_generate_response"]
if res == nil {
return nil
}
res_ := res.(map[string]interface{})["goods_promotion_url_list"]
	if res_ == nil {
return nil
}
res__ := res_.([]interface{})
if len(res__)== 0 {
return nil
}
res___ := res__[0].(map[string]interface{})
if res___ == nil {
return nil
}
//if res___["we_app_info"] != nil {
// app := res___["we_app_info"].(map[string]interface{})
// return map[string]interface{}{
// "appid":app["app_id"].(string),
// "url":app["page_path"].(string),
// }
//}
return res___
//return res___["short_url"].(string)
}
func (self *Pdd)OrderMsg(_db interface{}) (str string){
db := _db.(map[string]interface{})
res := db["order_detail_response"].(map[string]interface{})
fee := res["promotion_amount"].(float64)/100
str = fmt.Sprintf("%s\n¥%.2f\n佣金¥%.2f \n技术服务费¥%.2f\n",
res["goods_name"].(string),
res["order_amount"].(float64)/100,
fee,fee*0.1,
)
if res["order_status"].(float64) == 2{
finishTime :=time.Unix(int64(db["order_receive_time"].(float64)/1000),0).Add(time.Hour*24*15)
str += fmt.Sprintf("%s\n返¥%.2f\n预计%s到帐\n",
iMsg,
fee*0.9,
finishTime.Format("1月2日"),
)
}else{
str +=iMsg+"订单完成15日后返利\n"
}
return str
}
func (self *Pdd) stuctured(data interface{}) (g Goods){
d_ := data.(map[string]interface{})
p:= d_["min_group_price"].(float64)/100
return Goods{
Id:fmt.Sprintf("%.0f",d_["goods_id"].(float64)),
Img:[]string{d_["goods_thumbnail_url"].(string)},
Name:d_["goods_name"].(string),
Tag:d_["mall_name"].(string),
Price:p,
Fprice:fmt.Sprintf("%.2f",d_["promotion_rate"].(float64)/1000.0*p*Rate),
Coupon:d_["coupon_discount"].(float64)>0,
//Show:d_["goods_desc"].(string),
}
}
func (self *Pdd) SearchGoods(words ...string)interface{}{
db := self.searchGoods(words...)
if db == nil {
return nil
}
res := db.(map[string]interface{})["goods_search_response"]
if res == nil {
return nil
}
var li []interface{}
for _,d := range res.(map[string]interface{})["goods_list"].([]interface{}){
li = append(li,self.stuctured(d))
}
return li
//return res.(map[string]interface{})["goods_list"].([]interface{})
}
func (self *Pdd) searchGoods(words ...string)interface{}{
u := &url.Values{}
u.Add("type","pdd.ddk.goods.search")
u.Add("keyword",words[0])
u.Add("page_size","30")
//u.Add("custom_parameters",words[1])
return self.ClientHttp(u)
}
//pdd.ddk.goods.detail
func (self *Pdd) goodsDetail(words ...string)interface{}{
goodsid := words[0]
u := &url.Values{}
u.Add("type","pdd.ddk.goods.detail")
u.Add("goods_id_list","["+goodsid+"]")
return self.ClientHttp(u)
}
func (self *Pdd) GoodsDetail(words ...string)interface{}{
db := self.goodsDetail(words...)
if db == nil {
return nil
}
res := db.(map[string]interface{})["goods_detail_response"]
if res == nil {
return nil
}
var li []interface{}
for _,d := range res.(map[string]interface{})["goods_details"].([]interface{}){
li = append(li,self.stuctured(d))
}
return li
//return res.(map[string]interface{})["goods_details"]
//db_.goods_detail_response.goods_details
}
//func (self *Pdd)OrderSearch(keys ...string)(d interface{}){
// //pdd.ddk.order.detail.get
// if len(keys)<2 {
// return nil
// }
// err := orderGet(keys[0],keys[1],func(db interface | pidQuery | identifier_name |
pdd.go | ]...)
// u.Add("generate_we_app","true")
//}
//if multi{
u.Add("multi_group","true")
if len(words)>1 {
u.Add("custom_parameters",words[1])
}
//}
db := self.ClientHttp(u)
res := db.(map[string]interface{})["goods_promotion_url_generate_response"]
if res == nil {
return nil
}
res_ := res.(map[string]interface{})["goods_promotion_url_list"]
	if res_ == nil {
return nil
}
res__ := res_.([]interface{})
if len(res__)== 0 {
return nil
}
res___ := res__[0].(map[string]interface{})
if res___ == nil {
return nil
}
//if res___["we_app_info"] != nil {
// app := res___["we_app_info"].(map[string]interface{})
// return map[string]interface{}{
// "appid":app["app_id"].(string),
// "url":app["page_path"].(string),
// }
//}
return res___
//return res___["short_url"].(string)
}
func (self *Pdd)OrderMsg(_db interface{}) (str string){
db := _db.(map[string]interface{})
res := db["order_detail_response"].(map[string]interface{})
fee := res["promotion_amount"].(float64)/100
str = fmt.Sprintf("%s\n¥%.2f\n佣金¥%.2f \n技术服务费¥%.2f\n",
res["goods_name"].(string),
res["order_amount"].(float64)/100,
fee,fee*0.1,
)
if res["order_status"].(float64) == 2{
finishTime :=time.Unix(int64(db["order_receive_time"].(float64)/1000),0).Add(time.Hour*24*15)
str += fmt.Sprintf("%s\n返¥%.2f\n预计%s到帐\n",
iMsg,
fee*0.9,
finishTime.Format("1月2日"),
)
}else{
str +=iMsg+"订单完成15日后返利\n"
}
return str
}
func (self *Pdd) stuctured(data interface{}) (g Goods){
d_ := data.(map[string]interface{})
p:= d_["min_group_price"].(float64)/100
return Goods{
Id:fmt.Sprintf("%.0f",d_["goods_id"].(float64)),
Img:[]string{d_["goods_thumbnail_url"].(string)},
Name:d_["goods_name"].(string),
Tag:d_["mall_name"].(string),
Price:p,
Fprice:fmt.Sprintf("%.2f",d_["promotion_rate"].(float64)/1000.0*p*Rate),
Coupon:d_["coupon_discount"].(float64)>0,
//Show:d_["goods_desc"].(string),
}
}
func (self *Pdd) SearchGoods(words ...string)interface{}{
db := self.searchGoods(words...)
if db == nil {
return nil
}
res := db.(map[string]interface{})["goods_search_response"]
if res == nil {
return nil
}
var li []interface{}
for _,d := range res.(map[string]interface{})["goods_list"].([]interface{}){
li = append(li,self.stuctured(d))
}
return li
//return res.(map[string]interface{})["goods_list"].([]interface{})
}
func (self *Pdd) searchGoods(words ...string)interface{}{
u := &url.Values{}
u.Add("type","pdd.ddk.goods.search")
u.Add("keyword",words[0])
u.Add("page_size","30")
//u.Add("custom_parameters",words[1])
return self.ClientHttp(u)
}
//pdd.ddk.goods.detail
func (self *Pdd) goodsDetail(words ...string)interface{}{
goodsid := words[0]
u := &url.Values{}
u.Add("type","pdd.ddk.goods.detail")
u.Add("goods_id_list","["+goodsid+"]")
return self.ClientHttp(u)
}
func (self *Pdd) GoodsDetail(words ...string)interface{}{
db := self.goodsDetail(words...)
if db == nil {
return nil
}
res := db.(map[string]interface{})["goods_detail_response"]
if res == nil {
return nil
}
var li []interface{}
for _,d := range res.(map[string]interface{})["goods_details"].([]interface{}){
li = append(li,self.stuctured(d))
}
return li
//return res.(map[string]interface{})["goods_details"]
//db_.goods_detail_response.goods_details
}
//func (self *Pdd)OrderSearch(keys ...string)(d interface{}){
// //pdd.ddk.order.detail.get
// if len(keys)<2 {
// return nil
// }
// err := orderGet(keys[0],keys[1],func(db interface{}){
// d = db
// //d = string(db.([]byte))
// })
// if err != nil {
// fmt.Println(err)
// return nil
// }
// return
// //return nil
//}
func (self *Pdd)OutUrl(db interface{}) string {
return db.(map[string]interface{})["short_url"].(string)
//res := db.(map[string]interface{})["goods_promotion_url_generate_response"]
//if res == nil {
// return ""
//}
//res_ := res.(map[string]interface{})["goods_promotion_url_list"]
//if res == nil {
// return ""
//}
//res__ := res_.([]interface{})
//if len(res__)== 0 {
// return ""
//}
//res___ := res__[0].(map[string]interface{})
//if res___ == nil {
// return ""
//}
//return res___["short_url"].(string)
}
func(self *Pdd)GetInfo()*ShoppingInfo {
return self.Info
}
func (self *Pdd) ProductSearch(words ...string)(result []interface{}){
return self.searchGoods(words...).([]interface{})
}
func (self *Pdd) OrderDownSelf(hand func(interface{}))error{
return self.OrderDown(hand)
}
func (self *Pdd) OrderDown(hand func(interface{}))error{
var begin,end time.Time
if self.Info.Update == 0 {
var err error
begin,err = time.Parse(timeFormat,"2020-01-01 00:00:00")
if err != nil {
panic(err)
}
}else{
begin = time.Unix(self.Info.Update,0)
}
//self.Info.Update = end.Unix()
for{
isOut := false
end = begin.Add(24*time.Hour)
Now := time.Now()
if !Now.After(end){
//fmt.Println()
end = Now
isOut = true
}
//fmt.Println(begin,end)
page := 1
for {
db := self.getOrder(begin,end,page)
if db == nil {
continue
}
res := db.(map[string]interface{})["order_list_get_response"]
if res == nil {
fmt.Println(db)
return io.EOF
}
li := res.(map[string]interface{})["order_list"].([]interface{})
for _,l := range li{
l_ := l.(map[string]interface{})
l_["order_id"] = l_["order_sn"]
l_["status"] = false
//if l_["order_status"].(float64) == 2{
// l_["status"] = true
// l_["endTime"] = l_["order_receive_time"]
//}
l_["fee"] = l_["promotion_amount"].(float64)/100
l_["goodsid"] =fmt.Sprintf("%.0f",l_["goods_id"].(float64))
l_["goodsName"] = l_["goods_name"]
l_["goodsImg"] = l_["goods_thumbnail_url"]
l_["site"] = self.Info.Py
//l_["userid"] = l_["custom_parameters"]
l_["time"] = time.Now().Unix()
l_["text"] = l_["order_status_desc"]
//if l_["order_verify_time"] != nil {
if l_["order_receive_time"] != nil {
//l_["status"] = true
l_["endTime"] = in | t64(l_["order_receive_time"].(float64))
var ver time.Time
if l_["order_verify_time"] == nil {
ver = time.Unix(int64(l_["order_receive_time"].(float64)),0)
}else{
ver = time.Unix(int64(l_["order_verify_time"].(float64)),0)
}
y,m,d := ver.Date()
if d >15{
y,m,_ = ver.AddDate(0,1,0).Date()
}
fmt.Println(y,m)
l_["payTime"] = time.Date(y,m,21,0,0,0,0,ver.Location()).Unix()
}
hand(l)
}
if len(li) <40 {
break
} | conditional_block |
|
main.rs | and the handles to the interrupts.
fn get_int_method(
pcid_handle: &mut PcidServerHandle,
function: &PciFunction,
allocated_bars: &AllocatedBars,
) -> Result<(InterruptMethod, InterruptSources)> {
log::trace!("Begin get_int_method");
use pcid_interface::irq_helpers;
let features = pcid_handle.fetch_all_features().unwrap();
let has_msi = features.iter().any(|(feature, _)| feature.is_msi());
let has_msix = features.iter().any(|(feature, _)| feature.is_msix());
// TODO: Allocate more than one vector when possible and useful.
if has_msix {
// Extended message signaled interrupts.
use self::nvme::MsixCfg;
use pcid_interface::msi::MsixTableEntry;
let mut capability_struct = match pcid_handle.feature_info(PciFeature::MsiX).unwrap() {
PciFeatureInfo::MsiX(msix) => msix,
_ => unreachable!(),
};
fn bar_base(
allocated_bars: &AllocatedBars,
function: &PciFunction,
bir: u8,
) -> Result<NonNull<u8>> {
let bir = usize::from(bir);
let mut bar_guard = allocated_bars.0[bir].lock().unwrap();
match &mut *bar_guard {
&mut Some(ref bar) => Ok(bar.ptr),
bar_to_set @ &mut None => {
let bar = match function.bars[bir] {
PciBar::Memory(addr) => addr,
other => panic!("Expected memory BAR, found {:?}", other),
};
let bar_size = function.bar_sizes[bir];
let bar = Bar::allocate(bar as usize, bar_size as usize)?;
*bar_to_set = Some(bar);
Ok(bar_to_set.as_ref().unwrap().ptr)
}
}
}
let table_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.table_bir())?.as_ptr();
let pba_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.pba_bir())?.as_ptr();
let table_base =
unsafe { table_bar_base.offset(capability_struct.table_offset() as isize) };
let pba_base = unsafe { pba_bar_base.offset(capability_struct.pba_offset() as isize) };
let vector_count = capability_struct.table_size();
let table_entries: &'static mut [MsixTableEntry] = unsafe {
slice::from_raw_parts_mut(table_base as *mut MsixTableEntry, vector_count as usize)
};
let pba_entries: &'static mut [Mmio<u64>] = unsafe {
slice::from_raw_parts_mut(
                pba_base as *mut Mmio<u64>, // the PBA lives at pba_base, not at the MSI-X table
(vector_count as usize + 63) / 64,
)
};
// Mask all interrupts in case some earlier driver/os already unmasked them (according to
// the PCI Local Bus spec 3.0, they are masked after system reset).
for table_entry in table_entries.iter_mut() {
table_entry.mask();
}
pcid_handle.enable_feature(PciFeature::MsiX).unwrap();
capability_struct.set_msix_enabled(true); // only affects our local mirror of the cap
let (msix_vector_number, irq_handle) = {
use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
let entry: &mut MsixTableEntry = &mut table_entries[0];
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI-X interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data = msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector);
entry.set_addr_lo(msg_addr);
entry.set_msg_data(msg_data);
(0, irq_handle)
};
let interrupt_method = InterruptMethod::MsiX(MsixCfg {
cap: capability_struct,
table: table_entries,
pba: pba_entries,
});
let interrupt_sources =
InterruptSources::MsiX(std::iter::once((msix_vector_number, irq_handle)).collect());
Ok((interrupt_method, interrupt_sources))
} else if has_msi {
// Message signaled interrupts.
let capability_struct = match pcid_handle.feature_info(PciFeature::Msi).unwrap() {
PciFeatureInfo::Msi(msi) => msi,
_ => unreachable!(),
};
let (msi_vector_number, irq_handle) = {
use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
use pcid_interface::{MsiSetFeatureInfo, SetFeatureInfo};
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read BSP APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data =
msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector) as u16;
pcid_handle.set_feature_info(SetFeatureInfo::Msi(MsiSetFeatureInfo {
message_address: Some(msg_addr),
message_upper_address: Some(0),
message_data: Some(msg_data),
multi_message_enable: Some(0), // enable 2^0=1 vectors
mask_bits: None,
})).unwrap();
(0, irq_handle)
};
let interrupt_method = InterruptMethod::Msi(capability_struct);
let interrupt_sources =
InterruptSources::Msi(std::iter::once((msi_vector_number, irq_handle)).collect());
pcid_handle.enable_feature(PciFeature::Msi).unwrap();
Ok((interrupt_method, interrupt_sources))
} else if function.legacy_interrupt_pin.is_some() {
// INTx# pin based interrupts.
let irq_handle = File::open(format!("irq:{}", function.legacy_interrupt_line))
.expect("nvmed: failed to open INTx# interrupt line");
Ok((InterruptMethod::Intx, InterruptSources::Intx(irq_handle)))
} else {
// No interrupts at all
todo!("handling of no interrupts")
}
}
fn setup_logging() -> Option<&'static RedoxLogger> {
let mut logger = RedoxLogger::new()
.with_output(
OutputBuilder::stderr()
.with_filter(log::LevelFilter::Info) // limit global output to important info
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
);
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.log") {
Ok(b) => logger = logger.with_output(
// TODO: Add a configuration file for this
b.with_filter(log::LevelFilter::Info)
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.log: {}", error),
}
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.ansi.log") {
Ok(b) => logger = logger.with_output(
b.with_filter(log::LevelFilter::Info)
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.ansi.log: {}", error),
}
match logger.enable() {
Ok(logger_ref) => {
eprintln!("nvmed: enabled logger");
Some(logger_ref)
}
Err(error) => {
eprintln!("nvmed: failed to set default logger: {}", error);
None
}
}
}
fn | () {
// Daemonize
if unsafe { syscall::clone(CloneFlags::empty()).unwrap() } != 0 {
return;
}
let _logger_ref = setup_logging();
let mut pcid_handle =
PcidServerHandle::connect_default().expect("nvmed: failed to setup channel to pcid");
let pci_config = pcid_handle
.fetch_config()
| main | identifier_name |
main.rs | and the handles to the interrupts.
fn get_int_method(
pcid_handle: &mut PcidServerHandle,
function: &PciFunction,
allocated_bars: &AllocatedBars,
) -> Result<(InterruptMethod, InterruptSources)> {
log::trace!("Begin get_int_method");
use pcid_interface::irq_helpers;
let features = pcid_handle.fetch_all_features().unwrap();
let has_msi = features.iter().any(|(feature, _)| feature.is_msi());
let has_msix = features.iter().any(|(feature, _)| feature.is_msix());
// TODO: Allocate more than one vector when possible and useful.
if has_msix {
// Extended message signaled interrupts.
use self::nvme::MsixCfg;
use pcid_interface::msi::MsixTableEntry;
let mut capability_struct = match pcid_handle.feature_info(PciFeature::MsiX).unwrap() {
PciFeatureInfo::MsiX(msix) => msix,
_ => unreachable!(),
};
fn bar_base(
allocated_bars: &AllocatedBars,
function: &PciFunction,
bir: u8,
) -> Result<NonNull<u8>> {
let bir = usize::from(bir);
let mut bar_guard = allocated_bars.0[bir].lock().unwrap();
match &mut *bar_guard {
&mut Some(ref bar) => Ok(bar.ptr),
bar_to_set @ &mut None => {
let bar = match function.bars[bir] {
PciBar::Memory(addr) => addr,
other => panic!("Expected memory BAR, found {:?}", other),
};
let bar_size = function.bar_sizes[bir];
let bar = Bar::allocate(bar as usize, bar_size as usize)?;
*bar_to_set = Some(bar);
Ok(bar_to_set.as_ref().unwrap().ptr)
}
}
}
let table_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.table_bir())?.as_ptr();
let pba_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.pba_bir())?.as_ptr();
let table_base =
unsafe { table_bar_base.offset(capability_struct.table_offset() as isize) };
let pba_base = unsafe { pba_bar_base.offset(capability_struct.pba_offset() as isize) };
let vector_count = capability_struct.table_size();
let table_entries: &'static mut [MsixTableEntry] = unsafe {
slice::from_raw_parts_mut(table_base as *mut MsixTableEntry, vector_count as usize)
};
let pba_entries: &'static mut [Mmio<u64>] = unsafe {
slice::from_raw_parts_mut(
                pba_base as *mut Mmio<u64>, // the PBA lives at pba_base, not at the MSI-X table
(vector_count as usize + 63) / 64,
)
};
// Mask all interrupts in case some earlier driver/os already unmasked them (according to
// the PCI Local Bus spec 3.0, they are masked after system reset).
for table_entry in table_entries.iter_mut() {
table_entry.mask();
}
pcid_handle.enable_feature(PciFeature::MsiX).unwrap();
capability_struct.set_msix_enabled(true); // only affects our local mirror of the cap
let (msix_vector_number, irq_handle) = {
use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
let entry: &mut MsixTableEntry = &mut table_entries[0];
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI-X interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data = msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector);
entry.set_addr_lo(msg_addr);
entry.set_msg_data(msg_data);
(0, irq_handle)
};
let interrupt_method = InterruptMethod::MsiX(MsixCfg {
cap: capability_struct,
table: table_entries,
pba: pba_entries,
});
let interrupt_sources =
InterruptSources::MsiX(std::iter::once((msix_vector_number, irq_handle)).collect());
Ok((interrupt_method, interrupt_sources))
} else if has_msi {
// Message signaled interrupts.
let capability_struct = match pcid_handle.feature_info(PciFeature::Msi).unwrap() {
PciFeatureInfo::Msi(msi) => msi,
_ => unreachable!(),
};
let (msi_vector_number, irq_handle) = {
use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
use pcid_interface::{MsiSetFeatureInfo, SetFeatureInfo};
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read BSP APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data =
msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector) as u16;
pcid_handle.set_feature_info(SetFeatureInfo::Msi(MsiSetFeatureInfo {
message_address: Some(msg_addr),
message_upper_address: Some(0),
message_data: Some(msg_data),
multi_message_enable: Some(0), // enable 2^0=1 vectors
mask_bits: None,
})).unwrap();
(0, irq_handle)
};
let interrupt_method = InterruptMethod::Msi(capability_struct);
let interrupt_sources =
InterruptSources::Msi(std::iter::once((msi_vector_number, irq_handle)).collect());
pcid_handle.enable_feature(PciFeature::Msi).unwrap();
Ok((interrupt_method, interrupt_sources))
} else if function.legacy_interrupt_pin.is_some() {
// INTx# pin based interrupts.
let irq_handle = File::open(format!("irq:{}", function.legacy_interrupt_line))
.expect("nvmed: failed to open INTx# interrupt line");
Ok((InterruptMethod::Intx, InterruptSources::Intx(irq_handle)))
} else {
// No interrupts at all
todo!("handling of no interrupts")
}
}
fn setup_logging() -> Option<&'static RedoxLogger> {
let mut logger = RedoxLogger::new()
.with_output(
OutputBuilder::stderr()
.with_filter(log::LevelFilter::Info) // limit global output to important info
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
);
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.log") {
Ok(b) => logger = logger.with_output(
// TODO: Add a configuration file for this
b.with_filter(log::LevelFilter::Info)
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.log: {}", error),
}
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.ansi.log") {
Ok(b) => logger = logger.with_output(
b.with_filter(log::LevelFilter::Info)
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.ansi.log: {}", error),
}
match logger.enable() {
Ok(logger_ref) => |
Err(error) => {
eprintln!("nvmed: failed to set default logger: {}", error);
None
}
}
}
fn main() {
// Daemonize
if unsafe { syscall::clone(CloneFlags::empty()).unwrap() } != 0 {
return;
}
let _logger_ref = setup_logging();
let mut pcid_handle =
PcidServerHandle::connect_default().expect("nvmed: failed to setup channel to pcid");
let pci_config = pcid_handle
.fetch_config()
| {
eprintln!("nvmed: enabled logger");
Some(logger_ref)
} | conditional_block |
main.rs | and the handles to the interrupts.
fn get_int_method(
pcid_handle: &mut PcidServerHandle,
function: &PciFunction,
allocated_bars: &AllocatedBars,
) -> Result<(InterruptMethod, InterruptSources)> {
log::trace!("Begin get_int_method");
use pcid_interface::irq_helpers;
let features = pcid_handle.fetch_all_features().unwrap();
let has_msi = features.iter().any(|(feature, _)| feature.is_msi());
let has_msix = features.iter().any(|(feature, _)| feature.is_msix());
// TODO: Allocate more than one vector when possible and useful.
if has_msix {
// Extended message signaled interrupts.
use self::nvme::MsixCfg;
use pcid_interface::msi::MsixTableEntry;
let mut capability_struct = match pcid_handle.feature_info(PciFeature::MsiX).unwrap() {
PciFeatureInfo::MsiX(msix) => msix,
_ => unreachable!(),
};
fn bar_base(
allocated_bars: &AllocatedBars,
function: &PciFunction,
bir: u8,
) -> Result<NonNull<u8>> {
let bir = usize::from(bir);
let mut bar_guard = allocated_bars.0[bir].lock().unwrap();
match &mut *bar_guard {
&mut Some(ref bar) => Ok(bar.ptr),
bar_to_set @ &mut None => {
let bar = match function.bars[bir] {
PciBar::Memory(addr) => addr,
other => panic!("Expected memory BAR, found {:?}", other),
};
let bar_size = function.bar_sizes[bir];
let bar = Bar::allocate(bar as usize, bar_size as usize)?;
*bar_to_set = Some(bar);
Ok(bar_to_set.as_ref().unwrap().ptr)
}
}
}
let table_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.table_bir())?.as_ptr();
let pba_bar_base: *mut u8 =
bar_base(allocated_bars, function, capability_struct.pba_bir())?.as_ptr();
let table_base =
unsafe { table_bar_base.offset(capability_struct.table_offset() as isize) };
let pba_base = unsafe { pba_bar_base.offset(capability_struct.pba_offset() as isize) };
let vector_count = capability_struct.table_size();
let table_entries: &'static mut [MsixTableEntry] = unsafe {
slice::from_raw_parts_mut(table_base as *mut MsixTableEntry, vector_count as usize)
};
let pba_entries: &'static mut [Mmio<u64>] = unsafe {
slice::from_raw_parts_mut(
                pba_base as *mut Mmio<u64>, // the PBA lives at pba_base, not at the MSI-X table
(vector_count as usize + 63) / 64,
)
};
// Mask all interrupts in case some earlier driver/os already unmasked them (according to
// the PCI Local Bus spec 3.0, they are masked after system reset).
for table_entry in table_entries.iter_mut() {
table_entry.mask();
}
pcid_handle.enable_feature(PciFeature::MsiX).unwrap();
capability_struct.set_msix_enabled(true); // only affects our local mirror of the cap
let (msix_vector_number, irq_handle) = { |
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI-X interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data = msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector);
entry.set_addr_lo(msg_addr);
entry.set_msg_data(msg_data);
(0, irq_handle)
};
let interrupt_method = InterruptMethod::MsiX(MsixCfg {
cap: capability_struct,
table: table_entries,
pba: pba_entries,
});
let interrupt_sources =
InterruptSources::MsiX(std::iter::once((msix_vector_number, irq_handle)).collect());
Ok((interrupt_method, interrupt_sources))
} else if has_msi {
// Message signaled interrupts.
let capability_struct = match pcid_handle.feature_info(PciFeature::Msi).unwrap() {
PciFeatureInfo::Msi(msi) => msi,
_ => unreachable!(),
};
let (msi_vector_number, irq_handle) = {
use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
use pcid_interface::{MsiSetFeatureInfo, SetFeatureInfo};
let bsp_cpu_id =
irq_helpers::read_bsp_apic_id().expect("nvmed: failed to read BSP APIC ID");
let bsp_lapic_id = bsp_cpu_id
.try_into()
.expect("nvmed: BSP local apic ID couldn't fit inside u8");
let (vector, irq_handle) = irq_helpers::allocate_single_interrupt_vector(bsp_cpu_id)
.expect("nvmed: failed to allocate single MSI interrupt vector")
.expect("nvmed: no interrupt vectors left on BSP");
let msg_addr = msi_x86_64::message_address(bsp_lapic_id, false, false);
let msg_data =
msi_x86_64::message_data_edge_triggered(DeliveryMode::Fixed, vector) as u16;
pcid_handle.set_feature_info(SetFeatureInfo::Msi(MsiSetFeatureInfo {
message_address: Some(msg_addr),
message_upper_address: Some(0),
message_data: Some(msg_data),
multi_message_enable: Some(0), // enable 2^0=1 vectors
mask_bits: None,
})).unwrap();
(0, irq_handle)
};
let interrupt_method = InterruptMethod::Msi(capability_struct);
let interrupt_sources =
InterruptSources::Msi(std::iter::once((msi_vector_number, irq_handle)).collect());
pcid_handle.enable_feature(PciFeature::Msi).unwrap();
Ok((interrupt_method, interrupt_sources))
} else if function.legacy_interrupt_pin.is_some() {
// INTx# pin based interrupts.
let irq_handle = File::open(format!("irq:{}", function.legacy_interrupt_line))
.expect("nvmed: failed to open INTx# interrupt line");
Ok((InterruptMethod::Intx, InterruptSources::Intx(irq_handle)))
} else {
// No interrupts at all
todo!("handling of no interrupts")
}
}
fn setup_logging() -> Option<&'static RedoxLogger> {
let mut logger = RedoxLogger::new()
.with_output(
OutputBuilder::stderr()
.with_filter(log::LevelFilter::Info) // limit global output to important info
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
);
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.log") {
Ok(b) => logger = logger.with_output(
// TODO: Add a configuration file for this
b.with_filter(log::LevelFilter::Info)
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.log: {}", error),
}
#[cfg(target_os = "redox")]
match OutputBuilder::in_redox_logging_scheme("disk", "pcie", "nvme.ansi.log") {
Ok(b) => logger = logger.with_output(
b.with_filter(log::LevelFilter::Info)
.with_ansi_escape_codes()
.flush_on_newline(true)
.build()
),
Err(error) => eprintln!("nvmed: failed to create nvme.ansi.log: {}", error),
}
match logger.enable() {
Ok(logger_ref) => {
eprintln!("nvmed: enabled logger");
Some(logger_ref)
}
Err(error) => {
eprintln!("nvmed: failed to set default logger: {}", error);
None
}
}
}
fn main() {
// Daemonize
if unsafe { syscall::clone(CloneFlags::empty()).unwrap() } != 0 {
return;
}
let _logger_ref = setup_logging();
let mut pcid_handle =
PcidServerHandle::connect_default().expect("nvmed: failed to setup channel to pcid");
let pci_config = pcid_handle
.fetch_config()
| use msi_x86_64::DeliveryMode;
use pcid_interface::msi::x86_64 as msi_x86_64;
let entry: &mut MsixTableEntry = &mut table_entries[0]; | random_line_split |
layout.py | " if y%2 == 0 else "VDD", y*self.row_height)
for y in range(self.num_words+3)
]
# Assemble the columns of the memory, which consist of bit arrays,
# welltaps, and the address decoder.
# Lower and upper bits.
lower_bits = [
Inst(
self, self.bitarray_cell,
"XBA%d" % i,
Vec(0, 0),
mx=True,
index=i,
size=bitarray_size
)
for i in range(self.num_bits_left)
]
upper_bits = [
Inst(
self, self.bitarray_cell,
"XBA%d" % (i + self.num_bits_left),
Vec(0, 0),
index=i + self.num_bits_left,
size=bitarray_size
)
for i in range(self.num_bits_right)
]
self.bitarrays = lower_bits + upper_bits
# Address Decoder
self.addrdec = Inst(
self, self.addrdec_cell, "XAD",
# Vec(x_addrdec, 0),
Vec(0, 0),
index=self.num_words,
size=Vec(addrdec_width, column_height)
)
columns = lower_bits + [self.addrdec] + upper_bits
# Determine a reasonable distribution for the welltaps.
width = layout_columns(columns)
num_welltaps = int(width/(welltap_cadence - welltap_width)) + 2
max_spacing = None
welltap_placement = None
while welltap_placement is None or max_spacing > welltap_cadence:
# Calculate the approximate position for each welltap.
approx_welltap_positions = [
width * i / (num_welltaps-1) for i in range(num_welltaps)
]
# Calculate the index of the column before which each welltap should
            # be inserted. This snaps each welltap to a column boundary: it is
            # placed immediately before (to the left of) the first column that
            # starts beyond the approximate position.
colpos = [col.pos.x for col in columns]
welltap_indices = [
max(0, min(len(columns), bisect(colpos, x)))
for x in approx_welltap_positions
]
# Extract the position the welltaps would have if placed at the
# indices found above.
welltap_placement = [
(i, columns[i].pos.x if i < len(columns) else width)
for i in welltap_indices
]
# Calculate the maximum spacing between two neighbouring welltaps.
max_spacing = reduce(max, [
b - a + welltap_width
for ((_,a),(_,b)) in zip(welltap_placement[:-1], welltap_placement[1:])
])
# Increase the number of welltaps. If the max_spacing calculated
# above is greater than the required welltap cadence, the loop body
# is re-executed with one more welltap.
num_welltaps += 1
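        # Worked example (illustrative numbers only): with width = 100um,
        # welltap_cadence = 25um and welltap_width = 1um, the loop starts with
        # int(100/24) + 2 = 6 welltaps at approximate positions 0, 20, 40, 60,
        # 80 and 100um; each is then snapped to the start of the first column
        # beyond it, and if any resulting gap exceeds the 25um cadence another
        # welltap is added and the distribution is recomputed.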
# Insert the welltaps and the required wiring on top of them.
self.welltaps = list()
for (i, (offset, _)) in enumerate(reversed(welltap_placement)):
wt = Inst(self, self.welltap_cell, "WT%d" % i, Vec(0,0),
size=Vec(welltap_width, column_height),
stack=self.num_words+2,
stack_step=self.row_height
)
self.welltaps.append(wt)
columns.insert(offset, wt)
# Rearrange the columns a final time and calculate the size of the
# macro.
self.size = Vec(
layout_columns(columns),
(self.num_words+2) * self.row_height
)
# Add the wiring to the welltaps.
for wt in self.welltaps[1:-1]:
flip = wt.pos.x < self.addrdec.pos.x + 0.5*addrdec_width
self.wiring.append(Inst(
self, self.welltap_awire_cell, wt.name+"WA",
Vec(wt.pos.x + welltap_width if flip else wt.pos.x, wt.pos.y),
stack=self.num_words,
stack_step=self.row_height,
mx=flip
))
self.wiring.append(Inst(
self, self.welltap_bwire_cell, wt.name+"WB",
Vec(
wt.pos.x + welltap_width if flip else wt.pos.x,
wt.pos.y + self.num_words * self.row_height
),
mx=flip
))
# Add the power routing on top of the welltaps.
for wt in self.welltaps:
self.wiring.append(Inst(
self, self.welltap_pwr_inner_cell, wt.name+"WPI",
wt.pos,
stack=self.num_words+2,
stack_step=self.row_height
))
self.wiring.append(Inst(
self, self.welltap_pwr_outer_cell, wt.name+"WPO",
Vec(wt.pos.x, wt.pos.y+self.row_height),
stack=self.num_words+1,
stack_step=self.row_height,
stack_noflip=True
))
# Place the global clock gate and address registers which are attached
# to the address decoder layout-wise.
x_spine_l = self.addrdec.pos.x
x_spine_r = x_spine_l + self.addrdec.size.x
# Global Clock Gate
rwckg_x = x_spine_r - rwckg_width
rwckg_y = self.num_words * self.row_height
self.rwckg = Inst(self, self.rwckg_cell, "XRWCKG", Vec(rwckg_x, rwckg_y))
# Read Address Registers
x_ralower = x_spine_l - (self.num_addr_left) * rareg_width
x_raupper = x_spine_r
y_rareg = (self.num_words+2) * self.row_height
self.raregs = [
Inst(
self, self.rareg_cell,
"XRA%d" % i,
Vec(self.bitarrays[self.num_bits_left-self.num_addr_left+i].pos.x, y_rareg),
index=i,
mx=True,
my=True,
data = {
"ytrack": self.num_addr_left - i - 1,
"ymax": self.num_addr_left
}
)
for i in range(self.num_addr_left)
] + [
Inst(
self, self.rareg_cell,
"XRA%d" % (i+self.num_addr_left),
Vec(self.bitarrays[self.num_bits_left+i].pos.x, y_rareg),
index=(i + self.num_addr_left),
my=True,
data = {
"ytrack": i,
"ymax": self.num_addr_right
}
)
for i in range(self.num_addr_right)
]
# Wire up the RAREGs.
y_raregwire = (self.num_words+1) * self.row_height
for ra in self.raregs:
# Vertical breakout.
self.wiring.append(Inst(
self, self.rareg_vwire_cells[ra.ytrack],
"XRAWV%d" % ra.index,
Vec(ra.pos.x, y_raregwire),
mx=ra.mx
))
# Horizontal lanes.
for i in range(ra.ymax-ra.ytrack-1):
self.wiring.append(Inst(
self, self.rareg_hwire_a_cell,
"XRAWH%dY%d" % (ra.index, i),
Vec(ra.pos.x, y_raregwire + 0.4e-6 - (ra.ymax-i-1)*0.2e-6),
mx=ra.mx
))
# Add the wiring that is necessary on top of the welltaps which were
# placed among the RAREGs.
for wt in self.welltaps:
if wt.pos.x >= self.raregs[0].pos.x and wt.pos.x <= self.raregs[-1].pos.x:
| flip = wt.pos.x < self.addrdec.pos.x + 0.5*addrdec_width
x = wt.pos.x + welltap_width if flip else wt.pos.x
self.wiring.append(Inst(
self, self.welltap_bwire_cell, wt.name+"WB",
Vec(
x,
wt.pos.y + (self.num_words+2) * self.row_height
),
mx=flip,
my=True
))
# Gather a list of horizontal tracks this welltap interrupts.
tracks = list()
if x < self.addrdec.pos.x + 0.5*addrdec_width:
for ra in self.raregs:
if ra.pos.x < x:
tracks.append(y_raregwire + 0.4e-6 - ra.ytrack*0.2e-6)
else:
for ra in self.raregs: | conditional_block |
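The welltap loop in the row above is easier to see in isolation. The following is a minimal, self-contained sketch with assumed names (colpos, cadence) rather than the real Layout class: taps start from evenly spaced approximate positions, snap to the column boundary on their left via bisect, and the tap count grows until no gap exceeds the allowed cadence.

from bisect import bisect
from functools import reduce

def place_welltaps(colpos, width, welltap_width, cadence):
    num_welltaps = int(width / (cadence - welltap_width)) + 2
    placement, max_spacing = None, None
    while placement is None or max_spacing > cadence:
        # Evenly spaced approximate positions across the row.
        approx = [width * i / (num_welltaps - 1) for i in range(num_welltaps)]
        # Snap each position to the column boundary on its left.
        indices = [max(0, min(len(colpos), bisect(colpos, x))) for x in approx]
        placement = [(i, colpos[i] if i < len(colpos) else width) for i in indices]
        # Largest gap between neighbouring taps, including the tap width.
        max_spacing = reduce(max, [b - a + welltap_width
                                   for (_, a), (_, b) in zip(placement[:-1], placement[1:])])
        num_welltaps += 1
    return placement

# Example: ten 10 um columns, 2 um taps, 25 um maximum cadence.
print(place_welltaps([i * 10e-6 for i in range(10)], 100e-6, 2e-6, 25e-6))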
|
layout.py | , self.inst.size.y - 0.05e-6)
yield (self.inst.to_world(a), self.inst.to_world(b))
class Inst(object):
def __init__(self, layout, cell, name, pos, mx=False, my=False, index=None, size=None, stack=None, stack_step=None, stack_noflip=False, data=dict()):
super(Inst, self).__init__()
self.layout = layout
self.cell = cell
self.name = name
self.pos = pos
self.mx = mx
self.my = my
self.index = index
self.size = size
self.stack = stack
self.stack_step = stack_step
self.stack_noflip = stack_noflip
self.__dict__.update(data)
def to_world(self, v):
return Vec(
self.pos.x + (-v.x if self.mx else v.x),
self.pos.y + (-v.y if self.my else v.y)
)
def pins(self):
if "pins" in self.cell.config:
for (name, cfg) in self.cell.config["pins"].items():
if "tracks" in cfg:
for (idx, trk) in enumerate(cfg["tracks"][self.index]):
yield InstPin(name, cfg, self, index=idx, track=trk)
else:
yield InstPin(name, cfg, self, index=self.index)
def | (columns):
x = 0
for col in columns:
col.pos.x = (x + col.size.x if col.mx else x)
x += col.size.x
return x
class Layout(object):
def __init__(self, macro):
super(Layout, self).__init__()
self.macro = macro
self.cells = list()
self.num_addr = macro.num_addr
self.num_bits = macro.num_bits
self.num_words = 2**self.num_addr
self.name = "PS%dX%d" % (self.num_words, self.num_bits)
self.wiring = list()
with open(macro.techdir+"/config.yml") as f:
self.config = yaml.load(f)
# Calculate the number of bits and address bits that go to the left and
# right of the central spine.
self.num_bits_left = int(self.num_bits/2)
self.num_bits_right = self.num_bits - self.num_bits_left
self.num_addr_left = int(self.num_addr/2)
self.num_addr_right = self.num_addr - self.num_addr_left
# Load the cell descriptions.
cells = self.config["cells"]
self.rwckg_cell = Cell("rwckg", cells["rwckg"])
self.addrdec_cell = Cell("addrdec", cells["addrdec"], suffix=str(self.num_words))
self.bitarray_cell = Cell("bitarray", cells["bitarray"], suffix=str(self.num_words))
self.rareg_cell = Cell("rareg", cells["rareg"])
self.rareg_vwire_cells = [
Cell("raregwire", cells["raregwire"], suffix=str(i+1))
for i in range(4)
]
self.rareg_hwire_a_cell = Cell("raregwireha", cells["raregwire"], suffix="HA")
self.rareg_hwire_b_cell = Cell("raregwireha", cells["raregwire"], suffix="HB")
self.welltap_cell = Cell("welltap", cells["welltap"])
self.welltap_awire_cell = Cell("welltap_wa", self.welltap_cell.config["wiring_a"])
self.welltap_bwire_cell = Cell("welltap_wb", self.welltap_cell.config["wiring_b"])
self.welltap_pwr_inner_cell = Cell("welltap_wpi", self.welltap_cell.config["pwr_inner"])
self.welltap_pwr_outer_cell = Cell("welltap_wpo", self.welltap_cell.config["pwr_outer"])
self.filler_cell = Cell("filler", cells["filler"])
# Read and prepare some basic dimensions required for partitioning.
G = self.config["track"]
self.grid = G
self.row_height_trk = self.config["row-height"]
self.row_height = self.row_height_trk*G
self.regwd_y_trk = self.num_words * self.row_height_trk
self.column_width_trk = self.config["widths"]["bitarray"]
self.addrdec_width_trk = self.config["widths"]["addrdec"][self.num_words]
self.column_right_trk = self.num_bits_left * self.column_width_trk + self.addrdec_width_trk
bit_width = self.column_width_trk*G
rwckg_width = self.config["widths"]["rwckg"]*G
addrdec_width = self.addrdec_width_trk*G
rareg_width = self.config["widths"]["rareg"]*G
bitarray_size = Vec(bit_width, self.num_words * self.row_height)
column_height = (self.num_words+1)*self.row_height
welltap_width = float(self.welltap_cell.config["width"])
welltap_cadence = float(self.welltap_cell.config["cadence"])
# Calculate the supply pin tracks.
self.supply_layer_gds = self.config["supply_layer_gds"]
self.supply_tracks = [
("VSS" if y%2 == 0 else "VDD", y*self.row_height)
for y in range(self.num_words+3)
]
# Assemble the columns of the memory, which consist of bit arrays,
# welltaps, and the address decoder.
# Lower and upper bits.
lower_bits = [
Inst(
self, self.bitarray_cell,
"XBA%d" % i,
Vec(0, 0),
mx=True,
index=i,
size=bitarray_size
)
for i in range(self.num_bits_left)
]
upper_bits = [
Inst(
self, self.bitarray_cell,
"XBA%d" % (i + self.num_bits_left),
Vec(0, 0),
index=i + self.num_bits_left,
size=bitarray_size
)
for i in range(self.num_bits_right)
]
self.bitarrays = lower_bits + upper_bits
# Address Decoder
self.addrdec = Inst(
self, self.addrdec_cell, "XAD",
# Vec(x_addrdec, 0),
Vec(0, 0),
index=self.num_words,
size=Vec(addrdec_width, column_height)
)
columns = lower_bits + [self.addrdec] + upper_bits
# Determine a reasonable distribution for the welltaps.
width = layout_columns(columns)
num_welltaps = int(width/(welltap_cadence - welltap_width)) + 2
max_spacing = None
welltap_placement = None
while welltap_placement is None or max_spacing > welltap_cadence:
# Calculate the approximate position for each welltap.
approx_welltap_positions = [
width * i / (num_welltaps-1) for i in range(num_welltaps)
]
# Calculate the index of the column before which each welltap should
# be inserted. This positions each welltap to the left of each
# approximate position.
colpos = [col.pos.x for col in columns]
welltap_indices = [
max(0, min(len(columns), bisect(colpos, x)))
for x in approx_welltap_positions
]
# Extract the position the welltaps would have if placed at the
# indices found above.
welltap_placement = [
(i, columns[i].pos.x if i < len(columns) else width)
for i in welltap_indices
]
# Calculate the maximum spacing between two neighbouring welltaps.
max_spacing = reduce(max, [
b - a + welltap_width
for ((_,a),(_,b)) in zip(welltap_placement[:-1], welltap_placement[1:])
])
# Increase the number of welltaps. If the max_spacing calculated
# above is greater than the required welltap cadence, the loop body
# is re-executed with one more welltap.
num_welltaps += 1
# Insert the welltaps and the required wiring on top of them.
self.welltaps = list()
for (i, (offset, _)) in enumerate(reversed(welltap_placement)):
wt = Inst(self, self.welltap_cell, "WT%d" % i, Vec(0,0),
size=Vec(welltap_width, column_height),
stack=self.num_words+2,
stack_step=self.row_height
)
self.welltaps.append(wt)
columns.insert(offset, wt)
# Rearrange the columns a final time and calculate the size of the
# macro.
self.size = Vec(
layout_columns(columns),
(self.num_words+2) * self.row_height
)
# Add the wiring to the | layout_columns | identifier_name |
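The identifier filled in above is layout_columns, which packs columns left to right and records a mirrored column's origin at its right edge. A toy standalone version (assumed, simplified classes rather than the real Inst) makes the mirroring convention explicit:

class Col:
    def __init__(self, width, mx=False):
        self.width, self.mx, self.x = width, mx, 0.0

def layout_columns(columns):
    x = 0.0
    for col in columns:
        # Mirrored cells (mx=True) use their right edge as the placement origin.
        col.x = x + col.width if col.mx else x
        x += col.width
    return x

cols = [Col(1.0, mx=True), Col(2.0), Col(1.0)]
total = layout_columns(cols)
print(total, [c.x for c in cols])  # 4.0 [1.0, 1.0, 3.0]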
layout.py | , self.inst.size.y - 0.05e-6)
yield (self.inst.to_world(a), self.inst.to_world(b))
class Inst(object):
def __init__(self, layout, cell, name, pos, mx=False, my=False, index=None, size=None, stack=None, stack_step=None, stack_noflip=False, data=dict()):
super(Inst, self).__init__()
self.layout = layout
self.cell = cell
self.name = name
self.pos = pos
self.mx = mx
self.my = my
self.index = index
self.size = size
self.stack = stack
self.stack_step = stack_step
self.stack_noflip = stack_noflip
self.__dict__.update(data)
def to_world(self, v):
return Vec(
self.pos.x + (-v.x if self.mx else v.x),
self.pos.y + (-v.y if self.my else v.y)
)
def pins(self):
if "pins" in self.cell.config:
for (name, cfg) in self.cell.config["pins"].items():
if "tracks" in cfg:
for (idx, trk) in enumerate(cfg["tracks"][self.index]):
yield InstPin(name, cfg, self, index=idx, track=trk)
else:
yield InstPin(name, cfg, self, index=self.index)
def layout_columns(columns):
x = 0
for col in columns:
col.pos.x = (x + col.size.x if col.mx else x)
x += col.size.x
return x
class Layout(object):
| cells = self.config["cells"]
self.rwckg_cell = Cell("rwckg", cells["rwckg"])
self.addrdec_cell = Cell("addrdec", cells["addrdec"], suffix=str(self.num_words))
self.bitarray_cell = Cell("bitarray", cells["bitarray"], suffix=str(self.num_words))
self.rareg_cell = Cell("rareg", cells["rareg"])
self.rareg_vwire_cells = [
Cell("raregwire", cells["raregwire"], suffix=str(i+1))
for i in range(4)
]
self.rareg_hwire_a_cell = Cell("raregwireha", cells["raregwire"], suffix="HA")
self.rareg_hwire_b_cell = Cell("raregwireha", cells["raregwire"], suffix="HB")
self.welltap_cell = Cell("welltap", cells["welltap"])
self.welltap_awire_cell = Cell("welltap_wa", self.welltap_cell.config["wiring_a"])
self.welltap_bwire_cell = Cell("welltap_wb", self.welltap_cell.config["wiring_b"])
self.welltap_pwr_inner_cell = Cell("welltap_wpi", self.welltap_cell.config["pwr_inner"])
self.welltap_pwr_outer_cell = Cell("welltap_wpo", self.welltap_cell.config["pwr_outer"])
self.filler_cell = Cell("filler", cells["filler"])
# Read and prepare some basic dimensions required for partitioning.
G = self.config["track"]
self.grid = G
self.row_height_trk = self.config["row-height"]
self.row_height = self.row_height_trk*G
self.regwd_y_trk = self.num_words * self.row_height_trk
self.column_width_trk = self.config["widths"]["bitarray"]
self.addrdec_width_trk = self.config["widths"]["addrdec"][self.num_words]
self.column_right_trk = self.num_bits_left * self.column_width_trk + self.addrdec_width_trk
bit_width = self.column_width_trk*G
rwckg_width = self.config["widths"]["rwckg"]*G
addrdec_width = self.addrdec_width_trk*G
rareg_width = self.config["widths"]["rareg"]*G
bitarray_size = Vec(bit_width, self.num_words * self.row_height)
column_height = (self.num_words+1)*self.row_height
welltap_width = float(self.welltap_cell.config["width"])
welltap_cadence = float(self.welltap_cell.config["cadence"])
# Calculate the supply pin tracks.
self.supply_layer_gds = self.config["supply_layer_gds"]
self.supply_tracks = [
("VSS" if y%2 == 0 else "VDD", y*self.row_height)
for y in range(self.num_words+3)
]
# Assemble the columns of the memory, which consist of bit arrays,
# welltaps, and the address decoder.
# Lower and upper bits.
lower_bits = [
Inst(
self, self.bitarray_cell,
"XBA%d" % i,
Vec(0, 0),
mx=True,
index=i,
size=bitarray_size
)
for i in range(self.num_bits_left)
]
upper_bits = [
Inst(
self, self.bitarray_cell,
"XBA%d" % (i + self.num_bits_left),
Vec(0, 0),
index=i + self.num_bits_left,
size=bitarray_size
)
for i in range(self.num_bits_right)
]
self.bitarrays = lower_bits + upper_bits
# Address Decoder
self.addrdec = Inst(
self, self.addrdec_cell, "XAD",
# Vec(x_addrdec, 0),
Vec(0, 0),
index=self.num_words,
size=Vec(addrdec_width, column_height)
)
columns = lower_bits + [self.addrdec] + upper_bits
# Determine a reasonable distribution for the welltaps.
width = layout_columns(columns)
num_welltaps = int(width/(welltap_cadence - welltap_width)) + 2
max_spacing = None
welltap_placement = None
while welltap_placement is None or max_spacing > welltap_cadence:
# Calculate the approximate position for each welltap.
approx_welltap_positions = [
width * i / (num_welltaps-1) for i in range(num_welltaps)
]
# Calculate the index of the column before which each welltap should
# be inserted. This positions each welltap to the left of each
# approximate position.
colpos = [col.pos.x for col in columns]
welltap_indices = [
max(0, min(len(columns), bisect(colpos, x)))
for x in approx_welltap_positions
]
# Extract the position the welltaps would have if placed at the
# indices found above.
welltap_placement = [
(i, columns[i].pos.x if i < len(columns) else width)
for i in welltap_indices
]
# Calculate the maximum spacing between two neighbouring welltaps.
max_spacing = reduce(max, [
b - a + welltap_width
for ((_,a),(_,b)) in zip(welltap_placement[:-1], welltap_placement[1:])
])
# Increase the number of welltaps. If the max_spacing calculated
# above is greater than the required welltap cadence, the loop body
# is re-executed with one more welltap.
num_welltaps += 1
# Insert the welltaps and the required wiring on top of them.
self.welltaps = list()
for (i, (offset, _)) in enumerate(reversed(welltap_placement)):
wt = Inst(self, self.welltap_cell, "WT%d" % i, Vec(0,0),
size=Vec(welltap_width, column_height),
stack=self.num_words+2,
stack_step=self.row_height
)
self.welltaps.append(wt)
columns.insert(offset, wt)
# Rearrange the columns a final time and calculate the size of the
# macro.
self.size = Vec(
layout_columns(columns),
(self.num_words+2) * self.row_height
)
# Add the wiring to the | def __init__(self, macro):
super(Layout, self).__init__()
self.macro = macro
self.cells = list()
self.num_addr = macro.num_addr
self.num_bits = macro.num_bits
self.num_words = 2**self.num_addr
self.name = "PS%dX%d" % (self.num_words, self.num_bits)
self.wiring = list()
with open(macro.techdir+"/config.yml") as f:
self.config = yaml.load(f)
# Calculate the number of bits and address bits that go to the left and
# right of the central spine.
self.num_bits_left = int(self.num_bits/2)
self.num_bits_right = self.num_bits - self.num_bits_left
self.num_addr_left = int(self.num_addr/2)
self.num_addr_right = self.num_addr - self.num_addr_left
# Load the cell descriptions. | identifier_body |
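The __init__ body recovered above also computes the horizontal supply rails. As a small hedged sketch (a standalone function with assumed arguments), the alternating VSS/VDD bookkeeping is simply:

def supply_tracks(num_words, row_height):
    # One rail per row boundary: even rows carry VSS, odd rows carry VDD.
    return [("VSS" if y % 2 == 0 else "VDD", y * row_height)
            for y in range(num_words + 3)]

for name, y in supply_tracks(num_words=4, row_height=1.8e-6):
    print(name, y)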
layout.py | _cell,
"XBA%d" % i,
Vec(0, 0),
mx=True,
index=i,
size=bitarray_size
)
for i in range(self.num_bits_left)
]
upper_bits = [
Inst(
self, self.bitarray_cell,
"XBA%d" % (i + self.num_bits_left),
Vec(0, 0),
index=i + self.num_bits_left,
size=bitarray_size
)
for i in range(self.num_bits_right)
]
self.bitarrays = lower_bits + upper_bits
# Address Decoder
self.addrdec = Inst(
self, self.addrdec_cell, "XAD",
# Vec(x_addrdec, 0),
Vec(0, 0),
index=self.num_words,
size=Vec(addrdec_width, column_height)
)
columns = lower_bits + [self.addrdec] + upper_bits
# Determine a reasonable distribution for the welltaps.
width = layout_columns(columns)
num_welltaps = int(width/(welltap_cadence - welltap_width)) + 2
max_spacing = None
welltap_placement = None
while welltap_placement is None or max_spacing > welltap_cadence:
# Calculate the approximate position for each welltap.
approx_welltap_positions = [
width * i / (num_welltaps-1) for i in range(num_welltaps)
]
# Calculate the index of the column before which each welltap should
# be inserted. This positions each welltap to the left of each
# approximate position.
colpos = [col.pos.x for col in columns]
welltap_indices = [
max(0, min(len(columns), bisect(colpos, x)))
for x in approx_welltap_positions
]
# Extract the position the welltaps would have if placed at the
# indices found above.
welltap_placement = [
(i, columns[i].pos.x if i < len(columns) else width)
for i in welltap_indices
]
# Calculate the maximum spacing between two neighbouring welltaps.
max_spacing = reduce(max, [
b - a + welltap_width
for ((_,a),(_,b)) in zip(welltap_placement[:-1], welltap_placement[1:])
])
# Increase the number of welltaps. If the max_spacing calculated
# above is greater than the required welltap cadence, the loop body
# is re-executed with one more welltap.
num_welltaps += 1
# Insert the welltaps and the required wiring on top of them.
self.welltaps = list()
for (i, (offset, _)) in enumerate(reversed(welltap_placement)):
wt = Inst(self, self.welltap_cell, "WT%d" % i, Vec(0,0),
size=Vec(welltap_width, column_height),
stack=self.num_words+2,
stack_step=self.row_height
)
self.welltaps.append(wt)
columns.insert(offset, wt)
# Rearrange the columns a final time and calculate the size of the
# macro.
self.size = Vec(
layout_columns(columns),
(self.num_words+2) * self.row_height
)
# Add the wiring to the welltaps.
for wt in self.welltaps[1:-1]:
flip = wt.pos.x < self.addrdec.pos.x + 0.5*addrdec_width
self.wiring.append(Inst(
self, self.welltap_awire_cell, wt.name+"WA",
Vec(wt.pos.x + welltap_width if flip else wt.pos.x, wt.pos.y),
stack=self.num_words,
stack_step=self.row_height,
mx=flip
))
self.wiring.append(Inst(
self, self.welltap_bwire_cell, wt.name+"WB",
Vec(
wt.pos.x + welltap_width if flip else wt.pos.x,
wt.pos.y + self.num_words * self.row_height
),
mx=flip
))
# Add the power routing on top of the welltaps.
for wt in self.welltaps:
self.wiring.append(Inst(
self, self.welltap_pwr_inner_cell, wt.name+"WPI",
wt.pos,
stack=self.num_words+2,
stack_step=self.row_height
))
self.wiring.append(Inst(
self, self.welltap_pwr_outer_cell, wt.name+"WPO",
Vec(wt.pos.x, wt.pos.y+self.row_height),
stack=self.num_words+1,
stack_step=self.row_height,
stack_noflip=True
))
# Place the global clock gate and address registers which are attached
# to the address decoder layout-wise.
x_spine_l = self.addrdec.pos.x
x_spine_r = x_spine_l + self.addrdec.size.x
# Global Clock Gate
rwckg_x = x_spine_r - rwckg_width
rwckg_y = self.num_words * self.row_height
self.rwckg = Inst(self, self.rwckg_cell, "XRWCKG", Vec(rwckg_x, rwckg_y))
# Read Address Registers
x_ralower = x_spine_l - (self.num_addr_left) * rareg_width
x_raupper = x_spine_r
y_rareg = (self.num_words+2) * self.row_height
self.raregs = [
Inst(
self, self.rareg_cell,
"XRA%d" % i,
Vec(self.bitarrays[self.num_bits_left-self.num_addr_left+i].pos.x, y_rareg),
index=i,
mx=True,
my=True,
data = {
"ytrack": self.num_addr_left - i - 1,
"ymax": self.num_addr_left
}
)
for i in range(self.num_addr_left)
] + [
Inst(
self, self.rareg_cell,
"XRA%d" % (i+self.num_addr_left),
Vec(self.bitarrays[self.num_bits_left+i].pos.x, y_rareg),
index=(i + self.num_addr_left),
my=True,
data = {
"ytrack": i,
"ymax": self.num_addr_right
}
)
for i in range(self.num_addr_right)
]
# Wire up the RAREGs.
y_raregwire = (self.num_words+1) * self.row_height
for ra in self.raregs:
# Vertical breakout.
self.wiring.append(Inst(
self, self.rareg_vwire_cells[ra.ytrack],
"XRAWV%d" % ra.index,
Vec(ra.pos.x, y_raregwire),
mx=ra.mx
))
# Horizontal lanes.
for i in range(ra.ymax-ra.ytrack-1):
self.wiring.append(Inst(
self, self.rareg_hwire_a_cell,
"XRAWH%dY%d" % (ra.index, i),
Vec(ra.pos.x, y_raregwire + 0.4e-6 - (ra.ymax-i-1)*0.2e-6),
mx=ra.mx
))
# Add the wiring that is necessary on top of the welltaps which were
# placed among the RAREGs.
for wt in self.welltaps:
if wt.pos.x >= self.raregs[0].pos.x and wt.pos.x <= self.raregs[-1].pos.x:
flip = wt.pos.x < self.addrdec.pos.x + 0.5*addrdec_width
x = wt.pos.x + welltap_width if flip else wt.pos.x
self.wiring.append(Inst(
self, self.welltap_bwire_cell, wt.name+"WB",
Vec(
x,
wt.pos.y + (self.num_words+2) * self.row_height
),
mx=flip,
my=True
))
# Gather a list of horizontal tracks this welltap interrupts.
tracks = list()
if x < self.addrdec.pos.x + 0.5*addrdec_width:
for ra in self.raregs:
if ra.pos.x < x:
tracks.append(y_raregwire + 0.4e-6 - ra.ytrack*0.2e-6)
else:
for ra in self.raregs:
if ra.pos.x > x:
tracks.append(y_raregwire + 0.4e-6 - ra.ytrack*0.2e-6)
# Fill in the interrupted horizontal tracks.
for t in tracks: | self.wiring.append(Inst(
self, self.rareg_hwire_b_cell, wt.name+"WRAB",
Vec(x,t), | random_line_split |
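The last lines of the row above patch the RAREG lanes that a welltap interrupts. A standalone sketch of that side test (assumed names and simplified data, not the real Inst objects):

def interrupted_tracks(x, spine_x, raregs, y_base, pitch=0.2e-6):
    # A tap left of the spine cuts the lanes of registers to its left;
    # a tap right of the spine cuts the lanes of registers to its right.
    tracks = []
    for ra_x, ytrack in raregs:
        on_left = x < spine_x
        if (on_left and ra_x < x) or (not on_left and ra_x > x):
            tracks.append(y_base + 0.4e-6 - ytrack * pitch)
    return tracks

raregs = [(1e-6, 0), (2e-6, 1), (8e-6, 0), (9e-6, 1)]  # (x position, ytrack)
print(interrupted_tracks(x=3e-6, spine_x=5e-6, raregs=raregs, y_base=10e-6))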
|
develop.ts | building index.html`, {})
indexHTMLActivity.start()
const directory = program.directory
const directoryPath = withBasePath(directory)
const workerPool = WorkerPool.create()
const createIndexHtml = async (activity: ActivityTracker): Promise<void> => {
try {
await buildHTML({
program,
stage: BuildHTMLStage.DevelopHTML,
pagePaths: [`/`],
workerPool,
activity,
})
} catch (err) {
if (err.name !== `WebpackError`) {
report.panic(err)
return
}
report.panic(
report.stripIndent`
There was an error compiling the html.js component for the development server.
See our docs page on debugging HTML builds for help https://gatsby.dev/debug-html
`,
err
)
}
}
await createIndexHtml(indexHTMLActivity)
indexHTMLActivity.end()
// report.stateUpdate(`webpack`, `IN_PROGRESS`)
const webpackActivity = report.activityTimer(`Building development bundle`, {
id: `webpack-develop`,
})
webpackActivity.start()
const devConfig = await webpackConfig(
program,
directory,
`develop`,
program.port,
{ parentSpan: webpackActivity.span }
)
const compiler = webpack(devConfig)
/**
* Set up the express app.
**/
const app = express()
app.use(telemetry.expressMiddleware(`DEVELOP`))
app.use(
webpackHotMiddleware(compiler, {
log: false,
path: `/__webpack_hmr`,
heartbeat: 10 * 1000,
})
)
app.use(cors())
/**
* Pattern matching all endpoints with graphql or graphiql with 1 or more leading underscores
*/
const graphqlEndpoint = `/_+graphi?ql`
if (process.env.GATSBY_GRAPHQL_IDE === `playground`) {
app.get(
graphqlEndpoint,
graphqlPlayground({
endpoint: `/___graphql`,
}),
() => {}
)
} else {
graphiqlExplorer(app, {
graphqlEndpoint,
})
}
app.use(
graphqlEndpoint,
graphqlHTTP(
(): graphqlHTTP.OptionsData => {
const { schema, schemaCustomization } = store.getState()
return { | schemaComposer: schemaCustomization.composer,
context: {},
customContext: schemaCustomization.context,
}),
customFormatErrorFn(err): unknown {
return {
...formatError(err),
stack: err.stack ? err.stack.split(`\n`) : [],
}
},
}
}
)
)
/**
* Refresh external data sources.
* This behavior is disabled by default, but the ENABLE_GATSBY_REFRESH_ENDPOINT env var enables it
* If no GATSBY_REFRESH_TOKEN env var is available, then no Authorization header is required
**/
const REFRESH_ENDPOINT = `/__refresh`
const refresh = async (req: express.Request): Promise<void> => {
stopSchemaHotReloader()
let activity = report.activityTimer(`createSchemaCustomization`, {})
activity.start()
await createSchemaCustomization({
refresh: true,
})
activity.end()
activity = report.activityTimer(`Refreshing source data`, {})
activity.start()
await sourceNodes({
webhookBody: req.body,
})
activity.end()
activity = report.activityTimer(`rebuild schema`)
activity.start()
await rebuildSchema({ parentSpan: activity })
activity.end()
startSchemaHotReloader()
}
app.use(REFRESH_ENDPOINT, express.json())
app.post(REFRESH_ENDPOINT, (req, res) => {
const enableRefresh = process.env.ENABLE_GATSBY_REFRESH_ENDPOINT
const refreshToken = process.env.GATSBY_REFRESH_TOKEN
const authorizedRefresh =
!refreshToken || req.headers.authorization === refreshToken
if (enableRefresh && authorizedRefresh) {
refresh(req)
}
res.end()
})
app.get(`/__open-stack-frame-in-editor`, (req, res) => {
launchEditor(req.query.fileName, req.query.lineNumber)
res.end()
})
// Disable directory indexing i.e. serving index.html from a directory.
// This can lead to serving stale html files during development.
//
// We serve by default an empty index.html that sets up the dev environment.
app.use(developStatic(`public`, { index: false }))
app.use(
webpackDevMiddleware(compiler, {
logLevel: `silent`,
publicPath: devConfig.output.publicPath,
watchOptions: devConfig.devServer
? devConfig.devServer.watchOptions
: null,
stats: `errors-only`,
})
)
// Expose access to app for advanced use cases
const { developMiddleware } = store.getState().config
if (developMiddleware) {
developMiddleware(app, program)
}
// Set up API proxy.
const { proxy } = store.getState().config
if (proxy) {
proxy.forEach(({ prefix, url }) => {
app.use(`${prefix}/*`, (req, res) => {
const proxiedUrl = url + req.originalUrl
const {
// remove `host` from copied headers
// eslint-disable-next-line @typescript-eslint/no-unused-vars
headers: { host, ...headers },
method,
} = req
req
.pipe(
got
.stream(proxiedUrl, { headers, method, decompress: false })
.on(`response`, response =>
res.writeHead(response.statusCode || 200, response.headers)
)
.on(`error`, (err, _, response) => {
if (response) {
res.writeHead(response.statusCode || 400, response.headers)
} else {
const message = `Error when trying to proxy request "${req.originalUrl}" to "${proxiedUrl}"`
report.error(message, err)
res.sendStatus(500)
}
})
)
.pipe(res)
})
})
}
await apiRunnerNode(`onCreateDevServer`, { app })
// In case nothing before handled hot-update - send 404.
// This fixes "Unexpected token < in JSON at position 0" runtime
// errors after restarting development server and
// cause automatic hard refresh in the browser.
app.use(/.*\.hot-update\.json$/i, (_, res) => {
res.status(404).end()
})
// Render an HTML page and serve it.
app.use((_, res) => {
res.sendFile(directoryPath(`public/index.html`), err => {
if (err) {
res.status(500).end()
}
})
})
/**
* Set up the HTTP server and socket.io.
* If a SSL cert exists in program, use it with `createServer`.
**/
const server = program.ssl
? https.createServer(program.ssl, app)
: new http.Server(app)
const socket = websocketManager.init({ server, directory: program.directory })
const listener = server.listen(program.port, program.host)
// Register watcher that rebuilds index.html every time html.js changes.
const watchGlobs = [`src/html.js`, `plugins/**/gatsby-ssr.js`].map(path =>
slash(directoryPath(path))
)
chokidar.watch(watchGlobs).on(`change`, async () => {
await createIndexHtml(indexHTMLActivity)
socket.to(`clients`).emit(`reload`)
})
return { compiler, listener, webpackActivity }
}
module.exports = async (program: IProgram): Promise<void> => {
// We want to prompt the feedback request when users quit develop
// assuming they pass the heuristic check to know they are a user
// we want to request feedback from, and we're not annoying them.
process.on(
`SIGINT`,
async (): Promise<void> => {
if (await userPassesFeedbackRequestHeuristic()) {
showFeedbackRequest()
}
process.exit(0)
}
)
if (process.env.GATSBY_EXPERIMENTAL_PAGE_BUILD_ON_DATA_CHANGES) {
report.panic(
`The flag ${chalk.yellow(
`GATSBY_EXPERIMENTAL_PAGE_BUILD_ON_DATA_CHANGES`
)} is not available with ${chalk.cyan(
`gatsby develop`
)}, please retry using ${chalk.cyan(`gatsby build`)}`
)
}
initTracer(program.openTracingConfigFile)
report.pendingActivity({ id: `webpack-develop` })
telemetry.trackCli(`DEVELOP_START`)
telemetry.startBackgroundUpdate()
const port =
typeof program.port === `string` ? parseInt(program.port, 10) : program.port
// In order to enable custom ssl, --cert-file --key-file and -https flags must all be
// used together
if ((program[`cert-file`] || program[`key-file`]) && !program.https) {
report.panic(
`for custom ssl --https, --cert-file, and --key-file must be used together`
)
}
try {
program.port = await detectPortInUseAndPrompt(port)
} catch (e) {
if (e.message === `USER_REJECTED`) {
process.exit(0)
}
throw e
}
// Check if https is | schema,
graphiql: false,
context: withResolverContext({
schema, | random_line_split |
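The proxy rule in the develop.ts row above forwards the original method and headers but strips `host` before piping the body through. A rough Python sketch of the same idea (using the requests library as an assumption; this is not Gatsby's actual middleware):

import requests

def forward(method, original_url, headers, body, upstream):
    proxied_url = upstream + original_url
    # Drop `host` so the upstream service sees its own hostname.
    headers = {k: v for k, v in headers.items() if k.lower() != "host"}
    resp = requests.request(method, proxied_url, headers=headers,
                            data=body, stream=True)
    return resp.status_code, resp.headers, resp.raw

# Hypothetical usage:
# status, hdrs, stream = forward("GET", "/api/items", {"Host": "dev:8000"},
#                                None, "https://api.example.com")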
develop.ts | index.html`, {})
indexHTMLActivity.start()
const directory = program.directory
const directoryPath = withBasePath(directory)
const workerPool = WorkerPool.create()
const createIndexHtml = async (activity: ActivityTracker): Promise<void> => {
try {
await buildHTML({
program,
stage: BuildHTMLStage.DevelopHTML,
pagePaths: [`/`],
workerPool,
activity,
})
} catch (err) {
if (err.name !== `WebpackError`) {
report.panic(err)
return
}
report.panic(
report.stripIndent`
There was an error compiling the html.js component for the development server.
See our docs page on debugging HTML builds for help https://gatsby.dev/debug-html
`,
err
)
}
}
await createIndexHtml(indexHTMLActivity)
indexHTMLActivity.end()
// report.stateUpdate(`webpack`, `IN_PROGRESS`)
const webpackActivity = report.activityTimer(`Building development bundle`, {
id: `webpack-develop`,
})
webpackActivity.start()
const devConfig = await webpackConfig(
program,
directory,
`develop`,
program.port,
{ parentSpan: webpackActivity.span }
)
const compiler = webpack(devConfig)
/**
* Set up the express app.
**/
const app = express()
app.use(telemetry.expressMiddleware(`DEVELOP`))
app.use(
webpackHotMiddleware(compiler, {
log: false,
path: `/__webpack_hmr`,
heartbeat: 10 * 1000,
})
)
app.use(cors())
/**
* Pattern matching all endpoints with graphql or graphiql with 1 or more leading underscores
*/
const graphqlEndpoint = `/_+graphi?ql`
if (process.env.GATSBY_GRAPHQL_IDE === `playground`) {
app.get(
graphqlEndpoint,
graphqlPlayground({
endpoint: `/___graphql`,
}),
() => {}
)
} else {
graphiqlExplorer(app, {
graphqlEndpoint,
})
}
app.use(
graphqlEndpoint,
graphqlHTTP(
(): graphqlHTTP.OptionsData => {
const { schema, schemaCustomization } = store.getState()
return {
schema,
graphiql: false,
context: withResolverContext({
schema,
schemaComposer: schemaCustomization.composer,
context: {},
customContext: schemaCustomization.context,
}),
customFormatErrorFn(err): unknown | ,
}
}
)
)
/**
* Refresh external data sources.
* This behavior is disabled by default, but the ENABLE_GATSBY_REFRESH_ENDPOINT env var enables it
* If no GATSBY_REFRESH_TOKEN env var is available, then no Authorization header is required
**/
const REFRESH_ENDPOINT = `/__refresh`
const refresh = async (req: express.Request): Promise<void> => {
stopSchemaHotReloader()
let activity = report.activityTimer(`createSchemaCustomization`, {})
activity.start()
await createSchemaCustomization({
refresh: true,
})
activity.end()
activity = report.activityTimer(`Refreshing source data`, {})
activity.start()
await sourceNodes({
webhookBody: req.body,
})
activity.end()
activity = report.activityTimer(`rebuild schema`)
activity.start()
await rebuildSchema({ parentSpan: activity })
activity.end()
startSchemaHotReloader()
}
app.use(REFRESH_ENDPOINT, express.json())
app.post(REFRESH_ENDPOINT, (req, res) => {
const enableRefresh = process.env.ENABLE_GATSBY_REFRESH_ENDPOINT
const refreshToken = process.env.GATSBY_REFRESH_TOKEN
const authorizedRefresh =
!refreshToken || req.headers.authorization === refreshToken
if (enableRefresh && authorizedRefresh) {
refresh(req)
}
res.end()
})
app.get(`/__open-stack-frame-in-editor`, (req, res) => {
launchEditor(req.query.fileName, req.query.lineNumber)
res.end()
})
// Disable directory indexing i.e. serving index.html from a directory.
// This can lead to serving stale html files during development.
//
// We serve by default an empty index.html that sets up the dev environment.
app.use(developStatic(`public`, { index: false }))
app.use(
webpackDevMiddleware(compiler, {
logLevel: `silent`,
publicPath: devConfig.output.publicPath,
watchOptions: devConfig.devServer
? devConfig.devServer.watchOptions
: null,
stats: `errors-only`,
})
)
// Expose access to app for advanced use cases
const { developMiddleware } = store.getState().config
if (developMiddleware) {
developMiddleware(app, program)
}
// Set up API proxy.
const { proxy } = store.getState().config
if (proxy) {
proxy.forEach(({ prefix, url }) => {
app.use(`${prefix}/*`, (req, res) => {
const proxiedUrl = url + req.originalUrl
const {
// remove `host` from copied headers
// eslint-disable-next-line @typescript-eslint/no-unused-vars
headers: { host, ...headers },
method,
} = req
req
.pipe(
got
.stream(proxiedUrl, { headers, method, decompress: false })
.on(`response`, response =>
res.writeHead(response.statusCode || 200, response.headers)
)
.on(`error`, (err, _, response) => {
if (response) {
res.writeHead(response.statusCode || 400, response.headers)
} else {
const message = `Error when trying to proxy request "${req.originalUrl}" to "${proxiedUrl}"`
report.error(message, err)
res.sendStatus(500)
}
})
)
.pipe(res)
})
})
}
await apiRunnerNode(`onCreateDevServer`, { app })
// In case nothing before handled hot-update - send 404.
// This fixes "Unexpected token < in JSON at position 0" runtime
// errors after restarting development server and
// cause automatic hard refresh in the browser.
app.use(/.*\.hot-update\.json$/i, (_, res) => {
res.status(404).end()
})
// Render an HTML page and serve it.
app.use((_, res) => {
res.sendFile(directoryPath(`public/index.html`), err => {
if (err) {
res.status(500).end()
}
})
})
/**
* Set up the HTTP server and socket.io.
* If a SSL cert exists in program, use it with `createServer`.
**/
const server = program.ssl
? https.createServer(program.ssl, app)
: new http.Server(app)
const socket = websocketManager.init({ server, directory: program.directory })
const listener = server.listen(program.port, program.host)
// Register watcher that rebuilds index.html every time html.js changes.
const watchGlobs = [`src/html.js`, `plugins/**/gatsby-ssr.js`].map(path =>
slash(directoryPath(path))
)
chokidar.watch(watchGlobs).on(`change`, async () => {
await createIndexHtml(indexHTMLActivity)
socket.to(`clients`).emit(`reload`)
})
return { compiler, listener, webpackActivity }
}
module.exports = async (program: IProgram): Promise<void> => {
// We want to prompt the feedback request when users quit develop
// assuming they pass the heuristic check to know they are a user
// we want to request feedback from, and we're not annoying them.
process.on(
`SIGINT`,
async (): Promise<void> => {
if (await userPassesFeedbackRequestHeuristic()) {
showFeedbackRequest()
}
process.exit(0)
}
)
if (process.env.GATSBY_EXPERIMENTAL_PAGE_BUILD_ON_DATA_CHANGES) {
report.panic(
`The flag ${chalk.yellow(
`GATSBY_EXPERIMENTAL_PAGE_BUILD_ON_DATA_CHANGES`
)} is not available with ${chalk.cyan(
`gatsby develop`
)}, please retry using ${chalk.cyan(`gatsby build`)}`
)
}
initTracer(program.openTracingConfigFile)
report.pendingActivity({ id: `webpack-develop` })
telemetry.trackCli(`DEVELOP_START`)
telemetry.startBackgroundUpdate()
const port =
typeof program.port === `string` ? parseInt(program.port, 10) : program.port
// In order to enable custom ssl, --cert-file --key-file and -https flags must all be
// used together
if ((program[`cert-file`] || program[`key-file`]) && !program.https) {
report.panic(
`for custom ssl --https, --cert-file, and --key-file must be used together`
)
}
try {
program.port = await detectPortInUseAndPrompt(port)
} catch (e) {
if (e.message === `USER_REJECTED`) {
process.exit(0)
}
throw e
}
// Check if https | {
return {
...formatError(err),
stack: err.stack ? err.stack.split(`\n`) : [],
}
} | identifier_body |
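The identifier body above is customFormatErrorFn, which returns the GraphQL error plus its stack split into lines. The same shaping in Python (a hedged sketch of the idea, not the express-graphql API):

import traceback

def format_error(exc):
    return {
        "message": str(exc),
        # The traceback as a list of strings, mirroring err.stack split on newlines.
        "stack": traceback.format_exception(type(exc), exc, exc.__traceback__),
    }

try:
    1 / 0
except ZeroDivisionError as err:
    print(format_error(err)["stack"])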
develop.ts | Pattern matching all endpoints with graphql or graphiql with 1 or more leading underscores
*/
const graphqlEndpoint = `/_+graphi?ql`
if (process.env.GATSBY_GRAPHQL_IDE === `playground`) {
app.get(
graphqlEndpoint,
graphqlPlayground({
endpoint: `/___graphql`,
}),
() => {}
)
} else {
graphiqlExplorer(app, {
graphqlEndpoint,
})
}
app.use(
graphqlEndpoint,
graphqlHTTP(
(): graphqlHTTP.OptionsData => {
const { schema, schemaCustomization } = store.getState()
return {
schema,
graphiql: false,
context: withResolverContext({
schema,
schemaComposer: schemaCustomization.composer,
context: {},
customContext: schemaCustomization.context,
}),
customFormatErrorFn(err): unknown {
return {
...formatError(err),
stack: err.stack ? err.stack.split(`\n`) : [],
}
},
}
}
)
)
/**
* Refresh external data sources.
* This behavior is disabled by default, but the ENABLE_GATSBY_REFRESH_ENDPOINT env var enables it
* If no GATSBY_REFRESH_TOKEN env var is available, then no Authorization header is required
**/
const REFRESH_ENDPOINT = `/__refresh`
const refresh = async (req: express.Request): Promise<void> => {
stopSchemaHotReloader()
let activity = report.activityTimer(`createSchemaCustomization`, {})
activity.start()
await createSchemaCustomization({
refresh: true,
})
activity.end()
activity = report.activityTimer(`Refreshing source data`, {})
activity.start()
await sourceNodes({
webhookBody: req.body,
})
activity.end()
activity = report.activityTimer(`rebuild schema`)
activity.start()
await rebuildSchema({ parentSpan: activity })
activity.end()
startSchemaHotReloader()
}
app.use(REFRESH_ENDPOINT, express.json())
app.post(REFRESH_ENDPOINT, (req, res) => {
const enableRefresh = process.env.ENABLE_GATSBY_REFRESH_ENDPOINT
const refreshToken = process.env.GATSBY_REFRESH_TOKEN
const authorizedRefresh =
!refreshToken || req.headers.authorization === refreshToken
if (enableRefresh && authorizedRefresh) {
refresh(req)
}
res.end()
})
app.get(`/__open-stack-frame-in-editor`, (req, res) => {
launchEditor(req.query.fileName, req.query.lineNumber)
res.end()
})
// Disable directory indexing i.e. serving index.html from a directory.
// This can lead to serving stale html files during development.
//
// We serve by default an empty index.html that sets up the dev environment.
app.use(developStatic(`public`, { index: false }))
app.use(
webpackDevMiddleware(compiler, {
logLevel: `silent`,
publicPath: devConfig.output.publicPath,
watchOptions: devConfig.devServer
? devConfig.devServer.watchOptions
: null,
stats: `errors-only`,
})
)
// Expose access to app for advanced use cases
const { developMiddleware } = store.getState().config
if (developMiddleware) {
developMiddleware(app, program)
}
// Set up API proxy.
const { proxy } = store.getState().config
if (proxy) {
proxy.forEach(({ prefix, url }) => {
app.use(`${prefix}/*`, (req, res) => {
const proxiedUrl = url + req.originalUrl
const {
// remove `host` from copied headers
// eslint-disable-next-line @typescript-eslint/no-unused-vars
headers: { host, ...headers },
method,
} = req
req
.pipe(
got
.stream(proxiedUrl, { headers, method, decompress: false })
.on(`response`, response =>
res.writeHead(response.statusCode || 200, response.headers)
)
.on(`error`, (err, _, response) => {
if (response) {
res.writeHead(response.statusCode || 400, response.headers)
} else {
const message = `Error when trying to proxy request "${req.originalUrl}" to "${proxiedUrl}"`
report.error(message, err)
res.sendStatus(500)
}
})
)
.pipe(res)
})
})
}
await apiRunnerNode(`onCreateDevServer`, { app })
// In case nothing before handled hot-update - send 404.
// This fixes "Unexpected token < in JSON at position 0" runtime
// errors after restarting development server and
// cause automatic hard refresh in the browser.
app.use(/.*\.hot-update\.json$/i, (_, res) => {
res.status(404).end()
})
// Render an HTML page and serve it.
app.use((_, res) => {
res.sendFile(directoryPath(`public/index.html`), err => {
if (err) {
res.status(500).end()
}
})
})
/**
* Set up the HTTP server and socket.io.
* If a SSL cert exists in program, use it with `createServer`.
**/
const server = program.ssl
? https.createServer(program.ssl, app)
: new http.Server(app)
const socket = websocketManager.init({ server, directory: program.directory })
const listener = server.listen(program.port, program.host)
// Register watcher that rebuilds index.html every time html.js changes.
const watchGlobs = [`src/html.js`, `plugins/**/gatsby-ssr.js`].map(path =>
slash(directoryPath(path))
)
chokidar.watch(watchGlobs).on(`change`, async () => {
await createIndexHtml(indexHTMLActivity)
socket.to(`clients`).emit(`reload`)
})
return { compiler, listener, webpackActivity }
}
module.exports = async (program: IProgram): Promise<void> => {
// We want to prompt the feedback request when users quit develop
// assuming they pass the heuristic check to know they are a user
// we want to request feedback from, and we're not annoying them.
process.on(
`SIGINT`,
async (): Promise<void> => {
if (await userPassesFeedbackRequestHeuristic()) {
showFeedbackRequest()
}
process.exit(0)
}
)
if (process.env.GATSBY_EXPERIMENTAL_PAGE_BUILD_ON_DATA_CHANGES) {
report.panic(
`The flag ${chalk.yellow(
`GATSBY_EXPERIMENTAL_PAGE_BUILD_ON_DATA_CHANGES`
)} is not available with ${chalk.cyan(
`gatsby develop`
)}, please retry using ${chalk.cyan(`gatsby build`)}`
)
}
initTracer(program.openTracingConfigFile)
report.pendingActivity({ id: `webpack-develop` })
telemetry.trackCli(`DEVELOP_START`)
telemetry.startBackgroundUpdate()
const port =
typeof program.port === `string` ? parseInt(program.port, 10) : program.port
// In order to enable custom ssl, --cert-file --key-file and -https flags must all be
// used together
if ((program[`cert-file`] || program[`key-file`]) && !program.https) {
report.panic(
`for custom ssl --https, --cert-file, and --key-file must be used together`
)
}
try {
program.port = await detectPortInUseAndPrompt(port)
} catch (e) {
if (e.message === `USER_REJECTED`) {
process.exit(0)
}
throw e
}
// Check if https is enabled, then create or get SSL cert.
// Certs are named 'devcert' and issued to the host.
if (program.https) {
const sslHost =
program.host === `0.0.0.0` || program.host === `::`
? `localhost`
: program.host
if (REGEX_IP.test(sslHost)) {
report.panic(
`You're trying to generate a ssl certificate for an IP (${sslHost}). Please use a hostname instead.`
)
}
program.ssl = await getSslCert({
name: sslHost,
caFile: program[`ca-file`],
certFile: program[`cert-file`],
keyFile: program[`key-file`],
directory: program.directory,
})
}
// Start bootstrap process.
const { graphqlRunner } = await bootstrap(program)
// Start the createPages hot reloader.
bootstrapPageHotReloader(graphqlRunner)
// Start the schema hot reloader.
bootstrapSchemaHotReloader()
await queryUtil.initialProcessQueries()
require(`../redux/actions`).boundActionCreators.setProgramStatus(
`BOOTSTRAP_QUERY_RUNNING_FINISHED`
)
await db.saveState()
await waitUntilAllJobsComplete()
requiresWriter.startListener()
db.startAutosave()
queryUtil.startListeningToDevelopQueue()
queryWatcher.startWatchDeletePage()
let { compiler, webpackActivity } = await startServer(program)
interface IPreparedUrls {
lanUrlForConfig: string
lanUrlForTerminal: string
localUrlForTerminal: string
localUrlForBrowser: string
}
function | prepareUrls | identifier_name |
|
develop.ts | index.html`, {})
indexHTMLActivity.start()
const directory = program.directory
const directoryPath = withBasePath(directory)
const workerPool = WorkerPool.create()
const createIndexHtml = async (activity: ActivityTracker): Promise<void> => {
try {
await buildHTML({
program,
stage: BuildHTMLStage.DevelopHTML,
pagePaths: [`/`],
workerPool,
activity,
})
} catch (err) {
if (err.name !== `WebpackError`) {
report.panic(err)
return
}
report.panic(
report.stripIndent`
There was an error compiling the html.js component for the development server.
See our docs page on debugging HTML builds for help https://gatsby.dev/debug-html
`,
err
)
}
}
await createIndexHtml(indexHTMLActivity)
indexHTMLActivity.end()
// report.stateUpdate(`webpack`, `IN_PROGRESS`)
const webpackActivity = report.activityTimer(`Building development bundle`, {
id: `webpack-develop`,
})
webpackActivity.start()
const devConfig = await webpackConfig(
program,
directory,
`develop`,
program.port,
{ parentSpan: webpackActivity.span }
)
const compiler = webpack(devConfig)
/**
* Set up the express app.
**/
const app = express()
app.use(telemetry.expressMiddleware(`DEVELOP`))
app.use(
webpackHotMiddleware(compiler, {
log: false,
path: `/__webpack_hmr`,
heartbeat: 10 * 1000,
})
)
app.use(cors())
/**
* Pattern matching all endpoints with graphql or graphiql with 1 or more leading underscores
*/
const graphqlEndpoint = `/_+graphi?ql`
if (process.env.GATSBY_GRAPHQL_IDE === `playground`) {
app.get(
graphqlEndpoint,
graphqlPlayground({
endpoint: `/___graphql`,
}),
() => {}
)
} else {
graphiqlExplorer(app, {
graphqlEndpoint,
})
}
app.use(
graphqlEndpoint,
graphqlHTTP(
(): graphqlHTTP.OptionsData => {
const { schema, schemaCustomization } = store.getState()
return {
schema,
graphiql: false,
context: withResolverContext({
schema,
schemaComposer: schemaCustomization.composer,
context: {},
customContext: schemaCustomization.context,
}),
customFormatErrorFn(err): unknown {
return {
...formatError(err),
stack: err.stack ? err.stack.split(`\n`) : [],
}
},
}
}
)
)
/**
* Refresh external data sources.
* This behavior is disabled by default, but the ENABLE_GATSBY_REFRESH_ENDPOINT env var enables it
* If no GATSBY_REFRESH_TOKEN env var is available, then no Authorization header is required
**/
const REFRESH_ENDPOINT = `/__refresh`
const refresh = async (req: express.Request): Promise<void> => {
stopSchemaHotReloader()
let activity = report.activityTimer(`createSchemaCustomization`, {})
activity.start()
await createSchemaCustomization({
refresh: true,
})
activity.end()
activity = report.activityTimer(`Refreshing source data`, {})
activity.start()
await sourceNodes({
webhookBody: req.body,
})
activity.end()
activity = report.activityTimer(`rebuild schema`)
activity.start()
await rebuildSchema({ parentSpan: activity })
activity.end()
startSchemaHotReloader()
}
app.use(REFRESH_ENDPOINT, express.json())
app.post(REFRESH_ENDPOINT, (req, res) => {
const enableRefresh = process.env.ENABLE_GATSBY_REFRESH_ENDPOINT
const refreshToken = process.env.GATSBY_REFRESH_TOKEN
const authorizedRefresh =
!refreshToken || req.headers.authorization === refreshToken
if (enableRefresh && authorizedRefresh) {
refresh(req)
}
res.end()
})
app.get(`/__open-stack-frame-in-editor`, (req, res) => {
launchEditor(req.query.fileName, req.query.lineNumber)
res.end()
})
// Disable directory indexing i.e. serving index.html from a directory.
// This can lead to serving stale html files during development.
//
// We serve by default an empty index.html that sets up the dev environment.
app.use(developStatic(`public`, { index: false }))
app.use(
webpackDevMiddleware(compiler, {
logLevel: `silent`,
publicPath: devConfig.output.publicPath,
watchOptions: devConfig.devServer
? devConfig.devServer.watchOptions
: null,
stats: `errors-only`,
})
)
// Expose access to app for advanced use cases
const { developMiddleware } = store.getState().config
if (developMiddleware) {
developMiddleware(app, program)
}
// Set up API proxy.
const { proxy } = store.getState().config
if (proxy) {
proxy.forEach(({ prefix, url }) => {
app.use(`${prefix}/*`, (req, res) => {
const proxiedUrl = url + req.originalUrl
const {
// remove `host` from copied headers
// eslint-disable-next-line @typescript-eslint/no-unused-vars
headers: { host, ...headers },
method,
} = req
req
.pipe(
got
.stream(proxiedUrl, { headers, method, decompress: false })
.on(`response`, response =>
res.writeHead(response.statusCode || 200, response.headers)
)
.on(`error`, (err, _, response) => {
if (response) {
res.writeHead(response.statusCode || 400, response.headers)
} else |
})
)
.pipe(res)
})
})
}
await apiRunnerNode(`onCreateDevServer`, { app })
// In case nothing before handled hot-update - send 404.
// This fixes "Unexpected token < in JSON at position 0" runtime
// errors after restarting development server and
// cause automatic hard refresh in the browser.
app.use(/.*\.hot-update\.json$/i, (_, res) => {
res.status(404).end()
})
// Render an HTML page and serve it.
app.use((_, res) => {
res.sendFile(directoryPath(`public/index.html`), err => {
if (err) {
res.status(500).end()
}
})
})
/**
* Set up the HTTP server and socket.io.
* If a SSL cert exists in program, use it with `createServer`.
**/
const server = program.ssl
? https.createServer(program.ssl, app)
: new http.Server(app)
const socket = websocketManager.init({ server, directory: program.directory })
const listener = server.listen(program.port, program.host)
// Register watcher that rebuilds index.html every time html.js changes.
const watchGlobs = [`src/html.js`, `plugins/**/gatsby-ssr.js`].map(path =>
slash(directoryPath(path))
)
chokidar.watch(watchGlobs).on(`change`, async () => {
await createIndexHtml(indexHTMLActivity)
socket.to(`clients`).emit(`reload`)
})
return { compiler, listener, webpackActivity }
}
module.exports = async (program: IProgram): Promise<void> => {
// We want to prompt the feedback request when users quit develop
// assuming they pass the heuristic check to know they are a user
// we want to request feedback from, and we're not annoying them.
process.on(
`SIGINT`,
async (): Promise<void> => {
if (await userPassesFeedbackRequestHeuristic()) {
showFeedbackRequest()
}
process.exit(0)
}
)
if (process.env.GATSBY_EXPERIMENTAL_PAGE_BUILD_ON_DATA_CHANGES) {
report.panic(
`The flag ${chalk.yellow(
`GATSBY_EXPERIMENTAL_PAGE_BUILD_ON_DATA_CHANGES`
)} is not available with ${chalk.cyan(
`gatsby develop`
)}, please retry using ${chalk.cyan(`gatsby build`)}`
)
}
initTracer(program.openTracingConfigFile)
report.pendingActivity({ id: `webpack-develop` })
telemetry.trackCli(`DEVELOP_START`)
telemetry.startBackgroundUpdate()
const port =
typeof program.port === `string` ? parseInt(program.port, 10) : program.port
// In order to enable custom ssl, --cert-file --key-file and -https flags must all be
// used together
if ((program[`cert-file`] || program[`key-file`]) && !program.https) {
report.panic(
`for custom ssl --https, --cert-file, and --key-file must be used together`
)
}
try {
program.port = await detectPortInUseAndPrompt(port)
} catch (e) {
if (e.message === `USER_REJECTED`) {
process.exit(0)
}
throw e
}
// Check if https | {
const message = `Error when trying to proxy request "${req.originalUrl}" to "${proxiedUrl}"`
report.error(message, err)
res.sendStatus(500)
} | conditional_block |
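The develop.ts rows also normalize the ssl host before requesting a devcert: bind-all addresses fall back to localhost, and bare IPs are rejected because a certificate needs a hostname. A small Python sketch of that check (REGEX_IP here is an assumed IPv4-only stand-in):

import re

REGEX_IP = re.compile(r"^(\d{1,3}\.){3}\d{1,3}$")

def ssl_host(host):
    name = "localhost" if host in ("0.0.0.0", "::") else host
    if REGEX_IP.match(name):
        raise ValueError(f"cannot generate an ssl certificate for an IP ({name})")
    return name

print(ssl_host("0.0.0.0"))    # localhost
print(ssl_host("dev.local"))  # dev.local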
mainga.py | highest_quality = 0
class Coordinates:
def __init__(self):
self.x1 = None
self.y1 = None
self.x2 = None
self.y2 = None
def setup_camera():
global mydll
global hCamera
global pbyteraw
global dwBufferSize
global dwNumberOfByteTrans
global dwFrameNo
global dwMilliseconds
global threshhold
# create parameters for camera
dwTransferBitsPerPixel = 4
im_height = 1200
im_width = 1600
dwBufferSize = im_height * im_width * 2
dwNumberOfByteTrans = c_uint32()
dwFrameNo = c_uint32()
pbyteraw = np.zeros((im_height, im_width), dtype=np.uint16)
dwMilliseconds = 3000
triggermode = 2049
# triggermode = 0
# threshhold = 0
# set up camera capture
mydll = windll.LoadLibrary('StTrgApi.dll')
hCamera = mydll.StTrg_Open()
print('hCamera id:', hCamera)
mydll.StTrg_SetTransferBitsPerPixel(hCamera, dwTransferBitsPerPixel)
mydll.StTrg_SetScanMode(hCamera, 0, 0, 0, 0, 0)
mydll.StTrg_SetGain(hCamera, 0)
# mydll.StTrg_SetDigitalGain(hCamera, 64)
mydll.StTrg_SetDigitalGain(hCamera, 64)
mydll.StTrg_SetExposureClock(hCamera, 200000)
mydll.StTrg_SetClock(hCamera, 0, 0)
mydll.StTrg_SetTriggerMode(hCamera, triggermode)
mydll.StTrg_SetTriggerTiming(hCamera, 0, 0)
mydll.StTrg_SetIOPinDirection(hCamera, 0)
mydll.StTrg_SetIOPinPolarity(hCamera, 0)
mydll.StTrg_SetIOPinMode(hCamera, 0, 16)
def after_selection():
global inner_rectangle
global outer_rectangle
# print('next')
# print(coord.x1)
# print(coord.y1)
# print(coord.x2)
# print(coord.x2)
if inner_rectangle:
# draw outer rectangle
# print('coord', coord.x1)
if coord.x1:
outer_rectangle = Coordinates()
outer_rectangle.x1 = coord.x1
outer_rectangle.y1 = coord.y1
outer_rectangle.x2 = coord.x2
outer_rectangle.y2 = coord.y2
else:
# draw inner rectangle:
if coord.x1:
inner_rectangle = Coordinates()
inner_rectangle.x1 = coord.x1
inner_rectangle.y1 = coord.y1
inner_rectangle.x2 = coord.x2
inner_rectangle.y2 = coord.y2
plt.close()
def toggle_selector(event):
# print(' Key pressed.')
if event.key in ['p'] and toggle_selector.rs.active:
# print(' RectangleSelector deactivated.')
toggle_selector.rs.set_active(False)
toggle_selector.rs.set_visible(False)
after_selection()
if event.key in ['r'] and toggle_selector.rs.active:
image = take_image()
b.set_data(image)
# print('new image')
# print('Frame:' + str(dwFrameNo.value))
plt.pause(0.001)
def goodorbad(event):
global inner_rectangle
global outer_rectangle
if event.key in ['y']:
# print('good')
plt.close()
if event.key in ['n']:
# print('bad')
plt.close()
inner_rectangle = None
outer_rectangle = None
get_rectangle()
get_rectangle()
draw_inner_and_outer()
if event.key in ['r']:
# print('refresh')
image = take_image()
b.set_data(image)
# print('new image')
# print('Frame:' + str(dwFrameNo.value))
plt.pause(0.001)
def line_select_callback(eclick, erelease):
global coord
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
# print('x1: ' + str(x1) + ' y1: ' + str(y1))
# print('x2: ' + str(x2) + ' y2: ' + str(y2))
coord.x1 = x1
coord.x2 = x2
coord.y1 = y1
coord.y2 = y2
def get_rectangle():
global coord
global b
coord = Coordinates()
fig, ax = plt.subplots(1)
# take first image
image = take_image()
# print('Frame:' + str(dwFrameNo.value))
# pbyteraw[:, :] = np.zeros(np.shape(pbyteraw))
# pbyteraw[100:200, 100:200] = 10
b = ax.imshow(image, cmap='jet')
if inner_rectangle:
# print("draw inner rectangle")
# print('inner_rectangle:', inner_rectangle)
ax.add_patch(patch.Rectangle((inner_rectangle.x1, inner_rectangle.y1),
inner_rectangle.x2-inner_rectangle.x1,
inner_rectangle.y2-inner_rectangle.y1,
linewidth=2, edgecolor='r', facecolor='none'))
ax.text(0, 100, 'Draw the OUTER rectangle, then press [p] to continue\n'
'Press [r] to refresh image', color='black', backgroundcolor='yellow')
else:
ax.text(0, 100, 'Draw the INNER rectangle, then press [p] to continue\n'
'Press [r] to refresh image', color='black', backgroundcolor='yellow')
toggle_selector.rs = RectangleSelector(ax, line_select_callback,
drawtype='box', useblit=False, button=[1],
minspanx=5, minspany=5, spancoords='pixels',
interactive=True)
plt.connect('key_press_event', toggle_selector)
plt.show()
def take_image():
# print('image taken')
mydll.StTrg_TakeRawSnapShot(hCamera, pbyteraw.ctypes.data_as(POINTER(c_int16)),
dwBufferSize, pointer(dwNumberOfByteTrans), pointer(dwFrameNo), dwMilliseconds)
image = np.rot90(pbyteraw, 1)
# print("max signal: ", np.max(image))
# print('max:', np.max(image))
# image = np.zeros((1600, 1200))
return image
def draw_inner_and_outer():
global b
fig, ax = plt.subplots(1)
# take first image
image = take_image()
# print('Frame:' + str(dwFrameNo.value))
b = ax.imshow(image, cmap='jet')
if inner_rectangle:
ax.add_patch(patch.Rectangle((inner_rectangle.x1, inner_rectangle.y1),
inner_rectangle.x2 - inner_rectangle.x1,
inner_rectangle.y2 - inner_rectangle.y1,
linewidth=2, edgecolor='r', facecolor='none'))
if outer_rectangle:
ax.add_patch(patch.Rectangle((outer_rectangle.x1, outer_rectangle.y1),
outer_rectangle.x2 - outer_rectangle.x1,
outer_rectangle.y2 - outer_rectangle.y1,
linewidth=2, edgecolor='y', facecolor='none'))
# print('final thing')
ax.text(0, 1100, 'INNER', color='r', backgroundcolor='white')
ax.text(0, 1180, 'OUTER', color='y', backgroundcolor='white')
# print('final thing')
ax.text(0, 100, 'Press [y] to continue\n'
'Press [n] to start over\n'
'Press [r] to refresh image', color='black', backgroundcolor='yellow')
plt.connect('key_press_event', goodorbad)
plt.show()
def get_p_number():
return random.random() * 2 * np.pi*2
def params_to_daz(wl_send, phi_send):
home = os.getcwd()
os.chdir(r'\\CREOL-FAST-01\data')
if write_dazzler:
with open('pythonwavefile.txt', 'w') as file:
file.write('phase=2\n#phase')
i = 0
while i < len(wl_send):
file.write('\n')
file.write("{:.6f}".format(wl_send[i]))
file.write('\t')
file.write("{:.6f}".format(phi_send[i]))
i += 1
# print('writing to wavefile')
# time.sleep(0.05)
with open('request.txt', 'w') as file:
proj = r'C:\dazzler\data\pythonwavefile.txt'
file.write(proj)
file.flush()
time.sleep(0.05)
### worked
# fileh = open('request.txt', 'w+')
| import os
inner_rectangle = None
outer_rectangle = None
first_photo_taken = False | random_line_split |
|
mainga.py | inner_rectangle = Coordinates()
inner_rectangle.x1 = coord.x1
inner_rectangle.y1 = coord.y1
inner_rectangle.x2 = coord.x2
inner_rectangle.y2 = coord.y2
plt.close()
def toggle_selector(event):
# print(' Key pressed.')
if event.key in ['p'] and toggle_selector.rs.active:
# print(' RectangleSelector deactivated.')
toggle_selector.rs.set_active(False)
toggle_selector.rs.set_visible(False)
after_selection()
if event.key in ['r'] and toggle_selector.rs.active:
image = take_image()
b.set_data(image)
# print('new image')
# print('Frame:' + str(dwFrameNo.value))
plt.pause(0.001)
def goodorbad(event):
global inner_rectangle
global outer_rectangle
if event.key in ['y']:
# print('good')
plt.close()
if event.key in ['n']:
# print('bad')
plt.close()
inner_rectangle = None
outer_rectangle = None
get_rectangle()
get_rectangle()
draw_inner_and_outer()
if event.key in ['r']:
# print('refresh')
image = take_image()
b.set_data(image)
# print('new image')
# print('Frame:' + str(dwFrameNo.value))
plt.pause(0.001)
def line_select_callback(eclick, erelease):
global coord
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
# print('x1: ' + str(x1) + ' y1: ' + str(y1))
# print('x2: ' + str(x2) + ' y2: ' + str(y2))
coord.x1 = x1
coord.x2 = x2
coord.y1 = y1
coord.y2 = y2
def get_rectangle():
global coord
global b
coord = Coordinates()
fig, ax = plt.subplots(1)
# take first image
image = take_image()
# print('Frame:' + str(dwFrameNo.value))
# pbyteraw[:, :] = np.zeros(np.shape(pbyteraw))
# pbyteraw[100:200, 100:200] = 10
b = ax.imshow(image, cmap='jet')
if inner_rectangle:
# print("draw inner rectangle")
# print('inner_rectangle:', inner_rectangle)
ax.add_patch(patch.Rectangle((inner_rectangle.x1, inner_rectangle.y1),
inner_rectangle.x2-inner_rectangle.x1,
inner_rectangle.y2-inner_rectangle.y1,
linewidth=2, edgecolor='r', facecolor='none'))
ax.text(0, 100, 'Draw the OUTER rectangle, then press [p] to continue\n'
'Press [r] to refresh image', color='black', backgroundcolor='yellow')
else:
ax.text(0, 100, 'Draw the INNER rectangle, then press [p] to continue\n'
'Press [r] to refresh image', color='black', backgroundcolor='yellow')
toggle_selector.rs = RectangleSelector(ax, line_select_callback,
drawtype='box', useblit=False, button=[1],
minspanx=5, minspany=5, spancoords='pixels',
interactive=True)
plt.connect('key_press_event', toggle_selector)
plt.show()
def take_image():
# print('image taken')
mydll.StTrg_TakeRawSnapShot(hCamera, pbyteraw.ctypes.data_as(POINTER(c_int16)),
dwBufferSize, pointer(dwNumberOfByteTrans), pointer(dwFrameNo), dwMilliseconds)
image = np.rot90(pbyteraw, 1)
# print("max signal: ", np.max(image))
# print('max:', np.max(image))
# image = np.zeros((1600, 1200))
return image
def draw_inner_and_outer():
global b
fig, ax = plt.subplots(1)
# take first image
image = take_image()
# print('Frame:' + str(dwFrameNo.value))
b = ax.imshow(image, cmap='jet')
if inner_rectangle:
ax.add_patch(patch.Rectangle((inner_rectangle.x1, inner_rectangle.y1),
inner_rectangle.x2 - inner_rectangle.x1,
inner_rectangle.y2 - inner_rectangle.y1,
linewidth=2, edgecolor='r', facecolor='none'))
if outer_rectangle:
ax.add_patch(patch.Rectangle((outer_rectangle.x1, outer_rectangle.y1),
outer_rectangle.x2 - outer_rectangle.x1,
outer_rectangle.y2 - outer_rectangle.y1,
linewidth=2, edgecolor='y', facecolor='none'))
# print('final thing')
ax.text(0, 1100, 'INNER', color='r', backgroundcolor='white')
ax.text(0, 1180, 'OUTER', color='y', backgroundcolor='white')
# print('final thing')
ax.text(0, 100, 'Press [y] to continue\n'
'Press [n] to start over\n'
'Press [r] to refresh image', color='black', backgroundcolor='yellow')
plt.connect('key_press_event', goodorbad)
plt.show()
def get_p_number():
return random.random() * 2 * np.pi*2
def params_to_daz(wl_send, phi_send):
home = os.getcwd()
os.chdir(r'\\CREOL-FAST-01\data')
if write_dazzler:
with open('pythonwavefile.txt', 'w') as file:
file.write('phase=2\n#phase')
i = 0
while i < len(wl_send):
file.write('\n')
file.write("{:.6f}".format(wl_send[i]))
file.write('\t')
file.write("{:.6f}".format(phi_send[i]))
i += 1
# print('writing to wavefile')
# time.sleep(0.05)
with open('request.txt', 'w') as file:
proj = r'C:\dazzler\data\pythonwavefile.txt'
file.write(proj)
file.flush()
time.sleep(0.05)
### worked
# fileh = open('request.txt', 'w+')
# proj = r'C:\dazzler\data\pythonwavefile.txt'
# fileh.write(proj)
# time.sleep(0.05)
# print('writing request')
# print(fileh.read())
# time.sleep(0.05)
# fileh.close()
# print('writing request.txt')
os.chdir(home)
time.sleep(1)
def evalOneMax(individual):
global highest_quality
global first_photo_taken
# the goal ('fitness') function to be maximized
# print('\n EVALUATING \n')
# calculate phi_send
phi_nodes = individual[:]
phi_func = interp1d(wavelength_nodes, phi_nodes, kind='cubic')
# print('individual: ', individual)
# print('wavelength nodes: ', wavelength_nodes)
# send parameters to dazzler
params_to_daz(wl_send=wavelength, phi_send=phi_func(wavelength))
# take image
image = take_image()
# calculate ratio
inner = image[int(inner_rectangle.y1):int(inner_rectangle.y2), int(inner_rectangle.x1):int(inner_rectangle.x2)]
outer = image[int(outer_rectangle.y1):int(outer_rectangle.y2), int(outer_rectangle.x1):int(outer_rectangle.x2)]
ratio = inner.sum() / outer.sum()
# plot image
ax2.cla()
ax2.plot(wavelength, phi_func(wavelength))
ax2.plot(wavelength_nodes, phi_nodes, 'ro')
ax2.set_title('Applied Phase')
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel('phi [rad]')
ax2.set_xlabel('wavelength [nm]')
ax2.set_xlim(lambdamin, lambdamax)
ax1.cla()
ax1.imshow(image, cmap='jet')
ax1.set_title('Camera Image')
ax1.set_ylabel('y pixel')
ax1.set_xlabel('x pixel')
ax1.text(0, 100, 'Ratio: ' + str(np.round(ratio, 5)), color='black', backgroundcolor='yellow')
ax1.add_patch(patch.Rectangle((inner_rectangle.x1, inner_rectangle.y1),
inner_rectangle.x2 - inner_rectangle.x1,
inner_rectangle.y2 - inner_rectangle | global inner_rectangle
global outer_rectangle
# print('next')
# print(coord.x1)
# print(coord.y1)
# print(coord.x2)
# print(coord.x2)
if inner_rectangle:
# draw outer rectangle
# print('coord', coord.x1)
if coord.x1:
outer_rectangle = Coordinates()
outer_rectangle.x1 = coord.x1
outer_rectangle.y1 = coord.y1
outer_rectangle.x2 = coord.x2
outer_rectangle.y2 = coord.y2
else:
# draw inner rectangle:
if coord.x1: | identifier_body |
|
mainga.py | = open('request.txt', 'w+')
# proj = r'C:\dazzler\data\pythonwavefile.txt'
# fileh.write(proj)
# time.sleep(0.05)
# print('writing request')
# print(fileh.read())
# time.sleep(0.05)
# fileh.close()
# print('writing request.txt')
os.chdir(home)
time.sleep(1)
def evalOneMax(individual):
global highest_quality
global first_photo_taken
# the goal ('fitness') function to be maximized
# print('\n EVALUATING \n')
# calculate phi_send
phi_nodes = individual[:]
phi_func = interp1d(wavelength_nodes, phi_nodes, kind='cubic')
# print('individual: ', individual)
# print('wavelength nodes: ', wavelength_nodes)
# send parameters to dazzler
params_to_daz(wl_send=wavelength, phi_send=phi_func(wavelength))
# take image
image = take_image()
# calculate ratio
inner = image[int(inner_rectangle.y1):int(inner_rectangle.y2), int(inner_rectangle.x1):int(inner_rectangle.x2)]
outer = image[int(outer_rectangle.y1):int(outer_rectangle.y2), int(outer_rectangle.x1):int(outer_rectangle.x2)]
ratio = inner.sum() / outer.sum()
# plot image
ax2.cla()
ax2.plot(wavelength, phi_func(wavelength))
ax2.plot(wavelength_nodes, phi_nodes, 'ro')
ax2.set_title('Applied Phase')
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel('phi [rad]')
ax2.set_xlabel('wavelength [nm]')
ax2.set_xlim(lambdamin, lambdamax)
ax1.cla()
ax1.imshow(image, cmap='jet')
ax1.set_title('Camera Image')
ax1.set_ylabel('y pixel')
ax1.set_xlabel('x pixel')
ax1.text(0, 100, 'Ratio: ' + str(np.round(ratio, 5)), color='black', backgroundcolor='yellow')
ax1.add_patch(patch.Rectangle((inner_rectangle.x1, inner_rectangle.y1),
inner_rectangle.x2 - inner_rectangle.x1,
inner_rectangle.y2 - inner_rectangle.y1,
linewidth=2, edgecolor='r', facecolor='none'))
ax1.add_patch(patch.Rectangle((outer_rectangle.x1, outer_rectangle.y1),
outer_rectangle.x2 - outer_rectangle.x1,
outer_rectangle.y2 - outer_rectangle.y1,
linewidth=2, edgecolor='y', facecolor='none'))
# calculate uniformity
ax_uni[0].cla()
ax_uni[0].imshow(image, cmap='jet')
ax_uni[0].text(0, 100, 'Ratio: ' + str(np.round(ratio, 5)), color='black', backgroundcolor='yellow')
ax_uni[0].add_patch(patch.Rectangle((inner_rectangle.x1, inner_rectangle.y1),
inner_rectangle.x2 - inner_rectangle.x1,
inner_rectangle.y2 - inner_rectangle.y1,
linewidth=2, edgecolor='r', facecolor='none'))
image_proc = image[:, :]
center = image_proc[int(inner_rectangle.y1):int(inner_rectangle.y2), int(inner_rectangle.x1):int(inner_rectangle.x2)]
center_1d = np.sum(center, 1)
ref_signal = np.max(center_1d) * np.ones_like(center_1d)
delta_I = np.abs(ref_signal - center_1d)
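# Gaussian weighting: a row whose summed intensity equals the peak row gets weight 1.0,
# decaying with deviation; beta sets the tolerance as a fraction of the peak intensity.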
beta = 0.5
alpha = beta * np.max(center_1d)
uni_vals = np.exp(- (delta_I**2 / alpha**2))
# delta I
ax_uni[1].cla()
ax_uni[1].plot(ref_signal)
ax_uni[1].plot(center_1d)
# uniformity
ax_uni[2].cla()
ax_uni[2].plot(uni_vals)
uniformity = (1/len(uni_vals)) * np.sum(uni_vals)
ax_uni[2].text(0.5, 0.5, 'Uniformity: ' + str(np.round(uniformity, 5)),
color='black', backgroundcolor='yellow', transform=ax_uni[2].transAxes)
ax_uni[1].text(0, 0.2, 'Uniformity * ratio: ' + str(np.round(uniformity * ratio, 5)),
color='black', backgroundcolor='yellow', transform=ax_uni[1].transAxes)
# print(uniformity * ratio)
if ratio * uniformity > highest_quality:
# print('new best: ', ratio * uniformity, 'saving image')
np.save("best_quality", image)
highest_quality = ratio * uniformity
if not first_photo_taken:
# print("taking first photo")
np.save("initial", image)
first_photo_taken = True
plt.show()
plt.pause(0.001)
return (ratio * uniformity),
# return sum(individual),
def setup_ga():
global toolbox
global creator
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
# Attribute generator
# define 'attr_bool' to be an attribute ('gene') which corresponds to
# a random phase value sampled uniformly from [0, 4*pi) via get_p_number()
# toolbox.register("attr_bool", random.randint, 0, 10)
toolbox.register("attr_bool", get_p_number)
# Structure initializers
# define 'individual' to be an individual
# consisting of number_of_nodes 'attr_bool' elements ('genes')
toolbox.register("individual", tools.initRepeat, creator.Individual,
toolbox.attr_bool, number_of_nodes)
# define the population to be a list of individuals
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# ----------
# Operator registration
# ----------
# register the goal / fitness function
toolbox.register("evaluate", evalOneMax)
# register the crossover operator
toolbox.register("mate", tools.cxTwoPoint)
# register a mutation operator that shuffles the genes of an individual,
# with an independent shuffle probability of 0.2 per gene
toolbox.register("mutate", tools.mutShuffleIndexes, indpb=0.2)
# operator for selecting individuals for breeding the next
# generation: each individual of the current generation
# is replaced by the 'fittest' (best) of four individuals
# drawn randomly from the current generation (tournament size 4).
toolbox.register("select", tools.selTournament, tournsize=4)
def run_ga():
# random.seed(64)
# create an initial population of population_size individuals (where
# each individual is a list of random phase values, one per node)
pop = toolbox.population(n=population_size)
# CXPB is the probability with which two individuals
# are crossed
#
# MUTPB is the probability for mutating an individual
CXPB, MUTPB, MUTPB2 = 0.2, 0.2, 0.5
# CXPB, MUTPB, MUTPB2 = 0, 0, 1
print("Start of evolution")
# Evaluate the entire population
# print(pop)
fitnesses = list(map(toolbox.evaluate, pop))
for ind, fit in zip(pop, fitnesses):
# print(ind)
# print(fit)
ind.fitness.values = fit
# print('initial population: ', pop)
# for ind in pop:
# print(max(ind))
print(" Evaluated %i individuals" % len(pop))
# Extracting all the fitnesses of
fits = [ind.fitness.values[0] for ind in pop]
# Variable keeping track of the number of generations
g = 0
# Begin the evolution
# while max(fits) < 100 and g < 100:
while g <= generations:
# A new generation
g = g + 1
print("-- Generation %i --" % g)
# Select the next generation individuals
# print('\npop: ', pop)
# print('fitnesses: ', fitnesses)
offspring = toolbox.select(pop, len(pop))
# Clone the selected individuals
offspring = list(map(toolbox.clone, offspring))
# Apply crossover and mutation on the offspring
for child1, child2 in zip(offspring[::2], offspring[1::2]):
# cross two individuals with probability CXPB
if random.random() < CXPB:
toolbox.mate(child1, child2)
# fitness values of the children
# must be recalculated later
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
# mutate an individual with probability MUTPB
if random.random() < MUTPB:
| toolbox.mutate(mutant)
del mutant.fitness.values | conditional_block |
|
mainga.py | ():
global inner_rectangle
global outer_rectangle
# print('next')
# print(coord.x1)
# print(coord.y1)
# print(coord.x2)
# print(coord.x2)
if inner_rectangle:
# draw outer rectangle
# print('coord', coord.x1)
if coord.x1:
outer_rectangle = Coordinates()
outer_rectangle.x1 = coord.x1
outer_rectangle.y1 = coord.y1
outer_rectangle.x2 = coord.x2
outer_rectangle.y2 = coord.y2
else:
# draw inner rectangle:
if coord.x1:
inner_rectangle = Coordinates()
inner_rectangle.x1 = coord.x1
inner_rectangle.y1 = coord.y1
inner_rectangle.x2 = coord.x2
inner_rectangle.y2 = coord.y2
plt.close()
def toggle_selector(event):
# print(' Key pressed.')
if event.key in ['p'] and toggle_selector.rs.active:
# print(' RectangleSelector deactivated.')
toggle_selector.rs.set_active(False)
toggle_selector.rs.set_visible(False)
after_selection()
if event.key in ['r'] and toggle_selector.rs.active:
image = take_image()
b.set_data(image)
# print('new image')
# print('Frame:' + str(dwFrameNo.value))
plt.pause(0.001)
def goodorbad(event):
global inner_rectangle
global outer_rectangle
if event.key in ['y']:
# print('good')
plt.close()
if event.key in ['n']:
# print('bad')
plt.close()
inner_rectangle = None
outer_rectangle = None
get_rectangle()
get_rectangle()
draw_inner_and_outer()
if event.key in ['r']:
# print('refresh')
image = take_image()
b.set_data(image)
# print('new image')
# print('Frame:' + str(dwFrameNo.value))
plt.pause(0.001)
def line_select_callback(eclick, erelease):
global coord
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
# print('x1: ' + str(x1) + ' y1: ' + str(y1))
# print('x2: ' + str(x2) + ' y2: ' + str(y2))
coord.x1 = x1
coord.x2 = x2
coord.y1 = y1
coord.y2 = y2
def get_rectangle():
global coord
global b
coord = Coordinates()
fig, ax = plt.subplots(1)
# take first image
image = take_image()
# print('Frame:' + str(dwFrameNo.value))
# pbyteraw[:, :] = np.zeros(np.shape(pbyteraw))
# pbyteraw[100:200, 100:200] = 10
b = ax.imshow(image, cmap='jet')
if inner_rectangle:
# print("draw inner rectangle")
# print('inner_rectangle:', inner_rectangle)
ax.add_patch(patch.Rectangle((inner_rectangle.x1, inner_rectangle.y1),
inner_rectangle.x2-inner_rectangle.x1,
inner_rectangle.y2-inner_rectangle.y1,
linewidth=2, edgecolor='r', facecolor='none'))
ax.text(0, 100, 'Draw the OUTER rectangle, then press [p] to continue\n'
'Press [r] to refresh image', color='black', backgroundcolor='yellow')
else:
ax.text(0, 100, 'Draw the INNER rectangle, then press [p] to continue\n'
'Press [r] to refresh image', color='black', backgroundcolor='yellow')
toggle_selector.rs = RectangleSelector(ax, line_select_callback,
drawtype='box', useblit=False, button=[1],
minspanx=5, minspany=5, spancoords='pixels',
interactive=True)
plt.connect('key_press_event', toggle_selector)
plt.show()
def take_image():
# print('image taken')
mydll.StTrg_TakeRawSnapShot(hCamera, pbyteraw.ctypes.data_as(POINTER(c_int16)),
dwBufferSize, pointer(dwNumberOfByteTrans), pointer(dwFrameNo), dwMilliseconds)
image = np.rot90(pbyteraw, 1)
# print("max signal: ", np.max(image))
# print('max:', np.max(image))
# image = np.zeros((1600, 1200))
return image
def draw_inner_and_outer():
global b
fig, ax = plt.subplots(1)
# take first image
image = take_image()
# print('Frame:' + str(dwFrameNo.value))
b = ax.imshow(image, cmap='jet')
if inner_rectangle:
ax.add_patch(patch.Rectangle((inner_rectangle.x1, inner_rectangle.y1),
inner_rectangle.x2 - inner_rectangle.x1,
inner_rectangle.y2 - inner_rectangle.y1,
linewidth=2, edgecolor='r', facecolor='none'))
if outer_rectangle:
ax.add_patch(patch.Rectangle((outer_rectangle.x1, outer_rectangle.y1),
outer_rectangle.x2 - outer_rectangle.x1,
outer_rectangle.y2 - outer_rectangle.y1,
linewidth=2, edgecolor='y', facecolor='none'))
# print('final thing')
ax.text(0, 1100, 'INNER', color='r', backgroundcolor='white')
ax.text(0, 1180, 'OUTER', color='y', backgroundcolor='white')
# print('final thing')
ax.text(0, 100, 'Press [y] to continue\n'
'Press [n] to start over\n'
'Press [r] to refresh image', color='black', backgroundcolor='yellow')
plt.connect('key_press_event', goodorbad)
plt.show()
def get_p_number():
return random.random() * 2 * np.pi*2
def params_to_daz(wl_send, phi_send):
home = os.getcwd()
os.chdir(r'\\CREOL-FAST-01\data')
if write_dazzler:
with open('pythonwavefile.txt', 'w') as file:
file.write('phase=2\n#phase')
i = 0
while i < len(wl_send):
file.write('\n')
file.write("{:.6f}".format(wl_send[i]))
file.write('\t')
file.write("{:.6f}".format(phi_send[i]))
i += 1
# print('writing to wavefile')
# time.sleep(0.05)
with open('request.txt', 'w') as file:
proj = r'C:\dazzler\data\pythonwavefile.txt'
file.write(proj)
file.flush()
time.sleep(0.05)
### worked
# fileh = open('request.txt', 'w+')
# proj = r'C:\dazzler\data\pythonwavefile.txt'
# fileh.write(proj)
# time.sleep(0.05)
# print('writing request')
# print(fileh.read())
# time.sleep(0.05)
# fileh.close()
# print('writing request.txt')
os.chdir(home)
time.sleep(1)
def evalOneMax(individual):
global highest_quality
global first_photo_taken
# the goal ('fitness') function to be maximized
# print('\n EVALUATING \n')
# calculate phi_send
phi_nodes = individual[:]
phi_func = interp1d(wavelength_nodes, phi_nodes, kind='cubic')
# print('individual: ', individual)
# print('wavelength nodes: ', wavelength_nodes)
# send parameters to dazzler
params_to_daz(wl_send=wavelength, phi_send=phi_func(wavelength))
# take image
image = take_image()
# calculate ratio
inner = image[int(inner_rectangle.y1):int(inner_rectangle.y2), int(inner_rectangle.x1):int(inner_rectangle.x2)]
outer = image[int(outer_rectangle.y1):int(outer_rectangle.y2), int(outer_rectangle.x1):int(outer_rectangle.x2)]
ratio = inner.sum() / outer.sum()
# plot image
ax2.cla()
ax2.plot(wavelength, phi_func(wavelength))
ax2.plot(wavelength_nodes, phi_nodes, 'ro')
ax2.set_title('Applied Phase')
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel('phi [rad]')
ax2.set_xlabel('wavelength [nm]')
ax2.set_xlim(lambdamin, lambdamax)
ax1.cla()
ax1.imshow(image, cmap='jet')
ax1.set_title('Camera Image')
ax1.set_ylabel('y pixel')
ax1.set_xlabel('x pixel')
ax1.text(0, 100, 'Ratio: ' + str(np.round(ratio, 5)), color='black', backgroundcolor='yellow')
ax1.add_patch(patch.Rectangle((inner_rectangle.x1, inner_rectangle.y1),
inner_rectangle.x2 - inner_rectangle.x1,
inner_rectangle.y | after_selection | identifier_name |
|
offline.js | VVMxEzARBgNVBAgTCldhc2hpbmd0
// SIG // b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1p
// SIG // Y3Jvc29mdCBDb3Jwb3JhdGlvbjEhMB8GA1UEAxMYTWlj
// SIG // cm9zb2Z0IFRpbWUtU3RhbXAgUENBMB4XDTEyMDEwOTIy
// SIG // MjU1OFoXDTEzMDQwOTIyMjU1OFowgbMxCzAJBgNVBAYT
// SIG // AlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQH
// SIG // EwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29y
// SIG // cG9yYXRpb24xDTALBgNVBAsTBE1PUFIxJzAlBgNVBAsT
// SIG // Hm5DaXBoZXIgRFNFIEVTTjpGNTI4LTM3NzctOEE3NjEl
// SIG // MCMGA1UEAxMcTWljcm9zb2Z0IFRpbWUtU3RhbXAgU2Vy
// SIG // dmljZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
// SIG // ggEBAJbsjkdNVMJclYDXTgs9v5dDw0vjYGcRLwFNDNjR
// SIG // Ri8QQN4LpFBSEogLQ3otP+5IbmbHkeYDym7sealqI5vN
// SIG // Yp7NaqQ/56ND/2JHobS6RPrfQMGFVH7ooKcsQyObUh8y
// SIG // NfT+mlafjWN3ezCeCjOFchvKSsjMJc3bXREux7CM8Y9D
// SIG // SEcFtXogC+Xz78G69LPYzTiP+yGqPQpthRfQyueGA8Az
// SIG // g7UlxMxanMTD2mIlTVMlFGGP+xvg7PdHxoBF5jVTIzZ3
// SIG // yrDdmCs5wHU1D92BTCE9djDFsrBlcylIJ9jC0rCER7t4
// SIG // utV0A97XSxn3U9542ob3YYgmM7RHxqBUiBUrLHUCAwEA
// SIG // AaOCAQkwggEFMB0GA1UdDgQWBBQv6EbIaNNuT7Ig0N6J
// SIG // TvFH7kjB8jAfBgNVHSMEGDAWgBQjNPjZUkZwCu1A+3b7
// SIG // syuwwzWzDzBUBgNVHR8ETTBLMEmgR6BFhkNodHRwOi8v
// SIG // Y3JsLm1pY3Jvc29mdC5jb20vcGtpL2NybC9wcm9kdWN0
// SIG // cy9NaWNyb3NvZnRUaW1lU3RhbXBQQ0EuY3JsMFgGCCsG
// SIG // AQUFBwEBBEwwSjBIBggrBgEFBQcwAoY8aHR0cDovL3d3
// SIG // dy5taWNyb3NvZnQuY29tL3BraS9jZXJ0cy9NaWNyb3Nv
// SIG // ZnRUaW1lU3RhbXBQQ0EuY3J0MBMGA1UdJQQMMAoGCCsG
// SIG // AQUFBwMIMA0GCSqGSIb3DQEBBQUAA4IBAQBz/30unc2N
// SIG // iCt8feNeFXHpaGLwCLZDVsRcSi1o2PlIEZHzEZyF7BLU
// SIG // VKB1qTihWX917sb1NNhUpOLQzHyXq5N1MJcHHQRTLDZ/
// SIG // f/FAHgybgOISCiA6McAHdWfg+jSc7Ij7VxzlWGIgkEUv
// SIG // XUWpyI6zfHJtECfFS9hvoqgSs201I2f6LNslLbldsR4F
// SIG // 50MoPpwFdnfxJd4FRxlt3kmFodpKSwhGITWodTZMt7MI
// SIG // qt+3K9m+Kmr93zUXzD8Mx90Gz06UJGMgCy4krl9DRBJ6
// SIG // XN0326RFs5E6Eld940fGZtPPnEZW9EwHseAMqtX21Tyi
// SIG // 4LXU+Bx+BFUQaxj0kc1Rp5VlMIIFvDCCA6SgAwIBAgIK
// SIG // YTMmGgAAAAAAMTANBgkqhkiG9w0BAQUFADBfMRMwEQYK
// SIG // CZImiZPyLGQBGRYDY29tMRkwFwYKCZImiZPyLGQBGRYJ
// SIG // bWljcm9zb2Z0MS0wKwYDVQQDEyRNaWNyb3NvZnQgUm9v
// SIG // dCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTAwODMx
// SIG // MjIxOTMyWhcNMjAwODMxMjIyOTMyWjB5MQswCQYDVQQG
// SIG // EwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UE
// SIG // BxMHUmVkbW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENv
// SIG // cnBvcmF0aW9uMSMwIQYDVQQDExpNaWNyb3NvZnQgQ29k
// SIG // ZSBTaWduaW5nIFBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
// SIG // ggEPADCCAQoCggEBALJyWVwZMGS/HZpgICBCmXZTbD4b
// SIG // 1m/My/Hqa/6XFhDg3zp0gxq3L6Ay7P/ewkJOI9VyANs1
// SIG // VwqJyq4gSfTwaKxNS42lvXlLcZtHB9r9Jd+ddYjPqnNE
// SIG // f9eB2/O98jakyVxF3K+tPeAoaJcap6Vyc1bxF5Tk/TWU
// SIG // cqDWdl8ed0WDhTgW0HNbBbpnUo2lsmkv2hkL/pJ0KeJ2
// SIG // L1TdFDBZ+NKNYv3LyV9GMVC5JxPkQDDPcikQKCLHN049
// SIG // oDI9kM2hOAaFXE5WgigqBTK3S9dPY+fSLWLxRT3nrAgA
// SIG // 9kahntFbjCZT6HqqSvJGzzc8OJ60d1ylF56NyxGPVjzB
// SIG // rAlfA9MCAwEAAaOCAV4wggFaMA8GA1UdEwEB/wQFMAMB
// SIG // Af8wHQYDVR0OBBYEFMsR6MrStBZYAck3LjMWFrlMmgof
// SIG // MAsGA1UdDwQEAwIBhjASBgkrBgEEAYI3FQEEBQIDAQAB
// SIG // MCMGCSsGAQQBgjcVAgQWBBT90TFO0yaKleGYYDuoMW+m | // SIG // PLzYLTAZBgkrBgEEAYI3FAIEDB4KAFMAdQBiAEMAQTAf | random_line_split |
|
offline.js | };
var showLinks = function () {
var allNavLinks = document.querySelectorAll("nav.page-nav a");
for (var i = 0; i < allNavLinks.length; i++) {
allNavLinks[i].style.display = "";
}
};
if (!navigator.onLine) {
hideLinksThatRequireOnline();
}
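// Keep the nav links in sync as connectivity toggles.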
document.body.onoffline = hideLinksThatRequireOnline;
document.body.ononline = showLinks;
// Error fetching appcache.manifest: so we are probably offline
applicationCache.addEventListener("error", hideLinksThatRequireOnline, false);
} ());
// SIG // Begin signature block
// SIG // MIIaZgYJKoZIhvcNAQcCoIIaVzCCGlMCAQExCzAJBgUr
// SIG // DgMCGgUAMGcGCisGAQQBgjcCAQSgWTBXMDIGCisGAQQB
// SIG // gjcCAR4wJAIBAQQQEODJBs441BGiowAQS9NQkAIBAAIB
// SIG // AAIBAAIBAAIBADAhMAkGBSsOAwIaBQAEFF+G8yNjMx2D
// SIG // c+ohYQ/rvroogpMsoIIVNjCCBKkwggORoAMCAQICEzMA
// SIG // AACIWQ48UR/iamcAAQAAAIgwDQYJKoZIhvcNAQEFBQAw
// SIG // eTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0
// SIG // b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1p
// SIG // Y3Jvc29mdCBDb3Jwb3JhdGlvbjEjMCEGA1UEAxMaTWlj
// SIG // cm9zb2Z0IENvZGUgU2lnbmluZyBQQ0EwHhcNMTIwNzI2
// SIG // MjA1MDQxWhcNMTMxMDI2MjA1MDQxWjCBgzELMAkGA1UE
// SIG // BhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNV
// SIG // BAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBD
// SIG // b3Jwb3JhdGlvbjENMAsGA1UECxMETU9QUjEeMBwGA1UE
// SIG // AxMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMIIBIjANBgkq
// SIG // hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAs3R00II8h6ea
// SIG // 1I6yBEKAlyUu5EHOk2M2XxPytHiYgMYofsyKE+89N4w7
// SIG // CaDYFMVcXtipHX8BwbOYG1B37P7qfEXPf+EhDsWEyp8P
// SIG // a7MJOLd0xFcevvBIqHla3w6bHJqovMhStQxpj4TOcVV7
// SIG // /wkgv0B3NyEwdFuV33fLoOXBchIGPfLIVWyvwftqFifI
// SIG // 9bNh49nOGw8e9OTNTDRsPkcR5wIrXxR6BAf11z2L22d9
// SIG // Vz41622NAUCNGoeW4g93TIm6OJz7jgKR2yIP5dA2qbg3
// SIG // RdAq/JaNwWBxM6WIsfbCBDCHW8PXL7J5EdiLZWKiihFm
// SIG // XX5/BXpzih96heXNKBDRPQIDAQABo4IBHTCCARkwEwYD
// SIG // VR0lBAwwCgYIKwYBBQUHAwMwHQYDVR0OBBYEFCZbPltd
// SIG // ll/i93eIf15FU1ioLlu4MA4GA1UdDwEB/wQEAwIHgDAf
// SIG // BgNVHSMEGDAWgBTLEejK0rQWWAHJNy4zFha5TJoKHzBW
// SIG // BgNVHR8ETzBNMEugSaBHhkVodHRwOi8vY3JsLm1pY3Jv
// SIG // c29mdC5jb20vcGtpL2NybC9wcm9kdWN0cy9NaWNDb2RT
// SIG // aWdQQ0FfMDgtMzEtMjAxMC5jcmwwWgYIKwYBBQUHAQEE
// SIG // TjBMMEoGCCsGAQUFBzAChj5odHRwOi8vd3d3Lm1pY3Jv
// SIG // c29mdC5jb20vcGtpL2NlcnRzL01pY0NvZFNpZ1BDQV8w
// SIG // OC0zMS0yMDEwLmNydDANBgkqhkiG9w0BAQUFAAOCAQEA
// SIG // D95ASYiR0TE3o0Q4abJqK9SR+2iFrli7HgyPVvqZ18qX
// SIG // J0zohY55aSzkvZY/5XBml5UwZSmtxsqs9Q95qGe/afQP
// SIG // l+MKD7/ulnYpsiLQM8b/i0mtrrL9vyXq7ydQwOsZ+Bpk
// SIG // aqDhF1mv8c/sgaiJ6LHSFAbjam10UmTalpQqXGlrH+0F
// SIG // mRrc6GWqiBsVlRrTpFGW/VWV+GONnxQMsZ5/SgT/w2at
// SIG // Cq+upN5j+vDqw7Oy64fbxTittnPSeGTq7CFbazvWRCL0
// SIG // gVKlK0MpiwyhKnGCQsurG37Upaet9973RprOQznoKlPt
// SIG // z0Dkd4hCv0cW4KU2au+nGo06PTME9iUgIzCCBLowggOi
// SIG // oAMCAQICCmECjkIAAAAAAB8wDQYJKoZIhvcNAQEFBQAw
// SIG // dzELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0
// SIG // b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1p
// SIG // Y3Jvc29mdCBDb3Jwb3JhdGlvbjEhMB8GA1UEAxMYTWlj
// SIG // cm9zb2Z0IFRpbWUtU3RhbXAgUENBMB4XDTEyMDEwOTIy
// SIG // MjU1OFoXDTEzMDQwOTIyMjU1OFowgbMxCzAJBgNVBAYT
// SIG // AlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQH
// SIG // EwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29y
// SIG // cG9yYXRpb24xDTALBgNVBAsTBE1PUFIxJzAlBgNVBAsT
// SIG // Hm5DaXBoZXIgRFNFIEVTTjpGNTI4LTM3NzctOEE3NjEl
// SIG // MCMGA1UEAxMcTWljcm9zb2Z0IFRpbWUtU3RhbXAgU2Vy
// SIG // dmljZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
// SIG // ggEBAJbsjkdNVMJclYDXTgs9v5dDw0 | var href = allNavLinks[i].getAttribute("href");
if (!offlinePages.test(href)) {
allNavLinks[i].style.display = "none";
}
}
| conditional_block |
|
postgres.rs | };
use sql_schema_describer::{DescriberErrorKind, SqlSchema, SqlSchemaDescriberBackend};
use std::collections::HashMap;
use url::Url;
use user_facing_errors::{
common::DatabaseDoesNotExist, introspection_engine::DatabaseSchemaInconsistent, migration_engine, KnownError,
UserFacingError,
};
#[derive(Debug)]
pub(crate) struct PostgresFlavour {
pub(crate) url: PostgresUrl,
features: BitFlags<MigrationFeature>,
}
impl PostgresFlavour {
pub fn new(url: PostgresUrl, features: BitFlags<MigrationFeature>) -> Self {
Self { url, features }
}
pub(crate) fn schema_name(&self) -> &str {
self.url.schema()
}
}
#[async_trait::async_trait]
impl SqlFlavour for PostgresFlavour {
#[tracing::instrument(skip(database_str))]
async fn create_database(&self, database_str: &str) -> ConnectorResult<String> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = self.url.dbname();
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let query = format!("CREATE DATABASE \"{}\"", db_name);
let mut database_already_exists_error = None;
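// If the database already exists, stash the error, still create the schema below,
// and only then surface it to the caller.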
match conn.raw_cmd(&query).await {
Ok(_) => (),
Err(err) if matches!(err.kind(), QuaintKind::DatabaseAlreadyExists { .. }) => {
database_already_exists_error = Some(err)
}
Err(err) if matches!(err.kind(), QuaintKind::UniqueConstraintViolation { .. }) => {
database_already_exists_error = Some(err)
}
Err(err) => return Err(err.into()),
};
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\";", &self.schema_name());
conn.raw_cmd(&schema_sql).await?;
if let Some(err) = database_already_exists_error {
return Err(err.into());
}
Ok(db_name.to_owned())
}
async fn create_imperative_migrations_table(&self, connection: &Connection) -> ConnectorResult<()> {
let sql = indoc! {r#"
CREATE TABLE _prisma_migrations (
id VARCHAR(36) PRIMARY KEY NOT NULL,
checksum VARCHAR(64) NOT NULL,
finished_at TIMESTAMPTZ,
migration_name VARCHAR(255) NOT NULL,
logs TEXT,
rolled_back_at TIMESTAMPTZ,
started_at TIMESTAMPTZ NOT NULL DEFAULT now(),
applied_steps_count INTEGER NOT NULL DEFAULT 0
);
"#};
Ok(connection.raw_cmd(sql).await?)
}
async fn describe_schema<'a>(&'a self, connection: &Connection) -> ConnectorResult<SqlSchema> {
sql_schema_describer::postgres::SqlSchemaDescriber::new(connection.quaint().clone())
.describe(connection.connection_info().schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => {
quaint_error_to_connector_error(err, connection.connection_info())
}
e @ DescriberErrorKind::CrossSchemaReference { .. } => {
let err = KnownError::new(DatabaseSchemaInconsistent {
explanation: format!("{}", e),
});
ConnectorError::from(err)
}
})
}
async fn drop_database(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = url.path().trim_start_matches('/').to_owned();
assert!(!db_name.is_empty(), "Database name should not be empty.");
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
conn.raw_cmd(&format!("DROP DATABASE \"{}\"", db_name)).await?;
Ok(())
}
#[tracing::instrument]
async fn ensure_connection_validity(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
let schema_exists_result = connection
.query_raw(
"SELECT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = $1)",
&[schema_name.into()],
)
.await?;
if let Some(true) = schema_exists_result
.get(0)
.and_then(|row| row.at(0).and_then(|value| value.as_bool()))
{
return Ok(());
}
tracing::debug!(
"Detected that the `{schema_name}` schema does not exist on the target database. Attempting to create it.",
schema_name = schema_name,
);
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
async fn qe_setup(&self, database_str: &str) -> ConnectorResult<()> | conn.raw_cmd(&drop_and_recreate_schema).await?;
Ok(())
}
async fn reset(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
connection
.raw_cmd(&format!("DROP SCHEMA \"{}\" CASCADE", schema_name))
.await?;
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
fn sql_family(&self) -> SqlFamily {
SqlFamily::Postgres
}
#[tracing::instrument(skip(self, migrations, connection))]
async fn sql_schema_from_migration_history(
&self,
migrations: &[MigrationDirectory],
connection: &Connection,
) -> ConnectorResult<SqlSchema> {
let database_name = format!("prisma_migrations_shadow_database_{}", uuid::Uuid::new_v4());
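// A fresh UUID per run keeps the shadow-database name unique.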
let create_database = format!("CREATE DATABASE \"{}\"", database_name);
let create_schema = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", self.schema_name());
connection
.raw_cmd(&create_database)
.await
.map_err(ConnectorError::from)
.map_err(|err| err.into_shadow_db_creation_error())?;
let mut temporary_database_url = self.url.url().clone();
temporary_database_url.set_path(&format!("/{}", database_name));
let temporary_database_url = temporary_database_url.to_string();
tracing::debug!("Connecting to temporary database at {}", temporary_database_url);
// We go through the whole process without early return, then clean up
// the temporary database, and only then return the result. This avoids
// leaving shadow databases behind in case of e.g. faulty migrations.
let sql_schema_result = (|| {
async {
let temporary_database = crate::connect(&temporary_database_url).await?;
temporary_database.raw_cmd(&create_schema).await?;
for migration in migrations {
let script = migration.read_migration_script()?;
tracing::debug!(
"Applying migration `{}` to temporary database.",
migration.migration_name()
);
temporary_database
.raw_cmd(&script)
.await
.map_err(ConnectorError::from)
.map_err(|connector_error| {
connector_error.into_migration_does_not_apply_cleanly(migration.migration_name().to_owned())
})?;
}
// the connection to the temporary database is dropped at the end of
// the block.
self.describe_schema(&temporary_database).await
}
})()
.await;
let drop_database = format!("DROP DATABASE IF EXISTS \"{}\"", database_name);
connection.raw_cmd(&drop_database).await?;
sql_schema_result
}
fn features(&self) -> BitFlags<MigrationFeature> {
self.features
}
}
fn strip_schema_param_from_url(url: &mut Url) {
let mut params: HashMap<String, String> = url.query_pairs().into_owned().collect();
params.remove("schema");
let params: Vec<String> = params.into_iter().map(|(k, v)| format!("{}={}", k, v)).collect();
let params: String = params.join("&");
url.set_query(Some(¶ms));
}
/// Try to connect as an admin to a postgres database. We try to pick a default database from which
/// we can create another database.
async fn create_postgres_admin_conn(mut url: Url) -> ConnectorResult<Connection> {
let candidate_default_databases = &["postgres", "template1"];
let mut conn = None;
for database_name in candidate_default_databases | {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let schema = self.url.schema();
let db_name = self.url.dbname();
let query = format!("CREATE DATABASE \"{}\"", db_name);
conn.raw_cmd(&query).await.ok();
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let drop_and_recreate_schema = format!(
"DROP SCHEMA IF EXISTS \"{schema}\" CASCADE;\nCREATE SCHEMA \"{schema}\";",
schema = schema
); | identifier_body |
postgres.rs | };
use sql_schema_describer::{DescriberErrorKind, SqlSchema, SqlSchemaDescriberBackend};
use std::collections::HashMap;
use url::Url;
use user_facing_errors::{
common::DatabaseDoesNotExist, introspection_engine::DatabaseSchemaInconsistent, migration_engine, KnownError,
UserFacingError,
};
#[derive(Debug)]
pub(crate) struct PostgresFlavour {
pub(crate) url: PostgresUrl,
features: BitFlags<MigrationFeature>,
}
impl PostgresFlavour {
pub fn new(url: PostgresUrl, features: BitFlags<MigrationFeature>) -> Self {
Self { url, features }
}
pub(crate) fn schema_name(&self) -> &str {
self.url.schema()
}
}
#[async_trait::async_trait]
impl SqlFlavour for PostgresFlavour {
#[tracing::instrument(skip(database_str))]
async fn create_database(&self, database_str: &str) -> ConnectorResult<String> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = self.url.dbname();
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let query = format!("CREATE DATABASE \"{}\"", db_name);
let mut database_already_exists_error = None;
match conn.raw_cmd(&query).await {
Ok(_) => (),
Err(err) if matches!(err.kind(), QuaintKind::DatabaseAlreadyExists { .. }) => {
database_already_exists_error = Some(err)
}
Err(err) if matches!(err.kind(), QuaintKind::UniqueConstraintViolation { .. }) => {
database_already_exists_error = Some(err)
}
Err(err) => return Err(err.into()),
};
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\";", &self.schema_name());
conn.raw_cmd(&schema_sql).await?;
if let Some(err) = database_already_exists_error {
return Err(err.into());
}
Ok(db_name.to_owned())
}
async fn create_imperative_migrations_table(&self, connection: &Connection) -> ConnectorResult<()> {
let sql = indoc! {r#"
CREATE TABLE _prisma_migrations (
id VARCHAR(36) PRIMARY KEY NOT NULL,
checksum VARCHAR(64) NOT NULL,
finished_at TIMESTAMPTZ,
migration_name VARCHAR(255) NOT NULL,
logs TEXT,
rolled_back_at TIMESTAMPTZ,
started_at TIMESTAMPTZ NOT NULL DEFAULT now(),
applied_steps_count INTEGER NOT NULL DEFAULT 0
);
"#};
Ok(connection.raw_cmd(sql).await?)
}
async fn describe_schema<'a>(&'a self, connection: &Connection) -> ConnectorResult<SqlSchema> {
sql_schema_describer::postgres::SqlSchemaDescriber::new(connection.quaint().clone())
.describe(connection.connection_info().schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => {
quaint_error_to_connector_error(err, connection.connection_info())
}
e @ DescriberErrorKind::CrossSchemaReference { .. } => {
let err = KnownError::new(DatabaseSchemaInconsistent {
explanation: format!("{}", e),
});
ConnectorError::from(err)
}
})
}
async fn drop_database(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = url.path().trim_start_matches('/').to_owned();
assert!(!db_name.is_empty(), "Database name should not be empty.");
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
conn.raw_cmd(&format!("DROP DATABASE \"{}\"", db_name)).await?;
Ok(())
}
#[tracing::instrument]
async fn ensure_connection_validity(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
let schema_exists_result = connection
.query_raw(
"SELECT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = $1)",
&[schema_name.into()],
)
.await?;
if let Some(true) = schema_exists_result
.get(0)
.and_then(|row| row.at(0).and_then(|value| value.as_bool()))
{
return Ok(());
}
tracing::debug!(
"Detected that the `{schema_name}` schema does not exist on the target database. Attempting to create it.",
schema_name = schema_name,
);
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
async fn qe_setup(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let schema = self.url.schema();
let db_name = self.url.dbname();
let query = format!("CREATE DATABASE \"{}\"", db_name);
conn.raw_cmd(&query).await.ok();
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let drop_and_recreate_schema = format!(
"DROP SCHEMA IF EXISTS \"{schema}\" CASCADE;\nCREATE SCHEMA \"{schema}\";",
schema = schema
);
conn.raw_cmd(&drop_and_recreate_schema).await?;
Ok(())
}
async fn reset(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
connection
.raw_cmd(&format!("DROP SCHEMA \"{}\" CASCADE", schema_name))
.await?;
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
fn sql_family(&self) -> SqlFamily {
SqlFamily::Postgres
}
#[tracing::instrument(skip(self, migrations, connection))]
async fn sql_schema_from_migration_history(
&self,
migrations: &[MigrationDirectory],
connection: &Connection,
) -> ConnectorResult<SqlSchema> {
let database_name = format!("prisma_migrations_shadow_database_{}", uuid::Uuid::new_v4());
let create_database = format!("CREATE DATABASE \"{}\"", database_name);
let create_schema = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", self.schema_name());
connection
.raw_cmd(&create_database)
.await
.map_err(ConnectorError::from)
.map_err(|err| err.into_shadow_db_creation_error())?;
let mut temporary_database_url = self.url.url().clone();
temporary_database_url.set_path(&format!("/{}", database_name));
let temporary_database_url = temporary_database_url.to_string();
tracing::debug!("Connecting to temporary database at {}", temporary_database_url);
// We go through the whole process without early return, then clean up
// the temporary database, and only then return the result. This avoids
// leaving shadow databases behind in case of e.g. faulty migrations.
let sql_schema_result = (|| {
async {
let temporary_database = crate::connect(&temporary_database_url).await?; |
temporary_database.raw_cmd(&create_schema).await?;
for migration in migrations {
let script = migration.read_migration_script()?;
tracing::debug!(
"Applying migration `{}` to temporary database.",
migration.migration_name()
);
temporary_database
.raw_cmd(&script)
.await
.map_err(ConnectorError::from)
.map_err(|connector_error| {
connector_error.into_migration_does_not_apply_cleanly(migration.migration_name().to_owned())
})?;
}
// the connection to the temporary database is dropped at the end of
// the block.
self.describe_schema(&temporary_database).await
}
})()
.await;
let drop_database = format!("DROP DATABASE IF EXISTS \"{}\"", database_name);
connection.raw_cmd(&drop_database).await?;
sql_schema_result
}
fn features(&self) -> BitFlags<MigrationFeature> {
self.features
}
}
fn strip_schema_param_from_url(url: &mut Url) {
let mut params: HashMap<String, String> = url.query_pairs().into_owned().collect();
params.remove("schema");
let params: Vec<String> = params.into_iter().map(|(k, v)| format!("{}={}", k, v)).collect();
let params: String = params.join("&");
url.set_query(Some(¶ms));
}
/// Try to connect as an admin to a postgres database. We try to pick a default database from which
/// we can create another database.
async fn create_postgres_admin_conn(mut url: Url) -> ConnectorResult<Connection> {
let candidate_default_databases = &["postgres", "template1"];
let mut conn = None;
for database_name in candidate_default_d | random_line_split |
|
postgres.rs | };
use sql_schema_describer::{DescriberErrorKind, SqlSchema, SqlSchemaDescriberBackend};
use std::collections::HashMap;
use url::Url;
use user_facing_errors::{
common::DatabaseDoesNotExist, introspection_engine::DatabaseSchemaInconsistent, migration_engine, KnownError,
UserFacingError,
};
#[derive(Debug)]
pub(crate) struct PostgresFlavour {
pub(crate) url: PostgresUrl,
features: BitFlags<MigrationFeature>,
}
impl PostgresFlavour {
pub fn new(url: PostgresUrl, features: BitFlags<MigrationFeature>) -> Self {
Self { url, features }
}
pub(crate) fn schema_name(&self) -> &str {
self.url.schema()
}
}
#[async_trait::async_trait]
impl SqlFlavour for PostgresFlavour {
#[tracing::instrument(skip(database_str))]
async fn create_database(&self, database_str: &str) -> ConnectorResult<String> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = self.url.dbname();
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let query = format!("CREATE DATABASE \"{}\"", db_name);
let mut database_already_exists_error = None;
match conn.raw_cmd(&query).await {
Ok(_) => (),
Err(err) if matches!(err.kind(), QuaintKind::DatabaseAlreadyExists { .. }) => {
database_already_exists_error = Some(err)
}
Err(err) if matches!(err.kind(), QuaintKind::UniqueConstraintViolation { .. }) => {
database_already_exists_error = Some(err)
}
Err(err) => return Err(err.into()),
};
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\";", &self.schema_name());
conn.raw_cmd(&schema_sql).await?;
if let Some(err) = database_already_exists_error {
return Err(err.into());
}
Ok(db_name.to_owned())
}
async fn create_imperative_migrations_table(&self, connection: &Connection) -> ConnectorResult<()> {
let sql = indoc! {r#"
CREATE TABLE _prisma_migrations (
id VARCHAR(36) PRIMARY KEY NOT NULL,
checksum VARCHAR(64) NOT NULL,
finished_at TIMESTAMPTZ,
migration_name VARCHAR(255) NOT NULL,
logs TEXT,
rolled_back_at TIMESTAMPTZ,
started_at TIMESTAMPTZ NOT NULL DEFAULT now(),
applied_steps_count INTEGER NOT NULL DEFAULT 0
);
"#};
Ok(connection.raw_cmd(sql).await?)
}
async fn describe_schema<'a>(&'a self, connection: &Connection) -> ConnectorResult<SqlSchema> {
sql_schema_describer::postgres::SqlSchemaDescriber::new(connection.quaint().clone())
.describe(connection.connection_info().schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => {
quaint_error_to_connector_error(err, connection.connection_info())
}
e @ DescriberErrorKind::CrossSchemaReference { .. } => {
let err = KnownError::new(DatabaseSchemaInconsistent {
explanation: format!("{}", e),
});
ConnectorError::from(err)
}
})
}
async fn drop_database(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = url.path().trim_start_matches('/').to_owned();
assert!(!db_name.is_empty(), "Database name should not be empty.");
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
conn.raw_cmd(&format!("DROP DATABASE \"{}\"", db_name)).await?;
Ok(())
}
#[tracing::instrument]
async fn ensure_connection_validity(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
let schema_exists_result = connection
.query_raw(
"SELECT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = $1)",
&[schema_name.into()],
)
.await?;
if let Some(true) = schema_exists_result
.get(0)
.and_then(|row| row.at(0).and_then(|value| value.as_bool()))
{
return Ok(());
}
tracing::debug!(
"Detected that the `{schema_name}` schema does not exist on the target database. Attempting to create it.",
schema_name = schema_name,
);
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
async fn qe_setup(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let schema = self.url.schema();
let db_name = self.url.dbname();
let query = format!("CREATE DATABASE \"{}\"", db_name);
conn.raw_cmd(&query).await.ok();
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let drop_and_recreate_schema = format!(
"DROP SCHEMA IF EXISTS \"{schema}\" CASCADE;\nCREATE SCHEMA \"{schema}\";",
schema = schema
);
conn.raw_cmd(&drop_and_recreate_schema).await?;
Ok(())
}
async fn | (&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
connection
.raw_cmd(&format!("DROP SCHEMA \"{}\" CASCADE", schema_name))
.await?;
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
fn sql_family(&self) -> SqlFamily {
SqlFamily::Postgres
}
#[tracing::instrument(skip(self, migrations, connection))]
async fn sql_schema_from_migration_history(
&self,
migrations: &[MigrationDirectory],
connection: &Connection,
) -> ConnectorResult<SqlSchema> {
let database_name = format!("prisma_migrations_shadow_database_{}", uuid::Uuid::new_v4());
let create_database = format!("CREATE DATABASE \"{}\"", database_name);
let create_schema = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", self.schema_name());
connection
.raw_cmd(&create_database)
.await
.map_err(ConnectorError::from)
.map_err(|err| err.into_shadow_db_creation_error())?;
let mut temporary_database_url = self.url.url().clone();
temporary_database_url.set_path(&format!("/{}", database_name));
let temporary_database_url = temporary_database_url.to_string();
tracing::debug!("Connecting to temporary database at {}", temporary_database_url);
// We go through the whole process without early return, then clean up
// the temporary database, and only then return the result. This avoids
// leaving shadow databases behind in case of e.g. faulty migrations.
let sql_schema_result = (|| {
async {
let temporary_database = crate::connect(&temporary_database_url).await?;
temporary_database.raw_cmd(&create_schema).await?;
for migration in migrations {
let script = migration.read_migration_script()?;
tracing::debug!(
"Applying migration `{}` to temporary database.",
migration.migration_name()
);
temporary_database
.raw_cmd(&script)
.await
.map_err(ConnectorError::from)
.map_err(|connector_error| {
connector_error.into_migration_does_not_apply_cleanly(migration.migration_name().to_owned())
})?;
}
// the connection to the temporary database is dropped at the end of
// the block.
self.describe_schema(&temporary_database).await
}
})()
.await;
let drop_database = format!("DROP DATABASE IF EXISTS \"{}\"", database_name);
connection.raw_cmd(&drop_database).await?;
sql_schema_result
}
fn features(&self) -> BitFlags<MigrationFeature> {
self.features
}
}
fn strip_schema_param_from_url(url: &mut Url) {
let mut params: HashMap<String, String> = url.query_pairs().into_owned().collect();
params.remove("schema");
let params: Vec<String> = params.into_iter().map(|(k, v)| format!("{}={}", k, v)).collect();
let params: String = params.join("&");
url.set_query(Some(¶ms));
}
/// Try to connect as an admin to a postgres database. We try to pick a default database from which
/// we can create another database.
async fn create_postgres_admin_conn(mut url: Url) -> ConnectorResult<Connection> {
let candidate_default_databases = &["postgres", "template1"];
let mut conn = None;
for database_name in candidate_default_databases | reset | identifier_name |
postgres.rs | };
use sql_schema_describer::{DescriberErrorKind, SqlSchema, SqlSchemaDescriberBackend};
use std::collections::HashMap;
use url::Url;
use user_facing_errors::{
common::DatabaseDoesNotExist, introspection_engine::DatabaseSchemaInconsistent, migration_engine, KnownError,
UserFacingError,
};
#[derive(Debug)]
pub(crate) struct PostgresFlavour {
pub(crate) url: PostgresUrl,
features: BitFlags<MigrationFeature>,
}
impl PostgresFlavour {
pub fn new(url: PostgresUrl, features: BitFlags<MigrationFeature>) -> Self {
Self { url, features }
}
pub(crate) fn schema_name(&self) -> &str {
self.url.schema()
}
}
#[async_trait::async_trait]
impl SqlFlavour for PostgresFlavour {
#[tracing::instrument(skip(database_str))]
async fn create_database(&self, database_str: &str) -> ConnectorResult<String> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = self.url.dbname();
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let query = format!("CREATE DATABASE \"{}\"", db_name);
let mut database_already_exists_error = None;
match conn.raw_cmd(&query).await {
Ok(_) => (),
Err(err) if matches!(err.kind(), QuaintKind::DatabaseAlreadyExists { .. }) => {
database_already_exists_error = Some(err)
}
Err(err) if matches!(err.kind(), QuaintKind::UniqueConstraintViolation { .. }) => |
Err(err) => return Err(err.into()),
};
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let schema_sql = format!("CREATE SCHEMA IF NOT EXISTS \"{}\";", &self.schema_name());
conn.raw_cmd(&schema_sql).await?;
if let Some(err) = database_already_exists_error {
return Err(err.into());
}
Ok(db_name.to_owned())
}
async fn create_imperative_migrations_table(&self, connection: &Connection) -> ConnectorResult<()> {
let sql = indoc! {r#"
CREATE TABLE _prisma_migrations (
id VARCHAR(36) PRIMARY KEY NOT NULL,
checksum VARCHAR(64) NOT NULL,
finished_at TIMESTAMPTZ,
migration_name VARCHAR(255) NOT NULL,
logs TEXT,
rolled_back_at TIMESTAMPTZ,
started_at TIMESTAMPTZ NOT NULL DEFAULT now(),
applied_steps_count INTEGER NOT NULL DEFAULT 0
);
"#};
Ok(connection.raw_cmd(sql).await?)
}
async fn describe_schema<'a>(&'a self, connection: &Connection) -> ConnectorResult<SqlSchema> {
sql_schema_describer::postgres::SqlSchemaDescriber::new(connection.quaint().clone())
.describe(connection.connection_info().schema_name())
.await
.map_err(|err| match err.into_kind() {
DescriberErrorKind::QuaintError(err) => {
quaint_error_to_connector_error(err, connection.connection_info())
}
e @ DescriberErrorKind::CrossSchemaReference { .. } => {
let err = KnownError::new(DatabaseSchemaInconsistent {
explanation: format!("{}", e),
});
ConnectorError::from(err)
}
})
}
async fn drop_database(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
let db_name = url.path().trim_start_matches('/').to_owned();
assert!(!db_name.is_empty(), "Database name should not be empty.");
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
conn.raw_cmd(&format!("DROP DATABASE \"{}\"", db_name)).await?;
Ok(())
}
#[tracing::instrument]
async fn ensure_connection_validity(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
let schema_exists_result = connection
.query_raw(
"SELECT EXISTS(SELECT 1 FROM pg_namespace WHERE nspname = $1)",
&[schema_name.into()],
)
.await?;
if let Some(true) = schema_exists_result
.get(0)
.and_then(|row| row.at(0).and_then(|value| value.as_bool()))
{
return Ok(());
}
tracing::debug!(
"Detected that the `{schema_name}` schema does not exist on the target database. Attempting to create it.",
schema_name = schema_name,
);
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
async fn qe_setup(&self, database_str: &str) -> ConnectorResult<()> {
let mut url = Url::parse(database_str).map_err(|err| ConnectorError::url_parse_error(err, database_str))?;
strip_schema_param_from_url(&mut url);
let conn = create_postgres_admin_conn(url.clone()).await?;
let schema = self.url.schema();
let db_name = self.url.dbname();
let query = format!("CREATE DATABASE \"{}\"", db_name);
conn.raw_cmd(&query).await.ok();
// Now create the schema
url.set_path(&format!("/{}", db_name));
let conn = connect(&url.to_string()).await?;
let drop_and_recreate_schema = format!(
"DROP SCHEMA IF EXISTS \"{schema}\" CASCADE;\nCREATE SCHEMA \"{schema}\";",
schema = schema
);
conn.raw_cmd(&drop_and_recreate_schema).await?;
Ok(())
}
async fn reset(&self, connection: &Connection) -> ConnectorResult<()> {
let schema_name = connection.connection_info().schema_name();
connection
.raw_cmd(&format!("DROP SCHEMA \"{}\" CASCADE", schema_name))
.await?;
connection
.raw_cmd(&format!("CREATE SCHEMA \"{}\"", schema_name))
.await?;
Ok(())
}
fn sql_family(&self) -> SqlFamily {
SqlFamily::Postgres
}
#[tracing::instrument(skip(self, migrations, connection))]
async fn sql_schema_from_migration_history(
&self,
migrations: &[MigrationDirectory],
connection: &Connection,
) -> ConnectorResult<SqlSchema> {
let database_name = format!("prisma_migrations_shadow_database_{}", uuid::Uuid::new_v4());
let create_database = format!("CREATE DATABASE \"{}\"", database_name);
let create_schema = format!("CREATE SCHEMA IF NOT EXISTS \"{}\"", self.schema_name());
connection
.raw_cmd(&create_database)
.await
.map_err(ConnectorError::from)
.map_err(|err| err.into_shadow_db_creation_error())?;
let mut temporary_database_url = self.url.url().clone();
temporary_database_url.set_path(&format!("/{}", database_name));
let temporary_database_url = temporary_database_url.to_string();
tracing::debug!("Connecting to temporary database at {}", temporary_database_url);
// We go through the whole process without early return, then clean up
// the temporary database, and only then return the result. This avoids
// leaving shadow databases behind in case of e.g. faulty migrations.
let sql_schema_result = (|| {
async {
let temporary_database = crate::connect(&temporary_database_url).await?;
temporary_database.raw_cmd(&create_schema).await?;
for migration in migrations {
let script = migration.read_migration_script()?;
tracing::debug!(
"Applying migration `{}` to temporary database.",
migration.migration_name()
);
temporary_database
.raw_cmd(&script)
.await
.map_err(ConnectorError::from)
.map_err(|connector_error| {
connector_error.into_migration_does_not_apply_cleanly(migration.migration_name().to_owned())
})?;
}
// the connection to the temporary database is dropped at the end of
// the block.
self.describe_schema(&temporary_database).await
}
})()
.await;
let drop_database = format!("DROP DATABASE IF EXISTS \"{}\"", database_name);
connection.raw_cmd(&drop_database).await?;
sql_schema_result
}
fn features(&self) -> BitFlags<MigrationFeature> {
self.features
}
}
fn strip_schema_param_from_url(url: &mut Url) {
let mut params: HashMap<String, String> = url.query_pairs().into_owned().collect();
params.remove("schema");
let params: Vec<String> = params.into_iter().map(|(k, v)| format!("{}={}", k, v)).collect();
let params: String = params.join("&");
url.set_query(Some(&params));
}
/// Try to connect as an admin to a postgres database. We try to pick a default database from which
/// we can create another database.
async fn create_postgres_admin_conn(mut url: Url) -> ConnectorResult<Connection> {
let candidate_default_databases = &["postgres", "template1"];
let mut conn = None;
for database_name in candidate_default_d | {
database_already_exists_error = Some(err)
} | conditional_block |
shelllogger.py | new terminals are encountered.
escape_regex is a Regex filter to capture all escape sequences.
Modified from: http://wiki.tcl.tk/9673
"""
# Filter out control characters
# First, handle the backspaces.
for backspace in backspaces:
try:
while True:
ind = buf.index(backspace)
buf = ''.join((buf[0:ind-1],buf[ind+len(backspace):]))
except:
pass
strip_escapes = escape_regex.sub('',buf)
# strip non-printable ASCII characters
clean = ''.join([x for x in strip_escapes if is_printable(x)])
return clean
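# --- Illustrative sketch (editor addition, not part of the original script) ---
# Example of what sanitize() is expected to produce, assuming the module-level
# `backspaces` list includes the plain '\x08' character and `escape_regex`
# matches ANSI sequences such as '\x1b[0m'. raw_to_xml() below relies on this
# cleanup when converting the .raw capture into XML.
def _sanitize_example():
    raw_line = 'ls\x08\x08cat foo\x1b[0m'
    # Each backspace erases the character typed before it and the escape is
    # stripped, so the cleaned text should be 'cat foo'.
    return sanitize(raw_line)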
class TTY:
def __init__(self):
self.iflag, self.oflag, self.cflag, self.lflag, \
self.ispeed, self.ospeed, self.cc = termios.tcgetattr(0)
def raw(self):
# ISIG - passes Ctl-C, Ctl-Z, etc. to the child rather than generating signals
raw_lflag = self.lflag & ~(termios.ICANON|termios.ECHO|termios.ISIG)
raw_iflag = self.iflag & ~(termios.ICRNL|termios.IXON)
raw_cc = self.cc[:]
raw_cc[termios.VMIN] = 1
raw_cc[termios.VTIME] = 0
termios.tcsetattr(0, termios.TCSANOW, [raw_iflag, self.oflag,
self.cflag, raw_lflag,
self.ispeed, self.ospeed,
raw_cc])
def restore(self):
termios.tcsetattr(0, termios.TCSANOW, [self.iflag, self.oflag,
self.cflag, self.lflag,
self.ispeed, self.ospeed,
self.cc])
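# --- Illustrative sketch (editor addition) ---
# Typical use of TTY, mirroring start_recording() below: put the controlling
# terminal into raw mode so every keystroke is passed straight to the child
# pty, and always restore the saved settings, even on error.
def _tty_usage_example():
    tty = TTY()
    tty.raw()
    try:
        pass  # relay bytes between the user and the child shell here
    finally:
        tty.restore()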
class ChildWindowResizer:
"""Informs the child process that the window has been resized."""
def __init__(self,child_fd):
self.child_fd = child_fd
signal.signal(signal.SIGWINCH,self.signal_handler)
def signal_handler(self,sig,data):
"""Signal handler that gets installed"""
self.resize_child_window()
def resize_child_window(self):
"""Tells the child process to resize its window"""
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(0,termios.TIOCGWINSZ,s)
fcntl.ioctl(self.child_fd,termios.TIOCSWINSZ,x)
def get_shell():
return os.path.basename(os.environ['SHELL'])
def run_shell():
"""Launch the appropriate shell as a login shell
It will be either bash or tcsh depending on what the user is currently running.
It checks the SHELL variable to figure it out.
"""
shell = get_shell()
if shell not in ['bash','tcsh']:
raise ValueError, "Unsupported shell (only works with bash and tcsh)"
os.execvp(shell,(shell,"-l"))
def get_log_dir():
"""Retrieve the name of the directory that will store the logfiles.
If the SHELLLOGGERDIR environment variable is set, use that.
Otherwise, default to ~/.shelllogger"""
env_var = "SHELLLOGGERDIR"
if os.environ.has_key(env_var):
return os.environ[env_var]
else:
return os.path.expanduser('~/.shelllogger')
def start_recording(logfilename, debug):
# Check for recursive call
env_var = 'ShellLogger'
if os.environ.has_key(env_var):
# Recursive call, just exit
return
os.environ[env_var]='1'
print "ShellLogger enabled"
if logfilename is None:
dirname = get_log_dir()
try:
os.mkdir(dirname)
print "Creating %s directory for storing logfile" % dirname
except OSError, e:
# If it's anything but "File exists",then we're in trouble.
# We'll just re-raise the exception for now
if e.errno != errno.EEXIST:
raise e
logfilename = os.path.join(dirname,'log.%d.raw' % time.time())
if debug:
debugfilename = os.path.join(dirname,'log.%d.debug' % time.time())
else:
debugfilename = None
pid, fd = pty.fork()
# Python won't return -1, rather will raise exception.
if pid == 0: # child process
try:
run_shell()
except:
# must not return to caller.
os._exit(0)
# parent process
input = TTY()
input.raw()
resizer = ChildWindowResizer(fd)
resizer.resize_child_window()
bufsize = 1024
try:
logger = Logger(logfilename, debugfilename)
if debugfilename is not None:
print "Warning, shelllogger running in debug mode. All keystrokes will be logged to a plaintext file. Do not type in any passwords during this session!"
# Set the shell prompt properly
os.write(fd,SHELL_PROMPTS[get_shell()])
while True:
delay = 1
exit = 0
try:
r, w, e = select.select([0, fd], [], [], delay)
except select.error, se:
# When the user resizes the window, it will generate a signal
# that will be handled, which will cause select to be
# interrupted.
if se.args[0]==errno.EINTR:
continue
else:
raise
for File in r:
if File == 0:
first_user_input = 1
from_user = os.read(0, bufsize)
os.write(fd, from_user)
logger.input_from_user(from_user)
elif File == fd:
try:
from_shell = os.read(fd, bufsize)
os.write(1, from_shell)
logger.input_from_shell(from_shell)
if from_shell=='':
exit = 1
except OSError:
# On Linux, os.read throws an OSError
# when data is done
from_shell = ''
os.write(1, from_shell)
logger.input_from_shell(from_shell)
exit = 1
if exit==1:
break
xmlfilename = logger.done()
except:
input.restore()
raise
input.restore()
print "ShellLogger data stored in " + xmlfilename
class Logger:
def __init__(self,logfilename, debugfilename):
self.logfilename = logfilename
self.logfile = open(logfilename,'w')
self.logfile.write('<?xml version="1.0" encoding="UTF-8"?>\n')
self.logfile.write('<cli-logger machine="%s">\n\n' % socket.gethostname())
self.buffer = ''
self.cwd = os.getcwd()
self.state = BeginState(self)
self.debugfilename = debugfilename
self.isLinux = False
if self.debugfilename is not None:
self.debugfile = open(debugfilename, 'w')
self.debugfile.write("<cli-debug>\n")
else:
self.debugfile = None
def done(self):
"""Call when session is complete.
Returns the name of the XML file
"""
self.logfile.write("]]></result>\n</cli-logger-entry>\n</cli-logger>\n")
self.logfile.close()
if self.debugfilename is not None:
self.debugfile.write("</cli-debug>")
return self.raw_to_xml()
def raw_to_xml(self):
"""Convert the .raw file, with illegal characters and escape keys, to a proper XML version.
Returns the name of the XML file
"""
xmlfilename = self.logfilename.replace('.raw','.xml')
fout = codecs.open(xmlfilename, encoding="utf-8", mode="w")
for line in codecs.open(self.logfilename,encoding="utf-8"):
fout.write(sanitize(line))
fout.close()
return xmlfilename
def input_from_shell(self,buf):
if self.debugfile:
self.debug_log(buf,True)
self.state.input_from_shell(buf)
self.state = self.state.next_state()
def input_from_user(self,buf):
if self.debugfile:
self.debug_log(buf,False)
self.state.input_from_user(buf)
self.state = self.state.next_state()
def | (self,buf):
self.logfile.write(buf)
def debug_log(self, buf, shell):
"""Record to the debug log"""
# Handle Shell output
if shell == True:
self.debugfile.write("<shell time=\" " + datetime.datetime.now().strftime("%H:%M:%S ") + "\" >" )
self.debugfile.write("<![CDATA["+buf+"]]></shell>\n")
# Handle User Input
else:
self.debugfile.write("<user time=\" " + datetime.datetime.now().strftime("%H:%M:%S ") + "\" >" )
self.debugfile.write("<![CDATA["+buf+"]]></user>\n")
# regex for matching the prompt
# this is used to identify the data directory
re_prompt = re.compile(r'(.*)^\[SL (.*)\]\$ $', re.MULTILINE | re.DOTALL | re.IGNORECASE)
mac_prompt = re.compile(r'(?:.*)\[SL (.*)\](.*)(\$)?',re.MULTILINE | re.DOTALL | re.IGNORECASE)
linux_prompt = re.compile(r'( | write | identifier_name |
shelllogger.py | -8"?>\n')
self.logfile.write('<cli-logger machine="%s">\n\n' % socket.gethostname())
self.buffer = ''
self.cwd = os.getcwd()
self.state = BeginState(self)
self.debugfilename = debugfilename
self.isLinux = False
if self.debugfilename is not None:
self.debugfile = open(debugfilename, 'w')
self.debugfile.write("<cli-debug>\n")
else:
self.debugfile = None
def done(self):
"""Call when session is complete.
Returns the name of the XML file
"""
self.logfile.write("]]></result>\n</cli-logger-entry>\n</cli-logger>\n")
self.logfile.close()
if self.debugfilename is not None:
self.debugfile.write("</cli-debug>")
return self.raw_to_xml()
def raw_to_xml(self):
"""Convert the .raw file, with illegal characters and escape keys, to a proper XML version.
Returns the name of the XML file
"""
xmlfilename = self.logfilename.replace('.raw','.xml')
fout = codecs.open(xmlfilename, encoding="utf-8", mode="w")
for line in codecs.open(self.logfilename,encoding="utf-8"):
fout.write(sanitize(line))
fout.close()
return xmlfilename
def input_from_shell(self,buf):
if self.debugfile:
self.debug_log(buf,True)
self.state.input_from_shell(buf)
self.state = self.state.next_state()
def input_from_user(self,buf):
if self.debugfile:
self.debug_log(buf,False)
self.state.input_from_user(buf)
self.state = self.state.next_state()
def write(self,buf):
self.logfile.write(buf)
def debug_log(self, buf, shell):
"""Record to the debug log"""
# Handle Shell output
if shell == True:
self.debugfile.write("<shell time=\" " + datetime.datetime.now().strftime("%H:%M:%S ") + "\" >" )
self.debugfile.write("<![CDATA["+buf+"]]></shell>\n")
# Handle User Input
else:
self.debugfile.write("<user time=\" " + datetime.datetime.now().strftime("%H:%M:%S ") + "\" >" )
self.debugfile.write("<![CDATA["+buf+"]]></user>\n")
# regex for matching the prompt
# this is used to identify the data directory
re_prompt = re.compile(r'(.*)^\[SL (.*)\]\$ $', re.MULTILINE | re.DOTALL | re.IGNORECASE)
mac_prompt = re.compile(r'(?:.*)\[SL (.*)\](.*)(\$)?',re.MULTILINE | re.DOTALL | re.IGNORECASE)
linux_prompt = re.compile(r'(?:.*)\[SL (.*)\]\$',re.MULTILINE | re.DOTALL | re.IGNORECASE)
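# --- Illustrative sketch (editor addition) ---
# These patterns assume the prompt has been rewritten (see SHELL_PROMPTS in
# start_recording) to the form "[SL <current-directory>]$ ". For example:
def _prompt_match_example():
    captured = 'total 0\n[SL /home/alice]$ '
    m = re_prompt.match(captured)
    # m.group(1) -> 'total 0\n' (the command output)
    # m.group(2) -> '/home/alice' (the directory reported by the shell)
    return m.group(2)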
def is_enter(buf):
# Check if buffer consists entirely of \n or \r
for c in buf:
if c!='\n' and c!='\r':
return False
return True
class BeginState:
def __init__(self,logger):
self.logger = logger
self.saw_shell_input = False
def input_from_shell(self,buf):
# If it's the prompt, then it's just the very first shell
m = re_prompt.match(buf)
if m is not None:
self.logger.cwd = os.path.expanduser(m.group(2))
return
# If the user just hit enter, we don't log it
if is_enter(buf):
return
self.saw_shell_input = True
# Stick the data in a buffer
self.logger.buffer = buf
def input_from_user(self,buf):
# We don't actually care about input from the user,
# we just use shell echos
pass
def next_state(self):
if self.saw_shell_input:
return UserTypingState(self.logger)
else:
return self
class UserTypingState:
def __init__(self,logger):
self.logger = logger
self.seen_cr = False
self.program_invoked = None
def input_from_shell(self,buf):
if(buf.startswith('\x0D') or buf.startswith('\r')):
self.logger.isLinux = True
self.seen_cr = True
self.program_invoked = self.logger.buffer.split()[0]
self.logger.write('''<cli-logger-entry>
<invocation time="%f"
current-directory="%s"><![CDATA[''' % (time.time(),self.logger.cwd))
self.logger.write(self.logger.buffer)
self.logger.write(']]></invocation>\n')
self.logger.buffer = buf;
elif is_enter(buf) and len(self.logger.buffer)>0 and ( self.logger.buffer[-1]!='\\' or 'logout' in buf ):
self.seen_cr = True
self.program_invoked = self.logger.buffer.split()[0]
self.logger.write('''<cli-logger-entry>
<invocation time="%f"
current-directory="%s"><![CDATA[''' % (time.time(),self.logger.cwd))
self.logger.write(self.logger.buffer)
self.logger.write(']]></invocation>\n')
else:
self.logger.buffer += buf
def input_from_user(self,buf):
# Don't need to take any action
global isFirst
if(isFirst):
isFirst = False
self.logger.buffer = ''
pass
def next_state(self):
if self.seen_cr:
if self.program_invoked in TERMINAL_APPS:
return WaitingForShellNoOutputState(self.logger)
else:
return WaitingForShellState(self.logger)
else:
return self
class WaitingForShellState:
def __init__(self,logger):
self.logger = logger
self.seen_shell_input = False
self.seen_prompt = False
def input_from_shell(self,buf):
# Check for the case of no input, just a shell prompt
m = re_prompt.match(buf)
if m is not None:
# Empty result
try:
self.logger.write('<result time="%f"></result>\n</cli-logger-entry>\n\n' % time.time())
self.logger.cwd = os.path.expanduser(m.group(2))
self.seen_prompt = True
return
except:
m = mac_prompt.match(buf)
if m is not None:
self.logger.cwd = os.path.expanduser(m.group(1))
self.logger.write('<result time="%f"></result>\n</cli-logger-entry>\n\n' % time.time())
self.seen_prompt = True
return
else:
self.seen_shell_input = True
self.logger.write('<result time="%f"><![CDATA[\n' % time.time())
self.write_output_to_log(buf)
def write_output_to_log(self,buf):
self.logger.write(buf)
def input_from_user(self,buf):
if self.logger.isLinux:
m = linux_prompt.match(self.logger.buffer.strip())
if m is not None:
self.logger.cwd = os.path.expanduser(m.group(1))
self.logger.write('<result time="%f"></result>\n</cli-logger-entry>\n\n' % time.time())
self.seen_prompt = True
def shell_output_state(self,logger):
return ShellOutputState(logger)
def next_state(self):
if self.seen_prompt:
return BeginState(self.logger)
elif self.seen_shell_input:
return self.shell_output_state(self.logger)
else:
return self
class WaitingForShellNoOutputState(WaitingForShellState):
"""
In this state, we do not want to capture any output. The typical case
is when the user has invoked an interactive program such as a
text editor.
"""
def write_output_to_log(self,buf):
pass
def shell_output_state(self,logger):
return ShellOutputNoOutputState(logger)
class ShellOutputState:
def __init__(self,logger):
self.logger = logger
self.saw_prompt = False
def input_from_shell(self,buf):
# Check if it's the prompt
m = re_prompt.match(buf)
mac = mac_prompt.match(buf)
linux = linux_prompt.match(buf)
if m is not None:
# It's the prompt!
self.saw_prompt = True
try:
self.logger.cwd = os.path.expanduser(m.group(2))
self.write_output_to_log(m.group(1))
self.logger.write("]]></result>\n</cli-logger-entry>\n\n")
except:
m = mac_prompt.match(buf)
if m is not None:
self.logger.cwd = os.path.expanduser(m.group(1))
self.logger.write("]]></result>\n</cli-logger-entry>\n\n")
self.saw_prompt = True
elif mac is not None:
self.logger.cwd = os.path.expanduser(mac.group(1))
self.logger.write("]]></result>\n</cli-logger-entry>\n\n")
elif linux is not None:
self.logger.cwd = os.path.expanduser(linux.group(1))
self.logger.write("]]></result>\n</cli-logger-entry>\n\n")
else:
self.write_output_to_log(buf)
def write_output_to_log(self,buf):
self.logger.write(buf)
def input_from_user(self,buf):
if(self.logger.isLinux):
self.logger.isLinux = False
self.saw_prompt = True
| def next_state(self):
if self.saw_prompt: | random_line_split |
|
shelllogger.py | 0
termios.tcsetattr(0, termios.TCSANOW, [raw_iflag, self.oflag,
self.cflag, raw_lflag,
self.ispeed, self.ospeed,
raw_cc])
def restore(self):
termios.tcsetattr(0, termios.TCSANOW, [self.iflag, self.oflag,
self.cflag, self.lflag,
self.ispeed, self.ospeed,
self.cc])
class ChildWindowResizer:
"""Informs the child process that the window has been resized."""
def __init__(self,child_fd):
self.child_fd = child_fd
signal.signal(signal.SIGWINCH,self.signal_handler)
def signal_handler(self,sig,data):
"""Signal handler that gets installed"""
self.resize_child_window()
def resize_child_window(self):
"""Tells the child process to resize its window"""
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(0,termios.TIOCGWINSZ,s)
fcntl.ioctl(self.child_fd,termios.TIOCSWINSZ,x)
def get_shell():
return os.path.basename(os.environ['SHELL'])
def run_shell():
"""Launch the appropriate shell as a login shell
It will be either bash or tcsh depending on what the user is currently running.
It checks the SHELL variable to figure it out.
"""
shell = get_shell()
if shell not in ['bash','tcsh']:
raise ValueError, "Unsupported shell (only works with bash and tcsh)"
os.execvp(shell,(shell,"-l"))
def get_log_dir():
"""Retrieve the name of the directory that will store the logfiles.
If the SHELLLOGGERDIR environment variable is set, use that.
Otherwise, default to ~/.shelllogger"""
env_var = "SHELLLOGGERDIR"
if os.environ.has_key(env_var):
return os.environ[env_var]
else:
return os.path.expanduser('~/.shelllogger')
def start_recording(logfilename, debug):
# Check for recursive call
env_var = 'ShellLogger'
if os.environ.has_key(env_var):
# Recursive call, just exit
return
os.environ[env_var]='1'
print "ShellLogger enabled"
if logfilename is None:
dirname = get_log_dir()
try:
os.mkdir(dirname)
print "Creating %s directory for storing logfile" % dirname
except OSError, e:
# If it's anything but "File exists",then we're in trouble.
# We'll just re-raise the exception for now
if e.errno != errno.EEXIST:
raise e
logfilename = os.path.join(dirname,'log.%d.raw' % time.time())
if debug:
debugfilename = os.path.join(dirname,'log.%d.debug' % time.time())
else:
debugfilename = None
pid, fd = pty.fork()
# Python won't return -1, rather will raise exception.
if pid == 0: # child process
try:
run_shell()
except:
# must not return to caller.
os._exit(0)
# parent process
input = TTY()
input.raw()
resizer = ChildWindowResizer(fd)
resizer.resize_child_window()
bufsize = 1024
try:
logger = Logger(logfilename, debugfilename)
if debugfilename is not None:
print "Warning, shelllogger running in debug mode. All keystrokes will be logged to a plaintext file. Do not type in any passwords during this session!"
# Set the shell prompt properly
os.write(fd,SHELL_PROMPTS[get_shell()])
while True:
delay = 1
exit = 0
try:
r, w, e = select.select([0, fd], [], [], delay)
except select.error, se:
# When the user resizes the window, it will generate a signal
# that will be handled, which will cause select to be
# interrupted.
if se.args[0]==errno.EINTR:
continue
else:
raise
for File in r:
if File == 0:
first_user_input = 1
from_user = os.read(0, bufsize)
os.write(fd, from_user)
logger.input_from_user(from_user)
elif File == fd:
try:
from_shell = os.read(fd, bufsize)
os.write(1, from_shell)
logger.input_from_shell(from_shell)
if from_shell=='':
exit = 1
except OSError:
# On Linux, os.read throws an OSError
# when data is done
from_shell = ''
os.write(1, from_shell)
logger.input_from_shell(from_shell)
exit = 1
if exit==1:
break
xmlfilename = logger.done()
except:
input.restore()
raise
input.restore()
print "ShellLogger data stored in " + xmlfilename
class Logger:
def __init__(self,logfilename, debugfilename):
self.logfilename = logfilename
self.logfile = open(logfilename,'w')
self.logfile.write('<?xml version="1.0" encoding="UTF-8"?>\n')
self.logfile.write('<cli-logger machine="%s">\n\n' % socket.gethostname())
self.buffer = ''
self.cwd = os.getcwd()
self.state = BeginState(self)
self.debugfilename = debugfilename
self.isLinux = False
if self.debugfilename is not None:
self.debugfile = open(debugfilename, 'w')
self.debugfile.write("<cli-debug>\n")
else:
self.debugfile = None
def done(self):
"""Call when session is complete.
Returns the name of the XML file
"""
self.logfile.write("]]></result>\n</cli-logger-entry>\n</cli-logger>\n")
self.logfile.close()
if self.debugfilename is not None:
self.debugfile.write("</cli-debug>")
return self.raw_to_xml()
def raw_to_xml(self):
"""Convert the .raw file, with illegal characters and escape keys, to a proper XML version.
Returns the name of the XML file
"""
xmlfilename = self.logfilename.replace('.raw','.xml')
fout = codecs.open(xmlfilename, encoding="utf-8", mode="w")
for line in codecs.open(self.logfilename,encoding="utf-8"):
fout.write(sanitize(line))
fout.close()
return xmlfilename
def input_from_shell(self,buf):
if self.debugfile:
self.debug_log(buf,True)
self.state.input_from_shell(buf)
self.state = self.state.next_state()
def input_from_user(self,buf):
if self.debugfile:
self.debug_log(buf,False)
self.state.input_from_user(buf)
self.state = self.state.next_state()
def write(self,buf):
self.logfile.write(buf)
def debug_log(self, buf, shell):
"""Record to the debug log"""
# Handle Shell output
if shell == True:
self.debugfile.write("<shell time=\" " + datetime.datetime.now().strftime("%H:%M:%S ") + "\" >" )
self.debugfile.write("<![CDATA["+buf+"]]></shell>\n")
# Handle User Input
else:
self.debugfile.write("<user time=\" " + datetime.datetime.now().strftime("%H:%M:%S ") + "\" >" )
self.debugfile.write("<![CDATA["+buf+"]]></user>\n")
# regex for matching the prompt
# this is used to identify the data directory
re_prompt = re.compile(r'(.*)^\[SL (.*)\]\$ $', re.MULTILINE | re.DOTALL | re.IGNORECASE)
mac_prompt = re.compile(r'(?:.*)\[SL (.*)\](.*)(\$)?',re.MULTILINE | re.DOTALL | re.IGNORECASE)
linux_prompt = re.compile(r'(?:.*)\[SL (.*)\]\$',re.MULTILINE | re.DOTALL | re.IGNORECASE)
def is_enter(buf):
# Check if buffer consists entirely of \n or \r
for c in buf:
if c!='\n' and c!='\r':
return False
return True
class BeginState:
def __init__(self,logger):
self.logger = logger
self.saw_shell_input = False
def input_from_shell(self,buf):
# If it's the prompt, then it's just the very first shell
m = re_prompt.match(buf)
if m is not None:
self.logger.cwd = os.path.expanduser(m.group(2))
return
# If the user just hit enter, we don't log it
if is_enter(buf):
return
self.saw_shell_input = True
# Stick the data in a buffer
self.logger.buffer = buf
def input_from_user(self,buf):
# We don't actually care about input from the user,
# we just use shell echos
pass
def next_state(self):
if self.saw_shell_input:
return UserTypingState(self.logger)
else:
return self
class UserTypingState:
def __init__(self,logger):
| self.logger = logger
self.seen_cr = False
self.program_invoked = None | identifier_body |
|
shelllogger.py | new terminals are encountered.
escape_regex is a Regex filter to capture all escape sequences.
Modified from: http://wiki.tcl.tk/9673
"""
# Filter out control characters
# First, handle the backspaces.
for backspace in backspaces:
try:
while True:
ind = buf.index(backspace)
buf = ''.join((buf[0:ind-1],buf[ind+len(backspace):]))
except:
pass
strip_escapes = escape_regex.sub('',buf)
# strip non-printable ASCII characters
clean = ''.join([x for x in strip_escapes if is_printable(x)])
return clean
class TTY:
def __init__(self):
self.iflag, self.oflag, self.cflag, self.lflag, \
self.ispeed, self.ospeed, self.cc = termios.tcgetattr(0)
def raw(self):
# ISIG - passes Ctl-C, Ctl-Z, etc. to the child rather than generating signals
raw_lflag = self.lflag & ~(termios.ICANON|termios.ECHO|termios.ISIG)
raw_iflag = self.iflag & ~(termios.ICRNL|termios.IXON)
raw_cc = self.cc[:]
raw_cc[termios.VMIN] = 1
raw_cc[termios.VTIME] = 0
termios.tcsetattr(0, termios.TCSANOW, [raw_iflag, self.oflag,
self.cflag, raw_lflag,
self.ispeed, self.ospeed,
raw_cc])
def restore(self):
termios.tcsetattr(0, termios.TCSANOW, [self.iflag, self.oflag,
self.cflag, self.lflag,
self.ispeed, self.ospeed,
self.cc])
class ChildWindowResizer:
"""Informs the child process that the window has been resized."""
def __init__(self,child_fd):
self.child_fd = child_fd
signal.signal(signal.SIGWINCH,self.signal_handler)
def signal_handler(self,sig,data):
"""Signal handler that gets installed"""
self.resize_child_window()
def resize_child_window(self):
"""Tells the child process to resize its window"""
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(0,termios.TIOCGWINSZ,s)
fcntl.ioctl(self.child_fd,termios.TIOCSWINSZ,x)
def get_shell():
return os.path.basename(os.environ['SHELL'])
def run_shell():
"""Launch the appropriate shell as a login shell
It will be either bash or tcsh depending on what the user is currently running.
It checks the SHELL variable to figure it out.
"""
shell = get_shell()
if shell not in ['bash','tcsh']:
raise ValueError, "Unsupported shell (only works with bash and tcsh)"
os.execvp(shell,(shell,"-l"))
def get_log_dir():
"""Retrieve the name of the directory that will store the logfiles.
If the SHELLLOGGERDIR environment variable is set, use that.
Otherwise, default to ~/.shelllogger"""
env_var = "SHELLLOGGERDIR"
if os.environ.has_key(env_var):
|
else:
return os.path.expanduser('~/.shelllogger')
def start_recording(logfilename, debug):
# Check for recursive call
env_var = 'ShellLogger'
if os.environ.has_key(env_var):
# Recursive call, just exit
return
os.environ[env_var]='1'
print "ShellLogger enabled"
if logfilename is None:
dirname = get_log_dir()
try:
os.mkdir(dirname)
print "Creating %s directory for storing logfile" % dirname
except OSError, e:
# If it's anything but "File exists",then we're in trouble.
# We'll just re-raise the exception for now
if e.errno != errno.EEXIST:
raise e
logfilename = os.path.join(dirname,'log.%d.raw' % time.time())
if debug:
debugfilename = os.path.join(dirname,'log.%d.debug' % time.time())
else:
debugfilename = None
pid, fd = pty.fork()
# Python won't return -1, rather will raise exception.
if pid == 0: # child process
try:
run_shell()
except:
# must not return to caller.
os._exit(0)
# parent process
input = TTY()
input.raw()
resizer = ChildWindowResizer(fd)
resizer.resize_child_window()
bufsize = 1024
try:
logger = Logger(logfilename, debugfilename)
if debugfilename is not None:
print "Warning, shelllogger running in debug mode. All keystrokes will be logged to a plaintext file. Do not type in any passwords during this session!"
# Set the shell prompt properly
os.write(fd,SHELL_PROMPTS[get_shell()])
while True:
delay = 1
exit = 0
try:
r, w, e = select.select([0, fd], [], [], delay)
except select.error, se:
# When the user resizes the window, it will generate a signal
# that will be handled, which will cause select to be
# interrupted.
if se.args[0]==errno.EINTR:
continue
else:
raise
for File in r:
if File == 0:
first_user_input = 1
from_user = os.read(0, bufsize)
os.write(fd, from_user)
logger.input_from_user(from_user)
elif File == fd:
try:
from_shell = os.read(fd, bufsize)
os.write(1, from_shell)
logger.input_from_shell(from_shell)
if from_shell=='':
exit = 1
except OSError:
# On Linux, os.read throws an OSError
# when data is done
from_shell = ''
os.write(1, from_shell)
logger.input_from_shell(from_shell)
exit = 1
if exit==1:
break
xmlfilename = logger.done()
except:
input.restore()
raise
input.restore()
print "ShellLogger data stored in " + xmlfilename
class Logger:
def __init__(self,logfilename, debugfilename):
self.logfilename = logfilename
self.logfile = open(logfilename,'w')
self.logfile.write('<?xml version="1.0" encoding="UTF-8"?>\n')
self.logfile.write('<cli-logger machine="%s">\n\n' % socket.gethostname())
self.buffer = ''
self.cwd = os.getcwd()
self.state = BeginState(self)
self.debugfilename = debugfilename
self.isLinux = False
if self.debugfilename is not None:
self.debugfile = open(debugfilename, 'w')
self.debugfile.write("<cli-debug>\n")
else:
self.debugfile = None
def done(self):
"""Call when session is complete.
Returns the name of the XML file
"""
self.logfile.write("]]></result>\n</cli-logger-entry>\n</cli-logger>\n")
self.logfile.close()
if self.debugfilename is not None:
self.debugfile.write("</cli-debug>")
return self.raw_to_xml()
def raw_to_xml(self):
"""Convert the .raw file, with illegal characters and escape keys, to a proper XML version.
Returns the name of the XML file
"""
xmlfilename = self.logfilename.replace('.raw','.xml')
fout = codecs.open(xmlfilename, encoding="utf-8", mode="w")
for line in codecs.open(self.logfilename,encoding="utf-8"):
fout.write(sanitize(line))
fout.close()
return xmlfilename
def input_from_shell(self,buf):
if self.debugfile:
self.debug_log(buf,True)
self.state.input_from_shell(buf)
self.state = self.state.next_state()
def input_from_user(self,buf):
if self.debugfile:
self.debug_log(buf,False)
self.state.input_from_user(buf)
self.state = self.state.next_state()
def write(self,buf):
self.logfile.write(buf)
def debug_log(self, buf, shell):
"""Record to the debug log"""
# Handle Shell output
if shell == True:
self.debugfile.write("<shell time=\" " + datetime.datetime.now().strftime("%H:%M:%S ") + "\" >" )
self.debugfile.write("<![CDATA["+buf+"]]></shell>\n")
# Handle User Input
else:
self.debugfile.write("<user time=\" " + datetime.datetime.now().strftime("%H:%M:%S ") + "\" >" )
self.debugfile.write("<![CDATA["+buf+"]]></user>\n")
# regex for matching the prompt
# this is used to identify the data directory
re_prompt = re.compile(r'(.*)^\[SL (.*)\]\$ $', re.MULTILINE | re.DOTALL | re.IGNORECASE)
mac_prompt = re.compile(r'(?:.*)\[SL (.*)\](.*)(\$)?',re.MULTILINE | re.DOTALL | re.IGNORECASE)
linux_prompt = re.compile(r'( | return os.environ[env_var] | conditional_block |
LearningMachines2.py |
let me keep expanding the width. Only the
Variable width output for classifier.
Assign any function to a classifier node.
input width is fixed.
# TODO Need a better predictor.
"""
__author__ = 'Abhishek Rao'
# Headers
import numpy as np
from sklearn import svm
import math
import matplotlib.pyplot as plt
import pickle
import os.path
from sklearn.metrics import accuracy_score
import School
# Constants
# Classes
class ClassifierNode:
""" A node that contains classifier, it's input address and output address.
"""
def __init__(self, end_in_address, out_address, classifier_name='Default',
given_predictor=None):
self.out_address = out_address
self.end_in_address = end_in_address # end column
self.label = classifier_name # The name of this concept. e.g. like apple etc.
# Check whether to create a standard classifier or a custom, given one.
if given_predictor:
self.given_predictor = given_predictor
self.classifier_type = 'custom'
else:
self.classifier = svm.LinearSVC(dual=False, penalty='l1')
self.classifier_type = 'standard'
def fit(self, x_in, y):
new_x_in = x_in[:, :self.end_in_address]
self.classifier.fit(new_x_in, y)
def predict(self, x_in):
"""
Give output for the current classifier. Note: instead of predicting a hard 1 or 0, it is better to return a probability (soft prediction).
:param x_in: The Classifier banks working memory, full matrix_in.
:return: A column of predicted values.
"""
new_x_in = x_in[:, :self.end_in_address]
if self.classifier_type == 'standard':
dec_fx_in = self.classifier.decision_function(new_x_in)
else:
dec_fx_in = self.given_predictor(new_x_in)
# Convert it into mapping between 0 to 1 instead of -1 to 1
return np.array([sigmoid_10(i) for i in dec_fx_in])
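# --- Illustrative sketch (editor addition) ---
# Minimal standalone use of ClassifierNode: it trains on the first
# `end_in_address` columns of the working memory and returns one soft score
# per row. The toy data below is an assumption for illustration only.
def _classifier_node_example():
    memory = np.array([[0.0, 1.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0, 0.0],
                       [1.0, 1.0, 0.0, 0.0],
                       [0.0, 0.0, 0.0, 0.0]])
    node = ClassifierNode(end_in_address=2, out_address=[3], classifier_name='demo')
    node.fit(memory, [1, 0, 1, 0])
    return node.predict(memory)  # values in (0, 1), one per row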
class | :
""" A machine which stores both input X and the current output of bunch of classifiers.
API should be similar to scikit learn"""
def __init__(self, max_width, input_width, height):
"""
Initialize this class.
:rtype : object self
:param max_width: maximum data dimension in current working memory, should be greater than
input_width.
:param input_width: maximum input dimension.
:param height: maximum number of input samples
:return: None
"""
self.current_working_memory = np.zeros([height, max_width])
self.classifiers_out_address_start = input_width # the start of classifiers output.
self.classifiers_current_count = 0 # starting address for output for new classifier
self.classifiers_list = []
def predict(self, x_pred):
"""Give out what it thinks from the input. Input x_pred should be 2 dimensional.
:param: x_pred: input, dimension 2, (samples x_pred dimension)"""
self.current_working_memory *= 0 # Flush the current input
x_pred = np.array(x_pred)
input_number_samples, input_feature_dimension = x_pred.shape
if len(x_pred.shape) != 2:
print "Error in predict. Input dimension should be 2"
raise ValueError
self.current_working_memory[:input_number_samples, :input_feature_dimension] = x_pred
for classifier_i in self.classifiers_list:
predicted_value = classifier_i.predict(self.current_working_memory)
predicted_shape = predicted_value.shape
if len(predicted_shape) < 2:
predicted_value = predicted_value.reshape(-1, 1)
predicted_shape = predicted_value.shape
self.current_working_memory[:predicted_shape[0], classifier_i.out_address] = predicted_value
# need to return the rightmost nonzero column.
for column_j in range(self.current_working_memory.shape[1])[::-1]: # reverse traverse through columns
if np.any(self.current_working_memory[:input_number_samples, column_j]):
soft_dec = self.current_working_memory[:input_number_samples, column_j]
return np.array(soft_dec > 0.5, dtype=np.int16)
print "Can't find any nonzero column"
return self.current_working_memory[:, 0]
def fit(self, x_in, y, task_name='Default'):
"""
Adds a new classifier and trains it, similar to Scikit API
:param x_in: 2d Input data
:param y: labels
:return: None
"""
# check for limit reach for number of classifiers.
if self.classifiers_current_count + self.classifiers_out_address_start \
> self.current_working_memory.shape[1]:
print 'No more space for classifier. ERROR'
raise MemoryError
x_in = np.array(x_in)
input_number_samples, input_feature_dimension = x_in.shape
if len(x_in.shape) != 2:
print "Error in fit. Input dimension should be 2"
raise ValueError
self.current_working_memory[:x_in.shape[0], :x_in.shape[1]] = x_in
# Procure a new classifier, this might be wasteful, later perhaps reuse classifier
# instead of lavishly getting new ones, chinese restaurant?
new_classifier = ClassifierNode(
end_in_address=self.classifiers_out_address_start + self.classifiers_current_count,
out_address=[self.classifiers_out_address_start + self.classifiers_current_count + 1],
classifier_name=task_name)
self.classifiers_current_count += 1
# Need to take care of mismatch in length of working memory and input samples.
new_classifier.fit(self.current_working_memory[:input_number_samples], y)
self.classifiers_list.append(new_classifier)
def fit_custom_fx(self, custom_function, input_width, output_width, task_name):
"""
Push in a new custom function to classifiers list.
:param custom_function: The function that will be used to predict. Should take in a 2D array input and
give out a 2d array of same height and variable width.
:param input_width: The width of input.
:param output_width: The width of output. If a single neuron this is one.
:param task_name: name of this function
:return: None
"""
new_classifier = ClassifierNode(
end_in_address=input_width,
out_address=self.classifiers_out_address_start + self.classifiers_current_count + np.arange(output_width),
classifier_name=task_name,
given_predictor=custom_function
)
self.classifiers_current_count += output_width
self.classifiers_list.append(new_classifier)
def status(self):
"""Gives out the current status, like number of classifier and prints their values"""
print 'Currently there are ', len(self.classifiers_list), ' classifiers. They are'
classifiers_coefficients = np.zeros(self.current_working_memory.shape)
print [classifier_i.label for classifier_i in self.classifiers_list]
for count, classifier_i in enumerate(self.classifiers_list):
coeffs_i = classifier_i.classifier.coef_ \
if classifier_i.classifier_type == 'standard' else np.zeros([1, 1])
classifiers_coefficients[count, :coeffs_i.shape[1]] = coeffs_i
# print 'Classifier: ', classifier_i
# print 'Classifier name: ', classifier_i.label
# print 'Out address', classifier_i.out_address
# print 'In address', classifier_i.end_in_address
# print 'Coefficients: ', classifier_i.classifier.coef_, classifier_i.classifier.intercept_
plt.imshow(self.current_working_memory, interpolation='none', cmap='gray')
plt.title('Current working memory')
plt.figure()
plt.imshow(classifiers_coefficients, interpolation='none', cmap='gray')
plt.title('Classifier coefficients')
plt.show()
def remove_classifier(self, classifier_name):
"""
Removes the classifier whose name is same as classifier_name
:param classifier_name: the label of the classifier to be removed.
:return: the index of removed classifier. -1 if not found.
"""
try:
labels_list = [classifier_i.label for classifier_i in self.classifiers_list]
except ValueError:
print 'The specified label does not exist.'
return -1
removing_index = labels_list.index(classifier_name)
self.classifiers_list.pop(removing_index)
print 'Classifier was removed. Its name was', classifier_name
return removing_index
def score(self, x_in, y):
"""
Gives the accuracy between predicted( x_in) and y
:param x_in: 2d matrix, samples x_in dimension
:param y: actual label
:return: float, between 0 to 1
"""
yp_score = self.predict(x_in)
return accuracy_score(y, y_pred=yp_score)
def generic_task(self, x_in, y, task_name):
"""
A generic framework to train on different tasks.
"""
self.fit(x_in, y, task_name=task_name)
print 'The score for task ', task_name, ' is ', self.score(x_in, y)
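# --- Illustrative sketch (editor addition) ---
# Minimal end-to-end use of SimpleClassifierBank on toy data (the sizes and
# labels below are illustrative assumptions, not part of the original module).
# max_width just has to leave room for the input columns plus one output
# column per learned task.
def _classifier_bank_example():
    x = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
    y = [1, 0, 1, 0]
    bank = SimpleClassifierBank(max_width=10, input_width=2, height=4)
    bank.fit(x, y, task_name='toy task')
    return bank.score(x, y)  # accuracy on the training data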
# Global functions
# Reason for having 10 sigmoid is to get sharper distinction.
def sigmoid_10(x):
return 1 / (1 + math.exp(-10*x))
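# --- Illustrative sketch (editor addition) ---
# The factor of 10 sharpens the mapping: a mildly positive SVM decision value
# already lands close to 1. For example, sigmoid_10(0.2) = 1/(1 + e**-2) ~ 0.88,
# whereas a plain sigmoid would give 1/(1 + e**-0.2) ~ 0.55.
def _sigmoid_10_example():
    return sigmoid_10(0.2)  # roughly 0.88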
# Following are | SimpleClassifierBank | identifier_name |
LearningMachines2.py |
let me keep expanding the width. Only the
Variable width output for classifier.
Assign any function to a classifier node.
input width is fixed.
# TODO Need a better predictor.
"""
__author__ = 'Abhishek Rao'
# Headers
import numpy as np
from sklearn import svm
import math
import matplotlib.pyplot as plt
import pickle
import os.path
from sklearn.metrics import accuracy_score
import School
# Constants
# Classes
class ClassifierNode:
""" A node that contains classifier, it's input address and output address.
"""
def __init__(self, end_in_address, out_address, classifier_name='Default',
given_predictor=None):
self.out_address = out_address
self.end_in_address = end_in_address # end column
self.label = classifier_name # The name of this concept. e.g. like apple etc.
# Check whether to create a standard classifier or a custom, given one.
if given_predictor:
self.given_predictor = given_predictor
self.classifier_type = 'custom'
else:
self.classifier = svm.LinearSVC(dual=False, penalty='l1')
self.classifier_type = 'standard'
def fit(self, x_in, y):
new_x_in = x_in[:, :self.end_in_address]
self.classifier.fit(new_x_in, y)
def predict(self, x_in):
"""
Give output for the current classifier. Note: instead of predicting a hard 1 or 0, it is better to return a probability (soft prediction).
:param x_in: The Classifier banks working memory, full matrix_in.
:return: A column of predicted values.
"""
new_x_in = x_in[:, :self.end_in_address]
if self.classifier_type == 'standard':
dec_fx_in = self.classifier.decision_function(new_x_in)
else:
dec_fx_in = self.given_predictor(new_x_in)
# Convert it into mapping between 0 to 1 instead of -1 to 1
return np.array([sigmoid_10(i) for i in dec_fx_in])
class SimpleClassifierBank:
""" A machine which stores both input X and the current output of bunch of classifiers.
API should be similar to scikit learn"""
def __init__(self, max_width, input_width, height):
"""
Initialize this class.
:rtype : object self
:param max_width: maximum data dimension in current working memory, should be greater than
input_width.
:param input_width: maximum input dimension.
:param height: maximum number of input samples
:return: None
"""
self.current_working_memory = np.zeros([height, max_width])
self.classifiers_out_address_start = input_width # the start of classifiers output.
self.classifiers_current_count = 0 # starting address for output for new classifier
self.classifiers_list = []
def predict(self, x_pred):
"""Give out what it thinks from the input. Input x_pred should be 2 dimensional.
:param: x_pred: input, dimension 2, (samples x_pred dimension)"""
self.current_working_memory *= 0 # Flush the current input
x_pred = np.array(x_pred)
input_number_samples, input_feature_dimension = x_pred.shape
if len(x_pred.shape) != 2:
|
self.current_working_memory[:input_number_samples, :input_feature_dimension] = x_pred
for classifier_i in self.classifiers_list:
predicted_value = classifier_i.predict(self.current_working_memory)
predicted_shape = predicted_value.shape
if len(predicted_shape) < 2:
predicted_value = predicted_value.reshape(-1, 1)
predicted_shape = predicted_value.shape
self.current_working_memory[:predicted_shape[0], classifier_i.out_address] = predicted_value
# need to return the rightmost nonzero column.
for column_j in range(self.current_working_memory.shape[1])[::-1]: # reverse traverse through columns
if np.any(self.current_working_memory[:input_number_samples, column_j]):
soft_dec = self.current_working_memory[:input_number_samples, column_j]
return np.array(soft_dec > 0.5, dtype=np.int16)
print "Can't find any nonzero column"
return self.current_working_memory[:, 0]
def fit(self, x_in, y, task_name='Default'):
"""
Adds a new classifier and trains it, similar to Scikit API
:param x_in: 2d Input data
:param y: labels
:return: None
"""
# check for limit reach for number of classifiers.
if self.classifiers_current_count + self.classifiers_out_address_start \
> self.current_working_memory.shape[1]:
print 'No more space for classifier. ERROR'
raise MemoryError
x_in = np.array(x_in)
input_number_samples, input_feature_dimension = x_in.shape
if len(x_in.shape) != 2:
print "Error in fit. Input dimension should be 2"
raise ValueError
self.current_working_memory[:x_in.shape[0], :x_in.shape[1]] = x_in
# Procure a new classifier, this might be wasteful, later perhaps reuse classifier
# instead of lavishly getting new ones, chinese restaurant?
new_classifier = ClassifierNode(
end_in_address=self.classifiers_out_address_start + self.classifiers_current_count,
out_address=[self.classifiers_out_address_start + self.classifiers_current_count + 1],
classifier_name=task_name)
self.classifiers_current_count += 1
# Need to take care of mismatch in length of working memory and input samples.
new_classifier.fit(self.current_working_memory[:input_number_samples], y)
self.classifiers_list.append(new_classifier)
def fit_custom_fx(self, custom_function, input_width, output_width, task_name):
"""
Push in a new custom function to classifiers list.
:param custom_function: The function that will be used to predict. Should take in a 2D array input and
give out a 2d array of same height and variable width.
:param input_width: The width of input.
:param output_width: The width of output. If a single neuron this is one.
:param task_name: name of this function
:return: None
"""
new_classifier = ClassifierNode(
end_in_address=input_width,
out_address=self.classifiers_out_address_start + self.classifiers_current_count + np.arange(output_width),
classifier_name=task_name,
given_predictor=custom_function
)
self.classifiers_current_count += output_width
self.classifiers_list.append(new_classifier)
def status(self):
"""Gives out the current status, like number of classifier and prints their values"""
print 'Currently there are ', len(self.classifiers_list), ' classifiers. They are'
classifiers_coefficients = np.zeros(self.current_working_memory.shape)
print [classifier_i.label for classifier_i in self.classifiers_list]
for count, classifier_i in enumerate(self.classifiers_list):
coeffs_i = classifier_i.classifier.coef_ \
if classifier_i.classifier_type == 'standard' else np.zeros([1, 1])
classifiers_coefficients[count, :coeffs_i.shape[1]] = coeffs_i
# print 'Classifier: ', classifier_i
# print 'Classifier name: ', classifier_i.label
# print 'Out address', classifier_i.out_address
# print 'In address', classifier_i.end_in_address
# print 'Coefficients: ', classifier_i.classifier.coef_, classifier_i.classifier.intercept_
plt.imshow(self.current_working_memory, interpolation='none', cmap='gray')
plt.title('Current working memory')
plt.figure()
plt.imshow(classifiers_coefficients, interpolation='none', cmap='gray')
plt.title('Classifier coefficients')
plt.show()
def remove_classifier(self, classifier_name):
"""
Removes the classifier whose name is same as classifier_name
:param classifier_name: the label of the classifier to be removed.
:return: the index of removed classifier. -1 if not found.
"""
try:
labels_list = [classifier_i.label for classifier_i in self.classifiers_list]
except ValueError:
print 'The specified label does not exist.'
return -1
removing_index = labels_list.index(classifier_name)
self.classifiers_list.pop(removing_index)
print 'Classifier was removed. Its name was', classifier_name
return removing_index
def score(self, x_in, y):
"""
Gives the accuracy between predicted( x_in) and y
:param x_in: 2d matrix, samples x_in dimension
:param y: actual label
:return: float, between 0 to 1
"""
yp_score = self.predict(x_in)
return accuracy_score(y, y_pred=yp_score)
def generic_task(self, x_in, y, task_name):
"""
A generic framework to train on different tasks.
"""
self.fit(x_in, y, task_name=task_name)
print 'The score for task ', task_name, ' is ', self.score(x_in, y)
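# --- Illustrative sketch (editor addition) ---
# fit_custom_fx() accepts any callable that maps the first `input_width`
# columns of working memory to decision values (later squashed by sigmoid_10).
# The predictor and widths below are illustrative assumptions.
def _custom_predictor_example(bank):
    def first_feature_high(x_in):
        # Positive when the first feature exceeds 0.5, negative otherwise.
        return x_in[:, 0] - 0.5
    bank.fit_custom_fx(first_feature_high, input_width=2, output_width=1,
                       task_name='first feature high')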
# Global functions
# Reason for having 10 sigmoid is to get sharper distinction.
def sigmoid_10(x):
return 1 / (1 + math.exp(-10*x))
# Following are required | print "Error in predict. Input dimension should be 2"
raise ValueError | conditional_block |
LearningMachines2.py |
let me keep expanding the width. Only the
Variable width output for classifier.
Assign any function to a classifier node.
input width is fixed.
# TODO Need a better predictor.
"""
__author__ = 'Abhishek Rao'
# Headers
import numpy as np
from sklearn import svm
import math
import matplotlib.pyplot as plt
import pickle
import os.path
from sklearn.metrics import accuracy_score
import School
# Constants
# Classes
class ClassifierNode:
""" A node that contains classifier, it's input address and output address.
"""
def __init__(self, end_in_address, out_address, classifier_name='Default',
given_predictor=None):
self.out_address = out_address
self.end_in_address = end_in_address # end column
self.label = classifier_name # The name of this concept. e.g. like apple etc.
# Check whether to create a standard classifier or a custom, given one.
if given_predictor:
self.given_predictor = given_predictor
self.classifier_type = 'custom'
else:
self.classifier = svm.LinearSVC(dual=False, penalty='l1')
self.classifier_type = 'standard'
def fit(self, x_in, y):
new_x_in = x_in[:, :self.end_in_address]
self.classifier.fit(new_x_in, y)
def predict(self, x_in):
"""
Give output for the current classifier. Note: instead of predicting a hard 1 or 0, it is better to return a probability (soft prediction).
:param x_in: The Classifier banks working memory, full matrix_in.
:return: A column of predicted values.
"""
new_x_in = x_in[:, :self.end_in_address]
if self.classifier_type == 'standard':
dec_fx_in = self.classifier.decision_function(new_x_in)
else:
dec_fx_in = self.given_predictor(new_x_in)
# Convert it into mapping between 0 to 1 instead of -1 to 1
return np.array([sigmoid_10(i) for i in dec_fx_in])
class SimpleClassifierBank:
""" A machine which stores both input X and the current output of bunch of classifiers.
API should be similar to scikit learn"""
def __init__(self, max_width, input_width, height):
"""
Initialize this class.
:rtype : object self
:param max_width: maximum data dimension in current working memory, should be greater than
input_width.
:param input_width: maximum input dimension.
:param height: maximum number of input samples
:return: None
"""
self.current_working_memory = np.zeros([height, max_width])
self.classifiers_out_address_start = input_width # the start of classifiers output.
self.classifiers_current_count = 0 # starting address for output for new classifier
self.classifiers_list = []
def predict(self, x_pred):
"""Give out what it thinks from the input. Input x_pred should be 2 dimensional.
:param: x_pred: input, dimension 2, (samples x_pred dimension)"""
self.current_working_memory *= 0 # Flush the current input
x_pred = np.array(x_pred)
input_number_samples, input_feature_dimension = x_pred.shape
if len(x_pred.shape) != 2:
print "Error in predict. Input dimension should be 2"
raise ValueError
self.current_working_memory[:input_number_samples, :input_feature_dimension] = x_pred
for classifier_i in self.classifiers_list:
predicted_value = classifier_i.predict(self.current_working_memory)
predicted_shape = predicted_value.shape
if len(predicted_shape) < 2:
predicted_value = predicted_value.reshape(-1, 1)
predicted_shape = predicted_value.shape
self.current_working_memory[:predicted_shape[0], classifier_i.out_address] = predicted_value
# need to return the rightmost nonzero column.
for column_j in range(self.current_working_memory.shape[1])[::-1]: # reverse traverse through columns
if np.any(self.current_working_memory[:input_number_samples, column_j]):
soft_dec = self.current_working_memory[:input_number_samples, column_j]
return np.array(soft_dec > 0.5, dtype=np.int16)
print "Can't find any nonzero column"
return self.current_working_memory[:, 0] | def fit(self, x_in, y, task_name='Default'):
"""
Adds a new classifier and trains it, similar to Scikit API
:param x_in: 2d Input data
:param y: labels
:return: None
"""
# check for limit reach for number of classifiers.
if self.classifiers_current_count + self.classifiers_out_address_start \
> self.current_working_memory.shape[1]:
print 'No more space for classifier. ERROR'
raise MemoryError
x_in = np.array(x_in)
input_number_samples, input_feature_dimension = x_in.shape
if len(x_in.shape) != 2:
print "Error in fit. Input dimension should be 2"
raise ValueError
self.current_working_memory[:x_in.shape[0], :x_in.shape[1]] = x_in
# Procure a new classifier, this might be wasteful, later perhaps reuse classifier
# instead of lavishly getting new ones, chinese restaurant?
new_classifier = ClassifierNode(
end_in_address=self.classifiers_out_address_start + self.classifiers_current_count,
out_address=[self.classifiers_out_address_start + self.classifiers_current_count + 1],
classifier_name=task_name)
self.classifiers_current_count += 1
# Need to take care of mismatch in length of working memory and input samples.
new_classifier.fit(self.current_working_memory[:input_number_samples], y)
self.classifiers_list.append(new_classifier)
def fit_custom_fx(self, custom_function, input_width, output_width, task_name):
"""
Push in a new custom function to classifiers list.
:param custom_function: The function that will be used to predict. Should take in a 2D array input and
give out a 2d array of same height and variable width.
:param input_width: The width of input.
:param output_width: The width of output. If a single neuron this is one.
:param task_name: name of this function
:return: None
"""
new_classifier = ClassifierNode(
end_in_address=input_width,
out_address=self.classifiers_out_address_start + self.classifiers_current_count + np.arange(output_width),
classifier_name=task_name,
given_predictor=custom_function
)
self.classifiers_current_count += output_width
self.classifiers_list.append(new_classifier)
def status(self):
"""Gives out the current status, like number of classifier and prints their values"""
print 'Currently there are ', len(self.classifiers_list), ' classifiers. They are'
classifiers_coefficients = np.zeros(self.current_working_memory.shape)
print [classifier_i.label for classifier_i in self.classifiers_list]
for count, classifier_i in enumerate(self.classifiers_list):
coeffs_i = classifier_i.classifier.coef_ \
if classifier_i.classifier_type == 'standard' else np.zeros([1, 1])
classifiers_coefficients[count, :coeffs_i.shape[1]] = coeffs_i
# print 'Classifier: ', classifier_i
# print 'Classifier name: ', classifier_i.label
# print 'Out address', classifier_i.out_address
# print 'In address', classifier_i.end_in_address
# print 'Coefficients: ', classifier_i.classifier.coef_, classifier_i.classifier.intercept_
plt.imshow(self.current_working_memory, interpolation='none', cmap='gray')
plt.title('Current working memory')
plt.figure()
plt.imshow(classifiers_coefficients, interpolation='none', cmap='gray')
plt.title('Classifier coefficients')
plt.show()
def remove_classifier(self, classifier_name):
"""
Removes the classifier whose name is same as classifier_name
:param classifier_name: the label of the classifier to be removed.
:return: the index of removed classifier. -1 if not found.
"""
try:
labels_list = [classifier_i.label for classifier_i in self.classifiers_list]
except ValueError:
print 'The specified label does not exist.'
return -1
removing_index = labels_list.index(classifier_name)
self.classifiers_list.pop(removing_index)
print 'Classifier was removed. Its name was', classifier_name
return removing_index
def score(self, x_in, y):
"""
Gives the accuracy between predicted( x_in) and y
:param x_in: 2d matrix, samples x_in dimension
:param y: actual label
:return: float, between 0 to 1
"""
yp_score = self.predict(x_in)
return accuracy_score(y, y_pred=yp_score)
def generic_task(self, x_in, y, task_name):
"""
A generic framework to train on different tasks.
"""
self.fit(x_in, y, task_name=task_name)
print 'The score for task ', task_name, ' is ', self.score(x_in, y)
# Global functions
# Reason for having 10 sigmoid is to get sharper distinction.
def sigmoid_10(x):
return 1 / (1 + math.exp(-10*x))
# Following are required | random_line_split |
|
LearningMachines2.py |
let me keep expanding the width. Only the
Variable width output for classifier.
Assign any function to a classifier node.
input width is fixed.
# TODO Need a better predictor.
"""
__author__ = 'Abhishek Rao'
# Headers
import numpy as np
from sklearn import svm
import math
import matplotlib.pyplot as plt
import pickle
import os.path
from sklearn.metrics import accuracy_score
import School
# Constants
# Classes
class ClassifierNode:
""" A node that contains classifier, it's input address and output address.
"""
def __init__(self, end_in_address, out_address, classifier_name='Default',
given_predictor=None):
self.out_address = out_address
self.end_in_address = end_in_address # end column
self.label = classifier_name # The name of this concept. e.g. like apple etc.
# Check whether to create a standard classifier or a custom, given one.
if given_predictor:
self.given_predictor = given_predictor
self.classifier_type = 'custom'
else:
self.classifier = svm.LinearSVC(dual=False, penalty='l1')
self.classifier_type = 'standard'
def fit(self, x_in, y):
new_x_in = x_in[:, :self.end_in_address]
self.classifier.fit(new_x_in, y)
def predict(self, x_in):
"""
Give output for the current classifier. Note: instead of predicting a hard 1 or 0, it is better to return a probability (soft prediction).
:param x_in: The Classifier banks working memory, full matrix_in.
:return: A column of predicted values.
"""
new_x_in = x_in[:, :self.end_in_address]
if self.classifier_type == 'standard':
dec_fx_in = self.classifier.decision_function(new_x_in)
else:
dec_fx_in = self.given_predictor(new_x_in)
# Convert it into mapping between 0 to 1 instead of -1 to 1
return np.array([sigmoid_10(i) for i in dec_fx_in])
class SimpleClassifierBank:
""" A machine which stores both input X and the current output of bunch of classifiers.
API should be similar to scikit learn"""
def __init__(self, max_width, input_width, height):
"""
Initialize this class.
:rtype : object self
:param max_width: maximum data dimension in current working memory, should be greater than
input_width.
:param input_width: maximum input dimension.
:param height: maximum number of input samples
:return: None
"""
self.current_working_memory = np.zeros([height, max_width])
self.classifiers_out_address_start = input_width # the start of classifiers output.
self.classifiers_current_count = 0 # starting address for output for new classifier
self.classifiers_list = []
def predict(self, x_pred):
"""Give out what it thinks from the input. Input x_pred should be 2 dimensional.
:param: x_pred: input, dimension 2, (samples x_pred dimension)"""
self.current_working_memory *= 0 # Flush the current input
x_pred = np.array(x_pred)
input_number_samples, input_feature_dimension = x_pred.shape
if len(x_pred.shape) != 2:
print "Error in predict. Input dimension should be 2"
raise ValueError
self.current_working_memory[:input_number_samples, :input_feature_dimension] = x_pred
for classifier_i in self.classifiers_list:
predicted_value = classifier_i.predict(self.current_working_memory)
predicted_shape = predicted_value.shape
if len(predicted_shape) < 2:
predicted_value = predicted_value.reshape(-1, 1)
predicted_shape = predicted_value.shape
self.current_working_memory[:predicted_shape[0], classifier_i.out_address] = predicted_value
# need to return the rightmost nonzero column.
for column_j in range(self.current_working_memory.shape[1])[::-1]: # reverse traverse through columns
if np.any(self.current_working_memory[:input_number_samples, column_j]):
soft_dec = self.current_working_memory[:input_number_samples, column_j]
return np.array(soft_dec > 0.5, dtype=np.int16)
print "Can't find any nonzero column"
return self.current_working_memory[:, 0]
def fit(self, x_in, y, task_name='Default'):
| new_classifier = ClassifierNode(
end_in_address=self.classifiers_out_address_start + self.classifiers_current_count,
out_address=[self.classifiers_out_address_start + self.classifiers_current_count + 1],
classifier_name=task_name)
self.classifiers_current_count += 1
# Need to take care of mismatch in length of working memory and input samples.
new_classifier.fit(self.current_working_memory[:input_number_samples], y)
self.classifiers_list.append(new_classifier)
def fit_custom_fx(self, custom_function, input_width, output_width, task_name):
"""
Push in a new custom function to classifiers list.
:param custom_function: The function that will be used to predict. Should take in a 2D array input and
give out a 2d array of same height and variable width.
:param input_width: The width of input.
:param output_width: The width of output. If a single neuron this is one.
:param task_name: name of this function
:return: None
"""
new_classifier = ClassifierNode(
end_in_address=input_width,
out_address=self.classifiers_out_address_start + self.classifiers_current_count + np.arange(output_width),
classifier_name=task_name,
given_predictor=custom_function
)
self.classifiers_current_count += output_width
self.classifiers_list.append(new_classifier)
def status(self):
"""Gives out the current status, like number of classifier and prints their values"""
print 'Currently there are ', len(self.classifiers_list), ' classifiers. They are'
classifiers_coefficients = np.zeros(self.current_working_memory.shape)
print [classifier_i.label for classifier_i in self.classifiers_list]
for count, classifier_i in enumerate(self.classifiers_list):
coeffs_i = classifier_i.classifier.coef_ \
if classifier_i.classifier_type == 'standard' else np.zeros([1, 1])
classifiers_coefficients[count, :coeffs_i.shape[1]] = coeffs_i
# print 'Classifier: ', classifier_i
# print 'Classifier name: ', classifier_i.label
# print 'Out address', classifier_i.out_address
# print 'In address', classifier_i.end_in_address
# print 'Coefficients: ', classifier_i.classifier.coef_, classifier_i.classifier.intercept_
plt.imshow(self.current_working_memory, interpolation='none', cmap='gray')
plt.title('Current working memory')
plt.figure()
plt.imshow(classifiers_coefficients, interpolation='none', cmap='gray')
plt.title('Classifier coefficients')
plt.show()
def remove_classifier(self, classifier_name):
"""
Removes the classifier whose name is same as classifier_name
:param classifier_name: the label of the classifier to be removed.
:return: the index of removed classifier. -1 if not found.
"""
labels_list = [classifier_i.label for classifier_i in self.classifiers_list]
try:
# list.index raises ValueError when the label is missing, so the lookup sits inside the try block.
removing_index = labels_list.index(classifier_name)
except ValueError:
print 'The specified label does not exist.'
return -1
self.classifiers_list.pop(removing_index)
print 'Classifier was removed. Its name was', classifier_name
return removing_index
def score(self, x_in, y):
"""
Gives the accuracy between the predictions for x_in and y
:param x_in: 2d matrix, samples x_in dimension
:param y: actual label
:return: float, between 0 to 1
"""
yp_score = self.predict(x_in)
return accuracy_score(y, y_pred=yp_score)
def generic_task(self, x_in, y, task_name):
"""
A generic framework to train on different tasks.
"""
self.fit(x_in, y, task_name=task_name)
print 'The score for task ', task_name, ' is ', self.score(x_in, y)
# Global functions
# Reason for the factor of 10 in the sigmoid is to get a sharper distinction between classes.
def sigmoid_10(x):
return 1 / (1 + math.exp(-10*x))
# Following are required | """
Adds a new classifier and trains it, similar to the scikit-learn API
:param x_in: 2d Input data
:param y: labels
:return: None
"""
# Check whether the limit on the number of classifiers has been reached.
if self.classifiers_current_count + self.classifiers_out_address_start \
> self.current_working_memory.shape[1]:
print 'No more space for classifier. ERROR'
raise MemoryError
x_in = np.array(x_in)
# Validate the dimensionality before unpacking the shape.
if len(x_in.shape) != 2:
print "Error in fit. Input dimension should be 2"
raise ValueError
input_number_samples, input_feature_dimension = x_in.shape
self.current_working_memory[:x_in.shape[0], :x_in.shape[1]] = x_in
# Procure a new classifier, this might be wasteful, later perhaps reuse classifier
# instead of lavishly getting new ones, chinese restaurant? | identifier_body |
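# --- Illustrative usage sketch (not part of the original file) ---------------
# A minimal, hypothetical example of how the SimpleClassifierBank API above
# might be driven. The sizes, data and task names are invented purely for
# illustration; it assumes numpy is available as `np` and scikit-learn is
# installed, exactly as the class code above already requires. The function is
# defined but never called.
def _demo_classifier_bank():
    bank = SimpleClassifierBank(max_width=50, input_width=20, height=200)
    x = np.random.rand(200, 20)                      # 200 samples, 20 input features
    y = np.array(x[:, 0] > 0.5, dtype=np.int16)      # a toy binary label
    bank.fit(x, y, task_name='first_feature_high')   # adds and trains one LinearSVC node
    score = bank.score(x, y)                         # accuracy on the training data
    # A custom predictor can be pushed in as well; here a simple threshold rule
    # on the first input column stands in for an externally trained model.
    bank.fit_custom_fx(lambda m: np.array(m[:, 0] > 0.5, dtype=np.float64),
                       input_width=1, output_width=1, task_name='threshold_rule')
    return score
# ------------------------------------------------------------------------------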
autoSuggest.js | ();
break;
default:
clearTimeout(timeout);
timeout = setTimeout(onChange, options.delay);
break;
}
}).focus(function(){
hasFocus++;
}).blur(function() {
hasFocus = 0;
if (!config.mouseDownOnSelect) {
hideResults();
}
}).click(function() {
if ( hasFocus++ > 1 && !select.visible() ) {
onChange(0, true);
}
}).bind("search", function() {
var fn = (arguments.length > 1) ? arguments[1] : null;
function findValueCallback(q, data) {
var result;
if( data && data.length ) {
for (var i=0; i < data.length; i++) {
if( data[i].result.toLowerCase() == q.toLowerCase() ) {
result = data[i];
break;
}
}
}
if( typeof fn == "function" ) fn(result);
else $input.trigger("result", result && [result.data, result.value]);
}
$.each(trimWords($input.val()), function(i, value) {
request(value, findValueCallback, findValueCallback);
});
}).bind("setOptions", function() {
$.extend(options, arguments[1]);
}).bind("unautocomplete", function() {
select.unbind();
$input.unbind();
$(input.form).unbind(".autocomplete");
}).bind("input", function() { //ronizhang:针对firefox中文输入bug的处理
if ( hasFocus++ > 1 ) {//&& !select.visible() ) {
onChange(0, true);
}
});
function selectCurrent() {
var selected = select.selected();
if( !selected ){
return false;
}
var v = selected.name;
previousValue = v;
if ( options.multiple ) {
var words = trimWords($input.val());
if ( words.length > 1 ) {
var seperator = options.multipleSeparator.length;
var cursorAt = $(input).selection().start;
var wordAt, progress = 0;
$.each(words, function(i, word) {
progress += word.length;
if (cursorAt <= progress) {
wordAt = i;
return false;
}
progress += seperator;
});
words[wordAt] = v;
v = words.join( options.multipleSeparator );
}
v += options.multipleSeparator;
}
$input.val(v);
hideResultsNow();
$input.trigger("result", [selected.data, selected.value]);
if(options.callback){
options.callback(selected.name,selected.value);
}
return true;
}
/**
* Fire a lookup request for the current input value when it has changed and meets the minChars threshold.
*/
function onChange(crap, skipPrevCheck) {
if( lastKeyPressCode == KEY.DEL ) {
select.hide();
return;
}
var currentValue = $input.val();
if ( skipPrevCheck && currentValue == previousValue )
return;
previousValue = currentValue;
currentValue = lastWord(currentValue);
if ( currentValue.length >= options.minChars) {
$input.addClass(options.loadingClass);
if (!options.matchCase)
currentValue = currentValue.toLowerCase();
request(currentValue, receiveData, hideResultsNow);
} else {
stopLoading();
select.hide();
options.callback('','');
}
};
function trimWords(value) {
if (!value)
return [""];
if (!options.multiple)
return [$.trim(value)];
return $.map(value.split(options.multipleSeparator), function(word) {
return $.trim(value).length ? $.trim(word) : null;
});
}
function lastWord(value) {
if ( !options.multiple )
return value;
var words = trimWords(value);
if (words.length == 1)
return words[0];
var cursorAt = $(input).selection().start;
if (cursorAt == value.length) {
words = trimWords(value)
} else {
words = trimWords(value.replace(value.substring(cursorAt), ""));
}
return words[words.length - 1];
}
function autoFill(q, sValue){
if( options.autoFill && (lastWord($input.val()).toLowerCase() == q.toLowerCase()) && lastKeyPressCode != KEY.BACKSPACE ) {
$input.val($input.val() + sValue.substring(lastWord(previousValue).length));
$(input).selection(previousValue.length, previousValue.length + sValue.length);
}
};
function hideResults() {
clearTimeout(timeout);
timeout = setTimeout(hideResultsNow, 200);
};
function hideResultsNow() {
var wasVisible = select.visible();
select.hide();
clearTimeout(timeout);
stopLoading();
if (options.mustMatch) {
$input.search(
function (result){
if( !result ) {
if (options.multiple) {
var words = trimWords($input.val()).slice(0, -1);
$input.val( words.join(options.multipleSeparator) + (words.length ? options.multipleSeparator : "") );
}
else {
$input.val( "" );
$input.trigger("result", null);
}
}
}
);
}
};
function receiveData(q, data) {
if ( data && data.length && hasFocus ) {
stopLoading();
select.display(data, q);
autoFill(q, data[0].value);
select.show();
} else {
hideResultsNow();
}
};
function request(term, success, failure) {
if (!options.matchCase){
term = term.toLowerCase();
}
var data = {};
if( (typeof options.url == "string") && (options.url.length > 0) ){
var extraParams = {
timestamp: +new Date()
};
$.each(options.extraParams, function(key, param) {
extraParams[key] = typeof param == "function" ? param() : param;
});
/*ronizhang: request handling*/
if(options.forSearch === true){ //special handling when used for search
//get the data to POST
// var _chanel = options.getCurChannel()();
// var _engin = options.getCurEngin()();
// var _qInfo = options.getSmartInfo(_chanel, _engin);
var _chanel = options.enginController.getCurChannel();
var _engin = options.enginController.getCurEngin();
var _qInfo = options.enginModel.getSmartInfo(_chanel, _engin);
if(_qInfo.type == 'jsonp'){//this is a JSONP request
$.requestJsonp(_qInfo.smartUrl, _qInfo.jsonpName, function(data){
var parsed = options.enginModel.getSmartDataHandler && options.enginModel.getSmartDataHandler(data, _chanel, _engin) || parse(data);
if(parsed.length == 0){
options.callback("","-1");
}
success(term, parsed);
}, options.enginModel.getSmartPostParam(lastWord(term), _chanel, _engin), options.postType, $.noop, _qInfo.jpCallbackName);
}
else if(_qInfo.type == 'json'){
$.requestJson(_qInfo.smartUrl, function(data){
var parsed = options.enginModel.getSmartDataHandler && options.enginModel.getSmartDataHandler(data, _chanel, _engin) || parse(data);
if(parsed.length==0){
options.callback("", "-1");
}
success(term, parsed);
}, options.enginModel.getSmartPostParam(lastWord(term), _chanel, _engin), options.postType);
}
}
else{
if(options.jsonp){
$.requestJsonp(options.url, options.jsonpName, function(data){
var parsed = options.parse && options.parse(data) || parse(data);
if(parsed.length == 0){
options.callback("","-1");
}
success(term, parsed);
}, {
w: lastWord(term),//TODO not very generic: the parameter name "w" is hard-coded specifically for search
limit: options.max
},options.postType);
}
else{
$.requestJson(options.url,function(data){
var parsed = options.parse && options.parse(data) || parse(data);
if(parsed.length==0){
options.callback("","-1");
}
success(term, parsed);
},{
autoword: lastWord(term),
limit: options.max
},options.postType);
}
}
} else {
select.emptyList();
failure(term);
}
};
function parse(data) {
var parsed = [];
var rows=data.datalist;
for (var i=0; i <rows .length; i++) {
var row = rows[i];
if (row) {
parsed[parsed.length] = {
name: row.name,
value: row.value,
result: options.formatResult && options.formatResu | lt(row, row[0]) || row.value
};
}
}
| conditional_block |
|
autoSuggest.js | case KEY.PAGEDOWN:
event.preventDefault();
if ( select.visible() ) {
select.pageDown();
} else {
onChange(0, true);
}
break;
case options.multiple && $.trim(options.multipleSeparator) == "," && KEY.COMMA:
case KEY.TAB:
case KEY.RETURN:
if( selectCurrent() ) {
event.preventDefault();
blockSubmit = true;
return false;
}
break;
case KEY.ESC:
select.hide();
break;
default:
clearTimeout(timeout); | }).focus(function(){
hasFocus++;
}).blur(function() {
hasFocus = 0;
if (!config.mouseDownOnSelect) {
hideResults();
}
}).click(function() {
if ( hasFocus++ > 1 && !select.visible() ) {
onChange(0, true);
}
}).bind("search", function() {
var fn = (arguments.length > 1) ? arguments[1] : null;
function findValueCallback(q, data) {
var result;
if( data && data.length ) {
for (var i=0; i < data.length; i++) {
if( data[i].result.toLowerCase() == q.toLowerCase() ) {
result = data[i];
break;
}
}
}
if( typeof fn == "function" ) fn(result);
else $input.trigger("result", result && [result.data, result.value]);
}
$.each(trimWords($input.val()), function(i, value) {
request(value, findValueCallback, findValueCallback);
});
}).bind("setOptions", function() {
$.extend(options, arguments[1]);
}).bind("unautocomplete", function() {
select.unbind();
$input.unbind();
$(input.form).unbind(".autocomplete");
}).bind("input", function() { //ronizhang:针对firefox中文输入bug的处理
if ( hasFocus++ > 1 ) {//&& !select.visible() ) {
onChange(0, true);
}
});
function selectCurrent() {
var selected = select.selected();
if( !selected ){
return false;
}
var v = selected.name;
previousValue = v;
if ( options.multiple ) {
var words = trimWords($input.val());
if ( words.length > 1 ) {
var seperator = options.multipleSeparator.length;
var cursorAt = $(input).selection().start;
var wordAt, progress = 0;
$.each(words, function(i, word) {
progress += word.length;
if (cursorAt <= progress) {
wordAt = i;
return false;
}
progress += seperator;
});
words[wordAt] = v;
v = words.join( options.multipleSeparator );
}
v += options.multipleSeparator;
}
$input.val(v);
hideResultsNow();
$input.trigger("result", [selected.data, selected.value]);
if(options.callback){
options.callback(selected.name,selected.value);
}
return true;
}
/**
* Fire a lookup request for the current input value when it has changed and meets the minChars threshold.
*/
function onChange(crap, skipPrevCheck) {
if( lastKeyPressCode == KEY.DEL ) {
select.hide();
return;
}
var currentValue = $input.val();
if ( skipPrevCheck && currentValue == previousValue )
return;
previousValue = currentValue;
currentValue = lastWord(currentValue);
if ( currentValue.length >= options.minChars) {
$input.addClass(options.loadingClass);
if (!options.matchCase)
currentValue = currentValue.toLowerCase();
request(currentValue, receiveData, hideResultsNow);
} else {
stopLoading();
select.hide();
options.callback('','');
}
};
function trimWords(value) {
if (!value)
return [""];
if (!options.multiple)
return [$.trim(value)];
return $.map(value.split(options.multipleSeparator), function(word) {
return $.trim(value).length ? $.trim(word) : null;
});
}
function lastWord(value) {
if ( !options.multiple )
return value;
var words = trimWords(value);
if (words.length == 1)
return words[0];
var cursorAt = $(input).selection().start;
if (cursorAt == value.length) {
words = trimWords(value)
} else {
words = trimWords(value.replace(value.substring(cursorAt), ""));
}
return words[words.length - 1];
}
function autoFill(q, sValue){
if( options.autoFill && (lastWord($input.val()).toLowerCase() == q.toLowerCase()) && lastKeyPressCode != KEY.BACKSPACE ) {
$input.val($input.val() + sValue.substring(lastWord(previousValue).length));
$(input).selection(previousValue.length, previousValue.length + sValue.length);
}
};
function hideResults() {
clearTimeout(timeout);
timeout = setTimeout(hideResultsNow, 200);
};
function hideResultsNow() {
var wasVisible = select.visible();
select.hide();
clearTimeout(timeout);
stopLoading();
if (options.mustMatch) {
$input.search(
function (result){
if( !result ) {
if (options.multiple) {
var words = trimWords($input.val()).slice(0, -1);
$input.val( words.join(options.multipleSeparator) + (words.length ? options.multipleSeparator : "") );
}
else {
$input.val( "" );
$input.trigger("result", null);
}
}
}
);
}
};
function receiveData(q, data) {
if ( data && data.length && hasFocus ) {
stopLoading();
select.display(data, q);
autoFill(q, data[0].value);
select.show();
} else {
hideResultsNow();
}
};
function request(term, success, failure) {
if (!options.matchCase){
term = term.toLowerCase();
}
var data = {};
if( (typeof options.url == "string") && (options.url.length > 0) ){
var extraParams = {
timestamp: +new Date()
};
$.each(options.extraParams, function(key, param) {
extraParams[key] = typeof param == "function" ? param() : param;
});
/*ronizhang: request handling*/
if(options.forSearch === true){ //special handling when used for search
//get the data to POST
// var _chanel = options.getCurChannel()();
// var _engin = options.getCurEngin()();
// var _qInfo = options.getSmartInfo(_chanel, _engin);
var _chanel = options.enginController.getCurChannel();
var _engin = options.enginController.getCurEngin();
var _qInfo = options.enginModel.getSmartInfo(_chanel, _engin);
if(_qInfo.type == 'jsonp'){//this is a JSONP request
$.requestJsonp(_qInfo.smartUrl, _qInfo.jsonpName, function(data){
var parsed = options.enginModel.getSmartDataHandler && options.enginModel.getSmartDataHandler(data, _chanel, _engin) || parse(data);
if(parsed.length == 0){
options.callback("","-1");
}
success(term, parsed);
}, options.enginModel.getSmartPostParam(lastWord(term), _chanel, _engin), options.postType, $.noop, _qInfo.jpCallbackName);
}
else if(_qInfo.type == 'json'){
$.requestJson(_qInfo.smartUrl, function(data){
var parsed = options.enginModel.getSmartDataHandler && options.enginModel.getSmartDataHandler(data, _chanel, _engin) || parse(data);
if(parsed.length==0){
options.callback("", "-1");
}
success(term, parsed);
}, options.enginModel.getSmartPostParam(lastWord(term), _chanel, _engin), options.postType);
}
}
else{
if(options.jsonp){
$.requestJsonp(options.url, options.jsonpName, function(data){
var parsed = options.parse && options.parse(data) || parse(data);
if(parsed.length == 0){
options.callback("","-1");
}
success(term, parsed);
}, {
w: lastWord(term),//TODO not very generic: the parameter name "w" is hard-coded specifically for search
limit: options.max
},options.postType);
}
else{
$.requestJson(options.url,function(data){
var parsed = options.parse && options.parse(data) || parse(data);
if(parsed.length==0){
options.callback("","-1");
}
success(term, parsed);
},{
autoword: lastWord(term),
limit: options.max
},options.postType);
}
}
} else {
select.emptyList();
| timeout = setTimeout(onChange, options.delay);
break;
} | random_line_split |
autoSuggest.js | KEY.PAGEDOWN:
event.preventDefault();
if ( select.visible() ) {
select.pageDown();
} else {
onChange(0, true);
}
break;
case options.multiple && $.trim(options.multipleSeparator) == "," && KEY.COMMA:
case KEY.TAB:
case KEY.RETURN:
if( selectCurrent() ) {
event.preventDefault();
blockSubmit = true;
return false;
}
break;
case KEY.ESC:
select.hide();
break;
default:
clearTimeout(timeout);
timeout = setTimeout(onChange, options.delay);
break;
}
}).focus(function(){
hasFocus++;
}).blur(function() {
hasFocus = 0;
if (!config.mouseDownOnSelect) {
hideResults();
}
}).click(function() {
if ( hasFocus++ > 1 && !select.visible() ) {
onChange(0, true);
}
}).bind("search", function() {
var fn = (arguments.length > 1) ? arguments[1] : null;
function findValueCallback(q, data) {
var result;
if( data && data.length ) {
for (var i=0; i < data.length; i++) {
if( data[i].result.toLowerCase() == q.toLowerCase() ) {
result = data[i];
break;
}
}
}
if( typeof fn == "f | {
select.unbind();
$input.unbind();
$(input.form).unbind(".autocomplete");
}).bind("input", function() { //ronizhang:针对firefox中文输入bug的处理
if ( hasFocus++ > 1 ) {//&& !select.visible() ) {
onChange(0, true);
}
});
function selectCurrent() {
var selected = select.selected();
if( !selected ){
return false;
}
var v = selected.name;
previousValue = v;
if ( options.multiple ) {
var words = trimWords($input.val());
if ( words.length > 1 ) {
var seperator = options.multipleSeparator.length;
var cursorAt = $(input).selection().start;
var wordAt, progress = 0;
$.each(words, function(i, word) {
progress += word.length;
if (cursorAt <= progress) {
wordAt = i;
return false;
}
progress += seperator;
});
words[wordAt] = v;
v = words.join( options.multipleSeparator );
}
v += options.multipleSeparator;
}
$input.val(v);
hideResultsNow();
$input.trigger("result", [selected.data, selected.value]);
if(options.callback){
options.callback(selected.name,selected.value);
}
return true;
}
/**
* Fire a lookup request for the current input value when it has changed and meets the minChars threshold.
*/
function onChange(crap, skipPrevCheck) {
if( lastKeyPressCode == KEY.DEL ) {
select.hide();
return;
}
var currentValue = $input.val();
if ( skipPrevCheck && currentValue == previousValue )
return;
previousValue = currentValue;
currentValue = lastWord(currentValue);
if ( currentValue.length >= options.minChars) {
$input.addClass(options.loadingClass);
if (!options.matchCase)
currentValue = currentValue.toLowerCase();
request(currentValue, receiveData, hideResultsNow);
} else {
stopLoading();
select.hide();
options.callback('','');
}
};
function trimWords(value) {
if (!value)
return [""];
if (!options.multiple)
return [$.trim(value)];
return $.map(value.split(options.multipleSeparator), function(word) {
return $.trim(value).length ? $.trim(word) : null;
});
}
function lastWord(value) {
if ( !options.multiple )
return value;
var words = trimWords(value);
if (words.length == 1)
return words[0];
var cursorAt = $(input).selection().start;
if (cursorAt == value.length) {
words = trimWords(value)
} else {
words = trimWords(value.replace(value.substring(cursorAt), ""));
}
return words[words.length - 1];
}
function autoFill(q, sValue){
if( options.autoFill && (lastWord($input.val()).toLowerCase() == q.toLowerCase()) && lastKeyPressCode != KEY.BACKSPACE ) {
$input.val($input.val() + sValue.substring(lastWord(previousValue).length));
$(input).selection(previousValue.length, previousValue.length + sValue.length);
}
};
function hideResults() {
clearTimeout(timeout);
timeout = setTimeout(hideResultsNow, 200);
};
function hideResultsNow() {
var wasVisible = select.visible();
select.hide();
clearTimeout(timeout);
stopLoading();
if (options.mustMatch) {
$input.search(
function (result){
if( !result ) {
if (options.multiple) {
var words = trimWords($input.val()).slice(0, -1);
$input.val( words.join(options.multipleSeparator) + (words.length ? options.multipleSeparator : "") );
}
else {
$input.val( "" );
$input.trigger("result", null);
}
}
}
);
}
};
function receiveData(q, data) {
if ( data && data.length && hasFocus ) {
stopLoading();
select.display(data, q);
autoFill(q, data[0].value);
select.show();
} else {
hideResultsNow();
}
};
function request(term, success, failure) {
if (!options.matchCase){
term = term.toLowerCase();
}
var data = {};
if( (typeof options.url == "string") && (options.url.length > 0) ){
var extraParams = {
timestamp: +new Date()
};
$.each(options.extraParams, function(key, param) {
extraParams[key] = typeof param == "function" ? param() : param;
});
/*ronizhang: request handling*/
if(options.forSearch === true){ //special handling when used for search
//get the data to POST
// var _chanel = options.getCurChannel()();
// var _engin = options.getCurEngin()();
// var _qInfo = options.getSmartInfo(_chanel, _engin);
var _chanel = options.enginController.getCurChannel();
var _engin = options.enginController.getCurEngin();
var _qInfo = options.enginModel.getSmartInfo(_chanel, _engin);
if(_qInfo.type == 'jsonp'){//this is a JSONP request
$.requestJsonp(_qInfo.smartUrl, _qInfo.jsonpName, function(data){
var parsed = options.enginModel.getSmartDataHandler && options.enginModel.getSmartDataHandler(data, _chanel, _engin) || parse(data);
if(parsed.length == 0){
options.callback("","-1");
}
success(term, parsed);
}, options.enginModel.getSmartPostParam(lastWord(term), _chanel, _engin), options.postType, $.noop, _qInfo.jpCallbackName);
}
else if(_qInfo.type == 'json'){
$.requestJson(_qInfo.smartUrl, function(data){
var parsed = options.enginModel.getSmartDataHandler && options.enginModel.getSmartDataHandler(data, _chanel, _engin) || parse(data);
if(parsed.length==0){
options.callback("", "-1");
}
success(term, parsed);
}, options.enginModel.getSmartPostParam(lastWord(term), _chanel, _engin), options.postType);
}
}
else{
if(options.jsonp){
$.requestJsonp(options.url, options.jsonpName, function(data){
var parsed = options.parse && options.parse(data) || parse(data);
if(parsed.length == 0){
options.callback("","-1");
}
success(term, parsed);
}, {
w: lastWord(term),//TODO not very generic: the parameter name "w" is hard-coded specifically for search
limit: options.max
},options.postType);
}
else{
$.requestJson(options.url,function(data){
var parsed = options.parse && options.parse(data) || parse(data);
if(parsed.length==0){
options.callback("","-1");
}
success(term, parsed);
},{
autoword: lastWord(term),
limit: options.max
},options.postType);
}
}
} else {
select.emptyList | unction" ) fn(result);
else $input.trigger("result", result && [result.data, result.value]);
}
$.each(trimWords($input.val()), function(i, value) {
request(value, findValueCallback, findValueCallback);
});
}).bind("setOptions", function() {
$.extend(options, arguments[1]);
}).bind("unautocomplete", function() | identifier_body |
autoSuggest.js | KEY.PAGEDOWN:
event.preventDefault();
if ( select.visible() ) {
select.pageDown();
} else {
onChange(0, true);
}
break;
case options.multiple && $.trim(options.multipleSeparator) == "," && KEY.COMMA:
case KEY.TAB:
case KEY.RETURN:
if( selectCurrent() ) {
event.preventDefault();
blockSubmit = true;
return false;
}
break;
case KEY.ESC:
select.hide();
break;
default:
clearTimeout(timeout);
timeout = setTimeout(onChange, options.delay);
break;
}
}).focus(function(){
hasFocus++;
}).blur(function() {
hasFocus = 0;
if (!config.mouseDownOnSelect) {
hideResults();
}
}).click(function() {
if ( hasFocus++ > 1 && !select.visible() ) {
onChange(0, true);
}
}).bind("search", function() {
var fn = (arguments.length > 1) ? arguments[1] : null;
function findValueCallback(q, data) {
var result;
if( data && data.length ) {
for (var i=0; i < data.length; i++) {
if( data[i].result.toLowerCase() == q.toLowerCase() ) {
result = data[i];
break;
}
}
}
if( typeof fn == "function" ) fn(result);
else $input.trigger("result", result && [result.data, result.value]);
}
$.each(trimWords($input.val()), function(i, value) {
request(value, findValueCallback, findValueCallback);
});
}).bind("setOptions", function() {
$.extend(options, arguments[1]);
}).bind("unautocomplete", function() {
select.unbind();
$input.unbind();
$(input.form).unbind(".autocomplete");
}).bind("input", function() { //ronizhang:针对firefox中文输入bug的处理
if ( hasFocus++ > 1 ) {//&& !select.visible() ) {
onChange(0, true);
}
});
function selectCurrent() {
var selected = select.selected();
if( !selected ){
return false;
}
var v = selected.name;
previousValue = v;
if ( options.multiple ) {
var words = trimWords($input.val());
if ( words.length > 1 ) {
var seperator = options.multipleSeparator.length;
var cursorAt = $(input).selection().start;
var wordAt, progress = 0;
$.each(words, function(i, word) {
progress += word.length;
if (cursorAt <= progress) {
wordAt = i;
return false;
}
progress += seperator;
});
words[wordAt] = v;
v = words.join( options.multipleSeparator );
}
v += options.multipleSeparator;
}
$input.val(v);
hideResultsNow();
$input.trigger("result", [selected.data, selected.value]);
if(options.callback){
options.callback(selected.name,selected.value);
}
return true;
}
/**
* Fire a lookup request for the current input value when it has changed and meets the minChars threshold.
*/
function onChange(crap, skipPrevCheck) {
if( lastKeyPressCode == KEY.DEL ) {
select.hide();
return;
}
var currentValue = $input.val();
if ( skipPrevCheck && currentValue == previousValue )
return;
previousValue = currentValue;
currentValue = lastWord(currentValue);
if ( currentValue.length >= options.minChars) {
$input.addClass(options.loadingClass);
if (!options.matchCase)
currentValue = currentValue.toLowerCase();
request(currentValue, receiveData, hideResultsNow);
} else {
stopLoading();
select.hide();
options.callback('','');
}
};
function trimWords(value) {
if (!value)
return [""];
if (!options.multiple)
return [$.trim(value)];
return $.map(value.split(options.multipleSeparator), function(word) {
return $.trim(value).length ? $.trim(word) : null;
});
}
function lastWord(value) {
if ( !options.multiple )
return value;
var words = trimWords(value);
if (words.length == 1)
return words[0];
var cursorAt = $(input).selection().start;
if (cursorAt == value.length) {
words = trimWords(value)
} else {
words = trimWords(value.replace(value.substring(cursorAt), ""));
}
return words[words.length - 1];
}
function autoFill(q, sValue){
if( options.autoFill && (lastWord($input.val()).toLowerCase() == q.toLowerCase()) && lastKeyPressCode != KEY.BACKSPACE ) {
$input.val($input.val() + sValue.substring(lastWord(previousValue).length));
$(input).selection( | Value.length, previousValue.length + sValue.length);
}
};
function hideResults() {
clearTimeout(timeout);
timeout = setTimeout(hideResultsNow, 200);
};
function hideResultsNow() {
var wasVisible = select.visible();
select.hide();
clearTimeout(timeout);
stopLoading();
if (options.mustMatch) {
$input.search(
function (result){
if( !result ) {
if (options.multiple) {
var words = trimWords($input.val()).slice(0, -1);
$input.val( words.join(options.multipleSeparator) + (words.length ? options.multipleSeparator : "") );
}
else {
$input.val( "" );
$input.trigger("result", null);
}
}
}
);
}
};
function receiveData(q, data) {
if ( data && data.length && hasFocus ) {
stopLoading();
select.display(data, q);
autoFill(q, data[0].value);
select.show();
} else {
hideResultsNow();
}
};
function request(term, success, failure) {
if (!options.matchCase){
term = term.toLowerCase();
}
var data = {};
if( (typeof options.url == "string") && (options.url.length > 0) ){
var extraParams = {
timestamp: +new Date()
};
$.each(options.extraParams, function(key, param) {
extraParams[key] = typeof param == "function" ? param() : param;
});
/*ronizhang: request handling*/
if(options.forSearch === true){ //special handling when used for search
//get the data to POST
// var _chanel = options.getCurChannel()();
// var _engin = options.getCurEngin()();
// var _qInfo = options.getSmartInfo(_chanel, _engin);
var _chanel = options.enginController.getCurChannel();
var _engin = options.enginController.getCurEngin();
var _qInfo = options.enginModel.getSmartInfo(_chanel, _engin);
if(_qInfo.type == 'jsonp'){//this is a JSONP request
$.requestJsonp(_qInfo.smartUrl, _qInfo.jsonpName, function(data){
var parsed = options.enginModel.getSmartDataHandler && options.enginModel.getSmartDataHandler(data, _chanel, _engin) || parse(data);
if(parsed.length == 0){
options.callback("","-1");
}
success(term, parsed);
}, options.enginModel.getSmartPostParam(lastWord(term), _chanel, _engin), options.postType, $.noop, _qInfo.jpCallbackName);
}
else if(_qInfo.type == 'json'){
$.requestJson(_qInfo.smartUrl, function(data){
var parsed = options.enginModel.getSmartDataHandler && options.enginModel.getSmartDataHandler(data, _chanel, _engin) || parse(data);
if(parsed.length==0){
options.callback("", "-1");
}
success(term, parsed);
}, options.enginModel.getSmartPostParam(lastWord(term), _chanel, _engin), options.postType);
}
}
else{
if(options.jsonp){
$.requestJsonp(options.url, options.jsonpName, function(data){
var parsed = options.parse && options.parse(data) || parse(data);
if(parsed.length == 0){
options.callback("","-1");
}
success(term, parsed);
}, {
w: lastWord(term),//TODO not very generic: the parameter name "w" is hard-coded specifically for search
limit: options.max
},options.postType);
}
else{
$.requestJson(options.url,function(data){
var parsed = options.parse && options.parse(data) || parse(data);
if(parsed.length==0){
options.callback("","-1");
}
success(term, parsed);
},{
autoword: lastWord(term),
limit: options.max
},options.postType);
}
}
} else {
select.emptyList();
| previous | identifier_name |
Canviz.js | < keysLength; ++i) {
var key = keys[i];
Canviz.colors[key] = colors[key];
}
};
// Constants
var MAX_XDOT_VERSION = '1.6';
// An alphanumeric string or a number or a double-quoted string or an HTML string
var ID_MATCH = '([a-zA-Z\u0080-\uFFFF_][0-9a-zA-Z\u0080-\uFFFF_]*|-?(?:\\.\\d+|\\d+(?:\\.\\d*)?)|"(?:\\\\"|[^"])*"|<(?:<[^>]*>|[^<>]+?)+>)';
// ID or ID:port or ID:compassPoint or ID:port:compassPoint
var NODE_ID_MATCH = ID_MATCH + '(?::' + ID_MATCH + ')?(?::' + ID_MATCH + ')?';
// Regular expressions used by the parser
var GRAPH_MATCH_RE = new RegExp('^(strict\\s+)?(graph|digraph)(?:\\s+' + ID_MATCH + ')?\\s*{$', 'i');
var SUBGRAPH_MATCH_RE = new RegExp('^(?:subgraph\\s+)?' + ID_MATCH + '?\\s*{$', 'i');
var NODE_MATCH_RE = new RegExp('^(' + NODE_ID_MATCH + ')\\s+\\[(.+)\\];$');
var EDGE_MATCH_RE = new RegExp('^(' + NODE_ID_MATCH + '\\s*-[->]\\s*' + NODE_ID_MATCH + ')\\s+\\[(.+)\\];$');
var ATTR_MATCH_RE = new RegExp('^' + ID_MATCH + '=' + ID_MATCH + '(?:[,\\s]+|$)');
// Prototype
Canviz.prototype = {
constructor: Canviz,
setScale: function (scale) {
this.scale = scale;
},
setImagePath: function (imagePath) {
this.imagePath = imagePath;
},
setTextMode: function (textMode) {
if (~indexOf(this._textModes, textMode)) this.textMode = textMode;
else debug('unsupported text mode ' + textMode);
},
load: function (url, urlParams, callback) {
if (urlParams) return console.log('urlParams not supported');
var self = this;
loadFile(url, function (err, text) {
if (err) {
console.log(err.message);
} else {
self.parse(text);
if (callback) callback();
}
});
},
parse: function (xdot) {
if (IS_BROWSER) {
if (document.getElementById('debug_output')) {
document.getElementById('debug_output').innerHTML = '';
}
}
this.graphs = [];
this.images = {};
this.paddingX = this.paddingY = XDOT_DPI * 0.0555;
this.dpi = 96;
var bgColor = 'white';
var bbEnlarge = this.rotate = 0; // false
var containers = [];
var lines = xdot.split(/\r?\n/);
var linesLength = lines.length;
var line, lastChar, matches, rootGraph, isGraph, entity, entityName, attrs, attrName, attrValue, attrHash, drawAttrHash, maxHeight, maxWidth;
var i = this.width = this.height = this.marginX = this.marginY = this.numImages = this.numImagesFinished = 0;
while (i < linesLength) {
line = lines[i++].replace(/^\s+/, '');
if ('' != line && '#' != line.substr(0, 1)) {
while (i < linesLength && ';' != (lastChar = line.substr(line.length - 1, line.length)) && '{' != lastChar && '}' != lastChar) {
if ('\\' == lastChar) {
line = line.substr(0, line.length - 1);
}
line += lines[i++];
}
// debug(line);
if (containers.length) {
matches = line.match(SUBGRAPH_MATCH_RE);
if (matches) {
containers.unshift(Graph(matches[1], this, rootGraph, containers[0]));
containers[1].subgraphs.push(containers[0]);
// debug('subgraph: ' + containers[0].name);
}
} else {
matches = line.match(GRAPH_MATCH_RE);
if (matches) {
rootGraph = Graph(matches[3], this);
rootGraph.strict = !!matches[1];
rootGraph.type = 'graph' == matches[2] ? 'undirected' : 'directed';
rootGraph.attrs.xdotversion = '1.0';
containers.unshift(rootGraph);
this.graphs.push(rootGraph);
// debug('graph: ' + containers[0].name);
}
}
if (matches) {
// debug('begin container ' + containers[0].name);
} else if ('}' == line) {
// debug('end container ' + containers[0].name);
containers.shift();
if (!containers.length) {
break;
}
} else {
matches = line.match(NODE_MATCH_RE);
if (matches) {
entityName = matches[2];
attrs = matches[5];
drawAttrHash = containers[0].drawAttrs;
isGraph = false;
switch (entityName) {
case 'graph':
attrHash = containers[0].attrs;
isGraph = true;
break;
case 'node':
attrHash = containers[0].nodeAttrs;
break;
case 'edge':
attrHash = containers[0].edgeAttrs;
break;
default:
entity = Node(entityName, this, rootGraph, containers[0]);
attrHash = entity.attrs;
drawAttrHash = entity.drawAttrs;
containers[0].nodes.push(entity);
}
// debug('node: ' + entityName);
} else {
matches = line.match(EDGE_MATCH_RE);
if (matches) {
entityName = matches[1];
attrs = matches[8];
entity = Edge(entityName, this, rootGraph, containers[0], matches[2], matches[5]);
attrHash = entity.attrs;
drawAttrHash = entity.drawAttrs;
containers[0].edges.push(entity);
// debug('edge: ' + entityName);
}
}
if (matches) {
do {
if (!attrs.length) {
break;
}
matches = attrs.match(ATTR_MATCH_RE);
if (matches) {
attrs = attrs.substr(matches[0].length);
attrName = matches[1];
attrValue = unescapeAttr(matches[2]);
if (/^_.*draw_$/.test(attrName)) {
drawAttrHash[attrName] = attrValue;
} else {
attrHash[attrName] = attrValue;
}
// debug(attrName + ' ' + attrValue);
if (isGraph && containers.length < 2) {
switch (attrName) {
case 'bb':
attrValue = attrValue.split(',');
this.width = Number(attrValue[2]);
this.height = Math.abs(attrValue[3] - attrValue[1]);
// This is the opposite of the dot "-y" flag because canvas Y-coordinates are already inverted from Graphviz coordinates.
this.invertY = attrValue[3] > 0;
break;
case 'bgcolor':
bgColor = attrValue;
break;
case 'dpi':
this.dpi = attrValue;
break;
case 'landscape':
this.rotate = 'true' == attrValue || Number(attrValue);
break;
case 'margin':
attrValue = attrValue.split(',');
this.marginX = XDOT_DPI * attrValue[0];
this.marginY = XDOT_DPI * attrValue[attrValue.length - 1];
break;
case 'orientation':
this.rotate = 'l' == attrValue.substr(0, 1).toLowerCase();
break;
case 'pad':
attrValue = attrValue.split(',');
this.paddingX = XDOT_DPI * attrValue[0];
this.paddingY = XDOT_DPI * attrValue[attrValue.length - 1];
break;
case 'rotate':
this.rotate = 90 == attrValue;
break;
case 'size':
if (attrValue.substr(attrValue.length - 1) == '!') {
bbEnlarge = 1; // true
attrValue = attrValue.substr(0, attrValue.length - 1);
}
attrValue = attrValue.split(',');
maxWidth = XDOT_DPI * attrValue[0];
maxHeight = XDOT_DPI * attrValue[attrValue.length - 1];
break;
case 'xdotversion':
if (versionCompare(attrValue, MAX_XDOT_VERSION) > 0) {
debug('unsupported xdotversion ' + attrValue + '; this script currently supports up to xdotversion ' + MAX_XDOT_VERSION);
}
break;
}
}
} else {
debug('can\'t read attributes for entity ' + entityName + ' from ' + attrs); | } while (matches);
}
}
}
}
function xdotRound(n) {
var digits = 2;
var mult = Math.pow(10, digits);
return Math.round(mult * n) / mult;
}
var drawingWidth = this.width + 2 * this.paddingX;
var drawingHeight = this.height + 2 * this.padding | } | random_line_split |
Canviz.js |
this.container.appendChild(this.elements);
textModes.push('dom');
}
this.ctx = this.canvas.getContext('2d');
if (this.ctx.fillText) textModes.push('canvas');
this.setTextMode(textModes[0]);
this.setScale(1);
this.dashLength = 6;
this.dotSpacing = 4;
this.graphs = [];
this.images = {};
this.imagePath = '';
if (url) {
this.load(url, urlParams);
}
}
// Properties
Canviz.Path = Path;
Canviz.colors = {
fallback: {
black: '000000',
lightgrey: 'd3d3d3',
white: 'ffffff'
}
};
Canviz.addColors = function (colors) {
var keys = objectKeys(colors),
keysLength = keys.length;
for (var i = 0; i < keysLength; ++i) {
var key = keys[i];
Canviz.colors[key] = colors[key];
}
};
// Constants
var MAX_XDOT_VERSION = '1.6';
// An alphanumeric string or a number or a double-quoted string or an HTML string
var ID_MATCH = '([a-zA-Z\u0080-\uFFFF_][0-9a-zA-Z\u0080-\uFFFF_]*|-?(?:\\.\\d+|\\d+(?:\\.\\d*)?)|"(?:\\\\"|[^"])*"|<(?:<[^>]*>|[^<>]+?)+>)';
// ID or ID:port or ID:compassPoint or ID:port:compassPoint
var NODE_ID_MATCH = ID_MATCH + '(?::' + ID_MATCH + ')?(?::' + ID_MATCH + ')?';
// Regular expressions used by the parser
var GRAPH_MATCH_RE = new RegExp('^(strict\\s+)?(graph|digraph)(?:\\s+' + ID_MATCH + ')?\\s*{$', 'i');
var SUBGRAPH_MATCH_RE = new RegExp('^(?:subgraph\\s+)?' + ID_MATCH + '?\\s*{$', 'i');
var NODE_MATCH_RE = new RegExp('^(' + NODE_ID_MATCH + ')\\s+\\[(.+)\\];$');
var EDGE_MATCH_RE = new RegExp('^(' + NODE_ID_MATCH + '\\s*-[->]\\s*' + NODE_ID_MATCH + ')\\s+\\[(.+)\\];$');
var ATTR_MATCH_RE = new RegExp('^' + ID_MATCH + '=' + ID_MATCH + '(?:[,\\s]+|$)');
// Prototype
Canviz.prototype = {
constructor: Canviz,
setScale: function (scale) {
this.scale = scale;
},
setImagePath: function (imagePath) {
this.imagePath = imagePath;
},
setTextMode: function (textMode) {
if (~indexOf(this._textModes, textMode)) this.textMode = textMode;
else debug('unsupported text mode ' + textMode);
},
load: function (url, urlParams, callback) {
if (urlParams) return console.log('urlParams not supported');
var self = this;
loadFile(url, function (err, text) {
if (err) {
console.log(err.message);
} else {
self.parse(text);
if (callback) callback();
}
});
},
parse: function (xdot) {
if (IS_BROWSER) {
if (document.getElementById('debug_output')) {
document.getElementById('debug_output').innerHTML = '';
}
}
this.graphs = [];
this.images = {};
this.paddingX = this.paddingY = XDOT_DPI * 0.0555;
this.dpi = 96;
var bgColor = 'white';
var bbEnlarge = this.rotate = 0; // false
var containers = [];
var lines = xdot.split(/\r?\n/);
var linesLength = lines.length;
var line, lastChar, matches, rootGraph, isGraph, entity, entityName, attrs, attrName, attrValue, attrHash, drawAttrHash, maxHeight, maxWidth;
var i = this.width = this.height = this.marginX = this.marginY = this.numImages = this.numImagesFinished = 0;
while (i < linesLength) {
line = lines[i++].replace(/^\s+/, '');
if ('' != line && '#' != line.substr(0, 1)) {
while (i < linesLength && ';' != (lastChar = line.substr(line.length - 1, line.length)) && '{' != lastChar && '}' != lastChar) {
if ('\\' == lastChar) {
line = line.substr(0, line.length - 1);
}
line += lines[i++];
}
// debug(line);
if (containers.length) {
matches = line.match(SUBGRAPH_MATCH_RE);
if (matches) {
containers.unshift(Graph(matches[1], this, rootGraph, containers[0]));
containers[1].subgraphs.push(containers[0]);
// debug('subgraph: ' + containers[0].name);
}
} else {
matches = line.match(GRAPH_MATCH_RE);
if (matches) {
rootGraph = Graph(matches[3], this);
rootGraph.strict = !!matches[1];
rootGraph.type = 'graph' == matches[2] ? 'undirected' : 'directed';
rootGraph.attrs.xdotversion = '1.0';
containers.unshift(rootGraph);
this.graphs.push(rootGraph);
// debug('graph: ' + containers[0].name);
}
}
if (matches) {
// debug('begin container ' + containers[0].name);
} else if ('}' == line) {
// debug('end container ' + containers[0].name);
containers.shift();
if (!containers.length) {
break;
}
} else {
matches = line.match(NODE_MATCH_RE);
if (matches) {
entityName = matches[2];
attrs = matches[5];
drawAttrHash = containers[0].drawAttrs;
isGraph = false;
switch (entityName) {
case 'graph':
attrHash = containers[0].attrs;
isGraph = true;
break;
case 'node':
attrHash = containers[0].nodeAttrs;
break;
case 'edge':
attrHash = containers[0].edgeAttrs;
break;
default:
entity = Node(entityName, this, rootGraph, containers[0]);
attrHash = entity.attrs;
drawAttrHash = entity.drawAttrs;
containers[0].nodes.push(entity);
}
// debug('node: ' + entityName);
} else {
matches = line.match(EDGE_MATCH_RE);
if (matches) {
entityName = matches[1];
attrs = matches[8];
entity = Edge(entityName, this, rootGraph, containers[0], matches[2], matches[5]);
attrHash = entity.attrs;
drawAttrHash = entity.drawAttrs;
containers[0].edges.push(entity);
// debug('edge: ' + entityName);
}
}
if (matches) {
do {
if (!attrs.length) {
break;
}
matches = attrs.match(ATTR_MATCH_RE);
if (matches) {
attrs = attrs.substr(matches[0].length);
attrName = matches[1];
attrValue = unescapeAttr(matches[2]);
if (/^_.*draw_$/.test(attrName)) {
drawAttrHash[attrName] = attrValue;
} else {
attrHash[attrName] = attrValue;
}
// debug(attrName + ' ' + attrValue);
if (isGraph && containers.length < 2) {
switch (attrName) {
case 'bb':
attrValue = attrValue.split(',');
this.width = Number(attrValue[2]);
this.height = Math.abs(attrValue[3] - attrValue[1]);
// This is the opposite of the dot "-y" flag because canvas Y-coordinates are already inverted from Graphviz coordinates.
this.invertY = attrValue[3] > 0;
break;
case 'bgcolor':
bgColor = attrValue;
break;
case 'dpi':
this.dpi = attrValue;
break;
case 'landscape':
this.rotate = 'true' == attrValue || Number(attrValue);
break;
case 'margin':
attrValue = attrValue.split(',');
this.marginX = XDOT_DPI * attrValue[0];
this.marginY = XDOT_DPI * attrValue[attrValue.length - 1];
break;
case 'orientation':
this.rotate = 'l' == attrValue.substr(0, 1).toLowerCase();
break;
case 'pad':
attrValue = attrValue.split(',');
this.paddingX = XDOT_DPI * attrValue[0];
this.paddingY = XDOT_DPI * attrValue[attrValue.length - 1];
break;
case 'rotate':
this.rotate = 90 == attrValue;
break;
case 'size':
if (attrValue.substr(attrValue.length - 1) == '!') {
bbEnlarge = 1; // true
attrValue = attrValue.substr(0, attrValue.length - 1);
| {
G_vmlCanvasManager.initElement(this.canvas);
this.canvas = document.getElementById(this.canvas.id);
} | conditional_block |
|
Canviz.js | this.ctx = this.canvas.getContext('2d');
if (this.ctx.fillText) textModes.push('canvas');
this.setTextMode(textModes[0]);
this.setScale(1);
this.dashLength = 6;
this.dotSpacing = 4;
this.graphs = [];
this.images = {};
this.imagePath = '';
if (url) {
this.load(url, urlParams);
}
}
// Properties
Canviz.Path = Path;
Canviz.colors = {
fallback: {
black: '000000',
lightgrey: 'd3d3d3',
white: 'ffffff'
}
};
Canviz.addColors = function (colors) {
var keys = objectKeys(colors),
keysLength = keys.length;
for (var i = 0; i < keysLength; ++i) {
var key = keys[i];
Canviz.colors[key] = colors[key];
}
};
// Constants
var MAX_XDOT_VERSION = '1.6';
// An alphanumeric string or a number or a double-quoted string or an HTML string
var ID_MATCH = '([a-zA-Z\u0080-\uFFFF_][0-9a-zA-Z\u0080-\uFFFF_]*|-?(?:\\.\\d+|\\d+(?:\\.\\d*)?)|"(?:\\\\"|[^"])*"|<(?:<[^>]*>|[^<>]+?)+>)';
// ID or ID:port or ID:compassPoint or ID:port:compassPoint
var NODE_ID_MATCH = ID_MATCH + '(?::' + ID_MATCH + ')?(?::' + ID_MATCH + ')?';
// Regular expressions used by the parser
var GRAPH_MATCH_RE = new RegExp('^(strict\\s+)?(graph|digraph)(?:\\s+' + ID_MATCH + ')?\\s*{$', 'i');
var SUBGRAPH_MATCH_RE = new RegExp('^(?:subgraph\\s+)?' + ID_MATCH + '?\\s*{$', 'i');
var NODE_MATCH_RE = new RegExp('^(' + NODE_ID_MATCH + ')\\s+\\[(.+)\\];$');
var EDGE_MATCH_RE = new RegExp('^(' + NODE_ID_MATCH + '\\s*-[->]\\s*' + NODE_ID_MATCH + ')\\s+\\[(.+)\\];$');
var ATTR_MATCH_RE = new RegExp('^' + ID_MATCH + '=' + ID_MATCH + '(?:[,\\s]+|$)');
// Prototype
Canviz.prototype = {
constructor: Canviz,
setScale: function (scale) {
this.scale = scale;
},
setImagePath: function (imagePath) {
this.imagePath = imagePath;
},
setTextMode: function (textMode) {
if (~indexOf(this._textModes, textMode)) this.textMode = textMode;
else debug('unsupported text mode ' + textMode);
},
load: function (url, urlParams, callback) {
if (urlParams) return console.log('urlParams not supported');
var self = this;
loadFile(url, function (err, text) {
if (err) {
console.log(err.message);
} else {
self.parse(text);
if (callback) callback();
}
});
},
parse: function (xdot) {
if (IS_BROWSER) {
if (document.getElementById('debug_output')) {
document.getElementById('debug_output').innerHTML = '';
}
}
this.graphs = [];
this.images = {};
this.paddingX = this.paddingY = XDOT_DPI * 0.0555;
this.dpi = 96;
var bgColor = 'white';
var bbEnlarge = this.rotate = 0; // false
var containers = [];
var lines = xdot.split(/\r?\n/);
var linesLength = lines.length;
var line, lastChar, matches, rootGraph, isGraph, entity, entityName, attrs, attrName, attrValue, attrHash, drawAttrHash, maxHeight, maxWidth;
var i = this.width = this.height = this.marginX = this.marginY = this.numImages = this.numImagesFinished = 0;
while (i < linesLength) {
line = lines[i++].replace(/^\s+/, '');
if ('' != line && '#' != line.substr(0, 1)) {
while (i < linesLength && ';' != (lastChar = line.substr(line.length - 1, line.length)) && '{' != lastChar && '}' != lastChar) {
if ('\\' == lastChar) {
line = line.substr(0, line.length - 1);
}
line += lines[i++];
}
// debug(line);
if (containers.length) {
matches = line.match(SUBGRAPH_MATCH_RE);
if (matches) {
containers.unshift(Graph(matches[1], this, rootGraph, containers[0]));
containers[1].subgraphs.push(containers[0]);
// debug('subgraph: ' + containers[0].name);
}
} else {
matches = line.match(GRAPH_MATCH_RE);
if (matches) {
rootGraph = Graph(matches[3], this);
rootGraph.strict = !!matches[1];
rootGraph.type = 'graph' == matches[2] ? 'undirected' : 'directed';
rootGraph.attrs.xdotversion = '1.0';
containers.unshift(rootGraph);
this.graphs.push(rootGraph);
// debug('graph: ' + containers[0].name);
}
}
if (matches) {
// debug('begin container ' + containers[0].name);
} else if ('}' == line) {
// debug('end container ' + containers[0].name);
containers.shift();
if (!containers.length) {
break;
}
} else {
matches = line.match(NODE_MATCH_RE);
if (matches) {
entityName = matches[2];
attrs = matches[5];
drawAttrHash = containers[0].drawAttrs;
isGraph = false;
switch (entityName) {
case 'graph':
attrHash = containers[0].attrs;
isGraph = true;
break;
case 'node':
attrHash = containers[0].nodeAttrs;
break;
case 'edge':
attrHash = containers[0].edgeAttrs;
break;
default:
entity = Node(entityName, this, rootGraph, containers[0]);
attrHash = entity.attrs;
drawAttrHash = entity.drawAttrs;
containers[0].nodes.push(entity);
}
// debug('node: ' + entityName);
} else {
matches = line.match(EDGE_MATCH_RE);
if (matches) {
entityName = matches[1];
attrs = matches[8];
entity = Edge(entityName, this, rootGraph, containers[0], matches[2], matches[5]);
attrHash = entity.attrs;
drawAttrHash = entity.drawAttrs;
containers[0].edges.push(entity);
// debug('edge: ' + entityName);
}
}
if (matches) {
do {
if (!attrs.length) {
break;
}
matches = attrs.match(ATTR_MATCH_RE);
if (matches) {
attrs = attrs.substr(matches[0].length);
attrName = matches[1];
attrValue = unescapeAttr(matches[2]);
if (/^_.*draw_$/.test(attrName)) {
drawAttrHash[attrName] = attrValue;
} else {
attrHash[attrName] = attrValue;
}
// debug(attrName + ' ' + attrValue);
if (isGraph && containers.length < 2) {
switch (attrName) {
case 'bb':
attrValue = attrValue.split(',');
this.width = Number(attrValue[2]);
this.height = Math.abs(attrValue[3] - attrValue[1]);
// This is the opposite of the dot "-y" flag because canvas Y-coordinates are already inverted from Graphviz coordinates.
this.invertY = attrValue[3] > 0;
break;
case 'bgcolor':
bgColor = attrValue;
break;
case 'dpi':
this.dpi = attrValue;
break;
case 'landscape':
this.rotate = 'true' == attrValue || Number(attrValue);
break;
case 'margin':
attrValue = attrValue.split(',');
this.marginX = XDOT_DPI * attrValue[0];
this.marginY = XDOT_DPI * attr | {
if (!(this instanceof Canviz)) return new Canviz(container, url, urlParams);
var textModes = this._textModes = [];
this.canvas = new Canvas(0, 0);
if (!Canviz.canvasCounter) Canviz.canvasCounter = 0;
this.canvas.id = 'canviz_canvas_' + ++Canviz.canvasCounter;
if (IS_BROWSER) {
this.canvas.style.position = 'absolute';
this.elements = document.createElement('div');
this.elements.style.position = 'absolute';
this.container = typeof container == 'string' ? document.getElementById(container) : container;
this.container.style.position = 'relative';
this.container.appendChild(this.canvas);
if (typeof G_vmlCanvasManager != 'undefined') {
G_vmlCanvasManager.initElement(this.canvas);
this.canvas = document.getElementById(this.canvas.id);
}
this.container.appendChild(this.elements);
textModes.push('dom');
} | identifier_body |
|
Canviz.js | (container, url, urlParams) {
if (!(this instanceof Canviz)) return new Canviz(container, url, urlParams);
var textModes = this._textModes = [];
this.canvas = new Canvas(0, 0);
if (!Canviz.canvasCounter) Canviz.canvasCounter = 0;
this.canvas.id = 'canviz_canvas_' + ++Canviz.canvasCounter;
if (IS_BROWSER) {
this.canvas.style.position = 'absolute';
this.elements = document.createElement('div');
this.elements.style.position = 'absolute';
this.container = typeof container == 'string' ? document.getElementById(container) : container;
this.container.style.position = 'relative';
this.container.appendChild(this.canvas);
if (typeof G_vmlCanvasManager != 'undefined') {
G_vmlCanvasManager.initElement(this.canvas);
this.canvas = document.getElementById(this.canvas.id);
}
this.container.appendChild(this.elements);
textModes.push('dom');
}
this.ctx = this.canvas.getContext('2d');
if (this.ctx.fillText) textModes.push('canvas');
this.setTextMode(textModes[0]);
this.setScale(1);
this.dashLength = 6;
this.dotSpacing = 4;
this.graphs = [];
this.images = {};
this.imagePath = '';
if (url) {
this.load(url, urlParams);
}
}
// Properties
Canviz.Path = Path;
Canviz.colors = {
fallback: {
black: '000000',
lightgrey: 'd3d3d3',
white: 'ffffff'
}
};
Canviz.addColors = function (colors) {
var keys = objectKeys(colors),
keysLength = keys.length;
for (var i = 0; i < keysLength; ++i) {
var key = keys[i];
Canviz.colors[key] = colors[key];
}
};
// Constants
var MAX_XDOT_VERSION = '1.6';
// An alphanumeric string or a number or a double-quoted string or an HTML string
var ID_MATCH = '([a-zA-Z\u0080-\uFFFF_][0-9a-zA-Z\u0080-\uFFFF_]*|-?(?:\\.\\d+|\\d+(?:\\.\\d*)?)|"(?:\\\\"|[^"])*"|<(?:<[^>]*>|[^<>]+?)+>)';
// ID or ID:port or ID:compassPoint or ID:port:compassPoint
var NODE_ID_MATCH = ID_MATCH + '(?::' + ID_MATCH + ')?(?::' + ID_MATCH + ')?';
// Regular expressions used by the parser
var GRAPH_MATCH_RE = new RegExp('^(strict\\s+)?(graph|digraph)(?:\\s+' + ID_MATCH + ')?\\s*{$', 'i');
var SUBGRAPH_MATCH_RE = new RegExp('^(?:subgraph\\s+)?' + ID_MATCH + '?\\s*{$', 'i');
var NODE_MATCH_RE = new RegExp('^(' + NODE_ID_MATCH + ')\\s+\\[(.+)\\];$');
var EDGE_MATCH_RE = new RegExp('^(' + NODE_ID_MATCH + '\\s*-[->]\\s*' + NODE_ID_MATCH + ')\\s+\\[(.+)\\];$');
var ATTR_MATCH_RE = new RegExp('^' + ID_MATCH + '=' + ID_MATCH + '(?:[,\\s]+|$)');
// Prototype
Canviz.prototype = {
constructor: Canviz,
setScale: function (scale) {
this.scale = scale;
},
setImagePath: function (imagePath) {
this.imagePath = imagePath;
},
setTextMode: function (textMode) {
if (~indexOf(this._textModes, textMode)) this.textMode = textMode;
else debug('unsupported text mode ' + textMode);
},
load: function (url, urlParams, callback) {
if (urlParams) return console.log('urlParams not supported');
var self = this;
loadFile(url, function (err, text) {
if (err) {
console.log(err.message);
} else {
self.parse(text);
if (callback) callback();
}
});
},
parse: function (xdot) {
if (IS_BROWSER) {
if (document.getElementById('debug_output')) {
document.getElementById('debug_output').innerHTML = '';
}
}
this.graphs = [];
this.images = {};
this.paddingX = this.paddingY = XDOT_DPI * 0.0555;
this.dpi = 96;
var bgColor = 'white';
var bbEnlarge = this.rotate = 0; // false
var containers = [];
var lines = xdot.split(/\r?\n/);
var linesLength = lines.length;
var line, lastChar, matches, rootGraph, isGraph, entity, entityName, attrs, attrName, attrValue, attrHash, drawAttrHash, maxHeight, maxWidth;
var i = this.width = this.height = this.marginX = this.marginY = this.numImages = this.numImagesFinished = 0;
while (i < linesLength) {
line = lines[i++].replace(/^\s+/, '');
if ('' != line && '#' != line.substr(0, 1)) {
while (i < linesLength && ';' != (lastChar = line.substr(line.length - 1, line.length)) && '{' != lastChar && '}' != lastChar) {
if ('\\' == lastChar) {
line = line.substr(0, line.length - 1);
}
line += lines[i++];
}
// debug(line);
if (containers.length) {
matches = line.match(SUBGRAPH_MATCH_RE);
if (matches) {
containers.unshift(Graph(matches[1], this, rootGraph, containers[0]));
containers[1].subgraphs.push(containers[0]);
// debug('subgraph: ' + containers[0].name);
}
} else {
matches = line.match(GRAPH_MATCH_RE);
if (matches) {
rootGraph = Graph(matches[3], this);
rootGraph.strict = !!matches[1];
rootGraph.type = 'graph' == matches[2] ? 'undirected' : 'directed';
rootGraph.attrs.xdotversion = '1.0';
containers.unshift(rootGraph);
this.graphs.push(rootGraph);
// debug('graph: ' + containers[0].name);
}
}
if (matches) {
// debug('begin container ' + containers[0].name);
} else if ('}' == line) {
// debug('end container ' + containers[0].name);
containers.shift();
if (!containers.length) {
break;
}
} else {
matches = line.match(NODE_MATCH_RE);
if (matches) {
entityName = matches[2];
attrs = matches[5];
drawAttrHash = containers[0].drawAttrs;
isGraph = false;
switch (entityName) {
case 'graph':
attrHash = containers[0].attrs;
isGraph = true;
break;
case 'node':
attrHash = containers[0].nodeAttrs;
break;
case 'edge':
attrHash = containers[0].edgeAttrs;
break;
default:
entity = Node(entityName, this, rootGraph, containers[0]);
attrHash = entity.attrs;
drawAttrHash = entity.drawAttrs;
containers[0].nodes.push(entity);
}
// debug('node: ' + entityName);
} else {
matches = line.match(EDGE_MATCH_RE);
if (matches) {
entityName = matches[1];
attrs = matches[8];
entity = Edge(entityName, this, rootGraph, containers[0], matches[2], matches[5]);
attrHash = entity.attrs;
drawAttrHash = entity.drawAttrs;
containers[0].edges.push(entity);
// debug('edge: ' + entityName);
}
}
if (matches) {
do {
if (!attrs.length) {
break;
}
matches = attrs.match(ATTR_MATCH_RE);
if (matches) {
attrs = attrs.substr(matches[0].length);
attrName = matches[1];
attrValue = unescapeAttr(matches[2]);
if (/^_.*draw_$/.test(attrName)) {
drawAttrHash[attrName] = attrValue;
} else {
attrHash[attrName] = attrValue;
}
// debug(attrName + ' ' + attrValue);
if (isGraph && containers.length < 2) {
switch (attrName) {
case 'bb':
attrValue = attrValue.split(',');
this.width = Number(attrValue[2]);
this.height = Math.abs(attrValue[3] - attrValue[1]);
// This is the opposite of the dot "-y" flag because canvas Y-coordinates are already inverted from Graphviz coordinates.
this.invertY = attrValue[3] > 0;
break;
case 'bgcolor':
bgColor = attrValue;
break;
case 'dpi':
this.dpi = attrValue;
break;
case 'landscape':
this.rotate = 'true' == attrValue || Number(attrValue);
break;
case 'margin':
attrValue = attrValue.split(',');
this.marginX = XDOT_DPI * attrValue[0];
this | Canviz | identifier_name |
|
net.go | Read can be made to time out and
// return an Error with Timeout() == true after a fixed time limit; see
// SetDeadline and SetReadDeadline.
//
// Part of the net.Conn interface.
func (c *Conn) Read(b []byte) (n int, err error) {
fmt.Println("read (start)", b)
done := make(chan int)
callback := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
fmt.Println("got response")
// callback.Release() // free up memory from callback
done <- args[0].Int()
return nil
})
// func printMessage(this js.Value, args []js.Value) interface{} {
// message := args[0].String()
// fmt.Println(message)
//
// return nil
// }
defer callback.Release()
ta := js.TypedArrayOf(b)
defer ta.Release()
js.Global().Get("readFromSocket").Invoke(c.id, ta, callback)
// wait until we've got our response
bytesRead := <-done
fmt.Println("read (done)", b)
// // In order to reconcile the differences between the record abstraction
// // of our AEAD connection, and the stream abstraction of TCP, we
// // maintain an intermediate read buffer. If this buffer becomes
// // depleted, then we read the next record, and feed it into the
// // buffer. Otherwise, we read directly from the buffer.
// if c.readBuf.Len() == 0 {
// plaintext, err := c.noise.ReadMessage(c.conn)
// if err != nil {
// return 0, err
// }
// if _, err := c.readBuf.Write(plaintext); err != nil {
// return 0, err
// }
// }
// return c.readBuf.Read(b)
return bytesRead, nil
}
// Write writes data to the connection. Write can be made to time out and
// return an Error with Timeout() == true after a fixed time limit; see
// SetDeadline and SetWriteDeadline.
//
// Part of the net.Conn interface.
func (c *Conn) Write(b []byte) (n int, err error) {
fmt.Println("writing", b)
done := make(chan int)
callback := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
fmt.Println("wrote stuff")
// callback.Release() // free up memory from callback
done <- 0
return nil
})
// func printMessage(this js.Value, args []js.Value) interface{} {
// message := args[0].String()
// fmt.Println(message)
//
// return nil
// }
defer callback.Release()
ta := js.TypedArrayOf(b)
defer ta.Release()
js.Global().Get("writeToSocket").Invoke(c.id, ta, callback)
// wait until we've got our response
bytesWritten := <-done
// // If the message doesn't require any chunking, then we can go ahead
// // with a single write.
// if len(b) <= math.MaxUint16 {
// err = c.noise.WriteMessage(b)
// if err != nil {
// return 0, err
// }
// return c.noise.Flush(c.conn)
// }
// // If we need to split the message into fragments, then we'll write
// // chunks which maximize usage of the available payload.
// chunkSize := math.MaxUint16
// bytesToWrite := len(b)
// for bytesWritten < bytesToWrite {
// // If we're on the last chunk, then truncate the chunk size as
// // necessary to avoid an out-of-bounds array memory access.
// if bytesWritten+chunkSize > len(b) {
// chunkSize = len(b) - bytesWritten
// }
// // Slice off the next chunk to be written based on our running
// // counter and next chunk size.
// chunk := b[bytesWritten : bytesWritten+chunkSize]
// if err := c.noise.WriteMessage(chunk); err != nil {
// return bytesWritten, err
// }
// n, err := c.noise.Flush(c.conn)
// bytesWritten += n
// if err != nil {
// return bytesWritten, err
// }
// }
return bytesWritten, nil
}
// // WriteMessage encrypts and buffers the next message p for the connection. The
// // ciphertext of the message is prepended with an encrypt+auth'd length which
// // must be used as the AD to the AEAD construction when being decrypted by the
// // other side.
// //
// // NOTE: This DOES NOT write the message to the wire, it should be followed by a
// // call to Flush to ensure the message is written.
// func (c *Conn) WriteMessage(b []byte) error {
// return c.noise.WriteMessage(b)
// }
// // Flush attempts to write a message buffered using WriteMessage to the
// // underlying connection. If no buffered message exists, this will result in a
// // NOP. Otherwise, it will continue to write the remaining bytes, picking up
// // where the byte stream left off in the event of a partial write. The number of
// // bytes returned reflects the number of plaintext bytes in the payload, and
// // does not account for the overhead of the header or MACs.
// //
// // NOTE: It is safe to call this method again iff a timeout error is returned.
// func (c *Conn) Flush() (int, error) {
// return c.noise.Flush(c.conn)
// }
// Close closes the connection. Any blocked Read or Write operations will be
// unblocked and return errors.
//
// Part of the net.Conn interface.
func (c *Conn) Close() error {
// TODO(roasbeef): reset brontide state?
// return c.conn.Close()
fmt.Println("closed conn")
return nil
}
// LocalAddr returns the local network address.
//
// Part of the net.Conn interface.
func (c *Conn) LocalAddr() net.Addr {
// return c.conn.LocalAddr()
fmt.Println("getting lcoal addr")
return nil
}
// RemoteAddr returns the remote network address.
//
// Part of the net.Conn interface.
func (c *Conn) RemoteAddr() net.Addr {
// return c.conn.RemoteAddr()
fmt.Println("getting remote addr")
return nil
}
// SetDeadline sets the read and write deadlines associated with the
// connection. It is equivalent to calling both SetReadDeadline and
// SetWriteDeadline.
//
// Part of the net.Conn interface.
func (c *Conn) SetDeadline(t time.Time) error {
// return c.conn.SetDeadline(t)
fmt.Println("set deadline", t)
return nil
}
// SetReadDeadline sets the deadline for future Read calls. A zero value for t
// means Read will not time out.
//
// Part of the net.Conn interface.
func (c *Conn) SetReadDeadline(t time.Time) error {
// return c.conn.SetReadDeadline(t)
fmt.Println("set read deadline", t)
return nil
}
// SetWriteDeadline sets the deadline for future Write calls. Even if write
// times out, it may return n > 0, indicating that some of the data was
// successfully written. A zero value for t means Write will not time out.
//
// Part of the net.Conn interface.
func (c *Conn) SetWriteDeadline(t time.Time) error {
// return c.conn.SetWriteDeadline(t)
fmt.Println("set write deadline", t)
return nil
}
// Dial on the regular network uses net.Dial
func (r *ClearNet) Dial(network, address string) (net.Conn, error) {
fmt.Println("dialing beeep boop", network, address)
// fmt.Println("writing", b)
done := make(chan int)
callback := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
fmt.Println("finished dialing stuff")
// callback.Release() // free up memory from callback
done <- args[0].Int()
return nil
})
// func printMessage(this js.Value, args []js.Value) interface{} {
// message := args[0].String()
// fmt.Println(message)
//
// return nil
// }
defer callback.Release()
rawHost, rawPort, _ := net.SplitHostPort(address)
js.Global().Get("dialSocket").Invoke(rawHost, rawPort, callback)
// wait until we've got our response
id := <-done
// TODO: error if id < 0
// return net.Dial(network, address)
return &Conn{
id: id,
}, nil
}
// LookupHost for regular network uses the net.LookupHost function
func (r *ClearNet) LookupHost(host string) ([]string, error) {
return net.LookupHost(host)
}
// LookupSRV for regular network uses net.LookupSRV function
func (r *ClearNet) LookupSRV(service, proto, name string) (string, []*net.SRV, error) {
return net.LookupSRV(service, proto, name)
}
// ResolveTCPAddr for regular network uses net.ResolveTCPAddr function
func (r *ClearNet) | ResolveTCPAddr | identifier_name |
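
The Conn/ClearNet code in these rows only works when the hosting JavaScript page exposes dialSocket, readFromSocket and writeToSocket globals with the shapes invoked above. A minimal usage sketch (illustrative only, not part of net.go; the host and port are placeholders):

// exampleDial is an illustrative sketch of driving the js-backed dialer above.
// It assumes the browser side has installed the dialSocket/readFromSocket/
// writeToSocket helpers that Dial, Read and Write invoke.
func exampleDial() error {
	var d ClearNet
	conn, err := d.Dial("tcp", "node.example.com:9735")
	if err != nil {
		return err
	}
	defer conn.Close()
	// Write a payload, then block until the JS read callback reports data.
	if _, err := conn.Write([]byte("ping")); err != nil {
		return err
	}
	buf := make([]byte, 1024)
	n, err := conn.Read(buf)
	if err != nil {
		return err
	}
	fmt.Println("received", n, "bytes")
	return nil
}
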
|
net.go |
id int
// conn net.Conn
// noise *Machine
// readBuf bytes.Buffer
}
var _ net.Conn = (*Conn)(nil)
// Read reads data from the connection. Read can be made to time out and
// return an Error with Timeout() == true after a fixed time limit; see
// SetDeadline and SetReadDeadline.
//
// Part of the net.Conn interface.
func (c *Conn) Read(b []byte) (n int, err error) {
fmt.Println("read (start)", b)
done := make(chan int)
callback := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
fmt.Println("got response")
// callback.Release() // free up memory from callback
done <- args[0].Int()
return nil
})
// func printMessage(this js.Value, args []js.Value) interface{} {
// message := args[0].String()
// fmt.Println(message)
//
// return nil
// }
defer callback.Release()
ta := js.TypedArrayOf(b)
defer ta.Release()
js.Global().Get("readFromSocket").Invoke(c.id, ta, callback)
// wait until we've got our response
bytesRead := <-done
fmt.Println("read (done)", b)
// // In order to reconcile the differences between the record abstraction
// // of our AEAD connection, and the stream abstraction of TCP, we
// // maintain an intermediate read buffer. If this buffer becomes
// // depleted, then we read the next record, and feed it into the
// // buffer. Otherwise, we read directly from the buffer.
// if c.readBuf.Len() == 0 {
// plaintext, err := c.noise.ReadMessage(c.conn)
// if err != nil {
// return 0, err
// }
// if _, err := c.readBuf.Write(plaintext); err != nil {
// return 0, err
// }
// }
// return c.readBuf.Read(b)
return bytesRead, nil
}
// Write writes data to the connection. Write can be made to time out and
// return an Error with Timeout() == true after a fixed time limit; see
// SetDeadline and SetWriteDeadline.
//
// Part of the net.Conn interface.
func (c *Conn) Write(b []byte) (n int, err error) {
fmt.Println("writing", b)
done := make(chan int)
callback := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
fmt.Println("wrote stuff")
// callback.Release() // free up memory from callback
done <- 0
return nil
})
// func printMessage(this js.Value, args []js.Value) interface{} {
// message := args[0].String()
// fmt.Println(message)
//
// return nil
// }
defer callback.Release()
ta := js.TypedArrayOf(b)
defer ta.Release()
js.Global().Get("writeToSocket").Invoke(c.id, ta, callback)
// wait until we've got our response
bytesWritten := <-done
// // If the message doesn't require any chunking, then we can go ahead
// // with a single write.
// if len(b) <= math.MaxUint16 {
// err = c.noise.WriteMessage(b)
// if err != nil {
// return 0, err
// }
// return c.noise.Flush(c.conn)
// }
// // If we need to split the message into fragments, then we'll write
// // chunks which maximize usage of the available payload.
// chunkSize := math.MaxUint16
// bytesToWrite := len(b)
// for bytesWritten < bytesToWrite {
// // If we're on the last chunk, then truncate the chunk size as
// // necessary to avoid an out-of-bounds array memory access.
// if bytesWritten+chunkSize > len(b) {
// chunkSize = len(b) - bytesWritten
// }
// // Slice off the next chunk to be written based on our running
// // counter and next chunk size.
// chunk := b[bytesWritten : bytesWritten+chunkSize]
// if err := c.noise.WriteMessage(chunk); err != nil {
// return bytesWritten, err
// }
// n, err := c.noise.Flush(c.conn)
// bytesWritten += n
// if err != nil {
// return bytesWritten, err
// }
// }
return bytesWritten, nil
}
// // WriteMessage encrypts and buffers the next message p for the connection. The
// // ciphertext of the message is prepended with an encrypt+auth'd length which
// // must be used as the AD to the AEAD construction when being decrypted by the
// // other side.
// //
// // NOTE: This DOES NOT write the message to the wire, it should be followed by a
// // call to Flush to ensure the message is written.
// func (c *Conn) WriteMessage(b []byte) error {
// return c.noise.WriteMessage(b)
// }
// // Flush attempts to write a message buffered using WriteMessage to the
// // underlying connection. If no buffered message exists, this will result in a
// // NOP. Otherwise, it will continue to write the remaining bytes, picking up
// // where the byte stream left off in the event of a partial write. The number of
// // bytes returned reflects the number of plaintext bytes in the payload, and
// // does not account for the overhead of the header or MACs. | // return c.noise.Flush(c.conn)
// }
// Close closes the connection. Any blocked Read or Write operations will be
// unblocked and return errors.
//
// Part of the net.Conn interface.
func (c *Conn) Close() error {
// TODO(roasbeef): reset brontide state?
// return c.conn.Close()
fmt.Println("closed conn")
return nil
}
// LocalAddr returns the local network address.
//
// Part of the net.Conn interface.
func (c *Conn) LocalAddr() net.Addr {
// return c.conn.LocalAddr()
fmt.Println("getting lcoal addr")
return nil
}
// RemoteAddr returns the remote network address.
//
// Part of the net.Conn interface.
func (c *Conn) RemoteAddr() net.Addr {
// return c.conn.RemoteAddr()
fmt.Println("getting remote addr")
return nil
}
// SetDeadline sets the read and write deadlines associated with the
// connection. It is equivalent to calling both SetReadDeadline and
// SetWriteDeadline.
//
// Part of the net.Conn interface.
func (c *Conn) SetDeadline(t time.Time) error {
// return c.conn.SetDeadline(t)
fmt.Println("set deadline", t)
return nil
}
// SetReadDeadline sets the deadline for future Read calls. A zero value for t
// means Read will not time out.
//
// Part of the net.Conn interface.
func (c *Conn) SetReadDeadline(t time.Time) error {
// return c.conn.SetReadDeadline(t)
fmt.Println("set read deadline", t)
return nil
}
// SetWriteDeadline sets the deadline for future Write calls. Even if write
// times out, it may return n > 0, indicating that some of the data was
// successfully written. A zero value for t means Write will not time out.
//
// Part of the net.Conn interface.
func (c *Conn) SetWriteDeadline(t time.Time) error {
// return c.conn.SetWriteDeadline(t)
fmt.Println("set write deadline", t)
return nil
}
// Dial on the regular network uses net.Dial
func (r *ClearNet) Dial(network, address string) (net.Conn, error) {
fmt.Println("dialing beeep boop", network, address)
// fmt.Println("writing", b)
done := make(chan int)
callback := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
fmt.Println("finished dialing stuff")
// callback.Release() // free up memory from callback
done <- args[0].Int()
return nil
})
// func printMessage(this js.Value, args []js.Value) interface{} {
// message := args[0].String()
// fmt.Println(message)
//
// return nil
// }
defer callback.Release()
rawHost, rawPort, _ := net.SplitHostPort(address)
js.Global().Get("dialSocket").Invoke(rawHost, rawPort, callback)
// wait until we've got our response
id := <-done
// TODO: error if id < 0
// return net.Dial(network, address)
return &Conn{
id: id,
}, nil
}
// LookupHost for regular network uses the net.LookupHost function
func (r *ClearNet) LookupHost(host string) ([]string, error) {
return net.LookupHost(host)
}
// LookupSRV for regular network uses net.LookupSRV function
func (r *ClearNet) LookupSRV(service, proto, name string) (string, []*net.S | // //
// // NOTE: It is safe to call this method again iff a timeout error is returned.
// func (c *Conn) Flush() (int, error) { | random_line_split |
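
Read, Write and Dial above all repeat one bridge pattern: wrap a Go function in js.FuncOf, hand it to a JS global, then block on a channel until the callback fires. A hedged sketch of factoring that pattern into a single helper (hypothetical refactor, not present in the original file; it uses only the syscall/js calls already used above):

// invokeAndWait calls a JS global with the given args plus a callback, and
// blocks until the callback delivers its first integer argument (0 if none).
func invokeAndWait(fnName string, args ...interface{}) int {
	done := make(chan int, 1)
	callback := js.FuncOf(func(this js.Value, cbArgs []js.Value) interface{} {
		if len(cbArgs) > 0 {
			done <- cbArgs[0].Int()
		} else {
			done <- 0
		}
		return nil
	})
	defer callback.Release()
	args = append(args, callback)
	js.Global().Get(fnName).Invoke(args...)
	return <-done
}
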
net.go | id int
// conn net.Conn
// noise *Machine
// readBuf bytes.Buffer
}
var _ net.Conn = (*Conn)(nil)
// Read reads data from the connection. Read can be made to time out and
// return an Error with Timeout() == true after a fixed time limit; see
// SetDeadline and SetReadDeadline.
//
// Part of the net.Conn interface.
func (c *Conn) Read(b []byte) (n int, err error) {
fmt.Println("read (start)", b)
done := make(chan int)
callback := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
fmt.Println("got response")
// callback.Release() // free up memory from callback
done <- args[0].Int()
return nil
})
// func printMessage(this js.Value, args []js.Value) interface{} {
// message := args[0].String()
// fmt.Println(message)
//
// return nil
// }
defer callback.Release()
ta := js.TypedArrayOf(b)
defer ta.Release()
js.Global().Get("readFromSocket").Invoke(c.id, ta, callback)
// wait until we've got our response
bytesRead := <-done
fmt.Println("read (done)", b)
// // In order to reconcile the differences between the record abstraction
// // of our AEAD connection, and the stream abstraction of TCP, we
// // maintain an intermediate read buffer. If this buffer becomes
// // depleted, then we read the next record, and feed it into the
// // buffer. Otherwise, we read directly from the buffer.
// if c.readBuf.Len() == 0 {
// plaintext, err := c.noise.ReadMessage(c.conn)
// if err != nil {
// return 0, err
// }
// if _, err := c.readBuf.Write(plaintext); err != nil {
// return 0, err
// }
// }
// return c.readBuf.Read(b)
return bytesRead, nil
}
// Write writes data to the connection. Write can be made to time out and
// return an Error with Timeout() == true after a fixed time limit; see
// SetDeadline and SetWriteDeadline.
//
// Part of the net.Conn interface.
func (c *Conn) Write(b []byte) (n int, err error) {
fmt.Println("writing", b)
done := make(chan int)
callback := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
fmt.Println("wrote stuff")
// callback.Release() // free up memory from callback
done <- 0
return nil
})
// func printMessage(this js.Value, args []js.Value) interface{} {
// message := args[0].String()
// fmt.Println(message)
//
// return nil
// }
defer callback.Release()
ta := js.TypedArrayOf(b)
defer ta.Release()
js.Global().Get("writeToSocket").Invoke(c.id, ta, callback)
// wait until we've got our response
bytesWritten := <-done
// // If the message doesn't require any chunking, then we can go ahead
// // with a single write.
// if len(b) <= math.MaxUint16 {
// err = c.noise.WriteMessage(b)
// if err != nil {
// return 0, err
// }
// return c.noise.Flush(c.conn)
// }
// // If we need to split the message into fragments, then we'll write
// // chunks which maximize usage of the available payload.
// chunkSize := math.MaxUint16
// bytesToWrite := len(b)
// for bytesWritten < bytesToWrite {
// // If we're on the last chunk, then truncate the chunk size as
// // necessary to avoid an out-of-bounds array memory access.
// if bytesWritten+chunkSize > len(b) {
// chunkSize = len(b) - bytesWritten
// }
// // Slice off the next chunk to be written based on our running
// // counter and next chunk size.
// chunk := b[bytesWritten : bytesWritten+chunkSize]
// if err := c.noise.WriteMessage(chunk); err != nil {
// return bytesWritten, err
// }
// n, err := c.noise.Flush(c.conn)
// bytesWritten += n
// if err != nil {
// return bytesWritten, err
// }
// }
return bytesWritten, nil
}
// // WriteMessage encrypts and buffers the next message p for the connection. The
// // ciphertext of the message is prepended with an encrypt+auth'd length which
// // must be used as the AD to the AEAD construction when being decrypted by the
// // other side.
// //
// // NOTE: This DOES NOT write the message to the wire, it should be followed by a
// // call to Flush to ensure the message is written.
// func (c *Conn) WriteMessage(b []byte) error {
// return c.noise.WriteMessage(b)
// }
// // Flush attempts to write a message buffered using WriteMessage to the
// // underlying connection. If no buffered message exists, this will result in a
// // NOP. Otherwise, it will continue to write the remaining bytes, picking up
// // where the byte stream left off in the event of a partial write. The number of
// // bytes returned reflects the number of plaintext bytes in the payload, and
// // does not account for the overhead of the header or MACs.
// //
// // NOTE: It is safe to call this method again iff a timeout error is returned.
// func (c *Conn) Flush() (int, error) {
// return c.noise.Flush(c.conn)
// }
// Close closes the connection. Any blocked Read or Write operations will be
// unblocked and return errors.
//
// Part of the net.Conn interface.
func (c *Conn) Close() error {
// TODO(roasbeef): reset brontide state?
// return c.conn.Close()
fmt.Println("closed conn")
return nil
}
// LocalAddr returns the local network address.
//
// Part of the net.Conn interface.
func (c *Conn) LocalAddr() net.Addr {
// return c.conn.LocalAddr()
fmt.Println("getting lcoal addr")
return nil
}
// RemoteAddr returns the remote network address.
//
// Part of the net.Conn interface.
func (c *Conn) RemoteAddr() net.Addr |
// SetDeadline sets the read and write deadlines associated with the
// connection. It is equivalent to calling both SetReadDeadline and
// SetWriteDeadline.
//
// Part of the net.Conn interface.
func (c *Conn) SetDeadline(t time.Time) error {
// return c.conn.SetDeadline(t)
fmt.Println("set deadline", t)
return nil
}
// SetReadDeadline sets the deadline for future Read calls. A zero value for t
// means Read will not time out.
//
// Part of the net.Conn interface.
func (c *Conn) SetReadDeadline(t time.Time) error {
// return c.conn.SetReadDeadline(t)
fmt.Println("set read deadline", t)
return nil
}
// SetWriteDeadline sets the deadline for future Write calls. Even if write
// times out, it may return n > 0, indicating that some of the data was
// successfully written. A zero value for t means Write will not time out.
//
// Part of the net.Conn interface.
func (c *Conn) SetWriteDeadline(t time.Time) error {
// return c.conn.SetWriteDeadline(t)
fmt.Println("set write deadline", t)
return nil
}
// Dial on the regular network uses net.Dial
func (r *ClearNet) Dial(network, address string) (net.Conn, error) {
fmt.Println("dialing beeep boop", network, address)
// fmt.Println("writing", b)
done := make(chan int)
callback := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
fmt.Println("finished dialing stuff")
// callback.Release() // free up memory from callback
done <- args[0].Int()
return nil
})
// func printMessage(this js.Value, args []js.Value) interface{} {
// message := args[0].String()
// fmt.Println(message)
//
// return nil
// }
defer callback.Release()
rawHost, rawPort, _ := net.SplitHostPort(address)
js.Global().Get("dialSocket").Invoke(rawHost, rawPort, callback)
// wait until we've got our response
id := <-done
// TODO: error if id < 0
// return net.Dial(network, address)
return &Conn{
id: id,
}, nil
}
// LookupHost for regular network uses the net.LookupHost function
func (r *ClearNet) LookupHost(host string) ([]string, error) {
return net.LookupHost(host)
}
// LookupSRV for regular network uses net.LookupSRV function
func (r *ClearNet) LookupSRV(service, proto, name string) (string, []*net | {
// return c.conn.RemoteAddr()
fmt.Println("getting remote addr")
return nil
} | identifier_body |
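
The commented-out body of Write above sketches how an oversized payload would be split into chunks of at most 65535 bytes so each fits a single record. Restated as a standalone helper (illustrative only; it is not wired into Write, and the write argument stands in for whatever actually flushes bytes):

// writeChunked writes b through write in chunks no larger than 65535 bytes,
// mirroring the disabled chunking loop in the Write method above.
func writeChunked(write func([]byte) (int, error), b []byte) (int, error) {
	const chunkSize = 65535 // math.MaxUint16, as in the disabled code
	bytesWritten := 0
	for bytesWritten < len(b) {
		end := bytesWritten + chunkSize
		if end > len(b) {
			end = len(b)
		}
		n, err := write(b[bytesWritten:end])
		bytesWritten += n
		if err != nil {
			return bytesWritten, err
		}
	}
	return bytesWritten, nil
}
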
padtwitch.py |
if self.stream_thread:
print('shutting down stream thread')
self.stream_thread.join()
self.stream_thread = None
print('done shutting down stream thread')
def __unload(self):
self._try_shutdown_twitch()
async def on_connect(self):
"""Called when connected as a Discord client.
Connects to Twitch IRC.
"""
self._try_shutdown_twitch()
self.stream_thread = self.connect_thread()
def connect_thread(self, *args, **kwargs):
"""Convenient wrapper for calling follow() in a background thread.
The Thread object is started and then returned. Passes on args and kwargs to
the follow() method."""
thread = threading.Thread(target=self.connect_stream, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
def connect_stream(self):
if self.stream is None:
print('Not connecting stream, set up username/oauth first')
return
self.stream.connect()
for channel in self.settings.channels().values():
if channel['enabled']:
channel_name = channel['name']
print('Connecting to twitch channel: {}'.format(channel_name))
self.stream.join_channel(channel_name)
while True:
received = self.stream.twitch_receive_messages()
if received:
for m in received:
self.process_user_message(**m)
time.sleep(.1)
def process_user_message(self, message, channel, username):
for action_name, action_fn in self.monster_actions.items():
if message.startswith(action_name):
query = message[len(action_name):]
m = self.lookup_monster(query)
msg = action_fn(channel, username, m) if m else 'no matches for ' + query
self.stream.send_chat_message(channel, msg)
return
for action_name, action_fn in self.actions.items():
if message.startswith(action_name):
query = message[len(action_name):]
action_fn(channel, username, query)
return
for command_name, command_response in self.settings.getCustomCommands(channel).items():
if message.rstrip()[1:] == command_name:
self.stream.send_chat_message(channel, command_response)
return
def lookup_monster(self, query):
padinfo = self.bot.get_cog('PadInfo')
if not padinfo:
return None
m, _, _ = padinfo.findMonster(query)
return m
def _get_header(self, m):
return '{}. {}'.format(m.monster_id_na, m.name_na)
def post_as(self, channel, username, m):
as_text = '(CD{}) {}'.format(m.active_skill.turn_min,
m.active_skill.desc) if m.active_skill else 'None/Missing'
return '{} : {}'.format(self._get_header(m), as_text)
def post_info(self, channel, username, m):
name = self._get_header(m)
types = m.type1 + ('/' + m.type2 if m.type2 else '') + ('/' + m.type3 if m.type3 else '')
stats = '({}/{}/{}) W{}'.format(m.hp, m.atk, m.rcv, m.weighted_stats)
awakenings = self.bot.get_cog('PadInfo').map_awakenings_text(m)
return '{} | {} | {} | {}'.format(name, types, stats, awakenings)
def post_ls(self, channel, username, m):
ls_text = "[{}] {}".format(
m.multiplier_text, m.leader_text) if m.leader_text else 'None/Missing'
return '{} : {}'.format(self._get_header(m), ls_text)
def whisper_help(self, channel, username, m):
help_text = 'Cmds: ^info <q>, ^as <q>, ^ls <q>, ^cc'
self.stream.send_chat_message(channel, help_text)
def whisper_commands(self, channel, username, m):
cmds_with_prefix = map(lambda x: '^' + x, self.settings.getCustomCommands(channel))
msg = "Custom Cmds: " + ', '.join(cmds_with_prefix)
self.stream.send_chat_message(channel, msg)
def add_com(self, channel, username, query):
query = query.strip()
if '"' in query or '^' in query or ' ' not in query:
self.stream.send_chat_message(channel, 'bad request')
return
space_idx = query.index(' ')
cmd_name = query[:space_idx]
cmd_value = query[space_idx:]
self.settings.addCustomCommand(channel, cmd_name, cmd_value)
self.stream.send_chat_message(channel, 'Done adding ' + cmd_name)
def rm_com(self, channel, username, query):
cmd_name = query.strip()
self.settings.rmCustomCommand(channel, cmd_name)
self.stream.send_chat_message(channel, 'Done deleting ' + cmd_name)
@commands.group(pass_context=True)
@checks.is_owner()
async def padtwitch(self, ctx):
"""Manage twitter feed mirroring"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@padtwitch.command(pass_context=True)
async def setUserName(self, ctx, user_name: str):
self.settings.setUserName(user_name)
await self.bot.say(inline('done, reload the cog'))
@padtwitch.command(pass_context=True)
async def setOauthCode(self, ctx, oauth_code: str):
self.settings.setOauthCode(oauth_code)
await self.bot.say(inline('done, reload the cog'))
@padtwitch.command(pass_context=True)
async def setEnabled(self, ctx, twitch_channel: str, enabled: bool):
self.settings.setChannelEnabled(twitch_channel, enabled)
await self.bot.say(inline('done, reload the cog'))
@padtwitch.command(pass_context=True)
async def join(self, ctx, twitch_channel):
self.stream.join_channel(twitch_channel)
await self.bot.say(inline('done'))
@padtwitch.command(pass_context=True)
async def send(self, ctx, twitch_channel, *, msg_text):
self.stream.send_chat_message(twitch_channel, msg_text)
await self.bot.say(inline('done'))
@padtwitch.command(pass_context=True)
async def list(self, ctx):
msg = 'UserName: {}'.format(self.settings.getUserName())
msg += '\nChannels:'
for channel, cs in self.settings.channels().items():
msg += '\n\t({}) {}'.format('+' if cs['enabled'] else '-', cs['name'])
await self.bot.say(box(msg))
def setup(bot):
n = PadTwitch(bot)
asyncio.get_event_loop().create_task(n.on_connect())
bot.add_cog(n)
class PadTwitchSettings(CogSettings):
def make_default_settings(self):
config = {
'channels': {},
'user_name': '',
'oauth_code': '',
}
return config
def getUserName(self):
return self.bot_settings['user_name']
def setUserName(self, user_name):
self.bot_settings['user_name'] = user_name
self.save_settings()
def getOauthCode(self):
return self.bot_settings['oauth_code']
def setOauthCode(self, oauth_code):
self.bot_settings['oauth_code'] = oauth_code
self.save_settings()
def channels(self):
return self.bot_settings['channels']
def getChannel(self, channel_name):
channels = self.channels()
if channel_name not in channels:
channels[channel_name] = {
'name': channel_name,
'enabled': False,
'custom_commands': {},
}
return channels[channel_name]
def setChannelEnabled(self, channel_name: str, enabled: bool):
self.getChannel(channel_name)['enabled'] = enabled
self.save_settings()
def getCustomCommands(self, channel_name: str):
return self.getChannel(channel_name)['custom_commands']
def addCustomCommand(self, channel_name: str, cmd_name: str, cmd_value: str):
self.getCustomCommands(channel_name)[cmd_name] = cmd_value
self.save_settings()
def rmCustomCommand(self, channel_name: str, cmd_name: str):
self.getCustomCommands(channel_name).pop(cmd_name)
self.save_settings()
"""
Adapted from:
https://github.com/317070/python-twitch-stream/blob/master/twitchstream/chat.py
This file contains the python code used to interface with the Twitch
chat. Twitch chat is IRC-based, so it is basically an IRC-bot, but with
special features for Twitch, such as congestion control built in.
"""
class TwitchChatStream(object):
"""
The TwitchChatStream is used for interfacing with the Twitch chat of
a channel. To use this, an oauth-account (of the user chatting)
should be created. At the moment of writing, this can be done here:
https://twitchapps.com/tmi/
:param username: Twitch username
:type username: string
:param oauth: oauth for logging in (see https://twitchapps.com/tmi/)
:type oauth: string
:param verbose: show all stream messages on stdout (for debugging)
:type verbose: boolean
"""
def __init__(self, username, oauth, verbose=False):
"""Create a new stream object, and try to connect."""
self.username = username
self.oauth = oauth
self.verbose = verbose
self.current_channel = ""
self.last_sent_time = time.time | self.stream.disconnect() | conditional_block |
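
For reference, the TwitchChatStream object used by the cog above can also be driven directly; the loop below mirrors connect_stream. This is an illustrative sketch only — the username, oauth token and channel name are placeholders, and a real token comes from https://twitchapps.com/tmi/ as the class docstring notes.

def run_echo_loop():
    # Placeholder credentials; see the TwitchChatStream docstring for oauth setup.
    stream = TwitchChatStream(username='my_bot', oauth='oauth:xxxxxxxxxxxx', verbose=True)
    stream.connect()
    stream.join_channel('some_channel')
    while True:
        received = stream.twitch_receive_messages()
        if received:
            for m in received:
                print('#{} {}: {}'.format(m['channel'], m['username'], m['message']))
        time.sleep(.1)
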
|
padtwitch.py | enabled: bool):
self.getChannel(channel_name)['enabled'] = enabled
self.save_settings()
def getCustomCommands(self, channel_name: str):
return self.getChannel(channel_name)['custom_commands']
def addCustomCommand(self, channel_name: str, cmd_name: str, cmd_value: str):
self.getCustomCommands(channel_name)[cmd_name] = cmd_value
self.save_settings()
def rmCustomCommand(self, channel_name: str, cmd_name: str):
self.getCustomCommands(channel_name).pop(cmd_name)
self.save_settings()
"""
Adapted from:
https://github.com/317070/python-twitch-stream/blob/master/twitchstream/chat.py
This file contains the python code used to interface with the Twitch
chat. Twitch chat is IRC-based, so it is basically an IRC-bot, but with
special features for Twitch, such as congestion control built in.
"""
class TwitchChatStream(object):
"""
The TwitchChatStream is used for interfacing with the Twitch chat of
a channel. To use this, an oauth-account (of the user chatting)
should be created. At the moment of writing, this can be done here:
https://twitchapps.com/tmi/
:param username: Twitch username
:type username: string
:param oauth: oauth for logging in (see https://twitchapps.com/tmi/)
:type oauth: string
:param verbose: show all stream messages on stdout (for debugging)
:type verbose: boolean
"""
def __init__(self, username, oauth, verbose=False):
"""Create a new stream object, and try to connect."""
self.username = username
self.oauth = oauth
self.verbose = verbose
self.current_channel = ""
self.last_sent_time = time.time()
self.buffer = []
self.s = None
self.in_shutdown = False
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.disconnect()
@staticmethod
def _logged_in_successful(data):
"""
Test the login status from the returned communication of the
server.
:param data: bytes received from server during login
:type data: list of bytes
:return boolean, True when you are logged in.
"""
if re.match(r'^:(testserver\.local|tmi\.twitch\.tv)'
r' NOTICE \* :'
r'(Login unsuccessful|Error logging in)*$',
data.strip()):
return False
else:
return True
@staticmethod
def _check_has_ping(data):
"""
Check if the data from the server contains a request to ping.
:param data: the byte string from the server
:type data: list of bytes
:return: True when there is a request to ping, False otherwise
"""
return re.match(
r'^PING :tmi\.twitch\.tv$', data)
@staticmethod
def _check_has_channel(data):
"""
Check if the data from the server contains a channel switch.
:param data: the byte string from the server
:type data: list of bytes
:return: Name of channel when new channel, False otherwise
"""
return re.findall(
r'^:[a-zA-Z0-9_]+\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'
r'\.tmi\.twitch\.tv '
r'JOIN #([a-zA-Z0-9_]+)$', data)
@staticmethod
def _check_has_message(data):
"""
Check if the data from the server contains a message a user
typed in the chat.
:param data: the byte string from the server
:type data: list of bytes
:return: returns iterator over these messages
"""
return re.match(r'^:[a-zA-Z0-9_]+\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'
r'\.tmi\.twitch\.tv '
r'PRIVMSG #[a-zA-Z0-9_]+ :.+$', data)
def connect(self):
"""
Connect to Twitch
"""
if self.s:
self.disconnect()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s = s
connect_host = "irc.twitch.tv"
connect_port = 6667
try:
print('starting connect to {} {}'.format(connect_host, connect_port))
s.connect((connect_host, connect_port))
except (Exception, IOError):
print("Unable to create a socket to %s:%s" % (
connect_host,
connect_port))
raise # unexpected, because it is a blocking socket
# Connected to twitch
# Sending our details to twitch...
self._send_now('PASS %s\r\n' % self.oauth)
self._send_now('NICK %s\r\n' % self.username)
received = s.recv(1024).decode()
if not TwitchChatStream._logged_in_successful(received):
self.s = None
# ... and they didn't accept our details
raise IOError("Twitch did not accept the username-oauth "
"combination")
# ... and they accepted our details
# Connected to twitch.tv!
# now make this socket non-blocking on the OS-level
if os.name != 'nt':
fcntl.fcntl(s, fcntl.F_SETFL, os.O_NONBLOCK)
else:
s.setblocking(0)
print('done with twitch connect')
self.in_shutdown = False # This is bad. probably need to use a connection counter or something
def disconnect(self):
if self.s is not None:
print('doing disconnect')
self.in_shutdown = True
self.s.close() # close the previous socket
self.s = None
print('done doing disconnect')
def _push_from_buffer(self):
"""
Push a message on the stack to the IRC stream.
This is necessary to avoid Twitch overflow control.
"""
if len(self.buffer) > 0:
if time.time() - self.last_sent_time > 5:
try:
message = self.buffer.pop(0)
self._send_now(message)
finally:
self.last_sent_time = time.time()
def _send_now(self, message: str):
if not message:
return
self._maybe_print('twitch out now: ' + message)
if self.s is None:
print('Error: socket was None but tried to send a message')
return
self.s.send(message.encode())
def _send(self, message):
"""
Send a message to the IRC stream
:param message: the message to be sent.
:type message: string
"""
if not message:
return
self._maybe_print('twitch out queued: ' + message)
self.buffer.append(message + "\n")
def _send_pong(self):
"""
Send a pong message, usually in reply to a received ping message
"""
self._send("PONG")
def join_channel(self, channel):
"""
Join a different chat channel on Twitch.
Note, this function returns immediately, but the switch might
take a moment
:param channel: name of the channel (without #)
"""
self._send('JOIN #%s\r\n' % channel)
def send_chat_message(self, channel, message):
"""
Send a chat message to the server.
:param message: String to send (don't use \\n)
"""
self._send("PRIVMSG #{0} :{1}".format(channel, message))
def send_whisper_message(self, channel, user, message):
"""
Send a chat whisper to the server.
:param message: String to send (don't use \\n)
"""
self._send("PRIVMSG #{0} :/w {1} {2}".format(channel, user, message))
def _parse_message(self, data):
"""
Parse the bytes received from the socket.
:param data: the bytes received from the socket
:return:
"""
if TwitchChatStream._check_has_ping(data):
self._maybe_print('got ping')
self._send_pong()
channel_name_or_false = TwitchChatStream._check_has_channel(data)
if channel_name_or_false:
current_channel = channel_name_or_false[0]
print('Connected to channel: ' + current_channel)
if TwitchChatStream._check_has_message(data):
msg = {
'channel': re.findall(r'^:.+![a-zA-Z0-9_]+'
r'@[a-zA-Z0-9_]+'
r'.+ '
r'PRIVMSG (.*?) :',
data)[0],
'username': re.findall(r'^:([a-zA-Z0-9_]+)!', data)[0],
'message': re.findall(r'PRIVMSG #[a-zA-Z0-9_]+ :(.+)',
data)[0]
}
if msg['channel'].startswith('#'):
msg['channel'] = msg['channel'][1:]
| self._maybe_print(
'got msg: #{} @{} -- {}'.format(msg['channel'], msg['username'], msg['message']))
return msg
elif len(data):
| random_line_split |
|
padtwitch.py | , channel_name: str):
return self.getChannel(channel_name)['custom_commands']
def addCustomCommand(self, channel_name: str, cmd_name: str, cmd_value: str):
self.getCustomCommands(channel_name)[cmd_name] = cmd_value
self.save_settings()
def rmCustomCommand(self, channel_name: str, cmd_name: str):
self.getCustomCommands(channel_name).pop(cmd_name)
self.save_settings()
"""
Adapted from:
https://github.com/317070/python-twitch-stream/blob/master/twitchstream/chat.py
This file contains the python code used to interface with the Twitch
chat. Twitch chat is IRC-based, so it is basically an IRC-bot, but with
special features for Twitch, such as congestion control built in.
"""
class TwitchChatStream(object):
"""
The TwitchChatStream is used for interfacing with the Twitch chat of
a channel. To use this, an oauth-account (of the user chatting)
should be created. At the moment of writing, this can be done here:
https://twitchapps.com/tmi/
:param username: Twitch username
:type username: string
:param oauth: oauth for logging in (see https://twitchapps.com/tmi/)
:type oauth: string
:param verbose: show all stream messages on stdout (for debugging)
:type verbose: boolean
"""
def __init__(self, username, oauth, verbose=False):
"""Create a new stream object, and try to connect."""
self.username = username
self.oauth = oauth
self.verbose = verbose
self.current_channel = ""
self.last_sent_time = time.time()
self.buffer = []
self.s = None
self.in_shutdown = False
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.disconnect()
@staticmethod
def _logged_in_successful(data):
"""
Test the login status from the returned communication of the
server.
:param data: bytes received from server during login
:type data: list of bytes
:return boolean, True when you are logged in.
"""
if re.match(r'^:(testserver\.local|tmi\.twitch\.tv)'
r' NOTICE \* :'
r'(Login unsuccessful|Error logging in)*$',
data.strip()):
return False
else:
return True
@staticmethod
def _check_has_ping(data):
"""
Check if the data from the server contains a request to ping.
:param data: the byte string from the server
:type data: list of bytes
:return: True when there is a request to ping, False otherwise
"""
return re.match(
r'^PING :tmi\.twitch\.tv$', data)
@staticmethod
def _check_has_channel(data):
"""
Check if the data from the server contains a channel switch.
:param data: the byte string from the server
:type data: list of bytes
:return: Name of channel when new channel, False otherwise
"""
return re.findall(
r'^:[a-zA-Z0-9_]+\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'
r'\.tmi\.twitch\.tv '
r'JOIN #([a-zA-Z0-9_]+)$', data)
@staticmethod
def _check_has_message(data):
"""
Check if the data from the server contains a message a user
typed in the chat.
:param data: the byte string from the server
:type data: list of bytes
:return: returns iterator over these messages
"""
return re.match(r'^:[a-zA-Z0-9_]+\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'
r'\.tmi\.twitch\.tv '
r'PRIVMSG #[a-zA-Z0-9_]+ :.+$', data)
def connect(self):
"""
Connect to Twitch
"""
if self.s:
self.disconnect()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s = s
connect_host = "irc.twitch.tv"
connect_port = 6667
try:
print('starting connect to {} {}'.format(connect_host, connect_port))
s.connect((connect_host, connect_port))
except (Exception, IOError):
print("Unable to create a socket to %s:%s" % (
connect_host,
connect_port))
raise # unexpected, because it is a blocking socket
# Connected to twitch
# Sending our details to twitch...
self._send_now('PASS %s\r\n' % self.oauth)
self._send_now('NICK %s\r\n' % self.username)
received = s.recv(1024).decode()
if not TwitchChatStream._logged_in_successful(received):
self.s = None
# ... and they didn't accept our details
raise IOError("Twitch did not accept the username-oauth "
"combination")
# ... and they accepted our details
# Connected to twitch.tv!
# now make this socket non-blocking on the OS-level
if os.name != 'nt':
fcntl.fcntl(s, fcntl.F_SETFL, os.O_NONBLOCK)
else:
s.setblocking(0)
print('done with twitch connect')
self.in_shutdown = False # This is bad. probably need to use a connection counter or something
def disconnect(self):
if self.s is not None:
print('doing disconnect')
self.in_shutdown = True
self.s.close() # close the previous socket
self.s = None
print('done doing disconnect')
def _push_from_buffer(self):
"""
Push a message on the stack to the IRC stream.
This is necessary to avoid Twitch overflow control.
"""
if len(self.buffer) > 0:
if time.time() - self.last_sent_time > 5:
try:
message = self.buffer.pop(0)
self._send_now(message)
finally:
self.last_sent_time = time.time()
def _send_now(self, message: str):
if not message:
return
self._maybe_print('twitch out now: ' + message)
if self.s is None:
print('Error: socket was None but tried to send a message')
return
self.s.send(message.encode())
def _send(self, message):
"""
Send a message to the IRC stream
:param message: the message to be sent.
:type message: string
"""
if not message:
return
self._maybe_print('twitch out queued: ' + message)
self.buffer.append(message + "\n")
def _send_pong(self):
"""
Send a pong message, usually in reply to a received ping message
"""
self._send("PONG")
def join_channel(self, channel):
"""
Join a different chat channel on Twitch.
Note, this function returns immediately, but the switch might
take a moment
:param channel: name of the channel (without #)
"""
self._send('JOIN #%s\r\n' % channel)
def send_chat_message(self, channel, message):
"""
Send a chat message to the server.
:param message: String to send (don't use \\n)
"""
self._send("PRIVMSG #{0} :{1}".format(channel, message))
def send_whisper_message(self, channel, user, message):
"""
Send a chat whisper to the server.
:param message: String to send (don't use \\n)
"""
self._send("PRIVMSG #{0} :/w {1} {2}".format(channel, user, message))
def _parse_message(self, data):
"""
Parse the bytes received from the socket.
:param data: the bytes received from the socket
:return:
"""
if TwitchChatStream._check_has_ping(data):
self._maybe_print('got ping')
self._send_pong()
channel_name_or_false = TwitchChatStream._check_has_channel(data)
if channel_name_or_false:
current_channel = channel_name_or_false[0]
print('Connected to channel: ' + current_channel)
if TwitchChatStream._check_has_message(data):
msg = {
'channel': re.findall(r'^:.+![a-zA-Z0-9_]+'
r'@[a-zA-Z0-9_]+'
r'.+ '
r'PRIVMSG (.*?) :',
data)[0],
'username': re.findall(r'^:([a-zA-Z0-9_]+)!', data)[0],
'message': re.findall(r'PRIVMSG #[a-zA-Z0-9_]+ :(.+)',
data)[0]
}
if msg['channel'].startswith('#'):
msg['channel'] = msg['channel'][1:]
self._maybe_print(
'got msg: #{} @{} -- {}'.format(msg['channel'], msg['username'], msg['message']))
return msg
elif len(data):
self._maybe_print('other data: {}'.format(data))
else:
return None
def | twitch_receive_messages | identifier_name |
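
As a concrete illustration of the regexes above, here is one made-up raw PRIVMSG line and what _parse_message extracts from it (example only; the user, channel and message are invented):

raw = ':somebody!somebody@somebody.tmi.twitch.tv PRIVMSG #somechannel :^info tsubaki'
# _check_has_message(raw) matches, so _parse_message returns roughly:
#   {'channel': 'somechannel',   # leading '#' stripped by the startswith('#') branch
#    'username': 'somebody',
#    'message': '^info tsubaki'}
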
|
padtwitch.py | _name):]
action_fn(channel, username, query)
return
for command_name, command_response in self.settings.getCustomCommands(channel).items():
if message.rstrip()[1:] == command_name:
self.stream.send_chat_message(channel, command_response)
return
def lookup_monster(self, query):
padinfo = self.bot.get_cog('PadInfo')
if not padinfo:
return None
m, _, _ = padinfo.findMonster(query)
return m
def _get_header(self, m):
return '{}. {}'.format(m.monster_id_na, m.name_na)
def post_as(self, channel, username, m):
as_text = '(CD{}) {}'.format(m.active_skill.turn_min,
m.active_skill.desc) if m.active_skill else 'None/Missing'
return '{} : {}'.format(self._get_header(m), as_text)
def post_info(self, channel, username, m):
name = self._get_header(m)
types = m.type1 + ('/' + m.type2 if m.type2 else '') + ('/' + m.type3 if m.type3 else '')
stats = '({}/{}/{}) W{}'.format(m.hp, m.atk, m.rcv, m.weighted_stats)
awakenings = self.bot.get_cog('PadInfo').map_awakenings_text(m)
return '{} | {} | {} | {}'.format(name, types, stats, awakenings)
def post_ls(self, channel, username, m):
ls_text = "[{}] {}".format(
m.multiplier_text, m.leader_text) if m.leader_text else 'None/Missing'
return '{} : {}'.format(self._get_header(m), ls_text)
def whisper_help(self, channel, username, m):
help_text = 'Cmds: ^info <q>, ^as <q>, ^ls <q>, ^cc'
self.stream.send_chat_message(channel, help_text)
def whisper_commands(self, channel, username, m):
cmds_with_prefix = map(lambda x: '^' + x, self.settings.getCustomCommands(channel))
msg = "Custom Cmds: " + ', '.join(cmds_with_prefix)
self.stream.send_chat_message(channel, msg)
def add_com(self, channel, username, query):
query = query.strip()
if '"' in query or '^' in query or ' ' not in query:
self.stream.send_chat_message(channel, 'bad request')
return
space_idx = query.index(' ')
cmd_name = query[:space_idx]
cmd_value = query[space_idx:]
self.settings.addCustomCommand(channel, cmd_name, cmd_value)
self.stream.send_chat_message(channel, 'Done adding ' + cmd_name)
def rm_com(self, channel, username, query):
|
@commands.group(pass_context=True)
@checks.is_owner()
async def padtwitch(self, ctx):
"""Manage twitter feed mirroring"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@padtwitch.command(pass_context=True)
async def setUserName(self, ctx, user_name: str):
self.settings.setUserName(user_name)
await self.bot.say(inline('done, reload the cog'))
@padtwitch.command(pass_context=True)
async def setOauthCode(self, ctx, oauth_code: str):
self.settings.setOauthCode(oauth_code)
await self.bot.say(inline('done, reload the cog'))
@padtwitch.command(pass_context=True)
async def setEnabled(self, ctx, twitch_channel: str, enabled: bool):
self.settings.setChannelEnabled(twitch_channel, enabled)
await self.bot.say(inline('done, reload the cog'))
@padtwitch.command(pass_context=True)
async def join(self, ctx, twitch_channel):
self.stream.join_channel(twitch_channel)
await self.bot.say(inline('done'))
@padtwitch.command(pass_context=True)
async def send(self, ctx, twitch_channel, *, msg_text):
self.stream.send_chat_message(twitch_channel, msg_text)
await self.bot.say(inline('done'))
@padtwitch.command(pass_context=True)
async def list(self, ctx):
msg = 'UserName: {}'.format(self.settings.getUserName())
msg += '\nChannels:'
for channel, cs in self.settings.channels().items():
msg += '\n\t({}) {}'.format('+' if cs['enabled'] else '-', cs['name'])
await self.bot.say(box(msg))
def setup(bot):
n = PadTwitch(bot)
asyncio.get_event_loop().create_task(n.on_connect())
bot.add_cog(n)
class PadTwitchSettings(CogSettings):
def make_default_settings(self):
config = {
'channels': {},
'user_name': '',
'oauth_code': '',
}
return config
def getUserName(self):
return self.bot_settings['user_name']
def setUserName(self, user_name):
self.bot_settings['user_name'] = user_name
self.save_settings()
def getOauthCode(self):
return self.bot_settings['oauth_code']
def setOauthCode(self, oauth_code):
self.bot_settings['oauth_code'] = oauth_code
self.save_settings()
def channels(self):
return self.bot_settings['channels']
def getChannel(self, channel_name):
channels = self.channels()
if channel_name not in channels:
channels[channel_name] = {
'name': channel_name,
'enabled': False,
'custom_commands': {},
}
return channels[channel_name]
def setChannelEnabled(self, channel_name: str, enabled: bool):
self.getChannel(channel_name)['enabled'] = enabled
self.save_settings()
def getCustomCommands(self, channel_name: str):
return self.getChannel(channel_name)['custom_commands']
def addCustomCommand(self, channel_name: str, cmd_name: str, cmd_value: str):
self.getCustomCommands(channel_name)[cmd_name] = cmd_value
self.save_settings()
def rmCustomCommand(self, channel_name: str, cmd_name: str):
self.getCustomCommands(channel_name).pop(cmd_name)
self.save_settings()
"""
Adapted from:
https://github.com/317070/python-twitch-stream/blob/master/twitchstream/chat.py
This file contains the python code used to interface with the Twitch
chat. Twitch chat is IRC-based, so it is basically an IRC-bot, but with
special features for Twitch, such as congestion control built in.
"""
class TwitchChatStream(object):
"""
The TwitchChatStream is used for interfacing with the Twitch chat of
a channel. To use this, an oauth-account (of the user chatting)
should be created. At the moment of writing, this can be done here:
https://twitchapps.com/tmi/
:param username: Twitch username
:type username: string
:param oauth: oauth for logging in (see https://twitchapps.com/tmi/)
:type oauth: string
:param verbose: show all stream messages on stdout (for debugging)
:type verbose: boolean
"""
def __init__(self, username, oauth, verbose=False):
"""Create a new stream object, and try to connect."""
self.username = username
self.oauth = oauth
self.verbose = verbose
self.current_channel = ""
self.last_sent_time = time.time()
self.buffer = []
self.s = None
self.in_shutdown = False
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.disconnect()
@staticmethod
def _logged_in_successful(data):
"""
Test the login status from the returned communication of the
server.
:param data: bytes received from server during login
:type data: list of bytes
:return boolean, True when you are logged in.
"""
if re.match(r'^:(testserver\.local|tmi\.twitch\.tv)'
r' NOTICE \* :'
r'(Login unsuccessful|Error logging in)*$',
data.strip()):
return False
else:
return True
@staticmethod
def _check_has_ping(data):
"""
Check if the data from the server contains a request to ping.
:param data: the byte string from the server
:type data: list of bytes
:return: True when there is a request to ping, False otherwise
"""
return re.match(
r'^PING :tmi\.twitch\.tv$', data)
@staticmethod
def _check_has_channel(data):
"""
Check if the data from the server contains a channel switch.
:param data: the byte string from the server
:type data: list of bytes
:return: Name of channel when new channel, False otherwise
"""
return re.findall(
r'^:[a-zA-Z0-9_]+\![a-zA-Z0-9_]+@[a-zA-Z0-9_]+'
r'\.tmi\.twitch\.tv '
r'JOIN #([a-zA-Z0-9_]+)$', data)
@staticmethod
def _check_has_message(data):
"""
Check if the data from the | cmd_name = query.strip()
self.settings.rmCustomCommand(channel, cmd_name)
self.stream.send_chat_message(channel, 'Done deleting ' + cmd_name) | identifier_body |
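
The "congestion control" mentioned in the module docstring is the _send/_push_from_buffer pair shown in the earlier rows: outgoing lines are queued and at most one is flushed every five seconds. The same idea as a standalone helper (hypothetical sketch, not part of the cog):

class ThrottledSender:
    def __init__(self, send_now, min_interval=5.0):
        self.send_now = send_now          # callable that actually writes to the socket
        self.min_interval = min_interval
        self.buffer = []
        self.last_sent_time = time.time()

    def queue(self, message):
        self.buffer.append(message + '\n')

    def pump(self):
        # Call from the polling loop; sends at most one queued line per interval.
        if self.buffer and time.time() - self.last_sent_time > self.min_interval:
            try:
                self.send_now(self.buffer.pop(0))
            finally:
                self.last_sent_time = time.time()
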