file_name
large_stringlengths 4
140
| prefix
large_stringlengths 0
12.1k
| suffix
large_stringlengths 0
12k
| middle
large_stringlengths 0
7.51k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
klogd.rs | ;
#[no_mangle]
fn bb_simple_perror_msg(s: *const libc::c_char);
#[no_mangle]
static bb_banner: [libc::c_char; 0];
#[no_mangle]
static mut bb_common_bufsiz1: [libc::c_char; 0];
}
use crate::librb::signal::__sighandler_t;
use crate::librb::smallint;
pub type C2RustUnnamed = libc::c_uint;
pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054;
pub type C2RustUnnamed_0 = libc::c_uint;
pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8;
pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4;
pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2;
pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1;
pub type C2RustUnnamed_1 = libc::c_uint;
pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3;
pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2;
pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1;
pub const LOGMODE_NONE: C2RustUnnamed_1 = 0;
pub type C2RustUnnamed_2 = libc::c_uint;
pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2;
pub const OPT_LEVEL: C2RustUnnamed_2 = 1;
pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024;
/*
* Mini klogd implementation for busybox
*
* Copyright (C) 2001 by Gennady Feldman <[email protected]>.
* Changes: Made this a standalone busybox module which uses standalone
* syslog() client interface.
*
* Copyright (C) 1999-2004 by Erik Andersen <[email protected]>
*
* Copyright (C) 2000 by Karl M. Hegbloom <[email protected]>
*
* "circular buffer" Copyright (C) 2000 by Gennady Feldman <[email protected]>
*
* Maintainer: Gennady Feldman <[email protected]> as of Mar 12, 2001
*
* Licensed under GPLv2 or later, see file LICENSE in this source tree.
*/
//config:config KLOGD
//config: bool "klogd (5.7 kb)"
//config: default y
//config: help
//config: klogd is a utility which intercepts and logs all
//config: messages from the Linux kernel and sends the messages
//config: out to the 'syslogd' utility so they can be logged. If
//config: you wish to record the messages produced by the kernel,
//config: you should enable this option.
//config:
//config:comment "klogd should not be used together with syslog to kernel printk buffer"
//config: depends on KLOGD && FEATURE_KMSG_SYSLOG
//config:
//config:config FEATURE_KLOGD_KLOGCTL
//config: bool "Use the klogctl() interface"
//config: default y
//config: depends on KLOGD
//config: select PLATFORM_LINUX
//config: help
//config: The klogd applet supports two interfaces for reading
//config: kernel messages. Linux provides the klogctl() interface
//config: which allows reading messages from the kernel ring buffer
//config: independently from the file system.
//config:
//config: If you answer 'N' here, klogd will use the more portable
//config: approach of reading them from /proc or a device node.
//config: However, this method requires the file to be available.
//config:
//config: If in doubt, say 'Y'.
//applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP))
//kbuild:lib-$(CONFIG_KLOGD) += klogd.o
//usage:#define klogd_trivial_usage
//usage: "[-c N] [-n]"
//usage:#define klogd_full_usage "\n\n"
//usage: "Log kernel messages to syslog\n"
//usage: "\n -c N Print to console messages more urgent than prio N (1-8)"
//usage: "\n -n Run in foreground"
/* The Linux-specific klogctl(3) interface does not rely on the filesystem and
* allows us to change the console loglevel. Alternatively, we read the
* messages from _PATH_KLOG. */
unsafe extern "C" fn klogd_open() {
/* "Open the log. Currently a NOP" */
klogctl(1i32, 0 as *mut libc::c_char, 0i32);
}
unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) {
/* "printk() prints a message on the console only if it has a loglevel
* less than console_loglevel". Here we set console_loglevel = lvl. */
klogctl(8i32, 0 as *mut libc::c_char, lvl);
}
unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int {
/* "2 -- Read from the log." */
return klogctl(2i32, bufp, len);
}
unsafe extern "C" fn klogd_close() {
/* FYI: cmd 7 is equivalent to setting console_loglevel to 7
* via klogctl(8, NULL, 7). */
klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */
klogctl(0i32, 0 as *mut libc::c_char, 0i32);
/* "0 -- Close the log. Currently a NOP" */
}
/* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead,
* because that's how they interpret word "default"
* in the openlog() manpage:
* LOG_USER (default)
* generic user-level messages
* and the fact that LOG_KERN is a constant 0.
* glibc interprets it as "0 in openlog() call means 'use default'".
* I think it means "if openlog wasn't called before syslog() is called,
* use default".
* Convincing glibc maintainers otherwise is, as usual, nearly impossible.
* Should we open-code syslog() here to use correct facility?
*/
#[no_mangle]
pub unsafe extern "C" fn | (
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut i: libc::c_int = 0i32;
let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt: libc::c_int = 0;
let mut used: libc::c_int = 0;
opt = getopt32(
argv,
b"c:n\x00" as *const u8 as *const libc::c_char,
&mut opt_c as *mut *mut libc::c_char,
) as libc::c_int;
if opt & OPT_LEVEL as libc::c_int != 0 {
/* Valid levels are between 1 and 8 */
i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int
}
if opt & OPT_FOREGROUND as libc::c_int == 0 {
bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int);
}
logmode = LOGMODE_SYSLOG as libc::c_int as smallint;
/* klogd_open() before openlog(), since it might use fixed fd 3,
* and openlog() also may use the same fd 3 if we swap them:
*/
klogd_open();
openlog(
b"kernel\x00" as *const u8 as *const libc::c_char,
0i32,
0i32 << 3i32,
);
/*
* glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER
* above. The logic behind this is that standard
* http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html
* says the following about openlog and syslog:
* "LOG_USER
* Messages generated by arbitrary processes.
* This is the default facility identifier if none is specified."
*
* I believe glibc misinterpreted this text as "if openlog's
* third parameter is 0 (=LOG_K | klogd_main | identifier_name |
klogd.rs | fn getopt32(argv: *mut *mut libc::c_char, applet_opts: *const libc::c_char, _: ...) -> u32;
#[no_mangle]
fn write_pidfile_std_path_and_ext(path: *const libc::c_char);
#[no_mangle]
fn remove_pidfile_std_path_and_ext(path: *const libc::c_char);
#[no_mangle]
static mut logmode: smallint;
#[no_mangle]
fn bb_simple_perror_msg(s: *const libc::c_char);
#[no_mangle]
static bb_banner: [libc::c_char; 0];
#[no_mangle]
static mut bb_common_bufsiz1: [libc::c_char; 0];
}
use crate::librb::signal::__sighandler_t;
use crate::librb::smallint;
pub type C2RustUnnamed = libc::c_uint;
pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054;
pub type C2RustUnnamed_0 = libc::c_uint;
pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8;
pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4;
pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2;
pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1;
pub type C2RustUnnamed_1 = libc::c_uint;
pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3;
pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2;
pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1;
pub const LOGMODE_NONE: C2RustUnnamed_1 = 0;
pub type C2RustUnnamed_2 = libc::c_uint;
pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2;
pub const OPT_LEVEL: C2RustUnnamed_2 = 1;
pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024;
/*
* Mini klogd implementation for busybox
*
* Copyright (C) 2001 by Gennady Feldman <[email protected]>.
* Changes: Made this a standalone busybox module which uses standalone
* syslog() client interface.
*
* Copyright (C) 1999-2004 by Erik Andersen <[email protected]>
*
* Copyright (C) 2000 by Karl M. Hegbloom <[email protected]>
*
* "circular buffer" Copyright (C) 2000 by Gennady Feldman <[email protected]>
*
* Maintainer: Gennady Feldman <[email protected]> as of Mar 12, 2001
*
* Licensed under GPLv2 or later, see file LICENSE in this source tree.
*/
//config:config KLOGD
//config: bool "klogd (5.7 kb)"
//config: default y
//config: help
//config: klogd is a utility which intercepts and logs all
//config: messages from the Linux kernel and sends the messages
//config: out to the 'syslogd' utility so they can be logged. If
//config: you wish to record the messages produced by the kernel,
//config: you should enable this option.
//config:
//config:comment "klogd should not be used together with syslog to kernel printk buffer"
//config: depends on KLOGD && FEATURE_KMSG_SYSLOG
//config:
//config:config FEATURE_KLOGD_KLOGCTL
//config: bool "Use the klogctl() interface"
//config: default y
//config: depends on KLOGD
//config: select PLATFORM_LINUX
//config: help
//config: The klogd applet supports two interfaces for reading
//config: kernel messages. Linux provides the klogctl() interface
//config: which allows reading messages from the kernel ring buffer
//config: independently from the file system.
//config:
//config: If you answer 'N' here, klogd will use the more portable
//config: approach of reading them from /proc or a device node.
//config: However, this method requires the file to be available.
//config:
//config: If in doubt, say 'Y'.
//applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP))
//kbuild:lib-$(CONFIG_KLOGD) += klogd.o
//usage:#define klogd_trivial_usage
//usage: "[-c N] [-n]"
//usage:#define klogd_full_usage "\n\n"
//usage: "Log kernel messages to syslog\n"
//usage: "\n -c N Print to console messages more urgent than prio N (1-8)"
//usage: "\n -n Run in foreground"
/* The Linux-specific klogctl(3) interface does not rely on the filesystem and
* allows us to change the console loglevel. Alternatively, we read the
* messages from _PATH_KLOG. */
unsafe extern "C" fn klogd_open() {
/* "Open the log. Currently a NOP" */
klogctl(1i32, 0 as *mut libc::c_char, 0i32);
}
unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) {
/* "printk() prints a message on the console only if it has a loglevel
* less than console_loglevel". Here we set console_loglevel = lvl. */
klogctl(8i32, 0 as *mut libc::c_char, lvl);
}
unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int {
/* "2 -- Read from the log." */
return klogctl(2i32, bufp, len);
}
unsafe extern "C" fn klogd_close() {
/* FYI: cmd 7 is equivalent to setting console_loglevel to 7
* via klogctl(8, NULL, 7). */
klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */
klogctl(0i32, 0 as *mut libc::c_char, 0i32);
/* "0 -- Close the log. Currently a NOP" */
}
/* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead,
* because that's how they interpret word "default"
* in the openlog() manpage:
* LOG_USER (default)
* generic user-level messages
* and the fact that LOG_KERN is a constant 0.
* glibc interprets it as "0 in openlog() call means 'use default'".
* I think it means "if openlog wasn't called before syslog() is called,
* use default".
* Convincing glibc maintainers otherwise is, as usual, nearly impossible.
* Should we open-code syslog() here to use correct facility?
*/
#[no_mangle]
pub unsafe extern "C" fn klogd_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut i: libc::c_int = 0i32;
let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt: libc::c_int = 0;
let mut used: libc::c_int = 0;
opt = getopt32(
argv,
b"c:n\x00" as *const u8 as *const libc::c_char,
&mut opt_c as *mut *mut libc::c_char,
) as libc::c_int;
if opt & OPT_LEVEL as libc::c_int != 0 {
/* Valid levels are between 1 and 8 */
i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int
}
if opt & OPT_FOREGROUND as libc::c_int == 0 {
bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int);
}
logmode = LOGMODE_SYSLOG as libc::c_int as smallint;
/* klogd_open() before openlog(), since it might use fixed fd 3,
* and openlog() also may use the same fd 3 if we swap them:
*/
klogd_open();
openlog(
b"kernel\x00" as *const u8 as *const libc::c_char,
0i32,
0i32 << 3i32,
);
/*
* glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER
* above. The logic behind this is that standard
* http | random_line_split |
||
klogd.rs | ;
#[no_mangle]
fn bb_simple_perror_msg(s: *const libc::c_char);
#[no_mangle]
static bb_banner: [libc::c_char; 0];
#[no_mangle]
static mut bb_common_bufsiz1: [libc::c_char; 0];
}
use crate::librb::signal::__sighandler_t;
use crate::librb::smallint;
pub type C2RustUnnamed = libc::c_uint;
pub const BB_FATAL_SIGS: C2RustUnnamed = 117503054;
pub type C2RustUnnamed_0 = libc::c_uint;
pub const DAEMON_ONLY_SANITIZE: C2RustUnnamed_0 = 8;
pub const DAEMON_CLOSE_EXTRA_FDS: C2RustUnnamed_0 = 4;
pub const DAEMON_DEVNULL_STDIO: C2RustUnnamed_0 = 2;
pub const DAEMON_CHDIR_ROOT: C2RustUnnamed_0 = 1;
pub type C2RustUnnamed_1 = libc::c_uint;
pub const LOGMODE_BOTH: C2RustUnnamed_1 = 3;
pub const LOGMODE_SYSLOG: C2RustUnnamed_1 = 2;
pub const LOGMODE_STDIO: C2RustUnnamed_1 = 1;
pub const LOGMODE_NONE: C2RustUnnamed_1 = 0;
pub type C2RustUnnamed_2 = libc::c_uint;
pub const OPT_FOREGROUND: C2RustUnnamed_2 = 2;
pub const OPT_LEVEL: C2RustUnnamed_2 = 1;
pub const KLOGD_LOGBUF_SIZE: C2RustUnnamed_2 = 1024;
/*
* Mini klogd implementation for busybox
*
* Copyright (C) 2001 by Gennady Feldman <[email protected]>.
* Changes: Made this a standalone busybox module which uses standalone
* syslog() client interface.
*
* Copyright (C) 1999-2004 by Erik Andersen <[email protected]>
*
* Copyright (C) 2000 by Karl M. Hegbloom <[email protected]>
*
* "circular buffer" Copyright (C) 2000 by Gennady Feldman <[email protected]>
*
* Maintainer: Gennady Feldman <[email protected]> as of Mar 12, 2001
*
* Licensed under GPLv2 or later, see file LICENSE in this source tree.
*/
//config:config KLOGD
//config: bool "klogd (5.7 kb)"
//config: default y
//config: help
//config: klogd is a utility which intercepts and logs all
//config: messages from the Linux kernel and sends the messages
//config: out to the 'syslogd' utility so they can be logged. If
//config: you wish to record the messages produced by the kernel,
//config: you should enable this option.
//config:
//config:comment "klogd should not be used together with syslog to kernel printk buffer"
//config: depends on KLOGD && FEATURE_KMSG_SYSLOG
//config:
//config:config FEATURE_KLOGD_KLOGCTL
//config: bool "Use the klogctl() interface"
//config: default y
//config: depends on KLOGD
//config: select PLATFORM_LINUX
//config: help
//config: The klogd applet supports two interfaces for reading
//config: kernel messages. Linux provides the klogctl() interface
//config: which allows reading messages from the kernel ring buffer
//config: independently from the file system.
//config:
//config: If you answer 'N' here, klogd will use the more portable
//config: approach of reading them from /proc or a device node.
//config: However, this method requires the file to be available.
//config:
//config: If in doubt, say 'Y'.
//applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP))
//kbuild:lib-$(CONFIG_KLOGD) += klogd.o
//usage:#define klogd_trivial_usage
//usage: "[-c N] [-n]"
//usage:#define klogd_full_usage "\n\n"
//usage: "Log kernel messages to syslog\n"
//usage: "\n -c N Print to console messages more urgent than prio N (1-8)"
//usage: "\n -n Run in foreground"
/* The Linux-specific klogctl(3) interface does not rely on the filesystem and
* allows us to change the console loglevel. Alternatively, we read the
* messages from _PATH_KLOG. */
unsafe extern "C" fn klogd_open() {
/* "Open the log. Currently a NOP" */
klogctl(1i32, 0 as *mut libc::c_char, 0i32);
}
unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) {
/* "printk() prints a message on the console only if it has a loglevel
* less than console_loglevel". Here we set console_loglevel = lvl. */
klogctl(8i32, 0 as *mut libc::c_char, lvl);
}
unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int |
unsafe extern "C" fn klogd_close() {
/* FYI: cmd 7 is equivalent to setting console_loglevel to 7
* via klogctl(8, NULL, 7). */
klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */
klogctl(0i32, 0 as *mut libc::c_char, 0i32);
/* "0 -- Close the log. Currently a NOP" */
}
/* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead,
* because that's how they interpret word "default"
* in the openlog() manpage:
* LOG_USER (default)
* generic user-level messages
* and the fact that LOG_KERN is a constant 0.
* glibc interprets it as "0 in openlog() call means 'use default'".
* I think it means "if openlog wasn't called before syslog() is called,
* use default".
* Convincing glibc maintainers otherwise is, as usual, nearly impossible.
* Should we open-code syslog() here to use correct facility?
*/
#[no_mangle]
pub unsafe extern "C" fn klogd_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut i: libc::c_int = 0i32;
let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt: libc::c_int = 0;
let mut used: libc::c_int = 0;
opt = getopt32(
argv,
b"c:n\x00" as *const u8 as *const libc::c_char,
&mut opt_c as *mut *mut libc::c_char,
) as libc::c_int;
if opt & OPT_LEVEL as libc::c_int != 0 {
/* Valid levels are between 1 and 8 */
i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int
}
if opt & OPT_FOREGROUND as libc::c_int == 0 {
bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int);
}
logmode = LOGMODE_SYSLOG as libc::c_int as smallint;
/* klogd_open() before openlog(), since it might use fixed fd 3,
* and openlog() also may use the same fd 3 if we swap them:
*/
klogd_open();
openlog(
b"kernel\x00" as *const u8 as *const libc::c_char,
0i32,
0i32 << 3i32,
);
/*
* glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER
* above. The logic behind this is that standard
* http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html
* says the following about openlog and syslog:
* "LOG_USER
* Messages generated by arbitrary processes.
* This is the default facility identifier if none is specified."
*
* I believe glibc misinterpreted this text as "if openlog's
* third parameter is 0 (=LOG | {
/* "2 -- Read from the log." */
return klogctl(2i32, bufp, len);
} | identifier_body |
klogd.rs | dependently from the file system.
//config:
//config: If you answer 'N' here, klogd will use the more portable
//config: approach of reading them from /proc or a device node.
//config: However, this method requires the file to be available.
//config:
//config: If in doubt, say 'Y'.
//applet:IF_KLOGD(APPLET(klogd, BB_DIR_SBIN, BB_SUID_DROP))
//kbuild:lib-$(CONFIG_KLOGD) += klogd.o
//usage:#define klogd_trivial_usage
//usage: "[-c N] [-n]"
//usage:#define klogd_full_usage "\n\n"
//usage: "Log kernel messages to syslog\n"
//usage: "\n -c N Print to console messages more urgent than prio N (1-8)"
//usage: "\n -n Run in foreground"
/* The Linux-specific klogctl(3) interface does not rely on the filesystem and
* allows us to change the console loglevel. Alternatively, we read the
* messages from _PATH_KLOG. */
unsafe extern "C" fn klogd_open() {
/* "Open the log. Currently a NOP" */
klogctl(1i32, 0 as *mut libc::c_char, 0i32);
}
unsafe extern "C" fn klogd_setloglevel(mut lvl: libc::c_int) {
/* "printk() prints a message on the console only if it has a loglevel
* less than console_loglevel". Here we set console_loglevel = lvl. */
klogctl(8i32, 0 as *mut libc::c_char, lvl);
}
unsafe extern "C" fn klogd_read(mut bufp: *mut libc::c_char, mut len: libc::c_int) -> libc::c_int {
/* "2 -- Read from the log." */
return klogctl(2i32, bufp, len);
}
unsafe extern "C" fn klogd_close() {
/* FYI: cmd 7 is equivalent to setting console_loglevel to 7
* via klogctl(8, NULL, 7). */
klogctl(7i32, 0 as *mut libc::c_char, 0i32); /* "7 -- Enable printk's to console" */
klogctl(0i32, 0 as *mut libc::c_char, 0i32);
/* "0 -- Close the log. Currently a NOP" */
}
/* TODO: glibc openlog(LOG_KERN) reverts to LOG_USER instead,
* because that's how they interpret word "default"
* in the openlog() manpage:
* LOG_USER (default)
* generic user-level messages
* and the fact that LOG_KERN is a constant 0.
* glibc interprets it as "0 in openlog() call means 'use default'".
* I think it means "if openlog wasn't called before syslog() is called,
* use default".
* Convincing glibc maintainers otherwise is, as usual, nearly impossible.
* Should we open-code syslog() here to use correct facility?
*/
#[no_mangle]
pub unsafe extern "C" fn klogd_main(
mut _argc: libc::c_int,
mut argv: *mut *mut libc::c_char,
) -> libc::c_int {
let mut i: libc::c_int = 0i32;
let mut opt_c: *mut libc::c_char = 0 as *mut libc::c_char;
let mut opt: libc::c_int = 0;
let mut used: libc::c_int = 0;
opt = getopt32(
argv,
b"c:n\x00" as *const u8 as *const libc::c_char,
&mut opt_c as *mut *mut libc::c_char,
) as libc::c_int;
if opt & OPT_LEVEL as libc::c_int != 0 {
/* Valid levels are between 1 and 8 */
i = xatou_range(opt_c, 1i32 as libc::c_uint, 8i32 as libc::c_uint) as libc::c_int
}
if opt & OPT_FOREGROUND as libc::c_int == 0 {
bb_daemonize_or_rexec(DAEMON_CHDIR_ROOT as libc::c_int);
}
logmode = LOGMODE_SYSLOG as libc::c_int as smallint;
/* klogd_open() before openlog(), since it might use fixed fd 3,
* and openlog() also may use the same fd 3 if we swap them:
*/
klogd_open();
openlog(
b"kernel\x00" as *const u8 as *const libc::c_char,
0i32,
0i32 << 3i32,
);
/*
* glibc problem: for some reason, glibc changes LOG_KERN to LOG_USER
* above. The logic behind this is that standard
* http://pubs.opengroup.org/onlinepubs/9699919799/functions/syslog.html
* says the following about openlog and syslog:
* "LOG_USER
* Messages generated by arbitrary processes.
* This is the default facility identifier if none is specified."
*
* I believe glibc misinterpreted this text as "if openlog's
* third parameter is 0 (=LOG_KERN), treat it as LOG_USER".
* Whereas it was meant to say "if *syslog* is called with facility
* 0 in its 1st parameter without prior call to openlog, then perform
* implicit openlog(LOG_USER)".
*
* As a result of this, eh, feature, standard klogd was forced
* to open-code its own openlog and syslog implementation (!).
*
* Note that prohibiting openlog(LOG_KERN) on libc level does not
* add any security: any process can open a socket to "/dev/log"
* and write a string "<0>Voila, a LOG_KERN + LOG_EMERG message"
*
* Google code search tells me there is no widespread use of
* openlog("foo", 0, 0), thus fixing glibc won't break userspace.
*
* The bug against glibc was filed:
* bugzilla.redhat.com/show_bug.cgi?id=547000
*/
if i != 0 {
klogd_setloglevel(i);
}
signal(
1i32,
::std::mem::transmute::<libc::intptr_t, __sighandler_t>(1i32 as libc::intptr_t),
);
/* We want klogd_read to not be restarted, thus _norestart: */
bb_signals_recursive_norestart(
BB_FATAL_SIGS as libc::c_int,
Some(record_signo as unsafe extern "C" fn(_: libc::c_int) -> ()),
);
syslog(
5i32,
b"klogd started: %s\x00" as *const u8 as *const libc::c_char,
bb_banner.as_ptr(),
);
write_pidfile_std_path_and_ext(b"klogd\x00" as *const u8 as *const libc::c_char);
used = 0i32;
while bb_got_signal == 0 {
let mut n: libc::c_int = 0;
let mut priority: libc::c_int = 0;
let mut start: *mut libc::c_char = 0 as *mut libc::c_char;
start = bb_common_bufsiz1.as_mut_ptr().offset(used as isize);
n = klogd_read(start, KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 - used);
if n < 0i32 {
if *bb_errno == 4i32 {
continue;
}
bb_simple_perror_msg(b"klogctl(2) error\x00" as *const u8 as *const libc::c_char);
break;
} else | {
*start.offset(n as isize) = '\u{0}' as i32 as libc::c_char;
/* Process each newline-terminated line in the buffer */
start = bb_common_bufsiz1.as_mut_ptr();
loop {
let mut newline: *mut libc::c_char = strchrnul(start, '\n' as i32);
if *newline as libc::c_int == '\u{0}' as i32 {
/* This line is incomplete */
/* move it to the front of the buffer */
overlapping_strcpy(bb_common_bufsiz1.as_mut_ptr(), start);
used = newline.wrapping_offset_from(start) as libc::c_long as libc::c_int;
if used < KLOGD_LOGBUF_SIZE as libc::c_int - 1i32 {
break;
}
/* buffer is full, log it anyway */
used = 0i32;
newline = 0 as *mut libc::c_char
} else {
let fresh0 = newline;
newline = newline.offset(1); | conditional_block |
|
sandbox.go | x*y, and x^y. !Wow! :-)
}
}
return outer
}
//maps
type LocationCoordinate struct {
Lat, Long float64
}
var map1 map[string]LocationCoordinate
var map2 = map[string]LocationCoordinate{
"Bell Labs": LocationCoordinate{
40.68433, -74.39967,
},
"Google": LocationCoordinate{
37.42202, -122.08408,
},
"Apple": {37.42202, -122.08408},
}
//map exercise
func WordCount(s string) map[string]int {
var map4 = make(map[string]int)
for i, v := range strings.Fields(s) {
if map4[v] != 0 {
continue
}
fmt.Println("Looping", i)
var count int = 0
for _, word := range strings.Fields(s) {
if word == v {
count++
}
}
map4[v] = count
}
return map4
}
//closure
func | () func(int) int {
sum := 0
return func(x int) int {
sum += x
return sum
}
}
var (
previous, current int
)
func fibonacci() func() int {
return func() int {
sum := previous + current
if sum == 0 {
previous = 0
current = 1
return previous + current
} else {
previous = current
current = sum
return current
}
}
}
type FloatVertex struct {
X, Y float64
}
func (v *FloatVertex) Abs() float64 {
return math.Sqrt(v.X*v.X + v.Y*v.Y)
}
type MyFloat float64
func (f MyFloat) Abs() float64 {
if f < 0 {
return float64(-f)
}
return float64(f)
}
func (v *FloatVertex) Scale(f float64) {
v.X = v.X * f
v.Y = v.Y * f
}
//interface
type Abser interface {
Abs() float64
}
func runInterface() {
var abser Abser
f2 := MyFloat(-math.Sqrt2)
v9 := FloatVertex{3, 4}
abser = f2 // a MyFloat implements Abser
abser = &v9 // a *Vertex implements Abser
// In the following line, v is a Vertex (not *Vertex)
// and does NOT implement Abser.
// abser = v9
fmt.Println(abser.Abs())
}
// implicit interface
type Reader interface {
Read(b []byte) (n int, err error)
}
type Writer interface {
Write(b []byte) (n int, err error)
}
type ReadWriter interface {
Reader
Writer
}
type Person struct {
Name string
Age int
}
func (p Person) String() string {
return fmt.Sprintf("%v (%v years)", p.Name, p.Age)
}
func runImplicitInterface() {
fmt.Println("Implicit interface")
var w Writer
// os.Stdout implements Writer
w = os.Stdout
fmt.Fprintf(w, "hello, writer\n")
person := Person{"Arthur Dent", 42}
anotherPerson := Person{"Zaphod Beeblebrox", 9001}
fmt.Println(person, anotherPerson)
}
//stringer
type IPAddr [4]byte
func (ip IPAddr) String() string {
return fmt.Sprintf("%v.%v.%v.%v", ip[0], ip[1], ip[2], ip[3])
}
func runStringer() {
fmt.Println("stringer---")
addrs := map[string]IPAddr{
"loopback": {127, 0, 0, 1},
"googleDNS": {8, 8, 8, 8},
}
for n, a := range addrs {
fmt.Printf("%v: %v\n", n, a)
}
}
// errors
type MyError struct {
When time.Time
What string
}
func (e *MyError) Error() string {
return fmt.Sprintf("at %v, %s",
e.When, e.What)
}
func run() error {
return &MyError{
time.Now(),
"it didn't work",
}
}
func runErrors() {
fmt.Println("errors")
if err := run(); err != nil {
fmt.Println(err)
}
}
// go routine
func say(s string) {
for i := 0; i < 5; i++ {
time.Sleep(100 * time.Millisecond)
fmt.Println(s)
}
}
func sum(a []int, c chan int) {
sum := 0
for _, v := range a {
sum += v
}
c <- sum // send sum to channel c
}
func runGoRoutine() {
a := []int{7, 2, 8, -9, 4, 0}
c := make(chan int)
go sum(a[:len(a)/2], c)
go sum(a[len(a)/2:], c)
x, y := <-c, <-c // receive from channel c and assign value to x and y
fmt.Println(x, y, x+y)
}
func runBufferedChannel() {
c := make(chan int, 2)
c <- 1
c <- 2
fmt.Println(<-c)
fmt.Println(<-c)
}
func fibonacci2(n int, c chan int) {
x, y := 0, 1
for i := 0; i < n; i++ {
c <- x
x, y = y, x+y
}
close(c)
}
func runRangeAndClose() {
fmt.Println("run fibonacci")
c := make(chan int, 10)
go fibonacci2(cap(c), c)
for i := range c {
fmt.Println(i)
}
}
var (
startTime time.Time
)
func fibonacci3(c, quit chan int) {
x, y := 0, 1
for {
select {
case c <- x:
fmt.Println("Before write", time.Since(startTime))
fmt.Println("Send on chan", time.Since(startTime))
x, y = y, x+y
case <-quit:
fmt.Println("quit")
return
}
}
}
func runFibonacci3() {
c := make(chan int)
quit := make(chan int)
go func() {
for i := 0; i < 10; i++ {
fmt.Println("receivedChan", <-c, time.Since(startTime))
}
quit <- 0
}()
startTime = time.Now()
fibonacci3(c, quit)
}
func runDefaultSelection() {
tick := time.Tick(100 * time.Millisecond)
boom := time.After(500 * time.Millisecond)
for {
select {
case <-tick:
fmt.Println("tick.")
case <-boom:
fmt.Println("BOOM!")
return
default:
fmt.Println(" .")
time.Sleep(50 * time.Millisecond)
}
}
}
func main() {
fmt.Println("Welcome to the playground!")
fmt.Println("The time is", time.Now())
fmt.Println("My favorite number is", rand.Intn(10))
fmt.Printf("Now you have %g problems.", math.Nextafter(2, 3))
fmt.Println(math.Pi)
fmt.Println(add1(42, 13))
fmt.Println(add2(42, 13))
a, b := swap("hello", "world")
fmt.Println(a, b)
fmt.Println(split(17))
//1
var i1 int
fmt.Println(i1, c1, python1, java1)
//2
var c2, python2, java2 = true, false, "no!"
fmt.Println(i2, j2, c2, python2, java2)
//3
var i3, j3 int = 1, 2
k3 := 3
c3, python3, java3 := true, false, "no!"
fmt.Println(i3, j3, k3, c3, python3, java3)
const f4 = "%T(%v)\n"
fmt.Printf(f4, ToBe, ToBe)
fmt.Printf(f4, MaxInt, MaxInt)
fmt.Printf(f4, z4, z4)
// data types
// bool
// string
// int int8 int16 int32 int64
// uint uint8 uint16 uint32 uint64 uintptr
// byte // alias for uint8
// rune // alias for int32
// // represents a Unicode code point
// float32 float64
// complex64 complex128
// default value
var i5 int
var f5 float64
var b5 bool
var s5 string
fmt.Printf("%v %v %v %q\n", i5, f5, b5, s5)
var x6, y6 int = 3, 4
var f6 float64 = math.Sqrt(float64(x6*x6 + y6*y6))
var z6 int = int(f6)
fmt.Println(x | adder | identifier_name |
sandbox.go | *y, and x^y. !Wow! :-)
}
}
return outer
}
//maps
type LocationCoordinate struct {
Lat, Long float64
}
var map1 map[string]LocationCoordinate
var map2 = map[string]LocationCoordinate{
"Bell Labs": LocationCoordinate{
40.68433, -74.39967,
},
"Google": LocationCoordinate{
37.42202, -122.08408,
},
"Apple": {37.42202, -122.08408},
}
//map exercise
func WordCount(s string) map[string]int {
var map4 = make(map[string]int)
for i, v := range strings.Fields(s) {
if map4[v] != 0 {
continue
}
fmt.Println("Looping", i)
var count int = 0
for _, word := range strings.Fields(s) {
if word == v |
}
map4[v] = count
}
return map4
}
//closure
func adder() func(int) int {
sum := 0
return func(x int) int {
sum += x
return sum
}
}
var (
previous, current int
)
func fibonacci() func() int {
return func() int {
sum := previous + current
if sum == 0 {
previous = 0
current = 1
return previous + current
} else {
previous = current
current = sum
return current
}
}
}
type FloatVertex struct {
X, Y float64
}
func (v *FloatVertex) Abs() float64 {
return math.Sqrt(v.X*v.X + v.Y*v.Y)
}
type MyFloat float64
func (f MyFloat) Abs() float64 {
if f < 0 {
return float64(-f)
}
return float64(f)
}
func (v *FloatVertex) Scale(f float64) {
v.X = v.X * f
v.Y = v.Y * f
}
//interface
type Abser interface {
Abs() float64
}
func runInterface() {
var abser Abser
f2 := MyFloat(-math.Sqrt2)
v9 := FloatVertex{3, 4}
abser = f2 // a MyFloat implements Abser
abser = &v9 // a *Vertex implements Abser
// In the following line, v is a Vertex (not *Vertex)
// and does NOT implement Abser.
// abser = v9
fmt.Println(abser.Abs())
}
// implicit interface
type Reader interface {
Read(b []byte) (n int, err error)
}
type Writer interface {
Write(b []byte) (n int, err error)
}
type ReadWriter interface {
Reader
Writer
}
type Person struct {
Name string
Age int
}
func (p Person) String() string {
return fmt.Sprintf("%v (%v years)", p.Name, p.Age)
}
func runImplicitInterface() {
fmt.Println("Implicit interface")
var w Writer
// os.Stdout implements Writer
w = os.Stdout
fmt.Fprintf(w, "hello, writer\n")
person := Person{"Arthur Dent", 42}
anotherPerson := Person{"Zaphod Beeblebrox", 9001}
fmt.Println(person, anotherPerson)
}
//stringer
type IPAddr [4]byte
func (ip IPAddr) String() string {
return fmt.Sprintf("%v.%v.%v.%v", ip[0], ip[1], ip[2], ip[3])
}
func runStringer() {
fmt.Println("stringer---")
addrs := map[string]IPAddr{
"loopback": {127, 0, 0, 1},
"googleDNS": {8, 8, 8, 8},
}
for n, a := range addrs {
fmt.Printf("%v: %v\n", n, a)
}
}
// errors
type MyError struct {
When time.Time
What string
}
func (e *MyError) Error() string {
return fmt.Sprintf("at %v, %s",
e.When, e.What)
}
func run() error {
return &MyError{
time.Now(),
"it didn't work",
}
}
func runErrors() {
fmt.Println("errors")
if err := run(); err != nil {
fmt.Println(err)
}
}
// go routine
func say(s string) {
for i := 0; i < 5; i++ {
time.Sleep(100 * time.Millisecond)
fmt.Println(s)
}
}
func sum(a []int, c chan int) {
sum := 0
for _, v := range a {
sum += v
}
c <- sum // send sum to channel c
}
func runGoRoutine() {
a := []int{7, 2, 8, -9, 4, 0}
c := make(chan int)
go sum(a[:len(a)/2], c)
go sum(a[len(a)/2:], c)
x, y := <-c, <-c // receive from channel c and assign value to x and y
fmt.Println(x, y, x+y)
}
func runBufferedChannel() {
c := make(chan int, 2)
c <- 1
c <- 2
fmt.Println(<-c)
fmt.Println(<-c)
}
func fibonacci2(n int, c chan int) {
x, y := 0, 1
for i := 0; i < n; i++ {
c <- x
x, y = y, x+y
}
close(c)
}
func runRangeAndClose() {
fmt.Println("run fibonacci")
c := make(chan int, 10)
go fibonacci2(cap(c), c)
for i := range c {
fmt.Println(i)
}
}
var (
startTime time.Time
)
func fibonacci3(c, quit chan int) {
x, y := 0, 1
for {
select {
case c <- x:
fmt.Println("Before write", time.Since(startTime))
fmt.Println("Send on chan", time.Since(startTime))
x, y = y, x+y
case <-quit:
fmt.Println("quit")
return
}
}
}
func runFibonacci3() {
c := make(chan int)
quit := make(chan int)
go func() {
for i := 0; i < 10; i++ {
fmt.Println("receivedChan", <-c, time.Since(startTime))
}
quit <- 0
}()
startTime = time.Now()
fibonacci3(c, quit)
}
func runDefaultSelection() {
tick := time.Tick(100 * time.Millisecond)
boom := time.After(500 * time.Millisecond)
for {
select {
case <-tick:
fmt.Println("tick.")
case <-boom:
fmt.Println("BOOM!")
return
default:
fmt.Println(" .")
time.Sleep(50 * time.Millisecond)
}
}
}
func main() {
fmt.Println("Welcome to the playground!")
fmt.Println("The time is", time.Now())
fmt.Println("My favorite number is", rand.Intn(10))
fmt.Printf("Now you have %g problems.", math.Nextafter(2, 3))
fmt.Println(math.Pi)
fmt.Println(add1(42, 13))
fmt.Println(add2(42, 13))
a, b := swap("hello", "world")
fmt.Println(a, b)
fmt.Println(split(17))
//1
var i1 int
fmt.Println(i1, c1, python1, java1)
//2
var c2, python2, java2 = true, false, "no!"
fmt.Println(i2, j2, c2, python2, java2)
//3
var i3, j3 int = 1, 2
k3 := 3
c3, python3, java3 := true, false, "no!"
fmt.Println(i3, j3, k3, c3, python3, java3)
const f4 = "%T(%v)\n"
fmt.Printf(f4, ToBe, ToBe)
fmt.Printf(f4, MaxInt, MaxInt)
fmt.Printf(f4, z4, z4)
// data types
// bool
// string
// int int8 int16 int32 int64
// uint uint8 uint16 uint32 uint64 uintptr
// byte // alias for uint8
// rune // alias for int32
// // represents a Unicode code point
// float32 float64
// complex64 complex128
// default value
var i5 int
var f5 float64
var b5 bool
var s5 string
fmt.Printf("%v %v %v %q\n", i5, f5, b5, s5)
var x6, y6 int = 3, 4
var f6 float64 = math.Sqrt(float64(x6*x6 + y6*y6))
var z6 int = int(f6)
fmt.Println | {
count++
} | conditional_block |
sandbox.go | x*y, and x^y. !Wow! :-)
}
}
return outer
}
//maps
type LocationCoordinate struct {
Lat, Long float64
}
var map1 map[string]LocationCoordinate
var map2 = map[string]LocationCoordinate{
"Bell Labs": LocationCoordinate{
40.68433, -74.39967,
},
"Google": LocationCoordinate{
37.42202, -122.08408,
},
"Apple": {37.42202, -122.08408},
}
//map exercise
func WordCount(s string) map[string]int {
var map4 = make(map[string]int)
for i, v := range strings.Fields(s) {
if map4[v] != 0 {
continue
}
fmt.Println("Looping", i)
var count int = 0
for _, word := range strings.Fields(s) {
if word == v {
count++
}
}
map4[v] = count
}
return map4
}
//closure
func adder() func(int) int {
sum := 0
return func(x int) int {
sum += x
return sum
}
}
var (
previous, current int
)
func fibonacci() func() int {
return func() int {
sum := previous + current
if sum == 0 {
previous = 0
current = 1
return previous + current
} else {
previous = current
current = sum
return current
}
}
}
type FloatVertex struct {
X, Y float64
}
func (v *FloatVertex) Abs() float64 {
return math.Sqrt(v.X*v.X + v.Y*v.Y)
}
type MyFloat float64
func (f MyFloat) Abs() float64 {
if f < 0 {
return float64(-f)
}
return float64(f)
}
func (v *FloatVertex) Scale(f float64) {
v.X = v.X * f
v.Y = v.Y * f
}
//interface
type Abser interface {
Abs() float64
}
func runInterface() {
var abser Abser
f2 := MyFloat(-math.Sqrt2)
v9 := FloatVertex{3, 4}
abser = f2 // a MyFloat implements Abser
abser = &v9 // a *Vertex implements Abser
// In the following line, v is a Vertex (not *Vertex)
// and does NOT implement Abser.
// abser = v9
fmt.Println(abser.Abs())
}
// implicit interface
type Reader interface {
Read(b []byte) (n int, err error)
}
type Writer interface {
Write(b []byte) (n int, err error)
}
type ReadWriter interface {
Reader
Writer
}
type Person struct {
Name string
Age int
}
func (p Person) String() string {
return fmt.Sprintf("%v (%v years)", p.Name, p.Age)
}
func runImplicitInterface() {
fmt.Println("Implicit interface")
var w Writer
// os.Stdout implements Writer
w = os.Stdout
fmt.Fprintf(w, "hello, writer\n")
person := Person{"Arthur Dent", 42}
anotherPerson := Person{"Zaphod Beeblebrox", 9001}
fmt.Println(person, anotherPerson)
}
//stringer
type IPAddr [4]byte
func (ip IPAddr) String() string {
return fmt.Sprintf("%v.%v.%v.%v", ip[0], ip[1], ip[2], ip[3])
}
func runStringer() {
fmt.Println("stringer---")
addrs := map[string]IPAddr{
"loopback": {127, 0, 0, 1},
"googleDNS": {8, 8, 8, 8},
}
for n, a := range addrs {
fmt.Printf("%v: %v\n", n, a)
}
}
// errors
type MyError struct {
When time.Time
What string
}
func (e *MyError) Error() string {
return fmt.Sprintf("at %v, %s",
e.When, e.What)
}
func run() error {
return &MyError{
time.Now(),
"it didn't work",
}
}
func runErrors() {
fmt.Println("errors")
if err := run(); err != nil {
fmt.Println(err)
}
}
// go routine
func say(s string) {
for i := 0; i < 5; i++ {
time.Sleep(100 * time.Millisecond)
fmt.Println(s)
}
}
func sum(a []int, c chan int) {
sum := 0
for _, v := range a {
sum += v
}
c <- sum // send sum to channel c
} | func runGoRoutine() {
a := []int{7, 2, 8, -9, 4, 0}
c := make(chan int)
go sum(a[:len(a)/2], c)
go sum(a[len(a)/2:], c)
x, y := <-c, <-c // receive from channel c and assign value to x and y
fmt.Println(x, y, x+y)
}
func runBufferedChannel() {
c := make(chan int, 2)
c <- 1
c <- 2
fmt.Println(<-c)
fmt.Println(<-c)
}
func fibonacci2(n int, c chan int) {
x, y := 0, 1
for i := 0; i < n; i++ {
c <- x
x, y = y, x+y
}
close(c)
}
func runRangeAndClose() {
fmt.Println("run fibonacci")
c := make(chan int, 10)
go fibonacci2(cap(c), c)
for i := range c {
fmt.Println(i)
}
}
var (
startTime time.Time
)
func fibonacci3(c, quit chan int) {
x, y := 0, 1
for {
select {
case c <- x:
fmt.Println("Before write", time.Since(startTime))
fmt.Println("Send on chan", time.Since(startTime))
x, y = y, x+y
case <-quit:
fmt.Println("quit")
return
}
}
}
func runFibonacci3() {
c := make(chan int)
quit := make(chan int)
go func() {
for i := 0; i < 10; i++ {
fmt.Println("receivedChan", <-c, time.Since(startTime))
}
quit <- 0
}()
startTime = time.Now()
fibonacci3(c, quit)
}
func runDefaultSelection() {
tick := time.Tick(100 * time.Millisecond)
boom := time.After(500 * time.Millisecond)
for {
select {
case <-tick:
fmt.Println("tick.")
case <-boom:
fmt.Println("BOOM!")
return
default:
fmt.Println(" .")
time.Sleep(50 * time.Millisecond)
}
}
}
func main() {
fmt.Println("Welcome to the playground!")
fmt.Println("The time is", time.Now())
fmt.Println("My favorite number is", rand.Intn(10))
fmt.Printf("Now you have %g problems.", math.Nextafter(2, 3))
fmt.Println(math.Pi)
fmt.Println(add1(42, 13))
fmt.Println(add2(42, 13))
a, b := swap("hello", "world")
fmt.Println(a, b)
fmt.Println(split(17))
//1
var i1 int
fmt.Println(i1, c1, python1, java1)
//2
var c2, python2, java2 = true, false, "no!"
fmt.Println(i2, j2, c2, python2, java2)
//3
var i3, j3 int = 1, 2
k3 := 3
c3, python3, java3 := true, false, "no!"
fmt.Println(i3, j3, k3, c3, python3, java3)
const f4 = "%T(%v)\n"
fmt.Printf(f4, ToBe, ToBe)
fmt.Printf(f4, MaxInt, MaxInt)
fmt.Printf(f4, z4, z4)
// data types
// bool
// string
// int int8 int16 int32 int64
// uint uint8 uint16 uint32 uint64 uintptr
// byte // alias for uint8
// rune // alias for int32
// // represents a Unicode code point
// float32 float64
// complex64 complex128
// default value
var i5 int
var f5 float64
var b5 bool
var s5 string
fmt.Printf("%v %v %v %q\n", i5, f5, b5, s5)
var x6, y6 int = 3, 4
var f6 float64 = math.Sqrt(float64(x6*x6 + y6*y6))
var z6 int = int(f6)
fmt.Println(x | random_line_split |
|
sandbox.go |
func swap(x, y string) (string, string) {
return y, x
}
func split(sum int) (x, y int) {
x = sum * 4 / 9
y = sum - x
return //naked return
}
//1
var c1, python1, java1 bool
//2
var i2, j2 int = 1, 2
var (
ToBe bool = false
MaxInt uint64 = 1<<64 - 1
z4 complex128 = cmplx.Sqrt(-5 + 12i)
)
const Pi = 3.14
const (
Big = 1 << 100
Small = Big >> 99
)
func needInt(x int) int { return x*10 + 1 }
func needFloat(x float64) float64 {
return x * 0.1
}
func sqrt(x float64) string {
if x < 0 {
return sqrt(-x) + "i"
}
return fmt.Sprint(math.Sqrt(x))
}
func pow1(x, n, lim float64) float64 {
if v := math.Pow(x, n); v < lim {
return v
} else {
fmt.Printf("%g >= %g\n", v, lim)
}
// can't use v here, though
return lim
}
func DeferFunc() {
// defer
defer fmt.Println("world")
fmt.Println("hello")
// stacking defer
fmt.Println("counting")
for i := 0; i < 10; i++ {
defer fmt.Println(i)
}
fmt.Println("done")
}
type Vertex struct {
X int
Y int
}
var (
v3 = Vertex{1, 2} // has type Vertex
v4 = Vertex{X: 1} // Y:0 is implicit
v5 = Vertex{} // X:0 and Y:0
p2 = &Vertex{1, 2} // has type *Vertex
)
// making slices
func printSlice(s string, x []int) {
fmt.Printf("%s len=%d cap=%d %v\n",
s, len(x), cap(x), x)
}
// range
var pow2 = []int{1, 2, 4, 8, 16, 32, 64, 128}
//slicing exercise
func Pic(dx, dy int) [][]uint8 {
outer := make([][]uint8, dy)
for i := range outer {
outer[i] = make([]uint8, dx)
for j := range outer[i] {
outer[i][j] = uint8(j) // also try (x+y)/2, x*y, and x^y. !Wow! :-)
}
}
return outer
}
//maps
type LocationCoordinate struct {
Lat, Long float64
}
var map1 map[string]LocationCoordinate
var map2 = map[string]LocationCoordinate{
"Bell Labs": LocationCoordinate{
40.68433, -74.39967,
},
"Google": LocationCoordinate{
37.42202, -122.08408,
},
"Apple": {37.42202, -122.08408},
}
//map exercise
func WordCount(s string) map[string]int {
var map4 = make(map[string]int)
for i, v := range strings.Fields(s) {
if map4[v] != 0 {
continue
}
fmt.Println("Looping", i)
var count int = 0
for _, word := range strings.Fields(s) {
if word == v {
count++
}
}
map4[v] = count
}
return map4
}
//closure
func adder() func(int) int {
sum := 0
return func(x int) int {
sum += x
return sum
}
}
var (
previous, current int
)
func fibonacci() func() int {
return func() int {
sum := previous + current
if sum == 0 {
previous = 0
current = 1
return previous + current
} else {
previous = current
current = sum
return current
}
}
}
type FloatVertex struct {
X, Y float64
}
func (v *FloatVertex) Abs() float64 {
return math.Sqrt(v.X*v.X + v.Y*v.Y)
}
type MyFloat float64
func (f MyFloat) Abs() float64 {
if f < 0 {
return float64(-f)
}
return float64(f)
}
func (v *FloatVertex) Scale(f float64) {
v.X = v.X * f
v.Y = v.Y * f
}
//interface
type Abser interface {
Abs() float64
}
func runInterface() {
var abser Abser
f2 := MyFloat(-math.Sqrt2)
v9 := FloatVertex{3, 4}
abser = f2 // a MyFloat implements Abser
abser = &v9 // a *Vertex implements Abser
// In the following line, v is a Vertex (not *Vertex)
// and does NOT implement Abser.
// abser = v9
fmt.Println(abser.Abs())
}
// implicit interface
type Reader interface {
Read(b []byte) (n int, err error)
}
type Writer interface {
Write(b []byte) (n int, err error)
}
type ReadWriter interface {
Reader
Writer
}
type Person struct {
Name string
Age int
}
func (p Person) String() string {
return fmt.Sprintf("%v (%v years)", p.Name, p.Age)
}
func runImplicitInterface() {
fmt.Println("Implicit interface")
var w Writer
// os.Stdout implements Writer
w = os.Stdout
fmt.Fprintf(w, "hello, writer\n")
person := Person{"Arthur Dent", 42}
anotherPerson := Person{"Zaphod Beeblebrox", 9001}
fmt.Println(person, anotherPerson)
}
//stringer
type IPAddr [4]byte
func (ip IPAddr) String() string {
return fmt.Sprintf("%v.%v.%v.%v", ip[0], ip[1], ip[2], ip[3])
}
func runStringer() {
fmt.Println("stringer---")
addrs := map[string]IPAddr{
"loopback": {127, 0, 0, 1},
"googleDNS": {8, 8, 8, 8},
}
for n, a := range addrs {
fmt.Printf("%v: %v\n", n, a)
}
}
// errors
type MyError struct {
When time.Time
What string
}
func (e *MyError) Error() string {
return fmt.Sprintf("at %v, %s",
e.When, e.What)
}
func run() error {
return &MyError{
time.Now(),
"it didn't work",
}
}
func runErrors() {
fmt.Println("errors")
if err := run(); err != nil {
fmt.Println(err)
}
}
// go routine
func say(s string) {
for i := 0; i < 5; i++ {
time.Sleep(100 * time.Millisecond)
fmt.Println(s)
}
}
func sum(a []int, c chan int) {
sum := 0
for _, v := range a {
sum += v
}
c <- sum // send sum to channel c
}
func runGoRoutine() {
a := []int{7, 2, 8, -9, 4, 0}
c := make(chan int)
go sum(a[:len(a)/2], c)
go sum(a[len(a)/2:], c)
x, y := <-c, <-c // receive from channel c and assign value to x and y
fmt.Println(x, y, x+y)
}
func runBufferedChannel() {
c := make(chan int, 2)
c <- 1
c <- 2
fmt.Println(<-c)
fmt.Println(<-c)
}
func fibonacci2(n int, c chan int) {
x, y := 0, 1
for i := 0; i < n; i++ {
c <- x
x, y = y, x+y
}
close(c)
}
func runRangeAndClose() {
fmt.Println("run fibonacci")
c := make(chan int, 10)
go fibonacci2(cap(c), c)
for i := range c {
fmt.Println(i)
}
}
var (
startTime time.Time
)
func fibonacci3(c, quit chan int) {
x, y := 0, 1
for {
select {
case c <- x:
fmt.Println("Before write", time.Since(startTime))
fmt.Println("Send on chan", time.Since(startTime))
x, y = y, x+y
case <-quit:
fmt.Println("quit")
return
}
}
}
func runFibonacci3() {
c := make(chan int)
quit := make(chan int)
go func() {
for i := | {
return x + y
} | identifier_body |
|
_train_bot_with_prepared.py | best_action = -1
best_score = -1e9
for action in range(n_actions):
for _ in range(action_range): # random.randint(1,100)
bbox.do_action(action)
if bbox.get_score() > best_score:
best_score = bbox.get_score()
best_action = action
bbox.load_from_checkpoint(checkpoint_id)
bbox.clear_all_checkpoints()
return best_action
def train_minibatch(minibatch):
old_state_s = np.array([row[0] for row in minibatch])
action_s = np.array([row[1] for row in minibatch])
reward_s = np.array([row[2] for row in minibatch])
new_state_s = np.array([row[3] for row in minibatch])
old_qwal_s = model.predict(old_state_s, batch_size=32)
newQ_s = model.predict(new_state_s, batch_size=32)
maxQ_s = np.max(newQ_s, axis=1)
y = old_qwal_s
update_s = reward_s + gamma * maxQ_s
for i in range(len(action_s)):
y[i, action_s[i]] = update_s[i]
model_prim.fit(old_state_s, y, batch_size=batchSize, nb_epoch=1, verbose=0)
return
def run_bbox(verbose=False, epsilon=0.1, gamma=0.99, action_repeat=5, update_frequency=4, sample_fit_size=32,
replay_memory_size=100000,
load_weights=False, save_weights=False):
global pgi
has_next = 1
global actions
global bbox
# Prepare environment - load the game level
prepare_bbox()
update_frequency_cntr = 0
h = 0
if load_weights:
model.load_weights(root + 'my_model_weights.h5')
model_prim.load_weights(root + 'my_model_weights.h5')
# stores tuples of (S, A, R, S')
while has_next:
# Get current environment state
pgi += 1
if pgi % print_step == 0:
bar.update(pgi)
# state = copy.copy(bbox.get_state())
state = bbox.get_state()
train_states_logs.append((state.flatten().tolist())[0:-4])
prev_reward = copy.copy(bbox.get_score())
# Run the Q function on S to get predicted reward values on all the possible actions
qval = model.predict(state.reshape(1, n_features), batch_size=1)
train_qval.append(qval)
action = (np.argmax(qval))
actions[action] += 1
# Choose an action to perform at current step
if random.random() < epsilon: # choose random action or best action
action = np.random.randint(0, n_actions) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Perform chosen action, observe new state S'
# Function do_action(action) returns False if level is finished, otherwise returns True.
for a in range(action_repeat):
has_next = bbox.do_action(action)
new_state = copy.copy(bbox.get_state())
reward = copy.copy(bbox.get_score()) - prev_reward
#if random.random() < 0.2 or reward > 0 : # в запоминаем все успешные действия и только 20% нейспешных
if True: # в запоминаем все успешные действия и только 20% нейспешных
if (len(replay) < replay_memory_size): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
else: # if buffer full, overwrite old values
h=np.random.randint(0,replay_memory_size-1)
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
# minibatch = random.sample(replay, batchSize)
minibatch = random.sample(replay, sample_fit_size)
train_minibatch(minibatch=minibatch)
if update_frequency_cntr >= update_frequency:
prim_weights = model_prim.get_weights()
model.set_weights(prim_weights)
update_frequency_cntr = 0
update_frequency_cntr += 1
# step_times.append(time.time()-st)
# Finish the game simulation, print earned reward and save weights
if save_weights:
model_prim.save_weights(root + 'my_model_weights.h5', overwrite=True)
bbox.finish(verbose=0)
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.regularizers import l1l2,activity_l1l2
import random
random.seed(6)
n_features = n_actions = max_time = -1
days_to_train =-1
first_run =False
resumple=False
replay_file = u'e:\\trade_data\\HistoryData\\replay.klp'
#bot_file_name = u'e:\\trade_data\\HistoryData\\train_50x40_data_2016.bot'
#u'e:\\trade_data\\HistoryData\\train_50x40_data_2015-2016.bot
bot_file_name = u'e:\\trade_data\\HistoryData\\Ri_train_50x40_data_2015-2016.bot'
d = klepto.archives.dir_archive(bot_file_name, cached=True, serialized=True)
d.load("bbox")
bbox = d["bbox"]
del d
if days_to_train != -1:
bbox.set_sample_days(days_to_train)
exploration_epochs = 1
learning_epochs =1
gamma = 0.8 # a high gamma makes a long term reward more valuable
epsilon=0.1
action_repeat = 3 # repeat each action this many times // было 4
update_frequency = 50 # the number of time steps between each Q-net update
batchSize = 32 # параметр для обучения сети
l1_reg=0.05
l2_reg=0.00001
| replay_memory_size=200000
print('replay_memory_size ', replay_memory_size)
sample_fit_size = 128 # Размер минибатча, по которому будет делаться выборка из буфера
print_step = 10
n_features = bbox.get_num_of_features() # учесть что мы сдесь получаем шайп
print('n_features=', n_features)
n_actions = bbox.get_num_of_actions()
max_time = bbox.get_max_time()
model = Sequential()
model.add(Dense(n_features, init='lecun_uniform', input_shape=(n_features,)))
model.add(Activation('relu'))
model.add(Dense(1600, init='lecun_uniform',
W_regularizer=l1l2(l1=l1_reg,l2=l2_reg)
)) # a 10 neuron network gives better than random result
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(800, init='lecun_uniform',
W_regularizer=l1l2(l1=l1_reg, l2=l2_reg)
)) # a 10 neuron network gives better than random result
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(n_actions, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop(lr=0.00025) # 0.00025
model.compile(loss='mse', optimizer=rms)
json_string = model.to_json()
root = u'e:\\trade_data\\HistoryData\\'
open(root + 'my_model_architecture.json', 'w').write(json_string)
model_prim = model_from_json(open(root + 'my_model_architecture.json').read())
model_prim.compile(loss='mse', optimizer=rms)
r = klepto.archives.dir_archive(replay_file, cached=True, serialized=True)
if not first_run: # "загружаем веса, если запуск не первый"
model.load_weights(root + 'my_model_weights.h5')
model_prim.load_weights(root + 'my_model_weights.h5')
r.load("replay")
replay = r['replay']
else:
replay = []
r['replay'] = replay
load_weights = False
replay = []
#r['replay'] = replay
pgi = 0
total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat))
bar = progressbar.ProgressBar(maxval=total_steps)
bar.start()
#текстовые логи
train_states_logs=[]
train_qval=[]
test_states_logs=[]
test_qval=[]
for i in range(exploration_epochs):
print("exploration ", i, " of ", exploration_epochs)
epsilon_t=1.0
actions = np.array([0, 0, 0])
run_bbox(verbose=0 | #replay_memory_size = np.minimum(int(bbox.total_steps / float(action_repeat)), 500000 ) # размер памяти, буфера
| random_line_split |
_train_bot_with_prepared.py | best_action = -1
best_score = -1e9
for action in range(n_actions):
for _ in range(action_range): # random.randint(1,100)
bbox.do_action(action)
if bbox.get_score() > best_score:
best_score = bbox.get_score()
best_action = action
bbox.load_from_checkpoint(checkpoint_id)
bbox.clear_all_checkpoints()
return best_action
def train_minibatch(minibatch):
old_state_s = np.array([row[0] for row in minibatch])
action_s = np.array([row[1] for row in minibatch])
reward_s = np.array([row[2] for row in minibatch])
new_state_s = np.array([row[3] for row in minibatch])
old_qwal_s = model.predict(old_state_s, batch_size=32)
newQ_s = model.predict(new_state_s, batch_size=32)
maxQ_s = np.max(newQ_s, axis=1)
y = old_qwal_s
update_s = reward_s + gamma * maxQ_s
for i in range(len(action_s)):
y[i, | model_prim.fit(old_state_s, y, batch_size=batchSize, nb_epoch=1, verbose=0)
return
def run_bbox(verbose=False, epsilon=0.1, gamma=0.99, action_repeat=5, update_frequency=4, sample_fit_size=32,
replay_memory_size=100000,
load_weights=False, save_weights=False):
global pgi
has_next = 1
global actions
global bbox
# Prepare environment - load the game level
prepare_bbox()
update_frequency_cntr = 0
h = 0
if load_weights:
model.load_weights(root + 'my_model_weights.h5')
model_prim.load_weights(root + 'my_model_weights.h5')
# stores tuples of (S, A, R, S')
while has_next:
# Get current environment state
pgi += 1
if pgi % print_step == 0:
bar.update(pgi)
# state = copy.copy(bbox.get_state())
state = bbox.get_state()
train_states_logs.append((state.flatten().tolist())[0:-4])
prev_reward = copy.copy(bbox.get_score())
# Run the Q function on S to get predicted reward values on all the possible actions
qval = model.predict(state.reshape(1, n_features), batch_size=1)
train_qval.append(qval)
action = (np.argmax(qval))
actions[action] += 1
# Choose an action to perform at current step
if random.random() < epsilon: # choose random action or best action
action = np.random.randint(0, n_actions) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Perform chosen action, observe new state S'
# Function do_action(action) returns False if level is finished, otherwise returns True.
for a in range(action_repeat):
has_next = bbox.do_action(action)
new_state = copy.copy(bbox.get_state())
reward = copy.copy(bbox.get_score()) - prev_reward
#if random.random() < 0.2 or reward > 0 : # в запоминаем все успешные действия и только 20% нейспешных
if True: # в запоминаем все успешные действия и только 20% нейспешных
if (len(replay) < replay_memory_size): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
else: # if buffer full, overwrite old values
h=np.random.randint(0,replay_memory_size-1)
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
# minibatch = random.sample(replay, batchSize)
minibatch = random.sample(replay, sample_fit_size)
train_minibatch(minibatch=minibatch)
if update_frequency_cntr >= update_frequency:
prim_weights = model_prim.get_weights()
model.set_weights(prim_weights)
update_frequency_cntr = 0
update_frequency_cntr += 1
# step_times.append(time.time()-st)
# Finish the game simulation, print earned reward and save weights
if save_weights:
model_prim.save_weights(root + 'my_model_weights.h5', overwrite=True)
bbox.finish(verbose=0)
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.regularizers import l1l2,activity_l1l2
import random
random.seed(6)
n_features = n_actions = max_time = -1
days_to_train =-1
first_run =False
resumple=False
replay_file = u'e:\\trade_data\\HistoryData\\replay.klp'
#bot_file_name = u'e:\\trade_data\\HistoryData\\train_50x40_data_2016.bot'
#u'e:\\trade_data\\HistoryData\\train_50x40_data_2015-2016.bot
bot_file_name = u'e:\\trade_data\\HistoryData\\Ri_train_50x40_data_2015-2016.bot'
d = klepto.archives.dir_archive(bot_file_name, cached=True, serialized=True)
d.load("bbox")
bbox = d["bbox"]
del d
if days_to_train != -1:
bbox.set_sample_days(days_to_train)
exploration_epochs = 1
learning_epochs =1
gamma = 0.8 # a high gamma makes a long term reward more valuable
epsilon=0.1
action_repeat = 3 # repeat each action this many times // было 4
update_frequency = 50 # the number of time steps between each Q-net update
batchSize = 32 # параметр для обучения сети
l1_reg=0.05
l2_reg=0.00001
#replay_memory_size = np.minimum(int(bbox.total_steps / float(action_repeat)), 500000 ) # размер памяти, буфера
replay_memory_size=200000
print('replay_memory_size ', replay_memory_size)
sample_fit_size = 128 # Размер минибатча, по которому будет делаться выборка из буфера
print_step = 10
n_features = bbox.get_num_of_features() # учесть что мы сдесь получаем шайп
print('n_features=', n_features)
n_actions = bbox.get_num_of_actions()
max_time = bbox.get_max_time()
model = Sequential()
model.add(Dense(n_features, init='lecun_uniform', input_shape=(n_features,)))
model.add(Activation('relu'))
model.add(Dense(1600, init='lecun_uniform',
W_regularizer=l1l2(l1=l1_reg,l2=l2_reg)
)) # a 10 neuron network gives better than random result
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(800, init='lecun_uniform',
W_regularizer=l1l2(l1=l1_reg, l2=l2_reg)
)) # a 10 neuron network gives better than random result
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(n_actions, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop(lr=0.00025) # 0.00025
model.compile(loss='mse', optimizer=rms)
json_string = model.to_json()
root = u'e:\\trade_data\\HistoryData\\'
open(root + 'my_model_architecture.json', 'w').write(json_string)
model_prim = model_from_json(open(root + 'my_model_architecture.json').read())
model_prim.compile(loss='mse', optimizer=rms)
r = klepto.archives.dir_archive(replay_file, cached=True, serialized=True)
if not first_run: # "загружаем веса, если запуск не первый"
model.load_weights(root + 'my_model_weights.h5')
model_prim.load_weights(root + 'my_model_weights.h5')
r.load("replay")
replay = r['replay']
else:
replay = []
r['replay'] = replay
load_weights = False
replay = []
#r['replay'] = replay
pgi = 0
total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat))
bar = progressbar.ProgressBar(maxval=total_steps)
bar.start()
#текстовые логи
train_states_logs=[]
train_qval=[]
test_states_logs=[]
test_qval=[]
for i in range(exploration_epochs):
print("exploration ", i, " of ", exploration_epochs)
epsilon_t=1.0
actions = np.array([0, 0, 0])
run_bbox(verbose=0 | action_s[i]] = update_s[i]
| conditional_block |
_train_bot_with_prepared.py | val.append(qval)
action = (np.argmax(qval))
actions[action] += 1
# Choose an action to perform at current step
if random.random() < epsilon: # choose random action or best action
action = np.random.randint(0, n_actions) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Perform chosen action, observe new state S'
# Function do_action(action) returns False if level is finished, otherwise returns True.
for a in range(action_repeat):
has_next = bbox.do_action(action)
new_state = copy.copy(bbox.get_state())
reward = copy.copy(bbox.get_score()) - prev_reward
#if random.random() < 0.2 or reward > 0 : # в запоминаем все успешные действия и только 20% нейспешных
if True: # в запоминаем все успешные действия и только 20% нейспешных
if (len(replay) < replay_memory_size): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
else: # if buffer full, overwrite old values
h=np.random.randint(0,replay_memory_size-1)
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
# minibatch = random.sample(replay, batchSize)
minibatch = random.sample(replay, sample_fit_size)
train_minibatch(minibatch=minibatch)
if update_frequency_cntr >= update_frequency:
prim_weights = model_prim.get_weights()
model.set_weights(prim_weights)
update_frequency_cntr = 0
update_frequency_cntr += 1
# step_times.append(time.time()-st)
# Finish the game simulation, print earned reward and save weights
if save_weights:
model_prim.save_weights(root + 'my_model_weights.h5', overwrite=True)
bbox.finish(verbose=0)
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.regularizers import l1l2,activity_l1l2
import random
random.seed(6)
n_features = n_actions = max_time = -1
days_to_train =-1
first_run =False
resumple=False
replay_file = u'e:\\trade_data\\HistoryData\\replay.klp'
#bot_file_name = u'e:\\trade_data\\HistoryData\\train_50x40_data_2016.bot'
#u'e:\\trade_data\\HistoryData\\train_50x40_data_2015-2016.bot
bot_file_name = u'e:\\trade_data\\HistoryData\\Ri_train_50x40_data_2015-2016.bot'
d = klepto.archives.dir_archive(bot_file_name, cached=True, serialized=True)
d.load("bbox")
bbox = d["bbox"]
del d
if days_to_train != -1:
bbox.set_sample_days(days_to_train)
exploration_epochs = 1
learning_epochs =1
gamma = 0.8 # a high gamma makes a long term reward more valuable
epsilon=0.1
action_repeat = 3 # repeat each action this many times // было 4
update_frequency = 50 # the number of time steps between each Q-net update
batchSize = 32 # параметр для обучения сети
l1_reg=0.05
l2_reg=0.00001
#replay_memory_size = np.minimum(int(bbox.total_steps / float(action_repeat)), 500000 ) # размер памяти, буфера
replay_memory_size=200000
print('replay_memory_size ', replay_memory_size)
sample_fit_size = 128 # Размер минибатча, по которому будет делаться выборка из буфера
print_step = 10
n_features = bbox.get_num_of_features() # учесть что мы сдесь получаем шайп
print('n_features=', n_features)
n_actions = bbox.get_num_of_actions()
max_time = bbox.get_max_time()
model = Sequential()
model.add(Dense(n_features, init='lecun_uniform', input_shape=(n_features,)))
model.add(Activation('relu'))
model.add(Dense(1600, init='lecun_uniform',
W_regularizer=l1l2(l1=l1_reg,l2=l2_reg)
)) # a 10 neuron network gives better than random result
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(800, init='lecun_uniform',
W_regularizer=l1l2(l1=l1_reg, l2=l2_reg)
)) # a 10 neuron network gives better than random result
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(n_actions, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop(lr=0.00025) # 0.00025
model.compile(loss='mse', optimizer=rms)
json_string = model.to_json()
root = u'e:\\trade_data\\HistoryData\\'
open(root + 'my_model_architecture.json', 'w').write(json_string)
model_prim = model_from_json(open(root + 'my_model_architecture.json').read())
model_prim.compile(loss='mse', optimizer=rms)
r = klepto.archives.dir_archive(replay_file, cached=True, serialized=True)
if not first_run: # "загружаем веса, если запуск не первый"
model.load_weights(root + 'my_model_weights.h5')
model_prim.load_weights(root + 'my_model_weights.h5')
r.load("replay")
replay = r['replay']
else:
replay = []
r['replay'] = replay
load_weights = False
replay = []
#r['replay'] = replay
pgi = 0
total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat))
bar = progressbar.ProgressBar(maxval=total_steps)
bar.start()
#текстовые логи
train_states_logs=[]
train_qval=[]
test_states_logs=[]
test_qval=[]
for i in range(exploration_epochs):
print("exploration ", i, " of ", exploration_epochs)
epsilon_t=1.0
actions = np.array([0, 0, 0])
run_bbox(verbose=0, epsilon=epsilon, gamma=gamma, action_repeat=action_repeat,
update_frequency=update_frequency, sample_fit_size=sample_fit_size,
replay_memory_size=replay_memory_size, load_weights=load_weights, save_weights=True)
print("score: ", np.round(bbox.get_score()), actions)
if epsilon_t > 0.1:
epsilon_t -= (1.0 / exploration_epochs) # потихоньку увеличиваем вероятность использования знаний
if resumple:
bbox.set_sample_days(days_to_train)
total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat))
bar = progressbar.ProgressBar(maxval=total_steps)
r.dump()
for i in range(learning_epochs):
actions = np.array([0, 0, 0])
print("learning ", i, " of ", learning_epochs)
epsilon = 0.1
run_bbox(verbose=0, epsilon=epsilon, gamma=gamma, action_repeat=action_repeat,
update_frequency=update_frequency, sample_fit_size=sample_fit_size,
replay_memory_size=replay_memory_size, load_weights=load_weights, save_weights=True)
print("score: ", np.round(bbox.get_score()), actions)
if resumple:
bbox.set_sample_days(days_to_train)
total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat))
bar = progressbar.ProgressBar(maxval=total_steps + 100)
r.dump()
def test_strategy(n=4, resample=True, action_repeat=6):
results = []
for i in range(n):
if resample:
random.seed(1 + i)
bbox.set_sample_days(days_to_train)
bbox.reset_level()
has_next = True
actions = np.array([0, 0, 0])
while has_next:
state = bbox.get_state()
qval | = model.predict(state.reshape(1, n_features), batch_size=1)
action = (np.argmax(qval))
actions[action] += 1
for a in range(action_repeat):
has_next = bbox.do_action(action)
bbox.finish(verbose=0)
print(" test ", i, " score: ", bbox.get_score(), actions)
return results
print ('тест на тренировочных данных')
test_times = 1
results = test_strategy(test_times, action_repeat=action_repeat, resample=False)
with open('test_states.txt', "w") as file:
for row in test_states_logs:
file.write(str(list(row)) + '\n')
file.flush()
| identifier_body |
|
_train_bot_with_prepared.py | best_action = -1
best_score = -1e9
for action in range(n_actions):
for _ in range(action_range): # random.randint(1,100)
bbox.do_action(action)
if bbox.get_score() > best_score:
best_score = bbox.get_score()
best_action = action
bbox.load_from_checkpoint(checkpoint_id)
bbox.clear_all_checkpoints()
return best_action
def trai | ibatch):
old_state_s = np.array([row[0] for row in minibatch])
action_s = np.array([row[1] for row in minibatch])
reward_s = np.array([row[2] for row in minibatch])
new_state_s = np.array([row[3] for row in minibatch])
old_qwal_s = model.predict(old_state_s, batch_size=32)
newQ_s = model.predict(new_state_s, batch_size=32)
maxQ_s = np.max(newQ_s, axis=1)
y = old_qwal_s
update_s = reward_s + gamma * maxQ_s
for i in range(len(action_s)):
y[i, action_s[i]] = update_s[i]
model_prim.fit(old_state_s, y, batch_size=batchSize, nb_epoch=1, verbose=0)
return
def run_bbox(verbose=False, epsilon=0.1, gamma=0.99, action_repeat=5, update_frequency=4, sample_fit_size=32,
replay_memory_size=100000,
load_weights=False, save_weights=False):
global pgi
has_next = 1
global actions
global bbox
# Prepare environment - load the game level
prepare_bbox()
update_frequency_cntr = 0
h = 0
if load_weights:
model.load_weights(root + 'my_model_weights.h5')
model_prim.load_weights(root + 'my_model_weights.h5')
# stores tuples of (S, A, R, S')
while has_next:
# Get current environment state
pgi += 1
if pgi % print_step == 0:
bar.update(pgi)
# state = copy.copy(bbox.get_state())
state = bbox.get_state()
train_states_logs.append((state.flatten().tolist())[0:-4])
prev_reward = copy.copy(bbox.get_score())
# Run the Q function on S to get predicted reward values on all the possible actions
qval = model.predict(state.reshape(1, n_features), batch_size=1)
train_qval.append(qval)
action = (np.argmax(qval))
actions[action] += 1
# Choose an action to perform at current step
if random.random() < epsilon: # choose random action or best action
action = np.random.randint(0, n_actions) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Perform chosen action, observe new state S'
# Function do_action(action) returns False if level is finished, otherwise returns True.
for a in range(action_repeat):
has_next = bbox.do_action(action)
new_state = copy.copy(bbox.get_state())
reward = copy.copy(bbox.get_score()) - prev_reward
#if random.random() < 0.2 or reward > 0 : # в запоминаем все успешные действия и только 20% нейспешных
if True: # в запоминаем все успешные действия и только 20% нейспешных
if (len(replay) < replay_memory_size): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
else: # if buffer full, overwrite old values
h=np.random.randint(0,replay_memory_size-1)
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
# minibatch = random.sample(replay, batchSize)
minibatch = random.sample(replay, sample_fit_size)
train_minibatch(minibatch=minibatch)
if update_frequency_cntr >= update_frequency:
prim_weights = model_prim.get_weights()
model.set_weights(prim_weights)
update_frequency_cntr = 0
update_frequency_cntr += 1
# step_times.append(time.time()-st)
# Finish the game simulation, print earned reward and save weights
if save_weights:
model_prim.save_weights(root + 'my_model_weights.h5', overwrite=True)
bbox.finish(verbose=0)
from keras.models import Sequential
from keras.models import model_from_json
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.regularizers import l1l2,activity_l1l2
import random
random.seed(6)
n_features = n_actions = max_time = -1
days_to_train =-1
first_run =False
resumple=False
replay_file = u'e:\\trade_data\\HistoryData\\replay.klp'
#bot_file_name = u'e:\\trade_data\\HistoryData\\train_50x40_data_2016.bot'
#u'e:\\trade_data\\HistoryData\\train_50x40_data_2015-2016.bot
bot_file_name = u'e:\\trade_data\\HistoryData\\Ri_train_50x40_data_2015-2016.bot'
d = klepto.archives.dir_archive(bot_file_name, cached=True, serialized=True)
d.load("bbox")
bbox = d["bbox"]
del d
if days_to_train != -1:
bbox.set_sample_days(days_to_train)
exploration_epochs = 1
learning_epochs =1
gamma = 0.8 # a high gamma makes a long term reward more valuable
epsilon=0.1
action_repeat = 3 # repeat each action this many times // было 4
update_frequency = 50 # the number of time steps between each Q-net update
batchSize = 32 # параметр для обучения сети
l1_reg=0.05
l2_reg=0.00001
#replay_memory_size = np.minimum(int(bbox.total_steps / float(action_repeat)), 500000 ) # размер памяти, буфера
replay_memory_size=200000
print('replay_memory_size ', replay_memory_size)
sample_fit_size = 128 # Размер минибатча, по которому будет делаться выборка из буфера
print_step = 10
n_features = bbox.get_num_of_features() # учесть что мы сдесь получаем шайп
print('n_features=', n_features)
n_actions = bbox.get_num_of_actions()
max_time = bbox.get_max_time()
model = Sequential()
model.add(Dense(n_features, init='lecun_uniform', input_shape=(n_features,)))
model.add(Activation('relu'))
model.add(Dense(1600, init='lecun_uniform',
W_regularizer=l1l2(l1=l1_reg,l2=l2_reg)
)) # a 10 neuron network gives better than random result
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(800, init='lecun_uniform',
W_regularizer=l1l2(l1=l1_reg, l2=l2_reg)
)) # a 10 neuron network gives better than random result
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(n_actions, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop(lr=0.00025) # 0.00025
model.compile(loss='mse', optimizer=rms)
json_string = model.to_json()
root = u'e:\\trade_data\\HistoryData\\'
open(root + 'my_model_architecture.json', 'w').write(json_string)
model_prim = model_from_json(open(root + 'my_model_architecture.json').read())
model_prim.compile(loss='mse', optimizer=rms)
r = klepto.archives.dir_archive(replay_file, cached=True, serialized=True)
if not first_run: # "загружаем веса, если запуск не первый"
model.load_weights(root + 'my_model_weights.h5')
model_prim.load_weights(root + 'my_model_weights.h5')
r.load("replay")
replay = r['replay']
else:
replay = []
r['replay'] = replay
load_weights = False
replay = []
#r['replay'] = replay
pgi = 0
total_steps = int((exploration_epochs + learning_epochs) * bbox.total_steps / float(action_repeat))
bar = progressbar.ProgressBar(maxval=total_steps)
bar.start()
#текстовые логи
train_states_logs=[]
train_qval=[]
test_states_logs=[]
test_qval=[]
for i in range(exploration_epochs):
print("exploration ", i, " of ", exploration_epochs)
epsilon_t=1.0
actions = np.array([0, 0, 0])
run_bbox(verbose | n_minibatch(min | identifier_name |
gitstore.go | }
/*
// map poolname => []nodeUuid, only the blobs that have a key set are going to be changed
func (s *Store) SaveNodeBlobs(uuid string, blobs map[string]io.Reader) error {
for blobPath, blob := range blobs {
path := filepath.Join(s.Git.Dir, s.BlobPath(uuid, blobPath))
if err := s.saveBlobToFile(path, blob); err != nil {
return err
}
}
return nil
}
*/
/*
func (s *Store) SaveIndex(indexpath string, shard string, rd io.Reader) error {
path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath))
return s.saveBlobToFile(path, rd)
}
*/
/*
*/
/*
func (s *Store) callwithBlob(uuid string, blobPath string, fn func(string, io.Reader) error) error {
path := filepath.Join(s.Git.Dir, s.blobPath(uuid, blobPath))
if FileExists(path) {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
return fn(blobPath, file)
}
return nil
}
*/
/*
func (s *Store) GetIndex(indexpath string, shard string, fn func(io.Reader) error) error {
path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath))
if FileExists(path) {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
return fn(file)
}
return nil
}
*/
// GetNodeBlobs calls fn for each existing blob in requestedBlobs
/*
func (s *Store) GetNodeBlobs(uuid string, requestedBlobs []string, fn func(string, io.Reader) error) error {
for _, blob := range requestedBlobs {
err := s.callwithBlob(uuid, blob, fn)
if err != nil {
return err
}
}
return nil
}
*/
func (s *Store) GetNodeTexts(uuid string, requestedTexts []string) (texts map[string]string, err error) {
texts = map[string]string{}
var known bool
for _, text := range requestedTexts {
known, err = s.IsFileKnown(s.textPath(uuid, text))
if err != nil {
return
}
// fmt.Printf("file %s is known: %v\n", text, known)
if known {
var buf bytes.Buffer
err = s.ReadCatHeadFile(s.textPath(uuid, text), &buf)
if err != nil {
return
}
texts[text] = buf.String()
}
}
return
}
func (s *Store) SaveEdges(category, uuid string, edges map[string]string) error {
path := s.edgePath(category, uuid)
known, err := s.IsFileKnown(path)
if err != nil {
return err
}
return s.save(path, !known, edges)
}
// RemoveEdges also removes the properties node of an edge
// Is the edges file is already removed, no error should be returned
func (s *Store) RemoveEdges(category, uuid string) error {
edges, err := s.GetEdges(category, uuid)
if err != nil {
return err
}
for _, propID := range edges {
if err := s.RemoveNode(propID); err != nil {
return err
}
}
path := s.edgePath(category, uuid)
return s.RemoveIndex(path)
}
// if there is no edge file for the given category, no error is returned, but empty edges map
func (s *Store) GetEdges(category, uuid string) (edges map[string]string, err error) {
path := s.edgePath(category, uuid)
edges = map[string]string{}
known, err := s.IsFileKnown(path)
if err != nil {
return edges, err
}
if !known {
return edges, nil
}
err = s.load(path, &edges)
return edges, err
}
func (s *Store) edgePath(category string, uuid string) string {
//return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("refs/%s/%s/%s/%s", category, s.shard, uuid[:2], uuid[2:])
}
func (s *Store) propPath(uuid string) string {
//return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("node/%s/%s/%s", s.shard, uuid[:2], uuid[2:])
}
func (s *Store) textPath(uuid string, key string) string {
// return fmt.Sprintf("node/rels/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("text/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], key)
}
func (s *Store) BlobPath(uuid string, blobpath string) string {
return fmt.Sprintf("../blob/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], blobpath)
}
func (g *Store) Commit(msg zoom.CommitMessage) error {
// fmt.Println("commit from store " + comment)
treeSha, err := g.Transaction.WriteTree()
if err != nil {
return err
}
var parent string
parent, err = g.ShowHeadsRef("master")
// fmt.Println("parent commit is: " + parent)
if err != nil {
return err
}
var commitSha string
commitSha, err = g.CommitTree(treeSha, parent, strings.NewReader(msg.String()))
if err != nil {
return err
}
return g.UpdateHeadsRef("master", commitSha)
}
func (g *Store) save(path string, isNew bool, data interface{}) error {
// fmt.Printf("storing: %#v in %#v\n", data, path)
var buf bytes.Buffer
// enc := msgpack.NewEncoder(&buf)
enc := json.NewEncoder(&buf)
err := enc.Encode(data)
if err != nil {
return err
}
// fmt.Println("result", buf.String())
var sha1 string
sha1, err = g.Transaction.WriteHashObject(&buf)
if err != nil {
return err
}
if isNew {
err = g.Transaction.AddIndexCache(sha1, path)
} else {
err = g.Transaction.UpdateIndexCache(sha1, path)
}
if err != nil {
return err
}
return nil
}
func (g *Store) load(path string, data interface{}) error {
// fmt.Println("loading from ", path)
var buf bytes.Buffer
err := g.Transaction.ReadCatHeadFile(path, &buf)
if err != nil {
fmt.Println(err)
return err
}
// fmt.Println("reading", buf.String())
//dec := msgpack.NewDecoder(&buf)
dec := json.NewDecoder(&buf)
return dec.Decode(data)
}
// only the props that have a key set are going to be changed
// if no node properties file does exist, no error should be returned and s.save(....isNew) should be used
func (g *Store) SaveNodeProperties(uuid string, props map[string]interface{}) error {
path := g.propPath(uuid)
known, err := g.IsFileKnown(path)
if err != nil {
return err
}
if known {
orig := map[string]interface{}{}
err := g.load(path, &orig)
if err != nil {
return err
}
for k, v := range props {
if v == nil {
delete(orig, k)
} else {
orig[k] = v
}
}
props = orig
}
return g.save(path, !known, props)
}
// TODO Rollback any actions that have been taken since the last commit
// stage should be cleared and any newly added data should be removed
// maybe a cleanup command should remove the orphaned sha1s (git gc maybe??)
func (g *Store) Rollback() error {
return g.ResetToHeadAll()
}
// TODO what happens on errors? changes will not be committed!
// TODO what about the edges? must delete them all (and identify them all)
// to identify them we need something like
// ls refs/*/shard/uuid[:2], uuid[2:]
// return fmt.Sprintf("refs/%s/%s/%s/%s", category, shard, uuid[:2], uuid[2:])
// if any file does not exist, no error should be returned
func (g *Store) RemoveNode(uuid string) error | {
// fmt.Printf("trying to remove node: uuid %#v shard %#v\n", uuid, shard)
// fmt.Println("proppath is ", g.propPath(uuid))
paths := []string{
fmt.Sprintf("text/%s/%s/%s", g.shard, uuid[:2], uuid[2:]),
fmt.Sprintf("blob/%s/%s/%s", g.shard, uuid[:2], uuid[2:]),
}
files, err := g.LsFiles(fmt.Sprintf("refs/*/%s/%s/%s", g.shard, uuid[:2], uuid[2:]))
if err != nil {
// fmt.Println("error from ls files")
return err
}
for _, file := range files {
err := g.Transaction.RemoveIndex(file)
if err != nil {
return err
} | identifier_body |
|
gitstore.go | , err := tx.WriteHashObject(strings.NewReader("ZOOM DATABASE\nThis is a zoom database.\nDon't write into this directory manually.\nUse the zoom database library instead.\n"))
if err != nil {
return err
}
err = tx.AddIndexCache(sha1, "README")
if err != nil {
return err
}
sha1, err = tx.WriteTree()
if err != nil {
return err
}
sha1, err = tx.CommitTree(sha1, "", strings.NewReader("add README"))
if err != nil {
return err
}
return tx.UpdateHeadsRef("master", sha1)
})
if err != nil {
return
}
}
g = Git{Git: git, shard: shard}
return
}
func (g *Git) Transaction(msg zoom.CommitMessage, action func(zoom.Transaction) error) (err error) {
return g.Git.Transaction(func(tx *gitlib.Transaction) error {
var store zoom.Store = &Store{tx, g.shard}
return zoom.NewTransaction(store, msg, action)
})
}
type Store struct {
*gitlib.Transaction
shard string
}
// map relname => nodeUuid, only the texts that have a key set are going to be changed
func (s *Store) SaveNodeTexts(uuid string, texts map[string]string) error {
for textPath, text := range texts {
path := s.textPath(uuid, textPath)
known, err := s.IsFileKnown(path)
if err != nil {
return err
}
rd := strings.NewReader(text)
sha1, err2 := s.Transaction.WriteHashObject(rd)
if err2 != nil {
return err2
}
if known {
err = s.Transaction.UpdateIndexCache(sha1, path)
} else {
err = s.Transaction.AddIndexCache(sha1, path)
}
if err != nil {
return err
}
}
return nil
}
func (s *Store) saveBlobToFile(path string, blob io.Reader) error {
dir := filepath.Dir(path)
os.MkdirAll(dir, 0755)
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
// make a buffer to keep chunks that are read
buf := make([]byte, 1024)
for {
// read a chunk
n, err := blob.Read(buf)
if err != nil && err != io.EOF {
return err
}
if n == 0 {
break
}
// write a chunk
if _, err := file.Write(buf[:n]); err != nil {
return err
}
}
return nil
}
/*
// map poolname => []nodeUuid, only the blobs that have a key set are going to be changed
func (s *Store) SaveNodeBlobs(uuid string, blobs map[string]io.Reader) error {
for blobPath, blob := range blobs {
path := filepath.Join(s.Git.Dir, s.BlobPath(uuid, blobPath))
if err := s.saveBlobToFile(path, blob); err != nil {
return err
}
}
return nil
}
*/
/*
func (s *Store) SaveIndex(indexpath string, shard string, rd io.Reader) error {
path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath))
return s.saveBlobToFile(path, rd)
}
*/
/*
*/
/*
func (s *Store) callwithBlob(uuid string, blobPath string, fn func(string, io.Reader) error) error {
path := filepath.Join(s.Git.Dir, s.blobPath(uuid, blobPath))
if FileExists(path) {
file, err := os.Open(path)
if err != nil {
return err |
return fn(blobPath, file)
}
return nil
}
*/
/*
func (s *Store) GetIndex(indexpath string, shard string, fn func(io.Reader) error) error {
path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath))
if FileExists(path) {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
return fn(file)
}
return nil
}
*/
// GetNodeBlobs calls fn for each existing blob in requestedBlobs
/*
func (s *Store) GetNodeBlobs(uuid string, requestedBlobs []string, fn func(string, io.Reader) error) error {
for _, blob := range requestedBlobs {
err := s.callwithBlob(uuid, blob, fn)
if err != nil {
return err
}
}
return nil
}
*/
func (s *Store) GetNodeTexts(uuid string, requestedTexts []string) (texts map[string]string, err error) {
texts = map[string]string{}
var known bool
for _, text := range requestedTexts {
known, err = s.IsFileKnown(s.textPath(uuid, text))
if err != nil {
return
}
// fmt.Printf("file %s is known: %v\n", text, known)
if known {
var buf bytes.Buffer
err = s.ReadCatHeadFile(s.textPath(uuid, text), &buf)
if err != nil {
return
}
texts[text] = buf.String()
}
}
return
}
func (s *Store) SaveEdges(category, uuid string, edges map[string]string) error {
path := s.edgePath(category, uuid)
known, err := s.IsFileKnown(path)
if err != nil {
return err
}
return s.save(path, !known, edges)
}
// RemoveEdges also removes the properties node of an edge
// Is the edges file is already removed, no error should be returned
func (s *Store) RemoveEdges(category, uuid string) error {
edges, err := s.GetEdges(category, uuid)
if err != nil {
return err
}
for _, propID := range edges {
if err := s.RemoveNode(propID); err != nil {
return err
}
}
path := s.edgePath(category, uuid)
return s.RemoveIndex(path)
}
// if there is no edge file for the given category, no error is returned, but empty edges map
func (s *Store) GetEdges(category, uuid string) (edges map[string]string, err error) {
path := s.edgePath(category, uuid)
edges = map[string]string{}
known, err := s.IsFileKnown(path)
if err != nil {
return edges, err
}
if !known {
return edges, nil
}
err = s.load(path, &edges)
return edges, err
}
func (s *Store) edgePath(category string, uuid string) string {
//return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("refs/%s/%s/%s/%s", category, s.shard, uuid[:2], uuid[2:])
}
func (s *Store) propPath(uuid string) string {
//return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("node/%s/%s/%s", s.shard, uuid[:2], uuid[2:])
}
func (s *Store) textPath(uuid string, key string) string {
// return fmt.Sprintf("node/rels/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("text/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], key)
}
func (s *Store) BlobPath(uuid string, blobpath string) string {
return fmt.Sprintf("../blob/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], blobpath)
}
func (g *Store) Commit(msg zoom.CommitMessage) error {
// fmt.Println("commit from store " + comment)
treeSha, err := g.Transaction.WriteTree()
if err != nil {
return err
}
var parent string
parent, err = g.ShowHeadsRef("master")
// fmt.Println("parent commit is: " + parent)
if err != nil {
return err
}
var commitSha string
commitSha, err = g.CommitTree(treeSha, parent, strings.NewReader(msg.String()))
if err != nil {
return err
}
return g.UpdateHeadsRef("master", commitSha)
}
func (g *Store) save(path string, isNew bool, data interface{}) error {
// fmt.Printf("storing: %#v in %#v\n", data, path)
var buf bytes.Buffer
// enc := msgpack.NewEncoder(&buf)
enc := json.NewEncoder(&buf)
err := enc.Encode(data)
if err != nil {
return err
}
// fmt.Println("result", buf.String())
var sha1 string
sha1, err = g.Transaction.WriteHashObject(&buf)
if err != nil {
return err
}
if isNew {
err = g.Transaction.AddIndexCache(sha1, path)
} else {
err = g.Transaction.UpdateIndexCache(sha1, path)
}
if err != nil {
return err
}
return nil
}
func (g *Store) load(path string, data interface{}) error {
// | }
defer file.Close() | random_line_split |
gitstore.go | err := tx.WriteHashObject(strings.NewReader("ZOOM DATABASE\nThis is a zoom database.\nDon't write into this directory manually.\nUse the zoom database library instead.\n"))
if err != nil {
return err
}
err = tx.AddIndexCache(sha1, "README")
if err != nil {
return err
}
sha1, err = tx.WriteTree()
if err != nil {
return err
}
sha1, err = tx.CommitTree(sha1, "", strings.NewReader("add README"))
if err != nil {
return err
}
return tx.UpdateHeadsRef("master", sha1)
})
if err != nil {
return
}
}
g = Git{Git: git, shard: shard}
return
}
func (g *Git) Transaction(msg zoom.CommitMessage, action func(zoom.Transaction) error) (err error) {
return g.Git.Transaction(func(tx *gitlib.Transaction) error {
var store zoom.Store = &Store{tx, g.shard}
return zoom.NewTransaction(store, msg, action)
})
}
type Store struct {
*gitlib.Transaction
shard string
}
// map relname => nodeUuid, only the texts that have a key set are going to be changed
func (s *Store) SaveNodeTexts(uuid string, texts map[string]string) error {
for textPath, text := range texts {
path := s.textPath(uuid, textPath)
known, err := s.IsFileKnown(path)
if err != nil {
return err
}
rd := strings.NewReader(text)
sha1, err2 := s.Transaction.WriteHashObject(rd)
if err2 != nil {
return err2
}
if known {
err = s.Transaction.UpdateIndexCache(sha1, path)
} else {
err = s.Transaction.AddIndexCache(sha1, path)
}
if err != nil {
return err
}
}
return nil
}
func (s *Store) saveBlobToFile(path string, blob io.Reader) error {
dir := filepath.Dir(path)
os.MkdirAll(dir, 0755)
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
// make a buffer to keep chunks that are read
buf := make([]byte, 1024)
for {
// read a chunk
n, err := blob.Read(buf)
if err != nil && err != io.EOF {
return err
}
if n == 0 {
break
}
// write a chunk
if _, err := file.Write(buf[:n]); err != nil {
return err
}
}
return nil
}
/*
// map poolname => []nodeUuid, only the blobs that have a key set are going to be changed
func (s *Store) SaveNodeBlobs(uuid string, blobs map[string]io.Reader) error {
for blobPath, blob := range blobs {
path := filepath.Join(s.Git.Dir, s.BlobPath(uuid, blobPath))
if err := s.saveBlobToFile(path, blob); err != nil {
return err
}
}
return nil
}
*/
/*
func (s *Store) SaveIndex(indexpath string, shard string, rd io.Reader) error {
path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath))
return s.saveBlobToFile(path, rd)
}
*/
/*
*/
/*
func (s *Store) callwithBlob(uuid string, blobPath string, fn func(string, io.Reader) error) error {
path := filepath.Join(s.Git.Dir, s.blobPath(uuid, blobPath))
if FileExists(path) {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
return fn(blobPath, file)
}
return nil
}
*/
/*
func (s *Store) GetIndex(indexpath string, shard string, fn func(io.Reader) error) error {
path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath))
if FileExists(path) {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
return fn(file)
}
return nil
}
*/
// GetNodeBlobs calls fn for each existing blob in requestedBlobs
/*
func (s *Store) GetNodeBlobs(uuid string, requestedBlobs []string, fn func(string, io.Reader) error) error {
for _, blob := range requestedBlobs {
err := s.callwithBlob(uuid, blob, fn)
if err != nil {
return err
}
}
return nil
}
*/
func (s *Store) GetNodeTexts(uuid string, requestedTexts []string) (texts map[string]string, err error) {
texts = map[string]string{}
var known bool
for _, text := range requestedTexts {
known, err = s.IsFileKnown(s.textPath(uuid, text))
if err != nil {
return
}
// fmt.Printf("file %s is known: %v\n", text, known)
if known {
var buf bytes.Buffer
err = s.ReadCatHeadFile(s.textPath(uuid, text), &buf)
if err != nil {
return
}
texts[text] = buf.String()
}
}
return
}
func (s *Store) SaveEdges(category, uuid string, edges map[string]string) error {
path := s.edgePath(category, uuid)
known, err := s.IsFileKnown(path)
if err != nil {
return err
}
return s.save(path, !known, edges)
}
// RemoveEdges also removes the properties node of an edge
// Is the edges file is already removed, no error should be returned
func (s *Store) RemoveEdges(category, uuid string) error {
edges, err := s.GetEdges(category, uuid)
if err != nil {
return err
}
for _, propID := range edges {
if err := s.RemoveNode(propID); err != nil {
return err
}
}
path := s.edgePath(category, uuid)
return s.RemoveIndex(path)
}
// if there is no edge file for the given category, no error is returned, but empty edges map
func (s *Store) GetEdges(category, uuid string) (edges map[string]string, err error) {
path := s.edgePath(category, uuid)
edges = map[string]string{}
known, err := s.IsFileKnown(path)
if err != nil {
return edges, err
}
if !known {
return edges, nil
}
err = s.load(path, &edges)
return edges, err
}
func (s *Store) edgePath(category string, uuid string) string {
//return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("refs/%s/%s/%s/%s", category, s.shard, uuid[:2], uuid[2:])
}
func (s *Store) propPath(uuid string) string {
//return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("node/%s/%s/%s", s.shard, uuid[:2], uuid[2:])
}
func (s *Store) textPath(uuid string, key string) string {
// return fmt.Sprintf("node/rels/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("text/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], key)
}
func (s *Store) BlobPath(uuid string, blobpath string) string {
return fmt.Sprintf("../blob/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], blobpath)
}
func (g *Store) Commit(msg zoom.CommitMessage) error {
// fmt.Println("commit from store " + comment)
treeSha, err := g.Transaction.WriteTree()
if err != nil {
return err
}
var parent string
parent, err = g.ShowHeadsRef("master")
// fmt.Println("parent commit is: " + parent)
if err != nil {
return err
}
var commitSha string
commitSha, err = g.CommitTree(treeSha, parent, strings.NewReader(msg.String()))
if err != nil {
return err
}
return g.UpdateHeadsRef("master", commitSha)
}
func (g *Store) save(path string, isNew bool, data interface{}) error {
// fmt.Printf("storing: %#v in %#v\n", data, path)
var buf bytes.Buffer
// enc := msgpack.NewEncoder(&buf)
enc := json.NewEncoder(&buf)
err := enc.Encode(data)
if err != nil |
// fmt.Println("result", buf.String())
var sha1 string
sha1, err = g.Transaction.WriteHashObject(&buf)
if err != nil {
return err
}
if isNew {
err = g.Transaction.AddIndexCache(sha1, path)
} else {
err = g.Transaction.UpdateIndexCache(sha1, path)
}
if err != nil {
return err
}
return nil
}
func (g *Store) load(path string, data interface{}) error {
| {
return err
} | conditional_block |
gitstore.go | (name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
type Git struct {
*gitlib.Git
shard string
}
func Open(baseDir string, shard string) (g Git, err error) {
// fmt.Println("opening")
//gitBase := filepath.Join(baseDir, ".git")
gitBase := baseDir
// ignoring error because gitBase might already exist
// println("creating " + gitBase)
// os.Mkdir(gitBase, 0755)
var git *gitlib.Git
git, err = gitlib.NewGit(gitBase)
if err != nil {
return
}
if !git.IsInitialized() {
// fmt.Println("initializing")
err = git.Transaction(func(tx *gitlib.Transaction) error {
// we got problems with rm --cached and ls-files therefor we prefer to not
// use bare repositories for now
if err := tx.Init(); err != nil {
return err
}
/*
sha1, err := tx.WriteHashObject(strings.NewReader("index\nblob\n"))
if err != nil {
return err
}
err = tx.AddIndexCache(sha1, ".gitignore")
if err != nil {
return err
}
*/
sha1, err := tx.WriteHashObject(strings.NewReader("ZOOM DATABASE\nThis is a zoom database.\nDon't write into this directory manually.\nUse the zoom database library instead.\n"))
if err != nil {
return err
}
err = tx.AddIndexCache(sha1, "README")
if err != nil {
return err
}
sha1, err = tx.WriteTree()
if err != nil {
return err
}
sha1, err = tx.CommitTree(sha1, "", strings.NewReader("add README"))
if err != nil {
return err
}
return tx.UpdateHeadsRef("master", sha1)
})
if err != nil {
return
}
}
g = Git{Git: git, shard: shard}
return
}
func (g *Git) Transaction(msg zoom.CommitMessage, action func(zoom.Transaction) error) (err error) {
return g.Git.Transaction(func(tx *gitlib.Transaction) error {
var store zoom.Store = &Store{tx, g.shard}
return zoom.NewTransaction(store, msg, action)
})
}
type Store struct {
*gitlib.Transaction
shard string
}
// map relname => nodeUuid, only the texts that have a key set are going to be changed
func (s *Store) SaveNodeTexts(uuid string, texts map[string]string) error {
for textPath, text := range texts {
path := s.textPath(uuid, textPath)
known, err := s.IsFileKnown(path)
if err != nil {
return err
}
rd := strings.NewReader(text)
sha1, err2 := s.Transaction.WriteHashObject(rd)
if err2 != nil {
return err2
}
if known {
err = s.Transaction.UpdateIndexCache(sha1, path)
} else {
err = s.Transaction.AddIndexCache(sha1, path)
}
if err != nil {
return err
}
}
return nil
}
func (s *Store) saveBlobToFile(path string, blob io.Reader) error {
dir := filepath.Dir(path)
os.MkdirAll(dir, 0755)
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
// make a buffer to keep chunks that are read
buf := make([]byte, 1024)
for {
// read a chunk
n, err := blob.Read(buf)
if err != nil && err != io.EOF {
return err
}
if n == 0 {
break
}
// write a chunk
if _, err := file.Write(buf[:n]); err != nil {
return err
}
}
return nil
}
/*
// map poolname => []nodeUuid, only the blobs that have a key set are going to be changed
func (s *Store) SaveNodeBlobs(uuid string, blobs map[string]io.Reader) error {
for blobPath, blob := range blobs {
path := filepath.Join(s.Git.Dir, s.BlobPath(uuid, blobPath))
if err := s.saveBlobToFile(path, blob); err != nil {
return err
}
}
return nil
}
*/
/*
func (s *Store) SaveIndex(indexpath string, shard string, rd io.Reader) error {
path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath))
return s.saveBlobToFile(path, rd)
}
*/
/*
*/
/*
func (s *Store) callwithBlob(uuid string, blobPath string, fn func(string, io.Reader) error) error {
path := filepath.Join(s.Git.Dir, s.blobPath(uuid, blobPath))
if FileExists(path) {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
return fn(blobPath, file)
}
return nil
}
*/
/*
func (s *Store) GetIndex(indexpath string, shard string, fn func(io.Reader) error) error {
path := filepath.Join(s.Git.Dir, s.indexPath(shard, indexpath))
if FileExists(path) {
file, err := os.Open(path)
if err != nil {
return err
}
defer file.Close()
return fn(file)
}
return nil
}
*/
// GetNodeBlobs calls fn for each existing blob in requestedBlobs
/*
func (s *Store) GetNodeBlobs(uuid string, requestedBlobs []string, fn func(string, io.Reader) error) error {
for _, blob := range requestedBlobs {
err := s.callwithBlob(uuid, blob, fn)
if err != nil {
return err
}
}
return nil
}
*/
func (s *Store) GetNodeTexts(uuid string, requestedTexts []string) (texts map[string]string, err error) {
texts = map[string]string{}
var known bool
for _, text := range requestedTexts {
known, err = s.IsFileKnown(s.textPath(uuid, text))
if err != nil {
return
}
// fmt.Printf("file %s is known: %v\n", text, known)
if known {
var buf bytes.Buffer
err = s.ReadCatHeadFile(s.textPath(uuid, text), &buf)
if err != nil {
return
}
texts[text] = buf.String()
}
}
return
}
func (s *Store) SaveEdges(category, uuid string, edges map[string]string) error {
path := s.edgePath(category, uuid)
known, err := s.IsFileKnown(path)
if err != nil {
return err
}
return s.save(path, !known, edges)
}
// RemoveEdges also removes the properties node of an edge
// Is the edges file is already removed, no error should be returned
func (s *Store) RemoveEdges(category, uuid string) error {
edges, err := s.GetEdges(category, uuid)
if err != nil {
return err
}
for _, propID := range edges {
if err := s.RemoveNode(propID); err != nil {
return err
}
}
path := s.edgePath(category, uuid)
return s.RemoveIndex(path)
}
// if there is no edge file for the given category, no error is returned, but empty edges map
func (s *Store) GetEdges(category, uuid string) (edges map[string]string, err error) {
path := s.edgePath(category, uuid)
edges = map[string]string{}
known, err := s.IsFileKnown(path)
if err != nil {
return edges, err
}
if !known {
return edges, nil
}
err = s.load(path, &edges)
return edges, err
}
func (s *Store) edgePath(category string, uuid string) string {
//return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("refs/%s/%s/%s/%s", category, s.shard, uuid[:2], uuid[2:])
}
func (s *Store) propPath(uuid string) string {
//return fmt.Sprintf("node/props/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("node/%s/%s/%s", s.shard, uuid[:2], uuid[2:])
}
func (s *Store) textPath(uuid string, key string) string {
// return fmt.Sprintf("node/rels/%s/%s", uuid[:2], uuid[2:])
return fmt.Sprintf("text/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], key)
}
func (s *Store) BlobPath(uuid string, blobpath string) string {
return fmt.Sprintf("../blob/%s/%s/%s/%s", s.shard, uuid[:2], uuid[2:], blobpath)
}
func (g *Store) Commit(msg zoom.CommitMessage | FileExists | identifier_name |
|
_wx.py | .WXK_F5: keys.F5,
wx.WXK_F6: keys.F6,
wx.WXK_F7: keys.F7,
wx.WXK_F8: keys.F8,
wx.WXK_F9: keys.F9,
wx.WXK_F10: keys.F10,
wx.WXK_F11: keys.F11,
wx.WXK_F12: keys.F12,
wx.WXK_SPACE: keys.SPACE,
wx.WXK_RETURN: keys.ENTER, # == pyglet.window.key.RETURN
wx.WXK_NUMPAD_ENTER: keys.ENTER,
wx.WXK_TAB: keys.TAB,
}
except Exception as exp:
available, testable, why_not, which = False, False, str(exp), None
class GLCanvas(object):
pass
else:
if USE_EGL:
available, testable, why_not = False, False, 'EGL not supported'
else:
available, testable, why_not = True, True, None
which = 'wxPython ' + str(wx.__version__)
# -------------------------------------------------------------- capability ---
capability = dict( # things that can be set by the backend
title=True,
size=True,
position=True,
show=True,
vsync=True,
resizable=True,
decorate=True,
fullscreen=True,
context=True,
multi_window=True,
scroll=True,
parent=True,
always_on_top=True,
)
# ------------------------------------------------------- set_configuration ---
def _set_config(c):
"""Set gl configuration"""
gl_attribs = [glcanvas.WX_GL_RGBA,
glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'],
glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'],
glcanvas.WX_GL_MIN_RED, c['red_size'],
glcanvas.WX_GL_MIN_GREEN, c['green_size'],
glcanvas.WX_GL_MIN_BLUE, c['blue_size'],
glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']]
gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else []
gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else []
return gl_attribs
# ------------------------------------------------------------- application ---
_wx_app = None
_timers = []
class ApplicationBackend(BaseApplicationBackend):
def __init__(self):
BaseApplicationBackend.__init__(self)
self._event_loop = wx.EventLoop()
wx.EventLoop.SetActive(self._event_loop)
def _vispy_get_backend_name(self):
return 'wx'
def _vispy_process_events(self):
# inpsired by https://github.com/wxWidgets/wxPython/blob/master/
# samples/mainloop/mainloop.py
for _ in range(3): # trial-and-error found this to work (!)
while self._event_loop.Pending():
self._event_loop.Dispatch()
_wx_app.ProcessIdle()
sleep(0.01)
def _vispy_run(self):
return _wx_app.MainLoop()
def _vispy_quit(self):
global _wx_app
_wx_app.ExitMainLoop()
def _vispy_get_native_app(self):
# Get native app in save way. Taken from guisupport.py
global _wx_app
_wx_app = wx.GetApp() # in case the user already has one
if _wx_app is None:
_wx_app = wx.PySimpleApp()
_wx_app.SetExitOnFrameDelete(True)
return _wx_app
# ------------------------------------------------------------------ canvas ---
def _get_mods(evt):
"""Helper to extract list of mods from event"""
mods = []
mods += [keys.CONTROL] if evt.ControlDown() else []
mods += [keys.ALT] if evt.AltDown() else []
mods += [keys.SHIFT] if evt.ShiftDown() else []
mods += [keys.META] if evt.MetaDown() else []
return mods
def _process_key(evt):
|
class DummySize(object):
def __init__(self, size):
self.size = size
def GetSize(self):
return self.size
def Skip(self):
pass
class CanvasBackend(GLCanvas, BaseCanvasBackend):
""" wxPython backend for Canvas abstract class."""
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
p = self._process_backend_kwargs(kwargs)
# WX supports OS double-click events, so we set this here to
# avoid double events
self._double_click_supported = True
# Set config
self._gl_attribs = _set_config(p.context.config)
# Deal with context
p.context.shared.add_ref('wx', self)
if p.context.shared.ref is self:
self._gl_context = None # set for real once we init the GLCanvas
else:
self._gl_context = p.context.shared.ref._gl_context
if p.parent is None:
style = (wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.CLOSE_BOX |
wx.SYSTEM_MENU | wx.CAPTION | wx.CLIP_CHILDREN)
style |= wx.NO_BORDER if not p.decorate else wx.RESIZE_BORDER
style |= wx.STAY_ON_TOP if p.always_on_top else 0
self._frame = wx.Frame(None, wx.ID_ANY, p.title, p.position,
p.size, style)
if not p.resizable:
self._frame.SetSizeHints(p.size[0], p.size[1],
p.size[0], p.size[1])
if p.fullscreen is not False:
if p.fullscreen is not True:
logger.warning('Cannot specify monitor number for wx '
'fullscreen, using default')
self._fullscreen = True
else:
self._fullscreen = False
_wx_app.SetTopWindow(self._frame)
parent = self._frame
self._frame.Raise()
self._frame.Bind(wx.EVT_CLOSE, self.on_close)
else:
parent = p.parent
self._frame = None
self._fullscreen = False
self._init = False
GLCanvas.__init__(self, parent, wx.ID_ANY, pos=p.position,
size=p.size, style=0, name='GLCanvas',
attribList=self._gl_attribs)
if self._gl_context is None:
self._gl_context = glcanvas.GLContext(self)
self.SetFocus()
self._vispy_set_title(p.title)
self._size = None
self.Bind(wx.EVT_SIZE, self.on_resize)
self.Bind(wx.EVT_PAINT, self.on_draw)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.Bind(wx.EVT_KEY_UP, self.on_key_up)
self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
self._size_init = p.size
self._vispy_set_visible(p.show)
def on_resize(self, event):
if self._vispy_canvas is None or not self._init:
event.Skip()
return
size = event.GetSize()
self._vispy_canvas.events.resize(size=size)
self.Refresh()
event.Skip()
def on_draw(self, event):
if self._vispy_canvas is None:
return
dc = wx.PaintDC(self) # needed for wx
if not self._init:
self._initialize()
self._vispy_canvas.set_current()
self._vispy_canvas.events.draw(region=None)
del dc
event.Skip()
def _initialize(self):
if self._vispy_canvas is None:
return
self._init = True
self._vispy_canvas.set_current()
self._vispy_canvas.events.initialize()
self.on_resize(DummySize(self._size_init))
def _vispy_set_current(self):
self.SetCurrent(self._gl_context)
def _vispy_warmup(self):
etime = time() + 0.3
while time() < etime:
sleep(0.01)
self._vispy_canvas.set_current()
self._vispy_canvas.app.process_events()
def _vispy_swap_buffers(self):
# Swap front and back buffer
self._vispy_canvas.set_current()
self.SwapBuffers()
def _vispy_set_title(self, title):
# Set the window title. Has no effect for widgets
if self._frame is not None:
self._frame.SetLabel(title)
def _vispy_set_size(self, w, h):
# Set size of the widget or window
if not self._init:
self._size_init = (w, h)
self.SetSizeWH(w, h)
def _vispy_set_position(self, x, y):
# Set positionof the widget or window. May have no effect for widgets
if self._frame is not None:
self._frame.SetPosition((x, y))
def _vis | """Helper to convert from wx keycode to vispy keycode"""
key = evt.GetKeyCode()
if key in KEYMAP:
return KEYMAP[key], ''
if 97 <= key <= 122:
key -= 32
if key >= 32 and key <= 127:
return keys.Key(chr(key)), chr(key)
else:
return None, None | identifier_body |
_wx.py | 5: keys.F5,
wx.WXK_F6: keys.F6,
wx.WXK_F7: keys.F7,
wx.WXK_F8: keys.F8,
wx.WXK_F9: keys.F9,
wx.WXK_F10: keys.F10,
wx.WXK_F11: keys.F11,
wx.WXK_F12: keys.F12,
wx.WXK_SPACE: keys.SPACE,
wx.WXK_RETURN: keys.ENTER, # == pyglet.window.key.RETURN
wx.WXK_NUMPAD_ENTER: keys.ENTER,
wx.WXK_TAB: keys.TAB,
}
except Exception as exp:
available, testable, why_not, which = False, False, str(exp), None
class GLCanvas(object):
pass
else:
if USE_EGL:
available, testable, why_not = False, False, 'EGL not supported'
else:
available, testable, why_not = True, True, None
which = 'wxPython ' + str(wx.__version__)
# -------------------------------------------------------------- capability ---
capability = dict( # things that can be set by the backend
title=True,
size=True,
position=True,
show=True,
vsync=True,
resizable=True,
decorate=True,
fullscreen=True,
context=True,
multi_window=True,
scroll=True,
parent=True,
always_on_top=True,
)
# ------------------------------------------------------- set_configuration ---
def _set_config(c):
"""Set gl configuration"""
gl_attribs = [glcanvas.WX_GL_RGBA,
glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'],
glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'],
glcanvas.WX_GL_MIN_RED, c['red_size'],
glcanvas.WX_GL_MIN_GREEN, c['green_size'],
glcanvas.WX_GL_MIN_BLUE, c['blue_size'],
glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']]
gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else []
gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else []
return gl_attribs
# ------------------------------------------------------------- application ---
_wx_app = None
_timers = []
class ApplicationBackend(BaseApplicationBackend):
def __init__(self):
BaseApplicationBackend.__init__(self)
self._event_loop = wx.EventLoop()
wx.EventLoop.SetActive(self._event_loop)
def _vispy_get_backend_name(self):
return 'wx'
def _vispy_process_events(self):
# inpsired by https://github.com/wxWidgets/wxPython/blob/master/
# samples/mainloop/mainloop.py
for _ in range(3): # trial-and-error found this to work (!)
while self._event_loop.Pending():
self._event_loop.Dispatch()
_wx_app.ProcessIdle()
sleep(0.01)
def _vispy_run(self):
return _wx_app.MainLoop()
def _vispy_quit(self):
global _wx_app
_wx_app.ExitMainLoop()
def _vispy_get_native_app(self):
# Get native app in save way. Taken from guisupport.py
global _wx_app
_wx_app = wx.GetApp() # in case the user already has one
if _wx_app is None:
_wx_app = wx.PySimpleApp()
_wx_app.SetExitOnFrameDelete(True)
return _wx_app
# ------------------------------------------------------------------ canvas ---
def _get_mods(evt):
"""Helper to extract list of mods from event"""
mods = []
mods += [keys.CONTROL] if evt.ControlDown() else []
mods += [keys.ALT] if evt.AltDown() else []
mods += [keys.SHIFT] if evt.ShiftDown() else []
mods += [keys.META] if evt.MetaDown() else []
return mods
def _process_key(evt):
"""Helper to convert from wx keycode to vispy keycode"""
key = evt.GetKeyCode()
if key in KEYMAP:
return KEYMAP[key], ''
if 97 <= key <= 122:
key -= 32
if key >= 32 and key <= 127:
return keys.Key(chr(key)), chr(key)
else:
return None, None
class DummySize(object):
def __init__(self, size):
self.size = size
def GetSize(self):
return self.size
def Skip(self):
pass
class CanvasBackend(GLCanvas, BaseCanvasBackend):
""" wxPython backend for Canvas abstract class."""
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
p = self._process_backend_kwargs(kwargs)
# WX supports OS double-click events, so we set this here to
# avoid double events
self._double_click_supported = True
# Set config
self._gl_attribs = _set_config(p.context.config)
# Deal with context
p.context.shared.add_ref('wx', self)
if p.context.shared.ref is self:
self._gl_context = None # set for real once we init the GLCanvas
else:
self._gl_context = p.context.shared.ref._gl_context
if p.parent is None:
style = (wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.CLOSE_BOX |
wx.SYSTEM_MENU | wx.CAPTION | wx.CLIP_CHILDREN)
style |= wx.NO_BORDER if not p.decorate else wx.RESIZE_BORDER
style |= wx.STAY_ON_TOP if p.always_on_top else 0
self._frame = wx.Frame(None, wx.ID_ANY, p.title, p.position,
p.size, style)
if not p.resizable:
self._frame.SetSizeHints(p.size[0], p.size[1],
p.size[0], p.size[1])
if p.fullscreen is not False:
if p.fullscreen is not True:
logger.warning('Cannot specify monitor number for wx '
'fullscreen, using default')
self._fullscreen = True
else:
self._fullscreen = False
_wx_app.SetTopWindow(self._frame)
parent = self._frame
self._frame.Raise()
self._frame.Bind(wx.EVT_CLOSE, self.on_close)
else:
parent = p.parent
self._frame = None
self._fullscreen = False
self._init = False
GLCanvas.__init__(self, parent, wx.ID_ANY, pos=p.position,
size=p.size, style=0, name='GLCanvas',
attribList=self._gl_attribs)
if self._gl_context is None:
self._gl_context = glcanvas.GLContext(self)
self.SetFocus()
self._vispy_set_title(p.title)
self._size = None
self.Bind(wx.EVT_SIZE, self.on_resize)
self.Bind(wx.EVT_PAINT, self.on_draw)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.Bind(wx.EVT_KEY_UP, self.on_key_up)
self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
self._size_init = p.size
self._vispy_set_visible(p.show)
def on_resize(self, event):
if self._vispy_canvas is None or not self._init:
event.Skip()
return
size = event.GetSize()
self._vispy_canvas.events.resize(size=size)
self.Refresh()
event.Skip()
def on_draw(self, event):
if self._vispy_canvas is None:
return
dc = wx.PaintDC(self) # needed for wx
if not self._init:
self._initialize()
self._vispy_canvas.set_current()
self._vispy_canvas.events.draw(region=None)
del dc
event.Skip()
def _initialize(self):
if self._vispy_canvas is None:
return
self._init = True
self._vispy_canvas.set_current()
self._vispy_canvas.events.initialize()
self.on_resize(DummySize(self._size_init))
def _vispy_set_current(self):
self.SetCurrent(self._gl_context)
def _vispy_warmup(self):
etime = time() + 0.3
while time() < etime:
sleep(0.01)
self._vispy_canvas.set_current()
self._vispy_canvas.app.process_events()
def _vispy_swap_buffers(self):
# Swap front and back buffer
self._vispy_canvas.set_current()
self.SwapBuffers()
def _vispy_set_title(self, title):
# Set the window title. Has no effect for widgets
if self._frame is not None:
self._frame.SetLabel(title)
def _vispy_set_size(self, w, h):
# Set size of the widget or window
if not self._init:
self._size_init = (w, h)
self.SetSizeWH(w, h)
def _vispy_set_position(self, x, y):
# Set positionof the widget or window. May have no effect for widgets
if self._frame is not None:
self._frame.SetPosition((x, y))
def | _vispy_get_fullscreen | identifier_name |
|
_wx.py | .WXK_F5: keys.F5,
wx.WXK_F6: keys.F6,
wx.WXK_F7: keys.F7,
wx.WXK_F8: keys.F8,
wx.WXK_F9: keys.F9,
wx.WXK_F10: keys.F10,
wx.WXK_F11: keys.F11,
wx.WXK_F12: keys.F12,
wx.WXK_SPACE: keys.SPACE,
wx.WXK_RETURN: keys.ENTER, # == pyglet.window.key.RETURN
wx.WXK_NUMPAD_ENTER: keys.ENTER,
wx.WXK_TAB: keys.TAB,
}
except Exception as exp:
available, testable, why_not, which = False, False, str(exp), None
class GLCanvas(object):
pass
else:
if USE_EGL:
available, testable, why_not = False, False, 'EGL not supported'
else:
available, testable, why_not = True, True, None
which = 'wxPython ' + str(wx.__version__)
# -------------------------------------------------------------- capability ---
capability = dict( # things that can be set by the backend
title=True,
size=True,
position=True,
show=True,
vsync=True,
resizable=True,
decorate=True,
fullscreen=True,
context=True,
multi_window=True,
scroll=True,
parent=True,
always_on_top=True,
)
# ------------------------------------------------------- set_configuration ---
def _set_config(c):
"""Set gl configuration"""
gl_attribs = [glcanvas.WX_GL_RGBA,
glcanvas.WX_GL_DEPTH_SIZE, c['depth_size'],
glcanvas.WX_GL_STENCIL_SIZE, c['stencil_size'],
glcanvas.WX_GL_MIN_RED, c['red_size'],
glcanvas.WX_GL_MIN_GREEN, c['green_size'],
glcanvas.WX_GL_MIN_BLUE, c['blue_size'],
glcanvas.WX_GL_MIN_ALPHA, c['alpha_size']]
gl_attribs += [glcanvas.WX_GL_DOUBLEBUFFER] if c['double_buffer'] else []
gl_attribs += [glcanvas.WX_GL_STEREO] if c['stereo'] else []
return gl_attribs
# ------------------------------------------------------------- application ---
_wx_app = None
_timers = []
class ApplicationBackend(BaseApplicationBackend):
def __init__(self):
BaseApplicationBackend.__init__(self)
self._event_loop = wx.EventLoop()
wx.EventLoop.SetActive(self._event_loop)
def _vispy_get_backend_name(self):
return 'wx'
def _vispy_process_events(self):
# inpsired by https://github.com/wxWidgets/wxPython/blob/master/
# samples/mainloop/mainloop.py
for _ in range(3): # trial-and-error found this to work (!)
while self._event_loop.Pending():
self._event_loop.Dispatch()
_wx_app.ProcessIdle()
sleep(0.01)
def _vispy_run(self):
return _wx_app.MainLoop()
def _vispy_quit(self):
global _wx_app
_wx_app.ExitMainLoop()
def _vispy_get_native_app(self):
# Get native app in save way. Taken from guisupport.py
global _wx_app
_wx_app = wx.GetApp() # in case the user already has one
if _wx_app is None:
_wx_app = wx.PySimpleApp()
_wx_app.SetExitOnFrameDelete(True)
return _wx_app
# ------------------------------------------------------------------ canvas ---
def _get_mods(evt):
"""Helper to extract list of mods from event"""
mods = []
mods += [keys.CONTROL] if evt.ControlDown() else []
mods += [keys.ALT] if evt.AltDown() else []
mods += [keys.SHIFT] if evt.ShiftDown() else []
mods += [keys.META] if evt.MetaDown() else []
return mods
| """Helper to convert from wx keycode to vispy keycode"""
key = evt.GetKeyCode()
if key in KEYMAP:
return KEYMAP[key], ''
if 97 <= key <= 122:
key -= 32
if key >= 32 and key <= 127:
return keys.Key(chr(key)), chr(key)
else:
return None, None
class DummySize(object):
def __init__(self, size):
self.size = size
def GetSize(self):
return self.size
def Skip(self):
pass
class CanvasBackend(GLCanvas, BaseCanvasBackend):
""" wxPython backend for Canvas abstract class."""
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
p = self._process_backend_kwargs(kwargs)
# WX supports OS double-click events, so we set this here to
# avoid double events
self._double_click_supported = True
# Set config
self._gl_attribs = _set_config(p.context.config)
# Deal with context
p.context.shared.add_ref('wx', self)
if p.context.shared.ref is self:
self._gl_context = None # set for real once we init the GLCanvas
else:
self._gl_context = p.context.shared.ref._gl_context
if p.parent is None:
style = (wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.CLOSE_BOX |
wx.SYSTEM_MENU | wx.CAPTION | wx.CLIP_CHILDREN)
style |= wx.NO_BORDER if not p.decorate else wx.RESIZE_BORDER
style |= wx.STAY_ON_TOP if p.always_on_top else 0
self._frame = wx.Frame(None, wx.ID_ANY, p.title, p.position,
p.size, style)
if not p.resizable:
self._frame.SetSizeHints(p.size[0], p.size[1],
p.size[0], p.size[1])
if p.fullscreen is not False:
if p.fullscreen is not True:
logger.warning('Cannot specify monitor number for wx '
'fullscreen, using default')
self._fullscreen = True
else:
self._fullscreen = False
_wx_app.SetTopWindow(self._frame)
parent = self._frame
self._frame.Raise()
self._frame.Bind(wx.EVT_CLOSE, self.on_close)
else:
parent = p.parent
self._frame = None
self._fullscreen = False
self._init = False
GLCanvas.__init__(self, parent, wx.ID_ANY, pos=p.position,
size=p.size, style=0, name='GLCanvas',
attribList=self._gl_attribs)
if self._gl_context is None:
self._gl_context = glcanvas.GLContext(self)
self.SetFocus()
self._vispy_set_title(p.title)
self._size = None
self.Bind(wx.EVT_SIZE, self.on_resize)
self.Bind(wx.EVT_PAINT, self.on_draw)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.Bind(wx.EVT_KEY_UP, self.on_key_up)
self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
self._size_init = p.size
self._vispy_set_visible(p.show)
def on_resize(self, event):
if self._vispy_canvas is None or not self._init:
event.Skip()
return
size = event.GetSize()
self._vispy_canvas.events.resize(size=size)
self.Refresh()
event.Skip()
def on_draw(self, event):
if self._vispy_canvas is None:
return
dc = wx.PaintDC(self) # needed for wx
if not self._init:
self._initialize()
self._vispy_canvas.set_current()
self._vispy_canvas.events.draw(region=None)
del dc
event.Skip()
def _initialize(self):
if self._vispy_canvas is None:
return
self._init = True
self._vispy_canvas.set_current()
self._vispy_canvas.events.initialize()
self.on_resize(DummySize(self._size_init))
def _vispy_set_current(self):
self.SetCurrent(self._gl_context)
def _vispy_warmup(self):
etime = time() + 0.3
while time() < etime:
sleep(0.01)
self._vispy_canvas.set_current()
self._vispy_canvas.app.process_events()
def _vispy_swap_buffers(self):
# Swap front and back buffer
self._vispy_canvas.set_current()
self.SwapBuffers()
def _vispy_set_title(self, title):
# Set the window title. Has no effect for widgets
if self._frame is not None:
self._frame.SetLabel(title)
def _vispy_set_size(self, w, h):
# Set size of the widget or window
if not self._init:
self._size_init = (w, h)
self.SetSizeWH(w, h)
def _vispy_set_position(self, x, y):
# Set positionof the widget or window. May have no effect for widgets
if self._frame is not None:
self._frame.SetPosition((x, y))
def _ |
def _process_key(evt): | random_line_split |
_wx.py | ApplicationBackend.__init__(self)
self._event_loop = wx.EventLoop()
wx.EventLoop.SetActive(self._event_loop)
def _vispy_get_backend_name(self):
return 'wx'
def _vispy_process_events(self):
# inpsired by https://github.com/wxWidgets/wxPython/blob/master/
# samples/mainloop/mainloop.py
for _ in range(3): # trial-and-error found this to work (!)
while self._event_loop.Pending():
self._event_loop.Dispatch()
_wx_app.ProcessIdle()
sleep(0.01)
def _vispy_run(self):
return _wx_app.MainLoop()
def _vispy_quit(self):
global _wx_app
_wx_app.ExitMainLoop()
def _vispy_get_native_app(self):
# Get native app in save way. Taken from guisupport.py
global _wx_app
_wx_app = wx.GetApp() # in case the user already has one
if _wx_app is None:
_wx_app = wx.PySimpleApp()
_wx_app.SetExitOnFrameDelete(True)
return _wx_app
# ------------------------------------------------------------------ canvas ---
def _get_mods(evt):
"""Helper to extract list of mods from event"""
mods = []
mods += [keys.CONTROL] if evt.ControlDown() else []
mods += [keys.ALT] if evt.AltDown() else []
mods += [keys.SHIFT] if evt.ShiftDown() else []
mods += [keys.META] if evt.MetaDown() else []
return mods
def _process_key(evt):
"""Helper to convert from wx keycode to vispy keycode"""
key = evt.GetKeyCode()
if key in KEYMAP:
return KEYMAP[key], ''
if 97 <= key <= 122:
key -= 32
if key >= 32 and key <= 127:
return keys.Key(chr(key)), chr(key)
else:
return None, None
class DummySize(object):
def __init__(self, size):
self.size = size
def GetSize(self):
return self.size
def Skip(self):
pass
class CanvasBackend(GLCanvas, BaseCanvasBackend):
""" wxPython backend for Canvas abstract class."""
# args are for BaseCanvasBackend, kwargs are for us.
def __init__(self, *args, **kwargs):
BaseCanvasBackend.__init__(self, *args)
p = self._process_backend_kwargs(kwargs)
# WX supports OS double-click events, so we set this here to
# avoid double events
self._double_click_supported = True
# Set config
self._gl_attribs = _set_config(p.context.config)
# Deal with context
p.context.shared.add_ref('wx', self)
if p.context.shared.ref is self:
self._gl_context = None # set for real once we init the GLCanvas
else:
self._gl_context = p.context.shared.ref._gl_context
if p.parent is None:
style = (wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.CLOSE_BOX |
wx.SYSTEM_MENU | wx.CAPTION | wx.CLIP_CHILDREN)
style |= wx.NO_BORDER if not p.decorate else wx.RESIZE_BORDER
style |= wx.STAY_ON_TOP if p.always_on_top else 0
self._frame = wx.Frame(None, wx.ID_ANY, p.title, p.position,
p.size, style)
if not p.resizable:
self._frame.SetSizeHints(p.size[0], p.size[1],
p.size[0], p.size[1])
if p.fullscreen is not False:
if p.fullscreen is not True:
logger.warning('Cannot specify monitor number for wx '
'fullscreen, using default')
self._fullscreen = True
else:
self._fullscreen = False
_wx_app.SetTopWindow(self._frame)
parent = self._frame
self._frame.Raise()
self._frame.Bind(wx.EVT_CLOSE, self.on_close)
else:
parent = p.parent
self._frame = None
self._fullscreen = False
self._init = False
GLCanvas.__init__(self, parent, wx.ID_ANY, pos=p.position,
size=p.size, style=0, name='GLCanvas',
attribList=self._gl_attribs)
if self._gl_context is None:
self._gl_context = glcanvas.GLContext(self)
self.SetFocus()
self._vispy_set_title(p.title)
self._size = None
self.Bind(wx.EVT_SIZE, self.on_resize)
self.Bind(wx.EVT_PAINT, self.on_draw)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down)
self.Bind(wx.EVT_KEY_UP, self.on_key_up)
self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
self._size_init = p.size
self._vispy_set_visible(p.show)
def on_resize(self, event):
if self._vispy_canvas is None or not self._init:
event.Skip()
return
size = event.GetSize()
self._vispy_canvas.events.resize(size=size)
self.Refresh()
event.Skip()
def on_draw(self, event):
if self._vispy_canvas is None:
return
dc = wx.PaintDC(self) # needed for wx
if not self._init:
self._initialize()
self._vispy_canvas.set_current()
self._vispy_canvas.events.draw(region=None)
del dc
event.Skip()
def _initialize(self):
if self._vispy_canvas is None:
return
self._init = True
self._vispy_canvas.set_current()
self._vispy_canvas.events.initialize()
self.on_resize(DummySize(self._size_init))
def _vispy_set_current(self):
self.SetCurrent(self._gl_context)
def _vispy_warmup(self):
etime = time() + 0.3
while time() < etime:
sleep(0.01)
self._vispy_canvas.set_current()
self._vispy_canvas.app.process_events()
def _vispy_swap_buffers(self):
# Swap front and back buffer
self._vispy_canvas.set_current()
self.SwapBuffers()
def _vispy_set_title(self, title):
# Set the window title. Has no effect for widgets
if self._frame is not None:
self._frame.SetLabel(title)
def _vispy_set_size(self, w, h):
# Set size of the widget or window
if not self._init:
self._size_init = (w, h)
self.SetSizeWH(w, h)
def _vispy_set_position(self, x, y):
# Set positionof the widget or window. May have no effect for widgets
if self._frame is not None:
self._frame.SetPosition((x, y))
def _vispy_get_fullscreen(self):
return self._fullscreen
def _vispy_set_fullscreen(self, fullscreen):
if self._frame is not None:
self._fullscreen = bool(fullscreen)
self._vispy_set_visible(True)
def _vispy_set_visible(self, visible):
# Show or hide the window or widget
self.Show(visible)
if visible:
if self._frame is not None:
self._frame.ShowFullScreen(self._fullscreen)
def _vispy_update(self):
# Invoke a redraw
self.Refresh()
def _vispy_close(self):
if self._vispy_canvas is None:
return
# Force the window or widget to shut down
canvas = self
frame = self._frame
self._gl_context = None # let RC destroy this in case it's shared
canvas.Close()
canvas.Destroy()
if frame:
frame.Close()
frame.Destroy()
gc.collect() # ensure context gets destroyed if it should be
def _vispy_get_size(self):
if self._vispy_canvas is None:
return
w, h = self.GetClientSize()
return w, h
def _vispy_get_position(self):
if self._vispy_canvas is None:
return
x, y = self.GetPosition()
return x, y
def on_close(self, evt):
if not self: # wx control evaluates to false if C++ part deleted
return
if self._vispy_canvas is None:
return
self._vispy_canvas.close()
def on_mouse_event(self, evt):
if self._vispy_canvas is None:
return
pos = (evt.GetX(), evt.GetY())
mods = _get_mods(evt)
if evt.GetWheelRotation() != 0:
delta = (0., float(evt.GetWheelRotation()))
self._vispy_canvas.events.mouse_wheel(delta=delta, pos=pos,
modifiers=mods)
elif evt.Moving() or evt.Dragging(): # mouse move event
self._vispy_mouse_move(pos=pos, modifiers=mods)
elif evt.ButtonDown():
| if evt.LeftDown():
button = 0
elif evt.MiddleDown():
button = 1
elif evt.RightDown():
button = 2
else:
evt.Skip()
self._vispy_mouse_press(pos=pos, button=button, modifiers=mods) | conditional_block |
|
protocol_adapter.rs | decimal::{BigDecimal, FromPrimitive};
use graphql_parser::query::{
Definition, Document, OperationDefinition, Selection as GqlSelection, SelectionSet, Value,
};
use query_core::query_document::*;
/// Protocol adapter for GraphQL -> Query Document.
///
/// GraphQL is mapped as following:
/// - Every field of a `query { ... }` or single selection block `{ ... }` is mapped to an `Operation::Read`.
/// - Every field of a single `mutation { ... }` is mapped to an `Operation::Write`.
/// - If the JSON payload specifies an operation name, only that specific operation is picked and the rest ignored.
/// - Fields on the queries are mapped to `Field`s, including arguments.
/// - Concrete values (e.g. in arguments) are mapped to `ArgumentValue`s.
///
/// Currently unsupported features:
/// - Fragments in any form.
/// - Variables.
/// - Subscription queries.
/// - Query names are ignored
pub struct GraphQLProtocolAdapter;
impl GraphQLProtocolAdapter {
pub fn convert_query_to_operation(query: &str, operation_name: Option<String>) -> crate::Result<Operation> {
let gql_doc = match graphql_parser::parse_query(query) {
Ok(doc) => doc,
Err(err)
if err.to_string().contains("number too large to fit in target type")
| err.to_string().contains("number too small to fit in target type") =>
{
return Err(HandlerError::ValueFitError("Query parsing failure: A number used in the query does not fit into a 64 bit signed integer. Consider using `BigInt` as field type if you're trying to store large integers.".to_owned()));
}
err @ Err(_) => err?,
};
Self::convert(gql_doc, operation_name)
}
pub fn convert(gql_doc: Document<String>, operation: Option<String>) -> crate::Result<Operation> {
let mut operations: Vec<Operation> = match operation {
Some(ref op) => gql_doc
.definitions
.into_iter()
.find(|def| Self::matches_operation(def, op))
.ok_or_else(|| HandlerError::query_conversion(format!("Operation '{op}' does not match any query.")))
.and_then(Self::convert_definition),
None => gql_doc
.definitions
.into_iter()
.map(Self::convert_definition)
.collect::<crate::Result<Vec<Vec<Operation>>>>()
.map(|r| r.into_iter().flatten().collect::<Vec<Operation>>()),
}?;
let operation = operations
.pop()
.ok_or_else(|| HandlerError::query_conversion("Document contained no operations."))?
.dedup_selections();
Ok(operation)
}
fn convert_definition(def: Definition<String>) -> crate::Result<Vec<Operation>> |
fn convert_query(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Read).collect())
}
fn convert_mutation(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Write).collect())
}
fn convert_selection_set(selection_set: SelectionSet<String>) -> crate::Result<Vec<Selection>> {
selection_set
.items
.into_iter()
.map(|item| match item {
GqlSelection::Field(f) => {
let arguments: Vec<(String, ArgumentValue)> = f
.arguments
.into_iter()
.map(|(k, v)| Ok((k, Self::convert_value(v)?)))
.collect::<crate::Result<Vec<_>>>()?;
let nested_selections = Self::convert_selection_set(f.selection_set)?;
Ok(Selection::new(f.name, f.alias, arguments, nested_selections))
}
GqlSelection::FragmentSpread(fs) => Err(HandlerError::unsupported_feature(
"Fragment spread",
format!("Fragment '{}', at position {}.", fs.fragment_name, fs.position),
)),
GqlSelection::InlineFragment(i) => Err(HandlerError::unsupported_feature(
"Inline fragment",
format!("At position {}.", i.position),
)),
})
.collect()
}
/// Checks if the given GraphQL definition matches the operation name that should be executed.
fn matches_operation(def: &Definition<String>, operation: &str) -> bool {
let check = |n: Option<&String>| n.filter(|name| name.as_str() == operation).is_some();
match def {
Definition::Fragment(_) => false,
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => check(s.name.as_ref()),
OperationDefinition::SelectionSet(_) => false,
OperationDefinition::Query(q) => check(q.name.as_ref()),
OperationDefinition::Mutation(m) => check(m.name.as_ref()),
},
}
}
fn convert_value(value: Value<String>) -> crate::Result<ArgumentValue> {
match value {
Value::Variable(name) => Err(HandlerError::unsupported_feature(
"Variable usage",
format!("Variable '{name}'."),
)),
Value::Int(i) => match i.as_i64() {
Some(i) => Ok(ArgumentValue::int(i)),
None => Err(HandlerError::query_conversion(format!("Invalid 64 bit integer: {i:?}"))),
},
Value::Float(f) => match BigDecimal::from_f64(f) {
Some(dec) => Ok(ArgumentValue::float(dec)),
None => Err(HandlerError::query_conversion(format!("invalid 64-bit float: {f:?}"))),
},
Value::String(s) => Ok(ArgumentValue::string(s)),
Value::Boolean(b) => Ok(ArgumentValue::bool(b)),
Value::Null => Ok(ArgumentValue::null()),
Value::Enum(e) => Ok(ArgumentValue::r#enum(e)),
Value::List(values) => {
let values: Vec<ArgumentValue> = values
.into_iter()
.map(Self::convert_value)
.collect::<crate::Result<Vec<ArgumentValue>>>()?;
Ok(ArgumentValue::list(values))
}
Value::Object(map) => {
let values = map
.into_iter()
.map(|(k, v)| Self::convert_value(v).map(|v| (k, v)))
.collect::<crate::Result<ArgumentValueObject>>()?;
Ok(ArgumentValue::object(values))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn converts_single_query() {
let query = r#"
query findTheModelOperation {
findOneModel(where: {a_number: {gte: 1}}) {
id,
large_number,
other {
name
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "findOneModel");
assert!(matches!(operation, Operation::Read(_)));
let read = operation.into_read().unwrap();
let where_args = ArgumentValue::object([(
"a_number".to_string(),
ArgumentValue::object([("gte".to_string(), ArgumentValue::int(1))]),
)]);
assert_eq!(read.arguments(), [("where".to_string(), where_args)]);
let selections = Vec::from([
Selection::new("id", None, [], Vec::new()),
Selection::new("large_number", None, [], Vec::new()),
Selection::new("other", None, [], Vec::from([Selection::new("name", None, [], [])])),
]);
assert_eq!(read.nested_selections(), selections);
}
#[test]
fn converts_single_mutation() {
let query = r#"
mutation {
createOnePost(data: {
id: 1,
categories: {create: [{id: 1}, {id: 2}]}
}) {
id,
categories {
id
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "createOnePost");
assert!(matches!(operation, Operation::Write(_)));
let write = operation.into_write().unwrap();
let data_args = ArgumentValue::object([
("id".to_string(), ArgumentValue::int(1)),
(
"categories".to_string(),
ArgumentValue::object([(
"create".to_string(),
ArgumentValue::list([
ArgumentValue::object([("id".to_string(), ArgumentValue::int(1))]),
ArgumentValue::object([("id".to_string(), ArgumentValue::int(2))]),
]),
)]),
),
]);
println!("args {:?}", write | {
match def {
Definition::Fragment(f) => Err(HandlerError::unsupported_feature(
"Fragment definition",
format!("Fragment '{}', at position {}.", f.name, f.position),
)),
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => Err(HandlerError::unsupported_feature(
"Subscription query",
format!("At position {}.", s.position),
)),
OperationDefinition::SelectionSet(s) => Self::convert_query(s),
OperationDefinition::Query(q) => Self::convert_query(q.selection_set),
OperationDefinition::Mutation(m) => Self::convert_mutation(m.selection_set),
},
}
} | identifier_body |
protocol_adapter.rs | decimal::{BigDecimal, FromPrimitive};
use graphql_parser::query::{
Definition, Document, OperationDefinition, Selection as GqlSelection, SelectionSet, Value,
};
use query_core::query_document::*;
/// Protocol adapter for GraphQL -> Query Document.
///
/// GraphQL is mapped as following:
/// - Every field of a `query { ... }` or single selection block `{ ... }` is mapped to an `Operation::Read`.
/// - Every field of a single `mutation { ... }` is mapped to an `Operation::Write`.
/// - If the JSON payload specifies an operation name, only that specific operation is picked and the rest ignored.
/// - Fields on the queries are mapped to `Field`s, including arguments.
/// - Concrete values (e.g. in arguments) are mapped to `ArgumentValue`s.
///
/// Currently unsupported features:
/// - Fragments in any form.
/// - Variables.
/// - Subscription queries.
/// - Query names are ignored
pub struct GraphQLProtocolAdapter;
impl GraphQLProtocolAdapter {
pub fn convert_query_to_operation(query: &str, operation_name: Option<String>) -> crate::Result<Operation> {
let gql_doc = match graphql_parser::parse_query(query) {
Ok(doc) => doc,
Err(err)
if err.to_string().contains("number too large to fit in target type")
| err.to_string().contains("number too small to fit in target type") =>
{
return Err(HandlerError::ValueFitError("Query parsing failure: A number used in the query does not fit into a 64 bit signed integer. Consider using `BigInt` as field type if you're trying to store large integers.".to_owned()));
}
err @ Err(_) => err?,
};
Self::convert(gql_doc, operation_name)
}
pub fn | (gql_doc: Document<String>, operation: Option<String>) -> crate::Result<Operation> {
let mut operations: Vec<Operation> = match operation {
Some(ref op) => gql_doc
.definitions
.into_iter()
.find(|def| Self::matches_operation(def, op))
.ok_or_else(|| HandlerError::query_conversion(format!("Operation '{op}' does not match any query.")))
.and_then(Self::convert_definition),
None => gql_doc
.definitions
.into_iter()
.map(Self::convert_definition)
.collect::<crate::Result<Vec<Vec<Operation>>>>()
.map(|r| r.into_iter().flatten().collect::<Vec<Operation>>()),
}?;
let operation = operations
.pop()
.ok_or_else(|| HandlerError::query_conversion("Document contained no operations."))?
.dedup_selections();
Ok(operation)
}
fn convert_definition(def: Definition<String>) -> crate::Result<Vec<Operation>> {
match def {
Definition::Fragment(f) => Err(HandlerError::unsupported_feature(
"Fragment definition",
format!("Fragment '{}', at position {}.", f.name, f.position),
)),
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => Err(HandlerError::unsupported_feature(
"Subscription query",
format!("At position {}.", s.position),
)),
OperationDefinition::SelectionSet(s) => Self::convert_query(s),
OperationDefinition::Query(q) => Self::convert_query(q.selection_set),
OperationDefinition::Mutation(m) => Self::convert_mutation(m.selection_set),
},
}
}
fn convert_query(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Read).collect())
}
fn convert_mutation(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Write).collect())
}
fn convert_selection_set(selection_set: SelectionSet<String>) -> crate::Result<Vec<Selection>> {
selection_set
.items
.into_iter()
.map(|item| match item {
GqlSelection::Field(f) => {
let arguments: Vec<(String, ArgumentValue)> = f
.arguments
.into_iter()
.map(|(k, v)| Ok((k, Self::convert_value(v)?)))
.collect::<crate::Result<Vec<_>>>()?;
let nested_selections = Self::convert_selection_set(f.selection_set)?;
Ok(Selection::new(f.name, f.alias, arguments, nested_selections))
}
GqlSelection::FragmentSpread(fs) => Err(HandlerError::unsupported_feature(
"Fragment spread",
format!("Fragment '{}', at position {}.", fs.fragment_name, fs.position),
)),
GqlSelection::InlineFragment(i) => Err(HandlerError::unsupported_feature(
"Inline fragment",
format!("At position {}.", i.position),
)),
})
.collect()
}
/// Checks if the given GraphQL definition matches the operation name that should be executed.
fn matches_operation(def: &Definition<String>, operation: &str) -> bool {
let check = |n: Option<&String>| n.filter(|name| name.as_str() == operation).is_some();
match def {
Definition::Fragment(_) => false,
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => check(s.name.as_ref()),
OperationDefinition::SelectionSet(_) => false,
OperationDefinition::Query(q) => check(q.name.as_ref()),
OperationDefinition::Mutation(m) => check(m.name.as_ref()),
},
}
}
fn convert_value(value: Value<String>) -> crate::Result<ArgumentValue> {
match value {
Value::Variable(name) => Err(HandlerError::unsupported_feature(
"Variable usage",
format!("Variable '{name}'."),
)),
Value::Int(i) => match i.as_i64() {
Some(i) => Ok(ArgumentValue::int(i)),
None => Err(HandlerError::query_conversion(format!("Invalid 64 bit integer: {i:?}"))),
},
Value::Float(f) => match BigDecimal::from_f64(f) {
Some(dec) => Ok(ArgumentValue::float(dec)),
None => Err(HandlerError::query_conversion(format!("invalid 64-bit float: {f:?}"))),
},
Value::String(s) => Ok(ArgumentValue::string(s)),
Value::Boolean(b) => Ok(ArgumentValue::bool(b)),
Value::Null => Ok(ArgumentValue::null()),
Value::Enum(e) => Ok(ArgumentValue::r#enum(e)),
Value::List(values) => {
let values: Vec<ArgumentValue> = values
.into_iter()
.map(Self::convert_value)
.collect::<crate::Result<Vec<ArgumentValue>>>()?;
Ok(ArgumentValue::list(values))
}
Value::Object(map) => {
let values = map
.into_iter()
.map(|(k, v)| Self::convert_value(v).map(|v| (k, v)))
.collect::<crate::Result<ArgumentValueObject>>()?;
Ok(ArgumentValue::object(values))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn converts_single_query() {
let query = r#"
query findTheModelOperation {
findOneModel(where: {a_number: {gte: 1}}) {
id,
large_number,
other {
name
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "findOneModel");
assert!(matches!(operation, Operation::Read(_)));
let read = operation.into_read().unwrap();
let where_args = ArgumentValue::object([(
"a_number".to_string(),
ArgumentValue::object([("gte".to_string(), ArgumentValue::int(1))]),
)]);
assert_eq!(read.arguments(), [("where".to_string(), where_args)]);
let selections = Vec::from([
Selection::new("id", None, [], Vec::new()),
Selection::new("large_number", None, [], Vec::new()),
Selection::new("other", None, [], Vec::from([Selection::new("name", None, [], [])])),
]);
assert_eq!(read.nested_selections(), selections);
}
#[test]
fn converts_single_mutation() {
let query = r#"
mutation {
createOnePost(data: {
id: 1,
categories: {create: [{id: 1}, {id: 2}]}
}) {
id,
categories {
id
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "createOnePost");
assert!(matches!(operation, Operation::Write(_)));
let write = operation.into_write().unwrap();
let data_args = ArgumentValue::object([
("id".to_string(), ArgumentValue::int(1)),
(
"categories".to_string(),
ArgumentValue::object([(
"create".to_string(),
ArgumentValue::list([
ArgumentValue::object([("id".to_string(), ArgumentValue::int(1))]),
ArgumentValue::object([("id".to_string(), ArgumentValue::int(2))]),
]),
)]),
),
]);
println!("args {:?}", write.arguments | convert | identifier_name |
protocol_adapter.rs | decimal::{BigDecimal, FromPrimitive};
use graphql_parser::query::{
Definition, Document, OperationDefinition, Selection as GqlSelection, SelectionSet, Value,
};
use query_core::query_document::*;
/// Protocol adapter for GraphQL -> Query Document.
///
/// GraphQL is mapped as following:
/// - Every field of a `query { ... }` or single selection block `{ ... }` is mapped to an `Operation::Read`.
/// - Every field of a single `mutation { ... }` is mapped to an `Operation::Write`.
/// - If the JSON payload specifies an operation name, only that specific operation is picked and the rest ignored.
/// - Fields on the queries are mapped to `Field`s, including arguments.
/// - Concrete values (e.g. in arguments) are mapped to `ArgumentValue`s.
///
/// Currently unsupported features:
/// - Fragments in any form.
/// - Variables.
/// - Subscription queries.
/// - Query names are ignored
pub struct GraphQLProtocolAdapter;
impl GraphQLProtocolAdapter {
pub fn convert_query_to_operation(query: &str, operation_name: Option<String>) -> crate::Result<Operation> {
let gql_doc = match graphql_parser::parse_query(query) {
Ok(doc) => doc,
Err(err)
if err.to_string().contains("number too large to fit in target type")
| err.to_string().contains("number too small to fit in target type") =>
{
return Err(HandlerError::ValueFitError("Query parsing failure: A number used in the query does not fit into a 64 bit signed integer. Consider using `BigInt` as field type if you're trying to store large integers.".to_owned()));
}
err @ Err(_) => err?,
};
Self::convert(gql_doc, operation_name)
}
pub fn convert(gql_doc: Document<String>, operation: Option<String>) -> crate::Result<Operation> {
let mut operations: Vec<Operation> = match operation {
Some(ref op) => gql_doc
.definitions
.into_iter()
.find(|def| Self::matches_operation(def, op))
.ok_or_else(|| HandlerError::query_conversion(format!("Operation '{op}' does not match any query.")))
.and_then(Self::convert_definition),
None => gql_doc
.definitions
.into_iter()
.map(Self::convert_definition)
.collect::<crate::Result<Vec<Vec<Operation>>>>()
.map(|r| r.into_iter().flatten().collect::<Vec<Operation>>()),
}?;
let operation = operations
.pop()
.ok_or_else(|| HandlerError::query_conversion("Document contained no operations."))?
.dedup_selections();
Ok(operation)
}
fn convert_definition(def: Definition<String>) -> crate::Result<Vec<Operation>> {
match def {
Definition::Fragment(f) => Err(HandlerError::unsupported_feature(
"Fragment definition",
format!("Fragment '{}', at position {}.", f.name, f.position),
)),
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => Err(HandlerError::unsupported_feature(
"Subscription query",
format!("At position {}.", s.position),
)),
OperationDefinition::SelectionSet(s) => Self::convert_query(s),
OperationDefinition::Query(q) => Self::convert_query(q.selection_set),
OperationDefinition::Mutation(m) => Self::convert_mutation(m.selection_set),
},
}
}
fn convert_query(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Read).collect())
}
fn convert_mutation(selection_set: SelectionSet<String>) -> crate::Result<Vec<Operation>> {
Self::convert_selection_set(selection_set).map(|fields| fields.into_iter().map(Operation::Write).collect())
}
fn convert_selection_set(selection_set: SelectionSet<String>) -> crate::Result<Vec<Selection>> {
selection_set
.items
.into_iter()
.map(|item| match item {
GqlSelection::Field(f) => {
let arguments: Vec<(String, ArgumentValue)> = f
.arguments
.into_iter()
.map(|(k, v)| Ok((k, Self::convert_value(v)?)))
.collect::<crate::Result<Vec<_>>>()?;
let nested_selections = Self::convert_selection_set(f.selection_set)?;
Ok(Selection::new(f.name, f.alias, arguments, nested_selections))
}
GqlSelection::FragmentSpread(fs) => Err(HandlerError::unsupported_feature(
"Fragment spread",
format!("Fragment '{}', at position {}.", fs.fragment_name, fs.position),
)),
GqlSelection::InlineFragment(i) => Err(HandlerError::unsupported_feature(
"Inline fragment",
format!("At position {}.", i.position),
)),
})
.collect()
}
/// Checks if the given GraphQL definition matches the operation name that should be executed.
fn matches_operation(def: &Definition<String>, operation: &str) -> bool {
let check = |n: Option<&String>| n.filter(|name| name.as_str() == operation).is_some();
match def {
Definition::Fragment(_) => false,
Definition::Operation(op) => match op {
OperationDefinition::Subscription(s) => check(s.name.as_ref()),
OperationDefinition::SelectionSet(_) => false,
OperationDefinition::Query(q) => check(q.name.as_ref()),
OperationDefinition::Mutation(m) => check(m.name.as_ref()),
},
}
}
fn convert_value(value: Value<String>) -> crate::Result<ArgumentValue> {
match value {
Value::Variable(name) => Err(HandlerError::unsupported_feature(
"Variable usage",
format!("Variable '{name}'."),
)),
Value::Int(i) => match i.as_i64() {
Some(i) => Ok(ArgumentValue::int(i)),
None => Err(HandlerError::query_conversion(format!("Invalid 64 bit integer: {i:?}"))),
},
Value::Float(f) => match BigDecimal::from_f64(f) {
Some(dec) => Ok(ArgumentValue::float(dec)),
None => Err(HandlerError::query_conversion(format!("invalid 64-bit float: {f:?}"))),
},
Value::String(s) => Ok(ArgumentValue::string(s)),
Value::Boolean(b) => Ok(ArgumentValue::bool(b)),
Value::Null => Ok(ArgumentValue::null()),
Value::Enum(e) => Ok(ArgumentValue::r#enum(e)),
Value::List(values) => {
let values: Vec<ArgumentValue> = values
.into_iter()
.map(Self::convert_value)
.collect::<crate::Result<Vec<ArgumentValue>>>()?;
Ok(ArgumentValue::list(values))
}
Value::Object(map) => {
let values = map
.into_iter()
.map(|(k, v)| Self::convert_value(v).map(|v| (k, v)))
.collect::<crate::Result<ArgumentValueObject>>()?;
Ok(ArgumentValue::object(values))
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn converts_single_query() {
let query = r#"
query findTheModelOperation {
findOneModel(where: {a_number: {gte: 1}}) {
id,
large_number,
other {
name
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "findOneModel");
assert!(matches!(operation, Operation::Read(_)));
let read = operation.into_read().unwrap();
let where_args = ArgumentValue::object([(
"a_number".to_string(),
ArgumentValue::object([("gte".to_string(), ArgumentValue::int(1))]),
)]);
assert_eq!(read.arguments(), [("where".to_string(), where_args)]);
let selections = Vec::from([
Selection::new("id", None, [], Vec::new()),
Selection::new("large_number", None, [], Vec::new()),
Selection::new("other", None, [], Vec::from([Selection::new("name", None, [], [])])),
]);
assert_eq!(read.nested_selections(), selections);
}
#[test]
fn converts_single_mutation() {
let query = r#"
mutation {
createOnePost(data: {
id: 1,
categories: {create: [{id: 1}, {id: 2}]}
}) {
id,
categories {
id
}
}
}
"#;
let operation = GraphQLProtocolAdapter::convert_query_to_operation(query, None).unwrap();
assert_eq!(operation.name(), "createOnePost");
assert!(matches!(operation, Operation::Write(_)));
| (
"categories".to_string(),
ArgumentValue::object([(
"create".to_string(),
ArgumentValue::list([
ArgumentValue::object([("id".to_string(), ArgumentValue::int(1))]),
ArgumentValue::object([("id".to_string(), ArgumentValue::int(2))]),
]),
)]),
),
]);
println!("args {:?}", write.arguments());
| let write = operation.into_write().unwrap();
let data_args = ArgumentValue::object([
("id".to_string(), ArgumentValue::int(1)), | random_line_split |
resource.py | (code):
"""
Maps a L{CODE} constant to a HTTP code.
"""
if code == 103:
return http.NOT_FOUND
return http.INTERNAL_SERVER_ERROR
def _writeJSONErrorResponse(f, request):
"""
Serializes a L{Failure} to JSON and writes it to the C{request}
@param f: The L{Failure} to serialize.
@type f: L{Failure}
@param request: The request object to write the JSON to.
@type request: L{twisted.web.server.Request}
"""
code = getattr(f.value, 'code', CODE.UNKNOWN)
_writeJSONResponse(
result=f.getErrorMessage().decode('ascii'),
request=request,
code=code,
status=_mapErrorCodeToStatus(code))
raise f
def jsonResult(f):
"""
Decorator for render_* methods.
Serializes the return value or exception to JSON and then writes it to the request
object.
"""
def _inner(self, request):
d = maybeDeferred(f, self, request)
d.addCallback(_writeJSONResponse, request)
d.addErrback(_writeJSONErrorResponse, request)
return NOT_DONE_YET
return _inner
class RootResource(Resource):
def __init__(self, store, steamKey, paypalSandbox, threadPool):
Resource.__init__(self)
self.putChild("api", DonationAPI(store, steamKey, threadPool))
self.putChild("paypal", PayPal(store, paypalSandbox))
self.putChild("static", File('bdm/static/'))
self.putChild("", File('bdm/static/html/index.html'))
class PayPal(Resource):
isLeaf = True
def __init__(self, store, sandbox):
Resource.__init__(self)
self.store = store
self.SANDBOX = sandbox
def verify(self, request):
"""
Verify PayPal IPN data.
"""
paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr'
if not self.SANDBOX:
paypalURL = 'https://www.paypal.com/cgi-bin/webscr'
def _cb(response):
if response == 'INVALID':
raise PaypalError(
'IPN data invalid. data: %s', (data,))
elif response == 'VERIFIED':
return True
else:
raise PaypalError('Unrecognized verification response: %s', (response,))
data = request.content.read()
params = '?cmd=_notify-validate&' + data
d = getPage(paypalURL+params, method='POST')
d.addCallback(_cb)
return d
def _process(self, data):
paymentStatus = data['payment_status'][0].lower()
method = getattr(self, '_payment_%s' % (paymentStatus,))
if method is not None:
method(data)
else:
log.err('Unknown payment status: %s' % (paymentStatus,))
def _payment_completed(self, data):
txn_id = data['txn_id'][0]
amount = data.get('settle_amount', data['mc_gross'])[0]
custom = json.loads(b64decode(data['custom'][0]))
anonymous = custom['anonymous']
steamID = custom['steamid']
if steamID:
steamID = unicode(steamidTo64(steamID))
donator = self.store.findOrCreate(
Donator, steamID=steamID, anonymous=anonymous)
donator.addDonation(Decimal(amount), unicode(txn_id))
def _payment_refunded(self, data):
donation = self.store.query(
Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0])))
donation.deleteFromStore()
def _payment_reversed(self, data):
donation = self.store.findUnique(
Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0])))
donation.deleteFromStore()
def _payment_canceled_reversal(self, data):
#XXX: TODO if ithere is ever a reversal cancelled.
log.err("Reversal cancelled:")
log.err(data)
def render_POST(self, request):
"""
Recieves and verifies PayPal callbacks.
"""
log.msg("Paypal callback:")
log.msg(request.args)
d = self.verify(request)
d.addCallback(lambda ign: self._process(request.args))
d.addErrback(log.err)
return ''
class DonationAPI(Resource):
isLeaf = True
def __init__(self, store, steamKey, threadPool):
self.store = store
self.steamKey = steamKey
self.threadPool = threadPool
Resource.__init__(self)
def recent(self, limit):
"""
Retrieve a list of recent donations.
@param limit: The amount of donations to return.
@type limit: L{int}
@return: A list of donations.
@rtype: L{list} of L{dict}s.
"""
def _cb(players, donations):
donators = []
for donation in donations:
player = players[donation.donator.steamID].copy()
player['date'] = donation.date.asPOSIXTimestamp()
player['amount'] = str(donation.amount)
donators.append(player)
return donators
donations = []
steamids = set()
for donation in self.store.query(Donation,
AND(Donation.donator == Donator.storeID,
Donator.anonymous == False,
Donator.steamID != None),
limit=limit,
sort=Donation.date.descending):
steamids.add(donation.donator.steamID)
donations.append(donation)
d = self.getPlayerSummaries(steamids)
d.addCallback(_cb, donations)
return d
def steamID(self, steamid):
try:
donator = self.store.findUnique(
Donator, Donator.steamID == unicode(steamid))
except ItemNotFound:
raise BloodyError("SteamID '%s' not found." % (steamid,))
donations = []
for donation in donator.donations:
donations.append(donationToDict(donation))
return donations
def getPlayerSummaries(self, steamids):
def _cb(response):
r = json.loads(response)['response']
players = {}
for player in r['players']:
p = player['steamid']
players[p] = player
return players
url = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?'
params = 'key=%s&steamids=%s' % (self.steamKey, ','.join(steamids))
d = getPage(str(url+params))
d.addCallback(_cb)
return d
@jsonResult
def render_GET(self, request):
if not request.postpath:
return "nope"
name = request.postpath[0]
if name == u'steamid':
if len(request.postpath[1]) <= 1 or request.postpath[1] is None:
raise Exception("No SteamID provided.")
return self.steamID(request.postpath[1])
if name == u'recent':
try:
limit = request.postpath[1]
except IndexError:
limit = 5
return self.recent(limit)
if name == u'top':
try:
limit = request.postpath[1]
except IndexError:
limit = 5
return self.getTop(limit)
return NoResource('')
@jsonResult
def render_POST(self, request):
if not request.postpath:
return "maybe sam dox"
name = request.postpath[0]
content = json.loads(request.content.read())
if not content:
return 'No JSON provided'
if name == u'servers':
return self.serverStats(content)
return NoResource('')
def getTop(self, limit):
"""
Retrieves a list of donators sorted by total donation amount.
"""
def _cb(info, donators):
players = []
for donator in donators:
players.append(dict(donator, **info[donator['steamID']]))
return players
donators = []
steamIDs = []
for d in self.store.query(Donator,
AND(Donator.anonymous == False,
Donator.steamID != None),
sort=Donator.totalAmount.desc,
limit=limit):
steamIDs.append(d.steamID)
donators.append(donatorToDict(d))
d = self.getPlayerSummaries(steamIDs)
d.addCallback(_cb, donators)
return d
def serverStats(self, servers, querier=ServerQuerier):
def getInfo(server):
def _tx():
q = querier(server)
try:
info = q.info()
return {'server_name': info['server_name'],
'map': info['map'],
'player_count': info['player_count'],
'max_players': info['max_players'],
'online': True,
'location': server[2]}
except NoResponseError:
return {'server_name': server[0],
'online': False,
'location': server[2]}
return deferToThreadPool(reactor, self.threadPool, _tx)
deferreds = []
for server in servers:
| deferreds.append(getInfo(server)) | conditional_block |
|
resource.py | Dict
from bdm.error import BloodyError, PaypalError
from bdm.constants import CODE
from valve.source.a2s import ServerQuerier, NoResponseError
from valve.steam.id import SteamID as ValveSteamID
def steamidTo64(steamid):
|
def _writeJSONResponse(result, request, code=CODE.SUCCESS, status=http.OK):
"""
Serializes C{result} to JSON and writes it to C{request}.
@param result: The content to be serialized and written to the request.
@type result: An object accepted by json.dumps.
@param request: The request object to write JSON to.
@type request: L{twisted.web.server.Request}
@param code: A code to include in the JSON response.
@type code: C{int}
@param status: The HTTP status the response will have.
@type status: C{int}
"""
response = {
u'code': code.value,
u'result': result}
request.setHeader('content-type', 'application/json')
request.setResponseCode(status)
request.write(json.dumps(response))
request.finish()
def _mapErrorCodeToStatus(code):
"""
Maps a L{CODE} constant to a HTTP code.
"""
if code == 103:
return http.NOT_FOUND
return http.INTERNAL_SERVER_ERROR
def _writeJSONErrorResponse(f, request):
"""
Serializes a L{Failure} to JSON and writes it to the C{request}
@param f: The L{Failure} to serialize.
@type f: L{Failure}
@param request: The request object to write the JSON to.
@type request: L{twisted.web.server.Request}
"""
code = getattr(f.value, 'code', CODE.UNKNOWN)
_writeJSONResponse(
result=f.getErrorMessage().decode('ascii'),
request=request,
code=code,
status=_mapErrorCodeToStatus(code))
raise f
def jsonResult(f):
"""
Decorator for render_* methods.
Serializes the return value or exception to JSON and then writes it to the request
object.
"""
def _inner(self, request):
d = maybeDeferred(f, self, request)
d.addCallback(_writeJSONResponse, request)
d.addErrback(_writeJSONErrorResponse, request)
return NOT_DONE_YET
return _inner
class RootResource(Resource):
def __init__(self, store, steamKey, paypalSandbox, threadPool):
Resource.__init__(self)
self.putChild("api", DonationAPI(store, steamKey, threadPool))
self.putChild("paypal", PayPal(store, paypalSandbox))
self.putChild("static", File('bdm/static/'))
self.putChild("", File('bdm/static/html/index.html'))
class PayPal(Resource):
isLeaf = True
def __init__(self, store, sandbox):
Resource.__init__(self)
self.store = store
self.SANDBOX = sandbox
def verify(self, request):
"""
Verify PayPal IPN data.
"""
paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr'
if not self.SANDBOX:
paypalURL = 'https://www.paypal.com/cgi-bin/webscr'
def _cb(response):
if response == 'INVALID':
raise PaypalError(
'IPN data invalid. data: %s', (data,))
elif response == 'VERIFIED':
return True
else:
raise PaypalError('Unrecognized verification response: %s', (response,))
data = request.content.read()
params = '?cmd=_notify-validate&' + data
d = getPage(paypalURL+params, method='POST')
d.addCallback(_cb)
return d
def _process(self, data):
paymentStatus = data['payment_status'][0].lower()
method = getattr(self, '_payment_%s' % (paymentStatus,))
if method is not None:
method(data)
else:
log.err('Unknown payment status: %s' % (paymentStatus,))
def _payment_completed(self, data):
txn_id = data['txn_id'][0]
amount = data.get('settle_amount', data['mc_gross'])[0]
custom = json.loads(b64decode(data['custom'][0]))
anonymous = custom['anonymous']
steamID = custom['steamid']
if steamID:
steamID = unicode(steamidTo64(steamID))
donator = self.store.findOrCreate(
Donator, steamID=steamID, anonymous=anonymous)
donator.addDonation(Decimal(amount), unicode(txn_id))
def _payment_refunded(self, data):
donation = self.store.query(
Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0])))
donation.deleteFromStore()
def _payment_reversed(self, data):
donation = self.store.findUnique(
Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0])))
donation.deleteFromStore()
def _payment_canceled_reversal(self, data):
#XXX: TODO if ithere is ever a reversal cancelled.
log.err("Reversal cancelled:")
log.err(data)
def render_POST(self, request):
"""
Recieves and verifies PayPal callbacks.
"""
log.msg("Paypal callback:")
log.msg(request.args)
d = self.verify(request)
d.addCallback(lambda ign: self._process(request.args))
d.addErrback(log.err)
return ''
class DonationAPI(Resource):
isLeaf = True
def __init__(self, store, steamKey, threadPool):
self.store = store
self.steamKey = steamKey
self.threadPool = threadPool
Resource.__init__(self)
def recent(self, limit):
"""
Retrieve a list of recent donations.
@param limit: The amount of donations to return.
@type limit: L{int}
@return: A list of donations.
@rtype: L{list} of L{dict}s.
"""
def _cb(players, donations):
donators = []
for donation in donations:
player = players[donation.donator.steamID].copy()
player['date'] = donation.date.asPOSIXTimestamp()
player['amount'] = str(donation.amount)
donators.append(player)
return donators
donations = []
steamids = set()
for donation in self.store.query(Donation,
AND(Donation.donator == Donator.storeID,
Donator.anonymous == False,
Donator.steamID != None),
limit=limit,
sort=Donation.date.descending):
steamids.add(donation.donator.steamID)
donations.append(donation)
d = self.getPlayerSummaries(steamids)
d.addCallback(_cb, donations)
return d
def steamID(self, steamid):
try:
donator = self.store.findUnique(
Donator, Donator.steamID == unicode(steamid))
except ItemNotFound:
raise BloodyError("SteamID '%s' not found." % (steamid,))
donations = []
for donation in donator.donations:
donations.append(donationToDict(donation))
return donations
def getPlayerSummaries(self, steamids):
def _cb(response):
r = json.loads(response)['response']
players = {}
for player in r['players']:
p = player['steamid']
players[p] = player
return players
url = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?'
params = 'key=%s&steamids=%s' % (self.steamKey, ','.join(steamids))
d = getPage(str(url+params))
d.addCallback(_cb)
return d
@jsonResult
def render_GET(self, request):
if not request.postpath:
return "nope"
name = request.postpath[0]
if name == u'steamid':
if len(request.postpath[1]) <= 1 or request.postpath[1] is None:
raise Exception("No SteamID provided.")
return self.steamID(request.postpath[1])
if name == u'recent':
try:
limit = request.postpath[1]
except IndexError:
limit = 5
return self.recent(limit)
if name == u'top':
try:
limit = request.postpath[1]
except IndexError:
limit = 5
return self.getTop(limit)
return NoResource('')
@jsonResult
def render_POST(self, request):
if not request.postpath:
return "maybe sam dox"
name = request.postpath[0]
content = json.loads(request.content.read())
if not content:
return 'No JSON provided'
if name == u'servers':
return self.serverStats(content)
return NoResource('')
def getTop(self, limit):
"""
Retrieves a list of donators sorted by total donation amount.
"""
def _cb(info, donators):
players = []
for donator in donators:
players.append(dict(donator, **info[don | return ValveSteamID.from_text(steamid).as_64() | identifier_body |
resource.py | Dict
from bdm.error import BloodyError, PaypalError
from bdm.constants import CODE
from valve.source.a2s import ServerQuerier, NoResponseError
from valve.steam.id import SteamID as ValveSteamID
def steamidTo64(steamid):
return ValveSteamID.from_text(steamid).as_64()
def _writeJSONResponse(result, request, code=CODE.SUCCESS, status=http.OK):
"""
Serializes C{result} to JSON and writes it to C{request}.
@param result: The content to be serialized and written to the request.
@type result: An object accepted by json.dumps.
@param request: The request object to write JSON to.
@type request: L{twisted.web.server.Request}
@param code: A code to include in the JSON response.
@type code: C{int}
@param status: The HTTP status the response will have.
@type status: C{int}
"""
response = {
u'code': code.value,
u'result': result}
request.setHeader('content-type', 'application/json')
request.setResponseCode(status)
request.write(json.dumps(response))
request.finish()
def _mapErrorCodeToStatus(code):
"""
Maps a L{CODE} constant to a HTTP code.
"""
if code == 103:
return http.NOT_FOUND
return http.INTERNAL_SERVER_ERROR
def _writeJSONErrorResponse(f, request):
"""
Serializes a L{Failure} to JSON and writes it to the C{request}
@param f: The L{Failure} to serialize.
@type f: L{Failure}
@param request: The request object to write the JSON to.
@type request: L{twisted.web.server.Request}
"""
code = getattr(f.value, 'code', CODE.UNKNOWN)
_writeJSONResponse(
result=f.getErrorMessage().decode('ascii'),
request=request,
code=code,
status=_mapErrorCodeToStatus(code))
raise f
def jsonResult(f):
"""
Decorator for render_* methods.
Serializes the return value or exception to JSON and then writes it to the request
object.
"""
def _inner(self, request):
d = maybeDeferred(f, self, request)
d.addCallback(_writeJSONResponse, request)
d.addErrback(_writeJSONErrorResponse, request)
return NOT_DONE_YET
return _inner
class RootResource(Resource):
def __init__(self, store, steamKey, paypalSandbox, threadPool):
Resource.__init__(self)
self.putChild("api", DonationAPI(store, steamKey, threadPool))
self.putChild("paypal", PayPal(store, paypalSandbox))
self.putChild("static", File('bdm/static/'))
self.putChild("", File('bdm/static/html/index.html'))
class PayPal(Resource):
isLeaf = True
def __init__(self, store, sandbox):
Resource.__init__(self)
self.store = store
self.SANDBOX = sandbox
def verify(self, request):
"""
Verify PayPal IPN data.
"""
paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr'
if not self.SANDBOX:
paypalURL = 'https://www.paypal.com/cgi-bin/webscr' |
elif response == 'VERIFIED':
return True
else:
raise PaypalError('Unrecognized verification response: %s', (response,))
data = request.content.read()
params = '?cmd=_notify-validate&' + data
d = getPage(paypalURL+params, method='POST')
d.addCallback(_cb)
return d
def _process(self, data):
paymentStatus = data['payment_status'][0].lower()
method = getattr(self, '_payment_%s' % (paymentStatus,))
if method is not None:
method(data)
else:
log.err('Unknown payment status: %s' % (paymentStatus,))
def _payment_completed(self, data):
txn_id = data['txn_id'][0]
amount = data.get('settle_amount', data['mc_gross'])[0]
custom = json.loads(b64decode(data['custom'][0]))
anonymous = custom['anonymous']
steamID = custom['steamid']
if steamID:
steamID = unicode(steamidTo64(steamID))
donator = self.store.findOrCreate(
Donator, steamID=steamID, anonymous=anonymous)
donator.addDonation(Decimal(amount), unicode(txn_id))
def _payment_refunded(self, data):
donation = self.store.query(
Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0])))
donation.deleteFromStore()
def _payment_reversed(self, data):
donation = self.store.findUnique(
Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0])))
donation.deleteFromStore()
def _payment_canceled_reversal(self, data):
#XXX: TODO if ithere is ever a reversal cancelled.
log.err("Reversal cancelled:")
log.err(data)
def render_POST(self, request):
"""
Recieves and verifies PayPal callbacks.
"""
log.msg("Paypal callback:")
log.msg(request.args)
d = self.verify(request)
d.addCallback(lambda ign: self._process(request.args))
d.addErrback(log.err)
return ''
class DonationAPI(Resource):
isLeaf = True
def __init__(self, store, steamKey, threadPool):
self.store = store
self.steamKey = steamKey
self.threadPool = threadPool
Resource.__init__(self)
def recent(self, limit):
"""
Retrieve a list of recent donations.
@param limit: The amount of donations to return.
@type limit: L{int}
@return: A list of donations.
@rtype: L{list} of L{dict}s.
"""
def _cb(players, donations):
donators = []
for donation in donations:
player = players[donation.donator.steamID].copy()
player['date'] = donation.date.asPOSIXTimestamp()
player['amount'] = str(donation.amount)
donators.append(player)
return donators
donations = []
steamids = set()
for donation in self.store.query(Donation,
AND(Donation.donator == Donator.storeID,
Donator.anonymous == False,
Donator.steamID != None),
limit=limit,
sort=Donation.date.descending):
steamids.add(donation.donator.steamID)
donations.append(donation)
d = self.getPlayerSummaries(steamids)
d.addCallback(_cb, donations)
return d
def steamID(self, steamid):
try:
donator = self.store.findUnique(
Donator, Donator.steamID == unicode(steamid))
except ItemNotFound:
raise BloodyError("SteamID '%s' not found." % (steamid,))
donations = []
for donation in donator.donations:
donations.append(donationToDict(donation))
return donations
def getPlayerSummaries(self, steamids):
def _cb(response):
r = json.loads(response)['response']
players = {}
for player in r['players']:
p = player['steamid']
players[p] = player
return players
url = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?'
params = 'key=%s&steamids=%s' % (self.steamKey, ','.join(steamids))
d = getPage(str(url+params))
d.addCallback(_cb)
return d
@jsonResult
def render_GET(self, request):
if not request.postpath:
return "nope"
name = request.postpath[0]
if name == u'steamid':
if len(request.postpath[1]) <= 1 or request.postpath[1] is None:
raise Exception("No SteamID provided.")
return self.steamID(request.postpath[1])
if name == u'recent':
try:
limit = request.postpath[1]
except IndexError:
limit = 5
return self.recent(limit)
if name == u'top':
try:
limit = request.postpath[1]
except IndexError:
limit = 5
return self.getTop(limit)
return NoResource('')
@jsonResult
def render_POST(self, request):
if not request.postpath:
return "maybe sam dox"
name = request.postpath[0]
content = json.loads(request.content.read())
if not content:
return 'No JSON provided'
if name == u'servers':
return self.serverStats(content)
return NoResource('')
def getTop(self, limit):
"""
Retrieves a list of donators sorted by total donation amount.
"""
def _cb(info, donators):
players = []
for donator in donators:
players.append(dict(donator, **info[ |
def _cb(response):
if response == 'INVALID':
raise PaypalError(
'IPN data invalid. data: %s', (data,)) | random_line_split |
resource.py | Dict
from bdm.error import BloodyError, PaypalError
from bdm.constants import CODE
from valve.source.a2s import ServerQuerier, NoResponseError
from valve.steam.id import SteamID as ValveSteamID
def steamidTo64(steamid):
return ValveSteamID.from_text(steamid).as_64()
def _writeJSONResponse(result, request, code=CODE.SUCCESS, status=http.OK):
"""
Serializes C{result} to JSON and writes it to C{request}.
@param result: The content to be serialized and written to the request.
@type result: An object accepted by json.dumps.
@param request: The request object to write JSON to.
@type request: L{twisted.web.server.Request}
@param code: A code to include in the JSON response.
@type code: C{int}
@param status: The HTTP status the response will have.
@type status: C{int}
"""
response = {
u'code': code.value,
u'result': result}
request.setHeader('content-type', 'application/json')
request.setResponseCode(status)
request.write(json.dumps(response))
request.finish()
def _mapErrorCodeToStatus(code):
"""
Maps a L{CODE} constant to a HTTP code.
"""
if code == 103:
return http.NOT_FOUND
return http.INTERNAL_SERVER_ERROR
def _writeJSONErrorResponse(f, request):
"""
Serializes a L{Failure} to JSON and writes it to the C{request}
@param f: The L{Failure} to serialize.
@type f: L{Failure}
@param request: The request object to write the JSON to.
@type request: L{twisted.web.server.Request}
"""
code = getattr(f.value, 'code', CODE.UNKNOWN)
_writeJSONResponse(
result=f.getErrorMessage().decode('ascii'),
request=request,
code=code,
status=_mapErrorCodeToStatus(code))
raise f
def jsonResult(f):
"""
Decorator for render_* methods.
Serializes the return value or exception to JSON and then writes it to the request
object.
"""
def _inner(self, request):
d = maybeDeferred(f, self, request)
d.addCallback(_writeJSONResponse, request)
d.addErrback(_writeJSONErrorResponse, request)
return NOT_DONE_YET
return _inner
class RootResource(Resource):
def __init__(self, store, steamKey, paypalSandbox, threadPool):
Resource.__init__(self)
self.putChild("api", DonationAPI(store, steamKey, threadPool))
self.putChild("paypal", PayPal(store, paypalSandbox))
self.putChild("static", File('bdm/static/'))
self.putChild("", File('bdm/static/html/index.html'))
class PayPal(Resource):
isLeaf = True
def __init__(self, store, sandbox):
Resource.__init__(self)
self.store = store
self.SANDBOX = sandbox
def verify(self, request):
"""
Verify PayPal IPN data.
"""
paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr'
if not self.SANDBOX:
paypalURL = 'https://www.paypal.com/cgi-bin/webscr'
def _cb(response):
if response == 'INVALID':
raise PaypalError(
'IPN data invalid. data: %s', (data,))
elif response == 'VERIFIED':
return True
else:
raise PaypalError('Unrecognized verification response: %s', (response,))
data = request.content.read()
params = '?cmd=_notify-validate&' + data
d = getPage(paypalURL+params, method='POST')
d.addCallback(_cb)
return d
def _process(self, data):
paymentStatus = data['payment_status'][0].lower()
method = getattr(self, '_payment_%s' % (paymentStatus,))
if method is not None:
method(data)
else:
log.err('Unknown payment status: %s' % (paymentStatus,))
def _payment_completed(self, data):
txn_id = data['txn_id'][0]
amount = data.get('settle_amount', data['mc_gross'])[0]
custom = json.loads(b64decode(data['custom'][0]))
anonymous = custom['anonymous']
steamID = custom['steamid']
if steamID:
steamID = unicode(steamidTo64(steamID))
donator = self.store.findOrCreate(
Donator, steamID=steamID, anonymous=anonymous)
donator.addDonation(Decimal(amount), unicode(txn_id))
def _payment_refunded(self, data):
donation = self.store.query(
Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0])))
donation.deleteFromStore()
def _payment_reversed(self, data):
donation = self.store.findUnique(
Donation, AND(Donation.paypalID == unicode(data['parent_txn_id'][0])))
donation.deleteFromStore()
def _payment_canceled_reversal(self, data):
#XXX: TODO if ithere is ever a reversal cancelled.
log.err("Reversal cancelled:")
log.err(data)
def render_POST(self, request):
"""
Recieves and verifies PayPal callbacks.
"""
log.msg("Paypal callback:")
log.msg(request.args)
d = self.verify(request)
d.addCallback(lambda ign: self._process(request.args))
d.addErrback(log.err)
return ''
class DonationAPI(Resource):
isLeaf = True
def __init__(self, store, steamKey, threadPool):
self.store = store
self.steamKey = steamKey
self.threadPool = threadPool
Resource.__init__(self)
def recent(self, limit):
"""
Retrieve a list of recent donations.
@param limit: The amount of donations to return.
@type limit: L{int}
@return: A list of donations.
@rtype: L{list} of L{dict}s.
"""
def _cb(players, donations):
donators = []
for donation in donations:
player = players[donation.donator.steamID].copy()
player['date'] = donation.date.asPOSIXTimestamp()
player['amount'] = str(donation.amount)
donators.append(player)
return donators
donations = []
steamids = set()
for donation in self.store.query(Donation,
AND(Donation.donator == Donator.storeID,
Donator.anonymous == False,
Donator.steamID != None),
limit=limit,
sort=Donation.date.descending):
steamids.add(donation.donator.steamID)
donations.append(donation)
d = self.getPlayerSummaries(steamids)
d.addCallback(_cb, donations)
return d
def steamID(self, steamid):
try:
donator = self.store.findUnique(
Donator, Donator.steamID == unicode(steamid))
except ItemNotFound:
raise BloodyError("SteamID '%s' not found." % (steamid,))
donations = []
for donation in donator.donations:
donations.append(donationToDict(donation))
return donations
def getPlayerSummaries(self, steamids):
def _cb(response):
r = json.loads(response)['response']
players = {}
for player in r['players']:
p = player['steamid']
players[p] = player
return players
url = 'http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?'
params = 'key=%s&steamids=%s' % (self.steamKey, ','.join(steamids))
d = getPage(str(url+params))
d.addCallback(_cb)
return d
@jsonResult
def render_GET(self, request):
if not request.postpath:
return "nope"
name = request.postpath[0]
if name == u'steamid':
if len(request.postpath[1]) <= 1 or request.postpath[1] is None:
raise Exception("No SteamID provided.")
return self.steamID(request.postpath[1])
if name == u'recent':
try:
limit = request.postpath[1]
except IndexError:
limit = 5
return self.recent(limit)
if name == u'top':
try:
limit = request.postpath[1]
except IndexError:
limit = 5
return self.getTop(limit)
return NoResource('')
@jsonResult
def | (self, request):
if not request.postpath:
return "maybe sam dox"
name = request.postpath[0]
content = json.loads(request.content.read())
if not content:
return 'No JSON provided'
if name == u'servers':
return self.serverStats(content)
return NoResource('')
def getTop(self, limit):
"""
Retrieves a list of donators sorted by total donation amount.
"""
def _cb(info, donators):
players = []
for donator in donators:
players.append(dict(donator, **info[don | render_POST | identifier_name |
__init__.py | Distance function - frobenius, nuclear, mahalanobis (just form of mahalanobis), or
:param decay: decaying learning rate and radius - exponential or linear
:param seed: Random seed
"""
np.random.seed(seed = seed)
if xdim is None or ydim is None:
xdim = int(np.sqrt(5 * np.sqrt(data.shape[0])))
ydim = xdim
self.net_dim = np.array([xdim, ydim])
self.ncol = data.shape[2]
self.nrow = data.shape[1]
# Initialize codebook matrix
self.init_weight()
# Topology
topo_types = ["rectangular", "hexagonal"]
if topo not in topo_types:
raise ValueError("Invalid topo. Expected one of: %s" % topo_types)
self.topo = topo
self.init_grid()
self.dist_node()
# Neighborhood function
neighbor_types = ["gaussian", "bubble", "triangular"]
if neighbor not in neighbor_types:
raise ValueError("Invalid neighbor. Expected one of: %s" % neighbor_types)
self.neighbor_func = neighbor
# Distance function
dist_type = ["frobenius", "nuclear", "mahalanobis", "eros"]
if dist not in dist_type:
raise ValueError("Invalid dist. Expected one of: %s" % dist_type)
self.dist_func = dist
# Decay
decay_types = ["exponential", "linear"]
if decay not in decay_types:
raise ValueError("Invalid decay. Expected one of: %s" % decay_types)
self.decay_func = decay
# som()
self.epoch = None
self.alpha = None
self.sigma = None
self.initial_learn = None
self.initial_r = None
# find_bmu()
self.bmu = None
# plot
self.reconstruction_error = None
self.dist_normal = None
self.project = None
def init_weight(self):
self.net = np.random.rand(self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol)
def init_grid(self):
"""
[row_pts, col_pts]
xdim x ydim rows (points)
[1,1]
[2,1]
[1,2]
[2,2]
2--------->
1--------->^
"""
self.pts = np.array(
np.meshgrid(
np.arange(self.net_dim[0]) + 1,
np.arange(self.net_dim[1]) + 1
)
).reshape(2, np.prod(self.net_dim)).T
if self.topo == "hexagonal":
self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2)
self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1]
def som(self, data, epoch = 100, init_rate = None, init_radius = None, keep_net = False):
"""
:param data: 3d array. processed data set for Online SOM Detector
:param epoch: epoch number
:param init_rate: initial learning rate
:param init_radius: initial radius of BMU neighborhood
:param keep_net: keep every weight matrix path?
"""
num_obs = data.shape[0]
obs_id = np.arange(num_obs)
chose_i = np.empty(1)
node_id = None
hci = None
self.epoch = epoch
if keep_net:
self.net_path = np.empty(
(self.epoch, self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol)
)
# learning rate
if init_rate is None:
init_rate = .1
self.alpha = init_rate
self.initial_learn = init_rate
# radius of neighborhood
if init_radius is None:
init_radius = np.quantile(self.dci, q = 2 / 3, axis = None)
self.sigma = init_radius
self.initial_r = init_radius
# time constant (lambda)
rate_constant = epoch
radius_constant = epoch / np.log(self.sigma)
# distance between nodes
bmu_dist = self.dci[1, :]
rcst_err = np.empty(epoch)
for i in tqdm(range(epoch), desc = "epoch"):
chose_i = int(np.random.choice(obs_id, size = 1))
# BMU - self.bmu
self.find_bmu(data, chose_i)
# reconstruction error - sum of distances from BMU
rcst_err[i] = np.sum([np.square(self.dist_mat(data, j, self.bmu.astype(int))) for j in range(data.shape[0])])
bmu_dist = self.dci[self.bmu.astype(int), :].flatten()
# decay
self.sigma = self.decay(init_radius, i + 1, radius_constant)
self.alpha = self.decay(init_rate, i + 1, rate_constant)
# neighboring nodes (includes BMU)
neighbor_neuron = np.argwhere(bmu_dist <= self.sigma).flatten()
for k in tqdm(range(neighbor_neuron.shape[0]), desc = "updating"):
node_id = neighbor_neuron[k]
hci = self.neighborhood(bmu_dist[node_id], self.sigma)
# update codebook matrices of neighboring nodes
self.net[node_id, :, :] += \
self.alpha * hci * \
(data[chose_i, :, :] - self.net[node_id, :, :]).reshape((self.nrow, self.ncol))
if keep_net:
self.net_path[i, :, :, :] = self.net
self.reconstruction_error = pd.DataFrame({"Epoch": np.arange(self.epoch) + 1, "Reconstruction Error": rcst_err})
def find_bmu(self, data, index):
"""
:param data: Processed data set for SOM.
:param index: Randomly chosen observation id for input matrix among 3d tensor set.
"""
dist_code = np.asarray([self.dist_mat(data, index, j) for j in range(self.net.shape[0])])
self.bmu = np.argmin(dist_code)
def dist_mat(self, data, index, node):
"""
:param data: Processed data set for SOM.
:param index: Randomly chosen observation id for input matrix among 3d tensor set.
:param node: node index
:return: distance between input matrix observation and weight matrix of the node
"""
if self.dist_func == "frobenius":
return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "fro")
elif self.dist_func == "nuclear":
return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "nuc")
elif self.dist_func == "mahalanobis":
x = data[index, :, :] - self.net[node, :, :]
covmat = np.cov(x, rowvar = False)
# spectral decomposition sigma = udu.T
w, v = np.linalg.eigh(covmat)
# inverse = ud^-1u.T
w[w == 0] += .0001
covinv = v.dot(np.diag(1 / w)).dot(v.T)
ss = x.dot(covinv).dot(x.T)
return np.sqrt(np.trace(ss))
elif self.dist_func == "eros":
x = data[index, :, :] - self.net[node, :, :]
covmat = np.cov(x, rowvar = False)
# svd(covariance)
u, s, vh = randomized_svd(covmat, n_components = covmat.shape[1], n_iter = 1, random_state = None)
# normalize eigenvalue
w = s / s.sum()
# distance
ss = np.multiply(vh, w).dot(vh.T)
return np.sqrt(np.trace(ss))
def dist_node(self):
"""
:return: distance matrix of SOM neuron
"""
if self.topo == "hexagonal":
self.dci = distance.cdist(self.pts, self.pts, "euclidean")
elif self.topo == "rectangular":
self.dci = distance.cdist(self.pts, self.pts, "chebyshev")
def decay(self, init, time, time_constant):
"""
:param init: initial value
:param time: t
:param time_constant: lambda
:return: decaying value of alpha or sigma
"""
if self.decay_func == "exponential":
return init * np.exp(-time / time_constant)
elif self.decay_func == "linear":
return init * (1 - time / time_constant)
def neighborhood(self, node_distance, radius):
"""
:param node_distance: Distance between SOM neurons
:param radius: Radius of BMU neighborhood
:return: Neighborhood function hci
"""
if self.neighbor_func == "gaussian":
return np.exp(-node_distance ** 2 / (2 * (radius ** 2)))
elif self.neighbor_func == "bubble":
if node_distance <= radius:
return 1.0
| conditional_block |
||
__init__.py | , mahalanobis (just form of mahalanobis), or
:param decay: decaying learning rate and radius - exponential or linear
:param seed: Random seed
"""
np.random.seed(seed = seed)
if xdim is None or ydim is None:
xdim = int(np.sqrt(5 * np.sqrt(data.shape[0])))
ydim = xdim
self.net_dim = np.array([xdim, ydim])
self.ncol = data.shape[2]
self.nrow = data.shape[1]
# Initialize codebook matrix
self.init_weight()
# Topology
topo_types = ["rectangular", "hexagonal"]
if topo not in topo_types:
raise ValueError("Invalid topo. Expected one of: %s" % topo_types)
self.topo = topo
self.init_grid()
self.dist_node()
# Neighborhood function
neighbor_types = ["gaussian", "bubble", "triangular"]
if neighbor not in neighbor_types:
raise ValueError("Invalid neighbor. Expected one of: %s" % neighbor_types)
self.neighbor_func = neighbor
# Distance function
dist_type = ["frobenius", "nuclear", "mahalanobis", "eros"]
if dist not in dist_type:
raise ValueError("Invalid dist. Expected one of: %s" % dist_type)
self.dist_func = dist
# Decay
decay_types = ["exponential", "linear"]
if decay not in decay_types:
raise ValueError("Invalid decay. Expected one of: %s" % decay_types)
self.decay_func = decay
# som()
self.epoch = None
self.alpha = None
self.sigma = None
self.initial_learn = None
self.initial_r = None
# find_bmu()
self.bmu = None
# plot
self.reconstruction_error = None
self.dist_normal = None
self.project = None
def init_weight(self):
self.net = np.random.rand(self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol)
def init_grid(self):
"""
[row_pts, col_pts]
xdim x ydim rows (points)
[1,1]
[2,1]
[1,2]
[2,2]
2--------->
1--------->^
"""
self.pts = np.array(
np.meshgrid(
np.arange(self.net_dim[0]) + 1,
np.arange(self.net_dim[1]) + 1
)
).reshape(2, np.prod(self.net_dim)).T
if self.topo == "hexagonal":
self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2)
self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1]
def som(self, data, epoch = 100, init_rate = None, init_radius = None, keep_net = False):
"""
:param data: 3d array. processed data set for Online SOM Detector
:param epoch: epoch number
:param init_rate: initial learning rate
:param init_radius: initial radius of BMU neighborhood
:param keep_net: keep every weight matrix path?
"""
num_obs = data.shape[0]
obs_id = np.arange(num_obs)
chose_i = np.empty(1)
node_id = None
hci = None
self.epoch = epoch
if keep_net:
self.net_path = np.empty(
(self.epoch, self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol)
)
# learning rate
if init_rate is None:
init_rate = .1
self.alpha = init_rate
self.initial_learn = init_rate
# radius of neighborhood
if init_radius is None:
init_radius = np.quantile(self.dci, q = 2 / 3, axis = None)
self.sigma = init_radius
self.initial_r = init_radius
# time constant (lambda)
rate_constant = epoch
radius_constant = epoch / np.log(self.sigma)
# distance between nodes
bmu_dist = self.dci[1, :]
rcst_err = np.empty(epoch)
for i in tqdm(range(epoch), desc = "epoch"):
chose_i = int(np.random.choice(obs_id, size = 1))
# BMU - self.bmu
self.find_bmu(data, chose_i)
# reconstruction error - sum of distances from BMU
rcst_err[i] = np.sum([np.square(self.dist_mat(data, j, self.bmu.astype(int))) for j in range(data.shape[0])])
bmu_dist = self.dci[self.bmu.astype(int), :].flatten()
# decay
self.sigma = self.decay(init_radius, i + 1, radius_constant)
self.alpha = self.decay(init_rate, i + 1, rate_constant)
# neighboring nodes (includes BMU)
neighbor_neuron = np.argwhere(bmu_dist <= self.sigma).flatten()
for k in tqdm(range(neighbor_neuron.shape[0]), desc = "updating"):
node_id = neighbor_neuron[k]
hci = self.neighborhood(bmu_dist[node_id], self.sigma)
# update codebook matrices of neighboring nodes
self.net[node_id, :, :] += \
self.alpha * hci * \
(data[chose_i, :, :] - self.net[node_id, :, :]).reshape((self.nrow, self.ncol))
if keep_net:
self.net_path[i, :, :, :] = self.net
self.reconstruction_error = pd.DataFrame({"Epoch": np.arange(self.epoch) + 1, "Reconstruction Error": rcst_err})
def find_bmu(self, data, index):
"""
:param data: Processed data set for SOM.
:param index: Randomly chosen observation id for input matrix among 3d tensor set.
"""
dist_code = np.asarray([self.dist_mat(data, index, j) for j in range(self.net.shape[0])])
self.bmu = np.argmin(dist_code)
def dist_mat(self, data, index, node):
"""
:param data: Processed data set for SOM.
:param index: Randomly chosen observation id for input matrix among 3d tensor set.
:param node: node index
:return: distance between input matrix observation and weight matrix of the node
"""
if self.dist_func == "frobenius":
return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "fro")
elif self.dist_func == "nuclear":
return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "nuc")
elif self.dist_func == "mahalanobis":
x = data[index, :, :] - self.net[node, :, :]
covmat = np.cov(x, rowvar = False)
# spectral decomposition sigma = udu.T
w, v = np.linalg.eigh(covmat)
# inverse = ud^-1u.T
w[w == 0] += .0001
covinv = v.dot(np.diag(1 / w)).dot(v.T)
ss = x.dot(covinv).dot(x.T)
return np.sqrt(np.trace(ss))
elif self.dist_func == "eros":
x = data[index, :, :] - self.net[node, :, :]
covmat = np.cov(x, rowvar = False)
# svd(covariance)
u, s, vh = randomized_svd(covmat, n_components = covmat.shape[1], n_iter = 1, random_state = None)
# normalize eigenvalue
w = s / s.sum()
# distance
ss = np.multiply(vh, w).dot(vh.T)
return np.sqrt(np.trace(ss))
def dist_node(self):
"""
:return: distance matrix of SOM neuron
"""
if self.topo == "hexagonal":
self.dci = distance.cdist(self.pts, self.pts, "euclidean")
elif self.topo == "rectangular":
self.dci = distance.cdist(self.pts, self.pts, "chebyshev")
def decay(self, init, time, time_constant):
"""
:param init: initial value
:param time: t
:param time_constant: lambda
:return: decaying value of alpha or sigma
"""
if self.decay_func == "exponential":
return init * np.exp(-time / time_constant)
elif self.decay_func == "linear":
return init * (1 - time / time_constant)
def neighborhood(self, node_distance, radius):
"""
:param node_distance: Distance between SOM neurons
:param radius: Radius of BMU neighborhood
:return: Neighborhood function hci
"""
if self.neighbor_func == "gaussian":
return np.exp(-node_distance ** 2 / (2 * (radius ** 2)))
elif self.neighbor_func == "bubble":
if node_distance <= radius:
return 1.0
else: | return 0.0 | random_line_split |
|
__init__.py | :
"""
Matrix SOM
Initialize weight matrix
For epoch <- 1 to N do
Choose input matrix observation randomly - i
For k <- 1 to n_node do
compute d(input matrix i, weight matrix k)
end
Best Matching Unit = winning node = node with the smallest distance
For k <- 1 to n_node do
update weight matrix
end
end
Update weight mi(t + 1) = mi(t) + ⍺(t) * hci(t) [x(t) - mi(t)]
Neighborhood function hci(t) = h(dist(rc, ri), t)
rc, ri: location vectors of node c and i
if Gaussian:
hci(t) = exp(-dist^2 / (2 * σ^2(t)))
Radius: σ(t) = σ_0 * exp(-t / ƛ)
Learning rate: ⍺(t) = ⍺_0 * exp(-t / ƛ)
"""
def __init__(
self, data, xdim, ydim, topo = "rectangular", neighbor = "gaussian",
dist = "frobenius", decay = "exponential", seed = None
):
"""
:param data: 3d array. processed data set for Online SOM Detector
:param xdim: Number of x-grid
:param ydim: Number of y-grid
:param topo: Topology of output space - rectangular or hexagonal
:param neighbor: Neighborhood function - gaussian, bubble, or triangular
:param dist: Distance function - frobenius, nuclear, mahalanobis (just form of mahalanobis), or
:param decay: decaying learning rate and radius - exponential or linear
:param seed: Random seed
"""
np.random.seed(seed = seed)
if xdim is None or ydim is None:
xdim = int(np.sqrt(5 * np.sqrt(data.shape[0])))
ydim = xdim
self.net_dim = np.array([xdim, ydim])
self.ncol = data.shape[2]
self.nrow = data.shape[1]
# Initialize codebook matrix
self.init_weight()
# Topology
topo_types = ["rectangular", "hexagonal"]
if topo not in topo_types:
raise ValueError("Invalid topo. Expected one of: %s" % topo_types)
self.topo = topo
self.init_grid()
self.dist_node()
# Neighborhood function
neighbor_types = ["gaussian", "bubble", "triangular"]
if neighbor not in neighbor_types:
raise ValueError("Invalid neighbor. Expected one of: %s" % neighbor_types)
self.neighbor_func = neighbor
# Distance function
dist_type = ["frobenius", "nuclear", "mahalanobis", "eros"]
if dist not in dist_type:
raise ValueError("Invalid dist. Expected one of: %s" % dist_type)
self.dist_func = dist
# Decay
decay_types = ["exponential", "linear"]
if decay not in decay_types:
raise ValueError("Invalid decay. Expected one of: %s" % decay_types)
self.decay_func = decay
# som()
self.epoch = None
self.alpha = None
self.sigma = None
self.initial_learn = None
self.initial_r = None
# find_bmu()
self.bmu = None
# plot
self.reconstruction_error = None
self.dist_normal = None
self.project = None
def init_weight(self):
self.net = np.random.rand(self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol)
def init_grid(self):
"""
[row_pts, col_pts]
xdim x ydim rows (points)
[1,1]
[2,1]
[1,2]
[2,2]
2--------->
1--------->^
"""
self.pts = np.array(
np.meshgrid(
np.arange(self.net_dim[0]) + 1,
np.arange(self.net_dim[1]) + 1
)
).reshape(2, np.prod(self.net_dim)).T
if self.topo == "hexagonal":
self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2)
self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1]
def som(self, data, epoch = 100, init_rate = None, init_radius = None, keep_net = False):
"""
:param data: 3d array. processed data set for Online SOM Detector
:param epoch: epoch number
:param init_rate: initial learning rate
:param init_radius: initial radius of BMU neighborhood
:param keep_net: keep every weight matrix path?
"""
num_obs = data.shape[0]
obs_id = np.arange(num_obs)
chose_i = np.empty(1)
node_id = None
hci = None
self.epoch = epoch
if keep_net:
self.net_path = np.empty(
(self.epoch, self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol)
)
# learning rate
if init_rate is None:
init_rate = .1
self.alpha = init_rate
self.initial_learn = init_rate
# radius of neighborhood
if init_radius is None:
init_radius = np.quantile(self.dci, q = 2 / 3, axis = None)
self.sigma = init_radius
self.initial_r = init_radius
# time constant (lambda)
rate_constant = epoch
radius_constant = epoch / np.log(self.sigma)
# distance between nodes
bmu_dist = self.dci[1, :]
rcst_err = np.empty(epoch)
for i in tqdm(range(epoch), desc = "epoch"):
chose_i = int(np.random.choice(obs_id, size = 1))
# BMU - self.bmu
self.find_bmu(data, chose_i)
# reconstruction error - sum of distances from BMU
rcst_err[i] = np.sum([np.square(self.dist_mat(data, j, self.bmu.astype(int))) for j in range(data.shape[0])])
bmu_dist = self.dci[self.bmu.astype(int), :].flatten()
# decay
self.sigma = self.decay(init_radius, i + 1, radius_constant)
self.alpha = self.decay(init_rate, i + 1, rate_constant)
# neighboring nodes (includes BMU)
neighbor_neuron = np.argwhere(bmu_dist <= self.sigma).flatten()
for k in tqdm(range(neighbor_neuron.shape[0]), desc = "updating"):
node_id = neighbor_neuron[k]
hci = self.neighborhood(bmu_dist[node_id], self.sigma)
# update codebook matrices of neighboring nodes
self.net[node_id, :, :] += \
self.alpha * hci * \
(data[chose_i, :, :] - self.net[node_id, :, :]).reshape((self.nrow, self.ncol))
if keep_net:
self.net_path[i, :, :, :] = self.net
self.reconstruction_error = pd.DataFrame({"Epoch": np.arange(self.epoch) + 1, "Reconstruction Error": rcst_err})
def find_bmu(self, data, index):
"""
:param data: Processed data set for SOM.
:param index: Randomly chosen observation id for input matrix among 3d tensor set.
"""
dist_code = np.asarray([self.dist_mat(data, index, j) for j in range(self.net.shape[0])])
self.bmu = np.argmin(dist_code)
def dist_mat(self, data, index, node):
"""
:param data: Processed data set for SOM.
:param index: Randomly chosen observation id for input matrix among 3d tensor set.
:param node: node index
:return: distance between input matrix observation and weight matrix of the node
"""
if self.dist_func == "frobenius":
return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "fro")
elif self.dist_func == "nuclear":
return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "nuc")
elif self.dist_func == "mahalanobis":
x = data[index, :, :] - self.net[node, :, :]
covmat = np.cov(x, rowvar = False)
# spectral decomposition sigma = udu.T
w, v = np.linalg.eigh(covmat)
# inverse = ud^-1u.T
w[w == 0] += .0001
covinv = v.dot(np.diag(1 / w)).dot(v.T)
ss = x.dot(covinv).dot(x.T)
return np.sqrt(np.trace(ss))
elif self.dist_func == "eros":
x = data[index, :, :] - self.net[node, :, :]
covmat = np.cov(x, rowvar = False)
# svd(covariance)
u | kohonen | identifier_name |
|
__init__.py | Learning rate: ⍺(t) = ⍺_0 * exp(-t / ƛ)
"""
def __init__(
self, data, xdim, ydim, topo = "rectangular", neighbor = "gaussian",
dist = "frobenius", decay = "exponential", seed = None
):
"""
:param data: 3d array. processed data set for Online SOM Detector
:param xdim: Number of x-grid
:param ydim: Number of y-grid
:param topo: Topology of output space - rectangular or hexagonal
:param neighbor: Neighborhood function - gaussian, bubble, or triangular
:param dist: Distance function - frobenius, nuclear, mahalanobis (just form of mahalanobis), or
:param decay: decaying learning rate and radius - exponential or linear
:param seed: Random seed
"""
np.random.seed(seed = seed)
if xdim is None or ydim is None:
xdim = int(np.sqrt(5 * np.sqrt(data.shape[0])))
ydim = xdim
self.net_dim = np.array([xdim, ydim])
self.ncol = data.shape[2]
self.nrow = data.shape[1]
# Initialize codebook matrix
self.init_weight()
# Topology
topo_types = ["rectangular", "hexagonal"]
if topo not in topo_types:
raise ValueError("Invalid topo. Expected one of: %s" % topo_types)
self.topo = topo
self.init_grid()
self.dist_node()
# Neighborhood function
neighbor_types = ["gaussian", "bubble", "triangular"]
if neighbor not in neighbor_types:
raise ValueError("Invalid neighbor. Expected one of: %s" % neighbor_types)
self.neighbor_func = neighbor
# Distance function
dist_type = ["frobenius", "nuclear", "mahalanobis", "eros"]
if dist not in dist_type:
raise ValueError("Invalid dist. Expected one of: %s" % dist_type)
self.dist_func = dist
# Decay
decay_types = ["exponential", "linear"]
if decay not in decay_types:
raise ValueError("Invalid decay. Expected one of: %s" % decay_types)
self.decay_func = decay
# som()
self.epoch = None
self.alpha = None
self.sigma = None
self.initial_learn = None
self.initial_r = None
# find_bmu()
self.bmu = None
# plot
self.reconstruction_error = None
self.dist_normal = None
self.project = None
def init_weight(self):
self.net = np.random.rand(self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol)
def init_grid(self):
"""
[row_pts, col_pts]
xdim x ydim rows (points)
[1,1]
[2,1]
[1,2]
[2,2]
2--------->
1--------->^
"""
self.pts = np.array(
np.meshgrid(
np.arange(self.net_dim[0]) + 1,
np.arange(self.net_dim[1]) + 1
)
).reshape(2, np.prod(self.net_dim)).T
if self.topo == "hexagonal":
self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2)
self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1]
def som(self, data, epoch = 100, init_rate = None, init_radius = None, keep_net = False):
"""
:param data: 3d array. processed data set for Online SOM Detector
:param epoch: epoch number
:param init_rate: initial learning rate
:param init_radius: initial radius of BMU neighborhood
:param keep_net: keep every weight matrix path?
"""
num_obs = data.shape[0]
obs_id = np.arange(num_obs)
chose_i = np.empty(1)
node_id = None
hci = None
self.epoch = epoch
if keep_net:
self.net_path = np.empty(
(self.epoch, self.net_dim[0] * self.net_dim[1], self.nrow, self.ncol)
)
# learning rate
if init_rate is None:
init_rate = .1
self.alpha = init_rate
self.initial_learn = init_rate
# radius of neighborhood
if init_radius is None:
init_radius = np.quantile(self.dci, q = 2 / 3, axis = None)
self.sigma = init_radius
self.initial_r = init_radius
# time constant (lambda)
rate_constant = epoch
radius_constant = epoch / np.log(self.sigma)
# distance between nodes
bmu_dist = self.dci[1, :]
rcst_err = np.empty(epoch)
for i in tqdm(range(epoch), desc = "epoch"):
chose_i = int(np.random.choice(obs_id, size = 1))
# BMU - self.bmu
self.find_bmu(data, chose_i)
# reconstruction error - sum of distances from BMU
rcst_err[i] = np.sum([np.square(self.dist_mat(data, j, self.bmu.astype(int))) for j in range(data.shape[0])])
bmu_dist = self.dci[self.bmu.astype(int), :].flatten()
# decay
self.sigma = self.decay(init_radius, i + 1, radius_constant)
self.alpha = self.decay(init_rate, i + 1, rate_constant)
# neighboring nodes (includes BMU)
neighbor_neuron = np.argwhere(bmu_dist <= self.sigma).flatten()
for k in tqdm(range(neighbor_neuron.shape[0]), desc = "updating"):
node_id = neighbor_neuron[k]
hci = self.neighborhood(bmu_dist[node_id], self.sigma)
# update codebook matrices of neighboring nodes
self.net[node_id, :, :] += \
self.alpha * hci * \
(data[chose_i, :, :] - self.net[node_id, :, :]).reshape((self.nrow, self.ncol))
if keep_net:
self.net_path[i, :, :, :] = self.net
self.reconstruction_error = pd.DataFrame({"Epoch": np.arange(self.epoch) + 1, "Reconstruction Error": rcst_err})
def find_bmu(self, data, index):
"""
:param data: Processed data set for SOM.
:param index: Randomly chosen observation id for input matrix among 3d tensor set.
"""
dist_code = np.asarray([self.dist_mat(data, index, j) for j in range(self.net.shape[0])])
self.bmu = np.argmin(dist_code)
def dist_mat(self, data, index, node):
"""
:param data: Processed data set for SOM.
:param index: Randomly chosen observation id for input matrix among 3d tensor set.
:param node: node index
:return: distance between input matrix observation and weight matrix of the node
"""
if self.dist_func == "frobenius":
return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "fro")
elif self.dist_func == "nuclear":
return np.linalg.norm(data[index, :, :] - self.net[node, :, :], "nuc")
elif self.dist_func == "mahalanobis":
x = data[index, :, :] - self.net[node, :, :]
covmat = np.cov(x, rowvar = False)
# spectral decomposition sigma = udu.T
w, v = np.linalg.eigh(covmat)
# inverse = ud^-1u.T
w[w == 0] += .0001
covinv = v.dot(np.diag(1 / w)).dot(v.T)
ss = x.dot(covinv).dot(x.T)
return np.sqrt(np.trace(ss))
elif self.dist_func == "eros":
x = data[index, :, :] - self.net[node, :, :]
covmat = np.cov(x, rowvar = False)
# svd(covariance)
u, s, vh = | """
Matrix SOM
Initialize weight matrix
For epoch <- 1 to N do
Choose input matrix observation randomly - i
For k <- 1 to n_node do
compute d(input matrix i, weight matrix k)
end
Best Matching Unit = winning node = node with the smallest distance
For k <- 1 to n_node do
update weight matrix
end
end
Update weight mi(t + 1) = mi(t) + ⍺(t) * hci(t) [x(t) - mi(t)]
Neighborhood function hci(t) = h(dist(rc, ri), t)
rc, ri: location vectors of node c and i
if Gaussian:
hci(t) = exp(-dist^2 / (2 * σ^2(t)))
Radius: σ(t) = σ_0 * exp(-t / ƛ) | identifier_body |
|
local_audio_visualizer.js | window.requestAnimationFrame = requestAnimationFrame;
})();
window.onload = function () {
var element = document.getElementById("waves");
dropAndLoad(element, init, "ArrayBuffer");
};
// Reusable dropAndLoad function: it reads a local file dropped on a
// `dropElement` in the DOM in the specified `readFormat`
// (In this case, we want an arrayBuffer)
function dropAndLoad(dropElement, callback, readFormat) {
var readFormat = readFormat || "DataUrl";
dropElement.addEventListener(
"dragover",
function (e) {
e.stopPropagation();
e.preventDefault();
e.dataTransfer.dropEffect = "copy";
},
false
);
dropElement.addEventListener(
"drop",
function (e) {
e.stopPropagation();
e.preventDefault();
loadFile(e.dataTransfer.files[0]);
},
false
);
function loadFile(files) {
var file = files;
var reader = new FileReader();
reader.onload = function (e) {
callback(e.target.result);
};
reader["readAs" + readFormat](file);
}
}
var dogBarkingBuffer = null;
// Fix up prefixing
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();
function loadDogSound(url) {
var request = new XMLHttpRequest();
request.open("GET", url, true);
request.responseType = "arraybuffer";
// Decode asynchronously
request.onload = function () {
init(request.response);
/*context.decodeAudioData(request.response, function(buffer) {
dogBarkingBuffer = buffer;
// playSound(buffer);
init(buffer);
});*/
};
request.send();
}
function playSound(buffer) {
var source = context.createBufferSource(); // creates a sound source
source.buffer = buffer; // tell the source which sound to play
source.connect(context.destination); // connect the source to the context's destination (the speakers)
source.start(0); // play the source now
// note: on older systems, may have to use deprecated noteOn(time);
}
// Once the file is loaded, we start getting our hands dirty.
function init(arrayBuffer) {
// document.getElementById('instructions').innerHTML = 'Loading ...'
// Create a new `audioContext` and its `analyser`
window.audioCtx = new AudioContext();
context = audioCtx;
window.analyser = audioCtx.createAnalyser();
// If a sound is still playing, stop it.
// if (window.source)
// source.noteOff(0)
// Decode the data in our array into an audio buffer
audioCtx.decodeAudioData(arrayBuffer, function (buffer) {
// Use the audio buffer with as our audio source
window.source = audioCtx.createBufferSource();
source.buffer = buffer;
// Connect to the analyser ...
source.connect(analyser);
// and back to the destination, to play the sound after the analysis.
analyser.connect(audioCtx.destination);
// Start playing the buffer.
source.start(0);
// Initialize a visualizer object
var viz = new simpleViz();
// Finally, initialize the visualizer.
new visualizer(viz["update"], analyser);
// document.getElementById('instructions').innerHTML = ''
});
}
// The visualizer object.
// Calls the `visualization` function every time a new frame
// is available.
// Is passed an `analyser` (audioContext analyser).
function visualizer(visualization, analyser) {
var self = this;
this.visualization = visualization;
var last = Date.now();
var loop = function () {
var dt = Date.now() - last;
// we get the current byteFreq data from our analyser
var byteFreq = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(byteFreq);
last = Date.now();
// We might want to use a delta time (`dt`) too for our visualization.
self.visualization(byteFreq, dt);
requestAnimationFrame(loop);
};
requestAnimationFrame(loop);
}
// A simple visualization. Its update function illustrates how to use
// the byte frequency data from an audioContext analyser.
function simpleViz(canvas) {
var self = this;
this.canvas = document.getElementById("canvas");
this.ctx = this.canvas.getContext("2d");
this.copyCtx = document.getElementById("canvas-copy").getContext("2d");
this.ctx.fillStyle = "#fff";
this.barWidth = 4;
this.barGap = 2;
// We get the total number of bars to display
this.bars = Math.floor(this.canvas.width / (this.barWidth + this.barGap));
// This function is launched for each frame, together with the byte frequency data.
this.update = function (byteFreq) {
self.ctx.clearRect(0, 0, self.canvas.width, self.canvas.height);
// We take an element from the byteFreq array for each of the bars.
// Let's pretend our byteFreq contains 20 elements, and we have five bars...
var step = Math.floor(byteFreq.length / self.bars);
// `||||||||||||||||||||` elements
// `| | | | | ` elements we'll use for our bars
for (var i = 0; i < self.bars; i++) {
// Draw each bar
var barHeight = byteFreq[i * step];
self.ctx.fillRect(
i * (self.barWidth + self.barGap),
self.canvas.height - barHeight,
self.barWidth,
barHeight
);
self.copyCtx.clearRect(0, 0, self.canvas.width, self.canvas.height);
self.copyCtx.drawImage(self.canvas, 0, 0);
}
};
}
gainNode = null;
source = null;
function play_sound() {
if (!context.createGain) context.createGain = context.createGainNode;
gainNode = context.createGain();
var source = context.createBufferSource();
source.buffer = BUFFERS.techno;
// Connect source to a gain node
source.connect(gainNode);
// Connect gain node to destination
gainNode.connect(context.destination);
// Start playback in a loop
source.loop = true;
if (!source.start) source.start = source.noteOn;
source.start(0);
source = source;
}
function changeVolume() {
var element = document.getElementById("change_vol");
var volume = element.value;
var fraction = parseInt(element.value) / parseInt(element.max);
gainNode.gain.value = fraction * fraction;
}
function stop_play() {
console.log(source);
if (!source.stop) source.stop = source.noteOff;
source.stop(0);
}
var url =
"https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3";
playerElement = document.querySelector("#player");
function Player(url) {
this.ac = new (window.AudioContext || webkitAudioContext)();
this.url = url;
this.mute = false;
// this.el = el;
this.button = document.getElementById("play_button");
this.volume_btn = document.getElementById("change_vol");
this.mute_btn = document.getElementById("vol_img");
this.rewind = document.getElementById("rewind");
this.rewind30 = document.getElementById("rewind30");
this.forward = document.getElementById("forward");
this.forward30 = document.getElementById("forward30");
this.speed1xB = document.getElementById("speed1x");
this.speed2xB = document.getElementById("speed2x");
this.left_duration = document.getElementById("left-duration");
this.track = document.getElementById("audio-overlay");
this.progress = document.getElementById("seekbar");
// console.log(this.button);
// this.scrubber = el.querySelector('.scrubber');
// this.message = el.querySelector('.message');
// this.message.innerHTML = 'Loading';
this.bindEvents();
this.fetch();
}
Player.prototype.bindEvents = function () {
this.button.addEventListener("click", this.toggle.bind(this));
this.volume_btn.addEventListener("change", this.changeVolume.bind(this));
this.mute_btn.addEventListener("click", this.muteSound.bind(this));
this.rewind.addEventListener("click", this.rewindSound.bind(this));
this.rewind30.addEventListener("click", this.rewind30Sound.bind(this));
this.forward.addEventListener("click", this.forwardSound.bind(this));
this.forward30.addEventListener("click", this.forward30Sound.bind(this));
this.speed1xB.addEventListener("click", this.speed1x.bind(this));
this.speed2xB.addEventListener("click", this.speed2x.bind(this));
// this.scrubber.addEventListener('mousedown', this.onMouseDown.bind(this));
// window.addEventListener('mousemove', this.onDrag.bind(this));
// window.addEventListener('mouseup', this.onMouseUp.bind(this));
};
Player.prototype.fetch = function () {
var xhr = new XMLHttpRequest();
xhr.open("GET", this.url, true);
xhr.responseType = "arraybuffer";
xhr.onload = function () {
this.decode(xhr.response);
// init2(xhr.response);
}.bind(this);
xhr.send();
};
Player.prototype.decode = function (arrayBuffer) {
this.ac.decodeAudioData(
arrayBuffer,
function (audioBuffer) {
// this.message.innerHTML = '';
this.buffer = audioBuffer;
this.play();
this.draw();
}.bind(this)
);
};
Player.prototype.connect = function () {
if (this.playing) {
this.pause | random_line_split |
||
local_audio_visualizer.js | (0); // play the source now
// note: on older systems, may have to use deprecated noteOn(time);
}
// Once the file is loaded, we start getting our hands dirty.
function init(arrayBuffer) {
// document.getElementById('instructions').innerHTML = 'Loading ...'
// Create a new `audioContext` and its `analyser`
window.audioCtx = new AudioContext();
context = audioCtx;
window.analyser = audioCtx.createAnalyser();
// If a sound is still playing, stop it.
// if (window.source)
// source.noteOff(0)
// Decode the data in our array into an audio buffer
audioCtx.decodeAudioData(arrayBuffer, function (buffer) {
// Use the audio buffer with as our audio source
window.source = audioCtx.createBufferSource();
source.buffer = buffer;
// Connect to the analyser ...
source.connect(analyser);
// and back to the destination, to play the sound after the analysis.
analyser.connect(audioCtx.destination);
// Start playing the buffer.
source.start(0);
// Initialize a visualizer object
var viz = new simpleViz();
// Finally, initialize the visualizer.
new visualizer(viz["update"], analyser);
// document.getElementById('instructions').innerHTML = ''
});
}
// The visualizer object.
// Calls the `visualization` function every time a new frame
// is available.
// Is passed an `analyser` (audioContext analyser).
function visualizer(visualization, analyser) {
var self = this;
this.visualization = visualization;
var last = Date.now();
var loop = function () {
var dt = Date.now() - last;
// we get the current byteFreq data from our analyser
var byteFreq = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(byteFreq);
last = Date.now();
// We might want to use a delta time (`dt`) too for our visualization.
self.visualization(byteFreq, dt);
requestAnimationFrame(loop);
};
requestAnimationFrame(loop);
}
// A simple visualization. Its update function illustrates how to use
// the byte frequency data from an audioContext analyser.
function simpleViz(canvas) {
var self = this;
this.canvas = document.getElementById("canvas");
this.ctx = this.canvas.getContext("2d");
this.copyCtx = document.getElementById("canvas-copy").getContext("2d");
this.ctx.fillStyle = "#fff";
this.barWidth = 4;
this.barGap = 2;
// We get the total number of bars to display
this.bars = Math.floor(this.canvas.width / (this.barWidth + this.barGap));
// This function is launched for each frame, together with the byte frequency data.
this.update = function (byteFreq) {
self.ctx.clearRect(0, 0, self.canvas.width, self.canvas.height);
// We take an element from the byteFreq array for each of the bars.
// Let's pretend our byteFreq contains 20 elements, and we have five bars...
var step = Math.floor(byteFreq.length / self.bars);
// `||||||||||||||||||||` elements
// `| | | | | ` elements we'll use for our bars
for (var i = 0; i < self.bars; i++) {
// Draw each bar
var barHeight = byteFreq[i * step];
self.ctx.fillRect(
i * (self.barWidth + self.barGap),
self.canvas.height - barHeight,
self.barWidth,
barHeight
);
self.copyCtx.clearRect(0, 0, self.canvas.width, self.canvas.height);
self.copyCtx.drawImage(self.canvas, 0, 0);
}
};
}
gainNode = null;
source = null;
function play_sound() {
if (!context.createGain) context.createGain = context.createGainNode;
gainNode = context.createGain();
var source = context.createBufferSource();
source.buffer = BUFFERS.techno;
// Connect source to a gain node
source.connect(gainNode);
// Connect gain node to destination
gainNode.connect(context.destination);
// Start playback in a loop
source.loop = true;
if (!source.start) source.start = source.noteOn;
source.start(0);
source = source;
}
function changeVolume() {
var element = document.getElementById("change_vol");
var volume = element.value;
var fraction = parseInt(element.value) / parseInt(element.max);
gainNode.gain.value = fraction * fraction;
}
function stop_play() |
var url =
"https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3";
playerElement = document.querySelector("#player");
function Player(url) {
this.ac = new (window.AudioContext || webkitAudioContext)();
this.url = url;
this.mute = false;
// this.el = el;
this.button = document.getElementById("play_button");
this.volume_btn = document.getElementById("change_vol");
this.mute_btn = document.getElementById("vol_img");
this.rewind = document.getElementById("rewind");
this.rewind30 = document.getElementById("rewind30");
this.forward = document.getElementById("forward");
this.forward30 = document.getElementById("forward30");
this.speed1xB = document.getElementById("speed1x");
this.speed2xB = document.getElementById("speed2x");
this.left_duration = document.getElementById("left-duration");
this.track = document.getElementById("audio-overlay");
this.progress = document.getElementById("seekbar");
// console.log(this.button);
// this.scrubber = el.querySelector('.scrubber');
// this.message = el.querySelector('.message');
// this.message.innerHTML = 'Loading';
this.bindEvents();
this.fetch();
}
Player.prototype.bindEvents = function () {
this.button.addEventListener("click", this.toggle.bind(this));
this.volume_btn.addEventListener("change", this.changeVolume.bind(this));
this.mute_btn.addEventListener("click", this.muteSound.bind(this));
this.rewind.addEventListener("click", this.rewindSound.bind(this));
this.rewind30.addEventListener("click", this.rewind30Sound.bind(this));
this.forward.addEventListener("click", this.forwardSound.bind(this));
this.forward30.addEventListener("click", this.forward30Sound.bind(this));
this.speed1xB.addEventListener("click", this.speed1x.bind(this));
this.speed2xB.addEventListener("click", this.speed2x.bind(this));
// this.scrubber.addEventListener('mousedown', this.onMouseDown.bind(this));
// window.addEventListener('mousemove', this.onDrag.bind(this));
// window.addEventListener('mouseup', this.onMouseUp.bind(this));
};
Player.prototype.fetch = function () {
var xhr = new XMLHttpRequest();
xhr.open("GET", this.url, true);
xhr.responseType = "arraybuffer";
xhr.onload = function () {
this.decode(xhr.response);
// init2(xhr.response);
}.bind(this);
xhr.send();
};
Player.prototype.decode = function (arrayBuffer) {
this.ac.decodeAudioData(
arrayBuffer,
function (audioBuffer) {
// this.message.innerHTML = '';
this.buffer = audioBuffer;
this.play();
this.draw();
}.bind(this)
);
};
Player.prototype.connect = function () {
if (this.playing) {
this.pause();
}
this.source = this.ac.createBufferSource();
this.source.buffer = this.buffer;
this.gainNode = this.ac.createGain();
// this.source.connect();
this.analyser = this.ac.createAnalyser();
this.analyser.smoothingTimeConstant = 0.3;
this.analyser.fftSize = 1024;
// Connect to the analyser ...
this.source.connect(this.analyser);
this.gainNode.connect(this.ac.destination);
// this.source.connect(this.gainNode);
this.gainNode.gain.value = 0.5; // 10 %
// and back to the destination, to play the sound after the analysis.
this.source.connect(this.gainNode);
// this.source.connect(this.ac.destination);
};
Player.prototype.play = function (position) {
this.connect();
this.position = typeof position === "number" ? position : this.position || 0;
this.startTime = this.ac.currentTime - (this.position || 0);
this.source.start(this.ac.currentTime, this.position);
// Initialize a visualizer object
var viz = new simpleViz();
// Finally, initialize the visualizer.
new visualizer(viz["update"], this.analyser);
this.playing = true;
document.getElementById("play_button").src = "images/pause_big.png";
console.log("duration = " + this.buffer.duration);
document.getElementById("duration").innerHTML = formatTime(
this.buffer.duration
);
};
Player.prototype.changeVolume = function (element) {
element = document.getElementById("change_vol");
var volume = element.value;
console.log(volume);
var fraction = parseInt(element.value) / parseInt(element.max);
// console.log(fraction);
// Let's use an x*x curve (x-squared) since simple linear (x) does not
// sound as good.
this.gainNode.gain.value = volume; //fraction * fraction;
console | {
console.log(source);
if (!source.stop) source.stop = source.noteOff;
source.stop(0);
} | identifier_body |
local_audio_visualizer.js | console.log(source);
if (!source.stop) source.stop = source.noteOff;
source.stop(0);
}
var url =
"https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3";
playerElement = document.querySelector("#player");
function Player(url) {
this.ac = new (window.AudioContext || webkitAudioContext)();
this.url = url;
this.mute = false;
// this.el = el;
this.button = document.getElementById("play_button");
this.volume_btn = document.getElementById("change_vol");
this.mute_btn = document.getElementById("vol_img");
this.rewind = document.getElementById("rewind");
this.rewind30 = document.getElementById("rewind30");
this.forward = document.getElementById("forward");
this.forward30 = document.getElementById("forward30");
this.speed1xB = document.getElementById("speed1x");
this.speed2xB = document.getElementById("speed2x");
this.left_duration = document.getElementById("left-duration");
this.track = document.getElementById("audio-overlay");
this.progress = document.getElementById("seekbar");
// console.log(this.button);
// this.scrubber = el.querySelector('.scrubber');
// this.message = el.querySelector('.message');
// this.message.innerHTML = 'Loading';
this.bindEvents();
this.fetch();
}
Player.prototype.bindEvents = function () {
this.button.addEventListener("click", this.toggle.bind(this));
this.volume_btn.addEventListener("change", this.changeVolume.bind(this));
this.mute_btn.addEventListener("click", this.muteSound.bind(this));
this.rewind.addEventListener("click", this.rewindSound.bind(this));
this.rewind30.addEventListener("click", this.rewind30Sound.bind(this));
this.forward.addEventListener("click", this.forwardSound.bind(this));
this.forward30.addEventListener("click", this.forward30Sound.bind(this));
this.speed1xB.addEventListener("click", this.speed1x.bind(this));
this.speed2xB.addEventListener("click", this.speed2x.bind(this));
// this.scrubber.addEventListener('mousedown', this.onMouseDown.bind(this));
// window.addEventListener('mousemove', this.onDrag.bind(this));
// window.addEventListener('mouseup', this.onMouseUp.bind(this));
};
Player.prototype.fetch = function () {
var xhr = new XMLHttpRequest();
xhr.open("GET", this.url, true);
xhr.responseType = "arraybuffer";
xhr.onload = function () {
this.decode(xhr.response);
// init2(xhr.response);
}.bind(this);
xhr.send();
};
Player.prototype.decode = function (arrayBuffer) {
this.ac.decodeAudioData(
arrayBuffer,
function (audioBuffer) {
// this.message.innerHTML = '';
this.buffer = audioBuffer;
this.play();
this.draw();
}.bind(this)
);
};
Player.prototype.connect = function () {
if (this.playing) {
this.pause();
}
this.source = this.ac.createBufferSource();
this.source.buffer = this.buffer;
this.gainNode = this.ac.createGain();
// this.source.connect();
this.analyser = this.ac.createAnalyser();
this.analyser.smoothingTimeConstant = 0.3;
this.analyser.fftSize = 1024;
// Connect to the analyser ...
this.source.connect(this.analyser);
this.gainNode.connect(this.ac.destination);
// this.source.connect(this.gainNode);
this.gainNode.gain.value = 0.5; // 10 %
// and back to the destination, to play the sound after the analysis.
this.source.connect(this.gainNode);
// this.source.connect(this.ac.destination);
};
Player.prototype.play = function (position) {
this.connect();
this.position = typeof position === "number" ? position : this.position || 0;
this.startTime = this.ac.currentTime - (this.position || 0);
this.source.start(this.ac.currentTime, this.position);
// Initialize a visualizer object
var viz = new simpleViz();
// Finally, initialize the visualizer.
new visualizer(viz["update"], this.analyser);
this.playing = true;
document.getElementById("play_button").src = "images/pause_big.png";
console.log("duration = " + this.buffer.duration);
document.getElementById("duration").innerHTML = formatTime(
this.buffer.duration
);
};
Player.prototype.changeVolume = function (element) {
element = document.getElementById("change_vol");
var volume = element.value;
console.log(volume);
var fraction = parseInt(element.value) / parseInt(element.max);
// console.log(fraction);
// Let's use an x*x curve (x-squared) since simple linear (x) does not
// sound as good.
this.gainNode.gain.value = volume; //fraction * fraction;
console.log(this.gainNode);
};
Player.prototype.muteSound = function (element) {
if (!this.mute) {
this.gainNode.gain.value = 0;
document.getElementById("vol_img").src = "images/sound-mute.png";
this.mute = true;
} else {
this.mute = false;
document.getElementById("vol_img").src = "images/sound.png";
var aelement = document.getElementById("change_vol");
var volume = aelement.value;
console.log(volume);
this.gainNode.gain.value = volume; //fraction * fraction;
// console.log(this.gainNode);
}
};
Player.prototype.pause = function () {
if (this.source) {
this.source.stop(0);
this.source = null;
this.position = this.ac.currentTime - this.startTime;
this.playing = false;
document.getElementById("play_button").src = "images/play.png";
}
};
Player.prototype.seek = function (time) {
if (this.playing) {
this.play(time);
} else {
this.position = time;
}
};
Player.prototype.updatePosition = function () {
this.position = this.playing
? this.ac.currentTime - this.startTime
: this.position;
console.log(this.position);
if (this.position >= this.buffer.duration) {
this.position = this.buffer.duration;
this.pause();
}
var baki_time = this.buffer.duration - this.position;
console.log("==" + baki_time);
this.left_duration.innerHTML = formatTime(baki_time);
document.getElementById("duration").innerHTML = formatTime(this.position);
return this.position;
};
Player.prototype.toggle = function () {
if (!this.playing) {
this.play();
document.getElementById("play_button").src = "images/pause_big.png";
} else {
this.pause();
document.getElementById("play_button").src = "images/play.png";
}
};
Player.prototype.rewindSound = function () {
this.position = 0;
this.startTime = this.ac.currentTime - (this.position || 0);
this.source.start(this.ac.currentTime, this.position);
};
Player.prototype.rewind30Sound = function () {
this.position = typeof position === "number" ? position : this.position || 0;
if (this.buffer.duration > 30 && this.position > 30)
this.position = this.position - 30;
else this.position = 0;
this.source.stop(this.ac.currentTime);
this.source.disconnect();
this.play(this.position);
};
Player.prototype.forwardSound = function () {
this.position = this.buffer.duration;
this.startTime = this.ac.currentTime - (this.position || 0);
this.source.start(this.ac.currentTime, this.position);
};
Player.prototype.forward30Sound = function () {
this.position = typeof position === "number" ? position : this.position || 0;
console.log(this.buffer.duration + "buffer");
if (this.buffer.duration > 30) this.position = this.position + 30;
else this.position = 0;
this.source.stop(this.ac.currentTime);
this.source.disconnect();
this.play(this.position);
};
Player.prototype.speed1x = function () {
this.source.playbackRate.value = 1.0;
};
Player.prototype.speed2x = function () {
this.source.playbackRate.value = 2.0;
};
Player.prototype.onMouseDown = function (e) {
this.dragging = true;
this.startX = e.pageX;
this.startLeft = parseInt(this.scrubber.style.left || 0, 10);
};
Player.prototype.onDrag = function (e) {
/*var width, position;
if ( !this.dragging ) {
return;
}
width = this.track.offsetWidth;
position = this.startLeft + ( e.pageX - this.startX );
position = Math.max(Math.min(width, position), 0);
this.scrubber.style.left = position + 'px';*/
};
Player.prototype.onMouseUp = function () {
/*var width, left, time;
if ( this.dragging ) {
width = this.track.offsetWidth;
left = parseInt(this.scrubber.style.left || 0, 10);
time = left / width * this.buffer.duration;
this.seek(time);
this.dragging = false;
}*/
};
Player.prototype.draw = function () {
var progress = this.updatePosition() / this.buffer.duration;
// width = this.progress.value;
if (this.playing) {
this.button.classList.add("fa-pause");
this.button.classList.remove("fa-play");
} else | {
this.button.classList.add("fa-play");
this.button.classList.remove("fa-pause");
} | conditional_block |
|
local_audio_visualizer.js | .start(0); // play the source now
// note: on older systems, may have to use deprecated noteOn(time);
}
// Once the file is loaded, we start getting our hands dirty.
function init(arrayBuffer) {
// document.getElementById('instructions').innerHTML = 'Loading ...'
// Create a new `audioContext` and its `analyser`
window.audioCtx = new AudioContext();
context = audioCtx;
window.analyser = audioCtx.createAnalyser();
// If a sound is still playing, stop it.
// if (window.source)
// source.noteOff(0)
// Decode the data in our array into an audio buffer
audioCtx.decodeAudioData(arrayBuffer, function (buffer) {
// Use the audio buffer with as our audio source
window.source = audioCtx.createBufferSource();
source.buffer = buffer;
// Connect to the analyser ...
source.connect(analyser);
// and back to the destination, to play the sound after the analysis.
analyser.connect(audioCtx.destination);
// Start playing the buffer.
source.start(0);
// Initialize a visualizer object
var viz = new simpleViz();
// Finally, initialize the visualizer.
new visualizer(viz["update"], analyser);
// document.getElementById('instructions').innerHTML = ''
});
}
// The visualizer object.
// Calls the `visualization` function every time a new frame
// is available.
// Is passed an `analyser` (audioContext analyser).
function visualizer(visualization, analyser) {
var self = this;
this.visualization = visualization;
var last = Date.now();
var loop = function () {
var dt = Date.now() - last;
// we get the current byteFreq data from our analyser
var byteFreq = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(byteFreq);
last = Date.now();
// We might want to use a delta time (`dt`) too for our visualization.
self.visualization(byteFreq, dt);
requestAnimationFrame(loop);
};
requestAnimationFrame(loop);
}
// A simple visualization. Its update function illustrates how to use
// the byte frequency data from an audioContext analyser.
function simpleViz(canvas) {
var self = this;
this.canvas = document.getElementById("canvas");
this.ctx = this.canvas.getContext("2d");
this.copyCtx = document.getElementById("canvas-copy").getContext("2d");
this.ctx.fillStyle = "#fff";
this.barWidth = 4;
this.barGap = 2;
// We get the total number of bars to display
this.bars = Math.floor(this.canvas.width / (this.barWidth + this.barGap));
// This function is launched for each frame, together with the byte frequency data.
this.update = function (byteFreq) {
self.ctx.clearRect(0, 0, self.canvas.width, self.canvas.height);
// We take an element from the byteFreq array for each of the bars.
// Let's pretend our byteFreq contains 20 elements, and we have five bars...
var step = Math.floor(byteFreq.length / self.bars);
// `||||||||||||||||||||` elements
// `| | | | | ` elements we'll use for our bars
for (var i = 0; i < self.bars; i++) {
// Draw each bar
var barHeight = byteFreq[i * step];
self.ctx.fillRect(
i * (self.barWidth + self.barGap),
self.canvas.height - barHeight,
self.barWidth,
barHeight
);
self.copyCtx.clearRect(0, 0, self.canvas.width, self.canvas.height);
self.copyCtx.drawImage(self.canvas, 0, 0);
}
};
}
gainNode = null;
source = null;
function play_sound() {
if (!context.createGain) context.createGain = context.createGainNode;
gainNode = context.createGain();
var source = context.createBufferSource();
source.buffer = BUFFERS.techno;
// Connect source to a gain node
source.connect(gainNode);
// Connect gain node to destination
gainNode.connect(context.destination);
// Start playback in a loop
source.loop = true;
if (!source.start) source.start = source.noteOn;
source.start(0);
source = source;
}
function changeVolume() {
var element = document.getElementById("change_vol");
var volume = element.value;
var fraction = parseInt(element.value) / parseInt(element.max);
gainNode.gain.value = fraction * fraction;
}
function stop_play() {
console.log(source);
if (!source.stop) source.stop = source.noteOff;
source.stop(0);
}
var url =
"https://file-examples-com.github.io/uploads/2017/11/file_example_MP3_700KB.mp3";
playerElement = document.querySelector("#player");
function | (url) {
this.ac = new (window.AudioContext || webkitAudioContext)();
this.url = url;
this.mute = false;
// this.el = el;
this.button = document.getElementById("play_button");
this.volume_btn = document.getElementById("change_vol");
this.mute_btn = document.getElementById("vol_img");
this.rewind = document.getElementById("rewind");
this.rewind30 = document.getElementById("rewind30");
this.forward = document.getElementById("forward");
this.forward30 = document.getElementById("forward30");
this.speed1xB = document.getElementById("speed1x");
this.speed2xB = document.getElementById("speed2x");
this.left_duration = document.getElementById("left-duration");
this.track = document.getElementById("audio-overlay");
this.progress = document.getElementById("seekbar");
// console.log(this.button);
// this.scrubber = el.querySelector('.scrubber');
// this.message = el.querySelector('.message');
// this.message.innerHTML = 'Loading';
this.bindEvents();
this.fetch();
}
Player.prototype.bindEvents = function () {
this.button.addEventListener("click", this.toggle.bind(this));
this.volume_btn.addEventListener("change", this.changeVolume.bind(this));
this.mute_btn.addEventListener("click", this.muteSound.bind(this));
this.rewind.addEventListener("click", this.rewindSound.bind(this));
this.rewind30.addEventListener("click", this.rewind30Sound.bind(this));
this.forward.addEventListener("click", this.forwardSound.bind(this));
this.forward30.addEventListener("click", this.forward30Sound.bind(this));
this.speed1xB.addEventListener("click", this.speed1x.bind(this));
this.speed2xB.addEventListener("click", this.speed2x.bind(this));
// this.scrubber.addEventListener('mousedown', this.onMouseDown.bind(this));
// window.addEventListener('mousemove', this.onDrag.bind(this));
// window.addEventListener('mouseup', this.onMouseUp.bind(this));
};
Player.prototype.fetch = function () {
var xhr = new XMLHttpRequest();
xhr.open("GET", this.url, true);
xhr.responseType = "arraybuffer";
xhr.onload = function () {
this.decode(xhr.response);
// init2(xhr.response);
}.bind(this);
xhr.send();
};
Player.prototype.decode = function (arrayBuffer) {
this.ac.decodeAudioData(
arrayBuffer,
function (audioBuffer) {
// this.message.innerHTML = '';
this.buffer = audioBuffer;
this.play();
this.draw();
}.bind(this)
);
};
Player.prototype.connect = function () {
if (this.playing) {
this.pause();
}
this.source = this.ac.createBufferSource();
this.source.buffer = this.buffer;
this.gainNode = this.ac.createGain();
// this.source.connect();
this.analyser = this.ac.createAnalyser();
this.analyser.smoothingTimeConstant = 0.3;
this.analyser.fftSize = 1024;
// Connect to the analyser ...
this.source.connect(this.analyser);
this.gainNode.connect(this.ac.destination);
// this.source.connect(this.gainNode);
this.gainNode.gain.value = 0.5; // 10 %
// and back to the destination, to play the sound after the analysis.
this.source.connect(this.gainNode);
// this.source.connect(this.ac.destination);
};
Player.prototype.play = function (position) {
this.connect();
this.position = typeof position === "number" ? position : this.position || 0;
this.startTime = this.ac.currentTime - (this.position || 0);
this.source.start(this.ac.currentTime, this.position);
// Initialize a visualizer object
var viz = new simpleViz();
// Finally, initialize the visualizer.
new visualizer(viz["update"], this.analyser);
this.playing = true;
document.getElementById("play_button").src = "images/pause_big.png";
console.log("duration = " + this.buffer.duration);
document.getElementById("duration").innerHTML = formatTime(
this.buffer.duration
);
};
Player.prototype.changeVolume = function (element) {
element = document.getElementById("change_vol");
var volume = element.value;
console.log(volume);
var fraction = parseInt(element.value) / parseInt(element.max);
// console.log(fraction);
// Let's use an x*x curve (x-squared) since simple linear (x) does not
// sound as good.
this.gainNode.gain.value = volume; //fraction * fraction;
console | Player | identifier_name |
parser.rs | "-g" | "-G" | "-h" | "-k" | "-L" | "-N"
| "-O" | "-p" | "-r" | "-s" | "-S" | "-t" | "-u" | "-w" | "-x" => {
Self::UnaryOp(UnaryOperator::FiletestOp(s))
}
_ => Self::Literal(s),
},
None => Self::Literal(s),
},
None => Self::None,
}
}
/// Convert this Symbol into a Symbol::Literal, useful for cases where
/// test treats an operator as a string operand (test has no reserved
/// words).
///
/// # Panics
///
/// Panics if `self` is Symbol::None
fn into_literal(self) -> Self {
Self::Literal(match self {
Self::LParen => OsString::from("("),
Self::Bang => OsString::from("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => s,
Self::None => panic!(),
})
}
}
/// Implement Display trait for Symbol to make it easier to print useful errors.
/// We will try to match the format in which the symbol appears in the input.
impl std::fmt::Display for Symbol {
/// Format a Symbol for printing
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match &self {
Self::LParen => OsStr::new("("),
Self::Bang => OsStr::new("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => OsStr::new(s),
Self::None => OsStr::new("None"),
};
write!(f, "{}", s.quote())
}
}
/// Recursive descent parser for test, which converts a list of OsStrings
/// (typically command line arguments) into a stack of Symbols in postfix
/// order.
///
/// Grammar:
///
/// EXPR → TERM | EXPR BOOLOP EXPR
/// TERM → ( EXPR )
/// TERM → ! EXPR
/// TERM → UOP str
/// UOP → STRLEN | FILETEST
/// TERM → str OP str
/// TERM → str | 𝜖
/// OP → STRINGOP | INTOP | FILEOP
/// STRINGOP → = | == | !=
/// INTOP → -eq | -ge | -gt | -le | -lt | -ne
/// FILEOP → -ef | -nt | -ot
/// STRLEN → -n | -z
/// FILETEST → -b | -c | -d | -e | -f | -g | -G | -h | -k | -L | -N | -O | -p |
/// -r | -s | -S | -t | -u | -w | -x
/// BOOLOP → -a | -o
///
#[derive(Debug)]
struct Parser {
tokens: Peekable<std::vec::IntoIter<OsString>>,
pub stack: Vec<Symbol>,
}
impl Parser {
/// Construct a new Parser from a `Vec<OsString>` of tokens.
fn new(tokens: Vec<OsString>) -> Self {
Self {
tokens: tokens.into_iter().peekable(),
stack: vec![],
}
}
/// Fetch the next token from the input stream as a Symbol.
fn next_token(&mut self) -> Symbol {
Symbol::new(self.tokens.next())
}
/// Consume the next token & verify that it matches the provided value.
fn expect(&mut self, value: &str) -> ParseResult<()> {
match self.next_token() {
Symbol::Literal(s) if s == value => Ok(()),
_ => Err(ParseError::Expected(value.quote().to_string())),
}
}
/// Peek at the next token from the input stream, returning it as a Symbol.
/// The stream is unchanged and will return the same Symbol on subsequent
/// calls to `next()` or `peek()`.
fn peek(&mut self) -> Symbol {
Symbol::new(self.tokens.peek().map(|s| s.to_os_string()))
}
/// Test if the next token in the stream is a BOOLOP (-a or -o), without
/// removing the token from the stream.
fn peek_is_boolop(&mut self) -> bool {
matches!(self.peek(), Symbol::BoolOp(_))
}
/// Parse an expression.
///
/// EXPR → TERM | EXPR BOOLOP EXPR
fn expr(&mut self) -> ParseResult<()> {
if !self.peek_is_boolop() {
self.term()?;
}
self.maybe_boolop()?;
Ok(())
}
/// Parse a term token and possible subsequent symbols: "(", "!", UOP,
/// literal, or None.
fn term(&mut self) -> ParseResult<()> {
let symbol = self.next_token();
match symbol {
Symbol::LParen => self.lparen()?,
Symbol::Bang => self.bang()?,
Symbol::UnaryOp(_) => self.uop(symbol),
Symbol::None => self.stack.push(symbol),
literal => self.literal(literal)?,
}
Ok(())
}
/// Parse a (possibly) parenthesized expression.
///
/// test has no reserved keywords, so "(" will be interpreted as a literal
/// in certain cases:
///
/// * when found at the end of the token stream
/// * when followed by a binary operator that is not _itself_ interpreted | /// as a literal
///
fn lparen(&mut self) -> ParseResult<()> {
// Look ahead up to 3 tokens to determine if the lparen is being used
// as a grouping operator or should be treated as a literal string
let peek3: Vec<Symbol> = self
.tokens
.clone()
.take(3)
.map(|token| Symbol::new(Some(token)))
.collect();
match peek3.as_slice() {
// case 1: lparen is a literal when followed by nothing
[] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 2: error if end of stream is `( <any_token>`
[symbol] => Err(ParseError::MissingArgument(format!("{symbol}"))),
// case 3: `( uop <any_token> )` → parenthesized unary operation;
// this case ensures we don’t get confused by `( -f ) )`
// or `( -f ( )`, for example
[Symbol::UnaryOp(_), _, Symbol::Literal(s)] if s == ")" => {
let symbol = self.next_token();
self.uop(symbol);
self.expect(")")?;
Ok(())
}
// case 4: binary comparison of literal lparen, e.g. `( != )`
[Symbol::Op(_), Symbol::Literal(s)] | [Symbol::Op(_), Symbol::Literal(s), _]
if s == ")" =>
{
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 5: after handling the prior cases, any single token inside
// parentheses is a literal, e.g. `( -f )`
[_, Symbol::Literal(s)] | [_, Symbol::Literal(s), _] if s == ")" => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 6: two binary ops in a row, treat the first op as a literal
[Symbol::Op(_), Symbol::Op(_), _] => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 7: if earlier cases didn’t match, `( op <any_token>…`
// indicates binary comparison of literal lparen with
// anything _except_ ")" (case 4)
[Symbol::Op(_), _] | [Symbol::Op(_), _, _] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// Otherwise, lparen indicates the start of a parenthesized
// expression
_ => {
self.expr()?;
self.expect(")")?;
Ok(())
}
}
}
/// Parse a (possibly) negated expression.
///
/// Example cases:
///
/// * `! =`: negate the result of the implicit string length test of `=`
/// * `! = foo`: compare the literal strings `!` and `foo`
/// * `! = = str`: negate comparison of literal `=` and `str`
/// * `!`: bang followed by nothing is literal
/// * | random_line_split |
|
parser.rs | "-g" | "-G" | "-h" | "-k" | "-L" | "-N"
| "-O" | "-p" | "-r" | "-s" | "-S" | "-t" | "-u" | "-w" | "-x" => {
Self::UnaryOp(UnaryOperator::FiletestOp(s))
}
_ => Self::Literal(s),
},
None => Self::Literal(s),
},
None => Self::None,
}
}
/// Convert this Symbol into a Symbol::Literal, useful for cases where
/// test treats an operator as a string operand (test has no reserved
/// words).
///
/// # Panics
///
/// Panics if `self` is Symbol::None
fn into_literal(self) -> Self {
Self::Literal(match self {
Self::LParen => OsString::from("("),
Self::Bang => OsString::from("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => s,
Self::None => panic!(),
})
}
}
/// Implement Display trait for Symbol to make it easier to print useful errors.
/// We will try to match the format in which the symbol appears in the input.
impl std::fmt::Display for Symbol {
/// Format a Symbol for printing
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match &self {
Self::LParen => OsStr::new("("),
Self::Bang => OsStr::new("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => OsStr::new(s),
Self::None => OsStr::new("None"),
};
write!(f, "{}", s.quote())
}
}
/// Recursive descent parser for test, which converts a list of OsStrings
/// (typically command line arguments) into a stack of Symbols in postfix
/// order.
///
/// Grammar:
///
/// EXPR → TERM | EXPR BOOLOP EXPR
/// TERM → ( EXPR )
/// TERM → ! EXPR
/// TERM → UOP str
/// UOP → STRLEN | FILETEST
/// TERM → str OP str
/// TERM → str | 𝜖
/// OP → STRINGOP | INTOP | FILEOP
/// STRINGOP → = | == | !=
/// INTOP → -eq | -ge | -gt | -le | -lt | -ne
/// FILEOP → -ef | -nt | -ot
/// STRLEN → -n | -z
/// FILETEST → -b | -c | -d | -e | -f | -g | -G | -h | -k | -L | -N | -O | -p |
/// -r | -s | -S | -t | -u | -w | -x
/// BOOLOP → -a | -o
///
#[derive(Debug)]
struct Parser {
tokens: Peekable<std::vec::IntoIter<OsString>>,
pub stack: Vec<Symbol>,
}
impl Parser {
/// Construct a new Parser from a `Vec<OsString>` of tokens.
fn new(tokens: Vec<OsString>) -> Self {
Self {
tokens: tokens.into_iter().peekable(),
stack: vec![],
}
}
/// Fetch the next token from the input stream as a Symbol.
fn next_token(&mut self) -> Symbol {
Symbol::new(self.tokens.next())
}
/// Consume the next token & verify that it matches the provided value.
fn expect(&mut self, value: &str) -> ParseResult<()> {
match self.next_token() {
Symbol::Literal(s) if s == value => Ok(()),
_ => Err(ParseError::Expected(value.quote().to_string())),
}
}
/// Peek at the next token from the input stream, returning it as a Symbol.
/// The stream is unchanged and will return the same Symbol on subsequent
/// calls to `next()` or `peek()`.
fn peek(&mut self) -> Symbol {
Symbol::new(self.tokens.peek().map(|s| s.to_os_string()))
}
/// Test if the next token in the stream is a BOOLOP (-a or -o), without
/// removing the token from the stream.
fn peek_is_boolop(&mut self) -> bool {
matches!(self.peek(), Symbol::BoolOp(_))
}
/// Parse an expression.
///
/// EXPR → TERM | EXPR BOOLOP EXPR
fn expr(&mut self) -> ParseResult<()> {
if !self.peek_is_boolop() {
self.term()?;
}
self.maybe_boolop()?;
Ok(())
}
/// Parse a term token and possible subsequent symbols: "(", "!", UOP,
/// literal, or None.
fn term(&mut self) -> ParseResult<()> {
let symbol = self.next_t | thesized expression.
///
/// test has no reserved keywords, so "(" will be interpreted as a literal
/// in certain cases:
///
/// * when found at the end of the token stream
/// * when followed by a binary operator that is not _itself_ interpreted
/// as a literal
///
fn lparen(&mut self) -> ParseResult<()> {
// Look ahead up to 3 tokens to determine if the lparen is being used
// as a grouping operator or should be treated as a literal string
let peek3: Vec<Symbol> = self
.tokens
.clone()
.take(3)
.map(|token| Symbol::new(Some(token)))
.collect();
match peek3.as_slice() {
// case 1: lparen is a literal when followed by nothing
[] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 2: error if end of stream is `( <any_token>`
[symbol] => Err(ParseError::MissingArgument(format!("{symbol}"))),
// case 3: `( uop <any_token> )` → parenthesized unary operation;
// this case ensures we don’t get confused by `( -f ) )`
// or `( -f ( )`, for example
[Symbol::UnaryOp(_), _, Symbol::Literal(s)] if s == ")" => {
let symbol = self.next_token();
self.uop(symbol);
self.expect(")")?;
Ok(())
}
// case 4: binary comparison of literal lparen, e.g. `( != )`
[Symbol::Op(_), Symbol::Literal(s)] | [Symbol::Op(_), Symbol::Literal(s), _]
if s == ")" =>
{
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 5: after handling the prior cases, any single token inside
// parentheses is a literal, e.g. `( -f )`
[_, Symbol::Literal(s)] | [_, Symbol::Literal(s), _] if s == ")" => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 6: two binary ops in a row, treat the first op as a literal
[Symbol::Op(_), Symbol::Op(_), _] => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 7: if earlier cases didn’t match, `( op <any_token>…`
// indicates binary comparison of literal lparen with
// anything _except_ ")" (case 4)
[Symbol::Op(_), _] | [Symbol::Op(_), _, _] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// Otherwise, lparen indicates the start of a parenthesized
// expression
_ => {
self.expr()?;
self.expect(")")?;
Ok(())
}
}
}
/// Parse a (possibly) negated expression.
///
/// Example cases:
///
/// * `! =`: negate the result of the implicit string length test of `=`
/// * `! = foo`: compare the literal strings `!` and `foo`
/// * `! = = str`: negate comparison of literal `=` and `str`
/// * `!`: bang followed by nothing is literal
| oken();
match symbol {
Symbol::LParen => self.lparen()?,
Symbol::Bang => self.bang()?,
Symbol::UnaryOp(_) => self.uop(symbol),
Symbol::None => self.stack.push(symbol),
literal => self.literal(literal)?,
}
Ok(())
}
/// Parse a (possibly) paren | identifier_body |
parser.rs | "-g" | "-G" | "-h" | "-k" | "-L" | "-N"
| "-O" | "-p" | "-r" | "-s" | "-S" | "-t" | "-u" | "-w" | "-x" => {
Self::UnaryOp(UnaryOperator::FiletestOp(s))
}
_ => Self::Literal(s),
},
None => Self::Literal(s),
},
None => Self::None,
}
}
/// Convert this Symbol into a Symbol::Literal, useful for cases where
/// test treats an operator as a string operand (test has no reserved
/// words).
///
/// # Panics
///
/// Panics if `self` is Symbol::None
fn into_literal(self) -> Self {
Self::Literal(match self {
Self::LParen => OsString::from("("),
Self::Bang => OsString::from("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => s,
Self::None => panic!(),
})
}
}
/// Implement Display trait for Symbol to make it easier to print useful errors.
/// We will try to match the format in which the symbol appears in the input.
impl std::fmt::Display for Symbol {
/// Format a Symbol for printing
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match &self {
Self::LParen => OsStr::new("("),
Self::Bang => OsStr::new("!"),
Self::BoolOp(s)
| Self::Literal(s)
| Self::Op(Operator::String(s))
| Self::Op(Operator::Int(s))
| Self::Op(Operator::File(s))
| Self::UnaryOp(UnaryOperator::StrlenOp(s))
| Self::UnaryOp(UnaryOperator::FiletestOp(s)) => OsStr::new(s),
Self::None => OsStr::new("None"),
};
write!(f, "{}", s.quote())
}
}
/// Recursive descent parser for test, which converts a list of OsStrings
/// (typically command line arguments) into a stack of Symbols in postfix
/// order.
///
/// Grammar:
///
/// EXPR → TERM | EXPR BOOLOP EXPR
/// TERM → ( EXPR )
/// TERM → ! EXPR
/// TERM → UOP str
/// UOP → STRLEN | FILETEST
/// TERM → str OP str
/// TERM → str | 𝜖
/// OP → STRINGOP | INTOP | FILEOP
/// STRINGOP → = | == | !=
/// INTOP → -eq | -ge | -gt | -le | -lt | -ne
/// FILEOP → -ef | -nt | -ot
/// STRLEN → -n | -z
/// FILETEST → -b | -c | -d | -e | -f | -g | -G | -h | -k | -L | -N | -O | -p |
/// -r | -s | -S | -t | -u | -w | -x
/// BOOLOP → -a | -o
///
#[derive(Debug)]
struct Parser {
tokens: Peekable<std::vec::IntoIter<OsString>>,
pub stack: Vec<Symbol>,
}
impl Parser {
/// Construct a new Parser from a `Vec<OsString>` of tokens.
fn new(tokens: Vec<OsString>) -> Self {
Self {
tokens: tokens.into_iter().peekable(),
stack: vec![],
}
}
/// Fetch the next token from the input stream as a Symbol.
fn next_token(&mut self) -> Symbol {
Symbol::new(self.tokens.next())
}
/// Consume the next token & verify that it matches the provided value.
fn expect(&mut self, value: &str) -> ParseResult<()> {
match self.next_token() {
Symbol::Literal(s) if s == value => Ok(()),
_ => Err(ParseError::Expected(value.quote().to_string())),
}
}
/// Peek at the next token from the input stream, returning it as a Symbol.
/// The stream is unchanged and will return the same Symbol on subsequent
/// calls to `next()` or `peek()`.
fn peek(&mut self) -> Symbol {
Symbol::new(self.tokens.peek().map(|s| s.to_os_string()))
}
/// Test if the next token in the stream is a BOOLOP (-a or -o), without
/// removing the token from the stream.
fn peek_is_boolop(&mut self) -> bool {
matches!(self.peek(), Symbol::BoolOp(_))
}
/// Parse an expression.
///
/// EXPR → TERM | EXPR BOOLOP EXPR
fn expr(&mut self) -> ParseResult<()> {
if !self.peek_is_boolop() {
self.term()?;
}
self.maybe_boolop()?;
Ok(())
}
/// Parse a term token and possible subsequent symbols: "(", "!", UOP,
/// literal, or None.
fn term(&mut self) -> ParseResult<()> | let symbol = self.next_token();
match symbol {
Symbol::LParen => self.lparen()?,
Symbol::Bang => self.bang()?,
Symbol::UnaryOp(_) => self.uop(symbol),
Symbol::None => self.stack.push(symbol),
literal => self.literal(literal)?,
}
Ok(())
}
/// Parse a (possibly) parenthesized expression.
///
/// test has no reserved keywords, so "(" will be interpreted as a literal
/// in certain cases:
///
/// * when found at the end of the token stream
/// * when followed by a binary operator that is not _itself_ interpreted
/// as a literal
///
fn lparen(&mut self) -> ParseResult<()> {
// Look ahead up to 3 tokens to determine if the lparen is being used
// as a grouping operator or should be treated as a literal string
let peek3: Vec<Symbol> = self
.tokens
.clone()
.take(3)
.map(|token| Symbol::new(Some(token)))
.collect();
match peek3.as_slice() {
// case 1: lparen is a literal when followed by nothing
[] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 2: error if end of stream is `( <any_token>`
[symbol] => Err(ParseError::MissingArgument(format!("{symbol}"))),
// case 3: `( uop <any_token> )` → parenthesized unary operation;
// this case ensures we don’t get confused by `( -f ) )`
// or `( -f ( )`, for example
[Symbol::UnaryOp(_), _, Symbol::Literal(s)] if s == ")" => {
let symbol = self.next_token();
self.uop(symbol);
self.expect(")")?;
Ok(())
}
// case 4: binary comparison of literal lparen, e.g. `( != )`
[Symbol::Op(_), Symbol::Literal(s)] | [Symbol::Op(_), Symbol::Literal(s), _]
if s == ")" =>
{
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// case 5: after handling the prior cases, any single token inside
// parentheses is a literal, e.g. `( -f )`
[_, Symbol::Literal(s)] | [_, Symbol::Literal(s), _] if s == ")" => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 6: two binary ops in a row, treat the first op as a literal
[Symbol::Op(_), Symbol::Op(_), _] => {
let symbol = self.next_token();
self.literal(symbol)?;
self.expect(")")?;
Ok(())
}
// case 7: if earlier cases didn’t match, `( op <any_token>…`
// indicates binary comparison of literal lparen with
// anything _except_ ")" (case 4)
[Symbol::Op(_), _] | [Symbol::Op(_), _, _] => {
self.literal(Symbol::LParen.into_literal())?;
Ok(())
}
// Otherwise, lparen indicates the start of a parenthesized
// expression
_ => {
self.expr()?;
self.expect(")")?;
Ok(())
}
}
}
/// Parse a (possibly) negated expression.
///
/// Example cases:
///
/// * `! =`: negate the result of the implicit string length test of `=`
/// * `! = foo`: compare the literal strings `!` and `foo`
/// * `! = = str`: negate comparison of literal `=` and `str`
/// * `!`: bang followed by nothing is literal
| {
| identifier_name |
builder.go | (hostRootMount string) BuilderOption {
return func(b *builder) error {
log.Infof("Host root filesystem will be remapped to %s", hostRootMount)
b.pathMapper = &pathMapper{
hostMountPath: hostRootMount,
}
return nil
}
}
// WithDocker configures using docker
func WithDocker() BuilderOption {
return func(b *builder) error {
cli, err := newDockerClient()
if err == nil {
b.dockerClient = cli
}
return err
}
}
// WithDockerClient configurs specific docker client
func WithDockerClient(cli env.DockerClient) BuilderOption {
return func(b *builder) error {
b.dockerClient = cli
return nil
}
}
// WithAudit configures using audit checks
func WithAudit() BuilderOption {
return func(b *builder) error {
cli, err := newAuditClient()
if err == nil {
b.auditClient = cli
}
return err
}
}
// WithAuditClient configures using specific audit client
func WithAuditClient(cli env.AuditClient) BuilderOption {
return func(b *builder) error {
b.auditClient = cli
return nil
}
}
// WithKubernetesClient allows specific Kubernetes client
func WithKubernetesClient(cli env.KubeClient) BuilderOption {
return func(b *builder) error {
b.kubeClient = cli
return nil
}
}
// WithIsLeader allows check runner to know if its a leader instance or not (DCA)
func WithIsLeader(isLeader func() bool) BuilderOption {
return func(b *builder) error {
b.isLeaderFunc = isLeader
return nil
}
}
// SuiteMatcher checks if a compliance suite is included
type SuiteMatcher func(*compliance.SuiteMeta) bool
// WithMatchSuite configures builder to use a suite matcher
func WithMatchSuite(matcher SuiteMatcher) BuilderOption {
return func(b *builder) error {
b.suiteMatcher = matcher
return nil
}
}
// RuleMatcher checks if a compliance rule is included
type RuleMatcher func(*compliance.Rule) bool
// WithMatchRule configures builder to use a suite matcher
func WithMatchRule(matcher RuleMatcher) BuilderOption {
return func(b *builder) error {
b.ruleMatcher = matcher
return nil
}
}
// MayFail configures a builder option to succeed on failures and logs an error
func MayFail(o BuilderOption) BuilderOption {
return func(b *builder) error {
if err := o(b); err != nil {
log.Warnf("Ignoring builder initialization failure: %v", err)
}
return nil
}
}
// WithNodeLabels configures a builder to use specified Kubernetes node labels
func WithNodeLabels(nodeLabels map[string]string) BuilderOption {
return func(b *builder) error {
b.nodeLabels = map[string]string{}
for k, v := range nodeLabels {
k, v := hostinfo.LabelPreprocessor(k, v)
b.nodeLabels[k] = v
}
return nil
}
}
// IsFramework matches a compliance suite by the name of the framework
func IsFramework(framework string) SuiteMatcher {
return func(s *compliance.SuiteMeta) bool {
return s.Framework == framework
}
}
// IsRuleID matches a compliance rule by ID
func IsRuleID(ruleID string) RuleMatcher {
return func(r *compliance.Rule) bool {
return r.ID == ruleID
}
}
// NewBuilder constructs a check builder
func NewBuilder(reporter event.Reporter, options ...BuilderOption) (Builder, error) {
b := &builder{
reporter: reporter,
checkInterval: 20 * time.Minute,
etcGroupPath: "/etc/group",
status: newStatus(),
}
for _, o := range options {
if err := o(b); err != nil {
return nil, err
}
}
b.valueCache = cache.New(
b.checkInterval/2,
b.checkInterval/4,
)
return b, nil
}
type builder struct {
checkInterval time.Duration
reporter event.Reporter
valueCache *cache.Cache
hostname string
pathMapper *pathMapper
etcGroupPath string
nodeLabels map[string]string
suiteMatcher SuiteMatcher
ruleMatcher RuleMatcher
dockerClient env.DockerClient
auditClient env.AuditClient
kubeClient env.KubeClient
isLeaderFunc func() bool
status *status
}
func (b *builder) Close() error {
if b.dockerClient != nil {
if err := b.dockerClient.Close(); err != nil {
return err
}
}
if b.auditClient != nil {
if err := b.auditClient.Close(); err != nil {
return err
}
}
return nil
}
func (b *builder) ChecksFromFile(file string, onCheck compliance.CheckVisitor) error {
suite, err := compliance.ParseSuite(file)
if err != nil {
return err
}
if b.suiteMatcher != nil {
if b.suiteMatcher(&suite.Meta) {
log.Infof("%s/%s: matched suite in %s", suite.Meta.Name, suite.Meta.Version, file)
} else {
log.Tracef("%s/%s: skipped suite in %s", suite.Meta.Name, suite.Meta.Version, file)
return nil
}
}
log.Infof("%s/%s: loading suite from %s", suite.Meta.Name, suite.Meta.Version, file)
matchedCount := 0
for _, r := range suite.Rules {
if b.ruleMatcher != nil {
if b.ruleMatcher(&r) {
log.Infof("%s/%s: matched rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file)
} else {
log.Tracef("%s/%s: skipped rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file)
continue
}
}
matchedCount++
if len(r.Resources) == 0 {
log.Infof("%s/%s: skipped rule %s - no configured resources", suite.Meta.Name, suite.Meta.Version, r.ID)
continue
}
log.Debugf("%s/%s: loading rule %s", suite.Meta.Name, suite.Meta.Version, r.ID)
check, err := b.checkFromRule(&suite.Meta, &r)
if err != nil {
if err != ErrRuleDoesNotApply {
log.Warnf("%s/%s: failed to load rule %s: %v", suite.Meta.Name, suite.Meta.Version, r.ID, err)
}
log.Infof("%s/%s: skipped rule %s - does not apply to this system", suite.Meta.Name, suite.Meta.Version, r.ID)
}
if b.status != nil {
b.status.addCheck(&compliance.CheckStatus{
RuleID: r.ID,
Description: r.Description,
Name: compliance.CheckName(r.ID, r.Description),
Framework: suite.Meta.Framework,
Source: suite.Meta.Source,
Version: suite.Meta.Version,
InitError: err,
})
}
ok := onCheck(&r, check, err)
if !ok {
log.Infof("%s/%s: stopping rule enumeration", suite.Meta.Name, suite.Meta.Version)
return err
}
}
if b.ruleMatcher != nil && matchedCount == 0 {
log.Infof("%s/%s: no rules matched", suite.Meta.Name, suite.Meta.Version)
}
return nil
}
func (b *builder) GetCheckStatus() compliance.CheckStatusList {
if b.status != nil {
return b.status.getChecksStatus()
}
return compliance.CheckStatusList{}
}
func (b *builder) checkFromRule(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.Check, error) {
ruleScope, err := getRuleScope(meta, rule)
if err != nil {
return nil, err
}
eligible, err := b.hostMatcher(ruleScope, rule)
if err != nil {
return nil, err
}
if !eligible {
log.Debugf("rule %s/%s discarded by hostMatcher", meta.Framework, rule.ID)
return nil, ErrRuleDoesNotApply
}
return b.newCheck(meta, ruleScope, rule)
}
func getRuleScope(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.RuleScope, error) {
switch {
case rule.Scope.Includes(compliance.DockerScope):
return compliance.DockerScope, nil
case rule.Scope.Includes(compliance.KubernetesNodeScope):
return compliance.KubernetesNodeScope, nil
case rule.Scope.Includes(compliance.KubernetesClusterScope):
return compliance.KubernetesClusterScope, nil
default:
return "", ErrRuleScopeNotSupported
}
}
func (b *builder) hostMatcher(scope compliance.RuleScope, rule *compliance.Rule) (bool, error) {
switch scope {
case compliance.DockerScope:
if b.dockerClient == nil {
log.Infof("rule %s skipped - not running in a docker environment", rule.ID)
return false, nil
}
case compliance.KubernetesClusterScope:
if b.kubeClient == nil {
log.Infof("rule %s skipped - not running as Cluster Agent", rule.ID)
return false, nil | WithHostRootMount | identifier_name |
|
builder.go | b.dockerClient = cli
return nil
}
}
// WithAudit configures using audit checks
func WithAudit() BuilderOption {
return func(b *builder) error {
cli, err := newAuditClient()
if err == nil {
b.auditClient = cli
}
return err
}
}
// WithAuditClient configures using specific audit client
func WithAuditClient(cli env.AuditClient) BuilderOption {
return func(b *builder) error {
b.auditClient = cli
return nil
}
}
// WithKubernetesClient allows specific Kubernetes client
func WithKubernetesClient(cli env.KubeClient) BuilderOption {
return func(b *builder) error {
b.kubeClient = cli
return nil
}
}
// WithIsLeader allows check runner to know if its a leader instance or not (DCA)
func WithIsLeader(isLeader func() bool) BuilderOption {
return func(b *builder) error {
b.isLeaderFunc = isLeader
return nil
}
}
// SuiteMatcher checks if a compliance suite is included
type SuiteMatcher func(*compliance.SuiteMeta) bool
// WithMatchSuite configures builder to use a suite matcher
func WithMatchSuite(matcher SuiteMatcher) BuilderOption {
return func(b *builder) error {
b.suiteMatcher = matcher
return nil
}
}
// RuleMatcher checks if a compliance rule is included
type RuleMatcher func(*compliance.Rule) bool
// WithMatchRule configures builder to use a suite matcher
func WithMatchRule(matcher RuleMatcher) BuilderOption {
return func(b *builder) error {
b.ruleMatcher = matcher
return nil
}
}
// MayFail configures a builder option to succeed on failures and logs an error
func MayFail(o BuilderOption) BuilderOption {
return func(b *builder) error {
if err := o(b); err != nil {
log.Warnf("Ignoring builder initialization failure: %v", err)
}
return nil
}
}
// WithNodeLabels configures a builder to use specified Kubernetes node labels
func WithNodeLabels(nodeLabels map[string]string) BuilderOption {
return func(b *builder) error {
b.nodeLabels = map[string]string{}
for k, v := range nodeLabels {
k, v := hostinfo.LabelPreprocessor(k, v)
b.nodeLabels[k] = v
}
return nil
}
}
// IsFramework matches a compliance suite by the name of the framework
func IsFramework(framework string) SuiteMatcher {
return func(s *compliance.SuiteMeta) bool {
return s.Framework == framework
}
}
// IsRuleID matches a compliance rule by ID
func IsRuleID(ruleID string) RuleMatcher {
return func(r *compliance.Rule) bool {
return r.ID == ruleID
}
}
// NewBuilder constructs a check builder
func NewBuilder(reporter event.Reporter, options ...BuilderOption) (Builder, error) {
b := &builder{
reporter: reporter,
checkInterval: 20 * time.Minute,
etcGroupPath: "/etc/group",
status: newStatus(),
}
for _, o := range options {
if err := o(b); err != nil {
return nil, err
}
}
b.valueCache = cache.New(
b.checkInterval/2,
b.checkInterval/4,
)
return b, nil
}
type builder struct {
checkInterval time.Duration
reporter event.Reporter
valueCache *cache.Cache
hostname string
pathMapper *pathMapper
etcGroupPath string
nodeLabels map[string]string
suiteMatcher SuiteMatcher
ruleMatcher RuleMatcher
dockerClient env.DockerClient
auditClient env.AuditClient
kubeClient env.KubeClient
isLeaderFunc func() bool
status *status
}
func (b *builder) Close() error {
if b.dockerClient != nil {
if err := b.dockerClient.Close(); err != nil {
return err
}
}
if b.auditClient != nil {
if err := b.auditClient.Close(); err != nil {
return err
}
}
return nil
}
func (b *builder) ChecksFromFile(file string, onCheck compliance.CheckVisitor) error {
suite, err := compliance.ParseSuite(file)
if err != nil {
return err
}
if b.suiteMatcher != nil {
if b.suiteMatcher(&suite.Meta) {
log.Infof("%s/%s: matched suite in %s", suite.Meta.Name, suite.Meta.Version, file)
} else {
log.Tracef("%s/%s: skipped suite in %s", suite.Meta.Name, suite.Meta.Version, file)
return nil
}
}
log.Infof("%s/%s: loading suite from %s", suite.Meta.Name, suite.Meta.Version, file)
matchedCount := 0
for _, r := range suite.Rules {
if b.ruleMatcher != nil {
if b.ruleMatcher(&r) {
log.Infof("%s/%s: matched rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file)
} else {
log.Tracef("%s/%s: skipped rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file)
continue
}
}
matchedCount++
if len(r.Resources) == 0 {
log.Infof("%s/%s: skipped rule %s - no configured resources", suite.Meta.Name, suite.Meta.Version, r.ID)
continue
}
log.Debugf("%s/%s: loading rule %s", suite.Meta.Name, suite.Meta.Version, r.ID)
check, err := b.checkFromRule(&suite.Meta, &r)
if err != nil {
if err != ErrRuleDoesNotApply {
log.Warnf("%s/%s: failed to load rule %s: %v", suite.Meta.Name, suite.Meta.Version, r.ID, err)
}
log.Infof("%s/%s: skipped rule %s - does not apply to this system", suite.Meta.Name, suite.Meta.Version, r.ID)
}
if b.status != nil {
b.status.addCheck(&compliance.CheckStatus{
RuleID: r.ID,
Description: r.Description,
Name: compliance.CheckName(r.ID, r.Description),
Framework: suite.Meta.Framework,
Source: suite.Meta.Source,
Version: suite.Meta.Version,
InitError: err,
})
}
ok := onCheck(&r, check, err)
if !ok {
log.Infof("%s/%s: stopping rule enumeration", suite.Meta.Name, suite.Meta.Version)
return err
}
}
if b.ruleMatcher != nil && matchedCount == 0 {
log.Infof("%s/%s: no rules matched", suite.Meta.Name, suite.Meta.Version)
}
return nil
}
func (b *builder) GetCheckStatus() compliance.CheckStatusList {
if b.status != nil {
return b.status.getChecksStatus()
}
return compliance.CheckStatusList{}
}
func (b *builder) checkFromRule(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.Check, error) {
ruleScope, err := getRuleScope(meta, rule)
if err != nil {
return nil, err
}
eligible, err := b.hostMatcher(ruleScope, rule)
if err != nil {
return nil, err
}
if !eligible {
log.Debugf("rule %s/%s discarded by hostMatcher", meta.Framework, rule.ID)
return nil, ErrRuleDoesNotApply
}
return b.newCheck(meta, ruleScope, rule)
}
func getRuleScope(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.RuleScope, error) {
switch {
case rule.Scope.Includes(compliance.DockerScope):
return compliance.DockerScope, nil
case rule.Scope.Includes(compliance.KubernetesNodeScope):
return compliance.KubernetesNodeScope, nil
case rule.Scope.Includes(compliance.KubernetesClusterScope):
return compliance.KubernetesClusterScope, nil
default:
return "", ErrRuleScopeNotSupported
}
}
func (b *builder) hostMatcher(scope compliance.RuleScope, rule *compliance.Rule) (bool, error) {
switch scope {
case compliance.DockerScope:
if b.dockerClient == nil {
log.Infof("rule %s skipped - not running in a docker environment", rule.ID)
return false, nil
}
case compliance.KubernetesClusterScope:
if b.kubeClient == nil {
log.Infof("rule %s skipped - not running as Cluster Agent", rule.ID)
return false, nil
}
case compliance.KubernetesNodeScope:
if config.IsKubernetes() {
return b.isKubernetesNodeEligible(rule.HostSelector)
}
log.Infof("rule %s skipped - not running on a Kubernetes node", rule.ID)
return false, nil
}
return true, nil
}
func (b *builder) isKubernetesNodeEligible(hostSelector string) (bool, error) {
if hostSelector == "" {
return true, nil
}
expr, err := eval.ParseExpression(hostSelector)
if err != nil {
return false, err
}
nodeInstance := &eval.Instance{
Functions: eval.FunctionMap{
"node.has | return func(b *builder) error { | random_line_split |
|
builder.go |
pathMapper *pathMapper
etcGroupPath string
nodeLabels map[string]string
suiteMatcher SuiteMatcher
ruleMatcher RuleMatcher
dockerClient env.DockerClient
auditClient env.AuditClient
kubeClient env.KubeClient
isLeaderFunc func() bool
status *status
}
func (b *builder) Close() error {
if b.dockerClient != nil {
if err := b.dockerClient.Close(); err != nil {
return err
}
}
if b.auditClient != nil {
if err := b.auditClient.Close(); err != nil {
return err
}
}
return nil
}
func (b *builder) ChecksFromFile(file string, onCheck compliance.CheckVisitor) error {
suite, err := compliance.ParseSuite(file)
if err != nil {
return err
}
if b.suiteMatcher != nil {
if b.suiteMatcher(&suite.Meta) {
log.Infof("%s/%s: matched suite in %s", suite.Meta.Name, suite.Meta.Version, file)
} else {
log.Tracef("%s/%s: skipped suite in %s", suite.Meta.Name, suite.Meta.Version, file)
return nil
}
}
log.Infof("%s/%s: loading suite from %s", suite.Meta.Name, suite.Meta.Version, file)
matchedCount := 0
for _, r := range suite.Rules {
if b.ruleMatcher != nil {
if b.ruleMatcher(&r) {
log.Infof("%s/%s: matched rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file)
} else {
log.Tracef("%s/%s: skipped rule %s in %s", suite.Meta.Name, suite.Meta.Version, r.ID, file)
continue
}
}
matchedCount++
if len(r.Resources) == 0 {
log.Infof("%s/%s: skipped rule %s - no configured resources", suite.Meta.Name, suite.Meta.Version, r.ID)
continue
}
log.Debugf("%s/%s: loading rule %s", suite.Meta.Name, suite.Meta.Version, r.ID)
check, err := b.checkFromRule(&suite.Meta, &r)
if err != nil {
if err != ErrRuleDoesNotApply {
log.Warnf("%s/%s: failed to load rule %s: %v", suite.Meta.Name, suite.Meta.Version, r.ID, err)
}
log.Infof("%s/%s: skipped rule %s - does not apply to this system", suite.Meta.Name, suite.Meta.Version, r.ID)
}
if b.status != nil {
b.status.addCheck(&compliance.CheckStatus{
RuleID: r.ID,
Description: r.Description,
Name: compliance.CheckName(r.ID, r.Description),
Framework: suite.Meta.Framework,
Source: suite.Meta.Source,
Version: suite.Meta.Version,
InitError: err,
})
}
ok := onCheck(&r, check, err)
if !ok {
log.Infof("%s/%s: stopping rule enumeration", suite.Meta.Name, suite.Meta.Version)
return err
}
}
if b.ruleMatcher != nil && matchedCount == 0 {
log.Infof("%s/%s: no rules matched", suite.Meta.Name, suite.Meta.Version)
}
return nil
}
func (b *builder) GetCheckStatus() compliance.CheckStatusList {
if b.status != nil {
return b.status.getChecksStatus()
}
return compliance.CheckStatusList{}
}
func (b *builder) checkFromRule(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.Check, error) {
ruleScope, err := getRuleScope(meta, rule)
if err != nil {
return nil, err
}
eligible, err := b.hostMatcher(ruleScope, rule)
if err != nil {
return nil, err
}
if !eligible {
log.Debugf("rule %s/%s discarded by hostMatcher", meta.Framework, rule.ID)
return nil, ErrRuleDoesNotApply
}
return b.newCheck(meta, ruleScope, rule)
}
func getRuleScope(meta *compliance.SuiteMeta, rule *compliance.Rule) (compliance.RuleScope, error) {
switch {
case rule.Scope.Includes(compliance.DockerScope):
return compliance.DockerScope, nil
case rule.Scope.Includes(compliance.KubernetesNodeScope):
return compliance.KubernetesNodeScope, nil
case rule.Scope.Includes(compliance.KubernetesClusterScope):
return compliance.KubernetesClusterScope, nil
default:
return "", ErrRuleScopeNotSupported
}
}
func (b *builder) hostMatcher(scope compliance.RuleScope, rule *compliance.Rule) (bool, error) {
switch scope {
case compliance.DockerScope:
if b.dockerClient == nil {
log.Infof("rule %s skipped - not running in a docker environment", rule.ID)
return false, nil
}
case compliance.KubernetesClusterScope:
if b.kubeClient == nil {
log.Infof("rule %s skipped - not running as Cluster Agent", rule.ID)
return false, nil
}
case compliance.KubernetesNodeScope:
if config.IsKubernetes() {
return b.isKubernetesNodeEligible(rule.HostSelector)
}
log.Infof("rule %s skipped - not running on a Kubernetes node", rule.ID)
return false, nil
}
return true, nil
}
func (b *builder) isKubernetesNodeEligible(hostSelector string) (bool, error) {
if hostSelector == "" {
return true, nil
}
expr, err := eval.ParseExpression(hostSelector)
if err != nil {
return false, err
}
nodeInstance := &eval.Instance{
Functions: eval.FunctionMap{
"node.hasLabel": b.nodeHasLabel,
"node.label": b.nodeLabel,
},
Vars: eval.VarMap{
"node.labels": b.nodeLabelKeys(),
},
}
result, err := expr.Evaluate(nodeInstance)
if err != nil {
return false, err
}
eligible, ok := result.(bool)
if !ok {
return false, fmt.Errorf("hostSelector %q does not evaluate to a boolean value", hostSelector)
}
return eligible, nil
}
func (b *builder) getNodeLabel(args ...interface{}) (string, bool, error) {
if len(args) == 0 {
return "", false, errors.New(`expecting one argument for label`)
}
label, ok := args[0].(string)
if !ok {
return "", false, fmt.Errorf(`expecting string value for label argument`)
}
if b.nodeLabels == nil {
return "", false, nil
}
v, ok := b.nodeLabels[label]
return v, ok, nil
}
func (b *builder) nodeHasLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) {
_, ok, err := b.getNodeLabel(args...)
return ok, err
}
func (b *builder) nodeLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) {
v, _, err := b.getNodeLabel(args...)
return v, err
}
func (b *builder) nodeLabelKeys() []string {
var keys []string
for k := range b.nodeLabels {
keys = append(keys, k)
}
return keys
}
func (b *builder) newCheck(meta *compliance.SuiteMeta, ruleScope compliance.RuleScope, rule *compliance.Rule) (compliance.Check, error) {
checkable, err := newResourceCheckList(b, rule.ID, rule.Resources)
if err != nil {
return nil, err
}
var notify eventNotify
if b.status != nil {
notify = b.status.updateCheck
}
// We capture err as configuration error but do not prevent check creation
return &complianceCheck{
Env: b,
ruleID: rule.ID,
description: rule.Description,
interval: b.checkInterval,
suiteMeta: meta,
// For now we are using rule scope (e.g. docker, kubernetesNode) as resource type
resourceType: string(ruleScope),
resourceID: b.hostname,
checkable: checkable,
eventNotify: notify,
}, nil
}
func (b *builder) Reporter() event.Reporter {
return b.reporter
}
func (b *builder) DockerClient() env.DockerClient {
return b.dockerClient
}
func (b *builder) AuditClient() env.AuditClient {
return b.auditClient
}
func (b *builder) KubeClient() env.KubeClient {
return b.kubeClient
}
func (b *builder) Hostname() string {
return b.hostname
}
func (b *builder) EtcGroupPath() string {
return b.etcGroupPath
}
func (b *builder) NormalizeToHostRoot(path string) string {
if b.pathMapper == nil {
return path
}
return b.pathMapper.normalizeToHostRoot(path)
}
func (b *builder) RelativeToHostRoot(path string) string {
if b.pathMapper == nil {
return path
}
return b.pathMapper.relativeToHostRoot(path)
}
func (b *builder) IsLeader() bool | {
if b.isLeaderFunc != nil {
return b.isLeaderFunc()
}
return true
} | identifier_body |
|
builder.go | rule.ID)
return false, nil
}
case compliance.KubernetesNodeScope:
if config.IsKubernetes() {
return b.isKubernetesNodeEligible(rule.HostSelector)
}
log.Infof("rule %s skipped - not running on a Kubernetes node", rule.ID)
return false, nil
}
return true, nil
}
func (b *builder) isKubernetesNodeEligible(hostSelector string) (bool, error) {
if hostSelector == "" {
return true, nil
}
expr, err := eval.ParseExpression(hostSelector)
if err != nil {
return false, err
}
nodeInstance := &eval.Instance{
Functions: eval.FunctionMap{
"node.hasLabel": b.nodeHasLabel,
"node.label": b.nodeLabel,
},
Vars: eval.VarMap{
"node.labels": b.nodeLabelKeys(),
},
}
result, err := expr.Evaluate(nodeInstance)
if err != nil {
return false, err
}
eligible, ok := result.(bool)
if !ok {
return false, fmt.Errorf("hostSelector %q does not evaluate to a boolean value", hostSelector)
}
return eligible, nil
}
func (b *builder) getNodeLabel(args ...interface{}) (string, bool, error) {
if len(args) == 0 {
return "", false, errors.New(`expecting one argument for label`)
}
label, ok := args[0].(string)
if !ok {
return "", false, fmt.Errorf(`expecting string value for label argument`)
}
if b.nodeLabels == nil {
return "", false, nil
}
v, ok := b.nodeLabels[label]
return v, ok, nil
}
func (b *builder) nodeHasLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) {
_, ok, err := b.getNodeLabel(args...)
return ok, err
}
func (b *builder) nodeLabel(_ *eval.Instance, args ...interface{}) (interface{}, error) {
v, _, err := b.getNodeLabel(args...)
return v, err
}
func (b *builder) nodeLabelKeys() []string {
var keys []string
for k := range b.nodeLabels {
keys = append(keys, k)
}
return keys
}
func (b *builder) newCheck(meta *compliance.SuiteMeta, ruleScope compliance.RuleScope, rule *compliance.Rule) (compliance.Check, error) {
checkable, err := newResourceCheckList(b, rule.ID, rule.Resources)
if err != nil {
return nil, err
}
var notify eventNotify
if b.status != nil {
notify = b.status.updateCheck
}
// We capture err as configuration error but do not prevent check creation
return &complianceCheck{
Env: b,
ruleID: rule.ID,
description: rule.Description,
interval: b.checkInterval,
suiteMeta: meta,
// For now we are using rule scope (e.g. docker, kubernetesNode) as resource type
resourceType: string(ruleScope),
resourceID: b.hostname,
checkable: checkable,
eventNotify: notify,
}, nil
}
func (b *builder) Reporter() event.Reporter {
return b.reporter
}
func (b *builder) DockerClient() env.DockerClient {
return b.dockerClient
}
func (b *builder) AuditClient() env.AuditClient {
return b.auditClient
}
func (b *builder) KubeClient() env.KubeClient {
return b.kubeClient
}
func (b *builder) Hostname() string {
return b.hostname
}
func (b *builder) EtcGroupPath() string {
return b.etcGroupPath
}
func (b *builder) NormalizeToHostRoot(path string) string {
if b.pathMapper == nil {
return path
}
return b.pathMapper.normalizeToHostRoot(path)
}
func (b *builder) RelativeToHostRoot(path string) string {
if b.pathMapper == nil {
return path
}
return b.pathMapper.relativeToHostRoot(path)
}
func (b *builder) IsLeader() bool {
if b.isLeaderFunc != nil {
return b.isLeaderFunc()
}
return true
}
func (b *builder) EvaluateFromCache(ev eval.Evaluatable) (interface{}, error) {
instance := &eval.Instance{
Functions: eval.FunctionMap{
builderFuncShell: b.withValueCache(builderFuncShell, evalCommandShell),
builderFuncExec: b.withValueCache(builderFuncExec, evalCommandExec),
builderFuncProcessFlag: b.withValueCache(builderFuncProcessFlag, evalProcessFlag),
builderFuncJSON: b.withValueCache(builderFuncJSON, b.evalValueFromFile(jsonGetter)),
builderFuncYAML: b.withValueCache(builderFuncYAML, b.evalValueFromFile(yamlGetter)),
},
}
return ev.Evaluate(instance)
}
func (b *builder) withValueCache(funcName string, fn eval.Function) eval.Function {
return func(instance *eval.Instance, args ...interface{}) (interface{}, error) {
var sargs []string
for _, arg := range args {
sargs = append(sargs, fmt.Sprintf("%v", arg))
}
key := fmt.Sprintf("%s(%s)", funcName, strings.Join(sargs, ","))
if v, ok := b.valueCache.Get(key); ok {
return v, nil
}
v, err := fn(instance, args...)
if err == nil {
b.valueCache.Set(key, v, cache.DefaultExpiration)
}
return v, err
}
}
func evalCommandShell(_ *eval.Instance, args ...interface{}) (interface{}, error) {
if len(args) == 0 {
return nil, errors.New(`expecting at least one argument`)
}
command, ok := args[0].(string)
if !ok {
return nil, fmt.Errorf(`expecting string value for command argument`)
}
var shellAndArgs []string
if len(args) > 1 {
for _, arg := range args[1:] {
s, ok := arg.(string)
if !ok {
return nil, fmt.Errorf(`expecting only string value for shell command and arguments`)
}
shellAndArgs = append(shellAndArgs, s)
}
}
return valueFromShellCommand(command, shellAndArgs...)
}
func valueFromShellCommand(command string, shellAndArgs ...string) (interface{}, error) {
log.Debugf("Resolving value from shell command: %s, args [%s]", command, strings.Join(shellAndArgs, ","))
shellCmd := &compliance.ShellCmd{
Run: command,
}
if len(shellAndArgs) > 0 {
shellCmd.Shell = &compliance.BinaryCmd{
Name: shellAndArgs[0],
Args: shellAndArgs[1:],
}
}
execCommand := shellCmdToBinaryCmd(shellCmd)
exitCode, stdout, err := runBinaryCmd(execCommand, defaultTimeout)
if exitCode != 0 || err != nil {
return nil, fmt.Errorf("command '%v' execution failed, error: %v", command, err)
}
return stdout, nil
}
func evalCommandExec(_ *eval.Instance, args ...interface{}) (interface{}, error) {
if len(args) == 0 {
return nil, errors.New(`expecting at least one argument`)
}
var cmdArgs []string
for _, arg := range args {
s, ok := arg.(string)
if !ok {
return nil, fmt.Errorf(`expecting only string values for arguments`)
}
cmdArgs = append(cmdArgs, s)
}
return valueFromBinaryCommand(cmdArgs[0], cmdArgs[1:]...)
}
func valueFromBinaryCommand(name string, args ...string) (interface{}, error) {
log.Debugf("Resolving value from command: %s, args [%s]", name, strings.Join(args, ","))
execCommand := &compliance.BinaryCmd{
Name: name,
Args: args,
}
exitCode, stdout, err := runBinaryCmd(execCommand, defaultTimeout)
if exitCode != 0 || err != nil {
return nil, fmt.Errorf("command '%v' execution failed, error: %v", execCommand, err)
}
return stdout, nil
}
func evalProcessFlag(_ *eval.Instance, args ...interface{}) (interface{}, error) {
if len(args) != 2 {
return nil, errors.New(`expecting two arguments`)
}
name, ok := args[0].(string)
if !ok {
return nil, fmt.Errorf(`expecting string value for process name argument`)
}
flag, ok := args[1].(string)
if !ok {
return nil, fmt.Errorf(`expecting string value for process flag argument`)
}
return valueFromProcessFlag(name, flag)
}
func valueFromProcessFlag(name string, flag string) (interface{}, error) {
log.Debugf("Resolving value from process: %s, flag %s", name, flag)
processes, err := getProcesses(cacheValidity)
if err != nil {
return "", fmt.Errorf("unable to fetch processes: %w", err)
}
matchedProcesses := processes.findProcessesByName(name)
for _, mp := range matchedProcesses | {
flagValues := parseProcessCmdLine(mp.Cmdline)
return flagValues[flag], nil
} | conditional_block |
|
base_function.py | 路径名
print("file name", file_name)
# 文件名
title = file_name.split("/")[-1]
print(type(title), title)
isolate1 = Isolate('isolate', cases, rate = abnormal_rate)
np_array = isolate1.merge_arrays()
table_name = np_array[1, 0]
db = connectdb()
if not query_table(db, table_name):
create_table(db, np_array[0], table_name)
# 插入数据,表名为uuid
if insert_train_datas(db, table_name, np_array[1:]):
# 数据集列表存储表名(redis存储),断电就清空
redis_conn = get_redis_connection("default")
redis_conn.sadd('data_set_name', title)
# sv.data_set.append(title)
# 存储数据集表名(磁盘存储),断电可恢复
save_dataset_name_to_file(title)
# 存储文件与UUID对应关系到file2uuid表中
insert_file2uuid(title, table_name)
return True
return False
def save_dataset_name_to_file(file_name):
"""
将文件名存储到磁盘中,断电重启时能够保证继续运行
:param file_name: 文件名称
:return:
"""
print(os.getcwd())
file_path = "./models_file/data_set_name"
with open(file_path, 'a+') as file:
file.write(file_name + "\n")
def load_dataset_name_to_list():
"""
加载磁盘文件中数据集名到缓存中,data_set
:return:
"""
file_path = "./models_file/data_set_name"
with open(file_path, 'r') as file:
lines = file.read().splitlines()
for line in lines:
if line is None or line == "":
continue
redis_conn = get_redis_connection("default")
redis_conn.sadd('data_set_name', line)
def load_xgboost_name_to_list():
"""
加载磁盘文件中XGBoost模型名到缓存中,xgboost_name
:return:
"""
file_path = "./models_file/xgboost_name"
with open(file_path, 'r') as file:
lines = file.read().splitlines()
for line in lines:
if line is None or line == "":
continue
redis_conn = get_redis_connection("default")
redis_conn.sadd('xgboost_name', line)
def load_lstm_name_to_list():
"""
加载磁盘文件中LSTM模型名到缓存中,lstm_name
:return:
"""
file_path = "./models_file/lstm_name"
with open(file_path, 'r') as file:
lines = file.read().splitlines()
for line in lines:
if line is None or line == "":
continue
redis_conn = get_redis_connection("default")
redis_conn.sadd('lstm_name', line)
# def load_lstm_name_to_dict():
# """
# 加载磁盘文件中LSTM模型到内存中,lstm_name
# :return:
# """
# file_path = "./models_file/lstm_name"
# with open(file_path, 'r') as file:
# lines = file.read().splitlines()
# for line in lines:
# if line is None or line == "":
# continue
# elif line not in sv.lstm_model_dict.keys():
# sv.lstm_model_dict[line] = load_lstm_class(line)
# print("lstm---------------------", sv.lstm_model_dict)
#
#
# def load_xgboost_name_to_dict():
# """
# 加载磁盘文件中LSTM模型到内存中,lstm_name
# :return:
# """
# file_path = "./models_file/xgboost_name"
# with open(file_path, 'r') as file:
# lines = file.read().splitlines()
# for line in lines:
# if line is None or line == "":
# continue
# elif line not in sv.xgboost_model_dict.keys():
# sv.xgboost_model_dict[line] = load_xgboost_class(line)
# print("xgboost-------------------", sv.xgboost_model_dict)
def load_datas_from_disk_to_memory():
load_dataset_name_to_list()
load_xgboost_name_to_list()
load_lstm_name_to_list()
def str_to_time_hour_minute(time):
week = datetime.strptime(re.split(r" ", time)[0], "%Y/%m/%d").weekday()
year, month, day, hour, minute, secend = re.split(r"[/ :]", time)
return [hour, minute, week]
def use_XGBoost_predict(json_data):
"""
使用已训练的XGBoost模型检测异常
:param json_data:
:return:
"""
model_name = json_data["host_id"]
times = datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S')
print(times.hour)
print(type(times))
predict_list = [model_name, datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S'), json_data["kpi"]]
print(predict_list)
predict_array = np.array(predict_list)
# 转换成XGBoost能使用的数据格式
tmp = translate_to_xgboost_datas_from_mysql(predict_array.reshape(1, 3))
# 由于频次较低,每次从磁盘文件中读取模型然后判断
XGBoost_model = load_xgboost_class(model_name)
print("load xgboost0000000000000000000000")
print("model name", XGBoost_model.name)
return XGBoost_model.predict(tmp)
def translate_to_xgboost_datas_from_realtime():
pass
def translate_to_xgboost_datas_from_mysql(np_array):
"""
将数据转换成xgboost能够识别的数据,仅仅在时间格式上转换,其他列不变
:param np_array:输入的数组
:return:时间转换后的数组,仅仅在时间上做出改变,其他列不变
"""
# 删除id列
np_array = np.delete(np_array, 0, axis = 1)
# 获取时间列
time_array = np_array[:, 0]
# 删除时间列
np_array = np.delete(np_array, 0, axis = 1)
hour = []
minute = []
week = []
for time in time_array:
hour.append(time.hour)
minute.append(time.minute)
week.append(time.weekday())
np_array = np.insert(np_array, 0, values = minute, axis = 1)
np_array = np.insert(np_array, 0, values = hour, axis = 1)
np_array = np.insert(np_array, 0, values = week, axis = 1)
# 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label
return np_array
def load_data_for_xgboost_from_mysql(table_name, number_data=20000):
"""
从数据库为xgboost模型读取数据,并进行时间格式转换
:param number_data: 取最后多少个数据来训练或者预测
:param table_name: 要读取的表名
:return:
"""
db = connectdb()
np_array = np.array(query_datas(db, table_name = table_name, number = number_data))
# # 删除id列
# np_array = np.delete(np_array, 0, axis = 1)
# # 获取时间列
# time_array = np_array[:, 0]
# # 删除时间列
# np_array = np.delete(np_array, 0, axis = 1)
# hour = []
# minute = []
# week = []
# for time in time_array:
# hour.append(time.hour)
# minute.append(time.minute)
# week.append(time.weekday())
# np_array = np.insert(np_array, 0, values = minute, axis = 1)
# np_array = np.insert(np_array, 0, values = hour, axis = 1)
# np_array = np.insert(np_array, 0, values = week, axis = 1)
np_array = translate_to_xgboost_datas_from_mysql(np_array)
closedb(db)
# 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label
return np_array
def load_data_for_lstm_from_mysql(table_name, number_data=20000):
"""
从数据库为lstm模型读取一天的数据
:param number_data: 取最后多少个数据来训练或者预测
:param table_name: 表名
:param end_time: 最后截止时间,即什么时刻开始预测
:return:
"""
db = connectdb()
np_array = np.array(query_datas(db, table_name = table_name, number = number_data))
closedb(db)
return np_array[:, -2] |
def save_xgboost_class(model):
"""
xgboost 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称 | random_line_split |
|
base_function.py | # with open(file_path, 'r') as file:
# lines = file.read().splitlines()
# for line in lines:
# if line is None or line == "":
# continue
# elif line not in sv.xgboost_model_dict.keys():
# sv.xgboost_model_dict[line] = load_xgboost_class(line)
# print("xgboost-------------------", sv.xgboost_model_dict)
def load_datas_from_disk_to_memory():
load_dataset_name_to_list()
load_xgboost_name_to_list()
load_lstm_name_to_list()
def str_to_time_hour_minute(time):
week = datetime.strptime(re.split(r" ", time)[0], "%Y/%m/%d").weekday()
year, month, day, hour, minute, secend = re.split(r"[/ :]", time)
return [hour, minute, week]
def use_XGBoost_predict(json_data):
"""
使用已训练的XGBoost模型检测异常
:param json_data:
:return:
"""
model_name = json_data["host_id"]
times = datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S')
print(times.hour)
print(type(times))
predict_list = [model_name, datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S'), json_data["kpi"]]
print(predict_list)
predict_array = np.array(predict_list)
# 转换成XGBoost能使用的数据格式
tmp = translate_to_xgboost_datas_from_mysql(predict_array.reshape(1, 3))
# 由于频次较低,每次从磁盘文件中读取模型然后判断
XGBoost_model = load_xgboost_class(model_name)
print("load xgboost0000000000000000000000")
print("model name", XGBoost_model.name)
return XGBoost_model.predict(tmp)
def translate_to_xgboost_datas_from_realtime():
pass
def translate_to_xgboost_datas_from_mysql(np_array):
"""
将数据转换成xgboost能够识别的数据,仅仅在时间格式上转换,其他列不变
:param np_array:输入的数组
:return:时间转换后的数组,仅仅在时间上做出改变,其他列不变
"""
# 删除id列
np_array = np.delete(np_array, 0, axis = 1)
# 获取时间列
time_array = np_array[:, 0]
# 删除时间列
np_array = np.delete(np_array, 0, axis = 1)
hour = []
minute = []
week = []
for time in time_array:
hour.append(time.hour)
minute.append(time.minute)
week.append(time.weekday())
np_array = np.insert(np_array, 0, values = minute, axis = 1)
np_array = np.insert(np_array, 0, values = hour, axis = 1)
np_array = np.insert(np_array, 0, values = week, axis = 1)
# 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label
return np_array
def load_data_for_xgboost_from_mysql(table_name, number_data=20000):
"""
从数据库为xgboost模型读取数据,并进行时间格式转换
:param number_data: 取最后多少个数据来训练或者预测
:param table_name: 要读取的表名
:return:
"""
db = connectdb()
np_array = np.array(query_datas(db, table_name = table_name, number = number_data))
# # 删除id列
# np_array = np.delete(np_array, 0, axis = 1)
# # 获取时间列
# time_array = np_array[:, 0]
# # 删除时间列
# np_array = np.delete(np_array, 0, axis = 1)
# hour = []
# minute = []
# week = []
# for time in time_array:
# hour.append(time.hour)
# minute.append(time.minute)
# week.append(time.weekday())
# np_array = np.insert(np_array, 0, values = minute, axis = 1)
# np_array = np.insert(np_array, 0, values = hour, axis = 1)
# np_array = np.insert(np_array, 0, values = week, axis = 1)
np_array = translate_to_xgboost_datas_from_mysql(np_array)
closedb(db)
# 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label
return np_array
def load_data_for_lstm_from_mysql(table_name, number_data=20000):
"""
从数据库为lstm模型读取一天的数据
:param number_data: 取最后多少个数据来训练或者预测
:param table_name: 表名
:param end_time: 最后截止时间,即什么时刻开始预测
:return:
"""
db = connectdb()
np_array = np.array(query_datas(db, table_name = table_name, number = number_data))
closedb(db)
return np_array[:, -2]
def save_xgboost_class(model):
"""
xgboost 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称
:param model:
:return:
"""
# 存储模型
print("sava_xgboost_path", os.getcwd())
file_name = "./models_file/xgboost/%s" % model.name
with open(file_name, 'wb') as file_obj:
pickle.dump(model, file_obj)
# 存储名称
file_model_name = "./models_file/xgboost_name"
with open(file_model_name, 'a+') as name_obj:
name_obj.write(model.file_name + "\n")
def load_xgboost_class(model_name):
"""
根据模型名称加载模型,返回model
:param model_name:模型名
:return: 返回模型
"""
file_name = "./models_file/xgboost/%s" % model_name
# return pickle.load(open(file_name, "rb"))
print(os.getcwd())
print(file_name)
with open(file_name, 'rb') as f:
xgboost_class = pickle.load(f)
return xgboost_class
def save_lstm_class(LSTM_model):
"""
lstm 模型持久化,存储在models目录下,使用model.name作为文件名,同时持久化模型名称
:param model:
:return:
"""
# 存储模型
file_name = "./models_file/lstm/%s" % LSTM_model.name
print("save lstm path", file_name)
with open(file_name, 'wb') as file_obj:
pickle.dump(LSTM_model, file_obj)
# 存储名称
file_model_name = "./models_file/lstm_name"
with open(file_model_name, 'a+') as name_obj:
name_obj.write(LSTM_model.file_name + "\n")
def load_lstm_class(model_name):
"""
根据模型名称加载模型,返回model
:param model_name:模型名
:return: 返回模型
"""
print(os.getcwd())
file_name = "./models_file/lstm/%s" % model_name
print(file_name)
with open(file_name, 'rb') as f:
lstm_class = pickle.load(f)
return lstm_class
def print_model(model_kind, data_name):
print("print_model", model_kind, data_name)
def train_model(model_kind, data_name, force = 0):
"""训练模型"""
redis_conn = get_redis_connection("default")
print("类型", type(data_name))
print(data_name)
uuid = query_uuid_from_file2uuid_by_filename(data_name)
print(type(uuid), uuid)
if model_kind == "XGBoost":
# 多进程训练模型
if redis_conn.sismember("xgboost_name", data_name) and force != 1:
return 0
else:
from xgboost_model.xgboost_class import XGBoost
xgboost_train = XGBoost(data_name, uuid)
# 存储到redis中
redis_conn.hset('xgboost_model', data_name, pickle.dumps(xgboost_train))
redis_conn.sadd('xgboost_name', data_name)
# 模型持久化
save_xgboost_class(xgboost_train)
| print("xgboost_name", data_name)
return 1
elif model_kind == 'LSTM':
# 多进程训练模型
if redis_conn.sismember("lstm_name", data_name) and force != 1:
print("存在000000000", data_name)
return 0
else:
print("训练过程0000000")
print("类型", type(data_name))
from lstm_model.lstm_class import LSTMModel
# data_name是文件名,uuid是文件唯一标识
lstm_train = LSTMModel(data_name, uuid)
print("lasted", lstm_train.lasted_up | identifier_body |
|
base_function.py | return:
"""
length = len(arrays)
if length < 200:
print("测试集大小:", length)
return "测试集数据小于200,请重新传入大于200条数据的测试集"
elif length < 256:
print("测试集大小:", length)
return arrays
indexs = np.linspace(0, length - 1, size)
indexs = np.array(indexs, dtype = int)
res_arr = arrays[indexs]
print("测试集大小:", len(indexs) - 1)
return res_arr
def format_time(time):
"""
将传入的时间格式化,转换成没有秒的时间格式 yyyy-MM-DD hh-mm
:param time:
:return:
"""
# year, month, day, hour, minute, scend = re.split(r"/| |:", time)
# print(year, month, day, hour, minute, scend)
return time[0:-3]
def draw_with_diff_color(np_array):
"""
根据标签展示散点图,不同的标签具有不同颜色
:param np_array:
:return:
"""
red_arr = []
green_arr = []
for arr in np_array:
if arr[-1] == '0':
red_arr.append(arr)
else:
green_arr.append(arr)
print(red_arr)
print(green_arr)
def save_datas_with_labels(file_name, abnormal_rate):
"""
存储已经由孤立森林学习过的带有标签的数据
:return:True or False
"""
cases = load_csv(file_name)
# file_name是文件路径名
print("file name", file_name)
# 文件名
title = file_name.split("/")[-1]
print(type(title), title)
isolate1 = Isolate('isolate', cases, rate = abnormal_rate)
np_array = isolate1.merge_arrays()
table_name = np_array[1, 0]
db = connectdb()
if not query_table(db, table_name):
create_table(db, np_array[0], table_name)
# 插入数据,表名为uuid
if insert_train_datas(db, table_name, np_array[1:]):
# 数据集列表存储表名(redis存储),断电就清空
redis_conn = get_redis_connection("default")
redis_conn.sadd('data_set_name', title)
# sv.data_set.append(title)
# 存储数据集表名(磁盘存储),断电可恢复
save_dataset_name_to_file(title)
# 存储文件与UUID对应关系到file2uuid表中
insert_file2uuid(title, table_name)
return True
return False
def save_dataset_name_to_file(file_name):
"""
将文件名存储到磁盘中,断电重启时能够保证继续运行
:param file_name: 文件名称
:return:
"""
print(os.getcwd())
file_path = "./models_file/data_set_name"
with open(file_path, 'a+') as file:
file.write(file_name + "\n")
def load_dataset_name_to_list():
"""
加载磁盘文件中数据集名到缓存中,data_set
:return:
"""
file_path = "./models_file/data_set_name"
with open(file_path, 'r') as file:
lines = file.read().splitlines()
for line in lines:
if line is None or line == "":
continue
redis_conn = get_redis_connection("default")
redis_conn.sadd('data_set_name', line)
def load_xgboost_name_to_list():
"""
加载磁盘文件中XGBoost模型名到缓存中,xgboost_name
:return:
"""
file_path = "./models_file/xgboost_name"
with open(file_path, 'r') as file:
lines = file.read().splitlines()
for line in lines:
if line is None or line == "":
continue
redis_conn = get_redis_connection("default")
redis_conn.sadd('xgboost_name', line)
def load_lstm_name_to_list():
"""
加载磁盘文件中LSTM模型名到缓存中,lstm_name
:return:
"""
file_path = "./models_file/lstm_name"
with open(file_path, 'r') as file:
lines = file.read().splitlines()
for line in lines:
if line is None or line == "":
continue
redis_conn = get_redis_connection("default")
redis_conn.sadd('lstm_name', line)
# def load_lstm_name_to_dict():
# """
# 加载磁盘文件中LSTM模型到内存中,lstm_name
# :return:
# """
# file_path = "./models_file/lstm_name"
# with open(file_path, 'r') as file:
# lines = file.read().splitlines()
# for line in lines:
# if line is None or line == "":
# continue
# elif line not in sv.lstm_model_dict.keys():
# sv.lstm_model_dict[line] = load_lstm_class(line)
# print("lstm---------------------", sv.lstm_model_dict)
#
#
# def load_xgboost_name_to_dict():
# """
# 加载磁盘文件中LSTM模型到内存中,lstm_name
# :return:
# """
# file_path = "./models_file/xgboost_name"
# with open(file_path, 'r') as file:
# lines = file.read().splitlines()
# for line in lines:
# if line is None or line == "":
# continue
# elif line not in sv.xgboost_model_dict.keys():
# sv.xgboost_model_dict[line] = load_xgboost_class(line)
# print("xgboost-------------------", sv.xgboost_model_dict)
def load_datas_from_disk_to_memory():
load_dataset_name_to_list()
load_xgboost_name_to_list()
load_lstm_name_to_list()
def str_to_time_hour_minute(time):
week = datetime.strptime(re.split(r" ", time)[0], "%Y/%m/%d").weekday()
year, month, day, hour, minute, secend = re.split(r"[/ :]", time)
return [hour, minute, week]
def use_XGBoost_predict(json_data):
"""
使用已训练的XGBoost模型检测异常
:param json_data:
:return:
"""
model_name = json_data["host_id"]
times = datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S')
print(times.hour)
print(type(times))
predict_list = [model_name, datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S'), json_data["kpi"]]
print(predict_list)
predict_array = np.array(predict_list)
# 转换成XGBoost能使用的数据格式
tmp = translate_to_xgboost_datas_from_mysql(predict_array.reshape(1, 3))
# 由于频次较低,每次从磁盘文件中读取模型然后判断
XGBoost_model = load_xgboost_class(model_name)
print("load xgboost0000000000000000000000")
print("model name", XGBoost_model.name)
return XGBoost_model.predict(tmp)
def translate_to_xgboost_datas_from_realtime():
pass
def translate_to_xgboost_datas_from_mysql(np_array):
"""
将数据转换成xgboost能够识别的数据,仅仅在时间格式上转换,其他列不变
:param np_array:输入的数组
:return:时间转换后的数组,仅仅在时间上做出改变,其他列不变
"""
# 删除id列
np_array = np.delete(np_array, 0, axis = 1)
# 获取时间列
time_array = np_array[:, 0]
# 删除时间列
np_array = np.delete(np_array, 0, axis = 1)
hour = []
minute = []
week = []
for time in time_array:
hour.append(time.hour)
minute.append(time.minute)
week.append(time.weekday())
np_array = np.insert(np_array, 0, values = minute, axis = 1)
np_array = np.insert(np_array, 0, values = hour, axis = 1)
np_array = np.insert(np_array, 0, values = week, axis = 1)
# 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label
return np_array
def load_data_for_xgboost_from_mysql(table_name, number_data=20000):
"""
从数据库为xgboost模型读取数据,并进行时间格式转换
:param number_data: 取最后多少个数据来训练或者预测
:param table_name: 要读取的表名
:return:
"""
db = connectdb()
np_array = np.array(query_datas(db, table_name = table_name, number = number_data))
# # 删除id列
# np_array = np.de | lete(np_array, 0, axis = 1)
# # 获 | identifier_name |
|
base_function.py | :int, 要求均匀分为的份额,一般为256,用户可以自己设置,第一行为标签
:return:
"""
length = len(arrays)
if length < 200:
print("测试集大小:", length)
return "测试集数据小于200,请重新传入大于200条数据的测试集"
elif length < 256:
print("测试集大小:", length)
return arrays
indexs = np.linspace(0, length - 1, size)
indexs = np.array(indexs, dtype = int)
res_arr = arrays[indexs]
print("测试集大小:", len(indexs) - 1)
return res_arr
def format_time(time):
"""
将传入的时间格式化,转换成没有秒的时间格式 yyyy-MM-DD hh-mm
:param time:
:return:
"""
# year, month, day, hour, minute, scend = re.split(r"/| |:", time)
# print(year, month, day, hour, minute, scend)
return time[0:-3]
def draw_with_diff_color(np_array):
"""
根据标签展示散点图,不同的标签具有不同颜色
:param np_array:
:return:
"""
red_arr = []
green_arr = []
for arr in np_array:
if arr[-1] == '0':
red_arr.append(arr)
else:
green_arr.append(arr)
print(red_arr)
print(green_arr)
def save_datas_with_labels(file_name, abnormal_rate):
"""
存储已经由孤立森林学习过的带有标签的数据
:return:True or False
"""
cases = load_csv(file_name)
# file_name是文件路径名
print("file name", file_name)
# 文件名
title = file_name.split("/")[-1]
print(type(title), title)
isolate1 = Isolate('isolate', cases, rate = abnormal_rate)
np_array = isolate1.merge_arrays()
table_name = np_array[1, 0]
db = connectdb()
if not query_table(db, table_name):
create_table(db, np_array[0], table_name)
# 插入数据,表名为uuid
if insert_train_datas(db, table_name, | # 数据集列表存储表名(redis存储),断电就清空
redis_conn = get_redis_connection("default")
redis_conn.sadd('data_set_name', title)
# sv.data_set.append(title)
# 存储数据集表名(磁盘存储),断电可恢复
save_dataset_name_to_file(title)
# 存储文件与UUID对应关系到file2uuid表中
insert_file2uuid(title, table_name)
return True
return False
def save_dataset_name_to_file(file_name):
"""
将文件名存储到磁盘中,断电重启时能够保证继续运行
:param file_name: 文件名称
:return:
"""
print(os.getcwd())
file_path = "./models_file/data_set_name"
with open(file_path, 'a+') as file:
file.write(file_name + "\n")
def load_dataset_name_to_list():
"""
加载磁盘文件中数据集名到缓存中,data_set
:return:
"""
file_path = "./models_file/data_set_name"
with open(file_path, 'r') as file:
lines = file.read().splitlines()
for line in lines:
if line is None or line == "":
continue
redis_conn = get_redis_connection("default")
redis_conn.sadd('data_set_name', line)
def load_xgboost_name_to_list():
"""
加载磁盘文件中XGBoost模型名到缓存中,xgboost_name
:return:
"""
file_path = "./models_file/xgboost_name"
with open(file_path, 'r') as file:
lines = file.read().splitlines()
for line in lines:
if line is None or line == "":
continue
redis_conn = get_redis_connection("default")
redis_conn.sadd('xgboost_name', line)
def load_lstm_name_to_list():
"""
加载磁盘文件中LSTM模型名到缓存中,lstm_name
:return:
"""
file_path = "./models_file/lstm_name"
with open(file_path, 'r') as file:
lines = file.read().splitlines()
for line in lines:
if line is None or line == "":
continue
redis_conn = get_redis_connection("default")
redis_conn.sadd('lstm_name', line)
# def load_lstm_name_to_dict():
# """
# 加载磁盘文件中LSTM模型到内存中,lstm_name
# :return:
# """
# file_path = "./models_file/lstm_name"
# with open(file_path, 'r') as file:
# lines = file.read().splitlines()
# for line in lines:
# if line is None or line == "":
# continue
# elif line not in sv.lstm_model_dict.keys():
# sv.lstm_model_dict[line] = load_lstm_class(line)
# print("lstm---------------------", sv.lstm_model_dict)
#
#
# def load_xgboost_name_to_dict():
# """
# 加载磁盘文件中LSTM模型到内存中,lstm_name
# :return:
# """
# file_path = "./models_file/xgboost_name"
# with open(file_path, 'r') as file:
# lines = file.read().splitlines()
# for line in lines:
# if line is None or line == "":
# continue
# elif line not in sv.xgboost_model_dict.keys():
# sv.xgboost_model_dict[line] = load_xgboost_class(line)
# print("xgboost-------------------", sv.xgboost_model_dict)
def load_datas_from_disk_to_memory():
load_dataset_name_to_list()
load_xgboost_name_to_list()
load_lstm_name_to_list()
def str_to_time_hour_minute(time):
week = datetime.strptime(re.split(r" ", time)[0], "%Y/%m/%d").weekday()
year, month, day, hour, minute, secend = re.split(r"[/ :]", time)
return [hour, minute, week]
def use_XGBoost_predict(json_data):
"""
使用已训练的XGBoost模型检测异常
:param json_data:
:return:
"""
model_name = json_data["host_id"]
times = datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S')
print(times.hour)
print(type(times))
predict_list = [model_name, datetime.strptime(json_data["time"], '%Y/%m/%d %H:%M:%S'), json_data["kpi"]]
print(predict_list)
predict_array = np.array(predict_list)
# 转换成XGBoost能使用的数据格式
tmp = translate_to_xgboost_datas_from_mysql(predict_array.reshape(1, 3))
# 由于频次较低,每次从磁盘文件中读取模型然后判断
XGBoost_model = load_xgboost_class(model_name)
print("load xgboost0000000000000000000000")
print("model name", XGBoost_model.name)
return XGBoost_model.predict(tmp)
def translate_to_xgboost_datas_from_realtime():
pass
def translate_to_xgboost_datas_from_mysql(np_array):
"""
将数据转换成xgboost能够识别的数据,仅仅在时间格式上转换,其他列不变
:param np_array:输入的数组
:return:时间转换后的数组,仅仅在时间上做出改变,其他列不变
"""
# 删除id列
np_array = np.delete(np_array, 0, axis = 1)
# 获取时间列
time_array = np_array[:, 0]
# 删除时间列
np_array = np.delete(np_array, 0, axis = 1)
hour = []
minute = []
week = []
for time in time_array:
hour.append(time.hour)
minute.append(time.minute)
week.append(time.weekday())
np_array = np.insert(np_array, 0, values = minute, axis = 1)
np_array = np.insert(np_array, 0, values = hour, axis = 1)
np_array = np.insert(np_array, 0, values = week, axis = 1)
# 此时返回的属性分别是 week, hour, minute, kpi_1... kpi_n,label
return np_array
def load_data_for_xgboost_from_mysql(table_name, number_data=20000):
"""
从数据库为xgboost模型读取数据,并进行时间格式转换
:param number_data: 取最后多少个数据来训练或者预测
:param table_name: 要读取的表名
:return:
"""
db = connectdb()
np_array = np.array(query_datas(db, table_name = table_name, number = number_data))
| np_array[1:]):
| conditional_block |
logger.go | io-go/v6/pkg/set"
"github.com/minio/minio/cmd/logger/message/log"
)
var (
// HighwayHash key for logging in anonymous mode
magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
// HighwayHash hasher for logging in anonymous mode
loggerHighwayHasher hash.Hash
)
// Disable disables all logging, false by default. (used for "go test")
var Disable = false
// Level type
type Level int8
// Enumerated level types
const (
InformationLvl Level = iota + 1
ErrorLvl
FatalLvl
)
var trimStrings []string
var globalDeploymentID string
// TimeFormat - logging time format.
const TimeFormat string = "15:04:05 MST 01/02/2006"
// List of error strings to be ignored by LogIf
const (
diskNotFoundError = "disk not found"
)
var matchingFuncNames = [...]string{
"http.HandlerFunc.ServeHTTP",
"cmd.serverMain",
"cmd.StartGateway",
"cmd.(*webAPIHandlers).ListBuckets",
"cmd.(*webAPIHandlers).MakeBucket",
"cmd.(*webAPIHandlers).DeleteBucket",
"cmd.(*webAPIHandlers).ListObjects",
"cmd.(*webAPIHandlers).RemoveObject",
"cmd.(*webAPIHandlers).Login",
"cmd.(*webAPIHandlers).GenerateAuth",
"cmd.(*webAPIHandlers).SetAuth",
"cmd.(*webAPIHandlers).GetAuth",
"cmd.(*webAPIHandlers).CreateURLToken",
"cmd.(*webAPIHandlers).Upload",
"cmd.(*webAPIHandlers).Download",
"cmd.(*webAPIHandlers).DownloadZip",
"cmd.(*webAPIHandlers).GetBucketPolicy",
"cmd.(*webAPIHandlers).ListAllBucketPolicies",
"cmd.(*webAPIHandlers).SetBucketPolicy",
"cmd.(*webAPIHandlers).PresignedGet",
"cmd.(*webAPIHandlers).ServerInfo",
"cmd.(*webAPIHandlers).StorageInfo",
// add more here ..
}
func (level Level) String() string {
var lvlStr string
switch level {
case InformationLvl:
lvlStr = "INFO"
case ErrorLvl:
lvlStr = "ERROR"
case FatalLvl:
lvlStr = "FATAL"
}
return lvlStr
}
// quietFlag: Hide startup messages if enabled
// jsonFlag: Display in JSON format, if enabled
var (
quietFlag, jsonFlag, anonFlag bool
// Custom function to format error
errorFmtFunc func(string, error, bool) string
)
// EnableQuiet - turns quiet option on.
func EnableQuiet() {
quietFlag = true
}
// EnableJSON - outputs logs in json format.
func EnableJSON() {
jsonFlag = true
quietFlag = true
}
// EnableAnonymous - turns anonymous flag
// to avoid printing sensitive information.
func EnableAnonymous() {
anonFlag = true
}
// IsJSON - returns true if jsonFlag is true
func IsJSON() bool {
return jsonFlag
}
// IsQuiet - returns true if quietFlag is true
func IsQuiet() bool {
return quietFlag
}
// RegisterUIError registers the specified rendering function. This latter
// will be called for a pretty rendering of fatal errors.
func | (f func(string, error, bool) string) {
errorFmtFunc = f
}
// Remove any duplicates and return unique entries.
func uniqueEntries(paths []string) []string {
m := make(set.StringSet)
for _, p := range paths {
if !m.Contains(p) {
m.Add(p)
}
}
return m.ToSlice()
}
// SetDeploymentID -- Deployment Id from the main package is set here
func SetDeploymentID(deploymentID string) {
globalDeploymentID = deploymentID
}
// Init sets the trimStrings to possible GOPATHs
// and GOROOT directories. Also append github.com/minio/minio
// This is done to clean up the filename, when stack trace is
// displayed when an error happens.
func Init(goPath string, goRoot string) {
var goPathList []string
var goRootList []string
var defaultgoPathList []string
var defaultgoRootList []string
pathSeperator := ":"
// Add all possible GOPATH paths into trimStrings
// Split GOPATH depending on the OS type
if runtime.GOOS == "windows" {
pathSeperator = ";"
}
goPathList = strings.Split(goPath, pathSeperator)
goRootList = strings.Split(goRoot, pathSeperator)
defaultgoPathList = strings.Split(build.Default.GOPATH, pathSeperator)
defaultgoRootList = strings.Split(build.Default.GOROOT, pathSeperator)
// Add trim string "{GOROOT}/src/" into trimStrings
trimStrings = []string{filepath.Join(runtime.GOROOT(), "src") + string(filepath.Separator)}
// Add all possible path from GOPATH=path1:path2...:pathN
// as "{path#}/src/" into trimStrings
for _, goPathString := range goPathList {
trimStrings = append(trimStrings, filepath.Join(goPathString, "src")+string(filepath.Separator))
}
for _, goRootString := range goRootList {
trimStrings = append(trimStrings, filepath.Join(goRootString, "src")+string(filepath.Separator))
}
for _, defaultgoPathString := range defaultgoPathList {
trimStrings = append(trimStrings, filepath.Join(defaultgoPathString, "src")+string(filepath.Separator))
}
for _, defaultgoRootString := range defaultgoRootList {
trimStrings = append(trimStrings, filepath.Join(defaultgoRootString, "src")+string(filepath.Separator))
}
// Remove duplicate entries.
trimStrings = uniqueEntries(trimStrings)
// Add "github.com/minio/minio" as the last to cover
// paths like "{GOROOT}/src/github.com/minio/minio"
// and "{GOPATH}/src/github.com/minio/minio"
trimStrings = append(trimStrings, filepath.Join("github.com", "minio", "minio")+string(filepath.Separator))
loggerHighwayHasher, _ = highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
}
func trimTrace(f string) string {
for _, trimString := range trimStrings {
f = strings.TrimPrefix(filepath.ToSlash(f), filepath.ToSlash(trimString))
}
return filepath.FromSlash(f)
}
func getSource(level int) string {
pc, file, lineNumber, ok := runtime.Caller(level)
if ok {
// Clean up the common prefixes
file = trimTrace(file)
_, funcName := filepath.Split(runtime.FuncForPC(pc).Name())
return fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName)
}
return ""
}
// getTrace method - creates and returns stack trace
func getTrace(traceLevel int) []string {
var trace []string
pc, file, lineNumber, ok := runtime.Caller(traceLevel)
for ok && file != "" {
// Clean up the common prefixes
file = trimTrace(file)
// Get the function name
_, funcName := filepath.Split(runtime.FuncForPC(pc).Name())
// Skip duplicate traces that start with file name, "<autogenerated>"
// and also skip traces with function name that starts with "runtime."
if !strings.HasPrefix(file, "<autogenerated>") &&
!strings.HasPrefix(funcName, "runtime.") {
// Form and append a line of stack trace into a
// collection, 'trace', to build full stack trace
trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName))
// Ignore trace logs beyond the following conditions
for _, name := range matchingFuncNames {
if funcName == name {
return trace
}
}
}
traceLevel++
// Read stack trace information from PC
pc, file, lineNumber, ok = runtime.Caller(traceLevel)
}
return trace
}
// Return the highway hash of the passed string
func hashString(input string) string {
defer loggerHighwayHasher.Reset()
loggerHighwayHasher.Write([]byte(input))
checksum := loggerHighwayHasher.Sum(nil)
return hex.EncodeToString(checksum)
}
// LogAlwaysIf prints a detailed error message during
// the execution of the server.
func LogAlwaysIf(ctx context.Context, err error) {
if err == nil {
return
}
logIf(ctx, err)
}
// LogIf prints a detailed error message during
// the execution of the server, if it is not an
// ignored error.
func LogIf(ctx context.Context, err error) {
if err == nil {
return
}
if err.Error() != diskNotFoundError {
logIf(ctx, err)
}
}
// logIf prints a detailed error message during
// the execution of the server.
func logIf(ctx context.Context, err error) {
if Disable {
return
| RegisterUIError | identifier_name |
logger.go | -go/v6/pkg/set"
"github.com/minio/minio/cmd/logger/message/log"
)
var (
// HighwayHash key for logging in anonymous mode
magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
// HighwayHash hasher for logging in anonymous mode
loggerHighwayHasher hash.Hash
)
// Disable disables all logging, false by default. (used for "go test")
var Disable = false
// Level type
type Level int8
// Enumerated level types
const (
InformationLvl Level = iota + 1
ErrorLvl
FatalLvl
)
var trimStrings []string
var globalDeploymentID string
// TimeFormat - logging time format.
const TimeFormat string = "15:04:05 MST 01/02/2006"
// List of error strings to be ignored by LogIf
const (
diskNotFoundError = "disk not found"
)
var matchingFuncNames = [...]string{
"http.HandlerFunc.ServeHTTP",
"cmd.serverMain",
"cmd.StartGateway",
"cmd.(*webAPIHandlers).ListBuckets",
"cmd.(*webAPIHandlers).MakeBucket",
"cmd.(*webAPIHandlers).DeleteBucket",
"cmd.(*webAPIHandlers).ListObjects",
"cmd.(*webAPIHandlers).RemoveObject",
"cmd.(*webAPIHandlers).Login",
"cmd.(*webAPIHandlers).GenerateAuth",
"cmd.(*webAPIHandlers).SetAuth",
"cmd.(*webAPIHandlers).GetAuth",
"cmd.(*webAPIHandlers).CreateURLToken",
"cmd.(*webAPIHandlers).Upload",
"cmd.(*webAPIHandlers).Download",
"cmd.(*webAPIHandlers).DownloadZip",
"cmd.(*webAPIHandlers).GetBucketPolicy",
"cmd.(*webAPIHandlers).ListAllBucketPolicies",
"cmd.(*webAPIHandlers).SetBucketPolicy",
"cmd.(*webAPIHandlers).PresignedGet",
"cmd.(*webAPIHandlers).ServerInfo",
"cmd.(*webAPIHandlers).StorageInfo",
// add more here ..
}
func (level Level) String() string {
var lvlStr string
switch level {
case InformationLvl:
lvlStr = "INFO"
case ErrorLvl:
lvlStr = "ERROR"
case FatalLvl:
lvlStr = "FATAL"
}
return lvlStr
}
// quietFlag: Hide startup messages if enabled
// jsonFlag: Display in JSON format, if enabled
var (
quietFlag, jsonFlag, anonFlag bool
// Custom function to format error
errorFmtFunc func(string, error, bool) string
)
// EnableQuiet - turns quiet option on.
func EnableQuiet() {
quietFlag = true
}
// EnableJSON - outputs logs in json format.
func EnableJSON() |
// EnableAnonymous - turns anonymous flag
// to avoid printing sensitive information.
func EnableAnonymous() {
anonFlag = true
}
// IsJSON - returns true if jsonFlag is true
func IsJSON() bool {
return jsonFlag
}
// IsQuiet - returns true if quietFlag is true
func IsQuiet() bool {
return quietFlag
}
// RegisterUIError registers the specified rendering function. This latter
// will be called for a pretty rendering of fatal errors.
func RegisterUIError(f func(string, error, bool) string) {
errorFmtFunc = f
}
// Remove any duplicates and return unique entries.
func uniqueEntries(paths []string) []string {
m := make(set.StringSet)
for _, p := range paths {
if !m.Contains(p) {
m.Add(p)
}
}
return m.ToSlice()
}
// SetDeploymentID -- Deployment Id from the main package is set here
func SetDeploymentID(deploymentID string) {
globalDeploymentID = deploymentID
}
// Init sets the trimStrings to possible GOPATHs
// and GOROOT directories. Also append github.com/minio/minio
// This is done to clean up the filename, when stack trace is
// displayed when an error happens.
func Init(goPath string, goRoot string) {
var goPathList []string
var goRootList []string
var defaultgoPathList []string
var defaultgoRootList []string
pathSeperator := ":"
// Add all possible GOPATH paths into trimStrings
// Split GOPATH depending on the OS type
if runtime.GOOS == "windows" {
pathSeperator = ";"
}
goPathList = strings.Split(goPath, pathSeperator)
goRootList = strings.Split(goRoot, pathSeperator)
defaultgoPathList = strings.Split(build.Default.GOPATH, pathSeperator)
defaultgoRootList = strings.Split(build.Default.GOROOT, pathSeperator)
// Add trim string "{GOROOT}/src/" into trimStrings
trimStrings = []string{filepath.Join(runtime.GOROOT(), "src") + string(filepath.Separator)}
// Add all possible path from GOPATH=path1:path2...:pathN
// as "{path#}/src/" into trimStrings
for _, goPathString := range goPathList {
trimStrings = append(trimStrings, filepath.Join(goPathString, "src")+string(filepath.Separator))
}
for _, goRootString := range goRootList {
trimStrings = append(trimStrings, filepath.Join(goRootString, "src")+string(filepath.Separator))
}
for _, defaultgoPathString := range defaultgoPathList {
trimStrings = append(trimStrings, filepath.Join(defaultgoPathString, "src")+string(filepath.Separator))
}
for _, defaultgoRootString := range defaultgoRootList {
trimStrings = append(trimStrings, filepath.Join(defaultgoRootString, "src")+string(filepath.Separator))
}
// Remove duplicate entries.
trimStrings = uniqueEntries(trimStrings)
// Add "github.com/minio/minio" as the last to cover
// paths like "{GOROOT}/src/github.com/minio/minio"
// and "{GOPATH}/src/github.com/minio/minio"
trimStrings = append(trimStrings, filepath.Join("github.com", "minio", "minio")+string(filepath.Separator))
loggerHighwayHasher, _ = highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
}
func trimTrace(f string) string {
for _, trimString := range trimStrings {
f = strings.TrimPrefix(filepath.ToSlash(f), filepath.ToSlash(trimString))
}
return filepath.FromSlash(f)
}
func getSource(level int) string {
pc, file, lineNumber, ok := runtime.Caller(level)
if ok {
// Clean up the common prefixes
file = trimTrace(file)
_, funcName := filepath.Split(runtime.FuncForPC(pc).Name())
return fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName)
}
return ""
}
// getTrace method - creates and returns stack trace
func getTrace(traceLevel int) []string {
var trace []string
pc, file, lineNumber, ok := runtime.Caller(traceLevel)
for ok && file != "" {
// Clean up the common prefixes
file = trimTrace(file)
// Get the function name
_, funcName := filepath.Split(runtime.FuncForPC(pc).Name())
// Skip duplicate traces that start with file name, "<autogenerated>"
// and also skip traces with function name that starts with "runtime."
if !strings.HasPrefix(file, "<autogenerated>") &&
!strings.HasPrefix(funcName, "runtime.") {
// Form and append a line of stack trace into a
// collection, 'trace', to build full stack trace
trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName))
// Ignore trace logs beyond the following conditions
for _, name := range matchingFuncNames {
if funcName == name {
return trace
}
}
}
traceLevel++
// Read stack trace information from PC
pc, file, lineNumber, ok = runtime.Caller(traceLevel)
}
return trace
}
// Return the highway hash of the passed string
func hashString(input string) string {
defer loggerHighwayHasher.Reset()
loggerHighwayHasher.Write([]byte(input))
checksum := loggerHighwayHasher.Sum(nil)
return hex.EncodeToString(checksum)
}
// LogAlwaysIf prints a detailed error message during
// the execution of the server.
func LogAlwaysIf(ctx context.Context, err error) {
if err == nil {
return
}
logIf(ctx, err)
}
// LogIf prints a detailed error message during
// the execution of the server, if it is not an
// ignored error.
func LogIf(ctx context.Context, err error) {
if err == nil {
return
}
if err.Error() != diskNotFoundError {
logIf(ctx, err)
}
}
// logIf prints a detailed error message during
// the execution of the server.
func logIf(ctx context.Context, err error) {
if Disable {
return
| {
jsonFlag = true
quietFlag = true
} | identifier_body |
logger.go | io-go/v6/pkg/set"
"github.com/minio/minio/cmd/logger/message/log"
)
var (
// HighwayHash key for logging in anonymous mode
magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
// HighwayHash hasher for logging in anonymous mode
loggerHighwayHasher hash.Hash
)
// Disable disables all logging, false by default. (used for "go test")
var Disable = false
// Level type
type Level int8
// Enumerated level types
const (
InformationLvl Level = iota + 1
ErrorLvl
FatalLvl
)
var trimStrings []string
var globalDeploymentID string
// TimeFormat - logging time format.
const TimeFormat string = "15:04:05 MST 01/02/2006"
// List of error strings to be ignored by LogIf
const (
diskNotFoundError = "disk not found"
)
var matchingFuncNames = [...]string{
"http.HandlerFunc.ServeHTTP",
"cmd.serverMain",
"cmd.StartGateway",
"cmd.(*webAPIHandlers).ListBuckets",
"cmd.(*webAPIHandlers).MakeBucket",
"cmd.(*webAPIHandlers).DeleteBucket",
"cmd.(*webAPIHandlers).ListObjects",
"cmd.(*webAPIHandlers).RemoveObject",
"cmd.(*webAPIHandlers).Login",
"cmd.(*webAPIHandlers).GenerateAuth",
"cmd.(*webAPIHandlers).SetAuth",
"cmd.(*webAPIHandlers).GetAuth",
"cmd.(*webAPIHandlers).CreateURLToken",
"cmd.(*webAPIHandlers).Upload",
"cmd.(*webAPIHandlers).Download",
"cmd.(*webAPIHandlers).DownloadZip",
"cmd.(*webAPIHandlers).GetBucketPolicy",
"cmd.(*webAPIHandlers).ListAllBucketPolicies",
"cmd.(*webAPIHandlers).SetBucketPolicy",
"cmd.(*webAPIHandlers).PresignedGet",
"cmd.(*webAPIHandlers).ServerInfo",
"cmd.(*webAPIHandlers).StorageInfo",
// add more here ..
}
func (level Level) String() string {
var lvlStr string
switch level {
case InformationLvl:
lvlStr = "INFO"
case ErrorLvl:
lvlStr = "ERROR"
case FatalLvl:
lvlStr = "FATAL"
}
return lvlStr
}
// quietFlag: Hide startup messages if enabled
// jsonFlag: Display in JSON format, if enabled
var (
quietFlag, jsonFlag, anonFlag bool
// Custom function to format error
errorFmtFunc func(string, error, bool) string
)
// EnableQuiet - turns quiet option on.
func EnableQuiet() {
quietFlag = true
}
// EnableJSON - outputs logs in json format.
func EnableJSON() {
jsonFlag = true
quietFlag = true
}
// EnableAnonymous - turns anonymous flag
// to avoid printing sensitive information.
func EnableAnonymous() {
anonFlag = true
}
// IsJSON - returns true if jsonFlag is true
func IsJSON() bool {
return jsonFlag
}
// IsQuiet - returns true if quietFlag is true
func IsQuiet() bool {
return quietFlag
}
// RegisterUIError registers the specified rendering function. This latter
// will be called for a pretty rendering of fatal errors.
func RegisterUIError(f func(string, error, bool) string) {
errorFmtFunc = f
}
// Remove any duplicates and return unique entries.
func uniqueEntries(paths []string) []string {
m := make(set.StringSet)
for _, p := range paths {
if !m.Contains(p) {
m.Add(p)
}
}
return m.ToSlice()
}
// SetDeploymentID -- Deployment Id from the main package is set here
func SetDeploymentID(deploymentID string) {
globalDeploymentID = deploymentID
}
// Init sets the trimStrings to possible GOPATHs
// and GOROOT directories. Also append github.com/minio/minio
// This is done to clean up the filename, when stack trace is
// displayed when an error happens.
func Init(goPath string, goRoot string) {
var goPathList []string
var goRootList []string
var defaultgoPathList []string
var defaultgoRootList []string
pathSeperator := ":"
// Add all possible GOPATH paths into trimStrings
// Split GOPATH depending on the OS type
if runtime.GOOS == "windows" {
pathSeperator = ";"
}
goPathList = strings.Split(goPath, pathSeperator)
goRootList = strings.Split(goRoot, pathSeperator)
defaultgoPathList = strings.Split(build.Default.GOPATH, pathSeperator) |
// Add trim string "{GOROOT}/src/" into trimStrings
trimStrings = []string{filepath.Join(runtime.GOROOT(), "src") + string(filepath.Separator)}
// Add all possible path from GOPATH=path1:path2...:pathN
// as "{path#}/src/" into trimStrings
for _, goPathString := range goPathList {
trimStrings = append(trimStrings, filepath.Join(goPathString, "src")+string(filepath.Separator))
}
for _, goRootString := range goRootList {
trimStrings = append(trimStrings, filepath.Join(goRootString, "src")+string(filepath.Separator))
}
for _, defaultgoPathString := range defaultgoPathList {
trimStrings = append(trimStrings, filepath.Join(defaultgoPathString, "src")+string(filepath.Separator))
}
for _, defaultgoRootString := range defaultgoRootList {
trimStrings = append(trimStrings, filepath.Join(defaultgoRootString, "src")+string(filepath.Separator))
}
// Remove duplicate entries.
trimStrings = uniqueEntries(trimStrings)
// Add "github.com/minio/minio" as the last to cover
// paths like "{GOROOT}/src/github.com/minio/minio"
// and "{GOPATH}/src/github.com/minio/minio"
trimStrings = append(trimStrings, filepath.Join("github.com", "minio", "minio")+string(filepath.Separator))
loggerHighwayHasher, _ = highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
}
func trimTrace(f string) string {
for _, trimString := range trimStrings {
f = strings.TrimPrefix(filepath.ToSlash(f), filepath.ToSlash(trimString))
}
return filepath.FromSlash(f)
}
func getSource(level int) string {
pc, file, lineNumber, ok := runtime.Caller(level)
if ok {
// Clean up the common prefixes
file = trimTrace(file)
_, funcName := filepath.Split(runtime.FuncForPC(pc).Name())
return fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName)
}
return ""
}
// getTrace method - creates and returns stack trace
func getTrace(traceLevel int) []string {
var trace []string
pc, file, lineNumber, ok := runtime.Caller(traceLevel)
for ok && file != "" {
// Clean up the common prefixes
file = trimTrace(file)
// Get the function name
_, funcName := filepath.Split(runtime.FuncForPC(pc).Name())
// Skip duplicate traces that start with file name, "<autogenerated>"
// and also skip traces with function name that starts with "runtime."
if !strings.HasPrefix(file, "<autogenerated>") &&
!strings.HasPrefix(funcName, "runtime.") {
// Form and append a line of stack trace into a
// collection, 'trace', to build full stack trace
trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName))
// Ignore trace logs beyond the following conditions
for _, name := range matchingFuncNames {
if funcName == name {
return trace
}
}
}
traceLevel++
// Read stack trace information from PC
pc, file, lineNumber, ok = runtime.Caller(traceLevel)
}
return trace
}
// Return the highway hash of the passed string
func hashString(input string) string {
defer loggerHighwayHasher.Reset()
loggerHighwayHasher.Write([]byte(input))
checksum := loggerHighwayHasher.Sum(nil)
return hex.EncodeToString(checksum)
}
// LogAlwaysIf prints a detailed error message during
// the execution of the server.
func LogAlwaysIf(ctx context.Context, err error) {
if err == nil {
return
}
logIf(ctx, err)
}
// LogIf prints a detailed error message during
// the execution of the server, if it is not an
// ignored error.
func LogIf(ctx context.Context, err error) {
if err == nil {
return
}
if err.Error() != diskNotFoundError {
logIf(ctx, err)
}
}
// logIf prints a detailed error message during
// the execution of the server.
func logIf(ctx context.Context, err error) {
if Disable {
return
| defaultgoRootList = strings.Split(build.Default.GOROOT, pathSeperator) | random_line_split |
logger.go | -go/v6/pkg/set"
"github.com/minio/minio/cmd/logger/message/log"
)
var (
// HighwayHash key for logging in anonymous mode
magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
// HighwayHash hasher for logging in anonymous mode
loggerHighwayHasher hash.Hash
)
// Disable disables all logging, false by default. (used for "go test")
var Disable = false
// Level type
type Level int8
// Enumerated level types
const (
InformationLvl Level = iota + 1
ErrorLvl
FatalLvl
)
var trimStrings []string
var globalDeploymentID string
// TimeFormat - logging time format.
const TimeFormat string = "15:04:05 MST 01/02/2006"
// List of error strings to be ignored by LogIf
const (
diskNotFoundError = "disk not found"
)
var matchingFuncNames = [...]string{
"http.HandlerFunc.ServeHTTP",
"cmd.serverMain",
"cmd.StartGateway",
"cmd.(*webAPIHandlers).ListBuckets",
"cmd.(*webAPIHandlers).MakeBucket",
"cmd.(*webAPIHandlers).DeleteBucket",
"cmd.(*webAPIHandlers).ListObjects",
"cmd.(*webAPIHandlers).RemoveObject",
"cmd.(*webAPIHandlers).Login",
"cmd.(*webAPIHandlers).GenerateAuth",
"cmd.(*webAPIHandlers).SetAuth",
"cmd.(*webAPIHandlers).GetAuth",
"cmd.(*webAPIHandlers).CreateURLToken",
"cmd.(*webAPIHandlers).Upload",
"cmd.(*webAPIHandlers).Download",
"cmd.(*webAPIHandlers).DownloadZip",
"cmd.(*webAPIHandlers).GetBucketPolicy",
"cmd.(*webAPIHandlers).ListAllBucketPolicies",
"cmd.(*webAPIHandlers).SetBucketPolicy",
"cmd.(*webAPIHandlers).PresignedGet",
"cmd.(*webAPIHandlers).ServerInfo",
"cmd.(*webAPIHandlers).StorageInfo",
// add more here ..
}
func (level Level) String() string {
var lvlStr string
switch level {
case InformationLvl:
lvlStr = "INFO"
case ErrorLvl:
lvlStr = "ERROR"
case FatalLvl:
lvlStr = "FATAL"
}
return lvlStr
}
// quietFlag: Hide startup messages if enabled
// jsonFlag: Display in JSON format, if enabled
var (
quietFlag, jsonFlag, anonFlag bool
// Custom function to format error
errorFmtFunc func(string, error, bool) string
)
// EnableQuiet - turns quiet option on.
func EnableQuiet() {
quietFlag = true
}
// EnableJSON - outputs logs in json format.
func EnableJSON() {
jsonFlag = true
quietFlag = true
}
// EnableAnonymous - turns anonymous flag
// to avoid printing sensitive information.
func EnableAnonymous() {
anonFlag = true
}
// IsJSON - returns true if jsonFlag is true
func IsJSON() bool {
return jsonFlag
}
// IsQuiet - returns true if quietFlag is true
func IsQuiet() bool {
return quietFlag
}
// RegisterUIError registers the specified rendering function. This latter
// will be called for a pretty rendering of fatal errors.
func RegisterUIError(f func(string, error, bool) string) {
errorFmtFunc = f
}
// Remove any duplicates and return unique entries.
func uniqueEntries(paths []string) []string {
m := make(set.StringSet)
for _, p := range paths {
if !m.Contains(p) {
m.Add(p)
}
}
return m.ToSlice()
}
// SetDeploymentID -- Deployment Id from the main package is set here
func SetDeploymentID(deploymentID string) {
globalDeploymentID = deploymentID
}
// Init sets the trimStrings to possible GOPATHs
// and GOROOT directories. Also append github.com/minio/minio
// This is done to clean up the filename, when stack trace is
// displayed when an error happens.
func Init(goPath string, goRoot string) {
var goPathList []string
var goRootList []string
var defaultgoPathList []string
var defaultgoRootList []string
pathSeperator := ":"
// Add all possible GOPATH paths into trimStrings
// Split GOPATH depending on the OS type
if runtime.GOOS == "windows" {
pathSeperator = ";"
}
goPathList = strings.Split(goPath, pathSeperator)
goRootList = strings.Split(goRoot, pathSeperator)
defaultgoPathList = strings.Split(build.Default.GOPATH, pathSeperator)
defaultgoRootList = strings.Split(build.Default.GOROOT, pathSeperator)
// Add trim string "{GOROOT}/src/" into trimStrings
trimStrings = []string{filepath.Join(runtime.GOROOT(), "src") + string(filepath.Separator)}
// Add all possible path from GOPATH=path1:path2...:pathN
// as "{path#}/src/" into trimStrings
for _, goPathString := range goPathList {
trimStrings = append(trimStrings, filepath.Join(goPathString, "src")+string(filepath.Separator))
}
for _, goRootString := range goRootList {
trimStrings = append(trimStrings, filepath.Join(goRootString, "src")+string(filepath.Separator))
}
for _, defaultgoPathString := range defaultgoPathList {
trimStrings = append(trimStrings, filepath.Join(defaultgoPathString, "src")+string(filepath.Separator))
}
for _, defaultgoRootString := range defaultgoRootList {
trimStrings = append(trimStrings, filepath.Join(defaultgoRootString, "src")+string(filepath.Separator))
}
// Remove duplicate entries.
trimStrings = uniqueEntries(trimStrings)
// Add "github.com/minio/minio" as the last to cover
// paths like "{GOROOT}/src/github.com/minio/minio"
// and "{GOPATH}/src/github.com/minio/minio"
trimStrings = append(trimStrings, filepath.Join("github.com", "minio", "minio")+string(filepath.Separator))
loggerHighwayHasher, _ = highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
}
func trimTrace(f string) string {
for _, trimString := range trimStrings {
f = strings.TrimPrefix(filepath.ToSlash(f), filepath.ToSlash(trimString))
}
return filepath.FromSlash(f)
}
func getSource(level int) string {
pc, file, lineNumber, ok := runtime.Caller(level)
if ok {
// Clean up the common prefixes
file = trimTrace(file)
_, funcName := filepath.Split(runtime.FuncForPC(pc).Name())
return fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName)
}
return ""
}
// getTrace method - creates and returns stack trace
func getTrace(traceLevel int) []string {
var trace []string
pc, file, lineNumber, ok := runtime.Caller(traceLevel)
for ok && file != "" {
// Clean up the common prefixes
file = trimTrace(file)
// Get the function name
_, funcName := filepath.Split(runtime.FuncForPC(pc).Name())
// Skip duplicate traces that start with file name, "<autogenerated>"
// and also skip traces with function name that starts with "runtime."
if !strings.HasPrefix(file, "<autogenerated>") &&
!strings.HasPrefix(funcName, "runtime.") {
// Form and append a line of stack trace into a
// collection, 'trace', to build full stack trace
trace = append(trace, fmt.Sprintf("%v:%v:%v()", file, lineNumber, funcName))
// Ignore trace logs beyond the following conditions
for _, name := range matchingFuncNames {
if funcName == name |
}
}
traceLevel++
// Read stack trace information from PC
pc, file, lineNumber, ok = runtime.Caller(traceLevel)
}
return trace
}
// Return the highway hash of the passed string
func hashString(input string) string {
defer loggerHighwayHasher.Reset()
loggerHighwayHasher.Write([]byte(input))
checksum := loggerHighwayHasher.Sum(nil)
return hex.EncodeToString(checksum)
}
// LogAlwaysIf prints a detailed error message during
// the execution of the server.
func LogAlwaysIf(ctx context.Context, err error) {
if err == nil {
return
}
logIf(ctx, err)
}
// LogIf prints a detailed error message during
// the execution of the server, if it is not an
// ignored error.
func LogIf(ctx context.Context, err error) {
if err == nil {
return
}
if err.Error() != diskNotFoundError {
logIf(ctx, err)
}
}
// logIf prints a detailed error message during
// the execution of the server.
func logIf(ctx context.Context, err error) {
if Disable {
return
| {
return trace
} | conditional_block |
select.go | }
if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
k++
}
if c.sortkey() < scases[lockorder[k]].c.sortkey() {
lockorder[j] = lockorder[k]
j = k
continue
}
break
}
lockorder[j] = o
}
if debugSelect {
for i := 0; i+1 < len(lockorder); i++ {
if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
throw("select: broken sort")
}
}
}
// lock all the channels involved in the select
sellock(scases, lockorder)
var (
gp *g
sg *sudog
c *hchan
k *scase
sglist *sudog
sgnext *sudog
qp unsafe.Pointer
nextp **sudog
)
// pass 1 - look for something already waiting
var casi int
var cas *scase
var caseSuccess bool
var caseReleaseTime int64 = -1
var recvOK bool
for _, casei := range pollorder {
casi = int(casei)
cas = &scases[casi]
c = cas.c
if casi >= nsends {
sg = c.sendq.dequeue()
if sg != nil {
goto recv
}
if c.qcount > 0 {
goto bufrecv
}
if c.closed != 0 {
goto rclose
}
} else {
if raceenabled {
racereadpc(c.raceaddr(), casePC(casi), chansendpc)
}
if c.closed != 0 {
goto sclose
}
sg = c.recvq.dequeue()
if sg != nil {
goto send
}
if c.qcount < c.dataqsiz {
goto bufsend
}
}
}
if !block {
selunlock(scases, lockorder)
casi = -1
goto retc
}
// pass 2 - enqueue on all chans
gp = getg()
if gp.waiting != nil {
throw("gp.waiting != nil")
}
nextp = &gp.waiting
for _, casei := range lockorder {
casi = int(casei)
cas = &scases[casi]
c = cas.c
sg := acquireSudog()
sg.g = gp
sg.isSelect = true
// No stack splits between assigning elem and enqueuing
// sg on gp.waiting where copystack can find it.
sg.elem = cas.elem
sg.releasetime = 0
if t0 != 0 {
sg.releasetime = -1
}
sg.c = c
// Construct waiting list in lock order.
*nextp = sg
nextp = &sg.waitlink
if casi < nsends {
c.sendq.enqueue(sg)
} else {
c.recvq.enqueue(sg)
}
}
// wait for someone to wake us up
gp.param = nil
// Signal to anyone trying to shrink our stack that we're about
// to park on a channel. The window between when this G's status
// changes and when we set gp.activeStackChans is not safe for
// stack shrinking.
atomic.Store8(&gp.parkingOnChan, 1)
gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
gp.activeStackChans = false
sellock(scases, lockorder)
gp.selectDone = 0
sg = (*sudog)(gp.param)
gp.param = nil
// pass 3 - dequeue from unsuccessful chans
// otherwise they stack up on quiet channels
// record the successful case, if any.
// We singly-linked up the SudoGs in lock order.
casi = -1
cas = nil
caseSuccess = false
sglist = gp.waiting
// Clear all elem before unlinking from gp.waiting.
for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
sg1.isSelect = false
sg1.elem = nil
sg1.c = nil
}
gp.waiting = nil
for _, casei := range lockorder {
k = &scases[casei]
if sg == sglist {
// sg has already been dequeued by the G that woke us up.
casi = int(casei)
cas = k
caseSuccess = sglist.success
if sglist.releasetime > 0 {
caseReleaseTime = sglist.releasetime
}
} else {
c = k.c
if int(casei) < nsends {
c.sendq.dequeueSudoG(sglist)
} else {
c.recvq.dequeueSudoG(sglist)
}
}
sgnext = sglist.waitlink
sglist.waitlink = nil
releaseSudog(sglist)
sglist = sgnext
}
if cas == nil {
throw("selectgo: bad wakeup")
}
c = cas.c
if debugSelect {
print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n")
}
if casi < nsends {
if !caseSuccess {
goto sclose
}
} else {
recvOK = caseSuccess
}
if raceenabled {
if casi < nsends {
raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
} else if cas.elem != nil {
raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
}
}
if msanenabled {
if casi < nsends {
msanread(cas.elem, c.elemtype.size)
} else if cas.elem != nil {
msanwrite(cas.elem, c.elemtype.size)
}
}
selunlock(scases, lockorder)
goto retc
bufrecv:
// can receive from buffer
if raceenabled {
if cas.elem != nil {
raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
}
racereleaseacquire(chanbuf(c, c.recvx))
}
if msanenabled && cas.elem != nil {
msanwrite(cas.elem, c.elemtype.size)
}
recvOK = true
qp = chanbuf(c, c.recvx)
if cas.elem != nil {
typedmemmove(c.elemtype, cas.elem, qp)
}
typedmemclr(c.elemtype, qp)
c.recvx++
if c.recvx == c.dataqsiz {
c.recvx = 0
}
c.qcount--
selunlock(scases, lockorder)
goto retc
bufsend:
// can send to buffer
if raceenabled {
racereleaseacquire(chanbuf(c, c.sendx))
raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
}
if msanenabled {
msanread(cas.elem, c.elemtype.size)
}
typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
c.sendx++
if c.sendx == c.dataqsiz {
c.sendx = 0
}
c.qcount++
selunlock(scases, lockorder)
goto retc
recv:
// can receive from sleeping sender (sg)
recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
if debugSelect {
print("syncrecv: cas0=", cas0, " c=", c, "\n")
}
recvOK = true
goto retc
rclose:
// read at end of closed channel
selunlock(scases, lockorder)
recvOK = false
if cas.elem != nil {
typedmemclr(c.elemtype, cas.elem)
}
if raceenabled {
raceacquire(c.raceaddr())
}
goto retc
send:
// can send to a sleeping receiver (sg)
if raceenabled {
raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
}
if msanenabled {
msanread(cas.elem, c.elemtype.size)
}
send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
if debugSelect {
| print("syncsend: cas0=", cas0, " c=", c, "\n")
}
g | conditional_block |
|
select.go | var lastc *hchan
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
if sg.c != lastc && lastc != nil {
// As soon as we unlock the channel, fields in
// any sudog with that channel may change,
// including c and waitlink. Since multiple
// sudogs may have the same channel, we unlock
// only after we've passed the last instance
// of a channel.
unlock(&lastc.lock)
}
lastc = sg.c
}
if lastc != nil {
unlock(&lastc.lock)
}
return true
}
f
unc block() {
gopark(nil, nil, waitReasonSelectNoCases, traceEvGoStop, 1) // forever
}
// selectgo implements the select statement.
//
// cas0 points to an array of type [ncases]scase, and order0 points to
// an array of type [2*ncases]uint16 where ncases must be <= 65536.
// Both reside on the goroutine's stack (regardless of any escaping in
// selectgo).
//
// For race detector builds, pc0 points to an array of type
// [ncases]uintptr (also on the stack); for other builds, it's set to
// nil.
//
// selectgo returns the index of the chosen scase, which matches the
// ordinal position of its respective select{recv,send,default} call.
// Also, if the chosen scase was a receive operation, it reports whether
// a value was received.
func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) {
if debugSelect {
print("select: cas0=", cas0, "\n")
}
// NOTE: In order to maintain a lean stack size, the number of scases
// is capped at 65536.
cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0))
order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0))
ncases := nsends + nrecvs
scases := cas1[:ncases:ncases]
pollorder := order1[:ncases:ncases]
lockorder := order1[ncases:][:ncases:ncases]
// NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler.
// Even when raceenabled is true, there might be select
// statements in packages compiled without -race (e.g.,
// ensureSigM in runtime/signal_unix.go).
var pcs []uintptr
if raceenabled && pc0 != nil {
pc1 := (*[1 << 16]uintptr)(unsafe.Pointer(pc0))
pcs = pc1[:ncases:ncases]
}
casePC := func(casi int) uintptr {
if pcs == nil {
return 0
}
return pcs[casi]
}
var t0 int64
if blockprofilerate > 0 {
t0 = cputicks()
}
// The compiler rewrites selects that statically have
// only 0 or 1 cases plus default into simpler constructs.
// The only way we can end up with such small sel.ncase
// values here is for a larger select in which most channels
// have been nilled out. The general code handles those
// cases correctly, and they are rare enough not to bother
// optimizing (and needing to test).
// generate permuted order
norder := 0
for i := range scases {
cas := &scases[i]
// Omit cases without channels from the poll and lock orders.
if cas.c == nil {
cas.elem = nil // allow GC
continue
}
j := fastrandn(uint32(norder + 1))
pollorder[norder] = pollorder[j]
pollorder[j] = uint16(i)
norder++
}
pollorder = pollorder[:norder]
lockorder = lockorder[:norder]
// sort the cases by Hchan address to get the locking order.
// simple heap sort, to guarantee n log n time and constant stack footprint.
for i := range lockorder {
j := i
// Start with the pollorder to permute cases on the same channel.
c := scases[pollorder[i]].c
for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
k := (j - 1) / 2
lockorder[j] = lockorder[k]
j = k
}
lockorder[j] = pollorder[i]
}
for i := len(lockorder) - 1; i >= 0; i-- {
o := lockorder[i]
c := scases[o].c
lockorder[i] = lockorder[0]
j := 0
for {
k := j*2 + 1
if k >= i {
break
}
if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
k++
}
if c.sortkey() < scases[lockorder[k]].c.sortkey() {
lockorder[j] = lockorder[k]
j = k
continue
}
break
}
lockorder[j] = o
}
if debugSelect {
for i := 0; i+1 < len(lockorder); i++ {
if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
throw("select: broken sort")
}
}
}
// lock all the channels involved in the select
sellock(scases, lockorder)
var (
gp *g
sg *sudog
c *hchan
k *scase
sglist *sudog
sgnext *sudog
qp unsafe.Pointer
nextp **sudog
)
// pass 1 - look for something already waiting
var casi int
var cas *scase
var caseSuccess bool
var caseReleaseTime int64 = -1
var recvOK bool
for _, casei := range pollorder {
casi = int(casei)
cas = &scases[casi]
c = cas.c
if casi >= nsends {
sg = c.sendq.dequeue()
if sg != nil {
goto recv
}
if c.qcount > 0 {
goto bufrecv
}
if c.closed != 0 {
goto rclose
}
} else {
if raceenabled {
racereadpc(c.raceaddr(), casePC(casi), chansendpc)
}
if c.closed != 0 {
goto sclose
}
sg = c.recvq.dequeue()
if sg != nil {
goto send
}
if c.qcount < c.dataqsiz {
goto bufsend
}
}
}
if !block {
selunlock(scases, lockorder)
casi = -1
goto retc
}
// pass 2 - enqueue on all chans
gp = getg()
if gp.waiting != nil {
throw("gp.waiting != nil")
}
nextp = &gp.waiting
for _, casei := range lockorder {
casi = int(casei)
cas = &scases[casi]
c = cas.c
sg := acquireSudog()
sg.g = gp
sg.isSelect = true
// No stack splits between assigning elem and en | // There are unlocked sudogs that point into gp's stack. Stack
// copying must lock the channels of those sudogs.
// Set activeStackChans here instead of before we try parking
// because we could self-deadlock in stack growth on a
// channel lock.
gp.activeStackChans = true
// Mark that it's safe for stack shrinking to occur now,
// because any thread acquiring this G's stack for shrinking
// is guaranteed to observe activeStackChans after this store.
atomic.Store8(&gp.parkingOnChan, 0)
// Make sure we unlock after setting activeStackChans and
// unsetting parkingOnChan. The moment we unlock any of the
// channel locks we risk gp getting readied by a channel operation
// and so gp could continue running before everything before the
// unlock is visible (even to gp itself).
// This must not access gp's stack (see gopark). In
// particular, it must not access the *hselect. That's okay,
// because by the time this is called, gp.waiting has all
// channels in lock order. | identifier_body |
|
select.go | // only after we've passed the last instance
// of a channel.
unlock(&lastc.lock)
}
lastc = sg.c
}
if lastc != nil {
unlock(&lastc.lock)
}
return true
}
func block() {
gopark(nil, nil, waitReasonSelectNoCases, traceEvGoStop, 1) // forever
}
// selectgo implements the select statement.
//
// cas0 points to an array of type [ncases]scase, and order0 points to
// an array of type [2*ncases]uint16 where ncases must be <= 65536.
// Both reside on the goroutine's stack (regardless of any escaping in
// selectgo).
//
// For race detector builds, pc0 points to an array of type
// [ncases]uintptr (also on the stack); for other builds, it's set to
// nil.
//
// selectgo returns the index of the chosen scase, which matches the
// ordinal position of its respective select{recv,send,default} call.
// Also, if the chosen scase was a receive operation, it reports whether
// a value was received.
func sel | s0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) {
if debugSelect {
print("select: cas0=", cas0, "\n")
}
// NOTE: In order to maintain a lean stack size, the number of scases
// is capped at 65536.
cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0))
order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0))
ncases := nsends + nrecvs
scases := cas1[:ncases:ncases]
pollorder := order1[:ncases:ncases]
lockorder := order1[ncases:][:ncases:ncases]
// NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler.
// Even when raceenabled is true, there might be select
// statements in packages compiled without -race (e.g.,
// ensureSigM in runtime/signal_unix.go).
var pcs []uintptr
if raceenabled && pc0 != nil {
pc1 := (*[1 << 16]uintptr)(unsafe.Pointer(pc0))
pcs = pc1[:ncases:ncases]
}
casePC := func(casi int) uintptr {
if pcs == nil {
return 0
}
return pcs[casi]
}
var t0 int64
if blockprofilerate > 0 {
t0 = cputicks()
}
// The compiler rewrites selects that statically have
// only 0 or 1 cases plus default into simpler constructs.
// The only way we can end up with such small sel.ncase
// values here is for a larger select in which most channels
// have been nilled out. The general code handles those
// cases correctly, and they are rare enough not to bother
// optimizing (and needing to test).
// generate permuted order
norder := 0
for i := range scases {
cas := &scases[i]
// Omit cases without channels from the poll and lock orders.
if cas.c == nil {
cas.elem = nil // allow GC
continue
}
j := fastrandn(uint32(norder + 1))
pollorder[norder] = pollorder[j]
pollorder[j] = uint16(i)
norder++
}
pollorder = pollorder[:norder]
lockorder = lockorder[:norder]
// sort the cases by Hchan address to get the locking order.
// simple heap sort, to guarantee n log n time and constant stack footprint.
for i := range lockorder {
j := i
// Start with the pollorder to permute cases on the same channel.
c := scases[pollorder[i]].c
for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
k := (j - 1) / 2
lockorder[j] = lockorder[k]
j = k
}
lockorder[j] = pollorder[i]
}
for i := len(lockorder) - 1; i >= 0; i-- {
o := lockorder[i]
c := scases[o].c
lockorder[i] = lockorder[0]
j := 0
for {
k := j*2 + 1
if k >= i {
break
}
if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
k++
}
if c.sortkey() < scases[lockorder[k]].c.sortkey() {
lockorder[j] = lockorder[k]
j = k
continue
}
break
}
lockorder[j] = o
}
if debugSelect {
for i := 0; i+1 < len(lockorder); i++ {
if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
throw("select: broken sort")
}
}
}
// lock all the channels involved in the select
sellock(scases, lockorder)
var (
gp *g
sg *sudog
c *hchan
k *scase
sglist *sudog
sgnext *sudog
qp unsafe.Pointer
nextp **sudog
)
// pass 1 - look for something already waiting
var casi int
var cas *scase
var caseSuccess bool
var caseReleaseTime int64 = -1
var recvOK bool
for _, casei := range pollorder {
casi = int(casei)
cas = &scases[casi]
c = cas.c
if casi >= nsends {
sg = c.sendq.dequeue()
if sg != nil {
goto recv
}
if c.qcount > 0 {
goto bufrecv
}
if c.closed != 0 {
goto rclose
}
} else {
if raceenabled {
racereadpc(c.raceaddr(), casePC(casi), chansendpc)
}
if c.closed != 0 {
goto sclose
}
sg = c.recvq.dequeue()
if sg != nil {
goto send
}
if c.qcount < c.dataqsiz {
goto bufsend
}
}
}
if !block {
selunlock(scases, lockorder)
casi = -1
goto retc
}
// pass 2 - enqueue on all chans
gp = getg()
if gp.waiting != nil {
throw("gp.waiting != nil")
}
nextp = &gp.waiting
for _, casei := range lockorder {
casi = int(casei)
cas = &scases[casi]
c = cas.c
sg := acquireSudog()
sg.g = gp
sg.isSelect = true
// No stack splits between assigning elem and enqueuing
// sg on gp.waiting where copystack can find it.
sg.elem = cas.elem
sg.releasetime = 0
if t0 != 0 {
sg.releasetime = -1
}
sg.c = c
// Construct waiting list in lock order.
*nextp = sg
nextp = &sg.waitlink
if casi < nsends {
c.sendq.enqueue(sg)
} else {
c.recvq.enqueue(sg)
}
}
// wait for someone to wake us up
gp.param = nil
// Signal to anyone trying to shrink our stack that we're about
// to park on a channel. The window between when this G's status
// changes and when we set gp.activeStackChans is not safe for
// stack shrinking.
atomic.Store8(&gp.parkingOnChan, 1)
gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
gp.activeStackChans = false
sellock(scases, lockorder)
gp.selectDone = 0
sg = (*sudog)(gp.param)
gp.param = nil
// pass 3 - dequeue from unsuccessful chans
// otherwise they stack up on quiet channels
// record the successful case, if any.
// We singly-linked up the SudoGs in lock order.
casi = -1
cas = nil
caseSuccess = false
sglist = | ectgo(ca | identifier_name |
select.go | on gp.waiting where copystack can find it.
sg.elem = cas.elem
sg.releasetime = 0
if t0 != 0 {
sg.releasetime = -1
}
sg.c = c
// Construct waiting list in lock order.
*nextp = sg
nextp = &sg.waitlink
if casi < nsends {
c.sendq.enqueue(sg)
} else {
c.recvq.enqueue(sg)
}
}
// wait for someone to wake us up
gp.param = nil
// Signal to anyone trying to shrink our stack that we're about
// to park on a channel. The window between when this G's status
// changes and when we set gp.activeStackChans is not safe for
// stack shrinking.
atomic.Store8(&gp.parkingOnChan, 1)
gopark(selparkcommit, nil, waitReasonSelect, traceEvGoBlockSelect, 1)
gp.activeStackChans = false
sellock(scases, lockorder)
gp.selectDone = 0
sg = (*sudog)(gp.param)
gp.param = nil
// pass 3 - dequeue from unsuccessful chans
// otherwise they stack up on quiet channels
// record the successful case, if any.
// We singly-linked up the SudoGs in lock order.
casi = -1
cas = nil
caseSuccess = false
sglist = gp.waiting
// Clear all elem before unlinking from gp.waiting.
for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
sg1.isSelect = false
sg1.elem = nil
sg1.c = nil
}
gp.waiting = nil
for _, casei := range lockorder {
k = &scases[casei]
if sg == sglist {
// sg has already been dequeued by the G that woke us up.
casi = int(casei)
cas = k
caseSuccess = sglist.success
if sglist.releasetime > 0 {
caseReleaseTime = sglist.releasetime
}
} else {
c = k.c
if int(casei) < nsends {
c.sendq.dequeueSudoG(sglist)
} else {
c.recvq.dequeueSudoG(sglist)
}
}
sgnext = sglist.waitlink
sglist.waitlink = nil
releaseSudog(sglist)
sglist = sgnext
}
if cas == nil {
throw("selectgo: bad wakeup")
}
c = cas.c
if debugSelect {
print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n")
}
if casi < nsends {
if !caseSuccess {
goto sclose
}
} else {
recvOK = caseSuccess
}
if raceenabled {
if casi < nsends {
raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
} else if cas.elem != nil {
raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
}
}
if msanenabled {
if casi < nsends {
msanread(cas.elem, c.elemtype.size)
} else if cas.elem != nil {
msanwrite(cas.elem, c.elemtype.size)
}
}
selunlock(scases, lockorder)
goto retc
bufrecv:
// can receive from buffer
if raceenabled {
if cas.elem != nil {
raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
}
racereleaseacquire(chanbuf(c, c.recvx))
}
if msanenabled && cas.elem != nil {
msanwrite(cas.elem, c.elemtype.size)
}
recvOK = true
qp = chanbuf(c, c.recvx)
if cas.elem != nil {
typedmemmove(c.elemtype, cas.elem, qp)
}
typedmemclr(c.elemtype, qp)
c.recvx++
if c.recvx == c.dataqsiz {
c.recvx = 0
}
c.qcount--
selunlock(scases, lockorder)
goto retc
bufsend:
// can send to buffer
if raceenabled {
racereleaseacquire(chanbuf(c, c.sendx))
raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
}
if msanenabled {
msanread(cas.elem, c.elemtype.size)
}
typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
c.sendx++
if c.sendx == c.dataqsiz {
c.sendx = 0
}
c.qcount++
selunlock(scases, lockorder)
goto retc
recv:
// can receive from sleeping sender (sg)
recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
if debugSelect {
print("syncrecv: cas0=", cas0, " c=", c, "\n")
}
recvOK = true
goto retc
rclose:
// read at end of closed channel
selunlock(scases, lockorder)
recvOK = false
if cas.elem != nil {
typedmemclr(c.elemtype, cas.elem)
}
if raceenabled {
raceacquire(c.raceaddr())
}
goto retc
send:
// can send to a sleeping receiver (sg)
if raceenabled {
raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
}
if msanenabled {
msanread(cas.elem, c.elemtype.size)
}
send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
if debugSelect {
print("syncsend: cas0=", cas0, " c=", c, "\n")
}
goto retc
retc:
if caseReleaseTime > 0 {
blockevent(caseReleaseTime-t0, 1)
}
return casi, recvOK
sclose:
// send on closed channel
selunlock(scases, lockorder)
panic(plainError("send on closed channel"))
}
func (c *hchan) sortkey() uintptr {
return uintptr(unsafe.Pointer(c))
}
// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
dir selectDir
typ unsafe.Pointer // channel type (not used here)
ch *hchan // channel
val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}
// These values must match ../reflect/value.go:/SelectDir.
type selectDir int
const (
_ selectDir = iota
selectSend // case Chan <- Send
selectRecv // case <-Chan:
selectDefault // default
)
//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (int, bool) {
if len(cases) == 0 {
block()
}
sel := make([]scase, len(cases))
orig := make([]int, len(cases))
nsends, nrecvs := 0, 0
dflt := -1
for i, rc := range cases {
var j int
switch rc.dir {
case selectDefault:
dflt = i
continue
case selectSend:
j = nsends
nsends++
case selectRecv:
nrecvs++
j = len(cases) - nrecvs
}
sel[j] = scase{c: rc.ch, elem: rc.val}
orig[j] = i
}
// Only a default case.
if nsends+nrecvs == 0 {
return dflt, false
}
// Compact sel and orig if necessary.
if nsends+nrecvs < len(cases) {
copy(sel[nsends:], sel[len(cases)-nrecvs:])
copy(orig[nsends:], orig[len(cases)-nrecvs:])
}
order := make([]uint16, 2*(nsends+nrecvs))
var pc0 *uintptr
if raceenabled {
pcs := make([]uintptr, nsends+nrecvs)
for i := range pcs {
selectsetpc(&pcs[i])
}
pc0 = &pcs[0]
}
chosen, recvOK := selectgo(&sel[0], &order[0], pc0, nsends, nrecvs, dflt == -1)
// Translate chosen back to caller's ordering.
if chosen < 0 {
chosen = dflt
} else {
chosen = orig[chosen]
}
return chosen, recvOK
}
func (q *waitq) dequeueSudoG(sgp *sudog) {
x := sgp.prev | y := sgp.next
if x != nil { | random_line_split |
|
main.go | direction direction
paintedPoints []*point
}
func (r *paintingRobot) run() {
wg := sync.WaitGroup{}
wg.Add(1)
go r.brain.execute()
go func() {
r.brain.inChannel <- 1
readingColor := true
robotLoop: for {
var scannedColor int
select {
case reading := <-r.brain.outChannel:
// Program outputs have 2 possible meanings that switch periodically:
// * color (0 - black, 1 - white)
// * rotation (0 - CCW, 1 - CW)
if readingColor {
r.paint(int(reading))
} else {
r.changeDirection(int(reading))
r.move()
scannedColor = r.scanColor()
// After orientation change the program expects the code of detected color on that position as input.
select {
case r.brain.inChannel <- int64(scannedColor):
fmt.Println("robot detected color ", scannedColor)
case <-r.brain.done:
}
}
readingColor = !readingColor
case <-r.brain.done:
wg.Done()
break robotLoop
}
}
}()
wg.Wait()
}
// Gives the tile a color based on input (0 - black, 1 - white).
// In order to keep track of unique painted tiles we keep record in slice and just repaint existing items.
func (r *paintingRobot) paint(color int) |
// Rotates the direction robot is facing - 0 for CW rotation and 1 for CCW rotation.
func (r *paintingRobot) changeDirection(input int) {
if input == 1 {
if r.direction == up {
r.direction = left
} else {
r.direction -= 1
}
} else {
if r.direction == left {
r.direction = up
} else {
r.direction += 1
}
}
}
// Moves the robot by 1 distance point in the direction it is currently facing.
func (r *paintingRobot) move() {
posX, posY := r.position.x, r.position.y
switch r.direction {
case up:
posY -= 1
case right:
posX += 1
case down:
posY += 1
case left:
posX -= 1
}
r.position = &point{
x: posX,
y: posY,
}
fmt.Println(fmt.Sprintf("robot moved to [%d,%d]", r.position.x, r.position.y))
}
// Gets the color of underlying tile (based on robot's position). Default color is black (0).
func (r *paintingRobot) scanColor() int {
for _, p := range r.paintedPoints {
if p.x == r.position.x && p.y == r.position.y {
return p.color
}
}
return 0
}
// Calculates overall size of the grid (painted) and center of both axis.
func (r paintingRobot) getGridInfo() (int, int, point) {
xMin, xMax, yMin, yMax := 0, 0, 0, 0
for _, p := range r.paintedPoints {
if p.x > xMax {
xMax = p.x
}
if p.x < xMin {
xMin = p.x
}
if p.y > yMax {
yMax = p.y
}
if p.y < yMin {
yMin = p.y
}
}
return int(math.Abs(float64(xMin))) + xMax + 1, int(math.Abs(float64(yMin))) + yMax + 1,
point{
x: int(math.Abs(float64(xMin))),
y: int(math.Abs(float64(yMin))),
}
}
func (r paintingRobot) getTileColor(x, y int) color.RGBA {
for _, p := range r.paintedPoints {
if x == p.x && y == p.y {
switch p.color {
case 0:
return color.RGBA{R: 0, G: 0, B: 0, A: 0xff}
case 1:
return color.RGBA{R: 255, G: 255, B: 255, A: 0xff}
}
}
}
return color.RGBA{R: 0, G: 0, B: 0, A: 0xff}
}
func (r paintingRobot) exportToImage(output string) {
canvasWidth, canvasHeight, center := r.getGridInfo()
startPoint := image.Point{X: 0, Y: 0}
endPoint := image.Point{X: canvasWidth, Y: canvasHeight}
img := image.NewRGBA(image.Rectangle{Min: startPoint, Max: endPoint})
for x := 0; x < canvasWidth; x++ {
for y := 0; y < canvasHeight; y++ {
img.Set(x, y, r.getTileColor(x - center.x, y - center.y))
}
}
f, err := os.Create(output)
if err != nil {
fmt.Println(err)
}
err = png.Encode(f, img)
if err != nil {
fmt.Println(err)
}
}
type point struct {
x int
y int
color int
}
type instruction struct {
operation instructionOperation
length int
params []instructionParam
}
func (i *instruction) initialize(intCode []int64, pIndex int) {
instValue := int(intCode[pIndex])
i.operation = instructionOperation(instValue)
// Standard operation Codes are between 1 and 99, larger number means that Parameter Modes are included there
evalParamModes := false
if instValue >= 100 {
i.operation = instructionOperation(instValue % 100)
evalParamModes = true
}
i.length = InstructionLength[i.operation]
paramCount := i.length - 1
i.params = make([]instructionParam, paramCount, paramCount)
for j := 0; j < paramCount; j++ {
i.params[j] = instructionParam{0, intCode[pIndex+j+1]}
// Parameter mode is either 0 (by reference) or 1 (by value) and this mode is specified
// in the instruction code itself (as given number at respective position)
if evalParamModes {
i.params[j].mode = (instValue / int(math.Pow(float64(10), float64(j+2)))) % 10
}
}
}
func (i *instruction) getValuesCount() int {
switch i.operation {
case Add, Multiply, JumpIfTrue, JumpIfFalse, LessThan, Equals:
return 2
case Write, SetRelativeBase:
return 1
default:
return 0
}
}
func (i *instruction) doesStoreOutputInMemory() bool {
return i.operation == Read || i.operation == Add || i.operation == Multiply || i.operation == LessThan || i.operation == Equals
}
type instructionParam struct {
mode int
value int64
}
type program struct {
memory []int64
memorySize int
position int
relativeBase int
completed bool
halt bool
inChannel chan int64
outChannel chan int64
done chan interface{}
dataStack []int64
haltOnOutput bool
}
func (p *program) loadCodeFromFile(file string) {
bytes, err := ioutil.ReadFile(file)
if err != nil {
fmt.Println(err)
}
inputs := strings.Split(string(bytes), ",")
intInputs, err := convertStringArray(inputs)
if err != nil {
fmt.Println(err)
}
p.memorySize = len(intInputs) * 10
p.memory = make([]int64, p.memorySize, p.memorySize)
for i := 0; i < len(intInputs); i++ {
p.memory[i] = intInputs[i]
}
}
func (p *program) resetState() {
p.position = 0
p.completed = false
p.halt = false
}
func (p *program) resetMemory() {
p.dataStack = make([]int64, p.memorySize, p.memorySize)
}
func (p *program) execute() {
for !p.completed && !p.halt {
var instruction instruction
instruction.initialize(p.memory, p.position)
p.loadParameterValues(&instruction)
switch instruction.operation {
case Add:
p.doAdd(&instruction)
case Multiply:
p.doMultiply(&instruction | {
fmt.Println(fmt.Sprintf("robot paints [%d,%d] to color %d", r.position.x, r.position.y, color))
for _, p := range r.paintedPoints {
if p.x == r.position.x && p.y == r.position.y {
p.color = color
fmt.Println("just repainted, # of painted tiles: ", len(r.paintedPoints))
return
}
}
r.position.color = color
r.paintedPoints = append(r.paintedPoints, r.position)
fmt.Println("NEW painting, # of painted tiles: ", len(r.paintedPoints))
} | identifier_body |
main.go |
direction direction
paintedPoints []*point
}
func (r *paintingRobot) run() {
wg := sync.WaitGroup{}
wg.Add(1)
go r.brain.execute()
go func() {
r.brain.inChannel <- 1
readingColor := true
robotLoop: for {
var scannedColor int
select {
case reading := <-r.brain.outChannel:
// Program outputs have 2 possible meanings that switch periodically:
// * color (0 - black, 1 - white)
// * rotation (0 - CCW, 1 - CW)
if readingColor {
r.paint(int(reading))
} else {
r.changeDirection(int(reading))
r.move()
scannedColor = r.scanColor()
// After orientation change the program expects the code of detected color on that position as input.
select {
case r.brain.inChannel <- int64(scannedColor):
fmt.Println("robot detected color ", scannedColor)
case <-r.brain.done:
}
}
readingColor = !readingColor
case <-r.brain.done:
wg.Done()
break robotLoop
}
}
}()
wg.Wait()
}
// Gives the tile a color based on input (0 - black, 1 - white).
// In order to keep track of unique painted tiles we keep record in slice and just repaint existing items.
func (r *paintingRobot) paint(color int) {
fmt.Println(fmt.Sprintf("robot paints [%d,%d] to color %d", r.position.x, r.position.y, color))
for _, p := range r.paintedPoints {
if p.x == r.position.x && p.y == r.position.y {
p.color = color
fmt.Println("just repainted, # of painted tiles: ", len(r.paintedPoints))
return
}
}
r.position.color = color
r.paintedPoints = append(r.paintedPoints, r.position)
fmt.Println("NEW painting, # of painted tiles: ", len(r.paintedPoints))
}
// Rotates the direction robot is facing - 0 for CW rotation and 1 for CCW rotation.
func (r *paintingRobot) changeDirection(input int) {
if input == 1 {
if r.direction == up {
r.direction = left
} else {
r.direction -= 1
}
} else {
if r.direction == left {
r.direction = up
} else {
r.direction += 1
}
}
}
// Moves the robot by 1 distance point in the direction it is currently facing.
func (r *paintingRobot) move() {
posX, posY := r.position.x, r.position.y
switch r.direction {
case up:
posY -= 1
case right:
posX += 1
case down:
posY += 1
case left:
posX -= 1
}
r.position = &point{
x: posX,
y: posY,
}
fmt.Println(fmt.Sprintf("robot moved to [%d,%d]", r.position.x, r.position.y))
}
// Gets the color of underlying tile (based on robot's position). Default color is black (0).
func (r *paintingRobot) scanColor() int {
for _, p := range r.paintedPoints {
if p.x == r.position.x && p.y == r.position.y {
return p.color
}
}
return 0
}
// Calculates overall size of the grid (painted) and center of both axis.
func (r paintingRobot) | () (int, int, point) {
xMin, xMax, yMin, yMax := 0, 0, 0, 0
for _, p := range r.paintedPoints {
if p.x > xMax {
xMax = p.x
}
if p.x < xMin {
xMin = p.x
}
if p.y > yMax {
yMax = p.y
}
if p.y < yMin {
yMin = p.y
}
}
return int(math.Abs(float64(xMin))) + xMax + 1, int(math.Abs(float64(yMin))) + yMax + 1,
point{
x: int(math.Abs(float64(xMin))),
y: int(math.Abs(float64(yMin))),
}
}
func (r paintingRobot) getTileColor(x, y int) color.RGBA {
for _, p := range r.paintedPoints {
if x == p.x && y == p.y {
switch p.color {
case 0:
return color.RGBA{R: 0, G: 0, B: 0, A: 0xff}
case 1:
return color.RGBA{R: 255, G: 255, B: 255, A: 0xff}
}
}
}
return color.RGBA{R: 0, G: 0, B: 0, A: 0xff}
}
func (r paintingRobot) exportToImage(output string) {
canvasWidth, canvasHeight, center := r.getGridInfo()
startPoint := image.Point{X: 0, Y: 0}
endPoint := image.Point{X: canvasWidth, Y: canvasHeight}
img := image.NewRGBA(image.Rectangle{Min: startPoint, Max: endPoint})
for x := 0; x < canvasWidth; x++ {
for y := 0; y < canvasHeight; y++ {
img.Set(x, y, r.getTileColor(x - center.x, y - center.y))
}
}
f, err := os.Create(output)
if err != nil {
fmt.Println(err)
}
err = png.Encode(f, img)
if err != nil {
fmt.Println(err)
}
}
type point struct {
x int
y int
color int
}
type instruction struct {
operation instructionOperation
length int
params []instructionParam
}
func (i *instruction) initialize(intCode []int64, pIndex int) {
instValue := int(intCode[pIndex])
i.operation = instructionOperation(instValue)
// Standard operation Codes are between 1 and 99, larger number means that Parameter Modes are included there
evalParamModes := false
if instValue >= 100 {
i.operation = instructionOperation(instValue % 100)
evalParamModes = true
}
i.length = InstructionLength[i.operation]
paramCount := i.length - 1
i.params = make([]instructionParam, paramCount, paramCount)
for j := 0; j < paramCount; j++ {
i.params[j] = instructionParam{0, intCode[pIndex+j+1]}
// Parameter mode is either 0 (by reference) or 1 (by value) and this mode is specified
// in the instruction code itself (as given number at respective position)
if evalParamModes {
i.params[j].mode = (instValue / int(math.Pow(float64(10), float64(j+2)))) % 10
}
}
}
func (i *instruction) getValuesCount() int {
switch i.operation {
case Add, Multiply, JumpIfTrue, JumpIfFalse, LessThan, Equals:
return 2
case Write, SetRelativeBase:
return 1
default:
return 0
}
}
func (i *instruction) doesStoreOutputInMemory() bool {
return i.operation == Read || i.operation == Add || i.operation == Multiply || i.operation == LessThan || i.operation == Equals
}
type instructionParam struct {
mode int
value int64
}
type program struct {
memory []int64
memorySize int
position int
relativeBase int
completed bool
halt bool
inChannel chan int64
outChannel chan int64
done chan interface{}
dataStack []int64
haltOnOutput bool
}
func (p *program) loadCodeFromFile(file string) {
bytes, err := ioutil.ReadFile(file)
if err != nil {
fmt.Println(err)
}
inputs := strings.Split(string(bytes), ",")
intInputs, err := convertStringArray(inputs)
if err != nil {
fmt.Println(err)
}
p.memorySize = len(intInputs) * 10
p.memory = make([]int64, p.memorySize, p.memorySize)
for i := 0; i < len(intInputs); i++ {
p.memory[i] = intInputs[i]
}
}
func (p *program) resetState() {
p.position = 0
p.completed = false
p.halt = false
}
func (p *program) resetMemory() {
p.dataStack = make([]int64, p.memorySize, p.memorySize)
}
func (p *program) execute() {
for !p.completed && !p.halt {
var instruction instruction
instruction.initialize(p.memory, p.position)
p.loadParameterValues(&instruction)
switch instruction.operation {
case Add:
p.doAdd(&instruction)
case Multiply:
p.doMultiply(&instruction | getGridInfo | identifier_name |
main.go |
direction direction
paintedPoints []*point
}
func (r *paintingRobot) run() {
wg := sync.WaitGroup{}
wg.Add(1)
go r.brain.execute()
go func() {
r.brain.inChannel <- 1
readingColor := true
robotLoop: for {
var scannedColor int
select {
case reading := <-r.brain.outChannel:
// Program outputs have 2 possible meanings that switch periodically:
// * color (0 - black, 1 - white)
// * rotation (0 - CCW, 1 - CW)
if readingColor {
r.paint(int(reading))
} else {
r.changeDirection(int(reading))
r.move()
scannedColor = r.scanColor()
// After orientation change the program expects the code of detected color on that position as input.
select {
case r.brain.inChannel <- int64(scannedColor):
fmt.Println("robot detected color ", scannedColor)
case <-r.brain.done:
}
}
| readingColor = !readingColor
case <-r.brain.done:
wg.Done()
break robotLoop
}
}
}()
wg.Wait()
}
// Gives the tile a color based on input (0 - black, 1 - white).
// In order to keep track of unique painted tiles we keep record in slice and just repaint existing items.
func (r *paintingRobot) paint(color int) {
fmt.Println(fmt.Sprintf("robot paints [%d,%d] to color %d", r.position.x, r.position.y, color))
for _, p := range r.paintedPoints {
if p.x == r.position.x && p.y == r.position.y {
p.color = color
fmt.Println("just repainted, # of painted tiles: ", len(r.paintedPoints))
return
}
}
r.position.color = color
r.paintedPoints = append(r.paintedPoints, r.position)
fmt.Println("NEW painting, # of painted tiles: ", len(r.paintedPoints))
}
// Rotates the direction robot is facing - 0 for CW rotation and 1 for CCW rotation.
func (r *paintingRobot) changeDirection(input int) {
if input == 1 {
if r.direction == up {
r.direction = left
} else {
r.direction -= 1
}
} else {
if r.direction == left {
r.direction = up
} else {
r.direction += 1
}
}
}
// Moves the robot by 1 distance point in the direction it is currently facing.
func (r *paintingRobot) move() {
posX, posY := r.position.x, r.position.y
switch r.direction {
case up:
posY -= 1
case right:
posX += 1
case down:
posY += 1
case left:
posX -= 1
}
r.position = &point{
x: posX,
y: posY,
}
fmt.Println(fmt.Sprintf("robot moved to [%d,%d]", r.position.x, r.position.y))
}
// Gets the color of underlying tile (based on robot's position). Default color is black (0).
func (r *paintingRobot) scanColor() int {
for _, p := range r.paintedPoints {
if p.x == r.position.x && p.y == r.position.y {
return p.color
}
}
return 0
}
// Calculates overall size of the grid (painted) and center of both axis.
func (r paintingRobot) getGridInfo() (int, int, point) {
xMin, xMax, yMin, yMax := 0, 0, 0, 0
for _, p := range r.paintedPoints {
if p.x > xMax {
xMax = p.x
}
if p.x < xMin {
xMin = p.x
}
if p.y > yMax {
yMax = p.y
}
if p.y < yMin {
yMin = p.y
}
}
return int(math.Abs(float64(xMin))) + xMax + 1, int(math.Abs(float64(yMin))) + yMax + 1,
point{
x: int(math.Abs(float64(xMin))),
y: int(math.Abs(float64(yMin))),
}
}
func (r paintingRobot) getTileColor(x, y int) color.RGBA {
for _, p := range r.paintedPoints {
if x == p.x && y == p.y {
switch p.color {
case 0:
return color.RGBA{R: 0, G: 0, B: 0, A: 0xff}
case 1:
return color.RGBA{R: 255, G: 255, B: 255, A: 0xff}
}
}
}
return color.RGBA{R: 0, G: 0, B: 0, A: 0xff}
}
func (r paintingRobot) exportToImage(output string) {
canvasWidth, canvasHeight, center := r.getGridInfo()
startPoint := image.Point{X: 0, Y: 0}
endPoint := image.Point{X: canvasWidth, Y: canvasHeight}
img := image.NewRGBA(image.Rectangle{Min: startPoint, Max: endPoint})
for x := 0; x < canvasWidth; x++ {
for y := 0; y < canvasHeight; y++ {
img.Set(x, y, r.getTileColor(x - center.x, y - center.y))
}
}
f, err := os.Create(output)
if err != nil {
fmt.Println(err)
}
err = png.Encode(f, img)
if err != nil {
fmt.Println(err)
}
}
type point struct {
x int
y int
color int
}
type instruction struct {
operation instructionOperation
length int
params []instructionParam
}
func (i *instruction) initialize(intCode []int64, pIndex int) {
instValue := int(intCode[pIndex])
i.operation = instructionOperation(instValue)
// Standard operation Codes are between 1 and 99, larger number means that Parameter Modes are included there
evalParamModes := false
if instValue >= 100 {
i.operation = instructionOperation(instValue % 100)
evalParamModes = true
}
i.length = InstructionLength[i.operation]
paramCount := i.length - 1
i.params = make([]instructionParam, paramCount, paramCount)
for j := 0; j < paramCount; j++ {
i.params[j] = instructionParam{0, intCode[pIndex+j+1]}
// Parameter mode is either 0 (by reference) or 1 (by value) and this mode is specified
// in the instruction code itself (as given number at respective position)
if evalParamModes {
i.params[j].mode = (instValue / int(math.Pow(float64(10), float64(j+2)))) % 10
}
}
}
func (i *instruction) getValuesCount() int {
switch i.operation {
case Add, Multiply, JumpIfTrue, JumpIfFalse, LessThan, Equals:
return 2
case Write, SetRelativeBase:
return 1
default:
return 0
}
}
func (i *instruction) doesStoreOutputInMemory() bool {
return i.operation == Read || i.operation == Add || i.operation == Multiply || i.operation == LessThan || i.operation == Equals
}
type instructionParam struct {
mode int
value int64
}
type program struct {
memory []int64
memorySize int
position int
relativeBase int
completed bool
halt bool
inChannel chan int64
outChannel chan int64
done chan interface{}
dataStack []int64
haltOnOutput bool
}
func (p *program) loadCodeFromFile(file string) {
bytes, err := ioutil.ReadFile(file)
if err != nil {
fmt.Println(err)
}
inputs := strings.Split(string(bytes), ",")
intInputs, err := convertStringArray(inputs)
if err != nil {
fmt.Println(err)
}
p.memorySize = len(intInputs) * 10
p.memory = make([]int64, p.memorySize, p.memorySize)
for i := 0; i < len(intInputs); i++ {
p.memory[i] = intInputs[i]
}
}
func (p *program) resetState() {
p.position = 0
p.completed = false
p.halt = false
}
func (p *program) resetMemory() {
p.dataStack = make([]int64, p.memorySize, p.memorySize)
}
func (p *program) execute() {
for !p.completed && !p.halt {
var instruction instruction
instruction.initialize(p.memory, p.position)
p.loadParameterValues(&instruction)
switch instruction.operation {
case Add:
p.doAdd(&instruction)
case Multiply:
p.doMultiply(&instruction)
| random_line_split |
|
main.go |
direction direction
paintedPoints []*point
}
func (r *paintingRobot) run() {
wg := sync.WaitGroup{}
wg.Add(1)
go r.brain.execute()
go func() {
r.brain.inChannel <- 1
readingColor := true
robotLoop: for {
var scannedColor int
select {
case reading := <-r.brain.outChannel:
// Program outputs have 2 possible meanings that switch periodically:
// * color (0 - black, 1 - white)
// * rotation (0 - CCW, 1 - CW)
if readingColor {
r.paint(int(reading))
} else {
r.changeDirection(int(reading))
r.move()
scannedColor = r.scanColor()
// After orientation change the program expects the code of detected color on that position as input.
select {
case r.brain.inChannel <- int64(scannedColor):
fmt.Println("robot detected color ", scannedColor)
case <-r.brain.done:
}
}
readingColor = !readingColor
case <-r.brain.done:
wg.Done()
break robotLoop
}
}
}()
wg.Wait()
}
// Gives the tile a color based on input (0 - black, 1 - white).
// In order to keep track of unique painted tiles we keep record in slice and just repaint existing items.
func (r *paintingRobot) paint(color int) {
fmt.Println(fmt.Sprintf("robot paints [%d,%d] to color %d", r.position.x, r.position.y, color))
for _, p := range r.paintedPoints {
if p.x == r.position.x && p.y == r.position.y {
p.color = color
fmt.Println("just repainted, # of painted tiles: ", len(r.paintedPoints))
return
}
}
r.position.color = color
r.paintedPoints = append(r.paintedPoints, r.position)
fmt.Println("NEW painting, # of painted tiles: ", len(r.paintedPoints))
}
// Rotates the direction robot is facing - 0 for CW rotation and 1 for CCW rotation.
func (r *paintingRobot) changeDirection(input int) {
if input == 1 {
if r.direction == up {
r.direction = left
} else {
r.direction -= 1
}
} else {
if r.direction == left | else {
r.direction += 1
}
}
}
// Moves the robot by 1 distance point in the direction it is currently facing.
func (r *paintingRobot) move() {
posX, posY := r.position.x, r.position.y
switch r.direction {
case up:
posY -= 1
case right:
posX += 1
case down:
posY += 1
case left:
posX -= 1
}
r.position = &point{
x: posX,
y: posY,
}
fmt.Println(fmt.Sprintf("robot moved to [%d,%d]", r.position.x, r.position.y))
}
// Gets the color of underlying tile (based on robot's position). Default color is black (0).
func (r *paintingRobot) scanColor() int {
for _, p := range r.paintedPoints {
if p.x == r.position.x && p.y == r.position.y {
return p.color
}
}
return 0
}
// Calculates overall size of the grid (painted) and center of both axis.
func (r paintingRobot) getGridInfo() (int, int, point) {
xMin, xMax, yMin, yMax := 0, 0, 0, 0
for _, p := range r.paintedPoints {
if p.x > xMax {
xMax = p.x
}
if p.x < xMin {
xMin = p.x
}
if p.y > yMax {
yMax = p.y
}
if p.y < yMin {
yMin = p.y
}
}
return int(math.Abs(float64(xMin))) + xMax + 1, int(math.Abs(float64(yMin))) + yMax + 1,
point{
x: int(math.Abs(float64(xMin))),
y: int(math.Abs(float64(yMin))),
}
}
func (r paintingRobot) getTileColor(x, y int) color.RGBA {
for _, p := range r.paintedPoints {
if x == p.x && y == p.y {
switch p.color {
case 0:
return color.RGBA{R: 0, G: 0, B: 0, A: 0xff}
case 1:
return color.RGBA{R: 255, G: 255, B: 255, A: 0xff}
}
}
}
return color.RGBA{R: 0, G: 0, B: 0, A: 0xff}
}
func (r paintingRobot) exportToImage(output string) {
canvasWidth, canvasHeight, center := r.getGridInfo()
startPoint := image.Point{X: 0, Y: 0}
endPoint := image.Point{X: canvasWidth, Y: canvasHeight}
img := image.NewRGBA(image.Rectangle{Min: startPoint, Max: endPoint})
for x := 0; x < canvasWidth; x++ {
for y := 0; y < canvasHeight; y++ {
img.Set(x, y, r.getTileColor(x - center.x, y - center.y))
}
}
f, err := os.Create(output)
if err != nil {
fmt.Println(err)
}
err = png.Encode(f, img)
if err != nil {
fmt.Println(err)
}
}
type point struct {
x int
y int
color int
}
type instruction struct {
operation instructionOperation
length int
params []instructionParam
}
func (i *instruction) initialize(intCode []int64, pIndex int) {
instValue := int(intCode[pIndex])
i.operation = instructionOperation(instValue)
// Standard operation Codes are between 1 and 99, larger number means that Parameter Modes are included there
evalParamModes := false
if instValue >= 100 {
i.operation = instructionOperation(instValue % 100)
evalParamModes = true
}
i.length = InstructionLength[i.operation]
paramCount := i.length - 1
i.params = make([]instructionParam, paramCount, paramCount)
for j := 0; j < paramCount; j++ {
i.params[j] = instructionParam{0, intCode[pIndex+j+1]}
// Parameter mode is either 0 (by reference) or 1 (by value) and this mode is specified
// in the instruction code itself (as given number at respective position)
if evalParamModes {
i.params[j].mode = (instValue / int(math.Pow(float64(10), float64(j+2)))) % 10
}
}
}
func (i *instruction) getValuesCount() int {
switch i.operation {
case Add, Multiply, JumpIfTrue, JumpIfFalse, LessThan, Equals:
return 2
case Write, SetRelativeBase:
return 1
default:
return 0
}
}
func (i *instruction) doesStoreOutputInMemory() bool {
return i.operation == Read || i.operation == Add || i.operation == Multiply || i.operation == LessThan || i.operation == Equals
}
type instructionParam struct {
mode int
value int64
}
type program struct {
memory []int64
memorySize int
position int
relativeBase int
completed bool
halt bool
inChannel chan int64
outChannel chan int64
done chan interface{}
dataStack []int64
haltOnOutput bool
}
func (p *program) loadCodeFromFile(file string) {
bytes, err := ioutil.ReadFile(file)
if err != nil {
fmt.Println(err)
}
inputs := strings.Split(string(bytes), ",")
intInputs, err := convertStringArray(inputs)
if err != nil {
fmt.Println(err)
}
p.memorySize = len(intInputs) * 10
p.memory = make([]int64, p.memorySize, p.memorySize)
for i := 0; i < len(intInputs); i++ {
p.memory[i] = intInputs[i]
}
}
func (p *program) resetState() {
p.position = 0
p.completed = false
p.halt = false
}
func (p *program) resetMemory() {
p.dataStack = make([]int64, p.memorySize, p.memorySize)
}
func (p *program) execute() {
for !p.completed && !p.halt {
var instruction instruction
instruction.initialize(p.memory, p.position)
p.loadParameterValues(&instruction)
switch instruction.operation {
case Add:
p.doAdd(&instruction)
case Multiply:
p.doMultiply(&instruction | {
r.direction = up
} | conditional_block |
crud.ts | each, clone, map, filter as arr_filter, reduce, find,
values, Dictionary } from 'lodash';
import * as q from 'q';
import { v4 } from 'node-uuid';
const ID_MAP: SDict = { id: 'id' };
const ID_FIELDS = [ 'id', 'updated_by', 'created_by' ];
type RequestHandler = (req: Request, res: Response, next?: Function) => void
type SDict = Dictionary<string>;
type Tag = { tag: string, val: any };
type Query = SDict & any;
type QueryOptions = UpdateOptions & DestroyOptions & FindOptions;
interface QueryResultsModelMeta {
count?: number;
includes_me?: boolean;
}
// XXX remove this and just use `next`
function error(res: Response, err: Error) {
res.status(500);
res.json(<ServiceResponseV1<SDict>>{
meta: {
ok: false,
message: err.message,
},
body: {}
});
}
function generate_where(schema: SDict, params: SDict): SDict {
return reduce(schema, (prop_remap, lookup: string, field: string) => {
if (params[lookup]) {
prop_remap[field] = params[lookup];
}
return prop_remap;
}, {});
}
function build_query(prop_remap: SDict, params: SDict, extras: Object = {}): QueryOptions {
var query = <QueryOptions>clone(extras);
query.where = merge(generate_where(prop_remap, params), query.where);
query.raw = true;
return query;
}
function stamp_meta<V, H>(label: string, val: V): (holder: H) => H {
return holder => {
holder['@meta'] = holder['@meta'] || {};
holder['@meta'][label] = val;
return holder;
};
}
function tag(name: string): (string) => Tag {
return (val: string): Tag => {
return {
tag: name,
val: val
};
};
}
function replace_with_uuid(val: string, field: string): Boolean {
return val === '$UUID' && ID_FIELDS.indexOf(field) !== -1;
}
function populate_dates(body: Query): Query {
var cbody = clone(body);
if (!cbody.deleted_date) {
cbody.deleted_date = null;
}
return cbody;
}
function populate_uuids(body: Query): Query {
var id;
return reduce(body, (prop_remap: SDict, val: string, field: string) => {
if (replace_with_uuid(val, field)) {
id = id || v4();
val = id;
}
prop_remap[field] = val;
return prop_remap;
}, {});
}
function populate_extra_parameters(req: Request, extra_params: Object) {
if (extra_params) {
each(extra_params, function (field) {
req.body[field] = req.params[field];
});
}
}
// XXX remove this and just use `next`
function error_handler(res: Response, action): any {
return action.catch(err =>
error(res, err));
}
function response_handler(res: Response, property?: string): any {
var start_time = Date.now();
return results => {
var body = property ? results[property] : results,
meta = {
ok: true,
error: false,
elapsed_time: Date.now() - start_time
};
return res.json(<ServiceResponseV1<SDict | SDict[] | number>>{ meta, body });
};
}
export function upsert(model: any, extra_params: string[] = []): RequestHandler {
return (req, res) => {
populate_extra_parameters(req, extra_params);
error_handler(res, model.upsert(populate_uuids(populate_dates(req.body)))
.then(response_handler(res)));
};
}
/**
* XXX instead of a callback this should pass the response down so that
* handlers can just be appended.
*/
export function create(model: any, extra_params: string[] = [], cb?: (Model) => Promise<Model<any, any>>): RequestHandler {
return (req, res) => {
populate_extra_parameters(req, extra_params);
error_handler(res, model.create(populate_uuids(populate_dates(req.body)))
.then(model => cb ? cb(model) : model)
.then(response_handler(res)));
};
}
export function retrieve(model: any, prop_remap: SDict = ID_MAP): RequestHandler {
var find: string;
return (req, res) => {
// GET model/:id
// GET model/:parent_id/sub_model
// GET model/:parent_id/sub_model/:id
if (req.params.id || prop_remap) {
find = req.params.id ? 'findOne' : 'findAll';
error_handler(res, model[find](build_query(prop_remap, req.params, {
paranoid: !req.params.id,
order: ['created_date']
})).then(response_handler(res)));
} else {
error(res, new Error('search not implemented'));
}
};
}
export function update(model: any): RequestHandler {
return (req, res) => error_handler(res, model.update(
populate_uuids(populate_dates(req.body)),
build_query(ID_MAP, req.params)
).then(response_handler(res)));
}
/**
* NOTE this will always do a soft-delete unless "purge=true" is passed as a
* query paramter along with a valid "purge_key" value. this "purge_key" is
* retrieved from the CP_PURGE_KEY env var.
*/
export function del(model: any, prop_remap: SDict = ID_MAP): RequestHandler {
return (req, res, next) => {
var deleted_by = req.user.id,
where = { deleted_date: null },
force = req.query.purge === 'true' &&
req.query.purge_key === process.env.CP_PURGE_KEY &&
process.env.CP_PURGE_KEY;
// mismatching prop_remap to req.* is resulting in `delete from X`
// queries
for (var prop in prop_remap) {
if (prop_remap.hasOwnProperty(prop) && !req.params[prop_remap[prop]]) {
next(new BadRequestError(ERR_MSG_MISSING_FIELDS(values<string>(prop_remap))));
return;
}
}
error_handler(res, (<any>model).sequelize.transaction(transaction =>
model.update({ deleted_by }, build_query(prop_remap, req.params,
{ transaction, where })).then(() =>
model.destroy(build_query(prop_remap, req.params,
{ transaction, force }))))).then(response_handler(res));
};
}
export function parts(model: any, prop_remap, parts_def?): RequestHandler {
if (!parts_def) {
parts_def = prop_remap;
prop_remap = {id: 'id'};
}
return (req, res, next) => {
var parts_wanted = arr_filter((req.query.parts || '').split(',')),
expand_wanted = arr_filter((req.query.expand || '').split(',')),
bad_parts = [],
queries = [];
// check for invalid parts first
each(parts_wanted, (part: string) => {
if (!(part in parts_def)) {
bad_parts.push(part);
}
});
if (bad_parts.length) {
next(new BadRequestError(ERR_MSG_INVALID_PARTS(bad_parts)));
return;
}
// mian
queries.push(model.findOne(build_query(prop_remap, req.params, {
paranoid: false
})).then(tag('main')));
// parts
each(parts_wanted, (part: string) => {
var query = null,
model = parts_def[part][0],
prop_remap = parts_def[part][1],
meta = parts_def[part][2] || {};
if (meta.expand && includes(expand_wanted, part)) {
query = model.findAll(build_query(prop_remap, req.params))
.then(results => {
var model = meta.expand[0],
remap = meta.expand[1];
results = Array.isArray(results) ? results : [results];
return q.all(map(results, val =>
model.findOne(build_query(remap, <SDict>val))
.then(stamp_meta('relationship', val))))
.then(tag(part));
});
} else if (meta.instead) {
query = new Promise((resolve, reject) => {
var instead: QueryResultsModelMeta = {},
user_id = req.user.id,
checks = [];
if (meta.instead.count) {
checks.push(new Promise((resolve, reject) => {
model.findAndCountAll(build_query(prop_remap, req.params))
.then(count => {
instead.count = count.count;
resolve();
});
}));
}
if (meta.instead.includes_me) {
checks.push(new Promise((resolve, reject) => {
if (!user_id) {
instead.includes_me = false;
resolve();
} else {
model.findOne(build_query(prop_remap, req.params, {
where: { user_id }
})).then(row => {
instead.includes_me = !!row;
resolve();
});
}
}));
}
return Promise.all(checks).then(() => q.when({})
.then(stamp_meta('instead', instead)) | } else {
query = model.findAll(build_query(prop_remap, req.params | .then(tag(part))
.then(resolve));
}); | random_line_split |
crud.ts | each, clone, map, filter as arr_filter, reduce, find,
values, Dictionary } from 'lodash';
import * as q from 'q';
import { v4 } from 'node-uuid';
const ID_MAP: SDict = { id: 'id' };
const ID_FIELDS = [ 'id', 'updated_by', 'created_by' ];
type RequestHandler = (req: Request, res: Response, next?: Function) => void
type SDict = Dictionary<string>;
type Tag = { tag: string, val: any };
type Query = SDict & any;
type QueryOptions = UpdateOptions & DestroyOptions & FindOptions;
interface QueryResultsModelMeta {
count?: number;
includes_me?: boolean;
}
// XXX remove this and just use `next`
function error(res: Response, err: Error) {
res.status(500);
res.json(<ServiceResponseV1<SDict>>{
meta: {
ok: false,
message: err.message,
},
body: {}
});
}
function generate_where(schema: SDict, params: SDict): SDict {
return reduce(schema, (prop_remap, lookup: string, field: string) => {
if (params[lookup]) {
prop_remap[field] = params[lookup];
}
return prop_remap;
}, {});
}
function build_query(prop_remap: SDict, params: SDict, extras: Object = {}): QueryOptions {
var query = <QueryOptions>clone(extras);
query.where = merge(generate_where(prop_remap, params), query.where);
query.raw = true;
return query;
}
function stamp_meta<V, H>(label: string, val: V): (holder: H) => H {
return holder => {
holder['@meta'] = holder['@meta'] || {};
holder['@meta'][label] = val;
return holder;
};
}
function tag(name: string): (string) => Tag {
return (val: string): Tag => {
return {
tag: name,
val: val
};
};
}
function replace_with_uuid(val: string, field: string): Boolean {
return val === '$UUID' && ID_FIELDS.indexOf(field) !== -1;
}
function populate_dates(body: Query): Query {
var cbody = clone(body);
if (!cbody.deleted_date) {
cbody.deleted_date = null;
}
return cbody;
}
function populate_uuids(body: Query): Query {
var id;
return reduce(body, (prop_remap: SDict, val: string, field: string) => {
if (replace_with_uuid(val, field)) {
id = id || v4();
val = id;
}
prop_remap[field] = val;
return prop_remap;
}, {});
}
function populate_extra_parameters(req: Request, extra_params: Object) {
if (extra_params) {
each(extra_params, function (field) {
req.body[field] = req.params[field];
});
}
}
// XXX remove this and just use `next`
function error_handler(res: Response, action): any {
return action.catch(err =>
error(res, err));
}
function response_handler(res: Response, property?: string): any {
var start_time = Date.now();
return results => {
var body = property ? results[property] : results,
meta = {
ok: true,
error: false,
elapsed_time: Date.now() - start_time
};
return res.json(<ServiceResponseV1<SDict | SDict[] | number>>{ meta, body });
};
}
export function upsert(model: any, extra_params: string[] = []): RequestHandler {
return (req, res) => {
populate_extra_parameters(req, extra_params);
error_handler(res, model.upsert(populate_uuids(populate_dates(req.body)))
.then(response_handler(res)));
};
}
/**
* XXX instead of a callback this should pass the response down so that
* handlers can just be appended.
*/
export function create(model: any, extra_params: string[] = [], cb?: (Model) => Promise<Model<any, any>>): RequestHandler {
return (req, res) => {
populate_extra_parameters(req, extra_params);
error_handler(res, model.create(populate_uuids(populate_dates(req.body)))
.then(model => cb ? cb(model) : model)
.then(response_handler(res)));
};
}
export function retrieve(model: any, prop_remap: SDict = ID_MAP): RequestHandler {
var find: string;
return (req, res) => {
// GET model/:id
// GET model/:parent_id/sub_model
// GET model/:parent_id/sub_model/:id
if (req.params.id || prop_remap) {
find = req.params.id ? 'findOne' : 'findAll';
error_handler(res, model[find](build_query(prop_remap, req.params, {
paranoid: !req.params.id,
order: ['created_date']
})).then(response_handler(res)));
} else {
error(res, new Error('search not implemented'));
}
};
}
export function | (model: any): RequestHandler {
return (req, res) => error_handler(res, model.update(
populate_uuids(populate_dates(req.body)),
build_query(ID_MAP, req.params)
).then(response_handler(res)));
}
/**
* NOTE this will always do a soft-delete unless "purge=true" is passed as a
* query paramter along with a valid "purge_key" value. this "purge_key" is
* retrieved from the CP_PURGE_KEY env var.
*/
export function del(model: any, prop_remap: SDict = ID_MAP): RequestHandler {
return (req, res, next) => {
var deleted_by = req.user.id,
where = { deleted_date: null },
force = req.query.purge === 'true' &&
req.query.purge_key === process.env.CP_PURGE_KEY &&
process.env.CP_PURGE_KEY;
// mismatching prop_remap to req.* is resulting in `delete from X`
// queries
for (var prop in prop_remap) {
if (prop_remap.hasOwnProperty(prop) && !req.params[prop_remap[prop]]) {
next(new BadRequestError(ERR_MSG_MISSING_FIELDS(values<string>(prop_remap))));
return;
}
}
error_handler(res, (<any>model).sequelize.transaction(transaction =>
model.update({ deleted_by }, build_query(prop_remap, req.params,
{ transaction, where })).then(() =>
model.destroy(build_query(prop_remap, req.params,
{ transaction, force }))))).then(response_handler(res));
};
}
export function parts(model: any, prop_remap, parts_def?): RequestHandler {
if (!parts_def) {
parts_def = prop_remap;
prop_remap = {id: 'id'};
}
return (req, res, next) => {
var parts_wanted = arr_filter((req.query.parts || '').split(',')),
expand_wanted = arr_filter((req.query.expand || '').split(',')),
bad_parts = [],
queries = [];
// check for invalid parts first
each(parts_wanted, (part: string) => {
if (!(part in parts_def)) {
bad_parts.push(part);
}
});
if (bad_parts.length) {
next(new BadRequestError(ERR_MSG_INVALID_PARTS(bad_parts)));
return;
}
// mian
queries.push(model.findOne(build_query(prop_remap, req.params, {
paranoid: false
})).then(tag('main')));
// parts
each(parts_wanted, (part: string) => {
var query = null,
model = parts_def[part][0],
prop_remap = parts_def[part][1],
meta = parts_def[part][2] || {};
if (meta.expand && includes(expand_wanted, part)) {
query = model.findAll(build_query(prop_remap, req.params))
.then(results => {
var model = meta.expand[0],
remap = meta.expand[1];
results = Array.isArray(results) ? results : [results];
return q.all(map(results, val =>
model.findOne(build_query(remap, <SDict>val))
.then(stamp_meta('relationship', val))))
.then(tag(part));
});
} else if (meta.instead) {
query = new Promise((resolve, reject) => {
var instead: QueryResultsModelMeta = {},
user_id = req.user.id,
checks = [];
if (meta.instead.count) {
checks.push(new Promise((resolve, reject) => {
model.findAndCountAll(build_query(prop_remap, req.params))
.then(count => {
instead.count = count.count;
resolve();
});
}));
}
if (meta.instead.includes_me) {
checks.push(new Promise((resolve, reject) => {
if (!user_id) {
instead.includes_me = false;
resolve();
} else {
model.findOne(build_query(prop_remap, req.params, {
where: { user_id }
})).then(row => {
instead.includes_me = !!row;
resolve();
});
}
}));
}
return Promise.all(checks).then(() => q.when({})
.then(stamp_meta('instead', instead))
.then(tag(part))
.then(resolve));
});
} else {
query = model.findAll(build_query(prop_remap, req | update | identifier_name |
crud.ts | , clone, map, filter as arr_filter, reduce, find,
values, Dictionary } from 'lodash';
import * as q from 'q';
import { v4 } from 'node-uuid';
const ID_MAP: SDict = { id: 'id' };
const ID_FIELDS = [ 'id', 'updated_by', 'created_by' ];
type RequestHandler = (req: Request, res: Response, next?: Function) => void
type SDict = Dictionary<string>;
type Tag = { tag: string, val: any };
type Query = SDict & any;
type QueryOptions = UpdateOptions & DestroyOptions & FindOptions;
interface QueryResultsModelMeta {
count?: number;
includes_me?: boolean;
}
// XXX remove this and just use `next`
function error(res: Response, err: Error) {
res.status(500);
res.json(<ServiceResponseV1<SDict>>{
meta: {
ok: false,
message: err.message,
},
body: {}
});
}
function generate_where(schema: SDict, params: SDict): SDict {
return reduce(schema, (prop_remap, lookup: string, field: string) => {
if (params[lookup]) {
prop_remap[field] = params[lookup];
}
return prop_remap;
}, {});
}
function build_query(prop_remap: SDict, params: SDict, extras: Object = {}): QueryOptions {
var query = <QueryOptions>clone(extras);
query.where = merge(generate_where(prop_remap, params), query.where);
query.raw = true;
return query;
}
function stamp_meta<V, H>(label: string, val: V): (holder: H) => H {
return holder => {
holder['@meta'] = holder['@meta'] || {};
holder['@meta'][label] = val;
return holder;
};
}
function tag(name: string): (string) => Tag {
return (val: string): Tag => {
return {
tag: name,
val: val
};
};
}
function replace_with_uuid(val: string, field: string): Boolean {
return val === '$UUID' && ID_FIELDS.indexOf(field) !== -1;
}
function populate_dates(body: Query): Query {
var cbody = clone(body);
if (!cbody.deleted_date) {
cbody.deleted_date = null;
}
return cbody;
}
function populate_uuids(body: Query): Query {
var id;
return reduce(body, (prop_remap: SDict, val: string, field: string) => {
if (replace_with_uuid(val, field)) {
id = id || v4();
val = id;
}
prop_remap[field] = val;
return prop_remap;
}, {});
}
function populate_extra_parameters(req: Request, extra_params: Object) {
if (extra_params) {
each(extra_params, function (field) {
req.body[field] = req.params[field];
});
}
}
// XXX remove this and just use `next`
function error_handler(res: Response, action): any {
return action.catch(err =>
error(res, err));
}
function response_handler(res: Response, property?: string): any {
var start_time = Date.now();
return results => {
var body = property ? results[property] : results,
meta = {
ok: true,
error: false,
elapsed_time: Date.now() - start_time
};
return res.json(<ServiceResponseV1<SDict | SDict[] | number>>{ meta, body });
};
}
export function upsert(model: any, extra_params: string[] = []): RequestHandler {
return (req, res) => {
populate_extra_parameters(req, extra_params);
error_handler(res, model.upsert(populate_uuids(populate_dates(req.body)))
.then(response_handler(res)));
};
}
/**
* XXX instead of a callback this should pass the response down so that
* handlers can just be appended.
*/
export function create(model: any, extra_params: string[] = [], cb?: (Model) => Promise<Model<any, any>>): RequestHandler {
return (req, res) => {
populate_extra_parameters(req, extra_params);
error_handler(res, model.create(populate_uuids(populate_dates(req.body)))
.then(model => cb ? cb(model) : model)
.then(response_handler(res)));
};
}
export function retrieve(model: any, prop_remap: SDict = ID_MAP): RequestHandler {
var find: string;
return (req, res) => {
// GET model/:id
// GET model/:parent_id/sub_model
// GET model/:parent_id/sub_model/:id
if (req.params.id || prop_remap) {
find = req.params.id ? 'findOne' : 'findAll';
error_handler(res, model[find](build_query(prop_remap, req.params, {
paranoid: !req.params.id,
order: ['created_date']
})).then(response_handler(res)));
} else {
error(res, new Error('search not implemented'));
}
};
}
export function update(model: any): RequestHandler {
return (req, res) => error_handler(res, model.update(
populate_uuids(populate_dates(req.body)),
build_query(ID_MAP, req.params)
).then(response_handler(res)));
}
/**
* NOTE this will always do a soft-delete unless "purge=true" is passed as a
* query paramter along with a valid "purge_key" value. this "purge_key" is
* retrieved from the CP_PURGE_KEY env var.
*/
export function del(model: any, prop_remap: SDict = ID_MAP): RequestHandler {
return (req, res, next) => {
var deleted_by = req.user.id,
where = { deleted_date: null },
force = req.query.purge === 'true' &&
req.query.purge_key === process.env.CP_PURGE_KEY &&
process.env.CP_PURGE_KEY;
// mismatching prop_remap to req.* is resulting in `delete from X`
// queries
for (var prop in prop_remap) {
if (prop_remap.hasOwnProperty(prop) && !req.params[prop_remap[prop]]) {
next(new BadRequestError(ERR_MSG_MISSING_FIELDS(values<string>(prop_remap))));
return;
}
}
error_handler(res, (<any>model).sequelize.transaction(transaction =>
model.update({ deleted_by }, build_query(prop_remap, req.params,
{ transaction, where })).then(() =>
model.destroy(build_query(prop_remap, req.params,
{ transaction, force }))))).then(response_handler(res));
};
}
export function parts(model: any, prop_remap, parts_def?): RequestHandler {
if (!parts_def) {
parts_def = prop_remap;
prop_remap = {id: 'id'};
}
return (req, res, next) => {
var parts_wanted = arr_filter((req.query.parts || '').split(',')),
expand_wanted = arr_filter((req.query.expand || '').split(',')),
bad_parts = [],
queries = [];
// check for invalid parts first
each(parts_wanted, (part: string) => {
if (!(part in parts_def)) {
bad_parts.push(part);
}
});
if (bad_parts.length) {
next(new BadRequestError(ERR_MSG_INVALID_PARTS(bad_parts)));
return;
}
// mian
queries.push(model.findOne(build_query(prop_remap, req.params, {
paranoid: false
})).then(tag('main')));
// parts
each(parts_wanted, (part: string) => {
var query = null,
model = parts_def[part][0],
prop_remap = parts_def[part][1],
meta = parts_def[part][2] || {};
if (meta.expand && includes(expand_wanted, part)) {
query = model.findAll(build_query(prop_remap, req.params))
.then(results => {
var model = meta.expand[0],
remap = meta.expand[1];
results = Array.isArray(results) ? results : [results];
return q.all(map(results, val =>
model.findOne(build_query(remap, <SDict>val))
.then(stamp_meta('relationship', val))))
.then(tag(part));
});
} else if (meta.instead) {
query = new Promise((resolve, reject) => {
var instead: QueryResultsModelMeta = {},
user_id = req.user.id,
checks = [];
if (meta.instead.count) {
checks.push(new Promise((resolve, reject) => {
model.findAndCountAll(build_query(prop_remap, req.params))
.then(count => {
instead.count = count.count;
resolve();
});
}));
}
if (meta.instead.includes_me) {
checks.push(new Promise((resolve, reject) => {
if (!user_id) {
instead.includes_me = false;
resolve();
} else |
}));
}
return Promise.all(checks).then(() => q.when({})
.then(stamp_meta('instead', instead))
.then(tag(part))
.then(resolve));
});
} else {
query = model.findAll(build_query(prop_remap, req | {
model.findOne(build_query(prop_remap, req.params, {
where: { user_id }
})).then(row => {
instead.includes_me = !!row;
resolve();
});
} | conditional_block |
crud.ts | , clone, map, filter as arr_filter, reduce, find,
values, Dictionary } from 'lodash';
import * as q from 'q';
import { v4 } from 'node-uuid';
const ID_MAP: SDict = { id: 'id' };
const ID_FIELDS = [ 'id', 'updated_by', 'created_by' ];
type RequestHandler = (req: Request, res: Response, next?: Function) => void
type SDict = Dictionary<string>;
type Tag = { tag: string, val: any };
type Query = SDict & any;
type QueryOptions = UpdateOptions & DestroyOptions & FindOptions;
interface QueryResultsModelMeta {
count?: number;
includes_me?: boolean;
}
// XXX remove this and just use `next`
function error(res: Response, err: Error) {
res.status(500);
res.json(<ServiceResponseV1<SDict>>{
meta: {
ok: false,
message: err.message,
},
body: {}
});
}
function generate_where(schema: SDict, params: SDict): SDict |
function build_query(prop_remap: SDict, params: SDict, extras: Object = {}): QueryOptions {
var query = <QueryOptions>clone(extras);
query.where = merge(generate_where(prop_remap, params), query.where);
query.raw = true;
return query;
}
function stamp_meta<V, H>(label: string, val: V): (holder: H) => H {
return holder => {
holder['@meta'] = holder['@meta'] || {};
holder['@meta'][label] = val;
return holder;
};
}
function tag(name: string): (string) => Tag {
return (val: string): Tag => {
return {
tag: name,
val: val
};
};
}
function replace_with_uuid(val: string, field: string): Boolean {
return val === '$UUID' && ID_FIELDS.indexOf(field) !== -1;
}
function populate_dates(body: Query): Query {
var cbody = clone(body);
if (!cbody.deleted_date) {
cbody.deleted_date = null;
}
return cbody;
}
function populate_uuids(body: Query): Query {
var id;
return reduce(body, (prop_remap: SDict, val: string, field: string) => {
if (replace_with_uuid(val, field)) {
id = id || v4();
val = id;
}
prop_remap[field] = val;
return prop_remap;
}, {});
}
function populate_extra_parameters(req: Request, extra_params: Object) {
if (extra_params) {
each(extra_params, function (field) {
req.body[field] = req.params[field];
});
}
}
// XXX remove this and just use `next`
function error_handler(res: Response, action): any {
return action.catch(err =>
error(res, err));
}
function response_handler(res: Response, property?: string): any {
var start_time = Date.now();
return results => {
var body = property ? results[property] : results,
meta = {
ok: true,
error: false,
elapsed_time: Date.now() - start_time
};
return res.json(<ServiceResponseV1<SDict | SDict[] | number>>{ meta, body });
};
}
export function upsert(model: any, extra_params: string[] = []): RequestHandler {
return (req, res) => {
populate_extra_parameters(req, extra_params);
error_handler(res, model.upsert(populate_uuids(populate_dates(req.body)))
.then(response_handler(res)));
};
}
/**
* XXX instead of a callback this should pass the response down so that
* handlers can just be appended.
*/
export function create(model: any, extra_params: string[] = [], cb?: (Model) => Promise<Model<any, any>>): RequestHandler {
return (req, res) => {
populate_extra_parameters(req, extra_params);
error_handler(res, model.create(populate_uuids(populate_dates(req.body)))
.then(model => cb ? cb(model) : model)
.then(response_handler(res)));
};
}
export function retrieve(model: any, prop_remap: SDict = ID_MAP): RequestHandler {
var find: string;
return (req, res) => {
// GET model/:id
// GET model/:parent_id/sub_model
// GET model/:parent_id/sub_model/:id
if (req.params.id || prop_remap) {
find = req.params.id ? 'findOne' : 'findAll';
error_handler(res, model[find](build_query(prop_remap, req.params, {
paranoid: !req.params.id,
order: ['created_date']
})).then(response_handler(res)));
} else {
error(res, new Error('search not implemented'));
}
};
}
export function update(model: any): RequestHandler {
return (req, res) => error_handler(res, model.update(
populate_uuids(populate_dates(req.body)),
build_query(ID_MAP, req.params)
).then(response_handler(res)));
}
/**
* NOTE this will always do a soft-delete unless "purge=true" is passed as a
* query paramter along with a valid "purge_key" value. this "purge_key" is
* retrieved from the CP_PURGE_KEY env var.
*/
export function del(model: any, prop_remap: SDict = ID_MAP): RequestHandler {
return (req, res, next) => {
var deleted_by = req.user.id,
where = { deleted_date: null },
force = req.query.purge === 'true' &&
req.query.purge_key === process.env.CP_PURGE_KEY &&
process.env.CP_PURGE_KEY;
// mismatching prop_remap to req.* is resulting in `delete from X`
// queries
for (var prop in prop_remap) {
if (prop_remap.hasOwnProperty(prop) && !req.params[prop_remap[prop]]) {
next(new BadRequestError(ERR_MSG_MISSING_FIELDS(values<string>(prop_remap))));
return;
}
}
error_handler(res, (<any>model).sequelize.transaction(transaction =>
model.update({ deleted_by }, build_query(prop_remap, req.params,
{ transaction, where })).then(() =>
model.destroy(build_query(prop_remap, req.params,
{ transaction, force }))))).then(response_handler(res));
};
}
export function parts(model: any, prop_remap, parts_def?): RequestHandler {
if (!parts_def) {
parts_def = prop_remap;
prop_remap = {id: 'id'};
}
return (req, res, next) => {
var parts_wanted = arr_filter((req.query.parts || '').split(',')),
expand_wanted = arr_filter((req.query.expand || '').split(',')),
bad_parts = [],
queries = [];
// check for invalid parts first
each(parts_wanted, (part: string) => {
if (!(part in parts_def)) {
bad_parts.push(part);
}
});
if (bad_parts.length) {
next(new BadRequestError(ERR_MSG_INVALID_PARTS(bad_parts)));
return;
}
// mian
queries.push(model.findOne(build_query(prop_remap, req.params, {
paranoid: false
})).then(tag('main')));
// parts
each(parts_wanted, (part: string) => {
var query = null,
model = parts_def[part][0],
prop_remap = parts_def[part][1],
meta = parts_def[part][2] || {};
if (meta.expand && includes(expand_wanted, part)) {
query = model.findAll(build_query(prop_remap, req.params))
.then(results => {
var model = meta.expand[0],
remap = meta.expand[1];
results = Array.isArray(results) ? results : [results];
return q.all(map(results, val =>
model.findOne(build_query(remap, <SDict>val))
.then(stamp_meta('relationship', val))))
.then(tag(part));
});
} else if (meta.instead) {
query = new Promise((resolve, reject) => {
var instead: QueryResultsModelMeta = {},
user_id = req.user.id,
checks = [];
if (meta.instead.count) {
checks.push(new Promise((resolve, reject) => {
model.findAndCountAll(build_query(prop_remap, req.params))
.then(count => {
instead.count = count.count;
resolve();
});
}));
}
if (meta.instead.includes_me) {
checks.push(new Promise((resolve, reject) => {
if (!user_id) {
instead.includes_me = false;
resolve();
} else {
model.findOne(build_query(prop_remap, req.params, {
where: { user_id }
})).then(row => {
instead.includes_me = !!row;
resolve();
});
}
}));
}
return Promise.all(checks).then(() => q.when({})
.then(stamp_meta('instead', instead))
.then(tag(part))
.then(resolve));
});
} else {
query = model.findAll(build_query(prop_remap, req | {
return reduce(schema, (prop_remap, lookup: string, field: string) => {
if (params[lookup]) {
prop_remap[field] = params[lookup];
}
return prop_remap;
}, {});
} | identifier_body |
cpu_time.rs | current() -> io::Result<LinuxStyleCpuTime> {
imp::current()
}
}
pub use std::io::Result;
pub use imp::cpu_time;
/// A struct to monitor process cpu usage
#[derive(Clone, Copy)]
pub struct ProcessStat {
current_time: Instant,
cpu_time: Duration,
}
impl ProcessStat {
pub fn cur_proc_stat() -> io::Result<Self> {
Ok(ProcessStat {
current_time: Instant::now(),
cpu_time: imp::cpu_time()?,
})
}
/// return the cpu usage from last invoke,
/// or when this struct created if it is the first invoke.
pub fn cpu_usage(&mut self) -> io::Result<f64> {
let new_time = imp::cpu_time()?;
let old_time = mem::replace(&mut self.cpu_time, new_time);
let old_now = mem::replace(&mut self.current_time, Instant::now());
let real_time = self.current_time.duration_since(old_now).as_secs_f64();
if real_time > 0.0 {
let cpu_time = new_time
.checked_sub(old_time)
.map(|dur| dur.as_secs_f64())
.unwrap_or(0.0);
Ok(cpu_time / real_time)
} else {
Ok(0.0)
}
}
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
mod imp {
use std::{fs::File, io, io::Read, time::Duration};
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
let mut state = String::new();
File::open("/proc/stat")?.read_to_string(&mut state)?;
(|| {
let mut parts = state.lines().next()?.split_whitespace();
if parts.next()? != "cpu" {
return None;
}
Some(super::LinuxStyleCpuTime {
user: parts.next()?.parse::<u64>().ok()?,
nice: parts.next()?.parse::<u64>().ok()?,
system: parts.next()?.parse::<u64>().ok()?,
idle: parts.next()?.parse::<u64>().ok()?,
iowait: parts.next()?.parse::<u64>().ok()?,
irq: parts.next()?.parse::<u64>().ok()?,
softirq: parts.next()?.parse::<u64>().ok()?,
steal: parts.next()?.parse::<u64>().ok()?,
guest: parts.next()?.parse::<u64>().ok()?,
guest_nice: parts.next()?.parse::<u64>().ok()?,
})
})()
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed"))
}
pub fn cpu_time() -> io::Result<Duration> {
let mut time = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 {
Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "macos")]
mod imp {
use std::{io, ptr};
use libc::*;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
// There's scant little documentation on `host_processor_info`
// throughout the internet, so this is just modeled after what everyone
// else is doing. For now this is modeled largely after libuv.
unsafe {
let mut num_cpus_u = 0;
let mut cpu_info = ptr::null_mut();
let mut msg_type = 0;
let ret = host_processor_info(
mach_host_self(),
PROCESSOR_CPU_LOAD_INFO as processor_flavor_t,
&mut num_cpus_u,
&mut cpu_info,
&mut msg_type,
);
if ret != KERN_SUCCESS {
return Err(io::Error::from_raw_os_error(ret));
}
let mut ret = super::LinuxStyleCpuTime {
user: 0,
system: 0,
idle: 0,
iowait: 0,
irq: 0,
softirq: 0,
steal: 0,
guest: 0,
nice: 0,
guest_nice: 0,
};
let mut current = cpu_info as *const processor_cpu_load_info_data_t;
for _ in 0..num_cpus_u {
ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64;
ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64;
ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64;
ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64;
current = current.offset(1);
}
vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize);
Ok(ret)
}
}
pub fn cpu_time() -> io::Result<std::time::Duration> {
let mut time = unsafe { std::mem::zeroed() };
if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 {
let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64;
let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000;
Ok(std::time::Duration::new(sec, nsec))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "windows")]
mod imp {
use std::io;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
Err(io::Error::new(
io::ErrorKind::Other,
"unsupported platform to learn CPU state",
))
}
use std::{io, mem, time::Duration};
use scopeguard::defer;
use winapi::{
shared::{
minwindef::FILETIME,
ntdef::{FALSE, NULL},
},
um::{
handleapi::CloseHandle,
processthreadsapi::{
GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes,
GetThreadTimes, OpenThread,
},
sysinfoapi::{GetSystemInfo, SYSTEM_INFO},
winnt::THREAD_QUERY_INFORMATION,
},
};
/// convert to u64, unit 100 ns
fn filetime_to_ns100(ft: FILETIME) -> u64 {
((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64
}
fn get_sys_times() -> io::Result<(u64, u64, u64)> {
let mut idle = FILETIME::default();
let mut kernel = FILETIME::default();
let mut user = FILETIME::default();
let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) };
if ret == 0 {
return Err(io::Error::last_os_error());
}
let idle = filetime_to_ns100(idle);
let kernel = filetime_to_ns100(kernel);
let user = filetime_to_ns100(user);
Ok((idle, kernel, user))
}
fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> {
let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) };
if handler == NULL {
return Err(io::Error::last_os_error());
}
defer! {{
unsafe { CloseHandle(handler) };
}}
let mut create_time = FILETIME::default();
let mut exit_time = FILETIME::default();
let mut kernel_time = FILETIME::default();
let mut user_time = FILETIME::default();
let ret = unsafe {
GetThreadTimes(
handler,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
)
};
if ret == 0 {
return Err(io::Error::last_os_error());
}
let kernel_time = filetime_to_ns100(kernel_time);
let user_time = filetime_to_ns100(user_time);
Ok((kernel_time, user_time))
}
#[inline]
pub fn cpu_time() -> io::Result<Duration> | {
let (kernel_time, user_time) = unsafe {
let process = GetCurrentProcess();
let mut create_time = mem::zeroed();
let mut exit_time = mem::zeroed();
let mut kernel_time = mem::zeroed();
let mut user_time = mem::zeroed();
let ret = GetProcessTimes(
process,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
);
if ret != 0 {
(kernel_time, user_time)
} else {
return Err(io::Error::last_os_error()); | identifier_body |
|
cpu_time.rs | pub softirq: u64,
pub steal: u64,
pub guest: u64,
pub guest_nice: u64,
}
impl LinuxStyleCpuTime {
pub fn total(&self) -> u64 {
// Note: guest(_nice) is not counted, since it is already in user.
// See https://unix.stackexchange.com/questions/178045/proc-stat-is-guest-counted-into-user-time
self.user
+ self.system
+ self.idle
+ self.nice
+ self.iowait
+ self.irq
+ self.softirq
+ self.steal
}
pub fn current() -> io::Result<LinuxStyleCpuTime> {
imp::current()
}
}
pub use std::io::Result;
pub use imp::cpu_time;
/// A struct to monitor process cpu usage
#[derive(Clone, Copy)]
pub struct ProcessStat {
current_time: Instant,
cpu_time: Duration,
}
impl ProcessStat {
pub fn cur_proc_stat() -> io::Result<Self> {
Ok(ProcessStat {
current_time: Instant::now(),
cpu_time: imp::cpu_time()?,
})
}
/// return the cpu usage from last invoke,
/// or when this struct created if it is the first invoke.
pub fn cpu_usage(&mut self) -> io::Result<f64> {
let new_time = imp::cpu_time()?;
let old_time = mem::replace(&mut self.cpu_time, new_time);
let old_now = mem::replace(&mut self.current_time, Instant::now());
let real_time = self.current_time.duration_since(old_now).as_secs_f64();
if real_time > 0.0 {
let cpu_time = new_time
.checked_sub(old_time)
.map(|dur| dur.as_secs_f64())
.unwrap_or(0.0);
Ok(cpu_time / real_time)
} else {
Ok(0.0)
}
}
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
mod imp {
use std::{fs::File, io, io::Read, time::Duration};
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
let mut state = String::new();
File::open("/proc/stat")?.read_to_string(&mut state)?;
(|| {
let mut parts = state.lines().next()?.split_whitespace();
if parts.next()? != "cpu" {
return None;
}
Some(super::LinuxStyleCpuTime {
user: parts.next()?.parse::<u64>().ok()?,
nice: parts.next()?.parse::<u64>().ok()?,
system: parts.next()?.parse::<u64>().ok()?,
idle: parts.next()?.parse::<u64>().ok()?,
iowait: parts.next()?.parse::<u64>().ok()?,
irq: parts.next()?.parse::<u64>().ok()?,
softirq: parts.next()?.parse::<u64>().ok()?,
steal: parts.next()?.parse::<u64>().ok()?,
guest: parts.next()?.parse::<u64>().ok()?,
guest_nice: parts.next()?.parse::<u64>().ok()?,
})
})()
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed"))
}
pub fn cpu_time() -> io::Result<Duration> {
let mut time = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 {
Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "macos")]
mod imp {
use std::{io, ptr};
use libc::*;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
// There's scant little documentation on `host_processor_info`
// throughout the internet, so this is just modeled after what everyone
// else is doing. For now this is modeled largely after libuv.
unsafe {
let mut num_cpus_u = 0;
let mut cpu_info = ptr::null_mut();
let mut msg_type = 0;
let ret = host_processor_info(
mach_host_self(),
PROCESSOR_CPU_LOAD_INFO as processor_flavor_t,
&mut num_cpus_u,
&mut cpu_info,
&mut msg_type,
);
if ret != KERN_SUCCESS {
return Err(io::Error::from_raw_os_error(ret));
}
let mut ret = super::LinuxStyleCpuTime {
user: 0,
system: 0,
idle: 0,
iowait: 0,
irq: 0,
softirq: 0,
steal: 0,
guest: 0,
nice: 0,
guest_nice: 0,
};
let mut current = cpu_info as *const processor_cpu_load_info_data_t;
for _ in 0..num_cpus_u {
ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64;
ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64;
ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64;
ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64;
current = current.offset(1);
}
vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize);
Ok(ret)
}
}
pub fn | () -> io::Result<std::time::Duration> {
let mut time = unsafe { std::mem::zeroed() };
if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 {
let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64;
let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000;
Ok(std::time::Duration::new(sec, nsec))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "windows")]
mod imp {
use std::io;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
Err(io::Error::new(
io::ErrorKind::Other,
"unsupported platform to learn CPU state",
))
}
use std::{io, mem, time::Duration};
use scopeguard::defer;
use winapi::{
shared::{
minwindef::FILETIME,
ntdef::{FALSE, NULL},
},
um::{
handleapi::CloseHandle,
processthreadsapi::{
GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes,
GetThreadTimes, OpenThread,
},
sysinfoapi::{GetSystemInfo, SYSTEM_INFO},
winnt::THREAD_QUERY_INFORMATION,
},
};
/// convert to u64, unit 100 ns
fn filetime_to_ns100(ft: FILETIME) -> u64 {
((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64
}
fn get_sys_times() -> io::Result<(u64, u64, u64)> {
let mut idle = FILETIME::default();
let mut kernel = FILETIME::default();
let mut user = FILETIME::default();
let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) };
if ret == 0 {
return Err(io::Error::last_os_error());
}
let idle = filetime_to_ns100(idle);
let kernel = filetime_to_ns100(kernel);
let user = filetime_to_ns100(user);
Ok((idle, kernel, user))
}
fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> {
let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) };
if handler == NULL {
return Err(io::Error::last_os_error());
}
defer! {{
unsafe { CloseHandle(handler) };
}}
let mut create_time = FILETIME::default();
let mut exit_time = FILETIME::default();
let mut kernel_time = FILETIME::default();
let mut user_time = FILETIME::default();
let ret = unsafe {
GetThreadTimes(
handler,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
)
};
if ret == 0 {
return Err(io::Error::last_os_error());
}
let kernel_time = filetime_to_ns100(kernel_time);
let user_time = filetime_to_ns100(user_time);
Ok((kernel | cpu_time | identifier_name |
cpu_time.rs | fn current() -> io::Result<LinuxStyleCpuTime> {
imp::current()
}
}
pub use std::io::Result;
pub use imp::cpu_time;
/// A struct to monitor process cpu usage
#[derive(Clone, Copy)]
pub struct ProcessStat {
current_time: Instant,
cpu_time: Duration,
}
impl ProcessStat {
pub fn cur_proc_stat() -> io::Result<Self> {
Ok(ProcessStat {
current_time: Instant::now(),
cpu_time: imp::cpu_time()?,
})
}
/// return the cpu usage from last invoke,
/// or when this struct created if it is the first invoke.
pub fn cpu_usage(&mut self) -> io::Result<f64> {
let new_time = imp::cpu_time()?;
let old_time = mem::replace(&mut self.cpu_time, new_time);
let old_now = mem::replace(&mut self.current_time, Instant::now());
let real_time = self.current_time.duration_since(old_now).as_secs_f64();
if real_time > 0.0 {
let cpu_time = new_time
.checked_sub(old_time)
.map(|dur| dur.as_secs_f64())
.unwrap_or(0.0);
Ok(cpu_time / real_time)
} else {
Ok(0.0)
}
}
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
mod imp {
use std::{fs::File, io, io::Read, time::Duration};
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
let mut state = String::new();
File::open("/proc/stat")?.read_to_string(&mut state)?;
(|| {
let mut parts = state.lines().next()?.split_whitespace();
if parts.next()? != "cpu" {
return None;
}
Some(super::LinuxStyleCpuTime {
user: parts.next()?.parse::<u64>().ok()?,
nice: parts.next()?.parse::<u64>().ok()?,
system: parts.next()?.parse::<u64>().ok()?,
idle: parts.next()?.parse::<u64>().ok()?,
iowait: parts.next()?.parse::<u64>().ok()?,
irq: parts.next()?.parse::<u64>().ok()?,
softirq: parts.next()?.parse::<u64>().ok()?,
steal: parts.next()?.parse::<u64>().ok()?,
guest: parts.next()?.parse::<u64>().ok()?,
guest_nice: parts.next()?.parse::<u64>().ok()?,
})
})()
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed"))
}
pub fn cpu_time() -> io::Result<Duration> {
let mut time = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 {
Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "macos")]
mod imp {
use std::{io, ptr};
use libc::*;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
// There's scant little documentation on `host_processor_info`
// throughout the internet, so this is just modeled after what everyone
// else is doing. For now this is modeled largely after libuv.
unsafe {
let mut num_cpus_u = 0;
let mut cpu_info = ptr::null_mut();
let mut msg_type = 0;
let ret = host_processor_info(
mach_host_self(),
PROCESSOR_CPU_LOAD_INFO as processor_flavor_t,
&mut num_cpus_u,
&mut cpu_info,
&mut msg_type,
);
if ret != KERN_SUCCESS {
return Err(io::Error::from_raw_os_error(ret));
}
let mut ret = super::LinuxStyleCpuTime {
user: 0,
system: 0,
idle: 0,
iowait: 0,
irq: 0,
softirq: 0,
steal: 0,
guest: 0,
nice: 0,
guest_nice: 0,
};
let mut current = cpu_info as *const processor_cpu_load_info_data_t;
for _ in 0..num_cpus_u {
ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64;
ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64;
ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64;
ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64;
current = current.offset(1);
}
vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize);
Ok(ret)
}
}
pub fn cpu_time() -> io::Result<std::time::Duration> {
let mut time = unsafe { std::mem::zeroed() };
if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 {
let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64;
let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000;
Ok(std::time::Duration::new(sec, nsec))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "windows")]
mod imp {
use std::io;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
Err(io::Error::new(
io::ErrorKind::Other,
"unsupported platform to learn CPU state",
))
}
use std::{io, mem, time::Duration};
use scopeguard::defer;
use winapi::{
shared::{
minwindef::FILETIME,
ntdef::{FALSE, NULL},
},
um::{
handleapi::CloseHandle,
processthreadsapi::{
GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes,
GetThreadTimes, OpenThread,
},
sysinfoapi::{GetSystemInfo, SYSTEM_INFO},
winnt::THREAD_QUERY_INFORMATION,
},
};
/// convert to u64, unit 100 ns
fn filetime_to_ns100(ft: FILETIME) -> u64 {
((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64
}
fn get_sys_times() -> io::Result<(u64, u64, u64)> {
let mut idle = FILETIME::default();
let mut kernel = FILETIME::default();
let mut user = FILETIME::default();
let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) };
if ret == 0 {
return Err(io::Error::last_os_error());
}
let idle = filetime_to_ns100(idle);
let kernel = filetime_to_ns100(kernel);
let user = filetime_to_ns100(user);
Ok((idle, kernel, user))
}
fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> {
let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) };
if handler == NULL {
return Err(io::Error::last_os_error());
}
defer! {{
unsafe { CloseHandle(handler) };
}}
let mut create_time = FILETIME::default();
let mut exit_time = FILETIME::default();
let mut kernel_time = FILETIME::default();
let mut user_time = FILETIME::default();
let ret = unsafe {
GetThreadTimes(
handler,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
)
};
if ret == 0 {
return Err(io::Error::last_os_error());
}
let kernel_time = filetime_to_ns100(kernel_time);
let user_time = filetime_to_ns100(user_time);
Ok((kernel_time, user_time))
}
#[inline]
pub fn cpu_time() -> io::Result<Duration> {
let (kernel_time, user_time) = unsafe {
let process = GetCurrentProcess();
let mut create_time = mem::zeroed();
let mut exit_time = mem::zeroed();
let mut kernel_time = mem::zeroed();
let mut user_time = mem::zeroed();
let ret = GetProcessTimes(
process,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
);
if ret != 0 {
(kernel_time, user_time)
} else {
return Err(io::Error::last_os_error()); | random_line_split |
||
cpu_time.rs | pub softirq: u64,
pub steal: u64,
pub guest: u64,
pub guest_nice: u64,
}
impl LinuxStyleCpuTime {
pub fn total(&self) -> u64 {
// Note: guest(_nice) is not counted, since it is already in user.
// See https://unix.stackexchange.com/questions/178045/proc-stat-is-guest-counted-into-user-time
self.user
+ self.system
+ self.idle
+ self.nice
+ self.iowait
+ self.irq
+ self.softirq
+ self.steal
}
pub fn current() -> io::Result<LinuxStyleCpuTime> {
imp::current()
}
}
pub use std::io::Result;
pub use imp::cpu_time;
/// A struct to monitor process cpu usage
#[derive(Clone, Copy)]
pub struct ProcessStat {
current_time: Instant,
cpu_time: Duration,
}
impl ProcessStat {
pub fn cur_proc_stat() -> io::Result<Self> {
Ok(ProcessStat {
current_time: Instant::now(),
cpu_time: imp::cpu_time()?,
})
}
/// return the cpu usage from last invoke,
/// or when this struct created if it is the first invoke.
pub fn cpu_usage(&mut self) -> io::Result<f64> {
let new_time = imp::cpu_time()?;
let old_time = mem::replace(&mut self.cpu_time, new_time);
let old_now = mem::replace(&mut self.current_time, Instant::now());
let real_time = self.current_time.duration_since(old_now).as_secs_f64();
if real_time > 0.0 | else {
Ok(0.0)
}
}
}
#[cfg(any(target_os = "linux", target_os = "freebsd"))]
mod imp {
use std::{fs::File, io, io::Read, time::Duration};
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
let mut state = String::new();
File::open("/proc/stat")?.read_to_string(&mut state)?;
(|| {
let mut parts = state.lines().next()?.split_whitespace();
if parts.next()? != "cpu" {
return None;
}
Some(super::LinuxStyleCpuTime {
user: parts.next()?.parse::<u64>().ok()?,
nice: parts.next()?.parse::<u64>().ok()?,
system: parts.next()?.parse::<u64>().ok()?,
idle: parts.next()?.parse::<u64>().ok()?,
iowait: parts.next()?.parse::<u64>().ok()?,
irq: parts.next()?.parse::<u64>().ok()?,
softirq: parts.next()?.parse::<u64>().ok()?,
steal: parts.next()?.parse::<u64>().ok()?,
guest: parts.next()?.parse::<u64>().ok()?,
guest_nice: parts.next()?.parse::<u64>().ok()?,
})
})()
.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "first line of /proc/stat malformed"))
}
pub fn cpu_time() -> io::Result<Duration> {
let mut time = libc::timespec {
tv_sec: 0,
tv_nsec: 0,
};
if unsafe { libc::clock_gettime(libc::CLOCK_PROCESS_CPUTIME_ID, &mut time) } == 0 {
Ok(Duration::new(time.tv_sec as u64, time.tv_nsec as u32))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "macos")]
mod imp {
use std::{io, ptr};
use libc::*;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
// There's scant little documentation on `host_processor_info`
// throughout the internet, so this is just modeled after what everyone
// else is doing. For now this is modeled largely after libuv.
unsafe {
let mut num_cpus_u = 0;
let mut cpu_info = ptr::null_mut();
let mut msg_type = 0;
let ret = host_processor_info(
mach_host_self(),
PROCESSOR_CPU_LOAD_INFO as processor_flavor_t,
&mut num_cpus_u,
&mut cpu_info,
&mut msg_type,
);
if ret != KERN_SUCCESS {
return Err(io::Error::from_raw_os_error(ret));
}
let mut ret = super::LinuxStyleCpuTime {
user: 0,
system: 0,
idle: 0,
iowait: 0,
irq: 0,
softirq: 0,
steal: 0,
guest: 0,
nice: 0,
guest_nice: 0,
};
let mut current = cpu_info as *const processor_cpu_load_info_data_t;
for _ in 0..num_cpus_u {
ret.user += (*current).cpu_ticks[CPU_STATE_USER as usize] as u64;
ret.system += (*current).cpu_ticks[CPU_STATE_SYSTEM as usize] as u64;
ret.idle += (*current).cpu_ticks[CPU_STATE_IDLE as usize] as u64;
ret.nice += (*current).cpu_ticks[CPU_STATE_NICE as usize] as u64;
current = current.offset(1);
}
vm_deallocate(mach_task_self_, cpu_info as vm_address_t, msg_type as usize);
Ok(ret)
}
}
pub fn cpu_time() -> io::Result<std::time::Duration> {
let mut time = unsafe { std::mem::zeroed() };
if unsafe { libc::getrusage(libc::RUSAGE_SELF, &mut time) } == 0 {
let sec = time.ru_utime.tv_sec as u64 + time.ru_stime.tv_sec as u64;
let nsec = (time.ru_utime.tv_usec as u32 + time.ru_stime.tv_usec as u32) * 1000;
Ok(std::time::Duration::new(sec, nsec))
} else {
Err(io::Error::last_os_error())
}
}
}
#[cfg(target_os = "windows")]
mod imp {
use std::io;
pub fn current() -> io::Result<super::LinuxStyleCpuTime> {
Err(io::Error::new(
io::ErrorKind::Other,
"unsupported platform to learn CPU state",
))
}
use std::{io, mem, time::Duration};
use scopeguard::defer;
use winapi::{
shared::{
minwindef::FILETIME,
ntdef::{FALSE, NULL},
},
um::{
handleapi::CloseHandle,
processthreadsapi::{
GetCurrentProcess, GetCurrentThreadId, GetProcessTimes, GetSystemTimes,
GetThreadTimes, OpenThread,
},
sysinfoapi::{GetSystemInfo, SYSTEM_INFO},
winnt::THREAD_QUERY_INFORMATION,
},
};
/// convert to u64, unit 100 ns
fn filetime_to_ns100(ft: FILETIME) -> u64 {
((ft.dwHighDateTime as u64) << 32) + ft.dwLowDateTime as u64
}
fn get_sys_times() -> io::Result<(u64, u64, u64)> {
let mut idle = FILETIME::default();
let mut kernel = FILETIME::default();
let mut user = FILETIME::default();
let ret = unsafe { GetSystemTimes(&mut idle, &mut kernel, &mut user) };
if ret == 0 {
return Err(io::Error::last_os_error());
}
let idle = filetime_to_ns100(idle);
let kernel = filetime_to_ns100(kernel);
let user = filetime_to_ns100(user);
Ok((idle, kernel, user))
}
fn get_thread_times(tid: u32) -> io::Result<(u64, u64)> {
let handler = unsafe { OpenThread(THREAD_QUERY_INFORMATION, FALSE as i32, tid) };
if handler == NULL {
return Err(io::Error::last_os_error());
}
defer! {{
unsafe { CloseHandle(handler) };
}}
let mut create_time = FILETIME::default();
let mut exit_time = FILETIME::default();
let mut kernel_time = FILETIME::default();
let mut user_time = FILETIME::default();
let ret = unsafe {
GetThreadTimes(
handler,
&mut create_time,
&mut exit_time,
&mut kernel_time,
&mut user_time,
)
};
if ret == 0 {
return Err(io::Error::last_os_error());
}
let kernel_time = filetime_to_ns100(kernel_time);
let user_time = filetime_to_ns100(user_time);
Ok((kernel | {
let cpu_time = new_time
.checked_sub(old_time)
.map(|dur| dur.as_secs_f64())
.unwrap_or(0.0);
Ok(cpu_time / real_time)
} | conditional_block |
esv.py | , 27, 14, 17, 14, 15],
[21],
[17, 10, 10, 11],
[16, 13, 12, 13, 15, 16, 20],
[15, 13, 19],
[17, 20, 19],
[18, 15, 20],
[15, 23],
[21, 13, 10, 14, 11, 15, 14, 23, 17, 12, 17, 14, 9, 21],
[14, 17, 18, 6],
[25, 23, 17, 25, 48, 34, 29, 34, 38, 42, 30, 50, 58, 36, 39, 28, 27, 35,
30, 34, 46, 46, 39, 51, 46, 75, 66, 20],
[45, 28, 35, 41, 43, 56, 37, 38, 50, 52, 33, 44, 37, 72, 47, 20],
[80, 52, 38, 44, 39, 49, 50, 56, 62, 42, 54, 59, 35, 35, 32, 31, 37, 43,
48, 47, 38, 71, 56, 53],
[51, 25, 36, 54, 47, 71, 53, 59, 41, 42, 57, 50, 38, 31, 27, 33, 26, 40,
42, 31, 25],
[26, 47, 26, 37, 42, 15, 60, 40, 43, 48, 30, 25, 52, 28, 41, 40, 34, 28,
41, 38, 40, 30, 35, 27, 27, 32, 44, 31],
[32, 29, 31, 25, 21, 23, 25, 39, 33, 21, 36, 21, 14, 23, 33, 27],
[31, 16, 23, 21, 13, 20, 40, 13, 27, 33, 34, 31, 13, 40, 58, 24],
[24, 17, 18, 18, 21, 18, 16, 24, 15, 18, 33, 21, 14],
[24, 21, 29, 31, 26, 18],
[23, 22, 21, 32, 33, 24],
[30, 30, 21, 23],
[29, 23, 25, 18],
[10, 20, 13, 18, 28],
[12, 17, 18],
[20, 15, 16, 16, 25, 21],
[18, 26, 17, 22],
[16, 15, 15],
[25],
[14, 18, 19, 16, 14, 20, 28, 13, 28, 39, 40, 29, 25],
[27, 26, 18, 17, 20],
[25, 25, 22, 19, 14],
[21, 22, 18],
[10, 29, 24, 21, 21],
[13],
[15],
[25],
[20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 17, 18, 20, 8, 21, 18, 24,
21, 15, 27, 21]
]
missing_verses = {
(40, 12): [47],
(40, 17): [21],
(40, 18): [11],
(40, 23): [14],
(41, 7): [16],
(41, 9): [44, 46],
(41, 11): [26],
(41, 15): [28],
(42, 17): [36],
(42, 23): [17],
(43, 5): [4],
(44, 8): [37],
(44, 15): [34],
(44, 24): [7],
(44, 28): [29],
(45, 16): [24],
}
# Creating number_chapters, last_verses and number_verses_in_book dictionaries
# from last_verse_data and missing_verses information
number_chapters = {}
last_verses = {}
number_verses_in_book = {}
for b, vv in enumerate(last_verse_data):
book = b + 1
number_chapters[book] = len(vv)
total_verses = 0
for c, last_verse in enumerate(vv):
chapter = c + 1
last_verses[book, chapter] = last_verse
total_verses += last_verse - \
len(missing_verses.get((book, chapter), []))
number_verses_in_book[book] = total_verses
try:
from urllib.parse import urlencode
from urllib.request import urlopen, Request
except ImportError: # Python 2
from urllib import urlencode
from urllib2 import urlopen, Request
import json
from .text_cache import SimpleCache
API_TOTAL_PROPORTION_OF_BOOK = 0.5
API_CONSECUTIVE_VERSES = 500
CACHE_TOTAL_PROPORTION_OF_BOOK = 0.5
CACHE_CONSECUTIVE_VERSES = 500
book_limits = dict([(k, v*CACHE_TOTAL_PROPORTION_OF_BOOK)
for (k, v) in number_verses_in_book.items()])
default_cache = SimpleCache(CACHE_CONSECUTIVE_VERSES, book_limits)
def get_passage_text(passage, api_key="", html=False, options={},
cache=default_cache):
| """
Fetch biblical text (in ESV translation) corresponding to the provided
Passage object. Returns tuple of (passage_text, truncated), where
'truncated' is a boolean indicating whether passage was shortened to comply
with API conditions.
Parameters:
'passage' is any object that returns a string representation of
itself with str(passage), the total number of verses it contains with
len(passage), and the book number with passage.book_n.
'api_key' is an alphanumeric code provided by the ESV API at
https://api.esv.org/account/
'options' is a dict of custom parameters, as per
https://api.esv.org/v3/docs/
'html' is a boolean indicating whether function should return passage in
html format or plain-text format
'cache' is a dictionary-like object or function that stores tuples of
(book_n, passage_length, passage_text) keyed to params string.
"""
# Set default parameters
params = { | identifier_body |
|
esv.py | 21, 28, 18, 16, 18, 22, 13, 30, 5, 28, 7, 47, 39, 46, 64, 34],
[22, 22, 66, 22, 22],
[28, 10, 27, 17, 17, 14, 27, 18, 11, 22, 25, 28, 23, 23, 8, 63, 24, 32,
14, 49, 32, 31, 49, 27, 17, 21, 36, 26, 21, 26, 18, 32, 33, 31, 15, 38,
28, 23, 29, 49, 26, 20, 27, 31, 25, 24, 23, 35],
[21, 49, 30, 37, 31, 28, 28, 27, 27, 21, 45, 13],
[11, 23, 5, 19, 15, 11, 16, 14, 17, 15, 12, 14, 16, 9],
[20, 32, 21],
[15, 16, 15, 13, 27, 14, 17, 14, 15],
[21],
[17, 10, 10, 11],
[16, 13, 12, 13, 15, 16, 20],
[15, 13, 19],
[17, 20, 19],
[18, 15, 20],
[15, 23],
[21, 13, 10, 14, 11, 15, 14, 23, 17, 12, 17, 14, 9, 21],
[14, 17, 18, 6],
[25, 23, 17, 25, 48, 34, 29, 34, 38, 42, 30, 50, 58, 36, 39, 28, 27, 35,
30, 34, 46, 46, 39, 51, 46, 75, 66, 20],
[45, 28, 35, 41, 43, 56, 37, 38, 50, 52, 33, 44, 37, 72, 47, 20],
[80, 52, 38, 44, 39, 49, 50, 56, 62, 42, 54, 59, 35, 35, 32, 31, 37, 43,
48, 47, 38, 71, 56, 53],
[51, 25, 36, 54, 47, 71, 53, 59, 41, 42, 57, 50, 38, 31, 27, 33, 26, 40,
42, 31, 25],
[26, 47, 26, 37, 42, 15, 60, 40, 43, 48, 30, 25, 52, 28, 41, 40, 34, 28,
41, 38, 40, 30, 35, 27, 27, 32, 44, 31],
[32, 29, 31, 25, 21, 23, 25, 39, 33, 21, 36, 21, 14, 23, 33, 27],
[31, 16, 23, 21, 13, 20, 40, 13, 27, 33, 34, 31, 13, 40, 58, 24],
[24, 17, 18, 18, 21, 18, 16, 24, 15, 18, 33, 21, 14],
[24, 21, 29, 31, 26, 18],
[23, 22, 21, 32, 33, 24],
[30, 30, 21, 23],
[29, 23, 25, 18],
[10, 20, 13, 18, 28],
[12, 17, 18],
[20, 15, 16, 16, 25, 21],
[18, 26, 17, 22],
[16, 15, 15],
[25],
[14, 18, 19, 16, 14, 20, 28, 13, 28, 39, 40, 29, 25],
[27, 26, 18, 17, 20],
[25, 25, 22, 19, 14],
[21, 22, 18],
[10, 29, 24, 21, 21],
[13],
[15],
[25],
[20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 17, 18, 20, 8, 21, 18, 24,
21, 15, 27, 21]
]
missing_verses = {
(40, 12): [47],
(40, 17): [21],
(40, 18): [11],
(40, 23): [14],
(41, 7): [16],
(41, 9): [44, 46],
(41, 11): [26],
(41, 15): [28],
(42, 17): [36],
(42, 23): [17],
(43, 5): [4],
(44, 8): [37],
(44, 15): [34],
(44, 24): [7],
(44, 28): [29],
(45, 16): [24],
}
# Creating number_chapters, last_verses and number_verses_in_book dictionaries
# from last_verse_data and missing_verses information
number_chapters = {}
last_verses = {}
number_verses_in_book = {}
for b, vv in enumerate(last_verse_data):
book = b + 1
number_chapters[book] = len(vv)
total_verses = 0
for c, last_verse in enumerate(vv):
| chapter = c + 1
last_verses[book, chapter] = last_verse
total_verses += last_verse - \
len(missing_verses.get((book, chapter), [])) | conditional_block |
|
esv.py | , 12, 25, 11, 31, 13],
[27, 32, 39, 12, 25, 23, 29, 18, 13, 19, 27, 31, 39, 33, 37, 23, 29, 33,
43, 26, 22, 51, 39, 25],
[53, 46, 28, 34, 18, 38, 51, 66, 28, 29, 43, 33, 34, 31, 34, 34, 24, 46,
21, 43, 29, 53],
[18, 25, 27, 44, 27, 33, 20, 29, 37, 36, 21, 21, 25, 29, 38, 20, 41, 37,
37, 21, 26, 20, 37, 20, 30],
[54, 55, 24, 43, 26, 81, 40, 40, 44, 14, 47, 40, 14, 17, 29, 43, 27, 17,
19, 8, 30, 19, 32, 31, 31, 32, 34, 21, 30],
[17, 18, 17, 22, 14, 42, 22, 18, 31, 19, 23, 16, 22, 15, 19, 14, 19, 34,
11, 37, 20, 12, 21, 27, 28, 23, 9, 27, 36, 27, 21, 33, 25, 33, 27, 23],
[11, 70, 13, 24, 17, 22, 28, 36, 15, 44],
[11, 20, 32, 23, 19, 19, 73, 18, 38, 39, 36, 47, 31],
[22, 23, 15, 17, 14, 14, 10, 17, 32, 3],
[22, 13, 26, 21, 27, 30, 21, 22, 35, 22, 20, 25, 28, 22, 35, 22, 16, 21,
29, 29, 34, 30, 17, 25, 6, 14, 23, 28, 25, 31, 40, 22, 33, 37, 16, 33,
24, 41, 30, 24, 34, 17],
[6, 12, 8, 8, 12, 10, 17, 9, 20, 18, 7, 8, 6, 7, 5, 11, 15, 50,
14, 9, 13, 31, 6, 10, 22, 12, 14, 9, 11, 12, 24, 11, 22, 22, 28, 12,
40, 22, 13, 17, 13, 11, 5, 26, 17, 11, 9, 14, 20, 23, 19, 9, 6, 7,
23, 13, 11, 11, 17, 12, 8, 12, 11, 10, 13, 20, 7, 35, 36, 5, 24, 20,
28, 23, 10, 12, 20, 72, 13, 19, 16, 8, 18, 12, 13, 17, 7, 18, 52, 17, | 31, 7, 10, 10, 9, 8, 18, 19, 2, 29, 176,7, 8, 9, 4, 8, 5, 6,
5, 6, 8, 8, 3, 18, 3, 3, 21, 26, 9, 8, 24, 13, 10, 7, 12, 15,
21, 10, 20, 14, 9, 6],
[33, 22, 35, 27, 23, 35, 27, 36, 18, 32, 31, 28, 25, 35, 33, 33, 28, 24,
29, 30, 31, 29, 35, 34, 28, 28, 27, 28, 27, 33, 31],
[18, 26, 22, 16, 20, 12, 29, 17, 18, 20, 10, 14],
[17, 17, 11, 16, 16, 13, 13, 14],
[31, 22, 26, 6, 30, 13, 25, 22, 21, 34, 16, 6, 22, 32, 9, 14, 14, 7,
25, 6, 17, 25, 18, 23, 12, 21, 13, 29, 24, 33, 9, 20, 24, 17, 10, 22,
38, 22, 8, 31, 29, 25, 28, 28, 25, 13, 15, 22, 26, 11, 23, 15, 12, 17,
13, 12, 21, 14, 21, 22, 11, 12, 19, 12, 25, 24],
[19, 37, 25, 31, 31, 30, 34, 22, 26, 25, 23, 17, 27, 22, 21, 21, 27, 23 | 16, 15, 5, 23, 11, 13, 12, 9, 9, 5, 8, 28, 22, 35, 45, 48, 43, 13, | random_line_split |
esv.py | 27, 17, 21, 36, 26, 21, 26, 18, 32, 33, 31, 15, 38,
28, 23, 29, 49, 26, 20, 27, 31, 25, 24, 23, 35],
[21, 49, 30, 37, 31, 28, 28, 27, 27, 21, 45, 13],
[11, 23, 5, 19, 15, 11, 16, 14, 17, 15, 12, 14, 16, 9],
[20, 32, 21],
[15, 16, 15, 13, 27, 14, 17, 14, 15],
[21],
[17, 10, 10, 11],
[16, 13, 12, 13, 15, 16, 20],
[15, 13, 19],
[17, 20, 19],
[18, 15, 20],
[15, 23],
[21, 13, 10, 14, 11, 15, 14, 23, 17, 12, 17, 14, 9, 21],
[14, 17, 18, 6],
[25, 23, 17, 25, 48, 34, 29, 34, 38, 42, 30, 50, 58, 36, 39, 28, 27, 35,
30, 34, 46, 46, 39, 51, 46, 75, 66, 20],
[45, 28, 35, 41, 43, 56, 37, 38, 50, 52, 33, 44, 37, 72, 47, 20],
[80, 52, 38, 44, 39, 49, 50, 56, 62, 42, 54, 59, 35, 35, 32, 31, 37, 43,
48, 47, 38, 71, 56, 53],
[51, 25, 36, 54, 47, 71, 53, 59, 41, 42, 57, 50, 38, 31, 27, 33, 26, 40,
42, 31, 25],
[26, 47, 26, 37, 42, 15, 60, 40, 43, 48, 30, 25, 52, 28, 41, 40, 34, 28,
41, 38, 40, 30, 35, 27, 27, 32, 44, 31],
[32, 29, 31, 25, 21, 23, 25, 39, 33, 21, 36, 21, 14, 23, 33, 27],
[31, 16, 23, 21, 13, 20, 40, 13, 27, 33, 34, 31, 13, 40, 58, 24],
[24, 17, 18, 18, 21, 18, 16, 24, 15, 18, 33, 21, 14],
[24, 21, 29, 31, 26, 18],
[23, 22, 21, 32, 33, 24],
[30, 30, 21, 23],
[29, 23, 25, 18],
[10, 20, 13, 18, 28],
[12, 17, 18],
[20, 15, 16, 16, 25, 21],
[18, 26, 17, 22],
[16, 15, 15],
[25],
[14, 18, 19, 16, 14, 20, 28, 13, 28, 39, 40, 29, 25],
[27, 26, 18, 17, 20],
[25, 25, 22, 19, 14],
[21, 22, 18],
[10, 29, 24, 21, 21],
[13],
[15],
[25],
[20, 29, 22, 11, 14, 17, 17, 13, 21, 11, 19, 17, 18, 20, 8, 21, 18, 24,
21, 15, 27, 21]
]
missing_verses = {
(40, 12): [47],
(40, 17): [21],
(40, 18): [11],
(40, 23): [14],
(41, 7): [16],
(41, 9): [44, 46],
(41, 11): [26],
(41, 15): [28],
(42, 17): [36],
(42, 23): [17],
(43, 5): [4],
(44, 8): [37],
(44, 15): [34],
(44, 24): [7],
(44, 28): [29],
(45, 16): [24],
}
# Creating number_chapters, last_verses and number_verses_in_book dictionaries
# from last_verse_data and missing_verses information
number_chapters = {}
last_verses = {}
number_verses_in_book = {}
for b, vv in enumerate(last_verse_data):
book = b + 1
number_chapters[book] = len(vv)
total_verses = 0
for c, last_verse in enumerate(vv):
chapter = c + 1
last_verses[book, chapter] = last_verse
total_verses += last_verse - \
len(missing_verses.get((book, chapter), []))
number_verses_in_book[book] = total_verses
try:
from urllib.parse import urlencode
from urllib.request import urlopen, Request
except ImportError: # Python 2
from urllib import urlencode
from urllib2 import urlopen, Request
import json
from .text_cache import SimpleCache
API_TOTAL_PROPORTION_OF_BOOK = 0.5
API_CONSECUTIVE_VERSES = 500
CACHE_TOTAL_PROPORTION_OF_BOOK = 0.5
CACHE_CONSECUTIVE_VERSES = 500
book_limits = dict([(k, v*CACHE_TOTAL_PROPORTION_OF_BOOK)
for (k, v) in number_verses_in_book.items()])
default_cache = SimpleCache(CACHE_CONSECUTIVE_VERSES, book_limits)
def | get_passage_text | identifier_name |
|
codegen.go |
// Generate declarations from module to be used together with encoding/asn1.
//
// Feature support status:
// - [x] ModuleIdentifier
// - [x] TagDefault (except AUTOMATIC)
// - [ ] ExtensibilityImplied
// - [.] ModuleBody -- see moduleContext.generateDeclarations.
func (gen declCodeGen) Generate(module ModuleDefinition, writer io.Writer) error {
if module.TagDefault == TAGS_AUTOMATIC {
// See x.680, section 12.3. It implies certain transformations to component and alternative lists that are not implemented.
return errors.New("AUTOMATIC tagged modules are not supported")
}
ctx := moduleContext{
extensibilityImplied: module.ExtensibilityImplied,
tagDefault: module.TagDefault,
lookupContext: module.ModuleBody,
params: gen.Params,
}
moduleName := goast.NewIdent(goifyName(module.ModuleIdentifier.Reference))
if len(gen.Params.Package) > 0 {
moduleName = goast.NewIdent(gen.Params.Package)
}
ast := &goast.File{
Name: moduleName,
Decls: ctx.generateDeclarations(module),
}
if len(ctx.errors) != 0 {
msg := "errors generating Go AST from module: \n"
for _, err := range ctx.errors {
msg += " " + err.Error() + "\n"
}
return errors.New(msg)
}
importDecls := make([]goast.Decl, 0)
for _, moduleName := range ctx.requiredModules {
modulePath := &goast.BasicLit{Kind: gotoken.STRING, Value: fmt.Sprintf("\"%v\"", moduleName)}
specs := []goast.Spec{&goast.ImportSpec{Path: modulePath}}
importDecls = append(importDecls, &goast.GenDecl{Tok: gotoken.IMPORT, Specs: specs})
}
ast.Decls = append(importDecls, ast.Decls...)
return goprint.Fprint(writer, gotoken.NewFileSet(), ast)
}
func goifyName(name string) string {
return strings.Title(strings.Replace(name, "-", "_", -1))
}
// generateDeclarations produces go declarations based on ModuleBody of module.
//
// Feature support status:
// - [.] AssignmentList
// - [ ] ValueAssignment
// - [x] TypeAssignment
// - [ ] Imports
func (ctx *moduleContext) generateDeclarations(module ModuleDefinition) []goast.Decl {
decls := make([]goast.Decl, 0)
for _, assignment := range module.ModuleBody.AssignmentList {
switch a := assignment.(type) {
case TypeAssignment:
decls = append(decls, ctx.generateTypeDecl(a.TypeReference, a.Type))
if decl := ctx.generateAssociatedValuesIfNeeded(a.TypeReference, a.Type); decl != nil {
decls = append(decls, decl)
}
case ValueAssignment:
if decl := ctx.tryGenerateValueAssignment(a.ValueReference, a.Type, a.Value); decl != nil {
decls = append(decls, decl)
}
}
}
return decls
}
func (ctx *moduleContext) generateTypeDecl(reference TypeReference, typeDescr Type) goast.Decl {
var isSet bool
typeBody := ctx.generateTypeBody(typeDescr, &isSet)
spec := &goast.TypeSpec{
Name: goast.NewIdent(goifyName(reference.Name())),
Type: typeBody,
Assign: 1, // not a valid Pos, but formatter just needs non-empty value
}
decl := &goast.GenDecl{
Tok: gotoken.TYPE,
Specs: []goast.Spec{spec},
}
if _, ok := typeBody.(*goast.StructType); ok {
spec.Assign = 0
}
if isSet {
oldName := spec.Name.Name
spec.Name.Name += "SET"
spec.Assign = 0
newName := spec.Name.Name
decl.Specs = append(decl.Specs, &goast.TypeSpec{
Name: goast.NewIdent(oldName),
Assign: 1,
Type: goast.NewIdent(newName),
})
}
return decl
}
func (ctx *moduleContext) tryGenerateValueAssignment(ref ValueReference, t Type, val Value) goast.Decl {
stubIsSet := false
var valExpr goast.Expr
switch val := val.(type) {
case Number:
valExpr = numberToExpr(val, ctx.params.IntegerRepr)
case Boolean:
if val {
valExpr = &goast.BasicLit{Value: "true"}
} else {
valExpr = &goast.BasicLit{Value: "false"}
}
case Real:
valExpr = &goast.BasicLit{Value: fmt.Sprint(val)}
default:
// TODO: produce a warning?
return nil
}
return &goast.GenDecl{
Tok: gotoken.VAR,
Specs: []goast.Spec{
&goast.ValueSpec{
Names: []*goast.Ident{valueRefToIdent(ref)},
Type: ctx.generateTypeBody(t, &stubIsSet),
Values: []goast.Expr{valExpr},
},
},
}
}
func (ctx *moduleContext) generateTypeBody(typeDescr Type, isSet *bool) goast.Expr {
switch t := typeDescr.(type) {
case BooleanType:
return goast.NewIdent("bool")
case IntegerType:
// TODO: generate consts
switch ctx.params.IntegerRepr {
case IntegerReprInt64:
return goast.NewIdent("int64") // TODO signed, unsigned, range constraints
case IntegerReprBigInt:
ctx.requireModule("math/big")
return &goast.StarExpr{X: goast.NewIdent("big.Int")}
default:
ctx.appendError(fmt.Errorf("unknown int type mode: %v", ctx.params.IntegerRepr))
return goast.NewIdent("int64")
}
case CharacterStringType:
return goast.NewIdent("string")
case RealType:
return goast.NewIdent("float64")
case OctetStringType:
return &goast.ArrayType{Elt: goast.NewIdent("byte")}
case SequenceType:
return ctx.structFromComponents(t.Components, t.ExtensionAdditions)
case SetType:
*isSet = true
return ctx.structFromComponents(t.Components, t.ExtensionAdditions)
case SequenceOfType:
return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)}
case SetOfType:
*isSet = true
return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)}
case TaggedType: // TODO should put tags in go code?
return ctx.generateTypeBody(t.Type, isSet)
case ConstraintedType: // TODO should generate checking code?
return ctx.generateTypeBody(t.Type, isSet)
case TypeReference: // TODO should useful types be separate type by itself?
nameAndType := ctx.resolveTypeReference(t)
if nameAndType != nil {
specialCase := ctx.generateSpecialCase(*nameAndType)
if specialCase != nil {
return specialCase
}
}
return goast.NewIdent(goifyName(t.Name()))
case RestrictedStringType: // TODO should generate checking code?
return goast.NewIdent("string")
case BitStringType:
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.BitString")
case EnumeratedType:
// TODO: generate consts
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.Enumerated")
case AnyType:
return &goast.InterfaceType{Methods: &goast.FieldList{}}
case ObjectIdentifierType:
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.ObjectIdentifier")
case ChoiceType:
return ctx.generateChoiceType(t, isSet)
default:
// NullType
ctx.appendError(fmt.Errorf("ignoring unsupported type %#v", typeDescr))
return nil
}
}
func (ctx *moduleContext) generateAssociatedValuesIfNeeded(reference TypeReference, typeDescr Type) goast.Decl {
switch typeDescr := ctx.removeWrapperTypes(typeDescr).(type) {
case IntegerType:
if len(typeDescr.NamedNumberList) == 0 {
return nil
}
var specs []goast.Spec
for _, namedNumber := range typeDescr.NamedNumberList {
var valueExpr goast.Expr
switch v := namedNumber.Value.(type) {
case Number:
valueExpr = numberToExpr(v, ctx.params.IntegerRepr)
case DefinedValue:
if v.ModuleName != "" {
ctx.appendError(fmt.Errorf("%v.%v: value references from other modules are not supported", v.ModuleName, v.ValueName))
}
valueExpr = valueRefToIdent(v.ValueName)
}
typeName := goifyName(string(reference))
specs = append(specs, &goast.ValueSpec{
Type: goast.NewIdent(goifyName(string(reference))),
| {
for _, existing := range ctx.requiredModules {
if existing == module {
return
}
}
ctx.requiredModules = append(ctx.requiredModules, module)
} | identifier_body |
|
codegen.go | moduleName = goast.NewIdent(gen.Params.Package)
}
ast := &goast.File{
Name: moduleName,
Decls: ctx.generateDeclarations(module),
}
if len(ctx.errors) != 0 {
msg := "errors generating Go AST from module: \n"
for _, err := range ctx.errors {
msg += " " + err.Error() + "\n"
}
return errors.New(msg)
}
importDecls := make([]goast.Decl, 0)
for _, moduleName := range ctx.requiredModules {
modulePath := &goast.BasicLit{Kind: gotoken.STRING, Value: fmt.Sprintf("\"%v\"", moduleName)}
specs := []goast.Spec{&goast.ImportSpec{Path: modulePath}}
importDecls = append(importDecls, &goast.GenDecl{Tok: gotoken.IMPORT, Specs: specs})
}
ast.Decls = append(importDecls, ast.Decls...)
return goprint.Fprint(writer, gotoken.NewFileSet(), ast)
}
func goifyName(name string) string {
return strings.Title(strings.Replace(name, "-", "_", -1))
}
// generateDeclarations produces go declarations based on ModuleBody of module.
//
// Feature support status:
// - [.] AssignmentList
// - [ ] ValueAssignment
// - [x] TypeAssignment
// - [ ] Imports
func (ctx *moduleContext) generateDeclarations(module ModuleDefinition) []goast.Decl {
decls := make([]goast.Decl, 0)
for _, assignment := range module.ModuleBody.AssignmentList {
switch a := assignment.(type) {
case TypeAssignment:
decls = append(decls, ctx.generateTypeDecl(a.TypeReference, a.Type))
if decl := ctx.generateAssociatedValuesIfNeeded(a.TypeReference, a.Type); decl != nil {
decls = append(decls, decl)
}
case ValueAssignment:
if decl := ctx.tryGenerateValueAssignment(a.ValueReference, a.Type, a.Value); decl != nil {
decls = append(decls, decl)
}
}
}
return decls
}
func (ctx *moduleContext) generateTypeDecl(reference TypeReference, typeDescr Type) goast.Decl {
var isSet bool
typeBody := ctx.generateTypeBody(typeDescr, &isSet)
spec := &goast.TypeSpec{
Name: goast.NewIdent(goifyName(reference.Name())),
Type: typeBody,
Assign: 1, // not a valid Pos, but formatter just needs non-empty value
}
decl := &goast.GenDecl{
Tok: gotoken.TYPE,
Specs: []goast.Spec{spec},
}
if _, ok := typeBody.(*goast.StructType); ok {
spec.Assign = 0
}
if isSet {
oldName := spec.Name.Name
spec.Name.Name += "SET"
spec.Assign = 0
newName := spec.Name.Name
decl.Specs = append(decl.Specs, &goast.TypeSpec{
Name: goast.NewIdent(oldName),
Assign: 1,
Type: goast.NewIdent(newName),
})
}
return decl
}
func (ctx *moduleContext) tryGenerateValueAssignment(ref ValueReference, t Type, val Value) goast.Decl {
stubIsSet := false
var valExpr goast.Expr
switch val := val.(type) {
case Number:
valExpr = numberToExpr(val, ctx.params.IntegerRepr)
case Boolean:
if val {
valExpr = &goast.BasicLit{Value: "true"}
} else {
valExpr = &goast.BasicLit{Value: "false"}
}
case Real:
valExpr = &goast.BasicLit{Value: fmt.Sprint(val)}
default:
// TODO: produce a warning?
return nil
}
return &goast.GenDecl{
Tok: gotoken.VAR,
Specs: []goast.Spec{
&goast.ValueSpec{
Names: []*goast.Ident{valueRefToIdent(ref)},
Type: ctx.generateTypeBody(t, &stubIsSet),
Values: []goast.Expr{valExpr},
},
},
}
}
func (ctx *moduleContext) generateTypeBody(typeDescr Type, isSet *bool) goast.Expr {
switch t := typeDescr.(type) {
case BooleanType:
return goast.NewIdent("bool") | case IntegerType:
// TODO: generate consts
switch ctx.params.IntegerRepr {
case IntegerReprInt64:
return goast.NewIdent("int64") // TODO signed, unsigned, range constraints
case IntegerReprBigInt:
ctx.requireModule("math/big")
return &goast.StarExpr{X: goast.NewIdent("big.Int")}
default:
ctx.appendError(fmt.Errorf("unknown int type mode: %v", ctx.params.IntegerRepr))
return goast.NewIdent("int64")
}
case CharacterStringType:
return goast.NewIdent("string")
case RealType:
return goast.NewIdent("float64")
case OctetStringType:
return &goast.ArrayType{Elt: goast.NewIdent("byte")}
case SequenceType:
return ctx.structFromComponents(t.Components, t.ExtensionAdditions)
case SetType:
*isSet = true
return ctx.structFromComponents(t.Components, t.ExtensionAdditions)
case SequenceOfType:
return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)}
case SetOfType:
*isSet = true
return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)}
case TaggedType: // TODO should put tags in go code?
return ctx.generateTypeBody(t.Type, isSet)
case ConstraintedType: // TODO should generate checking code?
return ctx.generateTypeBody(t.Type, isSet)
case TypeReference: // TODO should useful types be separate type by itself?
nameAndType := ctx.resolveTypeReference(t)
if nameAndType != nil {
specialCase := ctx.generateSpecialCase(*nameAndType)
if specialCase != nil {
return specialCase
}
}
return goast.NewIdent(goifyName(t.Name()))
case RestrictedStringType: // TODO should generate checking code?
return goast.NewIdent("string")
case BitStringType:
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.BitString")
case EnumeratedType:
// TODO: generate consts
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.Enumerated")
case AnyType:
return &goast.InterfaceType{Methods: &goast.FieldList{}}
case ObjectIdentifierType:
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.ObjectIdentifier")
case ChoiceType:
return ctx.generateChoiceType(t, isSet)
default:
// NullType
ctx.appendError(fmt.Errorf("ignoring unsupported type %#v", typeDescr))
return nil
}
}
func (ctx *moduleContext) generateAssociatedValuesIfNeeded(reference TypeReference, typeDescr Type) goast.Decl {
switch typeDescr := ctx.removeWrapperTypes(typeDescr).(type) {
case IntegerType:
if len(typeDescr.NamedNumberList) == 0 {
return nil
}
var specs []goast.Spec
for _, namedNumber := range typeDescr.NamedNumberList {
var valueExpr goast.Expr
switch v := namedNumber.Value.(type) {
case Number:
valueExpr = numberToExpr(v, ctx.params.IntegerRepr)
case DefinedValue:
if v.ModuleName != "" {
ctx.appendError(fmt.Errorf("%v.%v: value references from other modules are not supported", v.ModuleName, v.ValueName))
}
valueExpr = valueRefToIdent(v.ValueName)
}
typeName := goifyName(string(reference))
specs = append(specs, &goast.ValueSpec{
Type: goast.NewIdent(goifyName(string(reference))),
Names: []*goast.Ident{goast.NewIdent(typeName + "Val" + goifyName(string(namedNumber.Name)))},
Values: []goast.Expr{valueExpr},
})
}
return &goast.GenDecl{
Tok: gotoken.VAR,
Specs: specs,
}
default:
return nil
}
}
func valueRefToIdent(ref ValueReference) *goast.Ident {
return goast.NewIdent("Val" + goifyName(string(ref)))
}
func numberToExpr(val Number, repr IntegerRepr) goast.Expr {
var valueExpr goast.Expr
valueExpr = &goast.BasicLit{Value: fmt.Sprint(val.IntValue())}
if repr == IntegerReprBigInt {
valueExpr = &goast.CallExpr{Fun: goast.NewIdent("big.NewInt"), Args: []goast.Expr{valueExpr}}
}
return valueExpr
}
func (ctx *moduleContext) generateChoiceType(t ChoiceType, isSet *bool) goast.Expr {
if ctx.hasTaggedAlternatives(t) {
return goast.NewIdent("asn1.RawValue")
| random_line_split |
|
codegen.go |
}
ctx.requiredModules = append(ctx.requiredModules, module)
}
// Generate declarations from module to be used together with encoding/asn1.
//
// Feature support status:
// - [x] ModuleIdentifier
// - [x] TagDefault (except AUTOMATIC)
// - [ ] ExtensibilityImplied
// - [.] ModuleBody -- see moduleContext.generateDeclarations.
func (gen declCodeGen) Generate(module ModuleDefinition, writer io.Writer) error {
if module.TagDefault == TAGS_AUTOMATIC {
// See x.680, section 12.3. It implies certain transformations to component and alternative lists that are not implemented.
return errors.New("AUTOMATIC tagged modules are not supported")
}
ctx := moduleContext{
extensibilityImplied: module.ExtensibilityImplied,
tagDefault: module.TagDefault,
lookupContext: module.ModuleBody,
params: gen.Params,
}
moduleName := goast.NewIdent(goifyName(module.ModuleIdentifier.Reference))
if len(gen.Params.Package) > 0 {
moduleName = goast.NewIdent(gen.Params.Package)
}
ast := &goast.File{
Name: moduleName,
Decls: ctx.generateDeclarations(module),
}
if len(ctx.errors) != 0 {
msg := "errors generating Go AST from module: \n"
for _, err := range ctx.errors {
msg += " " + err.Error() + "\n"
}
return errors.New(msg)
}
importDecls := make([]goast.Decl, 0)
for _, moduleName := range ctx.requiredModules {
modulePath := &goast.BasicLit{Kind: gotoken.STRING, Value: fmt.Sprintf("\"%v\"", moduleName)}
specs := []goast.Spec{&goast.ImportSpec{Path: modulePath}}
importDecls = append(importDecls, &goast.GenDecl{Tok: gotoken.IMPORT, Specs: specs})
}
ast.Decls = append(importDecls, ast.Decls...)
return goprint.Fprint(writer, gotoken.NewFileSet(), ast)
}
func goifyName(name string) string {
return strings.Title(strings.Replace(name, "-", "_", -1))
}
// generateDeclarations produces go declarations based on ModuleBody of module.
//
// Feature support status:
// - [.] AssignmentList
// - [ ] ValueAssignment
// - [x] TypeAssignment
// - [ ] Imports
func (ctx *moduleContext) generateDeclarations(module ModuleDefinition) []goast.Decl {
decls := make([]goast.Decl, 0)
for _, assignment := range module.ModuleBody.AssignmentList {
switch a := assignment.(type) {
case TypeAssignment:
decls = append(decls, ctx.generateTypeDecl(a.TypeReference, a.Type))
if decl := ctx.generateAssociatedValuesIfNeeded(a.TypeReference, a.Type); decl != nil {
decls = append(decls, decl)
}
case ValueAssignment:
if decl := ctx.tryGenerateValueAssignment(a.ValueReference, a.Type, a.Value); decl != nil {
decls = append(decls, decl)
}
}
}
return decls
}
func (ctx *moduleContext) generateTypeDecl(reference TypeReference, typeDescr Type) goast.Decl {
var isSet bool
typeBody := ctx.generateTypeBody(typeDescr, &isSet)
spec := &goast.TypeSpec{
Name: goast.NewIdent(goifyName(reference.Name())),
Type: typeBody,
Assign: 1, // not a valid Pos, but formatter just needs non-empty value
}
decl := &goast.GenDecl{
Tok: gotoken.TYPE,
Specs: []goast.Spec{spec},
}
if _, ok := typeBody.(*goast.StructType); ok {
spec.Assign = 0
}
if isSet {
oldName := spec.Name.Name
spec.Name.Name += "SET"
spec.Assign = 0
newName := spec.Name.Name
decl.Specs = append(decl.Specs, &goast.TypeSpec{
Name: goast.NewIdent(oldName),
Assign: 1,
Type: goast.NewIdent(newName),
})
}
return decl
}
func (ctx *moduleContext) tryGenerateValueAssignment(ref ValueReference, t Type, val Value) goast.Decl {
stubIsSet := false
var valExpr goast.Expr
switch val := val.(type) {
case Number:
valExpr = numberToExpr(val, ctx.params.IntegerRepr)
case Boolean:
if val {
valExpr = &goast.BasicLit{Value: "true"}
} else {
valExpr = &goast.BasicLit{Value: "false"}
}
case Real:
valExpr = &goast.BasicLit{Value: fmt.Sprint(val)}
default:
// TODO: produce a warning?
return nil
}
return &goast.GenDecl{
Tok: gotoken.VAR,
Specs: []goast.Spec{
&goast.ValueSpec{
Names: []*goast.Ident{valueRefToIdent(ref)},
Type: ctx.generateTypeBody(t, &stubIsSet),
Values: []goast.Expr{valExpr},
},
},
}
}
func (ctx *moduleContext) generateTypeBody(typeDescr Type, isSet *bool) goast.Expr {
switch t := typeDescr.(type) {
case BooleanType:
return goast.NewIdent("bool")
case IntegerType:
// TODO: generate consts
switch ctx.params.IntegerRepr {
case IntegerReprInt64:
return goast.NewIdent("int64") // TODO signed, unsigned, range constraints
case IntegerReprBigInt:
ctx.requireModule("math/big")
return &goast.StarExpr{X: goast.NewIdent("big.Int")}
default:
ctx.appendError(fmt.Errorf("unknown int type mode: %v", ctx.params.IntegerRepr))
return goast.NewIdent("int64")
}
case CharacterStringType:
return goast.NewIdent("string")
case RealType:
return goast.NewIdent("float64")
case OctetStringType:
return &goast.ArrayType{Elt: goast.NewIdent("byte")}
case SequenceType:
return ctx.structFromComponents(t.Components, t.ExtensionAdditions)
case SetType:
*isSet = true
return ctx.structFromComponents(t.Components, t.ExtensionAdditions)
case SequenceOfType:
return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)}
case SetOfType:
*isSet = true
return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)}
case TaggedType: // TODO should put tags in go code?
return ctx.generateTypeBody(t.Type, isSet)
case ConstraintedType: // TODO should generate checking code?
return ctx.generateTypeBody(t.Type, isSet)
case TypeReference: // TODO should useful types be separate type by itself?
nameAndType := ctx.resolveTypeReference(t)
if nameAndType != nil {
specialCase := ctx.generateSpecialCase(*nameAndType)
if specialCase != nil {
return specialCase
}
}
return goast.NewIdent(goifyName(t.Name()))
case RestrictedStringType: // TODO should generate checking code?
return goast.NewIdent("string")
case BitStringType:
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.BitString")
case EnumeratedType:
// TODO: generate consts
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.Enumerated")
case AnyType:
return &goast.InterfaceType{Methods: &goast.FieldList{}}
case ObjectIdentifierType:
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.ObjectIdentifier")
case ChoiceType:
return ctx.generateChoiceType(t, isSet)
default:
// NullType
ctx.appendError(fmt.Errorf("ignoring unsupported type %#v", typeDescr))
return nil
}
}
func (ctx *moduleContext) generateAssociatedValuesIfNeeded(reference TypeReference, typeDescr Type) goast.Decl {
switch typeDescr := ctx.removeWrapperTypes(typeDescr).(type) {
case IntegerType:
if len(typeDescr.NamedNumberList) == 0 {
return nil
}
var specs []goast.Spec
for _, namedNumber := range typeDescr.NamedNumberList {
var valueExpr goast.Expr
switch v := namedNumber.Value.(type) {
case Number:
valueExpr = numberToExpr(v, ctx.params.IntegerRepr)
case DefinedValue:
if v.ModuleName != "" {
ctx.appendError(fmt.Errorf("%v.%v: value references from other modules are not supported", v.ModuleName, v.ValueName))
}
valueExpr = valueRefToIdent(v.ValueName)
}
typeName := goifyName(string(reference))
specs = append(specs, &goast.ValueSpec{
Type: goast.NewIdent(goifyName(string(reference))),
Names: []*goast.Ident{goast.NewIdent(typeName | {
return
} | conditional_block |
|
codegen.go | : fmt.Sprintf("\"%v\"", moduleName)}
specs := []goast.Spec{&goast.ImportSpec{Path: modulePath}}
importDecls = append(importDecls, &goast.GenDecl{Tok: gotoken.IMPORT, Specs: specs})
}
ast.Decls = append(importDecls, ast.Decls...)
return goprint.Fprint(writer, gotoken.NewFileSet(), ast)
}
func goifyName(name string) string {
return strings.Title(strings.Replace(name, "-", "_", -1))
}
// generateDeclarations produces go declarations based on ModuleBody of module.
//
// Feature support status:
// - [.] AssignmentList
// - [ ] ValueAssignment
// - [x] TypeAssignment
// - [ ] Imports
func (ctx *moduleContext) generateDeclarations(module ModuleDefinition) []goast.Decl {
decls := make([]goast.Decl, 0)
for _, assignment := range module.ModuleBody.AssignmentList {
switch a := assignment.(type) {
case TypeAssignment:
decls = append(decls, ctx.generateTypeDecl(a.TypeReference, a.Type))
if decl := ctx.generateAssociatedValuesIfNeeded(a.TypeReference, a.Type); decl != nil {
decls = append(decls, decl)
}
case ValueAssignment:
if decl := ctx.tryGenerateValueAssignment(a.ValueReference, a.Type, a.Value); decl != nil {
decls = append(decls, decl)
}
}
}
return decls
}
func (ctx *moduleContext) generateTypeDecl(reference TypeReference, typeDescr Type) goast.Decl {
var isSet bool
typeBody := ctx.generateTypeBody(typeDescr, &isSet)
spec := &goast.TypeSpec{
Name: goast.NewIdent(goifyName(reference.Name())),
Type: typeBody,
Assign: 1, // not a valid Pos, but formatter just needs non-empty value
}
decl := &goast.GenDecl{
Tok: gotoken.TYPE,
Specs: []goast.Spec{spec},
}
if _, ok := typeBody.(*goast.StructType); ok {
spec.Assign = 0
}
if isSet {
oldName := spec.Name.Name
spec.Name.Name += "SET"
spec.Assign = 0
newName := spec.Name.Name
decl.Specs = append(decl.Specs, &goast.TypeSpec{
Name: goast.NewIdent(oldName),
Assign: 1,
Type: goast.NewIdent(newName),
})
}
return decl
}
func (ctx *moduleContext) tryGenerateValueAssignment(ref ValueReference, t Type, val Value) goast.Decl {
stubIsSet := false
var valExpr goast.Expr
switch val := val.(type) {
case Number:
valExpr = numberToExpr(val, ctx.params.IntegerRepr)
case Boolean:
if val {
valExpr = &goast.BasicLit{Value: "true"}
} else {
valExpr = &goast.BasicLit{Value: "false"}
}
case Real:
valExpr = &goast.BasicLit{Value: fmt.Sprint(val)}
default:
// TODO: produce a warning?
return nil
}
return &goast.GenDecl{
Tok: gotoken.VAR,
Specs: []goast.Spec{
&goast.ValueSpec{
Names: []*goast.Ident{valueRefToIdent(ref)},
Type: ctx.generateTypeBody(t, &stubIsSet),
Values: []goast.Expr{valExpr},
},
},
}
}
func (ctx *moduleContext) generateTypeBody(typeDescr Type, isSet *bool) goast.Expr {
switch t := typeDescr.(type) {
case BooleanType:
return goast.NewIdent("bool")
case IntegerType:
// TODO: generate consts
switch ctx.params.IntegerRepr {
case IntegerReprInt64:
return goast.NewIdent("int64") // TODO signed, unsigned, range constraints
case IntegerReprBigInt:
ctx.requireModule("math/big")
return &goast.StarExpr{X: goast.NewIdent("big.Int")}
default:
ctx.appendError(fmt.Errorf("unknown int type mode: %v", ctx.params.IntegerRepr))
return goast.NewIdent("int64")
}
case CharacterStringType:
return goast.NewIdent("string")
case RealType:
return goast.NewIdent("float64")
case OctetStringType:
return &goast.ArrayType{Elt: goast.NewIdent("byte")}
case SequenceType:
return ctx.structFromComponents(t.Components, t.ExtensionAdditions)
case SetType:
*isSet = true
return ctx.structFromComponents(t.Components, t.ExtensionAdditions)
case SequenceOfType:
return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)}
case SetOfType:
*isSet = true
return &goast.ArrayType{Elt: ctx.generateTypeBody(t.Type, isSet)}
case TaggedType: // TODO should put tags in go code?
return ctx.generateTypeBody(t.Type, isSet)
case ConstraintedType: // TODO should generate checking code?
return ctx.generateTypeBody(t.Type, isSet)
case TypeReference: // TODO should useful types be separate type by itself?
nameAndType := ctx.resolveTypeReference(t)
if nameAndType != nil {
specialCase := ctx.generateSpecialCase(*nameAndType)
if specialCase != nil {
return specialCase
}
}
return goast.NewIdent(goifyName(t.Name()))
case RestrictedStringType: // TODO should generate checking code?
return goast.NewIdent("string")
case BitStringType:
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.BitString")
case EnumeratedType:
// TODO: generate consts
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.Enumerated")
case AnyType:
return &goast.InterfaceType{Methods: &goast.FieldList{}}
case ObjectIdentifierType:
ctx.requireModule("encoding/asn1")
return goast.NewIdent("asn1.ObjectIdentifier")
case ChoiceType:
return ctx.generateChoiceType(t, isSet)
default:
// NullType
ctx.appendError(fmt.Errorf("ignoring unsupported type %#v", typeDescr))
return nil
}
}
func (ctx *moduleContext) generateAssociatedValuesIfNeeded(reference TypeReference, typeDescr Type) goast.Decl {
switch typeDescr := ctx.removeWrapperTypes(typeDescr).(type) {
case IntegerType:
if len(typeDescr.NamedNumberList) == 0 {
return nil
}
var specs []goast.Spec
for _, namedNumber := range typeDescr.NamedNumberList {
var valueExpr goast.Expr
switch v := namedNumber.Value.(type) {
case Number:
valueExpr = numberToExpr(v, ctx.params.IntegerRepr)
case DefinedValue:
if v.ModuleName != "" {
ctx.appendError(fmt.Errorf("%v.%v: value references from other modules are not supported", v.ModuleName, v.ValueName))
}
valueExpr = valueRefToIdent(v.ValueName)
}
typeName := goifyName(string(reference))
specs = append(specs, &goast.ValueSpec{
Type: goast.NewIdent(goifyName(string(reference))),
Names: []*goast.Ident{goast.NewIdent(typeName + "Val" + goifyName(string(namedNumber.Name)))},
Values: []goast.Expr{valueExpr},
})
}
return &goast.GenDecl{
Tok: gotoken.VAR,
Specs: specs,
}
default:
return nil
}
}
func valueRefToIdent(ref ValueReference) *goast.Ident {
return goast.NewIdent("Val" + goifyName(string(ref)))
}
func numberToExpr(val Number, repr IntegerRepr) goast.Expr {
var valueExpr goast.Expr
valueExpr = &goast.BasicLit{Value: fmt.Sprint(val.IntValue())}
if repr == IntegerReprBigInt {
valueExpr = &goast.CallExpr{Fun: goast.NewIdent("big.NewInt"), Args: []goast.Expr{valueExpr}}
}
return valueExpr
}
func (ctx *moduleContext) generateChoiceType(t ChoiceType, isSet *bool) goast.Expr {
if ctx.hasTaggedAlternatives(t) {
return goast.NewIdent("asn1.RawValue")
}
if len(t.AlternativeTypeList) == 1 {
return ctx.generateTypeBody(t.AlternativeTypeList[0].Type, isSet) // optimization for X.509 edge case
}
return &goast.InterfaceType{Methods: &goast.FieldList{}}
}
func (ctx *moduleContext) hasTaggedAlternatives(t ChoiceType) bool {
for _, f := range t.AlternativeTypeList {
if ctx.taggedChoiceTypeAlternative(f.Identifier, f.Type) {
return true
}
}
return false
}
func (ctx *moduleContext) | taggedChoiceTypeAlternative | identifier_name |
|
family_structs.go | ID: util.RandomID(),
PersonID: f.model.PersonID,
Latitude: city.Latitude,
Longitude: city.Longitude,
Country: city.Country,
City: city.City,
EventType: name,
Year: year,
}
f.events = append(f.events, event)
return event
}
func (f *Person) createMirrorEvent(event models.Event) {
event.EventID = util.RandomID()
event.PersonID = f.model.PersonID
f.events = append(f.events, event)
}
/*
NumEvents returns the person's number of events
*/
func (f Person) NumEvents() int {
return len(f.events)
}
/*
Save will persist the person and their events in the database
*/
func (f *Person) Save(username string) (err error) {
f.model.Username = username
err = f.model.Save()
if err != nil {
return
}
for _, event := range f.events {
event.Username = username
err = event.Save()
if err != nil {
return
}
}
return
}
/*
Dies will appropriately set the Person as dead at given year
*/
func (f *Person) Dies(year int) {
f.deathYear = year
f.createEvent("DEATH", year)
}
/*
Born will set the person's birth year and create the birth event
*/
func (f *Person) Born(year int) {
f.birthYear = year
f.createEvent("BIRTH", year)
}
/*
Marry will set appropriate fields for Person now being married.
*/
func (f *Person) Marry(spouse *Person, year int) {
if f.married {
panic(fmt.Sprintf("Person is already married\n%v", *f))
}
f.spouses = append(f.spouses, spouse)
f.marriageYears = append(f.marriageYears, year)
f.married = true
f.model.SpouseID = sql.NullString{spouse.model.PersonID, true}
event := f.createEvent("MARRIAGE", year)
spouse.spouses = append(spouse.spouses, f)
spouse.marriageYears = append(spouse.marriageYears, year)
spouse.married = true
spouse.model.SpouseID = sql.NullString{f.model.PersonID, true}
spouse.createMirrorEvent(event)
}
/*
Divorce will set appropriate fields for Person getting divorced.
*/
func (f *Person) Divorce(year int) {
if !f.married {
panic(fmt.Sprintf("Person is not married\n%v", *f))
}
spouse, err := f.CurrSpouse()
if err != nil {
panic(err)
}
f.divorceYears = append(f.divorceYears, year)
f.married = false
f.model.SpouseID = sql.NullString{"", false}
event := f.createEvent("DIVORCE", year)
spouse.divorceYears = append(spouse.divorceYears, year)
spouse.married = false
spouse.model.SpouseID = sql.NullString{"", false}
spouse.createMirrorEvent(event)
}
/*
HaveChild will edit the person's children and add the newborn event
*/
func (f *Person) HaveChild(child *Person, year int) {
spouse, err := f.CurrSpouse()
if err != nil {
panic(err)
}
f.children = append(f.children, child)
spouse.children = append(spouse.children, child)
event := f.createEvent("NEWBORN", year)
spouse.createMirrorEvent(event)
}
/*
HaveParents will set the parents of the person
*/
func (f *Person) HaveParents(father *Person, mother *Person) {
f.father = father
f.mother = mother
f.model.FatherID = sql.NullString{father.model.PersonID, true}
f.model.MotherID = sql.NullString{mother.model.PersonID, true}
}
/*
IsDead returns if the person is dead or not
*/
func (f Person) IsDead() bool {
return f.deathYear != -1
}
/*
IsMarried returns if the person is married or not
*/
func (f Person) IsMarried() bool {
return f.married
}
/*
IsStraight returns if the person is straight or not
*/
func (f Person) IsStraight() bool {
return f.straight
}
/*
Gender will return the person's gender
*/
func (f Person) Gender() string {
return f.model.Gender
}
/*
Age returns the person's year, given a year
*/
func (f Person) Age(year int) int {
return year - f.birthYear
}
/*
DeathYear returns the person's DeathYear, which is -1 if
the person isn't dead yet
*/
func (f Person) DeathYear() int {
return f.deathYear
}
/*
Spouses returns a slice of the person's spouses in
chronological order
*/
func (f Person) Spouses() []*Person {
return f.spouses
}
/*
CurrSpouse will return the person's current spouse,
and return an error if person isn't married
*/
func (f Person) CurrSpouse() (spouse *Person, err error) {
if !f.married {
return nil, fmt.Errorf("person is not married:\n%v", f)
}
return f.spouses[len(f.spouses)-1], nil
}
/*
Children will return a map of all spouses -> slice of children
*/
func (f Person) Children() (m map[*Person][]*Person) {
m = make(map[*Person][]*Person)
for _, spouse := range f.spouses {
for _, child := range f.children {
if child.father == spouse || child.mother == spouse {
if _, ok := m[spouse]; !ok {
m[spouse] = make([]*Person, 0)
} | }
}
return
}
/*
MarriageYears will return the person's marriage years
*/
func (f Person) MarriageYears() []int {
return f.marriageYears
}
/*
DivorceYears returns the person's divorce years
*/
func (f Person) DivorceYears() []int {
return f.divorceYears
}
/*
Generation is what the name implies, and represented by a slice
of Person pointers, in order to keep everything mutable.
*/
type Generation []*Person
/*
AllDead will returns whether or not everyone in the generation
is dead
*/
func (g Generation) AllDead() bool {
if g == nil || len(g) == 0 {
return true
}
for _, p := range g {
if !p.IsDead() {
return false
}
}
return true
}
/*
Population is what the name implies, and represented by a slice
of Generations
*/
type Population []Generation
/*
GetAlive returns a slice of all alive people, irrespective of their generation
*/
func (pop *Population) GetAlive() []*Person {
people := make([]*Person, 0)
for _, gen := range *pop {
for _, p := range gen {
if !p.IsDead() {
people = append(people, p)
}
}
}
return people
}
/*
AddPerson will add the person to the proper generation in the population
*/
func (pop *Population) AddPerson(f *Person) {
generation := 0
for i, gen := range *pop {
for _, p := range gen {
if (p == f.mother || p == f.father) && i >= generation {
generation = i + 1
}
}
}
if generation >= len(*pop) {
// should only need to add one more generation
*pop = append(*pop, make(Generation, 0))
}
(*pop)[generation] = append((*pop)[generation], f)
}
/*
AreFamily returns whether or not the two people are family members
*/
func AreFamily(a *Person, b *Person) bool {
if a == b {
return false
}
// are parents or siblings x removed
if recursiveAreParentsOrSiblings(a, b) {
return true
}
// are spouses
for _, spouse := range a.spouses {
if spouse == b {
return true
}
if recursiveAreParentsOrSiblings(spouse, b) {
return true
}
}
for _, spouse := range b.spouses {
if spouse == a {
return true
}
if recursiveAreParentsOrSiblings(spouse, a) {
return true
}
}
// are cousins
for _, parentA := range []*Person{a.mother, a.father} {
if parentA == nil {
continue
}
for _, parentB := range []*Person{b.mother, b.father} {
if parentB == nil {
continue
}
if areSiblingsOrParents(parentA, parentB) {
return true
}
}
}
return false
}
func recursiveAreParentsOrSiblings(a *Person, b *Person) bool {
// recursion up the tree
if goUpTree(a, b) || goUpTree(b, a) {
return true
}
// recusion down the tree
if goDownTree(a, b) || goDownTree(b, a) {
return true
}
return false
}
func areSiblingsOrParents(a *Person, b *Person) bool {
// is either one parents | m[spouse] = append(m[spouse], child)
} | random_line_split |
family_structs.go |
err = event.Save()
if err != nil {
return
}
}
return
}
/*
Dies will appropriately set the Person as dead at given year
*/
func (f *Person) Dies(year int) {
f.deathYear = year
f.createEvent("DEATH", year)
}
/*
Born will set the person's birth year and create the birth event
*/
func (f *Person) Born(year int) {
f.birthYear = year
f.createEvent("BIRTH", year)
}
/*
Marry will set appropriate fields for Person now being married.
*/
func (f *Person) Marry(spouse *Person, year int) {
if f.married {
panic(fmt.Sprintf("Person is already married\n%v", *f))
}
f.spouses = append(f.spouses, spouse)
f.marriageYears = append(f.marriageYears, year)
f.married = true
f.model.SpouseID = sql.NullString{spouse.model.PersonID, true}
event := f.createEvent("MARRIAGE", year)
spouse.spouses = append(spouse.spouses, f)
spouse.marriageYears = append(spouse.marriageYears, year)
spouse.married = true
spouse.model.SpouseID = sql.NullString{f.model.PersonID, true}
spouse.createMirrorEvent(event)
}
/*
Divorce will set appropriate fields for Person getting divorced.
*/
func (f *Person) Divorce(year int) {
if !f.married {
panic(fmt.Sprintf("Person is not married\n%v", *f))
}
spouse, err := f.CurrSpouse()
if err != nil {
panic(err)
}
f.divorceYears = append(f.divorceYears, year)
f.married = false
f.model.SpouseID = sql.NullString{"", false}
event := f.createEvent("DIVORCE", year)
spouse.divorceYears = append(spouse.divorceYears, year)
spouse.married = false
spouse.model.SpouseID = sql.NullString{"", false}
spouse.createMirrorEvent(event)
}
/*
HaveChild will edit the person's children and add the newborn event
*/
func (f *Person) HaveChild(child *Person, year int) {
spouse, err := f.CurrSpouse()
if err != nil {
panic(err)
}
f.children = append(f.children, child)
spouse.children = append(spouse.children, child)
event := f.createEvent("NEWBORN", year)
spouse.createMirrorEvent(event)
}
/*
HaveParents will set the parents of the person
*/
func (f *Person) HaveParents(father *Person, mother *Person) {
f.father = father
f.mother = mother
f.model.FatherID = sql.NullString{father.model.PersonID, true}
f.model.MotherID = sql.NullString{mother.model.PersonID, true}
}
/*
IsDead returns if the person is dead or not
*/
func (f Person) IsDead() bool {
return f.deathYear != -1
}
/*
IsMarried returns if the person is married or not
*/
func (f Person) IsMarried() bool {
return f.married
}
/*
IsStraight returns if the person is straight or not
*/
func (f Person) IsStraight() bool {
return f.straight
}
/*
Gender will return the person's gender
*/
func (f Person) Gender() string {
return f.model.Gender
}
/*
Age returns the person's year, given a year
*/
func (f Person) Age(year int) int {
return year - f.birthYear
}
/*
DeathYear returns the person's DeathYear, which is -1 if
the person isn't dead yet
*/
func (f Person) DeathYear() int {
return f.deathYear
}
/*
Spouses returns a slice of the person's spouses in
chronological order
*/
func (f Person) Spouses() []*Person {
return f.spouses
}
/*
CurrSpouse will return the person's current spouse,
and return an error if person isn't married
*/
func (f Person) CurrSpouse() (spouse *Person, err error) {
if !f.married {
return nil, fmt.Errorf("person is not married:\n%v", f)
}
return f.spouses[len(f.spouses)-1], nil
}
/*
Children will return a map of all spouses -> slice of children
*/
func (f Person) Children() (m map[*Person][]*Person) {
m = make(map[*Person][]*Person)
for _, spouse := range f.spouses {
for _, child := range f.children {
if child.father == spouse || child.mother == spouse {
if _, ok := m[spouse]; !ok {
m[spouse] = make([]*Person, 0)
}
m[spouse] = append(m[spouse], child)
}
}
}
return
}
/*
MarriageYears will return the person's marriage years
*/
func (f Person) MarriageYears() []int {
return f.marriageYears
}
/*
DivorceYears returns the person's divorce years
*/
func (f Person) DivorceYears() []int {
return f.divorceYears
}
/*
Generation is what the name implies, and represented by a slice
of Person pointers, in order to keep everything mutable.
*/
type Generation []*Person
/*
AllDead will returns whether or not everyone in the generation
is dead
*/
func (g Generation) AllDead() bool {
if g == nil || len(g) == 0 {
return true
}
for _, p := range g {
if !p.IsDead() {
return false
}
}
return true
}
/*
Population is what the name implies, and represented by a slice
of Generations
*/
type Population []Generation
/*
GetAlive returns a slice of all alive people, irrespective of their generation
*/
func (pop *Population) GetAlive() []*Person {
people := make([]*Person, 0)
for _, gen := range *pop {
for _, p := range gen {
if !p.IsDead() {
people = append(people, p)
}
}
}
return people
}
/*
AddPerson will add the person to the proper generation in the population
*/
func (pop *Population) AddPerson(f *Person) {
generation := 0
for i, gen := range *pop {
for _, p := range gen {
if (p == f.mother || p == f.father) && i >= generation {
generation = i + 1
}
}
}
if generation >= len(*pop) {
// should only need to add one more generation
*pop = append(*pop, make(Generation, 0))
}
(*pop)[generation] = append((*pop)[generation], f)
}
/*
AreFamily returns whether or not the two people are family members
*/
func AreFamily(a *Person, b *Person) bool {
if a == b {
return false
}
// are parents or siblings x removed
if recursiveAreParentsOrSiblings(a, b) {
return true
}
// are spouses
for _, spouse := range a.spouses {
if spouse == b {
return true
}
if recursiveAreParentsOrSiblings(spouse, b) {
return true
}
}
for _, spouse := range b.spouses {
if spouse == a {
return true
}
if recursiveAreParentsOrSiblings(spouse, a) {
return true
}
}
// are cousins
for _, parentA := range []*Person{a.mother, a.father} {
if parentA == nil {
continue
}
for _, parentB := range []*Person{b.mother, b.father} {
if parentB == nil {
continue
}
if areSiblingsOrParents(parentA, parentB) {
return true
}
}
}
return false
}
func recursiveAreParentsOrSiblings(a *Person, b *Person) bool {
// recursion up the tree
if goUpTree(a, b) || goUpTree(b, a) {
return true
}
// recusion down the tree
if goDownTree(a, b) || goDownTree(b, a) {
return true
}
return false
}
func areSiblingsOrParents(a *Person, b *Person) bool {
// is either one parents of the other
if a.mother == b ||
a.father == b ||
b.mother == a ||
b.father == a {
return true
}
// are siblings
if (a.mother == b.mother && a.mother != nil) ||
(a.father == b.father && a.father != nil) {
return true
}
return false
}
func goUpTree(a *Person, b *Person) bool {
if a == nil || b == nil {
return false
}
if areSiblingsOrParents(a, b) {
return true
}
if goUpTree(a.father, b) ||
goUpTree(a.mother, b) {
return true
}
return false
}
func goDownTree(a *Person, b *Person) bool {
if a == nil || b == nil {
return false
}
if areSiblingsOrParents(a, b) | {
return true
} | conditional_block |
|
family_structs.go | : util.RandomID(),
PersonID: f.model.PersonID,
Latitude: city.Latitude,
Longitude: city.Longitude,
Country: city.Country,
City: city.City,
EventType: name,
Year: year,
}
f.events = append(f.events, event)
return event
}
func (f *Person) createMirrorEvent(event models.Event) {
event.EventID = util.RandomID()
event.PersonID = f.model.PersonID
f.events = append(f.events, event)
}
/*
NumEvents returns the person's number of events
*/
func (f Person) NumEvents() int {
return len(f.events)
}
/*
Save will persist the person and their events in the database
*/
func (f *Person) Save(username string) (err error) {
f.model.Username = username
err = f.model.Save()
if err != nil {
return
}
for _, event := range f.events {
event.Username = username
err = event.Save()
if err != nil {
return
}
}
return
}
/*
Dies will appropriately set the Person as dead at given year
*/
func (f *Person) Dies(year int) {
f.deathYear = year
f.createEvent("DEATH", year)
}
/*
Born will set the person's birth year and create the birth event
*/
func (f *Person) Born(year int) {
f.birthYear = year
f.createEvent("BIRTH", year)
}
/*
Marry will set appropriate fields for Person now being married.
*/
func (f *Person) Marry(spouse *Person, year int) {
if f.married {
panic(fmt.Sprintf("Person is already married\n%v", *f))
}
f.spouses = append(f.spouses, spouse)
f.marriageYears = append(f.marriageYears, year)
f.married = true
f.model.SpouseID = sql.NullString{spouse.model.PersonID, true}
event := f.createEvent("MARRIAGE", year)
spouse.spouses = append(spouse.spouses, f)
spouse.marriageYears = append(spouse.marriageYears, year)
spouse.married = true
spouse.model.SpouseID = sql.NullString{f.model.PersonID, true}
spouse.createMirrorEvent(event)
}
/*
Divorce will set appropriate fields for Person getting divorced.
*/
func (f *Person) Divorce(year int) {
if !f.married {
panic(fmt.Sprintf("Person is not married\n%v", *f))
}
spouse, err := f.CurrSpouse()
if err != nil {
panic(err)
}
f.divorceYears = append(f.divorceYears, year)
f.married = false
f.model.SpouseID = sql.NullString{"", false}
event := f.createEvent("DIVORCE", year)
spouse.divorceYears = append(spouse.divorceYears, year)
spouse.married = false
spouse.model.SpouseID = sql.NullString{"", false}
spouse.createMirrorEvent(event)
}
/*
HaveChild will edit the person's children and add the newborn event
*/
func (f *Person) HaveChild(child *Person, year int) {
spouse, err := f.CurrSpouse()
if err != nil {
panic(err)
}
f.children = append(f.children, child)
spouse.children = append(spouse.children, child)
event := f.createEvent("NEWBORN", year)
spouse.createMirrorEvent(event)
}
/*
HaveParents will set the parents of the person
*/
func (f *Person) HaveParents(father *Person, mother *Person) {
f.father = father
f.mother = mother
f.model.FatherID = sql.NullString{father.model.PersonID, true}
f.model.MotherID = sql.NullString{mother.model.PersonID, true}
}
/*
IsDead returns if the person is dead or not
*/
func (f Person) | () bool {
return f.deathYear != -1
}
/*
IsMarried returns if the person is married or not
*/
func (f Person) IsMarried() bool {
return f.married
}
/*
IsStraight returns if the person is straight or not
*/
func (f Person) IsStraight() bool {
return f.straight
}
/*
Gender will return the person's gender
*/
func (f Person) Gender() string {
return f.model.Gender
}
/*
Age returns the person's year, given a year
*/
func (f Person) Age(year int) int {
return year - f.birthYear
}
/*
DeathYear returns the person's DeathYear, which is -1 if
the person isn't dead yet
*/
func (f Person) DeathYear() int {
return f.deathYear
}
/*
Spouses returns a slice of the person's spouses in
chronological order
*/
func (f Person) Spouses() []*Person {
return f.spouses
}
/*
CurrSpouse will return the person's current spouse,
and return an error if person isn't married
*/
func (f Person) CurrSpouse() (spouse *Person, err error) {
if !f.married {
return nil, fmt.Errorf("person is not married:\n%v", f)
}
return f.spouses[len(f.spouses)-1], nil
}
/*
Children will return a map of all spouses -> slice of children
*/
func (f Person) Children() (m map[*Person][]*Person) {
m = make(map[*Person][]*Person)
for _, spouse := range f.spouses {
for _, child := range f.children {
if child.father == spouse || child.mother == spouse {
if _, ok := m[spouse]; !ok {
m[spouse] = make([]*Person, 0)
}
m[spouse] = append(m[spouse], child)
}
}
}
return
}
/*
MarriageYears will return the person's marriage years
*/
func (f Person) MarriageYears() []int {
return f.marriageYears
}
/*
DivorceYears returns the person's divorce years
*/
func (f Person) DivorceYears() []int {
return f.divorceYears
}
/*
Generation is what the name implies, and represented by a slice
of Person pointers, in order to keep everything mutable.
*/
type Generation []*Person
/*
AllDead will returns whether or not everyone in the generation
is dead
*/
func (g Generation) AllDead() bool {
if g == nil || len(g) == 0 {
return true
}
for _, p := range g {
if !p.IsDead() {
return false
}
}
return true
}
/*
Population is what the name implies, and represented by a slice
of Generations
*/
type Population []Generation
/*
GetAlive returns a slice of all alive people, irrespective of their generation
*/
func (pop *Population) GetAlive() []*Person {
people := make([]*Person, 0)
for _, gen := range *pop {
for _, p := range gen {
if !p.IsDead() {
people = append(people, p)
}
}
}
return people
}
/*
AddPerson will add the person to the proper generation in the population
*/
func (pop *Population) AddPerson(f *Person) {
generation := 0
for i, gen := range *pop {
for _, p := range gen {
if (p == f.mother || p == f.father) && i >= generation {
generation = i + 1
}
}
}
if generation >= len(*pop) {
// should only need to add one more generation
*pop = append(*pop, make(Generation, 0))
}
(*pop)[generation] = append((*pop)[generation], f)
}
/*
AreFamily returns whether or not the two people are family members
*/
func AreFamily(a *Person, b *Person) bool {
if a == b {
return false
}
// are parents or siblings x removed
if recursiveAreParentsOrSiblings(a, b) {
return true
}
// are spouses
for _, spouse := range a.spouses {
if spouse == b {
return true
}
if recursiveAreParentsOrSiblings(spouse, b) {
return true
}
}
for _, spouse := range b.spouses {
if spouse == a {
return true
}
if recursiveAreParentsOrSiblings(spouse, a) {
return true
}
}
// are cousins
for _, parentA := range []*Person{a.mother, a.father} {
if parentA == nil {
continue
}
for _, parentB := range []*Person{b.mother, b.father} {
if parentB == nil {
continue
}
if areSiblingsOrParents(parentA, parentB) {
return true
}
}
}
return false
}
func recursiveAreParentsOrSiblings(a *Person, b *Person) bool {
// recursion up the tree
if goUpTree(a, b) || goUpTree(b, a) {
return true
}
// recusion down the tree
if goDownTree(a, b) || goDownTree(b, a) {
return true
}
return false
}
func areSiblingsOrParents(a *Person, b *Person) bool {
// is either one parents | IsDead | identifier_name |
family_structs.go | : util.RandomID(),
PersonID: f.model.PersonID,
Latitude: city.Latitude,
Longitude: city.Longitude,
Country: city.Country,
City: city.City,
EventType: name,
Year: year,
}
f.events = append(f.events, event)
return event
}
func (f *Person) createMirrorEvent(event models.Event) {
event.EventID = util.RandomID()
event.PersonID = f.model.PersonID
f.events = append(f.events, event)
}
/*
NumEvents returns the person's number of events
*/
func (f Person) NumEvents() int {
return len(f.events)
}
/*
Save will persist the person and their events in the database
*/
func (f *Person) Save(username string) (err error) {
f.model.Username = username
err = f.model.Save()
if err != nil {
return
}
for _, event := range f.events {
event.Username = username
err = event.Save()
if err != nil {
return
}
}
return
}
/*
Dies will appropriately set the Person as dead at given year
*/
func (f *Person) Dies(year int) {
f.deathYear = year
f.createEvent("DEATH", year)
}
/*
Born will set the person's birth year and create the birth event
*/
func (f *Person) Born(year int) {
f.birthYear = year
f.createEvent("BIRTH", year)
}
/*
Marry will set appropriate fields for Person now being married.
*/
func (f *Person) Marry(spouse *Person, year int) {
if f.married {
panic(fmt.Sprintf("Person is already married\n%v", *f))
}
f.spouses = append(f.spouses, spouse)
f.marriageYears = append(f.marriageYears, year)
f.married = true
f.model.SpouseID = sql.NullString{spouse.model.PersonID, true}
event := f.createEvent("MARRIAGE", year)
spouse.spouses = append(spouse.spouses, f)
spouse.marriageYears = append(spouse.marriageYears, year)
spouse.married = true
spouse.model.SpouseID = sql.NullString{f.model.PersonID, true}
spouse.createMirrorEvent(event)
}
/*
Divorce will set appropriate fields for Person getting divorced.
*/
func (f *Person) Divorce(year int) {
if !f.married {
panic(fmt.Sprintf("Person is not married\n%v", *f))
}
spouse, err := f.CurrSpouse()
if err != nil {
panic(err)
}
f.divorceYears = append(f.divorceYears, year)
f.married = false
f.model.SpouseID = sql.NullString{"", false}
event := f.createEvent("DIVORCE", year)
spouse.divorceYears = append(spouse.divorceYears, year)
spouse.married = false
spouse.model.SpouseID = sql.NullString{"", false}
spouse.createMirrorEvent(event)
}
/*
HaveChild will edit the person's children and add the newborn event
*/
func (f *Person) HaveChild(child *Person, year int) {
spouse, err := f.CurrSpouse()
if err != nil {
panic(err)
}
f.children = append(f.children, child)
spouse.children = append(spouse.children, child)
event := f.createEvent("NEWBORN", year)
spouse.createMirrorEvent(event)
}
/*
HaveParents will set the parents of the person
*/
func (f *Person) HaveParents(father *Person, mother *Person) {
f.father = father
f.mother = mother
f.model.FatherID = sql.NullString{father.model.PersonID, true}
f.model.MotherID = sql.NullString{mother.model.PersonID, true}
}
/*
IsDead returns if the person is dead or not
*/
func (f Person) IsDead() bool {
return f.deathYear != -1
}
/*
IsMarried returns if the person is married or not
*/
func (f Person) IsMarried() bool {
return f.married
}
/*
IsStraight returns if the person is straight or not
*/
func (f Person) IsStraight() bool {
return f.straight
}
/*
Gender will return the person's gender
*/
func (f Person) Gender() string {
return f.model.Gender
}
/*
Age returns the person's year, given a year
*/
func (f Person) Age(year int) int {
return year - f.birthYear
}
/*
DeathYear returns the person's DeathYear, which is -1 if
the person isn't dead yet
*/
func (f Person) DeathYear() int |
/*
Spouses returns a slice of the person's spouses in
chronological order
*/
func (f Person) Spouses() []*Person {
return f.spouses
}
/*
CurrSpouse will return the person's current spouse,
and return an error if person isn't married
*/
func (f Person) CurrSpouse() (spouse *Person, err error) {
if !f.married {
return nil, fmt.Errorf("person is not married:\n%v", f)
}
return f.spouses[len(f.spouses)-1], nil
}
/*
Children will return a map of all spouses -> slice of children
*/
func (f Person) Children() (m map[*Person][]*Person) {
m = make(map[*Person][]*Person)
for _, spouse := range f.spouses {
for _, child := range f.children {
if child.father == spouse || child.mother == spouse {
if _, ok := m[spouse]; !ok {
m[spouse] = make([]*Person, 0)
}
m[spouse] = append(m[spouse], child)
}
}
}
return
}
/*
MarriageYears will return the person's marriage years
*/
func (f Person) MarriageYears() []int {
return f.marriageYears
}
/*
DivorceYears returns the person's divorce years
*/
func (f Person) DivorceYears() []int {
return f.divorceYears
}
/*
Generation is what the name implies, and represented by a slice
of Person pointers, in order to keep everything mutable.
*/
type Generation []*Person
/*
AllDead will returns whether or not everyone in the generation
is dead
*/
func (g Generation) AllDead() bool {
if g == nil || len(g) == 0 {
return true
}
for _, p := range g {
if !p.IsDead() {
return false
}
}
return true
}
/*
Population is what the name implies, and represented by a slice
of Generations
*/
type Population []Generation
/*
GetAlive returns a slice of all alive people, irrespective of their generation
*/
func (pop *Population) GetAlive() []*Person {
people := make([]*Person, 0)
for _, gen := range *pop {
for _, p := range gen {
if !p.IsDead() {
people = append(people, p)
}
}
}
return people
}
/*
AddPerson will add the person to the proper generation in the population
*/
func (pop *Population) AddPerson(f *Person) {
generation := 0
for i, gen := range *pop {
for _, p := range gen {
if (p == f.mother || p == f.father) && i >= generation {
generation = i + 1
}
}
}
if generation >= len(*pop) {
// should only need to add one more generation
*pop = append(*pop, make(Generation, 0))
}
(*pop)[generation] = append((*pop)[generation], f)
}
/*
AreFamily returns whether or not the two people are family members
*/
func AreFamily(a *Person, b *Person) bool {
if a == b {
return false
}
// are parents or siblings x removed
if recursiveAreParentsOrSiblings(a, b) {
return true
}
// are spouses
for _, spouse := range a.spouses {
if spouse == b {
return true
}
if recursiveAreParentsOrSiblings(spouse, b) {
return true
}
}
for _, spouse := range b.spouses {
if spouse == a {
return true
}
if recursiveAreParentsOrSiblings(spouse, a) {
return true
}
}
// are cousins
for _, parentA := range []*Person{a.mother, a.father} {
if parentA == nil {
continue
}
for _, parentB := range []*Person{b.mother, b.father} {
if parentB == nil {
continue
}
if areSiblingsOrParents(parentA, parentB) {
return true
}
}
}
return false
}
func recursiveAreParentsOrSiblings(a *Person, b *Person) bool {
// recursion up the tree
if goUpTree(a, b) || goUpTree(b, a) {
return true
}
// recusion down the tree
if goDownTree(a, b) || goDownTree(b, a) {
return true
}
return false
}
func areSiblingsOrParents(a *Person, b *Person) bool {
// is either one | {
return f.deathYear
} | identifier_body |
medicalbillregistersummary.component.ts | ',
dateA11yLabel: 'DD-MM-YYYY',
monthYearA11yLabel: 'MMMM YYYY',
},
}
@Component({
selector: 'app-medicalbillregistersummary',
templateUrl: './medicalbillregistersummary.component.html',
styleUrls: ['./medicalbillregistersummary.component.less'],
providers: [
{ provide: DateAdapter, useClass: MomentDateAdapter, deps: [MAT_DATE_LOCALE] },
{ provide: MAT_DATE_FORMATS, useValue: MY_FORMATS },
]
})
export class MedicalbillregistersummaryComponent implements OnInit {
MFromDate;
@ViewChild('MedForm') Form: NgForm
maxDate(): string {
return new Date().toISOString().split('T')[0]
}
//maxDate2(): string {
// return new Date().toISOString().split('T')[0]
//}
constructor(public commonService: CommonService<BillingPharmacy>) { }
MedicalBillRegisterTable: boolean = false;
MedicalBillSummaryTable: boolean = false;
MBS_label: boolean = false;
date = new FormControl(new Date());
ngOnInit() {
}
applyFilter(filterValue: string) {
this.dataSource.filter = filterValue.trim().toLowerCase();
}
applyFilter1(filterValue: string) {
this.dataSource1.filter = filterValue.trim().toLowerCase();
}
displayedColumn: string[] = ['BillNo', 'BillDate', 'PatientName', 'item', 'UOM', 'Quantity', 'Rate', 'ProductValue', 'Discount', 'DiscountAmount', 'TaxDescription','GST', 'GSTValue', 'TotalCost'];
dataSource = new MatTableDataSource();
displayedColumnsummary: string[] = ['Item', 'Uom', 'Quan', 'Irate', 'Tvalue', 'IDis', 'Damt', 'IGst', 'Gamt', 'TotalCost1'];
dataSource1 = new MatTableDataSource();
@ViewChild(MatSort) sort: MatSort;
minToDate = new Date();
CheckToDate() {
debugger;
this.minToDate = this.MFromDate;
}
MToDate;
M_FromDat;
M_ToDat;
changeValueTotal(id, element, property: string) {
var resTotal = (element.Quantity * element.Rate) + element.GSTValue - element.DiscountAmount;
resTotal = parseFloat(resTotal.toFixed(2));
element.TotalCost = resTotal;
}
getTotalProdVal() {
var totProdVal = this.commonService.data.getRegisterDetail.map(t => t.ProductValue).reduce((acc, value) => acc + value, 0);
totProdVal = parseFloat(totProdVal.toFixed(2));
return totProdVal;
}
getDiscountAmount() {
var totDiscntAmt = this.commonService.data.getRegisterDetail.map(t => t.DiscountAmount).reduce((acc, value) => acc + value, 0);
totDiscntAmt = parseFloat(totDiscntAmt.toFixed(2));
return totDiscntAmt;
}
getGSTAmount() {
var totGSTAmt = this.commonService.data.getRegisterDetail.map(t => t.GSTValue).reduce((acc, value) => acc + value, 0);
totGSTAmt = parseFloat(totGSTAmt.toFixed(2));
return totGSTAmt;
}
getTotalCostamount() {
var totCstAmt = this.commonService.data.getRegisterDetail.map(t => t.TotalCost).reduce((acc, value) => acc + value, 0);
totCstAmt = parseFloat(totCstAmt.toFixed(2));
return totCstAmt;
}
getTotalCostamount1() {
var totCstAmt1 = this.commonService.data.getSummaryDet.map(t => t.TotalCost1).reduce((acc, value) => acc + value, 0);
totCstAmt1 = parseFloat(totCstAmt1.toFixed(2));
return totCstAmt1;
}
getTotalProdVal1() {
var totProdVal1 = this.commonService.data.getSummaryDet.map(t => t.Tvalue).reduce((acc, value) => acc + value, 0);
totProdVal1 = parseFloat(totProdVal1.toFixed(2));
return totProdVal1;
}
getDiscountAmount1() {
var totDiscntAmt1 = this.commonService.data.getSummaryDet.map(t => t.Damt).reduce((acc, value) => acc + value, 0);
totDiscntAmt1 = parseFloat(totDiscntAmt1.toFixed(2));
return totDiscntAmt1;
}
getGSTAmount1() {
var totGSTAmt1 = this.commonService.data.getSummaryDet.map(t => t.Gamt).reduce((acc, value) => acc + value, 0);
totGSTAmt1 = parseFloat(totGSTAmt1.toFixed(2));
return totGSTAmt1;
}
@ViewChild('RegTable') RegTable: ElementRef;
@ViewChild('SummaryTable') SummaryTable: ElementRef;
@ViewChild('table') table: ElementRef;
@ViewChild('table1') table1: ElementRef;
fireEvent() {
debugger;
const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table.nativeElement);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Sheet1');
XLSX.writeFile(wb, 'Medical_Bill_Register.xlsx');
}
captureScreen() {
var data = document.getElementById('RegTable');
html2canvas(data).then(canvas => {
var imgWidth = 239;
var pageHeight = 55;
//var width = data.internal.pageSize.getWidth();
//var height = data.internal.pageSize.getHeight();
var imgHeight = canvas.height * imgWidth / canvas.width;
var heightLeft = imgHeight;
const contentDataURL = canvas.toDataURL('image/PDF')
//let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF
var position = 5;
//pdf.addImage(contentDataURL, 'PDF', 0, position, imgWidth, imgHeight)
//pdf.save('Medical_Bill_Register.pdf'); // Generated PDF
});
//const tabletojson = require('tabletojson');
//var table = tabletojson($('#table-id').get(0));
//var doc = new jspdf('l', 'pt', 'letter', true);
//$.each(table, function (i, row) {
// $.each(row, function (j, cell) {
// if (j == "email" | j == 1) {
// doc.cell(1, 10, 190, 20, cell, i);
// }
// else {
// doc.cell(1, 10, 90, 20, cell, i);
// }
// });
//});
//doc.save('Safaa.pdf');
//var doc = new jspdf();
//var specialElementHandlers = {
// '#hidediv': function (element, render) { return true; }
//};
//doc.fromHTML($('#RegTable').get(0), 20, 20, {
// 'width': 500,
// 'elementHandlers': specialElementHandlers
//});
//doc.save('Test.pdf');
}
captureScreen1() {
var data = document.getElementById('SummaryTable');
html2canvas(data).then(canvas => {
var imgWidth = 239;
var pageHeight = 55;
var imgHeight = canvas.height * imgWidth / canvas.width;
var heightLeft = imgHeight;
const contentDataURL = canvas.toDataURL('image/PDF')
//let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF
var position = 5;
//pdf.addImage(contentDataURL, 'PDF', 1, position, imgWidth, imgHeight)
//pdf.save('Medical_Bill_Summary.pdf'); // Generated PDF
});
}
fireEvent1() {
const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table1.nativeElement);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Sheet1');
XLSX.writeFile(wb, 'Medical_Bill_Summary.xlsx');
}
backdrop;
cancelblock;
Cancel() {
debugger;
if (this.MFromDate != null || this.MToDate != null) {
this.backdrop = 'block';
this.cancelblock = 'block';
}
else {
this.Form.onReset();
}
}
modalClose() {
this.backdrop = 'none';
this.cancelblock = 'none';
}
CancelNo() {
this.backdrop = 'none';
this.cancelblock = 'none';
}
CancelYes() |
onSubmit(form: NgForm) {
debugger;
| {
debugger;
this.backdrop = 'none';
this.cancelblock = 'none';
//this.MFromDate = '';
//this.MToDate = '';
this.MedicalBillRegisterTable = false;
this.MedicalBillSummaryTable = false;
this.MBS_label = false;
} | identifier_body |
medicalbillregistersummary.component.ts | ',
dateA11yLabel: 'DD-MM-YYYY',
monthYearA11yLabel: 'MMMM YYYY',
},
}
@Component({
selector: 'app-medicalbillregistersummary',
templateUrl: './medicalbillregistersummary.component.html',
styleUrls: ['./medicalbillregistersummary.component.less'],
providers: [
{ provide: DateAdapter, useClass: MomentDateAdapter, deps: [MAT_DATE_LOCALE] },
{ provide: MAT_DATE_FORMATS, useValue: MY_FORMATS },
]
})
export class MedicalbillregistersummaryComponent implements OnInit {
MFromDate;
@ViewChild('MedForm') Form: NgForm
maxDate(): string {
return new Date().toISOString().split('T')[0]
}
//maxDate2(): string {
// return new Date().toISOString().split('T')[0]
//}
constructor(public commonService: CommonService<BillingPharmacy>) { }
MedicalBillRegisterTable: boolean = false;
MedicalBillSummaryTable: boolean = false;
MBS_label: boolean = false;
date = new FormControl(new Date());
ngOnInit() {
}
applyFilter(filterValue: string) {
this.dataSource.filter = filterValue.trim().toLowerCase();
}
applyFilter1(filterValue: string) {
this.dataSource1.filter = filterValue.trim().toLowerCase();
}
displayedColumn: string[] = ['BillNo', 'BillDate', 'PatientName', 'item', 'UOM', 'Quantity', 'Rate', 'ProductValue', 'Discount', 'DiscountAmount', 'TaxDescription','GST', 'GSTValue', 'TotalCost'];
dataSource = new MatTableDataSource();
displayedColumnsummary: string[] = ['Item', 'Uom', 'Quan', 'Irate', 'Tvalue', 'IDis', 'Damt', 'IGst', 'Gamt', 'TotalCost1'];
dataSource1 = new MatTableDataSource();
@ViewChild(MatSort) sort: MatSort;
minToDate = new Date();
CheckToDate() {
debugger;
this.minToDate = this.MFromDate;
}
MToDate;
M_FromDat;
M_ToDat;
changeValueTotal(id, element, property: string) {
var resTotal = (element.Quantity * element.Rate) + element.GSTValue - element.DiscountAmount;
resTotal = parseFloat(resTotal.toFixed(2));
element.TotalCost = resTotal;
}
getTotalProdVal() {
var totProdVal = this.commonService.data.getRegisterDetail.map(t => t.ProductValue).reduce((acc, value) => acc + value, 0);
totProdVal = parseFloat(totProdVal.toFixed(2));
return totProdVal;
}
getDiscountAmount() {
var totDiscntAmt = this.commonService.data.getRegisterDetail.map(t => t.DiscountAmount).reduce((acc, value) => acc + value, 0);
totDiscntAmt = parseFloat(totDiscntAmt.toFixed(2));
return totDiscntAmt;
}
getGSTAmount() {
var totGSTAmt = this.commonService.data.getRegisterDetail.map(t => t.GSTValue).reduce((acc, value) => acc + value, 0);
totGSTAmt = parseFloat(totGSTAmt.toFixed(2));
return totGSTAmt;
}
getTotalCostamount() {
var totCstAmt = this.commonService.data.getRegisterDetail.map(t => t.TotalCost).reduce((acc, value) => acc + value, 0);
totCstAmt = parseFloat(totCstAmt.toFixed(2));
return totCstAmt;
}
getTotalCostamount1() {
var totCstAmt1 = this.commonService.data.getSummaryDet.map(t => t.TotalCost1).reduce((acc, value) => acc + value, 0);
totCstAmt1 = parseFloat(totCstAmt1.toFixed(2));
return totCstAmt1;
}
getTotalProdVal1() {
var totProdVal1 = this.commonService.data.getSummaryDet.map(t => t.Tvalue).reduce((acc, value) => acc + value, 0);
totProdVal1 = parseFloat(totProdVal1.toFixed(2));
return totProdVal1;
}
| () {
var totDiscntAmt1 = this.commonService.data.getSummaryDet.map(t => t.Damt).reduce((acc, value) => acc + value, 0);
totDiscntAmt1 = parseFloat(totDiscntAmt1.toFixed(2));
return totDiscntAmt1;
}
getGSTAmount1() {
var totGSTAmt1 = this.commonService.data.getSummaryDet.map(t => t.Gamt).reduce((acc, value) => acc + value, 0);
totGSTAmt1 = parseFloat(totGSTAmt1.toFixed(2));
return totGSTAmt1;
}
@ViewChild('RegTable') RegTable: ElementRef;
@ViewChild('SummaryTable') SummaryTable: ElementRef;
@ViewChild('table') table: ElementRef;
@ViewChild('table1') table1: ElementRef;
fireEvent() {
debugger;
const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table.nativeElement);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Sheet1');
XLSX.writeFile(wb, 'Medical_Bill_Register.xlsx');
}
captureScreen() {
var data = document.getElementById('RegTable');
html2canvas(data).then(canvas => {
var imgWidth = 239;
var pageHeight = 55;
//var width = data.internal.pageSize.getWidth();
//var height = data.internal.pageSize.getHeight();
var imgHeight = canvas.height * imgWidth / canvas.width;
var heightLeft = imgHeight;
const contentDataURL = canvas.toDataURL('image/PDF')
//let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF
var position = 5;
//pdf.addImage(contentDataURL, 'PDF', 0, position, imgWidth, imgHeight)
//pdf.save('Medical_Bill_Register.pdf'); // Generated PDF
});
//const tabletojson = require('tabletojson');
//var table = tabletojson($('#table-id').get(0));
//var doc = new jspdf('l', 'pt', 'letter', true);
//$.each(table, function (i, row) {
// $.each(row, function (j, cell) {
// if (j == "email" | j == 1) {
// doc.cell(1, 10, 190, 20, cell, i);
// }
// else {
// doc.cell(1, 10, 90, 20, cell, i);
// }
// });
//});
//doc.save('Safaa.pdf');
//var doc = new jspdf();
//var specialElementHandlers = {
// '#hidediv': function (element, render) { return true; }
//};
//doc.fromHTML($('#RegTable').get(0), 20, 20, {
// 'width': 500,
// 'elementHandlers': specialElementHandlers
//});
//doc.save('Test.pdf');
}
captureScreen1() {
var data = document.getElementById('SummaryTable');
html2canvas(data).then(canvas => {
var imgWidth = 239;
var pageHeight = 55;
var imgHeight = canvas.height * imgWidth / canvas.width;
var heightLeft = imgHeight;
const contentDataURL = canvas.toDataURL('image/PDF')
//let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF
var position = 5;
//pdf.addImage(contentDataURL, 'PDF', 1, position, imgWidth, imgHeight)
//pdf.save('Medical_Bill_Summary.pdf'); // Generated PDF
});
}
fireEvent1() {
const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table1.nativeElement);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Sheet1');
XLSX.writeFile(wb, 'Medical_Bill_Summary.xlsx');
}
backdrop;
cancelblock;
Cancel() {
debugger;
if (this.MFromDate != null || this.MToDate != null) {
this.backdrop = 'block';
this.cancelblock = 'block';
}
else {
this.Form.onReset();
}
}
modalClose() {
this.backdrop = 'none';
this.cancelblock = 'none';
}
CancelNo() {
this.backdrop = 'none';
this.cancelblock = 'none';
}
CancelYes() {
debugger;
this.backdrop = 'none';
this.cancelblock = 'none';
//this.MFromDate = '';
//this.MToDate = '';
this.MedicalBillRegisterTable = false;
this.MedicalBillSummaryTable = false;
this.MBS_label = false;
}
onSubmit(form: NgForm) {
debugger;
if | getDiscountAmount1 | identifier_name |
medicalbillregistersummary.component.ts | ',
dateA11yLabel: 'DD-MM-YYYY',
monthYearA11yLabel: 'MMMM YYYY',
},
}
@Component({
selector: 'app-medicalbillregistersummary',
templateUrl: './medicalbillregistersummary.component.html',
styleUrls: ['./medicalbillregistersummary.component.less'],
providers: [
{ provide: DateAdapter, useClass: MomentDateAdapter, deps: [MAT_DATE_LOCALE] },
{ provide: MAT_DATE_FORMATS, useValue: MY_FORMATS },
]
})
export class MedicalbillregistersummaryComponent implements OnInit {
MFromDate;
@ViewChild('MedForm') Form: NgForm
maxDate(): string {
return new Date().toISOString().split('T')[0]
}
//maxDate2(): string {
// return new Date().toISOString().split('T')[0]
//}
constructor(public commonService: CommonService<BillingPharmacy>) { }
MedicalBillRegisterTable: boolean = false;
MedicalBillSummaryTable: boolean = false;
MBS_label: boolean = false;
date = new FormControl(new Date());
ngOnInit() {
}
applyFilter(filterValue: string) {
this.dataSource.filter = filterValue.trim().toLowerCase();
}
applyFilter1(filterValue: string) {
this.dataSource1.filter = filterValue.trim().toLowerCase();
}
displayedColumn: string[] = ['BillNo', 'BillDate', 'PatientName', 'item', 'UOM', 'Quantity', 'Rate', 'ProductValue', 'Discount', 'DiscountAmount', 'TaxDescription','GST', 'GSTValue', 'TotalCost'];
dataSource = new MatTableDataSource();
displayedColumnsummary: string[] = ['Item', 'Uom', 'Quan', 'Irate', 'Tvalue', 'IDis', 'Damt', 'IGst', 'Gamt', 'TotalCost1'];
dataSource1 = new MatTableDataSource();
@ViewChild(MatSort) sort: MatSort;
minToDate = new Date();
CheckToDate() {
debugger;
this.minToDate = this.MFromDate;
}
MToDate;
M_FromDat; | changeValueTotal(id, element, property: string) {
var resTotal = (element.Quantity * element.Rate) + element.GSTValue - element.DiscountAmount;
resTotal = parseFloat(resTotal.toFixed(2));
element.TotalCost = resTotal;
}
getTotalProdVal() {
var totProdVal = this.commonService.data.getRegisterDetail.map(t => t.ProductValue).reduce((acc, value) => acc + value, 0);
totProdVal = parseFloat(totProdVal.toFixed(2));
return totProdVal;
}
getDiscountAmount() {
var totDiscntAmt = this.commonService.data.getRegisterDetail.map(t => t.DiscountAmount).reduce((acc, value) => acc + value, 0);
totDiscntAmt = parseFloat(totDiscntAmt.toFixed(2));
return totDiscntAmt;
}
getGSTAmount() {
var totGSTAmt = this.commonService.data.getRegisterDetail.map(t => t.GSTValue).reduce((acc, value) => acc + value, 0);
totGSTAmt = parseFloat(totGSTAmt.toFixed(2));
return totGSTAmt;
}
getTotalCostamount() {
var totCstAmt = this.commonService.data.getRegisterDetail.map(t => t.TotalCost).reduce((acc, value) => acc + value, 0);
totCstAmt = parseFloat(totCstAmt.toFixed(2));
return totCstAmt;
}
getTotalCostamount1() {
var totCstAmt1 = this.commonService.data.getSummaryDet.map(t => t.TotalCost1).reduce((acc, value) => acc + value, 0);
totCstAmt1 = parseFloat(totCstAmt1.toFixed(2));
return totCstAmt1;
}
getTotalProdVal1() {
var totProdVal1 = this.commonService.data.getSummaryDet.map(t => t.Tvalue).reduce((acc, value) => acc + value, 0);
totProdVal1 = parseFloat(totProdVal1.toFixed(2));
return totProdVal1;
}
getDiscountAmount1() {
var totDiscntAmt1 = this.commonService.data.getSummaryDet.map(t => t.Damt).reduce((acc, value) => acc + value, 0);
totDiscntAmt1 = parseFloat(totDiscntAmt1.toFixed(2));
return totDiscntAmt1;
}
getGSTAmount1() {
var totGSTAmt1 = this.commonService.data.getSummaryDet.map(t => t.Gamt).reduce((acc, value) => acc + value, 0);
totGSTAmt1 = parseFloat(totGSTAmt1.toFixed(2));
return totGSTAmt1;
}
@ViewChild('RegTable') RegTable: ElementRef;
@ViewChild('SummaryTable') SummaryTable: ElementRef;
@ViewChild('table') table: ElementRef;
@ViewChild('table1') table1: ElementRef;
fireEvent() {
debugger;
const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table.nativeElement);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Sheet1');
XLSX.writeFile(wb, 'Medical_Bill_Register.xlsx');
}
captureScreen() {
var data = document.getElementById('RegTable');
html2canvas(data).then(canvas => {
var imgWidth = 239;
var pageHeight = 55;
//var width = data.internal.pageSize.getWidth();
//var height = data.internal.pageSize.getHeight();
var imgHeight = canvas.height * imgWidth / canvas.width;
var heightLeft = imgHeight;
const contentDataURL = canvas.toDataURL('image/PDF')
//let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF
var position = 5;
//pdf.addImage(contentDataURL, 'PDF', 0, position, imgWidth, imgHeight)
//pdf.save('Medical_Bill_Register.pdf'); // Generated PDF
});
//const tabletojson = require('tabletojson');
//var table = tabletojson($('#table-id').get(0));
//var doc = new jspdf('l', 'pt', 'letter', true);
//$.each(table, function (i, row) {
// $.each(row, function (j, cell) {
// if (j == "email" | j == 1) {
// doc.cell(1, 10, 190, 20, cell, i);
// }
// else {
// doc.cell(1, 10, 90, 20, cell, i);
// }
// });
//});
//doc.save('Safaa.pdf');
//var doc = new jspdf();
//var specialElementHandlers = {
// '#hidediv': function (element, render) { return true; }
//};
//doc.fromHTML($('#RegTable').get(0), 20, 20, {
// 'width': 500,
// 'elementHandlers': specialElementHandlers
//});
//doc.save('Test.pdf');
}
captureScreen1() {
var data = document.getElementById('SummaryTable');
html2canvas(data).then(canvas => {
var imgWidth = 239;
var pageHeight = 55;
var imgHeight = canvas.height * imgWidth / canvas.width;
var heightLeft = imgHeight;
const contentDataURL = canvas.toDataURL('image/PDF')
//let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF
var position = 5;
//pdf.addImage(contentDataURL, 'PDF', 1, position, imgWidth, imgHeight)
//pdf.save('Medical_Bill_Summary.pdf'); // Generated PDF
});
}
fireEvent1() {
const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table1.nativeElement);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Sheet1');
XLSX.writeFile(wb, 'Medical_Bill_Summary.xlsx');
}
backdrop;
cancelblock;
Cancel() {
debugger;
if (this.MFromDate != null || this.MToDate != null) {
this.backdrop = 'block';
this.cancelblock = 'block';
}
else {
this.Form.onReset();
}
}
modalClose() {
this.backdrop = 'none';
this.cancelblock = 'none';
}
CancelNo() {
this.backdrop = 'none';
this.cancelblock = 'none';
}
CancelYes() {
debugger;
this.backdrop = 'none';
this.cancelblock = 'none';
//this.MFromDate = '';
//this.MToDate = '';
this.MedicalBillRegisterTable = false;
this.MedicalBillSummaryTable = false;
this.MBS_label = false;
}
onSubmit(form: NgForm) {
debugger;
if | M_ToDat;
| random_line_split |
medicalbillregistersummary.component.ts | (public commonService: CommonService<BillingPharmacy>) { }
MedicalBillRegisterTable: boolean = false;
MedicalBillSummaryTable: boolean = false;
MBS_label: boolean = false;
date = new FormControl(new Date());
ngOnInit() {
}
applyFilter(filterValue: string) {
this.dataSource.filter = filterValue.trim().toLowerCase();
}
applyFilter1(filterValue: string) {
this.dataSource1.filter = filterValue.trim().toLowerCase();
}
displayedColumn: string[] = ['BillNo', 'BillDate', 'PatientName', 'item', 'UOM', 'Quantity', 'Rate', 'ProductValue', 'Discount', 'DiscountAmount', 'TaxDescription','GST', 'GSTValue', 'TotalCost'];
dataSource = new MatTableDataSource();
displayedColumnsummary: string[] = ['Item', 'Uom', 'Quan', 'Irate', 'Tvalue', 'IDis', 'Damt', 'IGst', 'Gamt', 'TotalCost1'];
dataSource1 = new MatTableDataSource();
@ViewChild(MatSort) sort: MatSort;
minToDate = new Date();
CheckToDate() {
debugger;
this.minToDate = this.MFromDate;
}
MToDate;
M_FromDat;
M_ToDat;
changeValueTotal(id, element, property: string) {
var resTotal = (element.Quantity * element.Rate) + element.GSTValue - element.DiscountAmount;
resTotal = parseFloat(resTotal.toFixed(2));
element.TotalCost = resTotal;
}
getTotalProdVal() {
var totProdVal = this.commonService.data.getRegisterDetail.map(t => t.ProductValue).reduce((acc, value) => acc + value, 0);
totProdVal = parseFloat(totProdVal.toFixed(2));
return totProdVal;
}
getDiscountAmount() {
var totDiscntAmt = this.commonService.data.getRegisterDetail.map(t => t.DiscountAmount).reduce((acc, value) => acc + value, 0);
totDiscntAmt = parseFloat(totDiscntAmt.toFixed(2));
return totDiscntAmt;
}
getGSTAmount() {
var totGSTAmt = this.commonService.data.getRegisterDetail.map(t => t.GSTValue).reduce((acc, value) => acc + value, 0);
totGSTAmt = parseFloat(totGSTAmt.toFixed(2));
return totGSTAmt;
}
getTotalCostamount() {
var totCstAmt = this.commonService.data.getRegisterDetail.map(t => t.TotalCost).reduce((acc, value) => acc + value, 0);
totCstAmt = parseFloat(totCstAmt.toFixed(2));
return totCstAmt;
}
getTotalCostamount1() {
var totCstAmt1 = this.commonService.data.getSummaryDet.map(t => t.TotalCost1).reduce((acc, value) => acc + value, 0);
totCstAmt1 = parseFloat(totCstAmt1.toFixed(2));
return totCstAmt1;
}
getTotalProdVal1() {
var totProdVal1 = this.commonService.data.getSummaryDet.map(t => t.Tvalue).reduce((acc, value) => acc + value, 0);
totProdVal1 = parseFloat(totProdVal1.toFixed(2));
return totProdVal1;
}
getDiscountAmount1() {
var totDiscntAmt1 = this.commonService.data.getSummaryDet.map(t => t.Damt).reduce((acc, value) => acc + value, 0);
totDiscntAmt1 = parseFloat(totDiscntAmt1.toFixed(2));
return totDiscntAmt1;
}
getGSTAmount1() {
var totGSTAmt1 = this.commonService.data.getSummaryDet.map(t => t.Gamt).reduce((acc, value) => acc + value, 0);
totGSTAmt1 = parseFloat(totGSTAmt1.toFixed(2));
return totGSTAmt1;
}
@ViewChild('RegTable') RegTable: ElementRef;
@ViewChild('SummaryTable') SummaryTable: ElementRef;
@ViewChild('table') table: ElementRef;
@ViewChild('table1') table1: ElementRef;
fireEvent() {
debugger;
const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table.nativeElement);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Sheet1');
XLSX.writeFile(wb, 'Medical_Bill_Register.xlsx');
}
captureScreen() {
var data = document.getElementById('RegTable');
html2canvas(data).then(canvas => {
var imgWidth = 239;
var pageHeight = 55;
//var width = data.internal.pageSize.getWidth();
//var height = data.internal.pageSize.getHeight();
var imgHeight = canvas.height * imgWidth / canvas.width;
var heightLeft = imgHeight;
const contentDataURL = canvas.toDataURL('image/PDF')
//let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF
var position = 5;
//pdf.addImage(contentDataURL, 'PDF', 0, position, imgWidth, imgHeight)
//pdf.save('Medical_Bill_Register.pdf'); // Generated PDF
});
//const tabletojson = require('tabletojson');
//var table = tabletojson($('#table-id').get(0));
//var doc = new jspdf('l', 'pt', 'letter', true);
//$.each(table, function (i, row) {
// $.each(row, function (j, cell) {
// if (j == "email" | j == 1) {
// doc.cell(1, 10, 190, 20, cell, i);
// }
// else {
// doc.cell(1, 10, 90, 20, cell, i);
// }
// });
//});
//doc.save('Safaa.pdf');
//var doc = new jspdf();
//var specialElementHandlers = {
// '#hidediv': function (element, render) { return true; }
//};
//doc.fromHTML($('#RegTable').get(0), 20, 20, {
// 'width': 500,
// 'elementHandlers': specialElementHandlers
//});
//doc.save('Test.pdf');
}
captureScreen1() {
var data = document.getElementById('SummaryTable');
html2canvas(data).then(canvas => {
var imgWidth = 239;
var pageHeight = 55;
var imgHeight = canvas.height * imgWidth / canvas.width;
var heightLeft = imgHeight;
const contentDataURL = canvas.toDataURL('image/PDF')
//let pdf = new jspdf('p', 'mm', 'a4'); // A4 size page of PDF
var position = 5;
//pdf.addImage(contentDataURL, 'PDF', 1, position, imgWidth, imgHeight)
//pdf.save('Medical_Bill_Summary.pdf'); // Generated PDF
});
}
fireEvent1() {
const ws: XLSX.WorkSheet = XLSX.utils.table_to_sheet(this.table1.nativeElement);
const wb: XLSX.WorkBook = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Sheet1');
XLSX.writeFile(wb, 'Medical_Bill_Summary.xlsx');
}
backdrop;
cancelblock;
Cancel() {
debugger;
if (this.MFromDate != null || this.MToDate != null) {
this.backdrop = 'block';
this.cancelblock = 'block';
}
else {
this.Form.onReset();
}
}
modalClose() {
this.backdrop = 'none';
this.cancelblock = 'none';
}
CancelNo() {
this.backdrop = 'none';
this.cancelblock = 'none';
}
CancelYes() {
debugger;
this.backdrop = 'none';
this.cancelblock = 'none';
//this.MFromDate = '';
//this.MToDate = '';
this.MedicalBillRegisterTable = false;
this.MedicalBillSummaryTable = false;
this.MBS_label = false;
}
onSubmit(form: NgForm) {
debugger;
if (form.valid) {
this.M_FromDat = this.MFromDate.toISOString();
this.M_ToDat = this.MToDate.toISOString();
this.commonService.getListOfData("MedicalBillRegister/getMedBillDet/" + this.M_FromDat + '/' + this.M_ToDat + '/' + parseInt(localStorage.getItem("CompanyID")))
.subscribe(data => {
debugger;
if (data.getRegisterDetail != null && data.getRegisterDetail.length != 0) {
debugger;
if (data.getRegisterDetail != null) {
for (var i = 0; i < data.getRegisterDetail.length; i++) | {
debugger;
var res = ((data.getRegisterDetail[i].Quantity * data.getRegisterDetail[i].Rate) + data.getRegisterDetail[i].GSTValue) - data.getRegisterDetail[i].DiscountAmount;
data.getRegisterDetail[i].TotalCost = res;
} | conditional_block |
|
cargo_workspace.rs | ra_arena::{Arena, Idx};
use ra_cargo_watch::run_cargo;
use ra_db::Edition;
use rustc_hash::FxHashMap;
use serde::Deserialize;
/// `CargoWorkspace` represents the logical structure of, well, a Cargo
/// workspace. It pretty closely mirrors `cargo metadata` output.
///
/// Note that internally, rust analyzer uses a different structure:
/// `CrateGraph`. `CrateGraph` is lower-level: it knows only about the crates,
/// while this knows about `Packages` & `Targets`: purely cargo-related
/// concepts.
#[derive(Debug, Clone)]
pub struct CargoWorkspace {
packages: Arena<PackageData>,
targets: Arena<TargetData>,
workspace_root: PathBuf,
}
impl ops::Index<Package> for CargoWorkspace {
type Output = PackageData;
fn index(&self, index: Package) -> &PackageData {
&self.packages[index]
}
}
impl ops::Index<Target> for CargoWorkspace {
type Output = TargetData;
fn index(&self, index: Target) -> &TargetData {
&self.targets[index]
}
}
#[derive(Deserialize, Clone, Debug, PartialEq, Eq)]
#[serde(rename_all = "camelCase", default)]
pub struct CargoFeatures {
/// Do not activate the `default` feature.
pub no_default_features: bool,
/// Activate all available features
pub all_features: bool,
/// List of features to activate.
/// This will be ignored if `cargo_all_features` is true.
pub features: Vec<String>,
/// Runs cargo check on launch to figure out the correct values of OUT_DIR
pub load_out_dirs_from_check: bool,
}
impl Default for CargoFeatures {
fn default() -> Self {
CargoFeatures {
no_default_features: false,
all_features: true,
features: Vec::new(),
load_out_dirs_from_check: false,
}
}
}
pub type Package = Idx<PackageData>;
pub type Target = Idx<TargetData>;
#[derive(Debug, Clone)]
pub struct PackageData {
pub name: String,
pub manifest: PathBuf,
pub targets: Vec<Target>,
pub is_member: bool,
pub dependencies: Vec<PackageDependency>,
pub edition: Edition,
pub features: Vec<String>,
pub out_dir: Option<PathBuf>,
pub proc_macro_dylib_path: Option<PathBuf>,
}
#[derive(Debug, Clone)]
pub struct PackageDependency {
pub pkg: Package,
pub name: String,
}
#[derive(Debug, Clone)]
pub struct TargetData {
pub package: Package,
pub name: String,
pub root: PathBuf,
pub kind: TargetKind,
pub is_proc_macro: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TargetKind {
Bin,
/// Any kind of Cargo lib crate-type (dylib, rlib, proc-macro, ...).
Lib,
Example,
Test,
Bench,
Other,
}
impl TargetKind {
fn new(kinds: &[String]) -> TargetKind {
for kind in kinds {
return match kind.as_str() {
"bin" => TargetKind::Bin,
"test" => TargetKind::Test,
"bench" => TargetKind::Bench,
"example" => TargetKind::Example,
"proc-macro" => TargetKind::Lib,
_ if kind.contains("lib") => TargetKind::Lib,
_ => continue,
};
}
TargetKind::Other
}
}
impl PackageData {
pub fn root(&self) -> &Path {
self.manifest.parent().unwrap()
}
}
impl CargoWorkspace {
pub fn from_cargo_metadata(
cargo_toml: &Path,
cargo_features: &CargoFeatures,
) -> Result<CargoWorkspace> {
let mut meta = MetadataCommand::new();
meta.manifest_path(cargo_toml);
if cargo_features.all_features {
meta.features(CargoOpt::AllFeatures);
} else if cargo_features.no_default_features {
// FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures`
// https://github.com/oli-obk/cargo_metadata/issues/79
meta.features(CargoOpt::NoDefaultFeatures);
} else if !cargo_features.features.is_empty() {
meta.features(CargoOpt::SomeFeatures(cargo_features.features.clone()));
}
if let Some(parent) = cargo_toml.parent() {
meta.current_dir(parent);
}
let meta = meta.exec().with_context(|| {
format!("Failed to run `cargo metadata --manifest-path {}`", cargo_toml.display())
})?;
let mut out_dir_by_id = FxHashMap::default();
let mut proc_macro_dylib_paths = FxHashMap::default();
if cargo_features.load_out_dirs_from_check {
let resources = load_extern_resources(cargo_toml, cargo_features);
out_dir_by_id = resources.out_dirs;
proc_macro_dylib_paths = resources.proc_dylib_paths;
}
let mut pkg_by_id = FxHashMap::default();
let mut packages = Arena::default();
let mut targets = Arena::default();
let ws_members = &meta.workspace_members;
for meta_pkg in meta.packages {
let cargo_metadata::Package { id, edition, name, manifest_path, .. } = meta_pkg;
let is_member = ws_members.contains(&id);
let edition = edition
.parse::<Edition>()
.with_context(|| format!("Failed to parse edition {}", edition))?;
let pkg = packages.alloc(PackageData {
name,
manifest: manifest_path,
targets: Vec::new(),
is_member,
edition,
dependencies: Vec::new(),
features: Vec::new(),
out_dir: out_dir_by_id.get(&id).cloned(),
proc_macro_dylib_path: proc_macro_dylib_paths.get(&id).cloned(),
});
let pkg_data = &mut packages[pkg];
pkg_by_id.insert(id, pkg);
for meta_tgt in meta_pkg.targets {
let is_proc_macro = meta_tgt.kind.as_slice() == ["proc-macro"];
let tgt = targets.alloc(TargetData {
package: pkg,
name: meta_tgt.name,
root: meta_tgt.src_path.clone(),
kind: TargetKind::new(meta_tgt.kind.as_slice()),
is_proc_macro,
});
pkg_data.targets.push(tgt);
}
}
let resolve = meta.resolve.expect("metadata executed with deps");
for node in resolve.nodes {
let source = match pkg_by_id.get(&node.id) {
Some(&src) => src,
// FIXME: replace this and a similar branch below with `.unwrap`, once
// https://github.com/rust-lang/cargo/issues/7841
// is fixed and hits stable (around 1.43-is probably?).
None => {
log::error!("Node id do not match in cargo metadata, ignoring {}", node.id);
continue;
}
};
for dep_node in node.deps {
let pkg = match pkg_by_id.get(&dep_node.pkg) {
Some(&pkg) => pkg,
None => {
log::error!(
"Dep node id do not match in cargo metadata, ignoring {}",
dep_node.pkg
);
continue;
}
};
let dep = PackageDependency { name: dep_node.name, pkg };
packages[source].dependencies.push(dep);
}
packages[source].features.extend(node.features);
}
Ok(CargoWorkspace { packages, targets, workspace_root: meta.workspace_root })
}
pub fn packages<'a>(&'a self) -> impl Iterator<Item = Package> + ExactSizeIterator + 'a {
self.packages.iter().map(|(id, _pkg)| id)
}
pub fn target_by_root(&self, root: &Path) -> Option<Target> {
self.packages()
.filter_map(|pkg| self[pkg].targets.iter().find(|&&it| self[it].root == root))
.next() | }
}
#[derive(Debug, Clone, Default)]
pub struct ExternResources {
out_dirs: FxHashMap<PackageId, PathBuf>,
proc_dylib_paths: FxHashMap<PackageId, PathBuf>,
}
pub fn load_extern_resources(cargo_toml: &Path, cargo_features: &CargoFeatures) -> ExternResources {
let mut args: Vec<String> = vec![
"check".to_string(),
"--message-format=json".to_string(),
"--manifest-path".to_string(),
cargo_toml.display().to_string(),
];
if cargo_features.all_features {
args.push("--all-features".to_string());
} else if cargo_features.no_default_features {
// FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures`
// https://github.com/oli-obk/cargo_metadata/issues/79
args.push("--no-default-features".to_string());
} else {
args.extend(cargo_features.features.iter().cloned());
}
let mut acc = ExternResources::default();
let res = run_cargo(&args, cargo_toml.parent(), &mut |message| {
match message {
Message::BuildScriptExecuted(BuildScript { package_id, out_dir, .. }) => {
acc.out_dirs.insert(package_id, out_dir);
}
Message:: | .copied()
}
pub fn workspace_root(&self) -> &Path {
&self.workspace_root | random_line_split |
cargo_workspace.rs | ra_arena::{Arena, Idx};
use ra_cargo_watch::run_cargo;
use ra_db::Edition;
use rustc_hash::FxHashMap;
use serde::Deserialize;
/// `CargoWorkspace` represents the logical structure of, well, a Cargo
/// workspace. It pretty closely mirrors `cargo metadata` output.
///
/// Note that internally, rust analyzer uses a different structure:
/// `CrateGraph`. `CrateGraph` is lower-level: it knows only about the crates,
/// while this knows about `Packages` & `Targets`: purely cargo-related
/// concepts.
#[derive(Debug, Clone)]
pub struct CargoWorkspace {
packages: Arena<PackageData>,
targets: Arena<TargetData>,
workspace_root: PathBuf,
}
impl ops::Index<Package> for CargoWorkspace {
type Output = PackageData;
fn index(&self, index: Package) -> &PackageData {
&self.packages[index]
}
}
impl ops::Index<Target> for CargoWorkspace {
type Output = TargetData;
fn index(&self, index: Target) -> &TargetData {
&self.targets[index]
}
}
#[derive(Deserialize, Clone, Debug, PartialEq, Eq)]
#[serde(rename_all = "camelCase", default)]
pub struct | {
/// Do not activate the `default` feature.
pub no_default_features: bool,
/// Activate all available features
pub all_features: bool,
/// List of features to activate.
/// This will be ignored if `cargo_all_features` is true.
pub features: Vec<String>,
/// Runs cargo check on launch to figure out the correct values of OUT_DIR
pub load_out_dirs_from_check: bool,
}
impl Default for CargoFeatures {
fn default() -> Self {
CargoFeatures {
no_default_features: false,
all_features: true,
features: Vec::new(),
load_out_dirs_from_check: false,
}
}
}
pub type Package = Idx<PackageData>;
pub type Target = Idx<TargetData>;
#[derive(Debug, Clone)]
pub struct PackageData {
pub name: String,
pub manifest: PathBuf,
pub targets: Vec<Target>,
pub is_member: bool,
pub dependencies: Vec<PackageDependency>,
pub edition: Edition,
pub features: Vec<String>,
pub out_dir: Option<PathBuf>,
pub proc_macro_dylib_path: Option<PathBuf>,
}
#[derive(Debug, Clone)]
pub struct PackageDependency {
pub pkg: Package,
pub name: String,
}
#[derive(Debug, Clone)]
pub struct TargetData {
pub package: Package,
pub name: String,
pub root: PathBuf,
pub kind: TargetKind,
pub is_proc_macro: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TargetKind {
Bin,
/// Any kind of Cargo lib crate-type (dylib, rlib, proc-macro, ...).
Lib,
Example,
Test,
Bench,
Other,
}
impl TargetKind {
fn new(kinds: &[String]) -> TargetKind {
for kind in kinds {
return match kind.as_str() {
"bin" => TargetKind::Bin,
"test" => TargetKind::Test,
"bench" => TargetKind::Bench,
"example" => TargetKind::Example,
"proc-macro" => TargetKind::Lib,
_ if kind.contains("lib") => TargetKind::Lib,
_ => continue,
};
}
TargetKind::Other
}
}
impl PackageData {
pub fn root(&self) -> &Path {
self.manifest.parent().unwrap()
}
}
impl CargoWorkspace {
pub fn from_cargo_metadata(
cargo_toml: &Path,
cargo_features: &CargoFeatures,
) -> Result<CargoWorkspace> {
let mut meta = MetadataCommand::new();
meta.manifest_path(cargo_toml);
if cargo_features.all_features {
meta.features(CargoOpt::AllFeatures);
} else if cargo_features.no_default_features {
// FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures`
// https://github.com/oli-obk/cargo_metadata/issues/79
meta.features(CargoOpt::NoDefaultFeatures);
} else if !cargo_features.features.is_empty() {
meta.features(CargoOpt::SomeFeatures(cargo_features.features.clone()));
}
if let Some(parent) = cargo_toml.parent() {
meta.current_dir(parent);
}
let meta = meta.exec().with_context(|| {
format!("Failed to run `cargo metadata --manifest-path {}`", cargo_toml.display())
})?;
let mut out_dir_by_id = FxHashMap::default();
let mut proc_macro_dylib_paths = FxHashMap::default();
if cargo_features.load_out_dirs_from_check {
let resources = load_extern_resources(cargo_toml, cargo_features);
out_dir_by_id = resources.out_dirs;
proc_macro_dylib_paths = resources.proc_dylib_paths;
}
let mut pkg_by_id = FxHashMap::default();
let mut packages = Arena::default();
let mut targets = Arena::default();
let ws_members = &meta.workspace_members;
for meta_pkg in meta.packages {
let cargo_metadata::Package { id, edition, name, manifest_path, .. } = meta_pkg;
let is_member = ws_members.contains(&id);
let edition = edition
.parse::<Edition>()
.with_context(|| format!("Failed to parse edition {}", edition))?;
let pkg = packages.alloc(PackageData {
name,
manifest: manifest_path,
targets: Vec::new(),
is_member,
edition,
dependencies: Vec::new(),
features: Vec::new(),
out_dir: out_dir_by_id.get(&id).cloned(),
proc_macro_dylib_path: proc_macro_dylib_paths.get(&id).cloned(),
});
let pkg_data = &mut packages[pkg];
pkg_by_id.insert(id, pkg);
for meta_tgt in meta_pkg.targets {
let is_proc_macro = meta_tgt.kind.as_slice() == ["proc-macro"];
let tgt = targets.alloc(TargetData {
package: pkg,
name: meta_tgt.name,
root: meta_tgt.src_path.clone(),
kind: TargetKind::new(meta_tgt.kind.as_slice()),
is_proc_macro,
});
pkg_data.targets.push(tgt);
}
}
let resolve = meta.resolve.expect("metadata executed with deps");
for node in resolve.nodes {
let source = match pkg_by_id.get(&node.id) {
Some(&src) => src,
// FIXME: replace this and a similar branch below with `.unwrap`, once
// https://github.com/rust-lang/cargo/issues/7841
// is fixed and hits stable (around 1.43-is probably?).
None => {
log::error!("Node id do not match in cargo metadata, ignoring {}", node.id);
continue;
}
};
for dep_node in node.deps {
let pkg = match pkg_by_id.get(&dep_node.pkg) {
Some(&pkg) => pkg,
None => {
log::error!(
"Dep node id do not match in cargo metadata, ignoring {}",
dep_node.pkg
);
continue;
}
};
let dep = PackageDependency { name: dep_node.name, pkg };
packages[source].dependencies.push(dep);
}
packages[source].features.extend(node.features);
}
Ok(CargoWorkspace { packages, targets, workspace_root: meta.workspace_root })
}
pub fn packages<'a>(&'a self) -> impl Iterator<Item = Package> + ExactSizeIterator + 'a {
self.packages.iter().map(|(id, _pkg)| id)
}
pub fn target_by_root(&self, root: &Path) -> Option<Target> {
self.packages()
.filter_map(|pkg| self[pkg].targets.iter().find(|&&it| self[it].root == root))
.next()
.copied()
}
pub fn workspace_root(&self) -> &Path {
&self.workspace_root
}
}
#[derive(Debug, Clone, Default)]
pub struct ExternResources {
out_dirs: FxHashMap<PackageId, PathBuf>,
proc_dylib_paths: FxHashMap<PackageId, PathBuf>,
}
pub fn load_extern_resources(cargo_toml: &Path, cargo_features: &CargoFeatures) -> ExternResources {
let mut args: Vec<String> = vec![
"check".to_string(),
"--message-format=json".to_string(),
"--manifest-path".to_string(),
cargo_toml.display().to_string(),
];
if cargo_features.all_features {
args.push("--all-features".to_string());
} else if cargo_features.no_default_features {
// FIXME: `NoDefaultFeatures` is mutual exclusive with `SomeFeatures`
// https://github.com/oli-obk/cargo_metadata/issues/79
args.push("--no-default-features".to_string());
} else {
args.extend(cargo_features.features.iter().cloned());
}
let mut acc = ExternResources::default();
let res = run_cargo(&args, cargo_toml.parent(), &mut |message| {
match message {
Message::BuildScriptExecuted(BuildScript { package_id, out_dir, .. }) => {
acc.out_dirs.insert(package_id, out_dir);
}
| CargoFeatures | identifier_name |
SIMulator_functions.py | (x, opt):
return np.clip(np.cos(x), 0, 1)
def cos_wave_envelope(x, h, opt):
period_in_pixels = opt.w / (opt.k2)
p = period_in_pixels
f = 1 / p
# h = 2*pi*opt.k2*(h-0.5)+10
h = h*opt.w - opt.w/2 + 10
window = np.where(np.abs(x - h) <= period_in_pixels/4, 1, 0)
maxval = np.max(window * np.cos(2*pi*f*(x - h)))
return window * np.cos(2*pi*f*(x - h))
# def cos_wave_envelope(x, h, opt):
# period_in_pixels = opt.w / (2*opt.k2)
# w = period_in_pixels
# # h = (2*h/2/pi) % opt.w
# h = (h*period_in_pixels / 2 / pi)
# window = np.where(np.abs(x - h) <= w/2, 1, 0)
# return window * (1 + np.cos(2 * np.pi * (x - h) / w))
def square_wave(x, opt):
return np.heaviside(np.cos(x), 0)
# return np.where(np.cos(x) >= 0, 1, 0)
# def square_wave(x, opt):
# # Calculate the period and duty cycle
# # period = 4*pi*opt.k2 / opt.w
# # duty_cycle = 1 / (opt.Nshifts)
# peak_width = opt.peak_width
# peak_spacing = opt.peak_spacing
# # Convert these pixel values into fractions of the total width
# duty_cycle = peak_width / peak_spacing
# # Generate the square wave
# return signal.square(x, duty_cycle)
def square_wave_one_third(x, opt):
# sums to 0
return 2 * (np.heaviside(np.cos(x) - np.cos(1 * np.pi / 3), 0) - 1 / 3)
def square_wave_large_spacing(x, opt):
# sums to 1
# d : peak width
d = 2 * np.pi / opt.Nshifts
d_pixels = opt.w / (2*pi*opt.k2) * d
min_d = 1/ (d_pixels / d)
max_d = 2/d_pixels
d_orig = d
# d = np.clip(d, min_d, max_d)
d = max(d, min_d)
print(f"d_pixels: {d_pixels}, min_d: {min_d}, max_d: {max_d}, d: {d}, d_orig: {d_orig}")
return 2*(np.heaviside(np.cos(x) - np.cos(d/2), 0)-0.3)
@jit(nopython=True)
def DMDPixelTransform(input_img, dmdMapping, xoffset=0, yoffset=0):
# Initialize an array of zeros with same size as the input image
transformed_img = np.zeros_like(input_img)
# Get the dimensions of the input image
rows, cols = input_img.shape
# Iterate over the pixels of the input image
for i in range(rows):
for j in range(cols):
# Calculate the new coordinates for the pixel
ip = i + yoffset
jp = j + xoffset
# Apply the dmdMapping transformation if set
if dmdMapping > 0:
transformed_i = jp + ip - 2
transformed_j = (jp - ip + 4) // 2
else:
transformed_i = ip
transformed_j = jp
# If the new coordinates are within the bounds of the image, copy the pixel value
if 0 <= transformed_i < rows and 0 <= transformed_j < cols:
transformed_img[transformed_i, transformed_j] = input_img[i, j]
# Return the transformed image
return transformed_img
def Get_X_Y_MeshGrids(w, opt, forPSF=False):
# TODO: these hard-coded values are not ideal
# and this way of scaling the patterns is
# likely going to lead to undesired behaviour
if opt.crop_factor:
if opt.patterns > 0: # assuming DMD resolution
crop_factor_x = 1
crop_factor_y = 1
else:
dim = opt.imageSize
if type(dim) is int:
dim = (dim, dim)
crop_factor_x = dim[1] / 912 # 428
crop_factor_y = dim[0] / 1140 # 684
# data from dec 2022 acquired with DMD patterns with the below factors
# crop_factor_x = 1
# crop_factor_y = 1
# first version, december 2022
# wo = w / 2
# x = np.linspace(0, w - 1, 912)
# y = np.linspace(0, w - 1, 1140)
# [X, Y] = np.meshgrid(x, y)
if (
opt.dmdMapping == 2
or (opt.dmdMapping == 1 and opt.SIMmodality == "stripes")
) and not forPSF:
padding = 4
else:
padding = 1
x = np.linspace(
0, padding * crop_factor_x * 512 - 1, padding * int(crop_factor_x * 912)
)
y = np.linspace(
0, padding * crop_factor_y * 512 - 1, padding * int(crop_factor_y * 1140)
)
[X, Y] = np.meshgrid(x, y)
else:
x = np.linspace(0, w - 1, w)
y = np.linspace(0, w - 1, w)
X, Y = np.meshgrid(x, y)
return X, Y
def PsfOtf(w, opt):
# AIM: To generate PSF and OTF using Bessel function
# INPUT VARIABLES
# w: image size
# scale: a parameter used to adjust PSF/OTF width
# OUTPUT VRAIBLES
# yyo: system PSF
# OTF2dc: system OTF
eps = np.finfo(np.float64).eps
X, Y = Get_X_Y_MeshGrids(w, opt, forPSF=True)
scale = opt.PSFOTFscale
# Generation of the PSF with Besselj.
R = np.sqrt(np.minimum(X, np.abs(X - w)) ** 2 + np.minimum(Y, np.abs(Y - w)) ** 2)
yy = np.abs(2 * scipy.special.jv(1, scale * R + eps) / (scale * R + eps)) ** 2
yy0 = fftshift(yy)
# Generate 2D OTF.
OTF2d = fft2(yy)
OTF2dmax = np.max([np.abs(OTF2d)])
OTF2d = OTF2d / OTF2dmax
OTF2dc = np.abs(fftshift(OTF2d))
return (yy0, OTF2dc)
def conv2(x, y, mode="same"):
# Make it equivalent to Matlab's conv2 function
# https://stackoverflow.com/questions/3731093/is-there-a-python-equivalent-of-matlabs-conv2-function
return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)/x.size/y.size
def SIMimages(opt, DIo, func=cos_wave, pixelsize_ratio=1):
# AIM: to generate raw sim images
# INPUT VARIABLES
# k2: illumination frequency
# DIo: specimen image or integer (dimension) if only patterns are wanted
# PSFo: system PSF
# OTFo: system OTF
# UsePSF: 1 (to blur SIM images by convloving with PSF)
# 0 (to blur SIM images by truncating its fourier content beyond OTF)
# NoiseLevel: percentage noise level for generating gaussian noise
# OUTPUT VARIABLES
# frames: raw sim images
# DIoTnoisy: noisy wide field image
# DIoT: noise-free wide field image
if type(DIo) == int:
assert(opt.patterns == 1) # only patterns are wanted
w = DIo
wo = w / 2
else:
assert(opt.patterns != 1)
w = DIo.shape[0]
wo = w / 2
opt.w = w
X, Y = Get_X_Y_MeshGrids(w, opt)
PSFo, OTFo = PsfOtf(w, opt)
# Illum | cos_wave | identifier_name |
|
SIMulator_functions.py | 512 - 1, padding * int(crop_factor_x * 912)
)
y = np.linspace(
0, padding * crop_factor_y * 512 - 1, padding * int(crop_factor_y * 1140)
)
[X, Y] = np.meshgrid(x, y)
else:
x = np.linspace(0, w - 1, w)
y = np.linspace(0, w - 1, w)
X, Y = np.meshgrid(x, y)
return X, Y
def PsfOtf(w, opt):
# AIM: To generate PSF and OTF using Bessel function
# INPUT VARIABLES
# w: image size
# scale: a parameter used to adjust PSF/OTF width
# OUTPUT VRAIBLES
# yyo: system PSF
# OTF2dc: system OTF
eps = np.finfo(np.float64).eps
X, Y = Get_X_Y_MeshGrids(w, opt, forPSF=True)
scale = opt.PSFOTFscale
# Generation of the PSF with Besselj.
R = np.sqrt(np.minimum(X, np.abs(X - w)) ** 2 + np.minimum(Y, np.abs(Y - w)) ** 2)
yy = np.abs(2 * scipy.special.jv(1, scale * R + eps) / (scale * R + eps)) ** 2
yy0 = fftshift(yy)
# Generate 2D OTF.
OTF2d = fft2(yy)
OTF2dmax = np.max([np.abs(OTF2d)])
OTF2d = OTF2d / OTF2dmax
OTF2dc = np.abs(fftshift(OTF2d))
return (yy0, OTF2dc)
def conv2(x, y, mode="same"):
# Make it equivalent to Matlab's conv2 function
# https://stackoverflow.com/questions/3731093/is-there-a-python-equivalent-of-matlabs-conv2-function
return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)/x.size/y.size
def SIMimages(opt, DIo, func=cos_wave, pixelsize_ratio=1):
# AIM: to generate raw sim images
# INPUT VARIABLES
# k2: illumination frequency
# DIo: specimen image or integer (dimension) if only patterns are wanted
# PSFo: system PSF
# OTFo: system OTF
# UsePSF: 1 (to blur SIM images by convloving with PSF)
# 0 (to blur SIM images by truncating its fourier content beyond OTF)
# NoiseLevel: percentage noise level for generating gaussian noise
# OUTPUT VARIABLES
# frames: raw sim images
# DIoTnoisy: noisy wide field image
# DIoT: noise-free wide field image
if type(DIo) == int:
assert(opt.patterns == 1) # only patterns are wanted
w = DIo
wo = w / 2
else:
assert(opt.patterns != 1)
w = DIo.shape[0]
wo = w / 2
opt.w = w
X, Y = Get_X_Y_MeshGrids(w, opt)
PSFo, OTFo = PsfOtf(w, opt)
# Illuminating pattern
# orientation direction of illumination patterns
orientation = np.zeros(opt.Nangles)
for i in range(opt.Nangles):
orientation[i] = i * pi / opt.Nangles + opt.alpha + opt.angleError
if opt.shuffleOrientations:
np.random.shuffle(orientation)
# illumination frequency vectors
k2mat = np.zeros((opt.Nangles, 2))
for i in range(opt.Nangles):
theta = orientation[i]
k2mat[i, :] = np.array(
[(opt.k2 * pixelsize_ratio / w) * cos(theta), (opt.k2 / w) * sin(theta)]
)
# illumination phase shifts along directions with errors
ps = np.zeros((opt.Nangles, opt.Nshifts))
for i_a in range(opt.Nangles):
for i_s in range(opt.Nshifts):
ps[i_a, i_s] = 2 * pi * i_s / opt.Nshifts + opt.phaseError[i_a, i_s]
# illumination patterns
frames = []
auxil = []
for i_a in range(opt.Nangles):
for i_s in range(opt.Nshifts):
# illuminated signal
if not opt.noStripes:
if func == cos_wave_envelope:
sig = opt.meanInten[i_a] + opt.ampInten[i_a] * cos_wave_envelope(
(k2mat[i_a, 0]/opt.k2*opt.w * (X - opt.w/2) + k2mat[i_a, 1]/opt.k2*opt.w * (Y - opt.w/2)),
i_s/opt.Nshifts, opt)
else:
sig = opt.meanInten[i_a] + opt.ampInten[i_a] * func(
2*pi * (k2mat[i_a, 0] * (X - wo) + k2mat[i_a, 1] * (Y - wo))
+ ps[i_a, i_s]
, opt)
else:
sig = 1 # simulating widefield
# whether to transform sig for dmd
if opt.dmdMapping > 0:
# crop to upper left quadrant if padding was added
if opt.dmdMapping == 1:
sig = DMDPixelTransform(
sig,
opt.dmdMapping,
xoffset=-sig.shape[1] // 2,
yoffset=-sig.shape[0] // 2,
)
sig = sig[: sig.shape[0] // 4, : sig.shape[1] // 4]
elif opt.dmdMapping == 2:
# rotate image by 45 degrees
rotated_image = transform.rotate(sig, -45)
rows, cols = rotated_image.shape[0], rotated_image.shape[1]
# crop centre to avoid black corners
row_start = rows // 4 + rows // 8
row_end = row_start + rows // 4
col_start = cols // 4 + cols // 8
col_end = col_start + cols // 4
# Crop the center of the image
sig = rotated_image[row_start:row_end, col_start:col_end]
if int(opt.patterns) == 1: # only patterns
frame = sig
elif int(opt.patterns) == 2: # patterns + specimen
sig = sig.clip(0, 1)
frame = DIo * sig
auxil.append(sig)
else: # with diffraction, pattern = False/0
sup_sig = DIo * sig # superposed signal
# superposed (noise-free) Images
if opt.UsePSF == 1:
ST = conv2(sup_sig, PSFo, "same")
else:
ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo)))
# Noise generation
if opt.usePoissonNoise:
# Poisson
vals = 2 ** np.ceil(
np.log2(opt.NoiseLevel)
) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian
STnoisy = np.random.poisson(ST * vals) / float(vals)
else:
# Gaussian
aNoise = opt.NoiseLevel / 100 # noise
# SNR = 1/aNoise
# SNRdb = 20*log10(1/aNoise)
nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape))
NoiseFrac = 1 # may be set to 0 to avoid noise addition
# noise added raw SIM images
STnoisy = ST + NoiseFrac * nST
frame = STnoisy.clip(0, 1)
frames.append(frame)
opt.auxil = auxil
return frames
def GenSpeckle(dim, opt):
| N = opt.Nspeckles
I = np.zeros((dim, dim))
randx = np.random.choice(
list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False
)
randy = np.random.choice(
list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False
)
for i in range(N):
x = randx[i]
y = randy[i]
r = np.random.randint(3, 5)
cr, cc = draw.ellipse(x, y, r, r, (dim, dim))
I[cr, cc] += 0.1
return I | identifier_body |
|
SIMulator_functions.py | 2dmax = np.max([np.abs(OTF2d)])
OTF2d = OTF2d / OTF2dmax
OTF2dc = np.abs(fftshift(OTF2d))
return (yy0, OTF2dc)
def conv2(x, y, mode="same"):
# Make it equivalent to Matlab's conv2 function
# https://stackoverflow.com/questions/3731093/is-there-a-python-equivalent-of-matlabs-conv2-function
return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)/x.size/y.size
def SIMimages(opt, DIo, func=cos_wave, pixelsize_ratio=1):
# AIM: to generate raw sim images
# INPUT VARIABLES
# k2: illumination frequency
# DIo: specimen image or integer (dimension) if only patterns are wanted
# PSFo: system PSF
# OTFo: system OTF
# UsePSF: 1 (to blur SIM images by convloving with PSF)
# 0 (to blur SIM images by truncating its fourier content beyond OTF)
# NoiseLevel: percentage noise level for generating gaussian noise
# OUTPUT VARIABLES
# frames: raw sim images
# DIoTnoisy: noisy wide field image
# DIoT: noise-free wide field image
if type(DIo) == int:
assert(opt.patterns == 1) # only patterns are wanted
w = DIo
wo = w / 2
else:
assert(opt.patterns != 1)
w = DIo.shape[0]
wo = w / 2
opt.w = w
X, Y = Get_X_Y_MeshGrids(w, opt)
PSFo, OTFo = PsfOtf(w, opt)
# Illuminating pattern
# orientation direction of illumination patterns
orientation = np.zeros(opt.Nangles)
for i in range(opt.Nangles):
orientation[i] = i * pi / opt.Nangles + opt.alpha + opt.angleError
if opt.shuffleOrientations:
np.random.shuffle(orientation)
# illumination frequency vectors
k2mat = np.zeros((opt.Nangles, 2))
for i in range(opt.Nangles):
theta = orientation[i]
k2mat[i, :] = np.array(
[(opt.k2 * pixelsize_ratio / w) * cos(theta), (opt.k2 / w) * sin(theta)]
)
# illumination phase shifts along directions with errors
ps = np.zeros((opt.Nangles, opt.Nshifts))
for i_a in range(opt.Nangles):
for i_s in range(opt.Nshifts):
ps[i_a, i_s] = 2 * pi * i_s / opt.Nshifts + opt.phaseError[i_a, i_s]
# illumination patterns
frames = []
auxil = []
for i_a in range(opt.Nangles):
for i_s in range(opt.Nshifts):
# illuminated signal
if not opt.noStripes:
if func == cos_wave_envelope:
sig = opt.meanInten[i_a] + opt.ampInten[i_a] * cos_wave_envelope(
(k2mat[i_a, 0]/opt.k2*opt.w * (X - opt.w/2) + k2mat[i_a, 1]/opt.k2*opt.w * (Y - opt.w/2)),
i_s/opt.Nshifts, opt)
else:
sig = opt.meanInten[i_a] + opt.ampInten[i_a] * func(
2*pi * (k2mat[i_a, 0] * (X - wo) + k2mat[i_a, 1] * (Y - wo))
+ ps[i_a, i_s]
, opt)
else:
sig = 1 # simulating widefield
# whether to transform sig for dmd
if opt.dmdMapping > 0:
# crop to upper left quadrant if padding was added
if opt.dmdMapping == 1:
sig = DMDPixelTransform(
sig,
opt.dmdMapping,
xoffset=-sig.shape[1] // 2,
yoffset=-sig.shape[0] // 2,
)
sig = sig[: sig.shape[0] // 4, : sig.shape[1] // 4]
elif opt.dmdMapping == 2:
# rotate image by 45 degrees
rotated_image = transform.rotate(sig, -45)
rows, cols = rotated_image.shape[0], rotated_image.shape[1]
# crop centre to avoid black corners
row_start = rows // 4 + rows // 8
row_end = row_start + rows // 4
col_start = cols // 4 + cols // 8
col_end = col_start + cols // 4
# Crop the center of the image
sig = rotated_image[row_start:row_end, col_start:col_end]
if int(opt.patterns) == 1: # only patterns
frame = sig
elif int(opt.patterns) == 2: # patterns + specimen
sig = sig.clip(0, 1)
frame = DIo * sig
auxil.append(sig)
else: # with diffraction, pattern = False/0
sup_sig = DIo * sig # superposed signal
# superposed (noise-free) Images
if opt.UsePSF == 1:
ST = conv2(sup_sig, PSFo, "same")
else:
ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo)))
# Noise generation
if opt.usePoissonNoise:
# Poisson
vals = 2 ** np.ceil(
np.log2(opt.NoiseLevel)
) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian
STnoisy = np.random.poisson(ST * vals) / float(vals)
else:
# Gaussian
aNoise = opt.NoiseLevel / 100 # noise
# SNR = 1/aNoise
# SNRdb = 20*log10(1/aNoise)
nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape))
NoiseFrac = 1 # may be set to 0 to avoid noise addition
# noise added raw SIM images
STnoisy = ST + NoiseFrac * nST
frame = STnoisy.clip(0, 1)
frames.append(frame)
opt.auxil = auxil
return frames
def GenSpeckle(dim, opt):
N = opt.Nspeckles
I = np.zeros((dim, dim))
randx = np.random.choice(
list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False
)
randy = np.random.choice(
list(range(dim)) * np.ceil(N / dim).astype("int"), size=N, replace=False
)
for i in range(N):
x = randx[i]
y = randy[i]
r = np.random.randint(3, 5)
cr, cc = draw.ellipse(x, y, r, r, (dim, dim))
I[cr, cc] += 0.1
return I
def SIMimages_speckle(opt, DIo):
# AIM: to generate raw sim images
# INPUT VARIABLES
# k2: illumination frequency
# DIo: specimen image
# PSFo: system PSF
# OTFo: system OTF
# UsePSF: 1 (to blur SIM images by convloving with PSF)
# 0 (to blur SIM images by truncating its fourier content beyond OTF)
# NoiseLevel: percentage noise level for generating gaussian noise
# OUTPUT VARIABLES
# frames: raw sim images
# DIoTnoisy: noisy wide field image
# DIoT: noise-free wide field image
w = DIo.shape[0]
X, Y = Get_X_Y_MeshGrids(w, opt)
PSFo, OTFo = PsfOtf(w, opt)
# illumination patterns
frames = []
for i_a in range(opt.Nframes):
# illuminated signal
sig = GenSpeckle(
w, opt
) # opt.meanInten[i_a] + opt.ampInten[i_a] * GenSpeckle(w)
sup_sig = DIo * sig # superposed signal
# superposed (noise-free) Images
if opt.UsePSF == 1:
ST = conv2(sup_sig, PSFo, "same")
else:
ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo)))
# Gaussian noise generation
aNoise = opt.NoiseLevel / 100 # noise | random_line_split |
||
SIMulator_functions.py |
crop_factor_x = dim[1] / 912 # 428
crop_factor_y = dim[0] / 1140 # 684
# data from dec 2022 acquired with DMD patterns with the below factors
# crop_factor_x = 1
# crop_factor_y = 1
# first version, december 2022
# wo = w / 2
# x = np.linspace(0, w - 1, 912)
# y = np.linspace(0, w - 1, 1140)
# [X, Y] = np.meshgrid(x, y)
if (
opt.dmdMapping == 2
or (opt.dmdMapping == 1 and opt.SIMmodality == "stripes")
) and not forPSF:
padding = 4
else:
padding = 1
x = np.linspace(
0, padding * crop_factor_x * 512 - 1, padding * int(crop_factor_x * 912)
)
y = np.linspace(
0, padding * crop_factor_y * 512 - 1, padding * int(crop_factor_y * 1140)
)
[X, Y] = np.meshgrid(x, y)
else:
x = np.linspace(0, w - 1, w)
y = np.linspace(0, w - 1, w)
X, Y = np.meshgrid(x, y)
return X, Y
def PsfOtf(w, opt):
# AIM: To generate PSF and OTF using Bessel function
# INPUT VARIABLES
# w: image size
# scale: a parameter used to adjust PSF/OTF width
# OUTPUT VRAIBLES
# yyo: system PSF
# OTF2dc: system OTF
eps = np.finfo(np.float64).eps
X, Y = Get_X_Y_MeshGrids(w, opt, forPSF=True)
scale = opt.PSFOTFscale
# Generation of the PSF with Besselj.
R = np.sqrt(np.minimum(X, np.abs(X - w)) ** 2 + np.minimum(Y, np.abs(Y - w)) ** 2)
yy = np.abs(2 * scipy.special.jv(1, scale * R + eps) / (scale * R + eps)) ** 2
yy0 = fftshift(yy)
# Generate 2D OTF.
OTF2d = fft2(yy)
OTF2dmax = np.max([np.abs(OTF2d)])
OTF2d = OTF2d / OTF2dmax
OTF2dc = np.abs(fftshift(OTF2d))
return (yy0, OTF2dc)
def conv2(x, y, mode="same"):
# Make it equivalent to Matlab's conv2 function
# https://stackoverflow.com/questions/3731093/is-there-a-python-equivalent-of-matlabs-conv2-function
return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)/x.size/y.size
def SIMimages(opt, DIo, func=cos_wave, pixelsize_ratio=1):
# AIM: to generate raw sim images
# INPUT VARIABLES
# k2: illumination frequency
# DIo: specimen image or integer (dimension) if only patterns are wanted
# PSFo: system PSF
# OTFo: system OTF
# UsePSF: 1 (to blur SIM images by convloving with PSF)
# 0 (to blur SIM images by truncating its fourier content beyond OTF)
# NoiseLevel: percentage noise level for generating gaussian noise
# OUTPUT VARIABLES
# frames: raw sim images
# DIoTnoisy: noisy wide field image
# DIoT: noise-free wide field image
if type(DIo) == int:
assert(opt.patterns == 1) # only patterns are wanted
w = DIo
wo = w / 2
else:
assert(opt.patterns != 1)
w = DIo.shape[0]
wo = w / 2
opt.w = w
X, Y = Get_X_Y_MeshGrids(w, opt)
PSFo, OTFo = PsfOtf(w, opt)
# Illuminating pattern
# orientation direction of illumination patterns
orientation = np.zeros(opt.Nangles)
for i in range(opt.Nangles):
orientation[i] = i * pi / opt.Nangles + opt.alpha + opt.angleError
if opt.shuffleOrientations:
np.random.shuffle(orientation)
# illumination frequency vectors
k2mat = np.zeros((opt.Nangles, 2))
for i in range(opt.Nangles):
theta = orientation[i]
k2mat[i, :] = np.array(
[(opt.k2 * pixelsize_ratio / w) * cos(theta), (opt.k2 / w) * sin(theta)]
)
# illumination phase shifts along directions with errors
ps = np.zeros((opt.Nangles, opt.Nshifts))
for i_a in range(opt.Nangles):
for i_s in range(opt.Nshifts):
ps[i_a, i_s] = 2 * pi * i_s / opt.Nshifts + opt.phaseError[i_a, i_s]
# illumination patterns
frames = []
auxil = []
for i_a in range(opt.Nangles):
for i_s in range(opt.Nshifts):
# illuminated signal
if not opt.noStripes:
if func == cos_wave_envelope:
sig = opt.meanInten[i_a] + opt.ampInten[i_a] * cos_wave_envelope(
(k2mat[i_a, 0]/opt.k2*opt.w * (X - opt.w/2) + k2mat[i_a, 1]/opt.k2*opt.w * (Y - opt.w/2)),
i_s/opt.Nshifts, opt)
else:
sig = opt.meanInten[i_a] + opt.ampInten[i_a] * func(
2*pi * (k2mat[i_a, 0] * (X - wo) + k2mat[i_a, 1] * (Y - wo))
+ ps[i_a, i_s]
, opt)
else:
sig = 1 # simulating widefield
# whether to transform sig for dmd
if opt.dmdMapping > 0:
# crop to upper left quadrant if padding was added
if opt.dmdMapping == 1:
sig = DMDPixelTransform(
sig,
opt.dmdMapping,
xoffset=-sig.shape[1] // 2,
yoffset=-sig.shape[0] // 2,
)
sig = sig[: sig.shape[0] // 4, : sig.shape[1] // 4]
elif opt.dmdMapping == 2:
# rotate image by 45 degrees
rotated_image = transform.rotate(sig, -45)
rows, cols = rotated_image.shape[0], rotated_image.shape[1]
# crop centre to avoid black corners
row_start = rows // 4 + rows // 8
row_end = row_start + rows // 4
col_start = cols // 4 + cols // 8
col_end = col_start + cols // 4
# Crop the center of the image
sig = rotated_image[row_start:row_end, col_start:col_end]
if int(opt.patterns) == 1: # only patterns
frame = sig
elif int(opt.patterns) == 2: # patterns + specimen
sig = sig.clip(0, 1)
frame = DIo * sig
auxil.append(sig)
else: # with diffraction, pattern = False/0
sup_sig = DIo * sig # superposed signal
# superposed (noise-free) Images
if opt.UsePSF == 1:
ST = conv2(sup_sig, PSFo, "same")
else:
ST = np.real(ifft2(fft2(sup_sig) * fftshift(OTFo)))
# Noise generation
if opt.usePoissonNoise:
# Poisson
vals = 2 ** np.ceil(
np.log2(opt.NoiseLevel)
) # NoiseLevel could be 200 for Poisson: degradation seems similar to Noiselevel 20 for Gaussian
STnoisy = np.random.poisson(ST * vals) / float(vals)
else:
# Gaussian
aNoise = opt.NoiseLevel / 100 # noise
# SNR = 1/aNoise
# SNRdb = 20*log10(1/aNoise)
nST = np.random.normal(0, aNoise * np.std(ST, ddof=1), (ST.shape | dim = (dim, dim) | conditional_block |
|
factory.go | 91:
signMode = signing.SignMode_SIGN_MODE_EIP_191
}
var accNum, accSeq uint64
if clientCtx.Offline {
if flagSet.Changed(flags.FlagAccountNumber) && flagSet.Changed(flags.FlagSequence) {
accNum = clientCtx.Viper.GetUint64(flags.FlagAccountNumber)
accSeq = clientCtx.Viper.GetUint64(flags.FlagSequence)
} else {
return Factory{}, errors.New("account-number and sequence must be set in offline mode")
}
}
gasAdj := clientCtx.Viper.GetFloat64(flags.FlagGasAdjustment)
memo := clientCtx.Viper.GetString(flags.FlagNote)
timeoutHeight := clientCtx.Viper.GetUint64(flags.FlagTimeoutHeight)
gasStr := clientCtx.Viper.GetString(flags.FlagGas)
gasSetting, _ := flags.ParseGasSetting(gasStr)
f := Factory{
txConfig: clientCtx.TxConfig,
accountRetriever: clientCtx.AccountRetriever,
keybase: clientCtx.Keyring,
chainID: clientCtx.ChainID,
offline: clientCtx.Offline,
generateOnly: clientCtx.GenerateOnly,
gas: gasSetting.Gas,
simulateAndExecute: gasSetting.Simulate,
accountNumber: accNum,
sequence: accSeq,
timeoutHeight: timeoutHeight,
gasAdjustment: gasAdj,
memo: memo,
signMode: signMode,
feeGranter: clientCtx.FeeGranter,
feePayer: clientCtx.FeePayer,
}
feesStr := clientCtx.Viper.GetString(flags.FlagFees)
f = f.WithFees(feesStr)
tipsStr := clientCtx.Viper.GetString(flags.FlagTip)
// Add tips to factory. The tipper is necessarily the Msg signer, i.e.
// the from address.
f = f.WithTips(tipsStr, clientCtx.FromAddress.String())
gasPricesStr := clientCtx.Viper.GetString(flags.FlagGasPrices)
f = f.WithGasPrices(gasPricesStr)
f = f.WithPreprocessTxHook(clientCtx.PreprocessTxHook)
return f, nil
}
func (f Factory) AccountNumber() uint64 { return f.accountNumber }
func (f Factory) Sequence() uint64 { return f.sequence }
func (f Factory) Gas() uint64 { return f.gas }
func (f Factory) GasAdjustment() float64 { return f.gasAdjustment }
func (f Factory) Keybase() keyring.Keyring { return f.keybase }
func (f Factory) ChainID() string { return f.chainID }
func (f Factory) Memo() string { return f.memo }
func (f Factory) Fees() sdk.Coins { return f.fees }
func (f Factory) GasPrices() sdk.DecCoins { return f.gasPrices }
func (f Factory) AccountRetriever() client.AccountRetriever { return f.accountRetriever }
func (f Factory) TimeoutHeight() uint64 { return f.timeoutHeight }
// SimulateAndExecute returns the option to simulate and then execute the transaction
// using the gas from the simulation results
func (f Factory) SimulateAndExecute() bool { return f.simulateAndExecute }
// WithTxConfig returns a copy of the Factory with an updated TxConfig.
func (f Factory) WithTxConfig(g client.TxConfig) Factory {
f.txConfig = g
return f
}
// WithAccountRetriever returns a copy of the Factory with an updated AccountRetriever.
func (f Factory) WithAccountRetriever(ar client.AccountRetriever) Factory {
f.accountRetriever = ar
return f
}
// WithChainID returns a copy of the Factory with an updated chainID.
func (f Factory) WithChainID(chainID string) Factory {
f.chainID = chainID
return f
}
// WithGas returns a copy of the Factory with an updated gas value.
func (f Factory) WithGas(gas uint64) Factory {
f.gas = gas
return f
}
// WithFees returns a copy of the Factory with an updated fee.
func (f Factory) WithFees(fees string) Factory {
parsedFees, err := sdk.ParseCoinsNormalized(fees)
if err != nil {
panic(err)
}
f.fees = parsedFees
return f
}
// WithTips returns a copy of the Factory with an updated tip.
func (f Factory) WithTips(tip, tipper string) Factory {
parsedTips, err := sdk.ParseCoinsNormalized(tip)
if err != nil {
panic(err)
}
f.tip = &tx.Tip{
Tipper: tipper,
Amount: parsedTips,
}
return f
}
// WithGasPrices returns a copy of the Factory with updated gas prices.
func (f Factory) WithGasPrices(gasPrices string) Factory {
parsedGasPrices, err := sdk.ParseDecCoins(gasPrices)
if err != nil {
panic(err)
}
f.gasPrices = parsedGasPrices
return f
}
// WithKeybase returns a copy of the Factory with updated Keybase.
func (f Factory) WithKeybase(keybase keyring.Keyring) Factory {
f.keybase = keybase
return f
}
// WithSequence returns a copy of the Factory with an updated sequence number.
func (f Factory) WithSequence(sequence uint64) Factory {
f.sequence = sequence
return f
}
// WithMemo returns a copy of the Factory with an updated memo.
func (f Factory) WithMemo(memo string) Factory {
f.memo = memo
return f
}
// WithAccountNumber returns a copy of the Factory with an updated account number.
func (f Factory) WithAccountNumber(accnum uint64) Factory {
f.accountNumber = accnum
return f
}
// WithGasAdjustment returns a copy of the Factory with an updated gas adjustment.
func (f Factory) WithGasAdjustment(gasAdj float64) Factory {
f.gasAdjustment = gasAdj
return f
}
// WithSimulateAndExecute returns a copy of the Factory with an updated gas
// simulation value.
func (f Factory) WithSimulateAndExecute(sim bool) Factory {
f.simulateAndExecute = sim
return f
}
// SignMode returns the sign mode configured in the Factory
func (f Factory) SignMode() signing.SignMode {
return f.signMode
}
// WithSignMode returns a copy of the Factory with an updated sign mode value.
func (f Factory) WithSignMode(mode signing.SignMode) Factory {
f.signMode = mode
return f
}
// WithTimeoutHeight returns a copy of the Factory with an updated timeout height.
func (f Factory) WithTimeoutHeight(height uint64) Factory {
f.timeoutHeight = height
return f
}
// WithFeeGranter returns a copy of the Factory with an updated fee granter.
func (f Factory) WithFeeGranter(fg sdk.AccAddress) Factory {
f.feeGranter = fg
return f
}
// WithFeePayer returns a copy of the Factory with an updated fee granter.
func (f Factory) | (fp sdk.AccAddress) Factory {
f.feePayer = fp
return f
}
// WithPreprocessTxHook returns a copy of the Factory with an updated preprocess tx function,
// allows for preprocessing of transaction data using the TxBuilder.
func (f Factory) WithPreprocessTxHook(preprocessFn client.PreprocessTxFn) Factory {
f.preprocessTxHook = preprocessFn
return f
}
// PreprocessTx calls the preprocessing hook with the factory parameters and
// returns the result.
func (f Factory) PreprocessTx(keyname string, builder client.TxBuilder) error {
if f.preprocessTxHook == nil {
// Allow pass-through
return nil
}
key, err := f.Keybase().Key(keyname)
if err != nil {
return fmt.Errorf("error retrieving key from keyring: %w", err)
}
return f.preprocessTxHook(f.chainID, key.GetType(), builder)
}
// WithExtensionOptions returns a Factory with given extension options added to the existing options,
// Example to add dynamic fee extension options:
//
// extOpt := ethermint.ExtensionOptionDynamicFeeTx{
// MaxPriorityPrice: math.NewInt(1000000),
// }
//
// extBytes, _ := extOpt.Marshal()
//
// extOpts := []*types.Any{
// {
// TypeUrl: "/ethermint.types.v1.ExtensionOptionDynamicFeeTx",
// Value: extBytes,
// },
// }
//
// txf.WithExtensionOptions(extOpts...)
func (f Factory) WithExtensionOptions(extOpts ...*codectypes.Any) Factory {
f.extOptions = extOpts
return f
}
// BuildUnsignedTx builds a transaction to be signed given a set of messages.
// Once created, the fee, memo, and messages are set.
func (f Factory) BuildUnsignedTx(msgs ...sdk.Msg) (client.TxBuilder, error) {
if f.offline && f.generateOnly {
if f.chainID != "" {
return nil, fmt.Errorf("chain ID cannot be used when offline and generate-only flags are set")
}
} else if f.chainID == "" {
return nil, fmt.Errorf("chain ID required but not specified")
}
fees := f.fees
if | WithFeePayer | identifier_name |
factory.go | 1:
signMode = signing.SignMode_SIGN_MODE_EIP_191
}
var accNum, accSeq uint64
if clientCtx.Offline {
if flagSet.Changed(flags.FlagAccountNumber) && flagSet.Changed(flags.FlagSequence) {
accNum = clientCtx.Viper.GetUint64(flags.FlagAccountNumber)
accSeq = clientCtx.Viper.GetUint64(flags.FlagSequence)
} else {
return Factory{}, errors.New("account-number and sequence must be set in offline mode")
}
}
gasAdj := clientCtx.Viper.GetFloat64(flags.FlagGasAdjustment)
memo := clientCtx.Viper.GetString(flags.FlagNote)
timeoutHeight := clientCtx.Viper.GetUint64(flags.FlagTimeoutHeight)
gasStr := clientCtx.Viper.GetString(flags.FlagGas)
gasSetting, _ := flags.ParseGasSetting(gasStr)
f := Factory{
txConfig: clientCtx.TxConfig,
accountRetriever: clientCtx.AccountRetriever,
keybase: clientCtx.Keyring,
chainID: clientCtx.ChainID,
offline: clientCtx.Offline,
generateOnly: clientCtx.GenerateOnly,
gas: gasSetting.Gas,
simulateAndExecute: gasSetting.Simulate,
accountNumber: accNum,
sequence: accSeq,
timeoutHeight: timeoutHeight,
gasAdjustment: gasAdj,
memo: memo,
signMode: signMode,
feeGranter: clientCtx.FeeGranter,
feePayer: clientCtx.FeePayer,
}
feesStr := clientCtx.Viper.GetString(flags.FlagFees)
f = f.WithFees(feesStr)
tipsStr := clientCtx.Viper.GetString(flags.FlagTip)
// Add tips to factory. The tipper is necessarily the Msg signer, i.e.
// the from address.
f = f.WithTips(tipsStr, clientCtx.FromAddress.String())
gasPricesStr := clientCtx.Viper.GetString(flags.FlagGasPrices)
f = f.WithGasPrices(gasPricesStr)
f = f.WithPreprocessTxHook(clientCtx.PreprocessTxHook)
return f, nil
}
func (f Factory) AccountNumber() uint64 { return f.accountNumber }
func (f Factory) Sequence() uint64 { return f.sequence }
func (f Factory) Gas() uint64 { return f.gas }
func (f Factory) GasAdjustment() float64 { return f.gasAdjustment }
func (f Factory) Keybase() keyring.Keyring { return f.keybase }
func (f Factory) ChainID() string { return f.chainID }
func (f Factory) Memo() string { return f.memo }
func (f Factory) Fees() sdk.Coins { return f.fees }
func (f Factory) GasPrices() sdk.DecCoins { return f.gasPrices }
func (f Factory) AccountRetriever() client.AccountRetriever { return f.accountRetriever }
func (f Factory) TimeoutHeight() uint64 { return f.timeoutHeight }
// SimulateAndExecute returns the option to simulate and then execute the transaction
// using the gas from the simulation results
func (f Factory) SimulateAndExecute() bool { return f.simulateAndExecute }
// WithTxConfig returns a copy of the Factory with an updated TxConfig.
func (f Factory) WithTxConfig(g client.TxConfig) Factory {
f.txConfig = g
return f
}
// WithAccountRetriever returns a copy of the Factory with an updated AccountRetriever.
func (f Factory) WithAccountRetriever(ar client.AccountRetriever) Factory {
f.accountRetriever = ar
return f
}
// WithChainID returns a copy of the Factory with an updated chainID.
func (f Factory) WithChainID(chainID string) Factory {
f.chainID = chainID
return f
}
// WithGas returns a copy of the Factory with an updated gas value.
func (f Factory) WithGas(gas uint64) Factory {
f.gas = gas
return f
}
// WithFees returns a copy of the Factory with an updated fee.
func (f Factory) WithFees(fees string) Factory {
parsedFees, err := sdk.ParseCoinsNormalized(fees)
if err != nil {
panic(err)
}
f.fees = parsedFees
return f
}
// WithTips returns a copy of the Factory with an updated tip.
func (f Factory) WithTips(tip, tipper string) Factory {
parsedTips, err := sdk.ParseCoinsNormalized(tip)
if err != nil {
panic(err)
}
f.tip = &tx.Tip{
Tipper: tipper,
Amount: parsedTips,
}
return f
}
// WithGasPrices returns a copy of the Factory with updated gas prices.
func (f Factory) WithGasPrices(gasPrices string) Factory {
parsedGasPrices, err := sdk.ParseDecCoins(gasPrices)
if err != nil {
panic(err)
}
f.gasPrices = parsedGasPrices
return f
}
// WithKeybase returns a copy of the Factory with updated Keybase.
func (f Factory) WithKeybase(keybase keyring.Keyring) Factory {
f.keybase = keybase
return f
}
// WithSequence returns a copy of the Factory with an updated sequence number.
func (f Factory) WithSequence(sequence uint64) Factory {
f.sequence = sequence
return f
}
// WithMemo returns a copy of the Factory with an updated memo.
func (f Factory) WithMemo(memo string) Factory {
f.memo = memo
return f
}
// WithAccountNumber returns a copy of the Factory with an updated account number.
func (f Factory) WithAccountNumber(accnum uint64) Factory {
f.accountNumber = accnum
return f
}
// WithGasAdjustment returns a copy of the Factory with an updated gas adjustment.
func (f Factory) WithGasAdjustment(gasAdj float64) Factory {
f.gasAdjustment = gasAdj
return f
}
// WithSimulateAndExecute returns a copy of the Factory with an updated gas
// simulation value.
func (f Factory) WithSimulateAndExecute(sim bool) Factory {
f.simulateAndExecute = sim
return f
}
// SignMode returns the sign mode configured in the Factory
func (f Factory) SignMode() signing.SignMode {
return f.signMode
}
// WithSignMode returns a copy of the Factory with an updated sign mode value.
func (f Factory) WithSignMode(mode signing.SignMode) Factory {
f.signMode = mode
return f
}
// WithTimeoutHeight returns a copy of the Factory with an updated timeout height.
func (f Factory) WithTimeoutHeight(height uint64) Factory {
f.timeoutHeight = height
return f
}
// WithFeeGranter returns a copy of the Factory with an updated fee granter.
func (f Factory) WithFeeGranter(fg sdk.AccAddress) Factory |
// WithFeePayer returns a copy of the Factory with an updated fee granter.
func (f Factory) WithFeePayer(fp sdk.AccAddress) Factory {
f.feePayer = fp
return f
}
// WithPreprocessTxHook returns a copy of the Factory with an updated preprocess tx function,
// allows for preprocessing of transaction data using the TxBuilder.
func (f Factory) WithPreprocessTxHook(preprocessFn client.PreprocessTxFn) Factory {
f.preprocessTxHook = preprocessFn
return f
}
// PreprocessTx calls the preprocessing hook with the factory parameters and
// returns the result.
func (f Factory) PreprocessTx(keyname string, builder client.TxBuilder) error {
if f.preprocessTxHook == nil {
// Allow pass-through
return nil
}
key, err := f.Keybase().Key(keyname)
if err != nil {
return fmt.Errorf("error retrieving key from keyring: %w", err)
}
return f.preprocessTxHook(f.chainID, key.GetType(), builder)
}
// WithExtensionOptions returns a Factory with given extension options added to the existing options,
// Example to add dynamic fee extension options:
//
// extOpt := ethermint.ExtensionOptionDynamicFeeTx{
// MaxPriorityPrice: math.NewInt(1000000),
// }
//
// extBytes, _ := extOpt.Marshal()
//
// extOpts := []*types.Any{
// {
// TypeUrl: "/ethermint.types.v1.ExtensionOptionDynamicFeeTx",
// Value: extBytes,
// },
// }
//
// txf.WithExtensionOptions(extOpts...)
func (f Factory) WithExtensionOptions(extOpts ...*codectypes.Any) Factory {
f.extOptions = extOpts
return f
}
// BuildUnsignedTx builds a transaction to be signed given a set of messages.
// Once created, the fee, memo, and messages are set.
func (f Factory) BuildUnsignedTx(msgs ...sdk.Msg) (client.TxBuilder, error) {
if f.offline && f.generateOnly {
if f.chainID != "" {
return nil, fmt.Errorf("chain ID cannot be used when offline and generate-only flags are set")
}
} else if f.chainID == "" {
return nil, fmt.Errorf("chain ID required but not specified")
}
fees := f.fees
if | {
f.feeGranter = fg
return f
} | identifier_body |
factory.go | feePayer sdk.AccAddress
gasPrices sdk.DecCoins
extOptions []*codectypes.Any
signMode signing.SignMode
simulateAndExecute bool
preprocessTxHook client.PreprocessTxFn
}
// NewFactoryCLI creates a new Factory.
func NewFactoryCLI(clientCtx client.Context, flagSet *pflag.FlagSet) (Factory, error) {
if clientCtx.Viper == nil {
clientCtx.Viper = viper.New()
}
if err := clientCtx.Viper.BindPFlags(flagSet); err != nil {
return Factory{}, fmt.Errorf("failed to bind flags to viper: %w", err)
}
signModeStr := clientCtx.SignModeStr
signMode := signing.SignMode_SIGN_MODE_UNSPECIFIED
switch signModeStr {
case flags.SignModeDirect:
signMode = signing.SignMode_SIGN_MODE_DIRECT
case flags.SignModeLegacyAminoJSON:
signMode = signing.SignMode_SIGN_MODE_LEGACY_AMINO_JSON
case flags.SignModeDirectAux:
signMode = signing.SignMode_SIGN_MODE_DIRECT_AUX
case flags.SignModeTextual:
signMode = signing.SignMode_SIGN_MODE_TEXTUAL
case flags.SignModeEIP191:
signMode = signing.SignMode_SIGN_MODE_EIP_191
}
var accNum, accSeq uint64
if clientCtx.Offline {
if flagSet.Changed(flags.FlagAccountNumber) && flagSet.Changed(flags.FlagSequence) {
accNum = clientCtx.Viper.GetUint64(flags.FlagAccountNumber)
accSeq = clientCtx.Viper.GetUint64(flags.FlagSequence)
} else {
return Factory{}, errors.New("account-number and sequence must be set in offline mode")
}
}
gasAdj := clientCtx.Viper.GetFloat64(flags.FlagGasAdjustment)
memo := clientCtx.Viper.GetString(flags.FlagNote)
timeoutHeight := clientCtx.Viper.GetUint64(flags.FlagTimeoutHeight)
gasStr := clientCtx.Viper.GetString(flags.FlagGas)
gasSetting, _ := flags.ParseGasSetting(gasStr)
f := Factory{
txConfig: clientCtx.TxConfig,
accountRetriever: clientCtx.AccountRetriever,
keybase: clientCtx.Keyring,
chainID: clientCtx.ChainID,
offline: clientCtx.Offline,
generateOnly: clientCtx.GenerateOnly,
gas: gasSetting.Gas,
simulateAndExecute: gasSetting.Simulate,
accountNumber: accNum,
sequence: accSeq,
timeoutHeight: timeoutHeight,
gasAdjustment: gasAdj,
memo: memo,
signMode: signMode,
feeGranter: clientCtx.FeeGranter,
feePayer: clientCtx.FeePayer,
}
feesStr := clientCtx.Viper.GetString(flags.FlagFees)
f = f.WithFees(feesStr)
tipsStr := clientCtx.Viper.GetString(flags.FlagTip)
// Add tips to factory. The tipper is necessarily the Msg signer, i.e.
// the from address.
f = f.WithTips(tipsStr, clientCtx.FromAddress.String())
gasPricesStr := clientCtx.Viper.GetString(flags.FlagGasPrices)
f = f.WithGasPrices(gasPricesStr)
f = f.WithPreprocessTxHook(clientCtx.PreprocessTxHook)
return f, nil
}
func (f Factory) AccountNumber() uint64 { return f.accountNumber }
func (f Factory) Sequence() uint64 { return f.sequence }
func (f Factory) Gas() uint64 { return f.gas }
func (f Factory) GasAdjustment() float64 { return f.gasAdjustment }
func (f Factory) Keybase() keyring.Keyring { return f.keybase }
func (f Factory) ChainID() string { return f.chainID }
func (f Factory) Memo() string { return f.memo }
func (f Factory) Fees() sdk.Coins { return f.fees }
func (f Factory) GasPrices() sdk.DecCoins { return f.gasPrices }
func (f Factory) AccountRetriever() client.AccountRetriever { return f.accountRetriever }
func (f Factory) TimeoutHeight() uint64 { return f.timeoutHeight }
// SimulateAndExecute returns the option to simulate and then execute the transaction
// using the gas from the simulation results
func (f Factory) SimulateAndExecute() bool { return f.simulateAndExecute }
// WithTxConfig returns a copy of the Factory with an updated TxConfig.
func (f Factory) WithTxConfig(g client.TxConfig) Factory {
f.txConfig = g
return f
}
// WithAccountRetriever returns a copy of the Factory with an updated AccountRetriever.
func (f Factory) WithAccountRetriever(ar client.AccountRetriever) Factory {
f.accountRetriever = ar
return f
}
// WithChainID returns a copy of the Factory with an updated chainID.
func (f Factory) WithChainID(chainID string) Factory {
f.chainID = chainID
return f
}
// WithGas returns a copy of the Factory with an updated gas value.
func (f Factory) WithGas(gas uint64) Factory {
f.gas = gas
return f
}
// WithFees returns a copy of the Factory with an updated fee.
func (f Factory) WithFees(fees string) Factory {
parsedFees, err := sdk.ParseCoinsNormalized(fees)
if err != nil {
panic(err)
}
f.fees = parsedFees
return f
}
// WithTips returns a copy of the Factory with an updated tip.
func (f Factory) WithTips(tip, tipper string) Factory {
parsedTips, err := sdk.ParseCoinsNormalized(tip)
if err != nil {
panic(err)
}
f.tip = &tx.Tip{
Tipper: tipper,
Amount: parsedTips,
}
return f
}
// WithGasPrices returns a copy of the Factory with updated gas prices.
func (f Factory) WithGasPrices(gasPrices string) Factory {
parsedGasPrices, err := sdk.ParseDecCoins(gasPrices)
if err != nil {
panic(err)
}
f.gasPrices = parsedGasPrices
return f
}
// WithKeybase returns a copy of the Factory with updated Keybase.
func (f Factory) WithKeybase(keybase keyring.Keyring) Factory {
f.keybase = keybase
return f
}
// WithSequence returns a copy of the Factory with an updated sequence number.
func (f Factory) WithSequence(sequence uint64) Factory {
f.sequence = sequence
return f
}
// WithMemo returns a copy of the Factory with an updated memo.
func (f Factory) WithMemo(memo string) Factory {
f.memo = memo
return f
}
// WithAccountNumber returns a copy of the Factory with an updated account number.
func (f Factory) WithAccountNumber(accnum uint64) Factory {
f.accountNumber = accnum
return f
}
// WithGasAdjustment returns a copy of the Factory with an updated gas adjustment.
func (f Factory) WithGasAdjustment(gasAdj float64) Factory {
f.gasAdjustment = gasAdj
return f
}
// WithSimulateAndExecute returns a copy of the Factory with an updated gas
// simulation value.
func (f Factory) WithSimulateAndExecute(sim bool) Factory {
f.simulateAndExecute = sim
return f
}
// SignMode returns the sign mode configured in the Factory
func (f Factory) SignMode() signing.SignMode {
return f.signMode
}
// WithSignMode returns a copy of the Factory with an updated sign mode value.
func (f Factory) WithSignMode(mode signing.SignMode) Factory {
f.signMode = mode
return f
}
// WithTimeoutHeight returns a copy of the Factory with an updated timeout height.
func (f Factory) WithTimeoutHeight(height uint64) Factory {
f.timeoutHeight = height
return f
}
// WithFeeGranter returns a copy of the Factory with an updated fee granter.
func (f Factory) WithFeeGranter(fg sdk.AccAddress) Factory {
f.feeGranter = fg
return f
}
// WithFeePayer returns a copy of the Factory with an updated fee granter.
func (f Factory) WithFeePayer(fp sdk.AccAddress) Factory {
f.feePayer = fp
return f
}
// WithPreprocessTxHook returns a copy of the Factory with an updated preprocess tx function,
// allows for preprocessing of transaction data using the TxBuilder.
func (f Factory) WithPreprocessTxHook(preprocessFn client.PreprocessTxFn) Factory {
f.preprocessTxHook = preprocessFn
return f
}
// PreprocessTx calls the preprocessing hook with the factory parameters and
// returns the result.
func (f Factory) PreprocessTx(keyname string, builder client.TxBuilder) error {
if f.preprocessTxHook == nil {
// Allow pass-through
return nil
}
key, err := f.Keybase().Key(keyname)
if err != nil {
return fmt.Errorf("error retrieving key from keyring: %w", err)
}
return f.preprocessTxHook(f.chainID, key.GetType(), builder)
}
// WithExtensionOptions returns a Factory with given extension options added to the existing options,
// Example to add dynamic | feeGranter sdk.AccAddress | random_line_split |
|
factory.go | AndExecute(sim bool) Factory {
f.simulateAndExecute = sim
return f
}
// SignMode returns the sign mode configured in the Factory
func (f Factory) SignMode() signing.SignMode {
return f.signMode
}
// WithSignMode returns a copy of the Factory with an updated sign mode value.
func (f Factory) WithSignMode(mode signing.SignMode) Factory {
f.signMode = mode
return f
}
// WithTimeoutHeight returns a copy of the Factory with an updated timeout height.
func (f Factory) WithTimeoutHeight(height uint64) Factory {
f.timeoutHeight = height
return f
}
// WithFeeGranter returns a copy of the Factory with an updated fee granter.
func (f Factory) WithFeeGranter(fg sdk.AccAddress) Factory {
f.feeGranter = fg
return f
}
// WithFeePayer returns a copy of the Factory with an updated fee granter.
func (f Factory) WithFeePayer(fp sdk.AccAddress) Factory {
f.feePayer = fp
return f
}
// WithPreprocessTxHook returns a copy of the Factory with an updated preprocess tx function,
// allows for preprocessing of transaction data using the TxBuilder.
func (f Factory) WithPreprocessTxHook(preprocessFn client.PreprocessTxFn) Factory {
f.preprocessTxHook = preprocessFn
return f
}
// PreprocessTx calls the preprocessing hook with the factory parameters and
// returns the result.
func (f Factory) PreprocessTx(keyname string, builder client.TxBuilder) error {
if f.preprocessTxHook == nil {
// Allow pass-through
return nil
}
key, err := f.Keybase().Key(keyname)
if err != nil {
return fmt.Errorf("error retrieving key from keyring: %w", err)
}
return f.preprocessTxHook(f.chainID, key.GetType(), builder)
}
// WithExtensionOptions returns a Factory with given extension options added to the existing options,
// Example to add dynamic fee extension options:
//
// extOpt := ethermint.ExtensionOptionDynamicFeeTx{
// MaxPriorityPrice: math.NewInt(1000000),
// }
//
// extBytes, _ := extOpt.Marshal()
//
// extOpts := []*types.Any{
// {
// TypeUrl: "/ethermint.types.v1.ExtensionOptionDynamicFeeTx",
// Value: extBytes,
// },
// }
//
// txf.WithExtensionOptions(extOpts...)
func (f Factory) WithExtensionOptions(extOpts ...*codectypes.Any) Factory {
f.extOptions = extOpts
return f
}
// BuildUnsignedTx builds a transaction to be signed given a set of messages.
// Once created, the fee, memo, and messages are set.
func (f Factory) BuildUnsignedTx(msgs ...sdk.Msg) (client.TxBuilder, error) {
if f.offline && f.generateOnly {
if f.chainID != "" {
return nil, fmt.Errorf("chain ID cannot be used when offline and generate-only flags are set")
}
} else if f.chainID == "" {
return nil, fmt.Errorf("chain ID required but not specified")
}
fees := f.fees
if !f.gasPrices.IsZero() {
if !fees.IsZero() {
return nil, errors.New("cannot provide both fees and gas prices")
}
glDec := math.LegacyNewDec(int64(f.gas))
// Derive the fees based on the provided gas prices, where
// fee = ceil(gasPrice * gasLimit).
fees = make(sdk.Coins, len(f.gasPrices))
for i, gp := range f.gasPrices {
fee := gp.Amount.Mul(glDec)
fees[i] = sdk.NewCoin(gp.Denom, fee.Ceil().RoundInt())
}
}
// Prevent simple inclusion of a valid mnemonic in the memo field
if f.memo != "" && bip39.IsMnemonicValid(strings.ToLower(f.memo)) {
return nil, errors.New("cannot provide a valid mnemonic seed in the memo field")
}
tx := f.txConfig.NewTxBuilder()
if err := tx.SetMsgs(msgs...); err != nil {
return nil, err
}
tx.SetMemo(f.memo)
tx.SetFeeAmount(fees)
tx.SetGasLimit(f.gas)
tx.SetFeeGranter(f.feeGranter)
tx.SetFeePayer(f.feePayer)
tx.SetTimeoutHeight(f.TimeoutHeight())
if etx, ok := tx.(client.ExtendedTxBuilder); ok {
etx.SetExtensionOptions(f.extOptions...)
}
return tx, nil
}
// PrintUnsignedTx will generate an unsigned transaction and print it to the writer
// specified by ctx.Output. If simulation was requested, the gas will be
// simulated and also printed to the same writer before the transaction is
// printed.
func (f Factory) PrintUnsignedTx(clientCtx client.Context, msgs ...sdk.Msg) error {
if f.SimulateAndExecute() {
if clientCtx.Offline {
return errors.New("cannot estimate gas in offline mode")
}
// Prepare TxFactory with acc & seq numbers as CalculateGas requires
// account and sequence numbers to be set
preparedTxf, err := f.Prepare(clientCtx)
if err != nil {
return err
}
_, adjusted, err := CalculateGas(clientCtx, preparedTxf, msgs...)
if err != nil {
return err
}
f = f.WithGas(adjusted)
_, _ = fmt.Fprintf(os.Stderr, "%s\n", GasEstimateResponse{GasEstimate: f.Gas()})
}
unsignedTx, err := f.BuildUnsignedTx(msgs...)
if err != nil {
return err
}
json, err := clientCtx.TxConfig.TxJSONEncoder()(unsignedTx.GetTx())
if err != nil {
return err
}
return clientCtx.PrintString(fmt.Sprintf("%s\n", json))
}
// BuildSimTx creates an unsigned tx with an empty single signature and returns
// the encoded transaction or an error if the unsigned transaction cannot be
// built.
func (f Factory) BuildSimTx(msgs ...sdk.Msg) ([]byte, error) {
txb, err := f.BuildUnsignedTx(msgs...)
if err != nil {
return nil, err
}
pk, err := f.getSimPK()
if err != nil {
return nil, err
}
// Create an empty signature literal as the ante handler will populate with a
// sentinel pubkey.
sig := signing.SignatureV2{
PubKey: pk,
Data: &signing.SingleSignatureData{
SignMode: f.signMode,
},
Sequence: f.Sequence(),
}
if err := txb.SetSignatures(sig); err != nil {
return nil, err
}
return f.txConfig.TxEncoder()(txb.GetTx())
}
// getSimPK gets the public key to use for building a simulation tx.
// Note, we should only check for keys in the keybase if we are in simulate and execute mode,
// e.g. when using --gas=auto.
// When using --dry-run, we are is simulation mode only and should not check the keybase.
// Ref: https://github.com/cosmos/cosmos-sdk/issues/11283
func (f Factory) getSimPK() (cryptotypes.PubKey, error) {
var (
ok bool
pk cryptotypes.PubKey = &secp256k1.PubKey{} // use default public key type
)
// Use the first element from the list of keys in order to generate a valid
// pubkey that supports multiple algorithms.
if f.simulateAndExecute && f.keybase != nil {
records, _ := f.keybase.List()
if len(records) == 0 {
return nil, errors.New("cannot build signature for simulation, key records slice is empty")
}
// take the first record just for simulation purposes
pk, ok = records[0].PubKey.GetCachedValue().(cryptotypes.PubKey)
if !ok {
return nil, errors.New("cannot build signature for simulation, failed to convert proto Any to public key")
}
}
return pk, nil
}
// Prepare ensures the account defined by ctx.GetFromAddress() exists and
// if the account number and/or the account sequence number are zero (not set),
// they will be queried for and set on the provided Factory.
// A new Factory with the updated fields will be returned.
// Note: When in offline mode, the Prepare does nothing and returns the original factory.
func (f Factory) Prepare(clientCtx client.Context) (Factory, error) {
if clientCtx.Offline {
return f, nil
}
fc := f
from := clientCtx.GetFromAddress()
if err := fc.accountRetriever.EnsureExists(clientCtx, from); err != nil {
return fc, err
}
initNum, initSeq := fc.accountNumber, fc.sequence
if initNum == 0 || initSeq == 0 {
num, seq, err := fc.accountRetriever.GetAccountNumberSequence(clientCtx, from)
if err != nil {
return fc, err
}
if initNum == 0 {
fc = fc.WithAccountNumber(num)
}
if initSeq == 0 | {
fc = fc.WithSequence(seq)
} | conditional_block |
|
mgclarge.go | Iter {
f := treapFilter(mask, match)
return treapIter{f, root.treap.findMaximal(f)}
}
// mutate allows one to mutate the span without removing it from the treap via a
// callback. The span's base and size are allowed to change as long as the span
// remains in the same order relative to its predecessor and successor.
//
// Note however that any operation that causes a treap rebalancing inside of fn
// is strictly forbidden, as that may cause treap node metadata to go
// out-of-sync.
func (root *mTreap) mutate(i treapIter, fn func(span *mspan)) {
s := i.span()
// Save some state about the span for later inspection.
hpages := s.hugePages()
scavenged := s.scavenged
// Call the mutator.
fn(s)
// Update unscavHugePages appropriately.
if !scavenged {
mheap_.free.unscavHugePages -= hpages
}
if !s.scavenged {
mheap_.free.unscavHugePages += s.hugePages()
}
// Update the key in case the base changed.
i.t.key = s.base()
// Updating invariants up the tree needs to happen if
// anything changed at all, so just go ahead and do it
// unconditionally.
//
// If it turns out nothing changed, it'll exit quickly.
t := i.t
for t != nil && t.updateInvariants() {
t = t.parent
}
}
// insert adds span to the large span treap.
func (root *mTreap) insert(span *mspan) {
if !span.scavenged {
root.unscavHugePages += span.hugePages()
}
base := span.base()
var last *treapNode
pt := &root.treap
for t := *pt; t != nil; t = *pt {
last = t
if t.key < base {
pt = &t.right
} else if t.key > base {
pt = &t.left
} else {
throw("inserting span already in treap")
}
}
// Add t as new leaf in tree of span size and unique addrs.
// The balanced tree is a treap using priority as the random heap priority.
// That is, it is a binary tree ordered according to the key,
// but then among the space of possible binary trees respecting those
// keys, it is kept balanced on average by maintaining a heap ordering
// on the priority: s.priority <= both s.right.priority and s.right.priority.
// https://en.wikipedia.org/wiki/Treap
// https://faculty.washington.edu/aragon/pubs/rst89.pdf
t := (*treapNode)(mheap_.treapalloc.alloc())
t.key = span.base()
t.priority = fastrand()
t.span = span
t.maxPages = span.npages
t.types = span.treapFilter()
t.parent = last
*pt = t // t now at a leaf.
// Update the tree to maintain the various invariants.
i := t
for i.parent != nil && i.parent.updateInvariants() {
i = i.parent
}
// Rotate up into tree according to priority.
for t.parent != nil && t.parent.priority > t.priority {
if t != nil && t.span.base() != t.key {
println("runtime: insert t=", t, "t.key=", t.key)
println("runtime: t.span=", t.span, "t.span.base()=", t.span.base())
throw("span and treap node base addresses do not match")
}
if t.parent.left == t {
root.rotateRight(t.parent)
} else {
if t.parent.right != t {
throw("treap insert finds a broken treap")
}
root.rotateLeft(t.parent)
}
}
}
func (root *mTreap) removeNode(t *treapNode) {
if !t.span.scavenged {
root.unscavHugePages -= t.span.hugePages()
}
if t.span.base() != t.key {
throw("span and treap node base addresses do not match")
}
// Rotate t down to be leaf of tree for removal, respecting priorities.
for t.right != nil || t.left != nil {
if t.right == nil || t.left != nil && t.left.priority < t.right.priority {
root.rotateRight(t)
} else {
root.rotateLeft(t)
}
}
// Remove t, now a leaf.
if t.parent != nil {
p := t.parent
if p.left == t {
p.left = nil
} else {
p.right = nil
}
// Walk up the tree updating invariants until no updates occur.
for p != nil && p.updateInvariants() {
p = p.parent
}
} else {
root.treap = nil
}
// Return the found treapNode's span after freeing the treapNode.
mheap_.treapalloc.free(unsafe.Pointer(t))
}
// find searches for, finds, and returns the treap iterator over all spans
// representing the position of the span with the smallest base address which is
// at least npages in size. If no span has at least npages it returns an invalid
// iterator.
//
// This algorithm is as follows:
// * If there's a left child and its subtree can satisfy this allocation,
// continue down that subtree.
// * If there's no such left child, check if the root of this subtree can
// satisfy the allocation. If so, we're done.
// * If the root cannot satisfy the allocation either, continue down the
// right subtree if able.
// * Else, break and report that we cannot satisfy the allocation.
//
// The preference for left, then current, then right, results in us getting
// the left-most node which will contain the span with the lowest base
// address.
//
// Note that if a request cannot be satisfied the fourth case will be
// reached immediately at the root, since neither the left subtree nor
// the right subtree will have a sufficient maxPages, whilst the root
// node is also unable to satisfy it.
func (root *mTreap) find(npages uintptr) treapIter {
t := root.treap
for t != nil {
if t.span == nil {
throw("treap node with nil span found")
}
// Iterate over the treap trying to go as far left
// as possible while simultaneously ensuring that the
// subtrees we choose always have a span which can
// satisfy the allocation.
if t.left != nil && t.left.maxPages >= npages {
t = t.left
} else if t.span.npages >= npages {
// Before going right, if this span can satisfy the
// request, stop here.
break
} else if t.right != nil && t.right.maxPages >= npages {
t = t.right
} else {
t = nil
}
}
return treapIter{treapFilterAll, t}
}
// removeSpan searches for, finds, deletes span along with
// the associated treap node. If the span is not in the treap
// then t will eventually be set to nil and the t.span
// will throw.
func (root *mTreap) removeSpan(span *mspan) {
base := span.base()
t := root.treap
for t.span != span {
if t.key < base {
t = t.right
} else if t.key > base {
t = t.left
}
}
root.removeNode(t)
}
// erase removes the element referred to by the current position of the
// iterator. This operation consumes the given iterator, so it should no
// longer be used. It is up to the caller to get the next or previous
// iterator before calling erase, if need be.
func (root *mTreap) erase(i treapIter) {
root.removeNode(i.t)
}
// rotateLeft rotates the tree rooted at node x.
// turning (x a (y b c)) into (y (x a b) c).
func (root *mTreap) rotateLeft(x *treapNode) {
// p -> (x a (y b c))
p := x.parent
a, y := x.left, x.right
b, c := y.left, y.right
y.left = x
x.parent = y
y.right = c
if c != nil {
c.parent = y
}
x.left = a
if a != nil {
a.parent = x
}
x.right = b
if b != nil {
b.parent = x
}
y.parent = p
if p == nil {
root.treap = y
} else if p.left == x {
p.left = y
} else {
if p.right != x {
throw("large span treap rotateLeft")
}
p.right = y
}
x.updateInvariants()
y.updateInvariants()
}
// rotateRight rotates the tree rooted at node y.
// turning (y (x a b) c) into (x a (y b c)).
func (root *mTreap) | rotateRight | identifier_name |
|
mgclarge.go |
// pred returns the predecessor of t in the treap subject to the criteria
// specified by the filter f. Returns nil if no such predecessor exists.
func (t *treapNode) pred(f treapIterFilter) *treapNode {
if t.left != nil && f.matches(t.left.types) {
// The node has a left subtree which contains at least one matching
// node, find the maximal matching node in that subtree.
return t.left.findMaximal(f)
}
// Lacking a left subtree, look to the parents.
p := t // previous node
t = t.parent
for t != nil {
// Walk up the tree until we find a node that has a left subtree
// that we haven't already visited.
if t.right == p {
if f.matches(t.span.treapFilter()) {
// If this node matches, then it's guaranteed to be the
// predecessor since everything to its left is strictly
// greater.
return t
} else if t.left != nil && f.matches(t.left.types) {
// Failing the root of this subtree, if its left subtree has
// something, that's where we'll find our predecessor.
return t.left.findMaximal(f)
}
}
p = t
t = t.parent
}
// If the parent is nil, then we've hit the root without finding
// a suitable left subtree containing the node (and the predecessor
// wasn't on the path). Thus, there's no predecessor, so just return
// nil.
return nil
}
// succ returns the successor of t in the treap subject to the criteria
// specified by the filter f. Returns nil if no such successor exists.
func (t *treapNode) succ(f treapIterFilter) *treapNode {
// See pred. This method is just the logical inversion of it.
if t.right != nil && f.matches(t.right.types) {
return t.right.findMinimal(f)
}
p := t
t = t.parent
for t != nil {
if t.left == p {
if f.matches(t.span.treapFilter()) {
return t
} else if t.right != nil && f.matches(t.right.types) {
return t.right.findMinimal(f)
}
}
p = t
t = t.parent
}
return nil
}
// isSpanInTreap is handy for debugging. One should hold the heap lock, usually
// mheap_.lock().
func (t *treapNode) isSpanInTreap(s *mspan) bool {
if t == nil {
return false
}
return t.span == s || t.left.isSpanInTreap(s) || t.right.isSpanInTreap(s)
}
// walkTreap is handy for debugging and testing.
// Starting at some treapnode t, for example the root, do a depth first preorder walk of
// the tree executing fn at each treap node. One should hold the heap lock, usually
// mheap_.lock().
func (t *treapNode) walkTreap(fn func(tn *treapNode)) {
if t == nil {
return
}
fn(t)
t.left.walkTreap(fn)
t.right.walkTreap(fn)
}
// checkTreapNode when used in conjunction with walkTreap can usually detect a
// poorly formed treap.
func checkTreapNode(t *treapNode) {
if t == nil {
return
}
if t.span.next != nil || t.span.prev != nil || t.span.list != nil {
throw("span may be on an mSpanList while simultaneously in the treap")
}
if t.span.base() != t.key {
println("runtime: checkTreapNode treapNode t=", t, " t.key=", t.key,
"t.span.base()=", t.span.base())
throw("why does span.base() and treap.key do not match?")
}
if t.left != nil && t.key < t.left.key {
throw("found out-of-order spans in treap (left child has greater base address)")
}
if t.right != nil && t.key > t.right.key {
throw("found out-of-order spans in treap (right child has lesser base address)")
}
}
// validateInvariants is handy for debugging and testing.
// It ensures that the various invariants on each treap node are
// appropriately maintained throughout the treap by walking the
// treap in a post-order manner.
func (t *treapNode) validateInvariants() (uintptr, treapIterFilter) {
if t == nil {
return 0, 0
}
leftMax, leftTypes := t.left.validateInvariants()
rightMax, rightTypes := t.right.validateInvariants()
max := t.span.npages
if leftMax > max {
max = leftMax
}
if rightMax > max {
max = rightMax
}
if max != t.maxPages {
println("runtime: t.maxPages=", t.maxPages, "want=", max)
throw("maxPages invariant violated in treap")
}
typ := t.span.treapFilter() | leftTypes | rightTypes
if typ != t.types {
println("runtime: t.types=", t.types, "want=", typ)
throw("types invariant violated in treap")
}
return max, typ
}
// treapIterType represents the type of iteration to perform
// over the treap. Each different flag is represented by a bit
// in the type, and types may be combined together by a bitwise
// or operation.
//
// Note that only 5 bits are available for treapIterType, do not
// use the 3 higher-order bits. This constraint is to allow for
// expansion into a treapIterFilter, which is a uint32.
type treapIterType uint8
const (
treapIterScav treapIterType = 1 << iota // scavenged spans
treapIterHuge // spans containing at least one huge page
treapIterBits = iota
)
// treapIterFilter is a bitwise filter of different spans by binary
// properties. Each bit of a treapIterFilter represents a unique
// combination of bits set in a treapIterType, in other words, it
// represents the power set of a treapIterType.
//
// The purpose of this representation is to allow the existence of
// a specific span type to bubble up in the treap (see the types
// field on treapNode).
//
// More specifically, any treapIterType may be transformed into a
// treapIterFilter for a specific combination of flags via the
// following operation: 1 << (0x1f&treapIterType).
type treapIterFilter uint32
// treapFilterAll represents the filter which allows all spans.
const treapFilterAll = ^treapIterFilter(0)
// treapFilter creates a new treapIterFilter from two treapIterTypes.
// mask represents a bitmask for which flags we should check against
// and match for the expected result after applying the mask.
func treapFilter(mask, match treapIterType) treapIterFilter {
allow := treapIterFilter(0)
for i := treapIterType(0); i < 1<<treapIterBits; i++ {
if mask&i == match {
allow |= 1 << i
}
}
return allow
}
// matches returns true if m and f intersect.
func (f treapIterFilter) matches(m treapIterFilter) bool {
return f&m != 0
}
// treapFilter returns the treapIterFilter exactly matching this span,
// i.e. popcount(result) == 1.
func (s *mspan) treapFilter() treapIterFilter {
have := treapIterType(0)
if s.scavenged {
have |= treapIterScav
}
if s.hugePages() > 0 {
have |= treapIterHuge
}
return treapIterFilter(uint32(1) << (0x1f & have))
}
// treapIter is a bidirectional iterator type which may be used to iterate over a
// an mTreap in-order forwards (increasing order) or backwards (decreasing order).
// Its purpose is to hide details about the treap from users when trying to iterate
// over it.
//
// To create iterators over the treap, call start or end on an mTreap.
type treapIter struct {
f treapIterFilter
t *treapNode
}
// span returns the span at the current position in the treap.
// If the treap is not valid, span will panic.
func (i *treapIter) span() *mspan {
return i.t.span
}
// valid returns whether the iterator | {
if t == nil || !f.matches(t.types) {
return nil
}
for t != nil {
if t.right != nil && f.matches(t.right.types) {
t = t.right
} else if f.matches(t.span.treapFilter()) {
break
} else if t.left != nil && f.matches(t.left.types) {
t = t.left
} else {
println("runtime: f=", f)
throw("failed to find minimal node matching filter")
}
}
return t
} | identifier_body |
|
mgclarge.go |
// over the treap. Each different flag is represented by a bit
// in the type, and types may be combined together by a bitwise
// or operation.
//
// Note that only 5 bits are available for treapIterType, do not
// use the 3 higher-order bits. This constraint is to allow for
// expansion into a treapIterFilter, which is a uint32.
type treapIterType uint8
const (
treapIterScav treapIterType = 1 << iota // scavenged spans
treapIterHuge // spans containing at least one huge page
treapIterBits = iota
)
// treapIterFilter is a bitwise filter of different spans by binary
// properties. Each bit of a treapIterFilter represents a unique
// combination of bits set in a treapIterType, in other words, it
// represents the power set of a treapIterType.
//
// The purpose of this representation is to allow the existence of
// a specific span type to bubble up in the treap (see the types
// field on treapNode).
//
// More specifically, any treapIterType may be transformed into a
// treapIterFilter for a specific combination of flags via the
// following operation: 1 << (0x1f&treapIterType).
type treapIterFilter uint32
// treapFilterAll represents the filter which allows all spans.
const treapFilterAll = ^treapIterFilter(0)
// treapFilter creates a new treapIterFilter from two treapIterTypes.
// mask represents a bitmask for which flags we should check against
// and match for the expected result after applying the mask.
func treapFilter(mask, match treapIterType) treapIterFilter {
allow := treapIterFilter(0)
for i := treapIterType(0); i < 1<<treapIterBits; i++ {
if mask&i == match {
allow |= 1 << i
}
}
return allow
}
// matches returns true if m and f intersect.
func (f treapIterFilter) matches(m treapIterFilter) bool {
return f&m != 0
}
// treapFilter returns the treapIterFilter exactly matching this span,
// i.e. popcount(result) == 1.
func (s *mspan) treapFilter() treapIterFilter {
have := treapIterType(0)
if s.scavenged {
have |= treapIterScav
}
if s.hugePages() > 0 {
have |= treapIterHuge
}
return treapIterFilter(uint32(1) << (0x1f & have))
}
// treapIter is a bidirectional iterator type which may be used to iterate over a
// an mTreap in-order forwards (increasing order) or backwards (decreasing order).
// Its purpose is to hide details about the treap from users when trying to iterate
// over it.
//
// To create iterators over the treap, call start or end on an mTreap.
type treapIter struct {
f treapIterFilter
t *treapNode
}
// span returns the span at the current position in the treap.
// If the treap is not valid, span will panic.
func (i *treapIter) span() *mspan {
return i.t.span
}
// valid returns whether the iterator represents a valid position
// in the mTreap.
func (i *treapIter) valid() bool {
return i.t != nil
}
// next moves the iterator forward by one. Once the iterator
// ceases to be valid, calling next will panic.
func (i treapIter) next() treapIter {
i.t = i.t.succ(i.f)
return i
}
// prev moves the iterator backwards by one. Once the iterator
// ceases to be valid, calling prev will panic.
func (i treapIter) prev() treapIter {
i.t = i.t.pred(i.f)
return i
}
// start returns an iterator which points to the start of the treap (the
// left-most node in the treap) subject to mask and match constraints.
func (root *mTreap) start(mask, match treapIterType) treapIter {
f := treapFilter(mask, match)
return treapIter{f, root.treap.findMinimal(f)}
}
// end returns an iterator which points to the end of the treap (the
// right-most node in the treap) subject to mask and match constraints.
func (root *mTreap) end(mask, match treapIterType) treapIter {
f := treapFilter(mask, match)
return treapIter{f, root.treap.findMaximal(f)}
}
// mutate allows one to mutate the span without removing it from the treap via a
// callback. The span's base and size are allowed to change as long as the span
// remains in the same order relative to its predecessor and successor.
//
// Note however that any operation that causes a treap rebalancing inside of fn
// is strictly forbidden, as that may cause treap node metadata to go
// out-of-sync.
func (root *mTreap) mutate(i treapIter, fn func(span *mspan)) {
s := i.span()
// Save some state about the span for later inspection.
hpages := s.hugePages()
scavenged := s.scavenged
// Call the mutator.
fn(s)
// Update unscavHugePages appropriately.
if !scavenged {
mheap_.free.unscavHugePages -= hpages
}
if !s.scavenged {
mheap_.free.unscavHugePages += s.hugePages()
}
// Update the key in case the base changed.
i.t.key = s.base()
// Updating invariants up the tree needs to happen if
// anything changed at all, so just go ahead and do it
// unconditionally.
//
// If it turns out nothing changed, it'll exit quickly.
t := i.t
for t != nil && t.updateInvariants() {
t = t.parent
}
}
// insert adds span to the large span treap.
func (root *mTreap) insert(span *mspan) {
if !span.scavenged {
root.unscavHugePages += span.hugePages()
}
base := span.base()
var last *treapNode
pt := &root.treap
for t := *pt; t != nil; t = *pt {
last = t
if t.key < base {
pt = &t.right
} else if t.key > base {
pt = &t.left
} else {
throw("inserting span already in treap")
}
}
// Add t as new leaf in tree of span size and unique addrs.
// The balanced tree is a treap using priority as the random heap priority.
// That is, it is a binary tree ordered according to the key,
// but then among the space of possible binary trees respecting those
// keys, it is kept balanced on average by maintaining a heap ordering
// on the priority: s.priority <= both s.right.priority and s.right.priority.
// https://en.wikipedia.org/wiki/Treap
// https://faculty.washington.edu/aragon/pubs/rst89.pdf
t := (*treapNode)(mheap_.treapalloc.alloc())
t.key = span.base()
t.priority = fastrand()
t.span = span
t.maxPages = span.npages
t.types = span.treapFilter()
t.parent = last
*pt = t // t now at a leaf.
// Update the tree to maintain the various invariants.
i := t
for i.parent != nil && i.parent.updateInvariants() {
i = i.parent
}
// Rotate up into tree according to priority.
for t.parent != nil && t.parent.priority > t.priority {
if t != nil && t.span.base() != t.key {
println("runtime: insert t=", t, "t.key=", t.key)
println("runtime: t.span=", t.span, "t.span.base()=", t.span.base())
throw("span and treap node base addresses do not match")
}
if t.parent.left == t {
root.rotateRight(t.parent)
} else {
if t.parent.right != t {
throw("treap insert finds a broken treap")
}
root.rotateLeft(t.parent)
}
}
}
func (root *mTreap) removeNode(t *treapNode) {
if !t.span.scavenged {
root.unscavHugePages -= t.span.hugePages()
}
if t.span.base() != t.key {
throw("span and treap node base addresses do not match")
}
// Rotate t down to be leaf of tree for removal, respecting priorities.
for t.right != nil || t.left != nil {
if t.right == nil || t.left != nil && t.left.priority < t.right.priority {
root.rotateRight(t)
} else {
root.rotateLeft(t)
}
}
// Remove t, now a leaf.
if t.parent != nil {
p := t.parent
if p.left == t | {
p.left = nil
} | conditional_block |
|
mgclarge.go | ap rebalancing inside of fn
// is strictly forbidden, as that may cause treap node metadata to go
// out-of-sync.
func (root *mTreap) mutate(i treapIter, fn func(span *mspan)) {
s := i.span()
// Save some state about the span for later inspection.
hpages := s.hugePages()
scavenged := s.scavenged
// Call the mutator.
fn(s)
// Update unscavHugePages appropriately.
if !scavenged {
mheap_.free.unscavHugePages -= hpages
}
if !s.scavenged {
mheap_.free.unscavHugePages += s.hugePages()
}
// Update the key in case the base changed.
i.t.key = s.base()
// Updating invariants up the tree needs to happen if
// anything changed at all, so just go ahead and do it
// unconditionally.
//
// If it turns out nothing changed, it'll exit quickly.
t := i.t
for t != nil && t.updateInvariants() {
t = t.parent
}
}
// insert adds span to the large span treap.
func (root *mTreap) insert(span *mspan) {
if !span.scavenged {
root.unscavHugePages += span.hugePages()
}
base := span.base()
var last *treapNode
pt := &root.treap
for t := *pt; t != nil; t = *pt {
last = t
if t.key < base {
pt = &t.right
} else if t.key > base {
pt = &t.left
} else {
throw("inserting span already in treap")
}
}
// Add t as new leaf in tree of span size and unique addrs.
// The balanced tree is a treap using priority as the random heap priority.
// That is, it is a binary tree ordered according to the key,
// but then among the space of possible binary trees respecting those
// keys, it is kept balanced on average by maintaining a heap ordering
// on the priority: s.priority <= both s.right.priority and s.right.priority.
// https://en.wikipedia.org/wiki/Treap
// https://faculty.washington.edu/aragon/pubs/rst89.pdf
t := (*treapNode)(mheap_.treapalloc.alloc())
t.key = span.base()
t.priority = fastrand()
t.span = span
t.maxPages = span.npages
t.types = span.treapFilter()
t.parent = last
*pt = t // t now at a leaf.
// Update the tree to maintain the various invariants.
i := t
for i.parent != nil && i.parent.updateInvariants() {
i = i.parent
}
// Rotate up into tree according to priority.
for t.parent != nil && t.parent.priority > t.priority {
if t != nil && t.span.base() != t.key {
println("runtime: insert t=", t, "t.key=", t.key)
println("runtime: t.span=", t.span, "t.span.base()=", t.span.base())
throw("span and treap node base addresses do not match")
}
if t.parent.left == t {
root.rotateRight(t.parent)
} else {
if t.parent.right != t {
throw("treap insert finds a broken treap")
}
root.rotateLeft(t.parent)
}
}
}
func (root *mTreap) removeNode(t *treapNode) {
if !t.span.scavenged {
root.unscavHugePages -= t.span.hugePages()
}
if t.span.base() != t.key {
throw("span and treap node base addresses do not match")
}
// Rotate t down to be leaf of tree for removal, respecting priorities.
for t.right != nil || t.left != nil {
if t.right == nil || t.left != nil && t.left.priority < t.right.priority {
root.rotateRight(t)
} else {
root.rotateLeft(t)
}
}
// Remove t, now a leaf.
if t.parent != nil {
p := t.parent
if p.left == t {
p.left = nil
} else {
p.right = nil
}
// Walk up the tree updating invariants until no updates occur.
for p != nil && p.updateInvariants() {
p = p.parent
}
} else {
root.treap = nil
}
// Return the found treapNode's span after freeing the treapNode.
mheap_.treapalloc.free(unsafe.Pointer(t))
}
// find searches for, finds, and returns the treap iterator over all spans
// representing the position of the span with the smallest base address which is
// at least npages in size. If no span has at least npages it returns an invalid
// iterator.
//
// This algorithm is as follows:
// * If there's a left child and its subtree can satisfy this allocation,
// continue down that subtree.
// * If there's no such left child, check if the root of this subtree can
// satisfy the allocation. If so, we're done.
// * If the root cannot satisfy the allocation either, continue down the
// right subtree if able.
// * Else, break and report that we cannot satisfy the allocation.
//
// The preference for left, then current, then right, results in us getting
// the left-most node which will contain the span with the lowest base
// address.
//
// Note that if a request cannot be satisfied the fourth case will be
// reached immediately at the root, since neither the left subtree nor
// the right subtree will have a sufficient maxPages, whilst the root
// node is also unable to satisfy it.
func (root *mTreap) find(npages uintptr) treapIter {
t := root.treap
for t != nil {
if t.span == nil {
throw("treap node with nil span found")
}
// Iterate over the treap trying to go as far left
// as possible while simultaneously ensuring that the
// subtrees we choose always have a span which can
// satisfy the allocation.
if t.left != nil && t.left.maxPages >= npages {
t = t.left
} else if t.span.npages >= npages {
// Before going right, if this span can satisfy the
// request, stop here.
break
} else if t.right != nil && t.right.maxPages >= npages {
t = t.right
} else {
t = nil
}
}
return treapIter{treapFilterAll, t}
}
// removeSpan searches for, finds, deletes span along with
// the associated treap node. If the span is not in the treap
// then t will eventually be set to nil and the t.span
// will throw.
func (root *mTreap) removeSpan(span *mspan) {
base := span.base()
t := root.treap
for t.span != span {
if t.key < base {
t = t.right
} else if t.key > base {
t = t.left
}
}
root.removeNode(t)
}
// erase removes the element referred to by the current position of the
// iterator. This operation consumes the given iterator, so it should no
// longer be used. It is up to the caller to get the next or previous
// iterator before calling erase, if need be.
func (root *mTreap) erase(i treapIter) {
root.removeNode(i.t)
}
// rotateLeft rotates the tree rooted at node x.
// turning (x a (y b c)) into (y (x a b) c).
func (root *mTreap) rotateLeft(x *treapNode) {
// p -> (x a (y b c))
p := x.parent
a, y := x.left, x.right
b, c := y.left, y.right
y.left = x
x.parent = y
y.right = c
if c != nil {
c.parent = y
}
x.left = a
if a != nil {
a.parent = x
}
x.right = b
if b != nil {
b.parent = x
}
y.parent = p
if p == nil {
root.treap = y
} else if p.left == x {
p.left = y
} else {
if p.right != x {
throw("large span treap rotateLeft")
}
p.right = y
}
x.updateInvariants()
y.updateInvariants()
}
// rotateRight rotates the tree rooted at node y.
// turning (y (x a b) c) into (x a (y b c)).
func (root *mTreap) rotateRight(y *treapNode) {
// p -> (y (x a b) c)
p := y.parent
x, c := y.left, y.right
a, b := x.left, x.right
x.left = a
if a != nil {
a.parent = x
}
x.right = y | y.parent = x
y.left = b
if b != nil {
b.parent = y
} | random_line_split |
|
variables.go | elletier/go-toml"
)
const (
bootTimeoutSecondsDefault = 60
bootRetrySecondsDefault = 1
defaultConfDirValue = "./res"
envKeyConfigUrl = "EDGEX_CONFIGURATION_PROVIDER"
envKeyUseRegistry = "EDGEX_USE_REGISTRY"
envKeyStartupDuration = "EDGEX_STARTUP_DURATION"
envKeyStartupInterval = "EDGEX_STARTUP_INTERVAL"
envConfDir = "EDGEX_CONF_DIR"
envProfile = "EDGEX_PROFILE"
envFile = "EDGEX_CONFIG_FILE"
tomlPathSeparator = "."
tomlNameSeparator = "-"
envNameSeparator = "_"
)
// Variables is receiver that holds Variables variables and encapsulates toml.Tree-based configuration field
// overrides. Assumes "_" embedded in Variables variable key separates sub-structs; e.g. foo_bar_baz might refer to
//
// type foo struct {
// bar struct {
// baz string
// }
// }
type Variables struct {
variables map[string]string
lc logger.LoggingClient
}
// NewVariables constructor reads/stores os.Environ() for use by Variables receiver methods.
func NewVariables(lc logger.LoggingClient) *Variables {
osEnv := os.Environ()
e := &Variables{
variables: make(map[string]string, len(osEnv)),
lc: lc,
}
for _, env := range osEnv {
// Can not use Split() on '=' since the value may have an '=' in it, so changed to use Index()
index := strings.Index(env, "=")
if index == -1 {
continue
}
key := env[:index]
value := env[index+1:]
e.variables[key] = value
}
return e
}
// UseRegistry returns whether the envKeyUseRegistry key is set to true and whether the override was used
func (e *Variables) UseRegistry() (bool, bool) {
value := os.Getenv(envKeyUseRegistry)
if len(value) == 0 {
return false, false
}
logEnvironmentOverride(e.lc, "-r/--registry", envKeyUseRegistry, value)
return value == "true", true
}
// OverrideConfiguration method replaces values in the configuration for matching Variables variable keys.
// serviceConfig must be pointer to the service configuration.
func (e *Variables) OverrideConfiguration(serviceConfig interface{}) (int, error) {
var overrideCount = 0
contents, err := toml.Marshal(reflect.ValueOf(serviceConfig).Elem().Interface())
if err != nil {
return 0, err
}
configTree, err := toml.LoadBytes(contents)
if err != nil {
return 0, err
}
// The toml.Tree API keys() only return to top level keys, rather that paths.
// It is also missing a GetPaths so have to spin our own
paths := e.buildPaths(configTree.ToMap())
// Now that we have all the paths in the config tree, we need to create map of corresponding override names that
// could match override environment variable names.
overrideNames := e.buildOverrideNames(paths)
for envVar, envValue := range e.variables {
path, found := overrideNames[envVar]
if !found {
continue
}
oldValue := configTree.Get(path)
newValue, err := e.convertToType(oldValue, envValue)
if err != nil {
return 0, fmt.Errorf("environment value override failed for %s=%s: %s", envVar, envValue, err.Error())
}
configTree.Set(path, newValue)
overrideCount++
logEnvironmentOverride(e.lc, path, envVar, envValue)
}
// Put the configuration back into the services configuration struct with the overridden values
err = configTree.Unmarshal(serviceConfig)
if err != nil {
return 0, fmt.Errorf("could not marshal toml configTree to configuration: %s", err.Error())
}
return overrideCount, nil
}
// buildPaths create the path strings for all settings in the Config tree's key map
func (e *Variables) buildPaths(keyMap map[string]interface{}) []string {
var paths []string
for key, item := range keyMap {
if reflect.TypeOf(item).Kind() != reflect.Map {
paths = append(paths, key)
continue
}
subMap := item.(map[string]interface{})
subPaths := e.buildPaths(subMap)
for _, path := range subPaths {
paths = append(paths, fmt.Sprintf("%s.%s", key, path))
}
}
return paths
}
func (e *Variables) buildOverrideNames(paths []string) map[string]string {
names := map[string]string{}
for _, path := range paths {
names[e.getOverrideNameFor(path)] = path
}
return names
}
func (_ *Variables) getOverrideNameFor(path string) string {
// "." & "-" are the only special character allowed in TOML path not allowed in environment variable Name
override := strings.ReplaceAll(path, tomlPathSeparator, envNameSeparator)
override = strings.ReplaceAll(override, tomlNameSeparator, envNameSeparator)
override = strings.ToUpper(override)
return override
}
// OverrideConfigProviderInfo overrides the Configuration Provider ServiceConfig values
// from an Variables variable value (if it exists).
func (e *Variables) OverrideConfigProviderInfo(configProviderInfo types.ServiceConfig) (types.ServiceConfig, error) |
// convertToType attempts to convert the string value to the specified type of the old value
func (_ *Variables) convertToType(oldValue interface{}, value string) (newValue interface{}, err error) {
switch oldValue.(type) {
case []string:
newValue = parseCommaSeparatedSlice(value)
case []interface{}:
newValue = parseCommaSeparatedSlice(value)
case string:
newValue = value
case bool:
newValue, err = strconv.ParseBool(value)
case int:
newValue, err = strconv.ParseInt(value, 10, strconv.IntSize)
newValue = int(newValue.(int64))
case int8:
newValue, err = strconv.ParseInt(value, 10, 8)
newValue = int8(newValue.(int64))
case int16:
newValue, err = strconv.ParseInt(value, 10, 16)
newValue = int16(newValue.(int64))
case int32:
newValue, err = strconv.ParseInt(value, 10, 32)
newValue = int32(newValue.(int64))
case int64:
newValue, err = strconv.ParseInt(value, 10, 64)
case uint:
newValue, err = strconv.ParseUint(value, 10, strconv.IntSize)
newValue = uint(newValue.(uint64))
case uint8:
newValue, err = strconv.ParseUint(value, 10, 8)
newValue = uint8(newValue.(uint64))
case uint16:
newValue, err = strconv.ParseUint(value, 10, 16)
newValue = uint16(newValue.(uint64))
case uint32:
newValue, err = strconv.ParseUint(value, 10, 32)
newValue = uint32(newValue.(uint64))
case uint64:
newValue, err = strconv.ParseUint(value, 10, 64)
case float32:
newValue, err = strconv.ParseFloat(value, 32)
newValue = float32(newValue.(float64))
case float64:
newValue, err = strconv.ParseFloat(value, 64)
default:
err = fmt.Errorf(
"configuration type of '%s' is not supported for environment variable override",
reflect.TypeOf(oldValue).String())
}
return newValue, err
}
// StartupInfo provides the startup timer values which are applied to the StartupTimer created at boot.
type StartupInfo struct {
Duration int
Interval int
}
// GetStartupInfo gets the Service StartupInfo values from an Variables variable value (if it exists)
// or uses the default values.
func GetStartupInfo(serviceKey string) StartupInfo {
// lc hasn't be created at the time this info is needed so have to create local client.
lc := logger.NewClient(serviceKey, models.InfoLog)
startup := StartupInfo{
Duration: bootTimeoutSecondsDefault,
Interval: bootRetrySecondsDefault,
}
// Get the startup timer configuration from environment, if provided.
value := os.Getenv(envKeyStartupDuration)
if len(value) > 0 {
logEnvironmentOverride(lc, "Startup Duration", envKeyStartupDuration, value)
if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 {
startup.Duration = int(n)
}
}
// Get the startup timer interval, if provided.
value = os.Getenv(envKeyStartupInterval)
if len(value) > 0 {
logEnvironmentOverride(lc, "Startup Interval", envKeyStartupInterval, value)
if | {
url := os.Getenv(envKeyConfigUrl)
if len(url) > 0 {
logEnvironmentOverride(e.lc, "Configuration Provider Information", envKeyConfigUrl, url)
if err := configProviderInfo.PopulateFromUrl(url); err != nil {
return types.ServiceConfig{}, err
}
}
return configProviderInfo, nil
} | identifier_body |
variables.go | elletier/go-toml"
)
const (
bootTimeoutSecondsDefault = 60
bootRetrySecondsDefault = 1
defaultConfDirValue = "./res"
envKeyConfigUrl = "EDGEX_CONFIGURATION_PROVIDER"
envKeyUseRegistry = "EDGEX_USE_REGISTRY"
envKeyStartupDuration = "EDGEX_STARTUP_DURATION"
envKeyStartupInterval = "EDGEX_STARTUP_INTERVAL"
envConfDir = "EDGEX_CONF_DIR"
envProfile = "EDGEX_PROFILE"
envFile = "EDGEX_CONFIG_FILE"
tomlPathSeparator = "."
tomlNameSeparator = "-"
envNameSeparator = "_"
)
// Variables is receiver that holds Variables variables and encapsulates toml.Tree-based configuration field
// overrides. Assumes "_" embedded in Variables variable key separates sub-structs; e.g. foo_bar_baz might refer to
//
// type foo struct {
// bar struct {
// baz string
// }
// }
type Variables struct {
variables map[string]string
lc logger.LoggingClient
}
// NewVariables constructor reads/stores os.Environ() for use by Variables receiver methods.
func NewVariables(lc logger.LoggingClient) *Variables {
osEnv := os.Environ()
e := &Variables{
variables: make(map[string]string, len(osEnv)),
lc: lc,
}
for _, env := range osEnv {
// Can not use Split() on '=' since the value may have an '=' in it, so changed to use Index()
index := strings.Index(env, "=")
if index == -1 {
continue
}
key := env[:index]
value := env[index+1:]
e.variables[key] = value
}
return e
}
// UseRegistry returns whether the envKeyUseRegistry key is set to true and whether the override was used
func (e *Variables) UseRegistry() (bool, bool) {
value := os.Getenv(envKeyUseRegistry)
if len(value) == 0 {
return false, false
}
logEnvironmentOverride(e.lc, "-r/--registry", envKeyUseRegistry, value)
return value == "true", true
}
// OverrideConfiguration method replaces values in the configuration for matching Variables variable keys.
// serviceConfig must be pointer to the service configuration.
func (e *Variables) OverrideConfiguration(serviceConfig interface{}) (int, error) {
var overrideCount = 0
contents, err := toml.Marshal(reflect.ValueOf(serviceConfig).Elem().Interface())
if err != nil {
return 0, err
}
configTree, err := toml.LoadBytes(contents)
if err != nil |
// The toml.Tree API keys() only return to top level keys, rather that paths.
// It is also missing a GetPaths so have to spin our own
paths := e.buildPaths(configTree.ToMap())
// Now that we have all the paths in the config tree, we need to create map of corresponding override names that
// could match override environment variable names.
overrideNames := e.buildOverrideNames(paths)
for envVar, envValue := range e.variables {
path, found := overrideNames[envVar]
if !found {
continue
}
oldValue := configTree.Get(path)
newValue, err := e.convertToType(oldValue, envValue)
if err != nil {
return 0, fmt.Errorf("environment value override failed for %s=%s: %s", envVar, envValue, err.Error())
}
configTree.Set(path, newValue)
overrideCount++
logEnvironmentOverride(e.lc, path, envVar, envValue)
}
// Put the configuration back into the services configuration struct with the overridden values
err = configTree.Unmarshal(serviceConfig)
if err != nil {
return 0, fmt.Errorf("could not marshal toml configTree to configuration: %s", err.Error())
}
return overrideCount, nil
}
// buildPaths create the path strings for all settings in the Config tree's key map
func (e *Variables) buildPaths(keyMap map[string]interface{}) []string {
var paths []string
for key, item := range keyMap {
if reflect.TypeOf(item).Kind() != reflect.Map {
paths = append(paths, key)
continue
}
subMap := item.(map[string]interface{})
subPaths := e.buildPaths(subMap)
for _, path := range subPaths {
paths = append(paths, fmt.Sprintf("%s.%s", key, path))
}
}
return paths
}
func (e *Variables) buildOverrideNames(paths []string) map[string]string {
names := map[string]string{}
for _, path := range paths {
names[e.getOverrideNameFor(path)] = path
}
return names
}
func (_ *Variables) getOverrideNameFor(path string) string {
// "." & "-" are the only special character allowed in TOML path not allowed in environment variable Name
override := strings.ReplaceAll(path, tomlPathSeparator, envNameSeparator)
override = strings.ReplaceAll(override, tomlNameSeparator, envNameSeparator)
override = strings.ToUpper(override)
return override
}
// OverrideConfigProviderInfo overrides the Configuration Provider ServiceConfig values
// from an Variables variable value (if it exists).
func (e *Variables) OverrideConfigProviderInfo(configProviderInfo types.ServiceConfig) (types.ServiceConfig, error) {
url := os.Getenv(envKeyConfigUrl)
if len(url) > 0 {
logEnvironmentOverride(e.lc, "Configuration Provider Information", envKeyConfigUrl, url)
if err := configProviderInfo.PopulateFromUrl(url); err != nil {
return types.ServiceConfig{}, err
}
}
return configProviderInfo, nil
}
// convertToType attempts to convert the string value to the specified type of the old value
func (_ *Variables) convertToType(oldValue interface{}, value string) (newValue interface{}, err error) {
switch oldValue.(type) {
case []string:
newValue = parseCommaSeparatedSlice(value)
case []interface{}:
newValue = parseCommaSeparatedSlice(value)
case string:
newValue = value
case bool:
newValue, err = strconv.ParseBool(value)
case int:
newValue, err = strconv.ParseInt(value, 10, strconv.IntSize)
newValue = int(newValue.(int64))
case int8:
newValue, err = strconv.ParseInt(value, 10, 8)
newValue = int8(newValue.(int64))
case int16:
newValue, err = strconv.ParseInt(value, 10, 16)
newValue = int16(newValue.(int64))
case int32:
newValue, err = strconv.ParseInt(value, 10, 32)
newValue = int32(newValue.(int64))
case int64:
newValue, err = strconv.ParseInt(value, 10, 64)
case uint:
newValue, err = strconv.ParseUint(value, 10, strconv.IntSize)
newValue = uint(newValue.(uint64))
case uint8:
newValue, err = strconv.ParseUint(value, 10, 8)
newValue = uint8(newValue.(uint64))
case uint16:
newValue, err = strconv.ParseUint(value, 10, 16)
newValue = uint16(newValue.(uint64))
case uint32:
newValue, err = strconv.ParseUint(value, 10, 32)
newValue = uint32(newValue.(uint64))
case uint64:
newValue, err = strconv.ParseUint(value, 10, 64)
case float32:
newValue, err = strconv.ParseFloat(value, 32)
newValue = float32(newValue.(float64))
case float64:
newValue, err = strconv.ParseFloat(value, 64)
default:
err = fmt.Errorf(
"configuration type of '%s' is not supported for environment variable override",
reflect.TypeOf(oldValue).String())
}
return newValue, err
}
// StartupInfo provides the startup timer values which are applied to the StartupTimer created at boot.
type StartupInfo struct {
Duration int
Interval int
}
// GetStartupInfo gets the Service StartupInfo values from an Variables variable value (if it exists)
// or uses the default values.
func GetStartupInfo(serviceKey string) StartupInfo {
// lc hasn't be created at the time this info is needed so have to create local client.
lc := logger.NewClient(serviceKey, models.InfoLog)
startup := StartupInfo{
Duration: bootTimeoutSecondsDefault,
Interval: bootRetrySecondsDefault,
}
// Get the startup timer configuration from environment, if provided.
value := os.Getenv(envKeyStartupDuration)
if len(value) > 0 {
logEnvironmentOverride(lc, "Startup Duration", envKeyStartupDuration, value)
if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 {
startup.Duration = int(n)
}
}
// Get the startup timer interval, if provided.
value = os.Getenv(envKeyStartupInterval)
if len(value) > 0 {
logEnvironmentOverride(lc, "Startup Interval", envKeyStartupInterval, value)
if | {
return 0, err
} | conditional_block |
variables.go | /pelletier/go-toml"
)
const (
bootTimeoutSecondsDefault = 60
bootRetrySecondsDefault = 1
defaultConfDirValue = "./res"
envKeyConfigUrl = "EDGEX_CONFIGURATION_PROVIDER"
envKeyUseRegistry = "EDGEX_USE_REGISTRY"
envKeyStartupDuration = "EDGEX_STARTUP_DURATION"
envKeyStartupInterval = "EDGEX_STARTUP_INTERVAL"
envConfDir = "EDGEX_CONF_DIR"
envProfile = "EDGEX_PROFILE"
envFile = "EDGEX_CONFIG_FILE"
tomlPathSeparator = "."
tomlNameSeparator = "-"
envNameSeparator = "_"
)
// Variables is receiver that holds Variables variables and encapsulates toml.Tree-based configuration field
// overrides. Assumes "_" embedded in Variables variable key separates sub-structs; e.g. foo_bar_baz might refer to
//
// type foo struct {
// bar struct {
// baz string
// }
// }
type Variables struct {
variables map[string]string
lc logger.LoggingClient
}
// NewVariables constructor reads/stores os.Environ() for use by Variables receiver methods.
func NewVariables(lc logger.LoggingClient) *Variables {
osEnv := os.Environ()
e := &Variables{
variables: make(map[string]string, len(osEnv)),
lc: lc,
}
for _, env := range osEnv {
// Can not use Split() on '=' since the value may have an '=' in it, so changed to use Index()
index := strings.Index(env, "=")
if index == -1 {
continue
}
key := env[:index]
value := env[index+1:]
e.variables[key] = value
}
return e
}
// UseRegistry returns whether the envKeyUseRegistry key is set to true and whether the override was used
func (e *Variables) UseRegistry() (bool, bool) {
value := os.Getenv(envKeyUseRegistry)
if len(value) == 0 {
return false, false
}
logEnvironmentOverride(e.lc, "-r/--registry", envKeyUseRegistry, value)
return value == "true", true
}
// OverrideConfiguration method replaces values in the configuration for matching Variables variable keys.
// serviceConfig must be pointer to the service configuration.
func (e *Variables) OverrideConfiguration(serviceConfig interface{}) (int, error) {
var overrideCount = 0
contents, err := toml.Marshal(reflect.ValueOf(serviceConfig).Elem().Interface())
if err != nil {
return 0, err
}
configTree, err := toml.LoadBytes(contents)
if err != nil {
return 0, err
}
// The toml.Tree API keys() only return to top level keys, rather that paths.
// It is also missing a GetPaths so have to spin our own
paths := e.buildPaths(configTree.ToMap())
// Now that we have all the paths in the config tree, we need to create map of corresponding override names that
// could match override environment variable names.
overrideNames := e.buildOverrideNames(paths)
for envVar, envValue := range e.variables {
path, found := overrideNames[envVar]
if !found {
continue
}
oldValue := configTree.Get(path)
newValue, err := e.convertToType(oldValue, envValue)
if err != nil {
return 0, fmt.Errorf("environment value override failed for %s=%s: %s", envVar, envValue, err.Error())
}
configTree.Set(path, newValue)
overrideCount++
logEnvironmentOverride(e.lc, path, envVar, envValue)
}
// Put the configuration back into the services configuration struct with the overridden values
err = configTree.Unmarshal(serviceConfig)
if err != nil {
return 0, fmt.Errorf("could not marshal toml configTree to configuration: %s", err.Error())
}
return overrideCount, nil
}
// buildPaths create the path strings for all settings in the Config tree's key map
func (e *Variables) buildPaths(keyMap map[string]interface{}) []string {
var paths []string
for key, item := range keyMap {
if reflect.TypeOf(item).Kind() != reflect.Map {
paths = append(paths, key)
continue
}
subMap := item.(map[string]interface{})
subPaths := e.buildPaths(subMap)
for _, path := range subPaths {
paths = append(paths, fmt.Sprintf("%s.%s", key, path))
}
}
return paths
}
func (e *Variables) buildOverrideNames(paths []string) map[string]string {
names := map[string]string{}
for _, path := range paths {
names[e.getOverrideNameFor(path)] = path | }
return names
}
func (_ *Variables) getOverrideNameFor(path string) string {
// "." & "-" are the only special character allowed in TOML path not allowed in environment variable Name
override := strings.ReplaceAll(path, tomlPathSeparator, envNameSeparator)
override = strings.ReplaceAll(override, tomlNameSeparator, envNameSeparator)
override = strings.ToUpper(override)
return override
}
// OverrideConfigProviderInfo overrides the Configuration Provider ServiceConfig values
// from an Variables variable value (if it exists).
func (e *Variables) OverrideConfigProviderInfo(configProviderInfo types.ServiceConfig) (types.ServiceConfig, error) {
url := os.Getenv(envKeyConfigUrl)
if len(url) > 0 {
logEnvironmentOverride(e.lc, "Configuration Provider Information", envKeyConfigUrl, url)
if err := configProviderInfo.PopulateFromUrl(url); err != nil {
return types.ServiceConfig{}, err
}
}
return configProviderInfo, nil
}
// convertToType attempts to convert the string value to the specified type of the old value
func (_ *Variables) convertToType(oldValue interface{}, value string) (newValue interface{}, err error) {
switch oldValue.(type) {
case []string:
newValue = parseCommaSeparatedSlice(value)
case []interface{}:
newValue = parseCommaSeparatedSlice(value)
case string:
newValue = value
case bool:
newValue, err = strconv.ParseBool(value)
case int:
newValue, err = strconv.ParseInt(value, 10, strconv.IntSize)
newValue = int(newValue.(int64))
case int8:
newValue, err = strconv.ParseInt(value, 10, 8)
newValue = int8(newValue.(int64))
case int16:
newValue, err = strconv.ParseInt(value, 10, 16)
newValue = int16(newValue.(int64))
case int32:
newValue, err = strconv.ParseInt(value, 10, 32)
newValue = int32(newValue.(int64))
case int64:
newValue, err = strconv.ParseInt(value, 10, 64)
case uint:
newValue, err = strconv.ParseUint(value, 10, strconv.IntSize)
newValue = uint(newValue.(uint64))
case uint8:
newValue, err = strconv.ParseUint(value, 10, 8)
newValue = uint8(newValue.(uint64))
case uint16:
newValue, err = strconv.ParseUint(value, 10, 16)
newValue = uint16(newValue.(uint64))
case uint32:
newValue, err = strconv.ParseUint(value, 10, 32)
newValue = uint32(newValue.(uint64))
case uint64:
newValue, err = strconv.ParseUint(value, 10, 64)
case float32:
newValue, err = strconv.ParseFloat(value, 32)
newValue = float32(newValue.(float64))
case float64:
newValue, err = strconv.ParseFloat(value, 64)
default:
err = fmt.Errorf(
"configuration type of '%s' is not supported for environment variable override",
reflect.TypeOf(oldValue).String())
}
return newValue, err
}
// StartupInfo provides the startup timer values which are applied to the StartupTimer created at boot.
type StartupInfo struct {
Duration int
Interval int
}
// GetStartupInfo gets the Service StartupInfo values from an Variables variable value (if it exists)
// or uses the default values.
func GetStartupInfo(serviceKey string) StartupInfo {
// lc hasn't be created at the time this info is needed so have to create local client.
lc := logger.NewClient(serviceKey, models.InfoLog)
startup := StartupInfo{
Duration: bootTimeoutSecondsDefault,
Interval: bootRetrySecondsDefault,
}
// Get the startup timer configuration from environment, if provided.
value := os.Getenv(envKeyStartupDuration)
if len(value) > 0 {
logEnvironmentOverride(lc, "Startup Duration", envKeyStartupDuration, value)
if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 {
startup.Duration = int(n)
}
}
// Get the startup timer interval, if provided.
value = os.Getenv(envKeyStartupInterval)
if len(value) > 0 {
logEnvironmentOverride(lc, "Startup Interval", envKeyStartupInterval, value)
if n, | random_line_split |
|
variables.go | elletier/go-toml"
)
const (
bootTimeoutSecondsDefault = 60
bootRetrySecondsDefault = 1
defaultConfDirValue = "./res"
envKeyConfigUrl = "EDGEX_CONFIGURATION_PROVIDER"
envKeyUseRegistry = "EDGEX_USE_REGISTRY"
envKeyStartupDuration = "EDGEX_STARTUP_DURATION"
envKeyStartupInterval = "EDGEX_STARTUP_INTERVAL"
envConfDir = "EDGEX_CONF_DIR"
envProfile = "EDGEX_PROFILE"
envFile = "EDGEX_CONFIG_FILE"
tomlPathSeparator = "."
tomlNameSeparator = "-"
envNameSeparator = "_"
)
// Variables is receiver that holds Variables variables and encapsulates toml.Tree-based configuration field
// overrides. Assumes "_" embedded in Variables variable key separates sub-structs; e.g. foo_bar_baz might refer to
//
// type foo struct {
// bar struct {
// baz string
// }
// }
type Variables struct {
variables map[string]string
lc logger.LoggingClient
}
// NewVariables constructor reads/stores os.Environ() for use by Variables receiver methods.
func NewVariables(lc logger.LoggingClient) *Variables {
osEnv := os.Environ()
e := &Variables{
variables: make(map[string]string, len(osEnv)),
lc: lc,
}
for _, env := range osEnv {
// Can not use Split() on '=' since the value may have an '=' in it, so changed to use Index()
index := strings.Index(env, "=")
if index == -1 {
continue
}
key := env[:index]
value := env[index+1:]
e.variables[key] = value
}
return e
}
// UseRegistry returns whether the envKeyUseRegistry key is set to true and whether the override was used
func (e *Variables) UseRegistry() (bool, bool) {
value := os.Getenv(envKeyUseRegistry)
if len(value) == 0 {
return false, false
}
logEnvironmentOverride(e.lc, "-r/--registry", envKeyUseRegistry, value)
return value == "true", true
}
// OverrideConfiguration method replaces values in the configuration for matching Variables variable keys.
// serviceConfig must be pointer to the service configuration.
func (e *Variables) | (serviceConfig interface{}) (int, error) {
var overrideCount = 0
contents, err := toml.Marshal(reflect.ValueOf(serviceConfig).Elem().Interface())
if err != nil {
return 0, err
}
configTree, err := toml.LoadBytes(contents)
if err != nil {
return 0, err
}
// The toml.Tree API keys() only return to top level keys, rather that paths.
// It is also missing a GetPaths so have to spin our own
paths := e.buildPaths(configTree.ToMap())
// Now that we have all the paths in the config tree, we need to create map of corresponding override names that
// could match override environment variable names.
overrideNames := e.buildOverrideNames(paths)
for envVar, envValue := range e.variables {
path, found := overrideNames[envVar]
if !found {
continue
}
oldValue := configTree.Get(path)
newValue, err := e.convertToType(oldValue, envValue)
if err != nil {
return 0, fmt.Errorf("environment value override failed for %s=%s: %s", envVar, envValue, err.Error())
}
configTree.Set(path, newValue)
overrideCount++
logEnvironmentOverride(e.lc, path, envVar, envValue)
}
// Put the configuration back into the services configuration struct with the overridden values
err = configTree.Unmarshal(serviceConfig)
if err != nil {
return 0, fmt.Errorf("could not marshal toml configTree to configuration: %s", err.Error())
}
return overrideCount, nil
}
// buildPaths create the path strings for all settings in the Config tree's key map
func (e *Variables) buildPaths(keyMap map[string]interface{}) []string {
var paths []string
for key, item := range keyMap {
if reflect.TypeOf(item).Kind() != reflect.Map {
paths = append(paths, key)
continue
}
subMap := item.(map[string]interface{})
subPaths := e.buildPaths(subMap)
for _, path := range subPaths {
paths = append(paths, fmt.Sprintf("%s.%s", key, path))
}
}
return paths
}
func (e *Variables) buildOverrideNames(paths []string) map[string]string {
names := map[string]string{}
for _, path := range paths {
names[e.getOverrideNameFor(path)] = path
}
return names
}
func (_ *Variables) getOverrideNameFor(path string) string {
// "." & "-" are the only special character allowed in TOML path not allowed in environment variable Name
override := strings.ReplaceAll(path, tomlPathSeparator, envNameSeparator)
override = strings.ReplaceAll(override, tomlNameSeparator, envNameSeparator)
override = strings.ToUpper(override)
return override
}
// OverrideConfigProviderInfo overrides the Configuration Provider ServiceConfig values
// from an Variables variable value (if it exists).
func (e *Variables) OverrideConfigProviderInfo(configProviderInfo types.ServiceConfig) (types.ServiceConfig, error) {
url := os.Getenv(envKeyConfigUrl)
if len(url) > 0 {
logEnvironmentOverride(e.lc, "Configuration Provider Information", envKeyConfigUrl, url)
if err := configProviderInfo.PopulateFromUrl(url); err != nil {
return types.ServiceConfig{}, err
}
}
return configProviderInfo, nil
}
// convertToType attempts to convert the string value to the specified type of the old value
func (_ *Variables) convertToType(oldValue interface{}, value string) (newValue interface{}, err error) {
switch oldValue.(type) {
case []string:
newValue = parseCommaSeparatedSlice(value)
case []interface{}:
newValue = parseCommaSeparatedSlice(value)
case string:
newValue = value
case bool:
newValue, err = strconv.ParseBool(value)
case int:
newValue, err = strconv.ParseInt(value, 10, strconv.IntSize)
newValue = int(newValue.(int64))
case int8:
newValue, err = strconv.ParseInt(value, 10, 8)
newValue = int8(newValue.(int64))
case int16:
newValue, err = strconv.ParseInt(value, 10, 16)
newValue = int16(newValue.(int64))
case int32:
newValue, err = strconv.ParseInt(value, 10, 32)
newValue = int32(newValue.(int64))
case int64:
newValue, err = strconv.ParseInt(value, 10, 64)
case uint:
newValue, err = strconv.ParseUint(value, 10, strconv.IntSize)
newValue = uint(newValue.(uint64))
case uint8:
newValue, err = strconv.ParseUint(value, 10, 8)
newValue = uint8(newValue.(uint64))
case uint16:
newValue, err = strconv.ParseUint(value, 10, 16)
newValue = uint16(newValue.(uint64))
case uint32:
newValue, err = strconv.ParseUint(value, 10, 32)
newValue = uint32(newValue.(uint64))
case uint64:
newValue, err = strconv.ParseUint(value, 10, 64)
case float32:
newValue, err = strconv.ParseFloat(value, 32)
newValue = float32(newValue.(float64))
case float64:
newValue, err = strconv.ParseFloat(value, 64)
default:
err = fmt.Errorf(
"configuration type of '%s' is not supported for environment variable override",
reflect.TypeOf(oldValue).String())
}
return newValue, err
}
// StartupInfo provides the startup timer values which are applied to the StartupTimer created at boot.
type StartupInfo struct {
Duration int
Interval int
}
// GetStartupInfo gets the Service StartupInfo values from an Variables variable value (if it exists)
// or uses the default values.
func GetStartupInfo(serviceKey string) StartupInfo {
// lc hasn't be created at the time this info is needed so have to create local client.
lc := logger.NewClient(serviceKey, models.InfoLog)
startup := StartupInfo{
Duration: bootTimeoutSecondsDefault,
Interval: bootRetrySecondsDefault,
}
// Get the startup timer configuration from environment, if provided.
value := os.Getenv(envKeyStartupDuration)
if len(value) > 0 {
logEnvironmentOverride(lc, "Startup Duration", envKeyStartupDuration, value)
if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 {
startup.Duration = int(n)
}
}
// Get the startup timer interval, if provided.
value = os.Getenv(envKeyStartupInterval)
if len(value) > 0 {
logEnvironmentOverride(lc, "Startup Interval", envKeyStartupInterval, value)
if n | OverrideConfiguration | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.