| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
lib.rs | c >= 'a' && c <= 'f') ||
(c >= 'A' && c <= 'F')
).map(|c| if c >= '0' && c <= '9' {
c as u64 - '0' as u64
} else if c >= 'a' && c <= 'f' {
10 + c as u64 - 'a' as u64
} else {
10 + c as u64 - 'A' as u64
} as u32
)
}
fn unicode_char<'a>() -> impl Parser<&'a str, Output = Option<char>> {
c_hx_do!{
__ <- string(r#"\u"#),
d3 <- parse_hex(),
d2 <- parse_hex(),
d1 <- parse_hex(),
d0 <- parse_hex();
{
let unicode = d0 +
0x10 * d1 +
0x100 * d2 +
0x1000 * d3;
char::try_from(unicode).ok()
}
}
}
#[derive(PartialEq)]
enum StringPiece<'a >
{
Ref(&'a str),
Char(Option<char>)
}
fn braced_parser<'a, PBL, P, PBR, O>(pbl: PBL, p: P, pbr: PBR) -> impl Parser<&'a str, Output = O>
where
PBL: Parser<&'a str>,
PBR: Parser<&'a str>,
P: Parser<&'a str, Output = O>
{
between(
c_compre![c; c <- pbl, __ <- skip_many(space())],
c_compre![c; __ <- skip_many(space()), c <- pbr],
p
)
}
fn string_part<'a>() -> impl Parser<&'a str, Output = Vec<StringPiece<'a >>> {
many(
choice(
(
attempt(take_while1(|c: char| c != '\\' && c != '"' && c != '\n' && c != '\r' && c != '\t')
.map(|chars: &str| StringPiece::Ref(chars))),
attempt(string("\\\"").map(|_|StringPiece::Ref("\""))),
attempt(string("\\\\").map(|_|StringPiece::Ref("\\"))),
attempt(string("\\n").map(|_|StringPiece::Ref("\n"))),
attempt(string("\\t").map(|_|StringPiece::Ref("\t"))),
attempt(string("\\/").map(|_|StringPiece::Ref("/"))),
attempt(string("\\r").map(|_|StringPiece::Ref("\r"))),
attempt(string("\\f").map(|_|StringPiece::Ref("\u{000c}"))),
attempt(string("\\b").map(|_|StringPiece::Ref("\u{0008}"))),
attempt(unicode_char().map(|s|StringPiece::Char(s))),
)
)
)
}
fn string_parser_inner<'a>() -> impl Parser<&'a str, Output = SmolStr> {
c_hx_do! {
x <- between(char('"'), char('"'), string_part());
{
let cap = x.iter().fold(0, |acc, s|
acc +
match s {
StringPiece::Ref(strref) => strref.len(),
StringPiece::Char(c) => c.map(|c_inner| c_inner.len_utf8()).unwrap_or(0)
}
);
if cap <= 22 {
let mut buf: [u8; 22] = [0; 22];
let mut offset = 0;
for s in x.iter() {
match s {
StringPiece::Ref(strref) => {
for &b in strref.as_bytes() {
buf[offset] = b;
offset += 1;
}
},
StringPiece::Char(c) => {
if let Some(chr) = c {
chr.encode_utf8(&mut buf[offset..]);
offset += chr.len_utf8();
}
}
}
}
return unsafe {
SmolStr::new(str::from_utf8_unchecked(&buf[0..cap]))
};
}
let mut str = String::with_capacity(cap);
for s in x.iter() {
match s {
StringPiece::Ref(strref) => str.push_str(strref),
StringPiece::Char(c) => if let Some(chr) = c { str.push(*chr); }
}
}
SmolStr::new(str)
}
}
}
fn string_parser<'a>() -> impl Parser<&'a str, Output = Node> {
string_parser_inner().map(|x| Node::String(x))
}
fn digit_sequence<'a>() -> impl Parser<&'a str, Output = &'a str> {
take_while1(|c: char| c >= '0' && c <= '9')
}
#[inline(always)]
fn power(lhs: f64, rhs: f64) -> f64 {
lhs.powf(rhs)
}
fn trailing_digit_sequence<'a>() -> impl Parser<&'a str, Output = &'a str> {
c_hx_do! {
__ <- char('.'),
rest <- digit_sequence();
rest
}
}
fn exponent_parser<'a>() -> impl Parser<&'a str, Output = f64> {
c_hx_do!{
__ <- satisfy(|c: char| c == 'e' || c == 'E'),
sign_char <- optional(satisfy(|c: char| c == '+' || c == '-')),
digits <- digit_sequence();
{
let sign = match sign_char {
Some('-') => -1.0,
_ => 1.0
};
let mut acc = 0;
for c in digits.as_bytes() {
acc = acc * 10 + (c - b'0') as u64;
}
power(10.0, sign * acc as f64)
}
}
}
#[derive(PartialEq, Copy, Clone)]
enum NumberPrefix<'a >
{
LeadingZero,
Digits(char, &'a str)
}
fn leading_zero_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
char('0').map(|_| NumberPrefix::LeadingZero)
}
fn leading_digits_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
c_hx_do! {
leading_digit <- satisfy(|c: char| c >= '1' && c <= '9'),
digs <- optional(digit_sequence());
NumberPrefix::Digits(leading_digit, digs.unwrap_or(""))
}
}
fn leading_parser <'a>() -> impl Parser<&'a str, Output = NumberPrefix<'a >> {
choice((
attempt(leading_digits_parser()),
attempt(leading_zero_parser()),
))
}
fn number_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do! {
minus_sign <- optional(char('-')),
leading <- leading_parser(),
trail <- optional(trailing_digit_sequence()),
exp <- optional(exponent_parser());
{
Node::Number({
let mut acc = match leading {
NumberPrefix::LeadingZero => 0.0,
NumberPrefix::Digits(leading_digit, l_digs) => {
let mut l = (leading_digit as u8 - b'0') as u64;
for c in l_digs.as_bytes() {
l = l * 10 + (c - b'0') as u64;
}
l as f64
}
};
if let Some(t_digs) = trail {
let mut divider = 1.0;
for c in t_digs.as_bytes() {
divider /= 10.0;
acc += (c - b'0') as f64 * divider;
}
}
if let Some(exponent) = exp {
acc *= exponent;
}
if let Some(_) = minus_sign {
-acc
} else {
acc
}
})
}
}
}
fn | <'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do!{
word <- string("true").or(string("false"));
match word {
"true" => Node::Boolean(true),
_ => Node::Boolean(false)
}
}
}
fn null_parser<'a>() -> impl Parser<&'a str, Output = Node> {
c_hx_do!{
_word <- string("null");
Node::Null
}
}
macro_rules! ref_parser {
($parser_fn:ident) => {
parser(|input| {
let _: &mut &str = input;
$parser_fn().parse_stream(input).into_result()
})
}
}
fn primitive_parser<'a>() -> impl Parser<&'a str, Output = Node> {
let possible_parser = bool_parser()
.or(number_parser())
.or(string_parser())
.or(null_parser())
.or(ref_parser!(array_parser))
.or(ref_parser!(dictionary_parser));
c_hx_do! {
__ <- skip_many(space()),
pars <- possible_parser,
___ <- skip_many(space());
pars
}
}
fn array_parser<'a>() -> impl Parser<&'a | bool_parser | identifier_name |
main.rs | : Some("demo Company"),
// model_number: None,
// serial_number: None,
// system_id: Some("sysid69"),
// ieee_cert: None,
// hw_revision: None,
// sw_revision: None,
// pnp_id: None
// };
//
#[entry]
fn entry() -> ! {
//rtt_init_print!(BlockIfFull, 4096);
rtt_init_print!(NoBlockSkip, 4096);
run();
loop {
continue;
}
}
fn run() {
let dp = hal::device::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain();
rcc.set_stop_wakeup_clock(StopWakeupClock::HSI16);
// Fastest clock configuration.
// * External low-speed crystal is used (LSE)
// * 32 MHz HSE with PLL
// * 64 MHz CPU1, 32 MHz CPU2
// * 64 MHz for APB1, APB2
// * HSI as a clock source after wake-up from low-power mode
let clock_config = Config::new(SysClkSrc::Pll(PllSrc::Hse(HseDivider::NotDivided)))
.with_lse()
.cpu1_hdiv(HDivider::NotDivided)
.cpu2_hdiv(HDivider::Div2)
.apb1_div(ApbDivider::NotDivided)
.apb2_div(ApbDivider::NotDivided)
.pll_cfg(PllConfig {
m: 2,
n: 12,
r: 3,
q: Some(4),
p: Some(3),
})
.rtc_src(RtcClkSrc::Lse)
.rf_wkp_sel(RfWakeupClock::Lse);
let mut rcc = rcc.apply_clock_config(clock_config, &mut dp.FLASH.constrain().acr);
rprintln!("Boot");
// RTC is required for proper operation of BLE stack
let _rtc = hal::rtc::Rtc::rtc(dp.RTC, &mut rcc);
let mut ipcc = dp.IPCC.constrain();
let mbox = TlMbox::tl_init(&mut rcc, &mut ipcc);
let config = ShciBleInitCmdParam {
p_ble_buffer_address: 0,
ble_buffer_size: 0,
num_attr_record: 100, | extended_packet_length_enable: 1,
pr_write_list_size: 0x3A,
mb_lock_count: 0x79,
att_mtu: 312,
slave_sca: 500,
master_sca: 0,
ls_source: 1,
max_conn_event_length: 0xFFFFFFFF,
hs_startup_time: 0x148,
viterbi_enable: 1,
ll_only: 0,
hw_version: 0,
};
setup_coprocessor(config, ipcc, mbox);
// enable interrupts -> interrupts are enabled in Ipcc::init(), which is called TlMbox::tl_init
// Boot CPU2
hal::pwr::set_cpu2(true);
let ready_event = block!(receive_event());
rprintln!("Received packet: {:?}", ready_event);
rprintln!("Resetting processor...");
let reset_response = perform_command(|rc| rc.reset()).expect("Failed to reset processor");
rprintln!("Received packet: {:?}", reset_response);
init_gap_and_gatt().expect("Failed to initialize GAP and GATT");
rprintln!("Succesfully initialized GAP and GATT");
let di = DeviceInformation::new(
Some("klabs"),
Some("9871234"),
Some("my-serial"),
None,
None,
Some("fw1.234"),
Some("my-system-id"),
None,
None,
);
let dis_service = init_dis(&di).expect("failed to activate DIS");
let hrs_service = init_hrs().expect("failed to activate heart rate service");
// Set our discovery parameters, this is "application specific" regardless of what services
// we've turned on
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
});
loop {
let response = block!(receive_event());
rprintln!("Received event: {:x?}", response);
if let Ok(Packet::Event(event)) = response {
match event {
// karl - this isn't quite working...
Event::DisconnectionComplete(_state) => {
// Enter advertising mode again
// Put the device in a connectable mode
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
})
.expect("Failed to enable discoverable mode again");
// perform_command(|rc| {
// rc.update_advertising_data(&ADVERTISING_DATA[..])
// .map_err(|_| nb::Error::Other(()))
// })
// .expect("Failed to update advertising data");
}
// FIXME - I want some sort of "list of event handlers" that can be plugged in here?
// ST style has a list of handlers, and stops at the first one to say "handled"
_ => handle_event(&event, &dis_service, &hrs_service),
}
}
}
}
fn handle_event(event: &Event<Stm32Wb5xEvent>, dis: &DisService, hrs: &HrsService) {
if let Event::Vendor(stm_event) = event {
match stm_event {
Stm32Wb5xEvent::AttReadPermitRequest(AttReadPermitRequest {
conn_handle,
attribute_handle,
offset,
}) => {
rprintln!("Allowing read on ch: {:?} ah: {:?}, offset: {}", conn_handle, attribute_handle, offset);
perform_command(|rc| rc.allow_read(*conn_handle))
.expect("Failed to allow read");
}
other => rprintln!("ignoring event {:?}", other),
}
}
}
#[exception]
unsafe fn DefaultHandler(irqn: i16) -> ! {
panic!("Unhandled IRQ: {}", irqn);
}
fn init_gap_and_gatt() -> Result<(), ()> {
let response = perform_command(|rc: &mut RadioCopro| {
rc.write_config_data(&ConfigData::public_address(get_bd_addr()).build())
})?;
rprintln!("Response to write_config_data: {:?}", response);
perform_command(|rc| {
rc.write_config_data(&ConfigData::random_address(get_random_addr()).build())
})?;
perform_command(|rc| rc.write_config_data(&ConfigData::identity_root(&get_irk()).build()))?;
perform_command(|rc| rc.write_config_data(&ConfigData::encryption_root(&get_erk()).build()))?;
perform_command(|rc| rc.set_tx_power_level(PowerLevel::ZerodBm))?;
perform_command(|rc| rc.init_gatt())?;
let return_params =
perform_command(|rc| rc.init_gap(Role::PERIPHERAL, false, BLE_GAP_DEVICE_NAME_LENGTH))?;
// let sh, dh, ah == return parameters... if it was of the type of GapInit ReturnParameters....?
let (service_handle, dev_name_handle, appearance_handle) = if let ReturnParameters::Vendor(
stm32wb55::event::command::ReturnParameters::GapInit(stm32wb55::event::command::GapInit {
service_handle,
dev_name_handle,
appearance_handle,
..
}),
) = return_params
{
(service_handle, dev_name_handle, appearance_handle)
} else {
rprintln!("Unexpected response to init_gap command");
return Err(());
};
perform_command(|rc| {
rc.update_characteristic_value(&UpdateCharacteristicValueParameters {
service_handle,
characteristic_handle: dev_name_handle,
offset: 0,
value: BT_NAME,
})
.map_err(|_| nb::Error::Other(()))
})?;
let appearance_characteristic = Characteristic {
service: service_handle,
characteristic: appearance_handle,
max_len: 4,
};
appearance_characteristic.set_value(&bt_appearances::HeartRateSensor::GENERIC.0.to_le_bytes());
return Ok(());
}
fn init_dis(di: &DeviceInformation) -> Result<DisService, ()> {
// homekit demo uses 24, st uses max 19, 2 per char, plus 1.
// using less than required here saves memory in the shared space I believe, so, ideally, this would
// check how many "real" values are configured in the "DisServiceInfo" blob....
let dis_service = DisService::new(svc_dis::uuid::DEVICE_INFORMATION_SERVICE, 19)?;
di.register(&dis_service);
// FIXME - neither of these should be in this function, it makes it hard to compose services...
// Disable scan response. not even sure we need this at all.
// perform_command(| | num_attr_serv: 10,
attr_value_arr_size: 3500, //2788,
num_of_links: 8, | random_line_split |
main.rs | : Some("demo Company"),
// model_number: None,
// serial_number: None,
// system_id: Some("sysid69"),
// ieee_cert: None,
// hw_revision: None,
// sw_revision: None,
// pnp_id: None
// };
//
#[entry]
fn entry() -> ! {
//rtt_init_print!(BlockIfFull, 4096);
rtt_init_print!(NoBlockSkip, 4096);
run();
loop {
continue;
}
}
fn run() {
let dp = hal::device::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain();
rcc.set_stop_wakeup_clock(StopWakeupClock::HSI16);
// Fastest clock configuration.
// * External low-speed crystal is used (LSE)
// * 32 MHz HSE with PLL
// * 64 MHz CPU1, 32 MHz CPU2
// * 64 MHz for APB1, APB2
// * HSI as a clock source after wake-up from low-power mode
let clock_config = Config::new(SysClkSrc::Pll(PllSrc::Hse(HseDivider::NotDivided)))
.with_lse()
.cpu1_hdiv(HDivider::NotDivided)
.cpu2_hdiv(HDivider::Div2)
.apb1_div(ApbDivider::NotDivided)
.apb2_div(ApbDivider::NotDivided)
.pll_cfg(PllConfig {
m: 2,
n: 12,
r: 3,
q: Some(4),
p: Some(3),
})
.rtc_src(RtcClkSrc::Lse)
.rf_wkp_sel(RfWakeupClock::Lse);
let mut rcc = rcc.apply_clock_config(clock_config, &mut dp.FLASH.constrain().acr);
rprintln!("Boot");
// RTC is required for proper operation of BLE stack
let _rtc = hal::rtc::Rtc::rtc(dp.RTC, &mut rcc);
let mut ipcc = dp.IPCC.constrain();
let mbox = TlMbox::tl_init(&mut rcc, &mut ipcc);
let config = ShciBleInitCmdParam {
p_ble_buffer_address: 0,
ble_buffer_size: 0,
num_attr_record: 100,
num_attr_serv: 10,
attr_value_arr_size: 3500, //2788,
num_of_links: 8,
extended_packet_length_enable: 1,
pr_write_list_size: 0x3A,
mb_lock_count: 0x79,
att_mtu: 312,
slave_sca: 500,
master_sca: 0,
ls_source: 1,
max_conn_event_length: 0xFFFFFFFF,
hs_startup_time: 0x148,
viterbi_enable: 1,
ll_only: 0,
hw_version: 0,
};
setup_coprocessor(config, ipcc, mbox);
// enable interrupts -> interrupts are enabled in Ipcc::init(), which is called TlMbox::tl_init
// Boot CPU2
hal::pwr::set_cpu2(true);
let ready_event = block!(receive_event());
rprintln!("Received packet: {:?}", ready_event);
rprintln!("Resetting processor...");
let reset_response = perform_command(|rc| rc.reset()).expect("Failed to reset processor");
rprintln!("Received packet: {:?}", reset_response);
init_gap_and_gatt().expect("Failed to initialize GAP and GATT");
rprintln!("Succesfully initialized GAP and GATT");
let di = DeviceInformation::new(
Some("klabs"),
Some("9871234"),
Some("my-serial"),
None,
None,
Some("fw1.234"),
Some("my-system-id"),
None,
None,
);
let dis_service = init_dis(&di).expect("failed to activate DIS");
let hrs_service = init_hrs().expect("failed to activate heart rate service");
// Set our discovery parameters, this is "application specific" regardless of what services
// we've turned on
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
});
loop {
let response = block!(receive_event());
rprintln!("Received event: {:x?}", response);
if let Ok(Packet::Event(event)) = response {
match event {
// karl - this isn't quite working...
Event::DisconnectionComplete(_state) => {
// Enter advertising mode again
// Put the device in a connectable mode
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
})
.expect("Failed to enable discoverable mode again");
// perform_command(|rc| {
// rc.update_advertising_data(&ADVERTISING_DATA[..])
// .map_err(|_| nb::Error::Other(()))
// })
// .expect("Failed to update advertising data");
}
// FIXME - I want some sort of "list of event handlers" that can be plugged in here?
// ST style has a list of handlers, and stops at the first one to say "handled"
_ => handle_event(&event, &dis_service, &hrs_service),
}
}
}
}
fn handle_event(event: &Event<Stm32Wb5xEvent>, dis: &DisService, hrs: &HrsService) {
if let Event::Vendor(stm_event) = event {
match stm_event {
Stm32Wb5xEvent::AttReadPermitRequest(AttReadPermitRequest {
conn_handle,
attribute_handle,
offset,
}) => {
rprintln!("Allowing read on ch: {:?} ah: {:?}, offset: {}", conn_handle, attribute_handle, offset);
perform_command(|rc| rc.allow_read(*conn_handle))
.expect("Failed to allow read");
}
other => rprintln!("ignoring event {:?}", other),
}
}
}
#[exception]
unsafe fn DefaultHandler(irqn: i16) -> ! {
panic!("Unhandled IRQ: {}", irqn);
}
fn init_gap_and_gatt() -> Result<(), ()> {
let response = perform_command(|rc: &mut RadioCopro| {
rc.write_config_data(&ConfigData::public_address(get_bd_addr()).build())
})?;
rprintln!("Response to write_config_data: {:?}", response);
perform_command(|rc| {
rc.write_config_data(&ConfigData::random_address(get_random_addr()).build())
})?;
perform_command(|rc| rc.write_config_data(&ConfigData::identity_root(&get_irk()).build()))?;
perform_command(|rc| rc.write_config_data(&ConfigData::encryption_root(&get_erk()).build()))?;
perform_command(|rc| rc.set_tx_power_level(PowerLevel::ZerodBm))?;
perform_command(|rc| rc.init_gatt())?;
let return_params =
perform_command(|rc| rc.init_gap(Role::PERIPHERAL, false, BLE_GAP_DEVICE_NAME_LENGTH))?;
// let sh, dh, ah == return parameters... if it was of the type of GapInit ReturnParameters....?
let (service_handle, dev_name_handle, appearance_handle) = if let ReturnParameters::Vendor(
stm32wb55::event::command::ReturnParameters::GapInit(stm32wb55::event::command::GapInit {
service_handle,
dev_name_handle,
appearance_handle,
..
}),
) = return_params
| else {
rprintln!("Unexpected response to init_gap command");
return Err(());
};
perform_command(|rc| {
rc.update_characteristic_value(&UpdateCharacteristicValueParameters {
service_handle,
characteristic_handle: dev_name_handle,
offset: 0,
value: BT_NAME,
})
.map_err(|_| nb::Error::Other(()))
})?;
let appearance_characteristic = Characteristic {
service: service_handle,
characteristic: appearance_handle,
max_len: 4,
};
appearance_characteristic.set_value(&bt_appearances::HeartRateSensor::GENERIC.0.to_le_bytes());
return Ok(());
}
fn init_dis(di: &DeviceInformation) -> Result<DisService, ()> {
// homekit demo uses 24, st uses max 19, 2 per char, plus 1.
// using less than required here saves memory in the shared space I believe, so, ideally, this would
// check how many "real" values are configured in the "DisServiceInfo" blob....
let dis_service = DisService::new(svc_dis::uuid::DEVICE_INFORMATION_SERVICE, 19)?;
di.register(&dis_service);
// FIXME - neither of these should be in this function, it makes it hard to compose services...
// Disable scan response. not even sure we need this at all.
// perform_command(| | {
(service_handle, dev_name_handle, appearance_handle)
} | conditional_block |
main.rs | = ShciBleInitCmdParam {
p_ble_buffer_address: 0,
ble_buffer_size: 0,
num_attr_record: 100,
num_attr_serv: 10,
attr_value_arr_size: 3500, //2788,
num_of_links: 8,
extended_packet_length_enable: 1,
pr_write_list_size: 0x3A,
mb_lock_count: 0x79,
att_mtu: 312,
slave_sca: 500,
master_sca: 0,
ls_source: 1,
max_conn_event_length: 0xFFFFFFFF,
hs_startup_time: 0x148,
viterbi_enable: 1,
ll_only: 0,
hw_version: 0,
};
setup_coprocessor(config, ipcc, mbox);
// enable interrupts -> interrupts are enabled in Ipcc::init(), which is called TlMbox::tl_init
// Boot CPU2
hal::pwr::set_cpu2(true);
let ready_event = block!(receive_event());
rprintln!("Received packet: {:?}", ready_event);
rprintln!("Resetting processor...");
let reset_response = perform_command(|rc| rc.reset()).expect("Failed to reset processor");
rprintln!("Received packet: {:?}", reset_response);
init_gap_and_gatt().expect("Failed to initialize GAP and GATT");
rprintln!("Succesfully initialized GAP and GATT");
let di = DeviceInformation::new(
Some("klabs"),
Some("9871234"),
Some("my-serial"),
None,
None,
Some("fw1.234"),
Some("my-system-id"),
None,
None,
);
let dis_service = init_dis(&di).expect("failed to activate DIS");
let hrs_service = init_hrs().expect("failed to activate heart rate service");
// Set our discovery parameters, this is "application specific" regardless of what services
// we've turned on
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
});
loop {
let response = block!(receive_event());
rprintln!("Received event: {:x?}", response);
if let Ok(Packet::Event(event)) = response {
match event {
// karl - this isn't quite working...
Event::DisconnectionComplete(_state) => {
// Enter advertising mode again
// Put the device in a connectable mode
perform_command(|rc| {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
})
.expect("Failed to enable discoverable mode again");
// perform_command(|rc| {
// rc.update_advertising_data(&ADVERTISING_DATA[..])
// .map_err(|_| nb::Error::Other(()))
// })
// .expect("Failed to update advertising data");
}
// FIXME - I want some sort of "list of event handlers" that can be plugged in here?
// ST style has a list of handlers, and stops at the first one to say "handled"
_ => handle_event(&event, &dis_service, &hrs_service),
}
}
}
}
fn handle_event(event: &Event<Stm32Wb5xEvent>, dis: &DisService, hrs: &HrsService) {
if let Event::Vendor(stm_event) = event {
match stm_event {
Stm32Wb5xEvent::AttReadPermitRequest(AttReadPermitRequest {
conn_handle,
attribute_handle,
offset,
}) => {
rprintln!("Allowing read on ch: {:?} ah: {:?}, offset: {}", conn_handle, attribute_handle, offset);
perform_command(|rc| rc.allow_read(*conn_handle))
.expect("Failed to allow read");
}
other => rprintln!("ignoring event {:?}", other),
}
}
}
#[exception]
unsafe fn DefaultHandler(irqn: i16) -> ! {
panic!("Unhandled IRQ: {}", irqn);
}
fn init_gap_and_gatt() -> Result<(), ()> {
let response = perform_command(|rc: &mut RadioCopro| {
rc.write_config_data(&ConfigData::public_address(get_bd_addr()).build())
})?;
rprintln!("Response to write_config_data: {:?}", response);
perform_command(|rc| {
rc.write_config_data(&ConfigData::random_address(get_random_addr()).build())
})?;
perform_command(|rc| rc.write_config_data(&ConfigData::identity_root(&get_irk()).build()))?;
perform_command(|rc| rc.write_config_data(&ConfigData::encryption_root(&get_erk()).build()))?;
perform_command(|rc| rc.set_tx_power_level(PowerLevel::ZerodBm))?;
perform_command(|rc| rc.init_gatt())?;
let return_params =
perform_command(|rc| rc.init_gap(Role::PERIPHERAL, false, BLE_GAP_DEVICE_NAME_LENGTH))?;
// let sh, dh, ah == return parameters... if it was of the type of GapInit ReturnParameters....?
let (service_handle, dev_name_handle, appearance_handle) = if let ReturnParameters::Vendor(
stm32wb55::event::command::ReturnParameters::GapInit(stm32wb55::event::command::GapInit {
service_handle,
dev_name_handle,
appearance_handle,
..
}),
) = return_params
{
(service_handle, dev_name_handle, appearance_handle)
} else {
rprintln!("Unexpected response to init_gap command");
return Err(());
};
perform_command(|rc| {
rc.update_characteristic_value(&UpdateCharacteristicValueParameters {
service_handle,
characteristic_handle: dev_name_handle,
offset: 0,
value: BT_NAME,
})
.map_err(|_| nb::Error::Other(()))
})?;
let appearance_characteristic = Characteristic {
service: service_handle,
characteristic: appearance_handle,
max_len: 4,
};
appearance_characteristic.set_value(&bt_appearances::HeartRateSensor::GENERIC.0.to_le_bytes());
return Ok(());
}
fn init_dis(di: &DeviceInformation) -> Result<DisService, ()> {
// homekit demo uses 24, st uses max 19, 2 per char, plus 1.
// using less than required here saves memory in the shared space I believe, so, ideally, this would
// check how many "real" values are configured in the "DisServiceInfo" blob....
let dis_service = DisService::new(svc_dis::uuid::DEVICE_INFORMATION_SERVICE, 19)?;
di.register(&dis_service);
// FIXME - neither of these should be in this function, it makes it hard to compose services...
// Disable scan response. not even sure we need this at all.
// perform_command(|rc: &mut RadioCopro| {
// rc.le_set_scan_response_data(&[])
// .map_err(|_| nb::Error::Other(()))
// })?;
return Ok(dis_service);
}
// https://stackoverflow.com/questions/28127165/how-to-convert-struct-to-u8
// except we have no std....
// unsafe fn any_as_u8_slice<T: Sized>(p: &T) -> &[u8] {
// ::std::slice::from_raw_parts(
// (p as *const T) as *const u8,
// ::std::mem::size_of::<T>(),
// )
// }
fn init_hrs() -> Result<HrsService, ()> {
// analog to hrs_init
let hrs_service = HrsService::new(true, true, true)?;
// analog to hrsapp_init...
if hrs_service.with_location {
let loc = HrsBodySensorLocation::Finger as u8;
hrs_service.body_sensor_location.as_ref().unwrap().set_value(&loc.to_le_bytes());
}
let mut hrs_measure = HrsMeasure {
value: 1,
energy_expended: 100,
aRR_interval_values: [200],
valid_intervals: 1,
flags: HrsHrmFlags::VALUE_FORMAT_UINT16 | HrsHrmFlags::SENSOR_CONTACTS_PRESENT | HrsHrmFlags::SENSOR_CONTACTS_SUPPORTED | HrsHrmFlags::ENERGY_EXPENDED_PRESENT | HrsHrmFlags::RR_INTERVAL_PRESENT,
};
// TODO We need to keep that hrs_measure around somewhere, and get our task to start processing periodic events for it....
let mut bytes:[u8;8] = [0; 8];
LittleEndian::write_u16(&mut bytes[0..2], hrs_measure.value);
//bytes[0..2] = *hrs_measure.value.to_le_bytes();
LittleEndian::write_u16(&mut bytes[2..4], hrs_measure.energy_expended);
LittleEndian::write_u16(&mut bytes[4..6], hrs_measure.aRR_interval_values[0]);
bytes[6] = hrs_measure.valid_intervals;
bytes[7] = hrs_measure.flags.bits();
hrs_service.heart_rate_measurement.set_value(&bytes);
return Ok(hrs_service);
}
fn | get_bd_addr | identifier_name |
|
main.rs | | {
rc.set_discoverable(&DISCOVERY_PARAMS)
.map_err(|_| nb::Error::Other(()))
})
.expect("Failed to enable discoverable mode again");
// perform_command(|rc| {
// rc.update_advertising_data(&ADVERTISING_DATA[..])
// .map_err(|_| nb::Error::Other(()))
// })
// .expect("Failed to update advertising data");
}
// FIXME - I want some sort of "list of event handlers" that can be plugged in here?
// ST style has a list of handlers, and stops at the first one to say "handled"
_ => handle_event(&event, &dis_service, &hrs_service),
}
}
}
}
fn handle_event(event: &Event<Stm32Wb5xEvent>, dis: &DisService, hrs: &HrsService) {
if let Event::Vendor(stm_event) = event {
match stm_event {
Stm32Wb5xEvent::AttReadPermitRequest(AttReadPermitRequest {
conn_handle,
attribute_handle,
offset,
}) => {
rprintln!("Allowing read on ch: {:?} ah: {:?}, offset: {}", conn_handle, attribute_handle, offset);
perform_command(|rc| rc.allow_read(*conn_handle))
.expect("Failed to allow read");
}
other => rprintln!("ignoring event {:?}", other),
}
}
}
#[exception]
unsafe fn DefaultHandler(irqn: i16) -> ! {
panic!("Unhandled IRQ: {}", irqn);
}
fn init_gap_and_gatt() -> Result<(), ()> {
let response = perform_command(|rc: &mut RadioCopro| {
rc.write_config_data(&ConfigData::public_address(get_bd_addr()).build())
})?;
rprintln!("Response to write_config_data: {:?}", response);
perform_command(|rc| {
rc.write_config_data(&ConfigData::random_address(get_random_addr()).build())
})?;
perform_command(|rc| rc.write_config_data(&ConfigData::identity_root(&get_irk()).build()))?;
perform_command(|rc| rc.write_config_data(&ConfigData::encryption_root(&get_erk()).build()))?;
perform_command(|rc| rc.set_tx_power_level(PowerLevel::ZerodBm))?;
perform_command(|rc| rc.init_gatt())?;
let return_params =
perform_command(|rc| rc.init_gap(Role::PERIPHERAL, false, BLE_GAP_DEVICE_NAME_LENGTH))?;
// let sh, dh, ah == return parameters... if it was of the type of GapInit ReturnParameters....?
let (service_handle, dev_name_handle, appearance_handle) = if let ReturnParameters::Vendor(
stm32wb55::event::command::ReturnParameters::GapInit(stm32wb55::event::command::GapInit {
service_handle,
dev_name_handle,
appearance_handle,
..
}),
) = return_params
{
(service_handle, dev_name_handle, appearance_handle)
} else {
rprintln!("Unexpected response to init_gap command");
return Err(());
};
perform_command(|rc| {
rc.update_characteristic_value(&UpdateCharacteristicValueParameters {
service_handle,
characteristic_handle: dev_name_handle,
offset: 0,
value: BT_NAME,
})
.map_err(|_| nb::Error::Other(()))
})?;
let appearance_characteristic = Characteristic {
service: service_handle,
characteristic: appearance_handle,
max_len: 4,
};
appearance_characteristic.set_value(&bt_appearances::HeartRateSensor::GENERIC.0.to_le_bytes());
return Ok(());
}
fn init_dis(di: &DeviceInformation) -> Result<DisService, ()> {
// homekit demo uses 24, st uses max 19, 2 per char, plus 1.
// using less than required here saves memory in the shared space I believe, so, ideally, this would
// check how many "real" values are configured in the "DisServiceInfo" blob....
let dis_service = DisService::new(svc_dis::uuid::DEVICE_INFORMATION_SERVICE, 19)?;
di.register(&dis_service);
// FIXME - neither of these should be in this function, it makes it hard to compose services...
// Disable scan response. not even sure we need this at all.
// perform_command(|rc: &mut RadioCopro| {
// rc.le_set_scan_response_data(&[])
// .map_err(|_| nb::Error::Other(()))
// })?;
return Ok(dis_service);
}
// https://stackoverflow.com/questions/28127165/how-to-convert-struct-to-u8
// except we have no std....
// unsafe fn any_as_u8_slice<T: Sized>(p: &T) -> &[u8] {
// ::std::slice::from_raw_parts(
// (p as *const T) as *const u8,
// ::std::mem::size_of::<T>(),
// )
// }
fn init_hrs() -> Result<HrsService, ()> {
// analog to hrs_init
let hrs_service = HrsService::new(true, true, true)?;
// analog to hrsapp_init...
if hrs_service.with_location {
let loc = HrsBodySensorLocation::Finger as u8;
hrs_service.body_sensor_location.as_ref().unwrap().set_value(&loc.to_le_bytes());
}
let mut hrs_measure = HrsMeasure {
value: 1,
energy_expended: 100,
aRR_interval_values: [200],
valid_intervals: 1,
flags: HrsHrmFlags::VALUE_FORMAT_UINT16 | HrsHrmFlags::SENSOR_CONTACTS_PRESENT | HrsHrmFlags::SENSOR_CONTACTS_SUPPORTED | HrsHrmFlags::ENERGY_EXPENDED_PRESENT | HrsHrmFlags::RR_INTERVAL_PRESENT,
};
// TODO We need to keep that hrs_measure around somewhere, and get our task to start processing periodic events for it....
let mut bytes:[u8;8] = [0; 8];
LittleEndian::write_u16(&mut bytes[0..2], hrs_measure.value);
//bytes[0..2] = *hrs_measure.value.to_le_bytes();
LittleEndian::write_u16(&mut bytes[2..4], hrs_measure.energy_expended);
LittleEndian::write_u16(&mut bytes[4..6], hrs_measure.aRR_interval_values[0]);
bytes[6] = hrs_measure.valid_intervals;
bytes[7] = hrs_measure.flags.bits();
hrs_service.heart_rate_measurement.set_value(&bytes);
return Ok(hrs_service);
}
fn get_bd_addr() -> BdAddr {
let mut bytes = [0u8; 6];
let lhci_info = LhciC1DeviceInformationCcrp::new();
bytes[0] = (lhci_info.uid64 & 0xff) as u8;
bytes[1] = ((lhci_info.uid64 >> 8) & 0xff) as u8;
bytes[2] = ((lhci_info.uid64 >> 16) & 0xff) as u8;
bytes[3] = lhci_info.device_type_id;
bytes[4] = (lhci_info.st_company_id & 0xff) as u8;
bytes[5] = (lhci_info.st_company_id >> 8 & 0xff) as u8;
BdAddr(bytes)
}
fn get_random_addr() -> BdAddr {
let mut bytes = [0u8; 6];
let lhci_info = LhciC1DeviceInformationCcrp::new();
bytes[0] = (lhci_info.uid64 & 0xff) as u8;
bytes[1] = ((lhci_info.uid64 >> 8) & 0xff) as u8;
bytes[2] = ((lhci_info.uid64 >> 16) & 0xff) as u8;
bytes[3] = 0;
bytes[4] = 0x6E;
bytes[5] = 0xED;
BdAddr(bytes)
}
const BLE_CFG_IRK: [u8; 16] = [
0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
];
const BLE_CFG_ERK: [u8; 16] = [
0xfe, 0xdc, 0xba, 0x09, 0x87, 0x65, 0x43, 0x21, 0xfe, 0xdc, 0xba, 0x09, 0x87, 0x65, 0x43, 0x21,
];
fn get_irk() -> EncryptionKey {
EncryptionKey(BLE_CFG_IRK)
}
fn get_erk() -> EncryptionKey | {
EncryptionKey(BLE_CFG_ERK)
} | identifier_body |
|
main.rs | (x : &String) -> &str {
let bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &x[0..i];
}
}
&x[..]
}
fn second_word(x : &String) -> &str {
let mut bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' '{
let y = &x[i+1..];
bytes = y.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &y[0..i];
}
}
// Return this IF there were only two words.
return &y[..];
}
}
&x[..]
}
//__________________________________________________________________________________________________________ //
//--------------------------------------------------//
//*** GENERAL NOTES ***
//*** Info: Random Notes I found either important, ***
//*** hard to remember, funny, cool, or who knows ***
//*** why but I added it to this list ***
//--------------------------------------------------//
// Notes:
// 1. Variables immutable by default
// 2. Constants ALWAYS immutable, type must be annotated ex: const MAX_POINTS: u32 = 758000;
// 3. Scalar variables: Ints, floats, bools, and chars
// Length Signed Unsigned
// 8-bit i8 u8
// 16-bit i16 u16
// 32-bit i32 u32
// 64-bit i64 u64
// 128-bit i128 u128
// arch isize usize
// 4. Integer literals
// Number literals Example
// Decimal 98_222
// Hex 0xff
// Octal 0o77
// Binary 0b1111_0000
// Byte (u8 only) b'A'
// 5. Floats can be either f32 or f64 (32 and 64 bits respectively)...default is f64
//ex:
// fn main() {
// let x = 2.0; // f64
// let y: f32 = 3.0; // f32
//}
// 6. bools example
// fn main() {
// let t = true;
// let f: bool = false; // with explicit type annotation
// }
// 7. Tuples AND Arrays are fixed length.
// 8. Arrays allocate to stack instead of the heap, are used instead of vectors when you KNOW the num of elements wont need to change, like an array
// containing the days of the week.
// 9. Functions can be defined anywhere in the program.
// 10. In Rust you cannot do statements like: x = y = 6
// 11. *IMPORTANT* Expression lines (return something) do not end with a semicolon. Statement lines (return NOTHING) DO end with a semicolon.
// 12. Unlike other languages, conditions for if statements MUST be a bool.
// 13. *IMPORTANT* (Straight out of the rust book)
// To ensure memory safety, there’s one more detail to what happens in this situation in Rust. Instead of trying to copy the allocated memory, Rust considers s1 to no longer be valid and, therefore, Rust doesn’t need to free anything when s1 goes out of scope. Check out what happens when you try to use s1 after s2 is created; it won’t work:
// This code does not compile!
// let s1 = String::from("hello");
// let s2 = s1;
// println!("{}, world!", s1);
// You’ll get an error like this because Rust prevents you from using the invalidated reference:
// error[E0382]: use of moved value: `s1`
// --> src/main.rs:5:28
// |
// 3 | let s2 = s1;
// | -- value moved here
// 4 |
// 5 | println!("{}, world!", s1);
// | ^^ value used here after move
// |
// = note: move occurs because `s1` has type `std::string::String`, which does
// not implement the `Copy` trait
// 14. *FUTURE notes on copying, moving, and losing access to data
// 15. References are immutable by default just like variables, know you can have only ONE mutable reference to a piece of data in a particular SCOPE. (See the BORROWING sketch after this list.)
// 16.
// 17.
// 18.
// 19.
// 20.
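//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** BORROWING (see note 15) ***
// Info: quick sketch: only ONE mutable reference to a value in a given scope
//---------------------------------------------//
// fn main() {
//     let mut s = String::from("hello");
//     let r1 = &mut s;
//     // let r2 = &mut s; // ERROR: cannot borrow `s` as mutable more than once at a time
//     r1.push_str(" world");
//     println!("{}", r1);
// }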
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** INFINITE loop, unless x is ever >= 10 ***
//---------------------------------------------//
// fn main() {
// let mut x = 0;
// loop {
// x += 1;
// println!("Value of x is {}", x);
// if x >= 10 {
// break;
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** WHILE loop, while n <= 50, also only prints multiples of 5 ***
//---------------------------------------------//
// fn main() {
// let mut n = 1;
// while n <= 50 {
// if n % 5 == 0 {
// println!("n is {}", n);
// }
// n += 1;
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** FOR loop, a few options for iterating ***
//---------------------------------------------//
// fn main() {
// // makes a range from 0-129, nums is of type range
// let nums = 0..130;
// //must have an iterator
// for i in nums {
// println!("Num is {}", i );
// }
// // can use a vector
// let fruits = vec!["Watermelon", "Apple", "Orange"];
// //must have an iterator, calls iterator method of fruits vector
// for a in fruits.iter() {
// println!("Fruit type is: {}", a );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ENUM def && MATCH usage ***
//---------------------------------------------//
// enum cardinal_direction {
// North,
// South,
// East,
// West
// }
// fn main() {
// let player_cardinal:cardinal_direction = cardinal_direction:: North;
// //like a switch statement
// match player_cardinal {
// cardinal_direction::North => println!("Heading North!"),
// cardinal_direction::South => println!("Heading South!"),
// cardinal_direction::East => println!("Heading East!"),
// cardinal_direction::West => println!("Heading West!"),
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** CONST definition ***
//---------------------------------------------//
// const MAX_NUM: u8 = 20;
//
// fn main() {
// for n in 1..MAX_NUM {
// print!("{}", n );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** TUPLES ***
//---------------------------------------------//
// fn main() {
// // creating a tuple
// // NOTE: CAN have nested tuples
// let johnshon = (5, "ch" , 20.5 , 23);
// // to be more specific...
// let tup: (i32, f64, u8) = (500, 6.4, 1);
// println!("{}", johnshon.2 );
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ARRAYS ***
// Info: Indexed the same as C based langs
//---------------------------------------------//
// fn main() {
// let a = [1, 2, 3, 4, 5];
// // the 5 indicates the array size
// let a: [i32; 5] = [1, 2, 3, 4, 5];
// }
//
//__________________________________________________________________________________________________________ //
// * ALSO *
// Info: This shows a pattern to turn the tup into those 3 separate variables, also demonstrates how to index a tuple
// fn main() {
// let tup = (500, 6.4, 1);
//
// let (x, y, z) = tup;
//
// println!("The value of y is: {}", y);
//
// let x: (i32, f64, u8) = (500, 6.4, 1);
//
// let five_hundred = x.0;
//
// let six_point_four = x.1;
//
// let one = x.2;
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Calling Functions ***
// Info: You have to declare | first_word | identifier_name |
|
main.rs |
fn second_word(x : &String) -> &str {
let mut bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' '{
let y = &x[i+1..];
bytes = y.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &y[0..i];
}
}
// Return this IF there were only two words.
return &y[..];
}
}
&x[..]
}
//__________________________________________________________________________________________________________ //
//--------------------------------------------------//
//*** GENERAL NOTES ***
//*** Info: Random Notes I found either important, ***
//*** hard to remember, funny, cool, or who knows ***
//*** why but I added it to this list ***
//--------------------------------------------------//
// Notes:
// 1. Variables immutable by default
// 2. Constants ALWAYS immutable, type must be annotated ex: const MAX_POINTS: u32 = 758000;
// 3. Scalar variables: Ints, floats, bools, and chars
// Length Signed Unsigned
// 8-bit i8 u8
// 16-bit i16 u16
// 32-bit i32 u32
// 64-bit i64 u64
// 128-bit i128 u128
// arch isize usize
// 4. Integer literals
// Number literals Example
// Decimal 98_222
// Hex 0xff
// Octal 0o77
// Binary 0b1111_0000
// Byte (u8 only) b'A'
// 5. Floats can be either f32 or f64 (32 and 64 bits respectively)...default is f64
//ex:
// fn main() {
// let x = 2.0; // f64
// let y: f32 = 3.0; // f32
//}
// 6. bools example
// fn main() {
// let t = true;
// let f: bool = false; // with explicit type annotation
// }
// 7. Tuples AND Arrays are fixed length.
// 8. Arrays allocate to stack instead of the heap, are used instead of vectors when you KNOW the num of elements wont need to change, like an array
// containing the days of the week.
// 9. Functions can be defined anywhere in the program.
// 10. In Rust you cannot do statements like: x = y = 6
// 11. *IMPORTANT* Expression lines (return something) do not end with a semicolon. Statement lines (return NOTHING) DO end with a semicolon. (See the EXPRESSIONS vs STATEMENTS sketch after this list.)
// 12. Unlike other languages, conditions for if statements MUST be a bool.
// 13. *IMPORTANT* (Straight out of the rust book)
// To ensure memory safety, there’s one more detail to what happens in this situation in Rust. Instead of trying to copy the allocated memory, Rust considers s1 to no longer be valid and, therefore, Rust doesn’t need to free anything when s1 goes out of scope. Check out what happens when you try to use s1 after s2 is created; it won’t work:
// This code does not compile!
// let s1 = String::from("hello");
// let s2 = s1;
// println!("{}, world!", s1);
// You’ll get an error like this because Rust prevents you from using the invalidated reference:
// error[E0382]: use of moved value: `s1`
// --> src/main.rs:5:28
// |
// 3 | let s2 = s1;
// | -- value moved here
// 4 |
// 5 | println!("{}, world!", s1);
// | ^^ value used here after move
// |
// = note: move occurs because `s1` has type `std::string::String`, which does
// not implement the `Copy` trait
// 14. *FUTURE notes on copying, moving, and losing access to data
// 15. References are immutable by default just like variables, know you can have only ONE mutable reference to a piece of data in a particular SCOPE.
// 16.
// 17.
// 18.
// 19.
// 20.
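//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** EXPRESSIONS vs STATEMENTS (see note 11) ***
// Info: quick sketch: the last expression in a block has no semicolon and becomes the block's value
//---------------------------------------------//
// fn main() {
//     let y = {
//         let x = 3;
//         x + 1 // no semicolon: this expression is the value of the block
//     };
//     // writing `x + 1;` instead would make it a statement and the block would evaluate to ()
//     println!("The value of y is: {}", y);
// }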
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** INFINITE loop, unless x is ever >= 10 ***
//---------------------------------------------//
// fn main() {
// let mut x = 0;
// loop {
// x += 1;
// println!("Value of x is {}", x);
// if x >= 10 {
// break;
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** WHILE loop, while n <= 50, also only prints multiples of 5 ***
//---------------------------------------------//
// fn main() {
// let mut n = 1;
// while n <= 50 {
// if n % 5 == 0 {
// println!("n is {}", n);
// }
// n += 1;
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** FOR loop, a few options for iterating ***
//---------------------------------------------//
// fn main() {
// // makes a range from 0-129, nums is of type range
// let nums = 0..130;
// //must have an iterator
// for i in nums {
// println!("Num is {}", i );
// }
// // can use a vector
// let fruits = vec!["Watermelon", "Apple", "Orange"];
// //must have an iterator, calls iterator method of fruits vector
// for a in fruits.iter() {
// println!("Fruit type is: {}", a );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ENUM def && MATCH usage ***
//---------------------------------------------//
// enum cardinal_direction {
// North,
// South,
// East,
// West
// }
// fn main() {
// let player_cardinal:cardinal_direction = cardinal_direction:: North;
// //like a switch statement
// match player_cardinal {
// cardinal_direction::North => println!("Heading North!"),
// cardinal_direction::South => println!("Heading South!"),
// cardinal_direction::East => println!("Heading East!"),
// cardinal_direction::West => println!("Heading West!"),
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** CONST definition ***
//---------------------------------------------//
// const MAX_NUM: u8 = 20;
//
// fn main() {
// for n in 1..MAX_NUM {
// print!("{}", n );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** TUPLES ***
//---------------------------------------------//
// fn main() {
// // creating a tuple
// // NOTE: CAN have nested tuples
// let johnshon = (5, "ch" , 20.5 , 23);
// // to be more specific...
// let tup: (i32, f64, u8) = (500, 6.4, 1);
// println!("{}", johnshon.2 );
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ARRAYS ***
// Info: Indexed the same as C based langs
//---------------------------------------------//
// fn main() {
// let a = [1, 2, 3, 4, 5];
// // the 5 indicates the array size
// let a: [i32; 5] = [1, 2, 3, 4, 5];
// }
//
//__________________________________________________________________________________________________________ //
// * ALSO *
// Info: This shows a pattern to turn the tup into those 3 separate variables, also demonstrates how to index a tuple
// fn main() {
// let tup = (500, 6.4, 1);
//
// let (x, y, z) = tup;
//
// println!("The value of y is: {}", y);
//
// let x: (i32, f64, u8) = (500, 6.4, 1);
//
// let five_hundred = x.0;
//
// let six_point_four = x.1;
//
// let one = x.2;
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Calling Functions ***
// Info: You have to declare the type of each parameter
//--------------------------------------------- | {
let bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &x[0..i];
}
}
&x[..]
} | identifier_body |
|
main.rs | &x[..]
}
fn second_word(x : &String) -> &str {
let mut bytes = x.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' '{
let y = &x[i+1..];
bytes = y.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &y[0..i];
}
}
// Return this IF there were only two words.
return &y[..];
}
}
&x[..]
}
//__________________________________________________________________________________________________________ //
//--------------------------------------------------//
//*** GENERAL NOTES ***
//*** Info: Random Notes I found either important, ***
//*** hard to remember, funny, cool, or who knows ***
//*** why but I added it to this list ***
//--------------------------------------------------//
// Notes:
// 1. Variables immutable by default
// 2. Constants ALWAYS immutable, type must be annotated ex: const MAX_POINTS: u32 = 758000;
// 3. Scalar variables: Ints, floats, bools, and chars
// Length Signed Unsigned
// 8-bit i8 u8
// 16-bit i16 u16
// 32-bit i32 u32
// 64-bit i64 u64
// 128-bit i128 u128
// arch isize usize
// 4. Integer literals
// Number literals Example
// Decimal 98_222
// Hex 0xff
// Octal 0o77
// Binary 0b1111_0000
// Byte (u8 only) b'A'
// 5. Floats can be either f32 or f64 (32 and 64 bits respectively)...default is f64
//ex:
// fn main() {
// let x = 2.0; // f64
// let y: f32 = 3.0; // f32
//}
// 6. bools example
// fn main() {
// let t = true;
// let f: bool = false; // with explicit type annotation
// }
// 7. Tuples AND Arrays are fixed length.
// 8. Arrays allocate to stack instead of the heap, are used instead of vectors when you KNOW the num of elements wont need to change, like an array
// containing the days of the week.
// 9. Functions can be defined anywhere in the program.
// 10. In Rust you cannot do statements like: x = y = 6
// 11. *IMPORTANT* Expression lines (return something) do not end with a semicolon. Statement lines (return NOTHING) DO end with a semicolon.
// 12. Unlike other languages, conditions for if statements MUST be a bool.
// 13. *IMPORTANT* (Straight out of the rust book)
// To ensure memory safety, there’s one more detail to what happens in this situation in Rust. Instead of trying to copy the allocated memory, Rust considers s1 to no longer be valid and, therefore, Rust doesn’t need to free anything when s1 goes out of scope. Check out what happens when you try to use s1 after s2 is created; it won’t work:
// This code does not compile!
// let s1 = String::from("hello");
// let s2 = s1;
// println!("{}, world!", s1);
// You’ll get an error like this because Rust prevents you from using the invalidated reference:
// error[E0382]: use of moved value: `s1`
// --> src/main.rs:5:28
// |
// 3 | let s2 = s1;
// | -- value moved here
// 4 |
// 5 | println!("{}, world!", s1);
// | ^^ value used here after move
// |
// = note: move occurs because `s1` has type `std::string::String`, which does
// not implement the `Copy` trait
// 14. *FUTURE notes on copying, moving, and losing access to data (see the MOVE vs CLONE sketch after this list)
// 15. References are immutable by default just like variables, know you can have only ONE mutable reference to a piece of data in a particular SCOPE.
// 16.
// 17.
// 18.
// 19.
// 20.
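//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** MOVE vs CLONE (see notes 13 and 14) ***
// Info: quick sketch: clone() keeps the original usable, a plain assignment of a String moves it
//---------------------------------------------//
// fn main() {
//     let s1 = String::from("hello");
//     let s2 = s1.clone(); // deep copy, s1 is still valid
//     println!("{} {}", s1, s2);
//     let s3 = s1; // move, s1 can no longer be used from here on
//     // println!("{}", s1); // ERROR: borrow of moved value: `s1`
//     println!("{}", s3);
// }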
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** INFINITE loop, unless x is ever >= 10 ***
//---------------------------------------------//
// fn main() {
// let mut x = 0;
// loop {
// x += 1;
// println!("Value of x is {}", x);
// if x >= 10 {
// break;
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** WHILE loop, while n <= 50, also only prints multiples of 5 ***
//---------------------------------------------//
// fn main() {
// let mut n = 1;
// while n <= 50 {
// if n % 5 == 0 {
// println!("n is {}", n);
// }
// n += 1;
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** FOR loop, a few options for iterating ***
//---------------------------------------------//
// fn main() {
// // makes a range from 0-129, nums is of type range
// let nums = 0..130;
// //must have an iterator
// for i in nums {
// println!("Num is {}", i );
// }
// // can use a vector
// let fruits = vec!["Watermelon", "Apple", "Orange"];
// //must have an iterator, calls iterator method of fruits vector
// for a in fruits.iter() {
// println!("Fruit type is: {}", a );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ENUM def && MATCH usage ***
//---------------------------------------------//
// enum cardinal_direction {
// North,
// South,
// East,
// West
// }
// fn main() {
// let player_cardinal:cardinal_direction = cardinal_direction:: North;
// //like a switch statement
// match player_cardinal {
// cardinal_direction::North => println!("Heading North!"),
// cardinal_direction::South => println!("Heading South!"),
// cardinal_direction::East => println!("Heading East!"),
// cardinal_direction::West => println!("Heading West!"),
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** CONST definition ***
//---------------------------------------------//
// const MAX_NUM: u8 = 20;
//
// fn main() {
// for n in 1..MAX_NUM {
// print!("{}", n );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** TUPLES ***
//---------------------------------------------//
// fn main() {
// // creating a tuple
// // NOTE: CAN have nested tuples
// let johnshon = (5, "ch" , 20.5 , 23);
// // to be more specific...
// let tup: (i32, f64, u8) = (500, 6.4, 1);
// println!("{}", johnshon.2 );
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ARRAYS ***
// Info: Indexed the same as C based langs
//---------------------------------------------//
// fn main() {
// let a = [1, 2, 3, 4, 5];
// // the 5 indicates the array size
// let a: [i32; 5] = [1, 2, 3, 4, 5];
// }
//
//__________________________________________________________________________________________________________ //
// * ALSO *
// Info: This shows a pattern to turn the tup into those 3 separate variables, also demonstrates how to index a tuple
// fn main() {
// let tup = (500, 6.4, 1);
//
// let (x, y, z) = tup;
//
// println!("The value of y is: {}", y);
//
// let x: (i32, f64, u8) = (500, 6.4, 1);
//
// let five_hundred = x.0;
//
// let six_point_four = x.1;
//
// let one = x.2;
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Calling Functions ***
// Info: You have to declare the type of each parameter
//---------------------------------------------//
// fn main() {
// let x = 5;
// print_number(x);
// if is_even(x){
// println!("Is even!");
// }
// else {
// println | random_line_split |
||
main.rs |
}
&x[..]
}
//__________________________________________________________________________________________________________ //
//--------------------------------------------------//
//*** GENERAL NOTES ***
//*** Info: Random Notes I found either important, ***
//*** hard to remember, funny, cool, or who knows ***
//*** why but I added it to this list ***
//--------------------------------------------------//
// Notes:
// 1. Variables immutable by default
// 2. Constants ALWAYS immutable, type must be annotated ex: const MAX_POINTS: u32 = 758000;
// 3. Scalar variables: Ints, floats, bools, and chars
// Length Signed Unsigned
// 8-bit i8 u8
// 16-bit i16 u16
// 32-bit i32 u32
// 64-bit i64 u64
// 128-bit i128 u128
// arch isize usize
// 4. Integer literals
// Number literals Example
// Decimal 98_222
// Hex 0xff
// Octal 0o77
// Binary 0b1111_0000
// Byte (u8 only) b'A'
// 5. Floats can be either f32 or f64 (32 and 64 bits respectively)...default is f64
//ex:
// fn main() {
// let x = 2.0; // f64
// let y: f32 = 3.0; // f32
//}
// 6. bools example
// fn main() {
// let t = true;
// let f: bool = false; // with explicit type annotation
// }
// 7. Tuples AND Arrays are fixed length.
// 8. Arrays allocate to stack instead of the heap, are used instead of vectors when you KNOW the num of elements wont need to change, like an array
// containing the days of the week.
// 9. Functions can be defined anywhere in the program.
// 10. In Rust you cannot do statements like: x = y = 6
// 11. *IMPORTANT* Expression lines (return something) do not end with a semicolon. Statement lines (return NOTHING) DO end with a semicolon.
// 12. Unlike other languages, conditions for if statements MUST be a bool. (See the IF sketch after this list.)
// 13. *IMPORTANT* (Straight out of the rust book)
// To ensure memory safety, there’s one more detail to what happens in this situation in Rust. Instead of trying to copy the allocated memory, Rust considers s1 to no longer be valid and, therefore, Rust doesn’t need to free anything when s1 goes out of scope. Check out what happens when you try to use s1 after s2 is created; it won’t work:
// This code does not compile!
// let s1 = String::from("hello");
// let s2 = s1;
// println!("{}, world!", s1);
// You’ll get an error like this because Rust prevents you from using the invalidated reference:
// error[E0382]: use of moved value: `s1`
// --> src/main.rs:5:28
// |
// 3 | let s2 = s1;
// | -- value moved here
// 4 |
// 5 | println!("{}, world!", s1);
// | ^^ value used here after move
// |
// = note: move occurs because `s1` has type `std::string::String`, which does
// not implement the `Copy` trait
// 14. *FUTURE notes on copying, moving, and losing access to data
// 15. References are immutable by default just like variables, know you can have only ONE mutable reference to a piece of data in a particular SCOPE.
// 16.
// 17.
// 18.
// 19.
// 20.
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** INFINITE loop, unless x is ever >= 10 ***
//---------------------------------------------//
// fn main() {
// let mut x = 0;
// loop {
// x += 1;
// println!("Value of x is {}", x);
// if x >= 10 {
// break;
// }
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** WHILE loop, while n <= 50, also only prints multiples of 5 ***
//---------------------------------------------//
// fn main() {
// let mut n = 1;
// while n <= 50 {
// if n % 5 == 0 {
// println!("n is {}", n);
// }
// n += 1;
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** FOR loop, a few options for iterating ***
//---------------------------------------------//
// fn main() {
// // makes a range from 0-129, nums is of type range
// let nums = 0..130;
// //must have an iterator
// for i in nums {
// println!("Num is {}", i );
// }
// // can use a vector
// let fruits = vec!["Watermelon", "Appple", "Orange"];
// //must have an iterator, calls iterator method of fruits vector
// for a in fruits.iter() {
// println!("Fruit type is: {}", a );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ENUM def && MATCH usage ***
//---------------------------------------------//
// enum cardinal_direction {
// North,
// South,
// East,
// West
// }
// fn main() {
// let player_cardinal:cardinal_direction = cardinal_direction:: North;
// //like a switch statement
// match player_cardinal {
// cardinal_direction::North => println!("Heading North!"),
// cardinal_direction::South => println!("Heading South!"),
// cardinal_direction::East => println!("Heading East!"),
// cardinal_direction::West => println!("Heading West!"),
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** CONST defintion ***
//---------------------------------------------//
// const MAX_NUM: u8 = 20;
//
// fn main() {
// for n in 1..MAX_NUM {
// print!("{}", n );
// }
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** TUPLES ***
//---------------------------------------------//
// fn main() {
// // creating a tuple
// // NOTE: CAN have nested tuples
// let johnshon = (5, "ch" , 20.5 , 23);
// // to be more specific...
// let tup: (i32, f64, u8) = (500, 6.4, 1);
// println!("{}", johnshon.2 );
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** ARRAYS ***
// Info: Indexed the same as C based langs
//---------------------------------------------//
// fn main() {
// let a = [1, 2, 3, 4, 5];
// // the 5 indicates the array size
// let a: [i32; 5] = [1, 2, 3, 4, 5];
// }
//
//__________________________________________________________________________________________________________ //
// * ALSO *
// Info: This shows a pattern to turn the tup into those 3 seperate variables, also demonstrates how to index a tuple
// fn main() {
// let tup = (500, 6.4, 1);
//
// let (x, y, z) = tup;
//
// println!("The value of y is: {}", y);
//
// let x: (i32, f64, u8) = (500, 6.4, 1);
//
// let five_hundred = x.0;
//
// let six_point_four = x.1;
//
// let one = x.2;
// }
//__________________________________________________________________________________________________________ //
//---------------------------------------------//
//*** Calling Functions ***
// Info: Youhave to declare the type of each parameter
//---------------------------------------------//
// fn main() {
// let x = 5;
// print_number(x);
// if is_even(x){
// println!("Is even!");
// }
// else {
// println!("Is odd!", );
// }
// }
// fn print_number(num:u32){
// println!("number is {}", num);
// }
// // fn accepts u32 num and "returns" a bool | {
let y = &x[i+1..];
bytes = y.as_bytes();
for (i, &item) in bytes.iter().enumerate() {
if item == b' ' {
return &y[0..i];
}
}
// Return this IF there were only two words.
return &y[..];
} | conditional_block |
|
SocialSpiderAlgorithm.py | .exp((-distance(pa.s, pb.s))/(ra * sd))
# Calculate Standard_Deviation σ along each dimension
def standard_deviation():
pop = [spider.s for spider in spiders]
return np.sum(np.std(pop, axis=1)) / n
def f(a):
z = []
if Minimize_problem:
z.extend(a)
return eval(y)
elif Maximize_problem:
z.extend(-a)
return -eval(y)
# there is a array with 100 elements with one and zero,100*p elements with 0 , 100(1-p) with 1,0=false,1=true
# where p is the probability
def probability(p):
arr = np.array([0] * int(100 * p) + [1] * int(100 - 100 * p))
np.random.shuffle(arr)
rand = random.choice(arr)
if rand == 0:
return True
else:
return False
def show(generate_vibration):
for x in range(population):
print("")
print("spider" + str(x))
spiders[x].printout()
print("generate vibration = " + str(generate_vibration[x].intensity))
print("")
# if return true then it is out of bounds [a,b]
def out_of_bounds(position):
for x in range(len(position)):
if position[x] < bounds[x, 0] or position[x] > bounds[x, 1]:
return True
return False
def create_population_of_spiders():
for x in range(population):
s = np.zeros(n)
for x1 in range(n):
s[x1] = np.random.uniform(bounds[x1, 0], bounds[x1, 1])
vibration = Vibration(s, 0)
spiders.append(Spider(s, s, 0, vibration, 0, np.zeros(n)))
def initialization_graphics(graph):
global ax
if graph and n==2:
plt.close('all')
fig = plt.figure(figsize=(7, 7))
ax = plt.axes()
plt.xlim(bounds[0, 0]-1, bounds[0, 1]+1)
plt.ylim(bounds[1, 0]-1, bounds[1, 1]+1)
def graphics(X, Y):
if n == 2:
ax.scatter(X, Y, c='black')
plt.draw()
plt.pause(1)
ax.collections[0].remove()
def social_spider_algorithm(graph):
g | # Calculate the intensity of the vibrations V
# generated by all spiders and Select the strongest vibration
sd = standard_deviation()
for x in range(population):
max_vibration = Vibration(np.zeros(n), -1)
for t in range(population):
if x != t:
intensity = intensity_position_pa_position_pb(spiders[x], spiders[t], sd, generate_vibration[x])
if max_vibration.intensity < intensity:
max_vibration.set_position_and_intensity(spiders[t].s, intensity)
if max_vibration.intensity > spiders[x].vibration.intensity:
spiders[x].vibration.set_position_and_intensity(max_vibration.position, max_vibration.intensity)
spiders[x].cs = 0
else:
spiders[x].cs += 1
# change mask or not
if not probability(pc**spiders[x].cs):
for p in range(n):
if probability(pm):
spiders[x].mask[p] = 1
else:
spiders[x].mask[p] = 0
# In case all bits are zeros or ones
if n == np.count_nonzero(spiders[x].mask): # all ones
spiders[x].mask[random.randint(0, n - 1)] = 0
elif np.count_nonzero(spiders[x].mask) == 0: # all zeros
spiders[x].mask[random.randint(0, n - 1)] = 1
p_s_fo = np.array([]) # position is generated based on the mask for s
r = random.randint(0, population - 1)
for d in range(n):
if spiders[x].mask[d] == 0:
p_s_fo = np.append(p_s_fo, spiders[x].vibration.position[d])
elif spiders[x].mask[d] == 1:
p_s_fo = np.append(p_s_fo, generate_vibration[r].position[d])
# Calculate next position
R = np.random.uniform(0, 1, n)
next_position = spiders[x].s + (spiders[x].s - spiders[x].s_previous) * r + (p_s_fo - spiders[x].s) * R
spiders[x].s_previous = spiders[x].s
# Address any violated constraints.
if out_of_bounds(next_position):
rand_float = random.random() # random [0,1]
for t in range(n):
if next_position[t] > bounds[t, 1]:
next_position[t] = (bounds[t, 1] - spiders[x].s[t]) * rand_float
elif next_position[t] < bounds[t, 0]:
next_position[t] = (spiders[x].s[t] - bounds[t, 0]) * rand_float
spiders[x].s = next_position
if graph and n == 2:
graphics([atr.s[0] for atr in spiders], [atr.s[1] for atr in spiders])
number_of_iterations += 1
return "global minimize = " + str(minimize)+'\n'+"f(minimize) = " + str(f(minimize))
# Sphere function minimum = 0
def test_function_0():
global c, ra, pc, pm, y, n, population, bounds, lim
c = 1E-100 # where C is a small constant such fitness values are larger than C
set_ra = {1 / 10, 1 / 5, 1 / 4, 1 / 3, 1 / 2, 1, 2, 3, 4, 5}
ra = random.choice(tuple(set_ra))
set_pc_and_pm = {0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99}
pc = random.choice(tuple(set_pc_and_pm))
pm = random.choice(tuple(set_pc_and_pm))
print('\n'+"ra = " + str(ra) + '\n' + "pc = " + str(pc) + " pm = " + str(pm) + '\n')
y = "z[0]**2 + z[1]**2 + z[2]**2 + z[3]**2 + z[4]**2"
n = 5 # dimensions
# solution space or domain of definition [a , b] each dimensions
bounds = np.array([[-5000, 5000],
[-5000, 5000],
[-5000, 5000],
[-5000, 5000],
[-5000, 5000]])
population = 20
lim = 3000 # max steps of iterations
return
# Three-hump camel function minimum = 0
def test_function_1():
global c, ra, pc, pm, y, n, population, bounds, lim
c = 1E-100 # where C is a small constant such fitness values are larger than C
set_ra = {1 / 10, 1 / 5, 1 / 4, 1 / 3, 1 / 2, 1, 2, 3, 4, 5}
ra = random.choice(tuple(set_ra))
set_pc_and_pm = {0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99}
pc = random.choice(tuple(set_pc_and_pm))
pm = random.choice(tuple(set_pc_and_pm))
print('\n'+"ra = " + str(ra) + '\n' + | lobal spiders
spiders = []
create_population_of_spiders()
minimize = spiders[0].s
number_of_iterations = 0
initialization_graphics(graph)
# In the iteration phase
while number_of_iterations <= lim:
# Calculates the fitness , update the global optimum and generate vibrations
generate_vibration = []
for x in range(population):
spiders[x].fs = f(spiders[x].s)
if f(minimize) > spiders[x].fs:
minimize = spiders[x].s
generate_vibration.append(
Vibration(spiders[x].s, spiders[x].vibration.intensity_position_ps_position_ps(spiders[x].fs)))
# show(generate_vibration)
# print("minimize = " + str(minimize)+"f(minimize) = "+str(f(minimize)))
| identifier_body |
SocialSpiderAlgorithm.py | .exp((-distance(pa.s, pb.s))/(ra * sd))
# Calculate Standard_Deviation σ along each dimension
def standard_deviation():
pop = [spider.s for spider in spiders]
return np.sum(np.std(pop, axis=1)) / n
def f | a):
z = []
if Minimize_problem:
z.extend(a)
return eval(y)
elif Maximize_problem:
z.extend(-a)
return -eval(y)
# there is a array with 100 elements with one and zero,100*p elements with 0 , 100(1-p) with 1,0=false,1=true
# where p is the probability
def probability(p):
arr = np.array([0] * int(100 * p) + [1] * int(100 - 100 * p))
np.random.shuffle(arr)
rand = random.choice(arr)
if rand == 0:
return True
else:
return False
def show(generate_vibration):
for x in range(population):
print("")
print("spider" + str(x))
spiders[x].printout()
print("generate vibration = " + str(generate_vibration[x].intensity))
print("")
# if return true then it is out of bounds [a,b]
def out_of_bounds(position):
for x in range(len(position)):
if position[x] < bounds[x, 0] or position[x] > bounds[x, 1]:
return True
return False
def create_population_of_spiders():
for x in range(population):
s = np.zeros(n)
for x1 in range(n):
s[x1] = np.random.uniform(bounds[x1, 0], bounds[x1, 1])
vibration = Vibration(s, 0)
spiders.append(Spider(s, s, 0, vibration, 0, np.zeros(n)))
def initialization_graphics(graph):
global ax
if graph and n==2:
plt.close('all')
fig = plt.figure(figsize=(7, 7))
ax = plt.axes()
plt.xlim(bounds[0, 0]-1, bounds[0, 1]+1)
plt.ylim(bounds[1, 0]-1, bounds[1, 1]+1)
def graphics(X, Y):
if n == 2:
ax.scatter(X, Y, c='black')
plt.draw()
plt.pause(1)
ax.collections[0].remove()
def social_spider_algorithm(graph):
global spiders
spiders = []
create_population_of_spiders()
minimize = spiders[0].s
number_of_iterations = 0
initialization_graphics(graph)
# In the iteration phase
while number_of_iterations <= lim:
# Calculates the fitness , update the global optimum and generate vibrations
generate_vibration = []
for x in range(population):
spiders[x].fs = f(spiders[x].s)
if f(minimize) > spiders[x].fs:
minimize = spiders[x].s
generate_vibration.append(
Vibration(spiders[x].s, spiders[x].vibration.intensity_position_ps_position_ps(spiders[x].fs)))
# show(generate_vibration)
# print("minimize = " + str(minimize)+"f(minimize) = "+str(f(minimize)))
# Calculate the intensity of the vibrations V
# generated by all spiders and Select the strongest vibration
sd = standard_deviation()
for x in range(population):
max_vibration = Vibration(np.zeros(n), -1)
for t in range(population):
if x != t:
intensity = intensity_position_pa_position_pb(spiders[x], spiders[t], sd, generate_vibration[x])
if max_vibration.intensity < intensity:
max_vibration.set_position_and_intensity(spiders[t].s, intensity)
if max_vibration.intensity > spiders[x].vibration.intensity:
spiders[x].vibration.set_position_and_intensity(max_vibration.position, max_vibration.intensity)
spiders[x].cs = 0
else:
spiders[x].cs += 1
# change mask or not
if not probability(pc**spiders[x].cs):
for p in range(n):
if probability(pm):
spiders[x].mask[p] = 1
else:
spiders[x].mask[p] = 0
# In case all bits are zeros or ones
if n == np.count_nonzero(spiders[x].mask): # all ones
spiders[x].mask[random.randint(0, n - 1)] = 0
elif np.count_nonzero(spiders[x].mask) == 0: # all zeros
spiders[x].mask[random.randint(0, n - 1)] = 1
p_s_fo = np.array([]) # position is generated based on the mask for s
r = random.randint(0, population - 1)
for d in range(n):
if spiders[x].mask[d] == 0:
p_s_fo = np.append(p_s_fo, spiders[x].vibration.position[d])
elif spiders[x].mask[d] == 1:
p_s_fo = np.append(p_s_fo, generate_vibration[r].position[d])
# Calculate next position
R = np.random.uniform(0, 1, n)
next_position = spiders[x].s + (spiders[x].s - spiders[x].s_previous) * r + (p_s_fo - spiders[x].s) * R
spiders[x].s_previous = spiders[x].s
# Address any violated constraints.
if out_of_bounds(next_position):
rand_float = random.random() # random [0,1]
for t in range(n):
if next_position[t] > bounds[t, 1]:
next_position[t] = (bounds[t, 1] - spiders[x].s[t]) * rand_float
elif next_position[t] < bounds[t, 0]:
next_position[t] = (spiders[x].s[t] - bounds[t, 0]) * rand_float
spiders[x].s = next_position
if graph and n == 2:
graphics([atr.s[0] for atr in spiders], [atr.s[1] for atr in spiders])
number_of_iterations += 1
return "global minimize = " + str(minimize)+'\n'+"f(minimize) = " + str(f(minimize))
# Sphere function minimum = 0
def test_function_0():
global c, ra, pc, pm, y, n, population, bounds, lim
c = 1E-100 # where C is a small constant such fitness values are larger than C
set_ra = {1 / 10, 1 / 5, 1 / 4, 1 / 3, 1 / 2, 1, 2, 3, 4, 5}
ra = random.choice(tuple(set_ra))
set_pc_and_pm = {0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99}
pc = random.choice(tuple(set_pc_and_pm))
pm = random.choice(tuple(set_pc_and_pm))
print('\n'+"ra = " + str(ra) + '\n' + "pc = " + str(pc) + " pm = " + str(pm) + '\n')
y = "z[0]**2 + z[1]**2 + z[2]**2 + z[3]**2 + z[4]**2"
n = 5 # dimensions
# solution space or domain of definition [a , b] each dimensions
bounds = np.array([[-5000, 5000],
[-5000, 5000],
[-5000, 5000],
[-5000, 5000],
[-5000, 5000]])
population = 20
lim = 3000 # max steps of iterations
return
# Three-hump camel function minimum = 0
def test_function_1():
global c, ra, pc, pm, y, n, population, bounds, lim
c = 1E-100 # where C is a small constant such fitness values are larger than C
set_ra = {1 / 10, 1 / 5, 1 / 4, 1 / 3, 1 / 2, 1, 2, 3, 4, 5}
ra = random.choice(tuple(set_ra))
set_pc_and_pm = {0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99}
pc = random.choice(tuple(set_pc_and_pm))
pm = random.choice(tuple(set_pc_and_pm))
print('\n'+"ra = " + str(ra) + '\n' + | ( | identifier_name |
SocialSpiderAlgorithm.py | ("spider" + str(x))
spiders[x].printout()
print("generate vibration = " + str(generate_vibration[x].intensity))
print("")
# if return true then it is out of bounds [a,b]
def out_of_bounds(position):
for x in range(len(position)):
if position[x] < bounds[x, 0] or position[x] > bounds[x, 1]:
return True
return False
def create_population_of_spiders():
for x in range(population):
s = np.zeros(n)
for x1 in range(n):
s[x1] = np.random.uniform(bounds[x1, 0], bounds[x1, 1])
vibration = Vibration(s, 0)
spiders.append(Spider(s, s, 0, vibration, 0, np.zeros(n)))
def initialization_graphics(graph):
global ax
if graph and n==2:
plt.close('all')
fig = plt.figure(figsize=(7, 7))
ax = plt.axes()
plt.xlim(bounds[0, 0]-1, bounds[0, 1]+1)
plt.ylim(bounds[1, 0]-1, bounds[1, 1]+1)
def graphics(X, Y):
if n == 2:
ax.scatter(X, Y, c='black')
plt.draw()
plt.pause(1)
ax.collections[0].remove()
def social_spider_algorithm(graph):
global spiders
spiders = []
create_population_of_spiders()
minimize = spiders[0].s
number_of_iterations = 0
initialization_graphics(graph)
# In the iteration phase
while number_of_iterations <= lim:
# Calculates the fitness , update the global optimum and generate vibrations
generate_vibration = []
for x in range(population):
spiders[x].fs = f(spiders[x].s)
if f(minimize) > spiders[x].fs:
minimize = spiders[x].s
generate_vibration.append(
Vibration(spiders[x].s, spiders[x].vibration.intensity_position_ps_position_ps(spiders[x].fs)))
# show(generate_vibration)
# print("minimize = " + str(minimize)+"f(minimize) = "+str(f(minimize)))
# Calculate the intensity of the vibrations V
# generated by all spiders and Select the strongest vibration
sd = standard_deviation()
for x in range(population):
max_vibration = Vibration(np.zeros(n), -1)
for t in range(population):
if x != t:
intensity = intensity_position_pa_position_pb(spiders[x], spiders[t], sd, generate_vibration[x])
if max_vibration.intensity < intensity:
max_vibration.set_position_and_intensity(spiders[t].s, intensity)
if max_vibration.intensity > spiders[x].vibration.intensity:
spiders[x].vibration.set_position_and_intensity(max_vibration.position, max_vibration.intensity)
spiders[x].cs = 0
else:
spiders[x].cs += 1
# change mask or not
if not probability(pc**spiders[x].cs):
for p in range(n):
if probability(pm):
spiders[x].mask[p] = 1
else:
spiders[x].mask[p] = 0
# In case all bits are zeros or ones
if n == np.count_nonzero(spiders[x].mask): # all ones
spiders[x].mask[random.randint(0, n - 1)] = 0
elif np.count_nonzero(spiders[x].mask) == 0: # all zeros
spiders[x].mask[random.randint(0, n - 1)] = 1
p_s_fo = np.array([]) # position is generated based on the mask for s
r = random.randint(0, population - 1)
for d in range(n):
if spiders[x].mask[d] == 0:
p_s_fo = np.append(p_s_fo, spiders[x].vibration.position[d])
elif spiders[x].mask[d] == 1:
p_s_fo = np.append(p_s_fo, generate_vibration[r].position[d])
# Calculate next position
R = np.random.uniform(0, 1, n)
next_position = spiders[x].s + (spiders[x].s - spiders[x].s_previous) * r + (p_s_fo - spiders[x].s) * R
spiders[x].s_previous = spiders[x].s
# Address any violated constraints.
if out_of_bounds(next_position):
rand_float = random.random() # random [0,1]
for t in range(n):
if next_position[t] > bounds[t, 1]:
next_position[t] = (bounds[t, 1] - spiders[x].s[t]) * rand_float
elif next_position[t] < bounds[t, 0]:
next_position[t] = (spiders[x].s[t] - bounds[t, 0]) * rand_float
spiders[x].s = next_position
if graph and n == 2:
graphics([atr.s[0] for atr in spiders], [atr.s[1] for atr in spiders])
number_of_iterations += 1
return "global minimize = " + str(minimize)+'\n'+"f(minimize) = " + str(f(minimize))
# Sphere function minimum = 0
def test_function_0():
global c, ra, pc, pm, y, n, population, bounds, lim
c = 1E-100 # where C is a small constant such fitness values are larger than C
set_ra = {1 / 10, 1 / 5, 1 / 4, 1 / 3, 1 / 2, 1, 2, 3, 4, 5}
ra = random.choice(tuple(set_ra))
set_pc_and_pm = {0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99}
pc = random.choice(tuple(set_pc_and_pm))
pm = random.choice(tuple(set_pc_and_pm))
print('\n'+"ra = " + str(ra) + '\n' + "pc = " + str(pc) + " pm = " + str(pm) + '\n')
y = "z[0]**2 + z[1]**2 + z[2]**2 + z[3]**2 + z[4]**2"
n = 5 # dimensions
# solution space or domain of definition [a , b] each dimensions
bounds = np.array([[-5000, 5000],
[-5000, 5000],
[-5000, 5000],
[-5000, 5000],
[-5000, 5000]])
population = 20
lim = 3000 # max steps of iterations
return
# Three-hump camel function minimum = 0
def test_function_1():
global c, ra, pc, pm, y, n, population, bounds, lim
c = 1E-100 # where C is a small constant such fitness values are larger than C
set_ra = {1 / 10, 1 / 5, 1 / 4, 1 / 3, 1 / 2, 1, 2, 3, 4, 5}
ra = random.choice(tuple(set_ra))
set_pc_and_pm = {0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99}
pc = random.choice(tuple(set_pc_and_pm))
pm = random.choice(tuple(set_pc_and_pm))
print('\n'+"ra = " + str(ra) + '\n' + "pc = " + str(pc) + " pm = " + str(pm) + '\n')
y = "2*z[0]**2 - 1.05 * z[0]**4 + z[0]**6 /6 + z[0] * z[1] + z[1]**2"
n = 2
bounds = np.array([[-5, 5],
[-5, 5]])
population = 10
lim = 10 # max steps of iterations
return
# McCormick function minimum = -1.9133
def test_function_2():
global c, ra, pc, pm, y, n, population, bounds, lim
c = -100 # where C is a small constant such fitness values are larger than C
set_ra = {1 / 10, 1 / 5, 1 / 4, 1 / 3, 1 / 2, 1, 2, 3, 4, 5}
ra = random.choice(tuple(set_ra)) | random_line_split |
||
SocialSpiderAlgorithm.py | .exp((-distance(pa.s, pb.s))/(ra * sd))
# Calculate Standard_Deviation σ along each dimension
def standard_deviation():
pop = [spider.s for spider in spiders]
return np.sum(np.std(pop, axis=1)) / n
def f(a):
z = []
if Minimize_problem:
z.extend(a)
return eval(y)
elif Maximize_problem:
z.extend(-a)
return -eval(y)
# there is a array with 100 elements with one and zero,100*p elements with 0 , 100(1-p) with 1,0=false,1=true
# where p is the probability
def probability(p):
arr = np.array([0] * int(100 * p) + [1] * int(100 - 100 * p))
np.random.shuffle(arr)
rand = random.choice(arr)
if rand == 0:
r | else:
return False
def show(generate_vibration):
for x in range(population):
print("")
print("spider" + str(x))
spiders[x].printout()
print("generate vibration = " + str(generate_vibration[x].intensity))
print("")
# if return true then it is out of bounds [a,b]
def out_of_bounds(position):
for x in range(len(position)):
if position[x] < bounds[x, 0] or position[x] > bounds[x, 1]:
return True
return False
def create_population_of_spiders():
for x in range(population):
s = np.zeros(n)
for x1 in range(n):
s[x1] = np.random.uniform(bounds[x1, 0], bounds[x1, 1])
vibration = Vibration(s, 0)
spiders.append(Spider(s, s, 0, vibration, 0, np.zeros(n)))
def initialization_graphics(graph):
global ax
if graph and n==2:
plt.close('all')
fig = plt.figure(figsize=(7, 7))
ax = plt.axes()
plt.xlim(bounds[0, 0]-1, bounds[0, 1]+1)
plt.ylim(bounds[1, 0]-1, bounds[1, 1]+1)
def graphics(X, Y):
if n == 2:
ax.scatter(X, Y, c='black')
plt.draw()
plt.pause(1)
ax.collections[0].remove()
def social_spider_algorithm(graph):
global spiders
spiders = []
create_population_of_spiders()
minimize = spiders[0].s
number_of_iterations = 0
initialization_graphics(graph)
# In the iteration phase
while number_of_iterations <= lim:
# Calculates the fitness , update the global optimum and generate vibrations
generate_vibration = []
for x in range(population):
spiders[x].fs = f(spiders[x].s)
if f(minimize) > spiders[x].fs:
minimize = spiders[x].s
generate_vibration.append(
Vibration(spiders[x].s, spiders[x].vibration.intensity_position_ps_position_ps(spiders[x].fs)))
# show(generate_vibration)
# print("minimize = " + str(minimize)+"f(minimize) = "+str(f(minimize)))
# Calculate the intensity of the vibrations V
# generated by all spiders and Select the strongest vibration
sd = standard_deviation()
for x in range(population):
max_vibration = Vibration(np.zeros(n), -1)
for t in range(population):
if x != t:
intensity = intensity_position_pa_position_pb(spiders[x], spiders[t], sd, generate_vibration[x])
if max_vibration.intensity < intensity:
max_vibration.set_position_and_intensity(spiders[t].s, intensity)
if max_vibration.intensity > spiders[x].vibration.intensity:
spiders[x].vibration.set_position_and_intensity(max_vibration.position, max_vibration.intensity)
spiders[x].cs = 0
else:
spiders[x].cs += 1
# change mask or not
if not probability(pc**spiders[x].cs):
for p in range(n):
if probability(pm):
spiders[x].mask[p] = 1
else:
spiders[x].mask[p] = 0
# In case all bits are zeros or ones
if n == np.count_nonzero(spiders[x].mask): # all ones
spiders[x].mask[random.randint(0, n - 1)] = 0
elif np.count_nonzero(spiders[x].mask) == 0: # all zeros
spiders[x].mask[random.randint(0, n - 1)] = 1
p_s_fo = np.array([]) # position is generated based on the mask for s
r = random.randint(0, population - 1)
for d in range(n):
if spiders[x].mask[d] == 0:
p_s_fo = np.append(p_s_fo, spiders[x].vibration.position[d])
elif spiders[x].mask[d] == 1:
p_s_fo = np.append(p_s_fo, generate_vibration[r].position[d])
# Calculate next position
R = np.random.uniform(0, 1, n)
next_position = spiders[x].s + (spiders[x].s - spiders[x].s_previous) * r + (p_s_fo - spiders[x].s) * R
spiders[x].s_previous = spiders[x].s
# Address any violated constraints.
if out_of_bounds(next_position):
rand_float = random.random() # random [0,1]
for t in range(n):
if next_position[t] > bounds[t, 1]:
next_position[t] = (bounds[t, 1] - spiders[x].s[t]) * rand_float
elif next_position[t] < bounds[t, 0]:
next_position[t] = (spiders[x].s[t] - bounds[t, 0]) * rand_float
spiders[x].s = next_position
if graph and n == 2:
graphics([atr.s[0] for atr in spiders], [atr.s[1] for atr in spiders])
number_of_iterations += 1
return "global minimize = " + str(minimize)+'\n'+"f(minimize) = " + str(f(minimize))
# Sphere function minimum = 0
def test_function_0():
global c, ra, pc, pm, y, n, population, bounds, lim
c = 1E-100 # where C is a small constant such fitness values are larger than C
set_ra = {1 / 10, 1 / 5, 1 / 4, 1 / 3, 1 / 2, 1, 2, 3, 4, 5}
ra = random.choice(tuple(set_ra))
set_pc_and_pm = {0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99}
pc = random.choice(tuple(set_pc_and_pm))
pm = random.choice(tuple(set_pc_and_pm))
print('\n'+"ra = " + str(ra) + '\n' + "pc = " + str(pc) + " pm = " + str(pm) + '\n')
y = "z[0]**2 + z[1]**2 + z[2]**2 + z[3]**2 + z[4]**2"
n = 5 # dimensions
# solution space or domain of definition [a , b] each dimensions
bounds = np.array([[-5000, 5000],
[-5000, 5000],
[-5000, 5000],
[-5000, 5000],
[-5000, 5000]])
population = 20
lim = 3000 # max steps of iterations
return
# Three-hump camel function minimum = 0
def test_function_1():
global c, ra, pc, pm, y, n, population, bounds, lim
c = 1E-100 # where C is a small constant such fitness values are larger than C
set_ra = {1 / 10, 1 / 5, 1 / 4, 1 / 3, 1 / 2, 1, 2, 3, 4, 5}
ra = random.choice(tuple(set_ra))
set_pc_and_pm = {0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99}
pc = random.choice(tuple(set_pc_and_pm))
pm = random.choice(tuple(set_pc_and_pm))
print('\n'+"ra = " + str(ra) + '\n' + | eturn True
| conditional_block |
__init__.py | return group_config
def init(self):
"""
called after the plugin is initialized, plugin may define this for any
other initialization code
"""
pass
def on_start(self):
"""
called when the daemon is starting
"""
pass
def on_stop(self):
"""
called when the daemon is stopping
"""
pass
def new_message(self):
"""
creates and returns new message `dict`, setting `type`, `source`, `ts`, `data`
`data` is initialized to an empty array
**Returns**
message (`dict`)
"""
msg = {}
msg['data'] = []
msg['type'] = self.plugin_type
msg['source'] = self.name
msg['ts'] = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()
return msg
def popen(self, args, **kwargs):
"""
creates a subprocess with passed args
**Returns**
Popen instance
"""
self.log.debug("popen %s", ' '.join(args))
return vaping.io.subprocess.Popen(args, **kwargs)
@property
def log(self):
"""
logger instance for plugin type
"""
if not self._logger:
self._logger = logging.getLogger('vaping.plugins.' + self.plugin_type)
return self._logger
def __init__(self, config, ctx):
"""
**Arguments**
- config (`dict`)
- ctx: vaping context
"""
if hasattr(self, 'default_config'):
self.config = munge.util.recursive_update(copy.deepcopy(self.default_config), copy.deepcopy(config))
else:
self.config = config
# set for pluginmgr
self.pluginmgr_config = self.config
self.vaping = ctx
self.name = self.config.get("name")
self._logger = None
super(PluginBase, self).__init__()
self.init()
def _run(self):
self.on_start()
class ProbeBase(with_metaclass(abc.ABCMeta, PluginBase)):
"""
Base class for probe plugin, used for getting data
expects method probe() to be defined
"""
def init(self):
pass
@abc.abstractmethod
def probe(self):
"""
probe for data, return a list of dicts
"""
def __init__(self, config, ctx, emit=None):
if emit:
self._emit = [emit]
else:
self._emit = []
self._emit_queue = vaping.io.Queue()
super(ProbeBase, self).__init__(config, ctx)
def _run(self):
super(ProbeBase, self)._run()
self.run_level = 1
while self.run_level:
self.send_emission()
msg = self.probe()
if msg:
self.queue_emission(msg)
else:
self.log.debug("probe returned no data")
def queue_emission(self, msg):
"""
queue an emission of a message for all output plugins
**Arguments**
- msg (`dict`): dict containing `type`, `source`, `ts` and `data` keys
"""
if not msg:
return
for _emitter in self._emit:
if not hasattr(_emitter, 'emit'):
continue
def emit(emitter=_emitter):
self.log.debug("emit to {}".format(emitter.name))
emitter.emit(msg)
self.log.debug("queue emission to {} ({})".format(
_emitter.name, self._emit_queue.qsize()))
self._emit_queue.put(emit)
def send_emission(self):
"""
emit and remove the first emission in the queue
"""
if self._emit_queue.empty():
return
emit = self._emit_queue.get()
emit()
def emit_all(self):
"""
emit and remove all emissions in the queue
"""
while not self._emit_queue.empty():
self.send_emission()
class TimedProbe(ProbeBase):
"""
Probe class that calls probe every config defined interval
"""
def __init__(self, config, ctx, emit=None):
super(TimedProbe, self).__init__(config, ctx, emit)
if 'interval' not in self.pluginmgr_config:
raise ValueError('interval not set in config')
self.interval = parse_interval(self.pluginmgr_config['interval'])
self.run_level = 0
def _run(self):
self.run_level = 1
while self.run_level:
start = datetime.datetime.now()
# since the TimedProbe will sleep between cycles
# we need to emit all queued emissions each cycle
self.emit_all()
msg = self.probe()
if msg:
self.queue_emission(msg)
else:
self.log.debug("probe returned no data")
done = datetime.datetime.now()
elapsed = done - start
if elapsed.total_seconds() > self.interval:
self.log.warning("probe time exceeded interval")
else:
sleeptime = datetime.timedelta(seconds=self.interval) - elapsed
vaping.io.sleep(sleeptime.total_seconds())
class FileProbe(ProbeBase):
"""
Probes a file and emits everytime a new line is read
# Config
- path (`str`): path to file
- backlog (`int=0`): number of bytes to read from backlog
- max_lines (`int=1000`): maximum number of lines to read during probe
# Instanced Attributes
- path (`str`): path to file
- backlog (`int`): number of bytes to read from backlog
- max_lines (`int`): maximum number of liens to read during probe
- fh (`filehandler`): file handler for opened file (only available if `path` is set)
"""
def __init__(self, config, ctx, emit=None):
super(FileProbe, self).__init__(config, ctx, emit)
self.path = self.pluginmgr_config.get("path")
self.run_level = 0
self.backlog = int(self.pluginmgr_config.get("backlog",0))
self.max_lines = int(self.pluginmgr_config.get("max_lines",1000))
if self.path:
self.fh = open(self.path, "r")
self.fh.seek(0,2)
if self.backlog:
try:
self.fh.seek(self.fh.tell() - self.backlog, os.SEEK_SET)
except ValueError as exc:
if str(exc).find("negative seek position") > -1:
self.fh.seek(0)
else:
raise
def _run(self):
self.run_level = 1
while self.run_level:
self.send_emission()
for msg in self.probe():
self.queue_emission(msg)
vaping.io.sleep(0.1)
def validate_file_handler(self):
"""
Here we validate that our filehandler is pointing
to an existing file. | If it doesnt, because file has been deleted, we close
the filehander and try to reopen
"""
if self.fh.closed:
try:
self.fh = open(self.path, "r")
self.fh.seek(0, 2)
except OSError as err:
logging.error("Could not reopen file: {}".format(err))
return False
open_stat = os.fstat(self.fh.fileno())
try:
file_stat = os.stat(self.path)
except OSError as err:
logging.error("Could not stat file: {}".format(err))
return False
if open_stat != file_stat:
self.log
self.fh.close()
return False
return True
def probe(self):
"""
Probe the file for new lines
"""
# make sure the filehandler is still valid
# (e.g. file stat hasnt changed, file exists etc.)
if not self.validate_file_handler():
return []
messages = []
# read any new lines and push them onto the stack
for line in self.fh.readlines(self.max_lines):
data = {"path":self.path}
msg = self.new_message()
# process the line - this is where parsing happens
parsed = self.process_line(line, data)
if not parsed:
continue
data.update(parsed)
# process the probe - this is where data assignment
# happens
data = self.process_probe(data)
msg["data"] = [data]
messages.append(msg)
# process all new messages before returning them
# for emission
messages = self.process_messages(messages)
return messages
def process_line(self, line, data):
""" override this - parse your line in here """
return data
def process_probe(self, data):
""" override this - assign your data values here """
return data
def process_messages(self, messages):
"""
override this - process your messages before they
are emitted
"""
return messages
class EmitBase(with_metaclass(abc.ABCMeta, PluginBase)):
"""
Base class for emit plugins, used for sending data
expects method emit() to be defined
"""
def __init__(self, config, ctx):
super(EmitBase, self).__init__(config, ctx)
@abc.abstractmethod
def emit(self, message):
""" accept message to emit """
class TimeSeriesDB(EmitBase):
"""
Base interface for times | random_line_split |
|
__init__.py | """
logger instance for plugin type
"""
if not self._logger:
self._logger = logging.getLogger('vaping.plugins.' + self.plugin_type)
return self._logger
def __init__(self, config, ctx):
"""
**Arguments**
- config (`dict`)
- ctx: vaping context
"""
if hasattr(self, 'default_config'):
self.config = munge.util.recursive_update(copy.deepcopy(self.default_config), copy.deepcopy(config))
else:
self.config = config
# set for pluginmgr
self.pluginmgr_config = self.config
self.vaping = ctx
self.name = self.config.get("name")
self._logger = None
super(PluginBase, self).__init__()
self.init()
def _run(self):
self.on_start()
class ProbeBase(with_metaclass(abc.ABCMeta, PluginBase)):
"""
Base class for probe plugin, used for getting data
expects method probe() to be defined
"""
def init(self):
pass
@abc.abstractmethod
def probe(self):
"""
probe for data, return a list of dicts
"""
def __init__(self, config, ctx, emit=None):
if emit:
self._emit = [emit]
else:
self._emit = []
self._emit_queue = vaping.io.Queue()
super(ProbeBase, self).__init__(config, ctx)
def _run(self):
super(ProbeBase, self)._run()
self.run_level = 1
while self.run_level:
self.send_emission()
msg = self.probe()
if msg:
self.queue_emission(msg)
else:
self.log.debug("probe returned no data")
def queue_emission(self, msg):
"""
queue an emission of a message for all output plugins
**Arguments**
- msg (`dict`): dict containing `type`, `source`, `ts` and `data` keys
"""
if not msg:
return
for _emitter in self._emit:
if not hasattr(_emitter, 'emit'):
continue
def emit(emitter=_emitter):
self.log.debug("emit to {}".format(emitter.name))
emitter.emit(msg)
self.log.debug("queue emission to {} ({})".format(
_emitter.name, self._emit_queue.qsize()))
self._emit_queue.put(emit)
def send_emission(self):
"""
emit and remove the first emission in the queue
"""
if self._emit_queue.empty():
return
emit = self._emit_queue.get()
emit()
def emit_all(self):
"""
emit and remove all emissions in the queue
"""
while not self._emit_queue.empty():
self.send_emission()
class TimedProbe(ProbeBase):
"""
Probe class that calls probe every config defined interval
"""
def __init__(self, config, ctx, emit=None):
super(TimedProbe, self).__init__(config, ctx, emit)
if 'interval' not in self.pluginmgr_config:
raise ValueError('interval not set in config')
self.interval = parse_interval(self.pluginmgr_config['interval'])
self.run_level = 0
def _run(self):
self.run_level = 1
while self.run_level:
start = datetime.datetime.now()
# since the TimedProbe will sleep between cycles
# we need to emit all queued emissions each cycle
self.emit_all()
msg = self.probe()
if msg:
self.queue_emission(msg)
else:
self.log.debug("probe returned no data")
done = datetime.datetime.now()
elapsed = done - start
if elapsed.total_seconds() > self.interval:
self.log.warning("probe time exceeded interval")
else:
sleeptime = datetime.timedelta(seconds=self.interval) - elapsed
vaping.io.sleep(sleeptime.total_seconds())
class FileProbe(ProbeBase):
"""
Probes a file and emits everytime a new line is read
# Config
- path (`str`): path to file
- backlog (`int=0`): number of bytes to read from backlog
- max_lines (`int=1000`): maximum number of lines to read during probe
# Instanced Attributes
- path (`str`): path to file
- backlog (`int`): number of bytes to read from backlog
- max_lines (`int`): maximum number of liens to read during probe
- fh (`filehandler`): file handler for opened file (only available if `path` is set)
"""
def __init__(self, config, ctx, emit=None):
super(FileProbe, self).__init__(config, ctx, emit)
self.path = self.pluginmgr_config.get("path")
self.run_level = 0
self.backlog = int(self.pluginmgr_config.get("backlog",0))
self.max_lines = int(self.pluginmgr_config.get("max_lines",1000))
if self.path:
self.fh = open(self.path, "r")
self.fh.seek(0,2)
if self.backlog:
try:
self.fh.seek(self.fh.tell() - self.backlog, os.SEEK_SET)
except ValueError as exc:
if str(exc).find("negative seek position") > -1:
self.fh.seek(0)
else:
raise
def _run(self):
self.run_level = 1
while self.run_level:
self.send_emission()
for msg in self.probe():
self.queue_emission(msg)
vaping.io.sleep(0.1)
def validate_file_handler(self):
"""
Here we validate that our filehandler is pointing
to an existing file.
If it doesnt, because file has been deleted, we close
the filehander and try to reopen
"""
if self.fh.closed:
try:
self.fh = open(self.path, "r")
self.fh.seek(0, 2)
except OSError as err:
logging.error("Could not reopen file: {}".format(err))
return False
open_stat = os.fstat(self.fh.fileno())
try:
file_stat = os.stat(self.path)
except OSError as err:
logging.error("Could not stat file: {}".format(err))
return False
if open_stat != file_stat:
self.log
self.fh.close()
return False
return True
def probe(self):
"""
Probe the file for new lines
"""
# make sure the filehandler is still valid
# (e.g. file stat hasnt changed, file exists etc.)
if not self.validate_file_handler():
return []
messages = []
# read any new lines and push them onto the stack
for line in self.fh.readlines(self.max_lines):
data = {"path":self.path}
msg = self.new_message()
# process the line - this is where parsing happens
parsed = self.process_line(line, data)
if not parsed:
continue
data.update(parsed)
# process the probe - this is where data assignment
# happens
data = self.process_probe(data)
msg["data"] = [data]
messages.append(msg)
# process all new messages before returning them
# for emission
messages = self.process_messages(messages)
return messages
def process_line(self, line, data):
""" override this - parse your line in here """
return data
def process_probe(self, data):
""" override this - assign your data values here """
return data
def process_messages(self, messages):
"""
override this - process your messages before they
are emitted
"""
return messages
class EmitBase(with_metaclass(abc.ABCMeta, PluginBase)):
"""
Base class for emit plugins, used for sending data
expects method emit() to be defined
"""
def __init__(self, config, ctx):
super(EmitBase, self).__init__(config, ctx)
@abc.abstractmethod
def emit(self, message):
""" accept message to emit """
class TimeSeriesDB(EmitBase):
"""
Base interface for timeseries db storage plugins
# Config
- filename (`str`): database file name template
- field (`str`): fieeld name to read the value from
# Instanced Attributes
- filename (`str`): database file name template
- field (`str`): fieeld name to read the value from
"""
def __init__(self, config, ctx):
super(TimeSeriesDB, self).__init__(config, ctx)
# filename template
self.filename = self.config.get("filename")
# field name to read the value from
self.field = self.config.get("field")
if not self.filename:
raise ValueError("No filename specified")
if not self.field:
raise ValueError("No field specified, field should specify which value to store in the database")
def create(self, filename):
"""
Create database
**Arguments**
- filename (`str`): database filename
"""
raise NotImplementedError()
def update(self, filename, time, value):
"""
Update database
**Arguments**
- filename (`str`): database filename
- time (`int`): epoch timestamp
- value (`mixed`)
"""
raise NotImplementedError()
def | get | identifier_name |
|
__init__.py | return group_config
def init(self):
"""
called after the plugin is initialized, plugin may define this for any
other initialization code
"""
pass
def on_start(self):
"""
called when the daemon is starting
"""
pass
def on_stop(self):
"""
called when the daemon is stopping
"""
pass
def new_message(self):
"""
creates and returns new message `dict`, setting `type`, `source`, `ts`, `data`
`data` is initialized to an empty array
**Returns**
message (`dict`)
"""
msg = {}
msg['data'] = []
msg['type'] = self.plugin_type
msg['source'] = self.name
msg['ts'] = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()
return msg
def popen(self, args, **kwargs):
"""
creates a subprocess with passed args
**Returns**
Popen instance
"""
self.log.debug("popen %s", ' '.join(args))
return vaping.io.subprocess.Popen(args, **kwargs)
@property
def log(self):
"""
logger instance for plugin type
"""
if not self._logger:
self._logger = logging.getLogger('vaping.plugins.' + self.plugin_type)
return self._logger
def __init__(self, config, ctx):
"""
**Arguments**
- config (`dict`)
- ctx: vaping context
"""
if hasattr(self, 'default_config'):
self.config = munge.util.recursive_update(copy.deepcopy(self.default_config), copy.deepcopy(config))
else:
self.config = config
# set for pluginmgr
self.pluginmgr_config = self.config
self.vaping = ctx
self.name = self.config.get("name")
self._logger = None
super(PluginBase, self).__init__()
self.init()
def _run(self):
self.on_start()
class ProbeBase(with_metaclass(abc.ABCMeta, PluginBase)):
"""
Base class for probe plugin, used for getting data
expects method probe() to be defined
"""
def init(self):
pass
@abc.abstractmethod
def probe(self):
"""
probe for data, return a list of dicts
"""
def __init__(self, config, ctx, emit=None):
if emit:
self._emit = [emit]
else:
self._emit = []
self._emit_queue = vaping.io.Queue()
super(ProbeBase, self).__init__(config, ctx)
def _run(self):
super(ProbeBase, self)._run()
self.run_level = 1
while self.run_level:
self.send_emission()
msg = self.probe()
if msg:
self.queue_emission(msg)
else:
self.log.debug("probe returned no data")
def queue_emission(self, msg):
"""
queue an emission of a message for all output plugins
**Arguments**
- msg (`dict`): dict containing `type`, `source`, `ts` and `data` keys
"""
if not msg:
return
for _emitter in self._emit:
if not hasattr(_emitter, 'emit'):
continue
def emit(emitter=_emitter):
self.log.debug("emit to {}".format(emitter.name))
emitter.emit(msg)
self.log.debug("queue emission to {} ({})".format(
_emitter.name, self._emit_queue.qsize()))
self._emit_queue.put(emit)
def send_emission(self):
"""
emit and remove the first emission in the queue
"""
if self._emit_queue.empty():
|
emit = self._emit_queue.get()
emit()
def emit_all(self):
"""
emit and remove all emissions in the queue
"""
while not self._emit_queue.empty():
self.send_emission()
class TimedProbe(ProbeBase):
"""
Probe class that calls probe every config defined interval
"""
def __init__(self, config, ctx, emit=None):
super(TimedProbe, self).__init__(config, ctx, emit)
if 'interval' not in self.pluginmgr_config:
raise ValueError('interval not set in config')
self.interval = parse_interval(self.pluginmgr_config['interval'])
self.run_level = 0
def _run(self):
self.run_level = 1
while self.run_level:
start = datetime.datetime.now()
# since the TimedProbe will sleep between cycles
# we need to emit all queued emissions each cycle
self.emit_all()
msg = self.probe()
if msg:
self.queue_emission(msg)
else:
self.log.debug("probe returned no data")
done = datetime.datetime.now()
elapsed = done - start
if elapsed.total_seconds() > self.interval:
self.log.warning("probe time exceeded interval")
else:
sleeptime = datetime.timedelta(seconds=self.interval) - elapsed
vaping.io.sleep(sleeptime.total_seconds())
class FileProbe(ProbeBase):
"""
Probes a file and emits everytime a new line is read
# Config
- path (`str`): path to file
- backlog (`int=0`): number of bytes to read from backlog
- max_lines (`int=1000`): maximum number of lines to read during probe
# Instanced Attributes
- path (`str`): path to file
- backlog (`int`): number of bytes to read from backlog
- max_lines (`int`): maximum number of liens to read during probe
- fh (`filehandler`): file handler for opened file (only available if `path` is set)
"""
def __init__(self, config, ctx, emit=None):
super(FileProbe, self).__init__(config, ctx, emit)
self.path = self.pluginmgr_config.get("path")
self.run_level = 0
self.backlog = int(self.pluginmgr_config.get("backlog",0))
self.max_lines = int(self.pluginmgr_config.get("max_lines",1000))
if self.path:
self.fh = open(self.path, "r")
self.fh.seek(0,2)
if self.backlog:
try:
self.fh.seek(self.fh.tell() - self.backlog, os.SEEK_SET)
except ValueError as exc:
if str(exc).find("negative seek position") > -1:
self.fh.seek(0)
else:
raise
def _run(self):
self.run_level = 1
while self.run_level:
self.send_emission()
for msg in self.probe():
self.queue_emission(msg)
vaping.io.sleep(0.1)
def validate_file_handler(self):
"""
Here we validate that our filehandler is pointing
to an existing file.
If it doesnt, because file has been deleted, we close
the filehander and try to reopen
"""
if self.fh.closed:
try:
self.fh = open(self.path, "r")
self.fh.seek(0, 2)
except OSError as err:
logging.error("Could not reopen file: {}".format(err))
return False
open_stat = os.fstat(self.fh.fileno())
try:
file_stat = os.stat(self.path)
except OSError as err:
logging.error("Could not stat file: {}".format(err))
return False
if open_stat != file_stat:
self.log
self.fh.close()
return False
return True
def probe(self):
"""
Probe the file for new lines
"""
# make sure the filehandler is still valid
# (e.g. file stat hasnt changed, file exists etc.)
if not self.validate_file_handler():
return []
messages = []
# read any new lines and push them onto the stack
for line in self.fh.readlines(self.max_lines):
data = {"path":self.path}
msg = self.new_message()
# process the line - this is where parsing happens
parsed = self.process_line(line, data)
if not parsed:
continue
data.update(parsed)
# process the probe - this is where data assignment
# happens
data = self.process_probe(data)
msg["data"] = [data]
messages.append(msg)
# process all new messages before returning them
# for emission
messages = self.process_messages(messages)
return messages
def process_line(self, line, data):
""" override this - parse your line in here """
return data
def process_probe(self, data):
""" override this - assign your data values here """
return data
def process_messages(self, messages):
"""
override this - process your messages before they
are emitted
"""
return messages
class EmitBase(with_metaclass(abc.ABCMeta, PluginBase)):
"""
Base class for emit plugins, used for sending data
expects method emit() to be defined
"""
def __init__(self, config, ctx):
super(EmitBase, self).__init__(config, ctx)
@abc.abstractmethod
def emit(self, message):
""" accept message to emit """
class TimeSeriesDB(EmitBase):
"""
Base interface for times | return | conditional_block |
__init__.py | return group_config
def init(self):
"""
called after the plugin is initialized, plugin may define this for any
other initialization code
"""
pass
def on_start(self):
"""
called when the daemon is starting
"""
pass
def on_stop(self):
"""
called when the daemon is stopping
"""
pass
def new_message(self):
"""
creates and returns new message `dict`, setting `type`, `source`, `ts`, `data`
`data` is initialized to an empty array
**Returns**
message (`dict`)
"""
msg = {}
msg['data'] = []
msg['type'] = self.plugin_type
msg['source'] = self.name
msg['ts'] = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()
return msg
def popen(self, args, **kwargs):
"""
creates a subprocess with passed args
**Returns**
Popen instance
"""
self.log.debug("popen %s", ' '.join(args))
return vaping.io.subprocess.Popen(args, **kwargs)
@property
def log(self):
"""
logger instance for plugin type
"""
if not self._logger:
self._logger = logging.getLogger('vaping.plugins.' + self.plugin_type)
return self._logger
def __init__(self, config, ctx):
"""
**Arguments**
- config (`dict`)
- ctx: vaping context
"""
if hasattr(self, 'default_config'):
self.config = munge.util.recursive_update(copy.deepcopy(self.default_config), copy.deepcopy(config))
else:
self.config = config
# set for pluginmgr
self.pluginmgr_config = self.config
self.vaping = ctx
self.name = self.config.get("name")
self._logger = None
super(PluginBase, self).__init__()
self.init()
def _run(self):
self.on_start()
class ProbeBase(with_metaclass(abc.ABCMeta, PluginBase)):
"""
Base class for probe plugin, used for getting data
expects method probe() to be defined
"""
def init(self):
pass
@abc.abstractmethod
def probe(self):
"""
probe for data, return a list of dicts
"""
def __init__(self, config, ctx, emit=None):
if emit:
self._emit = [emit]
else:
self._emit = []
self._emit_queue = vaping.io.Queue()
super(ProbeBase, self).__init__(config, ctx)
def _run(self):
super(ProbeBase, self)._run()
self.run_level = 1
while self.run_level:
self.send_emission()
msg = self.probe()
if msg:
self.queue_emission(msg)
else:
self.log.debug("probe returned no data")
def queue_emission(self, msg):
"""
queue an emission of a message for all output plugins
**Arguments**
- msg (`dict`): dict containing `type`, `source`, `ts` and `data` keys
"""
if not msg:
return
for _emitter in self._emit:
if not hasattr(_emitter, 'emit'):
continue
def emit(emitter=_emitter):
self.log.debug("emit to {}".format(emitter.name))
emitter.emit(msg)
self.log.debug("queue emission to {} ({})".format(
_emitter.name, self._emit_queue.qsize()))
self._emit_queue.put(emit)
def send_emission(self):
"""
emit and remove the first emission in the queue
"""
if self._emit_queue.empty():
return
emit = self._emit_queue.get()
emit()
def emit_all(self):
"""
emit and remove all emissions in the queue
"""
while not self._emit_queue.empty():
self.send_emission()
class TimedProbe(ProbeBase):
"""
Probe class that calls probe every config defined interval
"""
def __init__(self, config, ctx, emit=None):
super(TimedProbe, self).__init__(config, ctx, emit)
if 'interval' not in self.pluginmgr_config:
raise ValueError('interval not set in config')
self.interval = parse_interval(self.pluginmgr_config['interval'])
self.run_level = 0
def _run(self):
self.run_level = 1
while self.run_level:
start = datetime.datetime.now()
# since the TimedProbe will sleep between cycles
# we need to emit all queued emissions each cycle
self.emit_all()
msg = self.probe()
if msg:
self.queue_emission(msg)
else:
self.log.debug("probe returned no data")
done = datetime.datetime.now()
elapsed = done - start
if elapsed.total_seconds() > self.interval:
self.log.warning("probe time exceeded interval")
else:
sleeptime = datetime.timedelta(seconds=self.interval) - elapsed
vaping.io.sleep(sleeptime.total_seconds())
class FileProbe(ProbeBase):
"""
Probes a file and emits everytime a new line is read
# Config
- path (`str`): path to file
- backlog (`int=0`): number of bytes to read from backlog
- max_lines (`int=1000`): maximum number of lines to read during probe
# Instanced Attributes
- path (`str`): path to file
- backlog (`int`): number of bytes to read from backlog
- max_lines (`int`): maximum number of liens to read during probe
- fh (`filehandler`): file handler for opened file (only available if `path` is set)
"""
def __init__(self, config, ctx, emit=None):
super(FileProbe, self).__init__(config, ctx, emit)
self.path = self.pluginmgr_config.get("path")
self.run_level = 0
self.backlog = int(self.pluginmgr_config.get("backlog",0))
self.max_lines = int(self.pluginmgr_config.get("max_lines",1000))
if self.path:
self.fh = open(self.path, "r")
self.fh.seek(0,2)
if self.backlog:
try:
self.fh.seek(self.fh.tell() - self.backlog, os.SEEK_SET)
except ValueError as exc:
if str(exc).find("negative seek position") > -1:
self.fh.seek(0)
else:
raise
def _run(self):
self.run_level = 1
while self.run_level:
self.send_emission()
for msg in self.probe():
self.queue_emission(msg)
vaping.io.sleep(0.1)
def validate_file_handler(self):
"""
Here we validate that our filehandler is pointing
to an existing file.
If it doesnt, because file has been deleted, we close
the filehander and try to reopen
"""
if self.fh.closed:
try:
self.fh = open(self.path, "r")
self.fh.seek(0, 2)
except OSError as err:
logging.error("Could not reopen file: {}".format(err))
return False
open_stat = os.fstat(self.fh.fileno())
try:
file_stat = os.stat(self.path)
except OSError as err:
logging.error("Could not stat file: {}".format(err))
return False
if open_stat != file_stat:
self.log
self.fh.close()
return False
return True
def probe(self):
"""
Probe the file for new lines
"""
# make sure the filehandler is still valid
# (e.g. file stat hasnt changed, file exists etc.)
if not self.validate_file_handler():
return []
messages = []
# read any new lines and push them onto the stack
for line in self.fh.readlines(self.max_lines):
data = {"path":self.path}
msg = self.new_message()
# process the line - this is where parsing happens
parsed = self.process_line(line, data)
if not parsed:
continue
data.update(parsed)
# process the probe - this is where data assignment
# happens
data = self.process_probe(data)
msg["data"] = [data]
messages.append(msg)
# process all new messages before returning them
# for emission
messages = self.process_messages(messages)
return messages
def process_line(self, line, data):
""" override this - parse your line in here """
return data
def process_probe(self, data):
""" override this - assign your data values here """
return data
def process_messages(self, messages):
"""
override this - process your messages before they
are emitted
"""
return messages
class EmitBase(with_metaclass(abc.ABCMeta, PluginBase)):
"""
Base class for emit plugins, used for sending data
expects method emit() to be defined
"""
def __init__(self, config, ctx):
|
@abc.abstractmethod
def emit(self, message):
""" accept message to emit """
class TimeSeriesDB(EmitBase):
"""
Base interface for | super(EmitBase, self).__init__(config, ctx) | identifier_body |
integratedAnalysisPipeline.py | def parseConfigFindPath(stringFind,configFile):
"""findPath will find path or value of associated specified string or info from config file"""
for line in configFile:
if stringFind in line: # when the query string is found, rewind the file and return the trailing path/value token
configFile.seek(0)
return line.split()[-1].strip('\n')
configFile.seek(0) # rewind so later searches start from the beginning; falls through (returns None) if stringFind was not found
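# Illustration (the config entry below is a hypothetical example, not taken from a real masterConfig.txt):
# a line such as
#   pathPython /usr/bin/python
# would be matched by parseConfigFindPath('pathPython', masterConfigFile), which splits the line on
# whitespace and returns the last token, '/usr/bin/python'.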
# verify the master configuration file exists, then open it for parsing
open('masterConfig.txt','r').close()
masterConfigFile = open('masterConfig.txt','r')
# grab the following information from the configuration file
weightsList = parseConfigFindList('Weights info',masterConfigFile)
findInfoList = ['performSynteny','performCircos', 'performALLMAPS', 'querySpecies', 'NameAnalysis','writeFastaOut', 'Loci_Threshold',
'pathPython','pathSystem', 'pathALLMAPS', 'BdPath', 'pathUnOut', 'pathGFF', 'pathSort', 'BPsMergeDist', 'softMasked', 'genomePath',
'karyotypesFilesPath','circosConfigFilesPath', 'LinkPath', 'circosOutPath', 'BPsThreshold',
'multipleSeqAlignFastasPath','fastaOutputName', 'allMAPImageOutputPath', 'online','projectName',
'nerscUsername','nohup','cactusRun','cactusFolder'] # find the following information
# list of query strings into config path finder
for i in range(len(findInfoList)): # find the paths/info of above queries
findInfoList[i] = parseConfigFindPath(findInfoList[i], masterConfigFile)
# assign values
(performSynteny, performCircos, performALLMAPS,querySpecies, NameAnalysis, writeFastaOut, Loci_Threshold, pathPython,
pathSystem,pathALLMAPS, BdPath, pathUnOut, pathGFF, pathSort, BPsMergeDist , softmask, genomePath, karyotypesFilesPath,
circosConfigFilesPath, LinkPath, circosOutPath, BPsThreshold, multipleSeqAlignFastasPath,
fastaOutputName, allMAPImageOutputPath, online, projectName, nerscUsername, nohup, cactusRun,cactusFolder) = tuple(findInfoList)
# for debugging, see if all of your data has passed through
print tuple(findInfoList)
# generate weights file for allmaps...
open('%sweights.txt'%pathALLMAPS,'w').close()
weightsFile = open('%sweights.txt'%pathALLMAPS,'w')
for weight in weightsList:
weightsFile.write('fake%s_'%querySpecies+weight+'\n')
weightsFile.close()
#fake473_283 1
# generate config files
# first need to generate 3 syntenic files list and list of genome files for synteny analysis
# second generate faiFiles list and bed files list circos
# third generate bedfile list and fastainputname
# get list of genome files, fai files, and fasta input filename for fragmented genome
listGenomePathFiles=str(subprocess.Popen(['ls','%s'%genomePath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
listGenomeFiles = []
listFaiFiles = []
# find lists of .fa and .fai files and format for config files
for file in listGenomePathFiles:
if file.endswith('.fa') or file.endswith('.fasta'):
listGenomeFiles.append(file+'\n')
listFaiFiles.append(file+'.fai\n')
genomeFilesText = ''.join(file for file in listGenomeFiles)
faiFilesText = ''.join(file for file in listFaiFiles)
# if query species, use .fa file for genome reconstruction
for filename in listGenomeFiles:
if querySpecies in filename:
fastaInputName = filename.strip('\n') # fasta filename of query species
# list of unout files
listPathUnout = str(subprocess.Popen(['ls', '%s' % pathUnOut], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
def gff2sort2(gff, pathgff, pathsort):
"""Takes a gffFiles and converts them to sort2 files to use in the final synteny analysis.
Please let Joshua Levy know if there are any errors or have problems!"""
outFileName = pathsort + gff[:gff.rfind('.')] + '.sort2'
inputFile = open(pathgff + gff, 'r')
open(outFileName, 'w').close()
outputFile = open(outFileName, 'w')
for line in inputFile:
        # grab gene info from each line that is an mRNA entry flagged as longest, and output it to the sort2 file
if 'mRNA' in line and 'longest=1' in line:
lineInList = line.split()
parserList = lineInList[-1].split(';')
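            # output columns: gene name, scaffold/chromosome (with '-' replaced by 'S'), start, end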
lineOutputList = [parserList[1].replace('Name=',''), lineInList[0].replace('-', 'S'), lineInList[3],
lineInList[4]]
outputFile.write('%s %s %s %s\n' % tuple(lineOutputList))
inputFile.close()
outputFile.close()
# prints True if there are no sort files; we generally want that for each analysis... for now, it is possible to modify the code to
# be more versatile
print not str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read())
# if no sort files present, generate them from gffs. PLEASE DELETE SORT FILES AFTER EVERY ANALYSIS
if not str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()):
listGFFfiles = str(subprocess.Popen(['ls', '%s' % pathGFF], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
# turn gff into sort files
for file in listGFFfiles:
if file.endswith('.gff') or file.endswith('.gff3'):
gff2sort2(file,pathGFF,pathSort)
# find sort files
listPathSort = str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
unoutList = []
sortFileList = []
bedList = []
# generate list of bed files for allmaps and circos, and unout and sort files for synteny analysis
# unouts can also be used for the alternate allmaps reconstruction
for file in listPathUnout:
if file.endswith('.unout'):
bedList.append(file[:file.rfind('.')]+'.bed')
unoutList.append(file)
for file in listPathSort:
if file.endswith('.sort2'):
sortFileList.append(file)
# make sure all sort files are included
print sortFileList
# Bedfile text for config files
bedFilesText = ''.join(file+'\n' for file in bedList)
# generate tuples for three syntenic files
listSyntenicFilesTuples = []
for file in unoutList:
# find target species then corresponding target sort files, add to list of syntenic files
targetSpecies = file[file.find('-')+1:file.rfind('_')].replace('PAC2_0.','').replace('PAC4GC.','')
print targetSpecies
for sortFile in sortFileList:
if querySpecies in sortFile:
querySortFile = sortFile
if targetSpecies in sortFile:
targetSortFile = sortFile
print targetSortFile
listSyntenicFilesTuples.append((file,querySortFile,targetSortFile))
# text for synteny analysis config
syntenicFilesText = ''.join('%s %s %s\n'%synFilesTuple for synFilesTuple in listSyntenicFilesTuples)
# if on NERSC load proper modules, try to load these modules beforehand...
print 'online' + online
if int(online):
try:
subprocess.call('module load bedtools/2.25.0',shell=True)
subprocess.call('module load circos',shell=True)
except:
print 'Unable to load online modules...'
try:
int(BPsMergeDist)
except:
BPsMergeDist = '100000'
# write syntenic Config text
generateIntAnalysisConfig('syntenyAnalysis',(NameAnalysis,writeFastaOut,Loci_Threshold,pathPython,pathUnOut,pathSort, BPsMergeDist,softmask,
NameAnalysis,multipleSeqAlignFastasPath,syntenicFilesText,genomePath,
genomeFilesText))
# if choosing to perform multiple synteny
if int(performSynteny):
# run synteny analysis
#execfile(os.path.join(os.path.dirname(sys.argv[0]), 'syntenyFinal.py'))
execfile('SyntenyFinal.py')
# move BedFiles to Bd folder
for bedFile in bedList:
try:
shutil.copy(BdPath[:BdPath.rfind('BdFiles')]
+ bedFile, BdPath)
except:
print 'Unable to copy bed file %s to BdFiles...'%bedFile
# move genome to allmaps folder
try:
shutil.copy(genomePath+fastaInputName,pathALLMAPS+fastaInputName)
except:
print 'Unable to copy genome to allmaps directory | parseConfigFindPath | identifier_name |
|
integratedAnalysisPipeline.py | OutPath, BPsThreshold, multipleSeqAlignFastasPath,
fastaOutputName, allMAPImageOutputPath, online, projectName, nerscUsername, nohup, cactusRun,cactusFolder) = tuple(findInfoList)
# for debugging, see if all of your data has passed through
print tuple(findInfoList)
# generate weights file for allmaps...
open('%sweights.txt'%pathALLMAPS,'w').close()
weightsFile = open('%sweights.txt'%pathALLMAPS,'w')
for weight in weightsList:
weightsFile.write('fake%s_'%querySpecies+weight+'\n')
weightsFile.close()
#fake473_283 1
# generate config files
# first need to generate 3 syntenic files list and list of genome files for synteny analysis
# second generate faiFiles list and bed files list circos
# third generate bedfile list and fastainputname
# get list of genome files, fai files, and fasta input filename for fragmented genome
listGenomePathFiles=str(subprocess.Popen(['ls','%s'%genomePath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
listGenomeFiles = []
listFaiFiles = []
# find lists of .fa and .fai files and format for config files
for file in listGenomePathFiles:
if file.endswith('.fa') or file.endswith('.fasta'):
listGenomeFiles.append(file+'\n')
listFaiFiles.append(file+'.fai\n')
genomeFilesText = ''.join(file for file in listGenomeFiles)
faiFilesText = ''.join(file for file in listFaiFiles)
# if query species, use .fa file for genome reconstruction
for filename in listGenomeFiles:
if querySpecies in filename:
fastaInputName = filename.strip('\n') # fasta filename of query species
# list of unout files
listPathUnout = str(subprocess.Popen(['ls', '%s' % pathUnOut], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
def gff2sort2(gff, pathgff, pathsort):
"""Takes a gffFiles and converts them to sort2 files to use in the final synteny analysis.
Please let Joshua Levy know if there are any errors or have problems!"""
outFileName = pathsort + gff[:gff.rfind('.')] + '.sort2'
inputFile = open(pathgff + gff, 'r')
open(outFileName, 'w').close()
outputFile = open(outFileName, 'w')
for line in inputFile:
        # grab gene info from each line that is an mRNA entry flagged as longest, and output it to the sort2 file
if 'mRNA' in line and 'longest=1' in line:
lineInList = line.split()
parserList = lineInList[-1].split(';')
lineOutputList = [parserList[1].replace('Name=',''), lineInList[0].replace('-', 'S'), lineInList[3],
lineInList[4]]
outputFile.write('%s %s %s %s\n' % tuple(lineOutputList))
inputFile.close()
outputFile.close()
# prints True if there are no sort files; we generally want that for each analysis... for now, it is possible to modify the code to
# be more versatile
print not str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read())
# if no sort files present, generate them from gffs. PLEASE DELETE SORT FILES AFTER EVERY ANALYSIS
if not str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()):
listGFFfiles = str(subprocess.Popen(['ls', '%s' % pathGFF], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
# turn gff into sort files
for file in listGFFfiles:
if file.endswith('.gff') or file.endswith('.gff3'):
gff2sort2(file,pathGFF,pathSort)
# find sort files
listPathSort = str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
unoutList = []
sortFileList = []
bedList = []
# generate list of bed files for allmaps and circos, and unout and sort files for synteny analysis
# unouts can also be used for the alternate allmaps reconstruction
for file in listPathUnout:
if file.endswith('.unout'):
bedList.append(file[:file.rfind('.')]+'.bed')
unoutList.append(file)
for file in listPathSort:
if file.endswith('.sort2'):
sortFileList.append(file)
# make sure all sort files are included
print sortFileList
# Bedfile text for config files
bedFilesText = ''.join(file+'\n' for file in bedList)
# generate tuples for three syntenic files
listSyntenicFilesTuples = []
for file in unoutList:
# find target species then corresponding target sort files, add to list of syntenic files
targetSpecies = file[file.find('-')+1:file.rfind('_')].replace('PAC2_0.','').replace('PAC4GC.','')
print targetSpecies
for sortFile in sortFileList:
if querySpecies in sortFile:
querySortFile = sortFile
if targetSpecies in sortFile:
targetSortFile = sortFile
print targetSortFile
listSyntenicFilesTuples.append((file,querySortFile,targetSortFile))
# text for synteny analysis config
syntenicFilesText = ''.join('%s %s %s\n'%synFilesTuple for synFilesTuple in listSyntenicFilesTuples)
# if on NERSC load proper modules, try to load these modules beforehand...
print 'online' + online
if int(online):
try:
subprocess.call('module load bedtools/2.25.0',shell=True)
subprocess.call('module load circos',shell=True)
except:
print 'Unable to load online modules...' | try:
int(BPsMergeDist)
except:
BPsMergeDist = '100000'
# write syntenic Config text
generateIntAnalysisConfig('syntenyAnalysis',(NameAnalysis,writeFastaOut,Loci_Threshold,pathPython,pathUnOut,pathSort, BPsMergeDist,softmask,
NameAnalysis,multipleSeqAlignFastasPath,syntenicFilesText,genomePath,
genomeFilesText))
# if choosing to perform multiple synteny
if int(performSynteny):
# run synteny analysis
#execfile(os.path.join(os.path.dirname(sys.argv[0]), 'syntenyFinal.py'))
execfile('SyntenyFinal.py')
# move BedFiles to Bd folder
for bedFile in bedList:
try:
shutil.copy(BdPath[:BdPath.rfind('BdFiles')]
+ bedFile, BdPath)
except:
print 'Unable to copy bed file %s to BdFiles...'%bedFile
# move genome to allmaps folder
try:
shutil.copy(genomePath+fastaInputName,pathALLMAPS+fastaInputName)
except:
print 'Unable to copy genome to allmaps directory...'
# generate config files for relevant analyses
if int(performCircos):
print 'config circos'
generateIntAnalysisConfig('circosAnalysis',(faiFilesText,bedFilesText,genomePath,karyotypesFilesPath,BdPath,
circosConfigFilesPath,LinkPath,circosOutPath,pathPython,BPsThreshold))
if int(performALLMAPS):
print 'config allmaps'
generateIntAnalysisConfig('reconstructGenome',(pathALLMAPS,pathPython,pathSystem,BdPath,allMAPImageOutputPath,
fastaInputName,fastaOutputName,bedFilesText))
# see if will be performing alternate genome reconstruction using syntenic genes, MORE ACCURATE
try:
masterConfigFile = open('masterConfig.txt','r')
masterConfigFile.seek(0)
performAltAllMaps = parseConfigFindPath('performAltAllMaps', masterConfigFile)
masterConfigFile.close()
if int(performAltAllMaps):
print 'config alt allmaps'
generateIntAnalysisConfig('reconstructGenome2', (pathALLMAPS, pathPython, pathSystem, BdPath, allMAPImageOutputPath,
fastaInputName, fastaOutputName, pathUnOut,pathSort,Loci_Threshold))
open('%sweights.txt' % pathALLMAPS, 'w').close()
weightsFile = open('%sweights.txt' % pathALLMAPS, 'w')
for weight in weightsList:
weightsFile.write('%s_' % querySpecies + weight + '\n')
weightsFile.close()
except:
print 'Unable to set up alternate allmaps'
performAltAllMaps = 0
# perform circos analysis but not reconstruction
if int(performCircos) and not int(performALLMAPS):
print 'circos'
# try to run circos online
if int(online):
open('runCircos.sh', 'w').close()
# writing shell script to run circos
circos = open('runCircos | random_line_split |
|
integratedAnalysisPipeline.py | %querySpecies+weight+'\n')
weightsFile.close()
#fake473_283 1
# generate config files
# first need to generate 3 syntenic files list and list of genome files for synteny analysis
# second generate faiFiles list and bed files list circos
# third generate bedfile list and fastainputname
# get list of genome files, fai files, and fasta input filename for fragmented genome
listGenomePathFiles=str(subprocess.Popen(['ls','%s'%genomePath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
listGenomeFiles = []
listFaiFiles = []
# find lists of .fa and .fai files and format for config files
for file in listGenomePathFiles:
if file.endswith('.fa') or file.endswith('.fasta'):
listGenomeFiles.append(file+'\n')
listFaiFiles.append(file+'.fai\n')
genomeFilesText = ''.join(file for file in listGenomeFiles)
faiFilesText = ''.join(file for file in listFaiFiles)
# if query species, use .fa file for genome reconstruction
for filename in listGenomeFiles:
if querySpecies in filename:
fastaInputName = filename.strip('\n') # fasta filename of query species
# list of unout files
listPathUnout = str(subprocess.Popen(['ls', '%s' % pathUnOut], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
def gff2sort2(gff, pathgff, pathsort):
"""Takes a gffFiles and converts them to sort2 files to use in the final synteny analysis.
Please let Joshua Levy know if there are any errors or have problems!"""
outFileName = pathsort + gff[:gff.rfind('.')] + '.sort2'
inputFile = open(pathgff + gff, 'r')
open(outFileName, 'w').close()
outputFile = open(outFileName, 'w')
for line in inputFile:
        # grab gene info from each line that is an mRNA entry flagged as longest, and output it to the sort2 file
if 'mRNA' in line and 'longest=1' in line:
lineInList = line.split()
parserList = lineInList[-1].split(';')
lineOutputList = [parserList[1].replace('Name=',''), lineInList[0].replace('-', 'S'), lineInList[3],
lineInList[4]]
outputFile.write('%s %s %s %s\n' % tuple(lineOutputList))
inputFile.close()
outputFile.close()
# prints True if there are no sort files; we generally want that for each analysis... for now, it is possible to modify the code to
# be more versatile
print not str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read())
# if no sort files present, generate them from gffs. PLEASE DELETE SORT FILES AFTER EVERY ANALYSIS
if not str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()):
listGFFfiles = str(subprocess.Popen(['ls', '%s' % pathGFF], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
# turn gff into sort files
for file in listGFFfiles:
if file.endswith('.gff') or file.endswith('.gff3'):
gff2sort2(file,pathGFF,pathSort)
# find sort files
listPathSort = str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
unoutList = []
sortFileList = []
bedList = []
# generate list of bed files for allmaps and circos, and unout and sort files for synteny analysis
# unouts can also be used for the alternate allmaps reconstruction
for file in listPathUnout:
if file.endswith('.unout'):
bedList.append(file[:file.rfind('.')]+'.bed')
unoutList.append(file)
for file in listPathSort:
if file.endswith('.sort2'):
sortFileList.append(file)
# make sure all sort files are included
print sortFileList
# Bedfile text for config files
bedFilesText = ''.join(file+'\n' for file in bedList)
# generate tuples for three syntenic files
listSyntenicFilesTuples = []
for file in unoutList:
# find target species then corresponding target sort files, add to list of syntenic files
targetSpecies = file[file.find('-')+1:file.rfind('_')].replace('PAC2_0.','').replace('PAC4GC.','')
print targetSpecies
for sortFile in sortFileList:
if querySpecies in sortFile:
querySortFile = sortFile
if targetSpecies in sortFile:
targetSortFile = sortFile
print targetSortFile
listSyntenicFilesTuples.append((file,querySortFile,targetSortFile))
# text for synteny analysis config
syntenicFilesText = ''.join('%s %s %s\n'%synFilesTuple for synFilesTuple in listSyntenicFilesTuples)
# if on NERSC load proper modules, try to load these modules beforehand...
print 'online' + online
if int(online):
try:
subprocess.call('module load bedtools/2.25.0',shell=True)
subprocess.call('module load circos',shell=True)
except:
print 'Unable to load online modules...'
try:
int(BPsMergeDist)
except:
BPsMergeDist = '100000'
# write syntenic Config text
generateIntAnalysisConfig('syntenyAnalysis',(NameAnalysis,writeFastaOut,Loci_Threshold,pathPython,pathUnOut,pathSort, BPsMergeDist,softmask,
NameAnalysis,multipleSeqAlignFastasPath,syntenicFilesText,genomePath,
genomeFilesText))
# if choosing to perform multiple synteny
if int(performSynteny):
# run synteny analysis
#execfile(os.path.join(os.path.dirname(sys.argv[0]), 'syntenyFinal.py'))
execfile('SyntenyFinal.py')
# move BedFiles to Bd folder
for bedFile in bedList:
try:
shutil.copy(BdPath[:BdPath.rfind('BdFiles')]
+ bedFile, BdPath)
except:
print 'Unable to copy bed file %s to BdFiles...'%bedFile
# move genome to allmaps folder
try:
shutil.copy(genomePath+fastaInputName,pathALLMAPS+fastaInputName)
except:
print 'Unable to copy genome to allmaps directory...'
# generate config files for relevant analyses
if int(performCircos):
print 'config circos'
generateIntAnalysisConfig('circosAnalysis',(faiFilesText,bedFilesText,genomePath,karyotypesFilesPath,BdPath,
circosConfigFilesPath,LinkPath,circosOutPath,pathPython,BPsThreshold))
if int(performALLMAPS):
print 'config allmaps'
generateIntAnalysisConfig('reconstructGenome',(pathALLMAPS,pathPython,pathSystem,BdPath,allMAPImageOutputPath,
fastaInputName,fastaOutputName,bedFilesText))
# see if will be performing alternate genome reconstruction using syntenic genes, MORE ACCURATE
try:
masterConfigFile = open('masterConfig.txt','r')
masterConfigFile.seek(0)
performAltAllMaps = parseConfigFindPath('performAltAllMaps', masterConfigFile)
masterConfigFile.close()
if int(performAltAllMaps):
print 'config alt allmaps'
generateIntAnalysisConfig('reconstructGenome2', (pathALLMAPS, pathPython, pathSystem, BdPath, allMAPImageOutputPath,
fastaInputName, fastaOutputName, pathUnOut,pathSort,Loci_Threshold))
open('%sweights.txt' % pathALLMAPS, 'w').close()
weightsFile = open('%sweights.txt' % pathALLMAPS, 'w')
for weight in weightsList:
weightsFile.write('%s_' % querySpecies + weight + '\n')
weightsFile.close()
except:
print 'Unable to set up alternate allmaps'
performAltAllMaps = 0
# perform circos analysis but not reconstruction
if int(performCircos) and not int(performALLMAPS):
| print 'circos'
# try to run circos online
if int(online):
open('runCircos.sh', 'w').close()
# writing shell script to run circos
circos = open('runCircos.sh', 'w')
circos.write('#!/bin/bash\npython circosFiguresPipeline.py')
circos.close()
try:
# try to run circos
subprocess.call('nohup sh runCircos.sh', shell=True)
except:
print 'Unable to run circos via command line..'
else: # try offline analysis
try:
execfile('circosFiguresPipeline.py')
except:
print 'Unable to run circos analysis.'
#except:
# print 'Unable to run circos analysis.' | conditional_block |
|
integratedAnalysisPipeline.py |
def parseConfigFindPath(stringFind,configFile):
"""findPath will find path or value of associated specified string or info from config file"""
for line in configFile:
        if stringFind in line: # if the specified string is found, return the associated path or value
configFile.seek(0)
return line.split()[-1].strip('\n')
configFile.seek(0)
# open master configuration file
open('masterConfig.txt','r').close()
masterConfigFile = open('masterConfig.txt','r')
# grab the following information from the configuration file
weightsList = parseConfigFindList('Weights info',masterConfigFile)
findInfoList = ['performSynteny','performCircos', 'performALLMAPS', 'querySpecies', 'NameAnalysis','writeFastaOut', 'Loci_Threshold',
'pathPython','pathSystem', 'pathALLMAPS', 'BdPath', 'pathUnOut', 'pathGFF', 'pathSort', 'BPsMergeDist', 'softMasked', 'genomePath',
'karyotypesFilesPath','circosConfigFilesPath', 'LinkPath', 'circosOutPath', 'BPsThreshold',
'multipleSeqAlignFastasPath','fastaOutputName', 'allMAPImageOutputPath', 'online','projectName',
'nerscUsername','nohup','cactusRun','cactusFolder'] # find the following information
# list of query strings into config path finder
for i in range(len(findInfoList)): # find the paths/info of above queries
findInfoList[i] = parseConfigFindPath(findInfoList[i], masterConfigFile)
# assign values
(performSynteny, performCircos, performALLMAPS,querySpecies, NameAnalysis, writeFastaOut, Loci_Threshold, pathPython,
pathSystem,pathALLMAPS, BdPath, pathUnOut, pathGFF, pathSort, BPsMergeDist , softmask, genomePath, karyotypesFilesPath,
circosConfigFilesPath, LinkPath, circosOutPath, BPsThreshold, multipleSeqAlignFastasPath,
fastaOutputName, allMAPImageOutputPath, online, projectName, nerscUsername, nohup, cactusRun,cactusFolder) = tuple(findInfoList)
# for debugging, see if all of your data has passed through
print tuple(findInfoList)
# generate weights file for allmaps...
open('%sweights.txt'%pathALLMAPS,'w').close()
weightsFile = open('%sweights.txt'%pathALLMAPS,'w')
for weight in weightsList:
weightsFile.write('fake%s_'%querySpecies+weight+'\n')
weightsFile.close()
#fake473_283 1
# generate config files
# first need to generate 3 syntenic files list and list of genome files for synteny analysis
# second generate faiFiles list and bed files list circos
# third generate bedfile list and fastainputname
# get list of genome files, fai files, and fasta input filename for fragmented genome
listGenomePathFiles=str(subprocess.Popen(['ls','%s'%genomePath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
listGenomeFiles = []
listFaiFiles = []
# find lists of .fa and .fai files and format for config files
for file in listGenomePathFiles:
if file.endswith('.fa') or file.endswith('.fasta'):
listGenomeFiles.append(file+'\n')
listFaiFiles.append(file+'.fai\n')
genomeFilesText = ''.join(file for file in listGenomeFiles)
faiFilesText = ''.join(file for file in listFaiFiles)
# if query species, use .fa file for genome reconstruction
for filename in listGenomeFiles:
if querySpecies in filename:
fastaInputName = filename.strip('\n') # fasta filename of query species
# list of unout files
listPathUnout = str(subprocess.Popen(['ls', '%s' % pathUnOut], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
def gff2sort2(gff, pathgff, pathsort):
"""Takes a gffFiles and converts them to sort2 files to use in the final synteny analysis.
Please let Joshua Levy know if there are any errors or have problems!"""
outFileName = pathsort + gff[:gff.rfind('.')] + '.sort2'
inputFile = open(pathgff + gff, 'r')
open(outFileName, 'w').close()
outputFile = open(outFileName, 'w')
for line in inputFile:
        # grab gene info from each line that is an mRNA entry flagged as longest, and output it to the sort2 file
if 'mRNA' in line and 'longest=1' in line:
lineInList = line.split()
parserList = lineInList[-1].split(';')
lineOutputList = [parserList[1].replace('Name=',''), lineInList[0].replace('-', 'S'), lineInList[3],
lineInList[4]]
outputFile.write('%s %s %s %s\n' % tuple(lineOutputList))
inputFile.close()
outputFile.close()
# prints True if there are no sort files; we generally want that for each analysis... for now, it is possible to modify the code to
# be more versatile
print not str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read())
# if no sort files present, generate them from gffs. PLEASE DELETE SORT FILES AFTER EVERY ANALYSIS
if not str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()):
listGFFfiles = str(subprocess.Popen(['ls', '%s' % pathGFF], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
# turn gff into sort files
for file in listGFFfiles:
if file.endswith('.gff') or file.endswith('.gff3'):
gff2sort2(file,pathGFF,pathSort)
# find sort files
listPathSort = str(subprocess.Popen(['ls', '%s' % pathSort], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
.stdout.read()).split('\n')
unoutList = []
sortFileList = []
bedList = []
# generate list of bed files for allmaps and circos, and unout and sort files for synteny analysis
# unouts can also be used for the alternate allmaps reconstruction
for file in listPathUnout:
if file.endswith('.unout'):
bedList.append(file[:file.rfind('.')]+'.bed')
unoutList.append(file)
for file in listPathSort:
if file.endswith('.sort2'):
sortFileList.append(file)
# make sure all sort files are included
print sortFileList
# Bedfile text for config files
bedFilesText = ''.join(file+'\n' for file in bedList)
# generate tuples for three syntenic files
listSyntenicFilesTuples = []
for file in unoutList:
# find target species then corresponding target sort files, add to list of syntenic files
targetSpecies = file[file.find('-')+1:file.rfind('_')].replace('PAC2_0.','').replace('PAC4GC.','')
print targetSpecies
for sortFile in sortFileList:
if querySpecies in sortFile:
querySortFile = sortFile
if targetSpecies in sortFile:
targetSortFile = sortFile
print targetSortFile
listSyntenicFilesTuples.append((file,querySortFile,targetSortFile))
# text for synteny analysis config
syntenicFilesText = ''.join('%s %s %s\n'%synFilesTuple for synFilesTuple in listSyntenicFilesTuples)
# if on NERSC load proper modules, try to load these modules beforehand...
print 'online' + online
if int(online):
try:
subprocess.call('module load bedtools/2.25.0',shell=True)
subprocess.call('module load circos',shell=True)
except:
print 'Unable to load online modules...'
try:
int(BPsMergeDist)
except:
BPsMergeDist = '100000'
# write syntenic Config text
generateIntAnalysisConfig('syntenyAnalysis',(NameAnalysis,writeFastaOut,Loci_Threshold,pathPython,pathUnOut,pathSort, BPsMergeDist,softmask,
NameAnalysis,multipleSeqAlignFastasPath,syntenicFilesText,genomePath,
genomeFilesText))
# if choosing to perform multiple synteny
if int(performSynteny):
# run synteny analysis
#execfile(os.path.join(os.path.dirname(sys.argv[0 | """parseConfigFindList inputs a particular string to find and read file after and a configuration file object
outputs list of relevant filenames"""
read = 0
listOfItems = []
for line in configFile:
if line:
if read == 1:
if 'Stop' in line:
configFile.seek(0)
break # exit the function and return the list of files or list information
listOfItems.append(line.strip('\n'))
if stringFind in line:
read = 1 # if find string specified, begin reading lines
configFile.seek(0)
return listOfItems | identifier_body |
|
vector_test.go | (t *testing.T) {
var i, j uint
for i = 0; i < 100; i++ {
randslice := makeRandSlice(i)
v := NewFrom(randslice)
if v.Dim() != i {
t.Errorf("Wrong dimension. Got %d, expected %d.", v.Dim(), i)
}
for j = 0; j < i; j++ {
val, _ := v.Get(j)
if val != randslice[j] {
t.Error(
"Wrong values in vector initialized from a random slice.")
}
}
}
}
// Creates pseudo-random vectors with various dimensions, copies them and
// verifies that the new vector is equal.
func TestCopy(t *testing.T) {
var i uint
for i = 0; i < 100; i++ {
v := makeRandomVector(i)
w := v.Copy()
if !Equal(v, w) {
t.Error("Copied vector is not equal to source vector.")
}
}
}
// =================== [ General Methods/Functions Tests ] ====================
// Creates pseudo-random vectors with various dimensions, then checks if Get()
// returns the correct values and errors on out-of-range indexes.
func TestGet(t *testing.T) {
var i uint
for i = 0; i < 100; i++ {
v := makeRandomVector(i)
for j, val := range v.dims {
getval, err := v.Get(uint(j))
if err != nil {
t.Error("Get() errored on a correct index.")
}
if val != getval {
t.Error("Get() returned a wrong value.")
}
}
_, err := v.Get(v.Dim())
if err == nil {
t.Error("Get didn't error on an out-of-range index.")
}
}
}
// Creates uninitialized vectors of various dimensions, then sets their values
// to pseudo-random values. It then compares those values to check if they
// were set correctly. Also verifies that Set() correctly errors on out-of-range
// indexes.
func TestSet(t *testing.T) {
var i, j uint
for i = 0; i < 100; i++ {
v := New(i)
for j = 0; j < i; j++ {
val := rand.ExpFloat64()
err := v.Set(j, val)
if err != nil {
t.Error("Set() errored on a correct index.")
}
if v.dims[j] != val {
t.Error("Set didn't correctly set a value.")
}
}
err := v.Set(v.Dim(), 0)
if err == nil {
t.Error("Set didn't error on an out-of-range index.")
}
}
}
// Creates a vector with known length, then compares the expected value with
// what Len() returns.
func TestLen(t *testing.T) {
v := New(1)
v.Set(0, 2) // has length 2
if v.Len() != 2 {
t.Error("Len returned a wrong length")
}
}
// Creates Vectors which are known to be (un)equal, then verifies that Equal()
// has correct output.
func TestEqual(t *testing.T) {
slc := make([]float64, 10)
for i := range slc {
slc[i] = float64(i)
}
v := NewFrom(slc)
w := NewFrom(slc)
if !Equal(v, w) {
t.Error("Equal() != true for equal vectors.")
}
w = New(10)
if Equal(v, w) {
t.Error("Equal() == true for unequal vectors.")
}
}
// =========================== [ Operation Tests ] ============================
// Creates pseudo-random vectors, then adds them first as a non-destructive,
// then as an in-place operation, checking if both operations were correct.
func TestAdd(t *testing.T) {
var i, j uint
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := makeRandomVector(i)
c, _ := Add(a, b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]+b.dims[j] {
t.Error("Addition failed, didn't get expected values.")
t.Logf("%f + %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
// Test in-place addition.
c = a.Copy()
c.Add(b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]+b.dims[j] {
t.Error(
"In-place Addition failed, didn't get expected values.")
t.Logf("%f + %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
}
}
// Same as TestAdd, but with substraction. Heck, it's basically the same code.
func TestSubstract(t *testing.T) {
var i, j uint
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := makeRandomVector(i)
c, _ := Substract(a, b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]-b.dims[j] {
t.Error("Substraction failed, didn't get expected values.")
t.Logf("%f - %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
		// Test in-place substraction
c = a.Copy()
c.Substract(b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]-b.dims[j] {
t.Error(
"In-place Substraction failed, didn't get expected values.")
t.Logf("%f - %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
}
}
// Creates pseudo-random vectors, does scalar multiplication with pseudo-random
// floats, then checks if the result is correct. It checks both the in-place
// and the non-destructive version.
func TestScale(t *testing.T) {
var i, j uint
for i = 0; i < 100; i++ {
a := makeRandomVector(i)
x := rand.ExpFloat64()
b := Scale(a, x)
for j = 0; j < i; j++ {
if b.dims[j] != a.dims[j]*x {
t.Error("Scalar Multiplication failed, ",
"didn't get expected values.")
t.Logf("%f * %f != %f", a.dims[j], x, b.dims[j])
}
}
// Test in-place scalar multiplication
b = a.Copy()
b.Scale(x)
for j = 0; j < i; j++ {
if b.dims[j] != a.dims[j]*x {
t.Error("In-place Scalar Multiplication failed, ",
"didn't get expected values.")
t.Logf("%f * %f != %f", a.dims[j], x, b.dims[j])
}
}
}
}
// Creates pseudo-random vectors, normalizes them both in-place and
// non-destructive, and verifies that the result is correct.
func TestNormalize(t *testing.T) {
var i uint
// It makes no sense to normalize a zero vector, therefore we start at 1.
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := Normalize(a)
if b.Len() != float64(1) {
t.Error("Normalization failed, vector doesn't have length 1.")
t.Logf("%f != 1", b.Len())
}
}
}
// Uses vectors with known angles to calculate their DotProduct, then verifies
// if the result is correct.
func TestDotProduct(t *testing.T) {
a := New(2)
b := New(2)
// Set the vectors as parallel.
a.Set(0, 1)
b.Set(0, 1)
dot, _ := DotProduct(a, b)
if dot != 1 {
t.Error("Dot Product of parallel vectors isn't 1.")
}
// Set the vectors as orthogonal.
b = New(2)
b.Set(1, 1)
dot, _ = DotProduct(a, b)
if dot != 0 {
t.Error("Dot Product of orthogonal vectors isn't 0.")
}
// Set the vectors as anti-parallel.
b = New(2)
b.Set(0, -1)
dot, _ = DotProduct(a, b)
if dot != -1 {
t.Error("Dot Product of anti-parallel vectors isn't -1.")
}
}
// Uses vectors with known angles to verify that Angle() is correct.
func TestAngle(t *testing.T) {
a := New(2)
b := New(2)
// Set the | TestNewFrom | identifier_name |
|
vector_test.go | < 100; i++ {
randslice := makeRandSlice(i)
v := NewFrom(randslice)
if v.Dim() != i {
t.Errorf("Wrong dimension. Got %d, expected %d.", v.Dim(), i)
}
for j = 0; j < i; j++ {
val, _ := v.Get(j)
if val != randslice[j] {
t.Error(
"Wrong values in vector initialized from a random slice.")
}
}
}
}
// Creates pseudo-random vectors with various dimensions, copies them and
// verifies that the new vector is equal.
func TestCopy(t *testing.T) {
var i uint
for i = 0; i < 100; i++ {
v := makeRandomVector(i)
w := v.Copy()
if !Equal(v, w) {
t.Error("Copied vector is not equal to source vector.")
}
}
}
// =================== [ General Methods/Functions Tests ] ====================
// Creates pseudo-random vectors with various dimensions, then checks if Get()
// returns the correct values and errors on out-of-range indexes.
func TestGet(t *testing.T) {
var i uint
for i = 0; i < 100; i++ {
v := makeRandomVector(i)
for j, val := range v.dims {
getval, err := v.Get(uint(j))
if err != nil {
t.Error("Get() errored on a correct index.")
}
if val != getval {
t.Error("Get() returned a wrong value.")
}
}
_, err := v.Get(v.Dim())
if err == nil {
t.Error("Get didn't error on an out-of-range index.")
}
}
}
// Creates uninitialized vectors of various dimensions, then sets their values
// to pseudo-random values. It then compares those values to check if they
// were set correctly. Also verifies that Set() correctly errors on out-of-range
// indexes.
func TestSet(t *testing.T) {
var i, j uint
for i = 0; i < 100; i++ {
v := New(i)
for j = 0; j < i; j++ {
val := rand.ExpFloat64()
err := v.Set(j, val)
if err != nil {
t.Error("Set() errored on a correct index.")
}
if v.dims[j] != val {
t.Error("Set didn't correctly set a value.")
}
}
err := v.Set(v.Dim(), 0)
if err == nil {
t.Error("Set didn't error on an out-of-range index.")
}
}
}
// Creates a vector with known length, then compares the expected value with
// what Len() returns.
func TestLen(t *testing.T) {
v := New(1)
v.Set(0, 2) // has length 2
if v.Len() != 2 {
t.Error("Len returned a wrong length")
}
}
// Creates Vectors which are known to be (un)equal, then verifies that Equal()
// has correct output.
func TestEqual(t *testing.T) {
slc := make([]float64, 10)
for i := range slc {
slc[i] = float64(i)
}
v := NewFrom(slc)
w := NewFrom(slc)
if !Equal(v, w) {
t.Error("Equal() != true for equal vectors.")
}
w = New(10)
if Equal(v, w) {
t.Error("Equal() == true for unequal vectors.")
}
}
| // then as an in-place operations, checking if both operation were correct.
func TestAdd(t *testing.T) {
var i, j uint
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := makeRandomVector(i)
c, _ := Add(a, b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]+b.dims[j] {
t.Error("Addition failed, didn't get expected values.")
t.Logf("%f + %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
// Test in-place addition.
c = a.Copy()
c.Add(b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]+b.dims[j] {
t.Error(
"In-place Addition failed, didn't get expected values.")
t.Logf("%f + %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
}
}
// Same as TestAdd, but with substraction. Heck, it's basically the same code.
func TestSubstract(t *testing.T) {
var i, j uint
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := makeRandomVector(i)
c, _ := Substract(a, b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]-b.dims[j] {
t.Error("Substraction failed, didn't get expected values.")
t.Logf("%f - %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
// Test in-place sybstraction
c = a.Copy()
c.Substract(b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]-b.dims[j] {
t.Error(
"In-place Substraction failed, didn't get expected values.")
t.Logf("%f - %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
}
}
// Creates pseudo-random vectors, does scalar multiplication with pseudo-random
// floats, then checks if the result is correct. It checks both the in-place
// and the non-destructive version.
func TestScale(t *testing.T) {
var i, j uint
for i = 0; i < 100; i++ {
a := makeRandomVector(i)
x := rand.ExpFloat64()
b := Scale(a, x)
for j = 0; j < i; j++ {
if b.dims[j] != a.dims[j]*x {
t.Error("Scalar Multiplication failed, ",
"didn't get expected values.")
t.Logf("%f * %f != %f", a.dims[j], x, b.dims[j])
}
}
// Test in-place scalar multiplication
b = a.Copy()
b.Scale(x)
for j = 0; j < i; j++ {
if b.dims[j] != a.dims[j]*x {
t.Error("In-place Scalar Multiplication failed, ",
"didn't get expected values.")
t.Logf("%f * %f != %f", a.dims[j], x, b.dims[j])
}
}
}
}
// Creates pseudo-random vectors, normalizes them both in-place and
// non-destructive, and verifies that the result is correct.
func TestNormalize(t *testing.T) {
var i uint
// It makes no sense to normalize a zero vector, therefore we start at 1.
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := Normalize(a)
if b.Len() != float64(1) {
t.Error("Normalization failed, vector doesn't have length 1.")
t.Logf("%f != 1", b.Len())
}
}
}
// Uses vectors with known angles to calculate their DotProduct, then verifies
// if the result is correct.
func TestDotProduct(t *testing.T) {
a := New(2)
b := New(2)
// Set the vectors as parallel.
a.Set(0, 1)
b.Set(0, 1)
dot, _ := DotProduct(a, b)
if dot != 1 {
t.Error("Dot Product of parallel vectors isn't 1.")
}
// Set the vectors as orthogonal.
b = New(2)
b.Set(1, 1)
dot, _ = DotProduct(a, b)
if dot != 0 {
t.Error("Dot Product of orthogonal vectors isn't 0.")
}
// Set the vectors as anti-parallel.
b = New(2)
b.Set(0, -1)
dot, _ = DotProduct(a, b)
if dot != -1 {
t.Error("Dot Product of anti-parallel vectors isn't -1.")
}
}
// Uses vectors with known angles to verify that Angle() is correct.
func TestAngle(t *testing.T) {
a := New(2)
b := New(2)
// Set the vectors as parallel (Θ == 0).
a.Set(0, 1)
b.Set(0, | // =========================== [ Operation Tests ] ============================
// Creates pesudo-random vectors, then adds them first as a non-destructive, | random_line_split |
vector_test.go | 0)
if Equal(v, w) {
t.Error("Equal() == true for unequal vectors.")
}
}
// =========================== [ Operation Tests ] ============================
// Creates pseudo-random vectors, then adds them first as a non-destructive,
// then as an in-place operation, checking if both operations were correct.
func TestAdd(t *testing.T) {
var i, j uint
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := makeRandomVector(i)
c, _ := Add(a, b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]+b.dims[j] {
t.Error("Addition failed, didn't get expected values.")
t.Logf("%f + %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
// Test in-place addition.
c = a.Copy()
c.Add(b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]+b.dims[j] {
t.Error(
"In-place Addition failed, didn't get expected values.")
t.Logf("%f + %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
}
}
// Same as TestAdd, but with substraction. Heck, it's basically the same code.
func TestSubstract(t *testing.T) {
var i, j uint
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := makeRandomVector(i)
c, _ := Substract(a, b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]-b.dims[j] {
t.Error("Substraction failed, didn't get expected values.")
t.Logf("%f - %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
		// Test in-place substraction
c = a.Copy()
c.Substract(b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]-b.dims[j] {
t.Error(
"In-place Substraction failed, didn't get expected values.")
t.Logf("%f - %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
}
}
// Creates pseudo-random vectors, does scalar multiplication with pseudo-random
// floats, then checks if the result is correct. It checks both the in-place
// and the non-destructive version.
func TestScale(t *testing.T) {
var i, j uint
for i = 0; i < 100; i++ {
a := makeRandomVector(i)
x := rand.ExpFloat64()
b := Scale(a, x)
for j = 0; j < i; j++ {
if b.dims[j] != a.dims[j]*x {
t.Error("Scalar Multiplication failed, ",
"didn't get expected values.")
t.Logf("%f * %f != %f", a.dims[j], x, b.dims[j])
}
}
// Test in-place scalar multiplication
b = a.Copy()
b.Scale(x)
for j = 0; j < i; j++ {
if b.dims[j] != a.dims[j]*x {
t.Error("In-place Scalar Multiplication failed, ",
"didn't get expected values.")
t.Logf("%f * %f != %f", a.dims[j], x, b.dims[j])
}
}
}
}
// Creates pseudo-random vectors, normalizes them both in-place and
// non-destructive, and verifies that the result is correct.
func TestNormalize(t *testing.T) {
var i uint
// It makes no sense to normalize a zero vector, therefore we start at 1.
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := Normalize(a)
if b.Len() != float64(1) {
t.Error("Normalization failed, vector doesn't have length 1.")
t.Logf("%f != 1", b.Len())
}
}
}
// Uses vectors with known angles to calculate their DotProduct, then verifies
// if the result is correct.
func TestDotProduct(t *testing.T) {
a := New(2)
b := New(2)
// Set the vectors as parallel.
a.Set(0, 1)
b.Set(0, 1)
dot, _ := DotProduct(a, b)
if dot != 1 {
t.Error("Dot Product of parallel vectors isn't 1.")
}
// Set the vectors as orthogonal.
b = New(2)
b.Set(1, 1)
dot, _ = DotProduct(a, b)
if dot != 0 {
t.Error("Dot Product of orthogonal vectors isn't 0.")
}
// Set the vectors as anti-parallel.
b = New(2)
b.Set(0, -1)
dot, _ = DotProduct(a, b)
if dot != -1 {
t.Error("Dot Product of anti-parallel vectors isn't -1.")
}
}
// Uses vectors with known angles to verify that Angle() is correct.
func TestAngle(t *testing.T) {
a := New(2)
b := New(2)
// Set the vectors as parallel (Θ == 0).
a.Set(0, 1)
b.Set(0, 1)
Θ, _ := Angle(a, b)
if Θ != 0 {
t.Error("Angle between parallel vectors isn't 0.")
t.Logf("%f != 0", Θ)
}
// Set the vectors as orthogonal (Θ == 0.5π).
b = New(2)
b.Set(1, 1)
Θ, _ = Angle(a, b)
if Θ != 0.5*math.Pi {
t.Error("Angle between orthonal vectors isn't 0.5π.")
t.Logf("%f != %f", Θ, 0.5*math.Pi)
}
// Set the vectors as anti-parallel (Θ == π).
b = New(2)
b.Set(0, -1)
Θ, _ = Angle(a, b)
if Θ != math.Pi {
t.Error("Angle between anti-parallel vectors isn't π.")
t.Logf("%f != %f", Θ, math.Pi)
}
}
// Calculates the cross product of two pseudo-random vectors, then checks if
// the resulting vector is orthogonal to both the original vectors. Tests both
// in-place and non-destructive versions of the operation.
func TestCrossProduct(t *testing.T) {
check := func(a, b, c *Vector) {
dot_a, _ := DotProduct(a, c)
dot_b, _ := DotProduct(b, c)
ε := 0.0000000005
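		// dot products within ε of zero are rounded down to zero so that
		// floating-point error cannot make an orthogonal result look non-orthogonal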
if math.Abs(0-dot_a) < ε {
dot_a = 0
}
if math.Abs(0-dot_b) < ε {
dot_b = 0
}
if dot_a != 0 || dot_b != 0 {
t.Error("Either or both vectors aren't orthogonal",
"to their Cross Product.")
t.Logf("a * c = %f", dot_a)
t.Logf("b * c = %f", dot_b)
}
}
a := makeRandomVector(3)
b := makeRandomVector(3)
c, _ := CrossProduct(a, b)
check(a, b, c)
// Check in-place, too.
c = a.Copy()
c.CrossProduct(b)
check(a, b, c)
// Check if vectors ∉ ℝ³ are rejected.
d := New(2)
e := New(4)
_, err := CrossProduct(d, e)
if err == nil {
t.Error("CrossProduct() didn't error with invalid input vectors",
"(∉ ℝ³)")
}
}
// Check whether the various functions that take more than one vector error on
// being supplied with vectors of missmatched dimensions.
// It suffices to check the helper function checkDims, since every function
// must call it to verify its inputs.
func TestMissmatchedDims(t *testing.T) {
a := New(2)
b := New(3)
err := checkDims(a, b)
if err == nil {
t.Error("Missmatched dimension check succeeded on unequal dimensions.")
}
a = New(4)
b = New(4)
err = checkDims(a, b)
if err != nil {
t.Error("Missmatched dimension check failed on equal dimensions.")
}
}
// =========================== [ Helper Functions ] ===========================
// Helper function, makes pseudo-random slices.
func makeRandSlice(length uint) (randslice []float64) {
randslice = make([]float64 | , length)
for i := range randslice {
randslice[i] = rand.ExpFloat64()
}
return
}
// Helper function, make a | identifier_body |
|
vector_test.go | 100; i++ {
randslice := makeRandSlice(i)
v := NewFrom(randslice)
if v.Dim() != i {
t.Errorf("Wrong dimension. Got %d, expected %d.", v.Dim(), i)
}
for j = 0; j < i; j++ {
val, _ := v.Get(j)
if val != randslice[j] {
t.Error(
"Wrong values in vector initialized from a random slice.")
}
}
}
}
// Creates pseudo-random vectors with various dimensions, copies them and
// verifies that the new vector is equal.
func TestCopy(t *testing.T) {
var i uint
for i = 0; i < 100; i++ {
v := makeRandomVector(i)
w := v.Copy()
if !Equal(v, w) {
t.Error("Copied vector is not equal to source vector.")
}
}
}
// =================== [ General Methods/Functions Tests ] ====================
// Creates pseudo-random vectors with various dimensions, then checks if Get()
// returns the correct values and errors on out-of-range indexes.
func TestGet(t *testing.T) {
var i uint
for i = 0; i < 100; i++ {
v := makeRandomVector(i)
for j, val := range v.dims {
getval, err := v.Get(uint(j))
if err != nil {
t.Error("Get() errored on a correct index.")
}
if val != getval {
t.Error("Get() returned a wrong value.")
}
}
_, err := v.Get(v.Dim())
if err == nil {
t.Error("Get didn't error on an out-of-range index.")
}
}
}
// Creates uninitialized vectors of various dimensions, then sets their values
// to pseudo-random values. It then compares those values to check if they
// were set correctly. Also verifies that Set() correctly errors on out-of-range
// indexes.
func TestSet(t *testing.T) {
var i, j uint
for i = 0; i < 100; i++ {
v := New(i)
for j = 0; j < i; j++ {
val := rand.ExpFloat64()
err := v.Set(j, val)
if err != nil {
t.Error("Set() errored on a correct index.")
}
if v.dims[j] != val {
t.Error("Set didn't correctly set a value.")
}
}
err := v.Set(v.Dim(), 0)
if err == nil {
t.Error("Set didn't error on an out-of-range index.")
}
}
}
// Creates a vector with known length, then compares the expected value with
// what Len() returns.
func TestLen(t *testing.T) {
v := New(1)
v.Set(0, 2) // has length 2
if v.Len() != 2 {
t.Error("Len returned a wrong length")
}
}
// Creates Vectors which are known to be (un)equal, then verifies that Equal()
// has correct output.
func TestEqual(t *testing.T) {
slc := make([]float64, 10)
for i := range slc {
slc[i] = float64(i)
}
v := NewFrom(slc)
w := NewFrom(slc)
if !Equal(v, w) {
t.Error("Equal() != true for equal vectors.")
}
w = New(10)
if Equal(v, w) |
}
// =========================== [ Operation Tests ] ============================
// Creates pseudo-random vectors, then adds them first as a non-destructive,
// then as an in-place operation, checking if both operations were correct.
func TestAdd(t *testing.T) {
var i, j uint
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := makeRandomVector(i)
c, _ := Add(a, b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]+b.dims[j] {
t.Error("Addition failed, didn't get expected values.")
t.Logf("%f + %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
// Test in-place addition.
c = a.Copy()
c.Add(b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]+b.dims[j] {
t.Error(
"In-place Addition failed, didn't get expected values.")
t.Logf("%f + %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
}
}
// Same as TestAdd, but with substraction. Heck, it's basically the same code.
func TestSubstract(t *testing.T) {
var i, j uint
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := makeRandomVector(i)
c, _ := Substract(a, b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]-b.dims[j] {
t.Error("Substraction failed, didn't get expected values.")
t.Logf("%f - %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
		// Test in-place substraction
c = a.Copy()
c.Substract(b)
for j = 0; j < i; j++ {
if c.dims[j] != a.dims[j]-b.dims[j] {
t.Error(
"In-place Substraction failed, didn't get expected values.")
t.Logf("%f - %f != %f", a.dims[j], b.dims[j], c.dims[j])
}
}
}
}
// Creates pseudo-random vectors, does scalar multiplication with pseudo-random
// floats, then checks if the result is correct. It checks both the in-place
// and the non-destructive version.
func TestScale(t *testing.T) {
var i, j uint
for i = 0; i < 100; i++ {
a := makeRandomVector(i)
x := rand.ExpFloat64()
b := Scale(a, x)
for j = 0; j < i; j++ {
if b.dims[j] != a.dims[j]*x {
t.Error("Scalar Multiplication failed, ",
"didn't get expected values.")
t.Logf("%f * %f != %f", a.dims[j], x, b.dims[j])
}
}
// Test in-place scalar multiplication
b = a.Copy()
b.Scale(x)
for j = 0; j < i; j++ {
if b.dims[j] != a.dims[j]*x {
t.Error("In-place Scalar Multiplication failed, ",
"didn't get expected values.")
t.Logf("%f * %f != %f", a.dims[j], x, b.dims[j])
}
}
}
}
// Creates pseudo-random vectors, normalizes them both in-place and
// non-destructive, and verifies that the result is correct.
func TestNormalize(t *testing.T) {
var i uint
// It makes no sense to normalize a zero vector, therefore we start at 1.
for i = 1; i < 100; i++ {
a := makeRandomVector(i)
b := Normalize(a)
if b.Len() != float64(1) {
t.Error("Normalization failed, vector doesn't have length 1.")
t.Logf("%f != 1", b.Len())
}
}
}
// Uses vectors with known angles to calculate their DotProduct, then verifies
// if the result is correct.
func TestDotProduct(t *testing.T) {
a := New(2)
b := New(2)
// Set the vectors as parallel.
a.Set(0, 1)
b.Set(0, 1)
dot, _ := DotProduct(a, b)
if dot != 1 {
t.Error("Dot Product of parallel vectors isn't 1.")
}
// Set the vectors as orthogonal.
b = New(2)
b.Set(1, 1)
dot, _ = DotProduct(a, b)
if dot != 0 {
t.Error("Dot Product of orthogonal vectors isn't 0.")
}
// Set the vectors as anti-parallel.
b = New(2)
b.Set(0, -1)
dot, _ = DotProduct(a, b)
if dot != -1 {
t.Error("Dot Product of anti-parallel vectors isn't -1.")
}
}
// Uses vectors with known angles to verify that Angle() is correct.
func TestAngle(t *testing.T) {
a := New(2)
b := New(2)
// Set the vectors as parallel (Θ == 0).
a.Set(0, 1)
b.Set(0 | {
t.Error("Equal() == true for unequal vectors.")
} | conditional_block |
preprocessing.py | = pd.DataFrame(data=x_train, columns=dataset.columns[1:-1])
train["Trend"] = pd.Series(y_train)
# Do the same for x_test and y__test
test = pd.DataFrame(data=x_test, columns=dataset.columns[1:-1])
test["Trend"] = pd.Series(y_test)
# Apply random undersampling to both data frames
train_downsampled = random_undersampling(train)
test_downsampled = random_undersampling(test)
train_trend = train_downsampled["Trend"].values
test_trend = test_downsampled["Trend"].values
train_trimmed = train_downsampled.drop(["Trend"], axis=1).values
test_trimmed = test_downsampled.drop(["Trend"], axis=1).values
return train_trimmed, test_trimmed, train_trend, test_trend
def unbalanced_split(dataset, test_size):
"""Randomly splits dataset into unbalanced training and test sets."""
print("\tSplitting data into *unbalanced* training and test sets")
dataset = dataset.drop("Date", axis=1)
output = train_test_split(dataset.drop("Trend", axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
return output
# Main #
def calculate_indicators(ohlcv):
"""Extracts technical indicators from OHLCV data."""
print("\tCalculating technical indicators")
ohlcv = ohlcv.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv = ohlcv.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int((dp.parse(temp_ohlcv.iloc[index]["Date"])).strftime("%s")) for index in range(temp_ohlcv.shape[0])]
temp_ohlcv["Date"] = (pd.Series(unix_times)).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv = temp_ohlcv.iloc[::-1]
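    # indicators below are computed on the chronologically-ordered copy, then each
    # result is reversed back to newest-first to line up with the original rows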
# Rate of Change Ratio
rocr3 = ((Indicator(temp_ohlcv, "ROCR", 3)).getHistorical())[::-1]
rocr6 = ((Indicator(temp_ohlcv, "ROCR", 6)).getHistorical())[::-1]
# Average True Range
atr = ((Indicator(temp_ohlcv, "ATR", 14)).getHistorical())[::-1]
# On-Balance Volume
obv = ((Indicator(temp_ohlcv, "OBV")).getHistorical())[::-1]
# Triple Exponential Moving Average
trix = ((Indicator(temp_ohlcv, "TRIX", 20)).getHistorical())[::-1]
# Momentum
mom1 = ((Indicator(temp_ohlcv, "MOM", 1)).getHistorical())[::-1]
mom3 = ((Indicator(temp_ohlcv, "MOM", 3)).getHistorical())[::-1]
# Average Directional Index
adx14 = ((Indicator(temp_ohlcv, "ADX", 14)).getHistorical())[::-1]
adx20 = ((Indicator(temp_ohlcv, "ADX", 20)).getHistorical())[::-1]
# Williams %R
willr = ((Indicator(temp_ohlcv, "WILLR", 14)).getHistorical())[::-1]
# Relative Strength Index
rsi6 = ((Indicator(temp_ohlcv, "RSI", 6)).getHistorical())[::-1]
rsi12 = ((Indicator(temp_ohlcv, "RSI", 12)).getHistorical())[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = (Indicator(temp_ohlcv, "MACD", 12, 26, 9)).getHistorical()
macd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = ((Indicator(temp_ohlcv, "MA", 6, 1)).getHistorical())[::-1]
ema12 = ((Indicator(temp_ohlcv, "MA", 12, 1)).getHistorical())[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr), len(rsi6), len(rsi12), len(macd), len(macd_signal), len(macd_hist), len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
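    # truncate everything to the shortest indicator series so all columns align
    # (longer-period indicators produce fewer values)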
ohlcv = ohlcv[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv["MOM (1)"], ohlcv["MOM (3)"], ohlcv["ADX (14)"] = (pd.Series(mom1[:min_length])).values, (pd.Series(mom3[:min_length])).values, (pd.Series(adx14[:min_length])).values
ohlcv["ADX (20)"], ohlcv["WILLR"], ohlcv["RSI (6)"] = (pd.Series(adx20[:min_length])).values, (pd.Series(willr[:min_length])).values, (pd.Series(rsi6[:min_length])).values
ohlcv["RSI (12)"], ohlcv["MACD"], ohlcv["MACD (Signal)"] = (pd.Series(rsi12[:min_length])).values, (pd.Series(macd[:min_length])).values, (pd.Series(macd_signal[:min_length])).values
ohlcv["MACD (Historical)"], ohlcv["EMA (6)"], ohlcv["EMA (12)"] = (pd.Series(macd_hist[:min_length])).values, (pd.Series(ema6[:min_length])).values, (pd.Series(ema12[:min_length])).values
ohlcv["ROCR (3)"], ohlcv["ROCR (6)"], ohlcv["ATR (14)"] = (pd.Series(rocr3[:min_length])).values, (pd.Series(rocr6[:min_length])).values, (pd.Series(atr[:min_length])).values
ohlcv["OBV"], ohlcv["TRIX (20)"] = (pd.Series(obv[:min_length])).values, (pd.Series(trix[:min_length])).values
return ohlcv
def calculate_sentiment(headlines):
sentiment_scores = {}
numer, denom = 0.0, 0.0
for index, currRow in headlines.iterrows():
print(currRow)
currDate = currRow["Date"]
if currDate in sentiment_scores:
pass
else:
numer = currRow["Sentiment"] * currRow["Tweets"]
denom = currRow["Tweets"]
for index, nextRow in headlines.iloc[index + 1:].iterrows():
if nextRow["Date"] == currDate:
numer += (nextRow["Sentiment"] * nextRow["Tweets"])
denom += nextRow["Tweets"]
else:
break
sentiment_scores[currDate] = numer / denom
numer, denom = 0.0, 0.0
sentiment_scores_df = pd.DataFrame(list(sentiment_scores.items()), columns=["Date", "Sentiment"])
return sentiment_scores_df
def merge_datasets(origin, other_sets):
print("\tMerging datasets")
merged = origin
for set in other_sets:
merged = pd.merge(merged, set, on="Date")
return merged
def fix_null_vals(dataset):
"""Implements the Last Observation Carried Forward (LOCF) method to fill missing values."""
print("\tFixing null values")
if not dataset.isnull().any().any():
return dataset
else:
return dataset.fillna(method="ffill")
def binarize_labels(dataset):
"""Transforms daily price data into binary values indicating price change."""
print("\tBinarizing price movements")
trends = [None]
for index in range(dataset.shape[0] - 1):
difference = dataset.iloc[index]["Close"] - dataset.iloc[index + 1]["Close"]
if difference < 0:
trends.append(-1)
else:
trends.append(1)
dataset["Trend"] = (pd.Series(trends)).values
dataset = dataset.drop(dataset.index[0])
return dataset
def add_lag_variables(dataset, lag=3):
| print("\tAdding lag variables")
new_df_dict = {}
for col_header in dataset.drop(["Date", "Trend"], axis=1):
new_df_dict[col_header] = dataset[col_header]
for lag in range(1, lag + 1):
new_df_dict["%s_lag%d" % (col_header, lag)] = dataset[col_header].shift(-lag)
new_df = pd.DataFrame(new_df_dict, index=dataset.index)
new_df["Date"], new_df["Trend"] = dataset["Date"], dataset["Trend"]
return new_df.dropna() | identifier_body |
|
preprocessing.py | = train_downsampled.drop(["Trend"], axis=1).values
test_trimmed = test_downsampled.drop(["Trend"], axis=1).values
return train_trimmed, test_trimmed, train_trend, test_trend
def unbalanced_split(dataset, test_size):
"""Randomly splits dataset into unbalanced training and test sets."""
print("\tSplitting data into *unbalanced* training and test sets")
dataset = dataset.drop("Date", axis=1)
output = train_test_split(dataset.drop("Trend", axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
return output
# Main #
def calculate_indicators(ohlcv):
"""Extracts technical indicators from OHLCV data."""
print("\tCalculating technical indicators")
ohlcv = ohlcv.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv = ohlcv.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int((dp.parse(temp_ohlcv.iloc[index]["Date"])).strftime("%s")) for index in range(temp_ohlcv.shape[0])]
temp_ohlcv["Date"] = (pd.Series(unix_times)).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv = temp_ohlcv.iloc[::-1]
# Rate of Change Ratio
rocr3 = ((Indicator(temp_ohlcv, "ROCR", 3)).getHistorical())[::-1]
rocr6 = ((Indicator(temp_ohlcv, "ROCR", 6)).getHistorical())[::-1]
# Average True Range
atr = ((Indicator(temp_ohlcv, "ATR", 14)).getHistorical())[::-1]
# On-Balance Volume
obv = ((Indicator(temp_ohlcv, "OBV")).getHistorical())[::-1]
# Triple Exponential Moving Average
trix = ((Indicator(temp_ohlcv, "TRIX", 20)).getHistorical())[::-1]
# Momentum
mom1 = ((Indicator(temp_ohlcv, "MOM", 1)).getHistorical())[::-1]
mom3 = ((Indicator(temp_ohlcv, "MOM", 3)).getHistorical())[::-1]
# Average Directional Index
adx14 = ((Indicator(temp_ohlcv, "ADX", 14)).getHistorical())[::-1]
adx20 = ((Indicator(temp_ohlcv, "ADX", 20)).getHistorical())[::-1]
# Williams %R
willr = ((Indicator(temp_ohlcv, "WILLR", 14)).getHistorical())[::-1]
# Relative Strength Index
rsi6 = ((Indicator(temp_ohlcv, "RSI", 6)).getHistorical())[::-1]
rsi12 = ((Indicator(temp_ohlcv, "RSI", 12)).getHistorical())[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = (Indicator(temp_ohlcv, "MACD", 12, 26, 9)).getHistorical()
macd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = ((Indicator(temp_ohlcv, "MA", 6, 1)).getHistorical())[::-1]
ema12 = ((Indicator(temp_ohlcv, "MA", 12, 1)).getHistorical())[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr), len(rsi6), len(rsi12), len(macd), len(macd_signal), len(macd_hist), len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
ohlcv = ohlcv[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv["MOM (1)"], ohlcv["MOM (3)"], ohlcv["ADX (14)"] = (pd.Series(mom1[:min_length])).values, (pd.Series(mom3[:min_length])).values, (pd.Series(adx14[:min_length])).values
ohlcv["ADX (20)"], ohlcv["WILLR"], ohlcv["RSI (6)"] = (pd.Series(adx20[:min_length])).values, (pd.Series(willr[:min_length])).values, (pd.Series(rsi6[:min_length])).values
ohlcv["RSI (12)"], ohlcv["MACD"], ohlcv["MACD (Signal)"] = (pd.Series(rsi12[:min_length])).values, (pd.Series(macd[:min_length])).values, (pd.Series(macd_signal[:min_length])).values
ohlcv["MACD (Historical)"], ohlcv["EMA (6)"], ohlcv["EMA (12)"] = (pd.Series(macd_hist[:min_length])).values, (pd.Series(ema6[:min_length])).values, (pd.Series(ema12[:min_length])).values
ohlcv["ROCR (3)"], ohlcv["ROCR (6)"], ohlcv["ATR (14)"] = (pd.Series(rocr3[:min_length])).values, (pd.Series(rocr6[:min_length])).values, (pd.Series(atr[:min_length])).values
ohlcv["OBV"], ohlcv["TRIX (20)"] = (pd.Series(obv[:min_length])).values, (pd.Series(trix[:min_length])).values
return ohlcv
def calculate_sentiment(headlines):
sentiment_scores = {}
numer, denom = 0.0, 0.0
for index, currRow in headlines.iterrows():
print(currRow)
currDate = currRow["Date"]
if currDate in sentiment_scores:
pass
else:
numer = currRow["Sentiment"] * currRow["Tweets"]
denom = currRow["Tweets"]
for index, nextRow in headlines.iloc[index + 1:].iterrows():
if nextRow["Date"] == currDate:
numer += (nextRow["Sentiment"] * nextRow["Tweets"])
denom += nextRow["Tweets"]
else:
break
sentiment_scores[currDate] = numer / denom
numer, denom = 0.0, 0.0
sentiment_scores_df = pd.DataFrame(list(sentiment_scores.items()), columns=["Date", "Sentiment"])
return sentiment_scores_df
def merge_datasets(origin, other_sets):
print("\tMerging datasets")
merged = origin
for set in other_sets:
merged = pd.merge(merged, set, on="Date")
return merged
def fix_null_vals(dataset):
"""Implements the Last Observation Carried Forward (LOCF) method to fill missing values."""
print("\tFixing null values")
if not dataset.isnull().any().any():
return dataset
else:
return dataset.fillna(method="ffill")
def binarize_labels(dataset):
"""Transforms daily price data into binary values indicating price change."""
print("\tBinarizing price movements")
trends = [None]
for index in range(dataset.shape[0] - 1):
difference = dataset.iloc[index]["Close"] - dataset.iloc[index + 1]["Close"]
if difference < 0:
trends.append(-1)
else:
trends.append(1)
dataset["Trend"] = (pd.Series(trends)).values
dataset = dataset.drop(dataset.index[0])
return dataset
def add_lag_variables(dataset, lag=3):
print("\tAdding lag variables")
new_df_dict = {}
for col_header in dataset.drop(["Date", "Trend"], axis=1):
new_df_dict[col_header] = dataset[col_header]
for lag in range(1, lag + 1):
new_df_dict["%s_lag%d" % (col_header, lag)] = dataset[col_header].shift(-lag)
new_df = pd.DataFrame(new_df_dict, index=dataset.index)
new_df["Date"], new_df["Trend"] = dataset["Date"], dataset["Trend"]
return new_df.dropna()
def power_transform(dataset):
print("\tApplying a box-cox transform to selected features")
for header in dataset.drop(["Date", "Trend"], axis=1).columns:
if not (dataset[header] < 0).any() and not (dataset[header] == 0).any():
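# Note: scipy's boxcox() returns a (transformed_values, fitted_lambda) tuple;
# only the transformed values are kept here.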
dataset[header] = boxcox(dataset[header])[0]
return dataset
def split(dataset, test_size, balanced=True):
# TODO: Splits can't be random, they need to respect the temporal order of each observation | if balanced:
return balanced_split(dataset, test_size)
else:
return unbalanced_split(dataset, test_size) | random_line_split |
|
preprocessing.py | impson_est.append(simps(x, dx=1))
dead_values = list([None] * interval)
dead_values.extend(integral_simpson_est)
dead_values.reverse()
return dead_values
def random_undersampling(dataset):
"""Randomly deleting rows that contain the majority class until the number
in the majority class is equal with the number in the minority class."""
minority_set = dataset[dataset.Trend == -1.0]
majority_set = dataset[dataset.Trend == 1.0]
# print(dataset.Trend.value_counts())
# If minority set larger than majority set, swap
if len(minority_set) > len(majority_set):
minority_set, majority_set = majority_set, minority_set
# Downsample majority class
majority_downsampled = resample(majority_set,
replace=False, # sample without replacement
n_samples=len(minority_set), # to match minority class
random_state=123) # reproducible results
# Combine minority class with downsampled majority class
return pd.concat([majority_downsampled, minority_set])
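# Illustrative usage (a sketch, not executed anywhere in this module): given a
# dataframe whose "Trend" column holds the 1.0 / -1.0 class labels, the call
# below returns a frame in which both classes occur equally often.
#
#   balanced = random_undersampling(dataset)
#   print balanced["Trend"].value_counts()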
def get_popularity(headlines):
# TODO: Randomize user-agents OR figure out how to handle popups
if "Tweets" not in headlines.columns:
counts = []
driver = webdriver.Chrome()
for index, row in headlines.iterrows():
try:
driver.get(row["URL"])
time.sleep(3)
twitter_containers = driver.find_elements_by_xpath("//li[@class='twitter']")
count = twitter_containers[0].find_elements_by_xpath("//span[@class='count']")
if count[0].text == "":
counts.append(1)
else:
counts.append(int(count[0].text))
except:
counts.append(1) # QUESTION: Should it be None?
headlines["Tweets"] = (pd.Series(counts)).values
print(counts)
return headlines
def balanced_split(dataset, test_size):
"""Randomly splits dataset into balanced training and test sets."""
print("\tSplitting data into *balanced* training and test sets")
# Use sklearn.train_test_split to split original dataset into x_train, x_test, y_train, y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Date", "Trend"], axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=dataset.columns[1:-1])
train["Trend"] = pd.Series(y_train)
# Do the same for x_test and y_test
test = pd.DataFrame(data=x_test, columns=dataset.columns[1:-1])
test["Trend"] = pd.Series(y_test)
# Apply random undersampling to both data frames
train_downsampled = random_undersampling(train)
test_downsampled = random_undersampling(test)
train_trend = train_downsampled["Trend"].values
test_trend = test_downsampled["Trend"].values
train_trimmed = train_downsampled.drop(["Trend"], axis=1).values
test_trimmed = test_downsampled.drop(["Trend"], axis=1).values
return train_trimmed, test_trimmed, train_trend, test_trend
def unbalanced_split(dataset, test_size):
"""Randomly splits dataset into unbalanced training and test sets."""
print("\tSplitting data into *unbalanced* training and test sets")
dataset = dataset.drop("Date", axis=1)
output = train_test_split(dataset.drop("Trend", axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
return output
# Main #
def calculate_indicators(ohlcv):
"""Extracts technical indicators from OHLCV data."""
print("\tCalculating technical indicators")
ohlcv = ohlcv.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv = ohlcv.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int((dp.parse(temp_ohlcv.iloc[index]["Date"])).strftime("%s")) for index in range(temp_ohlcv.shape[0])]
temp_ohlcv["Date"] = (pd.Series(unix_times)).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv = temp_ohlcv.iloc[::-1]
# Rate of Change Ratio
rocr3 = ((Indicator(temp_ohlcv, "ROCR", 3)).getHistorical())[::-1]
rocr6 = ((Indicator(temp_ohlcv, "ROCR", 6)).getHistorical())[::-1]
# Average True Range
atr = ((Indicator(temp_ohlcv, "ATR", 14)).getHistorical())[::-1]
# On-Balance Volume
obv = ((Indicator(temp_ohlcv, "OBV")).getHistorical())[::-1]
# Triple Exponential Moving Average
trix = ((Indicator(temp_ohlcv, "TRIX", 20)).getHistorical())[::-1]
# Momentum
mom1 = ((Indicator(temp_ohlcv, "MOM", 1)).getHistorical())[::-1]
mom3 = ((Indicator(temp_ohlcv, "MOM", 3)).getHistorical())[::-1]
# Average Directional Index
adx14 = ((Indicator(temp_ohlcv, "ADX", 14)).getHistorical())[::-1]
adx20 = ((Indicator(temp_ohlcv, "ADX", 20)).getHistorical())[::-1]
# Williams %R
willr = ((Indicator(temp_ohlcv, "WILLR", 14)).getHistorical())[::-1]
# Relative Strength Index
rsi6 = ((Indicator(temp_ohlcv, "RSI", 6)).getHistorical())[::-1]
rsi12 = ((Indicator(temp_ohlcv, "RSI", 12)).getHistorical())[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = (Indicator(temp_ohlcv, "MACD", 12, 26, 9)).getHistorical()
macd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = ((Indicator(temp_ohlcv, "MA", 6, 1)).getHistorical())[::-1]
ema12 = ((Indicator(temp_ohlcv, "MA", 12, 1)).getHistorical())[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr), len(rsi6), len(rsi12), len(macd), len(macd_signal), len(macd_hist), len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
ohlcv = ohlcv[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv["MOM (1)"], ohlcv["MOM (3)"], ohlcv["ADX (14)"] = (pd.Series(mom1[:min_length])).values, (pd.Series(mom3[:min_length])).values, (pd.Series(adx14[:min_length])).values
ohlcv["ADX (20)"], ohlcv["WILLR"], ohlcv["RSI (6)"] = (pd.Series(adx20[:min_length])).values, (pd.Series(willr[:min_length])).values, (pd.Series(rsi6[:min_length])).values
ohlcv["RSI (12)"], ohlcv["MACD"], ohlcv["MACD (Signal)"] = (pd.Series(rsi12[:min_length])).values, (pd.Series(macd[:min_length])).values, (pd.Series(macd_signal[:min_length])).values
ohlcv["MACD (Historical)"], ohlcv["EMA (6)"], ohlcv["EMA (12)"] = (pd.Series(macd_hist[:min_length])).values, (pd.Series(ema6[:min_length])).values, (pd.Series(ema12[:min_length])).values
ohlcv["ROCR (3)"], ohlcv["ROCR (6)"], ohlcv["ATR (14)"] = (pd.Series(rocr3[:min_length])).values, (pd.Series(rocr6[:min_length])).values, (pd.Series(atr[:min_length])).values
ohlcv["OBV"], ohlcv["TRIX (20)"] = (pd.Series(obv[:min_length])).values, (pd.Series(trix[:min_length])).values
return ohlcv
def | calculate_sentiment | identifier_name |
|
preprocessing.py | ):
minority_set, majority_set = majority_set, minority_set
# Downsample majority class
majority_downsampled = resample(majority_set,
replace=False, # sample without replacement
n_samples=len(minority_set), # to match minority class
random_state=123) # reproducible results
# Combine minority class with downsampled majority class
return pd.concat([majority_downsampled, minority_set])
def get_popularity(headlines):
# TODO: Randomize user-agents OR figure out how to handle popups
if "Tweets" not in headlines.columns:
counts = []
driver = webdriver.Chrome()
for index, row in headlines.iterrows():
try:
driver.get(row["URL"])
time.sleep(3)
twitter_containers = driver.find_elements_by_xpath("//li[@class='twitter']")
count = twitter_containers[0].find_elements_by_xpath("//span[@class='count']")
if count[0].text == "":
counts.append(1)
else:
counts.append(int(count[0].text))
except:
counts.append(1) # QUESTION: Should it be None?
headlines["Tweets"] = (pd.Series(counts)).values
print(counts)
return headlines
def balanced_split(dataset, test_size):
"""Randomly splits dataset into balanced training and test sets."""
print("\tSplitting data into *balanced* training and test sets")
# Use sklearn.train_test_split to split original dataset into x_train, x_test, y_train, y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Date", "Trend"], axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=dataset.columns[1:-1])
train["Trend"] = pd.Series(y_train)
# Do the same for x_test and y_test
test = pd.DataFrame(data=x_test, columns=dataset.columns[1:-1])
test["Trend"] = pd.Series(y_test)
# Apply random undersampling to both data frames
train_downsampled = random_undersampling(train)
test_downsampled = random_undersampling(test)
train_trend = train_downsampled["Trend"].values
test_trend = test_downsampled["Trend"].values
train_trimmed = train_downsampled.drop(["Trend"], axis=1).values
test_trimmed = test_downsampled.drop(["Trend"], axis=1).values
return train_trimmed, test_trimmed, train_trend, test_trend
def unbalanced_split(dataset, test_size):
"""Randomly splits dataset into unbalanced training and test sets."""
print("\tSplitting data into *unbalanced* training and test sets")
dataset = dataset.drop("Date", axis=1)
output = train_test_split(dataset.drop("Trend", axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
return output
# Main #
def calculate_indicators(ohlcv):
"""Extracts technical indicators from OHLCV data."""
print("\tCalculating technical indicators")
ohlcv = ohlcv.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv = ohlcv.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int((dp.parse(temp_ohlcv.iloc[index]["Date"])).strftime("%s")) for index in range(temp_ohlcv.shape[0])]
temp_ohlcv["Date"] = (pd.Series(unix_times)).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv = temp_ohlcv.iloc[::-1]
# Rate of Change Ratio
rocr3 = ((Indicator(temp_ohlcv, "ROCR", 3)).getHistorical())[::-1]
rocr6 = ((Indicator(temp_ohlcv, "ROCR", 6)).getHistorical())[::-1]
# Average True Range
atr = ((Indicator(temp_ohlcv, "ATR", 14)).getHistorical())[::-1]
# On-Balance Volume
obv = ((Indicator(temp_ohlcv, "OBV")).getHistorical())[::-1]
# Triple Exponential Moving Average
trix = ((Indicator(temp_ohlcv, "TRIX", 20)).getHistorical())[::-1]
# Momentum
mom1 = ((Indicator(temp_ohlcv, "MOM", 1)).getHistorical())[::-1]
mom3 = ((Indicator(temp_ohlcv, "MOM", 3)).getHistorical())[::-1]
# Average Directional Index
adx14 = ((Indicator(temp_ohlcv, "ADX", 14)).getHistorical())[::-1]
adx20 = ((Indicator(temp_ohlcv, "ADX", 20)).getHistorical())[::-1]
# Williams %R
willr = ((Indicator(temp_ohlcv, "WILLR", 14)).getHistorical())[::-1]
# Relative Strength Index
rsi6 = ((Indicator(temp_ohlcv, "RSI", 6)).getHistorical())[::-1]
rsi12 = ((Indicator(temp_ohlcv, "RSI", 12)).getHistorical())[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = (Indicator(temp_ohlcv, "MACD", 12, 26, 9)).getHistorical()
macd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = ((Indicator(temp_ohlcv, "MA", 6, 1)).getHistorical())[::-1]
ema12 = ((Indicator(temp_ohlcv, "MA", 12, 1)).getHistorical())[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr), len(rsi6), len(rsi12), len(macd), len(macd_signal), len(macd_hist), len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
ohlcv = ohlcv[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv["MOM (1)"], ohlcv["MOM (3)"], ohlcv["ADX (14)"] = (pd.Series(mom1[:min_length])).values, (pd.Series(mom3[:min_length])).values, (pd.Series(adx14[:min_length])).values
ohlcv["ADX (20)"], ohlcv["WILLR"], ohlcv["RSI (6)"] = (pd.Series(adx20[:min_length])).values, (pd.Series(willr[:min_length])).values, (pd.Series(rsi6[:min_length])).values
ohlcv["RSI (12)"], ohlcv["MACD"], ohlcv["MACD (Signal)"] = (pd.Series(rsi12[:min_length])).values, (pd.Series(macd[:min_length])).values, (pd.Series(macd_signal[:min_length])).values
ohlcv["MACD (Historical)"], ohlcv["EMA (6)"], ohlcv["EMA (12)"] = (pd.Series(macd_hist[:min_length])).values, (pd.Series(ema6[:min_length])).values, (pd.Series(ema12[:min_length])).values
ohlcv["ROCR (3)"], ohlcv["ROCR (6)"], ohlcv["ATR (14)"] = (pd.Series(rocr3[:min_length])).values, (pd.Series(rocr6[:min_length])).values, (pd.Series(atr[:min_length])).values
ohlcv["OBV"], ohlcv["TRIX (20)"] = (pd.Series(obv[:min_length])).values, (pd.Series(trix[:min_length])).values
return ohlcv
def calculate_sentiment(headlines):
sentiment_scores = {}
numer, denom = 0.0, 0.0
for index, currRow in headlines.iterrows():
print(currRow)
currDate = currRow["Date"]
if currDate in sentiment_scores:
pass
else:
numer = currRow["Sentiment"] * currRow["Tweets"]
denom = currRow["Tweets"]
for index, nextRow in headlines.iloc[index + 1:].iterrows():
if nextRow["Date"] == currDate:
| numer += (nextRow["Sentiment"] * nextRow["Tweets"])
denom += nextRow["Tweets"] | conditional_block |
|
networking.go | a newline.
The STRING command includes one line of string
data, which can be handled by simple read and write methods from `bufio`.
The GOB command comes with a `struct` that contains a couple of fields,
including a slice, a map, and even a pointer to itself. As you can see when
running the code, the `gob` package moves all this through our network
connection without any fuss.
What we basically have here is some sort of ad-hoc protocol, where the client
and the server agree that a command is a string followed by a newline followed
by some data. For each command, the server must know the exact data format
and how to process the data.
To achieve this, the server code takes a two-step approach.
Step 1: When the `Listen()` function accepts a new connection, it spawns
a new goroutine that calls function `handleMessage()`. This function reads
the command name from the connection, looks up the appropriate handler
function from a map, and calls this function.
Step 2: The selected handler function reads and processes the command's data.
Here is a visual summary of this process.
HYPE[Server Command Dispatch](tcpserver.html)
Keep these pictures in mind, they help reading the actual code.
## The Code
*/
// ## Imports and globals
package main
import (
"bufio"
"io"
"log"
"net"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
"encoding/gob"
"flag"
)
// A struct with a mix of fields, used for the GOB example.
type complexData struct {
N int
S string
M map[string]int
P []byte
C *complexData
}
const (
// Port is the port number that the server listens to.
Port = ":61000"
)
/*
## Outgoing connections
Using an outgoing connection is a snap. A `net.Conn` satisfies the io.Reader
and `io.Writer` interfaces, so we can treat a TCP connection just like any other
`Reader` or `Writer`.
*/
// Open connects to a TCP Address.
// It returns a TCP connection armed with a timeout and wrapped into a
// buffered ReadWriter.
func Open(addr string) (*bufio.ReadWriter, error) {
// Dial the remote process.
// Note that the local port is chosen on the fly. If the local port
// must be a specific one, use DialTCP() instead.
log.Println("Dial " + addr)
conn, err := net.Dial("tcp", addr)
if err != nil {
return nil, errors.Wrap(err, "Dialing "+addr+" failed")
}
return bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil
}
/*
## Incoming connections
Preparing for incoming data is a bit more involved. According to our ad-hoc
protocol, we receive the name of a command terminated by `\n`, followed by data.
The nature of the data depends on the respective command. To handle this, we
create an `Endpoint` object with the following properties:
* It allows registering one or more handler functions, where each can handle a
particular command.
* It dispatches incoming commands to the associated handler based on the command's
name.
*/
// HandleFunc is a function that handles an incoming command.
// It receives the open connection wrapped in a `ReadWriter` interface.
type HandleFunc func(*bufio.ReadWriter)
// Endpoint provides an endpoint to other processes
// that they can send data to.
type Endpoint struct {
listener net.Listener
handler map[string]HandleFunc
// Maps are not threadsafe, so we need a mutex to control access.
mutex sync.RWMutex
}
// NewEndpoint creates a new endpoint. To keep things simple,
// the endpoint listens on a fixed port number.
func NewEndpoint() *Endpoint {
// Create a new Endpoint with an empty list of handler funcs.
return &Endpoint{
handler: map[string]HandleFunc{},
}
}
// AddHandleFunc adds a new function for handling incoming data.
func (e *Endpoint) AddHandleFunc(name string, f HandleFunc) {
e.mutex.Lock()
e.handler[name] = f
e.mutex.Unlock()
}
// Listen starts listening on the endpoint port on all interfaces.
// At least one handler function must have been added
// through AddHandleFunc() before.
func (e *Endpoint) Listen() error {
var err error
e.listener, err = net.Listen("tcp", Port)
if err != nil {
return errors.Wrapf(err, "Unable to listen on port %s\n", Port)
}
log.Println("Listen on", e.listener.Addr().String())
for {
log.Println("Accept a connection request.")
conn, err := e.listener.Accept()
if err != nil {
log.Println("Failed accepting a connection request:", err)
continue
}
log.Println("Handle incoming messages.")
go e.handleMessages(conn)
}
}
// handleMessages reads the connection up to the first newline.
// Based on this string, it calls the appropriate HandleFunc.
func (e *Endpoint) handleMessages(conn net.Conn) {
// Wrap the connection into a buffered reader for easier reading.
rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
defer conn.Close()
// Read from the connection until EOF. Expect a command name as the
// next input. Call the handler that is registered for this command.
for {
log.Print("Receive command '")
cmd, err := rw.ReadString('\n')
switch {
case err == io.EOF:
log.Println("Reached EOF - close this connection.\n ---")
return
case err != nil:
log.Println("\nError reading command. Got: '"+cmd+"'\n", err)
return
}
// Trim the request string - ReadString does not strip any newlines.
cmd = strings.Trim(cmd, "\n ")
log.Println(cmd + "'")
// Fetch the appropriate handler function from the 'handler' map and call it.
e.mutex.RLock()
handleCommand, ok := e.handler[cmd]
e.mutex.RUnlock()
if !ok {
log.Println("Command '" + cmd + "' is not registered.")
return
}
handleCommand(rw)
}
}
/* Now let's create two handler functions. The easiest case is where our
ad-hoc protocol only sends string data.
The second handler receives and processes a struct that was sent as GOB data.
*/
// handleStrings handles the "STRING" request.
func handleStrings(rw *bufio.ReadWriter) {
// Receive a string.
log.Print("Receive STRING message:")
s, err := rw.ReadString('\n')
if err != nil {
log.Println("Cannot read from connection.\n", err)
}
s = strings.Trim(s, "\n ")
log.Println(s)
_, err = rw.WriteString("Thank you.\n")
if err != nil {
log.Println("Cannot write to connection.\n", err)
}
err = rw.Flush()
if err != nil {
log.Println("Flush failed.", err)
}
}
// handleGob handles the "GOB" request. It decodes the received GOB data
// into a struct.
func handleGob(rw *bufio.ReadWriter) {
log.Print("Receive GOB data:")
var data complexData
// Create a decoder that decodes directly into a struct variable.
dec := gob.NewDecoder(rw)
err := dec.Decode(&data)
if err != nil {
log.Println("Error decoding GOB data:", err)
return
}
// Print the complexData struct and the nested one, too, to prove
// that both travelled across the wire.
log.Printf("Outer complexData struct: \n%#v\n", data)
log.Printf("Inner complexData struct: \n%#v\n", data.C)
}
/*
## The client and server functions
With all this in place, we can now set up client and server functions.
The client function connects to the server and sends STRING and GOB requests.
The server starts listening for requests and triggers the appropriate handlers.
*/
// client is called if the app is called with -connect=`ip addr`.
func client(ip string) error {
// Some test data. Note how GOB even handles maps, slices, and
// recursive data structures without problems.
testStruct := complexData{
N: 23,
S: "string data",
M: map[string]int{"one": 1, "two": 2, "three": 3},
P: []byte("abc"),
C: &complexData{
N: 256,
S: "Recursive structs? Piece of cake!",
M: map[string]int{"01": 1, "10": 2, "11": 3},
},
}
// Open a connection to the server.
rw, err := Open(ip + Port)
if err != nil {
return errors.Wrap(err, "Client: Failed to open connection to "+ip+Port)
}
// Send a STRING request.
// Send the request name.
// Send the data.
log.Println("Send the string request.")
n, err := rw.WriteString("STRING\n")
if err != nil {
return errors.Wrap(err, "Could not send the STRING request ("+strconv.Itoa(n)+" bytes written)")
}
n, err = rw.WriteString("Additional data.\n")
if err != nil | {
return errors.Wrap(err, "Could not send additional STRING data ("+strconv.Itoa(n)+" bytes written)")
} | conditional_block |
|
networking.go | package, this requires no effort. The following
animation shows how gob data gets from a client to a server, and if
this looks quite unspectacular, it's because using `gob` *is* unspectacular.
HYPE[Sending a struct as GOB](gob.html)
It's not much more than that!
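As a rough sketch (not part of the program below; the field values are made up
and the `bytes` package is assumed), the whole round trip fits in a few lines.
A `bytes.Buffer` stands in for the network connection here:

	var network bytes.Buffer // in the real code, the net.Conn plays this role
	if err := gob.NewEncoder(&network).Encode(complexData{N: 23, S: "hi"}); err != nil {
		log.Println("Encode failed:", err)
	}
	var received complexData
	if err := gob.NewDecoder(&network).Decode(&received); err != nil {
		log.Println("Decode failed:", err)
	}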
## Basic ingredients for sending string data over TCP
### On the sending side
Sending strings requires three simple steps.
1. Open a connection to the receiving process
2. Write the string
3. Close the connection
The `net` package provides a couple of methods for this.
`ResolveTCPAddr()` takes a string representing a TCP address (like, for example,
`localhost:80`, `127.0.0.1:80`, or `[::1]:80`, which all represent port #80 on
the local machine) and returns a `net.TCPAddr` (or an error if the string
cannot be resolved to a valid TCP address).
`DialTCP()` takes a `net.TCPAddr` and connects to this address. It returns
the open connection as a `net.TCPConn` object (or an error if the connection
attempt fails).
If we don't need much fine-grained control over the Dial settings, we can use
`net.Dial()` instead. This function takes an address string directly and
returns a general `net.Conn` object. This is sufficient for our test case.
However, if you need functionality that is only available on TCP connections,
you have to use the "TCP" variants (`DialTCP`, `TCPConn`, `TCPAddr`, etc).
After successful dialing, we can treat the new connection like any other
input/output stream, as mentioned above. We can even wrap the connection into
a `bufio.ReadWriter` and benefit from the various `ReadWriter` methods like
`ReadString()`, `ReadBytes`, `WriteString`, etc.
** Remember that buffered Writers need to call `Flush()` after writing,
so that all data is forwarded to the underlying network connection.**
Finally, each connection object has a `Close()` method to conclude the
communication.
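A minimal sketch of these three steps (illustrative only; the address and the
payload are placeholders, not part of the sample program):

	conn, err := net.Dial("tcp", "localhost:61000")
	if err != nil {
		log.Println("Dial failed:", err)
		return
	}
	defer conn.Close()
	rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
	rw.WriteString("STRING\n")
	rw.WriteString("Hello over TCP.\n")
	rw.Flush() // without Flush, nothing leaves the buffer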
### Fine tuning
A couple of tuning options are also available. Some examples:
The `Dialer` interface provides these options (among others):
* `DeadLine` and `Timeout` options for timing out an unsuccessful dial;
* `KeepAlive` option for managing the life span of the connection
The `Conn` interface also has deadline settings; either for the connection as
a whole (`SetDeadLine()`), or specific to read or write calls (`SetReadDeadLine()`
and `SetWriteDeadLine()`).
Note that the deadlines are fixed points in (wallclock) time. Unlike timeouts,
they don't reset after a new activity. Each activity on the connection must
therefore set a new deadline.
The sample code below uses no deadlines, as it is simple enough so that we can
easily see when things get stuck. `Ctrl-C` is our manual "deadline trigger
tool".
### On the receiving side
The receiver has to follow these steps.
1. Start listening on a local port.
2. When a request comes in, spawn a goroutine to handle the request.
3. In the goroutine, read the data. Optionally, send a response.
4. Close the connection.
Listening requires a local port to listen to. Typically, the listening
application (a.k.a. "server") announces the port it listens to, or if it
provides a standard service, it uses the port associated with that service.
For example, Web servers usually listen on port 80 for HTTP requests and
on port 443 for HTTPS requests. SSH daemons listen on port 22 by default,
and a WHOIS server uses port 43.
The core parts of the `net` package for implementing the server side are:
`net.Listen()` creates a new listener on a given local network address. If
only a port is passed, as in ":61000", then the listener listens on
all available network interfaces. This is quite handy, as a computer usually
has at least two active interfaces, the loopback interface and at least one
real network card.
A listener's `Accept()` method waits until a connection request comes in.
Then it accepts the request and returns the new connection to the caller.
`Accept()` is typically called within a loop to be able to serve multiple
connections simultaneously. Each connection can be handled by a goroutine,
as we will see in the code.
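Stripped down to the bare pattern (a sketch with error handling omitted; the
port literal mirrors the Port constant used below, and the complete version
follows in the code):

	listener, _ := net.Listen("tcp", ":61000")
	for {
		conn, err := listener.Accept()
		if err != nil {
			continue
		}
		go func(c net.Conn) {
			defer c.Close()
			// read the request from c, optionally write a response
		}(conn)
	}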
## The code
Instead of just pushing a few bytes around, I wanted the code to demonstrate
something more useful. I want to be able to send different commands with
a different data payload to the server. The server shall identify each
command and decode the command's data.
So the client in the code below sends two test commands: "STRING" and "GOB".
Each are terminated by a newline.
The STRING command includes one line of string
data, which can be handled by simple read and write methods from `bufio`.
The GOB command comes with a `struct` that contains a couple of fields,
including a slice, a map, and even a pointer to itself. As you can see when
running the code, the `gob` package moves all this through our network
connection without any fuss.
What we basically have here is some sort of ad-hoc protocol, where the client
and the server agree that a command is a string followed by a newline followed
by some data. For each command, the server must know the exact data format
and how to process the data.
To achieve this, the server code takes a two-step approach.
Step 1: When the `Listen()` function accepts a new connection, it spawns
a new goroutine that calls function `handleMessage()`. This function reads
the command name from the connection, looks up the appropriate handler
function from a map, and calls this function.
Step 2: The selected handler function reads and processes the command's data.
Here is a visual summary of this process.
HYPE[Server Command Dispatch](tcpserver.html)
Keep these pictures in mind, they help reading the actual code.
## The Code
*/
// ## Imports and globals
package main
import (
"bufio"
"io"
"log"
"net"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
"encoding/gob"
"flag"
)
// A struct with a mix of fields, used for the GOB example.
type complexData struct {
N int
S string
M map[string]int
P []byte
C *complexData
}
const (
// Port is the port number that the server listens to.
Port = ":61000"
)
/*
## Outgoing connections
Using an outgoing connection is a snap. A `net.Conn` satisfies the io.Reader
and `io.Writer` interfaces, so we can treat a TCP connection just like any other
`Reader` or `Writer`.
*/
// Open connects to a TCP Address.
// It returns a TCP connection armed with a timeout and wrapped into a
// buffered ReadWriter.
func Open(addr string) (*bufio.ReadWriter, error) {
// Dial the remote process.
// Note that the local port is chosen on the fly. If the local port
// must be a specific one, use DialTCP() instead.
log.Println("Dial " + addr)
conn, err := net.Dial("tcp", addr)
if err != nil {
return nil, errors.Wrap(err, "Dialing "+addr+" failed")
}
return bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil
}
/*
## Incoming connections
Preparing for incoming data is a bit more involved. According to our ad-hoc
protocol, we receive the name of a command terminated by `\n`, followed by data.
| * It dispatches incoming commands to the associated handler based on the commands
name.
*/
// HandleFunc is a function that handles an incoming command.
// It receives the open connection wrapped in a `ReadWriter` interface.
type HandleFunc func(*bufio.ReadWriter)
// Endpoint provides an endpoint to other processes
// that they can send data to.
type Endpoint struct {
listener net.Listener
handler map[string]HandleFunc
// Maps are not threadsafe, so we need a mutex to control access.
mutex sync.RWMutex
}
// NewEndpoint creates a new endpoint. To keep things simple,
// the endpoint listens on a fixed port number.
func NewEndpoint() *Endpoint {
// Create a new Endpoint with an empty list of handler funcs.
return &Endpoint{
handler: map[string]HandleFunc{},
}
}
// AddHandleFunc adds a new function for handling incoming data.
func (e *Endpoint) AddHandleFunc(name string, f HandleFunc) {
e.mutex.Lock()
e.handler[name] = f
e.mutex.Unlock()
}
// Listen starts listening on the endpoint port on all interfaces.
// At least one handler function must have been added
// through AddHandleFunc() before.
func (e *Endpoint) Listen() error {
var err error
e.listener, err = net.Listen("tcp", Port)
if err != nil {
return errors.Wrapf(err, "Unable to listen on port %s\n", Port)
}
log.Println("Listen on", e.listener.Addr().String())
for {
log.Println("Accept a connection | The nature of the data depends on the respective command. To handle this, we
create an `Endpoint` object with the following properties:
* It allows registering one or more handler functions, where each can handle a
particular command.
| random_line_split |
networking.go | }
return bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil
}
/*
## Incoming connections
Preparing for incoming data is a bit more involved. According to our ad-hoc
protocol, we receive the name of a command terminated by `\n`, followed by data.
The nature of the data depends on the respective command. To handle this, we
create an `Endpoint` object with the following properties:
* It allows registering one or more handler functions, where each can handle a
particular command.
* It dispatches incoming commands to the associated handler based on the command's
name.
*/
// HandleFunc is a function that handles an incoming command.
// It receives the open connection wrapped in a `ReadWriter` interface.
type HandleFunc func(*bufio.ReadWriter)
// Endpoint provides an endpoint to other processes
// that they can send data to.
type Endpoint struct {
listener net.Listener
handler map[string]HandleFunc
// Maps are not threadsafe, so we need a mutex to control access.
mutex sync.RWMutex
}
// NewEndpoint creates a new endpoint. To keep things simple,
// the endpoint listens on a fixed port number.
func NewEndpoint() *Endpoint {
// Create a new Endpoint with an empty list of handler funcs.
return &Endpoint{
handler: map[string]HandleFunc{},
}
}
// AddHandleFunc adds a new function for handling incoming data.
func (e *Endpoint) AddHandleFunc(name string, f HandleFunc) {
e.mutex.Lock()
e.handler[name] = f
e.mutex.Unlock()
}
// Listen starts listening on the endpoint port on all interfaces.
// At least one handler function must have been added
// through AddHandleFunc() before.
func (e *Endpoint) Listen() error {
var err error
e.listener, err = net.Listen("tcp", Port)
if err != nil {
return errors.Wrapf(err, "Unable to listen on port %s\n", Port)
}
log.Println("Listen on", e.listener.Addr().String())
for {
log.Println("Accept a connection request.")
conn, err := e.listener.Accept()
if err != nil {
log.Println("Failed accepting a connection request:", err)
continue
}
log.Println("Handle incoming messages.")
go e.handleMessages(conn)
}
}
// handleMessages reads the connection up to the first newline.
// Based on this string, it calls the appropriate HandleFunc.
func (e *Endpoint) handleMessages(conn net.Conn) {
// Wrap the connection into a buffered reader for easier reading.
rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
defer conn.Close()
// Read from the connection until EOF. Expect a command name as the
// next input. Call the handler that is registered for this command.
for {
log.Print("Receive command '")
cmd, err := rw.ReadString('\n')
switch {
case err == io.EOF:
log.Println("Reached EOF - close this connection.\n ---")
return
case err != nil:
log.Println("\nError reading command. Got: '"+cmd+"'\n", err)
return
}
// Trim the request string - ReadString does not strip any newlines.
cmd = strings.Trim(cmd, "\n ")
log.Println(cmd + "'")
// Fetch the appropriate handler function from the 'handler' map and call it.
e.mutex.RLock()
handleCommand, ok := e.handler[cmd]
e.mutex.RUnlock()
if !ok {
log.Println("Command '" + cmd + "' is not registered.")
return
}
handleCommand(rw)
}
}
/* Now let's create two handler functions. The easiest case is where our
ad-hoc protocol only sends string data.
The second handler receives and processes a struct that was sent as GOB data.
*/
// handleStrings handles the "STRING" request.
func handleStrings(rw *bufio.ReadWriter) {
// Receive a string.
log.Print("Receive STRING message:")
s, err := rw.ReadString('\n')
if err != nil {
log.Println("Cannot read from connection.\n", err)
}
s = strings.Trim(s, "\n ")
log.Println(s)
_, err = rw.WriteString("Thank you.\n")
if err != nil {
log.Println("Cannot write to connection.\n", err)
}
err = rw.Flush()
if err != nil {
log.Println("Flush failed.", err)
}
}
// handleGob handles the "GOB" request. It decodes the received GOB data
// into a struct.
func handleGob(rw *bufio.ReadWriter) {
log.Print("Receive GOB data:")
var data complexData
// Create a decoder that decodes directly into a struct variable.
dec := gob.NewDecoder(rw)
err := dec.Decode(&data)
if err != nil {
log.Println("Error decoding GOB data:", err)
return
}
// Print the complexData struct and the nested one, too, to prove
// that both travelled across the wire.
log.Printf("Outer complexData struct: \n%#v\n", data)
log.Printf("Inner complexData struct: \n%#v\n", data.C)
}
/*
## The client and server functions
With all this in place, we can now set up client and server functions.
The client function connects to the server and sends STRING and GOB requests.
The server starts listening for requests and triggers the appropriate handlers.
*/
// client is called if the app is called with -connect=`ip addr`.
func client(ip string) error {
// Some test data. Note how GOB even handles maps, slices, and
// recursive data structures without problems.
testStruct := complexData{
N: 23,
S: "string data",
M: map[string]int{"one": 1, "two": 2, "three": 3},
P: []byte("abc"),
C: &complexData{
N: 256,
S: "Recursive structs? Piece of cake!",
M: map[string]int{"01": 1, "10": 2, "11": 3},
},
}
// Open a connection to the server.
rw, err := Open(ip + Port)
if err != nil {
return errors.Wrap(err, "Client: Failed to open connection to "+ip+Port)
}
// Send a STRING request.
// Send the request name.
// Send the data.
log.Println("Send the string request.")
n, err := rw.WriteString("STRING\n")
if err != nil {
return errors.Wrap(err, "Could not send the STRING request ("+strconv.Itoa(n)+" bytes written)")
}
n, err = rw.WriteString("Additional data.\n")
if err != nil {
return errors.Wrap(err, "Could not send additional STRING data ("+strconv.Itoa(n)+" bytes written)")
}
log.Println("Flush the buffer.")
err = rw.Flush()
if err != nil {
return errors.Wrap(err, "Flush failed.")
}
// Read the reply.
log.Println("Read the reply.")
response, err := rw.ReadString('\n')
if err != nil {
return errors.Wrap(err, "Client: Failed to read the reply: '"+response+"'")
}
log.Println("STRING request: got a response:", response)
// Send a GOB request.
// Create an encoder that directly transmits to `rw`.
// Send the request name.
// Send the GOB.
log.Println("Send a struct as GOB:")
log.Printf("Outer complexData struct: \n%#v\n", testStruct)
log.Printf("Inner complexData struct: \n%#v\n", testStruct.C)
enc := gob.NewEncoder(rw)
n, err = rw.WriteString("GOB\n")
if err != nil {
return errors.Wrap(err, "Could not write GOB data ("+strconv.Itoa(n)+" bytes written)")
}
err = enc.Encode(testStruct)
if err != nil {
return errors.Wrapf(err, "Encode failed for struct: %#v", testStruct)
}
err = rw.Flush()
if err != nil {
return errors.Wrap(err, "Flush failed.")
}
return nil
}
// server listens for incoming requests and dispatches them to
// registered handler functions.
func server() error {
endpoint := NewEndpoint()
// Add the handle funcs.
endpoint.AddHandleFunc("STRING", handleStrings)
endpoint.AddHandleFunc("GOB", handleGob)
// Start listening.
return endpoint.Listen()
}
/*
## Main
Main starts either a client or a server, depending on whether the `connect`
flag is set. Without the flag, the process starts as a server, listening
for incoming requests. With the flag the process starts as a client and connects
to the host specified by the flag value.
Try "localhost" or "127.0.0.1" when running both processes on the same machine.
*/
// main
func main() | {
connect := flag.String("connect", "", "IP address of process to join. If empty, go into listen mode.")
flag.Parse()
// If the connect flag is set, go into client mode.
if *connect != "" {
err := client(*connect)
if err != nil {
log.Println("Error:", errors.WithStack(err))
}
log.Println("Client done.")
return
}
// Else go into server mode.
err := server()
if err != nil {
log.Println("Error:", errors.WithStack(err))
}
| identifier_body |
|
networking.go | it uses the port associated with that service.
For example, Web servers usually listen on port 80 for HTTP requests and
on port 443 for HTTPS requests. SSH daemons listen on port 22 by default,
and a WHOIS server uses port 43.
The core parts of the `net` package for implementing the server side are:
`net.Listen()` creates a new listener on a given local network address. If
only a port is passed, as in ":61000", then the listener listens on
all available network interfaces. This is quite handy, as a computer usually
has at least two active interfaces, the loopback interface and at least one
real network card.
A listener's `Accept()` method waits until a connection request comes in.
Then it accepts the request and returns the new connection to the caller.
`Accept()` is typically called within a loop to be able to serve multiple
connections simultaneously. Each connection can be handled by a goroutine,
as we will see in the code.
## The code
Instead of just pushing a few bytes around, I wanted the code to demonstrate
something more useful. I want to be able to send different commands with
a different data payload to the server. The server shall identify each
command and decode the command's data.
So the client in the code below sends two test commands: "STRING" and "GOB".
Each are terminated by a newline.
The STRING command includes one line of string
data, which can be handled by simple read and write methods from `bufio`.
The GOB command comes with a `struct` that contains a couple of fields,
including a slice, a map, and even a pointer to itself. As you can see when
running the code, the `gob` package moves all this through our network
connection without any fuss.
What we basically have here is some sort of ad-hoc protocol, where the client
and the server agree that a command is a string followed by a newline followed
by some data. For each command, the server must know the exact data format
and how to process the data.
To achieve this, the server code takes a two-step approach.
Step 1: When the `Listen()` function accepts a new connection, it spawns
a new goroutine that calls function `handleMessage()`. This function reads
the command name from the connection, looks up the appropriate handler
function from a map, and calls this function.
Step 2: The selected handler function reads and processes the command's data.
Here is a visual summary of this process.
HYPE[Server Command Dispatch](tcpserver.html)
Keep these pictures in mind, they help reading the actual code.
## The Code
*/
// ## Imports and globals
package main
import (
"bufio"
"io"
"log"
"net"
"strconv"
"strings"
"sync"
"github.com/pkg/errors"
"encoding/gob"
"flag"
)
// A struct with a mix of fields, used for the GOB example.
type complexData struct {
N int
S string
M map[string]int
P []byte
C *complexData
}
const (
// Port is the port number that the server listens to.
Port = ":61000"
)
/*
## Outgoing connections
Using an outgoing connection is a snap. A `net.Conn` satisfies the io.Reader
and `io.Writer` interfaces, so we can treat a TCP connection just like any other
`Reader` or `Writer`.
*/
// Open connects to a TCP Address.
// It returns a TCP connection armed with a timeout and wrapped into a
// buffered ReadWriter.
func Open(addr string) (*bufio.ReadWriter, error) {
// Dial the remote process.
// Note that the local port is chosen on the fly. If the local port
// must be a specific one, use DialTCP() instead.
log.Println("Dial " + addr)
conn, err := net.Dial("tcp", addr)
if err != nil {
return nil, errors.Wrap(err, "Dialing "+addr+" failed")
}
return bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil
}
/*
## Incoming connections
Preparing for incoming data is a bit more involved. According to our ad-hoc
protocol, we receive the name of a command terminated by `\n`, followed by data.
The nature of the data depends on the respective command. To handle this, we
create an `Endpoint` object with the following properties:
* It allows registering one or more handler functions, where each can handle a
particular command.
* It dispatches incoming commands to the associated handler based on the command's
name.
*/
// HandleFunc is a function that handles an incoming command.
// It receives the open connection wrapped in a `ReadWriter` interface.
type HandleFunc func(*bufio.ReadWriter)
// Endpoint provides an endpoint to other processes
// that they can send data to.
type Endpoint struct {
listener net.Listener
handler map[string]HandleFunc
// Maps are not threadsafe, so we need a mutex to control access.
mutex sync.RWMutex
}
// NewEndpoint creates a new endpoint. To keep things simple,
// the endpoint listens on a fixed port number.
func NewEndpoint() *Endpoint {
// Create a new Endpoint with an empty list of handler funcs.
return &Endpoint{
handler: map[string]HandleFunc{},
}
}
// AddHandleFunc adds a new function for handling incoming data.
func (e *Endpoint) AddHandleFunc(name string, f HandleFunc) {
e.mutex.Lock()
e.handler[name] = f
e.mutex.Unlock()
}
// Listen starts listening on the endpoint port on all interfaces.
// At least one handler function must have been added
// through AddHandleFunc() before.
func (e *Endpoint) Listen() error {
var err error
e.listener, err = net.Listen("tcp", Port)
if err != nil {
return errors.Wrapf(err, "Unable to listen on port %s\n", Port)
}
log.Println("Listen on", e.listener.Addr().String())
for {
log.Println("Accept a connection request.")
conn, err := e.listener.Accept()
if err != nil {
log.Println("Failed accepting a connection request:", err)
continue
}
log.Println("Handle incoming messages.")
go e.handleMessages(conn)
}
}
// handleMessages reads the connection up to the first newline.
// Based on this string, it calls the appropriate HandleFunc.
func (e *Endpoint) handleMessages(conn net.Conn) {
// Wrap the connection into a buffered reader for easier reading.
rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
defer conn.Close()
// Read from the connection until EOF. Expect a command name as the
// next input. Call the handler that is registered for this command.
for {
log.Print("Receive command '")
cmd, err := rw.ReadString('\n')
switch {
case err == io.EOF:
log.Println("Reached EOF - close this connection.\n ---")
return
case err != nil:
log.Println("\nError reading command. Got: '"+cmd+"'\n", err)
return
}
// Trim the request string - ReadString does not strip any newlines.
cmd = strings.Trim(cmd, "\n ")
log.Println(cmd + "'")
// Fetch the appropriate handler function from the 'handler' map and call it.
e.mutex.RLock()
handleCommand, ok := e.handler[cmd]
e.mutex.RUnlock()
if !ok {
log.Println("Command '" + cmd + "' is not registered.")
return
}
handleCommand(rw)
}
}
/* Now let's create two handler functions. The easiest case is where our
ad-hoc protocol only sends string data.
The second handler receives and processes a struct that was sent as GOB data.
*/
// handleStrings handles the "STRING" request.
func handleStrings(rw *bufio.ReadWriter) {
// Receive a string.
log.Print("Receive STRING message:")
s, err := rw.ReadString('\n')
if err != nil {
log.Println("Cannot read from connection.\n", err)
}
s = strings.Trim(s, "\n ")
log.Println(s)
_, err = rw.WriteString("Thank you.\n")
if err != nil {
log.Println("Cannot write to connection.\n", err)
}
err = rw.Flush()
if err != nil {
log.Println("Flush failed.", err)
}
}
// handleGob handles the "GOB" request. It decodes the received GOB data
// into a struct.
func handleGob(rw *bufio.ReadWriter) {
log.Print("Receive GOB data:")
var data complexData
// Create a decoder that decodes directly into a struct variable.
dec := gob.NewDecoder(rw)
err := dec.Decode(&data)
if err != nil {
log.Println("Error decoding GOB data:", err)
return
}
// Print the complexData struct and the nested one, too, to prove
// that both travelled across the wire.
log.Printf("Outer complexData struct: \n%#v\n", data)
log.Printf("Inner complexData struct: \n%#v\n", data.C)
}
/*
## The client and server functions
With all this in place, we can now set up client and server functions.
The client function connects to the server and sends STRING and GOB requests.
The server starts listening for requests and triggers the appropriate handlers.
*/
// client is called if the app is called with -connect=`ip addr`.
func | client | identifier_name |
|
model_seq.py | y in y_pred:
# y_fixed = decode_labels(y)
# temp_pred.append(y_fixed)
# y_pred = temp_pred
# Convert crf output to xml tags
test_dict = {}
for x in range(len(test_ids)):
rec_id = test_ids[x]
rec_seq = zip((item[0] for item in test_seqs[x]), y_pred_labels[x])
# Concatenate all the sequences for each record
if rec_id not in test_dict:
test_dict[rec_id] = []
test_dict[rec_id] = test_dict[rec_id] + rec_seq # TODO: add line breaks???
xml_tree = xmltoseq.seq_to_xml(test_dict, testfile)
# write the xml to file
if len(outfile) > 0:
print "Writing test output to xml file..."
xml_tree.write(outfile)
subprocess.call(["sed", "-i", "-e", 's/</</g', outfile])
subprocess.call(["sed", "-i", "-e", 's/>/>/g', outfile])
def train_crf(trainx, trainy):
print "training CRF..."
crf = CRF(
algorithm='lbfgs',
c1=0.1,
c2=0.1,
max_iterations=100,
all_possible_transitions=True
)
crf.fit(trainx, trainy)
return crf
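# Illustrative usage (a sketch, not called in this module): sklearn-crfsuite's
# CRF returns one label sequence per input sequence, so the output lines up
# with the token sequences that were fed in.
#
#   crf = train_crf(trainx, trainy)
#   y_pred_labels = crf.predict(testx)  # list of label lists, one per sequence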
def train_seq2seq(trainx, trainy, num_nodes=100, vec_labels=False, loss_function="cosine_proximity", num_epochs=10):
trainx = numpy.array(trainx)
print "trainx shape: " + str(trainx.shape)
trainy = numpy.array(trainy)
print "trainy shape: " + str(trainy.shape)
input_dim = trainx.shape[-1]
output_dim = trainy.shape[-1]
input_seq_len = trainx.shape[1]
output_seq_len = trainy.shape[1]
# Create decoder target data
trainy_target = []
zero_lab = data_util.zero_vec(output_dim)
if not vec_labels:
zero_lab = encode_labels([['O']])[0][0]
print "zero_lab shape: " + str(numpy.asarray(zero_lab))
for i in range(trainy.shape[0]):
row = trainy[i].tolist()
new_row = row[1:]
new_row.append(zero_lab)
trainy_target.append(new_row)
trainy_target = numpy.asarray(trainy_target)
print "trainy_target shape: " + str(trainy_target.shape)
# Set up the encoder
latent_dim = num_nodes
dropout = 0.1
encoder_inputs = Input(shape=(None, input_dim)) #seq_len
encoder = LSTM(latent_dim, return_state=True)
# Encoder-Decoder model
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, output_dim))
decoder_rnn = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, d_state_h, d_state_c = decoder_rnn(decoder_inputs, initial_state=encoder_states)
decoder_dense = Dense(output_dim, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss=loss_function)
model.fit([trainx, trainy], trainy_target, epochs=num_epochs)
# Normal RNN
#rnn_out = GRU(latent_dim, return_sequences=False)(encoder_inputs)
#dropout_out = Dropout(dropout)(rnn_out)
#prediction = Dense(output_dim, activation='softmax')(dropout_out)
#model = Model(inputs=encoder_inputs, outputs=prediction)
#model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
#model.fit(trainx, trainy, nb_epoch=20)
model.summary()
model.save('seq2seq.model')
# Create models for inference
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_rnn(decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)
return model, encoder_model, decoder_model, output_dim
def decode_sequence(encoder_model, decoder_model, input_seq, output_seq_len, output_dim, vec_labels=False):
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq, batch_size=1)
# Generate empty target sequence of length 1.
#output_dim = 5
#print "output_dim: " + str(output_dim)
target_seq = numpy.zeros((1, 1, int(output_dim)))
# Populate the first character of target sequence with the start character.
zero_lab = data_util.zero_vec(output_dim)
if vec_labels:
target_seq[0, 0] = zero_lab
else:
zero_lab = encode_labels([['O']])[0][0]
index = zero_lab.index(1)
target_seq[0, 0, index] = 1
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = []
while not stop_condition:
output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
# Sample a token
#sampled_token_index = np.argmax(output_tokens[0, -1, :])
#sampled_lab = reverse_target_char_index[sampled_token_index]
#print "output_tokens shape: " + str(output_tokens.shape)
token = output_tokens[0, -1]
#print "token: " + str(token)
encoded_label = numpy.zeros((output_dim,), dtype=numpy.int).tolist()
if vec_labels:
decoded_sentence.append(encoded_label)
else:
ind = numpy.argmax(token)
encoded_label[ind] = 1
#print "encoded_label: " + str(encoded_label)
sampled_lab = decode_labels([encoded_label])[0]
print "sampled_lab: " + str(sampled_lab)
decoded_sentence.append(sampled_lab)
# Exit condition: either hit max length or find stop character.
if (len(decoded_sentence) > output_seq_len):
stop_condition = True
# Update the target sequence (of length 1).
target_seq = numpy.zeros((1, 1, output_dim))
for x in range(output_dim):
target_seq[0, 0, x] = token[x]
# Update states
states_value = [h, c]
return decoded_sentence
''' Predict sequences for test input
encoder_model: the encoder model
decoder_model: the decoder model
testx: the test input: [num_samples, max_seq_len, output_dim)
'''
def predict_seqs(encoder_model, decoder_model, testx, output_seq_len, output_dim, vec_labels=False):
testy_pred = []
print "output_seq_len: " + str(output_seq_len)
print "output_dim: " + str(output_dim)
print "vec_labels: " + str(vec_labels)
for test_seq in testx:
input_seq = []
input_seq.append(test_seq)
input_seq = numpy.array(input_seq)
#print "input_seq shape: " + str(input_seq.shape)
decoded_sentence = decode_sequence(encoder_model, decoder_model, input_seq, output_seq_len, output_dim, vec_labels)
#print('-')
#print('Input seq:', test_seq)
#print('Predicted:', decoded_sentence)
testy_pred.append(decoded_sentence)
return testy_pred
def score(testy, y_pred, model, modelname):
# Ignore O tags for evaluation
if modelname == "crf":
labels = list(model.classes_)
elif modelname == "nn" or modelname == 'gru':
labels = list(label_set)
labels.remove('O')
f1_score = metrics.flat_f1_score(testy, y_pred, average='weighted', labels=labels)
print "F1: " + str(f1_score)
sorted_labels = sorted(labels, key=lambda name: (name[1:], name[0]))
print(metrics.flat_classification_report(testy, y_pred, labels=sorted_labels, digits=3))
return f1_score
def split_labels(y):
t_labels = ['BT', 'IT']
e_labels = ['BE', 'IE']
y_time = []
y_event = []
for y_seq in y:
time_seq = []
event_seq = []
for lab in y_seq:
#print "lab: " + lab
if lab in t_labels:
| time_seq.append(lab)
event_seq.append('O') | conditional_block |
|
model_seq.py | y_time)
#y_pred_time = predict_seqs(time_encoder_model, time_decoder_model, testx)
#testy_labels_time = []
#for seq in testy_time:
# testy_labels_time.append(decode_labels(seq))
#testy_time = testy_labels_time
#event_model, event_encoder_model, event_decoder_model = train_seq2seq(trainx, trainy_event)
#y_pred_event = predict_seqs(event_encoder_model, event_decoder_model, testx)
#testy_labels_event = []
#for seq in testy_event:
# testy_labels_event.append(decode_labels(seq))
#testy_event = testy_labels_event
# Print metrics
#print "testy: " + str(testy[0])
#print "y_pred: " + str(y_pred[0])
#print "labels: " + str(labels[0])
f1_score = score(testy_labels, y_pred_labels, model, modelname)
#f1_score_time = score(testy_time, y_pred_time, time_model, modelname)
#f1_score_event = score(testy_event, y_pred_event, event_model, modelname)
# Convert the labels back to text
#if modelname == "nn":
# temp_pred = []
# for y in y_pred:
# y_fixed = decode_labels(y)
# temp_pred.append(y_fixed)
# y_pred = temp_pred
# Convert crf output to xml tags
test_dict = {}
for x in range(len(test_ids)):
rec_id = test_ids[x]
rec_seq = zip((item[0] for item in test_seqs[x]), y_pred_labels[x])
# Concatenate all the sequences for each record
if rec_id not in test_dict:
test_dict[rec_id] = []
test_dict[rec_id] = test_dict[rec_id] + rec_seq # TODO: add line breaks???
xml_tree = xmltoseq.seq_to_xml(test_dict, testfile)
# write the xml to file
if len(outfile) > 0:
print "Writing test output to xml file..."
xml_tree.write(outfile)
subprocess.call(["sed", "-i", "-e", 's/</</g', outfile])
subprocess.call(["sed", "-i", "-e", 's/>/>/g', outfile])
def train_crf(trainx, trainy):
print "training CRF..."
crf = CRF(
algorithm='lbfgs',
c1=0.1,
c2=0.1,
max_iterations=100,
all_possible_transitions=True
)
crf.fit(trainx, trainy)
return crf
def train_seq2seq(trainx, trainy, num_nodes=100, vec_labels=False, loss_function="cosine_proximity", num_epochs=10):
trainx = numpy.array(trainx)
print "trainx shape: " + str(trainx.shape)
trainy = numpy.array(trainy)
print "trainy shape: " + str(trainy.shape)
input_dim = trainx.shape[-1]
output_dim = trainy.shape[-1]
input_seq_len = trainx.shape[1]
output_seq_len = trainy.shape[1]
# Create decoder target data
trainy_target = []
zero_lab = data_util.zero_vec(output_dim)
if not vec_labels:
zero_lab = encode_labels([['O']])[0][0]
print "zero_lab shape: " + str(numpy.asarray(zero_lab))
for i in range(trainy.shape[0]):
row = trainy[i].tolist()
new_row = row[1:]
new_row.append(zero_lab)
trainy_target.append(new_row)
trainy_target = numpy.asarray(trainy_target)
print "trainy_target shape: " + str(trainy_target.shape)
# Set up the encoder
latent_dim = num_nodes
dropout = 0.1
encoder_inputs = Input(shape=(None, input_dim)) #seq_len
encoder = LSTM(latent_dim, return_state=True)
# Encoder-Decoder model
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, output_dim))
decoder_rnn = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, d_state_h, d_state_c = decoder_rnn(decoder_inputs, initial_state=encoder_states)
decoder_dense = Dense(output_dim, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='rmsprop', loss=loss_function)
model.fit([trainx, trainy], trainy_target, epochs=num_epochs)
# Normal RNN
#rnn_out = GRU(latent_dim, return_sequences=False)(encoder_inputs)
#dropout_out = Dropout(dropout)(rnn_out)
#prediction = Dense(output_dim, activation='softmax')(dropout_out)
#model = Model(inputs=encoder_inputs, outputs=prediction)
#model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
#model.fit(trainx, trainy, nb_epoch=20)
model.summary()
model.save('seq2seq.model')
# Create models for inference
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_rnn(decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)
return model, encoder_model, decoder_model, output_dim
def decode_sequence(encoder_model, decoder_model, input_seq, output_seq_len, output_dim, vec_labels=False):
# Encode the input as state vectors.
states_value = encoder_model.predict(input_seq, batch_size=1)
# Generate empty target sequence of length 1.
#output_dim = 5
#print "output_dim: " + str(output_dim)
target_seq = numpy.zeros((1, 1, int(output_dim)))
# Populate the first character of target sequence with the start character.
zero_lab = data_util.zero_vec(output_dim)
if vec_labels:
target_seq[0, 0] = zero_lab
else:
zero_lab = encode_labels([['O']])[0][0]
index = zero_lab.index(1)
target_seq[0, 0, index] = 1
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = []
while not stop_condition:
output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
# Sample a token
#sampled_token_index = np.argmax(output_tokens[0, -1, :])
#sampled_lab = reverse_target_char_index[sampled_token_index]
#print "output_tokens shape: " + str(output_tokens.shape)
token = output_tokens[0, -1]
#print "token: " + str(token)
encoded_label = numpy.zeros((output_dim,), dtype=numpy.int).tolist()
if vec_labels:
decoded_sentence.append(encoded_label)
else:
ind = numpy.argmax(token)
encoded_label[ind] = 1
#print "encoded_label: " + str(encoded_label)
sampled_lab = decode_labels([encoded_label])[0]
print "sampled_lab: " + str(sampled_lab)
decoded_sentence.append(sampled_lab)
# Exit condition: either hit max length or find stop character.
if (len(decoded_sentence) > output_seq_len):
stop_condition = True
# Update the target sequence (of length 1).
target_seq = numpy.zeros((1, 1, output_dim))
for x in range(output_dim):
target_seq[0, 0, x] = token[x]
# Update states
states_value = [h, c]
return decoded_sentence
''' Predict sequences for test input
encoder_model: the encoder model
decoder_model: the decoder model
testx: the test input: [num_samples, max_seq_len, output_dim)
'''
def | (encoder_model, decoder_model, testx, output_seq_len, output_dim, vec_labels=False):
testy_pred = []
print "output_seq_len: " + str(output_seq_len)
print "output_dim: " + str(output_dim)
print "vec_labels: " + str(vec_labels)
for test_seq in testx:
input_seq = []
input_seq.append(test_seq)
input_seq = numpy.array(input_seq)
#print "input_seq shape: " + str(input_seq.shape)
decoded_sentence = decode_sequence(encoder_model, decoder_model, input_seq, output | predict_seqs | identifier_name |
model_seq.py | label_set
label_set = set([])
for s in seqs:
s_feats = []
s_labels = []
for pair in s:
word = pair[0]
vector = word2vec.get(word, vec_model)
s_feats.append(vector)
s_labels.append(pair[1])
label_set.add(pair[1])
feats.append(s_feats)
labels.append(s_labels)
if train:
num_labels = len(list(label_set))
create_labelencoder(list(label_set), num_labels)
global max_seq_len
#max_seq_len = max([len(txt) for txt in feats])
print "max_seq_len: " + str(max_seq_len)
# Pad sequences
#feats = pad_sequences(numpy.array(feats), maxlen=max_seq_len, dtype='float32', padding="pre")
#labels = pad_sequences(numpy.array(labels), maxlen=max_seq_len, dtype='str', padding="pre", value='O')
padded_feats = []
padded_labels = []
for feat in feats:
#print "seq len: " + str(len(feat))
while len(feat) > max_seq_len:
feat_part = feat[0:max_seq_len]
padded_feats.append(pad_feat(feat_part, max_seq_len, zero_vec))
feat = feat[max_seq_len:]
new_feat = pad_feat(feat, max_seq_len, zero_vec)
padded_feats.append(new_feat)
for labs in labels:
while len(labs) > max_seq_len:
labs_part = labs[0:max_seq_len]
padded_labels.append(pad_feat(labs_part, max_seq_len, 'O'))
labs = labs[max_seq_len:]
padded_labels.append(pad_feat(labs, max_seq_len, 'O'))
feats = padded_feats
labels = padded_labels
# Encode labels
encoded_labels = encode_labels(labels, max_len=max_seq_len)
print "labels[0]: " + str(encoded_labels[0])
#for row in labels:
# encoded_row = encode_labels(row)
# encoded_labels.append(encoded_row)
print "feats: " + str(len(feats)) + " labels: " + str(len(encoded_labels))
return feats, encoded_labels
def pad_feat(feat, max_seq_len, pad_item):
pad_size = max_seq_len - len(feat)
assert(pad_size >= 0)
new_feat = []
#new_feat.append(pad_item) # Start symbol for encoder-decoder
for w in feat:
new_feat.append(w)
for k in range(pad_size):
new_feat.append(pad_item)
return new_feat
def get_seqs(filename, split_sents=False, inline=True):
print "get_seqs " + filename
ids = []
narrs = []
anns = []
seqs = []
seq_ids = []
# Get the xml from file
tree = etree.parse(filename)
root = tree.getroot()
for child in root:
narr = ""
rec_id = child.find(id_name).text
ids.append(rec_id)
# Get the narrative text
node = child.find("narr_timeml_simple")
if inline:
if node == None:
narr_node = child.find("narrative")
if narr_node == None:
print "no narrative: " + data_util.stringify_children(child)
else:
narr = narr_node.text
#print "narr: " + narr
narrs.append(narr)
else:
rec_id = child.find(id_name).text
#print "rec_id: " + rec_id
narr = data_util.stringify_children(node).encode('utf-8')
#print "narr: " + narr
ids.append(rec_id)
narrs.append(narr)
else: # NOT inline
anns.append(data_util.stringify_children(node).encode('utf8'))
narr_node = child.find("narrative")
narrs.append(narr_node.text)
if inline:
for x in range(len(narrs)):
narr = narrs[x]
rec_id = ids[x]
if split_sents:
sents = narr.split('.')
for sent in sents:
sent_seq = xmltoseq.xml_to_seq(sent.strip())
seqs.append(sent_seq)
seq_ids.append(rec_id)
else:
narr_seq = xmltoseq.xml_to_seq(narr)
seqs.append(narr_seq)
seq_ids.append(rec_id)
else:
for x in range(len(narrs)):
narr = narrs[x]
ann = anns[x]
rec_id = ids[x]
print "split_sents: " + str(split_sents)
ann_seqs = xmltoseq.ann_to_seq(narr, ann, split_sents)
print "seqs: " + str(len(ann_seqs))
for s in ann_seqs:
seqs.append(s)
seq_ids.append(rec_id)
return seq_ids, seqs
def create_labelencoder(data, num=0):
global labelencoder, onehotencoder, num_labels
print "create_labelencoder: data[0]: " + str(data[0])
labelencoder = LabelEncoder()
labelencoder.fit(data)
num_labels = len(labelencoder.classes_)
#onehotencoder = OneHotEncoder()
#onehotencoder.fit(data2)
return labelencoder
''' Encodes labels as one-hot vectors (entire dataset: 2D array)
data: a 1D array of labels
num_labels: the number of label classes
'''
def encode_labels(data, labenc=None, max_len=50):
if labenc == None:
labenc = labelencoder
if labenc == None: # or onehotencoder == None:
print "Error: labelencoder must be trained before it can be used!"
return None
#return onehotencoder.transform(labelencoder.transform(data))
data2 = []
num_labels = len(labenc.classes_)
zero_vec = data_util.zero_vec(num_labels)
print "data: " + str(len(data))
for item in data:
#print "item len: " + str(len(item))
new_item = []
if len(item) > 0:
item2 = labenc.transform(item)
for lab in item2:
onehot = []
for x in range(num_labels):
onehot.append(0)
onehot[lab] = 1
new_item.append(onehot)
# Pad vectors
if len(new_item) > max_len:
new_item = new_item[0:max_len]
while len(new_item) < max_len:
new_item.append(zero_vec)
data2.append(new_item)
#else:
# data2.append([])
return data2
''' Decodes one sequence of labels
'''
def decode_labels(data, labenc=None):
#print "decode_labels"
if labenc is None:
labenc = labelencoder
data2 = []
for row in data:
#print "- row: " + str(row)
lab = numpy.argmax(numpy.asarray(row))
#print "- lab: " + str(lab)
data2.append(lab)
#print "- data2: " + str(data2)
return labenc.inverse_transform(data2)
#return labelencoder.inverse_transform(onehotencoder.reverse_transform(data))
def decode_all_labels(data, labenc=None):
decoded_labels = []
for sequence in data:
labs = decode_labels(sequence, labenc)
decoded_labels.append(labs)
return decoded_labels
def word2features(sent, i):
word = sent[i][0]
#postag = sent[i][1]
features = {
'bias': 1.0,
'word.lower()': word.lower(),
'word[-3:]': word[-3:],
'word[-2:]': word[-2:],
'word.isupper()': word.isupper(),
'word.istitle()': word.istitle(),
'word.isdigit()': word.isdigit(),
#'postag': postag,
#'postag[:2]': postag[:2],
}
if i > 0:
word1 = sent[i-1][0]
postag1 = sent[i-1][1]
features.update({
'-1:word.lower()': word1.lower(),
'-1:word.istitle()': word1.istitle(),
'-1:word.isupper()': word1.isupper(),
#'-1:postag': postag1,
#'-1:postag[:2]': postag1[:2],
})
else:
features['BOS'] = True
if i < len(sent)-1:
word1 = sent[i+1][0]
postag1 = sent[i+1][1]
features.update({
'+1:word.lower()': word1.lower(),
'+1:word.istitle()': word1.istitle(),
'+1:word.isupper()': word1.isupper(),
#'+1:postag': postag1,
#'+1:postag[:2]': postag1[:2],
})
else:
features['EOS'] = True
return features
def sent2features(sent):
return [word2features(sent, i) for i in range(len(sent))]
def sent2labels(sent):
| return [label for token, label in sent] | identifier_body |
|
model_seq.py | _score(testy, y_pred, average='weighted', labels=labels)
print "F1: " + str(f1_score)
sorted_labels = sorted(labels, key=lambda name: (name[1:], name[0]))
print(metrics.flat_classification_report(testy, y_pred, labels=sorted_labels, digits=3))
return f1_score
def split_labels(y):
t_labels = ['BT', 'IT']
e_labels = ['BE', 'IE']
y_time = []
y_event = []
for y_seq in y:
time_seq = []
event_seq = []
for lab in y_seq:
#print "lab: " + lab
if lab in t_labels:
time_seq.append(lab)
event_seq.append('O')
elif lab in e_labels:
time_seq.append('O')
event_seq.append(lab)
else:
time_seq.append(lab)
event_seq.append(lab)
y_time.append(time_seq)
y_event.append(event_seq)
return y_time, y_event
'''
Get word vectors for each word and encode labels
seqs: the sequence of pairs (word, label)
'''
def get_feats(seqs, train=False):
print "get_feats"
vec_model, dim = word2vec.load(vecfile)
zero_vec = data_util.zero_vec(dim)
feats = []
labels = []
global label_set
label_set = set([])
for s in seqs:
s_feats = []
s_labels = []
for pair in s:
word = pair[0]
vector = word2vec.get(word, vec_model)
s_feats.append(vector)
s_labels.append(pair[1])
label_set.add(pair[1])
feats.append(s_feats)
labels.append(s_labels)
if train:
num_labels = len(list(label_set))
create_labelencoder(list(label_set), num_labels)
global max_seq_len
#max_seq_len = max([len(txt) for txt in feats])
print "max_seq_len: " + str(max_seq_len)
# Pad sequences
#feats = pad_sequences(numpy.array(feats), maxlen=max_seq_len, dtype='float32', padding="pre")
#labels = pad_sequences(numpy.array(labels), maxlen=max_seq_len, dtype='str', padding="pre", value='O')
padded_feats = []
padded_labels = []
for feat in feats:
#print "seq len: " + str(len(feat))
while len(feat) > max_seq_len:
feat_part = feat[0:max_seq_len]
padded_feats.append(pad_feat(feat_part, max_seq_len, zero_vec))
feat = feat[max_seq_len:]
new_feat = pad_feat(feat, max_seq_len, zero_vec)
padded_feats.append(new_feat)
for labs in labels:
while len(labs) > max_seq_len:
labs_part = labs[0:max_seq_len]
padded_labels.append(pad_feat(labs_part, max_seq_len, 'O'))
labs = labs[max_seq_len:]
padded_labels.append(pad_feat(labs, max_seq_len, 'O'))
feats = padded_feats
labels = padded_labels
# Encode labels
encoded_labels = encode_labels(labels, max_len=max_seq_len)
print "labels[0]: " + str(encoded_labels[0])
#for row in labels:
# encoded_row = encode_labels(row)
# encoded_labels.append(encoded_row)
print "feats: " + str(len(feats)) + " labels: " + str(len(encoded_labels))
return feats, encoded_labels
def pad_feat(feat, max_seq_len, pad_item):
pad_size = max_seq_len - len(feat)
assert(pad_size >= 0)
new_feat = []
#new_feat.append(pad_item) # Start symbol for encoder-decoder
for w in feat:
new_feat.append(w)
for k in range(pad_size):
new_feat.append(pad_item)
return new_feat
def get_seqs(filename, split_sents=False, inline=True):
print "get_seqs " + filename
ids = []
narrs = []
anns = []
seqs = []
seq_ids = []
# Get the xml from file
tree = etree.parse(filename)
root = tree.getroot()
for child in root:
narr = ""
rec_id = child.find(id_name).text
ids.append(rec_id)
# Get the narrative text
node = child.find("narr_timeml_simple")
if inline:
if node == None:
narr_node = child.find("narrative")
if narr_node == None:
print "no narrative: " + data_util.stringify_children(child)
else:
narr = narr_node.text
#print "narr: " + narr
narrs.append(narr)
else:
rec_id = child.find(id_name).text
#print "rec_id: " + rec_id
narr = data_util.stringify_children(node).encode('utf-8')
#print "narr: " + narr
ids.append(rec_id)
narrs.append(narr)
else: # NOT inline
anns.append(data_util.stringify_children(node).encode('utf8'))
narr_node = child.find("narrative")
narrs.append(narr_node.text)
if inline:
for x in range(len(narrs)):
narr = narrs[x]
rec_id = ids[x]
if split_sents:
sents = narr.split('.')
for sent in sents:
sent_seq = xmltoseq.xml_to_seq(sent.strip())
seqs.append(sent_seq)
seq_ids.append(rec_id)
else:
narr_seq = xmltoseq.xml_to_seq(narr)
seqs.append(narr_seq)
seq_ids.append(rec_id)
else:
for x in range(len(narrs)):
narr = narrs[x]
ann = anns[x]
rec_id = ids[x]
print "split_sents: " + str(split_sents)
ann_seqs = xmltoseq.ann_to_seq(narr, ann, split_sents)
print "seqs: " + str(len(ann_seqs))
for s in ann_seqs:
seqs.append(s)
seq_ids.append(rec_id)
return seq_ids, seqs
def create_labelencoder(data, num=0):
global labelencoder, onehotencoder, num_labels
print "create_labelencoder: data[0]: " + str(data[0])
labelencoder = LabelEncoder()
labelencoder.fit(data)
num_labels = len(labelencoder.classes_)
#onehotencoder = OneHotEncoder()
#onehotencoder.fit(data2)
return labelencoder
''' Encodes labels as one-hot vectors (entire dataset: 2D array)
data: a 1D array of labels
num_labels: the number of label classes
'''
def encode_labels(data, labenc=None, max_len=50):
if labenc == None:
labenc = labelencoder
if labenc == None: # or onehotencoder == None:
print "Error: labelencoder must be trained before it can be used!"
return None
#return onehotencoder.transform(labelencoder.transform(data))
data2 = []
num_labels = len(labenc.classes_)
zero_vec = data_util.zero_vec(num_labels)
print "data: " + str(len(data))
for item in data:
#print "item len: " + str(len(item))
new_item = []
if len(item) > 0:
item2 = labenc.transform(item)
for lab in item2:
onehot = []
for x in range(num_labels):
onehot.append(0)
onehot[lab] = 1
new_item.append(onehot)
# Pad vectors
if len(new_item) > max_len:
new_item = new_item[0:max_len]
while len(new_item) < max_len:
new_item.append(zero_vec)
data2.append(new_item)
#else:
# data2.append([])
return data2
''' Decodes one sequence of labels
'''
def decode_labels(data, labenc=None):
#print "decode_labels"
if labenc is None:
labenc = labelencoder
data2 = []
for row in data:
#print "- row: " + str(row)
lab = numpy.argmax(numpy.asarray(row))
#print "- lab: " + str(lab)
data2.append(lab)
#print "- data2: " + str(data2)
return labenc.inverse_transform(data2)
#return labelencoder.inverse_transform(onehotencoder.reverse_transform(data))
def decode_all_labels(data, labenc=None):
decoded_labels = []
for sequence in data:
labs = decode_labels(sequence, labenc)
decoded_labels.append(labs)
return decoded_labels
def word2features(sent, i):
word = sent[i][0]
#postag = sent[i][1]
features = {
'bias': 1.0,
'word.lower()': word.lower(),
'word[-3:]': word[-3:],
'word[-2:]': word[-2:],
'word.isupper()': word.isupper(),
'word.istitle()': word.istitle(), | 'word.isdigit()': word.isdigit(),
#'postag': postag, | random_line_split |
|
ChoiceSet.js | *
* @param {Array<Object>} n-number of Objects as described in the
* constructor reference. There should be at least a name property
* and a value property.
*/
static of(...values)
{
let array =
values.length === 1 &&
values[0] instanceof Array &&
values[0] || values;
console.log(array);
return new ChoiceSet(...array);
}
/**
* Instantiates a ChoiceSet with numeric choices ranging from
* the supplied from value to the supplied to value. All values
* are returned with an equal weight by default.
*
* @param {Number} from a number indicating the start range
* @param {Number} to a number indicating the end range, inclusive
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedRange(from, to, weights = null)
{
let set = new ChoiceSet();
for (let i = from; i < (to + 1); i++) {
set.choices.push({
name: i,
weight: 100
});
}
if (weights)
{
set.setWeights(weights);
}
else {
set.calcIntervals();
}
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names and values. The
* function takes an even number of parameters with the first being
* the name and the second being the weight.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedSet(...values)
{
let set = new ChoiceSet();
if (values.length % 2 != 0) {
throw new Error("WeightedChoice must be instantiated with pairs");
}
set.choices = [];
for (let i = 0; i < values.length; i+=2)
{
let name = values[i];
let weight = values[i + 1];
set.choices.push({name: name, weight:weight});
}
set.calcIntervals();
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names and values. The
* function takes an even number of parameters with the first being
* the name and the second being the weight.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @param {Object} value the value of the choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedValuedSet(...values)
{
let set = new ChoiceSet();
if (values.length % 3 != 0) {
throw new Error("weightedValuedSet must be instantiated with triplets");
}
set.choices = [];
for (let i = 0; i < values.length; i+=3)
{
let name = values[i];
let weight = values[i + 1];
let value = values[i + 2];
set.choices.push({name: name, weight:weight, value: value});
}
set.calcIntervals();
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names, weights and values. The
* function takes parameters in multiples of three. The first parameter is
* the name of the choice and the second is the weight. The third object is a
* collection of keys and values that will be applied to the choice in
* question.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @param {Object} object to be merged with the resulting choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedObjectSet(...values)
{
let set = new ChoiceSet();
if (values.length % 3 != 0) {
throw new Error("weightedObjectSet must be instantiated with triplets");
}
set.choices = [];
for (let i = 0; i < values.length; i+=3)
{
let name = values[i];
let weight = values[i + 1];
let object = values[i + 2];
set.choices.push({name: name, weight: weight, _obj: object});
}
set.calcIntervals();
return set;
}
/**
* Calculates the intervals of the weights of the choices in the set. It
* also determines the maximum total weight in the set.
*/
calcIntervals()
{
let intervals = [];
this.choices.reduce(
function(p, c) {
intervals.push(
((p && p.weight) || 0) +
((c && c.weight) || 0)
);
},
null
);
intervals = intervals.map(function(cur, idx, array) {
return cur + array.slice(0,idx).reduce((p, c) => p + c, 0);
});
this.intervals = intervals;
this.maxInterval = intervals[intervals.length - 1];
}
/**
* Allows easy adjustment of a weight for a given index. The weight is
* modified and then calcIntervals() is called to realign things for
* the next choosing.
*
* NOTE see if this is the optimal setting for adjusting the weights
*
* @param {Number} index the index of the choice to modify
* @param {Number} weight the weight of the choice in general
*/
setWeightFor(index, weight)
{
if (this.choices[index]) {
this.choices[index].weight = weight || 100;
this.calcIntervals();
}
}
/**
* This allows weights to be set in bulk. The code will attempt
* to apply a weight for a given choice at equivalent indices.
*
* @param {Array} arrayOfWeights an array of Numberered weights.
*/
setWeights(arrayOfWeights)
{
if (this.choices.length !== arrayOfWeights.length)
{
console.warn('Array length mismatch; applying what is present');
}
for (let i = 0; i < this.choices.length; i++)
{
let choice = this.choices[i];
let weight = arrayOfWeights[i];
if (!choice || !weight || isNaN(weight)) {
continue;
}
choice.weight = weight;
}
this.calcIntervals();
}
/**
* It randomly choose one item from the set. It does so based on a
* randomly chosen number within the given weight set.
*
* @param {String} prop the property of the randomly chosen item
* @return {Mixed} the property value specified for the chosen item. This
* defaults to 'name'.
*/
chooseOne(prop = 'name')
{
return this.chooseProp(prop);
}
/**
* Returns an array of results equivalent to those returned by
* chooseOne.
*
* @param {Number} count an integer denoting the number of choices to pick
* @param {String} prop the property of the randomly chosen item
* @return {Mixed} the property value specified for the chosen item. This
* defaults to 'name'.
*/
chooseSome(count, prop = 'name') |
/**
* Simulates rolling dice with n-number of sides. In pen and paper
* role-playing games, 3d6 means to roll three six sided dice together
* and sum their results. Calling ChoiceSet.rollDice(3, 6) will simulate
* the same effect.
*
* Optionally, if repeat is set to a number greater than 1, an array of
* values is returned with the repeated results numbering the supplied
* repeat count.
*
* @param {Number} times the number of times the die should be rolled
* @param {Number} sides the number of sides the die should have
* @param {Number} repeat the number of times the whole process should be
* repeated
* @return {Mixed} either an array of Numbers or a single Number resulting
* in the sum of a die with sides rolled times times.
*/
static rollDice(times, sides, repeat = 1, dropLowest = 0) {
let count = [];
let die = ChoiceSet.weightedRange(1, sides);
for (let i = 0; i < repeat; i++) {
let rolls = die.chooseSome(times);
rolls.sort();
if (dropLowest) {
rolls = rolls.slice(dropLowest);
}
count.push(rolls.reduce((p,c) => p + c, 0));
}
return repeat === 1 ? count[0] : count;
}
/**
* Randomly chooses a value from the set and returns the specified property
* from the chosen object.
*
* @return {String} key the property value specified for the chosen item.
* This defaults to 'name'.
*/
chooseProp(key = ' | {
let choices = [];
for (let i = 0; i < count; i++) {
choices.push(this.chooseProp(prop))
}
return choices;
} | identifier_body |
ChoiceSet.js | *
* @param {Array<Object>} n-number of Objects as described in the
* constructor reference. There should be at least a name property
* and a value property.
*/
static of(...values)
{
let array =
values.length === 1 &&
values[0] instanceof Array &&
values[0] || values;
console.log(array);
return new ChoiceSet(...array);
}
/**
* Instantiates a ChoiceSet with numeric choices ranging from
* the supplied from value to the supplied to value. All values
* are returned with an equal weight by default.
*
* @param {Number} from a number indicating the start range
* @param {Number} to a number indicating the end range, inclusive
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedRange(from, to, weights = null)
{
let set = new ChoiceSet();
for (let i = from; i < (to + 1); i++) {
set.choices.push({
name: i,
weight: 100
});
}
if (weights)
{
set.setWeights(weights);
}
else {
set.calcIntervals();
}
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names and values. The
* function takes an even number of parameters with the first being
* the name and the second being the weight.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedSet(...values)
{
let set = new ChoiceSet();
if (values.length % 2 != 0) {
throw new Error("WeightedChoice must be instantiated with pairs");
}
set.choices = [];
for (let i = 0; i < values.length; i+=2)
{
let name = values[i];
let weight = values[i + 1];
set.choices.push({name: name, weight:weight});
}
set.calcIntervals();
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names and values. The
* function takes an even number of parameters with the first being
* the name and the second being the weight.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @param {Object} value the value of the choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedValuedSet(...values)
{
let set = new ChoiceSet();
if (values.length % 3 != 0) {
throw new Error("weightedValuedSet must be instantiated with triplets");
}
set.choices = [];
for (let i = 0; i < values.length; i+=3)
{
let name = values[i];
let weight = values[i + 1];
let value = values[i + 2];
set.choices.push({name: name, weight:weight, value: value});
}
set.calcIntervals();
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names, weights and values. The
* function takes parameters in multiples of three. The first parameter is
* the name of the choice and the second is the weight. The third object is a
* collection of keys and values that will be applied to the choice in
* question.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @param {Object} object to be merged with the resulting choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedObjectSet(...values)
{
let set = new ChoiceSet();
if (values.length % 3 != 0) {
throw new Error("weightedObjectSet must be instantiated with triplets");
}
set.choices = [];
for (let i = 0; i < values.length; i+=3)
{
let name = values[i];
let weight = values[i + 1];
let object = values[i + 2];
set.choices.push({name: name, weight: weight, _obj: object});
}
set.calcIntervals();
return set;
}
/**
* Calculates the intervals of the weights of the choices in the set. It
* also determines the maximum total weight in the set.
*/
calcIntervals()
{
let intervals = [];
this.choices.reduce(
function(p, c) {
intervals.push(
((p && p.weight) || 0) +
((c && c.weight) || 0)
);
},
null
);
intervals = intervals.map(function(cur, idx, array) { | }
/**
* Allows easy adjustment of a weight for a given index. The weight is
* modified and then calcIntervals() is called to realign things for
* the next choosing.
*
* NOTE see if this is the optimal setting for adjusting the weights
*
* @param {Number} index the index of the choice to modify
* @param {Number} weight the weight of the choice in general
*/
setWeightFor(index, weight)
{
if (this.choices[index]) {
this.choices[index].weight = weight || 100;
this.calcIntervals();
}
}
/**
* This allows weights to be set in bulk. The code will attempt
* to apply a weight for a given choice at equivalent indices.
*
* @param {Array} arrayOfWeights an array of Numberered weights.
*/
setWeights(arrayOfWeights)
{
if (this.choices.length !== arrayOfWeights.length)
{
console.warn('Array length mismatch; applying what is present');
}
for (let i = 0; i < this.choices.length; i++)
{
let choice = this.choices[i];
let weight = arrayOfWeights[i];
if (!choice || !weight || isNaN(weight)) {
continue;
}
choice.weight = weight;
}
this.calcIntervals();
}
/**
* It randomly choose one item from the set. It does so based on a
* randomly chosen number within the given weight set.
*
* @param {String} prop the property of the randomly chosen item
* @return {Mixed} the property value specified for the chosen item. This
* defaults to 'name'.
*/
chooseOne(prop = 'name')
{
return this.chooseProp(prop);
}
/**
* Returns an array of results equivalent to those returned by
* chooseOne.
*
* @param {Number} count an integer denoting the number of choices to pick
* @param {String} prop the property of the randomly chosen item
* @return {Mixed} the property value specified for the chosen item. This
* defaults to 'name'.
*/
chooseSome(count, prop = 'name') {
let choices = [];
for (let i = 0; i < count; i++) {
choices.push(this.chooseProp(prop))
}
return choices;
}
/**
* Simulates rolling dice with n-number of sides. In pen and paper
* role-playing games, 3d6 means to roll three six sided dice together
* and sum their results. Calling ChoiceSet.rollDice(3, 6) will simulate
* the same effect.
*
* Optionally, if repeat is set to a number greater than 1, an array of
* values is returned with the repeated results numbering the supplied
* repeat count.
*
* @param {Number} times the number of times the die should be rolled
* @param {Number} sides the number of sides the die should have
* @param {Number} repeat the number of times the whole process should be
* repeated
* @return {Mixed} either an array of Numbers or a single Number resulting
* in the sum of a die with sides rolled times times.
*/
static rollDice(times, sides, repeat = 1, dropLowest = 0) {
let count = [];
let die = ChoiceSet.weightedRange(1, sides);
for (let i = 0; i < repeat; i++) {
let rolls = die.chooseSome(times);
rolls.sort();
if (dropLowest) {
rolls = rolls.slice(dropLowest);
}
count.push(rolls.reduce((p,c) => p + c, 0));
}
return repeat === 1 ? count[0] : count;
}
/**
* Randomly chooses a value from the set and returns the specified property
* from the chosen object.
*
* @return {String} key the property value specified for the chosen item.
* This defaults to 'name'.
*/
chooseProp(key = 'name')
| return cur + array.slice(0,idx).reduce((p, c) => p + c, 0);
});
this.intervals = intervals;
this.maxInterval = intervals[intervals.length - 1]; | random_line_split |
ChoiceSet.js | = from; i < (to + 1); i++) {
set.choices.push({
name: i,
weight: 100
});
}
if (weights)
{
set.setWeights(weights);
}
else {
set.calcIntervals();
}
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names and values. The
* function takes an even number of parameters with the first being
* the name and the second being the weight.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedSet(...values)
{
let set = new ChoiceSet();
if (values.length % 2 != 0) {
throw new Error("WeightedChoice must be instantiated with pairs");
}
set.choices = [];
for (let i = 0; i < values.length; i+=2)
{
let name = values[i];
let weight = values[i + 1];
set.choices.push({name: name, weight:weight});
}
set.calcIntervals();
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names and values. The
* function takes an even number of parameters with the first being
* the name and the second being the weight.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @param {Object} value the value of the choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedValuedSet(...values)
{
let set = new ChoiceSet();
if (values.length % 3 != 0) {
throw new Error("weightedValuedSet must be instantiated with triplets");
}
set.choices = [];
for (let i = 0; i < values.length; i+=3)
{
let name = values[i];
let weight = values[i + 1];
let value = values[i + 2];
set.choices.push({name: name, weight:weight, value: value});
}
set.calcIntervals();
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names, weights and values. The
* function takes parameters in multiples of three. The first parameter is
* the name of the choice and the second is the weight. The third object is a
* collection of keys and values that will be applied to the choice in
* question.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @param {Object} object to be merged with the resulting choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedObjectSet(...values)
{
let set = new ChoiceSet();
if (values.length % 3 != 0) {
throw new Error("weightedObjectSet must be instantiated with triplets");
}
set.choices = [];
for (let i = 0; i < values.length; i+=3)
{
let name = values[i];
let weight = values[i + 1];
let object = values[i + 2];
set.choices.push({name: name, weight: weight, _obj: object});
}
set.calcIntervals();
return set;
}
/**
* Calculates the intervals of the weights of the choices in the set. It
* also determines the maximum total weight in the set.
*/
calcIntervals()
{
let intervals = [];
this.choices.reduce(
function(p, c) {
intervals.push(
((p && p.weight) || 0) +
((c && c.weight) || 0)
);
},
null
);
intervals = intervals.map(function(cur, idx, array) {
return cur + array.slice(0,idx).reduce((p, c) => p + c, 0);
});
this.intervals = intervals;
this.maxInterval = intervals[intervals.length - 1];
}
/**
* Allows easy adjustment of a weight for a given index. The weight is
* modified and then calcIntervals() is called to realign things for
* the next choosing.
*
* NOTE see if this is the optimal setting for adjusting the weights
*
* @param {Number} index the index of the choice to modify
* @param {Number} weight the weight of the choice in general
*/
setWeightFor(index, weight)
{
if (this.choices[index]) {
this.choices[index].weight = weight || 100;
this.calcIntervals();
}
}
/**
* This allows weights to be set in bulk. The code will attempt
* to apply a weight for a given choice at equivalent indices.
*
* @param {Array} arrayOfWeights an array of Numberered weights.
*/
setWeights(arrayOfWeights)
{
if (this.choices.length !== arrayOfWeights.length)
{
console.warn('Array length mismatch; applying what is present');
}
for (let i = 0; i < this.choices.length; i++)
{
let choice = this.choices[i];
let weight = arrayOfWeights[i];
if (!choice || !weight || isNaN(weight)) {
continue;
}
choice.weight = weight;
}
this.calcIntervals();
}
/**
* It randomly choose one item from the set. It does so based on a
* randomly chosen number within the given weight set.
*
* @param {String} prop the property of the randomly chosen item
* @return {Mixed} the property value specified for the chosen item. This
* defaults to 'name'.
*/
chooseOne(prop = 'name')
{
return this.chooseProp(prop);
}
/**
* Returns an array of results equivalent to those returned by
* chooseOne.
*
* @param {Number} count an integer denoting the number of choices to pick
* @param {String} prop the property of the randomly chosen item
* @return {Mixed} the property value specified for the chosen item. This
* defaults to 'name'.
*/
chooseSome(count, prop = 'name') {
let choices = [];
for (let i = 0; i < count; i++) {
choices.push(this.chooseProp(prop))
}
return choices;
}
/**
* Simulates rolling dice with n-number of sides. In pen and paper
* role-playing games, 3d6 means to roll three six sided dice together
* and sum their results. Calling ChoiceSet.rollDice(3, 6) will simulate
* the same effect.
*
* Optionally, if repeat is set to a number greater than 1, an array of
* values is returned with the repeated results numbering the supplied
* repeat count.
*
* @param {Number} times the number of times the die should be rolled
* @param {Number} sides the number of sides the die should have
* @param {Number} repeat the number of times the whole process should be
* repeated
* @return {Mixed} either an array of Numbers or a single Number resulting
* in the sum of a die with sides rolled times times.
*/
static rollDice(times, sides, repeat = 1, dropLowest = 0) {
let count = [];
let die = ChoiceSet.weightedRange(1, sides);
for (let i = 0; i < repeat; i++) {
let rolls = die.chooseSome(times);
rolls.sort();
if (dropLowest) {
rolls = rolls.slice(dropLowest);
}
count.push(rolls.reduce((p,c) => p + c, 0));
}
return repeat === 1 ? count[0] : count;
}
/**
* Randomly chooses a value from the set and returns the specified property
* from the chosen object.
*
* @return {String} key the property value specified for the chosen item.
* This defaults to 'name'.
*/
chooseProp(key = 'name')
{
let choice = this.one;
return (choice._obj && choice._obj[key]) || choice[key];
}
/**
* Chooses a value via .one and then retrieves the value property of
* the choice.
*
* @return {Mixed} the value of the chosen item from the set
*/
get oneValue()
{
let choice = this.one;
return choice.value || choice._obj || choice.name;
}
/**
* Randomly chooses a value from the set and returns it in its entirety.
*
* @return {Mixed} an object from the ChoiceSet.
*/
get one()
{
let roll = Math.random() * this.maxInterval;
let item = null;
let index = -1;
for (let i = 0; i < this.intervals.length; i++) {
if (roll < this.intervals[i]) | {
index = i;
break;
} | conditional_block |
|
ChoiceSet.js | *
* @param {Array<Object>} n-number of Objects as described in the
* constructor reference. There should be at least a name property
* and a value property.
*/
static of(...values)
{
let array =
values.length === 1 &&
values[0] instanceof Array &&
values[0] || values;
console.log(array);
return new ChoiceSet(...array);
}
/**
* Instantiates a ChoiceSet with numeric choices ranging from
* the supplied from value to the supplied to value. All values
* are returned with an equal weight by default.
*
* @param {Number} from a number indicating the start range
* @param {Number} to a number indicating the end range, inclusive
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedRange(from, to, weights = null)
{
let set = new ChoiceSet();
for (let i = from; i < (to + 1); i++) {
set.choices.push({
name: i,
weight: 100
});
}
if (weights)
{
set.setWeights(weights);
}
else {
set.calcIntervals();
}
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names and values. The
* function takes an even number of parameters with the first being
* the name and the second being the weight.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedSet(...values)
{
let set = new ChoiceSet();
if (values.length % 2 != 0) {
throw new Error("WeightedChoice must be instantiated with pairs");
}
set.choices = [];
for (let i = 0; i < values.length; i+=2)
{
let name = values[i];
let weight = values[i + 1];
set.choices.push({name: name, weight:weight});
}
set.calcIntervals();
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names and values. The
* function takes an even number of parameters with the first being
* the name and the second being the weight.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @param {Object} value the value of the choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedValuedSet(...values)
{
let set = new ChoiceSet();
if (values.length % 3 != 0) {
throw new Error("weightedValuedSet must be instantiated with triplets");
}
set.choices = [];
for (let i = 0; i < values.length; i+=3)
{
let name = values[i];
let weight = values[i + 1];
let value = values[i + 2];
set.choices.push({name: name, weight:weight, value: value});
}
set.calcIntervals();
return set;
}
/**
* An easy way to instantiate a ChoiceSet of names, weights and values. The
* function takes parameters in multiples of three. The first parameter is
* the name of the choice and the second is the weight. The third object is a
* collection of keys and values that will be applied to the choice in
* question.
*
* @param {String} name the name of the choice
* @param {Number} weight the weight of the choice
* @param {Object} object to be merged with the resulting choice
* @return {ChoiceSet} an instance of ChoiceSet
*/
static weightedObjectSet(...values)
{
let set = new ChoiceSet();
if (values.length % 3 != 0) {
throw new Error("weightedObjectSet must be instantiated with triplets");
}
set.choices = [];
for (let i = 0; i < values.length; i+=3)
{
let name = values[i];
let weight = values[i + 1];
let object = values[i + 2];
set.choices.push({name: name, weight: weight, _obj: object});
}
set.calcIntervals();
return set;
}
/**
* Calculates the intervals of the weights of the choices in the set. It
* also determines the maximum total weight in the set.
*/
calcIntervals()
{
let intervals = [];
this.choices.reduce(
function(p, c) {
intervals.push(
((p && p.weight) || 0) +
((c && c.weight) || 0)
);
},
null
);
intervals = intervals.map(function(cur, idx, array) {
return cur + array.slice(0,idx).reduce((p, c) => p + c, 0);
});
this.intervals = intervals;
this.maxInterval = intervals[intervals.length - 1];
}
/**
* Allows easy adjustment of a weight for a given index. The weight is
* modified and then calcIntervals() is called to realign things for
* the next choosing.
*
* NOTE see if this is the optimal setting for adjusting the weights
*
* @param {Number} index the index of the choice to modify
* @param {Number} weight the weight of the choice in general
*/
setWeightFor(index, weight)
{
if (this.choices[index]) {
this.choices[index].weight = weight || 100;
this.calcIntervals();
}
}
/**
* This allows weights to be set in bulk. The code will attempt
* to apply a weight for a given choice at equivalent indices.
*
* @param {Array} arrayOfWeights an array of Numberered weights.
*/
setWeights(arrayOfWeights)
{
if (this.choices.length !== arrayOfWeights.length)
{
console.warn('Array length mismatch; applying what is present');
}
for (let i = 0; i < this.choices.length; i++)
{
let choice = this.choices[i];
let weight = arrayOfWeights[i];
if (!choice || !weight || isNaN(weight)) {
continue;
}
choice.weight = weight;
}
this.calcIntervals();
}
/**
* It randomly choose one item from the set. It does so based on a
* randomly chosen number within the given weight set.
*
* @param {String} prop the property of the randomly chosen item
* @return {Mixed} the property value specified for the chosen item. This
* defaults to 'name'.
*/
| (prop = 'name')
{
return this.chooseProp(prop);
}
/**
* Returns an array of results equivalent to those returned by
* chooseOne.
*
* @param {Number} count an integer denoting the number of choices to pick
* @param {String} prop the property of the randomly chosen item
* @return {Mixed} the property value specified for the chosen item. This
* defaults to 'name'.
*/
chooseSome(count, prop = 'name') {
let choices = [];
for (let i = 0; i < count; i++) {
choices.push(this.chooseProp(prop))
}
return choices;
}
/**
* Simulates rolling dice with n-number of sides. In pen and paper
* role-playing games, 3d6 means to roll three six sided dice together
* and sum their results. Calling ChoiceSet.rollDice(3, 6) will simulate
* the same effect.
*
* Optionally, if repeat is set to a number greater than 1, an array of
* values is returned with the repeated results numbering the supplied
* repeat count.
*
* @param {Number} times the number of times the die should be rolled
* @param {Number} sides the number of sides the die should have
* @param {Number} repeat the number of times the whole process should be
* repeated
* @return {Mixed} either an array of Numbers or a single Number resulting
* in the sum of a die with sides rolled times times.
*/
static rollDice(times, sides, repeat = 1, dropLowest = 0) {
let count = [];
let die = ChoiceSet.weightedRange(1, sides);
for (let i = 0; i < repeat; i++) {
let rolls = die.chooseSome(times);
rolls.sort();
if (dropLowest) {
rolls = rolls.slice(dropLowest);
}
count.push(rolls.reduce((p,c) => p + c, 0));
}
return repeat === 1 ? count[0] : count;
}
/**
* Randomly chooses a value from the set and returns the specified property
* from the chosen object.
*
* @return {String} key the property value specified for the chosen item.
* This defaults to 'name'.
*/
chooseProp(key = 'name')
| chooseOne | identifier_name |
canvas.go | .size.X*c.size.Y)
c.visited = make([]bool, c.size.X*c.size.Y)
for y, line := range lines {
x := 0
for len(line) > 0 {
r, l := utf8.DecodeRune(line)
c.grid[y*c.size.X+x] = char(r)
x++
line = line[l:]
}
for ; x < c.size.X; x++ {
c.grid[y*c.size.X+x] = ' '
}
}
c.findObjects()
return c, nil
}
// The expandTabs function pads tab characters to the specified width of spaces for the provided
// line of input. We cannot simply pad based on byte-offset since our input is UTF-8 encoded.
// Fortunately, we can assume that, by the time this function is called, the line contains only valid
// UTF-8 sequences. We first decode the line rune-wise, and use individual runes to figure out
// where we are within the line. When we encounter a tab character, we expand based on our rune
// index.
func expandTabs(line []byte, tabWidth int) ([]byte, error) {
// Initial sizing of our output slice assumes no UTF-8 bytes or tabs, since this is often
// the common case.
out := make([]byte, 0, len(line))
// pos tracks our position in the input byte slice, while index tracks our position in the
// resulting output slice.
pos := 0
index := 0
	for pos < len(line) {
		c := line[pos]
if c == '\t' {
// Loop over the remaining space count for this particular tabstop until
// the next, replacing each position with a space.
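			// For example, with tabWidth 8 and pos 3 this yields 8-(3%8) = 5
			// spaces, landing the next rune on the tab stop at column 8.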
for s := tabWidth - (pos % tabWidth); s > 0; s-- {
out = append(out, ' ')
index++
}
pos++
} else {
// We need to know the byte length of the rune at this position so that we
// can account for our tab expansion properly. So we first decode the rune
// at this position to get its length in bytes, plop that rune back into our
// output slice, and account accordingly.
r, l := utf8.DecodeRune(line[pos:])
if r == utf8.RuneError {
return nil, fmt.Errorf("invalid rune at byte offset %d; rune offset %d", pos, index)
}
enc := make([]byte, l)
utf8.EncodeRune(enc, r)
out = append(out, enc...)
pos += l
index++
}
}
return out, nil
}
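// The sketch below is not part of the original file: it is a hypothetical
// example (the name exampleExpandTabs is invented here) showing what the
// function above produces for a short line mixing a tab and a multi-byte rune.
// It only assumes "fmt", which the error formatting above already requires.
func exampleExpandTabs() {
	out, err := expandTabs([]byte("x\t→y"), 4)
	if err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	// "x" sits at offset 0, so the tab pads 3 spaces to reach the stop at
	// column 4; the multi-byte '→' after it is copied through unchanged.
	fmt.Printf("%q\n", out) // prints "x   →y"
}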
// canvas is the parsed source data.
type canvas struct {
// (0,0) is top left.
grid []char
visited []bool
objects objects
size image.Point
options map[string]map[string]interface{}
}
func (c *canvas) String() string {
return fmt.Sprintf("%+v", c.grid)
}
func (c *canvas) Objects() []Object {
return c.objects
}
func (c *canvas) Size() image.Point {
return c.size
}
func (c *canvas) Options() map[string]map[string]interface{} {
return c.options
}
// EnclosingObjects returns the closed objects that contain the point p,
// ordered from outermost to innermost.
func (c *canvas) EnclosingObjects(p Point) []Object {
	maxTL := Point{X: -1, Y: -1}
	var q []Object
	for _, o := range c.objects {
		// An object can't really contain another unless it is a polygon.
		if !o.IsClosed() {
			continue
		}
		if o.HasPoint(p) && o.Corners()[0].X > maxTL.X && o.Corners()[0].Y > maxTL.Y {
			q = append(q, o)
			maxTL.X = o.Corners()[0].X
			maxTL.Y = o.Corners()[0].Y
		}
	}
	return q
}
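// Hypothetical usage sketch, not in the original source: because the loop in
// EnclosingObjects appends containers in order of increasing top-left corner,
// the last element is the most deeply nested object containing p. The helper
// name below is invented for illustration.
func innermostEnclosing(c *canvas, p Point) (Object, bool) {
	objs := c.EnclosingObjects(p)
	if len(objs) == 0 {
		var none Object
		return none, false
	}
	return objs[len(objs)-1], true
}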
// findObjects finds all objects (lines, polygons, and text) within the underlying grid.
func (c *canvas) findObjects() {
p := Point{}
// Find any new paths by starting with a point that wasn't yet visited, beginning at the top
// left of the grid.
for y := 0; y < c.size.Y; y++ {
p.Y = y
for x := 0; x < c.size.X; x++ {
p.X = x
if c.isVisited(p) {
continue
}
if ch := c.at(p); ch.isPathStart() {
// Found the start of a one or multiple connected paths. Traverse all
// connecting points. This will generate multiple objects if multiple
// paths (either open or closed) are found.
c.visit(p)
objs := c.scanPath([]Point{p})
for _, obj := range objs {
// For all points in all objects found, mark the points as visited.
for _, p := range obj.Points() {
c.visit(p)
}
}
c.objects = append(c.objects, objs...)
}
}
}
// A second pass through the grid attempts to identify any text within the grid.
for y := 0; y < c.size.Y; y++ {
p.Y = y
for x := 0; x < c.size.X; x++ {
p.X = x
if c.isVisited(p) {
continue
}
if ch := c.at(p); ch.isTextStart() {
obj := c.scanText(p)
// scanText will return nil if the text at this area is simply
// setting options on a container object.
if obj == nil {
continue
}
for _, p := range obj.Points() {
c.visit(p)
}
c.objects = append(c.objects, obj)
}
}
}
sort.Sort(c.objects)
}
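// Illustrative note (not in the original source): for a small grid such as
//
//	+-----+
//	| box |
//	+-----+
//
// the first pass above traces the '+', '-', and '|' characters into a single
// closed polygon object, and the second pass collects "box" as a text object.
// scanText would instead return nil if that text were only setting options on
// its container, as the comment inside the loop notes.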
// scanPath tries to complete a total path (for lines or polygons) starting with some partial path.
// It recurses when it finds multiple unvisited outgoing paths.
func (c *canvas) scanPath(points []Point) objects {
cur := points[len(points)-1]
next := c.next(cur)
// If there are no points that can progress traversal of the path, finalize the one we're
// working on, and return it. This is the terminal condition in the passive flow.
if len(next) == 0 {
if len(points) == 1 {
// Discard 'path' of 1 point. Do not mark point as visited.
c.unvisit(cur)
return nil
}
// TODO(dhobsd): Determine if path is sharing the line with another path. If so,
// we may want to join the objects such that we don't get weird rendering artifacts.
o := &object{points: points}
o.seal(c)
return objects{o}
}
// If we have hit a point that can create a closed path, create an object and close
// the path. Additionally, recurse to other progress directions in case e.g. an open
// path spawns from this point. Paths are always closed vertically.
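	// Traversal always begins at the top-left corner of a shape, so a closed polygon is
	// detected when the walk returns to the starting column exactly one row below the start.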
if cur.X == points[0].X && cur.Y == points[0].Y+1 {
o := &object{points: points}
o.seal(c)
r := objects{o}
return append(r, c.scanPath([]Point{cur})...)
}
// We scan depth-first instead of breadth-first, making it possible to find a
// closed path.
var objs objects
for _, n := range next {
if c.isVisited(n) {
continue
}
c.visit(n)
p2 := make([]Point, len(points)+1)
copy(p2, points)
p2[len(p2)-1] = n
objs = append(objs, c.scanPath(p2)...)
}
return objs
}
// next returns the points that can be used to make progress, scanning (in order) horizontal
// progress to the left or right, vertical progress above or below, or diagonal progress to NW,
// NE, SW, and SE. It skips any points already visited, and returns all of the possible progress
// points.
func (c *canvas) next(pos Point) []Point {
// Our caller must have called c.visit prior to calling this function.
if !c.isVisited(pos) {
panic(fmt.Errorf("internal error; revisiting %s", pos))
}
var out []Point
ch := c.at(pos)
if ch.canHorizontal() {
nextHorizontal := func(p Point) {
if !c.isVisited(p) && c.at(p).canHorizontal() {
out = append(out, p)
}
}
if c.canLeft(pos) {
n := pos
n.X--
nextHorizontal(n)
}
if c.canRight(pos) {
n := pos
n.X++
nextHorizontal(n)
}
}
if ch.canVertical() {
nextVertical := func(p Point) {
if !c.isVisited(p) && c.at(p).canVertical() {
out = append(out, p)
}
}
if c.canUp(pos) {
n := pos
n.Y--
nextVertical(n)
}
if c.canDown(pos) {
n := pos
n.Y++
nextVertical(n)
}
}
if | {
maxTL := Point{X: -1, Y: -1}
var q []Object
for _, o := range c.objects {
// An object can't really contain another unless it is a polygon.
if !o.IsClosed() {
continue
}
if o.HasPoint(p) && o.Corners()[0].X > maxTL.X && o.Corners()[0].Y > maxTL.Y {
q = append(q, o)
maxTL.X = o.Corners()[0].X
maxTL.Y = o.Corners()[0].Y
}
}
return q
} | identifier_body |
canvas.go | // paths (either open or closed) are found.
c.visit(p)
objs := c.scanPath([]Point{p})
for _, obj := range objs {
// For all points in all objects found, mark the points as visited.
for _, p := range obj.Points() {
c.visit(p)
}
}
c.objects = append(c.objects, objs...)
}
}
}
// A second pass through the grid attempts to identify any text within the grid.
for y := 0; y < c.size.Y; y++ {
p.Y = y
for x := 0; x < c.size.X; x++ {
p.X = x
if c.isVisited(p) {
continue
}
if ch := c.at(p); ch.isTextStart() {
obj := c.scanText(p)
// scanText will return nil if the text at this area is simply
// setting options on a container object.
if obj == nil {
continue
}
for _, p := range obj.Points() {
c.visit(p)
}
c.objects = append(c.objects, obj)
}
}
}
sort.Sort(c.objects)
}
// scanPath tries to complete a total path (for lines or polygons) starting with some partial path.
// It recurses when it finds multiple unvisited outgoing paths.
func (c *canvas) scanPath(points []Point) objects {
cur := points[len(points)-1]
next := c.next(cur)
// If there are no points that can progress traversal of the path, finalize the one we're
// working on, and return it. This is the terminal condition in the passive flow.
if len(next) == 0 {
if len(points) == 1 {
// Discard 'path' of 1 point. Do not mark point as visited.
c.unvisit(cur)
return nil
}
// TODO(dhobsd): Determine if path is sharing the line with another path. If so,
// we may want to join the objects such that we don't get weird rendering artifacts.
o := &object{points: points}
o.seal(c)
return objects{o}
}
// If we have hit a point that can create a closed path, create an object and close
// the path. Additionally, recurse to other progress directions in case e.g. an open
// path spawns from this point. Paths are always closed vertically.
if cur.X == points[0].X && cur.Y == points[0].Y+1 {
o := &object{points: points}
o.seal(c)
r := objects{o}
return append(r, c.scanPath([]Point{cur})...)
}
// We scan depth-first instead of breadth-first, making it possible to find a
// closed path.
var objs objects
for _, n := range next {
if c.isVisited(n) {
continue
}
c.visit(n)
p2 := make([]Point, len(points)+1)
copy(p2, points)
p2[len(p2)-1] = n
objs = append(objs, c.scanPath(p2)...)
}
return objs
}
// next returns the points that can be used to make progress, scanning (in order) horizontal
// progress to the left or right, vertical progress above or below, or diagonal progress to NW,
// NE, SW, and SE. It skips any points already visited, and returns all of the possible progress
// points.
func (c *canvas) next(pos Point) []Point {
// Our caller must have called c.visit prior to calling this function.
if !c.isVisited(pos) {
panic(fmt.Errorf("internal error; revisiting %s", pos))
}
var out []Point
ch := c.at(pos)
if ch.canHorizontal() {
nextHorizontal := func(p Point) {
if !c.isVisited(p) && c.at(p).canHorizontal() {
out = append(out, p)
}
}
if c.canLeft(pos) {
n := pos
n.X--
nextHorizontal(n)
}
if c.canRight(pos) {
n := pos
n.X++
nextHorizontal(n)
}
}
if ch.canVertical() {
nextVertical := func(p Point) {
if !c.isVisited(p) && c.at(p).canVertical() {
out = append(out, p)
}
}
if c.canUp(pos) {
n := pos
n.Y--
nextVertical(n)
}
if c.canDown(pos) {
n := pos
n.Y++
nextVertical(n)
}
}
if c.canDiagonal(pos) {
nextDiagonal := func(from, to Point) {
if !c.isVisited(to) && c.at(to).canDiagonalFrom(c.at(from)) {
out = append(out, to)
}
}
if c.canUp(pos) {
if c.canLeft(pos) {
n := pos
n.X--
n.Y--
nextDiagonal(pos, n)
}
if c.canRight(pos) {
n := pos
n.X++
n.Y--
nextDiagonal(pos, n)
}
}
if c.canDown(pos) {
if c.canLeft(pos) {
n := pos
n.X--
n.Y++
nextDiagonal(pos, n)
}
if c.canRight(pos) {
n := pos
n.X++
n.Y++
nextDiagonal(pos, n)
}
}
}
return out
}
// Used for matching [X, Y]: {...} tag definitions. These definitions target specific objects.
var objTagRE = regexp.MustCompile(`(\d+)\s*,\s*(\d+)$`)
// scanText extracts a line of text.
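// The tagged counter below tracks bracketed references: 0 is plain text, 1 means we are
// inside a "[tag]" reference, 2 means the closing bracket was seen (a plain object
// reference), and 3 means the reference is followed by a separator and a JSON definition
// that is parsed into the canvas options.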
func (c *canvas) scanText(start Point) Object {
obj := &object{points: []Point{start}, isText: true}
whiteSpaceStreak := 0
cur := start
tagged := 0
tag := []rune{}
tagDef := []rune{}
for c.canRight(cur) {
if cur.X == start.X && c.at(cur).isObjectStartTag() {
tagged++
} else if cur.X > start.X && c.at(cur).isObjectEndTag() {
tagged++
}
cur.X++
if c.isVisited(cur) {
// If the point is already visited, we hit a polygon or a line.
break
}
ch := c.at(cur)
if !ch.isTextCont() {
break
}
if tagged == 0 && ch.isSpace() {
whiteSpaceStreak++
// Stop when we see 3 consecutive whitespace points.
if whiteSpaceStreak > 2 {
break
}
} else {
whiteSpaceStreak = 0
}
switch tagged {
case 1:
if !c.at(cur).isObjectEndTag() {
tag = append(tag, rune(ch))
}
case 2:
if c.at(cur).isTagDefinitionSeparator() {
tagged++
} else {
tagged = -1
}
case 3:
tagDef = append(tagDef, rune(ch))
}
obj.points = append(obj.points, cur)
}
// If we found a start and end tag marker, we either need to assign the tag to the object,
// or we need to assign the specified options to the global canvas option space.
if tagged == 2 {
t := string(tag)
if container := c.EnclosingObjects(start); container != nil {
container[0].SetTag(t)
}
// The tag applies to the text object as well so that properties like
// a2s:label can be set.
obj.SetTag(t)
} else if tagged == 3 {
t := string(tag)
// A tag definition targeting an object will not be found within any object; we need
// to do that calculation here.
if matches := objTagRE.FindStringSubmatch(t); matches != nil {
if targetX, err := strconv.ParseInt(matches[1], 10, 0); err == nil {
if targetY, err := strconv.ParseInt(matches[2], 10, 0); err == nil {
for i, o := range c.objects {
corner := o.Corners()[0]
if corner.X == int(targetX) && corner.Y == int(targetY) {
c.objects[i].SetTag(t)
break
}
}
}
}
}
// This is a tag definition. Parse the JSON and assign the options to the canvas.
var m interface{}
def := []byte(string(tagDef))
if err := json.Unmarshal(def, &m); err != nil {
// TODO(dhobsd): Gross.
panic(err)
}
// The tag applies to the reference object as well, so that properties like
// a2s:delref can be set. | obj.SetTag(t)
c.options[t] = m.(map[string]interface{})
} | random_line_split |
|
canvas.go | ch := c.at(p); ch.isTextStart() {
obj := c.scanText(p)
// scanText will return nil if the text at this area is simply
// setting options on a container object.
if obj == nil {
continue
}
for _, p := range obj.Points() {
c.visit(p)
}
c.objects = append(c.objects, obj)
}
}
}
sort.Sort(c.objects)
}
// scanPath tries to complete a total path (for lines or polygons) starting with some partial path.
// It recurses when it finds multiple unvisited outgoing paths.
func (c *canvas) scanPath(points []Point) objects {
cur := points[len(points)-1]
next := c.next(cur)
// If there are no points that can progress traversal of the path, finalize the one we're
// working on, and return it. This is the terminal condition in the passive flow.
if len(next) == 0 {
if len(points) == 1 {
// Discard 'path' of 1 point. Do not mark point as visited.
c.unvisit(cur)
return nil
}
// TODO(dhobsd): Determine if path is sharing the line with another path. If so,
// we may want to join the objects such that we don't get weird rendering artifacts.
o := &object{points: points}
o.seal(c)
return objects{o}
}
// If we have hit a point that can create a closed path, create an object and close
// the path. Additionally, recurse to other progress directions in case e.g. an open
// path spawns from this point. Paths are always closed vertically.
if cur.X == points[0].X && cur.Y == points[0].Y+1 {
o := &object{points: points}
o.seal(c)
r := objects{o}
return append(r, c.scanPath([]Point{cur})...)
}
// We scan depth-first instead of breadth-first, making it possible to find a
// closed path.
var objs objects
for _, n := range next {
if c.isVisited(n) {
continue
}
c.visit(n)
p2 := make([]Point, len(points)+1)
copy(p2, points)
p2[len(p2)-1] = n
objs = append(objs, c.scanPath(p2)...)
}
return objs
}
// next returns the points that can be used to make progress, scanning (in order) horizontal
// progress to the left or right, vertical progress above or below, or diagonal progress to NW,
// NE, SW, and SE. It skips any points already visited, and returns all of the possible progress
// points.
func (c *canvas) next(pos Point) []Point {
// Our caller must have called c.visit prior to calling this function.
if !c.isVisited(pos) {
panic(fmt.Errorf("internal error; revisiting %s", pos))
}
var out []Point
ch := c.at(pos)
if ch.canHorizontal() {
nextHorizontal := func(p Point) {
if !c.isVisited(p) && c.at(p).canHorizontal() {
out = append(out, p)
}
}
if c.canLeft(pos) {
n := pos
n.X--
nextHorizontal(n)
}
if c.canRight(pos) {
n := pos
n.X++
nextHorizontal(n)
}
}
if ch.canVertical() {
nextVertical := func(p Point) {
if !c.isVisited(p) && c.at(p).canVertical() {
out = append(out, p)
}
}
if c.canUp(pos) {
n := pos
n.Y--
nextVertical(n)
}
if c.canDown(pos) {
n := pos
n.Y++
nextVertical(n)
}
}
if c.canDiagonal(pos) {
nextDiagonal := func(from, to Point) {
if !c.isVisited(to) && c.at(to).canDiagonalFrom(c.at(from)) {
out = append(out, to)
}
}
if c.canUp(pos) {
if c.canLeft(pos) {
n := pos
n.X--
n.Y--
nextDiagonal(pos, n)
}
if c.canRight(pos) {
n := pos
n.X++
n.Y--
nextDiagonal(pos, n)
}
}
if c.canDown(pos) {
if c.canLeft(pos) {
n := pos
n.X--
n.Y++
nextDiagonal(pos, n)
}
if c.canRight(pos) {
n := pos
n.X++
n.Y++
nextDiagonal(pos, n)
}
}
}
return out
}
// Used for matching [X, Y]: {...} tag definitions. These definitions target specific objects.
var objTagRE = regexp.MustCompile(`(\d+)\s*,\s*(\d+)$`)
// scanText extracts a line of text.
func (c *canvas) scanText(start Point) Object {
obj := &object{points: []Point{start}, isText: true}
whiteSpaceStreak := 0
cur := start
tagged := 0
tag := []rune{}
tagDef := []rune{}
for c.canRight(cur) {
if cur.X == start.X && c.at(cur).isObjectStartTag() {
tagged++
} else if cur.X > start.X && c.at(cur).isObjectEndTag() {
tagged++
}
cur.X++
if c.isVisited(cur) {
// If the point is already visited, we hit a polygon or a line.
break
}
ch := c.at(cur)
if !ch.isTextCont() {
break
}
if tagged == 0 && ch.isSpace() {
whiteSpaceStreak++
// Stop when we see 3 consecutive whitespace points.
if whiteSpaceStreak > 2 {
break
}
} else {
whiteSpaceStreak = 0
}
switch tagged {
case 1:
if !c.at(cur).isObjectEndTag() {
tag = append(tag, rune(ch))
}
case 2:
if c.at(cur).isTagDefinitionSeparator() {
tagged++
} else {
tagged = -1
}
case 3:
tagDef = append(tagDef, rune(ch))
}
obj.points = append(obj.points, cur)
}
// If we found a start and end tag marker, we either need to assign the tag to the object,
// or we need to assign the specified options to the global canvas option space.
if tagged == 2 {
t := string(tag)
if container := c.EnclosingObjects(start); container != nil {
container[0].SetTag(t)
}
// The tag applies to the text object as well so that properties like
// a2s:label can be set.
obj.SetTag(t)
} else if tagged == 3 {
t := string(tag)
// A tag definition targeting an object will not be found within any object; we need
// to do that calculation here.
if matches := objTagRE.FindStringSubmatch(t); matches != nil {
if targetX, err := strconv.ParseInt(matches[1], 10, 0); err == nil {
if targetY, err := strconv.ParseInt(matches[2], 10, 0); err == nil {
for i, o := range c.objects {
corner := o.Corners()[0]
if corner.X == int(targetX) && corner.Y == int(targetY) {
c.objects[i].SetTag(t)
break
}
}
}
}
}
// This is a tag definition. Parse the JSON and assign the options to the canvas.
var m interface{}
def := []byte(string(tagDef))
if err := json.Unmarshal(def, &m); err != nil {
// TODO(dhobsd): Gross.
panic(err)
}
// The tag applies to the reference object as well, so that properties like
// a2s:delref can be set.
obj.SetTag(t)
c.options[t] = m.(map[string]interface{})
}
// Trim the right side of the text object.
for len(obj.points) != 0 && c.at(obj.points[len(obj.points)-1]).isSpace() {
obj.points = obj.points[:len(obj.points)-1]
}
obj.seal(c)
return obj
}
func (c *canvas) at(p Point) char {
return c.grid[p.Y*c.size.X+p.X]
}
func (c *canvas) isVisited(p Point) bool {
return c.visited[p.Y*c.size.X+p.X]
}
func (c *canvas) visit(p Point) {
// TODO(dhobsd): Change code to ensure that visit() is called once and only
// once per point.
c.visited[p.Y*c.size.X+p.X] = true
}
func (c *canvas) | unvisit | identifier_name |
|
canvas.go | .size.X*c.size.Y)
c.visited = make([]bool, c.size.X*c.size.Y)
for y, line := range lines {
x := 0
for len(line) > 0 {
r, l := utf8.DecodeRune(line)
c.grid[y*c.size.X+x] = char(r)
x++
line = line[l:]
}
for ; x < c.size.X; x++ {
c.grid[y*c.size.X+x] = ' '
}
}
c.findObjects()
return c, nil
}
// The expandTabs function pads tab characters to the specified width of spaces for the provided
// line of input. We cannot simply pad based on byte-offset since our input is UTF-8 encoded.
// Fortunately, we can assume that by the time this function is called the line contains only valid
// UTF-8 sequences. We first decode the line rune-wise, and use individual runes to figure out
// where we are within the line. When we encounter a tab character, we expand based on our rune
// index.
func expandTabs(line []byte, tabWidth int) ([]byte, error) {
// Initial sizing of our output slice assumes no UTF-8 bytes or tabs, since this is often
// the common case.
out := make([]byte, 0, len(line))
// pos tracks our position in the input byte slice, while index tracks our position in the
// resulting output slice.
pos := 0
index := 0
for _, c := range line {
if c == '\t' {
// Loop over the remaining space count for this particular tabstop until
// the next, replacing each position with a space.
for s := tabWidth - (pos % tabWidth); s > 0; s-- {
out = append(out, ' ')
index++
}
pos++
} else {
// We need to know the byte length of the rune at this position so that we
// can account for our tab expansion properly. So we first decode the rune
// at this position to get its length in bytes, plop that rune back into our
// output slice, and account accordingly.
r, l := utf8.DecodeRune(line[pos:])
if r == utf8.RuneError {
return nil, fmt.Errorf("invalid rune at byte offset %d; rune offset %d", pos, index)
}
enc := make([]byte, l)
utf8.EncodeRune(enc, r)
out = append(out, enc...)
pos += l
index++
}
}
return out, nil
}
// canvas is the parsed source data.
type canvas struct {
// (0,0) is top left.
grid []char
visited []bool
objects objects
size image.Point
options map[string]map[string]interface{}
}
func (c *canvas) String() string {
return fmt.Sprintf("%+v", c.grid)
}
func (c *canvas) Objects() []Object {
return c.objects
}
func (c *canvas) Size() image.Point {
return c.size
}
func (c *canvas) Options() map[string]map[string]interface{} {
return c.options
}
func (c *canvas) EnclosingObjects(p Point) []Object {
maxTL := Point{X: -1, Y: -1}
var q []Object
for _, o := range c.objects {
// An object can't really contain another unless it is a polygon.
if !o.IsClosed() {
continue
}
if o.HasPoint(p) && o.Corners()[0].X > maxTL.X && o.Corners()[0].Y > maxTL.Y {
q = append(q, o)
maxTL.X = o.Corners()[0].X
maxTL.Y = o.Corners()[0].Y
}
}
return q
}
// findObjects finds all objects (lines, polygons, and text) within the underlying grid.
func (c *canvas) findObjects() {
p := Point{}
// Find any new paths by starting with a point that wasn't yet visited, beginning at the top
// left of the grid.
for y := 0; y < c.size.Y; y++ {
p.Y = y
for x := 0; x < c.size.X; x++ {
p.X = x
if c.isVisited(p) {
continue
}
if ch := c.at(p); ch.isPathStart() {
// Found the start of one or more connected paths. Traverse all
// connecting points. This will generate multiple objects if multiple
// paths (either open or closed) are found.
c.visit(p)
objs := c.scanPath([]Point{p})
for _, obj := range objs {
// For all points in all objects found, mark the points as visited.
for _, p := range obj.Points() {
c.visit(p)
}
}
c.objects = append(c.objects, objs...)
}
}
}
// A second pass through the grid attempts to identify any text within the grid.
for y := 0; y < c.size.Y; y++ {
p.Y = y
for x := 0; x < c.size.X; x++ {
p.X = x
if c.isVisited(p) {
continue
}
if ch := c.at(p); ch.isTextStart() {
obj := c.scanText(p)
// scanText will return nil if the text at this area is simply
// setting options on a container object.
if obj == nil {
continue
}
for _, p := range obj.Points() {
c.visit(p)
}
c.objects = append(c.objects, obj)
}
}
}
sort.Sort(c.objects)
}
// scanPath tries to complete a total path (for lines or polygons) starting with some partial path.
// It recurses when it finds multiple unvisited outgoing paths.
func (c *canvas) scanPath(points []Point) objects {
cur := points[len(points)-1]
next := c.next(cur)
// If there are no points that can progress traversal of the path, finalize the one we're
// working on, and return it. This is the terminal condition in the passive flow.
if len(next) == 0 {
if len(points) == 1 {
// Discard 'path' of 1 point. Do not mark point as visited.
c.unvisit(cur)
return nil
}
// TODO(dhobsd): Determine if path is sharing the line with another path. If so,
// we may want to join the objects such that we don't get weird rendering artifacts.
o := &object{points: points}
o.seal(c)
return objects{o}
}
// If we have hit a point that can create a closed path, create an object and close
// the path. Additionally, recurse to other progress directions in case e.g. an open
// path spawns from this point. Paths are always closed vertically.
if cur.X == points[0].X && cur.Y == points[0].Y+1 {
o := &object{points: points}
o.seal(c)
r := objects{o}
return append(r, c.scanPath([]Point{cur})...)
}
// We scan depth-first instead of breadth-first, making it possible to find a
// closed path.
var objs objects
for _, n := range next |
return objs
}
// next returns the points that can be used to make progress, scanning (in order) horizontal
// progress to the left or right, vertical progress above or below, or diagonal progress to NW,
// NE, SW, and SE. It skips any points already visited, and returns all of the possible progress
// points.
func (c *canvas) next(pos Point) []Point {
// Our caller must have called c.visit prior to calling this function.
if !c.isVisited(pos) {
panic(fmt.Errorf("internal error; revisiting %s", pos))
}
var out []Point
ch := c.at(pos)
if ch.canHorizontal() {
nextHorizontal := func(p Point) {
if !c.isVisited(p) && c.at(p).canHorizontal() {
out = append(out, p)
}
}
if c.canLeft(pos) {
n := pos
n.X--
nextHorizontal(n)
}
if c.canRight(pos) {
n := pos
n.X++
nextHorizontal(n)
}
}
if ch.canVertical() {
nextVertical := func(p Point) {
if !c.isVisited(p) && c.at(p).canVertical() {
out = append(out, p)
}
}
if c.canUp(pos) {
n := pos
n.Y--
nextVertical(n)
}
if c.canDown(pos) {
n := pos
n.Y++
nextVertical(n)
}
}
if | {
if c.isVisited(n) {
continue
}
c.visit(n)
p2 := make([]Point, len(points)+1)
copy(p2, points)
p2[len(p2)-1] = n
objs = append(objs, c.scanPath(p2)...)
} | conditional_block |
client.rs | fn new(reader: R, writer: W, rng: OsRng) -> TlsResult<TlsClient<R, W>> {
let mut client = TlsClient {
reader: TlsReader::new(reader),
writer: TlsWriter::new(writer),
rng: rng,
buf: Vec::new(),
};
// handshake failed. send alert if necessary
match client.handshake() {
Ok(()) => {}
Err(err) => return Err(client.send_tls_alert(err)),
}
Ok(client)
}
#[inline]
pub fn reader(&mut self) -> &mut R {
self.reader.get_mut()
}
#[inline]
pub fn writer(&mut self) -> &mut W {
self.writer.get_mut()
}
// this does not send an alert when an error occurs
fn handshake(&mut self) -> TlsResult<()> {
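// Message flow implemented below (TLS 1.2, ECDHE_RSA with ChaCha20-Poly1305):
// send ClientHello; receive ServerHello, Certificate, ServerKeyExchange and
// ServerHelloDone; send ClientKeyExchange, ChangeCipherSpec and Finished;
// then receive the server's ChangeCipherSpec and Finished.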
// expect specific HandshakeMessage. otherwise return Err
macro_rules! expect {
($var:ident) => ({
match try!(self.reader.read_handshake()) {
handshake::Handshake::$var(data) => data,
_ => return tls_err!(UnexpectedMessage, "unexpected handshake message found"),
}
})
}
let cli_random = {
let mut random_bytes = [0u8; 32];
self.rng.fill_bytes(&mut random_bytes);
random_bytes.to_vec()
};
let random = try!(handshake::Random::new(cli_random.clone()));
// the only cipher we currently support
let cipher_suite = cipher::CipherSuite::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256;
let curve_list = vec!(handshake::NamedCurve::secp256r1);
let curve_list = try!(handshake::Extension::new_elliptic_curve_list(curve_list));
let format_list = vec!(handshake::ECPointFormat::uncompressed);
let format_list = try!(handshake::Extension::new_ec_point_formats(format_list));
let extensions = vec!(curve_list, format_list);
let client_hello = try!(Handshake::new_client_hello(random, cipher_suite, extensions));
try!(self.writer.write_handshake(&client_hello));
let server_hello_data = expect!(server_hello);
{
let server_major = server_hello_data.server_version.major;
let server_minor = server_hello_data.server_version.minor;
if (server_major, server_minor) != TLS_VERSION {
return tls_err!(IllegalParameter,
"wrong server version: {} {}",
server_major,
server_minor);
}
if server_hello_data.cipher_suite != cipher_suite {
return tls_err!(IllegalParameter,
"cipher suite mismatch: found {:?}",
server_hello_data.cipher_suite);
}
if server_hello_data.compression_method != handshake::CompressionMethod::null {
return tls_err!(IllegalParameter, "compression method mismatch");
}
// FIXME: check if server sent unknown extension
// it is currently done by just not understanding any extensions
// other than we used.
}
// we always expect certificate.
let certificate_list = expect!(certificate);
// TODO: cert validation not implemented yet
// we always use server key exchange
let server_key_ex_data = expect!(server_key_exchange);
let kex = cipher_suite.new_kex();
let (key_data, pre_master_secret) = try!(kex.compute_keys(&server_key_ex_data,
&mut self.rng));
expect!(server_hello_done);
let client_key_exchange = try!(Handshake::new_client_key_exchange(key_data));
try!(self.writer.write_handshake(&client_key_exchange));
try!(self.writer.write_change_cipher_spec());
// SECRET
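// Per RFC 5246: master_secret = PRF(pre_master_secret, "master secret",
// ClientHello.random + ServerHello.random), truncated to 48 bytes.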
let master_secret = {
let mut label_seed = b"master secret".to_vec();
label_seed.extend(&cli_random);
label_seed.extend(&server_hello_data.random[..]);
let mut prf = Prf::new(pre_master_secret, label_seed);
prf.get_bytes(48)
};
let aead = cipher_suite.new_aead();
// SECRET
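// Key expansion per RFC 5246: key_block = PRF(master_secret, "key expansion",
// server_random + client_random). With an AEAD cipher no MAC keys are needed, so only
// the client write key (our encryptor) and the server write key (our decryptor) are taken.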
let read_key = {
let mut label_seed = b"key expansion".to_vec();
label_seed.extend(&server_hello_data.random[..]);
label_seed.extend(&cli_random);
let mut prf = Prf::new(master_secret.clone(), label_seed);
// mac_key is not used in AEAD configuration.
let enc_key_length = aead.key_size();
let write_key = prf.get_bytes(enc_key_length);
let encryptor = aead.new_encryptor(write_key);
self.writer.set_encryptor(encryptor);
// this will be set after receiving ChangeCipherSpec.
let read_key = prf.get_bytes(enc_key_length);
// chacha20-poly1305 does not use iv.
read_key
};
// FIXME we should get "raw" packet data and hash them incrementally
let msgs = {
let mut msgs = Vec::new();
try!(client_hello.tls_write(&mut msgs));
try!(Handshake::server_hello(server_hello_data).tls_write(&mut msgs));
try!(Handshake::certificate(certificate_list).tls_write(&mut msgs));
try!(Handshake::server_key_exchange(server_key_ex_data).tls_write(&mut msgs));
try!(Handshake::server_hello_done(DummyItem).tls_write(&mut msgs));
try!(client_key_exchange.tls_write(&mut msgs));
msgs
};
// this only verifies Handshake messages! what about others?
// ApplicationData messages are not permitted until now.
// ChangeCipherSpec messages are only permitted after ClientKeyExchange.
// Alert messages can be problematic - they are not verified and
// can be broken into several records. This leads to the alert attack.
// Since we don't accept strange alerts, all "normal" alert messages are
// treated as errors, so now we can assert that we haven't received alerts.
let verify_hash = sha256(&msgs);
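// Finished.verify_data = PRF(master_secret, finished_label, Hash(handshake_messages)),
// truncated to the cipher suite's verify_data length.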
let client_verify_data = {
let finished_label = b"client finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret.clone(), label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let finished = try!(Handshake::new_finished(client_verify_data));
try!(self.writer.write_handshake(&finished));
// Although client->server is encrypted, server->client isn't yet.
// server may send either ChangeCipherSpec or Alert.
try!(self.reader.read_change_cipher_spec());
// from now server starts encryption.
self.reader.set_decryptor(aead.new_decryptor(read_key));
let server_finished = expect!(finished);
{
let verify_hash = {
// ideally we may save "raw" packet data..
let mut serv_msgs = Vec::new();
// FIXME: this should not throw "io error".. should throw "internal error"
try!(Write::write_all(&mut serv_msgs, &msgs));
try!(finished.tls_write(&mut serv_msgs));
let verify_hash = sha256(&serv_msgs);
verify_hash
};
let server_verify_data = {
let finished_label = b"server finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret, label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let verify_ok = crypto_compare(&server_finished,
&server_verify_data);
if !verify_ok {
return tls_err!(DecryptError, "server sent wrong verify data");
}
}
Ok(())
}
pub fn close(&mut self) -> TlsResult<()> {
let alert_data = alert::Alert {
level: alert::AlertLevel::fatal,
description: alert::AlertDescription::close_notify,
};
try!(self.writer.write_alert(&alert_data));
Ok(())
}
// send fatal alert and return error
// (it may be different to `err`, because writing alert can fail)
pub fn send_tls_alert(&mut self, err: TlsError) -> TlsError {
match err.kind {
TlsErrorKind::IoFailure => return err,
_ => {
let alert = alert::Alert::from_tls_err(&err);
let result = self.writer.write_alert(&alert);
match result {
Ok(()) => return err,
Err(err) => return err,
}
}
}
}
}
impl TlsClient<TcpStream, TcpStream> {
pub fn from_tcp(stream: TcpStream) -> TlsResult<TlsClient<TcpStream, TcpStream>> {
let rng = match OsRng::new() {
Ok(rng) => rng,
Err(..) => return tls_err!(InternalError, "failed to create OsRng"),
};
let reader = try!(stream.try_clone());
let writer = stream;
TlsClient::new(reader, writer, rng)
}
}
impl<R: Read, W: Write> Write for TlsClient<R, W> {
// this either writes all or fails.
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
try!(self.write_all(buf));
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
fn | write_all | identifier_name |
|
client.rs | <R>,
pub writer: TlsWriter<W>,
pub rng: OsRng,
buf: Vec<u8>,
}
impl<R: Read, W: Write> TlsClient<R, W> {
pub fn new(reader: R, writer: W, rng: OsRng) -> TlsResult<TlsClient<R, W>> {
let mut client = TlsClient {
reader: TlsReader::new(reader),
writer: TlsWriter::new(writer),
rng: rng,
buf: Vec::new(),
};
// handshake failed. send alert if necessary
match client.handshake() {
Ok(()) => {}
Err(err) => return Err(client.send_tls_alert(err)),
}
Ok(client)
}
#[inline]
pub fn reader(&mut self) -> &mut R {
self.reader.get_mut()
}
#[inline]
pub fn writer(&mut self) -> &mut W {
self.writer.get_mut()
}
// this does not send an alert when an error occurs
fn handshake(&mut self) -> TlsResult<()> {
// expect specific HandshakeMessage. otherwise return Err
macro_rules! expect {
($var:ident) => ({
match try!(self.reader.read_handshake()) {
handshake::Handshake::$var(data) => data,
_ => return tls_err!(UnexpectedMessage, "unexpected handshake message found"),
}
})
}
let cli_random = {
let mut random_bytes = [0u8; 32];
self.rng.fill_bytes(&mut random_bytes);
random_bytes.to_vec()
};
let random = try!(handshake::Random::new(cli_random.clone()));
// the only cipher we currently support
let cipher_suite = cipher::CipherSuite::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256;
let curve_list = vec!(handshake::NamedCurve::secp256r1);
let curve_list = try!(handshake::Extension::new_elliptic_curve_list(curve_list));
let format_list = vec!(handshake::ECPointFormat::uncompressed);
let format_list = try!(handshake::Extension::new_ec_point_formats(format_list));
let extensions = vec!(curve_list, format_list);
let client_hello = try!(Handshake::new_client_hello(random, cipher_suite, extensions));
try!(self.writer.write_handshake(&client_hello));
let server_hello_data = expect!(server_hello);
{
let server_major = server_hello_data.server_version.major;
let server_minor = server_hello_data.server_version.minor;
if (server_major, server_minor) != TLS_VERSION {
return tls_err!(IllegalParameter,
"wrong server version: {} {}",
server_major,
server_minor);
}
if server_hello_data.cipher_suite != cipher_suite {
return tls_err!(IllegalParameter,
"cipher suite mismatch: found {:?}",
server_hello_data.cipher_suite);
}
if server_hello_data.compression_method != handshake::CompressionMethod::null {
return tls_err!(IllegalParameter, "compression method mismatch");
}
// FIXME: check if server sent unknown extension
// it is currently done by just not understanding any extensions
// other than we used.
}
// we always expect certificate.
let certificate_list = expect!(certificate);
// TODO: cert validation not implemented yet
// we always use server key exchange
let server_key_ex_data = expect!(server_key_exchange);
let kex = cipher_suite.new_kex();
let (key_data, pre_master_secret) = try!(kex.compute_keys(&server_key_ex_data,
&mut self.rng));
expect!(server_hello_done);
let client_key_exchange = try!(Handshake::new_client_key_exchange(key_data)); | try!(self.writer.write_handshake(&client_key_exchange));
try!(self.writer.write_change_cipher_spec());
// SECRET
let master_secret = {
let mut label_seed = b"master secret".to_vec();
label_seed.extend(&cli_random);
label_seed.extend(&server_hello_data.random[..]);
let mut prf = Prf::new(pre_master_secret, label_seed);
prf.get_bytes(48)
};
let aead = cipher_suite.new_aead();
// SECRET
let read_key = {
let mut label_seed = b"key expansion".to_vec();
label_seed.extend(&server_hello_data.random[..]);
label_seed.extend(&cli_random);
let mut prf = Prf::new(master_secret.clone(), label_seed);
// mac_key is not used in AEAD configuration.
let enc_key_length = aead.key_size();
let write_key = prf.get_bytes(enc_key_length);
let encryptor = aead.new_encryptor(write_key);
self.writer.set_encryptor(encryptor);
// this will be set after receiving ChangeCipherSpec.
let read_key = prf.get_bytes(enc_key_length);
// chacha20-poly1305 does not use iv.
read_key
};
// FIXME we should get "raw" packet data and hash them incrementally
let msgs = {
let mut msgs = Vec::new();
try!(client_hello.tls_write(&mut msgs));
try!(Handshake::server_hello(server_hello_data).tls_write(&mut msgs));
try!(Handshake::certificate(certificate_list).tls_write(&mut msgs));
try!(Handshake::server_key_exchange(server_key_ex_data).tls_write(&mut msgs));
try!(Handshake::server_hello_done(DummyItem).tls_write(&mut msgs));
try!(client_key_exchange.tls_write(&mut msgs));
msgs
};
// this only verifies Handshake messages! what about others?
// ApplicationData messages are not permitted until now.
// ChangeCipherSpec messages are only permitted after ClientKeyExchange.
// Alert messages can be problematic - they are not verified and
// can be broken into several records. This leads to the alert attack.
// Since we don't accept strange alerts, all "normal" alert messages are
// treated as errors, so now we can assert that we haven't received alerts.
let verify_hash = sha256(&msgs);
let client_verify_data = {
let finished_label = b"client finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret.clone(), label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let finished = try!(Handshake::new_finished(client_verify_data));
try!(self.writer.write_handshake(&finished));
// Although client->server is encrypted, server->client isn't yet.
// server may send either ChangeCipherSpec or Alert.
try!(self.reader.read_change_cipher_spec());
// from now server starts encryption.
self.reader.set_decryptor(aead.new_decryptor(read_key));
let server_finished = expect!(finished);
{
let verify_hash = {
// ideally we may save "raw" packet data..
let mut serv_msgs = Vec::new();
// FIXME: this should not throw "io error".. should throw "internal error"
try!(Write::write_all(&mut serv_msgs, &msgs));
try!(finished.tls_write(&mut serv_msgs));
let verify_hash = sha256(&serv_msgs);
verify_hash
};
let server_verify_data = {
let finished_label = b"server finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret, label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let verify_ok = crypto_compare(&server_finished,
&server_verify_data);
if !verify_ok {
return tls_err!(DecryptError, "server sent wrong verify data");
}
}
Ok(())
}
pub fn close(&mut self) -> TlsResult<()> {
let alert_data = alert::Alert {
level: alert::AlertLevel::fatal,
description: alert::AlertDescription::close_notify,
};
try!(self.writer.write_alert(&alert_data));
Ok(())
}
// send fatal alert and return error
// (it may be different to `err`, because writing alert can fail)
pub fn send_tls_alert(&mut self, err: TlsError) -> TlsError {
match err.kind {
TlsErrorKind::IoFailure => return err,
_ => {
let alert = alert::Alert::from_tls_err(&err);
let result = self.writer.write_alert(&alert);
match result {
Ok(()) => return err,
Err(err) => return err,
}
}
}
}
}
impl TlsClient<TcpStream, TcpStream> {
pub fn from_tcp(stream: TcpStream) -> TlsResult<TlsClient<TcpStream, TcpStream>> {
let rng = match OsRng::new() {
Ok(rng) => rng,
Err(..) => return tls_err!(InternalError, "failed to create OsRng"),
};
let reader = try!(stream.try_clone());
let writer = stream;
TlsClient::new(reader, writer, rng)
}
}
impl<R: Read, W: Write> Write for TlsClient<R, W> {
// this either writes all or fails.
fn write(&mut self, buf: &[u8 | random_line_split |
|
client.rs | ) -> &mut W {
self.writer.get_mut()
}
// this does not send an alert when an error occurs
fn handshake(&mut self) -> TlsResult<()> {
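// Message flow implemented below (TLS 1.2, ECDHE_RSA with ChaCha20-Poly1305):
// send ClientHello; receive ServerHello, Certificate, ServerKeyExchange and
// ServerHelloDone; send ClientKeyExchange, ChangeCipherSpec and Finished;
// then receive the server's ChangeCipherSpec and Finished.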
// expect specific HandshakeMessage. otherwise return Err
macro_rules! expect {
($var:ident) => ({
match try!(self.reader.read_handshake()) {
handshake::Handshake::$var(data) => data,
_ => return tls_err!(UnexpectedMessage, "unexpected handshake message found"),
}
})
}
let cli_random = {
let mut random_bytes = [0u8; 32];
self.rng.fill_bytes(&mut random_bytes);
random_bytes.to_vec()
};
let random = try!(handshake::Random::new(cli_random.clone()));
// the only cipher we currently support
let cipher_suite = cipher::CipherSuite::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256;
let curve_list = vec!(handshake::NamedCurve::secp256r1);
let curve_list = try!(handshake::Extension::new_elliptic_curve_list(curve_list));
let format_list = vec!(handshake::ECPointFormat::uncompressed);
let format_list = try!(handshake::Extension::new_ec_point_formats(format_list));
let extensions = vec!(curve_list, format_list);
let client_hello = try!(Handshake::new_client_hello(random, cipher_suite, extensions));
try!(self.writer.write_handshake(&client_hello));
let server_hello_data = expect!(server_hello);
{
let server_major = server_hello_data.server_version.major;
let server_minor = server_hello_data.server_version.minor;
if (server_major, server_minor) != TLS_VERSION {
return tls_err!(IllegalParameter,
"wrong server version: {} {}",
server_major,
server_minor);
}
if server_hello_data.cipher_suite != cipher_suite {
return tls_err!(IllegalParameter,
"cipher suite mismatch: found {:?}",
server_hello_data.cipher_suite);
}
if server_hello_data.compression_method != handshake::CompressionMethod::null {
return tls_err!(IllegalParameter, "compression method mismatch");
}
// FIXME: check if server sent unknown extension
// it is currently done by just not understanding any extensions
// other than we used.
}
// we always expect certificate.
let certificate_list = expect!(certificate);
// TODO: cert validation not implemented yet
// we always use server key exchange
let server_key_ex_data = expect!(server_key_exchange);
let kex = cipher_suite.new_kex();
let (key_data, pre_master_secret) = try!(kex.compute_keys(&server_key_ex_data,
&mut self.rng));
expect!(server_hello_done);
let client_key_exchange = try!(Handshake::new_client_key_exchange(key_data));
try!(self.writer.write_handshake(&client_key_exchange));
try!(self.writer.write_change_cipher_spec());
// SECRET
let master_secret = {
let mut label_seed = b"master secret".to_vec();
label_seed.extend(&cli_random);
label_seed.extend(&server_hello_data.random[..]);
let mut prf = Prf::new(pre_master_secret, label_seed);
prf.get_bytes(48)
};
let aead = cipher_suite.new_aead();
// SECRET
let read_key = {
let mut label_seed = b"key expansion".to_vec();
label_seed.extend(&server_hello_data.random[..]);
label_seed.extend(&cli_random);
let mut prf = Prf::new(master_secret.clone(), label_seed);
// mac_key is not used in AEAD configuration.
let enc_key_length = aead.key_size();
let write_key = prf.get_bytes(enc_key_length);
let encryptor = aead.new_encryptor(write_key);
self.writer.set_encryptor(encryptor);
// this will be set after receiving ChangeCipherSpec.
let read_key = prf.get_bytes(enc_key_length);
// chacha20-poly1305 does not use iv.
read_key
};
// FIXME we should get "raw" packet data and hash them incrementally
let msgs = {
let mut msgs = Vec::new();
try!(client_hello.tls_write(&mut msgs));
try!(Handshake::server_hello(server_hello_data).tls_write(&mut msgs));
try!(Handshake::certificate(certificate_list).tls_write(&mut msgs));
try!(Handshake::server_key_exchange(server_key_ex_data).tls_write(&mut msgs));
try!(Handshake::server_hello_done(DummyItem).tls_write(&mut msgs));
try!(client_key_exchange.tls_write(&mut msgs));
msgs
};
// this only verifies Handshake messages! what about others?
// ApplicationData messages are not permitted until now.
// ChangeCipherSpec messages are only permitted after ClientKeyExchange.
// Alert messages can be problematic - they are not verified and
// can be broken into several records. This leads to the alert attack.
// Since we don't accept strange alerts, all "normal" alert messages are
// treated as errors, so now we can assert that we haven't received alerts.
let verify_hash = sha256(&msgs);
let client_verify_data = {
let finished_label = b"client finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret.clone(), label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let finished = try!(Handshake::new_finished(client_verify_data));
try!(self.writer.write_handshake(&finished));
// Although client->server is encrypted, server->client isn't yet.
// server may send either ChangeCipherSpec or Alert.
try!(self.reader.read_change_cipher_spec());
// from now server starts encryption.
self.reader.set_decryptor(aead.new_decryptor(read_key));
let server_finished = expect!(finished);
{
let verify_hash = {
// ideally we may save "raw" packet data..
let mut serv_msgs = Vec::new();
// FIXME: this should not throw "io error".. should throw "internal error"
try!(Write::write_all(&mut serv_msgs, &msgs));
try!(finished.tls_write(&mut serv_msgs));
let verify_hash = sha256(&serv_msgs);
verify_hash
};
let server_verify_data = {
let finished_label = b"server finished";
let mut label_seed = finished_label.to_vec();
label_seed.extend(&verify_hash);
let mut prf = Prf::new(master_secret, label_seed);
prf.get_bytes(cipher_suite.verify_data_len())
};
let verify_ok = crypto_compare(&server_finished,
&server_verify_data);
if !verify_ok {
return tls_err!(DecryptError, "server sent wrong verify data");
}
}
Ok(())
}
pub fn close(&mut self) -> TlsResult<()> {
let alert_data = alert::Alert {
level: alert::AlertLevel::fatal,
description: alert::AlertDescription::close_notify,
};
try!(self.writer.write_alert(&alert_data));
Ok(())
}
// send fatal alert and return error
// (it may be different to `err`, because writing alert can fail)
pub fn send_tls_alert(&mut self, err: TlsError) -> TlsError {
match err.kind {
TlsErrorKind::IoFailure => return err,
_ => {
let alert = alert::Alert::from_tls_err(&err);
let result = self.writer.write_alert(&alert);
match result {
Ok(()) => return err,
Err(err) => return err,
}
}
}
}
}
impl TlsClient<TcpStream, TcpStream> {
pub fn from_tcp(stream: TcpStream) -> TlsResult<TlsClient<TcpStream, TcpStream>> {
let rng = match OsRng::new() {
Ok(rng) => rng,
Err(..) => return tls_err!(InternalError, "failed to create OsRng"),
};
let reader = try!(stream.try_clone());
let writer = stream;
TlsClient::new(reader, writer, rng)
}
}
impl<R: Read, W: Write> Write for TlsClient<R, W> {
// this either writes all or fails.
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
try!(self.write_all(buf));
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
let result = self.writer.write_application_data(buf);
match result {
Ok(()) => Ok(()),
Err(err) => {
let err = self.send_tls_alert(err);
// FIXME more verbose io error
Err(io::Error::new(io::ErrorKind::Other, SurugaError {
desc: "TLS write error",
cause: Some(Box::new(err)),
}))
}
}
}
}
// A replacement for the deprecated std::slice::bytes::copy_memory
fn copy_memory(from: &[u8], mut to: &mut [u8]) -> usize | {
to.write(from).unwrap()
} | identifier_body |
|
analysis.py | object and returns a matrix with each
column normalized so its minimum value is mapped to zero and its maximum value is mapped to 1."""
column_matrix=data.get_data(headers)
column_max=column_matrix.max(1)
column_min=column_matrix.min(1)
range=column_max-column_min
normalized=(column_matrix-column_min)/range
return normalized
def normalize_columns_together(headers, data):
""" Takes in a list of column headers and the Data object and returns a matrix with each entry normalized
so that the minimum value (of all the data in this set of columns) is mapped to zero and its maximum value
is mapped to 1."""
column_matrix=data.get_data(headers)
max=column_matrix.max()
print "The maximum: ", max
min=column_matrix.min()
print "The minimum: ", min
range=max-min
print "range: ", range
column_matrix=column_matrix-min
normalized=column_matrix/range
return normalized
def sort(headers, data): # extension
""" Return the numeric matrices with sorted columns """
column_matrix=data.get_data(headers) # get raw matrix data for numeric values
print "\n before sorting \n "
print column_matrix
column_matrix=column_matrix.tolist()
column_array=np.asarray(column_matrix)
column_array.sort(axis=0)
print "\n \n done sorting here is your matrix \n"
return column_array
def normalize_sort(headers, data): # extension
column_matrix=data.get_data(headers)
max=column_matrix.max()
min=column_matrix.min()
range=max-min
column_matrix=column_matrix-min
normalized=column_matrix/range
print "\n before sorting \n ", normalized, "\n \n "
normalized.sort(axis=0)
print "\n after sorting \n "
return normalized
def linear_regression(d, ind, dep):
""" takes in data object and then creates a linear regression using the dependant variable"""
y=d.get_data([dep])
print "y :",y
A=d.get_data(ind)
print "A :",A
ones = np.asmatrix(np.ones( (A.shape[0]) )).transpose()
A=np.concatenate((A, ones), axis=1)
print "concatenated A :",A
AAinv=np.linalg.inv( np.dot(A.transpose(), A))
print "AAinv: \n",AAinv
"""
print "A :",A
print "y: ",y
print "AAinv: ",AAinv"""
print "shape A: ",A.shape
print "shape y :", y.shape
x=np.linalg.lstsq(A,y)
print "x :\n",x
b=x[0]
print "\n b : \n",b
N=len(y)
print "N : \n",N
C=len(b)
print "C : ",C
df_e=N-C
df_r=C-1
error=y - np.dot(A, b)
print "error: ",error
sse=np.dot(error.transpose(), error) / df_e
print "sse :",sse
stderr=np.sqrt( np.diagonal( sse[0, 0] * AAinv ) )
print "stderr: ",stderr
t = b.transpose() / stderr
print "t :", t
p=2*(1 - scipy.stats.t.cdf(abs(t), df_e))
print "p: ",p
r2=1 - error.var() / y.var()
print "R^2 :",r2, "\n \n \n \n*************************************"
return [b,sse,r2,t,p]
# This version uses SVD
def pca(d, headers, normalize=True):
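# PCA via SVD: optionally normalize the columns, subtract the column means, then take the
# SVD of the centered data D. The rows of V are the principal directions (eigenvectors),
# the eigenvalues are the squared singular values divided by (N-1), and the projected data
# is D mapped onto those directions.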
if normalize==True:
A=normalize_columns_separately(headers, d)
else:
A=d.get_data(headers)
m=mean(headers, d)
D=A-m
#calculate eigenvectors and eigenvalues
U,S,V=np.linalg.svd(D,full_matrices=False)
index=0
#get the eigenvalues using the number of degrees of freedom
for d in S:
e=(d*d)/(U.shape[0]-1)
S[index]=e
index=index+1
#the projected data
pdata=np.dot(V,(D.T))
pdata=pdata.T
pcad=data.PCAData(headers,pdata,S,V,m)
return pcad
def kmeans_numpy( d, headers, K, whiten = True):
'''Takes in a Data object, a set of headers, and the number of clusters to create
Computes and returns the codebook, codes, and representation error.
'''
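# scipy.cluster.vq usage: whiten() rescales each column by its standard deviation,
# kmeans() returns the K cluster centers (codebook) and the mean distortion, and vq()
# assigns each row to its nearest center, returning the codes and per-point distances.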
A=d.get_data(headers)
W=vq.whiten(A)
codebook, bookerror=vq.kmeans(W,K)
codes, error=vq.vq(W, codebook)
return codebook, codes, error
def kmeans_init(d, K, catergories=[]) :
#return numpy matrix with K rows of the data
print "type K :",type(K)
r=np.matrix(np.zeros(K))
c=np.matrix(np.zeros(d.shape[1]))
r=r.transpose()
retval=np.dot(r,c)
retval=retval
print "shape retval :",retval.shape
# If no categories are given, a simple way to select the means is to randomly choose K data points
if len(catergories)==0:
values=[] # values to be selected list
h=d.shape[0]
if K>h:
print "The value of K is too high"
return None
#pick random rows
while K>0:
val=random.randint(0,h-1) #pick random value
while val in values: #avoid duplicates, reselect if duplicate found
val=random.randint(0,h-1)
values.append(val) #add random index to values
retval[K-1,:]=d[val,:] #copy the randomly chosen data row into the matrix of initial means
K-=1
# Given an Nx1 matrix of categories/labels, compute the mean values of each category
# and return those as the initial set of means
else:
print "here"
unique,labels=np.unique(catergories.tolist(),return_inverse=True)
means=np.zeros((K,d.shape[1]))
for i in range(len(unique)): #for all unique values
means [i,:]=np.mean(d[labels==i,:],axis=0) #calculate means using categories
retval=means
return retval
def kmeans_classify(d, means):
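# For each data point, compute the Euclidean distance to every mean, record the index of
# the closest mean and that minimum distance, and return both as column matrices.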
ID=[] # list of ID values
mindistances=[] # list of minimum distances
for dpoint in d:
distances=[] # distances from each mean
for mean in means: # compute distance of each mean, using the distance formula
differences=dpoint-mean
squares=np.square(differences)
sums=np.sum(squares)
distance=np.sqrt(sums)
distances.append(distance) # add the distance to the distances list
ID.append(np.argmin(distances))
mindistances.append(distances[np.argmin(distances)])
retval=[]
retval.append(ID)
retval.append(mindistances)
return np.matrix(ID).transpose(), np.matrix(mindistances).transpose() # return a list of the ID values and the distances
"""
def kmeans_algorithm(A, means):
# set up some useful constants
MIN_CHANGE = 1e-7
MAX_ITERATIONS = 100
D = means.shape[1]
K = means.shape[0]
N = A.shape[0]
# iterate no more than MAX_ITERATIONS
for i in range(MAX_ITERATIONS):
# calculate the codes
codes, errors = kmeans_classify( A, means )
# calculate the new means
newmeans = np.matrix(np.zeros_like( means ))
counts = np.zeros( (K, 1) )
for j in range(N):
print "j :",j
print "A[j,:] :",A[j,:]
print "codes ", codes
print "codes[j,0] :",codes[j,0]
newmeans[codes[j,0],:] += A[j,:]
counts[codes[j,0],0] += 1.0
print "newmeans type: ",type(newmeans)
# finish calculating the means, taking into account possible zero counts
for j in range(K):
if counts[j,0] > 0.0:
newmeans[j,:] /= counts[j, 0]
else:
newmeans[j,:] = A[random.randint(0,A.shape[0]-1),:] #randint is inclusive
# test if the change is small enough
diff = np.sum(np.square(means - newmeans))
means = newmeans
if diff < MIN_CHANGE:
break
# call classify with the final means
codes, errors = kmeans_classify( A, means )
print "result: ",means, codes, errors
# return the means, codes, and errors
return (means, codes, errors)
"""
def | (A, means):
# set up some useful constants
MIN_CHANGE = 1e-7
MAX_ITERATIONS = 100
D = means.shape[1]
K = means.shape[0]
N = A.shape[0]
# iterate no more than MAX_ITERATIONS
for i in range(MAX_ITERATIONS):
# calculate the codes
codes, errors = kmeans_classify( A, | kmeans_algorithm | identifier_name |
analysis.py |
column_max=column_matrix.max(1)
column_min=column_matrix.min(1)
final=np.concatenate((column_min, column_max), axis=1)
rng=final.tolist()
return rng
def mean(headers, data):
""" Takes in a list of column headers and the Data object and returns a list of the
mean values for each column. Use the built-in numpy functions to execute this calculation."""
column_matrix=data.get_data(headers)
mean_values=column_matrix.mean(0)
return mean_values
def stdev(headers, data):
"""stdev - Takes in a list of column headers and the Data object and returns a list of the
standard deviation for each specified column. Use the built-in numpy functions to execute
this calculation."""
column_matrix=data.get_data(headers)
mean_values=column_matrix.std(0)
std_values=mean_values.tolist()
return std_values
def normalize_columns_separately(headers, data):
"""Takes in a list of column headers and the Data object and returns a matrix with each
column normalized so its minimum value is mapped to zero and its maximum value is mapped to 1."""
column_matrix=data.get_data(headers)
column_max=column_matrix.max(1)
column_min=column_matrix.min(1)
range=column_max-column_min
normalized=(column_matrix-column_min)/range
return normalized
def normalize_columns_together(headers, data):
""" Takes in a list of column headers and the Data object and returns a matrix with each entry normalized
so that the minimum value (of all the data in this set of columns) is mapped to zero and its maximum value
is mapped to 1."""
column_matrix=data.get_data(headers)
max=column_matrix.max()
print "The maximum: ", max
min=column_matrix.min()
print "The minimum: ", min
range=max-min
print "range: ", range
column_matrix=column_matrix-min
normalized=column_matrix/range
return normalized
def sort(headers, data): # extension
""" Return the numeric matrices with sorted columns """
column_matrix=data.get_data(headers) # get raw matrix data for numeric values
print "\n before sorting \n "
print column_matrix
column_matrix=column_matrix.tolist()
column_array=np.asarray(column_matrix)
column_array.sort(axis=0)
print "\n \n done sorting here is your matrix \n"
return column_array
def normalize_sort(headers, data): # extension
column_matrix=data.get_data(headers)
max=column_matrix.max()
min=column_matrix.min()
range=max-min
column_matrix=column_matrix-min
normalized=column_matrix/range
print "\n before sorting \n ", normalized, "\n \n "
normalized.sort(axis=0)
print "\n after sorting \n "
return normalized
def linear_regression(d, ind, dep):
""" takes in data object and then creates a linear regression using the dependant variable"""
y=d.get_data([dep])
print "y :",y
A=d.get_data(ind)
print "A :",A
ones = np.asmatrix(np.ones( (A.shape[0]) )).transpose()
A=np.concatenate((A, ones), axis=1)
print "concatenated A :",A
AAinv=np.linalg.inv( np.dot(A.transpose(), A))
print "AAinv: \n",AAinv
"""
print "A :",A
print "y: ",y
print "AAinv: ",AAinv"""
print "shape A: ",A.shape
print "shape y :", y.shape
x=np.linalg.lstsq(A,y)
print "x :\n",x
b=x[0]
print "\n b : \n",b
N=len(y)
print "N : \n",N
C=len(b)
print "C : ",C
df_e=N-C
df_r=C-1
error=y - np.dot(A, b)
print "error: ",error
sse=np.dot(error.transpose(), error) / df_e
print "sse :",sse
stderr=np.sqrt( np.diagonal( sse[0, 0] * AAinv ) )
print "stderr: ",stderr
t = b.transpose() / stderr
print "t :", t
p=2*(1 - scipy.stats.t.cdf(abs(t), df_e))
print "p: ",p
r2=1 - error.var() / y.var()
print "R^2 :",r2, "\n \n \n \n*************************************"
return [b,sse,r2,t,p]
# This version uses SVD
def pca(d, headers, normalize=True):
if normalize==True:
A=normalize_columns_separately(headers, d)
else:
A=d.get_data(headers)
m=mean(headers, d)
D=A-m
#calculate eigenvectors and eigenvalues
U,S,V=np.linalg.svd(D,full_matrices=False)
index=0
#get the eigenvalues using the number of degrees of freedom
for d in S:
e=(d*d)/(U.shape[0]-1)
S[index]=e
index=index+1
#the projected data
pdata=np.dot(V,(D.T))
pdata=pdata.T
pcad=data.PCAData(headers,pdata,S,V,m)
return pcad
def kmeans_numpy( d, headers, K, whiten = True):
'''Takes in a Data object, a set of headers, and the number of clusters to create
Computes and returns the codebook, codes, and representation error.
'''
A=d.get_data(headers)
W=vq.whiten(A)
codebook, bookerror=vq.kmeans(W,K)
codes, error=vq.vq(W, codebook)
return codebook, codes, error
def kmeans_init(d, K, catergories=[]) :
#return numpy matrix with K rows of the data
print "type K :",type(K)
r=np.matrix(np.zeros(K))
c=np.matrix(np.zeros(d.shape[1]))
r=r.transpose()
retval=np.dot(r,c)
retval=retval
print "shape retval :",retval.shape
# If no categories are given, a simple way to select the means is to randomly choose K data points
if len(catergories)==0:
values=[] # values to be selected list
h=d.shape[0]
if K>h:
print "The value of K is too high"
return None
#pick random rows
while K>0:
val=random.randint(0,h-1) #pick random value
while val in values: #avoid duplicates, reselect if duplicate found
val=random.randint(0,h-1)
values.append(val) #add random index to values
retval[K-1,:]=d[val,:] #copy the randomly chosen data row into the matrix of initial means
K-=1
# Given an Nx1 matrix of categories/labels, compute the mean values of each category
# and return those as the initial set of means
else:
print "here"
unique,labels=np.unique(catergories.tolist(),return_inverse=True)
means=np.zeros((K,d.shape[1]))
for i in range(len(unique)): #for all unique values
means [i,:]=np.mean(d[labels==i,:],axis=0) #calculate means using categories
retval=means
return retval
def kmeans_classify(d, means):
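# For each data point, compute the Euclidean distance to every mean, record the index of
# the closest mean and that minimum distance, and return both as column matrices.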
ID=[] # list of ID values
mindistances=[] # list of minimum distances
for dpoint in d:
distances=[] # distances from each mean
for mean in means: # compute distance of each mean, using the distance formula
differences=dpoint-mean
squares=np.square(differences)
sums=np.sum(squares)
distance=np.sqrt(sums)
distances.append(distance) # add the distance to the distances list
ID.append(np.argmin(distances))
mindistances.append(distances[np.argmin(distances)])
retval=[]
retval.append(ID)
retval.append(mindistances)
return np.matrix(ID).transpose(), np.matrix(mindistances).transpose() # return a list of the ID values and the distances
"""
def kmeans_algorithm(A, means):
# set up some useful constants
MIN_CHANGE = 1e-7
MAX_ITERATIONS = 100
D = means.shape[1]
K = means.shape[0]
N = A.shape[0]
# iterate no more than MAX_ITERATIONS
for i in range(MAX_ITERATIONS):
# calculate the codes
codes, errors = kmeans_classify( A, means )
# calculate the new means
newmeans = np.matrix(np.zeros_like( means ))
counts = np.zeros( (K, 1) )
for j in range(N):
print "j :",j
print "A[j,:] :",A[j,:]
print "codes ", codes
print "codes[j,0] :",codes[j,0]
newmeans[codes[j,0],:] += A[j,:]
counts[codes[j,0],0] += 1.0
print "newmeans type: ",type(newmeans)
# finish calculating the means, taking into account possible zero counts
for j in range(K):
if counts[j,0] > 0.0:
newmeans[j,:] /= counts | print "wrong headers, not present in data Object"
return [] | conditional_block |
|
analysis.py | and the Data object and returns a matrix with each
column normalized so its minimum value is mapped to zero and its maximum value is mapped to 1."""
column_matrix=data.get_data(headers)
column_max=column_matrix.max(0) #column-wise maxima (axis=0)
column_min=column_matrix.min(0) #column-wise minima
range=column_max-column_min
normalized=(column_matrix-column_min)/range
return normalized
def normalize_columns_together(headers, data):
""" Takes in a list of column headers and the Data object and returns a matrix with each entry normalized
so that the minimum value (of all the data in this set of columns) is mapped to zero and its maximum value
is mapped to 1."""
column_matrix=data.get_data(headers)
max=column_matrix.max()
print "The maximum: ", max
min=column_matrix.min()
print "The minimum: ", min
range=max-min
print "range: ", range
column_matrix=column_matrix-min
normalized=column_matrix/range
return normalized
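# Worked example (hypothetical 2x2 input): for [[0, 10], [5, 20]] the global minimum is 0
# and the global maximum is 20, so normalize_columns_together maps the matrix to
# [[0.0, 0.5], [0.25, 1.0]]; normalize_columns_separately instead rescales each column on
# its own range.
def _normalize_together_example():
    column_matrix = np.matrix([[0.0, 10.0], [5.0, 20.0]])
    lo, hi = column_matrix.min(), column_matrix.max()
    return (column_matrix - lo) / (hi - lo)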
def sort(headers, data): # extension
""" Return the numeric matrices with sorted columns """
column_matrix=data.get_data(headers) # get raw matrix data for numeric values
print "\n before sorting \n "
print column_matrix
column_matrix=column_matrix.tolist()
column_array=np.asarray(column_matrix)
column_array.sort(axis=0)
print "\n \n done sorting here is your matrix \n"
return column_array
def normalize_sort(headers, data): # extension
column_matrix=data.get_data(headers)
max=column_matrix.max()
min=column_matrix.min()
range=max-min
column_matrix=column_matrix-min
normalized=column_matrix/range
print "\n before sorting \n ", normalized, "\n \n "
normalized.sort(axis=0)
print "\n after sorting \n "
return normalized
def linear_regression(d, ind, dep):
""" takes in data object and then creates a linear regression using the dependant variable"""
y=d.get_data([dep])
print "y :",y
A=d.get_data(ind)
print "A :",A
ones = np.asmatrix(np.ones( (A.shape[0]) )).transpose()
A=np.concatenate((A, ones), axis=1)
print "concatenated A :",A
AAinv=np.linalg.inv( np.dot(A.transpose(), A))
print "AAinv: \n",AAinv
"""
print "A :",A
print "y: ",y
print "AAinv: ",AAinv"""
print "shape A: ",A.shape
print "shape y :", y.shape
x=np.linalg.lstsq(A,y)
print "x :\n",x
b=x[0]
print "\n b : \n",b
N=len(y)
print "N : \n",N
C=len(b)
print "C : ",C
df_e=N-C
df_r=C-1
error=y - np.dot(A, b)
print "error: ",error
sse=np.dot(error.transpose(), error) / df_e
print "sse :",sse
stderr=np.sqrt( np.diagonal( sse[0, 0] * AAinv ) )
print "stderr: ",stderr
t = b.transpose() / stderr
print "t :", t
p=2*(1 - scipy.stats.t.cdf(abs(t), df_e))
print "p: ",p
r2=1 - error.var() / y.var()
print "R^2 :",r2, "\n \n \n \n*************************************"
return [b,sse,r2,t,p]
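# Standalone sketch (hypothetical data) of the quantities returned above:
# b minimizes ||A*b - y||^2, sse is the residual variance estimate, t = b / stderr,
# p is the two-sided p-value, and R^2 = 1 - var(error) / var(y).
def _linear_regression_example():
    x = np.random.rand(30, 1)
    A = np.concatenate((x, np.ones((30, 1))), axis=1)
    y = 2.0 * x + 1.0 + 0.1 * np.random.randn(30, 1)
    b = np.linalg.lstsq(A, y)[0]       # slope and intercept, roughly [2.0, 1.0]
    error = y - np.dot(A, b)
    r2 = 1 - error.var() / y.var()
    return b, r2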
# This version uses SVD
def pca(d, headers, normalize=True):
if normalize==True:
A=normalize_columns_separately(headers, d)
else:
A=d.get_data(headers)
m=mean(headers, d)
D=A-m
#calculate eigenvectors and eigenvalues
U,S,V=np.linalg.svd(D,full_matrices=False)
index=0
#get the eigenvalues using the number of degrees of freedom
for d in S: | index=index+1
#the projected data
pdata=np.dot(V,(D.T))
pdata=pdata.T
pcad=data.PCAData(headers,pdata,S,V,m)
return pcad
def kmeans_numpy( d, headers, K, whiten = True):
'''Takes in a Data object, a set of headers, and the number of clusters to create
Computes and returns the codebook, codes, and representation error.
'''
A=d.get_data(headers)
W=vq.whiten(A)
codebook, bookerror=vq.kmeans(W,K)
codes, error=vq.vq(W, codebook)
return codebook, codes, error
def kmeans_init(d, K, catergories=[]) :
#return numpy matrix with K rows of the data
print "type K :",type(K)
r=np.matrix(np.zeros(K))
c=np.matrix(np.zeros(d.shape[1]))
r=r.transpose()
retval=np.dot(r,c)
retval=retval
print "shape retval :",retval.shape
# If no categories are given, a simple way to select the means is to randomly choose K data points
if len(catergories)==0:
values=[] # values to be selected list
h=d.shape[0]
if K>h:
print "The value of K is too high"
return None
#pick random rows
while K>0:
val=random.randint(0,h-1) #pick random value
while val in values: #avoid duplicates, reselect if duplicate found
val=random.randint(0,h-1)
values.append(val) #add random index to values
retval[K-1,:]=d[val,:] #store the randomly chosen row as an initial mean
K-=1
# given an Nx1 matrix of categories/labels, then compute the mean values of each category
# and return those as the initial set of means
else:
print "here"
unique,labels=np.unique(catergories.tolist(),return_inverse=True)
means=np.zeros((K,d.shape[1]))
for i in range(len(unique)): #for all unique values
means [i,:]=np.mean(d[labels==i,:],axis=0) #calculate means using categories
retval=means
return retval
def kmeans_classify(d, means):
ID=[] # list of ID values
mindistances=[] # minimum distances algorithm
for dpoint in d:
distances=[] # distances from each mean
for mean in means: # compute distance of each mean, using the distance formula
differences=dpoint-mean
squares=np.square(differences)
sums=np.sum(squares)
distance=np.sqrt(sums)
distances.append(distance) # add the distance to the distances list
ID.append(np.argmin(distances))
mindistances.append(distances[np.argmin(distances)])
retval=[]
retval.append(ID)
retval.append(mindistances)
return np.matrix(ID).transpose(), np.matrix(mindistances).transpose() # return a list of the ID values and the distances
"""
def kmeans_algorithm(A, means):
# set up some useful constants
MIN_CHANGE = 1e-7
MAX_ITERATIONS = 100
D = means.shape[1]
K = means.shape[0]
N = A.shape[0]
# iterate no more than MAX_ITERATIONS
for i in range(MAX_ITERATIONS):
# calculate the codes
codes, errors = kmeans_classify( A, means )
# calculate the new means
newmeans = np.matrix(np.zeros_like( means ))
counts = np.zeros( (K, 1) )
for j in range(N):
print "j :",j
print "A[j,:] :",A[j,:]
print "codes ", codes
print "codes[j,0] :",codes[j,0]
newmeans[codes[j,0],:] += A[j,:]
counts[codes[j,0],0] += 1.0
print "newmeans type: ",type(newmeans)
# finish calculating the means, taking into account possible zero counts
for j in range(K):
if counts[j,0] > 0.0:
newmeans[j,:] /= counts[j, 0]
else:
newmeans[j,:] = A[random.randint(0,A.shape[0]-1),:] #randint is inclusive
# test if the change is small enough
diff = np.sum(np.square(means - newmeans))
means = newmeans
if diff < MIN_CHANGE:
break
# call classify with the final means
codes, errors = kmeans_classify( A, means )
print "result: ",means, codes, errors
# return the means, codes, and errors
return (means, codes, errors)
"""
def kmeans_algorithm(A, means):
# set up some useful constants
MIN_CHANGE = 1e-7
MAX_ITERATIONS = 100
D = means.shape[1]
K = means.shape[0]
N = A.shape[0]
# iterate no more than MAX_ITERATIONS
for i in range(MAX_ITERATIONS):
# calculate the codes
codes, errors = kmeans_classify( A | e=(d*d)/(U.shape[0]-1)
S[index]=e | random_line_split |
analysis.py | object and returns a matrix with each
column normalized so its minimum value is mapped to zero and its maximum value is mapped to 1."""
column_matrix=data.get_data(headers)
column_max=column_matrix.max(0) #column-wise maxima (axis=0)
column_min=column_matrix.min(0) #column-wise minima
range=column_max-column_min
normalized=(column_matrix-column_min)/range
return normalized
def normalize_columns_together(headers, data):
""" Takes in a list of column headers and the Data object and returns a matrix with each entry normalized
so that the minimum value (of all the data in this set of columns) is mapped to zero and its maximum value
is mapped to 1."""
column_matrix=data.get_data(headers)
max=column_matrix.max()
print "The maximum: ", max
min=column_matrix.min()
print "The minimum: ", min
range=max-min
print "range: ", range
column_matrix=column_matrix-min
normalized=column_matrix/range
return normalized
def sort(headers, data): # extension
""" Return the numeric matrices with sorted columns """
column_matrix=data.get_data(headers) # get raw matrix data for numeric values
print "\n before sorting \n "
print column_matrix
column_matrix=column_matrix.tolist()
column_array=np.asarray(column_matrix)
column_array.sort(axis=0)
print "\n \n done sorting here is your matrix \n"
return column_array
def normalize_sort(headers, data): # extension
column_matrix=data.get_data(headers)
max=column_matrix.max()
min=column_matrix.min()
range=max-min
column_matrix=column_matrix-min
normalized=column_matrix/range
print "\n before sorting \n ", normalized, "\n \n "
normalized.sort(axis=0)
print "\n after sorting \n "
return normalized
def linear_regression(d, ind, dep):
""" takes in data object and then creates a linear regression using the dependant variable"""
y=d.get_data([dep])
print "y :",y
A=d.get_data(ind)
print "A :",A
ones = np.asmatrix(np.ones( (A.shape[0]) )).transpose()
A=np.concatenate((A, ones), axis=1)
print "concatenated A :",A
AAinv=np.linalg.inv( np.dot(A.transpose(), A))
print "AAinv: \n",AAinv
"""
print "A :",A
print "y: ",y
print "AAinv: ",AAinv"""
print "shape A: ",A.shape
print "shape y :", y.shape
x=np.linalg.lstsq(A,y)
print "x :\n",x
b=x[0]
print "\n b : \n",b
N=len(y)
print "N : \n",N
C=len(b)
print "C : ",C
df_e=N-C
df_r=C-1
error=y - np.dot(A, b)
print "error: ",error
sse=np.dot(error.transpose(), error) / df_e
print "sse :",sse
stderr=np.sqrt( np.diagonal( sse[0, 0] * AAinv ) )
print "stderr: ",stderr
t = b.transpose() / stderr
print "t :", t
p=2*(1 - scipy.stats.t.cdf(abs(t), df_e))
print "p: ",p
r2=1 - error.var() / y.var()
print "R^2 :",r2, "\n \n \n \n*************************************"
return [b,sse,r2,t,p]
# This version uses SVD
def pca(d, headers, normalize=True):
if normalize==True:
A=normalize_columns_separately(headers, d)
else:
A=d.get_data(headers)
m=mean(headers, d)
D=A-m
#calculate eigenvectors and eigenvalues
U,S,V=np.linalg.svd(D,full_matrices=False)
index=0
#get the eigenvalues using the number of degrees of freedom
for d in S:
e=(d*d)/(U.shape[0]-1)
S[index]=e
index=index+1
#the projected data
pdata=np.dot(V,(D.T))
pdata=pdata.T
pcad=data.PCAData(headers,pdata,S,V,m)
return pcad
def kmeans_numpy( d, headers, K, whiten = True):
'''Takes in a Data object, a set of headers, and the number of clusters to create
Computes and returns the codebook, codes, and representation error.
'''
A=d.get_data(headers)
W=vq.whiten(A)
codebook, bookerror=vq.kmeans(W,K)
codes, error=vq.vq(W, codebook)
return codebook, codes, error
def kmeans_init(d, K, catergories=[]) :
#return numpy matrix with K rows of the data
print "type K :",type(K)
r=np.matrix(np.zeros(K))
c=np.matrix(np.zeros(d.shape[1]))
r=r.transpose()
retval=np.dot(r,c)
retval=retval
print "shape retval :",retval.shape
# If no categories are given, a simple way to select the means is to randomly choose K data points
if len(catergories)==0:
values=[] # values to be selected list
h=d.shape[0]
if K>h:
print "The value of K is too high"
return None
#pick random rows
while K>0:
val=random.randint(0,h-1) #pick random value
while val in values: #avoid duplicates, reselect if duplicate found
val=random.randint(0,h-1)
values.append(val) #add random index to values
retval[K-1,:]=d[val,:] #store the randomly chosen row as an initial mean
K-=1
# given an Nx1 matrix of categories/labels, then compute the mean values of each category
# and return those as the initial set of means
else:
print "here"
unique,labels=np.unique(catergories.tolist(),return_inverse=True)
means=np.zeros((K,d.shape[1]))
for i in range(len(unique)): #for all unique values
means [i,:]=np.mean(d[labels==i,:],axis=0) #calculate means using categories
retval=means
return retval
def kmeans_classify(d, means):
|
"""
def kmeans_algorithm(A, means):
# set up some useful constants
MIN_CHANGE = 1e-7
MAX_ITERATIONS = 100
D = means.shape[1]
K = means.shape[0]
N = A.shape[0]
# iterate no more than MAX_ITERATIONS
for i in range(MAX_ITERATIONS):
# calculate the codes
codes, errors = kmeans_classify( A, means )
# calculate the new means
newmeans = np.matrix(np.zeros_like( means ))
counts = np.zeros( (K, 1) )
for j in range(N):
print "j :",j
print "A[j,:] :",A[j,:]
print "codes ", codes
print "codes[j,0] :",codes[j,0]
newmeans[codes[j,0],:] += A[j,:]
counts[codes[j,0],0] += 1.0
print "newmeans type: ",type(newmeans)
# finish calculating the means, taking into account possible zero counts
for j in range(K):
if counts[j,0] > 0.0:
newmeans[j,:] /= counts[j, 0]
else:
newmeans[j,:] = A[random.randint(0,A.shape[0]-1),:] #randint is inclusive
# test if the change is small enough
diff = np.sum(np.square(means - newmeans))
means = newmeans
if diff < MIN_CHANGE:
break
# call classify with the final means
codes, errors = kmeans_classify( A, means )
print "result: ",means, codes, errors
# return the means, codes, and errors
return (means, codes, errors)
"""
def kmeans_algorithm(A, means):
# set up some useful constants
MIN_CHANGE = 1e-7
MAX_ITERATIONS = 100
D = means.shape[1]
K = means.shape[0]
N = A.shape[0]
# iterate no more than MAX_ITERATIONS
for i in range(MAX_ITERATIONS):
# calculate the codes
codes, errors = kmeans_classify( A, | ID=[] # list of ID values
mindistances=[] # minimum distances algorithm
for dpoint in d:
distances=[] # distances from each mean
for mean in means: # compute distance of each mean, using the distance formula
differences=dpoint-mean
squares=np.square(differences)
sums=np.sum(squares)
distance=np.sqrt(sums)
distances.append(distance) # add the distance to the distances list
ID.append(np.argmin(distances))
mindistances.append(distances[np.argmin(distances)])
retval=[]
retval.append(ID)
retval.append(mindistances)
return np.matrix(ID).transpose(), np.matrix(mindistances).transpose() # return a list of the ID values and the distances | identifier_body |
bundle.js | 0 * placeIndex;
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
totalOutput.innerHTML = totalSum;
}
});
days.addEventListener('blur', function(){
daysSum = +this.value;
totalSum = (peopleSum + daysSum) * 4000 * placeIndex;
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
totalOutput.innerHTML = totalSum;
}
});
place.addEventListener('change', function(){
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
let a = totalSum;
totalOutput.innerHTML = a * this.options[this.selectedIndex].value;
}
});
}
module.exports = calculator;
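// Worked example (not part of the original bundle): with 2 people, 3 days and a place
// factor of 2 the formula above gives (2 + 3) * 4000 * 2 = 40000.
// A minimal standalone version of the same pricing rule:
function calculatorExample(peopleCount, dayCount, placeFactor) {
    if (!peopleCount || !dayCount) {
        return 0; // mirrors the guard on empty or zero inputs above
    }
    return (peopleCount + dayCount) * 4000 * placeFactor;
}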
/***/ }),
/***/ "./js/parts/form.js":
/*!**************************!*\
!*** ./js/parts/form.js ***!
\**************************/
/*! no static exports found */
/***/ (function(module, exports) {
function form() {
// FORM
let message = {
loading: 'Загрузка...',
success: 'Спасибо! Скоро мы с вами свяжемся.',
failure: 'Что-то пошло не так.',
};
let form = document.querySelectorAll('form'), //select two forms: from modal and "we will get in touch" section at the end input,
input,//will be used to select the input(s) of one of the forms
statusMessage = document.createElement('div');//to display a message according to stage and successfulness
form = Array.from(form);//to create an array from a nodeList(you can't put eventListeners on nodeLists)
statusMessage.classList.add('status');
function sendForms(){
return new Promise(function(resolve, reject){
for (let i = 0; i < form.length; i++){//assigning eventListeners to each form
form[i].addEventListener('submit', function(){
event.preventDefault();
form = this;
form.appendChild(statusMessage);
input = form.getElementsByTagName('input');
let request = new XMLHttpRequest();
request.open('POST', 'server.php');
//request.setRequestHeader('Content-type', 'application/x-www-form-urlencoded');
request.setRequestHeader('Content-type', 'application/json; charset=utf-8');
let formData = new FormData(form);
//request.send(formData)
let obj = {};
formData.forEach(function(value, key){
obj[key] = value;
});
let json = JSON.stringify(obj);
request.send(json);
request.addEventListener('readystatechange', function(){
if (request.readyState < 4){
statusMessage.innerHTML = message.loading;
} else if (request.readyState === 4 && request.status == 200){
resolve();
} else {
reject();
}
});
});
}
});
}
sendForms()
.then(() => statusMessage.innerHTML = message.success)
.catch(() => statusMessage.innerHTML = message.failure);
}
module.exports = form;
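// Sketch (hypothetical field names): the submit handler above serializes the form's
// inputs into a JSON object before POSTing, so a form with inputs named "name" and
// "phone" produces a body like {"name":"Ivan","phone":"+7 ..."}.
function formDataToJsonExample(formElement) {
    var obj = {};
    new FormData(formElement).forEach(function(value, key) {
        obj[key] = value;
    });
    return JSON.stringify(obj);
}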
/***/ }),
/***/ "./js/parts/modal.js":
/*!***************************!*\
!*** ./js/parts/modal.js ***!
\***************************/
/*! no static exports found */
/***/ (function(module, exports) {
function modal() {
//MODAL WINDOW
let moreButton = document.querySelector('.more'),//The "Узнать больше" button
descriptionBtn = document.querySelectorAll('.description-btn'),//the buttons from the tabs
overlay = document.querySelector('.overlay'),//the modal window
closeCross = document.querySelector('.popup-close');
descriptionBtn = Array.from(descriptionBtn);//making an array from the nodelist to push the moreButton
descriptionBtn.push(moreButton);
descriptionBtn.forEach(function(el){
el.addEventListener('click', function () {
overlay.style.display = 'block';
this.classList.add('more-splash');//this means current element (similar to event.target somehow)
document.body.style.overflow = 'hidden';//to disable scroll on the page while the modal window is visible
});
});
closeCross.addEventListener('click', function() {
overlay.style.display = 'none';
moreButton.classList.remove('more-splash');
document.body.style.overflow = '';
});
}
module.exports = modal;
/***/ }),
/***/ "./js/parts/slider.js":
/*!****************************!*\
!*** ./js/parts/slider.js ***!
\****************************/
/*! no static exports found */
/***/ (function(module, exports) {
function slider() {
// SLIDER
let slides = document.querySelectorAll('.slider-item'),
prev = document.querySelector('.prev'),
next = document.querySelector('.next'),
dotWrap = document.querySelector('.slider-dots'),
dots = document.querySelectorAll('.dot'),
sliderIndex = 0;
dots = Array.from(dots);// to use indexOf when click on a dot
function showSlide() {
// the first slide will be shown when a user clicks right being on the last slide
if (sliderIndex == slides.length){
sliderIndex = 0;
}
// the last slide will be shown when a user clicks left being on the first slide
if (sliderIndex < 0){
sliderIndex = 3;
}
slides.forEach((item) => item.style.display = 'none');
slides[sliderIndex].style.display = 'block';
dots.forEach((item) => item.classList.remove('dot-active'));
dots[sliderIndex].classList.add('dot-active');
}
showSlide();// to show only the first slide
prev.addEventListener('click', function() {
sliderIndex--;
showSlide();
});
next.addEventListener('click', function() {
sliderIndex++;
showSlide();
});
dotWrap.addEventListener('click', function(event){
if (event.target.classList.contains('dot')){
//getting index of the clicked dot and assigning it to the appropriate slide
sliderIndex = dots.indexOf(event.target);
showSlide();
}
});
}
module.exports = slider;
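// Sketch: showSlide() above wraps the index at both ends -- with 4 slides, "next" on
// slide 3 goes back to 0 and "prev" on slide 0 jumps to 3. The same rule in isolation:
function wrapIndexExample(index, slideCount) {
    if (index >= slideCount) return 0;
    if (index < 0) return slideCount - 1;
    return index;
}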
/***/ }),
/***/ "./js/parts/tabs.js":
/*!**************************!*\
!*** ./js/parts/tabs.js ***!
\**************************/
/*! no static exports found */
/***/ (function(module, exports) {
function tabs() {
// TABS
//Working with div class='info'
let tab = document.querySelectorAll('.info-header-tab'),//get the buttons in the header
info = document.querySelector('.info-header'),//parental element for the buttons
tabContent = document.querySelectorAll('.info-tabcontent');//content that must be assigned to a particular tab
function hideTabContent(a) {// to hide all the content elements
for (let i = a; i < tabContent.length; i++){
tabContent[i].classList.remove('show');
tabContent[i].classList.add('hide');//manipulate classes in the css file to hide the content elements
}
}
hideTabContent(1);// Need to hide all the content elements except for the first one
function showTabContent(b){//to show the content element we need
if (tabContent[b].classList.contains('hide')){
tabContent[b].classList.remove('hide');
tabContent[b].classList.add('show');
}
}
// attaching EventListener to the parent using delegation
info.addEventListener('click', function(event){
let target = event.target;
if (target && target.classList.contains('info-header-tab')){
// Using for-loop to assign a particular content element to a particular tab
for (let i = 0; i < tab.length; i++) {
if (target == tab[i]){
hideTabContent(0);//to hide the first content element
showTabContent(i);//to display the one that matches the target of our click
break;
}
}
}
});
}
module.exports = tabs;
/***/ }),
/***/ "./js/parts/timer.js":
/*!***************************!*\
!*** ./js/parts/timer.js ***!
\***************************/
/*! no static exports found */
/***/ (function(module, exports) {
function timer() {
// TIMER
let deadline = "2019-06-14";//if the date expires, change the date so the timer works appropriately
function getRemainingTime(endtime) {
let t = Date.parse(endtime) - Date.parse(new Date()),//difference between the date when timer expires and the moment when the function is executed (ms)
seconds = Math.floor((t/1000) % 60),
minutes = Math.floor((t/1000/60) % 60),
hours = Math.floor((t/(1000*60*60)));
if (t < 0){ //making the timer look nice on the page in case the date has expired
seconds = 0;
minutes = 0;
hours = 0;
}
return {
'total' : t,
'seconds' : seconds,
'minutes' : minutes,
'hours' : hours
};
}
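// Sketch (not used by the bundle): the same decomposition as getRemainingTime for a fixed
// remaining time, e.g. 3723000 ms -> hours 1, minutes 2, seconds 3.
function splitMillisecondsExample(t) {
    return {
        seconds: Math.floor((t / 1000) % 60),
        minutes: Math.floor((t / 1000 / 60) % 60),
        hours: Math.floor(t / (1000 * 60 * 60))
    };
}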
function setClock (id, endtime) { //id - the timer div's id
let timer = document.getElementById(id),
hours = timer.querySelector('.hours'),
minutes = timer.querySelector('.minutes'),
seconds = timer.querySelector('.seconds'),
timeInterval = setInterval(updateClock, 1000);
function updateClock() {
let t = getRemainingTime(endtime);
| function | identifier_name |
|
bundle.js | && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = "./js/script.js");
/******/ })
/************************************************************************/
/******/ ({
/***/ "./js/parts/calculator.js":
/*!********************************!*\
!*** ./js/parts/calculator.js ***!
\********************************/
/*! no static exports found */
/***/ (function(module, exports) {
function calculator() {
//CALCULATOR
let people = document.querySelectorAll('.counter-block-input')[0],
days = document.querySelectorAll('.counter-block-input')[1],
place = document.querySelector('#select'),
totalOutput = document.querySelector('#total'),
peopleSum = 0,
daysSum = 0,
placeIndex = +place.options[0].value,//index given by default
totalSum;
totalOutput.innerHTML = 0;
people.value = 0;
days.value = 0;
people.addEventListener('blur', function(){
peopleSum = +this.value;
totalSum = (peopleSum + daysSum) * 4000 * placeIndex;
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
totalOutput.innerHTML = totalSum;
}
});
days.addEventListener('blur', function(){
daysSum = +this.value;
totalSum = (peopleSum + daysSum) * 4000 * placeIndex;
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
totalOutput.innerHTML = totalSum;
}
});
place.addEventListener('change', function(){
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
let a = totalSum;
totalOutput.innerHTML = a * this.options[this.selectedIndex].value;
}
});
}
module.exports = calculator;
/***/ }),
/***/ "./js/parts/form.js":
/*!**************************!*\
!*** ./js/parts/form.js ***!
\**************************/
/*! no static exports found */
/***/ (function(module, exports) {
function form() {
// FORM
let message = {
loading: 'Загрузка...',
success: 'Спасибо! Скоро мы с вами свяжемся.',
failure: 'Что-то пошло не так.',
};
let form = document.querySelectorAll('form'), //select two forms: from modal and "we will get in touch" section at the end input,
input,//will be used to select the input(s) of one of the forms
statusMessage = document.createElement('div');//to display a message according to stage and successfulness
form = Array.from(form);//to create an array from a nodeList(you can't put eventListeners on nodeLists)
statusMessage.classList.add('status');
function sendForms(){
return new Promise(function(resolve, reject){
for (let i = 0; i < form.length; i++){//assigning eventListeners to each form
form[i].addEventListener('submit', function(){
event.preventDefault();
form = this;
form.appendChild(statusMessage);
input = form.getElementsByTagName('input');
let request = new XMLHttpRequest();
request.open('POST', 'server.php');
//request.setRequestHeader('Content-type', 'application/x-www-form-urlencoded');
request.setRequestHeader('Content-type', 'application/json; charset=utf-8');
let formData = new FormData(form);
//request.send(formData)
let obj = {};
formData.forEach(function(value, key){
obj[key] = value;
});
let json = JSON.stringify(obj);
request.send(json);
request.addEventListener('readystatechange', function(){
if (request.readyState < 4){
statusMessage.innerHTML = message.loading;
} else if (request.readyState === 4 && request.status == 200){
resolve();
} else {
reject();
}
});
});
}
});
}
sendForms()
.then(() => statusMessage.innerHTML = message.success)
.catch(() => statusMessage.innerHTML = message.failure);
}
module.exports = form;
/***/ }),
/***/ "./js/parts/modal.js":
/*!***************************!*\
!*** ./js/parts/modal.js ***!
\***************************/
/*! no static exports found */
/***/ (function(module, exports) {
function modal() {
//MODAL WINDOW
let moreButton = document.querySelector('.more'),//The "Узнать больше" button
descriptionBtn = document.querySelectorAll('.description-btn'),//the buttons from the tabs
overlay = document.querySelector('.overlay'),//the modal window
closeCross = document.querySelector('.popup-close');
descriptionBtn = Array.from(descriptionBtn);//making an array from the nodelist to push the moreButton
descriptionBtn.push(moreButton);
descriptionBtn.forEach(function(el){
el.addEventListener('click', function () {
overlay.style.display = 'block';
this.classList.add('more-splash');//this means current element (similar to event.target somehow)
document.body.style.overflow = 'hidden';//to disable scroll on the page while the modal window is visible
});
});
closeCross.addEventListener('click', function() {
overlay.style.display = 'none';
moreButton.classList.remove('more-splash');
document.body.style.overflow = '';
});
}
module.exports = modal;
/***/ }),
/***/ "./js/parts/slider.js":
/*!****************************!*\
!*** ./js/parts/slider.js ***!
\****************************/
/*! no static exports found */
/***/ (function(module, exports) {
function slider() {
// SLIDER
let slides = document.querySelectorAll('.slider-item'),
prev = document.querySelector('.prev'),
next = document.querySelector('.next'),
dotWrap = document.querySelector('.slider-dots'),
dots = document.querySelectorAll('.dot'),
sliderIndex = 0;
dots = Array.from(dots);// to use indexOf when click on a dot
function showSlide() {
// the first slide will be shown when a user clicks right being on the last slide
if (sliderIndex == slides.length){
sliderIndex = 0;
}
// the last slide will be shown when a user clicks left being on the first slide
if (sliderIndex < 0){
sliderIndex = 3;
}
slides.forEach((item) => item.style.display = 'none');
slides[sliderIndex].style.display = 'block';
dots.forEach((item) => item.classList.remove('dot-active'));
dots[sliderIndex].classList.add('dot-active');
}
showSlide();// to show only the first slide
prev.addEventListener('click', function() {
sliderIndex--;
showSlide();
});
next.addEventListener('click', function() {
sliderIndex++;
showSlide();
});
dotWrap.addEventListener('click', function(event){
if (event.target.classList.contains('dot')){
//getting index of the clicked dot and assigning it to the appropriate slide
sliderIndex = dots.indexOf(event.target);
showSlide();
}
});
}
module.exports = slider;
/***/ }),
/***/ "./js/parts/tabs.js":
/*!**************************!*\
!*** ./js/parts/tabs.js ***!
\**************************/
/*! no static exports found */
/***/ (function(module, exports) {
function tabs() {
// TABS
//Working with div class='info'
let tab = document.querySelectorAll('.info-header-tab'),//get the buttons in the header
info = document.querySelector('.info-header'),//parental element for the buttons
tabContent = document.querySelectorAll('.info-tabcontent');//content that must be assigned to a particular tab
function hideTabContent(a) {// to hide all the content elements
for (let i = a; i < tabContent.length; i++){
tabContent[i].classList.remove('show');
tabContent[i].classList.add('hide');//manipulate classes in the css file to hide the content elements
}
}
hideTabContent(1);// Need to hide all the content elements except for the first one
function showTabContent(b){//to show the content element we need
if (tabContent[b].c | lassList.contains('hide')){
tabContent[b].classList.remove('hide');
tabContent[b].classList.add('show');
}
}
// attaching EventListener to the parent using delegation
| identifier_body |
|
bundle.js | namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = "./js/script.js");
/******/ })
/************************************************************************/
/******/ ({
/***/ "./js/parts/calculator.js":
/*!********************************!*\
!*** ./js/parts/calculator.js ***!
\********************************/
/*! no static exports found */
/***/ (function(module, exports) {
function calculator() {
//CALCULATOR
let people = document.querySelectorAll('.counter-block-input')[0],
days = document.querySelectorAll('.counter-block-input')[1],
place = document.querySelector('#select'),
totalOutput = document.querySelector('#total'),
peopleSum = 0,
daysSum = 0,
placeIndex = +place.options[0].value,//index given by default
totalSum;
totalOutput.innerHTML = 0;
people.value = 0;
days.value = 0;
people.addEventListener('blur', function(){
peopleSum = +this.value;
totalSum = (peopleSum + daysSum) * 4000 * placeIndex;
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else |
});
days.addEventListener('blur', function(){
daysSum = +this.value;
totalSum = (peopleSum + daysSum) * 4000 * placeIndex;
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
totalOutput.innerHTML = totalSum;
}
});
place.addEventListener('change', function(){
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
let a = totalSum;
totalOutput.innerHTML = a * this.options[this.selectedIndex].value;
}
});
}
module.exports = calculator;
/***/ }),
/***/ "./js/parts/form.js":
/*!**************************!*\
!*** ./js/parts/form.js ***!
\**************************/
/*! no static exports found */
/***/ (function(module, exports) {
function form() {
// FORM
let message = {
loading: 'Загрузка...',
success: 'Спасибо! Скоро мы с вами свяжемся.',
failure: 'Что-то пошло не так.',
};
let form = document.querySelectorAll('form'), //select two forms: from modal and "we will get in touch" section at the end input,
input,//will be used to select the input(s) of one of the forms
statusMessage = document.createElement('div');//to display a message according to stage and successfulness
form = Array.from(form);//to create an array from a nodeList(you can't put eventListeners on nodeLists)
statusMessage.classList.add('status');
function sendForms(){
return new Promise(function(resolve, reject){
for (let i = 0; i < form.length; i++){//assigning eventListeners to each form
form[i].addEventListener('submit', function(){
event.preventDefault();
form = this;
form.appendChild(statusMessage);
input = form.getElementsByTagName('input');
let request = new XMLHttpRequest();
request.open('POST', 'server.php');
//request.setRequestHeader('Content-type', 'application/x-www-form-urlencoded');
request.setRequestHeader('Content-type', 'application/json; charset=utf-8');
let formData = new FormData(form);
//request.send(formData)
let obj = {};
formData.forEach(function(value, key){
obj[key] = value;
});
let json = JSON.stringify(obj);
request.send(json);
request.addEventListener('readystatechange', function(){
if (request.readyState < 4){
statusMessage.innerHTML = message.loading;
} else if (request.readyState === 4 && request.status == 200){
resolve();
} else {
reject();
}
});
});
}
});
}
sendForms()
.then(() => statusMessage.innerHTML = message.success)
.catch(() => statusMessage.innerHTML = message.failure);
}
module.exports = form;
/***/ }),
/***/ "./js/parts/modal.js":
/*!***************************!*\
!*** ./js/parts/modal.js ***!
\***************************/
/*! no static exports found */
/***/ (function(module, exports) {
function modal() {
//MODAL WINDOW
let moreButton = document.querySelector('.more'),//The "Узнать больше" button
descriptionBtn = document.querySelectorAll('.description-btn'),//the buttons from the tabs
overlay = document.querySelector('.overlay'),//the modal window
closeCross = document.querySelector('.popup-close');
descriptionBtn = Array.from(descriptionBtn);//making an array from the nodelist to push the moreButton
descriptionBtn.push(moreButton);
descriptionBtn.forEach(function(el){
el.addEventListener('click', function () {
overlay.style.display = 'block';
this.classList.add('more-splash');//this means current element (similar to event.target somehow)
document.body.style.overflow = 'hidden';//to disable scroll on the page while the modal window is visible
});
});
closeCross.addEventListener('click', function() {
overlay.style.display = 'none';
moreButton.classList.remove('more-splash');
document.body.style.overflow = '';
});
}
module.exports = modal;
/***/ }),
/***/ "./js/parts/slider.js":
/*!****************************!*\
!*** ./js/parts/slider.js ***!
\****************************/
/*! no static exports found */
/***/ (function(module, exports) {
function slider() {
// SLIDER
let slides = document.querySelectorAll('.slider-item'),
prev = document.querySelector('.prev'),
next = document.querySelector('.next'),
dotWrap = document.querySelector('.slider-dots'),
dots = document.querySelectorAll('.dot'),
sliderIndex = 0;
dots = Array.from(dots);// to use indexOf when click on a dot
function showSlide() {
// the first slide will be shown when a user clicks right being on the last slide
if (sliderIndex == slides.length){
sliderIndex = 0;
}
// the last slide will be shown when a user clicks left being on the first slide
if (sliderIndex < 0){
sliderIndex = 3;
}
slides.forEach((item) => item.style.display = 'none');
slides[sliderIndex].style.display = 'block';
dots.forEach((item) => item.classList.remove('dot-active'));
dots[sliderIndex].classList.add('dot-active');
}
showSlide();// to show only the first slide
prev.addEventListener('click', function() {
sliderIndex--;
showSlide();
});
next.addEventListener('click', function() {
sliderIndex++;
showSlide();
});
dotWrap.addEventListener('click', function(event){
if (event.target.classList.contains('dot')){
//getting index of the clicked dot and assigning it to the appropriate slide
sliderIndex = dots.indexOf(event.target);
showSlide();
}
});
}
module.exports = slider;
/***/ }),
/***/ "./js/parts/tabs.js":
/*!**************************!*\
!*** ./js/parts/tabs.js ***!
\**************************/
/*! no static exports found */
/***/ (function(module, exports) {
function tabs() {
// TABS
//Working with div class='info'
let tab = document.querySelectorAll('.info-header-tab'),//get the buttons in the header
info = document.querySelector('.info-header'),//parental element for the buttons
tabContent = document.querySelectorAll('.info-tabcontent');//content that must be assigned to a particular tab
function hideTabContent(a) {// to hide all the content elements
for (let i = a; i < tabContent | {
totalOutput.innerHTML = totalSum;
} | conditional_block |
bundle.js | {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = "./js/script.js");
/******/ })
/************************************************************************/
/******/ ({
/***/ "./js/parts/calculator.js":
/*!********************************!*\
!*** ./js/parts/calculator.js ***!
\********************************/
/*! no static exports found */
/***/ (function(module, exports) {
function calculator() {
//CALCULATOR
let people = document.querySelectorAll('.counter-block-input')[0],
days = document.querySelectorAll('.counter-block-input')[1],
place = document.querySelector('#select'),
totalOutput = document.querySelector('#total'),
peopleSum = 0,
daysSum = 0,
placeIndex = +place.options[0].value,//index given by default
totalSum;
totalOutput.innerHTML = 0;
people.value = 0;
days.value = 0;
people.addEventListener('blur', function(){
peopleSum = +this.value;
totalSum = (peopleSum + daysSum) * 4000 * placeIndex;
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
totalOutput.innerHTML = totalSum;
}
});
days.addEventListener('blur', function(){
daysSum = +this.value;
totalSum = (peopleSum + daysSum) * 4000 * placeIndex;
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
totalOutput.innerHTML = totalSum;
}
});
place.addEventListener('change', function(){
if (days.value == '' || people.value == '' || days.value == '0' || people.value == '0'){
totalOutput.innerHTML = 0;
} else {
let a = totalSum;
totalOutput.innerHTML = a * this.options[this.selectedIndex].value;
}
});
}
module.exports = calculator;
/***/ }),
/***/ "./js/parts/form.js":
/*!**************************!*\
!*** ./js/parts/form.js ***!
\**************************/
/*! no static exports found */
/***/ (function(module, exports) {
function form() {
// FORM
let message = {
loading: 'Загрузка...',
success: 'Спасибо! Скоро мы с вами свяжемся.',
failure: 'Что-то пошло не так.',
};
let form = document.querySelectorAll('form'), //select two forms: from modal and "we will get in touch" section at the end input,
input,//will be used to select the input(s) of one of the forms
statusMessage = document.createElement('div');//to display a message according to stage and successfulness
form = Array.from(form);//to create an array from a nodeList(you can't put eventListeners on nodeLists)
statusMessage.classList.add('status');
function sendForms(){
return new Promise(function(resolve, reject){
for (let i = 0; i < form.length; i++){//assigning eventListeners to each form
form[i].addEventListener('submit', function(){
event.preventDefault();
form = this;
form.appendChild(statusMessage);
input = form.getElementsByTagName('input');
let request = new XMLHttpRequest();
request.open('POST', 'server.php');
//request.setRequestHeader('Content-type', 'application/x-www-form-urlencoded');
request.setRequestHeader('Content-type', 'application/json; charset=utf-8');
let formData = new FormData(form);
//request.send(formData)
let obj = {};
formData.forEach(function(value, key){
obj[key] = value;
});
let json = JSON.stringify(obj);
request.send(json);
request.addEventListener('readystatechange', function(){
if (request.readyState < 4){
statusMessage.innerHTML = message.loading;
} else if (request.readyState === 4 && request.status == 200){
resolve();
} else {
reject();
}
});
});
}
});
}
sendForms()
.then(() => statusMessage.innerHTML = message.success)
.catch(() => statusMessage.innerHTML = message.failure);
}
module.exports = form;
/***/ }),
/***/ "./js/parts/modal.js":
/*!***************************!*\
!*** ./js/parts/modal.js ***!
\***************************/
/*! no static exports found */
/***/ (function(module, exports) {
function modal() {
//MODAL WINDOW
let moreButton = document.querySelector('.more'),//The "Узнать больше" button
descriptionBtn = document.querySelectorAll('.description-btn'),//the buttons from the tabs
overlay = document.querySelector('.overlay'),//the modal window
closeCross = document.querySelector('.popup-close');
descriptionBtn = Array.from(descriptionBtn);//making an array from the nodelist to push the moreButton
descriptionBtn.push(moreButton);
descriptionBtn.forEach(function(el){
el.addEventListener('click', function () {
overlay.style.display = 'block';
this.classList.add('more-splash');//this means current element (similar to event.target somehow)
document.body.style.overflow = 'hidden';//to disable scroll on the page while the modal window is visible
});
});
closeCross.addEventListener('click', function() {
overlay.style.display = 'none';
moreButton.classList.remove('more-splash');
document.body.style.overflow = '';
});
}
module.exports = modal;
/***/ }),
/***/ "./js/parts/slider.js":
/*!****************************!*\
!*** ./js/parts/slider.js ***!
\****************************/
/*! no static exports found */
/***/ (function(module, exports) {
function slider() {
// SLIDER
let slides = document.querySelectorAll('.slider-item'),
prev = document.querySelector('.prev'),
next = document.querySelector('.next'),
dotWrap = document.querySelector('.slider-dots'),
dots = document.querySelectorAll('.dot'),
sliderIndex = 0;
dots = Array.from(dots);// to use indexOf when click on a dot
function showSlide() {
// the first slide will be shown when a user clicks right being on the last slide
if (sliderIndex == slides.length){
sliderIndex = 0;
}
// the last slide will be shown when a user clicks left being on the first slide
if (sliderIndex < 0){
sliderIndex = 3;
}
slides.forEach((item) => item.style.display = 'none');
slides[sliderIndex].style.display = 'block';
dots.forEach((item) => item.classList.remove('dot-active'));
dots[sliderIndex].classList.add('dot-active');
}
showSlide();// to show only the first slide
prev.addEventListener('click', function() {
sliderIndex--;
showSlide();
});
next.addEventListener('click', function() {
sliderIndex++;
showSlide();
});
dotWrap.addEventListener('click', function(event){
if (event.target.classList.contains('dot')){
//getting index of the clicked dot and assigning it to the appropriate slide
sliderIndex = dots.indexOf(event.target);
showSlide();
}
});
}
module.exports = slider;
/***/ }),
/***/ "./js/parts/tabs.js":
/*!**************************!*\
!*** ./js/parts/tabs.js ***!
\**************************/
/*! no static exports found */
/***/ (function(module, exports) {
function tabs() {
// TABS
//Working with div class='info'
let tab = document.querySelectorAll('.info-header-tab'),//get the buttons in the header
info = document.querySelector('.info-header'),//parental element for the buttons
tabContent = document.querySelectorAll('.info-tabcontent');//content that must be assigned to a particular tab
function hideTabContent(a) {// to hide all the content elements
for (let i = a; i < tabContent.length; i++){
tabContent[i].classList.remove('show');
tabContent[i].classList.add('hide');//manipulate classes in the css file to hide the content elements
}
}
hideTabContent(1);// Need to hide all the content elements except for the first one
function showTabContent(b){//to show the content element we need
if (tabContent[b].classList.contains('hide')){
tabContent[b].classList.remove('hide');
tabContent[b].classList.add('show');
}
}
// attaching EventListener to the parent using delegation
info.addEventListener('click', function(event){
let target = event.target;
if (target && target.classList.contains('info-header-tab')){
// Using for-loop to assign a particular content element to a particular tab
for (let i = 0; i < tab.length; i++) {
if (target == tab[i]){
hideTabContent(0);//to hide the first content element
showTabContent(i);//to display the one that matches the target of our click
break;
}
}
}
});
}
module.exports = tabs;
/***/ }),
/***/ "./js/parts/timer.js": | random_line_split |
||
codec.rs | ());
}
}
}
}
}
impl<EncodeFn, Item> Encode<Item> for EncodeFn
where
EncodeFn: Fn(&Item, &mut BytesMut),
{
fn encode(&mut self, item: &Item, buffer: &mut BytesMut) {
self(item, buffer)
}
}
//
// Implementer's notes: We store future values in `Source` and `Sink`. These future values have to
// satisfy the `'static` lifetime bound because trait methods like `Stream::poll_next` do not take
// a lifetime parameter on `&mut Self`. To satisfy this, when producing a future value, we move
// all related values into it, and they will be moved back after it is completed (so that we can
// produce the next future value).
//
// There may be other ways to satisfy the `'static` lifetime bound, but for now, this "move" trick
// is the best I have.
//
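// Sketch of the same "move" trick in isolation (hypothetical `DemoState`, not used by
// this module): the owned state is moved into the future and handed back when it
// completes, so the boxed future can be `'static` without borrowing `&mut self`.
#[allow(dead_code)]
struct DemoState {
    counter: u32,
}
#[allow(dead_code)]
async fn demo_step(mut state: DemoState) -> (DemoState, u32) {
    // callers take the state out of `self`, wrap this in `Box::pin(..)`, and on
    // `Poll::Ready((state, item))` store the state back before yielding `item`.
    state.counter += 1;
    (state, state.counter)
}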
/// Byte Stream to `futures::stream::Stream` Adapter
#[derive(DebugExt)]
pub struct Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
#[debug(with = InsertPlaceholder)]
source: Option<(Stream, Decoder)>,
#[debug(with = InsertPlaceholder)]
next_future: Option<SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error>>,
}
/// `futures::sink::Sink` to Byte Stream Adapter
#[derive(DebugExt)]
pub struct Sink<Stream, Encoder>
where
Stream: StreamSend,
{
#[debug(with = InsertPlaceholder)]
stream: Option<Stream>,
#[debug(with = InsertPlaceholder)]
encoder: Encoder,
#[debug(with = InsertPlaceholder)]
flush_future: Option<SinkFuture<Stream, Stream::Error>>,
#[debug(with = InsertPlaceholder)]
close_future: Option<SinkFuture<Stream, Stream::Error>>,
}
// TODO: Use where clauses to simplify these type aliases when rustc starts enforcing where clauses
// in type aliases. For more details, check [rust-lang/rust#21903][#21903].
//
// [#21903]: https://github.com/rust-lang/rust/issues/21903
type SourceFuture<Stream, Decoder, Item, Error> =
BoxFuture<'static, SourceOutput<Stream, Decoder, Item, Error>>;
type SourceOutput<Stream, Decoder, Item, Error> = ((Stream, Decoder), Option<Result<Item, Error>>);
type SinkFuture<Stream, Error> = BoxFuture<'static, SinkOutput<Stream, Error>>;
type SinkOutput<Stream, Error> = (Stream, Result<(), Error>);
macro_rules! poll {
($this:ident, $get_future:ident, $context:ident $(,)?) => {
match $this.$get_future().as_mut().poll($context) {
Poll::Ready((state, result)) => {
$this.reset(state);
Poll::Ready(result)
}
Poll::Pending => Poll::Pending,
}
};
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
pub fn new(stream: Stream, decoder: Decoder) -> Self {
Self {
source: Some((stream, decoder)),
next_future: None,
}
}
fn reset(&mut self, source: (Stream, Decoder)) {
self.source = Some(source);
self.next_future = None;
}
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv + Send + 'static,
Decoder: Decode<Stream> + Send + 'static,
{
fn next_future(&mut self) -> &mut SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error> {
self.next_future
.get_or_insert_with(|| Box::pin(Self::next(self.source.take().unwrap())))
}
async fn next(
(mut stream, mut decoder): (Stream, Decoder),
) -> SourceOutput<Stream, Decoder, Decoder::Item, Decoder::Error> {
let result = decoder.decode(&mut stream).await.transpose();
((stream, decoder), result)
}
}
impl<Stream, Decoder> stream::Stream for Source<Stream, Decoder>
where
Stream: StreamRecv + Send + Unpin + 'static,
Decoder: Decode<Stream> + Send + Unpin + 'static,
{
type Item = Result<Decoder::Item, Decoder::Error>;
fn poll_next(self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
poll!(this, next_future, context)
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend,
{
pub fn new(stream: Stream, encoder: Encoder) -> Self {
Self {
stream: Some(stream),
encoder,
flush_future: None,
close_future: None,
}
}
fn reset(&mut self, stream: Stream) {
self.stream = Some(stream);
self.flush_future = None;
self.close_future = None;
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend + Send + 'static,
Encoder: 'static,
{
fn flush_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.flush_future
.get_or_insert_with(|| Box::pin(Self::flush(self.stream.take().unwrap())))
}
async fn flush(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.send_all().await;
(stream, result)
}
fn close_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.close_future
.get_or_insert_with(|| Box::pin(Self::close(self.stream.take().unwrap())))
}
async fn close(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.shutdown().await;
(stream, result)
}
}
impl<Stream, Encoder, Item> sink::Sink<Item> for Sink<Stream, Encoder>
where
Stream: StreamSend + Send + Unpin + 'static,
Encoder: Encode<Item> + Unpin + 'static,
{
type Error = Stream::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match self.get_mut().stream {
Some(_) => Poll::Ready(Ok(())),
None => Poll::Pending,
}
}
fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
let this = self.get_mut();
this.encoder
.encode(&item, &mut this.stream.as_mut().unwrap().buffer());
Ok(())
}
fn poll_flush(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, flush_future, context)
}
fn poll_close(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, close_future, context)
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::{Error, ErrorKind};
use bytes::{Buf, BufMut};
use futures::{sink::SinkExt, stream::StreamExt};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::io::{RecvStream, SendStream};
use super::*;
fn decode(buffer: &mut BytesMut) -> Result<Option<String>, Error> {
if buffer.remaining() < 1 {
return Ok(None);
}
let size = usize::from(buffer[0]);
if buffer.remaining() < 1 + size {
return Ok(None);
}
let mut vec = vec![0u8; size];
buffer.get_u8();
buffer.copy_to_slice(&mut vec);
Ok(Some(String::from_utf8(vec).unwrap()))
}
#[tokio::test]
async fn source() {
let (stream, mut mock) = RecvStream::new_mock(4096);
let mut source = Source::new(stream, decode);
mock.write_all(b"\x0bhello world\x03foo\x03bar")
.await
.unwrap();
drop(mock);
assert_matches!(source.next().await, Some(Ok(x)) if x == "hello world");
assert_matches!(source.next().await, Some(Ok(x)) if x == "foo");
assert_matches!(source.next().await, Some(Ok(x)) if x == "bar");
assert_matches!(source.next().await, None);
}
#[tokio::test]
async fn source_unexpected_eof() {
let (stream, mut mock) = RecvStream::new_mock(4096);
let mut source = Source::new(stream, decode);
mock.write_all(b"\x0bhello").await.unwrap();
drop(mock);
assert_matches!(source.next().await, Some(Err(e)) if e.kind() == ErrorKind::UnexpectedEof);
}
| fn encode(item: &String, buffer: &mut BytesMut) { | random_line_split |
|
codec.rs | : &mut BytesMut);
}
// TODO: We intend to implement `Decode` for async functions, similar to what we do for `Encode`.
// However, for unknown reasons, it is causing the compiler to crash. Currently, we are only able
// to provide an implementation for non-async functions.
#[async_trait]
impl<Stream, DecodeFn, Item, Error> Decode<Stream> for DecodeFn
where
Stream: StreamRecv + Send,
DecodeFn: Fn(&mut BytesMut) -> Result<Option<Item>, Error> + Send,
Item: Send,
Error: From<Stream::Error> + Send,
{
type Item = Item;
type Error = Error;
async fn decode(&mut self, stream: &mut Stream) -> Result<Option<Self::Item>, Self::Error> {
loop {
if let Some(item) = self(&mut stream.buffer())? {
return Ok(Some(item));
}
if stream.recv_or_eof().await?.is_none() {
if stream.buffer().is_empty() {
return Ok(None);
} else {
// Return the `UnexpectedEof` error raised by the `recv` function.
return Err(stream.recv().await.expect_err("expect EOF").into());
}
}
}
}
}
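// Sketch: the kind of plain (non-async) function this blanket impl accepts -- a
// hypothetical decoder for u32 length-prefixed frames; not used by this module.
#[allow(dead_code)]
fn demo_length_prefixed_decode(buffer: &mut BytesMut) -> Result<Option<Vec<u8>>, std::io::Error> {
    use bytes::Buf;
    if buffer.len() < 4 {
        return Ok(None);
    }
    let len = u32::from_be_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]) as usize;
    if buffer.len() < 4 + len {
        return Ok(None);
    }
    buffer.advance(4);
    let mut frame = vec![0u8; len];
    buffer.copy_to_slice(&mut frame);
    Ok(Some(frame))
}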
impl<EncodeFn, Item> Encode<Item> for EncodeFn
where
EncodeFn: Fn(&Item, &mut BytesMut),
{
fn encode(&mut self, item: &Item, buffer: &mut BytesMut) {
self(item, buffer)
}
}
//
// Implementer's notes: We store future values in `Source` and `Sink`. These future values have to
// satisfy the `'static` lifetime bound because trait methods like `Stream::poll_next` do not take
// a lifetime parameter on `&mut Self`. To satisfy this, when producing a future value, we move
// all related values into it, and they will be moved back after it is completed (so that we can
// produce the next future value).
//
// There may be other ways to satisfy the `'static` lifetime bound, but for now, this "move" trick
// is the best I have.
//
/// Byte Stream to `futures::stream::Stream` Adapter
#[derive(DebugExt)]
pub struct Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
#[debug(with = InsertPlaceholder)]
source: Option<(Stream, Decoder)>,
#[debug(with = InsertPlaceholder)]
next_future: Option<SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error>>,
}
/// `futures::sink::Sink` to Byte Stream Adapter
#[derive(DebugExt)]
pub struct Sink<Stream, Encoder>
where
Stream: StreamSend,
{
#[debug(with = InsertPlaceholder)]
stream: Option<Stream>,
#[debug(with = InsertPlaceholder)]
encoder: Encoder,
#[debug(with = InsertPlaceholder)]
flush_future: Option<SinkFuture<Stream, Stream::Error>>,
#[debug(with = InsertPlaceholder)]
close_future: Option<SinkFuture<Stream, Stream::Error>>,
}
// TODO: Use where clauses to simplify these type aliases when rustc starts enforcing where clauses
// in type aliases. For more details, check [rust-lang/rust#21903][#21903].
//
// [#21903]: https://github.com/rust-lang/rust/issues/21903
type SourceFuture<Stream, Decoder, Item, Error> =
BoxFuture<'static, SourceOutput<Stream, Decoder, Item, Error>>;
type SourceOutput<Stream, Decoder, Item, Error> = ((Stream, Decoder), Option<Result<Item, Error>>);
type SinkFuture<Stream, Error> = BoxFuture<'static, SinkOutput<Stream, Error>>;
type SinkOutput<Stream, Error> = (Stream, Result<(), Error>);
macro_rules! poll {
($this:ident, $get_future:ident, $context:ident $(,)?) => {
match $this.$get_future().as_mut().poll($context) {
Poll::Ready((state, result)) => {
$this.reset(state);
Poll::Ready(result)
}
Poll::Pending => Poll::Pending,
}
};
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
pub fn new(stream: Stream, decoder: Decoder) -> Self {
Self {
source: Some((stream, decoder)),
next_future: None,
}
}
fn reset(&mut self, source: (Stream, Decoder)) {
self.source = Some(source);
self.next_future = None;
}
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv + Send + 'static,
Decoder: Decode<Stream> + Send + 'static,
{
fn next_future(&mut self) -> &mut SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error> {
self.next_future
.get_or_insert_with(|| Box::pin(Self::next(self.source.take().unwrap())))
}
async fn next(
(mut stream, mut decoder): (Stream, Decoder),
) -> SourceOutput<Stream, Decoder, Decoder::Item, Decoder::Error> {
let result = decoder.decode(&mut stream).await.transpose();
((stream, decoder), result)
}
}
impl<Stream, Decoder> stream::Stream for Source<Stream, Decoder>
where
Stream: StreamRecv + Send + Unpin + 'static,
Decoder: Decode<Stream> + Send + Unpin + 'static,
{
type Item = Result<Decoder::Item, Decoder::Error>;
fn poll_next(self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
poll!(this, next_future, context)
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend,
{
pub fn new(stream: Stream, encoder: Encoder) -> Self {
Self {
stream: Some(stream),
encoder,
flush_future: None,
close_future: None,
}
}
fn reset(&mut self, stream: Stream) {
self.stream = Some(stream);
self.flush_future = None;
self.close_future = None;
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend + Send + 'static,
Encoder: 'static,
{
fn flush_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.flush_future
.get_or_insert_with(|| Box::pin(Self::flush(self.stream.take().unwrap())))
}
async fn flush(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.send_all().await;
(stream, result)
}
fn close_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.close_future
.get_or_insert_with(|| Box::pin(Self::close(self.stream.take().unwrap())))
}
async fn close(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.shutdown().await;
(stream, result)
}
}
impl<Stream, Encoder, Item> sink::Sink<Item> for Sink<Stream, Encoder>
where
Stream: StreamSend + Send + Unpin + 'static,
Encoder: Encode<Item> + Unpin + 'static,
{
type Error = Stream::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match self.get_mut().stream {
Some(_) => Poll::Ready(Ok(())),
None => Poll::Pending,
}
}
fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
let this = self.get_mut();
this.encoder
.encode(&item, &mut this.stream.as_mut().unwrap().buffer());
Ok(())
}
fn poll_flush(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, flush_future, context)
}
fn poll_close(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, close_future, context)
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::{Error, ErrorKind};
use bytes::{Buf, BufMut};
use futures::{sink::SinkExt, stream::StreamExt};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::io::{RecvStream, SendStream};
use super::*;
fn decode(buffer: &mut BytesMut) -> Result<Option<String>, Error> | {
if buffer.remaining() < 1 {
return Ok(None);
}
let size = usize::from(buffer[0]);
if buffer.remaining() < 1 + size {
return Ok(None);
}
let mut vec = vec![0u8; size];
buffer.get_u8();
buffer.copy_to_slice(&mut vec);
Ok(Some(String::from_utf8(vec).unwrap()))
} | identifier_body |
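    // A hedged sketch of the matching encoder for the framing `decode` expects above
    // (one length byte followed by the payload). The test module's real `encode` body is
    // elided in this excerpt; this illustrative version assumes items never exceed 255
    // bytes, as the single-byte length prefix implies.
    #[allow(dead_code)]
    fn encode_sketch(item: &String, buffer: &mut BytesMut) {
        buffer.put_u8(item.len() as u8); // assumes item.len() <= 255
        buffer.put_slice(item.as_bytes());
    }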
|
codec.rs | a stream item and writes the output to a byte stream buffer.
fn encode(&mut self, item: &Item, buffer: &mut BytesMut);
}
// TODO: We intend to implement `Decode` for async functions, similar to what we do for `Encode`.
// However, for unknown reasons, it is causing the compiler to crash. Currently, we are only able
// to provide an implementation for non-async functions.
#[async_trait]
impl<Stream, DecodeFn, Item, Error> Decode<Stream> for DecodeFn
where
Stream: StreamRecv + Send,
DecodeFn: Fn(&mut BytesMut) -> Result<Option<Item>, Error> + Send,
Item: Send,
Error: From<Stream::Error> + Send,
{
type Item = Item;
type Error = Error;
async fn | (&mut self, stream: &mut Stream) -> Result<Option<Self::Item>, Self::Error> {
loop {
if let Some(item) = self(&mut stream.buffer())? {
return Ok(Some(item));
}
if stream.recv_or_eof().await?.is_none() {
if stream.buffer().is_empty() {
return Ok(None);
} else {
// Return the `UnexpectedEof` error raised by the `recv` function.
return Err(stream.recv().await.expect_err("expect EOF").into());
}
}
}
}
}
impl<EncodeFn, Item> Encode<Item> for EncodeFn
where
EncodeFn: Fn(&Item, &mut BytesMut),
{
fn encode(&mut self, item: &Item, buffer: &mut BytesMut) {
self(item, buffer)
}
}
//
// Implementer's notes: We store future values in `Source` and `Sink`. These future values have to
// satisfy the `'static` lifetime bound because trait methods like `Stream::poll_next` do not take
// a lifetime parameter on `&mut Self`. To satisfy this, when producing a future value, we move
// all related values into it, and they will be moved back after it is completed (so that we can
// produce the next future value).
//
// There may be other ways to satisfy the `'static` lifetime bound, but for now, this "move" trick
// is the best I have.
//
/// Byte Stream to `futures::stream::Stream` Adapter
#[derive(DebugExt)]
pub struct Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
#[debug(with = InsertPlaceholder)]
source: Option<(Stream, Decoder)>,
#[debug(with = InsertPlaceholder)]
next_future: Option<SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error>>,
}
/// `futures::sink::Sink` to Byte Stream Adapter
#[derive(DebugExt)]
pub struct Sink<Stream, Encoder>
where
Stream: StreamSend,
{
#[debug(with = InsertPlaceholder)]
stream: Option<Stream>,
#[debug(with = InsertPlaceholder)]
encoder: Encoder,
#[debug(with = InsertPlaceholder)]
flush_future: Option<SinkFuture<Stream, Stream::Error>>,
#[debug(with = InsertPlaceholder)]
close_future: Option<SinkFuture<Stream, Stream::Error>>,
}
// TODO: Use where clauses to simplify these type aliases when rustc starts enforcing where clauses
// in type aliases. For more details, check [rust-lang/rust#21903][#21903].
//
// [#21903]: https://github.com/rust-lang/rust/issues/21903
type SourceFuture<Stream, Decoder, Item, Error> =
BoxFuture<'static, SourceOutput<Stream, Decoder, Item, Error>>;
type SourceOutput<Stream, Decoder, Item, Error> = ((Stream, Decoder), Option<Result<Item, Error>>);
type SinkFuture<Stream, Error> = BoxFuture<'static, SinkOutput<Stream, Error>>;
type SinkOutput<Stream, Error> = (Stream, Result<(), Error>);
macro_rules! poll {
($this:ident, $get_future:ident, $context:ident $(,)?) => {
match $this.$get_future().as_mut().poll($context) {
Poll::Ready((state, result)) => {
$this.reset(state);
Poll::Ready(result)
}
Poll::Pending => Poll::Pending,
}
};
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv,
Decoder: Decode<Stream>,
{
pub fn new(stream: Stream, decoder: Decoder) -> Self {
Self {
source: Some((stream, decoder)),
next_future: None,
}
}
fn reset(&mut self, source: (Stream, Decoder)) {
self.source = Some(source);
self.next_future = None;
}
}
impl<Stream, Decoder> Source<Stream, Decoder>
where
Stream: StreamRecv + Send + 'static,
Decoder: Decode<Stream> + Send + 'static,
{
fn next_future(&mut self) -> &mut SourceFuture<Stream, Decoder, Decoder::Item, Decoder::Error> {
self.next_future
.get_or_insert_with(|| Box::pin(Self::next(self.source.take().unwrap())))
}
async fn next(
(mut stream, mut decoder): (Stream, Decoder),
) -> SourceOutput<Stream, Decoder, Decoder::Item, Decoder::Error> {
let result = decoder.decode(&mut stream).await.transpose();
((stream, decoder), result)
}
}
impl<Stream, Decoder> stream::Stream for Source<Stream, Decoder>
where
Stream: StreamRecv + Send + Unpin + 'static,
Decoder: Decode<Stream> + Send + Unpin + 'static,
{
type Item = Result<Decoder::Item, Decoder::Error>;
fn poll_next(self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
poll!(this, next_future, context)
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend,
{
pub fn new(stream: Stream, encoder: Encoder) -> Self {
Self {
stream: Some(stream),
encoder,
flush_future: None,
close_future: None,
}
}
fn reset(&mut self, stream: Stream) {
self.stream = Some(stream);
self.flush_future = None;
self.close_future = None;
}
}
impl<Stream, Encoder> Sink<Stream, Encoder>
where
Stream: StreamSend + Send + 'static,
Encoder: 'static,
{
fn flush_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.flush_future
.get_or_insert_with(|| Box::pin(Self::flush(self.stream.take().unwrap())))
}
async fn flush(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.send_all().await;
(stream, result)
}
fn close_future(&mut self) -> &mut SinkFuture<Stream, Stream::Error> {
self.close_future
.get_or_insert_with(|| Box::pin(Self::close(self.stream.take().unwrap())))
}
async fn close(mut stream: Stream) -> SinkOutput<Stream, Stream::Error> {
let result = stream.shutdown().await;
(stream, result)
}
}
impl<Stream, Encoder, Item> sink::Sink<Item> for Sink<Stream, Encoder>
where
Stream: StreamSend + Send + Unpin + 'static,
Encoder: Encode<Item> + Unpin + 'static,
{
type Error = Stream::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
match self.get_mut().stream {
Some(_) => Poll::Ready(Ok(())),
None => Poll::Pending,
}
}
fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> {
let this = self.get_mut();
this.encoder
.encode(&item, &mut this.stream.as_mut().unwrap().buffer());
Ok(())
}
fn poll_flush(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, flush_future, context)
}
fn poll_close(
self: Pin<&mut Self>,
context: &mut Context<'_>,
) -> Poll<Result<(), Self::Error>> {
let this = self.get_mut();
poll!(this, close_future, context)
}
}
#[cfg(test)]
mod tests {
use std::assert_matches::assert_matches;
use std::io::{Error, ErrorKind};
use bytes::{Buf, BufMut};
use futures::{sink::SinkExt, stream::StreamExt};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::io::{RecvStream, SendStream};
use super::*;
fn decode(buffer: &mut BytesMut) -> Result<Option<String>, Error> {
if buffer.remaining() < 1 {
return Ok(None);
}
let size = usize::from(buffer[0]);
if buffer.remaining() < 1 + size {
return Ok(None);
}
let mut vec = vec![0u8; size];
buffer.get | decode | identifier_name |
issued_certificate.pb.go | ,json=issuedCertificateSecret,proto3" json:"issued_certificate_secret,omitempty"`
// A ref to a PodBounceDirective specifying a list of k8s pods to bounce
// (delete and cause a restart) when the certificate is issued.
// This will include the control plane pods as well as any pods
// which share a data plane with the target mesh.
PodBounceDirective *v1.ObjectRef `protobuf:"bytes,5,opt,name=pod_bounce_directive,json=podBounceDirective,proto3" json:"pod_bounce_directive,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IssuedCertificateSpec) Reset() { *m = IssuedCertificateSpec{} }
func (m *IssuedCertificateSpec) String() string { return proto.CompactTextString(m) }
func (*IssuedCertificateSpec) ProtoMessage() {}
func (*IssuedCertificateSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_86ade12c22739639, []int{0}
}
func (m *IssuedCertificateSpec) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IssuedCertificateSpec.Unmarshal(m, b)
}
func (m *IssuedCertificateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IssuedCertificateSpec.Marshal(b, m, deterministic)
} | func (m *IssuedCertificateSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_IssuedCertificateSpec.Merge(m, src)
}
func (m *IssuedCertificateSpec) XXX_Size() int {
return xxx_messageInfo_IssuedCertificateSpec.Size(m)
}
func (m *IssuedCertificateSpec) XXX_DiscardUnknown() {
xxx_messageInfo_IssuedCertificateSpec.DiscardUnknown(m)
}
var xxx_messageInfo_IssuedCertificateSpec proto.InternalMessageInfo
func (m *IssuedCertificateSpec) GetHosts() []string {
if m != nil {
return m.Hosts
}
return nil
}
func (m *IssuedCertificateSpec) GetOrg() string {
if m != nil {
return m.Org
}
return ""
}
func (m *IssuedCertificateSpec) GetSigningCertificateSecret() *v1.ObjectRef {
if m != nil {
return m.SigningCertificateSecret
}
return nil
}
func (m *IssuedCertificateSpec) GetIssuedCertificateSecret() *v1.ObjectRef {
if m != nil {
return m.IssuedCertificateSecret
}
return nil
}
func (m *IssuedCertificateSpec) GetPodBounceDirective() *v1.ObjectRef {
if m != nil {
return m.PodBounceDirective
}
return nil
}
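// Illustrative sketch (not part of the generated file): how client code might populate
// the spec fields documented above. The SPIFFE host and org values are hypothetical
// placeholders, not values used by Gloo Mesh itself.
func exampleIssuedCertificateSpec() *IssuedCertificateSpec {
	spec := &IssuedCertificateSpec{
		Hosts: []string{"spiffe://example.local/ns/istio-system/sa/istio-pilot"},
		Org:   "example-org",
	}
	// The signing/issued certificate secrets and the PodBounceDirective are
	// *v1.ObjectRef references to cluster resources; they are left unset here.
	return spec
}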
// The IssuedCertificate status is written by the CertificateRequesting agent.
type IssuedCertificateStatus struct {
	// The most recent generation observed in the IssuedCertificate metadata.
// If the observedGeneration does not match generation, the Certificate Requesting Agent has not processed the most
// recent version of this IssuedCertificate.
ObservedGeneration int64 `protobuf:"varint,1,opt,name=observed_generation,json=observedGeneration,proto3" json:"observed_generation,omitempty"`
// Any error observed which prevented the CertificateRequest from being processed.
// If the error is empty, the request has been processed successfully.
Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
// The current state of the IssuedCertificate workflow, reported by the agent.
State IssuedCertificateStatus_State `protobuf:"varint,3,opt,name=state,proto3,enum=certificates.mesh.gloo.solo.io.IssuedCertificateStatus_State" json:"state,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IssuedCertificateStatus) Reset() { *m = IssuedCertificateStatus{} }
func (m *IssuedCertificateStatus) String() string { return proto.CompactTextString(m) }
func (*IssuedCertificateStatus) ProtoMessage() {}
func (*IssuedCertificateStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_86ade12c22739639, []int{1}
}
func (m *IssuedCertificateStatus) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IssuedCertificateStatus.Unmarshal(m, b)
}
func (m *IssuedCertificateStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IssuedCertificateStatus.Marshal(b, m, deterministic)
}
func (m *IssuedCertificateStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_IssuedCertificateStatus.Merge(m, src)
}
func (m *IssuedCertificateStatus) XXX_Size() int {
return xxx_messageInfo_IssuedCertificateStatus.Size(m)
}
func (m *IssuedCertificateStatus) XXX_DiscardUnknown() {
xxx_messageInfo_IssuedCertificateStatus.DiscardUnknown(m)
}
var xxx_messageInfo_IssuedCertificateStatus proto.InternalMessageInfo
func (m *IssuedCertificateStatus) GetObservedGeneration() int64 {
if m != nil {
return m.ObservedGeneration
}
return 0
}
func (m *IssuedCertificateStatus) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *IssuedCertificateStatus) GetState() IssuedCertificateStatus_State {
if m != nil {
return m.State
}
return IssuedCertificateStatus_PENDING
}
func init() {
proto.RegisterEnum("certificates.mesh.gloo.solo.io.IssuedCertificateStatus_State", IssuedCertificateStatus_State_name, IssuedCertificateStatus_State_value)
proto.RegisterType((*IssuedCertificateSpec)(nil), "certificates.mesh.gloo.solo.io.IssuedCertificateSpec")
proto.RegisterType((*IssuedCertificateStatus)(nil), "certificates.mesh.gloo.solo.io.IssuedCertificateStatus")
}
func init() {
proto.RegisterFile("github.com/solo-io/gloo-mesh/api/certificates/issued_certificate.proto", fileDescriptor_86ade12c22739639)
}
var fileDescriptor_86ade12c22739639 = []byte{
// 479 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x5d, 0x6f, 0xd3, 0x30,
0x14, 0x25, 0xed, 0x3a, 0xa8, 0x07, 0x28, 0x32, 0x45, 0x0b, 0x05, 0x95, 0xaa, 0x4f, 0x7d, 0x99,
0xad, 0x95, 0x67, 0x1e, 0x18, 0x49, 0x47, 0xa4, 0xa9, 0x40, 0xc2, 0x24, 0xb4, 0x97, 0x2a, 0x1f,
0xb7, 0xae, 0x59, 0xdb, 0x1b, 0xd9, 0x4e, 0x9f, 0xf9, 0x39, 0xfc, 0x11, 0xfe, 0x08, 0xbf, 0x04,
0xc5, 0x69, 0xa1, 0x22, 0x62, 0xda, 0x53, 0xee, 0xf5, 0xbd, 0xe7, 0xe8, 0x9c, 0x13, 0x9b, 0x4c,
0x85, 0x34, 0xcb, 0x32, 0x65, 0x19, 0xae, 0xb9, 0xc6, 0x15, 0x9e, 0x49, 0xe4, 0x62, 0x85, 0x78,
0xb6, 0x06, 0xbd, 0xe4, 0x49, 0x21, 0x79, 0x06, 0xca, 0xc8, 0x85, 0xcc, 0x12, 0x03, 0x9a, 0x4b,
0xad, 0x4b, | random_line_split |
|
issued_certificate.pb.go | =issuedCertificateSecret,proto3" json:"issued_certificate_secret,omitempty"`
// A ref to a PodBounceDirective specifying a list of k8s pods to bounce
// (delete and cause a restart) when the certificate is issued.
// This will include the control plane pods as well as any pods
// which share a data plane with the target mesh.
PodBounceDirective *v1.ObjectRef `protobuf:"bytes,5,opt,name=pod_bounce_directive,json=podBounceDirective,proto3" json:"pod_bounce_directive,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IssuedCertificateSpec) Reset() { *m = IssuedCertificateSpec{} }
func (m *IssuedCertificateSpec) String() string { return proto.CompactTextString(m) }
func (*IssuedCertificateSpec) ProtoMessage() {}
func (*IssuedCertificateSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_86ade12c22739639, []int{0}
}
func (m *IssuedCertificateSpec) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IssuedCertificateSpec.Unmarshal(m, b)
}
func (m *IssuedCertificateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IssuedCertificateSpec.Marshal(b, m, deterministic)
}
func (m *IssuedCertificateSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_IssuedCertificateSpec.Merge(m, src)
}
func (m *IssuedCertificateSpec) XXX_Size() int {
return xxx_messageInfo_IssuedCertificateSpec.Size(m)
}
func (m *IssuedCertificateSpec) XXX_DiscardUnknown() {
xxx_messageInfo_IssuedCertificateSpec.DiscardUnknown(m)
}
var xxx_messageInfo_IssuedCertificateSpec proto.InternalMessageInfo
func (m *IssuedCertificateSpec) GetHosts() []string {
if m != nil {
return m.Hosts
}
return nil
}
func (m *IssuedCertificateSpec) GetOrg() string {
if m != nil {
return m.Org
}
return ""
}
func (m *IssuedCertificateSpec) GetSigningCertificateSecret() *v1.ObjectRef {
if m != nil {
return m.SigningCertificateSecret
}
return nil
}
func (m *IssuedCertificateSpec) GetIssuedCertificateSecret() *v1.ObjectRef {
if m != nil {
return m.IssuedCertificateSecret
}
return nil
}
func (m *IssuedCertificateSpec) GetPodBounceDirective() *v1.ObjectRef {
if m != nil {
return m.PodBounceDirective
}
return nil
}
// The IssuedCertificate status is written by the CertificateRequesting agent.
type IssuedCertificateStatus struct {
	// The most recent generation observed in the IssuedCertificate metadata.
// If the observedGeneration does not match generation, the Certificate Requesting Agent has not processed the most
// recent version of this IssuedCertificate.
ObservedGeneration int64 `protobuf:"varint,1,opt,name=observed_generation,json=observedGeneration,proto3" json:"observed_generation,omitempty"`
// Any error observed which prevented the CertificateRequest from being processed.
// If the error is empty, the request has been processed successfully.
Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
// The current state of the IssuedCertificate workflow, reported by the agent.
State IssuedCertificateStatus_State `protobuf:"varint,3,opt,name=state,proto3,enum=certificates.mesh.gloo.solo.io.IssuedCertificateStatus_State" json:"state,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IssuedCertificateStatus) Reset() { *m = IssuedCertificateStatus{} }
func (m *IssuedCertificateStatus) String() string { return proto.CompactTextString(m) }
func (*IssuedCertificateStatus) ProtoMessage() {}
func (*IssuedCertificateStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_86ade12c22739639, []int{1}
}
func (m *IssuedCertificateStatus) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IssuedCertificateStatus.Unmarshal(m, b)
}
func (m *IssuedCertificateStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IssuedCertificateStatus.Marshal(b, m, deterministic)
}
func (m *IssuedCertificateStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_IssuedCertificateStatus.Merge(m, src)
}
func (m *IssuedCertificateStatus) XXX_Size() int {
return xxx_messageInfo_IssuedCertificateStatus.Size(m)
}
func (m *IssuedCertificateStatus) XXX_DiscardUnknown() {
xxx_messageInfo_IssuedCertificateStatus.DiscardUnknown(m)
}
var xxx_messageInfo_IssuedCertificateStatus proto.InternalMessageInfo
func (m *IssuedCertificateStatus) GetObservedGeneration() int64 {
if m != nil {
return m.ObservedGeneration
}
return 0
}
func (m *IssuedCertificateStatus) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *IssuedCertificateStatus) GetState() IssuedCertificateStatus_State {
if m != nil {
return m.State
}
return IssuedCertificateStatus_PENDING
}
func init() |
func init() {
proto.RegisterFile("github.com/solo-io/gloo-mesh/api/certificates/issued_certificate.proto", fileDescriptor_86ade12c22739639)
}
var fileDescriptor_86ade12c22739639 = []byte{
// 479 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x5d, 0x6f, 0xd3, 0x30,
0x14, 0x25, 0xed, 0x3a, 0xa8, 0x07, 0x28, 0x32, 0x45, 0x0b, 0x05, 0x95, 0xaa, 0x4f, 0x7d, 0x99,
0xad, 0x95, 0x67, 0x1e, 0x18, 0x49, 0x47, 0xa4, 0xa9, 0x40, 0xc2, 0x24, 0xb4, 0x97, 0x2a, 0x1f,
0xb7, 0xae, 0x59, 0xdb, 0x1b, 0xd9, 0x4e, 0x9f, 0xf9, 0x39, 0xfc, 0x11, 0xfe, 0x08, 0xbf, 0x04,
0xc5, 0x69, 0xa1, 0x22, 0x62, 0xda, 0x53, 0xee, 0xf5, 0xbd, 0xe7, 0xe8, 0x9c, 0x13, 0x9b, 0x4c,
0x85, 0x34, 0xcb, 0x32, 0x65, 0x19, 0xae, 0xb9, 0xc6, 0x15, 0x9e, 0x49, 0xe4, 0x62, 0x85, 0x78,
0xb6, 0x06, 0xbd, 0xe4, 0x49, 0x21, 0x79, 0x06, 0xca, 0xc8, 0x85, 0xcc, 0x12, 0x03, 0x9a, 0x4b,
0xad, 0x4b | {
proto.RegisterEnum("certificates.mesh.gloo.solo.io.IssuedCertificateStatus_State", IssuedCertificateStatus_State_name, IssuedCertificateStatus_State_value)
proto.RegisterType((*IssuedCertificateSpec)(nil), "certificates.mesh.gloo.solo.io.IssuedCertificateSpec")
proto.RegisterType((*IssuedCertificateStatus)(nil), "certificates.mesh.gloo.solo.io.IssuedCertificateStatus")
} | identifier_body |
issued_certificate.pb.go | =issuedCertificateSecret,proto3" json:"issued_certificate_secret,omitempty"`
// A ref to a PodBounceDirective specifying a list of k8s pods to bounce
// (delete and cause a restart) when the certificate is issued.
// This will include the control plane pods as well as any pods
// which share a data plane with the target mesh.
PodBounceDirective *v1.ObjectRef `protobuf:"bytes,5,opt,name=pod_bounce_directive,json=podBounceDirective,proto3" json:"pod_bounce_directive,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IssuedCertificateSpec) Reset() { *m = IssuedCertificateSpec{} }
func (m *IssuedCertificateSpec) String() string { return proto.CompactTextString(m) }
func (*IssuedCertificateSpec) ProtoMessage() {}
func (*IssuedCertificateSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_86ade12c22739639, []int{0}
}
func (m *IssuedCertificateSpec) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IssuedCertificateSpec.Unmarshal(m, b)
}
func (m *IssuedCertificateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IssuedCertificateSpec.Marshal(b, m, deterministic)
}
func (m *IssuedCertificateSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_IssuedCertificateSpec.Merge(m, src)
}
func (m *IssuedCertificateSpec) XXX_Size() int {
return xxx_messageInfo_IssuedCertificateSpec.Size(m)
}
func (m *IssuedCertificateSpec) XXX_DiscardUnknown() {
xxx_messageInfo_IssuedCertificateSpec.DiscardUnknown(m)
}
var xxx_messageInfo_IssuedCertificateSpec proto.InternalMessageInfo
func (m *IssuedCertificateSpec) GetHosts() []string {
if m != nil {
return m.Hosts
}
return nil
}
func (m *IssuedCertificateSpec) | () string {
if m != nil {
return m.Org
}
return ""
}
func (m *IssuedCertificateSpec) GetSigningCertificateSecret() *v1.ObjectRef {
if m != nil {
return m.SigningCertificateSecret
}
return nil
}
func (m *IssuedCertificateSpec) GetIssuedCertificateSecret() *v1.ObjectRef {
if m != nil {
return m.IssuedCertificateSecret
}
return nil
}
func (m *IssuedCertificateSpec) GetPodBounceDirective() *v1.ObjectRef {
if m != nil {
return m.PodBounceDirective
}
return nil
}
// The IssuedCertificate status is written by the CertificateRequesting agent.
type IssuedCertificateStatus struct {
// The most recent generation observed in the the IssuedCertificate metadata.
// If the observedGeneration does not match generation, the Certificate Requesting Agent has not processed the most
// recent version of this IssuedCertificate.
ObservedGeneration int64 `protobuf:"varint,1,opt,name=observed_generation,json=observedGeneration,proto3" json:"observed_generation,omitempty"`
// Any error observed which prevented the CertificateRequest from being processed.
// If the error is empty, the request has been processed successfully.
Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
// The current state of the IssuedCertificate workflow, reported by the agent.
State IssuedCertificateStatus_State `protobuf:"varint,3,opt,name=state,proto3,enum=certificates.mesh.gloo.solo.io.IssuedCertificateStatus_State" json:"state,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IssuedCertificateStatus) Reset() { *m = IssuedCertificateStatus{} }
func (m *IssuedCertificateStatus) String() string { return proto.CompactTextString(m) }
func (*IssuedCertificateStatus) ProtoMessage() {}
func (*IssuedCertificateStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_86ade12c22739639, []int{1}
}
func (m *IssuedCertificateStatus) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IssuedCertificateStatus.Unmarshal(m, b)
}
func (m *IssuedCertificateStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IssuedCertificateStatus.Marshal(b, m, deterministic)
}
func (m *IssuedCertificateStatus) XXX_Merge(src proto.Message) {
xxx_messageInfo_IssuedCertificateStatus.Merge(m, src)
}
func (m *IssuedCertificateStatus) XXX_Size() int {
return xxx_messageInfo_IssuedCertificateStatus.Size(m)
}
func (m *IssuedCertificateStatus) XXX_DiscardUnknown() {
xxx_messageInfo_IssuedCertificateStatus.DiscardUnknown(m)
}
var xxx_messageInfo_IssuedCertificateStatus proto.InternalMessageInfo
func (m *IssuedCertificateStatus) GetObservedGeneration() int64 {
if m != nil {
return m.ObservedGeneration
}
return 0
}
func (m *IssuedCertificateStatus) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *IssuedCertificateStatus) GetState() IssuedCertificateStatus_State {
if m != nil {
return m.State
}
return IssuedCertificateStatus_PENDING
}
func init() {
proto.RegisterEnum("certificates.mesh.gloo.solo.io.IssuedCertificateStatus_State", IssuedCertificateStatus_State_name, IssuedCertificateStatus_State_value)
proto.RegisterType((*IssuedCertificateSpec)(nil), "certificates.mesh.gloo.solo.io.IssuedCertificateSpec")
proto.RegisterType((*IssuedCertificateStatus)(nil), "certificates.mesh.gloo.solo.io.IssuedCertificateStatus")
}
func init() {
proto.RegisterFile("github.com/solo-io/gloo-mesh/api/certificates/issued_certificate.proto", fileDescriptor_86ade12c22739639)
}
var fileDescriptor_86ade12c22739639 = []byte{
// 479 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x5d, 0x6f, 0xd3, 0x30,
0x14, 0x25, 0xed, 0x3a, 0xa8, 0x07, 0x28, 0x32, 0x45, 0x0b, 0x05, 0x95, 0xaa, 0x4f, 0x7d, 0x99,
0xad, 0x95, 0x67, 0x1e, 0x18, 0x49, 0x47, 0xa4, 0xa9, 0x40, 0xc2, 0x24, 0xb4, 0x97, 0x2a, 0x1f,
0xb7, 0xae, 0x59, 0xdb, 0x1b, 0xd9, 0x4e, 0x9f, 0xf9, 0x39, 0xfc, 0x11, 0xfe, 0x08, 0xbf, 0x04,
0xc5, 0x69, 0xa1, 0x22, 0x62, 0xda, 0x53, 0xee, 0xf5, 0xbd, 0xe7, 0xe8, 0x9c, 0x13, 0x9b, 0x4c,
0x85, 0x34, 0xcb, 0x32, 0x65, 0x19, 0xae, 0xb9, 0xc6, 0x15, 0x9e, 0x49, 0xe4, 0x62, 0x85, 0x78,
0xb6, 0x06, 0xbd, 0xe4, 0x49, 0x21, 0x79, 0x06, 0xca, 0xc8, 0x85, 0xcc, 0x12, 0x03, 0x9a, 0x4b,
0xad, 0x4b, | GetOrg | identifier_name |
issued_certificate.pb.go | 0xef, 0x09, 0x14, 0x68, 0x4b, 0x5e, 0x55, 0xf5, 0xe9, 0xe8, 0x67, 0x8b, 0x3c, 0x0f, 0xad,
0xe0, 0xf7, 0x7f, 0xc5, 0xc5, 0x05, 0x64, 0xb4, 0x47, 0x3a, 0x4b, 0xd4, 0x46, 0x7b, 0xce, 0xb0,
0x3d, 0xee, 0x46, 0x75, 0x43, 0x5d, 0xd2, 0x46, 0x25, 0xbc, 0xd6, 0xd0, 0x19, 0x77, 0xa3, 0xaa,
0xa4, 0x37, 0xa4, 0xaf, 0xa5, 0xd8, 0xc8, 0x8d, 0x38, 0xb4, 0x3c, 0xd7, 0x90, 0x29, 0x30, 0x5e,
0x7b, 0xe8, 0x8c, 0x4f, 0x26, 0xaf, 0x98, 0x75, 0x52, 0xf9, 0xdb, 0xbb, 0x65, 0x1f, 0xd3, 0x6f,
0x90, 0x99, 0x08, 0x16, 0x91, 0xb7, 0xc3, 0x1f, 0x2a, 0xb0, 0x68, 0xfa, 0x95, 0xbc, 0x68, 0xa6,
0xb9, 0xa7, 0x3e, 0xba, 0x07, 0xf5, 0xa9, 0x6c, 0x78, 0xab, 0x99, 0x67, 0xa4, 0x57, 0x60, 0x3e,
0x4f, 0xb1, 0xdc, 0x64, 0x30, 0xcf, 0xa5, 0x82, 0xcc, 0xc8, 0x2d, 0x78, 0x9d, 0x7b, 0x90, 0xd2,
0x02, 0xf3, 0x0b, 0x0b, 0xf4, 0xf7, 0xb8, 0xd1, 0xf7, 0x16, 0x39, 0x6d, 0xe6, 0x68, 0x12, 0x53,
0x6a, 0xca, 0xc9, 0x33, 0x4c, 0x35, 0xa8, 0x2d, 0xe4, 0x73, 0x01, 0x1b, 0xa8, 0x7f, 0x8b, 0xe7,
0x0c, 0x9d, 0x71, 0x3b, 0xa2, 0xfb, 0xd1, 0xe5, 0x9f, 0x49, 0x15, 0x3d, 0x28, 0x85, 0x6a, 0x17,
0x73, 0xdd, 0xd0, 0x98, 0x74, 0xb4, 0x49, 0x0c, 0xd8, 0x4c, 0x9f, 0x4e, 0xde, 0xb2, 0xbb, 0xaf,
0x13, 0xfb, 0x8f, 0x1c, 0x56, 0x7d, 0x20, 0xaa, 0xb9, 0x46, 0x21, 0xe9, 0xd8, 0x9e, 0x9e, 0x90,
0x87, 0x9f, 0x82, 0x99, 0x1f, 0xce, 0x2e, 0xdd, 0x07, 0xf4, 0x09, 0xe9, 0x46, 0xc1, 0xe7, 0xeb,
0x20, 0xfe, 0x12, 0xf8, 0xae, 0x43, 0x09, 0x39, 0x0e, 0xe3, 0xf8, 0x3a, 0xf0, 0xdd, 0x16, 0x7d,
0x4c, 0x1e, 0x4d, 0xc3, 0x59, 0x18, 0x7f, 0x08, 0x7c, 0xb7, 0x5d, 0x4d, 0xa6, 0xef, 0xc2, 0xab,
0xc0, 0x77, 0x8f, 0x2e, 0xa2, 0x1f, 0xbf, 0x06, 0xce, 0xcd, 0xd5, 0x9d, 0x8f, 0xa5, 0xb8, 0x15,
0x8d, 0x07, 0xd3, 0x94, 0xce, 0xb7, 0xe7, 0xc9, 0xaa, 0x58, 0x26, 0x93, 0xf4, 0xd8, 0xde, 0xd2,
0x37, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x0f, 0x1b, 0x55, 0x1f, 0x83, 0x03, 0x00, 0x00,
}
func (this *IssuedCertificateSpec) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*IssuedCertificateSpec)
if !ok {
that2, ok := that.(IssuedCertificateSpec)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if len(this.Hosts) != len(that1.Hosts) {
return false
}
for i := range this.Hosts {
if this.Hosts[i] != that1.Hosts[i] {
return false
}
}
if this.Org != that1.Org {
return false
}
if !this.SigningCertificateSecret.Equal(that1.SigningCertificateSecret) {
return false
}
if !this.IssuedCertificateSecret.Equal(that1.IssuedCertificateSecret) {
return false
}
if !this.PodBounceDirective.Equal(that1.PodBounceDirective) {
return false
}
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
return false
}
return true
}
func (this *IssuedCertificateStatus) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*IssuedCertificateStatus)
if !ok {
that2, ok := that.(IssuedCertificateStatus)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.ObservedGeneration != that1.ObservedGeneration | {
return false
} | conditional_block |
|
mprocessing.py | function to run function in multiprocessing mode
"""
# -----------------------------------------------------------------------------
# Import section for Python 2 and 3 compatible code
# from __future__ import absolute_import, division, print_function,
# unicode_literals
from __future__ import division # This way: 3 / 2 == 1.5; 3 // 2 == 1
# -----------------------------------------------------------------------------
# Import section
#
import logging
import multiprocessing
from itertools import islice
import lib.NicePrint as NicePrint
# =========================================================================
# Functions aliases
#
# NPR.NicePrint = from NicePrint module
# -------------------------------------------------------------------------
NPR = NicePrint.NicePrint()
# -------------------------------------------------------------------------
# use_lock
#
# Control use of DB lock. acquire/release
#
def use_lock(adb_lock, operation, nprocs=0):
""" use_lock
adb_lock = lock to be used
operation = True => Lock
= False => Release
nprocs = >0 when in multiprocessing mode
>>> alock = multiprocessing.Lock()
>>> use_lock(alock, True, 2)
True
>>> use_lock(alock, False, 2)
True
"""
use_dblock_return = False
logging.debug('Entering use_lock with operation:[%s].', operation)
if adb_lock is None:
logging.debug('use_lock: adb_lock is [None].')
return use_dblock_return
logging.debug('use_lock: adb_lock.semlock:[%s].', adb_lock._semlock)
if operation is None:
return use_dblock_return
if (nprocs is not None) and (nprocs) and (nprocs > 0):
if operation:
# Control for when running multiprocessing set locking
|
else:
# Control for when running multiprocessing release locking
logging.debug('===Multiprocessing=== <--[ ].lock.release')
try:
adb_lock.release()
use_dblock_return = True
except Exception:
NPR.niceerror(caught=True,
caughtprefix='+++ ',
caughtcode='003',
caughtmsg='Caught an exception lock.release',
useniceprint=True,
exceptsysinfo=True)
# Raise aborts execution
raise
logging.info('===Multiprocessing=== <--[v].lock.release')
logging.info('Exiting use_lock with operation:[%s]. Result:[%s]',
operation, use_dblock_return)
else:
use_dblock_return = True
logging.warning('(No multiprocessing. Nothing to do) '
'Exiting use_lock with operation:[%s]. Result:[%s]',
operation, use_dblock_return)
return use_dblock_return
# -----------------------------------------------------------------------------
# mprocessing
#
def mprocessing(nprocs, lockdb, running, mutex, itemslist, a_fn, cur):
""" mprocessing Function
nprocs = Number of processes to launch (int)
lockdb = lock for access to Database (lock obj to be created)
running = Value to count processed items (count obj to be created)
mutex = mutex for access to value running (obj to be created)
itemslist = list of items to be processed
a_fn = a function which is the target of the multiprocessing
        a_fn must cater for the following arguments
lockdb
running
mutex
            splititemslist = partial split list
count_total = len(itemslist)
cur
cur = cursor variable for DB access
"""
# proc_pool = Local variable proc_pool for Pool of processes
# log_level = log_level
# count_total = Total counter of items to distribute/play/indicate progress
# len(itemslist)
log_level = logging.getLogger().getEffectiveLevel()
logging.info('===mprocessing [%s] target_fn():[%s] nprocs:[%s]',
__name__, a_fn.__name__, nprocs)
# if log_level <= logging.WARNING:
# if args is not None:
# for i, arg in enumerate(args):
# logging.info('===mprocessing f():[%s] arg[%s]={%s}',
# a_fn.__name__, i, arg)
# if __name__ == '__main__':
logging.debug('===Multiprocessing=== Setting up logger!')
# CODING No need for such low level debugging to stderr
# multiprocessing.log_to_stderr()
logger = multiprocessing.get_logger()
logger.setLevel(log_level)
logging.debug('===Multiprocessing=== Logging defined!')
# ---------------------------------------------------------
# chunk
#
# Divides an iterable in slices/chunks of size size
#
def chunk(iter_list, size):
"""
Divides an iterable in slices/chunks of size size
>>> for a in chunk([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3):
... len(a)
3
3
3
1
"""
iter_list = iter(iter_list)
# lambda: creates a returning expression function
# which returns slices
# iter, with the second argument () stops creating
# iterators when it reaches the end
return iter(lambda: tuple(islice(iter_list, size)), ())
proc_pool = []
lockdb = multiprocessing.Lock()
running = multiprocessing.Value('i', 0)
mutex = multiprocessing.Lock()
count_total = len(itemslist)
size = (len(itemslist) // int(nprocs)) \
if ((len(itemslist) // int(nprocs)) > 0) \
else 1
logging.debug('len(itemslist):[%s] int(nprocs):[%s] size per process:[%s]',
len(itemslist), int(nprocs), size)
    # Split itemslist in chunks to distribute across Processes
for splititemslist in chunk(itemslist, size):
logging.warning('===Actual/Planned Chunk size: [%s]/[%s]',
len(splititemslist), size)
logging.debug('===type(splititemslist)=[%s]', type(splititemslist))
logging.debug('===Job/Task Process: Creating...')
proc_task = multiprocessing.Process(
target=a_fn, # argument function
args=(lockdb,
running,
mutex,
splititemslist,
count_total,
cur,))
proc_pool.append(proc_task)
logging.debug('===Job/Task Process: Starting...')
proc_task.start()
NPR.niceprint('===Job/Task Process: [{!s}] Started '
'with pid:[{!s}]'
.format(proc_task.name,
proc_task.pid),
verbosity=3,
logalso=logging.DEBUG)
# Check status of jobs/tasks in the Process Pool
if log_level <= logging.DEBUG:
NPR.niceprint('===Checking Processes launched/status:',
verbosity=3, logalso=logging.DEBUG)
for j in proc_pool:
NPR.niceprint('{!s}.is_alive = {!s}'.format(j.name, j.is_alive()),
verbosity=3, logalso=logging.DEBUG)
# Regularly print status of jobs/tasks in the Process Pool
# Prints status while there are processes active
# Exits when all jobs/tasks are done.
while True:
if not any(multiprocessing.active_children()):
logging.debug('===No active children Processes.')
break
for prc in multiprocessing.active_children():
logging.debug('===%s.is_alive = %s', prc.name, prc.is_alive())
proc_task_active = prc
NPR.niceprint('===Will wait for 60 on {!s}.is_alive = {!s}'
.format(proc_task_active.name,
proc_task_active.is_alive()),
verbosity=3, logalso=logging.INFO)
proc_task_active.join(timeout=60)
NPR.niceprint('===Waited for 60s on '
'{!s}.is_alive = {!s}'
.format(proc_task_active.name,
proc_task_active.is_alive()),
verbosity=3, logalso=logging.INFO)
# Wait for join all jobs/tasks in the Process Pool
# All should be done by now!
for j in proc_pool:
j.join()
NPR.niceprint('==={!s} (is alive: {!s}).exitcode = {!s}'
.format(j.name, j.is_alive(), j.exitcode),
verbosity=2)
logging.warning('===Multiprocessing=== pool joined! '
'All processes finished.')
# Will release (set to None) the lockdb lock control
# this prevents subsequent calls to
# use_lock( nuLockDB, False)
    # from raising the exception:
# ValueError('semaphore or lock released too many times')
logging.info('===Multiprocessing=== pool joined! '
'Is lockdb None? [%s]. Setting lockdb to None anyhow.',
lockdb is None)
lockdb = None
# Show number of total files processed
NPR.niceprocessedfiles(running.value, count_total, | logging.debug('===Multiprocessing=== -->[ ].lock.acquire')
try:
if adb_lock.acquire():
use_dblock_return = True
except Exception:
NPR.niceerror(caught=True,
caughtprefix='+++ ',
caughtcode='002',
caughtmsg='Caught an exception lock.acquire',
useniceprint=True,
exceptsysinfo=True)
raise
logging.info('===Multiprocessing=== --->[v].lock.acquire') | conditional_block |
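# Hedged usage sketch (added for illustration; not part of the original module): how a
# caller could hand a worker function to mprocessing(). The worker must accept exactly
# the six arguments mprocessing() passes to a_fn. `process_items` and the sample item
# list are hypothetical; the uploader's real callers live elsewhere.
def process_items(lockdb, running, mutex, splititemslist, count_total, cur):
    for _item in splititemslist:
        # ... real per-item work (for example, an upload) would go here ...
        with mutex:
            running.value += 1

if __name__ == '__main__':
    sample_items = ['item-{0:03d}'.format(i) for i in range(10)]
    # lockdb/running/mutex are recreated inside mprocessing(), so None is fine here.
    mprocessing(nprocs=2, lockdb=None, running=None, mutex=None,
                itemslist=sample_items, a_fn=process_items, cur=None)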
mprocessing.py | Helper function to run function in multiprocessing mode
"""
# -----------------------------------------------------------------------------
# Import section for Python 2 and 3 compatible code
# from __future__ import absolute_import, division, print_function,
# unicode_literals
from __future__ import division # This way: 3 / 2 == 1.5; 3 // 2 == 1
# -----------------------------------------------------------------------------
# Import section
#
import logging
import multiprocessing
from itertools import islice
import lib.NicePrint as NicePrint
# =========================================================================
# Functions aliases
#
# NPR.NicePrint = from NicePrint module
# -------------------------------------------------------------------------
NPR = NicePrint.NicePrint()
# -------------------------------------------------------------------------
# use_lock
#
# Control use of DB lock. acquire/release
#
def use_lock(adb_lock, operation, nprocs=0):
| return use_dblock_return
logging.debug('use_lock: adb_lock.semlock:[%s].', adb_lock._semlock)
if operation is None:
return use_dblock_return
if (nprocs is not None) and (nprocs) and (nprocs > 0):
if operation:
# Control for when running multiprocessing set locking
logging.debug('===Multiprocessing=== -->[ ].lock.acquire')
try:
if adb_lock.acquire():
use_dblock_return = True
except Exception:
NPR.niceerror(caught=True,
caughtprefix='+++ ',
caughtcode='002',
caughtmsg='Caught an exception lock.acquire',
useniceprint=True,
exceptsysinfo=True)
raise
logging.info('===Multiprocessing=== --->[v].lock.acquire')
else:
# Control for when running multiprocessing release locking
logging.debug('===Multiprocessing=== <--[ ].lock.release')
try:
adb_lock.release()
use_dblock_return = True
except Exception:
NPR.niceerror(caught=True,
caughtprefix='+++ ',
caughtcode='003',
caughtmsg='Caught an exception lock.release',
useniceprint=True,
exceptsysinfo=True)
# Raise aborts execution
raise
logging.info('===Multiprocessing=== <--[v].lock.release')
logging.info('Exiting use_lock with operation:[%s]. Result:[%s]',
operation, use_dblock_return)
else:
use_dblock_return = True
logging.warning('(No multiprocessing. Nothing to do) '
'Exiting use_lock with operation:[%s]. Result:[%s]',
operation, use_dblock_return)
return use_dblock_return
# -----------------------------------------------------------------------------
# mprocessing
#
def mprocessing(nprocs, lockdb, running, mutex, itemslist, a_fn, cur):
""" mprocessing Function
nprocs = Number of processes to launch (int)
lockdb = lock for access to Database (lock obj to be created)
running = Value to count processed items (count obj to be created)
mutex = mutex for access to value running (obj to be created)
itemslist = list of items to be processed
a_fn = a function which is the target of the multiprocessing
        a_fn must cater for the following arguments
lockdb
running
mutex
            splititemslist = partial split list
count_total = len(itemslist)
cur
cur = cursor variable for DB access
"""
# proc_pool = Local variable proc_pool for Pool of processes
# log_level = log_level
# count_total = Total counter of items to distribute/play/indicate progress
# len(itemslist)
log_level = logging.getLogger().getEffectiveLevel()
logging.info('===mprocessing [%s] target_fn():[%s] nprocs:[%s]',
__name__, a_fn.__name__, nprocs)
# if log_level <= logging.WARNING:
# if args is not None:
# for i, arg in enumerate(args):
# logging.info('===mprocessing f():[%s] arg[%s]={%s}',
# a_fn.__name__, i, arg)
# if __name__ == '__main__':
logging.debug('===Multiprocessing=== Setting up logger!')
# CODING No need for such low level debugging to stderr
# multiprocessing.log_to_stderr()
logger = multiprocessing.get_logger()
logger.setLevel(log_level)
logging.debug('===Multiprocessing=== Logging defined!')
# ---------------------------------------------------------
# chunk
#
# Divides an iterable in slices/chunks of size size
#
def chunk(iter_list, size):
"""
Divides an iterable in slices/chunks of size size
>>> for a in chunk([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3):
... len(a)
3
3
3
1
"""
iter_list = iter(iter_list)
# lambda: creates a returning expression function
# which returns slices
# iter, with the second argument () stops creating
# iterators when it reaches the end
return iter(lambda: tuple(islice(iter_list, size)), ())
proc_pool = []
lockdb = multiprocessing.Lock()
running = multiprocessing.Value('i', 0)
mutex = multiprocessing.Lock()
count_total = len(itemslist)
size = (len(itemslist) // int(nprocs)) \
if ((len(itemslist) // int(nprocs)) > 0) \
else 1
logging.debug('len(itemslist):[%s] int(nprocs):[%s] size per process:[%s]',
len(itemslist), int(nprocs), size)
    # Split itemslist in chunks to distribute across Processes
for splititemslist in chunk(itemslist, size):
logging.warning('===Actual/Planned Chunk size: [%s]/[%s]',
len(splititemslist), size)
logging.debug('===type(splititemslist)=[%s]', type(splititemslist))
logging.debug('===Job/Task Process: Creating...')
proc_task = multiprocessing.Process(
target=a_fn, # argument function
args=(lockdb,
running,
mutex,
splititemslist,
count_total,
cur,))
proc_pool.append(proc_task)
logging.debug('===Job/Task Process: Starting...')
proc_task.start()
NPR.niceprint('===Job/Task Process: [{!s}] Started '
'with pid:[{!s}]'
.format(proc_task.name,
proc_task.pid),
verbosity=3,
logalso=logging.DEBUG)
# Check status of jobs/tasks in the Process Pool
if log_level <= logging.DEBUG:
NPR.niceprint('===Checking Processes launched/status:',
verbosity=3, logalso=logging.DEBUG)
for j in proc_pool:
NPR.niceprint('{!s}.is_alive = {!s}'.format(j.name, j.is_alive()),
verbosity=3, logalso=logging.DEBUG)
# Regularly print status of jobs/tasks in the Process Pool
# Prints status while there are processes active
# Exits when all jobs/tasks are done.
while True:
if not any(multiprocessing.active_children()):
logging.debug('===No active children Processes.')
break
for prc in multiprocessing.active_children():
logging.debug('===%s.is_alive = %s', prc.name, prc.is_alive())
proc_task_active = prc
NPR.niceprint('===Will wait for 60 on {!s}.is_alive = {!s}'
.format(proc_task_active.name,
proc_task_active.is_alive()),
verbosity=3, logalso=logging.INFO)
proc_task_active.join(timeout=60)
NPR.niceprint('===Waited for 60s on '
'{!s}.is_alive = {!s}'
.format(proc_task_active.name,
proc_task_active.is_alive()),
verbosity=3, logalso=logging.INFO)
# Wait for join all jobs/tasks in the Process Pool
# All should be done by now!
for j in proc_pool:
j.join()
NPR.niceprint('==={!s} (is alive: {!s}).exitcode = {!s}'
.format(j.name, j.is_alive(), j.exitcode),
verbosity=2)
logging.warning('===Multiprocessing=== pool joined! '
'All processes finished.')
# Will release (set to None) the lockdb lock control
# this prevents subsequent calls to
# use_lock( nuLockDB, False)
    # from raising the exception:
# ValueError('semaphore or lock released too many times')
logging.info('===Multiprocessing=== pool joined! '
'Is lockdb None? [%s]. Setting lockdb to None anyhow.',
lockdb is None)
lockdb = None
# Show number of total files processed
NPR.niceprocessedfiles(running.value, count_total, | """ use_lock
adb_lock = lock to be used
operation = True => Lock
= False => Release
nprocs = >0 when in multiprocessing mode
>>> alock = multiprocessing.Lock()
>>> use_lock(alock, True, 2)
True
>>> use_lock(alock, False, 2)
True
"""
use_dblock_return = False
logging.debug('Entering use_lock with operation:[%s].', operation)
if adb_lock is None:
logging.debug('use_lock: adb_lock is [None].') | identifier_body |
mprocessing.py | function to run function in multiprocessing mode
"""
# -----------------------------------------------------------------------------
# Import section for Python 2 and 3 compatible code
# from __future__ import absolute_import, division, print_function,
# unicode_literals
from __future__ import division # This way: 3 / 2 == 1.5; 3 // 2 == 1
# -----------------------------------------------------------------------------
# Import section
#
import logging
import multiprocessing
from itertools import islice
import lib.NicePrint as NicePrint
# =========================================================================
# Functions aliases
#
# NPR.NicePrint = from NicePrint module
# -------------------------------------------------------------------------
NPR = NicePrint.NicePrint()
# -------------------------------------------------------------------------
# use_lock
#
# Control use of DB lock. acquire/release
#
def use_lock(adb_lock, operation, nprocs=0):
""" use_lock
adb_lock = lock to be used
operation = True => Lock
= False => Release
nprocs = >0 when in multiprocessing mode
>>> alock = multiprocessing.Lock()
>>> use_lock(alock, True, 2)
True
>>> use_lock(alock, False, 2)
True
"""
use_dblock_return = False
logging.debug('Entering use_lock with operation:[%s].', operation)
if adb_lock is None:
logging.debug('use_lock: adb_lock is [None].')
return use_dblock_return
logging.debug('use_lock: adb_lock.semlock:[%s].', adb_lock._semlock)
if operation is None:
return use_dblock_return
if (nprocs is not None) and (nprocs) and (nprocs > 0):
if operation:
# Control for when running multiprocessing set locking
logging.debug('===Multiprocessing=== -->[ ].lock.acquire')
try:
if adb_lock.acquire():
use_dblock_return = True
except Exception:
NPR.niceerror(caught=True,
caughtprefix='+++ ',
caughtcode='002',
caughtmsg='Caught an exception lock.acquire',
useniceprint=True,
exceptsysinfo=True)
raise
logging.info('===Multiprocessing=== --->[v].lock.acquire')
else:
# Control for when running multiprocessing release locking
logging.debug('===Multiprocessing=== <--[ ].lock.release')
try:
adb_lock.release()
use_dblock_return = True
except Exception:
NPR.niceerror(caught=True,
caughtprefix='+++ ',
caughtcode='003',
caughtmsg='Caught an exception lock.release',
useniceprint=True,
exceptsysinfo=True)
# Raise aborts execution
raise
logging.info('===Multiprocessing=== <--[v].lock.release')
logging.info('Exiting use_lock with operation:[%s]. Result:[%s]',
operation, use_dblock_return)
else:
use_dblock_return = True
logging.warning('(No multiprocessing. Nothing to do) '
'Exiting use_lock with operation:[%s]. Result:[%s]',
operation, use_dblock_return)
return use_dblock_return
# -----------------------------------------------------------------------------
# mprocessing
#
def mprocessing(nprocs, lockdb, running, mutex, itemslist, a_fn, cur):
""" mprocessing Function
nprocs = Number of processes to launch (int)
lockdb = lock for access to Database (lock obj to be created)
running = Value to count processed items (count obj to be created)
mutex = mutex for access to value running (obj to be created)
itemslist = list of items to be processed
a_fn = a function which is the target of the multiprocessing
        a_fn must cater for the following arguments
lockdb
running
mutex
            splititemslist = partial split list
count_total = len(itemslist)
cur
cur = cursor variable for DB access
"""
# proc_pool = Local variable proc_pool for Pool of processes
# log_level = log_level
# count_total = Total counter of items to distribute/play/indicate progress
# len(itemslist)
log_level = logging.getLogger().getEffectiveLevel()
logging.info('===mprocessing [%s] target_fn():[%s] nprocs:[%s]',
__name__, a_fn.__name__, nprocs)
# if log_level <= logging.WARNING:
# if args is not None:
# for i, arg in enumerate(args):
# logging.info('===mprocessing f():[%s] arg[%s]={%s}',
# a_fn.__name__, i, arg)
# if __name__ == '__main__':
logging.debug('===Multiprocessing=== Setting up logger!') | logger = multiprocessing.get_logger()
logger.setLevel(log_level)
logging.debug('===Multiprocessing=== Logging defined!')
# ---------------------------------------------------------
# chunk
#
# Divides an iterable in slices/chunks of size size
#
def chunk(iter_list, size):
"""
Divides an iterable in slices/chunks of size size
>>> for a in chunk([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3):
... len(a)
3
3
3
1
"""
iter_list = iter(iter_list)
# lambda: creates a returning expression function
# which returns slices
# iter, with the second argument () stops creating
# iterators when it reaches the end
return iter(lambda: tuple(islice(iter_list, size)), ())
proc_pool = []
lockdb = multiprocessing.Lock()
running = multiprocessing.Value('i', 0)
mutex = multiprocessing.Lock()
count_total = len(itemslist)
size = (len(itemslist) // int(nprocs)) \
if ((len(itemslist) // int(nprocs)) > 0) \
else 1
logging.debug('len(itemslist):[%s] int(nprocs):[%s] size per process:[%s]',
len(itemslist), int(nprocs), size)
    # Split itemslist in chunks to distribute across Processes
for splititemslist in chunk(itemslist, size):
logging.warning('===Actual/Planned Chunk size: [%s]/[%s]',
len(splititemslist), size)
logging.debug('===type(splititemslist)=[%s]', type(splititemslist))
logging.debug('===Job/Task Process: Creating...')
proc_task = multiprocessing.Process(
target=a_fn, # argument function
args=(lockdb,
running,
mutex,
splititemslist,
count_total,
cur,))
proc_pool.append(proc_task)
logging.debug('===Job/Task Process: Starting...')
proc_task.start()
NPR.niceprint('===Job/Task Process: [{!s}] Started '
'with pid:[{!s}]'
.format(proc_task.name,
proc_task.pid),
verbosity=3,
logalso=logging.DEBUG)
# Check status of jobs/tasks in the Process Pool
if log_level <= logging.DEBUG:
NPR.niceprint('===Checking Processes launched/status:',
verbosity=3, logalso=logging.DEBUG)
for j in proc_pool:
NPR.niceprint('{!s}.is_alive = {!s}'.format(j.name, j.is_alive()),
verbosity=3, logalso=logging.DEBUG)
# Regularly print status of jobs/tasks in the Process Pool
# Prints status while there are processes active
# Exits when all jobs/tasks are done.
while True:
if not any(multiprocessing.active_children()):
logging.debug('===No active children Processes.')
break
for prc in multiprocessing.active_children():
logging.debug('===%s.is_alive = %s', prc.name, prc.is_alive())
proc_task_active = prc
NPR.niceprint('===Will wait for 60 on {!s}.is_alive = {!s}'
.format(proc_task_active.name,
proc_task_active.is_alive()),
verbosity=3, logalso=logging.INFO)
proc_task_active.join(timeout=60)
NPR.niceprint('===Waited for 60s on '
'{!s}.is_alive = {!s}'
.format(proc_task_active.name,
proc_task_active.is_alive()),
verbosity=3, logalso=logging.INFO)
# Wait for join all jobs/tasks in the Process Pool
# All should be done by now!
for j in proc_pool:
j.join()
NPR.niceprint('==={!s} (is alive: {!s}).exitcode = {!s}'
.format(j.name, j.is_alive(), j.exitcode),
verbosity=2)
logging.warning('===Multiprocessing=== pool joined! '
'All processes finished.')
# Will release (set to None) the lockdb lock control
# this prevents subsequent calls to
# use_lock( nuLockDB, False)
    # from raising the exception:
# ValueError('semaphore or lock released too many times')
logging.info('===Multiprocessing=== pool joined! '
'Is lockdb None? [%s]. Setting lockdb to None anyhow.',
lockdb is None)
lockdb = None
# Show number of total files processed
NPR.niceprocessedfiles(running.value, count_total, | # CODING No need for such low level debugging to stderr
# multiprocessing.log_to_stderr() | random_line_split |
mprocessing.py | function to run function in multiprocessing mode
"""
# -----------------------------------------------------------------------------
# Import section for Python 2 and 3 compatible code
# from __future__ import absolute_import, division, print_function,
# unicode_literals
from __future__ import division # This way: 3 / 2 == 1.5; 3 // 2 == 1
# -----------------------------------------------------------------------------
# Import section
#
import logging
import multiprocessing
from itertools import islice
import lib.NicePrint as NicePrint
# =========================================================================
# Functions aliases
#
# NPR.NicePrint = from NicePrint module
# -------------------------------------------------------------------------
NPR = NicePrint.NicePrint()
# -------------------------------------------------------------------------
# use_lock
#
# Control use of DB lock. acquire/release
#
def use_lock(adb_lock, operation, nprocs=0):
""" use_lock
adb_lock = lock to be used
operation = True => Lock
= False => Release
nprocs = >0 when in multiprocessing mode
>>> alock = multiprocessing.Lock()
>>> use_lock(alock, True, 2)
True
>>> use_lock(alock, False, 2)
True
"""
use_dblock_return = False
logging.debug('Entering use_lock with operation:[%s].', operation)
if adb_lock is None:
logging.debug('use_lock: adb_lock is [None].')
return use_dblock_return
logging.debug('use_lock: adb_lock.semlock:[%s].', adb_lock._semlock)
if operation is None:
return use_dblock_return
if (nprocs is not None) and (nprocs) and (nprocs > 0):
if operation:
# Control for when running multiprocessing set locking
logging.debug('===Multiprocessing=== -->[ ].lock.acquire')
try:
if adb_lock.acquire():
use_dblock_return = True
except Exception:
NPR.niceerror(caught=True,
caughtprefix='+++ ',
caughtcode='002',
caughtmsg='Caught an exception lock.acquire',
useniceprint=True,
exceptsysinfo=True)
raise
logging.info('===Multiprocessing=== --->[v].lock.acquire')
else:
# Control for when running multiprocessing release locking
logging.debug('===Multiprocessing=== <--[ ].lock.release')
try:
adb_lock.release()
use_dblock_return = True
except Exception:
NPR.niceerror(caught=True,
caughtprefix='+++ ',
caughtcode='003',
caughtmsg='Caught an exception lock.release',
useniceprint=True,
exceptsysinfo=True)
# Raise aborts execution
raise
logging.info('===Multiprocessing=== <--[v].lock.release')
logging.info('Exiting use_lock with operation:[%s]. Result:[%s]',
operation, use_dblock_return)
else:
use_dblock_return = True
logging.warning('(No multiprocessing. Nothing to do) '
'Exiting use_lock with operation:[%s]. Result:[%s]',
operation, use_dblock_return)
return use_dblock_return
# -----------------------------------------------------------------------------
# mprocessing
#
def | (nprocs, lockdb, running, mutex, itemslist, a_fn, cur):
""" mprocessing Function
nprocs = Number of processes to launch (int)
lockdb = lock for access to Database (lock obj to be created)
running = Value to count processed items (count obj to be created)
mutex = mutex for access to value running (obj to be created)
itemslist = list of items to be processed
a_fn = a function which is the target of the multiprocessing
a_fn must accept the following arguments
lockdb
running
mutex
splititemslist = partial split list
count_total = len(itemslist)
cur
cur = cursor variable for DB access
"""
# proc_pool = Local variable proc_pool for Pool of processes
# log_level = log_level
# count_total = Total counter of items to distribute/play/indicate progress
# len(itemslist)
log_level = logging.getLogger().getEffectiveLevel()
logging.info('===mprocessing [%s] target_fn():[%s] nprocs:[%s]',
__name__, a_fn.__name__, nprocs)
# if log_level <= logging.WARNING:
# if args is not None:
# for i, arg in enumerate(args):
# logging.info('===mprocessing f():[%s] arg[%s]={%s}',
# a_fn.__name__, i, arg)
# if __name__ == '__main__':
logging.debug('===Multiprocessing=== Setting up logger!')
# CODING No need for such low level debugging to stderr
# multiprocessing.log_to_stderr()
logger = multiprocessing.get_logger()
logger.setLevel(log_level)
logging.debug('===Multiprocessing=== Logging defined!')
# ---------------------------------------------------------
# chunk
#
# Divides an iterable into slices/chunks of the given size
#
def chunk(iter_list, size):
"""
Divides an iterable into slices/chunks of the given size
>>> for a in chunk([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3):
... len(a)
3
3
3
1
"""
iter_list = iter(iter_list)
# lambda: creates a returning expression function
# which returns slices
# iter, with the second argument () stops creating
# iterators when it reaches the end
return iter(lambda: tuple(islice(iter_list, size)), ())
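# Clarifying sketch (not part of the original code): the two-argument iter()
# above calls the lambda repeatedly and stops once it returns the sentinel (),
# so it behaves like this hypothetical generator:
#
#   def chunk_equivalent(iter_list, size):
#       it = iter(iter_list)
#       piece = tuple(islice(it, size))
#       while piece:
#           yield piece
#           piece = tuple(islice(it, size))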
proc_pool = []
lockdb = multiprocessing.Lock()
running = multiprocessing.Value('i', 0)
mutex = multiprocessing.Lock()
count_total = len(itemslist)
size = (len(itemslist) // int(nprocs)) \
if ((len(itemslist) // int(nprocs)) > 0) \
else 1
logging.debug('len(itemslist):[%s] int(nprocs):[%s] size per process:[%s]',
len(itemslist), int(nprocs), size)
# Split itemslist into chunks to distribute across Processes
for splititemslist in chunk(itemslist, size):
logging.warning('===Actual/Planned Chunk size: [%s]/[%s]',
len(splititemslist), size)
logging.debug('===type(splititemslist)=[%s]', type(splititemslist))
logging.debug('===Job/Task Process: Creating...')
proc_task = multiprocessing.Process(
target=a_fn, # argument function
args=(lockdb,
running,
mutex,
splititemslist,
count_total,
cur,))
proc_pool.append(proc_task)
logging.debug('===Job/Task Process: Starting...')
proc_task.start()
NPR.niceprint('===Job/Task Process: [{!s}] Started '
'with pid:[{!s}]'
.format(proc_task.name,
proc_task.pid),
verbosity=3,
logalso=logging.DEBUG)
# Check status of jobs/tasks in the Process Pool
if log_level <= logging.DEBUG:
NPR.niceprint('===Checking Processes launched/status:',
verbosity=3, logalso=logging.DEBUG)
for j in proc_pool:
NPR.niceprint('{!s}.is_alive = {!s}'.format(j.name, j.is_alive()),
verbosity=3, logalso=logging.DEBUG)
# Regularly print status of jobs/tasks in the Process Pool
# Prints status while there are processes active
# Exits when all jobs/tasks are done.
while True:
if not any(multiprocessing.active_children()):
logging.debug('===No active children Processes.')
break
for prc in multiprocessing.active_children():
logging.debug('===%s.is_alive = %s', prc.name, prc.is_alive())
proc_task_active = prc
NPR.niceprint('===Will wait for 60s on {!s}.is_alive = {!s}'
.format(proc_task_active.name,
proc_task_active.is_alive()),
verbosity=3, logalso=logging.INFO)
proc_task_active.join(timeout=60)
NPR.niceprint('===Waited for 60s on '
'{!s}.is_alive = {!s}'
.format(proc_task_active.name,
proc_task_active.is_alive()),
verbosity=3, logalso=logging.INFO)
# Wait for and join all jobs/tasks in the Process Pool
# All should be done by now!
for j in proc_pool:
j.join()
NPR.niceprint('==={!s} (is alive: {!s}).exitcode = {!s}'
.format(j.name, j.is_alive(), j.exitcode),
verbosity=2)
logging.warning('===Multiprocessing=== pool joined! '
'All processes finished.')
# Will release (set to None) the lockdb lock control
# this prevents subsequent calls to
# use_lock( nuLockDB, False)
# to raise exception:
# ValueError('semaphore or lock released too many times')
logging.info('===Multiprocessing=== pool joined! '
'Is lockdb None? [%s]. Setting lockdb to None anyhow.',
lockdb is None)
lockdb = None
# Show number of total files processed
NPR.niceprocessedfiles(running.value, count_total, | mprocessing | identifier_name |
aes.rs | 1b4ef5bcb3e92e21123e951cf6f8f188e"
),
expand_key(&vec![0; 16])
);
assert_eq!(
parse_byte_string(
"ffffffffffffffffffffffffffffffffe8e9e9e917161616e8e9e9e917161616adaeae19bab8b80f525151e6454747f0090e2277b3b69a78e1e7cb9ea4a08c6ee16abd3e52dc2746b33becd8179b60b6e5baf3ceb766d488045d385013c658e671d07db3c6b6a93bc2eb916bd12dc98de90d208d2fbb89b6ed5018dd3c7dd15096337366b988fad054d8e20d68a5335d8bf03f233278c5f366a027fe0e0514a3d60a3588e472f07b82d2d7858cd7c326"
),
expand_key(&vec![0xff; 16])
);
assert_eq!(
parse_byte_string(
"000102030405060708090a0b0c0d0e0fd6aa74fdd2af72fadaa678f1d6ab76feb692cf0b643dbdf1be9bc5006830b3feb6ff744ed2c2c9bf6c590cbf0469bf4147f7f7bc95353e03f96c32bcfd058dfd3caaa3e8a99f9deb50f3af57adf622aa5e390f7df7a69296a7553dc10aa31f6b14f9701ae35fe28c440adf4d4ea9c02647438735a41c65b9e016baf4aebf7ad2549932d1f08557681093ed9cbe2c974e13111d7fe3944a17f307a78b4d2b30c5"
),
expand_key(&parse_byte_string("000102030405060708090a0b0c0d0e0f"))
);
assert_eq!(
parse_byte_string(
"6920e299a5202a6d656e636869746f2afa8807605fa82d0d3ac64e6553b2214fcf75838d90ddae80aa1be0e5f9a9c1aa180d2f1488d0819422cb6171db62a0dbbaed96ad323d173910f67648cb94d693881b4ab2ba265d8baad02bc36144fd50b34f195d096944d6a3b96f15c2fd9245a7007778ae6933ae0dd05cbbcf2dcefeff8bccf251e2ff5c5c32a3e7931f6d1924b7182e7555e77229674495ba78298cae127cdadb479ba8f220df3d4858f6b1"
),
expand_key(&parse_byte_string("6920e299a5202a6d656e636869746f2a"))
);
}
#[test]
fn expand_key_24() {
assert_eq!(208, expand_key(&vec![0; 24]).len());
}
#[test]
fn expand_key_32() {
assert_eq!(240, expand_key(&vec![0; 32]).len());
}
fn add_round_key(state: &mut [u8], key: &[u8]) {
xor::buffer_mut(state, xor::Key::FullBuffer(key));
}
// Shifted by 0, 1, 2, 3 columns
const ROW_SHIFTS: [usize; 16] = [0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11];
fn shift_rows(state: &mut [u8]) {
let copy = util::convert_to_fixed_array(state);
for (index, e) in state.iter_mut().enumerate() {
*e = copy[ROW_SHIFTS[index]];
}
}
#[test]
fn test_shift_rows() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
shift_rows(&mut rows);
assert_eq!(
rows,
[1, 6, 11, 16, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12]
);
}
// Shifted by 0, -1, -2, -3 columns
const INV_ROW_SHIFTS: [usize; 16] = [0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3];
fn inv_shift_rows(state: &mut [u8]) {
let copy = util::convert_to_fixed_array(state);
for (index, e) in state.iter_mut().enumerate() {
*e = copy[INV_ROW_SHIFTS[index]];
}
}
#[test]
fn test_inv_shift_rows() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
inv_shift_rows(&mut rows);
assert_ne!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
assert_eq!(
rows,
[1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3, 16, 13, 10, 7, 4,]
);
}
#[test]
fn test_shift_rows_ident() | {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
shift_rows(&mut rows);
assert_ne!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
inv_shift_rows(&mut rows);
assert_eq!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
} | identifier_body |
|
aes.rs | 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D,
];
fn sbox(data: &mut [u8]) {
for e in data.iter_mut() {
*e = SBOX[*e as usize];
}
}
fn inv_sbox(data: &mut [u8]) {
for e in data.iter_mut() {
*e = INV_SBOX[*e as usize];
}
}
fn to_four_byte_array(data: &[u8]) -> [u8; 4] {
[data[0], data[1], data[2], data[3]]
}
enum KeyExpansionMode {
Xor,
Sbox,
Full,
}
fn generate_four_bytes(
key_length: usize,
expanded_key: &[u8],
rcon_iteration: &mut usize,
mode: KeyExpansionMode,
) -> [u8; 4] {
let i = expanded_key.len();
let source_bytes = &expanded_key[i - 4..i];
let mut t: [u8; 4] = to_four_byte_array(source_bytes);
match mode {
KeyExpansionMode::Xor => {}
KeyExpansionMode::Sbox => {
sbox(&mut t);
}
KeyExpansionMode::Full => {
t.rotate_left(1);
sbox(&mut t);
t[0].bitxor_assign(RCON[*rcon_iteration]);
*rcon_iteration += 1;
}
};
let xor_source = &expanded_key[i - key_length..i - key_length + 4];
xor::buffer_mut(&mut t, xor::Key::FullBuffer(xor_source));
t
}
fn | (key: &[u8]) -> Vec<u8> {
let key_length = key.len();
let (rounds, sbox_round, extra_expansions) = match key_length {
16 => (10, false, 0),
24 => (12, false, 2),
32 => (14, true, 3),
len => panic!("Unsupported key length {}", len),
};
let expanded_key_size = 16 * (rounds + 1);
let mut expanded_key = Vec::with_capacity(expanded_key_size);
expanded_key.extend_from_slice(&key);
let mut rcon_iteration = 1usize;
while expanded_key.len() < expanded_key_size {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Full,
);
expanded_key.extend(t.iter());
for _i in 0..3 {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Xor,
);
expanded_key.extend(t.iter());
}
if sbox_round {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Sbox,
);
expanded_key.extend(t.iter());
}
for _i in 0..extra_expansions {
let t = generate_four_bytes(
key_length,
&expanded_key,
&mut rcon_iteration,
KeyExpansionMode::Xor,
);
expanded_key.extend(t.iter());
}
}
// Truncate any extra bytes
expanded_key.resize(expanded_key_size, 0);
assert!(
expanded_key.len() == expanded_key_size,
"Expanded key is too long: {}",
expanded_key.len(),
);
expanded_key
}
#[test]
fn expand_key_16() {
use util::parse_byte_string;
assert_eq!(
parse_byte_string(
"00000000000000000000000000000000626363636263636362636363626363639b9898c9f9fbfbaa9b9898c9f9fbfbaa90973450696ccffaf | expand_key | identifier_name |
aes.rs | ] = [0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11];
fn shift_rows(state: &mut [u8]) {
let copy = util::convert_to_fixed_array(state);
for (index, e) in state.iter_mut().enumerate() {
*e = copy[ROW_SHIFTS[index]];
}
}
#[test]
fn test_shift_rows() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
shift_rows(&mut rows);
assert_eq!(
rows,
[1, 6, 11, 16, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12]
);
}
// Shifted by 0, -1, -2, -3 columns
const INV_ROW_SHIFTS: [usize; 16] = [0, 13, 10, 7, 4, 1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3];
fn inv_shift_rows(state: &mut [u8]) {
let copy = util::convert_to_fixed_array(state);
for (index, e) in state.iter_mut().enumerate() {
*e = copy[INV_ROW_SHIFTS[index]];
}
}
#[test]
fn test_inv_shift_rows() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
inv_shift_rows(&mut rows);
assert_ne!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
assert_eq!(
rows,
[1, 14, 11, 8, 5, 2, 15, 12, 9, 6, 3, 16, 13, 10, 7, 4,]
);
}
#[test]
fn test_shift_rows_ident() {
let mut rows = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
shift_rows(&mut rows);
assert_ne!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
inv_shift_rows(&mut rows);
assert_eq!(
rows,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
);
}
const COLUMN_MATRIX: [u8; 16] = [2, 3, 1, 1, 1, 2, 3, 1, 1, 1, 2, 3, 3, 1, 1, 2];
const INV_COLUMN_MATRIX: [u8; 16] = [14, 11, 13, 9, 9, 14, 11, 13, 13, 9, 14, 11, 11, 13, 9, 14];
fn gmul(mut a: u8, mut b: u8) -> u8 {
let mut p = 0;
for _ in 0..8 {
if (b & 0x1) != 0 {
p.bitxor_assign(a);
}
let has_high_bit = (a & 0x80) == 0x80;
a <<= 1;
if has_high_bit {
a.bitxor_assign(0x1b);
}
b >>= 1;
}
p
}
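// Clarifying sketch (not in the original file): gmul multiplies in GF(2^8)
// using the AES reduction polynomial x^8 + x^4 + x^3 + x + 1 (the 0x1b
// constant above). A minimal sanity check of its identities:
#[test]
fn test_gmul_identities_sketch() {
    for x in 0u8..=255 {
        assert_eq!(gmul(x, 1), x);
        assert_eq!(gmul(1, x), x);
        assert_eq!(gmul(x, 0), 0);
        assert_eq!(gmul(0, x), 0);
    }
}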
fn mix_column(matrix: &[u8; 16], state_column: &[u8]) -> Vec<u8> {
matrix
.chunks(4)
.map(|mc| {
mc.iter()
.enumerate()
.map(|(i, &coefficient)| gmul(coefficient, state_column[i]))
.fold(None, |accum, current| match accum {
None => Some(current),
Some(x) => Some(x.bitxor(current)),
})
.unwrap()
})
.collect()
}
#[test]
fn test_mix_column() {
use util::parse_byte_string;
assert_eq!(
parse_byte_string("8e4da1bc"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("db135345")),
);
assert_eq!(
parse_byte_string("9fdc589d"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("f20a225c")),
);
assert_eq!(
parse_byte_string("01010101"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("01010101")),
);
assert_eq!(
parse_byte_string("c6c6c6c6"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("c6c6c6c6")),
);
assert_eq!(
parse_byte_string("d5d5d7d6"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("d4d4d4d5")),
);
assert_eq!(
parse_byte_string("4d7ebdf8"),
mix_column(&COLUMN_MATRIX, &parse_byte_string("2d26314c")),
);
}
fn mix_columns(state: &mut [u8]) {
for column in state.chunks_mut(4) {
let new_column = mix_column(&COLUMN_MATRIX, column);
column.copy_from_slice(&new_column);
}
}
fn inv_mix_columns(state: &mut [u8]) {
for column in state.chunks_mut(4) {
let new_column = mix_column(&INV_COLUMN_MATRIX, column);
column.copy_from_slice(&new_column);
}
}
#[derive(Clone)]
pub enum CipherMode {
ECB,
CBC([u8; 16]),
}
fn transform_chunk(chunk: &[u8], expanded_key: &[u8], operation: Operation) -> [u8; 16] {
const STATE_SIZE: usize = 16;
assert!(
chunk.len() == STATE_SIZE,
"Chunk size of {} is invalid; expected {}",
chunk.len(),
STATE_SIZE
);
let last_round = expanded_key.chunks(STATE_SIZE).count() - 1;
let mut state = util::convert_to_fixed_array(chunk);
match operation {
Operation::Encrypt => {
for (round, round_key) in expanded_key.chunks(STATE_SIZE).enumerate() {
match round {
0 => {
add_round_key(&mut state, round_key);
}
n if n != last_round => {
sbox(&mut state);
shift_rows(&mut state);
mix_columns(&mut state);
add_round_key(&mut state, round_key);
}
_ => {
sbox(&mut state);
shift_rows(&mut state);
add_round_key(&mut state, round_key);
}
}
}
}
Operation::Decrypt => {
for (round, round_key) in expanded_key.chunks(STATE_SIZE).rev().enumerate() {
match round {
0 => {
add_round_key(&mut state, round_key);
}
n if n != last_round => {
inv_shift_rows(&mut state);
inv_sbox(&mut state);
add_round_key(&mut state, round_key);
inv_mix_columns(&mut state);
}
_ => {
inv_shift_rows(&mut state);
inv_sbox(&mut state);
add_round_key(&mut state, round_key);
}
}
}
}
};
state
}
#[derive(Clone, Copy)]
pub enum Operation {
Encrypt,
Decrypt,
}
trait CipherModeImpl {
fn transform_chunks(
&mut self,
data: &[u8],
transform: &(Fn(&[u8; 16]) -> [u8; 16] + Sync),
) -> Vec<u8>;
}
struct ECBCipherMode {}
struct CBCCipherMode {
initialization_vector: [u8; 16],
operation: Operation, | }
| random_line_split |
|
tcp.rs | , relay::socks5::Address, ServerAddr};
use super::{
sys::{set_tcp_fastopen, TcpStream as SysTcpStream},
AcceptOpts,
ConnectOpts,
};
/// TcpStream for outbound connections
#[pin_project]
pub struct TcpStream(#[pin] SysTcpStream);
impl TcpStream {
/// Connects to address
pub async fn connect_with_opts(addr: &SocketAddr, opts: &ConnectOpts) -> io::Result<TcpStream> {
// tcp_stream_connect(addr, opts).await.map(TcpStream)
SysTcpStream::connect(*addr, opts).await.map(TcpStream)
}
/// Connects shadowsocks server
pub async fn connect_server_with_opts(
context: &Context,
addr: &ServerAddr,
opts: &ConnectOpts,
) -> io::Result<TcpStream> {
let stream = match *addr {
ServerAddr::SocketAddr(ref addr) => SysTcpStream::connect(*addr, opts).await?,
ServerAddr::DomainName(ref domain, port) => {
lookup_then!(&context, &domain, port, |addr| {
SysTcpStream::connect(addr, opts).await
})?
.1
}
};
Ok(TcpStream(stream))
}
/// Connects proxy remote target
pub async fn connect_remote_with_opts(
context: &Context,
addr: &Address,
opts: &ConnectOpts,
) -> io::Result<TcpStream> {
let stream = match *addr {
Address::SocketAddress(ref addr) => SysTcpStream::connect(*addr, opts).await?,
Address::DomainNameAddress(ref domain, port) => {
lookup_then!(&context, &domain, port, |addr| {
SysTcpStream::connect(addr, opts).await
})?
.1
}
};
Ok(TcpStream(stream))
}
}
impl Deref for TcpStream {
type Target = TokioTcpStream;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for TcpStream {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl AsyncRead for TcpStream {
fn poll_read(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_read(cx, buf)
}
}
impl AsyncWrite for TcpStream {
fn poll_write(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
self.project().0.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TcpListener` for accepting inbound connections
pub struct TcpListener {
inner: TokioTcpListener,
accept_opts: AcceptOpts,
}
impl TcpListener {
/// Creates a new TcpListener, which will be bound to the specified address.
pub async fn bind_with_opts(addr: &SocketAddr, accept_opts: AcceptOpts) -> io::Result<TcpListener> {
let socket = match *addr {
SocketAddr::V4(..) => TcpSocket::new_v4()?,
SocketAddr::V6(..) => TcpSocket::new_v6()?,
};
// On platforms with Berkeley-derived sockets, this allows to quickly
// rebind a socket, without needing to wait for the OS to clean up the
// previous one.
//
// On Windows, this allows rebinding sockets which are actively in use,
// which allows “socket hijacking”, so we explicitly don't set it here.
// https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
#[cfg(not(windows))]
socket.set_reuseaddr(true)?;
let set_dual_stack = if let SocketAddr::V6(ref v6) = *addr {
v6.ip().is_unspecified()
} else {
false
};
if set_dual_stack {
// Set to DUAL STACK mode by default.
// WARNING: This would fail if you want to start another program listening on the same port.
//
// Should this behavior be configurable?
fn set_only_v6(socket: &TcpSocket, only_v6: bool) {
unsafe {
// WARN: If the following code panics, FD will be closed twice.
#[cfg(unix)]
let s = Socket::from_raw_fd(socket.as_raw_fd());
#[cfg(windows)]
let s = Socket::from_raw_socket(socket.as_raw_socket());
if let Err(err) = s.set_only_v6(only_v6) {
warn!("failed to set IPV6_V6ONLY: {} for listener, error: {}", only_v6, err);
// This is not a fatal error, just warn and skip
}
#[cfg(unix)]
let _ = s.into_raw_fd();
#[cfg(windows)]
let _ = s.into_raw_socket();
}
}
set_only_v6(&socket, false);
match socket.bind(*addr) {
Ok(..) => {}
Err(ref err) if err.kind() == ErrorKind::AddrInUse => { | debug!(
"0.0.0.0:{} may have already been occupied, retry with IPV6_V6ONLY",
addr.port()
);
set_only_v6(&socket, true);
socket.bind(*addr)?;
}
Err(err) => return Err(err),
}
} else {
socket.bind(*addr)?;
}
// mio's default backlog is 1024
let inner = socket.listen(1024)?;
// Enable TFO if supported
// macOS requires TCP_FASTOPEN to be set after listen(), but other platforms don't have this constraint
if accept_opts.tcp.fastopen {
set_tcp_fastopen(&inner)?;
}
Ok(TcpListener { inner, accept_opts })
}
/// Create a `TcpListener` from tokio's `TcpListener`
pub fn from_listener(listener: TokioTcpListener, accept_opts: AcceptOpts) -> TcpListener {
TcpListener {
inner: listener,
accept_opts,
}
}
/// Polls to accept a new incoming connection to this listener.
pub fn poll_accept(&self, cx: &mut task::Context<'_>) -> Poll<io::Result<(TokioTcpStream, SocketAddr)>> {
let (stream, peer_addr) = ready!(self.inner.poll_accept(cx))?;
setsockopt_with_opt(&stream, &self.accept_opts)?;
Poll::Ready(Ok((stream, peer_addr)))
}
/// Accept a new incoming connection to this listener
pub async fn accept(&self) -> io::Result<(TokioTcpStream, SocketAddr)> {
future::poll_fn(|cx| self.poll_accept(cx)).await
}
/// Unwraps and take the internal `TcpListener`
pub fn into_inner(self) -> TokioTcpListener {
self.inner
}
}
impl Deref for TcpListener {
type Target = TokioTcpListener;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for TcpListener {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl From<TcpListener> for TokioTcpListener {
fn from(listener: TcpListener) -> TokioTcpListener {
listener.inner
}
}
#[cfg(unix)]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
let socket = unsafe { Socket::from_raw_fd(f.as_raw_fd()) };
macro_rules! try_sockopt {
($socket:ident . $func:ident ($($arg:expr),*)) => {
match $socket . $func ($($arg),*) {
Ok(e) => e,
Err(err) => {
let _ = socket.into_raw_fd();
return Err(err);
}
}
};
}
if let Some(buf_size) = opts.tcp.send_buffer_size {
try_sockopt!(socket.set_send_buffer_size(buf_size as usize));
}
if let Some(buf_size) = opts.tcp.recv_buffer_size {
try_sockopt!(socket.set_recv_buffer_size(buf_size as usize));
}
try_sockopt!(socket.set_nodelay(opts.tcp.nodelay));
if let Some(keepalive_duration) = opts.tcp.keepalive {
#[allow(unused_mut)]
let mut keepalive = TcpKeepalive::new().with_time(keepalive_duration);
#[cfg(any(
target_os = "freebsd",
target_os = "fuchsia",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
))]
{
keepalive = keepalive.with_interval(keepalive_duration);
}
| // This is probably 0.0.0.0 with the same port has already been occupied | random_line_split |
tcp.rs | relay::socks5::Address, ServerAddr};
use super::{
sys::{set_tcp_fastopen, TcpStream as SysTcpStream},
AcceptOpts,
ConnectOpts,
};
/// TcpStream for outbound connections
#[pin_project]
pub struct TcpStream(#[pin] SysTcpStream);
impl TcpStream {
/// Connects to address
pub async fn connect_with_opts(addr: &SocketAddr, opts: &ConnectOpts) -> io::Result<TcpStream> {
// tcp_stream_connect(addr, opts).await.map(TcpStream)
SysTcpStream::connect(*addr, opts).await.map(TcpStream)
}
/// Connects shadowsocks server
pub async fn connect_server_with_opts(
context: &Context,
addr: &ServerAddr,
opts: &ConnectOpts,
) -> io::Result<TcpStream> {
let stream = match *addr {
ServerAddr::SocketAddr(ref addr) => SysTcpStream::connect(*addr, opts).await?,
ServerAddr::DomainName(ref domain, port) => {
lookup_then!(&context, &domain, port, |addr| {
SysTcpStream::connect(addr, opts).await
})?
.1
}
};
Ok(TcpStream(stream))
}
/// Connects proxy remote target
pub async fn connect_remote_with_opts(
context: &Context,
addr: &Address,
opts: &ConnectOpts,
) -> io::Result<TcpStream> {
let stream = match *addr {
Address::SocketAddress(ref addr) => SysTcpStream::connect(*addr, opts).await?,
Address::DomainNameAddress(ref domain, port) => {
lookup_then!(&context, &domain, port, |addr| {
SysTcpStream::connect(addr, opts).await
})?
.1
}
};
Ok(TcpStream(stream))
}
}
impl Deref for TcpStream {
type Target = TokioTcpStream;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for TcpStream {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl AsyncRead for TcpStream {
fn poll_read(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_read(cx, buf)
}
}
impl AsyncWrite for TcpStream {
fn poll_write(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
self.project().0.poll_write(cx, buf)
}
fn | (self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TcpListener` for accepting inbound connections
pub struct TcpListener {
inner: TokioTcpListener,
accept_opts: AcceptOpts,
}
impl TcpListener {
/// Creates a new TcpListener, which will be bound to the specified address.
pub async fn bind_with_opts(addr: &SocketAddr, accept_opts: AcceptOpts) -> io::Result<TcpListener> {
let socket = match *addr {
SocketAddr::V4(..) => TcpSocket::new_v4()?,
SocketAddr::V6(..) => TcpSocket::new_v6()?,
};
// On platforms with Berkeley-derived sockets, this allows to quickly
// rebind a socket, without needing to wait for the OS to clean up the
// previous one.
//
// On Windows, this allows rebinding sockets which are actively in use,
// which allows “socket hijacking”, so we explicitly don't set it here.
// https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
#[cfg(not(windows))]
socket.set_reuseaddr(true)?;
let set_dual_stack = if let SocketAddr::V6(ref v6) = *addr {
v6.ip().is_unspecified()
} else {
false
};
if set_dual_stack {
// Set to DUAL STACK mode by default.
// WARNING: This would fail if you want to start another program listening on the same port.
//
// Should this behavior be configurable?
fn set_only_v6(socket: &TcpSocket, only_v6: bool) {
unsafe {
// WARN: If the following code panics, FD will be closed twice.
#[cfg(unix)]
let s = Socket::from_raw_fd(socket.as_raw_fd());
#[cfg(windows)]
let s = Socket::from_raw_socket(socket.as_raw_socket());
if let Err(err) = s.set_only_v6(only_v6) {
warn!("failed to set IPV6_V6ONLY: {} for listener, error: {}", only_v6, err);
// This is not a fatal error, just warn and skip
}
#[cfg(unix)]
let _ = s.into_raw_fd();
#[cfg(windows)]
let _ = s.into_raw_socket();
}
}
set_only_v6(&socket, false);
match socket.bind(*addr) {
Ok(..) => {}
Err(ref err) if err.kind() == ErrorKind::AddrInUse => {
// This probably means 0.0.0.0 with the same port has already been occupied
debug!(
"0.0.0.0:{} may have already been occupied, retry with IPV6_V6ONLY",
addr.port()
);
set_only_v6(&socket, true);
socket.bind(*addr)?;
}
Err(err) => return Err(err),
}
} else {
socket.bind(*addr)?;
}
// mio's default backlog is 1024
let inner = socket.listen(1024)?;
// Enable TFO if supported
// macOS requires TCP_FASTOPEN to be set after listen(), but other platforms don't have this constraint
if accept_opts.tcp.fastopen {
set_tcp_fastopen(&inner)?;
}
Ok(TcpListener { inner, accept_opts })
}
/// Create a `TcpListener` from tokio's `TcpListener`
pub fn from_listener(listener: TokioTcpListener, accept_opts: AcceptOpts) -> TcpListener {
TcpListener {
inner: listener,
accept_opts,
}
}
/// Polls to accept a new incoming connection to this listener.
pub fn poll_accept(&self, cx: &mut task::Context<'_>) -> Poll<io::Result<(TokioTcpStream, SocketAddr)>> {
let (stream, peer_addr) = ready!(self.inner.poll_accept(cx))?;
setsockopt_with_opt(&stream, &self.accept_opts)?;
Poll::Ready(Ok((stream, peer_addr)))
}
/// Accept a new incoming connection to this listener
pub async fn accept(&self) -> io::Result<(TokioTcpStream, SocketAddr)> {
future::poll_fn(|cx| self.poll_accept(cx)).await
}
/// Unwraps and take the internal `TcpListener`
pub fn into_inner(self) -> TokioTcpListener {
self.inner
}
}
impl Deref for TcpListener {
type Target = TokioTcpListener;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for TcpListener {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl From<TcpListener> for TokioTcpListener {
fn from(listener: TcpListener) -> TokioTcpListener {
listener.inner
}
}
#[cfg(unix)]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
let socket = unsafe { Socket::from_raw_fd(f.as_raw_fd()) };
macro_rules! try_sockopt {
($socket:ident . $func:ident ($($arg:expr),*)) => {
match $socket . $func ($($arg),*) {
Ok(e) => e,
Err(err) => {
let _ = socket.into_raw_fd();
return Err(err);
}
}
};
}
if let Some(buf_size) = opts.tcp.send_buffer_size {
try_sockopt!(socket.set_send_buffer_size(buf_size as usize));
}
if let Some(buf_size) = opts.tcp.recv_buffer_size {
try_sockopt!(socket.set_recv_buffer_size(buf_size as usize));
}
try_sockopt!(socket.set_nodelay(opts.tcp.nodelay));
if let Some(keepalive_duration) = opts.tcp.keepalive {
#[allow(unused_mut)]
let mut keepalive = TcpKeepalive::new().with_time(keepalive_duration);
#[cfg(any(
target_os = "freebsd",
target_os = "fuchsia",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
))]
{
keepalive = keepalive.with_interval(keepalive_duration);
| poll_flush | identifier_name |
tcp.rs | }
};
Ok(TcpStream(stream))
}
}
impl Deref for TcpStream {
type Target = TokioTcpStream;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for TcpStream {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl AsyncRead for TcpStream {
fn poll_read(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &mut ReadBuf<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_read(cx, buf)
}
}
impl AsyncWrite for TcpStream {
fn poll_write(self: Pin<&mut Self>, cx: &mut task::Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
self.project().0.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_flush(cx)
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TcpListener` for accepting inbound connections
pub struct TcpListener {
inner: TokioTcpListener,
accept_opts: AcceptOpts,
}
impl TcpListener {
/// Creates a new TcpListener, which will be bound to the specified address.
pub async fn bind_with_opts(addr: &SocketAddr, accept_opts: AcceptOpts) -> io::Result<TcpListener> {
let socket = match *addr {
SocketAddr::V4(..) => TcpSocket::new_v4()?,
SocketAddr::V6(..) => TcpSocket::new_v6()?,
};
// On platforms with Berkeley-derived sockets, this allows to quickly
// rebind a socket, without needing to wait for the OS to clean up the
// previous one.
//
// On Windows, this allows rebinding sockets which are actively in use,
// which allows “socket hijacking”, so we explicitly don't set it here.
// https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
#[cfg(not(windows))]
socket.set_reuseaddr(true)?;
let set_dual_stack = if let SocketAddr::V6(ref v6) = *addr {
v6.ip().is_unspecified()
} else {
false
};
if set_dual_stack {
// Set to DUAL STACK mode by default.
// WARNING: This would fail if you want to start another program listening on the same port.
//
// Should this behavior be configurable?
fn set_only_v6(socket: &TcpSocket, only_v6: bool) {
unsafe {
// WARN: If the following code panics, FD will be closed twice.
#[cfg(unix)]
let s = Socket::from_raw_fd(socket.as_raw_fd());
#[cfg(windows)]
let s = Socket::from_raw_socket(socket.as_raw_socket());
if let Err(err) = s.set_only_v6(only_v6) {
warn!("failed to set IPV6_V6ONLY: {} for listener, error: {}", only_v6, err);
// This is not a fatal error, just warn and skip
}
#[cfg(unix)]
let _ = s.into_raw_fd();
#[cfg(windows)]
let _ = s.into_raw_socket();
}
}
set_only_v6(&socket, false);
match socket.bind(*addr) {
Ok(..) => {}
Err(ref err) if err.kind() == ErrorKind::AddrInUse => {
// This probably means 0.0.0.0 with the same port has already been occupied
debug!(
"0.0.0.0:{} may have already been occupied, retry with IPV6_V6ONLY",
addr.port()
);
set_only_v6(&socket, true);
socket.bind(*addr)?;
}
Err(err) => return Err(err),
}
} else {
socket.bind(*addr)?;
}
// mio's default backlog is 1024
let inner = socket.listen(1024)?;
// Enable TFO if supported
// macOS requires TCP_FASTOPEN to be set after listen(), but other platforms don't have this constraint
if accept_opts.tcp.fastopen {
set_tcp_fastopen(&inner)?;
}
Ok(TcpListener { inner, accept_opts })
}
/// Create a `TcpListener` from tokio's `TcpListener`
pub fn from_listener(listener: TokioTcpListener, accept_opts: AcceptOpts) -> TcpListener {
TcpListener {
inner: listener,
accept_opts,
}
}
/// Polls to accept a new incoming connection to this listener.
pub fn poll_accept(&self, cx: &mut task::Context<'_>) -> Poll<io::Result<(TokioTcpStream, SocketAddr)>> {
let (stream, peer_addr) = ready!(self.inner.poll_accept(cx))?;
setsockopt_with_opt(&stream, &self.accept_opts)?;
Poll::Ready(Ok((stream, peer_addr)))
}
/// Accept a new incoming connection to this listener
pub async fn accept(&self) -> io::Result<(TokioTcpStream, SocketAddr)> {
future::poll_fn(|cx| self.poll_accept(cx)).await
}
/// Unwraps and take the internal `TcpListener`
pub fn into_inner(self) -> TokioTcpListener {
self.inner
}
}
impl Deref for TcpListener {
type Target = TokioTcpListener;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for TcpListener {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl From<TcpListener> for TokioTcpListener {
fn from(listener: TcpListener) -> TokioTcpListener {
listener.inner
}
}
#[cfg(unix)]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
let socket = unsafe { Socket::from_raw_fd(f.as_raw_fd()) };
macro_rules! try_sockopt {
($socket:ident . $func:ident ($($arg:expr),*)) => {
match $socket . $func ($($arg),*) {
Ok(e) => e,
Err(err) => {
let _ = socket.into_raw_fd();
return Err(err);
}
}
};
}
if let Some(buf_size) = opts.tcp.send_buffer_size {
try_sockopt!(socket.set_send_buffer_size(buf_size as usize));
}
if let Some(buf_size) = opts.tcp.recv_buffer_size {
try_sockopt!(socket.set_recv_buffer_size(buf_size as usize));
}
try_sockopt!(socket.set_nodelay(opts.tcp.nodelay));
if let Some(keepalive_duration) = opts.tcp.keepalive {
#[allow(unused_mut)]
let mut keepalive = TcpKeepalive::new().with_time(keepalive_duration);
#[cfg(any(
target_os = "freebsd",
target_os = "fuchsia",
target_os = "linux",
target_os = "netbsd",
target_vendor = "apple",
))]
{
keepalive = keepalive.with_interval(keepalive_duration);
}
try_sockopt!(socket.set_tcp_keepalive(&keepalive));
}
let _ = socket.into_raw_fd();
Ok(())
}
#[cfg(windows)]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
let socket = unsafe { Socket::from_raw_socket(f.as_raw_socket()) };
macro_rules! try_sockopt {
($socket:ident . $func:ident ($($arg:expr),*)) => {
match $socket . $func ($($arg),*) {
Ok(e) => e,
Err(err) => {
let _ = socket.into_raw_socket();
return Err(err);
}
}
};
}
if let Some(buf_size) = opts.tcp.send_buffer_size {
try_sockopt!(socket.set_send_buffer_size(buf_size as usize));
}
if let Some(buf_size) = opts.tcp.recv_buffer_size {
try_sockopt!(socket.set_recv_buffer_size(buf_size as usize));
}
try_sockopt!(socket.set_nodelay(opts.tcp.nodelay));
if let Some(keepalive_duration) = opts.tcp.keepalive {
let keepalive = TcpKeepalive::new()
.with_time(keepalive_duration)
.with_interval(keepalive_duration);
try_sockopt!(socket.set_tcp_keepalive(&keepalive));
}
let _ = socket.into_raw_socket();
Ok(())
}
#[cfg(all(not(windows), not(unix)))]
fn setsockopt_with_opt(f: &tokio::net::TcpStream, opts: &AcceptOpts) -> io::Result<()> {
f.set_nodelay(opts.tcp.nodelay)?;
Ok(())
}
#[cfg(unix)]
impl AsRawFd for TcpStream {
fn as_raw_fd(&self) -> RawFd {
| self.0.as_raw_fd()
}
}
| identifier_body |
|
signature.go | (Disclosure []byte) []int {
HiddenIndices := make([]int, 0)
for index, disclose := range Disclosure {
if disclose == 0 {
HiddenIndices = append(HiddenIndices, index)
}
}
return HiddenIndices
}
// NewSignature creates a new idemix signature (Schnorr-type signature)
// The []byte Disclosure steers which attributes are disclosed:
// if Disclosure[i] == 0 then attribute i remains hidden and otherwise it is disclosed.
// We use the zero-knowledge proof by http://eprint.iacr.org/2016/663.pdf to prove knowledge of a BBS+ signature
// We use the zero-knowledge proof by https://link.springer.com/content/pdf/10.1007%2F978-3-540-28628-8_3.pdf to prove knowledge of the user's secret key.
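// Illustration (added for clarity, not part of the original contract): with four
// attributes and Disclosure = []byte{1, 0, 1, 0}, attributes 0 and 2 are disclosed,
// while hiddenIndices(Disclosure) returns []int{1, 3}, so the proof below treats
// attributes 1 and 3 as hidden.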
func NewSignature(cred *Credential, sk *FP256BN.BIG, Nym *FP256BN.ECP, RNym *FP256BN.BIG, ipk *IssuerPublicKey, gpk *GroupPublicKey, uk *UserKey, Disclosure []byte, msg []byte, rng *amcl.RAND) (*Signature, error) {
if cred == nil || sk == nil || Nym == nil || RNym == nil || ipk == nil || rng == nil {
return nil, errors.Errorf("cannot create idemix signature: received nil input")
}
HiddenIndices := hiddenIndices(Disclosure)
// Start sig
r1 := RandModOrder(rng)
r2 := RandModOrder(rng)
r3 := FP256BN.NewBIGcopy(r1)
r3.Invmodp(GroupOrder)
Nonce := RandModOrder(rng)
A := EcpFromProto(cred.A)
B := EcpFromProto(cred.B)
APrime := FP256BN.G1mul(A, r1) // A' = A^{r1}
ABar := FP256BN.G1mul(B, r1)
ABar.Sub(FP256BN.G1mul(APrime, FP256BN.FromBytes(cred.E))) // barA = A'^{-e} b^{r1}
BPrime := FP256BN.G1mul(B, r1)
HRand := EcpFromProto(ipk.HRand)
HSk := EcpFromProto(ipk.HSk)
BPrime.Sub(FP256BN.G1mul(HRand, r2)) // b' = b^{r1} h_r^{-r2}
S := FP256BN.FromBytes(cred.S)
E := FP256BN.FromBytes(cred.E)
sPrime := Modsub(S, FP256BN.Modmul(r2, r3, GroupOrder), GroupOrder)
// Construct ZK proof
rSk := RandModOrder(rng)
re := RandModOrder(rng)
rR2 := RandModOrder(rng)
rR3 := RandModOrder(rng)
rSPrime := RandModOrder(rng)
rRNym := RandModOrder(rng)
rAttrs := make([]*FP256BN.BIG, len(HiddenIndices))
for i := range HiddenIndices {
rAttrs[i] = RandModOrder(rng)
}
t1 := APrime.Mul2(re, HRand, rR2)
t2 := FP256BN.G1mul(HRand, rSPrime)
t2.Add(BPrime.Mul2(rR3, HSk, rSk))
for i := 0; i < len(HiddenIndices)/2; i++ {
t2.Add(EcpFromProto(ipk.HAttrs[HiddenIndices[2*i]]).Mul2(rAttrs[2*i], EcpFromProto(ipk.HAttrs[HiddenIndices[2*i+1]]), rAttrs[2*i+1]))
}
if len(HiddenIndices)%2 != 0 {
t2.Add(FP256BN.G1mul(EcpFromProto(ipk.HAttrs[HiddenIndices[len(HiddenIndices)-1]]), rAttrs[len(HiddenIndices)-1]))
}
t3 := HSk.Mul2(rSk, HRand, rRNym)
//Zk proof of BBS group signature
alpha := RandModOrder(rng)
beta := RandModOrder(rng)
rAlpha := RandModOrder(rng)
rBeta := RandModOrder(rng)
rx := RandModOrder(rng)
rDelta1 := RandModOrder(rng)
rDelta2 := RandModOrder(rng)
temp := Modadd(alpha,beta,GroupOrder)
temp1 := Modadd(rAlpha,rBeta,GroupOrder)
temp2 := Modadd(rDelta1,rDelta2,GroupOrder)
U := EcpFromProto(gpk.U)
V := EcpFromProto(gpk.V)
H := EcpFromProto(gpk.H)
Ax := EcpFromProto(uk.UK2)
W := Ecp2FromProto(ipk.W)
T1 := FP256BN.G1mul(U,alpha)
T2 := FP256BN.G1mul(V,beta)
T3 := FP256BN.G1mul(H,temp)
T3.Add(Ax)
R1 := FP256BN.G1mul(U,rAlpha)
R2 := FP256BN.G1mul(V,rBeta)
//compute pairing
Grx := GenG2.Mul(rx)
T4 := H.Mul(temp)
e1 := FP256BN.Ate2(Grx,Ax,Grx,T4)
Htp1 := H.Mul(temp1)
e2 := FP256BN.Ate(W,Htp1)
e2.Inverse()
e2.Mul(e1)
Htp2 := H.Mul(temp2)
e4 := FP256BN.Ate(GenG2,Htp2)
e4.Inverse()
e4.Mul(e2)
R3 := FP256BN.Fexp(e4)
R4 := FP256BN.G1mul(T1,rx)
R4.Sub(U.Mul(rDelta1))
R5 := FP256BN.G1mul(T2,rx)
R5.Sub(V.Mul(rDelta2))
// proofData is the data being hashed, it consists of:
// the signature label
// 14 elements of G1 each taking 2*FieldBytes+1 bytes and 1 element of GT taking 12*FieldBytes
// one bigint (hash of the issuer public key) of length FieldBytes
// disclosed attributes
// message being signed
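// Worked size check (illustrative; assumes FieldBytes = 32, the usual value for
// FP256BN; the real constant is defined elsewhere in this package):
// len(signLabel) + 14*(2*32+1) + 13*32 + len(Disclosure) + len(msg)
// = len(signLabel) + 910 + 416 + len(Disclosure) + len(msg) bytes,
// i.e. 14 G1 points, one GT element (12*FieldBytes) plus the FieldBytes-long
// issuer public key hash, then the disclosure bitmap and the message.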
proofData := make([]byte, len([]byte(signLabel))+14*(2*FieldBytes+1)+13*FieldBytes+len(Disclosure)+len(msg))
index := 0
index = appendBytesString(proofData, index, signLabel)
index = appendBytesG1(proofData, index, t1)
index = appendBytesG1(proofData, index, t2)
index = appendBytesG1(proofData, index, t3)
index = appendBytesG1(proofData, index, APrime)
index = appendBytesG1(proofData, index, ABar)
index = appendBytesG1(proofData, index, BPrime)
index = appendBytesG1(proofData, index, Nym)
index = appendBytesG1(proofData, index, T1)
index = appendBytesG1(proofData, index, T2)
index = appendBytesG1(proofData, index, T3)
index = appendBytesG1(proofData, index, R1)
index = appendBytesG1(proofData, index, R2)
index = appendBytesGT(proofData, index, R3)
index = appendBytesG1(proofData, index, R4)
index = appendBytesG1(proofData, index, R5)
copy(proofData[index:], ipk.Hash)
index = index + FieldBytes
copy(proofData[index:], Disclosure)
index = index + len(Disclosure)
copy(proofData[index:], msg)
c := HashModOrder(proofData)
// add the previous hash and the nonce and hash again to compute a second hash (C value)
index = 0
proofData = proofData[:2*FieldBytes]
index = appendBytesBig(proofData, index, c)
index = appendBytesBig(proofData, index, Nonce)
ProofC := HashModOrder(proofData)
ProofSSk := Modadd(rSk, FP256BN.Modmul(ProofC, sk, GroupOrder), GroupOrder)
ProofSE := Modsub(re, FP256BN.Modmul(ProofC, E, GroupOrder), GroupOrder)
ProofSR2 := Modadd(rR2, FP256BN.Modmul(ProofC, r2, GroupOrder), GroupOrder)
ProofSR3 := Modsub(rR3, FP256BN.Modmul(ProofC, r3, GroupOrder), GroupOrder)
ProofSSPrime := Modadd(rSPrime, FP256BN.Modmul(ProofC, sPrime, GroupOrder), GroupOrder)
ProofSRNym := Modadd(rRNym, FP256BN.Modmul(ProofC, RNym, GroupOrder), GroupOrder)
//compute s-values for zk proof of group signature
ProofSalpha := Modadd(rAlpha,FP256BN.Modmul(ProofC,alpha,Group | hiddenIndices | identifier_name |
|
signature.go |
HiddenIndices := hiddenIndices(Disclosure)
// Start sig
r1 := RandModOrder(rng)
r2 := RandModOrder(rng)
r3 := FP256BN.NewBIGcopy(r1)
r3.Invmodp(GroupOrder)
Nonce := RandModOrder(rng)
A := EcpFromProto(cred.A)
B := EcpFromProto(cred.B)
APrime := FP256BN.G1mul(A, r1) // A' = A^{r1}
ABar := FP256BN.G1mul(B, r1)
ABar.Sub(FP256BN.G1mul(APrime, FP256BN.FromBytes(cred.E))) // barA = A'^{-e} b^{r1}
BPrime := FP256BN.G1mul(B, r1)
HRand := EcpFromProto(ipk.HRand)
HSk := EcpFromProto(ipk.HSk)
BPrime.Sub(FP256BN.G1mul(HRand, r2)) // b' = b^{r1} h_r^{-r2}
S := FP256BN.FromBytes(cred.S)
E := FP256BN.FromBytes(cred.E)
sPrime := Modsub(S, FP256BN.Modmul(r2, r3, GroupOrder), GroupOrder)
// Construct ZK proof
rSk := RandModOrder(rng)
re := RandModOrder(rng)
rR2 := RandModOrder(rng)
rR3 := RandModOrder(rng)
rSPrime := RandModOrder(rng)
rRNym := RandModOrder(rng)
rAttrs := make([]*FP256BN.BIG, len(HiddenIndices))
for i := range HiddenIndices {
rAttrs[i] = RandModOrder(rng)
}
t1 := APrime.Mul2(re, HRand, rR2)
t2 := FP256BN.G1mul(HRand, rSPrime)
t2.Add(BPrime.Mul2(rR3, HSk, rSk))
for i := 0; i < len(HiddenIndices)/2; i++ {
t2.Add(EcpFromProto(ipk.HAttrs[HiddenIndices[2*i]]).Mul2(rAttrs[2*i], EcpFromProto(ipk.HAttrs[HiddenIndices[2*i+1]]), rAttrs[2*i+1]))
}
if len(HiddenIndices)%2 != 0 {
t2.Add(FP256BN.G1mul(EcpFromProto(ipk.HAttrs[HiddenIndices[len(HiddenIndices)-1]]), rAttrs[len(HiddenIndices)-1]))
}
t3 := HSk.Mul2(rSk, HRand, rRNym)
//Zk proof of BBS group signature
alpha := RandModOrder(rng)
beta := RandModOrder(rng)
rAlpha := RandModOrder(rng)
rBeta := RandModOrder(rng)
rx := RandModOrder(rng)
rDelta1 := RandModOrder(rng)
rDelta2 := RandModOrder(rng)
temp := Modadd(alpha,beta,GroupOrder)
temp1 := Modadd(rAlpha,rBeta,GroupOrder)
temp2 := Modadd(rDelta1,rDelta2,GroupOrder)
U := EcpFromProto(gpk.U)
V := EcpFromProto(gpk.V)
H := EcpFromProto(gpk.H)
Ax := EcpFromProto(uk.UK2)
W := Ecp2FromProto(ipk.W)
T1 := FP256BN.G1mul(U,alpha)
T2 := FP256BN.G1mul(V,beta)
T3 := FP256BN.G1mul(H,temp)
T3.Add(Ax)
R1 := FP256BN.G1mul(U,rAlpha)
R2 := FP256BN.G1mul(V,rBeta)
//compute pairing
Grx := GenG2.Mul(rx)
T4 := H.Mul(temp)
e1 := FP256BN.Ate2(Grx,Ax,Grx,T4)
Htp1 := H.Mul(temp1)
e2 := FP256BN.Ate(W,Htp1)
e2.Inverse()
e2.Mul(e1)
Htp2 := H.Mul(temp2)
e4 := FP256BN.Ate(GenG2,Htp2)
e4.Inverse()
e4.Mul(e2)
R3 := FP256BN.Fexp(e4)
R4 := FP256BN.G1mul(T1,rx)
R4.Sub(U.Mul(rDelta1))
R5 := FP256BN.G1mul(T2,rx)
R5.Sub(V.Mul(rDelta2))
// proofData is the data being hashed, it consists of:
// the signature label
// 14 elements of G1 each taking 2*FieldBytes+1 bytes and 1 element of GT taking 12*FieldBytes
// one bigint (hash of the issuer public key) of length FieldBytes
// disclosed attributes
// message being signed
proofData := make([]byte, len([]byte(signLabel))+14*(2*FieldBytes+1)+13*FieldBytes+len(Disclosure)+len(msg))
index := 0
index = appendBytesString(proofData, index, signLabel)
index = appendBytesG1(proofData, index, t1)
index = appendBytesG1(proofData, index, t2)
index = appendBytesG1(proofData, index, t3)
index = appendBytesG1(proofData, index, APrime)
index = appendBytesG1(proofData, index, ABar)
index = appendBytesG1(proofData, index, BPrime)
index = appendBytesG1(proofData, index, Nym)
index = appendBytesG1(proofData, index, T1)
index = appendBytesG1(proofData, index, T2)
index = appendBytesG1(proofData, index, T3)
index = appendBytesG1(proofData, index, R1)
index = appendBytesG1(proofData, index, R2)
index = appendBytesGT(proofData, index, R3)
index = appendBytesG1(proofData, index, R4)
index = appendBytesG1(proofData, index, R5)
copy(proofData[index:], ipk.Hash)
index = index + FieldBytes
copy(proofData[index:], Disclosure)
index = index + len(Disclosure)
copy(proofData[index:], msg)
c := HashModOrder(proofData)
// add the previous hash and the nonce and hash again to compute a second hash (C value)
index = 0
proofData = proofData[:2*FieldBytes]
index = appendBytesBig(proofData, index, c)
index = appendBytesBig(proofData, index, Nonce)
ProofC := HashModOrder(proofData)
ProofSSk := Modadd(rSk, FP256BN.Modmul(ProofC, sk, GroupOrder), GroupOrder)
ProofSE := Modsub(re, FP256BN.Modmul(ProofC, E, GroupOrder), GroupOrder)
ProofSR2 := Modadd(rR2, FP256BN.Modmul(ProofC, r2, GroupOrder), GroupOrder)
ProofSR3 := Modsub(rR3, FP256BN.Modmul(ProofC, r3, GroupOrder), GroupOrder)
ProofSSPrime := Modadd(rSPrime, FP256BN.Modmul(ProofC, sPrime, GroupOrder), GroupOrder)
ProofSRNym := Modadd(rRNym, FP256BN.Modmul(ProofC, RNym, GroupOrder), GroupOrder)
//compute s-values for zk proof of group signature
ProofSalpha := Modadd(rAlpha,FP256BN.Modmul(ProofC,alpha,GroupOrder),GroupOrder)
ProofSbeta := Modadd(rBeta,FP256BN.Modmul(ProofC,beta,GroupOrder),GroupOrder)
x := FP256BN.FromBytes(uk.UK1)
ProofSx := Modadd(rx,FP256BN.Modmul(ProofC,x,GroupOrder),GroupOrder)
delta1 := FP256BN.Modmul(alpha,x,GroupOrder)
ProofSdelta1 := Modadd(rDelta1,FP256BN.Modmul(ProofC,delta1,GroupOrder),GroupOrder)
delta2 := FP256BN.Modmul(beta,x,GroupOrder)
ProofSdelta2 := Modadd(rDelta2,FP256BN.Modmul(ProofC,delta2,GroupOrder),GroupOrder)
ProofSAttrs := make([][]byte, len(HiddenIndices))
for i, j := range HiddenIndices {
ProofSAttrs[i] = BigToBytes(Modadd(rAttrs[i], FP256BN.Modmul(ProofC, FP256BN.FromBytes(cred.Attrs[j]), GroupOrder), GroupOrder))
}
return &Signature{
EcpToProto(APrime),
EcpToProto(ABar),
EcpToProto(BPrime),
EcpToProto(T1),
EcpToProto(T2),
EcpToProto(T3),
BigToBytes( | {
return nil, errors.Errorf("cannot create idemix signature: received nil input")
} | conditional_block |
|
signature.go | (EcpFromProto(ipk.HAttrs[HiddenIndices[2*i]]).Mul2(rAttrs[2*i], EcpFromProto(ipk.HAttrs[HiddenIndices[2*i+1]]), rAttrs[2*i+1]))
}
if len(HiddenIndices)%2 != 0 {
t2.Add(FP256BN.G1mul(EcpFromProto(ipk.HAttrs[HiddenIndices[len(HiddenIndices)-1]]), rAttrs[len(HiddenIndices)-1]))
}
t3 := HSk.Mul2(rSk, HRand, rRNym)
//Zk proof of BBS group signature
alpha := RandModOrder(rng)
beta := RandModOrder(rng)
rAlpha := RandModOrder(rng)
rBeta := RandModOrder(rng)
rx := RandModOrder(rng)
rDelta1 := RandModOrder(rng)
rDelta2 := RandModOrder(rng)
temp := Modadd(alpha,beta,GroupOrder)
temp1 := Modadd(rAlpha,rBeta,GroupOrder)
temp2 := Modadd(rDelta1,rDelta2,GroupOrder)
U := EcpFromProto(gpk.U)
V := EcpFromProto(gpk.V)
H := EcpFromProto(gpk.H)
Ax := EcpFromProto(uk.UK2)
W := Ecp2FromProto(ipk.W)
T1 := FP256BN.G1mul(U,alpha)
T2 := FP256BN.G1mul(V,beta)
T3 := FP256BN.G1mul(H,temp)
T3.Add(Ax)
R1 := FP256BN.G1mul(U,rAlpha)
R2 := FP256BN.G1mul(V,rBeta)
//compute pairing
Grx := GenG2.Mul(rx)
T4 := H.Mul(temp)
e1 := FP256BN.Ate2(Grx,Ax,Grx,T4)
Htp1 := H.Mul(temp1)
e2 := FP256BN.Ate(W,Htp1)
e2.Inverse()
e2.Mul(e1)
Htp2 := H.Mul(temp2)
e4 := FP256BN.Ate(GenG2,Htp2)
e4.Inverse()
e4.Mul(e2)
R3 := FP256BN.Fexp(e4)
R4 := FP256BN.G1mul(T1,rx)
R4.Sub(U.Mul(rDelta1))
R5 := FP256BN.G1mul(T2,rx)
R5.Sub(V.Mul(rDelta2))
// proofData is the data being hashed, it consists of:
// the signature label
// 14 elements of G1 each taking 2*FieldBytes+1 bytes and 1 element of GT taking 12*FieldBytes
// one bigint (hash of the issuer public key) of length FieldBytes
// disclosed attributes
// message being signed
proofData := make([]byte, len([]byte(signLabel))+14*(2*FieldBytes+1)+13*FieldBytes+len(Disclosure)+len(msg))
index := 0
index = appendBytesString(proofData, index, signLabel)
index = appendBytesG1(proofData, index, t1)
index = appendBytesG1(proofData, index, t2)
index = appendBytesG1(proofData, index, t3)
index = appendBytesG1(proofData, index, APrime)
index = appendBytesG1(proofData, index, ABar)
index = appendBytesG1(proofData, index, BPrime)
index = appendBytesG1(proofData, index, Nym)
index = appendBytesG1(proofData, index, T1)
index = appendBytesG1(proofData, index, T2)
index = appendBytesG1(proofData, index, T3)
index = appendBytesG1(proofData, index, R1)
index = appendBytesG1(proofData, index, R2)
index = appendBytesGT(proofData, index, R3)
index = appendBytesG1(proofData, index, R4)
index = appendBytesG1(proofData, index, R5)
copy(proofData[index:], ipk.Hash)
index = index + FieldBytes
copy(proofData[index:], Disclosure)
index = index + len(Disclosure)
copy(proofData[index:], msg)
c := HashModOrder(proofData)
// add the previous hash and the nonce and hash again to compute a second hash (C value)
index = 0
proofData = proofData[:2*FieldBytes]
index = appendBytesBig(proofData, index, c)
index = appendBytesBig(proofData, index, Nonce)
ProofC := HashModOrder(proofData)
ProofSSk := Modadd(rSk, FP256BN.Modmul(ProofC, sk, GroupOrder), GroupOrder)
ProofSE := Modsub(re, FP256BN.Modmul(ProofC, E, GroupOrder), GroupOrder)
ProofSR2 := Modadd(rR2, FP256BN.Modmul(ProofC, r2, GroupOrder), GroupOrder)
ProofSR3 := Modsub(rR3, FP256BN.Modmul(ProofC, r3, GroupOrder), GroupOrder)
ProofSSPrime := Modadd(rSPrime, FP256BN.Modmul(ProofC, sPrime, GroupOrder), GroupOrder)
ProofSRNym := Modadd(rRNym, FP256BN.Modmul(ProofC, RNym, GroupOrder), GroupOrder)
//compute s-values for zk proof of group signature
ProofSalpha := Modadd(rAlpha,FP256BN.Modmul(ProofC,alpha,GroupOrder),GroupOrder)
ProofSbeta := Modadd(rBeta,FP256BN.Modmul(ProofC,beta,GroupOrder),GroupOrder)
x := FP256BN.FromBytes(uk.UK1)
ProofSx := Modadd(rx,FP256BN.Modmul(ProofC,x,GroupOrder),GroupOrder)
delta1 := FP256BN.Modmul(alpha,x,GroupOrder)
ProofSdelta1 := Modadd(rDelta1,FP256BN.Modmul(ProofC,delta1,GroupOrder),GroupOrder)
delta2 := FP256BN.Modmul(beta,x,GroupOrder)
ProofSdelta2 := Modadd(rDelta2,FP256BN.Modmul(ProofC,delta2,GroupOrder),GroupOrder)
ProofSAttrs := make([][]byte, len(HiddenIndices))
for i, j := range HiddenIndices {
ProofSAttrs[i] = BigToBytes(Modadd(rAttrs[i], FP256BN.Modmul(ProofC, FP256BN.FromBytes(cred.Attrs[j]), GroupOrder), GroupOrder))
}
return &Signature{
EcpToProto(APrime),
EcpToProto(ABar),
EcpToProto(BPrime),
EcpToProto(T1),
EcpToProto(T2),
EcpToProto(T3),
BigToBytes(ProofC),
BigToBytes(ProofSSk),
BigToBytes(ProofSE),
BigToBytes(ProofSR2),
BigToBytes(ProofSR3),
BigToBytes(ProofSSPrime),
BigToBytes(ProofSalpha),
BigToBytes(ProofSbeta),
BigToBytes(ProofSx),
BigToBytes(ProofSdelta1),
BigToBytes(ProofSdelta2),
ProofSAttrs,
BigToBytes(Nonce),
EcpToProto(Nym),
BigToBytes(ProofSRNym)},
nil
}
// Ver verifies an idemix signature
// Disclosure steers which attributes it expects to be disclosed
// attributeValues[i] contains the desired attribute value for the i-th undisclosed attribute in Disclosure
func (sig *Signature) Ver(Disclosure []byte, ipk *IssuerPublicKey, gpk *GroupPublicKey, msg []byte, attributeValues []*FP256BN.BIG) error | {
HiddenIndices := hiddenIndices(Disclosure)
APrime := EcpFromProto(sig.GetAPrime())
ABar := EcpFromProto(sig.GetABar())
BPrime := EcpFromProto(sig.GetBPrime())
Nym := EcpFromProto(sig.GetNym())
ProofC := FP256BN.FromBytes(sig.GetProofC())
ProofSSk := FP256BN.FromBytes(sig.GetProofSSk())
ProofSE := FP256BN.FromBytes(sig.GetProofSE())
ProofSR2 := FP256BN.FromBytes(sig.GetProofSR2())
ProofSR3 := FP256BN.FromBytes(sig.GetProofSR3())
ProofSSPrime := FP256BN.FromBytes(sig.GetProofSSPrime())
ProofSRNym := FP256BN.FromBytes(sig.GetProofSRNym())
ProofSAttrs := make([]*FP256BN.BIG, len(sig.GetProofSAttrs()))
T1 := EcpFromProto(sig.GetT1())
T2 := EcpFromProto(sig.GetT2())
T3 := EcpFromProto(sig.GetT3())
ProofSalpha := FP256BN.FromBytes(sig.GetProofSalpha()) | identifier_body |
|
signature.go | 6BN.G1mul(APrime, FP256BN.FromBytes(cred.E))) // barA = A'^{-e} b^{r1}
BPrime := FP256BN.G1mul(B, r1)
HRand := EcpFromProto(ipk.HRand)
HSk := EcpFromProto(ipk.HSk)
BPrime.Sub(FP256BN.G1mul(HRand, r2)) // b' = b^{r1} h_r^{-r2}
S := FP256BN.FromBytes(cred.S)
E := FP256BN.FromBytes(cred.E)
sPrime := Modsub(S, FP256BN.Modmul(r2, r3, GroupOrder), GroupOrder)
// Construct ZK proof
rSk := RandModOrder(rng)
re := RandModOrder(rng)
rR2 := RandModOrder(rng)
rR3 := RandModOrder(rng)
rSPrime := RandModOrder(rng)
rRNym := RandModOrder(rng)
rAttrs := make([]*FP256BN.BIG, len(HiddenIndices))
for i := range HiddenIndices {
rAttrs[i] = RandModOrder(rng)
}
t1 := APrime.Mul2(re, HRand, rR2)
t2 := FP256BN.G1mul(HRand, rSPrime)
t2.Add(BPrime.Mul2(rR3, HSk, rSk))
for i := 0; i < len(HiddenIndices)/2; i++ {
t2.Add(EcpFromProto(ipk.HAttrs[HiddenIndices[2*i]]).Mul2(rAttrs[2*i], EcpFromProto(ipk.HAttrs[HiddenIndices[2*i+1]]), rAttrs[2*i+1]))
}
if len(HiddenIndices)%2 != 0 {
t2.Add(FP256BN.G1mul(EcpFromProto(ipk.HAttrs[HiddenIndices[len(HiddenIndices)-1]]), rAttrs[len(HiddenIndices)-1]))
}
t3 := HSk.Mul2(rSk, HRand, rRNym)
//Zk proof of BBS group signature
alpha := RandModOrder(rng)
beta := RandModOrder(rng)
rAlpha := RandModOrder(rng)
rBeta := RandModOrder(rng)
rx := RandModOrder(rng)
rDelta1 := RandModOrder(rng)
rDelta2 := RandModOrder(rng)
temp := Modadd(alpha,beta,GroupOrder)
temp1 := Modadd(rAlpha,rBeta,GroupOrder)
temp2 := Modadd(rDelta1,rDelta2,GroupOrder)
U := EcpFromProto(gpk.U)
V := EcpFromProto(gpk.V)
H := EcpFromProto(gpk.H)
Ax := EcpFromProto(uk.UK2)
W := Ecp2FromProto(ipk.W)
T1 := FP256BN.G1mul(U,alpha)
T2 := FP256BN.G1mul(V,beta)
T3 := FP256BN.G1mul(H,temp)
T3.Add(Ax)
R1 := FP256BN.G1mul(U,rAlpha)
R2 := FP256BN.G1mul(V,rBeta) | Htp1 := H.Mul(temp1)
e2 := FP256BN.Ate(W,Htp1)
e2.Inverse()
e2.Mul(e1)
Htp2 := H.Mul(temp2)
e4 := FP256BN.Ate(GenG2,Htp2)
e4.Inverse()
e4.Mul(e2)
R3 := FP256BN.Fexp(e4)
R4 := FP256BN.G1mul(T1,rx)
R4.Sub(U.Mul(rDelta1))
R5 := FP256BN.G1mul(T2,rx)
R5.Sub(V.Mul(rDelta2))
// proofData is the data being hashed, it consists of:
// the signature label
// 14 elements of G1 each taking 2*FieldBytes+1 bytes and 1 element of GT taking 12*FieldBytes
// one bigint (hash of the issuer public key) of length FieldBytes
// disclosed attributes
// message being signed
proofData := make([]byte, len([]byte(signLabel))+14*(2*FieldBytes+1)+13*FieldBytes+len(Disclosure)+len(msg))
index := 0
index = appendBytesString(proofData, index, signLabel)
index = appendBytesG1(proofData, index, t1)
index = appendBytesG1(proofData, index, t2)
index = appendBytesG1(proofData, index, t3)
index = appendBytesG1(proofData, index, APrime)
index = appendBytesG1(proofData, index, ABar)
index = appendBytesG1(proofData, index, BPrime)
index = appendBytesG1(proofData, index, Nym)
index = appendBytesG1(proofData, index, T1)
index = appendBytesG1(proofData, index, T2)
index = appendBytesG1(proofData, index, T3)
index = appendBytesG1(proofData, index, R1)
index = appendBytesG1(proofData, index, R2)
index = appendBytesGT(proofData, index, R3)
index = appendBytesG1(proofData, index, R4)
index = appendBytesG1(proofData, index, R5)
copy(proofData[index:], ipk.Hash)
index = index + FieldBytes
copy(proofData[index:], Disclosure)
index = index + len(Disclosure)
copy(proofData[index:], msg)
c := HashModOrder(proofData)
// add the previous hash and the nonce and hash again to compute a second hash (C value)
index = 0
proofData = proofData[:2*FieldBytes]
index = appendBytesBig(proofData, index, c)
index = appendBytesBig(proofData, index, Nonce)
ProofC := HashModOrder(proofData)
ProofSSk := Modadd(rSk, FP256BN.Modmul(ProofC, sk, GroupOrder), GroupOrder)
ProofSE := Modsub(re, FP256BN.Modmul(ProofC, E, GroupOrder), GroupOrder)
ProofSR2 := Modadd(rR2, FP256BN.Modmul(ProofC, r2, GroupOrder), GroupOrder)
ProofSR3 := Modsub(rR3, FP256BN.Modmul(ProofC, r3, GroupOrder), GroupOrder)
ProofSSPrime := Modadd(rSPrime, FP256BN.Modmul(ProofC, sPrime, GroupOrder), GroupOrder)
ProofSRNym := Modadd(rRNym, FP256BN.Modmul(ProofC, RNym, GroupOrder), GroupOrder)
// compute s-values for ZK proof of group signature
ProofSalpha := Modadd(rAlpha, FP256BN.Modmul(ProofC, alpha, GroupOrder), GroupOrder)
ProofSbeta := Modadd(rBeta, FP256BN.Modmul(ProofC, beta, GroupOrder), GroupOrder)
x := FP256BN.FromBytes(uk.UK1)
ProofSx := Modadd(rx, FP256BN.Modmul(ProofC, x, GroupOrder), GroupOrder)
delta1 := FP256BN.Modmul(alpha, x, GroupOrder)
ProofSdelta1 := Modadd(rDelta1, FP256BN.Modmul(ProofC, delta1, GroupOrder), GroupOrder)
delta2 := FP256BN.Modmul(beta, x, GroupOrder)
ProofSdelta2 := Modadd(rDelta2, FP256BN.Modmul(ProofC, delta2, GroupOrder), GroupOrder)
ProofSAttrs := make([][]byte, len(HiddenIndices))
for i, j := range HiddenIndices {
ProofSAttrs[i] = BigToBytes(Modadd(rAttrs[i], FP256BN.Modmul(ProofC, FP256BN.FromBytes(cred.Attrs[j]), GroupOrder), GroupOrder))
}
return &Signature{
EcpToProto(APrime),
EcpToProto(ABar),
EcpToProto(BPrime),
EcpToProto(T1),
EcpToProto(T2),
EcpToProto(T3),
BigToBytes(ProofC),
BigToBytes(ProofSSk),
BigToBytes(ProofSE),
BigToBytes(ProofSR2),
BigToBytes(ProofSR3),
BigToBytes(ProofSSPrime),
BigToBytes(ProofSalpha),
BigToBytes(ProofSbeta),
BigToBytes(ProofSx),
BigToBytes(ProofSdelta1),
BigToBytes(ProofSdelta2),
ProofSAttrs,
BigToBytes(Nonce),
EcpToProto(Nym),
BigToBytes(ProofSRNym)},
nil
}
// Ver verifies an idemix signature
// Disclosure steers |
// compute pairing
Grx := GenG2.Mul(rx)
T4 := H.Mul(temp)
e1 := FP256BN.Ate2(Grx, Ax, Grx, T4) | random_line_split
in-memory-plants-db-service.ts | () {
this.categories = [
{
'id': 0,
'name': 'Balkonnövények',
'plantsCount': 3
},
{
'id': 1,
'name': 'Rózsák',
'plantsCount': 4
},
{
'id': 2,
'name': 'Bogyós gyümölcsök',
'plantsCount': 5
},
{
'id': 3,
'name': 'Kúszónövények',
'plantsCount': 4
},
{
'id': 4,
'name': 'Gyógyhatású növények',
'plantsCount': 2
}
];
}
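// Seed data for the in-memory web API; each collection returned below (login, register, categories, plants) is served as its own route.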
createDb() {
const login = [];
const register = [];
const categories = this.categories;
const plants = [
{
'name': 'Estrella White',
'price': 2650,
'id': 0,
'categoryId': 0,
'isFrostProof': false,
'lightReq': 'HALF_SHADY',
'description': 'Virágait erős szárakon a lomb felett hozza. Bőséges virágzás egészen őszig. Az extrém időjárási ' +
'viszonyokat is jól bírja, hajtásait a szél sem töri meg. A bőséges virágzáshoz elengedhetetlen a gyakori öntözés ' +
'és tápoldatozás. Az általunk ajánlott „Muskátli virágvarázs” kitűnően megfelel erre a célra.',
'plantingTime': ['APRIL', 'MAY', 'JUNE'],
'waterReq': 'HIGH',
'nutritionReq': 'BIWEEKLY',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/100839_jpg_137965_406_488.jpg'
},
{
'name': 'Álló muskátli',
'price': 1470,
'id': 1,
'categoryId': 0,
'isFrostProof': false,
'lightReq': 'SUNNY',
'description': 'Minden évben újabb fajták érkeznek. A legszebb virágú és legjobb fajtákat válogattam össze ' +
'Önnek! Gazdagon virágoznak, nagyok a viráglabdák, szél- és időjárásállóak. Talán ezek a legfontosabb ' +
'tulajdonságok. Bizonyára elégedett lesz a képen látható fajtákkal.',
'plantingTime': ['APRIL', 'MAY', 'JUNE'],
'waterReq': 'HIGH',
'nutritionReq': 'BIWEEKLY',
'isFavorite': true,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/100164_jpg_70579_406_488.jpg'
},
{
'name': 'Csüngő begónia',
'price': 1390,
'id': 2,
'categoryId': 0,
'isFrostProof': false,
'lightReq': 'SUNNY',
'description': 'Újdonság, ami nem olcsó, de annál értékesebb! Barátai irigyelni fogják a rengeteg virágért. ' +
'A 10 centiméteresre is megnövő fénylő virágok finom illatosak. Nem kell már az árnyékban sem nélkülöznie ' +
'a bőséges virágzást! Ilyen helyekre a begóniák ültetése a legjobb választás. Egészen őszig messziről fénylenek ' +
'virágai. Jó minőségű balkon virágföldet és rendszeres öntözést kíván.',
'plantingTime': ['APRIL', 'MAY', 'JUNE'],
'waterReq': 'HIGH',
'nutritionReq': 'WEEKLY',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/100696_jpg_84843_406_488.jpg'
},
{
'name': 'Tearózsa',
'price': 1390,
'id': 3,
'categoryId': 1,
'isFrostProof': true,
'lightReq': 'SUNNY',
'description': 'Virágait erős szárakon a lomb felett hozza. Bőséges virágzás egészen őszig. Az extrém időjárási ' +
'viszonyokat is jól bírja, hajtásait a szél sem töri meg. A bőséges virágzáshoz elengedhetetlen a gyakori öntözés ' +
'és tápoldatozás. Az általunk ajánlott „Muskátli virágvarázs” kitűnően megfelel erre a célra.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'OCT', 'NOV'],
'waterReq': 'MEDIUM',
'nutritionReq': 'MONTHLY',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/100839_jpg_137965_406_488.jpg'
},
{
'name': 'Bokorrózsa Emilia Maria',
'price': 1590,
'id': 4,
'categoryId': 1,
'isFrostProof': true,
'lightReq': 'SUNNY',
'description': 'Egy nagyon gazdagon és szinte folyamatosan virágzó bokorrózsa. Pinkszínű, tömvetelt virágai ' +
'csodálatos, grapefruit, iris és ibolya keverékére emlékeztető illatúak. Kisebb rácson felvezetve magasabbra is nevelhető. ',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'OCT', 'NOV'],
'waterReq': 'MEDIUM',
'nutritionReq': 'BIWEEKLY',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/115150_jpg_56050_406_488.jpg'
},
{
'name': 'Ágyásrózsa Julia Child',
'price': 1590,
'id': 5,
'categoryId': 1,
'isFrostProof': true,
'lightReq': 'HALF_SHADY',
'description': 'Ágyásrózsa. Egy száron több virágot is hoz, amik akár 12 cm nagyságúak, erős, aranysárga színűek. ' +
'Legértékesebb csodálatos illata, különösen reggel mikor az első napsugarak felmelegítik a virágokat.' +
'Ez a fajta nem hiányozhat egyetlen rózsakertből sem!',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'OCT', 'NOV'],
'waterReq': 'LOW',
'nutritionReq': 'WEEKLY',
'isFavorite': true,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/119100_jpg_37607_406_488.jpg'
},
{
'name': 'Művészrózsa Maurice Utrillo',
'price': 1890,
'id': 6,
'categoryId': 1,
'isFrostProof': true,
'lightReq': 'HALF_SHADY',
'description': 'Tűzpiros virágai fehéren csíkozottak. Mindehhez elragadó illat párosul.',
'planting | constructor | identifier_name |
|
in-memory-plants-db-service.ts | 0 centiméteresre is megnövő fénylő virágok finom illatosak. Nem kell már az árnyékban sem nélkülöznie ' +
'a bőséges virágzást! Ilyen helyekre a begóniák ültetése a legjobb választás. Egészen őszig messziről fénylenek ' +
'virágai. Jó minőségű balkon virágföldet és rendszeres öntözést kíván.',
'plantingTime': ['APRIL', 'MAY', 'JUNE'],
'waterReq': 'HIGH',
'nutritionReq': 'WEEKLY',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/100696_jpg_84843_406_488.jpg'
},
{
'name': 'Tearózsa',
'price': 1390,
'id': 3,
'categoryId': 1,
'isFrostProof': true,
'lightReq': 'SUNNY',
'description': 'Virágait erős szárakon a lomb felett hozza. Bőséges virágzás egészen őszig. Az extrém időjárási ' +
'viszonyokat is jól bírja, hajtásait a szél sem töri meg. A bőséges virágzáshoz elengedhetetlen a gyakori öntözés ' +
'és tápoldatozás. Az általunk ajánlott „Muskátli virágvarázs” kitűnően megfelel erre a célra.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'OCT', 'NOV'],
'waterReq': 'MEDIUM',
'nutritionReq': 'MONTHLY',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/100839_jpg_137965_406_488.jpg'
},
{
'name': 'Bokorrózsa Emilia Maria',
'price': 1590,
'id': 4,
'categoryId': 1,
'isFrostProof': true,
'lightReq': 'SUNNY',
'description': 'Egy nagyon gazdagon és szinte folyamatosan virágzó bokorrózsa. Pinkszínű, tömvetelt virágai ' +
'csodálatos, grapefruit, iris és ibolya keverékére emlékeztető illatúak. Kisebb rácson felvezetve magasabbra is nevelhető. ',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'OCT', 'NOV'],
'waterReq': 'MEDIUM',
'nutritionReq': 'BIWEEKLY',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/115150_jpg_56050_406_488.jpg'
},
{
'name': 'Ágyásrózsa Julia Child',
'price': 1590,
'id': 5,
'categoryId': 1,
'isFrostProof': true,
'lightReq': 'HALF_SHADY',
'description': 'Ágyásrózsa. Egy száron több virágot is hoz, amik akár 12 cm nagyságúak, erős, aranysárga színűek. ' +
'Legértékesebb csodálatos illata, különösen reggel mikor az első napsugarak felmelegítik a virágokat.' +
'Ez a fajta nem hiányozhat egyetlen rózsakertből sem!',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'OCT', 'NOV'],
'waterReq': 'LOW',
'nutritionReq': 'WEEKLY',
'isFavorite': true,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/119100_jpg_37607_406_488.jpg'
},
{
'name': 'Művészrózsa Maurice Utrillo',
'price': 1890,
'id': 6, | 'description': 'Tűzpiros virágai fehéren csíkozottak. Mindehhez elragadó illat párosul.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'OCT', 'NOV'],
'waterReq': 'MEDIUM',
'nutritionReq': 'BIWEEKLY',
'isFavorite': true,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/110840_jpg_91884_406_488.jpg'
},
{
'name': 'Delikatess csüngőeper',
'price': 1380,
'id': 6,
'categoryId': 2,
'isFrostProof': true,
'lightReq': 'SUNNY',
'description': 'Kert nélkül is folyamatosan szedheti a finom epreket! Használjon jó minőségű földet, megfelelő ' +
'méretű virágládát. Gondoskodjon a felesleges víz elvezetéséről! Napi rendszerességgel végzett öntözés és a ' +
'megfelelő tápanyagutánpótlás mellett rövid időn belül megjelennek az első termések.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'JUNE'],
'waterReq': 'HIGH',
'nutritionReq': 'WEEKLY',
'isFavorite': true,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/090060_jpg_535663_406_488.jpg'
},
{
'name': 'Málna Boheme',
'price': 1090,
'id': 7,
'categoryId': 2,
'isFrostProof': true,
'lightReq': 'SUNNY',
'description': 'Egy őszi érésű málna, amilyenre az ember vágyik. Szép színű, kellemes illatú, csodálatos ízű.' +
' Bogyói nagyon korán érnek. Felálló és kompakt növekedésű.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'OCT', 'NOV'],
'waterReq': 'MEDIUM',
'nutritionReq': 'NONE',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/151330_jpg_131481_406_488.jpg'
},
{
'name': 'Málna Golden Everest',
'price': 1290,
'id': 8,
'categoryId': 2,
'isFrostProof': true,
'lightReq': 'HALF_SHADY',
'description': 'A sárga termések teszik értékessé ezt a fajtát, mely különleges aromájával kiváló lekvárok, ' +
'édességek és gyümölcslé készítéséhez. Terméseit június közepén a második éves vesszők végén hozza.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'SEPT', 'OCT', 'NOV'],
'waterReq': 'MEDIUM',
'nutritionReq': 'NONE',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/151240_jpg_273492_406_488.jpg'
},
{
| 'categoryId': 1,
'isFrostProof': true,
'lightReq': 'HALF_SHADY', | random_line_split |
in-memory-plants-db-service.ts | ', 'APRIL', 'MAY', 'SEPT', 'OCT', 'NOV'],
'waterReq': 'MEDIUM',
'nutritionReq': 'NONE',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/154530_jpg_345266_406_488.jpg'
},
{
'name': 'Vörös áfonya',
'price': 1690,
'id': 10,
'categoryId': 2,
'isFrostProof': true,
'lightReq': 'HALF_SHADY',
'description': 'Hosszabbítsa meg a szüretelési időt egy közkedvelt bogyós fajtával! Sokan kedvelik az áfonyát ' +
'akár müzlibe, gyümölcssalátába vagy csak egyszerűen frissen a bokorról.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'JUNE', 'SEPT', 'OCT', 'NOV'],
'waterReq': 'MEDIUM',
'nutritionReq': 'NONE',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/154520_jpg_402033_406_488.jpg'
},
{
'name': 'Csokoládészőlő',
'price': 1890,
'id': 11,
'categoryId': 3,
'isFrostProof': true,
'lightReq': 'SUNNY',
'description': 'Kínában és Japánban a szárított kérgét gyógyszerként használják. Gyulladáscsökkentő és a ' +
'menstruációs problémákat enyhítő hatása is ismert. Gyökere lázcsillapító hatású, termése egyes vélemények ' +
'szerint hatásos a daganatos betegségekkel szemben.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'JUNE', 'SEPT', 'OCT', 'NOV'],
'waterReq': 'LOW',
'nutritionReq': 'MONTHLY',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/070535_jpg_51932_406_488.jpg'
},
{
'name': 'Klemátisz ´Guernsey Cream´',
'price': 2190,
'id': 12,
'categoryId': 3,
'isFrostProof': false,
'lightReq': 'HALF_SHADY',
'description': 'Mint ahogy a neve is jelzi, az első krémszínű klemátisz! Különleges színű virágainak értékét egy ' +
'sötét háttér még inkább kiemeli. A virág zöld közepe a sötétsárga porzókkal egyedülálló szépséget kölcsönöz ' +
'a fajtának.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'SEPT', 'OCT', 'NOV'],
'waterReq': 'LOW',
'nutritionReq': 'BIWEEKLY',
'isFavorite': true,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/070770_jpg_431988_406_488.jpg'
},
{
'name': 'Klemátisz \'Innocent Blush\'',
'price': 2990,
'id': 13,
'categoryId': 3,
'isFrostProof': false,
'lightReq': 'SUNNY',
'description': 'Csodaszép, nosztalgikus, új nemesítésű fajta. A 2. éves hajtásokon fejlődnek ki nagy, telt virágai.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'SEPT', 'OCT'],
'waterReq': 'MEDIUM',
'nutritionReq': 'BIWEEKLY',
'isFavorite': true,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/070735_jpg_228664_406_488.jpg'
},
{
'name': 'Nyári lilaakác',
'price': 2590,
'id': 14,
'categoryId': 3,
'isFrostProof': true,
'lightReq': 'SUNNY',
'description': 'Június - júliusban nyíló, rózsaszín - lilás virágai akár a 35 cm nagyságot is elérhetik. ' +
'Szereti a védett helyeket. Minél idősebb, annál szebb a virágzása. Edényben is nevelhető.',
'plantingTime': ['MARCH', 'APRIL', 'MAY', 'SEPT', 'OCT', 'NOV'],
'waterReq': 'LOW',
'nutritionReq': 'MONTHLY',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/070820_jpg_611190_406_488.jpg'
},
{
'name': 'Aloe Vera',
'price': 1790,
'id': 15,
'categoryId': 4,
'isFrostProof': false,
'lightReq': 'SUNNY',
'description': 'Az Aloe vera az egyik legrégebbi gyógynövény a különféle külső és belső bajok gyógyítására. ' +
'Enyhíti a fájdalmat, segíti a sebek, sérülések gyorsabb gyógyulását. Vágjon le egy darab levelet, majd a ' +
'kipréselt levet kenje a sebre! A vaskos levelekből koktélok, italok is készülhetnek.',
'plantingTime': ['APRIL', 'MAY', 'JUNE', 'JULY', 'AUG', 'SEPT', 'OCT'],
'waterReq': 'HIGH',
'nutritionReq': 'WEEKLY',
'isFavorite': true,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/102440_jpg_83473_406_488.jpg'
},
{
'name': 'Homoktövis pár',
'price': 4150,
'id': 17,
'categoryId': 4,
'isFrostProof': true,
'lightReq': 'HALF_SHADY',
'description': 'Tudta Ön, hogy a homoktövis 10 - szer több C - vitamint tartalmaz mint a citrusfélék? ' +
'Egy igazi bomba az immunrendszernek, és egy hatásos fegyver a megfázások ellen.',
'plantingTime': ['APRIL', 'MAY', 'SEPT'],
'waterReq': 'HIGH',
'nutritionReq': 'MONTHLY',
'isFavorite': false,
'imageUrl': 'https://www.starkl.hu/img/eshop/thumbnails/130583_jpg_109750_406_488.jpg'
}
];
return {login, register, categories, plants};
}
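// Post-process every response: auth endpoints get a dummy token, other bodies are unwrapped, and filtered category queries return a random slice of the list.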
responseInterceptor(responseOptions: ResponseOptions, requestInfo: RequestInfo) {
if (requestInfo.resourceUrl.includes('login') || requestInfo.resourceUrl.includes('register')) {
responseOptions.body = {token: 'asdasd'};
responseOptions.status = 200;
} else if (typeof responseOptions.body === 'object') {
responseOptions.body = responseOptions.body ? (responseOptions.body as any).data : null;
}
if (requestInfo.resourceUrl.includes('categories') && requestInfo.req.url.includes('?')) {
const result = this.categories.slice(Math.random() * 4);
responseOptions.body = result;
}
return responseOptions;
}
}
| conditional_block |
||
piv.rs | pecialPublications/NIST.SP.800-78-4.pdf
// TODO(zmb3): support non-RSA keys, if needed.
if cmd.p1 != 0x07 {
return Err(invalid_data_error(&format!(
"unsupported algorithm identifier P1:{:#X} in general authenticate command",
cmd.p1
)));
}
// P2='9A' means PIV Authentication Key (matches our cert '5FC105' in handle_get_data).
if cmd.p2 != 0x9A {
return Err(invalid_data_error(&format!(
"unsupported key reference P2:{:#X} in general authenticate command",
cmd.p2
)));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
if *request_tlv.tag() != tlv_tag(TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE)? {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
// Extract the challenge field.
let request_tlvs = match request_tlv.value() {
Value::Primitive(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
Value::Constructed(tlvs) => tlvs,
};
let mut challenge = None;
for data in request_tlvs {
if *data.tag() != tlv_tag(TLV_TAG_CHALLENGE)? {
continue;
}
challenge = match data.value() {
Value::Primitive(chal) => Some(chal),
Value::Constructed(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
};
}
let challenge = challenge.ok_or_else(|| {
invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}, missing challenge data"
))
})?;
// TODO(zmb3): support non-RSA keys, if needed.
let signed_challenge = self.sign_auth_challenge(challenge);
// Return signed challenge.
let resp = tlv(
TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE,
Value::Constructed(vec![tlv(
TLV_TAG_RESPONSE,
Value::Primitive(signed_challenge),
)?]),
)?
.to_vec();
self.pending_response = Some(Cursor::new(resp));
self.handle_get_response(cmd)
}
fn build_chuid(uuid: Uuid) -> Vec<u8> {
// This is gross: the response is a BER-TLV value, but it has nested SIMPLE-TLV
// values. None of the TLV encoding libraries out there support this, they fail
// when checking the tag of nested TLVs.
//
// So, construct the TLV by hand from raw bytes. Hopefully the comments will be
// enough to explain the structure.
//
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// table 9 has the explanation of fields.
//
// Start with a top-level BER-TLV tag and length:
let mut resp = vec![TLV_TAG_DATA_FIELD, 0x3B];
// TLV tag and length for FASC-N.
resp.extend_from_slice(&[TLV_TAG_FASC_N, 0x19]);
// FASC-N value containing S9999F9999F999999F0F1F0000000000300001E, with a
// weird encoding from section 6 of:
// https://www.idmanagement.gov/docs/pacs-tig-scepacs.pdf
resp.extend_from_slice(&[
0xd4, 0xe7, 0x39, 0xda, 0x73, 0x9c, 0xed, 0x39, 0xce, 0x73, 0x9d, 0x83, 0x68, 0x58,
0x21, 0x08, 0x42, 0x10, 0x84, 0x21, 0xc8, 0x42, 0x10, 0xc3, 0xeb,
]);
// TLV for user UUID.
resp.extend_from_slice(&[TLV_TAG_GUID, 0x10]);
resp.extend_from_slice(uuid.as_bytes());
// TLV for expiration date (YYYYMMDD).
resp.extend_from_slice(&[TLV_TAG_EXPIRATION_DATE, 0x08]);
// TODO(awly): generate this from current time.
resp.extend_from_slice("20300101".as_bytes());
// TLV for signature (empty).
resp.extend_from_slice(&[TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
resp
}
fn build_piv_auth_cert(cert_der: &[u8]) -> Vec<u8> {
// Same as above, tags in this BER-TLV value are not compatible with the spec
// and existing libraries. Marshal by hand.
//
// Certificate TLV tag and length.
let mut resp = vec![TLV_TAG_CERTIFICATE];
resp.extend_from_slice(&len_to_vec(cert_der.len()));
resp.extend_from_slice(cert_der);
// CertInfo TLV (0x00 indicates uncompressed cert).
resp.extend_from_slice(&[TLV_TAG_CERTINFO, 0x01, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
// Wrap with top-level TLV tag and length.
let mut resp_outer = vec![TLV_TAG_DATA_FIELD];
resp_outer.extend_from_slice(&len_to_vec(resp.len()));
resp_outer.extend_from_slice(&resp);
resp_outer
}
}
#[derive(Debug)]
pub struct Response {
data: Option<Vec<u8>>,
status: Status,
}
impl Response {
fn new(status: Status) -> Self {
Self { data: None, status }
}
fn with_data(status: Status, data: Vec<u8>) -> Self {
Self {
data: Some(data),
status,
}
}
pub fn encode(&self) -> Vec<u8> {
let mut buf = Vec::new();
if let Some(data) = &self.data {
buf.extend_from_slice(data);
}
let status: [u8; 2] = self.status.into();
buf.extend_from_slice(&status);
buf
}
}
// SELECT command tags.
const TLV_TAG_PIV_APPLICATION_PROPERTY_TEMPLATE: u8 = 0x61;
const TLV_TAG_AID: u8 = 0x4F;
const TLV_TAG_COEXISTENT_TAG_ALLOCATION_AUTHORITY: u8 = 0x79;
const TLV_TAG_DATA_FIELD: u8 = 0x53;
const TLV_TAG_FASC_N: u8 = 0x30;
const TLV_TAG_GUID: u8 = 0x34;
const TLV_TAG_EXPIRATION_DATE: u8 = 0x35;
const TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE: u8 = 0x3E;
const TLV_TAG_ERROR_DETECTION_CODE: u8 = 0xFE;
const TLV_TAG_CERTIFICATE: u8 = 0x70;
const TLV_TAG_CERTINFO: u8 = 0x71;
// GENERAL AUTHENTICATE command tags.
const TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE: u8 = 0x7C;
const TLV_TAG_CHALLENGE: u8 = 0x81;
const TLV_TAG_RESPONSE: u8 = 0x82;
fn tlv(tag: u8, value: Value) -> RdpResult<Tlv> {
Tlv::new(tlv_tag(tag)?, value)
.map_err(|e| invalid_data_error(&format!("TLV with tag {tag:#X} invalid: {e:?}")))
}
fn tlv_tag(val: u8) -> RdpResult<Tag> {
Tag::try_from(val).map_err(|e| invalid_data_error(&format!("TLV tag {val:#X} invalid: {e:?}")))
}
fn hex_data<const S: usize>(cmd: &Command<S>) -> String {
to_hex(cmd.data())
}
fn to_hex(bytes: &[u8]) -> String {
let mut s = String::new();
for b in bytes {
// https://rust-lang.github.io/rust-clippy/master/index.html#format_push_string
let _ = write!(s, "{b:02X}");
}
s
}
#[allow(clippy::cast_possible_truncation)]
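// Encodes a BER-TLV length: short form (single byte) when it fits, long form with a length-of-length prefix otherwise.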
fn len_to_vec(len: usize) -> Vec<u8> {
if len < 0x7f | {
vec![len as u8]
} | conditional_block |
|
piv.rs | chunk = [0; CHUNK_SIZE];
let n = cursor.read(&mut chunk)?;
let mut chunk = chunk.to_vec();
chunk.truncate(n);
let remaining = cursor.get_ref().len() as u64 - cursor.position();
let status = if remaining == 0 {
Status::Success
} else if remaining < CHUNK_SIZE as u64 {
Status::MoreAvailable(remaining as u8)
} else {
Status::MoreAvailable(0)
};
Ok(Response::with_data(status, chunk))
}
}
}
/// Sign the challenge.
///
/// Note: for signatures, typically you'd use a signer that hashes the input data, adds padding
/// according to some scheme (like PKCS1v15 or PSS) and then "decrypts" this data with the key.
/// The decrypted blob is the signature.
///
/// In our case, the RDP server does the hashing and padding, and only gives us a finished blob
/// to decrypt. Most crypto libraries don't directly expose RSA decryption without padding, as
/// it's easy to build insecure crypto systems. Thankfully for us, this decryption is just a single
/// modpow operation which is supported by RustCrypto.
fn sign_auth_challenge(&self, challenge: &[u8]) -> Vec<u8> {
let c = BigUint::from_bytes_be(challenge);
let plain_text = c
.modpow(self.piv_auth_key.d(), self.piv_auth_key.n())
.to_bytes_be();
let mut result = vec![0u8; self.piv_auth_key.size()];
let start = result.len() - plain_text.len();
result[start..].copy_from_slice(&plain_text);
result
}
fn handle_general_authenticate(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// See section 3.2.4 and example in Appending A.3 from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// P1='07' means 2048-bit RSA.
//
// TODO(zmb3): compare algorithm against the private key using consts from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-78-4.pdf
// TODO(zmb3): support non-RSA keys, if needed.
if cmd.p1 != 0x07 {
return Err(invalid_data_error(&format!(
"unsupported algorithm identifier P1:{:#X} in general authenticate command",
cmd.p1
)));
}
// P2='9A' means PIV Authentication Key (matches our cert '5FC105' in handle_get_data).
if cmd.p2 != 0x9A {
return Err(invalid_data_error(&format!(
"unsupported key reference P2:{:#X} in general authenticate command",
cmd.p2
)));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
if *request_tlv.tag() != tlv_tag(TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE)? {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
// Extract the challenge field.
let request_tlvs = match request_tlv.value() {
Value::Primitive(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
Value::Constructed(tlvs) => tlvs,
};
let mut challenge = None;
for data in request_tlvs {
if *data.tag() != tlv_tag(TLV_TAG_CHALLENGE)? {
continue;
}
challenge = match data.value() {
Value::Primitive(chal) => Some(chal),
Value::Constructed(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
};
}
let challenge = challenge.ok_or_else(|| {
invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}, missing challenge data"
))
})?;
// TODO(zmb3): support non-RSA keys, if needed.
let signed_challenge = self.sign_auth_challenge(challenge);
// Return signed challenge.
let resp = tlv(
TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE,
Value::Constructed(vec![tlv(
TLV_TAG_RESPONSE,
Value::Primitive(signed_challenge),
)?]),
)?
.to_vec();
self.pending_response = Some(Cursor::new(resp));
self.handle_get_response(cmd)
}
fn build_chuid(uuid: Uuid) -> Vec<u8> {
// This is gross: the response is a BER-TLV value, but it has nested SIMPLE-TLV
// values. None of the TLV encoding libraries out there support this, they fail
// when checking the tag of nested TLVs.
//
// So, construct the TLV by hand from raw bytes. Hopefully the comments will be
// enough to explain the structure.
//
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// table 9 has the explanation of fields.
//
// Start with a top-level BER-TLV tag and length:
let mut resp = vec![TLV_TAG_DATA_FIELD, 0x3B];
// TLV tag and length for FASC-N.
resp.extend_from_slice(&[TLV_TAG_FASC_N, 0x19]);
// FASC-N value containing S9999F9999F999999F0F1F0000000000300001E, with a
// weird encoding from section 6 of:
// https://www.idmanagement.gov/docs/pacs-tig-scepacs.pdf
resp.extend_from_slice(&[
0xd4, 0xe7, 0x39, 0xda, 0x73, 0x9c, 0xed, 0x39, 0xce, 0x73, 0x9d, 0x83, 0x68, 0x58,
0x21, 0x08, 0x42, 0x10, 0x84, 0x21, 0xc8, 0x42, 0x10, 0xc3, 0xeb,
]);
// TLV for user UUID.
resp.extend_from_slice(&[TLV_TAG_GUID, 0x10]);
resp.extend_from_slice(uuid.as_bytes());
// TLV for expiration date (YYYYMMDD).
resp.extend_from_slice(&[TLV_TAG_EXPIRATION_DATE, 0x08]);
// TODO(awly): generate this from current time.
resp.extend_from_slice("20300101".as_bytes());
// TLV for signature (empty).
resp.extend_from_slice(&[TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
resp
}
fn build_piv_auth_cert(cert_der: &[u8]) -> Vec<u8> {
// Same as above, tags in this BER-TLV value are not compatible with the spec
// and existing libraries. Marshal by hand.
//
// Certificate TLV tag and length.
let mut resp = vec![TLV_TAG_CERTIFICATE];
resp.extend_from_slice(&len_to_vec(cert_der.len()));
resp.extend_from_slice(cert_der);
// CertInfo TLV (0x00 indicates uncompressed cert).
resp.extend_from_slice(&[TLV_TAG_CERTINFO, 0x01, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
// Wrap with top-level TLV tag and length.
let mut resp_outer = vec![TLV_TAG_DATA_FIELD];
resp_outer.extend_from_slice(&len_to_vec(resp.len()));
resp_outer.extend_from_slice(&resp);
resp_outer
}
}
#[derive(Debug)]
pub struct Response {
data: Option<Vec<u8>>,
status: Status,
}
impl Response {
fn new(status: Status) -> Self {
Self { data: None, status }
}
fn with_data(status: Status, data: Vec<u8>) -> Self {
Self {
data: Some(data),
status,
}
}
pub fn encode(&self) -> Vec<u8> | {
let mut buf = Vec::new();
if let Some(data) = &self.data {
buf.extend_from_slice(data);
}
let status: [u8; 2] = self.status.into();
buf.extend_from_slice(&status);
buf
} | identifier_body |
|
piv.rs | , _cmd: Command<S>) -> RdpResult<Response> {
// CHUNK_SIZE is the max response data size in bytes, without resorting to "extended"
// messages.
const CHUNK_SIZE: usize = 256;
match &mut self.pending_response {
None => Ok(Response::new(Status::NotFound)),
Some(cursor) => {
let mut chunk = [0; CHUNK_SIZE];
let n = cursor.read(&mut chunk)?;
let mut chunk = chunk.to_vec();
chunk.truncate(n);
let remaining = cursor.get_ref().len() as u64 - cursor.position();
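// Tell the caller how much is left: Success when done, otherwise MoreAvailable with the remaining byte count (0 when a full chunk or more remains).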
let status = if remaining == 0 {
Status::Success
} else if remaining < CHUNK_SIZE as u64 {
Status::MoreAvailable(remaining as u8)
} else {
Status::MoreAvailable(0)
};
Ok(Response::with_data(status, chunk))
}
}
}
/// Sign the challenge.
///
/// Note: for signatures, typically you'd use a signer that hashes the input data, adds padding
/// according to some scheme (like PKCS1v15 or PSS) and then "decrypts" this data with the key.
/// The decrypted blob is the signature.
///
/// In our case, the RDP server does the hashing and padding, and only gives us a finished blob
/// to decrypt. Most crypto libraries don't directly expose RSA decryption without padding, as
/// it's easy to build insecure crypto systems. Thankfully for us, this decryption is just a single
/// modpow operation which is supported by RustCrypto.
fn sign_auth_challenge(&self, challenge: &[u8]) -> Vec<u8> {
let c = BigUint::from_bytes_be(challenge);
let plain_text = c
.modpow(self.piv_auth_key.d(), self.piv_auth_key.n())
.to_bytes_be();
let mut result = vec![0u8; self.piv_auth_key.size()];
let start = result.len() - plain_text.len();
result[start..].copy_from_slice(&plain_text);
result
}
fn handle_general_authenticate(&mut self, cmd: Command<S>) -> RdpResult<Response> {
// See section 3.2.4 and example in Appending A.3 from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// P1='07' means 2048-bit RSA.
//
// TODO(zmb3): compare algorithm against the private key using consts from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-78-4.pdf
// TODO(zmb3): support non-RSA keys, if needed.
if cmd.p1 != 0x07 {
return Err(invalid_data_error(&format!(
"unsupported algorithm identifier P1:{:#X} in general authenticate command",
cmd.p1
)));
}
// P2='9A' means PIV Authentication Key (matches our cert '5FC105' in handle_get_data).
if cmd.p2 != 0x9A {
return Err(invalid_data_error(&format!(
"unsupported key reference P2:{:#X} in general authenticate command",
cmd.p2
)));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
if *request_tlv.tag() != tlv_tag(TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE)? {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
// Extract the challenge field.
let request_tlvs = match request_tlv.value() {
Value::Primitive(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
Value::Constructed(tlvs) => tlvs,
};
let mut challenge = None;
for data in request_tlvs {
if *data.tag() != tlv_tag(TLV_TAG_CHALLENGE)? {
continue;
}
challenge = match data.value() {
Value::Primitive(chal) => Some(chal),
Value::Constructed(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
};
}
let challenge = challenge.ok_or_else(|| {
invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}, missing challenge data"
))
})?;
// TODO(zmb3): support non-RSA keys, if needed.
let signed_challenge = self.sign_auth_challenge(challenge);
// Return signed challenge.
let resp = tlv(
TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE,
Value::Constructed(vec![tlv(
TLV_TAG_RESPONSE,
Value::Primitive(signed_challenge),
)?]),
)?
.to_vec();
self.pending_response = Some(Cursor::new(resp));
self.handle_get_response(cmd)
}
fn build_chuid(uuid: Uuid) -> Vec<u8> {
// This is gross: the response is a BER-TLV value, but it has nested SIMPLE-TLV
// values. None of the TLV encoding libraries out there support this, they fail
// when checking the tag of nested TLVs.
//
// So, construct the TLV by hand from raw bytes. Hopefully the comments will be
// enough to explain the structure.
//
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// table 9 has the explanation of fields.
//
// Start with a top-level BER-TLV tag and length:
let mut resp = vec![TLV_TAG_DATA_FIELD, 0x3B];
// TLV tag and length for FASC-N.
resp.extend_from_slice(&[TLV_TAG_FASC_N, 0x19]);
// FASC-N value containing S9999F9999F999999F0F1F0000000000300001E, with a
// weird encoding from section 6 of:
// https://www.idmanagement.gov/docs/pacs-tig-scepacs.pdf
resp.extend_from_slice(&[
0xd4, 0xe7, 0x39, 0xda, 0x73, 0x9c, 0xed, 0x39, 0xce, 0x73, 0x9d, 0x83, 0x68, 0x58,
0x21, 0x08, 0x42, 0x10, 0x84, 0x21, 0xc8, 0x42, 0x10, 0xc3, 0xeb,
]);
// TLV for user UUID.
resp.extend_from_slice(&[TLV_TAG_GUID, 0x10]);
resp.extend_from_slice(uuid.as_bytes());
// TLV for expiration date (YYYYMMDD).
resp.extend_from_slice(&[TLV_TAG_EXPIRATION_DATE, 0x08]);
// TODO(awly): generate this from current time.
resp.extend_from_slice("20300101".as_bytes());
// TLV for signature (empty).
resp.extend_from_slice(&[TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
resp
}
fn build_piv_auth_cert(cert_der: &[u8]) -> Vec<u8> {
// Same as above, tags in this BER-TLV value are not compatible with the spec
// and existing libraries. Marshal by hand.
//
// Certificate TLV tag and length.
let mut resp = vec![TLV_TAG_CERTIFICATE];
resp.extend_from_slice(&len_to_vec(cert_der.len()));
resp.extend_from_slice(cert_der);
// CertInfo TLV (0x00 indicates uncompressed cert).
resp.extend_from_slice(&[TLV_TAG_CERTINFO, 0x01, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
// Wrap with top-level TLV tag and length.
let mut resp_outer = vec![TLV_TAG_DATA_FIELD];
resp_outer.extend_from_slice(&len_to_vec(resp.len()));
resp_outer.extend_from_slice(&resp);
resp_outer
}
}
#[derive(Debug)]
pub struct Response {
data: Option<Vec<u8>>,
status: Status,
}
impl Response {
fn new(status: Status) -> Self {
Self { data: None, status }
}
fn with_data(status: Status, data: Vec<u8>) -> Self {
Self {
data: Some(data), | random_line_split |
||
piv.rs | (zmb3): compare algorithm against the private key using consts from
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-78-4.pdf
// TODO(zmb3): support non-RSA keys, if needed.
if cmd.p1 != 0x07 {
return Err(invalid_data_error(&format!(
"unsupported algorithm identifier P1:{:#X} in general authenticate command",
cmd.p1
)));
}
// P2='9A' means PIV Authentication Key (matches our cert '5FC105' in handle_get_data).
if cmd.p2 != 0x9A {
return Err(invalid_data_error(&format!(
"unsupported key reference P2:{:#X} in general authenticate command",
cmd.p2
)));
}
let request_tlv = Tlv::from_bytes(cmd.data())
.map_err(|e| invalid_data_error(&format!("TLV invalid: {e:?}")))?;
if *request_tlv.tag() != tlv_tag(TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE)? {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
// Extract the challenge field.
let request_tlvs = match request_tlv.value() {
Value::Primitive(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
Value::Constructed(tlvs) => tlvs,
};
let mut challenge = None;
for data in request_tlvs {
if *data.tag() != tlv_tag(TLV_TAG_CHALLENGE)? {
continue;
}
challenge = match data.value() {
Value::Primitive(chal) => Some(chal),
Value::Constructed(_) => {
return Err(invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}"
)));
}
};
}
let challenge = challenge.ok_or_else(|| {
invalid_data_error(&format!(
"general authenticate command TLV invalid: {request_tlv:?}, missing challenge data"
))
})?;
// TODO(zmb3): support non-RSA keys, if needed.
let signed_challenge = self.sign_auth_challenge(challenge);
// Return signed challenge.
let resp = tlv(
TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE,
Value::Constructed(vec![tlv(
TLV_TAG_RESPONSE,
Value::Primitive(signed_challenge),
)?]),
)?
.to_vec();
self.pending_response = Some(Cursor::new(resp));
self.handle_get_response(cmd)
}
fn build_chuid(uuid: Uuid) -> Vec<u8> {
// This is gross: the response is a BER-TLV value, but it has nested SIMPLE-TLV
// values. None of the TLV encoding libraries out there support this, they fail
// when checking the tag of nested TLVs.
//
// So, construct the TLV by hand from raw bytes. Hopefully the comments will be
// enough to explain the structure.
//
// https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-73-4.pdf
// table 9 has the explanation of fields.
//
// Start with a top-level BER-TLV tag and length:
let mut resp = vec![TLV_TAG_DATA_FIELD, 0x3B];
// TLV tag and length for FASC-N.
resp.extend_from_slice(&[TLV_TAG_FASC_N, 0x19]);
// FASC-N value containing S9999F9999F999999F0F1F0000000000300001E, with a
// weird encoding from section 6 of:
// https://www.idmanagement.gov/docs/pacs-tig-scepacs.pdf
resp.extend_from_slice(&[
0xd4, 0xe7, 0x39, 0xda, 0x73, 0x9c, 0xed, 0x39, 0xce, 0x73, 0x9d, 0x83, 0x68, 0x58,
0x21, 0x08, 0x42, 0x10, 0x84, 0x21, 0xc8, 0x42, 0x10, 0xc3, 0xeb,
]);
// TLV for user UUID.
resp.extend_from_slice(&[TLV_TAG_GUID, 0x10]);
resp.extend_from_slice(uuid.as_bytes());
// TLV for expiration date (YYYYMMDD).
resp.extend_from_slice(&[TLV_TAG_EXPIRATION_DATE, 0x08]);
// TODO(awly): generate this from current time.
resp.extend_from_slice("20300101".as_bytes());
// TLV for signature (empty).
resp.extend_from_slice(&[TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
resp
}
fn build_piv_auth_cert(cert_der: &[u8]) -> Vec<u8> {
// Same as above, tags in this BER-TLV value are not compatible with the spec
// and existing libraries. Marshal by hand.
//
// Certificate TLV tag and length.
let mut resp = vec![TLV_TAG_CERTIFICATE];
resp.extend_from_slice(&len_to_vec(cert_der.len()));
resp.extend_from_slice(cert_der);
// CertInfo TLV (0x00 indicates uncompressed cert).
resp.extend_from_slice(&[TLV_TAG_CERTINFO, 0x01, 0x00]);
// TLV for error detection code.
resp.extend_from_slice(&[TLV_TAG_ERROR_DETECTION_CODE, 0x00]);
// Wrap with top-level TLV tag and length.
let mut resp_outer = vec![TLV_TAG_DATA_FIELD];
resp_outer.extend_from_slice(&len_to_vec(resp.len()));
resp_outer.extend_from_slice(&resp);
resp_outer
}
}
#[derive(Debug)]
pub struct Response {
data: Option<Vec<u8>>,
status: Status,
}
impl Response {
fn new(status: Status) -> Self {
Self { data: None, status }
}
fn with_data(status: Status, data: Vec<u8>) -> Self {
Self {
data: Some(data),
status,
}
}
pub fn encode(&self) -> Vec<u8> {
let mut buf = Vec::new();
if let Some(data) = &self.data {
buf.extend_from_slice(data);
}
let status: [u8; 2] = self.status.into();
buf.extend_from_slice(&status);
buf
}
}
// SELECT command tags.
const TLV_TAG_PIV_APPLICATION_PROPERTY_TEMPLATE: u8 = 0x61;
const TLV_TAG_AID: u8 = 0x4F;
const TLV_TAG_COEXISTENT_TAG_ALLOCATION_AUTHORITY: u8 = 0x79;
const TLV_TAG_DATA_FIELD: u8 = 0x53;
const TLV_TAG_FASC_N: u8 = 0x30;
const TLV_TAG_GUID: u8 = 0x34;
const TLV_TAG_EXPIRATION_DATE: u8 = 0x35;
const TLV_TAG_ISSUER_ASYMMETRIC_SIGNATURE: u8 = 0x3E;
const TLV_TAG_ERROR_DETECTION_CODE: u8 = 0xFE;
const TLV_TAG_CERTIFICATE: u8 = 0x70;
const TLV_TAG_CERTINFO: u8 = 0x71;
// GENERAL AUTHENTICATE command tags.
const TLV_TAG_DYNAMIC_AUTHENTICATION_TEMPLATE: u8 = 0x7C;
const TLV_TAG_CHALLENGE: u8 = 0x81;
const TLV_TAG_RESPONSE: u8 = 0x82;
fn tlv(tag: u8, value: Value) -> RdpResult<Tlv> {
Tlv::new(tlv_tag(tag)?, value)
.map_err(|e| invalid_data_error(&format!("TLV with tag {tag:#X} invalid: {e:?}")))
}
fn tlv_tag(val: u8) -> RdpResult<Tag> {
Tag::try_from(val).map_err(|e| invalid_data_error(&format!("TLV tag {val:#X} invalid: {e:?}")))
}
fn hex_data<const S: usize>(cmd: &Command<S>) -> String {
to_hex(cmd.data())
}
fn to_hex(bytes: &[u8]) -> String {
let mut s = String::new();
for b in bytes {
// https://rust-lang.github.io/rust-clippy/master/index.html#format_push_string
let _ = write!(s, "{b:02X}");
}
s
}
#[allow(clippy::cast_possible_truncation)]
fn | len_to_vec | identifier_name |
|
monitor.go | // create index indicates modification
m.ui.Output(fmt.Sprintf(
"Allocation %q modified: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
case alloc.desired == structs.AllocDesiredStatusRun:
// New allocation with desired status running
m.ui.Output(fmt.Sprintf(
"Allocation %q created: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
}
} else {
switch {
case existing.client != alloc.client:
description := ""
if alloc.clientDesc != "" {
description = fmt.Sprintf(" (%s)", alloc.clientDesc)
}
// Allocation status has changed
m.ui.Output(fmt.Sprintf(
"Allocation %q status changed: %q -> %q%s",
limit(alloc.id, m.length), existing.client, alloc.client, description))
}
}
}
// Check if the status changed. We skip any transitions to pending status.
if existing.status != "" &&
update.status != structs.AllocClientStatusPending &&
existing.status != update.status {
m.ui.Output(fmt.Sprintf("Evaluation status changed: %q -> %q",
existing.status, update.status))
}
}
// monitor is used to start monitoring the given evaluation ID. It
// writes output directly to the monitor's ui, and returns the
// exit code for the command. If allowPrefix is false, monitor will only accept
// exact matching evalIDs.
//
// The return code will be 0 on successful evaluation. If there are
// problems scheduling the job (impossible constraints, resources
// exhausted, etc), then the return code will be 2. For any other
// failures (API connectivity, internal errors, etc), the return code
// will be 1.
func (m *monitor) monitor(evalID string, allowPrefix bool) int {
// Track if we encounter a scheduling failure. This can only be
// detected while querying allocations, so we use this bool to
// carry that status into the return code.
var schedFailure bool
// The user may have specified a prefix as eval id. We need to lookup the
// full id from the database first. Since we do this in a loop we need a
// variable to keep track if we've already written the header message.
var headerWritten bool
// Add the initial pending state
m.update(newEvalState())
for {
// Query the evaluation
eval, _, err := m.client.Evaluations().Info(evalID, nil)
if err != nil {
if !allowPrefix {
m.ui.Error(fmt.Sprintf("No evaluation with id %q found", evalID))
return 1
}
if len(evalID) == 1 {
m.ui.Error("Identifier must contain at least two characters.")
return 1
}
if len(evalID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
evalID = evalID[:len(evalID)-1]
}
evals, _, err := m.client.Evaluations().PrefixList(evalID)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
return 1
}
if len(evals) == 0 {
m.ui.Error(fmt.Sprintf("No evaluation(s) with prefix or id %q found", evalID))
return 1
}
if len(evals) > 1 {
// Format the evaluations
out := make([]string, len(evals)+1)
out[0] = "ID|Priority|Type|Triggered By|Status"
for i, eval := range evals {
out[i+1] = fmt.Sprintf("%s|%d|%s|%s|%s",
limit(eval.ID, m.length),
eval.Priority,
eval.Type,
eval.TriggeredBy,
eval.Status)
}
m.ui.Output(fmt.Sprintf("Prefix matched multiple evaluations\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single evaluation
eval, _, err = m.client.Evaluations().Info(evals[0].ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
}
}
if !headerWritten {
m.ui.Info(fmt.Sprintf("Monitoring evaluation %q", limit(eval.ID, m.length)))
headerWritten = true
}
// Create the new eval state.
state := newEvalState()
state.status = eval.Status
state.desc = eval.StatusDescription
state.node = eval.NodeID
state.job = eval.JobID
state.wait = eval.Wait
state.index = eval.CreateIndex
// Query the allocations associated with the evaluation
allocs, _, err := m.client.Evaluations().Allocations(eval.ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading allocations: %s", err))
return 1
}
// Add the allocs to the state
for _, alloc := range allocs {
state.allocs[alloc.ID] = &allocState{
id: alloc.ID,
group: alloc.TaskGroup,
node: alloc.NodeID,
desired: alloc.DesiredStatus,
desiredDesc: alloc.DesiredDescription,
client: alloc.ClientStatus,
clientDesc: alloc.ClientDescription,
index: alloc.CreateIndex,
}
}
// Update the state
m.update(state)
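// Terminal statuses (complete/failed/cancelled) stop polling; anything else waits and re-queries.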
switch eval.Status {
case structs.EvalStatusComplete, structs.EvalStatusFailed, structs.EvalStatusCancelled:
if len(eval.FailedTGAllocs) == 0 {
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q",
limit(eval.ID, m.length), eval.Status))
} else {
// There were failures making the allocations
schedFailure = true
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q but failed to place all allocations:",
limit(eval.ID, m.length), eval.Status))
// Print the failures per task group
for tg, metrics := range eval.FailedTGAllocs {
noun := "allocation"
if metrics.CoalescedFailures > 0 {
noun += "s"
}
m.ui.Output(fmt.Sprintf("Task Group %q (failed to place %d %s):", tg, metrics.CoalescedFailures+1, noun))
metrics := formatAllocMetrics(metrics, false, " ")
for _, line := range strings.Split(metrics, "\n") {
m.ui.Output(line)
}
}
if eval.BlockedEval != "" {
m.ui.Output(fmt.Sprintf("Evaluation %q waiting for additional capacity to place remainder",
limit(eval.BlockedEval, m.length)))
}
}
default:
// Wait for the next update
time.Sleep(updateWait)
continue
}
// Monitor the next eval in the chain, if present
if eval.NextEval != "" {
if eval.Wait.Nanoseconds() != 0 {
m.ui.Info(fmt.Sprintf(
"Monitoring next evaluation %q in %s",
limit(eval.NextEval, m.length), eval.Wait))
// Skip some unnecessary polling
time.Sleep(eval.Wait)
}
// Reset the state and monitor the new eval
m.state = newEvalState()
return m.monitor(eval.NextEval, allowPrefix)
}
break
}
// Treat scheduling failures specially using a dedicated exit code.
// This makes it easier to detect failures from the CLI.
if schedFailure {
return 2
}
return 0
}
// dumpAllocStatus is a helper to generate a more user-friendly error message
// for scheduling failures, displaying a high level status of why the job
// could not be scheduled.
func dumpAllocStatus(ui cli.Ui, alloc *api.Allocation, length int) {
// Print filter stats
ui.Output(fmt.Sprintf("Allocation %q status %q (%d/%d nodes filtered)",
limit(alloc.ID, length), alloc.ClientStatus,
alloc.Metrics.NodesFiltered, alloc.Metrics.NodesEvaluated))
ui.Output(formatAllocMetrics(alloc.Metrics, true, " "))
}
func formatAllocMetrics(metrics *api.AllocationMetric, scores bool, prefix string) string {
// Print a helpful message if we have an eligibility problem
var out string
if metrics.NodesEvaluated == 0 {
out += fmt.Sprintf("%s* No nodes were eligible for evaluation\n", prefix)
}
// Print a helpful message if the user has asked for a DC that has no
// available nodes.
for dc, available := range metrics.NodesAvailable {
if available == 0 {
out += fmt.Sprintf("%s* No nodes are available in datacenter %q\n", prefix, dc)
}
}
// Print filter info
for class, num := range metrics.ClassFiltered {
out += fmt.Sprintf("%s* Class %q filtered %d nodes\n", prefix, class, num)
} | for cs, num := range metrics.ConstraintFiltered { | random_line_split |
|
monitor.go | . This
// must be queried for explicitly so it is only included
// if there is important error information inside.
full *api.Allocation
}
// monitor wraps an evaluation monitor and holds metadata and
// state information.
type monitor struct {
ui cli.Ui
client *api.Client
state *evalState
// length determines the number of characters for identifiers in the ui.
length int
sync.Mutex
}
// newMonitor returns a new monitor. The returned monitor will
// write output information to the provided ui. The length parameter determines
// the number of characters for identifiers in the ui.
func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {
mon := &monitor{
ui: &cli.PrefixedUi{
InfoPrefix: "==> ",
OutputPrefix: " ",
ErrorPrefix: "==> ",
Ui: ui,
},
client: client,
state: newEvalState(),
length: length,
}
return mon
}
// update is used to update our monitor with new state. It can be
// called whether the passed information is new or not, and will
// only dump update messages when state changes.
func (m *monitor) update(update *evalState) | }
// Check the allocations
for allocID, alloc := range update.allocs {
if existing, ok := existing.allocs[allocID]; !ok {
switch {
case alloc.index < update.index:
// New alloc with create index lower than the eval
// create index indicates modification
m.ui.Output(fmt.Sprintf(
"Allocation %q modified: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
case alloc.desired == structs.AllocDesiredStatusRun:
// New allocation with desired status running
m.ui.Output(fmt.Sprintf(
"Allocation %q created: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
}
} else {
switch {
case existing.client != alloc.client:
description := ""
if alloc.clientDesc != "" {
description = fmt.Sprintf(" (%s)", alloc.clientDesc)
}
// Allocation status has changed
m.ui.Output(fmt.Sprintf(
"Allocation %q status changed: %q -> %q%s",
limit(alloc.id, m.length), existing.client, alloc.client, description))
}
}
}
// Check if the status changed. We skip any transitions to pending status.
if existing.status != "" &&
update.status != structs.AllocClientStatusPending &&
existing.status != update.status {
m.ui.Output(fmt.Sprintf("Evaluation status changed: %q -> %q",
existing.status, update.status))
}
}
// monitor is used to start monitoring the given evaluation ID. It
// writes output directly to the monitor's ui, and returns the
// exit code for the command. If allowPrefix is false, monitor will only accept
// exact matching evalIDs.
//
// The return code will be 0 on successful evaluation. If there are
// problems scheduling the job (impossible constraints, resources
// exhausted, etc), then the return code will be 2. For any other
// failures (API connectivity, internal errors, etc), the return code
// will be 1.
func (m *monitor) monitor(evalID string, allowPrefix bool) int {
// Track if we encounter a scheduling failure. This can only be
// detected while querying allocations, so we use this bool to
// carry that status into the return code.
var schedFailure bool
// The user may have specified a prefix as eval id. We need to lookup the
// full id from the database first. Since we do this in a loop we need a
// variable to keep track if we've already written the header message.
var headerWritten bool
// Add the initial pending state
m.update(newEvalState())
for {
// Query the evaluation
eval, _, err := m.client.Evaluations().Info(evalID, nil)
if err != nil {
if !allowPrefix {
m.ui.Error(fmt.Sprintf("No evaluation with id %q found", evalID))
return 1
}
if len(evalID) == 1 {
m.ui.Error("Identifier must contain at least two characters.")
return 1
}
if len(evalID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
evalID = evalID[:len(evalID)-1]
}
evals, _, err := m.client.Evaluations().PrefixList(evalID)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
return 1
}
if len(evals) == 0 {
m.ui.Error(fmt.Sprintf("No evaluation(s) with prefix or id %q found", evalID))
return 1
}
if len(evals) > 1 {
// Format the evaluations
out := make([]string, len(evals)+1)
out[0] = "ID|Priority|Type|Triggered By|Status"
for i, eval := range evals {
out[i+1] = fmt.Sprintf("%s|%d|%s|%s|%s",
limit(eval.ID, m.length),
eval.Priority,
eval.Type,
eval.TriggeredBy,
eval.Status)
}
m.ui.Output(fmt.Sprintf("Prefix matched multiple evaluations\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single evaluation
eval, _, err = m.client.Evaluations().Info(evals[0].ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
}
}
if !headerWritten {
m.ui.Info(fmt.Sprintf("Monitoring evaluation %q", limit(eval.ID, m.length)))
headerWritten = true
}
// Create the new eval state.
state := newEvalState()
state.status = eval.Status
state.desc = eval.StatusDescription
state.node = eval.NodeID
state.job = eval.JobID
state.wait = eval.Wait
state.index = eval.CreateIndex
// Query the allocations associated with the evaluation
allocs, _, err := m.client.Evaluations().Allocations(eval.ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading allocations: %s", err))
return 1
}
// Add the allocs to the state
for _, alloc := range allocs {
state.allocs[alloc.ID] = &allocState{
id: alloc.ID,
group: alloc.TaskGroup,
node: alloc.NodeID,
desired: alloc.DesiredStatus,
desiredDesc: alloc.DesiredDescription,
client: alloc.ClientStatus,
clientDesc: alloc.ClientDescription,
index: alloc.CreateIndex,
}
}
// Update the state
m.update(state)
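// Terminal statuses (complete/failed/cancelled) stop polling; anything else waits and re-queries.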
switch eval.Status {
case structs.EvalStatusComplete, structs.EvalStatusFailed, structs.EvalStatusCancelled:
if len(eval.FailedTGAllocs) == 0 {
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q",
limit(eval.ID, m.length), eval.Status))
} else {
// There were failures making the allocations
schedFailure = true
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q but failed to place all allocations:",
limit(eval.ID, m.length), eval.Status))
// Print the failures per task group
for tg, metrics := range eval.FailedTGAllocs {
noun := "allocation"
if metrics.CoalescedFailures > 0 {
noun += "s"
}
m.ui.Output(fmt.Sprintf("Task Group %q (failed to place %d %s):", tg, metrics.CoalescedFailures+1, noun))
metrics := formatAllocMetrics(metrics, false, " ")
for _, line := range strings.Split(metrics, "\n") {
m.ui.Output(line)
}
}
if eval.BlockedEval != "" {
m.ui.Output(fmt.Sprintf("Evaluation %q waiting for additional capacity to place remainder",
limit(eval.BlockedEval, m.length)))
}
}
default:
// Wait for the next update
time.Sleep(updateWait)
continue
}
// Monitor the next eval in the chain, if present | {
m.Lock()
defer m.Unlock()
existing := m.state
// Swap in the new state at the end
defer func() {
m.state = update
}()
// Check if the evaluation was triggered by a node
if existing.node == "" && update.node != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by node %q",
limit(update.node, m.length)))
}
// Check if the evaluation was triggered by a job
if existing.job == "" && update.job != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by job %q", update.job)) | identifier_body |
monitor.go | This
// must be queried for explicitly so it is only included
// if there is important error information inside.
full *api.Allocation
}
// monitor wraps an evaluation monitor and holds metadata and
// state information.
type monitor struct {
ui cli.Ui
client *api.Client
state *evalState
// length determines the number of characters for identifiers in the ui.
length int
sync.Mutex
}
// newMonitor returns a new monitor. The returned monitor will
// write output information to the provided ui. The length parameter determines
// the number of characters for identifiers in the ui.
func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {
mon := &monitor{
ui: &cli.PrefixedUi{
InfoPrefix: "==> ",
OutputPrefix: " ",
ErrorPrefix: "==> ",
Ui: ui,
},
client: client,
state: newEvalState(),
length: length,
}
return mon
}
// update is used to update our monitor with new state. It can be
// called whether the passed information is new or not, and will
// only dump update messages when state changes.
func (m *monitor) update(update *evalState) {
m.Lock()
defer m.Unlock()
existing := m.state
// Swap in the new state at the end
defer func() {
m.state = update
}()
// Check if the evaluation was triggered by a node
if existing.node == "" && update.node != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by node %q",
limit(update.node, m.length)))
}
// Check if the evaluation was triggered by a job
if existing.job == "" && update.job != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by job %q", update.job))
}
// Check the allocations
for allocID, alloc := range update.allocs {
if existing, ok := existing.allocs[allocID]; !ok {
switch {
case alloc.index < update.index:
// New alloc with create index lower than the eval
// create index indicates modification
m.ui.Output(fmt.Sprintf(
"Allocation %q modified: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
case alloc.desired == structs.AllocDesiredStatusRun:
// New allocation with desired status running
m.ui.Output(fmt.Sprintf(
"Allocation %q created: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
}
} else {
switch {
case existing.client != alloc.client:
description := ""
if alloc.clientDesc != "" {
description = fmt.Sprintf(" (%s)", alloc.clientDesc)
}
// Allocation status has changed
m.ui.Output(fmt.Sprintf(
"Allocation %q status changed: %q -> %q%s",
limit(alloc.id, m.length), existing.client, alloc.client, description))
}
}
}
// Check if the status changed. We skip any transitions to pending status.
if existing.status != "" &&
update.status != structs.AllocClientStatusPending &&
existing.status != update.status {
m.ui.Output(fmt.Sprintf("Evaluation status changed: %q -> %q",
existing.status, update.status))
}
}
// monitor is used to start monitoring the given evaluation ID. It
// writes output directly to the monitor's ui, and returns the
// exit code for the command. If allowPrefix is false, monitor will only accept
// exact matching evalIDs.
//
// The return code will be 0 on successful evaluation. If there are
// problems scheduling the job (impossible constraints, resources
// exhausted, etc), then the return code will be 2. For any other
// failures (API connectivity, internal errors, etc), the return code
// will be 1.
func (m *monitor) monitor(evalID string, allowPrefix bool) int {
// Track if we encounter a scheduling failure. This can only be
// detected while querying allocations, so we use this bool to
// carry that status into the return code.
var schedFailure bool
// The user may have specified a prefix as eval id. We need to look up the
// full id from the database first. Since we do this in a loop we need a
// variable to keep track if we've already written the header message.
var headerWritten bool
// Add the initial pending state
m.update(newEvalState())
for {
// Query the evaluation
eval, _, err := m.client.Evaluations().Info(evalID, nil)
if err != nil {
if !allowPrefix {
m.ui.Error(fmt.Sprintf("No evaluation with id %q found", evalID))
return 1
}
if len(evalID) == 1 {
m.ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
return 1
}
if len(evalID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
evalID = evalID[:len(evalID)-1]
}
evals, _, err := m.client.Evaluations().PrefixList(evalID)
if err != nil |
if len(evals) == 0 {
m.ui.Error(fmt.Sprintf("No evaluation(s) with prefix or id %q found", evalID))
return 1
}
if len(evals) > 1 {
// Format the evaluations
out := make([]string, len(evals)+1)
out[0] = "ID|Priority|Type|Triggered By|Status"
for i, eval := range evals {
out[i+1] = fmt.Sprintf("%s|%d|%s|%s|%s",
limit(eval.ID, m.length),
eval.Priority,
eval.Type,
eval.TriggeredBy,
eval.Status)
}
m.ui.Output(fmt.Sprintf("Prefix matched multiple evaluations\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single evaluation
eval, _, err = m.client.Evaluations().Info(evals[0].ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
}
}
if !headerWritten {
m.ui.Info(fmt.Sprintf("Monitoring evaluation %q", limit(eval.ID, m.length)))
headerWritten = true
}
// Create the new eval state.
state := newEvalState()
state.status = eval.Status
state.desc = eval.StatusDescription
state.node = eval.NodeID
state.job = eval.JobID
state.wait = eval.Wait
state.index = eval.CreateIndex
// Query the allocations associated with the evaluation
allocs, _, err := m.client.Evaluations().Allocations(eval.ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading allocations: %s", err))
return 1
}
// Add the allocs to the state
for _, alloc := range allocs {
state.allocs[alloc.ID] = &allocState{
id: alloc.ID,
group: alloc.TaskGroup,
node: alloc.NodeID,
desired: alloc.DesiredStatus,
desiredDesc: alloc.DesiredDescription,
client: alloc.ClientStatus,
clientDesc: alloc.ClientDescription,
index: alloc.CreateIndex,
}
}
// Update the state
m.update(state)
switch eval.Status {
case structs.EvalStatusComplete, structs.EvalStatusFailed, structs.EvalStatusCancelled:
if len(eval.FailedTGAllocs) == 0 {
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q",
limit(eval.ID, m.length), eval.Status))
} else {
// There were failures making the allocations
schedFailure = true
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q but failed to place all allocations:",
limit(eval.ID, m.length), eval.Status))
// Print the failures per task group
for tg, metrics := range eval.FailedTGAllocs {
noun := "allocation"
if metrics.CoalescedFailures > 0 {
noun += "s"
}
m.ui.Output(fmt.Sprintf("Task Group %q (failed to place %d %s):", tg, metrics.CoalescedFailures+1, noun))
metrics := formatAllocMetrics(metrics, false, " ")
for _, line := range strings.Split(metrics, "\n") {
m.ui.Output(line)
}
}
if eval.BlockedEval != "" {
m.ui.Output(fmt.Sprintf("Evaluation %q waiting for additional capacity to place remainder",
limit(eval.BlockedEval, m.length)))
}
}
default:
// Wait for the next update
time.Sleep(updateWait)
continue
}
// Monitor the next eval in the chain, if present | {
m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
return 1
} | conditional_block |
monitor.go | ,
},
client: client,
state: newEvalState(),
length: length,
}
return mon
}
// update is used to update our monitor with new state. It can be
// called whether the passed information is new or not, and will
// only dump update messages when state changes.
func (m *monitor) update(update *evalState) {
m.Lock()
defer m.Unlock()
existing := m.state
// Swap in the new state at the end
defer func() {
m.state = update
}()
// Check if the evaluation was triggered by a node
if existing.node == "" && update.node != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by node %q",
limit(update.node, m.length)))
}
// Check if the evaluation was triggered by a job
if existing.job == "" && update.job != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by job %q", update.job))
}
// Check the allocations
for allocID, alloc := range update.allocs {
if existing, ok := existing.allocs[allocID]; !ok {
switch {
case alloc.index < update.index:
// New alloc with create index lower than the eval
// create index indicates modification
m.ui.Output(fmt.Sprintf(
"Allocation %q modified: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
case alloc.desired == structs.AllocDesiredStatusRun:
// New allocation with desired status running
m.ui.Output(fmt.Sprintf(
"Allocation %q created: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
}
} else {
switch {
case existing.client != alloc.client:
description := ""
if alloc.clientDesc != "" {
description = fmt.Sprintf(" (%s)", alloc.clientDesc)
}
// Allocation status has changed
m.ui.Output(fmt.Sprintf(
"Allocation %q status changed: %q -> %q%s",
limit(alloc.id, m.length), existing.client, alloc.client, description))
}
}
}
// Check if the status changed. We skip any transitions to pending status.
if existing.status != "" &&
update.status != structs.AllocClientStatusPending &&
existing.status != update.status {
m.ui.Output(fmt.Sprintf("Evaluation status changed: %q -> %q",
existing.status, update.status))
}
}
// monitor is used to start monitoring the given evaluation ID. It
// writes output directly to the monitor's ui, and returns the
// exit code for the command. If allowPrefix is false, monitor will only accept
// exact matching evalIDs.
//
// The return code will be 0 on successful evaluation. If there are
// problems scheduling the job (impossible constraints, resources
// exhausted, etc), then the return code will be 2. For any other
// failures (API connectivity, internal errors, etc), the return code
// will be 1.
func (m *monitor) monitor(evalID string, allowPrefix bool) int {
// Track if we encounter a scheduling failure. This can only be
// detected while querying allocations, so we use this bool to
// carry that status into the return code.
var schedFailure bool
// The user may have specified a prefix as eval id. We need to look up the
// full id from the database first. Since we do this in a loop we need a
// variable to keep track if we've already written the header message.
var headerWritten bool
// Add the initial pending state
m.update(newEvalState())
for {
// Query the evaluation
eval, _, err := m.client.Evaluations().Info(evalID, nil)
if err != nil {
if !allowPrefix {
m.ui.Error(fmt.Sprintf("No evaluation with id %q found", evalID))
return 1
}
if len(evalID) == 1 {
m.ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
return 1
}
if len(evalID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
evalID = evalID[:len(evalID)-1]
}
evals, _, err := m.client.Evaluations().PrefixList(evalID)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
return 1
}
if len(evals) == 0 {
m.ui.Error(fmt.Sprintf("No evaluation(s) with prefix or id %q found", evalID))
return 1
}
if len(evals) > 1 {
// Format the evaluations
out := make([]string, len(evals)+1)
out[0] = "ID|Priority|Type|Triggered By|Status"
for i, eval := range evals {
out[i+1] = fmt.Sprintf("%s|%d|%s|%s|%s",
limit(eval.ID, m.length),
eval.Priority,
eval.Type,
eval.TriggeredBy,
eval.Status)
}
m.ui.Output(fmt.Sprintf("Prefix matched multiple evaluations\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single evaluation
eval, _, err = m.client.Evaluations().Info(evals[0].ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
}
}
if !headerWritten {
m.ui.Info(fmt.Sprintf("Monitoring evaluation %q", limit(eval.ID, m.length)))
headerWritten = true
}
// Create the new eval state.
state := newEvalState()
state.status = eval.Status
state.desc = eval.StatusDescription
state.node = eval.NodeID
state.job = eval.JobID
state.wait = eval.Wait
state.index = eval.CreateIndex
// Query the allocations associated with the evaluation
allocs, _, err := m.client.Evaluations().Allocations(eval.ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading allocations: %s", err))
return 1
}
// Add the allocs to the state
for _, alloc := range allocs {
state.allocs[alloc.ID] = &allocState{
id: alloc.ID,
group: alloc.TaskGroup,
node: alloc.NodeID,
desired: alloc.DesiredStatus,
desiredDesc: alloc.DesiredDescription,
client: alloc.ClientStatus,
clientDesc: alloc.ClientDescription,
index: alloc.CreateIndex,
}
}
// Update the state
m.update(state)
switch eval.Status {
case structs.EvalStatusComplete, structs.EvalStatusFailed, structs.EvalStatusCancelled:
if len(eval.FailedTGAllocs) == 0 {
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q",
limit(eval.ID, m.length), eval.Status))
} else {
// There were failures making the allocations
schedFailure = true
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q but failed to place all allocations:",
limit(eval.ID, m.length), eval.Status))
// Print the failures per task group
for tg, metrics := range eval.FailedTGAllocs {
noun := "allocation"
if metrics.CoalescedFailures > 0 {
noun += "s"
}
m.ui.Output(fmt.Sprintf("Task Group %q (failed to place %d %s):", tg, metrics.CoalescedFailures+1, noun))
metrics := formatAllocMetrics(metrics, false, " ")
for _, line := range strings.Split(metrics, "\n") {
m.ui.Output(line)
}
}
if eval.BlockedEval != "" {
m.ui.Output(fmt.Sprintf("Evaluation %q waiting for additional capacity to place remainder",
limit(eval.BlockedEval, m.length)))
}
}
default:
// Wait for the next update
time.Sleep(updateWait)
continue
}
// Monitor the next eval in the chain, if present
if eval.NextEval != "" {
if eval.Wait.Nanoseconds() != 0 {
m.ui.Info(fmt.Sprintf(
"Monitoring next evaluation %q in %s",
limit(eval.NextEval, m.length), eval.Wait))
// Skip some unnecessary polling
time.Sleep(eval.Wait)
}
// Reset the state and monitor the new eval
m.state = newEvalState()
return m.monitor(eval.NextEval, allowPrefix)
}
break
}
// Treat scheduling failures specially using a dedicated exit code.
// This makes it easier to detect failures from the CLI.
if schedFailure {
return 2
}
return 0
}
// dumpAllocStatus is a helper to generate a more user-friendly error message
// for scheduling failures, displaying a high level status of why the job
// could not be scheduled.
func | dumpAllocStatus | identifier_name |
|
git.rs | self) -> Result<Option<Vec<u8>>> {
let partially_staged_files = self.get_partially_staged_files(true)?;
if partially_staged_files.is_empty() {
return Ok(None);
}
let mut diff_options = DiffOptions::new();
diff_options.show_binary(true);
for file in partially_staged_files.iter() {
diff_options.pathspec(file);
}
let unstaged_diff = self
.repository
.diff_index_to_workdir(None, Some(&mut diff_options))?;
// The Diff created by diff_index_to_workdir is owned by the repository.
// It means storing this diff separately isn't possible, and it is also
// difficult to store it along with the repository together in a struct,
// because that struct then will have a self reference between its diff
// and its repository.
//
// I'm not comfortable enough with ownership to understand the correct
// way to work around this, so the current approach that I'm taking is
// to copy the diff out into a buffer. This is not the most performant.
//
// For updates about this issue, we can keep tabs on
//
// https://github.com/rust-lang/git2-rs/issues/622
//
fn copy_diff(diff: &Diff) -> Result<Vec<u8>> {
let mut buffer = vec![];
diff.print(DiffFormat::Patch, |_, _, line| {
let origin = line.origin();
if origin == '+' || origin == '-' || origin == ' ' {
buffer.push(origin as u8);
}
buffer.append(&mut line.content().to_vec());
true
})?;
Ok(buffer)
}
Ok(Some(copy_diff(&unstaged_diff)?))
}
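// Illustrative sketch (not part of the original file; the function name is
// hypothetical): the byte buffer returned above can later be rehydrated with
// `Diff::from_buffer`, which is how `apply_modifications` elsewhere in this
// module turns the stored bytes back into a `git2::Diff` before applying it:
//
// fn rehydrate_and_apply(repo: &Repository, raw: &[u8]) -> Result<()> {
// let diff = Diff::from_buffer(raw)?;
// repo.apply(&diff, ApplyLocation::WorkDir, None)?;
// Ok(())
// }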
fn hide_partially_staged_changes(&self) -> Result<()> {
let partially_staged_files = self.get_partially_staged_files(false)?;
if partially_staged_files.is_empty() {
return Ok(());
}
let mut checkout_options = CheckoutBuilder::new();
checkout_options.force();
checkout_options.update_index(false);
for file in partially_staged_files.iter() {
checkout_options.path(file);
}
self.repository
.checkout_index(None, Some(&mut checkout_options))?;
Ok(())
}
pub fn get_staged_files(&self) -> Result<Vec<PathBuf>> {
let head_tree = match self.repository.head() {
Ok(head) => Ok(Some(head.peel_to_tree()?)),
Err(error) if error.code() == ErrorCode::UnbornBranch => Ok(None),
Err(error) => Err(error),
}?;
let staged_files = self
.repository
.diff_tree_to_index(head_tree.as_ref(), None, None)?
.deltas()
.flat_map(|delta| {
if delta.old_file().path() == delta.new_file().path() {
vec![delta.old_file().path()]
} else {
vec![delta.old_file().path(), delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf)
.collect();
Ok(staged_files)
}
fn get_partially_staged_files(&self, include_from_files: bool) -> Result<HashSet<PathBuf>> {
let staged_files = HashSet::from_iter(self.get_staged_files()?);
let unstaged_files = HashSet::from_iter(
self.repository
.diff_index_to_workdir(None, Some(DiffOptions::default().show_binary(true)))?
.deltas()
.flat_map(|delta| {
if include_from_files {
vec![delta.old_file().path(), delta.new_file().path()]
} else {
vec![delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf),
);
fn intersect<P: Eq + Hash>(one: HashSet<P>, two: &HashSet<P>) -> HashSet<P> {
one.into_iter().filter(|p| two.contains(p)).collect()
}
Ok(intersect(staged_files, &unstaged_files))
}
fn get_deleted_files(&self) -> Result<Vec<PathBuf>> {
let deleted_files = self
.repository
.diff_index_to_workdir(None, None)?
.deltas()
.filter(|delta| delta.status() == Delta::Deleted)
.filter_map(|delta| delta.old_file().path())
.map(Path::to_path_buf)
.collect_vec();
Ok(deleted_files)
}
fn save_snapshot_stash(&mut self) -> Result<Option<Stash>> {
if self.repository.is_empty()? {
return Ok(None);
}
fn create_signature<'a>() -> Result<Signature<'a>> {
// Because this time is only used to create a dummy signature to
// make the stash_save method happy, we don't need to use a real
// time, which skips some calls to the kernel.
//
let time = Time::new(0, 0);
Signature::new("Dummy", "[email protected]", &time)
.with_context(|| "Encountered an error when creating dummy authorship information.")
}
// Save state when in the middle of a merge prior to stashing changes in
// the working directory so that we can restore it afterward.
//
let merge_status = self.save_merge_status()?;
let signature = create_signature()?;
let stash_result = self
.repository
.stash_create(&signature, None, None);
if let Ok(stash_id) = stash_result {
self.repository.stash_store(&stash_id, Some("offstage backup"))?;
}
match stash_result {
Ok(stash_id) => Ok(Some(Stash {
stash_id,
merge_status,
})),
Err(error) if error.code() == ErrorCode::NotFound => Ok(None),
Err(error) => Err(anyhow!(error)
.context("Encountered an error when stashing a backup of the working directory.")),
}
}
fn save_merge_status(&self) -> Result<MergeStatus> {
let merge_head_path = &self.repository.path().join("MERGE_HEAD");
let merge_head = Self::read_file_to_string(merge_head_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_head_path.display()
)
})?;
let merge_mode_path = &self.repository.path().join("MERGE_MODE");
let merge_mode = Self::read_file_to_string(merge_mode_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_mode_path.display()
)
})?;
let merge_msg_path = &self.repository.path().join("MERGE_MSG");
let merge_msg = Self::read_file_to_string(merge_msg_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_msg_path.display()
)
})?;
Ok(MergeStatus {
merge_head,
merge_mode,
merge_msg,
})
}
fn restore_merge_status(&self, merge_status: &MergeStatus) -> Result<()> {
// Tries to restore all files before returning the first error if one exists.
let restore_merge_head_result =
merge_status
.merge_head
.as_ref()
.map_or(Ok(()), |merge_head| {
let merge_head_path = &self.repository.path().join("MERGE_HEAD");
fs::write(merge_head_path, merge_head).with_context(|| {
format!(
"Encountered an error when restoring {}.",
merge_head_path.display()
)
})
});
let restore_merge_mode_result =
merge_status
.merge_mode
.as_ref()
.map_or(Ok(()), |merge_mode| {
let merge_mode_path = &self.repository.path().join("MERGE_MODE");
fs::write(merge_mode_path, merge_mode).with_context(|| {
format!(
"Encountered an error when restoring {}.",
&merge_mode_path.display()
)
})
});
let restore_merge_msg_result =
merge_status.merge_msg.as_ref().map_or(Ok(()), |merge_msg| {
let merge_msg_path = &self.repository.path().join("MERGE_MSG");
fs::write(merge_msg_path, merge_msg).with_context(|| {
format!(
"Encountered an error when restoring {}.",
merge_msg_path.display()
)
})
});
restore_merge_head_result?;
restore_merge_mode_result?;
restore_merge_msg_result?;
Ok(())
}
fn read_file_to_string<P: AsRef<Path>>(file: P) -> Result<Option<String>> {
match fs::read_to_string(file) {
Ok(contents) => Ok(Some(contents)),
Err(error) if error.kind() == NotFound => Ok(None),
Err(error) => Err(anyhow!(error)),
}
}
fn delete_files<P: AsRef<Path>>(files: &Vec<P>) -> Result<()> {
for file in files.iter() {
fs::remove_file(file).with_context(|| {
format!(
"Encountered error when deleting {}.",
file.as_ref().display()
)
})?;
}
Ok(())
}
}
#[derive(Debug)]
pub struct Snapshot {
pub staged_files: Vec<PathBuf>,
backup_stash: Option<Stash>,
unstaged_diff: Option<Vec<u8>>,
}
#[derive(Debug)]
struct | Stash | identifier_name |
|
git.rs |
pub fn save_snapshot(&mut self, staged_files: Vec<PathBuf>) -> Result<Snapshot> {
let inner = || -> Result<Snapshot> {
let deleted_files = self.get_deleted_files()?;
let unstaged_diff = self.save_unstaged_diff()?;
let backup_stash = self.save_snapshot_stash()?;
// Because `git stash` restores the HEAD commit, it brings back uncommitted
// deleted files. We need to clear them before creating our snapshot.
GitRepository::delete_files(&deleted_files)?;
self.hide_partially_staged_changes()?;
Ok(Snapshot {
backup_stash,
staged_files,
unstaged_diff,
})
};
inner().with_context(|| "Encountered an error when saving a snapshot.")
}
pub fn apply_modifications(&mut self, snapshot: &Snapshot) -> Result<()> {
self.stage_modifications(snapshot)?;
if self.get_staged_files()?.is_empty() {
return Err(anyhow!("Prevented an empty git commit."));
}
if let Some(raw_diff) = &snapshot.unstaged_diff {
let unstaged_diff = Diff::from_buffer(raw_diff)?;
self.merge_modifications(unstaged_diff)?;
}
Ok(())
}
pub fn restore_snapshot(&mut self, snapshot: &Snapshot) -> Result<()> {
let mut inner = || -> Result<()> {
self.hard_reset()?;
if let Some(backup_stash) = &snapshot.backup_stash {
self.apply_stash(&backup_stash.stash_id)?;
self.restore_merge_status(&backup_stash.merge_status)?;
}
Ok(())
};
inner().with_context(|| "Encountered an error when restoring snapshot after another error.")
}
pub fn clean_snapshot(&mut self, snapshot: Snapshot) -> Result<()> {
let inner = || -> Result<()> {
if let Some(backup_stash) = snapshot.backup_stash {
let stash_index = self
.get_stash_index_from_id(&backup_stash.stash_id)?
.ok_or_else(|| {
anyhow!(
"Could not find a backup stash with id {}.",
&backup_stash.stash_id
)
})?;
self.repository.stash_drop(stash_index)?;
}
Ok(())
};
inner().with_context(|| {
"Encountered an error when cleaning snapshot. You might find a stash entry \
in the stash list."
})
}
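// Illustrative lifecycle sketch (an assumption, not part of the original file;
// `run_hooks` stands in for whatever the caller does between save and apply):
// the public methods of this module are intended to be paired roughly like so —
//
// let staged = repo.get_staged_files()?;
// let snapshot = repo.save_snapshot(staged)?;
// match run_hooks() {
// Ok(()) => repo.apply_modifications(&snapshot)?,
// Err(_) => repo.restore_snapshot(&snapshot)?,
// }
// repo.clean_snapshot(snapshot)?;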
fn stage_modifications(&mut self, snapshot: &Snapshot) -> Result<()> {
let mut index = self.repository.index()?;
index.add_all(
&snapshot.staged_files,
IndexAddOption::DEFAULT | IndexAddOption::DISABLE_PATHSPEC_MATCH,
None,
)?;
index.write()?;
Ok(())
}
fn merge_modifications(&self, unstaged_diff: Diff) -> Result<()> {
self.repository
.apply(&unstaged_diff, ApplyLocation::WorkDir, None)
.with_context(|| "Unstaged changes could not be restored due to a merge conflict.")
}
fn hard_reset(&self) -> Result<()> {
let head = self.repository.head()?.peel_to_commit()?;
self.repository
.reset(head.as_object(), ResetType::Hard, None)
.map_err(|error| anyhow!(error))
}
fn get_stash_index_from_id(&mut self, stash_id: &Oid) -> Result<Option<usize>> {
// It would be much better if libgit2 accepted a stash Oid
// instead of an index from the stash list.
let ref_stash_index = RefCell::new(None);
self.repository.stash_foreach(|index, _, oid| {
if oid == stash_id {
*ref_stash_index.borrow_mut() = Some(index);
false
} else {
true
}
})?;
// Copy the data out of the RefCell.
let stash_index = match *ref_stash_index.borrow() {
Some(index) => Some(index),
None => None,
};
Ok(stash_index)
}
fn apply_stash(&mut self, stash_id: &Oid) -> Result<()> {
let stash_index = self
.get_stash_index_from_id(stash_id)?
.ok_or_else(|| anyhow!("Could not find a backup stash with id {}.", stash_id))?;
self.repository.stash_apply(
stash_index,
Some(StashApplyOptions::default().reinstantiate_index()),
)?;
Ok(())
}
fn save_unstaged_diff(&self) -> Result<Option<Vec<u8>>> {
let partially_staged_files = self.get_partially_staged_files(true)?;
if partially_staged_files.is_empty() {
return Ok(None);
}
let mut diff_options = DiffOptions::new();
diff_options.show_binary(true);
for file in partially_staged_files.iter() {
diff_options.pathspec(file);
}
let unstaged_diff = self
.repository
.diff_index_to_workdir(None, Some(&mut diff_options))?;
// The Diff created by diff_index_to_workdir is owned by the repository.
// It means storing this diff separately isn't possible, and it is also
// difficult to store it along with the repository together in a struct,
// because that struct then will have a self reference between its diff
// and its repository.
//
// I'm not comfortable enough with ownership to understand the correct
// way to work around this, so the current approach that I'm taking is
// to copy the diff out into a buffer. This is not the most performant.
//
// For updates about this issue, we can keep tabs on
//
// https://github.com/rust-lang/git2-rs/issues/622
//
fn copy_diff(diff: &Diff) -> Result<Vec<u8>> {
let mut buffer = vec![];
diff.print(DiffFormat::Patch, |_, _, line| {
let origin = line.origin();
if origin == '+' || origin == '-' || origin == ' ' {
buffer.push(origin as u8);
}
buffer.append(&mut line.content().to_vec());
true
})?;
Ok(buffer)
}
Ok(Some(copy_diff(&unstaged_diff)?))
}
fn hide_partially_staged_changes(&self) -> Result<()> {
let partially_staged_files = self.get_partially_staged_files(false)?;
if partially_staged_files.is_empty() {
return Ok(());
}
let mut checkout_options = CheckoutBuilder::new();
checkout_options.force();
checkout_options.update_index(false);
for file in partially_staged_files.iter() {
checkout_options.path(file);
}
self.repository
.checkout_index(None, Some(&mut checkout_options))?;
Ok(())
}
pub fn get_staged_files(&self) -> Result<Vec<PathBuf>> {
let head_tree = match self.repository.head() {
Ok(head) => Ok(Some(head.peel_to_tree()?)),
Err(error) if error.code() == ErrorCode::UnbornBranch => Ok(None),
Err(error) => Err(error),
}?;
let staged_files = self
.repository
.diff_tree_to_index(head_tree.as_ref(), None, None)?
.deltas()
.flat_map(|delta| {
if delta.old_file().path() == delta.new_file().path() {
vec![delta.old_file().path()]
} else {
vec![delta.old_file().path(), delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf)
.collect();
Ok(staged_files)
}
fn get_partially_staged_files(&self, include_from_files: bool) -> Result<HashSet<PathBuf>> {
let staged_files = HashSet::from_iter(self.get_staged_files()?);
let unstaged_files = HashSet::from_iter(
self.repository
.diff_index_to_workdir(None, Some(DiffOptions::default().show_binary(true)))?
.deltas()
.flat_map(|delta| {
if include_from_files {
vec![delta.old_file().path(), delta.new_file().path()]
} else {
vec![delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf),
);
fn intersect<P: Eq + Hash>(one: HashSet<P>, two: &HashSet<P>) -> HashSet<P> {
one.into_iter().filter(|p| two.contains(p)).collect()
}
Ok(intersect(staged_files, &unstaged_files))
}
fn get_deleted_files(&self) -> Result<Vec<PathBuf>> {
let deleted_files = self
.repository
.diff_index_to_workdir(None, | {
// When strict hash verification is disabled, it means libgit2 will not
// compute the "object id" of Git objects (which is a SHA-1 hash) after
// reading them to verify they match the object ids being used to look
// them up. This improves performance, and I don't have in front of me
// a concrete example where this is necessary to prevent data loss. If
// one becomes obvious, then we should make this configurable.
//
git2::opts::strict_hash_verification(false);
let repository = Repository::open_from_env()
.with_context(|| "Encountered an error when opening the Git repository.")?;
Ok(Self { repository })
} | identifier_body |
|
git.rs | backup stash with id {}.", stash_id))?;
self.repository.stash_apply(
stash_index,
Some(StashApplyOptions::default().reinstantiate_index()),
)?;
Ok(())
}
fn save_unstaged_diff(&self) -> Result<Option<Vec<u8>>> {
let partially_staged_files = self.get_partially_staged_files(true)?;
if partially_staged_files.is_empty() {
return Ok(None);
}
let mut diff_options = DiffOptions::new();
diff_options.show_binary(true);
for file in partially_staged_files.iter() {
diff_options.pathspec(file);
}
let unstaged_diff = self
.repository
.diff_index_to_workdir(None, Some(&mut diff_options))?;
// The Diff created by diff_index_to_workdir is owned by the repository.
// It means storing this diff separately isn't possible, and it is also
// difficult to store it along with the repository together in a struct,
// because that struct then will have a self reference between its diff
// and its repository.
//
// I'm not comfortable enough with ownership to understand the correct
// way to work around this, so the current approach that I'm taking is
// to copy the diff out into a buffer. This is not the most performant.
//
// For updates about this issue, we can keep tabs on
//
// https://github.com/rust-lang/git2-rs/issues/622
//
fn copy_diff(diff: &Diff) -> Result<Vec<u8>> {
let mut buffer = vec![];
diff.print(DiffFormat::Patch, |_, _, line| {
let origin = line.origin();
if origin == '+' || origin == '-' || origin == ' ' {
buffer.push(origin as u8);
}
buffer.append(&mut line.content().to_vec());
true
})?;
Ok(buffer)
}
Ok(Some(copy_diff(&unstaged_diff)?))
}
fn hide_partially_staged_changes(&self) -> Result<()> {
let partially_staged_files = self.get_partially_staged_files(false)?;
if partially_staged_files.is_empty() {
return Ok(());
}
let mut checkout_options = CheckoutBuilder::new();
checkout_options.force();
checkout_options.update_index(false);
for file in partially_staged_files.iter() {
checkout_options.path(file);
}
self.repository
.checkout_index(None, Some(&mut checkout_options))?;
Ok(())
}
pub fn get_staged_files(&self) -> Result<Vec<PathBuf>> {
let head_tree = match self.repository.head() {
Ok(head) => Ok(Some(head.peel_to_tree()?)),
Err(error) if error.code() == ErrorCode::UnbornBranch => Ok(None),
Err(error) => Err(error),
}?;
let staged_files = self
.repository
.diff_tree_to_index(head_tree.as_ref(), None, None)?
.deltas()
.flat_map(|delta| {
if delta.old_file().path() == delta.new_file().path() {
vec![delta.old_file().path()]
} else {
vec![delta.old_file().path(), delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf)
.collect();
Ok(staged_files)
}
fn get_partially_staged_files(&self, include_from_files: bool) -> Result<HashSet<PathBuf>> {
let staged_files = HashSet::from_iter(self.get_staged_files()?);
let unstaged_files = HashSet::from_iter(
self.repository
.diff_index_to_workdir(None, Some(DiffOptions::default().show_binary(true)))?
.deltas()
.flat_map(|delta| {
if include_from_files {
vec![delta.old_file().path(), delta.new_file().path()]
} else {
vec![delta.new_file().path()]
}
})
.filter_map(std::convert::identity)
.map(Path::to_path_buf),
);
fn intersect<P: Eq + Hash>(one: HashSet<P>, two: &HashSet<P>) -> HashSet<P> {
one.into_iter().filter(|p| two.contains(p)).collect()
}
Ok(intersect(staged_files, &unstaged_files))
}
fn get_deleted_files(&self) -> Result<Vec<PathBuf>> {
let deleted_files = self
.repository
.diff_index_to_workdir(None, None)?
.deltas()
.filter(|delta| delta.status() == Delta::Deleted)
.filter_map(|delta| delta.old_file().path())
.map(Path::to_path_buf)
.collect_vec();
Ok(deleted_files)
}
fn save_snapshot_stash(&mut self) -> Result<Option<Stash>> {
if self.repository.is_empty()? {
return Ok(None);
}
fn create_signature<'a>() -> Result<Signature<'a>> {
// Because this time is only used to create a dummy signature to
// make the stash_save method happy, we don't need to use a real
// time, which skips some calls to the kernel.
//
let time = Time::new(0, 0);
Signature::new("Dummy", "[email protected]", &time)
.with_context(|| "Encountered an error when creating dummy authorship information.")
}
// Save state when in the middle of a merge prior to stashing changes in
// the working directory so that we can restore it afterward.
//
let merge_status = self.save_merge_status()?;
let signature = create_signature()?;
let stash_result = self
.repository
.stash_create(&signature, None, None);
if let Ok(stash_id) = stash_result {
self.repository.stash_store(&stash_id, Some("offstage backup"))?;
}
match stash_result {
Ok(stash_id) => Ok(Some(Stash {
stash_id,
merge_status,
})),
Err(error) if error.code() == ErrorCode::NotFound => Ok(None),
Err(error) => Err(anyhow!(error)
.context("Encountered an error when stashing a backup of the working directory.")),
}
}
fn save_merge_status(&self) -> Result<MergeStatus> {
let merge_head_path = &self.repository.path().join("MERGE_HEAD");
let merge_head = Self::read_file_to_string(merge_head_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_head_path.display()
)
})?;
let merge_mode_path = &self.repository.path().join("MERGE_MODE");
let merge_mode = Self::read_file_to_string(merge_mode_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_mode_path.display()
)
})?;
let merge_msg_path = &self.repository.path().join("MERGE_MSG");
let merge_msg = Self::read_file_to_string(merge_msg_path).with_context(|| {
format!(
"Encountered an error when saving {}.",
merge_msg_path.display()
)
})?;
Ok(MergeStatus {
merge_head,
merge_mode,
merge_msg,
})
}
fn restore_merge_status(&self, merge_status: &MergeStatus) -> Result<()> {
// Tries to restore all files before returning the first error if one exists.
let restore_merge_head_result =
merge_status
.merge_head
.as_ref()
.map_or(Ok(()), |merge_head| {
let merge_head_path = &self.repository.path().join("MERGE_HEAD");
fs::write(merge_head_path, merge_head).with_context(|| {
format!(
"Encountered an error when restoring {}.",
merge_head_path.display()
)
})
});
let restore_merge_mode_result =
merge_status
.merge_mode
.as_ref()
.map_or(Ok(()), |merge_mode| {
let merge_mode_path = &self.repository.path().join("MERGE_MODE");
fs::write(merge_mode_path, merge_mode).with_context(|| {
format!(
"Encountered an error when restoring {}.",
&merge_mode_path.display()
)
})
});
let restore_merge_msg_result =
merge_status.merge_msg.as_ref().map_or(Ok(()), |merge_msg| {
let merge_msg_path = &self.repository.path().join("MERGE_MSG");
fs::write(merge_msg_path, merge_msg).with_context(|| {
format!(
"Encountered an error when restoring {}.",
merge_msg_path.display()
)
})
});
restore_merge_head_result?;
restore_merge_mode_result?;
restore_merge_msg_result?;
Ok(())
}
fn read_file_to_string<P: AsRef<Path>>(file: P) -> Result<Option<String>> {
match fs::read_to_string(file) {
Ok(contents) => Ok(Some(contents)),
Err(error) if error.kind() == NotFound => Ok(None),
Err(error) => Err(anyhow!(error)),
}
}
fn delete_files<P: AsRef<Path>>(files: &Vec<P>) -> Result<()> {
for file in files.iter() {
fs::remove_file(file).with_context(|| {
format!( | "Encountered error when deleting {}.",
file.as_ref().display()
)
})?;
} | random_line_split |
|
client.rs | afka", "librdkafka: {} {}", fac, log_message),
RDKafkaLogLevel::Info => info!(target: "librdkafka", "librdkafka: {} {}", fac, log_message),
RDKafkaLogLevel::Debug => debug!(target: "librdkafka", "librdkafka: {} {}", fac, log_message),
}
}
/// Receives the statistics of the librdkafka client. To enable, the
/// "statistics.interval.ms" configuration parameter must be specified.
fn stats(&self, statistics: Statistics) {
info!("Client stats: {:?}", statistics);
}
/// Receives global errors from the librdkafka client.
fn error(&self, error: KafkaError, reason: &str) {
error!("librdkafka: {}: {}", error, reason);
}
// NOTE: when adding a new method, remember to add it to the FutureProducerContext as well.
// https://github.com/rust-lang/rfcs/pull/1406 will maybe help in the future.
}
/// An empty `ClientContext` that can be used when no context is needed. Default
/// callback implementations will be used.
#[derive(Clone, Default)]
pub struct DefaultClientContext;
impl ClientContext for DefaultClientContext {}
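// Illustrative sketch (an assumption, not part of the original file): a custom
// context only needs to override the callbacks it cares about; the rest fall
// back to the default implementations defined on the trait above.
//
// struct LoggingContext;
//
// impl ClientContext for LoggingContext {
// fn stats(&self, statistics: Statistics) {
// info!("got librdkafka statistics: {:?}", statistics);
// }
// }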
//
// ********** CLIENT **********
//
/// A native rdkafka-sys client. This struct shouldn't be used directly. Use higher level `Client`
/// or producers and consumers.
pub struct NativeClient {
ptr: *mut RDKafka,
}
// The library is completely thread safe, according to the documentation.
unsafe impl Sync for NativeClient {}
unsafe impl Send for NativeClient {}
impl NativeClient {
/// Wraps a pointer to an RDKafka object and returns a new NativeClient.
pub(crate) unsafe fn from_ptr(ptr: *mut RDKafka) -> NativeClient {
NativeClient { ptr }
}
/// Returns the wrapped pointer to RDKafka.
pub fn ptr(&self) -> *mut RDKafka {
self.ptr
}
}
impl Drop for NativeClient {
fn drop(&mut self) {
trace!("Destroying client: {:p}", self.ptr);
unsafe {
rdsys::rd_kafka_destroy(self.ptr);
}
trace!("Client destroyed: {:?}", self.ptr);
}
}
/// A low level rdkafka client. This client shouldn't be used directly. The producer and consumer modules
/// provide different producer and consumer implementations based on top of `Client` that can be
/// used instead.
pub struct Client<C: ClientContext = DefaultClientContext> {
native: NativeClient,
context: Box<C>,
}
impl<C: ClientContext> Client<C> {
/// Creates a new `Client` given a configuration, a client type and a context.
pub fn new(
config: &ClientConfig,
native_config: NativeClientConfig,
rd_kafka_type: RDKafkaType,
context: C,
) -> KafkaResult<Client<C>> {
let mut err_buf = ErrBuf::new();
let mut boxed_context = Box::new(context);
unsafe {
rdsys::rd_kafka_conf_set_opaque(
native_config.ptr(),
(&mut *boxed_context) as *mut C as *mut c_void,
)
};
unsafe { rdsys::rd_kafka_conf_set_log_cb(native_config.ptr(), Some(native_log_cb::<C>)) };
unsafe {
rdsys::rd_kafka_conf_set_stats_cb(native_config.ptr(), Some(native_stats_cb::<C>))
};
unsafe {
rdsys::rd_kafka_conf_set_error_cb(native_config.ptr(), Some(native_error_cb::<C>))
};
let client_ptr = unsafe {
rdsys::rd_kafka_new(
rd_kafka_type,
native_config.ptr_move(),
err_buf.as_mut_ptr(),
err_buf.len(),
)
};
trace!("Create new librdkafka client {:p}", client_ptr);
if client_ptr.is_null() {
return Err(KafkaError::ClientCreation(err_buf.to_string()));
}
unsafe { rdsys::rd_kafka_set_log_level(client_ptr, config.log_level as i32) };
Ok(Client {
native: unsafe { NativeClient::from_ptr(client_ptr) },
context: boxed_context,
})
}
/// Returns a reference to the native rdkafka-sys client.
pub fn native_client(&self) -> &NativeClient {
&self.native
}
/// Returns a pointer to the native rdkafka-sys client.
pub fn native_ptr(&self) -> *mut RDKafka {
self.native.ptr
}
/// Returns a reference to the context.
pub fn context(&self) -> &C {
self.context.as_ref()
}
/// Returns the metadata information for the specified topic, or for all topics in the cluster
/// if no topic is specified.
pub fn fetch_metadata<T: Into<Option<Duration>>>(
&self,
topic: Option<&str>,
timeout: T,
) -> KafkaResult<Metadata> {
let mut metadata_ptr: *const RDKafkaMetadata = ptr::null_mut();
let (flag, native_topic) = if let Some(topic_name) = topic {
(0, Some(self.native_topic(topic_name)?))
} else {
(1, None)
};
trace!("Starting metadata fetch");
let ret = unsafe {
rdsys::rd_kafka_metadata(
self.native_ptr(),
flag,
native_topic
.map(|t| t.ptr())
.unwrap_or_else(NativeTopic::null),
&mut metadata_ptr as *mut *const RDKafkaMetadata,
timeout_to_ms(timeout),
)
};
trace!("Metadata fetch completed");
if ret.is_error() {
return Err(KafkaError::MetadataFetch(ret.into()));
}
Ok(unsafe { Metadata::from_ptr(metadata_ptr) })
}
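// Illustrative usage sketch (an assumption, not part of the original file;
// `example_client` is a hypothetical, already-constructed `Client`): passing
// `None` requests metadata for every topic in the cluster, and the bounded
// timeout keeps the call from blocking indefinitely.
//
// let metadata = example_client.fetch_metadata(None, Duration::from_secs(5))?;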
/// Returns high and low watermark for the specified topic and partition.
pub fn fetch_watermarks<T: Into<Option<Duration>>>(
&self,
topic: &str,
partition: i32,
timeout: T,
) -> KafkaResult<(i64, i64)> {
let mut low = -1;
let mut high = -1;
let topic_c = CString::new(topic.to_string())?;
let ret = unsafe {
rdsys::rd_kafka_query_watermark_offsets(
self.native_ptr(),
topic_c.as_ptr(),
partition,
&mut low as *mut i64,
&mut high as *mut i64,
timeout_to_ms(timeout),
)
};
if ret.is_error() {
return Err(KafkaError::MetadataFetch(ret.into()));
}
Ok((low, high))
}
/// Returns the group membership information for the given group. If no group is
/// specified, all groups will be returned.
pub fn fetch_group_list<T: Into<Option<Duration>>>(
&self,
group: Option<&str>,
timeout: T,
) -> KafkaResult<GroupList> {
// Careful: group_c must stay alive until rd_kafka_list_groups below has used its pointer
let group_c = CString::new(group.map_or("".to_string(), ToString::to_string))?;
let group_c_ptr = if group.is_some() {
group_c.as_ptr()
} else {
ptr::null_mut()
}; | let mut group_list_ptr: *const RDKafkaGroupList = ptr::null_mut();
trace!("Starting group list fetch");
let ret = unsafe {
rdsys::rd_kafka_list_groups(
self.native_ptr(),
group_c_ptr,
&mut group_list_ptr as *mut *const RDKafkaGroupList,
timeout_to_ms(timeout),
)
};
trace!("Group list fetch completed");
if ret.is_error() {
return Err(KafkaError::GroupListFetch(ret.into()));
}
Ok(unsafe { GroupList::from_ptr(group_list_ptr) })
}
/// Returns a NativeTopic from the current client. The NativeTopic shouldn't outlive the client
/// it was generated from.
fn native_topic(&self, topic: &str) -> KafkaResult<NativeTopic> {
let topic_c = CString::new(topic.to_string())?;
Ok(unsafe {
NativeTopic::from_ptr(rdsys::rd_kafka_topic_new(
self.native_ptr(),
topic_c.as_ptr(),
ptr::null_mut(),
))
})
}
/// Returns a NativeQueue from the current client. The NativeQueue shouldn't
/// outlive the client it was generated from.
pub(crate) fn new_native_queue(&self) -> NativeQueue {
unsafe { NativeQueue::from_ptr(rdsys::rd_kafka_queue_new(self.native_ptr())) }
}
}
struct NativeTopic {
ptr: *mut RDKafkaTopic,
}
unsafe impl Send for NativeTopic {}
unsafe impl Sync for NativeTopic {}
impl NativeTopic {
/// Wraps a pointer to an `RDKafkaTopic` object and returns a new `NativeTopic`.
unsafe fn from_ptr(ptr: *mut RDKafkaTopic) -> NativeTopic {
NativeTopic { ptr }
}
/// Returns the pointer to the librdkafka RDKafkaTopic structure.
fn ptr(&self) -> *mut RDKafkaTopic {
self.ptr
}
/// Returns a null pointer.
fn null() -> *mut RDKafkaTopic {
ptr::null::<u | random_line_split |
|
client.rs | unsafe {
rdsys::rd_kafka_destroy(self.ptr);
}
trace!("Client destroyed: {:?}", self.ptr);
}
}
/// A low level rdkafka client. This client shouldn't be used directly. The producer and consumer modules
/// provide different producer and consumer implementations based on top of `Client` that can be
/// used instead.
pub struct Client<C: ClientContext = DefaultClientContext> {
native: NativeClient,
context: Box<C>,
}
impl<C: ClientContext> Client<C> {
/// Creates a new `Client` given a configuration, a client type and a context.
pub fn new(
config: &ClientConfig,
native_config: NativeClientConfig,
rd_kafka_type: RDKafkaType,
context: C,
) -> KafkaResult<Client<C>> {
let mut err_buf = ErrBuf::new();
let mut boxed_context = Box::new(context);
unsafe {
rdsys::rd_kafka_conf_set_opaque(
native_config.ptr(),
(&mut *boxed_context) as *mut C as *mut c_void,
)
};
unsafe { rdsys::rd_kafka_conf_set_log_cb(native_config.ptr(), Some(native_log_cb::<C>)) };
unsafe {
rdsys::rd_kafka_conf_set_stats_cb(native_config.ptr(), Some(native_stats_cb::<C>))
};
unsafe {
rdsys::rd_kafka_conf_set_error_cb(native_config.ptr(), Some(native_error_cb::<C>))
};
let client_ptr = unsafe {
rdsys::rd_kafka_new(
rd_kafka_type,
native_config.ptr_move(),
err_buf.as_mut_ptr(),
err_buf.len(),
)
};
trace!("Create new librdkafka client {:p}", client_ptr);
if client_ptr.is_null() {
return Err(KafkaError::ClientCreation(err_buf.to_string()));
}
unsafe { rdsys::rd_kafka_set_log_level(client_ptr, config.log_level as i32) };
Ok(Client {
native: unsafe { NativeClient::from_ptr(client_ptr) },
context: boxed_context,
})
}
/// Returns a reference to the native rdkafka-sys client.
pub fn native_client(&self) -> &NativeClient {
&self.native
}
/// Returns a pointer to the native rdkafka-sys client.
pub fn native_ptr(&self) -> *mut RDKafka {
self.native.ptr
}
/// Returns a reference to the context.
pub fn context(&self) -> &C {
self.context.as_ref()
}
/// Returns the metadata information for the specified topic, or for all topics in the cluster
/// if no topic is specified.
pub fn fetch_metadata<T: Into<Option<Duration>>>(
&self,
topic: Option<&str>,
timeout: T,
) -> KafkaResult<Metadata> {
let mut metadata_ptr: *const RDKafkaMetadata = ptr::null_mut();
let (flag, native_topic) = if let Some(topic_name) = topic {
(0, Some(self.native_topic(topic_name)?))
} else {
(1, None)
};
trace!("Starting metadata fetch");
let ret = unsafe {
rdsys::rd_kafka_metadata(
self.native_ptr(),
flag,
native_topic
.map(|t| t.ptr())
.unwrap_or_else(NativeTopic::null),
&mut metadata_ptr as *mut *const RDKafkaMetadata,
timeout_to_ms(timeout),
)
};
trace!("Metadata fetch completed");
if ret.is_error() {
return Err(KafkaError::MetadataFetch(ret.into()));
}
Ok(unsafe { Metadata::from_ptr(metadata_ptr) })
}
/// Returns high and low watermark for the specified topic and partition.
pub fn fetch_watermarks<T: Into<Option<Duration>>>(
&self,
topic: &str,
partition: i32,
timeout: T,
) -> KafkaResult<(i64, i64)> {
let mut low = -1;
let mut high = -1;
let topic_c = CString::new(topic.to_string())?;
let ret = unsafe {
rdsys::rd_kafka_query_watermark_offsets(
self.native_ptr(),
topic_c.as_ptr(),
partition,
&mut low as *mut i64,
&mut high as *mut i64,
timeout_to_ms(timeout),
)
};
if ret.is_error() {
return Err(KafkaError::MetadataFetch(ret.into()));
}
Ok((low, high))
}
/// Returns the group membership information for the given group. If no group is
/// specified, all groups will be returned.
pub fn fetch_group_list<T: Into<Option<Duration>>>(
&self,
group: Option<&str>,
timeout: T,
) -> KafkaResult<GroupList> {
// Careful: group_c must stay alive until rd_kafka_list_groups below has used its pointer
let group_c = CString::new(group.map_or("".to_string(), ToString::to_string))?;
let group_c_ptr = if group.is_some() {
group_c.as_ptr()
} else {
ptr::null_mut()
};
let mut group_list_ptr: *const RDKafkaGroupList = ptr::null_mut();
trace!("Starting group list fetch");
let ret = unsafe {
rdsys::rd_kafka_list_groups(
self.native_ptr(),
group_c_ptr,
&mut group_list_ptr as *mut *const RDKafkaGroupList,
timeout_to_ms(timeout),
)
};
trace!("Group list fetch completed");
if ret.is_error() {
return Err(KafkaError::GroupListFetch(ret.into()));
}
Ok(unsafe { GroupList::from_ptr(group_list_ptr) })
}
/// Returns a NativeTopic from the current client. The NativeTopic shouldn't outlive the client
/// it was generated from.
fn native_topic(&self, topic: &str) -> KafkaResult<NativeTopic> {
let topic_c = CString::new(topic.to_string())?;
Ok(unsafe {
NativeTopic::from_ptr(rdsys::rd_kafka_topic_new(
self.native_ptr(),
topic_c.as_ptr(),
ptr::null_mut(),
))
})
}
/// Returns a NativeQueue from the current client. The NativeQueue shouldn't
/// outlive the client it was generated from.
pub(crate) fn new_native_queue(&self) -> NativeQueue {
unsafe { NativeQueue::from_ptr(rdsys::rd_kafka_queue_new(self.native_ptr())) }
}
}
struct NativeTopic {
ptr: *mut RDKafkaTopic,
}
unsafe impl Send for NativeTopic {}
unsafe impl Sync for NativeTopic {}
impl NativeTopic {
/// Wraps a pointer to an `RDKafkaTopic` object and returns a new `NativeTopic`.
unsafe fn from_ptr(ptr: *mut RDKafkaTopic) -> NativeTopic {
NativeTopic { ptr }
}
/// Returns the pointer to the librdkafka RDKafkaTopic structure.
fn ptr(&self) -> *mut RDKafkaTopic {
self.ptr
}
/// Returns a null pointer.
fn null() -> *mut RDKafkaTopic {
ptr::null::<u8>() as *mut RDKafkaTopic
}
}
impl Drop for NativeTopic {
fn drop(&mut self) {
trace!("Destroying NativeTopic: {:?}", self.ptr);
unsafe {
rdsys::rd_kafka_topic_destroy(self.ptr);
}
trace!("NativeTopic destroyed: {:?}", self.ptr);
}
}
pub(crate) struct NativeQueue {
ptr: *mut RDKafkaQueue,
}
// The library is completely thread safe, according to the documentation.
unsafe impl Sync for NativeQueue {}
unsafe impl Send for NativeQueue {}
impl NativeQueue {
/// Wraps a pointer to an `RDKafkaQueue` object and returns a new
/// `NativeQueue`.
unsafe fn from_ptr(ptr: *mut RDKafkaQueue) -> NativeQueue {
NativeQueue { ptr }
}
/// Returns the pointer to the librdkafka RDKafkaQueue structure.
pub fn ptr(&self) -> *mut RDKafkaQueue {
self.ptr
}
pub fn poll<T: Into<Option<Duration>>>(&self, t: T) -> *mut RDKafkaEvent {
unsafe { rdsys::rd_kafka_queue_poll(self.ptr, timeout_to_ms(t)) }
}
}
impl Drop for NativeQueue {
fn drop(&mut self) {
trace!("Destroying queue: {:?}", self.ptr);
unsafe {
rdsys::rd_kafka_queue_destroy(self.ptr);
}
trace!("Queue destroyed: {:?}", self.ptr);
}
}
pub(crate) unsafe extern "C" fn native_log_cb<C: ClientContext>(
client: *const RDKafka,
level: i32,
fac: *const c_char,
buf: *const c_char,
) | {
let fac = CStr::from_ptr(fac).to_string_lossy();
let log_message = CStr::from_ptr(buf).to_string_lossy();
let context = Box::from_raw(rdsys::rd_kafka_opaque(client) as *mut C);
(*context).log(
RDKafkaLogLevel::from_int(level),
fac.trim(),
log_message.trim(),
);
mem::forget(context); // Do not free the context
} | identifier_body |
|
client.rs | afka", "librdkafka: {} {}", fac, log_message),
RDKafkaLogLevel::Info => info!(target: "librdkafka", "librdkafka: {} {}", fac, log_message),
RDKafkaLogLevel::Debug => debug!(target: "librdkafka", "librdkafka: {} {}", fac, log_message),
}
}
/// Receives the statistics of the librdkafka client. To enable, the
/// "statistics.interval.ms" configuration parameter must be specified.
fn stats(&self, statistics: Statistics) {
info!("Client stats: {:?}", statistics);
}
/// Receives global errors from the librdkafka client.
fn error(&self, error: KafkaError, reason: &str) {
error!("librdkafka: {}: {}", error, reason);
}
// NOTE: when adding a new method, remember to add it to the FutureProducerContext as well.
// https://github.com/rust-lang/rfcs/pull/1406 will maybe help in the future.
}
/// An empty `ClientContext` that can be used when no context is needed. Default
/// callback implementations will be used.
#[derive(Clone, Default)]
pub struct DefaultClientContext;
impl ClientContext for DefaultClientContext {}
//
// ********** CLIENT **********
//
/// A native rdkafka-sys client. This struct shouldn't be used directly. Use higher level `Client`
/// or producers and consumers.
pub struct NativeClient {
ptr: *mut RDKafka,
}
// The library is completely thread safe, according to the documentation.
unsafe impl Sync for NativeClient {}
unsafe impl Send for NativeClient {}
impl NativeClient {
/// Wraps a pointer to an RDKafka object and returns a new NativeClient.
pub(crate) unsafe fn from_ptr(ptr: *mut RDKafka) -> NativeClient {
NativeClient { ptr }
}
/// Returns the wrapped pointer to RDKafka.
pub fn ptr(&self) -> *mut RDKafka {
self.ptr
}
}
impl Drop for NativeClient {
fn drop(&mut self) {
trace!("Destroying client: {:p}", self.ptr);
unsafe {
rdsys::rd_kafka_destroy(self.ptr);
}
trace!("Client destroyed: {:?}", self.ptr);
}
}
/// A low level rdkafka client. This client shouldn't be used directly. The producer and consumer modules
/// provide different producer and consumer implementations based on top of `Client` that can be
/// used instead.
pub struct Client<C: ClientContext = DefaultClientContext> {
native: NativeClient,
context: Box<C>,
}
impl<C: ClientContext> Client<C> {
/// Creates a new `Client` given a configuration, a client type and a context.
pub fn new(
config: &ClientConfig,
native_config: NativeClientConfig,
rd_kafka_type: RDKafkaType,
context: C,
) -> KafkaResult<Client<C>> {
let mut err_buf = ErrBuf::new();
let mut boxed_context = Box::new(context);
unsafe {
rdsys::rd_kafka_conf_set_opaque(
native_config.ptr(),
(&mut *boxed_context) as *mut C as *mut c_void,
)
};
unsafe { rdsys::rd_kafka_conf_set_log_cb(native_config.ptr(), Some(native_log_cb::<C>)) };
unsafe {
rdsys::rd_kafka_conf_set_stats_cb(native_config.ptr(), Some(native_stats_cb::<C>))
};
unsafe {
rdsys::rd_kafka_conf_set_error_cb(native_config.ptr(), Some(native_error_cb::<C>))
};
let client_ptr = unsafe {
rdsys::rd_kafka_new(
rd_kafka_type,
native_config.ptr_move(),
err_buf.as_mut_ptr(),
err_buf.len(),
)
};
trace!("Create new librdkafka client {:p}", client_ptr);
if client_ptr.is_null() {
return Err(KafkaError::ClientCreation(err_buf.to_string()));
}
unsafe { rdsys::rd_kafka_set_log_level(client_ptr, config.log_level as i32) };
Ok(Client {
native: unsafe { NativeClient::from_ptr(client_ptr) },
context: boxed_context,
})
}
/// Returns a reference to the native rdkafka-sys client.
pub fn native_client(&self) -> &NativeClient {
&self.native
}
/// Returns a pointer to the native rdkafka-sys client.
pub fn native_ptr(&self) -> *mut RDKafka {
self.native.ptr
}
/// Returns a reference to the context.
pub fn context(&self) -> &C {
self.context.as_ref()
}
/// Returns the metadata information for the specified topic, or for all topics in the cluster
/// if no topic is specified.
pub fn fetch_metadata<T: Into<Option<Duration>>>(
&self,
topic: Option<&str>,
timeout: T,
) -> KafkaResult<Metadata> {
let mut metadata_ptr: *const RDKafkaMetadata = ptr::null_mut();
let (flag, native_topic) = if let Some(topic_name) = topic {
(0, Some(self.native_topic(topic_name)?))
} else {
(1, None)
};
trace!("Starting metadata fetch");
let ret = unsafe {
rdsys::rd_kafka_metadata(
self.native_ptr(),
flag,
native_topic
.map(|t| t.ptr())
.unwrap_or_else(NativeTopic::null),
&mut metadata_ptr as *mut *const RDKafkaMetadata,
timeout_to_ms(timeout),
)
};
trace!("Metadata fetch completed");
if ret.is_error() {
return Err(KafkaError::MetadataFetch(ret.into()));
}
Ok(unsafe { Metadata::from_ptr(metadata_ptr) })
}
/// Returns high and low watermark for the specified topic and partition.
pub fn | <T: Into<Option<Duration>>>(
&self,
topic: &str,
partition: i32,
timeout: T,
) -> KafkaResult<(i64, i64)> {
let mut low = -1;
let mut high = -1;
let topic_c = CString::new(topic.to_string())?;
let ret = unsafe {
rdsys::rd_kafka_query_watermark_offsets(
self.native_ptr(),
topic_c.as_ptr(),
partition,
&mut low as *mut i64,
&mut high as *mut i64,
timeout_to_ms(timeout),
)
};
if ret.is_error() {
return Err(KafkaError::MetadataFetch(ret.into()));
}
Ok((low, high))
}
/// Returns the group membership information for the given group. If no group is
/// specified, all groups will be returned.
pub fn fetch_group_list<T: Into<Option<Duration>>>(
&self,
group: Option<&str>,
timeout: T,
) -> KafkaResult<GroupList> {
// Careful: group_c must stay alive until rd_kafka_list_groups below has used its pointer
let group_c = CString::new(group.map_or("".to_string(), ToString::to_string))?;
let group_c_ptr = if group.is_some() {
group_c.as_ptr()
} else {
ptr::null_mut()
};
let mut group_list_ptr: *const RDKafkaGroupList = ptr::null_mut();
trace!("Starting group list fetch");
let ret = unsafe {
rdsys::rd_kafka_list_groups(
self.native_ptr(),
group_c_ptr,
&mut group_list_ptr as *mut *const RDKafkaGroupList,
timeout_to_ms(timeout),
)
};
trace!("Group list fetch completed");
if ret.is_error() {
return Err(KafkaError::GroupListFetch(ret.into()));
}
Ok(unsafe { GroupList::from_ptr(group_list_ptr) })
}
/// Returns a NativeTopic from the current client. The NativeTopic shouldn't outlive the client
/// it was generated from.
fn native_topic(&self, topic: &str) -> KafkaResult<NativeTopic> {
let topic_c = CString::new(topic.to_string())?;
Ok(unsafe {
NativeTopic::from_ptr(rdsys::rd_kafka_topic_new(
self.native_ptr(),
topic_c.as_ptr(),
ptr::null_mut(),
))
})
}
/// Returns a NativeQueue from the current client. The NativeQueue shouldn't
/// outlive the client it was generated from.
pub(crate) fn new_native_queue(&self) -> NativeQueue {
unsafe { NativeQueue::from_ptr(rdsys::rd_kafka_queue_new(self.native_ptr())) }
}
}
struct NativeTopic {
ptr: *mut RDKafkaTopic,
}
unsafe impl Send for NativeTopic {}
unsafe impl Sync for NativeTopic {}
impl NativeTopic {
/// Wraps a pointer to an `RDKafkaTopic` object and returns a new `NativeTopic`.
unsafe fn from_ptr(ptr: *mut RDKafkaTopic) -> NativeTopic {
NativeTopic { ptr }
}
/// Returns the pointer to the librdkafka RDKafkaTopic structure.
fn ptr(&self) -> *mut RDKafkaTopic {
self.ptr
}
/// Returns a null pointer.
fn null() -> *mut RDKafkaTopic {
ptr::null::< | fetch_watermarks | identifier_name |
three-d-bar-chart.component.ts | (private hostRef: ElementRef) { }
ngOnInit() {
if (this.data) {
this.createChart(this.data)
}
}
createChart(data) {
let el = this.chartContainer.nativeElement;
d3.select(el).select("svg").remove();
var margin = {
top: 30,
right: 20,
bottom: 40,
left: 20,
front: 0,
back: 0
},
width =
$(this.hostRef.nativeElement).parent().width() - margin.right - margin.left,
height = $(this.hostRef.nativeElement).parent().height() - margin.top - margin.bottom;
var depth = 100 - margin.front - margin.back;
var xScale = d3.scaleBand().range([0, width]).padding(0.5),
yScale = d3.scaleLinear().rangeRound([height, 0]),
zScale = d3.scaleOrdinal().domain([0, 1, 2]).range([0, depth], .4);
var xAxis = d3.axisBottom().scale(xScale);
var yAxis = d3.axisLeft().scale(yScale).ticks(6);
var max = d3.max(data[0].map(function (d) {
return parseFloat(d.value);
}));
if (max < 100){
max = 100
}
if(max == undefined){
max = 100
}
var gridY = d3.scaleLinear().domain([0, max]).range(
[height, 0]);
var chart = d3.select(el).append("svg").attr("id", "columnbarChart")
.attr("width",
width + margin.left + margin.right).attr("height",
height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(35," + (data[0][0].axis.length > 20 ? 12 : 15) + ")");
// add the Y gridlines
if (!this.yGrid) {
chart.append("g")
.attr("class", "grid")
.attr('opacity', 0.3)
// .attr("stroke","#ebebeb")
.call(make_y_gridlines()
.tickSize(-width).tickFormat(null)
).selectAll("text").remove();
}
var color = ["#F4A775"];
var layers = data;
layers.forEach(function (el, j) {
el.y = undefined;
el.y0 = j;
});
var layer = chart.selectAll(".layer").data(layers).enter()
.append("g").attr("class", "layer")
.style("fill", color[0]);
data = data[0];
//d3.tsv('data.tsv', type, function(err, data) {
//if (err) return;
xScale.domain(data.map(function (d) {
return d.axis;
}));
yScale.domain([0, max]);
const xBandwidth = xScale.bandwidth() > 50 * data.length ? 50 * data.length : xScale.bandwidth();
function x(d) { return xScale(d.axis); }
function y(d) { return yScale(d.value); }
var camera = [width / 2, height / 2, -200];
var barGen = bar3d()
.camera(camera)
.x(x)
.y(y)
.z(zScale(0))
// .attr('width', xScale.rangeBand())
.width(xScale.bandwidth())
.height(function (d) { return height - y(d); })
.depth(xScale.bandwidth());
chart.append('g')
.attr('class', 'x axis')
.attr("transform",
"translate(0," + height + ")")
.call(xAxis)
.selectAll("text").style("text-anchor", "middle")
.attr("class", function (d, i) { return "chartBartext" + i })
.attr("dx", "-.2em").attr("dy", ".70em")
.call(wrap, xScale.bandwidth(), width);
chart.append('g')
.attr('class', 'y axis')
.call(yAxis)
.append('text')
.attr('transform', svgHelp.rotate(-90))
.attr("y", -18 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style('text-anchor', 'end')
.style("fill", "#333")
.style("font-weight", "400")
.attr("font-family", "'Questrial', sans-serif")
.style("font-size", "13px")
.text(data[0].unit);
// check whether any data is available
let allNullValues = true;
for (let j = 0; j < data.length; j++) {
if (data[j].value != null) {
allNullValues = false;
break;
}
}
if (allNullValues) {
chart.append("text")
.attr("transform", "translate("+ width/2 +",0)")
.attr("x", 0)
.attr("y",30)
.attr("font-size", "28px")
.style("text-anchor", "middle")
.text("Data Not Available");
return;
}
let cubeBar = layer.selectAll('.bar').data(data)
.enter().append('g')
.attr('class', 'bar')
.style("cursor", "pointer")
.on("mouseover", function (d) {
if(d.value)
showPopover.call(this, d)
}).on("mouseout", function (d) {
removePopovers()
}) // sort based on distance from center, so we draw outermost
// bars first. otherwise, bars drawn later might overlap bars drawn first
.sort(function (a, b) {
return Math.abs(x(b) - 450) - Math.abs(x(a) - 450);
})
.call(barGen)
cubeBar.append("text")
.attr("class", "below")
.attr(
"x",
function (d) {
return xScale(d.axis) + (xScale.bandwidth() - xBandwidth) / 2 + xBandwidth
/ (2 * data.length) + (xBandwidth / data.length);
})
.attr("y", function (d) {
return yScale(d.value) - 18;
})
.attr("dy", "1.2em")
// .attr("text-anchor", "left")
.text(function (d) {
if(d.value)
return Math.round(d.value);
})
.style("fill", "#000").style("font-size", "12px");
function removePopovers() {
$('.popover').each(function () {
$(this).remove();
});
}
function showPopover(d) {
$(this).popover(
{
title: '',
placement: 'top',
container: 'body',
trigger: 'manual',
html: true,
animation: false,
content: function () {
if (d.axis != '' && d.denominator != null && d.numerator != null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value + "%"+"</span>" + "</div>" +
"<div>" + "Numerator : " + "<span style='color: #495769;font-weight:500'>" + d.numerator + "</span>" + "</div>" +
"<div>" + "Denominator : " + "<span style='color: #495769;font-weight:500'>" + d.denominator + "</span>" + "</div>";
} else if (d.denominator == null && d.numerator == null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value +"%"+ "</span>" + "</div>";
} else if (d.denominator == null && d.numerator != null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value +"%"+ "</span>" + "</div>" +
"<div>" + "Numerator : " + "<span style='color: #495769;font-weight:500'>" + d.numerator + "</span>" + "</div>";
} | constructor | identifier_name |
|
three-d-bar-chart.component.ts |
}
createChart(data) {
let el = this.chartContainer.nativeElement;
d3.select(el).select("svg").remove();
var margin = {
top: 30,
right: 20,
bottom: 40,
left: 20,
front: 0,
back: 0
},
width =
$(this.hostRef.nativeElement).parent().width() - margin.right - margin.left,
height = $(this.hostRef.nativeElement).parent().height() - margin.top - margin.bottom;
var depth = 100 - margin.front - margin.back;
var xScale = d3.scaleBand().range([0, width]).padding(0.5),
yScale = d3.scaleLinear().rangeRound([height, 0]),
zScale = d3.scaleOrdinal().domain([0, 1, 2]).range([0, depth], .4);
var xAxis = d3.axisBottom().scale(xScale);
var yAxis = d3.axisLeft().scale(yScale).ticks(6);
var max = d3.max(data[0].map(function (d) {
return parseFloat(d.value);
}));
if (max < 100){
max = 100
}
if(max == undefined){
max = 100
}
var gridY = d3.scaleLinear().domain([0, max]).range(
[height, 0]);
var chart = d3.select(el).append("svg").attr("id", "columnbarChart")
.attr("width",
width + margin.left + margin.right).attr("height",
height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(35," + (data[0][0].axis.length > 20 ? 12 : 15) + ")");
// add the Y gridlines
if (!this.yGrid) {
chart.append("g")
.attr("class", "grid")
.attr('opacity', 0.3)
// .attr("stroke","#ebebeb")
.call(make_y_gridlines()
.tickSize(-width).tickFormat(null)
).selectAll("text").remove();
}
var color = ["#F4A775"];
var layers = data;
layers.forEach(function (el, j) {
el.y = undefined;
el.y0 = j;
});
var layer = chart.selectAll(".layer").data(layers).enter()
.append("g").attr("class", "layer")
.style("fill", color[0]);
data = data[0];
//d3.tsv('data.tsv', type, function(err, data) {
//if (err) return;
xScale.domain(data.map(function (d) {
return d.axis;
}));
yScale.domain([0, max]);
const xBandwidth = xScale.bandwidth() > 50 * data.length ? 50 * data.length : xScale.bandwidth();
function x(d) { return xScale(d.axis); }
function y(d) { return yScale(d.value); }
var camera = [width / 2, height / 2, -200];
var barGen = bar3d()
.camera(camera)
.x(x)
.y(y)
.z(zScale(0))
// .attr('width', xScale.rangeBand())
.width(xScale.bandwidth())
.height(function (d) { return height - y(d); })
.depth(xScale.bandwidth());
chart.append('g')
.attr('class', 'x axis')
.attr("transform",
"translate(0," + height + ")")
.call(xAxis)
.selectAll("text").style("text-anchor", "middle")
.attr("class", function (d, i) { return "chartBartext" + i })
.attr("dx", "-.2em").attr("dy", ".70em")
.call(wrap, xScale.bandwidth(), width);
chart.append('g')
.attr('class', 'y axis')
.call(yAxis)
.append('text')
.attr('transform', svgHelp.rotate(-90))
.attr("y", -18 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style('text-anchor', 'end')
.style("fill", "#333")
.style("font-weight", "400")
.attr("font-family", "'Questrial', sans-serif")
.style("font-size", "13px")
.text(data[0].unit);
// check whether any data is available
let allNullValues = true;
for (let j = 0; j < data.length; j++) {
if (data[j].value != null) {
allNullValues = false;
break;
}
}
if (allNullValues) {
chart.append("text")
.attr("transform", "translate("+ width/2 +",0)")
.attr("x", 0)
.attr("y",30)
.attr("font-size", "28px")
.style("text-anchor", "middle")
.text("Data Not Available");
return;
}
let cubeBar = layer.selectAll('.bar').data(data)
.enter().append('g')
.attr('class', 'bar')
.style("cursor", "pointer")
.on("mouseover", function (d) {
if(d.value)
showPopover.call(this, d)
}).on("mouseout", function (d) {
removePopovers()
}) // sort based on distance from center, so we draw outermost
// bars first. otherwise, bars drawn later might overlap bars drawn first
.sort(function (a, b) {
return Math.abs(x(b) - 450) - Math.abs(x(a) - 450);
})
.call(barGen)
cubeBar.append("text")
.attr("class", "below")
.attr(
"x",
function (d) {
return xScale(d.axis) + (xScale.bandwidth() - xBandwidth) / 2 + xBandwidth
/ (2 * data.length) + (xBandwidth / data.length);
})
.attr("y", function (d) {
return yScale(d.value) - 18;
})
.attr("dy", "1.2em")
// .attr("text-anchor", "left")
.text(function (d) {
if(d.value)
return Math.round(d.value);
})
.style("fill", "#000").style("font-size", "12px");
function removePopovers() {
$('.popover').each(function () {
$(this).remove();
});
}
function showPopover(d) {
$(this).popover(
{
title: '',
placement: 'top',
container: 'body',
trigger: 'manual',
html: true,
animation: false,
content: function () {
if (d.axis != '' && d.denominator != null && d.numerator != null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value + "%"+"</span>" + "</div>" +
"<div>" + "Numerator : " + "<span style='color: #495769;font-weight:500'>" + d.numerator + "</span>" + "</div>" +
"<div>" + "Denominator : " + "<span style='color: #495769;font-weight:500'>" + d.denominator + "</span>" + "</div>";
} else if (d.denominator == null && d.numerator == null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value +"%"+ "</span>" + "</div>";
} else if (d.denominator == null && d.numerator != null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value +"%"+ "</span>" + "</div>" +
"<div>" + "Numerator : " + "<span style='color: #495769;font-weight:500'>" + d.numerator + "</span>" + "</div>";
} else if (d.denominator != null && d.numerator == null && d.unit == | {
this.createChart(this.data)
} | conditional_block |
|
three-d-bar-chart.component.ts | 6);
var max = d3.max(data[0].map(function (d) {
return parseFloat(d.value);
}));
if (max < 100){
max = 100
}
if(max == undefined){
max = 100
}
var gridY = d3.scaleLinear().domain([0, max]).range(
[height, 0]);
var chart = d3.select(el).append("svg").attr("id", "columnbarChart")
.attr("width",
width + margin.left + margin.right).attr("height",
height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(35," + (data[0][0].axis.length > 20 ? 12 : 15) + ")");
// add the Y gridlines
if (!this.yGrid) {
chart.append("g")
.attr("class", "grid")
.attr('opacity', 0.3)
// .attr("stroke","#ebebeb")
.call(make_y_gridlines()
.tickSize(-width).tickFormat(null)
).selectAll("text").remove();
}
var color = ["#F4A775"];
var layers = data;
layers.forEach(function (el, j) {
el.y = undefined;
el.y0 = j;
});
var layer = chart.selectAll(".layer").data(layers).enter()
.append("g").attr("class", "layer")
.style("fill", color[0]);
data = data[0];
//d3.tsv('data.tsv', type, function(err, data) {
//if (err) return;
xScale.domain(data.map(function (d) {
return d.axis;
}));
yScale.domain([0, max]);
const xBandwidth = xScale.bandwidth() > 50 * data.length ? 50 * data.length : xScale.bandwidth();
function x(d) { return xScale(d.axis); }
function y(d) { return yScale(d.value); }
var camera = [width / 2, height / 2, -200];
var barGen = bar3d()
.camera(camera)
.x(x)
.y(y)
.z(zScale(0))
// .attr('width', xScale.rangeBand())
.width(xScale.bandwidth())
.height(function (d) { return height - y(d); })
.depth(xScale.bandwidth());
chart.append('g')
.attr('class', 'x axis')
.attr("transform",
"translate(0," + height + ")")
.call(xAxis)
.selectAll("text").style("text-anchor", "middle")
.attr("class", function (d, i) { return "chartBartext" + i })
.attr("dx", "-.2em").attr("dy", ".70em")
.call(wrap, xScale.bandwidth(), width);
chart.append('g')
.attr('class', 'y axis')
.call(yAxis)
.append('text')
.attr('transform', svgHelp.rotate(-90))
.attr("y", -18 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style('text-anchor', 'end')
.style("fill", "#333")
.style("font-weight", "400")
.attr("font-family", "'Questrial', sans-serif")
.style("font-size", "13px")
.text(data[0].unit);
// check whether any data is available
let allNullValues = true;
for (let j = 0; j < data.length; j++) {
if (data[j].value != null) {
allNullValues = false;
break;
}
}
if (allNullValues) {
chart.append("text")
.attr("transform", "translate("+ width/2 +",0)")
.attr("x", 0)
.attr("y",30)
.attr("font-size", "28px")
.style("text-anchor", "middle")
.text("Data Not Available");
return;
}
let cubeBar = layer.selectAll('.bar').data(data)
.enter().append('g')
.attr('class', 'bar')
.style("cursor", "pointer")
.on("mouseover", function (d) {
if(d.value)
showPopover.call(this, d)
}).on("mouseout", function (d) {
removePopovers()
}) // sort based on distance from center, so we draw outermost
// bars first. otherwise, bars drawn later might overlap bars drawn first
.sort(function (a, b) {
return Math.abs(x(b) - 450) - Math.abs(x(a) - 450);
})
.call(barGen)
cubeBar.append("text")
.attr("class", "below")
.attr(
"x",
function (d) {
return xScale(d.axis) + (xScale.bandwidth() - xBandwidth) / 2 + xBandwidth
/ (2 * data.length) + (xBandwidth / data.length);
})
.attr("y", function (d) {
return yScale(d.value) - 18;
})
.attr("dy", "1.2em")
// .attr("text-anchor", "left")
.text(function (d) {
if(d.value)
return Math.round(d.value);
})
.style("fill", "#000").style("font-size", "12px");
function removePopovers() {
$('.popover').each(function () {
$(this).remove();
});
}
function showPopover(d) {
$(this).popover(
{
title: '',
placement: 'top',
container: 'body',
trigger: 'manual',
html: true,
animation: false,
content: function () {
if (d.axis != '' && d.denominator != null && d.numerator != null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value + "%"+"</span>" + "</div>" +
"<div>" + "Numerator : " + "<span style='color: #495769;font-weight:500'>" + d.numerator + "</span>" + "</div>" +
"<div>" + "Denominator : " + "<span style='color: #495769;font-weight:500'>" + d.denominator + "</span>" + "</div>";
} else if (d.denominator == null && d.numerator == null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value +"%"+ "</span>" + "</div>";
} else if (d.denominator == null && d.numerator != null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value +"%"+ "</span>" + "</div>" +
"<div>" + "Numerator : " + "<span style='color: #495769;font-weight:500'>" + d.numerator + "</span>" + "</div>";
} else if (d.denominator != null && d.numerator == null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value + "%"+"</span>" + "</div>" +
"<div>" + "Denominator : " + "<span style='color: #495769;font-weight:500'>" + d.denominator + "</span>" + "</div>";
}
else {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div style='color: #495769;'> Data Value: " + d.value + "</div>";
}
}
});
$(this).popover('show');
}
// gridlines in x axis function
function make_x_gridlines() | {
return d3.axisBottom(x)
.ticks(5)
} | identifier_body |
|
three-d-bar-chart.component.ts | =
$(this.hostRef.nativeElement).parent().width() - margin.right - margin.left,
height = $(this.hostRef.nativeElement).parent().height() - margin.top - margin.bottom;
var depth = 100 - margin.front - margin.back;
var xScale = d3.scaleBand().range([0, width]).padding(0.5),
yScale = d3.scaleLinear().rangeRound([height, 0]),
zScale = d3.scaleOrdinal().domain([0, 1, 2]).range([0, depth], .4);
var xAxis = d3.axisBottom().scale(xScale);
var yAxis = d3.axisLeft().scale(yScale).ticks(6);
var max = d3.max(data[0].map(function (d) {
return parseFloat(d.value);
}));
if (max < 100){
max = 100
}
if(max == undefined){
max = 100
}
var gridY = d3.scaleLinear().domain([0, max]).range(
[height, 0]);
var chart = d3.select(el).append("svg").attr("id", "columnbarChart")
.attr("width",
width + margin.left + margin.right).attr("height",
height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(35," + (data[0][0].axis.length > 20 ? 12 : 15) + ")");
// add the Y gridlines
if (!this.yGrid) {
chart.append("g")
.attr("class", "grid")
.attr('opacity', 0.3)
// .attr("stroke","#ebebeb")
.call(make_y_gridlines()
.tickSize(-width).tickFormat(null)
).selectAll("text").remove();
}
var color = ["#F4A775"];
var layers = data;
layers.forEach(function (el, j) {
el.y = undefined;
el.y0 = j;
});
var layer = chart.selectAll(".layer").data(layers).enter()
.append("g").attr("class", "layer")
.style("fill", color[0]);
data = data[0];
//d3.tsv('data.tsv', type, function(err, data) {
//if (err) return;
xScale.domain(data.map(function (d) {
return d.axis;
}));
yScale.domain([0, max]);
const xBandwidth = xScale.bandwidth() > 50 * data.length ? 50 * data.length : xScale.bandwidth();
function x(d) { return xScale(d.axis); }
function y(d) { return yScale(d.value); }
var camera = [width / 2, height / 2, -200];
var barGen = bar3d()
.camera(camera)
.x(x)
.y(y)
.z(zScale(0))
// .attr('width', xScale.rangeBand())
.width(xScale.bandwidth())
.height(function (d) { return height - y(d); })
.depth(xScale.bandwidth());
chart.append('g')
.attr('class', 'x axis')
.attr("transform",
"translate(0," + height + ")")
.call(xAxis)
.selectAll("text").style("text-anchor", "middle")
.attr("class", function (d, i) { return "chartBartext" + i })
.attr("dx", "-.2em").attr("dy", ".70em")
.call(wrap, xScale.bandwidth(), width);
chart.append('g')
.attr('class', 'y axis')
.call(yAxis)
.append('text')
.attr('transform', svgHelp.rotate(-90))
.attr("y", -18 - margin.left)
.attr("x", 0 - (height / 2))
.attr("dy", "1em")
.style('text-anchor', 'end')
.style("fill", "#333")
.style("font-weight", "400")
.attr("font-family", "'Questrial', sans-serif")
.style("font-size", "13px")
.text(data[0].unit);
// check whether any data is available
let allNullValues = true;
for (let j = 0; j < data.length; j++) {
if (data[j].value != null) {
allNullValues = false;
break;
}
}
if (allNullValues) {
chart.append("text")
.attr("transform", "translate("+ width/2 +",0)")
.attr("x", 0)
.attr("y",30)
.attr("font-size", "28px")
.style("text-anchor", "middle")
.text("Data Not Available");
return;
}
let cubeBar = layer.selectAll('.bar').data(data)
.enter().append('g')
.attr('class', 'bar') | .on("mouseover", function (d) {
if(d.value)
showPopover.call(this, d)
}).on("mouseout", function (d) {
removePopovers()
}) // sort based on distance from center, so we draw outermost
// bars first. otherwise, bars drawn later might overlap bars drawn first
.sort(function (a, b) {
return Math.abs(x(b) - 450) - Math.abs(x(a) - 450);
})
.call(barGen)
cubeBar.append("text")
.attr("class", "below")
.attr(
"x",
function (d) {
return xScale(d.axis) + (xScale.bandwidth() - xBandwidth) / 2 + xBandwidth
/ (2 * data.length) + (xBandwidth / data.length);
})
.attr("y", function (d) {
return yScale(d.value) - 18;
})
.attr("dy", "1.2em")
// .attr("text-anchor", "left")
.text(function (d) {
if(d.value)
return Math.round(d.value);
})
.style("fill", "#000").style("font-size", "12px");
function removePopovers() {
$('.popover').each(function () {
$(this).remove();
});
}
function showPopover(d) {
$(this).popover(
{
title: '',
placement: 'top',
container: 'body',
trigger: 'manual',
html: true,
animation: false,
content: function () {
if (d.axis != '' && d.denominator != null && d.numerator != null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value + "%"+"</span>" + "</div>" +
"<div>" + "Numerator : " + "<span style='color: #495769;font-weight:500'>" + d.numerator + "</span>" + "</div>" +
"<div>" + "Denominator : " + "<span style='color: #495769;font-weight:500'>" + d.denominator + "</span>" + "</div>";
} else if (d.denominator == null && d.numerator == null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value +"%"+ "</span>" + "</div>";
} else if (d.denominator == null && d.numerator != null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value +"%"+ "</span>" + "</div>" +
"<div>" + "Numerator : " + "<span style='color: #495769;font-weight:500'>" + d.numerator + "</span>" + "</div>";
} else if (d.denominator != null && d.numerator == null && d.unit == 'Percentage') {
return "<div style='color: #495769;'>" + "<b>" + d.axis + "</b>" + "</div>" +
"<div>" + " Data Value : " + "<span style='color: #495769;font-weight:500;'>" + d.value + "%"+"</span>" + "</div>" +
"<div>" + "Denominator | .style("cursor", "pointer") | random_line_split |
pageserver.rs | ").zip(get_arg("relish-storage-region"))
{
Some(RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id: get_arg("relish-storage-access-key"),
secret_access_key: get_arg("relish-storage-secret-access-key"),
})
} else {
None
};
Self {
listen_pg_addr: get_arg("listen-pg"),
listen_http_addr: get_arg("listen-http"),
checkpoint_distance: get_arg("checkpoint_distance"),
checkpoint_period: get_arg("checkpoint_period"),
gc_horizon: get_arg("gc_horizon"),
gc_period: get_arg("gc_period"),
pg_distrib_dir: get_arg("postgres-distrib"),
auth_validation_public_key_path: get_arg("auth-validation-public-key-path"),
auth_type: get_arg("auth-type"),
relish_storage,
}
}
/// Fill missing values in `self` with `other`
fn or(self, other: CfgFileParams) -> Self {
// TODO cleaner way to do this
Self {
listen_pg_addr: self.listen_pg_addr.or(other.listen_pg_addr),
listen_http_addr: self.listen_http_addr.or(other.listen_http_addr),
checkpoint_distance: self.checkpoint_distance.or(other.checkpoint_distance),
checkpoint_period: self.checkpoint_period.or(other.checkpoint_period),
gc_horizon: self.gc_horizon.or(other.gc_horizon),
gc_period: self.gc_period.or(other.gc_period),
pg_distrib_dir: self.pg_distrib_dir.or(other.pg_distrib_dir),
auth_validation_public_key_path: self
.auth_validation_public_key_path
.or(other.auth_validation_public_key_path),
auth_type: self.auth_type.or(other.auth_type),
relish_storage: self.relish_storage.or(other.relish_storage),
}
}
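// Illustrative sketch (not part of the original source): `or` gives the left-hand side
// precedence, so merging CLI arguments with the config file typically looks like
//
//     let params = args_params.or(file_params);
//
// where any field left unset on the command line falls back to the file's value.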
/// Create a PageServerConf from these string parameters
fn try_into_config(&self) -> Result<PageServerConf> {
let workdir = PathBuf::from(".");
let listen_pg_addr = match self.listen_pg_addr.as_ref() {
Some(addr) => addr.clone(),
None => DEFAULT_PG_LISTEN_ADDR.to_owned(),
};
let listen_http_addr = match self.listen_http_addr.as_ref() {
Some(addr) => addr.clone(),
None => DEFAULT_HTTP_LISTEN_ADDR.to_owned(),
};
let checkpoint_distance: u64 = match self.checkpoint_distance.as_ref() {
Some(checkpoint_distance_str) => checkpoint_distance_str.parse()?,
None => DEFAULT_CHECKPOINT_DISTANCE,
};
let checkpoint_period = match self.checkpoint_period.as_ref() {
Some(checkpoint_period_str) => humantime::parse_duration(checkpoint_period_str)?,
None => DEFAULT_CHECKPOINT_PERIOD,
};
let gc_horizon: u64 = match self.gc_horizon.as_ref() {
Some(horizon_str) => horizon_str.parse()?,
None => DEFAULT_GC_HORIZON,
};
let gc_period = match self.gc_period.as_ref() {
Some(period_str) => humantime::parse_duration(period_str)?,
None => DEFAULT_GC_PERIOD,
};
let pg_distrib_dir = match self.pg_distrib_dir.as_ref() {
Some(pg_distrib_dir_str) => PathBuf::from(pg_distrib_dir_str),
None => env::current_dir()?.join("tmp_install"),
};
let auth_validation_public_key_path = self
.auth_validation_public_key_path
.as_ref()
.map(PathBuf::from);
let auth_type = self
.auth_type
.as_ref()
.map_or(Ok(AuthType::Trust), |auth_type| {
AuthType::from_str(auth_type)
})?;
if !pg_distrib_dir.join("bin/postgres").exists() {
bail!("Can't find postgres binary at {:?}", pg_distrib_dir);
}
if auth_type == AuthType::ZenithJWT {
ensure!(
auth_validation_public_key_path.is_some(),
"Missing auth_validation_public_key_path when auth_type is ZenithJWT"
);
let path_ref = auth_validation_public_key_path.as_ref().unwrap();
ensure!(
path_ref.exists(),
format!("Can't find auth_validation_public_key at {:?}", path_ref)
);
}
let relish_storage_config =
self.relish_storage
.as_ref()
.map(|storage_params| match storage_params.clone() {
RelishStorage::Local { local_path } => {
RelishStorageConfig::LocalFs(PathBuf::from(local_path))
}
RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
} => RelishStorageConfig::AwsS3(S3Config {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
}),
});
Ok(PageServerConf {
daemonize: false,
listen_pg_addr,
listen_http_addr,
checkpoint_distance,
checkpoint_period,
gc_horizon,
gc_period,
superuser: String::from(DEFAULT_SUPERUSER),
workdir,
pg_distrib_dir,
auth_validation_public_key_path,
auth_type,
relish_storage_config,
})
}
}
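// Rough sketch of a matching `pageserver.toml` (keys inferred from the CfgFileParams field
// names; any serde renaming would change them, and the values below are only examples):
//
//     listen_pg_addr = "127.0.0.1:64000"
//     listen_http_addr = "127.0.0.1:9898"
//     checkpoint_distance = "268435456"
//     checkpoint_period = "1s"
//     gc_horizon = "67108864"
//     gc_period = "100s"
//     pg_distrib_dir = "./tmp_install"
//
// Values are written as strings here because try_into_config parses each field from a string.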
fn | () -> Result<()> {
let arg_matches = App::new("Zenith page server")
.about("Materializes WAL stream to pages and serves them to the postgres")
.arg(
Arg::with_name("listen-pg")
.short("l")
.long("listen-pg")
.alias("listen") // keep some compatibility
.takes_value(true)
.help(formatcp!("listen for incoming page requests on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("listen-http")
.long("listen-http")
.alias("http_endpoint") // keep some compatibility
.takes_value(true)
.help(formatcp!("http endpoint address for metrics and management API calls on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("daemonize")
.short("d")
.long("daemonize")
.takes_value(false)
.help("Run in the background"),
)
.arg(
Arg::with_name("init")
.long("init")
.takes_value(false)
.help("Initialize pageserver repo"),
)
.arg(
Arg::with_name("checkpoint_distance")
.long("checkpoint_distance")
.takes_value(true)
.help("Distance from current LSN to perform checkpoint of in-memory layers"),
)
.arg(
Arg::with_name("checkpoint_period")
.long("checkpoint_period")
.takes_value(true)
.help("Interval between checkpoint iterations"),
)
.arg(
Arg::with_name("gc_horizon")
.long("gc_horizon")
.takes_value(true)
.help("Distance from current LSN to perform all wal records cleanup"),
)
.arg(
Arg::with_name("gc_period")
.long("gc_period")
.takes_value(true)
.help("Interval between garbage collector iterations"),
)
.arg(
Arg::with_name("workdir")
.short("D")
.long("workdir")
.takes_value(true)
.help("Working directory for the pageserver"),
)
.arg(
Arg::with_name("postgres-distrib")
.long("postgres-distrib")
.takes_value(true)
.help("Postgres distribution directory"),
)
.arg(
Arg::with_name("create-tenant")
.long("create-tenant")
.takes_value(true)
.help("Create tenant during init")
.requires("init"),
)
.arg(
Arg::with_name("auth-validation-public-key-path")
.long("auth-validation-public-key-path")
.takes_value(true)
.help("Path to public key used to validate jwt signature"),
)
.arg(
Arg::with_name("auth-type")
.long("auth-type")
.takes_value(true)
.help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"),
)
.arg(
Arg::with_name("relish-storage-local-path")
.long("relish-storage-local-path")
.takes_value(true)
.help("Path to the local directory, to be used as an external relish storage")
.conflicts_with_all(&[
"relish-storage-s3-bucket",
"relish-storage-region",
"relish-storage-access-key",
"relish-storage-secret-access-key",
]),
)
.arg(
Arg::with_name("relish-storage-s3-bucket")
.long("relish-storage-s3-bucket")
.takes_value(true)
.help("Name of the AWS S3 bucket to use an external relish storage")
.requires("relish-storage-region"),
)
.arg(
Arg::with_name("relish-storage-region")
.long("relish-storage-region")
.takes_value(true)
.help("Region of the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-access-key")
.long("relish-storage-access-key")
.takes_value(true)
.help("Credentials to access the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-secret-access-key")
.long("relish-storage-secret-access | main | identifier_name |
pageserver.rs | _config =
self.relish_storage
.as_ref()
.map(|storage_params| match storage_params.clone() {
RelishStorage::Local { local_path } => {
RelishStorageConfig::LocalFs(PathBuf::from(local_path))
}
RelishStorage::AwsS3 {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
} => RelishStorageConfig::AwsS3(S3Config {
bucket_name,
bucket_region,
access_key_id,
secret_access_key,
}),
});
Ok(PageServerConf {
daemonize: false,
listen_pg_addr,
listen_http_addr,
checkpoint_distance,
checkpoint_period,
gc_horizon,
gc_period,
superuser: String::from(DEFAULT_SUPERUSER),
workdir,
pg_distrib_dir,
auth_validation_public_key_path,
auth_type,
relish_storage_config,
})
}
}
fn main() -> Result<()> {
let arg_matches = App::new("Zenith page server")
.about("Materializes WAL stream to pages and serves them to the postgres")
.arg(
Arg::with_name("listen-pg")
.short("l")
.long("listen-pg")
.alias("listen") // keep some compatibility
.takes_value(true)
.help(formatcp!("listen for incoming page requests on ip:port (default: {DEFAULT_PG_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("listen-http")
.long("listen-http")
.alias("http_endpoint") // keep some compatibility
.takes_value(true)
.help(formatcp!("http endpoint address for metrics and management API calls on ip:port (default: {DEFAULT_HTTP_LISTEN_ADDR})")),
)
.arg(
Arg::with_name("daemonize")
.short("d")
.long("daemonize")
.takes_value(false)
.help("Run in the background"),
)
.arg(
Arg::with_name("init")
.long("init")
.takes_value(false)
.help("Initialize pageserver repo"),
)
.arg(
Arg::with_name("checkpoint_distance")
.long("checkpoint_distance")
.takes_value(true)
.help("Distance from current LSN to perform checkpoint of in-memory layers"),
)
.arg(
Arg::with_name("checkpoint_period")
.long("checkpoint_period")
.takes_value(true)
.help("Interval between checkpoint iterations"),
)
.arg(
Arg::with_name("gc_horizon")
.long("gc_horizon")
.takes_value(true)
.help("Distance from current LSN to perform all wal records cleanup"),
)
.arg(
Arg::with_name("gc_period")
.long("gc_period")
.takes_value(true)
.help("Interval between garbage collector iterations"),
)
.arg(
Arg::with_name("workdir")
.short("D")
.long("workdir")
.takes_value(true)
.help("Working directory for the pageserver"),
)
.arg(
Arg::with_name("postgres-distrib")
.long("postgres-distrib")
.takes_value(true)
.help("Postgres distribution directory"),
)
.arg(
Arg::with_name("create-tenant")
.long("create-tenant")
.takes_value(true)
.help("Create tenant during init")
.requires("init"),
)
.arg(
Arg::with_name("auth-validation-public-key-path")
.long("auth-validation-public-key-path")
.takes_value(true)
.help("Path to public key used to validate jwt signature"),
)
.arg(
Arg::with_name("auth-type")
.long("auth-type")
.takes_value(true)
.help("Authentication scheme type. One of: Trust, MD5, ZenithJWT"),
)
.arg(
Arg::with_name("relish-storage-local-path")
.long("relish-storage-local-path")
.takes_value(true)
.help("Path to the local directory, to be used as an external relish storage")
.conflicts_with_all(&[
"relish-storage-s3-bucket",
"relish-storage-region",
"relish-storage-access-key",
"relish-storage-secret-access-key",
]),
)
.arg(
Arg::with_name("relish-storage-s3-bucket")
.long("relish-storage-s3-bucket")
.takes_value(true)
.help("Name of the AWS S3 bucket to use an external relish storage")
.requires("relish-storage-region"),
)
.arg(
Arg::with_name("relish-storage-region")
.long("relish-storage-region")
.takes_value(true)
.help("Region of the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-access-key")
.long("relish-storage-access-key")
.takes_value(true)
.help("Credentials to access the AWS S3 bucket"),
)
.arg(
Arg::with_name("relish-storage-secret-access-key")
.long("relish-storage-secret-access-key")
.takes_value(true)
.help("Credentials to access the AWS S3 bucket"),
)
.get_matches();
let workdir = Path::new(arg_matches.value_of("workdir").unwrap_or(".zenith"));
let cfg_file_path = workdir
.canonicalize()
.with_context(|| format!("Error opening workdir '{}'", workdir.display()))?
.join("pageserver.toml");
let args_params = CfgFileParams::from_args(&arg_matches);
let init = arg_matches.is_present("init");
let create_tenant = arg_matches.value_of("create-tenant");
let params = if init {
// We're initializing the repo, so there's no config file yet
args_params
} else {
// Supplement the CLI arguments with the config file
let cfg_file_contents = std::fs::read_to_string(&cfg_file_path)
.with_context(|| format!("No pageserver config at '{}'", cfg_file_path.display()))?;
let file_params: CfgFileParams = toml::from_str(&cfg_file_contents).with_context(|| {
format!(
"Failed to read '{}' as pageserver config",
cfg_file_path.display()
)
})?;
args_params.or(file_params)
};
// Set CWD to workdir for non-daemon modes
env::set_current_dir(&workdir).with_context(|| {
format!(
"Failed to set application's current dir to '{}'",
workdir.display()
)
})?;
// Ensure the config is valid, even if just init-ing
let mut conf = params.try_into_config().with_context(|| {
format!(
"Pageserver config at '{}' is not valid",
cfg_file_path.display()
)
})?;
conf.daemonize = arg_matches.is_present("daemonize");
if init && conf.daemonize {
bail!("--daemonize cannot be used with --init")
}
// The configuration is all set up now. Turn it into a 'static
// that can be freely stored in structs and passed across threads
// as a ref.
let conf: &'static PageServerConf = Box::leak(Box::new(conf));
// Create repo and exit if init was requested
if init {
branches::init_pageserver(conf, create_tenant).context("Failed to init pageserver")?;
// write the config file
let cfg_file_contents = toml::to_string_pretty(¶ms)
.context("Failed to create pageserver config contents for initialisation")?;
// TODO support enable-auth flag
std::fs::write(&cfg_file_path, cfg_file_contents).with_context(|| {
format!(
"Failed to initialize pageserver config at '{}'",
cfg_file_path.display()
)
})?;
Ok(())
} else {
start_pageserver(conf).context("Failed to start pageserver")
}
}
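// Illustrative invocations (flags taken from the clap definitions above; the binary name is
// an assumption):
//
//     pageserver --init --create-tenant <tenant-id> -D .zenith --postgres-distrib ./tmp_install
//     pageserver -D .zenith --daemonize
//
// The first call initializes the repository and writes pageserver.toml; the second starts
// the server against that config.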
fn start_pageserver(conf: &'static PageServerConf) -> Result<()> {
// Initialize logger
let (_scope_guard, log_file) = logging::init(LOG_FILE_NAME, conf.daemonize)?;
// TODO: Check that it looks like a valid repository before going further
// bind sockets before daemonizing so we report errors early and do not return until we are listening
info!(
"Starting pageserver http handler on {}",
conf.listen_http_addr
);
let http_listener = TcpListener::bind(conf.listen_http_addr.clone())?;
info!(
"Starting pageserver pg protocol handler on {}",
conf.listen_pg_addr
);
let pageserver_listener = TcpListener::bind(conf.listen_pg_addr.clone())?;
if conf.daemonize | {
info!("daemonizing...");
// There shouldn't be any logging to stdout/stderr after this point, but redirect them to
// the main log anyway so that accidental fprintf's or backtraces still end up there.
let stdout = log_file.try_clone().unwrap();
let stderr = log_file;
let daemonize = Daemonize::new()
.pid_file("pageserver.pid")
.working_directory(".")
.stdout(stdout)
.stderr(stderr);
match daemonize.start() {
Ok(_) => info!("Success, daemonized"),
Err(e) => error!("Error, {}", e),
}
} | conditional_block |