Dataset schema (one row per fill-in-the-middle sample; string-length statistics per column):

file_name   large_string   lengths 4 to 69
prefix      large_string   lengths 0 to 26.7k
suffix      large_string   lengths 0 to 24.8k
middle      large_string   lengths 0 to 2.12k
fim_type    large_string   4 classes
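Each row is one fill-in-the-middle (FIM) sample: a source file is cut into a prefix, a hidden middle, and a suffix, and fim_type records which of the four splitting strategies produced the cut (random_line_split, conditional_block, identifier_body, identifier_name). A minimal sketch of how such a row can be reassembled, assuming this column layout; the sentinel token names in to_prompt are illustrative placeholders, not tokens defined by this dataset:

    /// One FIM sample, mirroring the columns above.
    struct FimSample {
        file_name: String,
        prefix: String,
        suffix: String,
        middle: String,
        fim_type: String,
    }

    impl FimSample {
        /// Concatenating prefix + middle + suffix restores the original file.
        fn reconstruct(&self) -> String {
            format!("{}{}{}", self.prefix, self.middle, self.suffix)
        }

        /// Render a prefix-suffix-middle (PSM) style training prompt. The
        /// sentinel strings here are hypothetical; real tokenizers define
        /// their own special tokens.
        fn to_prompt(&self) -> String {
            format!(
                "<fim_prefix>{}<fim_suffix>{}<fim_middle>{}",
                self.prefix, self.suffix, self.middle
            )
        }
    }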
hls_live.rs
} let mut all_mimes = self.all_mimes.clone(); all_mimes.sort(); all_mimes.dedup(); let playlist = MasterPlaylist { version: Some(7), variants: self .video_streams .iter() .map(|stream| { let mut path = PathBuf::new(); path.push(&stream.name); path.push("manifest.m3u8"); VariantStream { uri: path.as_path().display().to_string(), bandwidth: stream.bitrate, codecs: Some(all_mimes.join(",")), resolution: Some(m3u8_rs::Resolution { width: stream.width, height: stream.height, }), audio: Some("audio".to_string()), ..Default::default() } }) .collect(), alternatives: self .audio_streams .iter() .map(|stream| { let mut path = PathBuf::new(); path.push(&stream.name); path.push("manifest.m3u8"); AlternativeMedia { media_type: AlternativeMediaType::Audio, uri: Some(path.as_path().display().to_string()), group_id: "audio".to_string(), language: Some(stream.lang.clone()), name: stream.name.clone(), default: stream.default, autoselect: stream.default, channels: Some("2".to_string()), ..Default::default() } }) .collect(), independent_segments: true, ..Default::default() }; println!("Writing master manifest to {}", self.path.display()); let mut file = std::fs::File::create(&self.path).unwrap(); playlist .write_to(&mut file) .expect("Failed to write master playlist"); self.wrote_manifest = true; } } struct Segment { date_time: DateTime<Utc>, duration: gst::ClockTime, path: String, } struct UnreffedSegment { removal_time: DateTime<Utc>, path: String, } struct StreamState { path: PathBuf, segments: VecDeque<Segment>, trimmed_segments: VecDeque<UnreffedSegment>, start_date_time: Option<DateTime<Utc>>, start_time: Option<gst::ClockTime>, media_sequence: u64, segment_index: u32, } struct VideoStream { name: String, bitrate: u64, width: u64, height: u64, } struct AudioStream { name: String, lang: String, default: bool, wave: String, } fn trim_segments(state: &mut StreamState) { // Arbitrary 5 segments window while state.segments.len() > 5 { let segment = state.segments.pop_front().unwrap(); state.media_sequence += 1; state.trimmed_segments.push_back(UnreffedSegment { // HLS spec mandates that segments are removed from the filesystem no sooner // than the duration of the longest playlist + duration of the segment. // This is 15 seconds (12.5 + 2.5) in our case, we use 20 seconds to be on the // safe side removal_time: segment .date_time .checked_add_signed(Duration::seconds(20)) .unwrap(), path: segment.path.clone(), }); } while let Some(segment) = state.trimmed_segments.front() { if segment.removal_time < state.segments.front().unwrap().date_time { let segment = state.trimmed_segments.pop_front().unwrap(); let mut path = state.path.clone(); path.push(segment.path); println!("Removing {}", path.display()); std::fs::remove_file(path).expect("Failed to remove old segment"); } else {
fn update_manifest(state: &mut StreamState) { // Now write the manifest let mut path = state.path.clone(); path.push("manifest.m3u8"); println!("writing manifest to {}", path.display()); trim_segments(state); let playlist = MediaPlaylist { version: Some(7), target_duration: 2.5, media_sequence: state.media_sequence, segments: state .segments .iter() .enumerate() .map(|(idx, segment)| MediaSegment { uri: segment.path.to_string(), duration: (segment.duration.nseconds() as f64 / gst::ClockTime::SECOND.nseconds() as f64) as f32, map: Some(m3u8_rs::Map { uri: "init.cmfi".into(), ..Default::default() }), program_date_time: if idx == 0 { Some(segment.date_time.into()) } else { None }, ..Default::default() }) .collect(), end_list: false, playlist_type: None, i_frames_only: false, start: None, independent_segments: true, ..Default::default() }; let mut file = std::fs::File::create(path).unwrap(); playlist .write_to(&mut file) .expect("Failed to write media playlist"); } fn setup_appsink(appsink: &gst_app::AppSink, name: &str, path: &Path, is_video: bool) { let mut path: PathBuf = path.into(); path.push(name); let state = Arc::new(Mutex::new(StreamState { segments: VecDeque::new(), trimmed_segments: VecDeque::new(), path, start_date_time: None, start_time: gst::ClockTime::NONE, media_sequence: 0, segment_index: 0, })); appsink.set_callbacks( gst_app::AppSinkCallbacks::builder() .new_sample(move |sink| { let sample = sink.pull_sample().map_err(|_| gst::FlowError::Eos)?; let mut state = state.lock().unwrap(); // The muxer only outputs non-empty buffer lists let mut buffer_list = sample.buffer_list_owned().expect("no buffer list"); assert!(!buffer_list.is_empty()); let mut first = buffer_list.get(0).unwrap(); // Each list contains a full segment, i.e. does not start with a DELTA_UNIT assert!(!first.flags().contains(gst::BufferFlags::DELTA_UNIT)); // If the buffer has the DISCONT and HEADER flag set then it contains the media // header, i.e. the `ftyp`, `moov` and other media boxes. // // This might be the initial header or the updated header at the end of the stream. if first .flags() .contains(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER) { let mut path = state.path.clone(); std::fs::create_dir_all(&path).expect("failed to create directory"); path.push("init.cmfi"); println!("writing header to {}", path.display()); let map = first.map_readable().unwrap(); std::fs::write(path, &map).expect("failed to write header"); drop(map); // Remove the header from the buffer list buffer_list.make_mut().remove(0, 1); // If the list is now empty then it only contained the media header and nothing // else. if buffer_list.is_empty() { return Ok(gst::FlowSuccess::Ok); } // Otherwise get the next buffer and continue working with that. first = buffer_list.get(0).unwrap(); } // If the buffer only has the HEADER flag set then this is a segment header that is // followed by one or more actual media buffers. 
assert!(first.flags().contains(gst::BufferFlags::HEADER)); let mut path = state.path.clone(); let basename = format!( "segment_{}.{}", state.segment_index, if is_video { "cmfv" } else { "cmfa" } ); state.segment_index += 1; path.push(&basename); let segment = sample .segment() .expect("no segment") .downcast_ref::<gst::ClockTime>() .expect("no time segment"); let pts = segment .to_running_time(first.pts().unwrap()) .expect("can't get running time"); if state.start_time.is_none() { state.start_time = Some(pts); } if state.start_date_time.is_none() { let now_utc = Utc::now(); let now_gst = sink.clock().unwrap().time().unwrap(); let pts_clock_time = pts + sink.base_time().unwrap(); let diff = now_gst.checked_sub(pts_clock_time).unwrap(); let pts_utc = now_utc .checked_sub_signed(Duration::nanoseconds(diff.nseconds() as i64)) .unwrap(); state.start_date_time = Some(pts_utc); } let duration = first.duration().unwrap(); let mut file = std::fs::File::create(&path).expect("failed to open fragment"); for buffer in &*buffer_list { use std::io::prelude::*; let map = buffer.map_readable().unwrap(); file.write_all(&map).expect("failed to write fragment"); } let date_time = state .start_date_time .unwrap() .checked_add_signed(Duration::nanoseconds( pts.opt_checked_sub(state.start_time) .unwrap() .unwrap() .nseconds() as i64, )) .unwrap(); println!( "wrote segment with date time {} to {}", date_time, path.display() ); state.segments.push_back(Segment { duration, path: basename.to_string(), date_time, }); update_manifest(&mut state); Ok(gst::FlowSuccess::Ok) }) .eos(move |_sink| { unreachable!(); }) .build(), ); } fn probe_encoder(state: Arc<Mutex<State>>, enc: gst::Element) { enc.static_pad("src").unwrap().add_probe( gst::PadProbeType::EVENT_DOWNSTREAM, move |_pad, info| match info.data { Some(gst::PadProbeData::Event(ref ev)) => match ev.view() { gst::EventView::Caps(e) => { let mime = gst_pbutils::codec_utils_caps_get_mime_codec(e.caps()); let mut state = state.lock().unwrap(); state.all_mimes.push(mime.unwrap().into()); state.maybe_write_manifest(); gst::PadProbeReturn::Remove } _ => gst::PadProbeReturn::Ok, }, _ => gst::PadProbeReturn::Ok, }, ); } impl VideoStream { fn setup( &self, state: Arc<Mutex<State>>, pipeline: &gst::Pipeline, path: &Path, ) -> Result<(), Error> { let src = gst::ElementFactory::make("videotestsrc") .property("is-live", true) .build()?; let raw_capsfilter = gst::ElementFactory::make("capsfilter") .property( "caps", gst_video::VideoCapsBuilder::new() .format(gst_video::VideoFormat::I420) .width(self.width as i32) .height(self.height as i32) .framerate(30.into()) .build(), ) .build()?; let timeoverlay = gst::ElementFactory::make("timeoverlay").build()?; let enc = gst::ElementFactory::make("x264enc") .property("bframes", 0u32) .property("bitrate", self.bitrate as u32 / 1000u32) .property_from_str("tune", "zerolatency") .build()?; let h264_capsfilter = gst::ElementFactory::make("capsfilter") .property( "caps", gst::Caps::builder("video/x-h264") .field("profile", "main") .build(), ) .build()?; let mux = gst::ElementFactory::make("cmafmux") .property("fragment-duration", 2500.mseconds()) .property_from_str("header-update-mode", "update") .property("write-mehd", true) .build()?; let appsink = gst_app::AppSink::builder().buffer_list(true).build(); pipeline.add_many([ &src, &raw_capsfilter, &timeoverlay, &enc, &h264_capsfilter, &mux, appsink.upcast_ref(), ])?; gst::Element::link_many([ &src, &raw_capsfilter, &timeoverlay, &enc, &h264_capsfilter, &mux, appsink.upcast_ref(), ])?; 
probe_encoder(state, enc); setup_appsink(&appsink, &self.name, path, true); Ok(()) } } impl AudioStream { fn setup( &self, state: Arc<Mutex<State>>, pipeline: &gst::Pipeline, path: &Path, ) -> Result<(), Error> { let src = gst::ElementFactory::make("audiotestsrc") .property("is-live", true) .property_from_str("wave", &self.wave) .build()?; let enc = gst::ElementFactory::make("avenc_aac").build()?; let mux = gst::ElementFactory::make("cmafmux") .property_from_str("header-update-mode", "update") .property("write-mehd", true) .property("fragment-duration", 2500.mseconds()) .build()?; let appsink = gst_app::AppSink::builder().buffer_list(true).build(); pipeline.add_many([&src, &enc, &mux, appsink.upcast_ref()])?; gst::Element::link_many([&src, &enc, &mux, appsink.upcast_ref()])?; probe_encoder(state, enc); setup_appsink(&appsink, &self.name, path, false); Ok(()) } } fn main() -> Result<(), Error> { gst::init()?; gstfmp4::plugin_register_static()?; let path = PathBuf::from("hls_live_stream"); let pipeline = gst::Pipeline::default(); std::fs::create_dir_all(&path).expect("failed to create directory"); let mut manifest_path = path.clone(); manifest_path.push("manifest.m3u8"); let state = Arc::new(Mutex::new(State { video_streams: vec![VideoStream { name: "video_0".to_string(), bitrate: 2_048_000, width: 1280, height: 720, }], audio_streams: vec![ AudioStream { name: "audio_0".to_string(), lang
break; } } }
random_line_split
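Joining this row's prefix, middle, and suffix reproduces hls_live.rs; the random_line_split cut lands inside the segment-trimming loop, so the hidden middle is just the break statement plus the closing braces. Reflowed for readability (reconstructed from the fields above, not new logic):

    // Tail of trim_segments(): delete segments whose removal time has passed.
    while let Some(segment) = state.trimmed_segments.front() {
        if segment.removal_time < state.segments.front().unwrap().date_time {
            let segment = state.trimmed_segments.pop_front().unwrap();
            let mut path = state.path.clone();
            path.push(segment.path);
            println!("Removing {}", path.display());
            std::fs::remove_file(path).expect("Failed to remove old segment");
        } else {
            break; // the hidden middle of this sample begins at this `break`
        }
    }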
hls_live.rs
} let mut all_mimes = self.all_mimes.clone(); all_mimes.sort(); all_mimes.dedup(); let playlist = MasterPlaylist { version: Some(7), variants: self .video_streams .iter() .map(|stream| { let mut path = PathBuf::new(); path.push(&stream.name); path.push("manifest.m3u8"); VariantStream { uri: path.as_path().display().to_string(), bandwidth: stream.bitrate, codecs: Some(all_mimes.join(",")), resolution: Some(m3u8_rs::Resolution { width: stream.width, height: stream.height, }), audio: Some("audio".to_string()), ..Default::default() } }) .collect(), alternatives: self .audio_streams .iter() .map(|stream| { let mut path = PathBuf::new(); path.push(&stream.name); path.push("manifest.m3u8"); AlternativeMedia { media_type: AlternativeMediaType::Audio, uri: Some(path.as_path().display().to_string()), group_id: "audio".to_string(), language: Some(stream.lang.clone()), name: stream.name.clone(), default: stream.default, autoselect: stream.default, channels: Some("2".to_string()), ..Default::default() } }) .collect(), independent_segments: true, ..Default::default() }; println!("Writing master manifest to {}", self.path.display()); let mut file = std::fs::File::create(&self.path).unwrap(); playlist .write_to(&mut file) .expect("Failed to write master playlist"); self.wrote_manifest = true; } } struct Segment { date_time: DateTime<Utc>, duration: gst::ClockTime, path: String, } struct UnreffedSegment { removal_time: DateTime<Utc>, path: String, } struct StreamState { path: PathBuf, segments: VecDeque<Segment>, trimmed_segments: VecDeque<UnreffedSegment>, start_date_time: Option<DateTime<Utc>>, start_time: Option<gst::ClockTime>, media_sequence: u64, segment_index: u32, } struct VideoStream { name: String, bitrate: u64, width: u64, height: u64, } struct AudioStream { name: String, lang: String, default: bool, wave: String, } fn trim_segments(state: &mut StreamState) { // Arbitrary 5 segments window while state.segments.len() > 5 { let segment = state.segments.pop_front().unwrap(); state.media_sequence += 1; state.trimmed_segments.push_back(UnreffedSegment { // HLS spec mandates that segments are removed from the filesystem no sooner // than the duration of the longest playlist + duration of the segment. 
// This is 15 seconds (12.5 + 2.5) in our case, we use 20 seconds to be on the // safe side removal_time: segment .date_time .checked_add_signed(Duration::seconds(20)) .unwrap(), path: segment.path.clone(), }); } while let Some(segment) = state.trimmed_segments.front() { if segment.removal_time < state.segments.front().unwrap().date_time { let segment = state.trimmed_segments.pop_front().unwrap(); let mut path = state.path.clone(); path.push(segment.path); println!("Removing {}", path.display()); std::fs::remove_file(path).expect("Failed to remove old segment"); } else { break; } } } fn update_manifest(state: &mut StreamState) { // Now write the manifest let mut path = state.path.clone(); path.push("manifest.m3u8"); println!("writing manifest to {}", path.display()); trim_segments(state); let playlist = MediaPlaylist { version: Some(7), target_duration: 2.5, media_sequence: state.media_sequence, segments: state .segments .iter() .enumerate() .map(|(idx, segment)| MediaSegment { uri: segment.path.to_string(), duration: (segment.duration.nseconds() as f64 / gst::ClockTime::SECOND.nseconds() as f64) as f32, map: Some(m3u8_rs::Map { uri: "init.cmfi".into(), ..Default::default() }), program_date_time: if idx == 0 { Some(segment.date_time.into()) } else { None }, ..Default::default() }) .collect(), end_list: false, playlist_type: None, i_frames_only: false, start: None, independent_segments: true, ..Default::default() }; let mut file = std::fs::File::create(path).unwrap(); playlist .write_to(&mut file) .expect("Failed to write media playlist"); } fn setup_appsink(appsink: &gst_app::AppSink, name: &str, path: &Path, is_video: bool) { let mut path: PathBuf = path.into(); path.push(name); let state = Arc::new(Mutex::new(StreamState { segments: VecDeque::new(), trimmed_segments: VecDeque::new(), path, start_date_time: None, start_time: gst::ClockTime::NONE, media_sequence: 0, segment_index: 0, })); appsink.set_callbacks( gst_app::AppSinkCallbacks::builder() .new_sample(move |sink| { let sample = sink.pull_sample().map_err(|_| gst::FlowError::Eos)?; let mut state = state.lock().unwrap(); // The muxer only outputs non-empty buffer lists let mut buffer_list = sample.buffer_list_owned().expect("no buffer list"); assert!(!buffer_list.is_empty()); let mut first = buffer_list.get(0).unwrap(); // Each list contains a full segment, i.e. does not start with a DELTA_UNIT assert!(!first.flags().contains(gst::BufferFlags::DELTA_UNIT)); // If the buffer has the DISCONT and HEADER flag set then it contains the media // header, i.e. the `ftyp`, `moov` and other media boxes. // // This might be the initial header or the updated header at the end of the stream. if first .flags() .contains(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER)
first = buffer_list.get(0).unwrap(); } // If the buffer only has the HEADER flag set then this is a segment header that is // followed by one or more actual media buffers. assert!(first.flags().contains(gst::BufferFlags::HEADER)); let mut path = state.path.clone(); let basename = format!( "segment_{}.{}", state.segment_index, if is_video { "cmfv" } else { "cmfa" } ); state.segment_index += 1; path.push(&basename); let segment = sample .segment() .expect("no segment") .downcast_ref::<gst::ClockTime>() .expect("no time segment"); let pts = segment .to_running_time(first.pts().unwrap()) .expect("can't get running time"); if state.start_time.is_none() { state.start_time = Some(pts); } if state.start_date_time.is_none() { let now_utc = Utc::now(); let now_gst = sink.clock().unwrap().time().unwrap(); let pts_clock_time = pts + sink.base_time().unwrap(); let diff = now_gst.checked_sub(pts_clock_time).unwrap(); let pts_utc = now_utc .checked_sub_signed(Duration::nanoseconds(diff.nseconds() as i64)) .unwrap(); state.start_date_time = Some(pts_utc); } let duration = first.duration().unwrap(); let mut file = std::fs::File::create(&path).expect("failed to open fragment"); for buffer in &*buffer_list { use std::io::prelude::*; let map = buffer.map_readable().unwrap(); file.write_all(&map).expect("failed to write fragment"); } let date_time = state .start_date_time .unwrap() .checked_add_signed(Duration::nanoseconds( pts.opt_checked_sub(state.start_time) .unwrap() .unwrap() .nseconds() as i64, )) .unwrap(); println!( "wrote segment with date time {} to {}", date_time, path.display() ); state.segments.push_back(Segment { duration, path: basename.to_string(), date_time, }); update_manifest(&mut state); Ok(gst::FlowSuccess::Ok) }) .eos(move |_sink| { unreachable!(); }) .build(), ); } fn probe_encoder(state: Arc<Mutex<State>>, enc: gst::Element) { enc.static_pad("src").unwrap().add_probe( gst::PadProbeType::EVENT_DOWNSTREAM, move |_pad, info| match info.data { Some(gst::PadProbeData::Event(ref ev)) => match ev.view() { gst::EventView::Caps(e) => { let mime = gst_pbutils::codec_utils_caps_get_mime_codec(e.caps()); let mut state = state.lock().unwrap(); state.all_mimes.push(mime.unwrap().into()); state.maybe_write_manifest(); gst::PadProbeReturn::Remove } _ => gst::PadProbeReturn::Ok, }, _ => gst::PadProbeReturn::Ok, }, ); } impl VideoStream { fn setup( &self, state: Arc<Mutex<State>>, pipeline: &gst::Pipeline, path: &Path, ) -> Result<(), Error> { let src = gst::ElementFactory::make("videotestsrc") .property("is-live", true) .build()?; let raw_capsfilter = gst::ElementFactory::make("capsfilter") .property( "caps", gst_video::VideoCapsBuilder::new() .format(gst_video::VideoFormat::I420) .width(self.width as i32) .height(self.height as i32) .framerate(30.into()) .build(), ) .build()?; let timeoverlay = gst::ElementFactory::make("timeoverlay").build()?; let enc = gst::ElementFactory::make("x264enc") .property("bframes", 0u32) .property("bitrate", self.bitrate as u32 / 1000u32) .property_from_str("tune", "zerolatency") .build()?; let h264_capsfilter = gst::ElementFactory::make("capsfilter") .property( "caps", gst::Caps::builder("video/x-h264") .field("profile", "main") .build(), ) .build()?; let mux = gst::ElementFactory::make("cmafmux") .property("fragment-duration", 2500.mseconds()) .property_from_str("header-update-mode", "update") .property("write-mehd", true) .build()?; let appsink = gst_app::AppSink::builder().buffer_list(true).build(); pipeline.add_many([ &src, &raw_capsfilter, &timeoverlay, &enc, 
&h264_capsfilter, &mux, appsink.upcast_ref(), ])?; gst::Element::link_many([ &src, &raw_capsfilter, &timeoverlay, &enc, &h264_capsfilter, &mux, appsink.upcast_ref(), ])?; probe_encoder(state, enc); setup_appsink(&appsink, &self.name, path, true); Ok(()) } } impl AudioStream { fn setup( &self, state: Arc<Mutex<State>>, pipeline: &gst::Pipeline, path: &Path, ) -> Result<(), Error> { let src = gst::ElementFactory::make("audiotestsrc") .property("is-live", true) .property_from_str("wave", &self.wave) .build()?; let enc = gst::ElementFactory::make("avenc_aac").build()?; let mux = gst::ElementFactory::make("cmafmux") .property_from_str("header-update-mode", "update") .property("write-mehd", true) .property("fragment-duration", 2500.mseconds()) .build()?; let appsink = gst_app::AppSink::builder().buffer_list(true).build(); pipeline.add_many([&src, &enc, &mux, appsink.upcast_ref()])?; gst::Element::link_many([&src, &enc, &mux, appsink.upcast_ref()])?; probe_encoder(state, enc); setup_appsink(&appsink, &self.name, path, false); Ok(()) } } fn main() -> Result<(), Error> { gst::init()?; gstfmp4::plugin_register_static()?; let path = PathBuf::from("hls_live_stream"); let pipeline = gst::Pipeline::default(); std::fs::create_dir_all(&path).expect("failed to create directory"); let mut manifest_path = path.clone(); manifest_path.push("manifest.m3u8"); let state = Arc::new(Mutex::new(State { video_streams: vec![VideoStream { name: "video_0".to_string(), bitrate: 2_048_000, width: 1280, height: 720, }], audio_streams: vec![ AudioStream { name: "audio_0".to_string(),
{ let mut path = state.path.clone(); std::fs::create_dir_all(&path).expect("failed to create directory"); path.push("init.cmfi"); println!("writing header to {}", path.display()); let map = first.map_readable().unwrap(); std::fs::write(path, &map).expect("failed to write header"); drop(map); // Remove the header from the buffer list buffer_list.make_mut().remove(0, 1); // If the list is now empty then it only contained the media header and nothing // else. if buffer_list.is_empty() { return Ok(gst::FlowSuccess::Ok); } // Otherwise get the next buffer and continue working with that.
conditional_block
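For conditional_block samples the hidden middle is an entire if body; here it is the branch that writes the CMAF initialization header when a buffer carries both the DISCONT and HEADER flags. Reflowed from the prefix, middle, and suffix fields above:

    if first
        .flags()
        .contains(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER)
    {
        // Hidden middle: persist the `ftyp`/`moov` boxes as init.cmfi ...
        let mut path = state.path.clone();
        std::fs::create_dir_all(&path).expect("failed to create directory");
        path.push("init.cmfi");
        println!("writing header to {}", path.display());
        let map = first.map_readable().unwrap();
        std::fs::write(path, &map).expect("failed to write header");
        drop(map);
        // ... drop the header buffer from the list ...
        buffer_list.make_mut().remove(0, 1);
        // ... and return early if the list held nothing but the header.
        if buffer_list.is_empty() {
            return Ok(gst::FlowSuccess::Ok);
        }
        first = buffer_list.get(0).unwrap();
    }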
hls_live.rs
} let mut all_mimes = self.all_mimes.clone(); all_mimes.sort(); all_mimes.dedup(); let playlist = MasterPlaylist { version: Some(7), variants: self .video_streams .iter() .map(|stream| { let mut path = PathBuf::new(); path.push(&stream.name); path.push("manifest.m3u8"); VariantStream { uri: path.as_path().display().to_string(), bandwidth: stream.bitrate, codecs: Some(all_mimes.join(",")), resolution: Some(m3u8_rs::Resolution { width: stream.width, height: stream.height, }), audio: Some("audio".to_string()), ..Default::default() } }) .collect(), alternatives: self .audio_streams .iter() .map(|stream| { let mut path = PathBuf::new(); path.push(&stream.name); path.push("manifest.m3u8"); AlternativeMedia { media_type: AlternativeMediaType::Audio, uri: Some(path.as_path().display().to_string()), group_id: "audio".to_string(), language: Some(stream.lang.clone()), name: stream.name.clone(), default: stream.default, autoselect: stream.default, channels: Some("2".to_string()), ..Default::default() } }) .collect(), independent_segments: true, ..Default::default() }; println!("Writing master manifest to {}", self.path.display()); let mut file = std::fs::File::create(&self.path).unwrap(); playlist .write_to(&mut file) .expect("Failed to write master playlist"); self.wrote_manifest = true; } } struct Segment { date_time: DateTime<Utc>, duration: gst::ClockTime, path: String, } struct UnreffedSegment { removal_time: DateTime<Utc>, path: String, } struct StreamState { path: PathBuf, segments: VecDeque<Segment>, trimmed_segments: VecDeque<UnreffedSegment>, start_date_time: Option<DateTime<Utc>>, start_time: Option<gst::ClockTime>, media_sequence: u64, segment_index: u32, } struct VideoStream { name: String, bitrate: u64, width: u64, height: u64, } struct AudioStream { name: String, lang: String, default: bool, wave: String, } fn trim_segments(state: &mut StreamState) { // Arbitrary 5 segments window while state.segments.len() > 5 { let segment = state.segments.pop_front().unwrap(); state.media_sequence += 1; state.trimmed_segments.push_back(UnreffedSegment { // HLS spec mandates that segments are removed from the filesystem no sooner // than the duration of the longest playlist + duration of the segment. 
// This is 15 seconds (12.5 + 2.5) in our case, we use 20 seconds to be on the // safe side removal_time: segment .date_time .checked_add_signed(Duration::seconds(20)) .unwrap(), path: segment.path.clone(), }); } while let Some(segment) = state.trimmed_segments.front() { if segment.removal_time < state.segments.front().unwrap().date_time { let segment = state.trimmed_segments.pop_front().unwrap(); let mut path = state.path.clone(); path.push(segment.path); println!("Removing {}", path.display()); std::fs::remove_file(path).expect("Failed to remove old segment"); } else { break; } } } fn update_manifest(state: &mut StreamState) { // Now write the manifest let mut path = state.path.clone(); path.push("manifest.m3u8"); println!("writing manifest to {}", path.display()); trim_segments(state); let playlist = MediaPlaylist { version: Some(7), target_duration: 2.5, media_sequence: state.media_sequence, segments: state .segments .iter() .enumerate() .map(|(idx, segment)| MediaSegment { uri: segment.path.to_string(), duration: (segment.duration.nseconds() as f64 / gst::ClockTime::SECOND.nseconds() as f64) as f32, map: Some(m3u8_rs::Map { uri: "init.cmfi".into(), ..Default::default() }), program_date_time: if idx == 0 { Some(segment.date_time.into()) } else { None }, ..Default::default() }) .collect(), end_list: false, playlist_type: None, i_frames_only: false, start: None, independent_segments: true, ..Default::default() }; let mut file = std::fs::File::create(path).unwrap(); playlist .write_to(&mut file) .expect("Failed to write media playlist"); } fn setup_appsink(appsink: &gst_app::AppSink, name: &str, path: &Path, is_video: bool)
// The muxer only outputs non-empty buffer lists let mut buffer_list = sample.buffer_list_owned().expect("no buffer list"); assert!(!buffer_list.is_empty()); let mut first = buffer_list.get(0).unwrap(); // Each list contains a full segment, i.e. does not start with a DELTA_UNIT assert!(!first.flags().contains(gst::BufferFlags::DELTA_UNIT)); // If the buffer has the DISCONT and HEADER flag set then it contains the media // header, i.e. the `ftyp`, `moov` and other media boxes. // // This might be the initial header or the updated header at the end of the stream. if first .flags() .contains(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER) { let mut path = state.path.clone(); std::fs::create_dir_all(&path).expect("failed to create directory"); path.push("init.cmfi"); println!("writing header to {}", path.display()); let map = first.map_readable().unwrap(); std::fs::write(path, &map).expect("failed to write header"); drop(map); // Remove the header from the buffer list buffer_list.make_mut().remove(0, 1); // If the list is now empty then it only contained the media header and nothing // else. if buffer_list.is_empty() { return Ok(gst::FlowSuccess::Ok); } // Otherwise get the next buffer and continue working with that. first = buffer_list.get(0).unwrap(); } // If the buffer only has the HEADER flag set then this is a segment header that is // followed by one or more actual media buffers. assert!(first.flags().contains(gst::BufferFlags::HEADER)); let mut path = state.path.clone(); let basename = format!( "segment_{}.{}", state.segment_index, if is_video { "cmfv" } else { "cmfa" } ); state.segment_index += 1; path.push(&basename); let segment = sample .segment() .expect("no segment") .downcast_ref::<gst::ClockTime>() .expect("no time segment"); let pts = segment .to_running_time(first.pts().unwrap()) .expect("can't get running time"); if state.start_time.is_none() { state.start_time = Some(pts); } if state.start_date_time.is_none() { let now_utc = Utc::now(); let now_gst = sink.clock().unwrap().time().unwrap(); let pts_clock_time = pts + sink.base_time().unwrap(); let diff = now_gst.checked_sub(pts_clock_time).unwrap(); let pts_utc = now_utc .checked_sub_signed(Duration::nanoseconds(diff.nseconds() as i64)) .unwrap(); state.start_date_time = Some(pts_utc); } let duration = first.duration().unwrap(); let mut file = std::fs::File::create(&path).expect("failed to open fragment"); for buffer in &*buffer_list { use std::io::prelude::*; let map = buffer.map_readable().unwrap(); file.write_all(&map).expect("failed to write fragment"); } let date_time = state .start_date_time .unwrap() .checked_add_signed(Duration::nanoseconds( pts.opt_checked_sub(state.start_time) .unwrap() .unwrap() .nseconds() as i64, )) .unwrap(); println!( "wrote segment with date time {} to {}", date_time, path.display() ); state.segments.push_back(Segment { duration, path: basename.to_string(), date_time, }); update_manifest(&mut state); Ok(gst::FlowSuccess::Ok) }) .eos(move |_sink| { unreachable!(); }) .build(), ); } fn probe_encoder(state: Arc<Mutex<State>>, enc: gst::Element) { enc.static_pad("src").unwrap().add_probe( gst::PadProbeType::EVENT_DOWNSTREAM, move |_pad, info| match info.data { Some(gst::PadProbeData::Event(ref ev)) => match ev.view() { gst::EventView::Caps(e) => { let mime = gst_pbutils::codec_utils_caps_get_mime_codec(e.caps()); let mut state = state.lock().unwrap(); state.all_mimes.push(mime.unwrap().into()); state.maybe_write_manifest(); gst::PadProbeReturn::Remove } _ => gst::PadProbeReturn::Ok, }, _ => 
gst::PadProbeReturn::Ok, }, ); } impl VideoStream { fn setup( &self, state: Arc<Mutex<State>>, pipeline: &gst::Pipeline, path: &Path, ) -> Result<(), Error> { let src = gst::ElementFactory::make("videotestsrc") .property("is-live", true) .build()?; let raw_capsfilter = gst::ElementFactory::make("capsfilter") .property( "caps", gst_video::VideoCapsBuilder::new() .format(gst_video::VideoFormat::I420) .width(self.width as i32) .height(self.height as i32) .framerate(30.into()) .build(), ) .build()?; let timeoverlay = gst::ElementFactory::make("timeoverlay").build()?; let enc = gst::ElementFactory::make("x264enc") .property("bframes", 0u32) .property("bitrate", self.bitrate as u32 / 1000u32) .property_from_str("tune", "zerolatency") .build()?; let h264_capsfilter = gst::ElementFactory::make("capsfilter") .property( "caps", gst::Caps::builder("video/x-h264") .field("profile", "main") .build(), ) .build()?; let mux = gst::ElementFactory::make("cmafmux") .property("fragment-duration", 2500.mseconds()) .property_from_str("header-update-mode", "update") .property("write-mehd", true) .build()?; let appsink = gst_app::AppSink::builder().buffer_list(true).build(); pipeline.add_many([ &src, &raw_capsfilter, &timeoverlay, &enc, &h264_capsfilter, &mux, appsink.upcast_ref(), ])?; gst::Element::link_many([ &src, &raw_capsfilter, &timeoverlay, &enc, &h264_capsfilter, &mux, appsink.upcast_ref(), ])?; probe_encoder(state, enc); setup_appsink(&appsink, &self.name, path, true); Ok(()) } } impl AudioStream { fn setup( &self, state: Arc<Mutex<State>>, pipeline: &gst::Pipeline, path: &Path, ) -> Result<(), Error> { let src = gst::ElementFactory::make("audiotestsrc") .property("is-live", true) .property_from_str("wave", &self.wave) .build()?; let enc = gst::ElementFactory::make("avenc_aac").build()?; let mux = gst::ElementFactory::make("cmafmux") .property_from_str("header-update-mode", "update") .property("write-mehd", true) .property("fragment-duration", 2500.mseconds()) .build()?; let appsink = gst_app::AppSink::builder().buffer_list(true).build(); pipeline.add_many([&src, &enc, &mux, appsink.upcast_ref()])?; gst::Element::link_many([&src, &enc, &mux, appsink.upcast_ref()])?; probe_encoder(state, enc); setup_appsink(&appsink, &self.name, path, false); Ok(()) } } fn main() -> Result<(), Error> { gst::init()?; gstfmp4::plugin_register_static()?; let path = PathBuf::from("hls_live_stream"); let pipeline = gst::Pipeline::default(); std::fs::create_dir_all(&path).expect("failed to create directory"); let mut manifest_path = path.clone(); manifest_path.push("manifest.m3u8"); let state = Arc::new(Mutex::new(State { video_streams: vec![VideoStream { name: "video_0".to_string(), bitrate: 2_048_000, width: 1280, height: 720, }], audio_streams: vec![ AudioStream { name: "audio_0".to_string(),
{ let mut path: PathBuf = path.into(); path.push(name); let state = Arc::new(Mutex::new(StreamState { segments: VecDeque::new(), trimmed_segments: VecDeque::new(), path, start_date_time: None, start_time: gst::ClockTime::NONE, media_sequence: 0, segment_index: 0, })); appsink.set_callbacks( gst_app::AppSinkCallbacks::builder() .new_sample(move |sink| { let sample = sink.pull_sample().map_err(|_| gst::FlowError::Eos)?; let mut state = state.lock().unwrap();
identifier_body
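identifier_body samples hide everything between a signature and its closing brace: here the prefix ends at the setup_appsink signature, and the hidden middle opens the body that builds the shared StreamState and registers the appsink callbacks. A toy illustration of the same split on a smaller function (a hypothetical helper modeled on the segment-naming logic in this file, not part of the dataset):

    // prefix: everything up to and including the signature
    fn segment_basename(index: u32, is_video: bool) -> String
    // middle: the body the model must produce
    {
        format!("segment_{}.{}", index, if is_video { "cmfv" } else { "cmfa" })
    }
    // suffix: the remainder of the file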
hls_live.rs
} let mut all_mimes = self.all_mimes.clone(); all_mimes.sort(); all_mimes.dedup(); let playlist = MasterPlaylist { version: Some(7), variants: self .video_streams .iter() .map(|stream| { let mut path = PathBuf::new(); path.push(&stream.name); path.push("manifest.m3u8"); VariantStream { uri: path.as_path().display().to_string(), bandwidth: stream.bitrate, codecs: Some(all_mimes.join(",")), resolution: Some(m3u8_rs::Resolution { width: stream.width, height: stream.height, }), audio: Some("audio".to_string()), ..Default::default() } }) .collect(), alternatives: self .audio_streams .iter() .map(|stream| { let mut path = PathBuf::new(); path.push(&stream.name); path.push("manifest.m3u8"); AlternativeMedia { media_type: AlternativeMediaType::Audio, uri: Some(path.as_path().display().to_string()), group_id: "audio".to_string(), language: Some(stream.lang.clone()), name: stream.name.clone(), default: stream.default, autoselect: stream.default, channels: Some("2".to_string()), ..Default::default() } }) .collect(), independent_segments: true, ..Default::default() }; println!("Writing master manifest to {}", self.path.display()); let mut file = std::fs::File::create(&self.path).unwrap(); playlist .write_to(&mut file) .expect("Failed to write master playlist"); self.wrote_manifest = true; } } struct Segment { date_time: DateTime<Utc>, duration: gst::ClockTime, path: String, } struct UnreffedSegment { removal_time: DateTime<Utc>, path: String, } struct StreamState { path: PathBuf, segments: VecDeque<Segment>, trimmed_segments: VecDeque<UnreffedSegment>, start_date_time: Option<DateTime<Utc>>, start_time: Option<gst::ClockTime>, media_sequence: u64, segment_index: u32, } struct
{ name: String, bitrate: u64, width: u64, height: u64, } struct AudioStream { name: String, lang: String, default: bool, wave: String, } fn trim_segments(state: &mut StreamState) { // Arbitrary 5 segments window while state.segments.len() > 5 { let segment = state.segments.pop_front().unwrap(); state.media_sequence += 1; state.trimmed_segments.push_back(UnreffedSegment { // HLS spec mandates that segments are removed from the filesystem no sooner // than the duration of the longest playlist + duration of the segment. // This is 15 seconds (12.5 + 2.5) in our case, we use 20 seconds to be on the // safe side removal_time: segment .date_time .checked_add_signed(Duration::seconds(20)) .unwrap(), path: segment.path.clone(), }); } while let Some(segment) = state.trimmed_segments.front() { if segment.removal_time < state.segments.front().unwrap().date_time { let segment = state.trimmed_segments.pop_front().unwrap(); let mut path = state.path.clone(); path.push(segment.path); println!("Removing {}", path.display()); std::fs::remove_file(path).expect("Failed to remove old segment"); } else { break; } } } fn update_manifest(state: &mut StreamState) { // Now write the manifest let mut path = state.path.clone(); path.push("manifest.m3u8"); println!("writing manifest to {}", path.display()); trim_segments(state); let playlist = MediaPlaylist { version: Some(7), target_duration: 2.5, media_sequence: state.media_sequence, segments: state .segments .iter() .enumerate() .map(|(idx, segment)| MediaSegment { uri: segment.path.to_string(), duration: (segment.duration.nseconds() as f64 / gst::ClockTime::SECOND.nseconds() as f64) as f32, map: Some(m3u8_rs::Map { uri: "init.cmfi".into(), ..Default::default() }), program_date_time: if idx == 0 { Some(segment.date_time.into()) } else { None }, ..Default::default() }) .collect(), end_list: false, playlist_type: None, i_frames_only: false, start: None, independent_segments: true, ..Default::default() }; let mut file = std::fs::File::create(path).unwrap(); playlist .write_to(&mut file) .expect("Failed to write media playlist"); } fn setup_appsink(appsink: &gst_app::AppSink, name: &str, path: &Path, is_video: bool) { let mut path: PathBuf = path.into(); path.push(name); let state = Arc::new(Mutex::new(StreamState { segments: VecDeque::new(), trimmed_segments: VecDeque::new(), path, start_date_time: None, start_time: gst::ClockTime::NONE, media_sequence: 0, segment_index: 0, })); appsink.set_callbacks( gst_app::AppSinkCallbacks::builder() .new_sample(move |sink| { let sample = sink.pull_sample().map_err(|_| gst::FlowError::Eos)?; let mut state = state.lock().unwrap(); // The muxer only outputs non-empty buffer lists let mut buffer_list = sample.buffer_list_owned().expect("no buffer list"); assert!(!buffer_list.is_empty()); let mut first = buffer_list.get(0).unwrap(); // Each list contains a full segment, i.e. does not start with a DELTA_UNIT assert!(!first.flags().contains(gst::BufferFlags::DELTA_UNIT)); // If the buffer has the DISCONT and HEADER flag set then it contains the media // header, i.e. the `ftyp`, `moov` and other media boxes. // // This might be the initial header or the updated header at the end of the stream. 
if first .flags() .contains(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER) { let mut path = state.path.clone(); std::fs::create_dir_all(&path).expect("failed to create directory"); path.push("init.cmfi"); println!("writing header to {}", path.display()); let map = first.map_readable().unwrap(); std::fs::write(path, &map).expect("failed to write header"); drop(map); // Remove the header from the buffer list buffer_list.make_mut().remove(0, 1); // If the list is now empty then it only contained the media header and nothing // else. if buffer_list.is_empty() { return Ok(gst::FlowSuccess::Ok); } // Otherwise get the next buffer and continue working with that. first = buffer_list.get(0).unwrap(); } // If the buffer only has the HEADER flag set then this is a segment header that is // followed by one or more actual media buffers. assert!(first.flags().contains(gst::BufferFlags::HEADER)); let mut path = state.path.clone(); let basename = format!( "segment_{}.{}", state.segment_index, if is_video { "cmfv" } else { "cmfa" } ); state.segment_index += 1; path.push(&basename); let segment = sample .segment() .expect("no segment") .downcast_ref::<gst::ClockTime>() .expect("no time segment"); let pts = segment .to_running_time(first.pts().unwrap()) .expect("can't get running time"); if state.start_time.is_none() { state.start_time = Some(pts); } if state.start_date_time.is_none() { let now_utc = Utc::now(); let now_gst = sink.clock().unwrap().time().unwrap(); let pts_clock_time = pts + sink.base_time().unwrap(); let diff = now_gst.checked_sub(pts_clock_time).unwrap(); let pts_utc = now_utc .checked_sub_signed(Duration::nanoseconds(diff.nseconds() as i64)) .unwrap(); state.start_date_time = Some(pts_utc); } let duration = first.duration().unwrap(); let mut file = std::fs::File::create(&path).expect("failed to open fragment"); for buffer in &*buffer_list { use std::io::prelude::*; let map = buffer.map_readable().unwrap(); file.write_all(&map).expect("failed to write fragment"); } let date_time = state .start_date_time .unwrap() .checked_add_signed(Duration::nanoseconds( pts.opt_checked_sub(state.start_time) .unwrap() .unwrap() .nseconds() as i64, )) .unwrap(); println!( "wrote segment with date time {} to {}", date_time, path.display() ); state.segments.push_back(Segment { duration, path: basename.to_string(), date_time, }); update_manifest(&mut state); Ok(gst::FlowSuccess::Ok) }) .eos(move |_sink| { unreachable!(); }) .build(), ); } fn probe_encoder(state: Arc<Mutex<State>>, enc: gst::Element) { enc.static_pad("src").unwrap().add_probe( gst::PadProbeType::EVENT_DOWNSTREAM, move |_pad, info| match info.data { Some(gst::PadProbeData::Event(ref ev)) => match ev.view() { gst::EventView::Caps(e) => { let mime = gst_pbutils::codec_utils_caps_get_mime_codec(e.caps()); let mut state = state.lock().unwrap(); state.all_mimes.push(mime.unwrap().into()); state.maybe_write_manifest(); gst::PadProbeReturn::Remove } _ => gst::PadProbeReturn::Ok, }, _ => gst::PadProbeReturn::Ok, }, ); } impl VideoStream { fn setup( &self, state: Arc<Mutex<State>>, pipeline: &gst::Pipeline, path: &Path, ) -> Result<(), Error> { let src = gst::ElementFactory::make("videotestsrc") .property("is-live", true) .build()?; let raw_capsfilter = gst::ElementFactory::make("capsfilter") .property( "caps", gst_video::VideoCapsBuilder::new() .format(gst_video::VideoFormat::I420) .width(self.width as i32) .height(self.height as i32) .framerate(30.into()) .build(), ) .build()?; let timeoverlay = gst::ElementFactory::make("timeoverlay").build()?; 
let enc = gst::ElementFactory::make("x264enc") .property("bframes", 0u32) .property("bitrate", self.bitrate as u32 / 1000u32) .property_from_str("tune", "zerolatency") .build()?; let h264_capsfilter = gst::ElementFactory::make("capsfilter") .property( "caps", gst::Caps::builder("video/x-h264") .field("profile", "main") .build(), ) .build()?; let mux = gst::ElementFactory::make("cmafmux") .property("fragment-duration", 2500.mseconds()) .property_from_str("header-update-mode", "update") .property("write-mehd", true) .build()?; let appsink = gst_app::AppSink::builder().buffer_list(true).build(); pipeline.add_many([ &src, &raw_capsfilter, &timeoverlay, &enc, &h264_capsfilter, &mux, appsink.upcast_ref(), ])?; gst::Element::link_many([ &src, &raw_capsfilter, &timeoverlay, &enc, &h264_capsfilter, &mux, appsink.upcast_ref(), ])?; probe_encoder(state, enc); setup_appsink(&appsink, &self.name, path, true); Ok(()) } } impl AudioStream { fn setup( &self, state: Arc<Mutex<State>>, pipeline: &gst::Pipeline, path: &Path, ) -> Result<(), Error> { let src = gst::ElementFactory::make("audiotestsrc") .property("is-live", true) .property_from_str("wave", &self.wave) .build()?; let enc = gst::ElementFactory::make("avenc_aac").build()?; let mux = gst::ElementFactory::make("cmafmux") .property_from_str("header-update-mode", "update") .property("write-mehd", true) .property("fragment-duration", 2500.mseconds()) .build()?; let appsink = gst_app::AppSink::builder().buffer_list(true).build(); pipeline.add_many([&src, &enc, &mux, appsink.upcast_ref()])?; gst::Element::link_many([&src, &enc, &mux, appsink.upcast_ref()])?; probe_encoder(state, enc); setup_appsink(&appsink, &self.name, path, false); Ok(()) } } fn main() -> Result<(), Error> { gst::init()?; gstfmp4::plugin_register_static()?; let path = PathBuf::from("hls_live_stream"); let pipeline = gst::Pipeline::default(); std::fs::create_dir_all(&path).expect("failed to create directory"); let mut manifest_path = path.clone(); manifest_path.push("manifest.m3u8"); let state = Arc::new(Mutex::new(State { video_streams: vec![VideoStream { name: "video_0".to_string(), bitrate: 2_048_000, width: 1280, height: 720, }], audio_streams: vec![ AudioStream { name: "audio_0".to_string(),
VideoStream
identifier_name
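identifier_name is the narrowest split of the four: the prefix ends on the bare struct keyword, the hidden middle is the single identifier VideoStream, and the suffix resumes with the field list. Joined and reflowed, the declaration this sample reconstructs is:

    struct VideoStream {
        name: String,
        bitrate: u64,
        width: u64,
        height: u64,
    }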
mod.rs
use std::collections::HashMap; use std::borrow::Cow; use chrono::{DateTime, Utc, NaiveDateTime}; use log; pub use self::chunked_message::{ChunkSize, ChunkedMessage}; pub use self::compression::MessageCompression; pub use self::wire_message::WireMessage; use crate::{Level, util, Error}; use crate::errors::Result; use serde::de; use serde::de::Deserialize; use serde_with::with_prefix; mod chunked_message; mod compression; mod wire_message; /// Message is the representation of a GELF message. /// /// `Message` provides a fluent setter and getter interface to all of GELF's /// features. Only the `host`-field is not available. It is managed by the /// `Logger`. /// /// A `Message` can also be constructed from a `log::Record`. All /// available metadata is transferred over to the message object. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct Message<'a> { short_message: Cow<'a, str>, full_message: Option<Cow<'a, str>>, #[serde(deserialize_with = "parse_unix_seconds")] timestamp: Option<DateTime<Utc>>, level: Level, #[serde(flatten, with = "prefix_metadata")] metadata: HashMap<Cow<'a, str>, Cow<'a, str>>, } impl<'a> Message<'a> { /// Construct a new log message. /// /// All fields will use their defaults, which usually means Option::None. /// A notable exception is `level`. The GELF spec requires this field to /// default to Level::Alert. pub fn new<S>( short_message: S, ) -> Self where S: Into<Cow<'a, str>> + AsRef<str> { Self::new_with_level(short_message, Level::Alert) } /// Construct a new log message with a defined level /// /// All fields will use their defaults, which usually means Option::None. pub fn new_with_level<S>( short_message: S, level: Level, ) -> Self where S: Into<Cow<'a, str>> + AsRef<str> { Message { short_message: short_message.into(), level, full_message: None, timestamp: None, metadata: HashMap::new(), } } /// Return the `short_message` pub fn short_message(&self) -> &Cow<'a, str> { &self.short_message } /// Set the `short_message` pub fn set_short_message<S>( &mut self, msg: S ) -> &mut Self where S: Into<Cow<'a, str>> + AsRef<str> { self.short_message = msg.into(); self } /// Return the `full_message` pub fn full_message(&self) -> &Option<Cow<'a, str>> { &self.full_message } /// Set the `full_message` pub fn set_full_message<S>( &mut self, msg: S ) -> &mut Self where S: Into<Cow<'a, str>> + AsRef<str> { self.full_message = Some(msg.into()); self } /// Clear the `full_message` pub fn clear_full_message(&mut self) -> &mut Self { self.full_message = None; self } /// Return the `timestamp` pub fn timestamp(&self) -> &Option<DateTime<Utc>> { &self.timestamp } /// Set the `timestamp` pub fn set_timestamp(&mut self, ts: DateTime<Utc>) -> &mut Self { self.timestamp = Some(ts); self } /// Clear the `timestamp` pub fn clear_timestamp(&mut self) -> &mut Self { self.timestamp = None; self } /// Return the `level` pub fn level(&self) -> Level { self.level } /// Set the `level` pub fn set_level(&mut self, level: Level) -> &mut Self
/// Return a metadata field with given key pub fn metadata(&self, key: &'a str) -> Option<&Cow<'a, str>> { self.metadata.get(key) } /// Return all metadata pub fn all_metadata(&self) -> &HashMap<Cow<'a, str>, Cow<'a, str>> { &self.metadata } /// Set a metadata field with given key to value pub fn set_metadata<S, T>( &mut self, key: S, value: T, ) -> Result<&mut Self> where S: Into<Cow<'a, str>> + AsRef<str>, T: Into<Cow<'a, str>> + AsRef<str>, { let key = key.into(); if key == "id" { return Err(Error::IllegalNameForAdditional { name: key.into() }.into()); } self.metadata.insert(key, value.into()); Ok(self) } } impl<'a> From<&'a log::Record<'a>> for Message<'a> { /// Create a `Message` from a given `log::Record` including all metadata fn from(record: &'a log::Record) -> Message<'a> { // Create message with given text and level let short_message = format!("{}", record.args()); let mut msg = Message::new_with_level( short_message, record.level().into(), ); msg.set_timestamp(Utc::now()); // Add default metadata, and ignore the results (`let _ = ...`) as all keys are valid // and set_metadata only fails on invalid keys let _ = msg.set_metadata("file", record.file().unwrap_or("(none)").to_string()); let _ = msg.set_metadata("line", record.line().map(|v| v.to_string()).unwrap_or_else(|| "(none)".into())); let _ = msg.set_metadata("module_path", record.module_path().unwrap_or("(none)").to_string()); let _ = msg.set_metadata("process_id", util::pid().to_string()); msg } } with_prefix!(prefix_metadata "_"); fn parse_unix_seconds<'de, D>(d: D) -> std::result::Result<Option<DateTime<Utc>>, D::Error> where D: de::Deserializer<'de> { let value: Option<f64> = Deserialize::deserialize(d)?; let value = match value { Some(v) => v, None => return Ok(None) }; let seconds = value.trunc() as i64; let nsecs = (value.fract() * 1_000_000_000_f64).abs() as u32; let ndt = NaiveDateTime::from_timestamp_opt(seconds, nsecs); if let Some(ndt) = ndt { Ok(Some(DateTime::<Utc>::from_utc(ndt, Utc))) } else { Err(de::Error::custom(format!( "Invalid or out of range value '{}' for DateTime", value ))) } } #[cfg(test)] mod test { use super::*; use rand::{thread_rng, Rng}; use rand::distributions::{Alphanumeric, Uniform}; use serde_json::de::SliceRead; use serde_json::StreamDeserializer; use chrono::Timelike; fn random_message() -> Message<'static> { let short_message: String = thread_rng() .sample_iter(&Alphanumeric) .take(100) .collect(); let full_message: String = thread_rng() .sample_iter(&Alphanumeric) .take(200) .collect(); let mut rng = thread_rng(); let int = rng.sample::<i64, _>(Uniform::new_inclusive(0, 7)); let mut message = Message::new(short_message); message.set_full_message(full_message); message.set_level(Level::from(int)); random_metadata().into_iter().for_each(|pair| { message.set_metadata(pair.0, pair.1).unwrap(); }); message } fn random_metadata() -> HashMap<String, String> { let mut rng = thread_rng(); let int = rng.sample::<usize, _>(Uniform::new_inclusive(5, 30)); std::iter::repeat_with(|| { let value: String = thread_rng() .sample_iter(&Alphanumeric) .take(200) .collect(); let key: String = thread_rng() .sample_iter(&Alphanumeric) .take(10) .collect(); (key, value) }).take(int) .fold(HashMap::new(), |mut acc, m| { acc.insert(m.0, m.1); acc }) } fn random_messages(amount: usize) -> impl Iterator<Item=Message<'static>> { std::iter::repeat_with(random_message).take(amount) } #[test] fn test_deserialize_valid_json() { let message = random_message(); let input = serde_json::to_string(&message).unwrap(); let actual_message: Message = serde_json::from_str(input.as_str()).expect("No error parsing"); assert_eq!(actual_message.short_message, message.short_message); assert_eq!(actual_message.full_message, message.full_message); assert_eq!(actual_message.timestamp, message.timestamp); assert_eq!(actual_message.metadata, message.metadata); assert_eq!(actual_message.level, message.level); } #[test] fn test_deserialize_multiple_valid_jsons() { let messages = random_messages(10).collect::<Vec<Message>>(); let input = messages.clone().into_iter() .map(|m| serde_json::to_string(&m).unwrap()) .fold(String::new(), |mut acc, v| { acc.push_str(v.as_str()); acc }); let read = SliceRead::new(input.as_bytes()); let mut stream: StreamDeserializer<SliceRead, Message> = serde_json::StreamDeserializer::new(read); let mut actual_parsed: Vec<Message> = vec![]; while let Some(m) = stream.next() { actual_parsed.push(m.unwrap()); } assert_eq!(actual_parsed, messages); assert_eq!(stream.byte_offset(), input.len()); } #[test] fn test_parse_timestamp_json() { let raw_message = r#" {"version": "1.1", "short_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel", "full_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n", "timestamp": 1578669969.108120000, "level": 6, "_thread_name": "Thread-11", "_logger_name": "org.springframework.integration.endpoint.EventDrivenConsumer"} "#; let actual_message: Message = serde_json::from_str(raw_message).expect("Parse with success"); let actual_timestamp = actual_message.timestamp().as_ref().expect("Timestamp"); assert_eq!(actual_timestamp.timestamp(), 1_578_669_969); assert!(actual_timestamp.nanosecond() < 108_120_000); assert_eq!(actual_message.full_message().as_ref().expect("Full Message"), "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n"); assert_eq!(actual_message.level(), Level::Informational); assert_eq!(actual_message.metadata("thread_name").expect("thread name"), "Thread-11"); assert_eq!(actual_message.metadata("logger_name").expect("logger name"), "org.springframework.integration.endpoint.EventDrivenConsumer"); } }
{ self.level = level; self }
identifier_body
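The parse_unix_seconds deserializer in this sample splits a fractional UNIX timestamp into whole seconds and nanoseconds before building a DateTime<Utc>. A standalone sketch of the same arithmetic, assuming the chrono 0.4 API the sample itself uses (both constructors are deprecated in newer chrono releases):

    use chrono::{DateTime, NaiveDateTime, Utc};

    fn unix_seconds_to_datetime(value: f64) -> Option<DateTime<Utc>> {
        let seconds = value.trunc() as i64;
        // Carry the fractional part over as nanoseconds; abs() guards
        // against negative fractions for pre-epoch timestamps.
        let nsecs = (value.fract() * 1_000_000_000_f64).abs() as u32;
        NaiveDateTime::from_timestamp_opt(seconds, nsecs)
            .map(|ndt| DateTime::<Utc>::from_utc(ndt, Utc))
    }

For example, unix_seconds_to_datetime(1578669969.108120) yields the whole-second timestamp 1_578_669_969 checked by test_parse_timestamp_json above; the nanosecond part comes out slightly below 108_120_000 because of f64 rounding, which is exactly the slack the test's assert! allows.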
mod.rs
use std::collections::HashMap; use std::borrow::Cow; use chrono::{DateTime, Utc, NaiveDateTime}; use log; pub use self::chunked_message::{ChunkSize, ChunkedMessage}; pub use self::compression::MessageCompression; pub use self::wire_message::WireMessage; use crate::{Level, util, Error}; use crate::errors::Result; use serde::de; use serde::de::Deserialize; use serde_with::with_prefix; mod chunked_message; mod compression; mod wire_message; /// Message is the representation of a GELF message. /// /// `Message` provides a fluent setter and getter interface to all of GELF's /// features. Only the `host`-field is not available. It is managed by the /// `Logger`. /// /// A `Message` can also be constructed from a `log::Record`. All /// available metadata is transferred over to the message object. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct Message<'a> { short_message: Cow<'a, str>, full_message: Option<Cow<'a, str>>, #[serde(deserialize_with = "parse_unix_seconds")] timestamp: Option<DateTime<Utc>>, level: Level, #[serde(flatten, with = "prefix_metadata")] metadata: HashMap<Cow<'a, str>, Cow<'a, str>>, } impl<'a> Message<'a> { /// Construct a new log message. /// /// All fields will use their defaults, which usually means Option::None. /// A notable exception is `level`. The GELF spec requires this field to /// default to Level::Alert. pub fn new<S>( short_message: S, ) -> Self where S: Into<Cow<'a, str>> + AsRef<str> { Self::new_with_level(short_message, Level::Alert) } /// Construct a new log message with a defined level /// /// All fields will use their defaults, which usually means Option::None. pub fn
<S>( short_message: S, level: Level, ) -> Self where S: Into<Cow<'a, str>> + AsRef<str> { Message { short_message: short_message.into(), level, full_message: None, timestamp: None, metadata: HashMap::new(), } } /// Return the `short_message` pub fn short_message(&self) -> &Cow<'a, str> { &self.short_message } /// Set the `short_message` pub fn set_short_message<S>( &mut self, msg: S ) -> &mut Self where S: Into<Cow<'a, str>> + AsRef<str> { self.short_message = msg.into(); self } /// Return the `full_message` pub fn full_message(&self) -> &Option<Cow<'a, str>> { &self.full_message } /// Set the `full_message` pub fn set_full_message<S>( &mut self, msg: S ) -> &mut Self where S: Into<Cow<'a, str>> + AsRef<str> { self.full_message = Some(msg.into()); self } /// Clear the `full_message` pub fn clear_full_message(&mut self) -> &mut Self { self.full_message = None; self } /// Return the `timestamp` pub fn timestamp(&self) -> &Option<DateTime<Utc>> { &self.timestamp } /// Set the `timestamp` pub fn set_timestamp(&mut self, ts: DateTime<Utc>) -> &mut Self { self.timestamp = Some(ts); self } /// Clear the `timestamp` pub fn clear_timestamp(&mut self) -> &mut Self { self.timestamp = None; self } /// Return the `level` pub fn level(&self) -> Level { self.level } /// Set the `level` pub fn set_level(&mut self, level: Level) -> &mut Self { self.level = level; self } /// Return a metadata field with given key pub fn metadata(&self, key: &'a str) -> Option<&Cow<'a, str>> { self.metadata.get(key) } /// Return all metadata pub fn all_metadata(&self) -> &HashMap<Cow<'a, str>, Cow<'a, str>> { &self.metadata } /// Set a metadata field with given key to value pub fn set_metadata<S, T>( &mut self, key: S, value: T, ) -> Result<&mut Self> where S: Into<Cow<'a, str>> + AsRef<str>, T: Into<Cow<'a, str>> + AsRef<str>, { let key = key.into(); if key == "id" { return Err(Error::IllegalNameForAdditional { name: key.into() }.into()); } self.metadata.insert(key, value.into()); Ok(self) } } impl<'a> From<&'a log::Record<'a>> for Message<'a> { /// Create a `Message` from a given `log::Record` including all metadata fn from(record: &'a log::Record) -> Message<'a> { // Create message with given text and level let short_message = format!("{}", record.args()); let mut msg = Message::new_with_level( short_message, record.level().into(), ); msg.set_timestamp(Utc::now()); // Add default metadata, and ignore the results (`let _ = ...`) as all keys are valid // and set_metadata only fails on invalid keys let _ = msg.set_metadata("file", record.file().unwrap_or("(none)").to_string()); let _ = msg.set_metadata("line", record.line().map(|v| v.to_string()).unwrap_or_else(|| "(none)".into())); let _ = msg.set_metadata("module_path", record.module_path().unwrap_or("(none)").to_string()); let _ = msg.set_metadata("process_id", util::pid().to_string()); msg } } with_prefix!(prefix_metadata "_"); fn parse_unix_seconds<'de, D>(d: D) -> std::result::Result<Option<DateTime<Utc>>, D::Error> where D: de::Deserializer<'de> { let value: Option<f64> = Deserialize::deserialize(d)?; let value = match value { Some(v) => v, None => return Ok(None) }; let seconds = value.trunc() as i64; let nsecs = (value.fract() * 1_000_000_000_f64).abs() as u32; let ndt = NaiveDateTime::from_timestamp_opt(seconds, nsecs); if let Some(ndt) = ndt { Ok(Some(DateTime::<Utc>::from_utc(ndt, Utc))) } else { Err(de::Error::custom(format!( "Invalid or out of range value '{}' for DateTime", value ))) } } #[cfg(test)] mod test { use super::*; use rand::{thread_rng, Rng}; use rand::distributions::{Alphanumeric, Uniform}; use serde_json::de::SliceRead; use serde_json::StreamDeserializer; use chrono::Timelike; fn random_message() -> Message<'static> { let short_message: String = thread_rng() .sample_iter(&Alphanumeric) .take(100) .collect(); let full_message: String = thread_rng() .sample_iter(&Alphanumeric) .take(200) .collect(); let mut rng = thread_rng(); let int = rng.sample::<i64, _>(Uniform::new_inclusive(0, 7)); let mut message = Message::new(short_message); message.set_full_message(full_message); message.set_level(Level::from(int)); random_metadata().into_iter().for_each(|pair| { message.set_metadata(pair.0, pair.1).unwrap(); }); message } fn random_metadata() -> HashMap<String, String> { let mut rng = thread_rng(); let int = rng.sample::<usize, _>(Uniform::new_inclusive(5, 30)); std::iter::repeat_with(|| { let value: String = thread_rng() .sample_iter(&Alphanumeric) .take(200) .collect(); let key: String = thread_rng() .sample_iter(&Alphanumeric) .take(10) .collect(); (key, value) }).take(int) .fold(HashMap::new(), |mut acc, m| { acc.insert(m.0, m.1); acc }) } fn random_messages(amount: usize) -> impl Iterator<Item=Message<'static>> { std::iter::repeat_with(random_message).take(amount) } #[test] fn test_deserialize_valid_json() { let message = random_message(); let input = serde_json::to_string(&message).unwrap(); let actual_message: Message = serde_json::from_str(input.as_str()).expect("No error parsing"); assert_eq!(actual_message.short_message, message.short_message); assert_eq!(actual_message.full_message, message.full_message); assert_eq!(actual_message.timestamp, message.timestamp); assert_eq!(actual_message.metadata, message.metadata); assert_eq!(actual_message.level, message.level); } #[test] fn test_deserialize_multiple_valid_jsons() { let messages = random_messages(10).collect::<Vec<Message>>(); let input = messages.clone().into_iter() .map(|m| serde_json::to_string(&m).unwrap()) .fold(String::new(), |mut acc, v| { acc.push_str(v.as_str()); acc }); let read = SliceRead::new(input.as_bytes()); let mut stream: StreamDeserializer<SliceRead, Message> = serde_json::StreamDeserializer::new(read); let mut actual_parsed: Vec<Message> = vec![]; while let Some(m) = stream.next() { actual_parsed.push(m.unwrap()); } assert_eq!(actual_parsed, messages); assert_eq!(stream.byte_offset(), input.len()); } #[test] fn test_parse_timestamp_json() { let raw_message = r#" {"version": "1.1", "short_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel", "full_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n", "timestamp": 1578669969.108120000, "level": 6, "_thread_name": "Thread-11", "_logger_name": "org.springframework.integration.endpoint.EventDrivenConsumer"} "#; let actual_message: Message = serde_json::from_str(raw_message).expect("Parse with success"); let actual_timestamp = actual_message.timestamp().as_ref().expect("Timestamp"); assert_eq!(actual_timestamp.timestamp(), 1_578_669_969); assert!(actual_timestamp.nanosecond() < 108_120_000); assert_eq!(actual_message.full_message().as_ref().expect("Full Message"), "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n"); assert_eq!(actual_message.level(), Level::Informational); assert_eq!(actual_message.metadata("thread_name").expect("thread name"), "Thread-11");
assert_eq!(actual_message.metadata("logger_name").expect("logger name"), "org.springframework.integration.endpoint.EventDrivenConsumer"); } }
new_with_level
identifier_name
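A minimal usage sketch of the fluent `Message` API from this record, assuming the surrounding crate's `Message` and `Level` types are in scope (`Level::Informational` follows the variant naming used in the tests; the `let _ =` pattern mirrors the crate's own `From<log::Record>` impl), not a standalone program:

use chrono::Utc;

fn build_example_message() -> Message<'static> {
    // `new_with_level` is the completed identifier above; `new` defaults to Level::Alert.
    let mut msg = Message::new_with_level("service started", Level::Informational);
    msg.set_full_message("service started with default configuration")
        .set_timestamp(Utc::now());
    // `set_metadata` returns a Result because the key "id" is reserved by GELF.
    let _ = msg.set_metadata("component", "startup");
    assert!(msg.set_metadata("id", "42").is_err());
    msg
}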
mod.rs
use std::collections::HashMap; use std::borrow::Cow; use chrono::{DateTime, Utc, NaiveDateTime}; use log; pub use self::chunked_message::{ChunkSize, ChunkedMessage}; pub use self::compression::MessageCompression; pub use self::wire_message::WireMessage; use crate::{Level, util, Error}; use crate::errors::Result; use serde::de; use serde::de::Deserialize; use serde_with::with_prefix; mod chunked_message; mod compression; mod wire_message; /// Message is the representation of a GELF message. /// /// `Message` provides a fluid setter and getter interface to all of GELF's /// features. Only the `host`-field is not available. It is managed by the /// `Logger`. /// /// A `Message` can also be constructed from a `log::LogRecord`. All /// available metadata is transferred over to the message object. #[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] pub struct Message<'a> { short_message: Cow<'a, str>, full_message: Option<Cow<'a, str>>, #[serde(deserialize_with = "parse_unix_seconds")] timestamp: Option<DateTime<Utc>>, level: Level, #[serde(flatten, with = "prefix_metadata")] metadata: HashMap<Cow<'a, str>, Cow<'a, str>>, } impl<'a> Message<'a> { /// Construct a new log message. /// /// All fields will use their defaults. This usually means Option::None. /// A notable exception is `level`. The GELF spec requires this field to /// default to Level::Alert. pub fn new<S>( short_message: S, ) -> Self where S: Into<Cow<'a, str>> + AsRef<str> { Self::new_with_level(short_message, Level::Alert) } /// Construct a new log message with a defined level /// /// All fields will use their defaults. This usually means Option::None. pub fn new_with_level<S>( short_message: S, level: Level, ) -> Self where S: Into<Cow<'a, str>> + AsRef<str> { Message { short_message: short_message.into(), level, full_message: None, timestamp: None, metadata: HashMap::new(), } } /// Return the `short_message` pub fn short_message(&self) -> &Cow<'a, str> { &self.short_message } /// Set the `short_message` pub fn set_short_message<S>( &mut self, msg: S ) -> &mut Self where S: Into<Cow<'a, str>> + AsRef<str> { self.short_message = msg.into(); self } /// Return the `full_message` pub fn full_message(&self) -> &Option<Cow<'a, str>> { &self.full_message } /// Set the `full_message` pub fn set_full_message<S>( &mut self, msg: S ) -> &mut Self where S: Into<Cow<'a, str>> + AsRef<str> { self.full_message = Some(msg.into()); self } // Clear the `full_message` pub fn clear_full_message(&mut self) -> &mut Self { self.full_message = None; self } /// Return the `timestamp` pub fn timestamp(&self) -> &Option<DateTime<Utc>> { &self.timestamp } /// Set the `timestamp` pub fn set_timestamp(&mut self, ts: DateTime<Utc>) -> &mut Self { self.timestamp = Some(ts); self }
self } /// Return the `level` pub fn level(&self) -> Level { self.level } /// Set the `level` pub fn set_level(&mut self, level: Level) -> &mut Self { self.level = level; self } /// Return a metadata field with given key pub fn metadata(&self, key: &'a str) -> Option<&Cow<'a, str>> { self.metadata.get(key) } /// Return all metadata pub fn all_metadata(&self) -> &HashMap<Cow<'a, str>, Cow<'a, str>> { &self.metadata } /// Set a metadata field with given key to value pub fn set_metadata<S, T>( &mut self, key: S, value: T, ) -> Result<&mut Self> where S: Into<Cow<'a, str>> + AsRef<str>, T: Into<Cow<'a, str>> + AsRef<str>, { let key = key.into(); if key == "id" { return Err(Error::IllegalNameForAdditional { name: key.into() }.into()); } self.metadata.insert(key, value.into()); Ok(self) } } impl<'a> From<&'a log::Record<'a>> for Message<'a> { /// Create a `Message` from given `log::LogRecord` including all metadata fn from(record: &'a log::Record) -> Message<'a> { // Create message with given text and level let short_message = format!("{}", record.args()); let mut msg = Message::new_with_level( short_message, record.level().into(), ); msg.set_timestamp(Utc::now()); // Add default metadata, and ignore the results (`let _ =...`) as all keys are valid // and set_metadata only fails on invalid keys let _ = msg.set_metadata("file", record.file().unwrap_or("(none)").to_string()); let _ = msg.set_metadata("line", record.line().map(|v| v.to_string()).unwrap_or_else(|| "(none)".into())); let _ = msg.set_metadata("module_path", record.module_path().unwrap_or("(none)").to_string()); let _ = msg.set_metadata("process_id", util::pid().to_string()); msg } } with_prefix!(prefix_metadata "_"); fn parse_unix_seconds<'de, D>(d: D) -> std::result::Result<Option<DateTime<Utc>>, D::Error> where D: de::Deserializer<'de> { let value: Option<f64> = Deserialize::deserialize(d)?; let value = match value { Some(v) => v, None => return Ok(None) }; let seconds = value.trunc() as i64; let nsecs = (value.fract() * 1_000_000_000_f64).abs() as u32; let ndt = NaiveDateTime::from_timestamp_opt(seconds, nsecs); if let Some(ndt) = ndt { Ok(Some(DateTime::<Utc>::from_utc(ndt, Utc))) } else { Err(de::Error::custom(format!( "Invalid or out of range value '{}' for DateTime", value ))) } } #[cfg(test)] mod test { use super::*; use rand::{thread_rng, Rng}; use rand::distributions::{Alphanumeric, Uniform}; use serde_json::de::SliceRead; use serde_json::StreamDeserializer; use chrono::Timelike; fn random_message() -> Message<'static> { let short_message: String = thread_rng() .sample_iter(&Alphanumeric) .take(100) .collect(); let full_message: String = thread_rng() .sample_iter(&Alphanumeric) .take(200) .collect(); let mut rng = thread_rng(); let int = rng.sample::<i64, _>(Uniform::new_inclusive(0, 7)); let mut message = Message::new(short_message); message.set_full_message(full_message); message.set_level(Level::from(int)); random_metadata().into_iter().for_each(|pair| { message.set_metadata(pair.0, pair.1).unwrap(); }); message } fn random_metadata() -> HashMap<String, String> { let mut rng = thread_rng(); let int = rng.sample::<usize, _>(Uniform::new_inclusive(5, 30)); std::iter::repeat_with(|| { let value: String = thread_rng() .sample_iter(&Alphanumeric) .take(200) .collect(); let key: String = thread_rng() .sample_iter(&Alphanumeric) .take(10) .collect(); (key, value) }).take(int) .fold(HashMap::new(), |mut acc, m| { acc.insert(m.0, m.1); acc }) } fn random_messages(amount: usize) -> impl Iterator<Item=Message<'static>> { 
std::iter::repeat_with(random_message).take(amount) } #[test] fn test_deserialize_valid_json() { let message = random_message(); let input = serde_json::to_string(&message).unwrap(); let actual_message: Message = serde_json::from_str(input.as_str()).expect("No erro parsing"); assert_eq!(actual_message.short_message, message.short_message); assert_eq!(actual_message.full_message, message.full_message); assert_eq!(actual_message.timestamp, message.timestamp); assert_eq!(actual_message.metadata, message.metadata); assert_eq!(actual_message.level, message.level); } #[test] fn test_deserialize_multiple_valid_jsons() { let messages = random_messages(10).collect::<Vec<Message>>(); let input = messages.clone().into_iter() .map(|m| serde_json::to_string(&m).unwrap()) .fold(String::new(), |mut acc, v| { acc.push_str(v.as_str()); acc }); let read = SliceRead::new(input.as_bytes()); let mut stream: StreamDeserializer<SliceRead, Message> = serde_json::StreamDeserializer::new(read); let mut actual_parsed: Vec<Message> = vec![]; while let Some(m) = stream.next() { actual_parsed.push(m.unwrap()); } assert_eq!(actual_parsed, messages); assert_eq!(stream.byte_offset(), input.len()); } #[test] fn test_parse_timestamp_json() { let raw_message = r#" {"version": "1.1", "short_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel", "full_message": "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n", "timestamp": 1578669969.108120000, "level": 6, "_thread_name": "Thread-11", "_logger_name": "org.springframework.integration.endpoint.EventDrivenConsumer"} "#; let actual_message: Message = serde_json::from_str(raw_message).expect("Parse with success"); let actual_timestamp = actual_message.timestamp().as_ref().expect("Timestamp"); assert_eq!(actual_timestamp.timestamp(), 1_578_669_969); assert!(actual_timestamp.nanosecond() < 108_120_000); assert_eq!(actual_message.full_message().as_ref().expect("Full Message"), "Removing {logging-channel-adapter:_org.springframework.integration.errorLogger} as a subscriber to the 'errorChannel' channel\n"); assert_eq!(actual_message.level(), Level::Informational); assert_eq!(actual_message.metadata("thread_name").expect("thread name"), "Thread-11"); assert_eq!(actual_message.metadata("logger_name").expect("logger name"), "org.springframework.integration.endpoint.EventDrivenConsumer"); } }
/// Clear the `timestamp` pub fn clear_timestamp(&mut self) -> &mut Self { self.timestamp = None;
random_line_split
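The `parse_unix_seconds` deserializer in this record splits a fractional UNIX timestamp into whole seconds (trunc) and nanoseconds (fract). A standalone sketch of the same decomposition, using the same pre-0.4.31 chrono constructors that appear in the record:

use chrono::{DateTime, NaiveDateTime, Utc};

fn from_unix_fractional(value: f64) -> Option<DateTime<Utc>> {
    let seconds = value.trunc() as i64;
    let nsecs = (value.fract() * 1_000_000_000_f64).abs() as u32;
    NaiveDateTime::from_timestamp_opt(seconds, nsecs)
        .map(|ndt| DateTime::<Utc>::from_utc(ndt, Utc))
}

fn main() {
    // 1578669969.108120000 from the test fixture; the fraction loses precision
    // in f64, which is why the test only asserts nanosecond() < 108_120_000.
    let ts = from_unix_fractional(1578669969.10812).unwrap();
    assert_eq!(ts.timestamp(), 1_578_669_969);
}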
main.rs
#[macro_use] extern crate log; extern crate mio_multithread_unix; extern crate env_logger; extern crate httparse; extern crate mio; extern crate net2; extern crate num_cpus; extern crate slab; extern crate time; use std::ascii::AsciiExt; use std::env; use std::fmt; use std::io::{self, Read, Write}; use std::mem; use std::net::SocketAddr; use std::panic; use std::slice; use std::str; use std::thread; use net2::unix::*; use mio::tcp::{TcpStream, TcpListener}; use mio::{Poll, Token, EventSet, PollOpt, Events}; use slab::Slab; const LISTENER: mio::Token = mio::Token(0); struct Server<'a> { count: u64, listener: &'a TcpListener, connections: Slab<Connection, usize>, } impl<'a> Server<'a> { fn new(listener: &'a TcpListener) -> Server<'a> { Server { count: 0, listener: listener, connections: Slab::new_starting_at(1, 1024), } } fn ready(&mut self, poll: &Poll, token: Token, events: EventSet) { debug!("{:?} {:?}", token, events); if token == LISTENER { while let Ok(Some(socket)) = self.listener.accept() { debug!("accepted"); self.count += 1; // socket.0.set_nodelay(true).unwrap(); let token = self.connections .insert_with(move |_| Connection::new(socket.0)) .unwrap(); let token = mio::Token(token); poll.register(&self.connections[token.into()].socket, token, EventSet::readable() | EventSet::writable(), PollOpt::edge()).unwrap(); self.try_connection(token, EventSet::all()); } } else { self.try_connection(token, events); } } fn try_connection(&mut self, token: Token, events: EventSet) { let token = token.into(); // Simulate a `catch_unwind` that a real server would do anyway let res = panic::catch_unwind(panic::AssertUnwindSafe(|| { self.connections[token].ready(events) })).expect("oh no it panicked!"); if res.is_err() || self.connections[token].closed { if let Err(ref e) = res { info!("error: {:?}\non: {:?} {:?}", e, token, self.connections[token].socket); } debug!("removing"); self.connections.remove(token); } } }
struct Connection { socket: TcpStream, input: Vec<u8>, output: Output, keepalive: bool, closed: bool, read_closed: bool, events: EventSet, } struct Output { buf: Vec<u8>, } impl Connection { fn new(socket: TcpStream) -> Connection { Connection { socket: socket, input: Vec::with_capacity(2048), output: Output { buf: Vec::with_capacity(2048), }, keepalive: false, read_closed: false, closed: false, events: EventSet::none(), } } fn ready(&mut self, events: EventSet) -> io::Result<()> { self.events = self.events | events; while self.events.is_readable() &&!self.read_closed { let before = self.input.len(); let eof = try!(read(&mut self.socket, &mut self.input, &mut self.events)); if eof { debug!("eof"); } self.read_closed = eof; if self.input.len() == before { break } while self.input.len() > 0 { let (req, amt) = match try!(parse(&self.input)) { Some(pair) => pair, None => { debug!("need more data for a request"); return Ok(()) } }; let request = Request { inner: req, amt: amt, data: mem::replace(&mut self.input, Vec::new()), }; debug!("got a request"); self.keepalive = request.version() >= 1; self.keepalive = self.keepalive || request.headers().any(|s| { s.0.eq_ignore_ascii_case("connection") && s.1.eq_ignore_ascii_case(b"keep-alive") }); let response = process(&request); response.to_bytes(&mut self.output.buf); self.input = request.into_input_buf(); if!self.keepalive { debug!("disabling keepalive"); self.read_closed = true; } } } if self.events.is_writable() && self.output.buf.len() > 0 { let done = try!(write(&mut self.socket, &mut self.output, &mut self.events)); if done { debug!("wrote response"); if!self.keepalive || self.read_closed { self.closed = true; } } } if self.read_closed && self.output.buf.len() == 0 { self.closed = true; } Ok(()) } } fn process(r: &Request) -> Response { assert!(r.path() == "/plaintext"); let mut r = Response::new(); r.header("Content-Type", "text/plain; charset=UTF-8") .body("Hello, World!"); return r } type Slice = (usize, usize); #[allow(dead_code)] pub struct Request { inner: RawRequest, data: Vec<u8>, amt: usize, } struct RawRequest { method: Slice, path: Slice, version: u8, headers: Vec<(Slice, Slice)>, } pub struct Headers<'a> { iter: slice::Iter<'a, (Slice, Slice)>, req: &'a Request, } impl Request { pub fn method(&self) -> &str { str::from_utf8(self.get(&self.inner.method)).unwrap() } pub fn path(&self) -> &str { str::from_utf8(self.get(&self.inner.path)).unwrap() } pub fn version(&self) -> u8 { self.inner.version } pub fn headers(&self) -> Headers { Headers { iter: self.inner.headers.iter(), req: self, } } pub fn into_input_buf(mut self) -> Vec<u8> { self.data.drain(..self.amt); self.data } fn get(&self, s: &Slice) -> &[u8] { &self.data[s.0..s.1] } } impl<'a> Iterator for Headers<'a> { type Item = (&'a str, &'a [u8]); fn next(&mut self) -> Option<(&'a str, &'a [u8])> { self.iter.next().map(|&(ref k, ref v)| { (str::from_utf8(self.req.get(k)).unwrap(), self.req.get(v)) }) } } fn parse(buf: &[u8]) -> io::Result<Option<(RawRequest, usize)>> { let mut headers = [httparse::EMPTY_HEADER; 16]; let mut r = httparse::Request::new(&mut headers); let status = try!(r.parse(&buf).map_err(|_| { io::Error::new(io::ErrorKind::Other, "failed to parse") })); return match status { httparse::Status::Complete(amt) => { debug!("ok {:?}", String::from_utf8_lossy(&buf[..amt])); Ok(Some((RawRequest { method: slice(buf, r.method.unwrap().as_bytes()), path: slice(buf, r.path.unwrap().as_bytes()), version: r.version.unwrap(), headers: r.headers.iter().map(|h| { (slice(buf, 
h.name.as_bytes()), slice(buf, &h.value)) }).collect(), }, amt))) } httparse::Status::Partial => Ok(None), }; fn slice(buf: &[u8], inner: &[u8]) -> Slice { let start = inner.as_ptr() as usize - buf.as_ptr() as usize; assert!(start < buf.len()); (start, start + inner.len()) } } pub struct Response { headers: Vec<(String, String)>, response: String, } impl Response { pub fn new() -> Response { Response { headers: Vec::new(), response: String::new(), } } pub fn header(&mut self, name: &str, val: &str) -> &mut Response { self.headers.push((name.to_string(), val.to_string())); self } pub fn body(&mut self, s: &str) -> &mut Response { self.response = s.to_string(); self } fn to_bytes(&self, into: &mut Vec<u8>) { use std::fmt::Write; write!(FastWrite(into), "\ HTTP/1.1 200 OK\r\n\ Server: Example\r\n\ Date: {}\r\n\ Content-Length: {}\r\n\ ", mio_multithread_unix::date::now(), self.response.len()).unwrap(); for &(ref k, ref v) in &self.headers { extend(into, k.as_bytes()); extend(into, b": "); extend(into, v.as_bytes()); extend(into, b"\r\n"); } extend(into, b"\r\n"); extend(into, self.response.as_bytes()); } } // TODO: impl fmt::Write for Vec<u8> // // Right now `write!` on `Vec<u8>` goes through io::Write and is not super // speedy, so inline a less-crufty implementation here which doesn't go through // io::Error. struct FastWrite<'a>(&'a mut Vec<u8>); impl<'a> fmt::Write for FastWrite<'a> { fn write_str(&mut self, s: &str) -> fmt::Result { extend(self.0, s.as_bytes()); Ok(()) } } // TODO: why does extend_from_slice not optimize? fn extend(dst: &mut Vec<u8>, data: &[u8]) { use std::ptr; dst.reserve(data.len()); let prev = dst.len(); unsafe { ptr::copy_nonoverlapping(data.as_ptr(), dst.as_mut_ptr().offset(prev as isize), data.len()); dst.set_len(prev + data.len()); } } fn read(socket: &mut TcpStream, input: &mut Vec<u8>, events: &mut EventSet) -> io::Result<bool> { match socket.read(unsafe { slice_to_end(input) }) { Ok(0) => return Ok(true), Ok(n) => { let len = input.len(); unsafe { input.set_len(len + n); } return Ok(false) } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { *events = *events &!EventSet::readable(); return Ok(false) } Err(e) => return Err(e), } unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] { use std::slice; if v.capacity() == 0 { v.reserve(16); } if v.capacity() == v.len() { v.reserve(1); } slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize), v.capacity() - v.len()) } } fn write(socket: &mut TcpStream, output: &mut Output, events: &mut EventSet) -> io::Result<bool> { assert!(output.buf.len() > 0); loop { match socket.write(&output.buf) { Ok(0) => { return Err(io::Error::new(io::ErrorKind::Other, "early eof2")) } Ok(n) => { output.buf.drain(..n); if output.buf.len() == 0 { return Ok(true) } } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { *events = *events &!EventSet::writable(); return Ok(false) } Err(e) => return Err(e), } } } fn main() { env_logger::init().unwrap(); let threads = (0..num_cpus::get()).map(|_| { thread::spawn(|| { let poll = mio::Poll::new().unwrap(); let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse::<SocketAddr>().unwrap(); let socket = net2::TcpBuilder::new_v4().unwrap(); socket.reuse_address(true).unwrap(); socket.reuse_port(true).unwrap(); socket.bind(&addr).unwrap(); let listener = socket.listen(2048).unwrap(); let listener = TcpListener::from_listener(listener, &addr).unwrap(); poll.register(&listener, LISTENER, EventSet::readable(), PollOpt::level()).unwrap(); let mut 
events = Events::new(); let mut server = Server::new(&listener); loop { poll.poll(&mut events, None).unwrap(); for i in 0..events.len() { let event = events.get(i).unwrap(); server.ready(&poll, event.token(), event.kind()); } } }) }).collect::<Vec<_>>(); for thread in threads { thread.join().unwrap(); } }
random_line_split
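The `parse` function in this record stores `(start, end)` offsets into the request buffer instead of borrowed `&str` slices, so `Request` can own its buffer without self-referential lifetimes. A standalone sketch of that pointer-offset trick:

type Slice = (usize, usize);

// Translate a subslice back into offsets relative to the buffer that contains it.
fn slice(buf: &[u8], inner: &[u8]) -> Slice {
    let start = inner.as_ptr() as usize - buf.as_ptr() as usize;
    assert!(start < buf.len());
    (start, start + inner.len())
}

fn main() {
    let buf = b"GET /plaintext HTTP/1.1\r\n";
    let path = &buf[4..14]; // a subslice borrowed from `buf`
    let s = slice(buf, path);
    assert_eq!(&buf[s.0..s.1], &b"/plaintext"[..]);
}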
main.rs
#[macro_use] extern crate log; extern crate mio_multithread_unix; extern crate env_logger; extern crate httparse; extern crate mio; extern crate net2; extern crate num_cpus; extern crate slab; extern crate time; use std::ascii::AsciiExt; use std::env; use std::fmt; use std::io::{self, Read, Write}; use std::mem; use std::net::SocketAddr; use std::panic; use std::slice; use std::str; use std::thread; use net2::unix::*; use mio::tcp::{TcpStream, TcpListener}; use mio::{Poll, Token, EventSet, PollOpt, Events}; use slab::Slab; const LISTENER: mio::Token = mio::Token(0); struct
<'a> { count: u64, listener: &'a TcpListener, connections: Slab<Connection, usize>, } impl<'a> Server<'a> { fn new(listener: &'a TcpListener) -> Server<'a> { Server { count: 0, listener: listener, connections: Slab::new_starting_at(1, 1024), } } fn ready(&mut self, poll: &Poll, token: Token, events: EventSet) { debug!("{:?} {:?}", token, events); if token == LISTENER { while let Ok(Some(socket)) = self.listener.accept() { debug!("accepted"); self.count += 1; // socket.0.set_nodelay(true).unwrap(); let token = self.connections .insert_with(move |_| Connection::new(socket.0)) .unwrap(); let token = mio::Token(token); poll.register(&self.connections[token.into()].socket, token, EventSet::readable() | EventSet::writable(), PollOpt::edge()).unwrap(); self.try_connection(token, EventSet::all()); } } else { self.try_connection(token, events); } } fn try_connection(&mut self, token: Token, events: EventSet) { let token = token.into(); // Simulate a `catch_unwind` that a real server would do anyway let res = panic::catch_unwind(panic::AssertUnwindSafe(|| { self.connections[token].ready(events) })).expect("oh no it panicked!"); if res.is_err() || self.connections[token].closed { if let Err(ref e) = res { info!("error: {:?}\non: {:?} {:?}", e, token, self.connections[token].socket); } debug!("removing"); self.connections.remove(token); } } } struct Connection { socket: TcpStream, input: Vec<u8>, output: Output, keepalive: bool, closed: bool, read_closed: bool, events: EventSet, } struct Output { buf: Vec<u8>, } impl Connection { fn new(socket: TcpStream) -> Connection { Connection { socket: socket, input: Vec::with_capacity(2048), output: Output { buf: Vec::with_capacity(2048), }, keepalive: false, read_closed: false, closed: false, events: EventSet::none(), } } fn ready(&mut self, events: EventSet) -> io::Result<()> { self.events = self.events | events; while self.events.is_readable() &&!self.read_closed { let before = self.input.len(); let eof = try!(read(&mut self.socket, &mut self.input, &mut self.events)); if eof { debug!("eof"); } self.read_closed = eof; if self.input.len() == before { break } while self.input.len() > 0 { let (req, amt) = match try!(parse(&self.input)) { Some(pair) => pair, None => { debug!("need more data for a request"); return Ok(()) } }; let request = Request { inner: req, amt: amt, data: mem::replace(&mut self.input, Vec::new()), }; debug!("got a request"); self.keepalive = request.version() >= 1; self.keepalive = self.keepalive || request.headers().any(|s| { s.0.eq_ignore_ascii_case("connection") && s.1.eq_ignore_ascii_case(b"keep-alive") }); let response = process(&request); response.to_bytes(&mut self.output.buf); self.input = request.into_input_buf(); if!self.keepalive { debug!("disabling keepalive"); self.read_closed = true; } } } if self.events.is_writable() && self.output.buf.len() > 0 { let done = try!(write(&mut self.socket, &mut self.output, &mut self.events)); if done { debug!("wrote response"); if!self.keepalive || self.read_closed { self.closed = true; } } } if self.read_closed && self.output.buf.len() == 0 { self.closed = true; } Ok(()) } } fn process(r: &Request) -> Response { assert!(r.path() == "/plaintext"); let mut r = Response::new(); r.header("Content-Type", "text/plain; charset=UTF-8") .body("Hello, World!"); return r } type Slice = (usize, usize); #[allow(dead_code)] pub struct Request { inner: RawRequest, data: Vec<u8>, amt: usize, } struct RawRequest { method: Slice, path: Slice, version: u8, headers: Vec<(Slice, Slice)>, } pub struct Headers<'a> { 
iter: slice::Iter<'a, (Slice, Slice)>, req: &'a Request, } impl Request { pub fn method(&self) -> &str { str::from_utf8(self.get(&self.inner.method)).unwrap() } pub fn path(&self) -> &str { str::from_utf8(self.get(&self.inner.path)).unwrap() } pub fn version(&self) -> u8 { self.inner.version } pub fn headers(&self) -> Headers { Headers { iter: self.inner.headers.iter(), req: self, } } pub fn into_input_buf(mut self) -> Vec<u8> { self.data.drain(..self.amt); self.data } fn get(&self, s: &Slice) -> &[u8] { &self.data[s.0..s.1] } } impl<'a> Iterator for Headers<'a> { type Item = (&'a str, &'a [u8]); fn next(&mut self) -> Option<(&'a str, &'a [u8])> { self.iter.next().map(|&(ref k, ref v)| { (str::from_utf8(self.req.get(k)).unwrap(), self.req.get(v)) }) } } fn parse(buf: &[u8]) -> io::Result<Option<(RawRequest, usize)>> { let mut headers = [httparse::EMPTY_HEADER; 16]; let mut r = httparse::Request::new(&mut headers); let status = try!(r.parse(&buf).map_err(|_| { io::Error::new(io::ErrorKind::Other, "failed to parse") })); return match status { httparse::Status::Complete(amt) => { debug!("ok {:?}", String::from_utf8_lossy(&buf[..amt])); Ok(Some((RawRequest { method: slice(buf, r.method.unwrap().as_bytes()), path: slice(buf, r.path.unwrap().as_bytes()), version: r.version.unwrap(), headers: r.headers.iter().map(|h| { (slice(buf, h.name.as_bytes()), slice(buf, &h.value)) }).collect(), }, amt))) } httparse::Status::Partial => Ok(None), }; fn slice(buf: &[u8], inner: &[u8]) -> Slice { let start = inner.as_ptr() as usize - buf.as_ptr() as usize; assert!(start < buf.len()); (start, start + inner.len()) } } pub struct Response { headers: Vec<(String, String)>, response: String, } impl Response { pub fn new() -> Response { Response { headers: Vec::new(), response: String::new(), } } pub fn header(&mut self, name: &str, val: &str) -> &mut Response { self.headers.push((name.to_string(), val.to_string())); self } pub fn body(&mut self, s: &str) -> &mut Response { self.response = s.to_string(); self } fn to_bytes(&self, into: &mut Vec<u8>) { use std::fmt::Write; write!(FastWrite(into), "\ HTTP/1.1 200 OK\r\n\ Server: Example\r\n\ Date: {}\r\n\ Content-Length: {}\r\n\ ", mio_multithread_unix::date::now(), self.response.len()).unwrap(); for &(ref k, ref v) in &self.headers { extend(into, k.as_bytes()); extend(into, b": "); extend(into, v.as_bytes()); extend(into, b"\r\n"); } extend(into, b"\r\n"); extend(into, self.response.as_bytes()); } } // TODO: impl fmt::Write for Vec<u8> // // Right now `write!` on `Vec<u8>` goes through io::Write and is not super // speedy, so inline a less-crufty implementation here which doesn't go through // io::Error. struct FastWrite<'a>(&'a mut Vec<u8>); impl<'a> fmt::Write for FastWrite<'a> { fn write_str(&mut self, s: &str) -> fmt::Result { extend(self.0, s.as_bytes()); Ok(()) } } // TODO: why does extend_from_slice not optimize? 
fn extend(dst: &mut Vec<u8>, data: &[u8]) { use std::ptr; dst.reserve(data.len()); let prev = dst.len(); unsafe { ptr::copy_nonoverlapping(data.as_ptr(), dst.as_mut_ptr().offset(prev as isize), data.len()); dst.set_len(prev + data.len()); } } fn read(socket: &mut TcpStream, input: &mut Vec<u8>, events: &mut EventSet) -> io::Result<bool> { match socket.read(unsafe { slice_to_end(input) }) { Ok(0) => return Ok(true), Ok(n) => { let len = input.len(); unsafe { input.set_len(len + n); } return Ok(false) } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { *events = *events &!EventSet::readable(); return Ok(false) } Err(e) => return Err(e), } unsafe fn slice_to_end(v: &mut Vec<u8>) -> &mut [u8] { use std::slice; if v.capacity() == 0 { v.reserve(16); } if v.capacity() == v.len() { v.reserve(1); } slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize), v.capacity() - v.len()) } } fn write(socket: &mut TcpStream, output: &mut Output, events: &mut EventSet) -> io::Result<bool> { assert!(output.buf.len() > 0); loop { match socket.write(&output.buf) { Ok(0) => { return Err(io::Error::new(io::ErrorKind::Other, "early eof2")) } Ok(n) => { output.buf.drain(..n); if output.buf.len() == 0 { return Ok(true) } } Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => { *events = *events &!EventSet::writable(); return Ok(false) } Err(e) => return Err(e), } } } fn main() { env_logger::init().unwrap(); let threads = (0..num_cpus::get()).map(|_| { thread::spawn(|| { let poll = mio::Poll::new().unwrap(); let addr = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string()); let addr = addr.parse::<SocketAddr>().unwrap(); let socket = net2::TcpBuilder::new_v4().unwrap(); socket.reuse_address(true).unwrap(); socket.reuse_port(true).unwrap(); socket.bind(&addr).unwrap(); let listener = socket.listen(2048).unwrap(); let listener = TcpListener::from_listener(listener, &addr).unwrap(); poll.register(&listener, LISTENER, EventSet::readable(), PollOpt::level()).unwrap(); let mut events = Events::new(); let mut server = Server::new(&listener); loop { poll.poll(&mut events, None).unwrap(); for i in 0..events.len() { let event = events.get(i).unwrap(); server.ready(&poll, event.token(), event.kind()); } } }) }).collect::<Vec<_>>(); for thread in threads { thread.join().unwrap(); } }
Server
identifier_name
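The `FastWrite` wrapper in this record exists because `write!` into a `Vec<u8>` goes through `io::Write` and its `io::Error` plumbing; implementing `fmt::Write` directly over the buffer is cheaper. A self-contained sketch, substituting safe `extend_from_slice` for the record's manual `extend`:

use std::fmt::{self, Write};

struct FastWrite<'a>(&'a mut Vec<u8>);

impl<'a> fmt::Write for FastWrite<'a> {
    // Infallible append: no io::Error can occur when writing into memory.
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.0.extend_from_slice(s.as_bytes());
        Ok(())
    }
}

fn main() {
    let mut buf = Vec::new();
    write!(FastWrite(&mut buf), "Content-Length: {}\r\n", 13).unwrap();
    assert_eq!(buf, b"Content-Length: 13\r\n");
}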
parser.rs
use crate::error::Error; use crate::resolve_import::resolve_import; use std::sync::Arc; use std::sync::Mutex; use swc_common::comments::SingleThreadedComments; use swc_common::errors::Diagnostic; use swc_common::errors::DiagnosticBuilder; use swc_common::errors::Emitter; use swc_common::errors::Handler; use swc_common::errors::HandlerFlags; use swc_common::input::StringInput; use swc_common::FileName; use swc_common::SourceMap; use swc_ecmascript::ast::Program; use swc_ecmascript::dep_graph::analyze_dependencies; use swc_ecmascript::dep_graph::DependencyKind; use swc_ecmascript::parser::lexer::Lexer; use swc_ecmascript::parser::EsConfig; use swc_ecmascript::parser::JscTarget; use swc_ecmascript::parser::Parser; use swc_ecmascript::parser::Syntax; use swc_ecmascript::parser::TsConfig; use url::Url; // Returns (deps, transpiled source code) pub fn get_deps_and_transpile( url: &Url, source: &str, content_type: &Option<String>, ) -> Result<(Vec<Url>, Option<String>), Error> { let comments = SingleThreadedComments::default(); let source_map = SourceMap::default(); let source_file = source_map .new_source_file(FileName::Custom(url.to_string()), source.to_string()); let input = StringInput::from(&*source_file); let syntax = get_syntax(url, content_type); let lexer = Lexer::new(syntax, JscTarget::Es2020, input, Some(&comments)); let mut parser = Parser::new_from(lexer); let module = parser .parse_module() .map_err(|e| ParseError::new(e, &source_map))?; let mut deps = Vec::new(); for import in analyze_dependencies(&module, &source_map, &comments) { if (import.kind == DependencyKind::Import || import.kind == DependencyKind::Export) && import.is_dynamic == false { let specifier = import.specifier.to_string(); deps.push(resolve_import(&specifier, url.as_str())?); } } // If the file is not jsx, ts, or tsx we do not need to transform it. In that // case source == transformed. if!syntax.jsx() &&!syntax.typescript() { return Ok((deps, None)); } use swc_ecmascript::transforms::react; let program = Program::Module(module); let options = EmitOptions::default(); let source_map = std::rc::Rc::new(source_map); let jsx_pass = react::react( source_map.clone(), Some(&comments), react::Options { pragma: options.jsx_factory.clone(), pragma_frag: options.jsx_fragment_factory.clone(), // this will use `Object.assign()` instead of the `_extends` helper // when spreading props. 
use_builtins: true, ..Default::default() }, ); use swc_common::chain; use swc_common::Globals; use swc_ecmascript::transforms::fixer; use swc_ecmascript::transforms::helpers; use swc_ecmascript::transforms::pass::Optional; use swc_ecmascript::transforms::proposals; use swc_ecmascript::transforms::typescript; use swc_ecmascript::visit::FoldWith; let mut passes = chain!( Optional::new(jsx_pass, options.transform_jsx), proposals::decorators::decorators(proposals::decorators::Config { legacy: true, emit_metadata: options.emit_metadata }), helpers::inject_helpers(), typescript::strip(), fixer(Some(&comments)), ); let program = swc_common::GLOBALS.set(&Globals::new(), || { helpers::HELPERS.set(&helpers::Helpers::new(false), || { program.fold_with(&mut passes) }) }); use swc_ecmascript::codegen::text_writer::JsWriter; use swc_ecmascript::codegen::Node; let mut src_map_buf = vec![]; let mut buf = vec![]; { let writer = Box::new(JsWriter::new( source_map.clone(), "\n", &mut buf, Some(&mut src_map_buf), )); let config = swc_ecmascript::codegen::Config { minify: false }; let mut emitter = swc_ecmascript::codegen::Emitter { cfg: config, comments: Some(&comments), cm: source_map.clone(), wr: writer, }; program .emit_with(&mut emitter) .map_err(|err| Error::Other(Box::new(err)))?; } let mut src = String::from_utf8(buf).map_err(|err| Error::Other(Box::new(err)))?; { let mut buf = Vec::new(); source_map .build_source_map_from(&mut src_map_buf, None) .to_writer(&mut buf) .map_err(|err| Error::Other(Box::new(err)))?; src.push_str("//# sourceMappingURL=data:application/json;base64,"); let encoded_map = base64::encode(buf); src.push_str(&encoded_map); } Ok((deps, Some(src))) } fn get_syntax(url: &Url, maybe_content_type: &Option<String>) -> Syntax { fn get_es_config(jsx: bool) -> EsConfig { EsConfig { class_private_methods: true, class_private_props: true, class_props: true, dynamic_import: true, export_default_from: true, export_namespace_from: true, import_meta: true, jsx, nullish_coalescing: true, num_sep: true, optional_chaining: true, top_level_await: true, ..EsConfig::default() } } fn get_ts_config(tsx: bool, dts: bool) -> TsConfig { TsConfig { decorators: true, dts, dynamic_import: true, tsx, ..TsConfig::default() } } let maybe_extension = if let Some(content_type) = maybe_content_type { match content_type .split(";") .next() .unwrap() .trim() .to_lowercase() .as_ref() { "application/typescript" | "text/typescript" | "video/vnd.dlna.mpeg-tts" | "video/mp2t" | "application/x-typescript" => Some("ts"), "application/javascript" | "text/javascript" | "application/ecmascript" | "text/ecmascript" | "application/x-javascript" | "application/node" => Some("js"), "text/jsx" => Some("jsx"), "text/tsx" => Some("tsx"), _ => None, } } else { None }; let extension = if maybe_extension.is_some() { maybe_extension } else { let parts: Vec<&str> = url.as_str().split('.').collect(); parts.last().copied() }; match extension { Some("js") => Syntax::Es(get_es_config(false)), Some("jsx") => Syntax::Es(get_es_config(true)), Some("ts") => Syntax::Typescript(get_ts_config(false, false)), Some("tsx") => Syntax::Typescript(get_ts_config(true, false)), _ => Syntax::Typescript(get_ts_config(false, false)), } } pub struct ParseError { lines: Vec<String>, } impl ParseError { fn new( err: swc_ecmascript::parser::error::Error, source_map: &SourceMap, ) -> Self { let error_buffer = ErrorBuffer::default(); let handler = Handler::with_emitter_and_flags( Box::new(error_buffer.clone()), HandlerFlags { can_emit_warnings: true, 
dont_buffer_diagnostics: true, ..HandlerFlags::default() }, ); let mut diagnostic = err.into_diagnostic(&handler); diagnostic.emit(); let v = error_buffer.0.lock().unwrap(); let lines = v .iter() .map(|d| { if let Some(span) = d.span.primary_span() { let loc = source_map.lookup_char_pos(span.lo); let file_name = match &loc.file.name { FileName::Custom(n) => n, _ => unreachable!(), }; format!( "{} at {}:{}:{}", d.message(), file_name, loc.line, loc.col_display ) } else { d.message() } }) .collect::<Vec<_>>(); Self { lines } } } impl std::error::Error for ParseError {} impl std::fmt::Display for ParseError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for line in &self.lines { writeln!(f, "{}", line)?; } Ok(()) } } impl std::fmt::Debug for ParseError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Display::fmt(self, f) } } /// A buffer for collecting errors from the AST parser. #[derive(Debug, Clone, Default)] pub struct ErrorBuffer(Arc<Mutex<Vec<Diagnostic>>>); impl Emitter for ErrorBuffer { fn emit(&mut self, db: &DiagnosticBuilder) { self.0.lock().unwrap().push((**db).clone()); } } /// Options which can be adjusted when transpiling a module. #[derive(Debug, Clone)] pub struct EmitOptions { /// Indicate if JavaScript is being checked/transformed as well, or if it is /// only TypeScript. pub check_js: bool, /// When emitting a legacy decorator, also emit experimental decorator meta /// data. Defaults to `false`. pub emit_metadata: bool, /// Should the source map be inlined in the emitted code file, or provided /// as a separate file. Defaults to `true`. pub inline_source_map: bool, /// When transforming JSX, what value should be used for the JSX factory. /// Defaults to `React.createElement`. pub jsx_factory: String, /// When transforming JSX, what value should be used for the JSX fragment /// factory. Defaults to `React.Fragment`. pub jsx_fragment_factory: String, /// Should JSX be transformed or preserved. Defaults to `true`. pub transform_jsx: bool, } impl Default for EmitOptions { fn default() -> Self { EmitOptions { check_js: false, emit_metadata: false, inline_source_map: true, jsx_factory: "h".into(), jsx_fragment_factory: "Fragment".into(), transform_jsx: true, } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_get_syntax() { // Prefer content-type over extension. let url = Url::parse("https://deno.land/x/[email protected]/bar.js").unwrap(); let content_type = Some("text/jsx".to_string()); let syntax = get_syntax(&url, &content_type); assert!(syntax.jsx()); assert!(!syntax.typescript()); // Fallback to extension if content-type is unsupported. let url = Url::parse("https://deno.land/x/[email protected]/bar.tsx").unwrap(); let content_type = Some("text/unsupported".to_string()); let syntax = get_syntax(&url, &content_type); assert!(syntax.jsx()); assert!(syntax.typescript()); } #[test] fn jsx() { let url = Url::parse( "https://deno.land/x/[email protected]/example/pages/dynamic/%5Bname%5D.tsx", ) .unwrap(); let source = r#" import { Fragment, h } from "../../deps.ts"; import type { PageProps } from "../../deps.ts"; function UserPage(props: PageProps) { const name = props.route?.name?? ""; return ( <> <h1>This is the page for {name}</h1> <p> <a href="/">Go home</a> </p> </> ); } export default UserPage; "#; let (deps, _transpiled) = get_deps_and_transpile(&url, source, &None).unwrap(); assert_eq!(deps.len(), 1); } #[test] #[ignore] fn
() { let url = Url::parse("https://deno.land/x/[email protected]/router.ts").unwrap(); let source = r#" delete<P extends RouteParams = RP, S extends State = RS>( name: string, path: string, ...middleware: RouterMiddleware<P, S>[] ): Router<P extends RP ? P : (P & RP), S extends RS ? S : (S & RS)>; "#; let (deps, _transpiled) = get_deps_and_transpile(&url, source, &None).unwrap(); assert_eq!(deps.len(), 0); } #[test] #[ignore] fn dynamic_import() { let url = Url::parse("https://deno.land/x/[email protected]/router.ts").unwrap(); let source = r#" await import("fs"); await import("https://deno.land/std/version.ts"); "#; let (deps, _transpiled) = get_deps_and_transpile(&url, source, &None).unwrap(); assert_eq!(deps.len(), 0); } }
complex_types
identifier_name
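A simplified, standalone sketch of the resolution order in this record's `get_syntax`: the content-type is consulted first and the URL extension is only a fallback. The content-type table here is abridged, and the real function maps the result on to swc `Syntax` configs rather than returning the extension string:

fn effective_extension<'a>(url: &'a str, content_type: Option<&str>) -> Option<&'a str> {
    if let Some(ct) = content_type {
        let essence = ct.split(';').next().unwrap().trim().to_lowercase();
        match essence.as_str() {
            "application/typescript" | "text/typescript" => return Some("ts"),
            "application/javascript" | "text/javascript" => return Some("js"),
            "text/jsx" => return Some("jsx"),
            "text/tsx" => return Some("tsx"),
            _ => {} // unsupported content-type: fall through to the extension
        }
    }
    url.rsplit('.').next()
}

fn main() {
    // Content-type wins over the file extension, as asserted in the tests above.
    assert_eq!(effective_extension("https://deno.land/x/foo/bar.js", Some("text/jsx")), Some("jsx"));
    // An unsupported content-type falls back to the extension.
    assert_eq!(effective_extension("https://deno.land/x/foo/bar.tsx", Some("text/unsupported")), Some("tsx"));
}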
parser.rs
use crate::error::Error; use crate::resolve_import::resolve_import; use std::sync::Arc; use std::sync::Mutex; use swc_common::comments::SingleThreadedComments; use swc_common::errors::Diagnostic; use swc_common::errors::DiagnosticBuilder; use swc_common::errors::Emitter; use swc_common::errors::Handler; use swc_common::errors::HandlerFlags; use swc_common::input::StringInput; use swc_common::FileName; use swc_common::SourceMap; use swc_ecmascript::ast::Program; use swc_ecmascript::dep_graph::analyze_dependencies; use swc_ecmascript::dep_graph::DependencyKind; use swc_ecmascript::parser::lexer::Lexer; use swc_ecmascript::parser::EsConfig; use swc_ecmascript::parser::JscTarget; use swc_ecmascript::parser::Parser; use swc_ecmascript::parser::Syntax; use swc_ecmascript::parser::TsConfig; use url::Url; // Returns (deps, transpiled source code) pub fn get_deps_and_transpile( url: &Url, source: &str, content_type: &Option<String>, ) -> Result<(Vec<Url>, Option<String>), Error> { let comments = SingleThreadedComments::default(); let source_map = SourceMap::default(); let source_file = source_map .new_source_file(FileName::Custom(url.to_string()), source.to_string()); let input = StringInput::from(&*source_file); let syntax = get_syntax(url, content_type); let lexer = Lexer::new(syntax, JscTarget::Es2020, input, Some(&comments)); let mut parser = Parser::new_from(lexer); let module = parser .parse_module() .map_err(|e| ParseError::new(e, &source_map))?; let mut deps = Vec::new(); for import in analyze_dependencies(&module, &source_map, &comments) { if (import.kind == DependencyKind::Import || import.kind == DependencyKind::Export) && import.is_dynamic == false { let specifier = import.specifier.to_string(); deps.push(resolve_import(&specifier, url.as_str())?); } } // If the file is not jsx, ts, or tsx we do not need to transform it. In that // case source == transformed. if!syntax.jsx() &&!syntax.typescript() { return Ok((deps, None)); } use swc_ecmascript::transforms::react; let program = Program::Module(module); let options = EmitOptions::default(); let source_map = std::rc::Rc::new(source_map); let jsx_pass = react::react( source_map.clone(), Some(&comments), react::Options { pragma: options.jsx_factory.clone(), pragma_frag: options.jsx_fragment_factory.clone(), // this will use `Object.assign()` instead of the `_extends` helper // when spreading props. 
use_builtins: true, ..Default::default() }, ); use swc_common::chain; use swc_common::Globals; use swc_ecmascript::transforms::fixer; use swc_ecmascript::transforms::helpers; use swc_ecmascript::transforms::pass::Optional; use swc_ecmascript::transforms::proposals; use swc_ecmascript::transforms::typescript; use swc_ecmascript::visit::FoldWith; let mut passes = chain!( Optional::new(jsx_pass, options.transform_jsx), proposals::decorators::decorators(proposals::decorators::Config { legacy: true, emit_metadata: options.emit_metadata }), helpers::inject_helpers(), typescript::strip(), fixer(Some(&comments)), ); let program = swc_common::GLOBALS.set(&Globals::new(), || { helpers::HELPERS.set(&helpers::Helpers::new(false), || { program.fold_with(&mut passes) }) }); use swc_ecmascript::codegen::text_writer::JsWriter; use swc_ecmascript::codegen::Node; let mut src_map_buf = vec![]; let mut buf = vec![]; { let writer = Box::new(JsWriter::new( source_map.clone(), "\n", &mut buf, Some(&mut src_map_buf), )); let config = swc_ecmascript::codegen::Config { minify: false }; let mut emitter = swc_ecmascript::codegen::Emitter { cfg: config, comments: Some(&comments), cm: source_map.clone(), wr: writer, }; program .emit_with(&mut emitter) .map_err(|err| Error::Other(Box::new(err)))?; } let mut src = String::from_utf8(buf).map_err(|err| Error::Other(Box::new(err)))?; { let mut buf = Vec::new(); source_map .build_source_map_from(&mut src_map_buf, None) .to_writer(&mut buf) .map_err(|err| Error::Other(Box::new(err)))?; src.push_str("//# sourceMappingURL=data:application/json;base64,"); let encoded_map = base64::encode(buf); src.push_str(&encoded_map); } Ok((deps, Some(src))) } fn get_syntax(url: &Url, maybe_content_type: &Option<String>) -> Syntax { fn get_es_config(jsx: bool) -> EsConfig { EsConfig { class_private_methods: true, class_private_props: true, class_props: true, dynamic_import: true, export_default_from: true, export_namespace_from: true, import_meta: true, jsx, nullish_coalescing: true, num_sep: true, optional_chaining: true, top_level_await: true, ..EsConfig::default() } } fn get_ts_config(tsx: bool, dts: bool) -> TsConfig { TsConfig { decorators: true, dts, dynamic_import: true, tsx, ..TsConfig::default() } } let maybe_extension = if let Some(content_type) = maybe_content_type { match content_type .split(";") .next() .unwrap() .trim() .to_lowercase() .as_ref() { "application/typescript" | "text/typescript" | "video/vnd.dlna.mpeg-tts" | "video/mp2t" | "application/x-typescript" => Some("ts"), "application/javascript" | "text/javascript" | "application/ecmascript" | "text/ecmascript" | "application/x-javascript" | "application/node" => Some("js"), "text/jsx" => Some("jsx"), "text/tsx" => Some("tsx"), _ => None, } } else { None }; let extension = if maybe_extension.is_some() { maybe_extension } else { let parts: Vec<&str> = url.as_str().split('.').collect(); parts.last().copied() }; match extension { Some("js") => Syntax::Es(get_es_config(false)), Some("jsx") => Syntax::Es(get_es_config(true)), Some("ts") => Syntax::Typescript(get_ts_config(false, false)), Some("tsx") => Syntax::Typescript(get_ts_config(true, false)), _ => Syntax::Typescript(get_ts_config(false, false)), } } pub struct ParseError { lines: Vec<String>, } impl ParseError { fn new( err: swc_ecmascript::parser::error::Error, source_map: &SourceMap, ) -> Self { let error_buffer = ErrorBuffer::default(); let handler = Handler::with_emitter_and_flags( Box::new(error_buffer.clone()), HandlerFlags { can_emit_warnings: true, 
dont_buffer_diagnostics: true, ..HandlerFlags::default() }, ); let mut diagnostic = err.into_diagnostic(&handler); diagnostic.emit(); let v = error_buffer.0.lock().unwrap(); let lines = v .iter() .map(|d| { if let Some(span) = d.span.primary_span() { let loc = source_map.lookup_char_pos(span.lo); let file_name = match &loc.file.name { FileName::Custom(n) => n, _ => unreachable!(), }; format!( "{} at {}:{}:{}", d.message(), file_name, loc.line, loc.col_display ) } else { d.message() } }) .collect::<Vec<_>>(); Self { lines } } } impl std::error::Error for ParseError {} impl std::fmt::Display for ParseError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for line in &self.lines { writeln!(f, "{}", line)?; } Ok(()) } } impl std::fmt::Debug for ParseError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Display::fmt(self, f) } } /// A buffer for collecting errors from the AST parser. #[derive(Debug, Clone, Default)] pub struct ErrorBuffer(Arc<Mutex<Vec<Diagnostic>>>); impl Emitter for ErrorBuffer { fn emit(&mut self, db: &DiagnosticBuilder) { self.0.lock().unwrap().push((**db).clone()); } } /// Options which can be adjusted when transpiling a module. #[derive(Debug, Clone)] pub struct EmitOptions { /// Indicate if JavaScript is being checked/transformed as well, or if it is /// only TypeScript. pub check_js: bool, /// When emitting a legacy decorator, also emit experimental decorator meta /// data. Defaults to `false`. pub emit_metadata: bool, /// Should the source map be inlined in the emitted code file, or provided /// as a separate file. Defaults to `true`. pub inline_source_map: bool, /// When transforming JSX, what value should be used for the JSX factory. /// Defaults to `React.createElement`. pub jsx_factory: String, /// When transforming JSX, what value should be used for the JSX fragment /// factory. Defaults to `React.Fragment`. pub jsx_fragment_factory: String, /// Should JSX be transformed or preserved. Defaults to `true`. pub transform_jsx: bool, } impl Default for EmitOptions { fn default() -> Self { EmitOptions { check_js: false, emit_metadata: false, inline_source_map: true, jsx_factory: "h".into(), jsx_fragment_factory: "Fragment".into(), transform_jsx: true, } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_get_syntax()
#[test] fn jsx() { let url = Url::parse( "https://deno.land/x/[email protected]/example/pages/dynamic/%5Bname%5D.tsx", ) .unwrap(); let source = r#" import { Fragment, h } from "../../deps.ts"; import type { PageProps } from "../../deps.ts"; function UserPage(props: PageProps) { const name = props.route?.name ?? ""; return ( <> <h1>This is the page for {name}</h1> <p> <a href="/">Go home</a> </p> </> ); } export default UserPage; "#; let (deps, _transpiled) = get_deps_and_transpile(&url, source, &None).unwrap(); assert_eq!(deps.len(), 1); } #[test] #[ignore] fn complex_types() { let url = Url::parse("https://deno.land/x/[email protected]/router.ts").unwrap(); let source = r#" delete<P extends RouteParams = RP, S extends State = RS>( name: string, path: string, ...middleware: RouterMiddleware<P, S>[] ): Router<P extends RP ? P : (P & RP), S extends RS ? S : (S & RS)>; "#; let (deps, _transpiled) = get_deps_and_transpile(&url, source, &None).unwrap(); assert_eq!(deps.len(), 0); } #[test] #[ignore] fn dynamic_import() { let url = Url::parse("https://deno.land/x/[email protected]/router.ts").unwrap(); let source = r#" await import("fs"); await import("https://deno.land/std/version.ts"); "#; let (deps, _transpiled) = get_deps_and_transpile(&url, source, &None).unwrap(); assert_eq!(deps.len(), 0); } }
{ // Prefer content-type over extension. let url = Url::parse("https://deno.land/x/[email protected]/bar.js").unwrap(); let content_type = Some("text/jsx".to_string()); let syntax = get_syntax(&url, &content_type); assert!(syntax.jsx()); assert!(!syntax.typescript()); // Fallback to extension if content-type is unsupported. let url = Url::parse("https://deno.land/x/[email protected]/bar.tsx").unwrap(); let content_type = Some("text/unsupported".to_string()); let syntax = get_syntax(&url, &content_type); assert!(syntax.jsx()); assert!(syntax.typescript()); }
identifier_body
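A sketch of the inline source-map step at the end of this record's `get_deps_and_transpile`: the serialized map is base64-encoded and appended as a `sourceMappingURL` data URI, using the same `base64::encode` free function (base64 0.13-era API) as the record:

fn append_inline_source_map(src: &mut String, map_json: &[u8]) {
    // Append the serialized map as a data URI, matching the record's convention.
    src.push_str("//# sourceMappingURL=data:application/json;base64,");
    src.push_str(&base64::encode(map_json));
}

fn main() {
    let mut src = String::from("console.log(1);\n");
    append_inline_source_map(&mut src, br#"{"version":3,"mappings":""}"#);
    assert!(src.contains("data:application/json;base64,"));
}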
parser.rs
use crate::error::Error; use crate::resolve_import::resolve_import; use std::sync::Arc; use std::sync::Mutex; use swc_common::comments::SingleThreadedComments; use swc_common::errors::Diagnostic; use swc_common::errors::DiagnosticBuilder; use swc_common::errors::Emitter; use swc_common::errors::Handler; use swc_common::errors::HandlerFlags; use swc_common::input::StringInput; use swc_common::FileName; use swc_common::SourceMap; use swc_ecmascript::ast::Program; use swc_ecmascript::dep_graph::analyze_dependencies; use swc_ecmascript::dep_graph::DependencyKind; use swc_ecmascript::parser::lexer::Lexer; use swc_ecmascript::parser::EsConfig; use swc_ecmascript::parser::JscTarget; use swc_ecmascript::parser::Parser; use swc_ecmascript::parser::Syntax; use swc_ecmascript::parser::TsConfig; use url::Url; // Returns (deps, transpiled source code) pub fn get_deps_and_transpile( url: &Url, source: &str, content_type: &Option<String>, ) -> Result<(Vec<Url>, Option<String>), Error> { let comments = SingleThreadedComments::default(); let source_map = SourceMap::default(); let source_file = source_map .new_source_file(FileName::Custom(url.to_string()), source.to_string()); let input = StringInput::from(&*source_file); let syntax = get_syntax(url, content_type); let lexer = Lexer::new(syntax, JscTarget::Es2020, input, Some(&comments)); let mut parser = Parser::new_from(lexer); let module = parser .parse_module() .map_err(|e| ParseError::new(e, &source_map))?; let mut deps = Vec::new(); for import in analyze_dependencies(&module, &source_map, &comments) { if (import.kind == DependencyKind::Import || import.kind == DependencyKind::Export) && import.is_dynamic == false { let specifier = import.specifier.to_string(); deps.push(resolve_import(&specifier, url.as_str())?); } } // If the file is not jsx, ts, or tsx we do not need to transform it. In that // case source == transformed. if!syntax.jsx() &&!syntax.typescript() { return Ok((deps, None)); } use swc_ecmascript::transforms::react; let program = Program::Module(module); let options = EmitOptions::default(); let source_map = std::rc::Rc::new(source_map); let jsx_pass = react::react( source_map.clone(), Some(&comments), react::Options { pragma: options.jsx_factory.clone(), pragma_frag: options.jsx_fragment_factory.clone(), // this will use `Object.assign()` instead of the `_extends` helper // when spreading props. 
use_builtins: true, ..Default::default() }, ); use swc_common::chain; use swc_common::Globals; use swc_ecmascript::transforms::fixer; use swc_ecmascript::transforms::helpers; use swc_ecmascript::transforms::pass::Optional; use swc_ecmascript::transforms::proposals; use swc_ecmascript::transforms::typescript; use swc_ecmascript::visit::FoldWith; let mut passes = chain!( Optional::new(jsx_pass, options.transform_jsx), proposals::decorators::decorators(proposals::decorators::Config { legacy: true, emit_metadata: options.emit_metadata }), helpers::inject_helpers(), typescript::strip(), fixer(Some(&comments)), ); let program = swc_common::GLOBALS.set(&Globals::new(), || { helpers::HELPERS.set(&helpers::Helpers::new(false), || { program.fold_with(&mut passes) }) }); use swc_ecmascript::codegen::text_writer::JsWriter; use swc_ecmascript::codegen::Node; let mut src_map_buf = vec![]; let mut buf = vec![]; { let writer = Box::new(JsWriter::new( source_map.clone(), "\n", &mut buf, Some(&mut src_map_buf), )); let config = swc_ecmascript::codegen::Config { minify: false }; let mut emitter = swc_ecmascript::codegen::Emitter { cfg: config, comments: Some(&comments), cm: source_map.clone(), wr: writer, }; program .emit_with(&mut emitter) .map_err(|err| Error::Other(Box::new(err)))?; } let mut src = String::from_utf8(buf).map_err(|err| Error::Other(Box::new(err)))?; { let mut buf = Vec::new(); source_map .build_source_map_from(&mut src_map_buf, None) .to_writer(&mut buf) .map_err(|err| Error::Other(Box::new(err)))?; src.push_str("//# sourceMappingURL=data:application/json;base64,"); let encoded_map = base64::encode(buf); src.push_str(&encoded_map); } Ok((deps, Some(src))) } fn get_syntax(url: &Url, maybe_content_type: &Option<String>) -> Syntax { fn get_es_config(jsx: bool) -> EsConfig { EsConfig { class_private_methods: true, class_private_props: true, class_props: true, dynamic_import: true, export_default_from: true, export_namespace_from: true, import_meta: true, jsx, nullish_coalescing: true, num_sep: true, optional_chaining: true, top_level_await: true, ..EsConfig::default() } } fn get_ts_config(tsx: bool, dts: bool) -> TsConfig { TsConfig { decorators: true, dts, dynamic_import: true, tsx, ..TsConfig::default() } } let maybe_extension = if let Some(content_type) = maybe_content_type { match content_type .split(";") .next() .unwrap() .trim() .to_lowercase() .as_ref() { "application/typescript" | "text/typescript" | "video/vnd.dlna.mpeg-tts" | "video/mp2t" | "application/x-typescript" => Some("ts"), "application/javascript" | "text/javascript" | "application/ecmascript" | "text/ecmascript" | "application/x-javascript" | "application/node" => Some("js"), "text/jsx" => Some("jsx"), "text/tsx" => Some("tsx"), _ => None, } } else { None }; let extension = if maybe_extension.is_some() { maybe_extension } else { let parts: Vec<&str> = url.as_str().split('.').collect(); parts.last().copied() }; match extension { Some("js") => Syntax::Es(get_es_config(false)), Some("jsx") => Syntax::Es(get_es_config(true)), Some("ts") => Syntax::Typescript(get_ts_config(false, false)), Some("tsx") => Syntax::Typescript(get_ts_config(true, false)), _ => Syntax::Typescript(get_ts_config(false, false)), } } pub struct ParseError { lines: Vec<String>, } impl ParseError { fn new( err: swc_ecmascript::parser::error::Error, source_map: &SourceMap, ) -> Self { let error_buffer = ErrorBuffer::default(); let handler = Handler::with_emitter_and_flags( Box::new(error_buffer.clone()), HandlerFlags { can_emit_warnings: true, 
dont_buffer_diagnostics: true, ..HandlerFlags::default() }, ); let mut diagnostic = err.into_diagnostic(&handler); diagnostic.emit(); let v = error_buffer.0.lock().unwrap(); let lines = v .iter() .map(|d| { if let Some(span) = d.span.primary_span() { let loc = source_map.lookup_char_pos(span.lo); let file_name = match &loc.file.name { FileName::Custom(n) => n, _ => unreachable!(), }; format!( "{} at {}:{}:{}", d.message(), file_name, loc.line, loc.col_display ) } else { d.message() } }) .collect::<Vec<_>>(); Self { lines } } } impl std::error::Error for ParseError {} impl std::fmt::Display for ParseError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for line in &self.lines { writeln!(f, "{}", line)?; } Ok(()) } } impl std::fmt::Debug for ParseError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Display::fmt(self, f) } } /// A buffer for collecting errors from the AST parser. #[derive(Debug, Clone, Default)] pub struct ErrorBuffer(Arc<Mutex<Vec<Diagnostic>>>); impl Emitter for ErrorBuffer { fn emit(&mut self, db: &DiagnosticBuilder) { self.0.lock().unwrap().push((**db).clone()); } } /// Options which can be adjusted when transpiling a module. #[derive(Debug, Clone)] pub struct EmitOptions { /// Indicate if JavaScript is being checked/transformed as well, or if it is /// only TypeScript. pub check_js: bool, /// When emitting a legacy decorator, also emit experimental decorator meta /// data. Defaults to `false`. pub emit_metadata: bool, /// Should the source map be inlined in the emitted code file, or provided /// as a separate file. Defaults to `true`. pub inline_source_map: bool, /// When transforming JSX, what value should be used for the JSX factory. /// Defaults to `React.createElement`. pub jsx_factory: String, /// When transforming JSX, what value should be used for the JSX fragment /// factory. Defaults to `React.Fragment`. pub jsx_fragment_factory: String, /// Should JSX be transformed or preserved. Defaults to `true`. pub transform_jsx: bool, } impl Default for EmitOptions { fn default() -> Self { EmitOptions { check_js: false, emit_metadata: false, inline_source_map: true, jsx_factory: "h".into(), jsx_fragment_factory: "Fragment".into(), transform_jsx: true, } } } #[cfg(test)] mod tests { use super::*; #[test]
let syntax = get_syntax(&url, &content_type); assert!(syntax.jsx()); assert!(!syntax.typescript()); // Fallback to extension if content-type is unsupported. let url = Url::parse("https://deno.land/x/[email protected]/bar.tsx").unwrap(); let content_type = Some("text/unsupported".to_string()); let syntax = get_syntax(&url, &content_type); assert!(syntax.jsx()); assert!(syntax.typescript()); } #[test] fn jsx() { let url = Url::parse( "https://deno.land/x/[email protected]/example/pages/dynamic/%5Bname%5D.tsx", ) .unwrap(); let source = r#" import { Fragment, h } from "../../deps.ts"; import type { PageProps } from "../../deps.ts"; function UserPage(props: PageProps) { const name = props.route?.name ?? ""; return ( <> <h1>This is the page for {name}</h1> <p> <a href="/">Go home</a> </p> </> ); } export default UserPage; "#; let (deps, _transpiled) = get_deps_and_transpile(&url, source, &None).unwrap(); assert_eq!(deps.len(), 1); } #[test] #[ignore] fn complex_types() { let url = Url::parse("https://deno.land/x/[email protected]/router.ts").unwrap(); let source = r#" delete<P extends RouteParams = RP, S extends State = RS>( name: string, path: string, ...middleware: RouterMiddleware<P, S>[] ): Router<P extends RP ? P : (P & RP), S extends RS ? S : (S & RS)>; "#; let (deps, _transpiled) = get_deps_and_transpile(&url, source, &None).unwrap(); assert_eq!(deps.len(), 0); } #[test] #[ignore] fn dynamic_import() { let url = Url::parse("https://deno.land/x/[email protected]/router.ts").unwrap(); let source = r#" await import("fs"); await import("https://deno.land/std/version.ts"); "#; let (deps, _transpiled) = get_deps_and_transpile(&url, source, &None).unwrap(); assert_eq!(deps.len(), 0); } }
fn test_get_syntax() { // Prefer content-type over extension. let url = Url::parse("https://deno.land/x/[email protected]/bar.js").unwrap(); let content_type = Some("text/jsx".to_string());
random_line_split
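The record above parses a module with swc, collects its static import and export specifiers, and transpiles only when the syntax is JSX or TypeScript. A minimal usage sketch of get_deps_and_transpile as defined there; the URL and source strings are illustrative, and the crate's Error type is assumed to be in scope:

fn demo_transpile() -> Result<(), Error> {
    // Hypothetical module URL; a None content type makes get_syntax fall
    // back to the ".ts" file extension.
    let url = url::Url::parse("https://example.com/mod.ts").unwrap();
    // One static import, which is collected as a dependency; dynamic
    // imports are skipped by the is_dynamic check.
    let source = r#"import { x } from "./dep.ts"; export const y: number = x;"#;
    let (deps, transpiled) = get_deps_and_transpile(&url, source, &None)?;
    assert_eq!(deps.len(), 1);
    // TypeScript input is rewritten, so Some(source) comes back with an
    // inline base64 source map appended; plain ".js" input would yield None.
    assert!(transpiled.is_some());
    Ok(())
}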
main.rs
use once_cell::sync::Lazy; use libp2p::{PeerId, Transport, mplex, Swarm, NetworkBehaviour}; use libp2p::identity; use libp2p::floodsub::{Topic, Floodsub, FloodsubEvent}; use libp2p::noise::{X25519Spec, Keypair, NoiseConfig}; use libp2p::tcp::TokioTcpConfig; use libp2p::core::upgrade; use libp2p::mdns::{TokioMdns, MdnsEvent}; use tokio::sync::mpsc; use libp2p::swarm::{SwarmBuilder, NetworkBehaviourEventProcess}; use tokio::io::AsyncBufReadExt; use std::collections::HashSet; use serde::{Serialize, Deserialize}; use log::{error, info}; const STORAGE_FILE_PATH: &str = "./recipes.json"; type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync +'static>>; static KEYS: Lazy<identity::Keypair> = Lazy::new(|| identity::Keypair::generate_ed25519()); static PEER_ID: Lazy<PeerId> = Lazy::new(|| PeerId::from(KEYS.public())); static TOPIC: Lazy<Topic> = Lazy::new(|| Topic::new("recipes")); type Recipes = Vec<Recipe>; #[derive(Debug, Serialize, Deserialize)] struct Recipe { id: usize, name: String, ingredients: String, instructions: String, public: bool, } #[derive(Debug, Serialize, Deserialize)] enum ListMode { ALL, One(String) } #[derive(Debug, Serialize, Deserialize)] struct ListRequest { mode: ListMode } #[derive(Debug, Serialize, Deserialize)] struct ListResponse { mode: ListMode, data: Recipes, receiver: String, } enum EventType { Response(ListResponse), Input(String) } #[derive(NetworkBehaviour)] struct
{ floodsub: Floodsub, mdns: TokioMdns, #[behaviour(ignore)] response_sender: mpsc::UnboundedSender<ListResponse>, } #[tokio::main] async fn main() { pretty_env_logger::init(); info!("Peer Id: {}", PEER_ID.clone()); let (response_sender, mut response_rcv) = tokio::sync::mpsc::unbounded_channel(); let auth_keys = Keypair::<X25519Spec>::new() .into_authentic(&KEYS) .expect("can create auth keys"); let transport = TokioTcpConfig::new() .upgrade(upgrade::Version::V1) .authenticate(NoiseConfig::xx(auth_keys).into_authenticated()) .multiplex(mplex::MplexConfig::new()) .boxed(); let mut behaviour = RecipeBehaviour { floodsub: Floodsub::new(PEER_ID.clone()), mdns: TokioMdns::new().expect("can create mdns"), response_sender }; behaviour.floodsub.subscribe(TOPIC.clone()); let mut swarm = SwarmBuilder::new(transport, behaviour, PEER_ID.clone()) .executor(Box::new(|fut| { tokio::spawn(fut); })).build(); Swarm::listen_on( &mut swarm, "/ip4/0.0.0.0/tcp/0" .parse() .expect("can get a local socket")) .expect("swarm can be started"); let mut stdin = tokio::io::BufReader::new(tokio::io::stdin()).lines(); loop { let evt = { tokio::select! { line = stdin.next_line() => Some(EventType::Input(line.expect("can get line").expect("can read from stdin"))), event = swarm.next() => { info!("Unhandled Swarm Event: {:?}", event); None }, response = response_rcv.recv() => Some(EventType::Response(response.expect("response exists"))) } }; if let Some(event) = evt { match event { EventType::Response(resp) => { let json = serde_json::to_string(&resp).expect("can jsonify response"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); }, EventType::Input(line) => match line.as_str() { "ls p" => handle_list_peers(&mut swarm).await, cmd if cmd.starts_with("ls r") => handle_list_recipes(cmd, &mut swarm).await, cmd if cmd.starts_with("create r") => handle_create_recipe(cmd).await, cmd if cmd.starts_with("publish r") => handle_publish_recipe(cmd).await, _ => error!("unknown command") } } } } } impl NetworkBehaviourEventProcess<MdnsEvent> for RecipeBehaviour { fn inject_event(&mut self, event: MdnsEvent) { match event { MdnsEvent::Discovered(discovered_list) => { for (peer, _addr) in discovered_list { self.floodsub.add_node_to_partial_view(peer); } } MdnsEvent::Expired(expired_list) => { for (peer, _addr) in expired_list { if!self.mdns.has_node(&peer) { self.floodsub.remove_node_from_partial_view(&peer); } } } } } } impl NetworkBehaviourEventProcess<FloodsubEvent> for RecipeBehaviour { fn inject_event(&mut self, event: FloodsubEvent) { match event { FloodsubEvent::Message(msg) => { if let Ok(resp) = serde_json::from_slice::<ListResponse>(&msg.data) { if resp.receiver == PEER_ID.to_string() { info!("response from {}:", msg.source); resp.data.iter().for_each(|r| info!("{:?}", r)); } } else if let Ok(req) = serde_json::from_slice::<ListRequest>(&msg.data) { match req.mode { ListMode::ALL => { info!("received ALL req: {:?} from {:?}", req, msg.source); respond_with_publish_recipes( self.response_sender.clone(), msg.source.to_string() ); } ListMode::One(ref peer_id) => { if peer_id == &PEER_ID.to_string() { info!("received req: {:?} from {:?}", req, msg.source); respond_with_publish_recipes( self.response_sender.clone(), msg.source.to_string() ); } } } } }, _ => () } } } async fn handle_list_peers(swarm: &mut Swarm<RecipeBehaviour>) { info!("Discovered Peers:"); let nodes = swarm.mdns.discovered_nodes(); let mut unique_peers = HashSet::new(); for peer in nodes { unique_peers.insert(peer); } unique_peers.iter().for_each(|p| 
info!("{}", p)); } async fn handle_list_recipes(cmd: &str, swarm: &mut Swarm<RecipeBehaviour>) { let rest = cmd.strip_prefix("ls r "); match rest { Some("all") => { let req = ListRequest { mode: ListMode::ALL }; let json = serde_json::to_string(&req).expect("can jsonify request"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); } Some(recipes_peer_id) => { let req = ListRequest { mode: ListMode::One(recipes_peer_id.to_owned()) }; let json = serde_json::to_string(&req).expect("can jsonify request"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); } None => { match read_local_recipes().await { Ok(v) => { info!("local recipes ({})", v.len()); v.iter().for_each(|r| info!("{:?}", r)); } Err(e) => error!("error fetching local recipes: {}", e) } } } } async fn handle_create_recipe(cmd: &str) { if let Some(rest) = cmd.strip_prefix("create r") { let elements: Vec<&str> = rest.split("|").collect(); if elements.len() < 3 { info!("too few arguments - Format: name|ingredients|instructions"); } else { let name = elements.get(0).expect("name is there"); let ingredients = elements.get(1).expect("ingredients is there"); let instruments = elements.get(2).expect("instruments is there"); if let Err(e) = create_new_recipe(name, ingredients, instruments).await { error!("error creating recipe: {}", e); } } } } fn respond_with_publish_recipes(sender: mpsc::UnboundedSender<ListResponse>, receiver: String) { tokio::spawn(async move { match read_local_recipes().await { Ok(recipes) => { let resp = ListResponse { mode: ListMode::ALL, receiver, data: recipes.into_iter().filter(|r| r.public).collect() }; if let Err(e) = sender.send(resp) { error!("error sending response via channel, {}", e); } } Err(e) => error!("error fetching local recipes to answer ALL request, {}", e) } }); } async fn handle_publish_recipe(cmd: &str) { if let Some(rest) = cmd.strip_prefix("publish r") { match rest.trim().parse::<usize>() { Ok(id) => { if let Err(e) = publish_recipe(id).await { info!("error publishing recipe with id {}, {}", id, e); } else { info!("published recipe with id: {}", id); } } Err(e) => error!("invalid id: {}, {}", rest.trim(), e) } } } async fn create_new_recipe(name: &str, ingredients: &str, instructions: &str) -> Result<()> { let mut local_recipes = read_local_recipes().await?; let new_id = match local_recipes.iter().max_by_key(|r|r.id) { Some(v) => v.id + 1, None => 0 }; local_recipes.push(Recipe { id: new_id, name: name.to_owned(), ingredients: ingredients.to_owned(), instructions: instructions.to_owned(), public: false }); write_local_recipes(&local_recipes).await?; info!("create recipe:"); info!("name: {}", name); info!("ingredients: {}", ingredients); info!("instruments: {}", instructions); Ok(()) } async fn publish_recipe(id: usize) -> Result<()> { let mut local_recipes = read_local_recipes().await?; local_recipes .iter_mut() .filter(|r|r.id == id) .for_each(|r| r.public = true); write_local_recipes(&local_recipes).await?; Ok(()) } async fn read_local_recipes() -> Result<Recipes> { let content = tokio::fs::read(STORAGE_FILE_PATH).await?; let result = serde_json::from_slice(&content)?; Ok(result) } async fn write_local_recipes(recipes: &Recipes) -> Result<()> { let json = serde_json::to_string(&recipes)?; tokio::fs::write(STORAGE_FILE_PATH, &json).await?; Ok(()) }
RecipeBehaviour
identifier_name
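For reference, a sketch of the on-disk format the recipe app above keeps at STORAGE_FILE_PATH ("./recipes.json"): a JSON array of Recipe values. The sample data is hypothetical; Recipes and the Result alias are the ones defined in the record:

fn demo_storage_format() -> Result<()> {
    let raw = r#"[{"id":0,"name":"Coffee","ingredients":"water, grounds","instructions":"brew four minutes","public":false}]"#;
    // read_local_recipes deserializes exactly this shape via serde_json.
    let recipes: Recipes = serde_json::from_slice(raw.as_bytes())?;
    assert_eq!(recipes.len(), 1);
    // Recipes start private; "publish r <id>" flips `public` to true.
    assert!(!recipes[0].public);
    Ok(())
}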
main.rs
use once_cell::sync::Lazy; use libp2p::{PeerId, Transport, mplex, Swarm, NetworkBehaviour}; use libp2p::identity; use libp2p::floodsub::{Topic, Floodsub, FloodsubEvent}; use libp2p::noise::{X25519Spec, Keypair, NoiseConfig}; use libp2p::tcp::TokioTcpConfig; use libp2p::core::upgrade; use libp2p::mdns::{TokioMdns, MdnsEvent}; use tokio::sync::mpsc; use libp2p::swarm::{SwarmBuilder, NetworkBehaviourEventProcess}; use tokio::io::AsyncBufReadExt; use std::collections::HashSet; use serde::{Serialize, Deserialize}; use log::{error, info}; const STORAGE_FILE_PATH: &str = "./recipes.json"; type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync +'static>>; static KEYS: Lazy<identity::Keypair> = Lazy::new(|| identity::Keypair::generate_ed25519()); static PEER_ID: Lazy<PeerId> = Lazy::new(|| PeerId::from(KEYS.public())); static TOPIC: Lazy<Topic> = Lazy::new(|| Topic::new("recipes")); type Recipes = Vec<Recipe>; #[derive(Debug, Serialize, Deserialize)] struct Recipe { id: usize, name: String, ingredients: String, instructions: String, public: bool, } #[derive(Debug, Serialize, Deserialize)] enum ListMode { ALL, One(String) } #[derive(Debug, Serialize, Deserialize)] struct ListRequest { mode: ListMode } #[derive(Debug, Serialize, Deserialize)] struct ListResponse { mode: ListMode, data: Recipes, receiver: String, } enum EventType { Response(ListResponse), Input(String) } #[derive(NetworkBehaviour)] struct RecipeBehaviour { floodsub: Floodsub, mdns: TokioMdns, #[behaviour(ignore)] response_sender: mpsc::UnboundedSender<ListResponse>, } #[tokio::main] async fn main() { pretty_env_logger::init(); info!("Peer Id: {}", PEER_ID.clone()); let (response_sender, mut response_rcv) = tokio::sync::mpsc::unbounded_channel(); let auth_keys = Keypair::<X25519Spec>::new() .into_authentic(&KEYS) .expect("can create auth keys"); let transport = TokioTcpConfig::new() .upgrade(upgrade::Version::V1) .authenticate(NoiseConfig::xx(auth_keys).into_authenticated()) .multiplex(mplex::MplexConfig::new()) .boxed(); let mut behaviour = RecipeBehaviour { floodsub: Floodsub::new(PEER_ID.clone()), mdns: TokioMdns::new().expect("can create mdns"), response_sender }; behaviour.floodsub.subscribe(TOPIC.clone()); let mut swarm = SwarmBuilder::new(transport, behaviour, PEER_ID.clone()) .executor(Box::new(|fut| { tokio::spawn(fut); })).build(); Swarm::listen_on( &mut swarm, "/ip4/0.0.0.0/tcp/0" .parse() .expect("can get a local socket")) .expect("swarm can be started"); let mut stdin = tokio::io::BufReader::new(tokio::io::stdin()).lines(); loop { let evt = { tokio::select! 
{ line = stdin.next_line() => Some(EventType::Input(line.expect("can get line").expect("can read from stdin"))), event = swarm.next() => { info!("Unhandled Swarm Event: {:?}", event); None }, response = response_rcv.recv() => Some(EventType::Response(response.expect("response exists"))) } }; if let Some(event) = evt { match event { EventType::Response(resp) => { let json = serde_json::to_string(&resp).expect("can jsonify response"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); }, EventType::Input(line) => match line.as_str() { "ls p" => handle_list_peers(&mut swarm).await, cmd if cmd.starts_with("ls r") => handle_list_recipes(cmd, &mut swarm).await, cmd if cmd.starts_with("create r") => handle_create_recipe(cmd).await, cmd if cmd.starts_with("publish r") => handle_publish_recipe(cmd).await, _ => error!("unknown command") } } } } } impl NetworkBehaviourEventProcess<MdnsEvent> for RecipeBehaviour { fn inject_event(&mut self, event: MdnsEvent) { match event { MdnsEvent::Discovered(discovered_list) => { for (peer, _addr) in discovered_list { self.floodsub.add_node_to_partial_view(peer); } } MdnsEvent::Expired(expired_list) => { for (peer, _addr) in expired_list { if!self.mdns.has_node(&peer) { self.floodsub.remove_node_from_partial_view(&peer); } } } } } } impl NetworkBehaviourEventProcess<FloodsubEvent> for RecipeBehaviour { fn inject_event(&mut self, event: FloodsubEvent) { match event { FloodsubEvent::Message(msg) => { if let Ok(resp) = serde_json::from_slice::<ListResponse>(&msg.data) { if resp.receiver == PEER_ID.to_string() { info!("response from {}:", msg.source); resp.data.iter().for_each(|r| info!("{:?}", r)); } } else if let Ok(req) = serde_json::from_slice::<ListRequest>(&msg.data) { match req.mode { ListMode::ALL => { info!("received ALL req: {:?} from {:?}", req, msg.source); respond_with_publish_recipes( self.response_sender.clone(), msg.source.to_string() ); } ListMode::One(ref peer_id) => { if peer_id == &PEER_ID.to_string() { info!("received req: {:?} from {:?}", req, msg.source); respond_with_publish_recipes( self.response_sender.clone(), msg.source.to_string() ); } } } } }, _ => () } } } async fn handle_list_peers(swarm: &mut Swarm<RecipeBehaviour>) { info!("Discovered Peers:"); let nodes = swarm.mdns.discovered_nodes(); let mut unique_peers = HashSet::new(); for peer in nodes { unique_peers.insert(peer); } unique_peers.iter().for_each(|p| info!("{}", p)); } async fn handle_list_recipes(cmd: &str, swarm: &mut Swarm<RecipeBehaviour>) { let rest = cmd.strip_prefix("ls r "); match rest { Some("all") => { let req = ListRequest { mode: ListMode::ALL }; let json = serde_json::to_string(&req).expect("can jsonify request"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); } Some(recipes_peer_id) => { let req = ListRequest { mode: ListMode::One(recipes_peer_id.to_owned()) }; let json = serde_json::to_string(&req).expect("can jsonify request"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); } None => { match read_local_recipes().await { Ok(v) => { info!("local recipes ({})", v.len()); v.iter().for_each(|r| info!("{:?}", r)); } Err(e) => error!("error fetching local recipes: {}", e) } } } } async fn handle_create_recipe(cmd: &str) { if let Some(rest) = cmd.strip_prefix("create r") { let elements: Vec<&str> = rest.split("|").collect(); if elements.len() < 3 { info!("too few arguments - Format: name|ingredients|instructions"); } else { let name = elements.get(0).expect("name is there"); let ingredients = elements.get(1).expect("ingredients is there"); let 
instruments = elements.get(2).expect("instruments is there"); if let Err(e) = create_new_recipe(name, ingredients, instruments).await { error!("error creating recipe: {}", e); } } } } fn respond_with_publish_recipes(sender: mpsc::UnboundedSender<ListResponse>, receiver: String) { tokio::spawn(async move { match read_local_recipes().await { Ok(recipes) => { let resp = ListResponse { mode: ListMode::ALL, receiver, data: recipes.into_iter().filter(|r| r.public).collect() }; if let Err(e) = sender.send(resp) { error!("error sending response via channel, {}", e); } } Err(e) => error!("error fetching local recipes to answer ALL request, {}", e) } }); } async fn handle_publish_recipe(cmd: &str) { if let Some(rest) = cmd.strip_prefix("publish r") { match rest.trim().parse::<usize>() { Ok(id) => { if let Err(e) = publish_recipe(id).await { info!("error publishing recipe with id {}, {}", id, e); } else { info!("published recipe with id: {}", id); } } Err(e) => error!("invalid id: {}, {}", rest.trim(), e) } } } async fn create_new_recipe(name: &str, ingredients: &str, instructions: &str) -> Result<()> { let mut local_recipes = read_local_recipes().await?; let new_id = match local_recipes.iter().max_by_key(|r|r.id) { Some(v) => v.id + 1, None => 0 }; local_recipes.push(Recipe {
}); write_local_recipes(&local_recipes).await?; info!("create recipe:"); info!("name: {}", name); info!("ingredients: {}", ingredients); info!("instructions: {}", instructions); Ok(()) } async fn publish_recipe(id: usize) -> Result<()> { let mut local_recipes = read_local_recipes().await?; local_recipes .iter_mut() .filter(|r| r.id == id) .for_each(|r| r.public = true); write_local_recipes(&local_recipes).await?; Ok(()) } async fn read_local_recipes() -> Result<Recipes> { let content = tokio::fs::read(STORAGE_FILE_PATH).await?; let result = serde_json::from_slice(&content)?; Ok(result) } async fn write_local_recipes(recipes: &Recipes) -> Result<()> { let json = serde_json::to_string(&recipes)?; tokio::fs::write(STORAGE_FILE_PATH, &json).await?; Ok(()) }
id: new_id, name: name.to_owned(), ingredients: ingredients.to_owned(), instructions: instructions.to_owned(), public: false
random_line_split
main.rs
use once_cell::sync::Lazy; use libp2p::{PeerId, Transport, mplex, Swarm, NetworkBehaviour}; use libp2p::identity; use libp2p::floodsub::{Topic, Floodsub, FloodsubEvent}; use libp2p::noise::{X25519Spec, Keypair, NoiseConfig}; use libp2p::tcp::TokioTcpConfig; use libp2p::core::upgrade; use libp2p::mdns::{TokioMdns, MdnsEvent}; use tokio::sync::mpsc; use libp2p::swarm::{SwarmBuilder, NetworkBehaviourEventProcess}; use tokio::io::AsyncBufReadExt; use std::collections::HashSet; use serde::{Serialize, Deserialize}; use log::{error, info}; const STORAGE_FILE_PATH: &str = "./recipes.json"; type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync +'static>>; static KEYS: Lazy<identity::Keypair> = Lazy::new(|| identity::Keypair::generate_ed25519()); static PEER_ID: Lazy<PeerId> = Lazy::new(|| PeerId::from(KEYS.public())); static TOPIC: Lazy<Topic> = Lazy::new(|| Topic::new("recipes")); type Recipes = Vec<Recipe>; #[derive(Debug, Serialize, Deserialize)] struct Recipe { id: usize, name: String, ingredients: String, instructions: String, public: bool, } #[derive(Debug, Serialize, Deserialize)] enum ListMode { ALL, One(String) } #[derive(Debug, Serialize, Deserialize)] struct ListRequest { mode: ListMode } #[derive(Debug, Serialize, Deserialize)] struct ListResponse { mode: ListMode, data: Recipes, receiver: String, } enum EventType { Response(ListResponse), Input(String) } #[derive(NetworkBehaviour)] struct RecipeBehaviour { floodsub: Floodsub, mdns: TokioMdns, #[behaviour(ignore)] response_sender: mpsc::UnboundedSender<ListResponse>, } #[tokio::main] async fn main() { pretty_env_logger::init(); info!("Peer Id: {}", PEER_ID.clone()); let (response_sender, mut response_rcv) = tokio::sync::mpsc::unbounded_channel(); let auth_keys = Keypair::<X25519Spec>::new() .into_authentic(&KEYS) .expect("can create auth keys"); let transport = TokioTcpConfig::new() .upgrade(upgrade::Version::V1) .authenticate(NoiseConfig::xx(auth_keys).into_authenticated()) .multiplex(mplex::MplexConfig::new()) .boxed(); let mut behaviour = RecipeBehaviour { floodsub: Floodsub::new(PEER_ID.clone()), mdns: TokioMdns::new().expect("can create mdns"), response_sender }; behaviour.floodsub.subscribe(TOPIC.clone()); let mut swarm = SwarmBuilder::new(transport, behaviour, PEER_ID.clone()) .executor(Box::new(|fut| { tokio::spawn(fut); })).build(); Swarm::listen_on( &mut swarm, "/ip4/0.0.0.0/tcp/0" .parse() .expect("can get a local socket")) .expect("swarm can be started"); let mut stdin = tokio::io::BufReader::new(tokio::io::stdin()).lines(); loop { let evt = { tokio::select! 
{ line = stdin.next_line() => Some(EventType::Input(line.expect("can get line").expect("can read from stdin"))), event = swarm.next() => { info!("Unhandled Swarm Event: {:?}", event); None }, response = response_rcv.recv() => Some(EventType::Response(response.expect("response exists"))) } }; if let Some(event) = evt { match event { EventType::Response(resp) => { let json = serde_json::to_string(&resp).expect("can jsonify response"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); }, EventType::Input(line) => match line.as_str() { "ls p" => handle_list_peers(&mut swarm).await, cmd if cmd.starts_with("ls r") => handle_list_recipes(cmd, &mut swarm).await, cmd if cmd.starts_with("create r") => handle_create_recipe(cmd).await, cmd if cmd.starts_with("publish r") => handle_publish_recipe(cmd).await, _ => error!("unknown command") } } } } } impl NetworkBehaviourEventProcess<MdnsEvent> for RecipeBehaviour { fn inject_event(&mut self, event: MdnsEvent) { match event { MdnsEvent::Discovered(discovered_list) => { for (peer, _addr) in discovered_list { self.floodsub.add_node_to_partial_view(peer); } } MdnsEvent::Expired(expired_list) => { for (peer, _addr) in expired_list { if!self.mdns.has_node(&peer) { self.floodsub.remove_node_from_partial_view(&peer); } } } } } } impl NetworkBehaviourEventProcess<FloodsubEvent> for RecipeBehaviour { fn inject_event(&mut self, event: FloodsubEvent) { match event { FloodsubEvent::Message(msg) => { if let Ok(resp) = serde_json::from_slice::<ListResponse>(&msg.data) { if resp.receiver == PEER_ID.to_string() { info!("response from {}:", msg.source); resp.data.iter().for_each(|r| info!("{:?}", r)); } } else if let Ok(req) = serde_json::from_slice::<ListRequest>(&msg.data) { match req.mode { ListMode::ALL => { info!("received ALL req: {:?} from {:?}", req, msg.source); respond_with_publish_recipes( self.response_sender.clone(), msg.source.to_string() ); } ListMode::One(ref peer_id) => { if peer_id == &PEER_ID.to_string() { info!("received req: {:?} from {:?}", req, msg.source); respond_with_publish_recipes( self.response_sender.clone(), msg.source.to_string() ); } } } } }, _ => () } } } async fn handle_list_peers(swarm: &mut Swarm<RecipeBehaviour>) { info!("Discovered Peers:"); let nodes = swarm.mdns.discovered_nodes(); let mut unique_peers = HashSet::new(); for peer in nodes { unique_peers.insert(peer); } unique_peers.iter().for_each(|p| info!("{}", p)); } async fn handle_list_recipes(cmd: &str, swarm: &mut Swarm<RecipeBehaviour>) { let rest = cmd.strip_prefix("ls r "); match rest { Some("all") => { let req = ListRequest { mode: ListMode::ALL }; let json = serde_json::to_string(&req).expect("can jsonify request"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); } Some(recipes_peer_id) => { let req = ListRequest { mode: ListMode::One(recipes_peer_id.to_owned()) }; let json = serde_json::to_string(&req).expect("can jsonify request"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); } None => { match read_local_recipes().await { Ok(v) => { info!("local recipes ({})", v.len()); v.iter().for_each(|r| info!("{:?}", r)); } Err(e) => error!("error fetching local recipes: {}", e) } } } } async fn handle_create_recipe(cmd: &str) { if let Some(rest) = cmd.strip_prefix("create r") { let elements: Vec<&str> = rest.split("|").collect(); if elements.len() < 3 { info!("too few arguments - Format: name|ingredients|instructions"); } else { let name = elements.get(0).expect("name is there"); let ingredients = elements.get(1).expect("ingredients is there"); let 
instruments = elements.get(2).expect("instruments is there"); if let Err(e) = create_new_recipe(name, ingredients, instruments).await
} } } fn respond_with_publish_recipes(sender: mpsc::UnboundedSender<ListResponse>, receiver: String) { tokio::spawn(async move { match read_local_recipes().await { Ok(recipes) => { let resp = ListResponse { mode: ListMode::ALL, receiver, data: recipes.into_iter().filter(|r| r.public).collect() }; if let Err(e) = sender.send(resp) { error!("error sending response via channel, {}", e); } } Err(e) => error!("error fetching local recipes to answer ALL request, {}", e) } }); } async fn handle_publish_recipe(cmd: &str) { if let Some(rest) = cmd.strip_prefix("publish r") { match rest.trim().parse::<usize>() { Ok(id) => { if let Err(e) = publish_recipe(id).await { info!("error publishing recipe with id {}, {}", id, e); } else { info!("published recipe with id: {}", id); } } Err(e) => error!("invalid id: {}, {}", rest.trim(), e) } } } async fn create_new_recipe(name: &str, ingredients: &str, instructions: &str) -> Result<()> { let mut local_recipes = read_local_recipes().await?; let new_id = match local_recipes.iter().max_by_key(|r|r.id) { Some(v) => v.id + 1, None => 0 }; local_recipes.push(Recipe { id: new_id, name: name.to_owned(), ingredients: ingredients.to_owned(), instructions: instructions.to_owned(), public: false }); write_local_recipes(&local_recipes).await?; info!("create recipe:"); info!("name: {}", name); info!("ingredients: {}", ingredients); info!("instruments: {}", instructions); Ok(()) } async fn publish_recipe(id: usize) -> Result<()> { let mut local_recipes = read_local_recipes().await?; local_recipes .iter_mut() .filter(|r|r.id == id) .for_each(|r| r.public = true); write_local_recipes(&local_recipes).await?; Ok(()) } async fn read_local_recipes() -> Result<Recipes> { let content = tokio::fs::read(STORAGE_FILE_PATH).await?; let result = serde_json::from_slice(&content)?; Ok(result) } async fn write_local_recipes(recipes: &Recipes) -> Result<()> { let json = serde_json::to_string(&recipes)?; tokio::fs::write(STORAGE_FILE_PATH, &json).await?; Ok(()) }
{ error!("error creating recipe: {}", e); }
conditional_block
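Reconstructed from the handlers in the records above, the stdin commands the select! loop dispatches on, as a comment-only summary (behavior as implemented there):

// ls p            -> list peers discovered via mDNS
// ls r            -> print the recipes in the local recipes.json
// ls r all        -> broadcast a ListRequest for every peer's public recipes
// ls r <peer_id>  -> request the public recipes of a single peer
// create r <name>|<ingredients>|<instructions>
//                 -> append a new, initially private recipe locally
// publish r <id>  -> mark the recipe with that id as public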
main.rs
use once_cell::sync::Lazy; use libp2p::{PeerId, Transport, mplex, Swarm, NetworkBehaviour}; use libp2p::identity; use libp2p::floodsub::{Topic, Floodsub, FloodsubEvent}; use libp2p::noise::{X25519Spec, Keypair, NoiseConfig}; use libp2p::tcp::TokioTcpConfig; use libp2p::core::upgrade; use libp2p::mdns::{TokioMdns, MdnsEvent}; use tokio::sync::mpsc; use libp2p::swarm::{SwarmBuilder, NetworkBehaviourEventProcess}; use tokio::io::AsyncBufReadExt; use std::collections::HashSet; use serde::{Serialize, Deserialize}; use log::{error, info}; const STORAGE_FILE_PATH: &str = "./recipes.json"; type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync +'static>>; static KEYS: Lazy<identity::Keypair> = Lazy::new(|| identity::Keypair::generate_ed25519()); static PEER_ID: Lazy<PeerId> = Lazy::new(|| PeerId::from(KEYS.public())); static TOPIC: Lazy<Topic> = Lazy::new(|| Topic::new("recipes")); type Recipes = Vec<Recipe>; #[derive(Debug, Serialize, Deserialize)] struct Recipe { id: usize, name: String, ingredients: String, instructions: String, public: bool, } #[derive(Debug, Serialize, Deserialize)] enum ListMode { ALL, One(String) } #[derive(Debug, Serialize, Deserialize)] struct ListRequest { mode: ListMode } #[derive(Debug, Serialize, Deserialize)] struct ListResponse { mode: ListMode, data: Recipes, receiver: String, } enum EventType { Response(ListResponse), Input(String) } #[derive(NetworkBehaviour)] struct RecipeBehaviour { floodsub: Floodsub, mdns: TokioMdns, #[behaviour(ignore)] response_sender: mpsc::UnboundedSender<ListResponse>, } #[tokio::main] async fn main() { pretty_env_logger::init(); info!("Peer Id: {}", PEER_ID.clone()); let (response_sender, mut response_rcv) = tokio::sync::mpsc::unbounded_channel(); let auth_keys = Keypair::<X25519Spec>::new() .into_authentic(&KEYS) .expect("can create auth keys"); let transport = TokioTcpConfig::new() .upgrade(upgrade::Version::V1) .authenticate(NoiseConfig::xx(auth_keys).into_authenticated()) .multiplex(mplex::MplexConfig::new()) .boxed(); let mut behaviour = RecipeBehaviour { floodsub: Floodsub::new(PEER_ID.clone()), mdns: TokioMdns::new().expect("can create mdns"), response_sender }; behaviour.floodsub.subscribe(TOPIC.clone()); let mut swarm = SwarmBuilder::new(transport, behaviour, PEER_ID.clone()) .executor(Box::new(|fut| { tokio::spawn(fut); })).build(); Swarm::listen_on( &mut swarm, "/ip4/0.0.0.0/tcp/0" .parse() .expect("can get a local socket")) .expect("swarm can be started"); let mut stdin = tokio::io::BufReader::new(tokio::io::stdin()).lines(); loop { let evt = { tokio::select! 
{ line = stdin.next_line() => Some(EventType::Input(line.expect("can get line").expect("can read from stdin"))), event = swarm.next() => { info!("Unhandled Swarm Event: {:?}", event); None }, response = response_rcv.recv() => Some(EventType::Response(response.expect("response exists"))) } }; if let Some(event) = evt { match event { EventType::Response(resp) => { let json = serde_json::to_string(&resp).expect("can jsonify response"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); }, EventType::Input(line) => match line.as_str() { "ls p" => handle_list_peers(&mut swarm).await, cmd if cmd.starts_with("ls r") => handle_list_recipes(cmd, &mut swarm).await, cmd if cmd.starts_with("create r") => handle_create_recipe(cmd).await, cmd if cmd.starts_with("publish r") => handle_publish_recipe(cmd).await, _ => error!("unknown command") } } } } } impl NetworkBehaviourEventProcess<MdnsEvent> for RecipeBehaviour { fn inject_event(&mut self, event: MdnsEvent) { match event { MdnsEvent::Discovered(discovered_list) => { for (peer, _addr) in discovered_list { self.floodsub.add_node_to_partial_view(peer); } } MdnsEvent::Expired(expired_list) => { for (peer, _addr) in expired_list { if!self.mdns.has_node(&peer) { self.floodsub.remove_node_from_partial_view(&peer); } } } } } } impl NetworkBehaviourEventProcess<FloodsubEvent> for RecipeBehaviour { fn inject_event(&mut self, event: FloodsubEvent) { match event { FloodsubEvent::Message(msg) => { if let Ok(resp) = serde_json::from_slice::<ListResponse>(&msg.data) { if resp.receiver == PEER_ID.to_string() { info!("response from {}:", msg.source); resp.data.iter().for_each(|r| info!("{:?}", r)); } } else if let Ok(req) = serde_json::from_slice::<ListRequest>(&msg.data) { match req.mode { ListMode::ALL => { info!("received ALL req: {:?} from {:?}", req, msg.source); respond_with_publish_recipes( self.response_sender.clone(), msg.source.to_string() ); } ListMode::One(ref peer_id) => { if peer_id == &PEER_ID.to_string() { info!("received req: {:?} from {:?}", req, msg.source); respond_with_publish_recipes( self.response_sender.clone(), msg.source.to_string() ); } } } } }, _ => () } } } async fn handle_list_peers(swarm: &mut Swarm<RecipeBehaviour>) { info!("Discovered Peers:"); let nodes = swarm.mdns.discovered_nodes(); let mut unique_peers = HashSet::new(); for peer in nodes { unique_peers.insert(peer); } unique_peers.iter().for_each(|p| info!("{}", p)); } async fn handle_list_recipes(cmd: &str, swarm: &mut Swarm<RecipeBehaviour>) { let rest = cmd.strip_prefix("ls r "); match rest { Some("all") => { let req = ListRequest { mode: ListMode::ALL }; let json = serde_json::to_string(&req).expect("can jsonify request"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); } Some(recipes_peer_id) => { let req = ListRequest { mode: ListMode::One(recipes_peer_id.to_owned()) }; let json = serde_json::to_string(&req).expect("can jsonify request"); swarm.floodsub.publish(TOPIC.clone(), json.as_bytes()); } None => { match read_local_recipes().await { Ok(v) => { info!("local recipes ({})", v.len()); v.iter().for_each(|r| info!("{:?}", r)); } Err(e) => error!("error fetching local recipes: {}", e) } } } } async fn handle_create_recipe(cmd: &str) { if let Some(rest) = cmd.strip_prefix("create r") { let elements: Vec<&str> = rest.split("|").collect(); if elements.len() < 3 { info!("too few arguments - Format: name|ingredients|instructions"); } else { let name = elements.get(0).expect("name is there"); let ingredients = elements.get(1).expect("ingredients is there"); let 
instruments = elements.get(2).expect("instruments is there"); if let Err(e) = create_new_recipe(name, ingredients, instruments).await { error!("error creating recipe: {}", e); } } } } fn respond_with_publish_recipes(sender: mpsc::UnboundedSender<ListResponse>, receiver: String) { tokio::spawn(async move { match read_local_recipes().await { Ok(recipes) => { let resp = ListResponse { mode: ListMode::ALL, receiver, data: recipes.into_iter().filter(|r| r.public).collect() }; if let Err(e) = sender.send(resp) { error!("error sending response via channel, {}", e); } } Err(e) => error!("error fetching local recipes to answer ALL request, {}", e) } }); } async fn handle_publish_recipe(cmd: &str) { if let Some(rest) = cmd.strip_prefix("publish r") { match rest.trim().parse::<usize>() { Ok(id) => { if let Err(e) = publish_recipe(id).await { info!("error publishing recipe with id {}, {}", id, e); } else { info!("published recipe with id: {}", id); } } Err(e) => error!("invalid id: {}, {}", rest.trim(), e) } } } async fn create_new_recipe(name: &str, ingredients: &str, instructions: &str) -> Result<()> { let mut local_recipes = read_local_recipes().await?; let new_id = match local_recipes.iter().max_by_key(|r|r.id) { Some(v) => v.id + 1, None => 0 }; local_recipes.push(Recipe { id: new_id, name: name.to_owned(), ingredients: ingredients.to_owned(), instructions: instructions.to_owned(), public: false }); write_local_recipes(&local_recipes).await?; info!("create recipe:"); info!("name: {}", name); info!("ingredients: {}", ingredients); info!("instruments: {}", instructions); Ok(()) } async fn publish_recipe(id: usize) -> Result<()> { let mut local_recipes = read_local_recipes().await?; local_recipes .iter_mut() .filter(|r|r.id == id) .for_each(|r| r.public = true); write_local_recipes(&local_recipes).await?; Ok(()) } async fn read_local_recipes() -> Result<Recipes>
async fn write_local_recipes(recipes: &Recipes) -> Result<()> { let json = serde_json::to_string(&recipes)?; tokio::fs::write(STORAGE_FILE_PATH, &json).await?; Ok(()) }
{ let content = tokio::fs::read(STORAGE_FILE_PATH).await?; let result = serde_json::from_slice(&content)?; Ok(result) }
identifier_body
scheduler.rs
//! Scheduler is responsible for allocating containers on cluster nodes according to the currently //! submitted jobs and concurrency levels. It is only used on the master node. use crate::executor::*; use actix::fut::wrap_future; use actix::prelude::*; use actix::registry::SystemService; use actix::spawn; use actix_web::{client, HttpMessage}; use failure::{err_msg, Error}; use futures::future::{join_all, Future}; use rand::distributions::WeightedIndex; use rand::prelude::*; use serde_json; use shiplift::builder::{ContainerOptions, ContainerOptionsBuilder}; use std::collections::HashMap; use std::fs; use std::net::{SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::time::Duration; use uuid::Uuid; pub type AllocationId = String; pub type NodeId = String; pub type JobId = String; pub type ServiceName = String; const RESOURCE_REFRESH_INTERVAL: Duration = Duration::from_secs(5); /// A job specification (in docker-compose format) #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct JobSpec { pub services: HashMap<String, ServiceSpec>, } /// A service element within the job spec #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct ServiceSpec { pub image: String, pub command: Option<String>, pub entrypoint: Option<String>, #[serde(default)] pub ports: Vec<String>, #[serde(default)] pub volumes: Vec<String>, #[serde(default)] pub environment: Vec<String>, } impl ServiceSpec { /// Create ContainerOptions based on this service spec pub fn build_container_options(&self) -> Result<ContainerOptionsBuilder, Error> { let mut opt = ContainerOptions::builder(&*self.image); opt.volumes(self.volumes.iter().map(|i| &**i).collect()) .env(self.environment.iter().map(|i| &**i).collect()); if let Some(cmd) = &self.command { opt.cmd(vec![&*cmd]); } if let Some(entrypoint) = &self.entrypoint { opt.entrypoint(entrypoint); } for port in &self.ports { let mut port = port.split(':'); let host_port = port.next().unwrap().parse()?; let container_port = port.next().unwrap().parse()?; opt.expose(container_port, "tcp", host_port); } Ok(opt) } } /// Describes the state of the cluster including all jobs and nodes. #[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)] pub struct ClusterState { pub jobs: HashMap<JobId, JobSpec>, pub services: HashMap<JobId, HashMap<ServiceName, ServiceConfig>>, pub nodes: HashMap<NodeId, Node>, pub allocations: HashMap<AllocationId, Allocation>, pub master_node: Option<NodeId>, } impl ClusterState { /// Get a reference to the current master node pub fn master_node(&self) -> Option<&Node> { match &self.master_node { Some(master_id) => Some(&self.nodes[master_id]), None => None, } } } /// Element of cluster state assigning a job's task to a node. #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct Allocation { pub allocation_id: AllocationId, pub node_id: NodeId, pub job_id: JobId, pub service_name: ServiceName, } /// Runtime configuration of job services (including concurrency level) #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct ServiceConfig { pub scale: usize, } /// Element of cluster state used to describe a member of the cluster.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct Node { pub node_id: NodeId, pub cluster_address: SocketAddr, } impl Node { pub fn new<S, T>(node_id: T, cluster_address: S) -> Node where S: ToSocketAddrs, T: Into<NodeId>, { Node { cluster_address: cluster_address.to_socket_addrs().unwrap().next().unwrap(), node_id: node_id.into(), } } } /// Updates the cluster state to match requested jobs / concurrency levels. #[derive(Default)] pub struct Scheduler { state: ClusterState, node_resources: HashMap<NodeId, NodeResources>, state_path: Option<PathBuf>, } impl Scheduler { /// Set this node as master in an empty cluster. fn bootstrap(&mut self, node: Node) -> Result<(), Error> { if self.state.master_node.is_some() ||!self.state.nodes.is_empty() { return Err(err_msg("Cannot bootstrap a cluster with existing nodes.")); } info!("Bootstrapping cluster as node {}", node.node_id); self.state.master_node = Some(node.node_id.clone()); self.state.nodes.insert(node.node_id.clone(), node); Executor::from_registry().do_send(ExecutorCommand::UpdateState(self.state.clone())); Ok(()) } /// Add / remove allocations based on current jobs and concurrency requirements. fn update_schedule(&mut self) { // Build a map of service to existing allocations let mut service_allocs: HashMap<_, Vec<AllocationId>> = HashMap::new(); for alloc in self.state.allocations.values() { service_allocs .entry((&alloc.job_id, &alloc.service_name)) .or_default() .push(alloc.allocation_id.clone()); } // Put the changes we need to make here, since we can't modify self.state.allocations while // borrowed let mut to_remove = Vec::new(); let mut to_add = Vec::new(); // Used for weighted random node selection let nodes: Vec<_> = self.state.nodes.keys().collect(); let node_index = WeightedIndex::new(nodes.iter().map(|id| { self.node_resources .get(*id) .map(|resources| resources.total_memory - resources.used_memory) .unwrap_or(1) })) .unwrap(); // Compare the existing allocations with the desired concurrency of each service for (job_id, job_services) in &self.state.services { for (service_name, service) in job_services { let existing = service_allocs .remove(&(&job_id, &service_name)) .unwrap_or_default(); let diff = service.scale as isize - existing.len() as isize; debug!("Scheduling {}.{} -> {}", job_id, service_name, diff); if diff > 0 { // Create new allocations for node in node_index .sample_iter(&mut thread_rng()) .take(diff as usize) { to_add.push(Allocation { allocation_id: Uuid::new_v4().to_hyphenated().to_string(), node_id: nodes[node].clone(), job_id: job_id.clone(), service_name: service_name.clone(), }); } } else { to_remove.extend(existing.iter().take(diff.abs() as usize).cloned()); } } } // Remove any allocations that don't correspond to any service for allocs in service_allocs.values() { to_remove.extend(allocs.iter().cloned()); } // Now we drop the index service_allocs and we can mutate the state for alloc_id in to_remove { self.state.allocations.remove(&alloc_id); } for alloc in to_add.drain(..) { self.state .allocations .insert(alloc.allocation_id.clone(), alloc); } self.save_state(); spawn( self.update_nodes() .then(|res| check_err("Update nodes", res)), ); } /// Send the latest state to each node. 
fn update_nodes(&self) -> impl Future<Item = (), Error = Error> { let update_fut: Vec<_> = self .state .nodes .values() .map(|node| { client::post(format!("http://{}/node/state", node.cluster_address)) .json(&self.state) .unwrap() .send() }) .collect(); join_all(update_fut) .from_err() .map(|results| info!("Sent updated state to {} node(s)", results.len())) } fn load_state(&mut self) -> Result<(), Error> { if let Some(path) = &self.state_path { info!("Loading state from: {:?}", path); let raw_state = fs::File::open(path)?; self.state = serde_json::from_reader(raw_state)?; self.update_schedule(); } Ok(()) } fn save_state(&mut self) { if let Some(path) = &self.state_path { info!("Saving state to: {:?}", path); match serde_json::to_string(&self.state) { Ok(serialized) => match fs::write(path, serialized) { Ok(_) => {} Err(e) => error!("Failed to write state: {:?}", e), }, Err(e) => error!("Failed to serialize state: {:?}", e), } } } } impl Actor for Scheduler { type Context = Context<Self>; fn started(&mut self, ctx: &mut Context<Self>) { // Poll node resource usage (so we don't need to request it each time we reschedule) ctx.run_interval(RESOURCE_REFRESH_INTERVAL, |_, ctx| { let update_fut = wrap_future::<_, Self>(ctx.address().send(GetClusterResources)) .map_err(|e, _, _| error!("Failed request resource refresh: {:?}", e)) .map(|res, scheduler, _| match res { Ok(res) => scheduler.node_resources = res, Err(e) => error!("Failed to refresh node resources: {:?}", e), }); ctx.spawn(update_fut); }); } } impl Supervised for Scheduler {} impl SystemService for Scheduler {} /// Fire-and-forget type commands for the scheduler #[derive(Clone, Debug)] pub enum SchedulerCommand { CreateJob(JobId, JobSpec), DeleteJob(JobId), UpdateService(JobId, ServiceName, ServiceConfig), BootstrapNode(Node), RegisterNode(Node), SetStatePath(PathBuf), } impl Message for SchedulerCommand { type Result = Result<(), Error>; } impl Handler<SchedulerCommand> for Scheduler { type Result = Result<(), Error>; fn handle(&mut self, cmd: SchedulerCommand, _: &mut Context<Self>) -> Self::Result { debug!("Scheduler handling command: {:?}", cmd); match cmd { SchedulerCommand::CreateJob(job_id, job) => { job.services.keys().for_each(|service_name| { self.state .services .entry(job_id.clone()) .or_default() .insert(service_name.clone(), ServiceConfig { scale: 1 }); }); self.state.jobs.insert(job_id, job); self.update_schedule(); Ok(()) } SchedulerCommand::UpdateService(job_id, service_name, service_config) => { let result = self .state .services .get_mut(&job_id) .and_then(|services| services.get_mut(&service_name)) .map(|service| { *service = service_config; {} }) .ok_or_else(|| err_msg("Error does not exist")); self.update_schedule(); result } SchedulerCommand::DeleteJob(job_id) => { self.state.jobs.remove(&job_id); self.state.services.remove(&job_id); self.update_schedule(); Ok(()) } SchedulerCommand::BootstrapNode(node) => self.bootstrap(node), SchedulerCommand::RegisterNode(node) => { self.state.nodes.insert(node.node_id.clone(), node); spawn( self.update_nodes() .map_err(|e| error!("Failed to update new node: {}", e)), ); Ok(()) } SchedulerCommand::SetStatePath(path) => { self.state_path = Some(path); self.load_state() } } } } /// Message type for requesting resource usage of all nodes pub struct GetClusterResources; impl Message for GetClusterResources { type Result = Result<HashMap<String, NodeResources>, Error>; } impl Handler<GetClusterResources> for Scheduler { type Result = ResponseFuture<HashMap<String, NodeResources>, 
Error>; fn handle(&mut self, _: GetClusterResources, _: &mut Context<Self>) -> Self::Result { let node_queries: Vec<_> = self .state .nodes .values() .map(|node| { let node_id = node.node_id.clone(); client::get(format!("http://{}/node/resources", node.cluster_address)) .finish() .unwrap() .send() .map_err(Error::from) .and_then(|res| res.json().from_err()) .then(move |res| { Ok::<_, Error>(match res { Ok(ok) => Some((node_id, ok)), Err(_) => None, }) }) }) .collect(); Box::new( join_all(node_queries).map(|mut res| res.drain(..).filter_map(|res| res).collect()), ) } } /// Message type for requesting the current list of jobs pub struct ListJobs; impl Message for ListJobs { type Result = Result<HashMap<String, JobSpec>, Error>; } impl Handler<ListJobs> for Scheduler { type Result = Result<HashMap<String, JobSpec>, Error>; fn handle(&mut self, _: ListJobs, _: &mut Context<Self>) -> Self::Result { Ok(self.state.jobs.clone()) } } /// Message requesting a list of allocations pub struct ListAllocations; impl Message for ListAllocations { type Result = Result<Vec<Allocation>, Error>; } impl Handler<ListAllocations> for Scheduler { type Result = Result<Vec<Allocation>, Error>; fn handle(&mut self, _: ListAllocations, _: &mut Context<Self>) -> Self::Result { Ok(self.state.allocations.values().cloned().collect()) } } #[cfg(test)] mod test { use crate::scheduler::*; use crate::test_support::*; use serde_yaml; #[test] fn
() { let job: JobSpec = serde_yaml::from_str(TEST_JOB_SPEC).expect("Failed to parse sample job spec"); with_bootstrap_node(|| { Scheduler::from_registry() .send(SchedulerCommand::CreateJob(String::from("test-job"), job)) .and_then(move |res| { assert!(res.is_ok()); Scheduler::from_registry().send(ListJobs) }) .map(|res| { assert_eq!(res.expect("List jobs failed").len(), 1); }) }); } }
test_create_job
identifier_name
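TEST_JOB_SPEC lives in test_support and is not shown in this record; a hypothetical spec in the docker-compose style that JobSpec deserializes could look like the sketch below. Per ServiceSpec, only image is required: command and entrypoint default to None, and ports, volumes, and environment to empty vectors:

fn demo_parse_job_spec() -> Result<JobSpec, serde_yaml::Error> {
    // Same path the test takes: serde_yaml::from_str::<JobSpec>.
    let spec = r#"
services:
  web:
    image: nginx:alpine
    ports:
      - "8080:80"
    environment:
      - RUST_LOG=info
"#;
    serde_yaml::from_str(spec)
}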
scheduler.rs
//! Scheduler is responsible for allocating containers on cluster nodes according to the currently //! submitted jobs and concurrency levels. It is only used on the master node. use crate::executor::*; use actix::fut::wrap_future; use actix::prelude::*; use actix::registry::SystemService; use actix::spawn; use actix_web::{client, HttpMessage}; use failure::{err_msg, Error}; use futures::future::{join_all, Future}; use rand::distributions::WeightedIndex; use rand::prelude::*; use serde_json; use shiplift::builder::{ContainerOptions, ContainerOptionsBuilder}; use std::collections::HashMap; use std::fs; use std::net::{SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::time::Duration; use uuid::Uuid; pub type AllocationId = String; pub type NodeId = String; pub type JobId = String; pub type ServiceName = String; const RESOURCE_REFRESH_INTERVAL: Duration = Duration::from_secs(5); /// A job specification (in docker-compose format) #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct JobSpec { pub services: HashMap<String, ServiceSpec>, } /// A service element within the job spec #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct ServiceSpec { pub image: String, pub command: Option<String>, pub entrypoint: Option<String>, #[serde(default)] pub ports: Vec<String>, #[serde(default)] pub volumes: Vec<String>, #[serde(default)] pub environment: Vec<String>, } impl ServiceSpec { /// Create ContainerOptions based on this service spec pub fn build_container_options(&self) -> Result<ContainerOptionsBuilder, Error> { let mut opt = ContainerOptions::builder(&*self.image); opt.volumes(self.volumes.iter().map(|i| &**i).collect()) .env(self.environment.iter().map(|i| &**i).collect()); if let Some(cmd) = &self.command { opt.cmd(vec![&*cmd]); } if let Some(entrypoint) = &self.entrypoint { opt.entrypoint(entrypoint); } for port in &self.ports { let mut port = port.split(':'); let host_port = port.next().unwrap().parse()?; let container_port = port.next().unwrap().parse()?; opt.expose(container_port, "tcp", host_port); } Ok(opt) } } /// Describes the state of the cluster including all jobs and nodes. #[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)] pub struct ClusterState { pub jobs: HashMap<JobId, JobSpec>, pub services: HashMap<JobId, HashMap<ServiceName, ServiceConfig>>, pub nodes: HashMap<NodeId, Node>, pub allocations: HashMap<AllocationId, Allocation>, pub master_node: Option<NodeId>, } impl ClusterState { /// Get a reference to the current master node pub fn master_node(&self) -> Option<&Node> { match &self.master_node { Some(master_id) => Some(&self.nodes[master_id]), None => None, } } } /// Element of cluster state assigning a job's task to a node. #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct Allocation { pub allocation_id: AllocationId, pub node_id: NodeId, pub job_id: JobId, pub service_name: ServiceName, } /// Runtime configuration of job services (including concurrency level) #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct ServiceConfig { pub scale: usize, } /// Element of cluster state used to describe a member of the cluster.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct Node { pub node_id: NodeId, pub cluster_address: SocketAddr, } impl Node { pub fn new<S, T>(node_id: T, cluster_address: S) -> Node where S: ToSocketAddrs, T: Into<NodeId>, { Node { cluster_address: cluster_address.to_socket_addrs().unwrap().next().unwrap(), node_id: node_id.into(), } } } /// Updates the cluster state to match requested jobs / concurrency levels. #[derive(Default)] pub struct Scheduler { state: ClusterState, node_resources: HashMap<NodeId, NodeResources>, state_path: Option<PathBuf>, } impl Scheduler { /// Set this node as master in an empty cluster. fn bootstrap(&mut self, node: Node) -> Result<(), Error> { if self.state.master_node.is_some() ||!self.state.nodes.is_empty() { return Err(err_msg("Cannot bootstrap a cluster with existing nodes.")); } info!("Bootstrapping cluster as node {}", node.node_id); self.state.master_node = Some(node.node_id.clone()); self.state.nodes.insert(node.node_id.clone(), node); Executor::from_registry().do_send(ExecutorCommand::UpdateState(self.state.clone())); Ok(()) } /// Add / remove allocations based on current jobs and concurrency requirements. fn update_schedule(&mut self) { // Build a map of service to existing allocations let mut service_allocs: HashMap<_, Vec<AllocationId>> = HashMap::new(); for alloc in self.state.allocations.values() { service_allocs .entry((&alloc.job_id, &alloc.service_name)) .or_default() .push(alloc.allocation_id.clone()); } // Put the changes we need to make here, since we can't modify self.state.allocations while // borrowed let mut to_remove = Vec::new(); let mut to_add = Vec::new(); // Used for weighted random node selection let nodes: Vec<_> = self.state.nodes.keys().collect(); let node_index = WeightedIndex::new(nodes.iter().map(|id| { self.node_resources .get(*id) .map(|resources| resources.total_memory - resources.used_memory) .unwrap_or(1) })) .unwrap(); // Compare the existing allocations with the desired concurrency of each service for (job_id, job_services) in &self.state.services { for (service_name, service) in job_services { let existing = service_allocs .remove(&(&job_id, &service_name)) .unwrap_or_default(); let diff = service.scale as isize - existing.len() as isize; debug!("Scheduling {}.{} -> {}", job_id, service_name, diff); if diff > 0 { // Create new allocations for node in node_index .sample_iter(&mut thread_rng()) .take(diff as usize) { to_add.push(Allocation { allocation_id: Uuid::new_v4().to_hyphenated().to_string(), node_id: nodes[node].clone(), job_id: job_id.clone(), service_name: service_name.clone(), }); } } else {
} // Remove any allocations that don't correspond to any service for allocs in service_allocs.values() { to_remove.extend(allocs.iter().cloned()); } // Now we drop the index service_allocs and we can mutate the state for alloc_id in to_remove { self.state.allocations.remove(&alloc_id); } for alloc in to_add.drain(..) { self.state .allocations .insert(alloc.allocation_id.clone(), alloc); } self.save_state(); spawn( self.update_nodes() .then(|res| check_err("Update nodes", res)), ); } /// Send the latest state to each node. fn update_nodes(&self) -> impl Future<Item = (), Error = Error> { let update_fut: Vec<_> = self .state .nodes .values() .map(|node| { client::post(format!("http://{}/node/state", node.cluster_address)) .json(&self.state) .unwrap() .send() }) .collect(); join_all(update_fut) .from_err() .map(|results| info!("Sent updated state to {} node(s)", results.len())) } fn load_state(&mut self) -> Result<(), Error> { if let Some(path) = &self.state_path { info!("Loading state from: {:?}", path); let raw_state = fs::File::open(path)?; self.state = serde_json::from_reader(raw_state)?; self.update_schedule(); } Ok(()) } fn save_state(&mut self) { if let Some(path) = &self.state_path { info!("Saving state to: {:?}", path); match serde_json::to_string(&self.state) { Ok(serialized) => match fs::write(path, serialized) { Ok(_) => {} Err(e) => error!("Failed to write state: {:?}", e), }, Err(e) => error!("Failed to serialize state: {:?}", e), } } } } impl Actor for Scheduler { type Context = Context<Self>; fn started(&mut self, ctx: &mut Context<Self>) { // Poll node resource usage (so we don't need to request it each time we reschedule) ctx.run_interval(RESOURCE_REFRESH_INTERVAL, |_, ctx| { let update_fut = wrap_future::<_, Self>(ctx.address().send(GetClusterResources)) .map_err(|e, _, _| error!("Failed request resource refresh: {:?}", e)) .map(|res, scheduler, _| match res { Ok(res) => scheduler.node_resources = res, Err(e) => error!("Failed to refresh node resources: {:?}", e), }); ctx.spawn(update_fut); }); } } impl Supervised for Scheduler {} impl SystemService for Scheduler {} /// Fire-and-forget type commands for the scheduler #[derive(Clone, Debug)] pub enum SchedulerCommand { CreateJob(JobId, JobSpec), DeleteJob(JobId), UpdateService(JobId, ServiceName, ServiceConfig), BootstrapNode(Node), RegisterNode(Node), SetStatePath(PathBuf), } impl Message for SchedulerCommand { type Result = Result<(), Error>; } impl Handler<SchedulerCommand> for Scheduler { type Result = Result<(), Error>; fn handle(&mut self, cmd: SchedulerCommand, _: &mut Context<Self>) -> Self::Result { debug!("Scheduler handling command: {:?}", cmd); match cmd { SchedulerCommand::CreateJob(job_id, job) => { job.services.keys().for_each(|service_name| { self.state .services .entry(job_id.clone()) .or_default() .insert(service_name.clone(), ServiceConfig { scale: 1 }); }); self.state.jobs.insert(job_id, job); self.update_schedule(); Ok(()) } SchedulerCommand::UpdateService(job_id, service_name, service_config) => { let result = self .state .services .get_mut(&job_id) .and_then(|services| services.get_mut(&service_name)) .map(|service| { *service = service_config; {} }) .ok_or_else(|| err_msg("Error does not exist")); self.update_schedule(); result } SchedulerCommand::DeleteJob(job_id) => { self.state.jobs.remove(&job_id); self.state.services.remove(&job_id); self.update_schedule(); Ok(()) } SchedulerCommand::BootstrapNode(node) => self.bootstrap(node), SchedulerCommand::RegisterNode(node) => { 
self.state.nodes.insert(node.node_id.clone(), node); spawn( self.update_nodes() .map_err(|e| error!("Failed to update new node: {}", e)), ); Ok(()) } SchedulerCommand::SetStatePath(path) => { self.state_path = Some(path); self.load_state() } } } } /// Message type for requesting resource usage of all nodes pub struct GetClusterResources; impl Message for GetClusterResources { type Result = Result<HashMap<String, NodeResources>, Error>; } impl Handler<GetClusterResources> for Scheduler { type Result = ResponseFuture<HashMap<String, NodeResources>, Error>; fn handle(&mut self, _: GetClusterResources, _: &mut Context<Self>) -> Self::Result { let node_queries: Vec<_> = self .state .nodes .values() .map(|node| { let node_id = node.node_id.clone(); client::get(format!("http://{}/node/resources", node.cluster_address)) .finish() .unwrap() .send() .map_err(Error::from) .and_then(|res| res.json().from_err()) .then(move |res| { Ok::<_, Error>(match res { Ok(ok) => Some((node_id, ok)), Err(_) => None, }) }) }) .collect(); Box::new( join_all(node_queries).map(|mut res| res.drain(..).filter_map(|res| res).collect()), ) } } /// Message type for requesting the current list of jobs pub struct ListJobs; impl Message for ListJobs { type Result = Result<HashMap<String, JobSpec>, Error>; } impl Handler<ListJobs> for Scheduler { type Result = Result<HashMap<String, JobSpec>, Error>; fn handle(&mut self, _: ListJobs, _: &mut Context<Self>) -> Self::Result { Ok(self.state.jobs.clone()) } } /// Message requesting a list of allocations pub struct ListAllocations; impl Message for ListAllocations { type Result = Result<Vec<Allocation>, Error>; } impl Handler<ListAllocations> for Scheduler { type Result = Result<Vec<Allocation>, Error>; fn handle(&mut self, _: ListAllocations, _: &mut Context<Self>) -> Self::Result { Ok(self.state.allocations.values().cloned().collect()) } } #[cfg(test)] mod test { use crate::scheduler::*; use crate::test_support::*; use serde_yaml; #[test] fn test_create_job() { let job: JobSpec = serde_yaml::from_str(TEST_JOB_SPEC).expect("Failed to parse sample job spec"); with_bootstrap_node(|| { Scheduler::from_registry() .send(SchedulerCommand::CreateJob(String::from("test-job"), job)) .and_then(move |res| { assert!(res.is_ok()); Scheduler::from_registry().send(ListJobs) }) .map(|res| { assert_eq!(res.expect("List jobs failed").len(), 1); }) }); } }
to_remove.extend(existing.iter().take(diff.abs() as usize).cloned()); } }
random_line_split
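The weighted node selection inside `update_schedule` above is the scheduling core: target nodes are sampled with probability proportional to their free memory, with a fallback weight of 1 for nodes that have not reported resources yet. A minimal, self-contained sketch of that technique, assuming the same `rand` `WeightedIndex` API used above (node names and memory figures are made up):

use rand::distributions::WeightedIndex;
use rand::prelude::*;

fn main() {
    let nodes = ["node-a", "node-b", "node-c"];
    // Free memory per node; a node without a reading would get weight 1,
    // mirroring the scheduler's unwrap_or(1) fallback.
    let free_mem = [4096u64, 1024, 1];
    let index = WeightedIndex::new(free_mem).unwrap();
    // Draw three placements; "node-a" is chosen most often because it has
    // the most free memory.
    for choice in index.sample_iter(&mut thread_rng()).take(3) {
        println!("allocate on {}", nodes[choice]);
    }
}

Note that `WeightedIndex::new` errors on an empty or all-zero weight list, so the fallback weight of 1 also keeps the index constructible when no node has reported free memory yet.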
scheduler.rs
//! Scheduler is responsible for allocating containers on cluster nodes according to the currently //! submitted jobs and concurrency levels. It is only used on the master node. use crate::executor::*; use actix::fut::wrap_future; use actix::prelude::*; use actix::registry::SystemService; use actix::spawn; use actix_web::{client, HttpMessage}; use failure::{err_msg, Error}; use futures::future::{join_all, Future}; use rand::distributions::WeightedIndex; use rand::prelude::*; use serde_json; use shiplift::builder::{ContainerOptions, ContainerOptionsBuilder}; use std::collections::HashMap; use std::fs; use std::net::{SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::time::Duration; use uuid::Uuid; pub type AllocationId = String; pub type NodeId = String; pub type JobId = String; pub type ServiceName = String; const RESOURCE_REFRESH_INTERVAL: Duration = Duration::from_secs(5); /// A job specification (in docker-compose format) #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct JobSpec { pub services: HashMap<String, ServiceSpec>, } /// A service element within the job spec #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct ServiceSpec { pub image: String, pub command: Option<String>, pub entrypoint: Option<String>, #[serde(default)] pub ports: Vec<String>, #[serde(default)] pub volumes: Vec<String>, #[serde(default)] pub environment: Vec<String>, } impl ServiceSpec { /// Create ContainerOptions based on this service spec pub fn build_container_options(&self) -> Result<ContainerOptionsBuilder, Error> { let mut opt = ContainerOptions::builder(&*self.image); opt.volumes(self.volumes.iter().map(|i| &**i).collect()) .env(self.environment.iter().map(|i| &**i).collect()); if let Some(cmd) = &self.command { opt.cmd(vec![&*cmd]); } if let Some(entrypoint) = &self.entrypoint { opt.entrypoint(entrypoint); } for port in &self.ports { let mut port = port.split(':'); let host_port = port.next().unwrap().parse()?; let container_port = port.next().unwrap().parse()?; opt.expose(container_port, "tcp", host_port); } Ok(opt) } } /// Describes the state of the cluster including all jobs and nodes. #[derive(Debug, PartialEq, Serialize, Deserialize, Clone, Default)] pub struct ClusterState { pub jobs: HashMap<JobId, JobSpec>, pub services: HashMap<JobId, HashMap<ServiceName, ServiceConfig>>, pub nodes: HashMap<NodeId, Node>, pub allocations: HashMap<AllocationId, Allocation>, pub master_node: Option<NodeId>, } impl ClusterState { /// Get a reference to the current master node pub fn master_node(&self) -> Option<&Node> { match &self.master_node { Some(master_id) => Some(&self.nodes[master_id]), None => None, } } } /// Element of cluster state assigning a job's task to a node. #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct Allocation { pub allocation_id: AllocationId, pub node_id: NodeId, pub job_id: JobId, pub service_name: ServiceName, } /// Runtime configuration of job services (including concurrency level) #[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct ServiceConfig { pub scale: usize, } /// Element of cluster state used to describe a member of the cluster.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)] pub struct Node { pub node_id: NodeId, pub cluster_address: SocketAddr, } impl Node { pub fn new<S, T>(node_id: T, cluster_address: S) -> Node where S: ToSocketAddrs, T: Into<NodeId>, { Node { cluster_address: cluster_address.to_socket_addrs().unwrap().next().unwrap(), node_id: node_id.into(), } } } /// Updates the cluster state to match requested jobs / concurrency levels. #[derive(Default)] pub struct Scheduler { state: ClusterState, node_resources: HashMap<NodeId, NodeResources>, state_path: Option<PathBuf>, } impl Scheduler { /// Set this node as master in an empty cluster. fn bootstrap(&mut self, node: Node) -> Result<(), Error> { if self.state.master_node.is_some() ||!self.state.nodes.is_empty() { return Err(err_msg("Cannot bootstrap a cluster with existing nodes.")); } info!("Bootstrapping cluster as node {}", node.node_id); self.state.master_node = Some(node.node_id.clone()); self.state.nodes.insert(node.node_id.clone(), node); Executor::from_registry().do_send(ExecutorCommand::UpdateState(self.state.clone())); Ok(()) } /// Add / remove allocations based on current jobs and concurrency requirements. fn update_schedule(&mut self) { // Build a map of service to existing allocations let mut service_allocs: HashMap<_, Vec<AllocationId>> = HashMap::new(); for alloc in self.state.allocations.values() { service_allocs .entry((&alloc.job_id, &alloc.service_name)) .or_default() .push(alloc.allocation_id.clone()); } // Put the changes we need to make here, since we can't modify self.state.allocations while // borrowed let mut to_remove = Vec::new(); let mut to_add = Vec::new(); // Used for weighted random node selection let nodes: Vec<_> = self.state.nodes.keys().collect(); let node_index = WeightedIndex::new(nodes.iter().map(|id| { self.node_resources .get(*id) .map(|resources| resources.total_memory - resources.used_memory) .unwrap_or(1) })) .unwrap(); // Compare the existing allocations with the desired concurrency of each service for (job_id, job_services) in &self.state.services { for (service_name, service) in job_services { let existing = service_allocs .remove(&(&job_id, &service_name)) .unwrap_or_default(); let diff = service.scale as isize - existing.len() as isize; debug!("Scheduling {}.{} -> {}", job_id, service_name, diff); if diff > 0 { // Create new allocations for node in node_index .sample_iter(&mut thread_rng()) .take(diff as usize) { to_add.push(Allocation { allocation_id: Uuid::new_v4().to_hyphenated().to_string(), node_id: nodes[node].clone(), job_id: job_id.clone(), service_name: service_name.clone(), }); } } else { to_remove.extend(existing.iter().take(diff.abs() as usize).cloned()); } } } // Remove any allocations that don't correspond to any service for allocs in service_allocs.values() { to_remove.extend(allocs.iter().cloned()); } // Now we drop the index service_allocs and we can mutate the state for alloc_id in to_remove { self.state.allocations.remove(&alloc_id); } for alloc in to_add.drain(..) { self.state .allocations .insert(alloc.allocation_id.clone(), alloc); } self.save_state(); spawn( self.update_nodes() .then(|res| check_err("Update nodes", res)), ); } /// Send the latest state to each node. 
fn update_nodes(&self) -> impl Future<Item = (), Error = Error> { let update_fut: Vec<_> = self .state .nodes .values() .map(|node| { client::post(format!("http://{}/node/state", node.cluster_address)) .json(&self.state) .unwrap() .send() }) .collect(); join_all(update_fut) .from_err() .map(|results| info!("Sent updated state to {} node(s)", results.len())) } fn load_state(&mut self) -> Result<(), Error> { if let Some(path) = &self.state_path { info!("Loading state from: {:?}", path); let raw_state = fs::File::open(path)?; self.state = serde_json::from_reader(raw_state)?; self.update_schedule(); } Ok(()) } fn save_state(&mut self) { if let Some(path) = &self.state_path { info!("Saving state to: {:?}", path); match serde_json::to_string(&self.state) { Ok(serialized) => match fs::write(path, serialized) { Ok(_) => {} Err(e) => error!("Failed to write state: {:?}", e), }, Err(e) => error!("Failed to serialize state: {:?}", e), } } } } impl Actor for Scheduler { type Context = Context<Self>; fn started(&mut self, ctx: &mut Context<Self>) { // Poll node resource usage (so we don't need to request it each time we reschedule) ctx.run_interval(RESOURCE_REFRESH_INTERVAL, |_, ctx| { let update_fut = wrap_future::<_, Self>(ctx.address().send(GetClusterResources)) .map_err(|e, _, _| error!("Failed to request resource refresh: {:?}", e)) .map(|res, scheduler, _| match res { Ok(res) => scheduler.node_resources = res, Err(e) => error!("Failed to refresh node resources: {:?}", e), }); ctx.spawn(update_fut); }); } } impl Supervised for Scheduler {} impl SystemService for Scheduler {} /// Fire-and-forget type commands for the scheduler #[derive(Clone, Debug)] pub enum SchedulerCommand { CreateJob(JobId, JobSpec), DeleteJob(JobId), UpdateService(JobId, ServiceName, ServiceConfig), BootstrapNode(Node), RegisterNode(Node), SetStatePath(PathBuf), } impl Message for SchedulerCommand { type Result = Result<(), Error>; } impl Handler<SchedulerCommand> for Scheduler { type Result = Result<(), Error>; fn handle(&mut self, cmd: SchedulerCommand, _: &mut Context<Self>) -> Self::Result { debug!("Scheduler handling command: {:?}", cmd); match cmd { SchedulerCommand::CreateJob(job_id, job) => { job.services.keys().for_each(|service_name| { self.state .services .entry(job_id.clone()) .or_default() .insert(service_name.clone(), ServiceConfig { scale: 1 }); }); self.state.jobs.insert(job_id, job); self.update_schedule(); Ok(()) } SchedulerCommand::UpdateService(job_id, service_name, service_config) => { let result = self .state .services .get_mut(&job_id) .and_then(|services| services.get_mut(&service_name)) .map(|service| { *service = service_config; }) .ok_or_else(|| err_msg("Service does not exist")); self.update_schedule(); result } SchedulerCommand::DeleteJob(job_id) => { self.state.jobs.remove(&job_id); self.state.services.remove(&job_id); self.update_schedule(); Ok(()) } SchedulerCommand::BootstrapNode(node) => self.bootstrap(node), SchedulerCommand::RegisterNode(node) => { self.state.nodes.insert(node.node_id.clone(), node); spawn( self.update_nodes() .map_err(|e| error!("Failed to update new node: {}", e)), ); Ok(()) } SchedulerCommand::SetStatePath(path) => { self.state_path = Some(path); self.load_state() } } } } /// Message type for requesting resource usage of all nodes pub struct GetClusterResources; impl Message for GetClusterResources { type Result = Result<HashMap<String, NodeResources>, Error>; } impl Handler<GetClusterResources> for Scheduler { type Result = ResponseFuture<HashMap<String, NodeResources>, 
Error>; fn handle(&mut self, _: GetClusterResources, _: &mut Context<Self>) -> Self::Result { let node_queries: Vec<_> = self .state .nodes .values() .map(|node| { let node_id = node.node_id.clone(); client::get(format!("http://{}/node/resources", node.cluster_address)) .finish() .unwrap() .send() .map_err(Error::from) .and_then(|res| res.json().from_err()) .then(move |res| { Ok::<_, Error>(match res { Ok(ok) => Some((node_id, ok)), Err(_) => None, }) }) }) .collect(); Box::new( join_all(node_queries).map(|mut res| res.drain(..).filter_map(|res| res).collect()), ) } } /// Message type for requesting the current list of jobs pub struct ListJobs; impl Message for ListJobs { type Result = Result<HashMap<String, JobSpec>, Error>; } impl Handler<ListJobs> for Scheduler { type Result = Result<HashMap<String, JobSpec>, Error>; fn handle(&mut self, _: ListJobs, _: &mut Context<Self>) -> Self::Result { Ok(self.state.jobs.clone()) } } /// Message requesting a list of allocations pub struct ListAllocations; impl Message for ListAllocations { type Result = Result<Vec<Allocation>, Error>; } impl Handler<ListAllocations> for Scheduler { type Result = Result<Vec<Allocation>, Error>; fn handle(&mut self, _: ListAllocations, _: &mut Context<Self>) -> Self::Result { Ok(self.state.allocations.values().cloned().collect()) } } #[cfg(test)] mod test { use crate::scheduler::*; use crate::test_support::*; use serde_yaml; #[test] fn test_create_job()
}
{ let job: JobSpec = serde_yaml::from_str(TEST_JOB_SPEC).expect("Failed to parse sample job spec"); with_bootstrap_node(|| { Scheduler::from_registry() .send(SchedulerCommand::CreateJob(String::from("test-job"), job)) .and_then(move |res| { assert!(res.is_ok()); Scheduler::from_registry().send(ListJobs) }) .map(|res| { assert_eq!(res.expect("List jobs failed").len(), 1); }) }); }
identifier_body
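`save_state` and `load_state` above implement whole-state persistence: the entire `ClusterState` is re-serialized to JSON after every scheduling change and read back once when `SetStatePath` arrives. A reduced sketch of that round trip, with a hypothetical `State` type standing in for `ClusterState` (assumes `serde` with the derive feature plus `serde_json`):

use serde::{Deserialize, Serialize};
use std::fs;

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct State {
    jobs: Vec<String>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let state = State { jobs: vec!["test-job".into()] };
    // Serialize to a String first, then write: a serialization failure can
    // then never truncate an existing state file.
    let serialized = serde_json::to_string(&state)?;
    fs::write("state.json", serialized)?;
    // On startup, read the file back and rebuild the in-memory state.
    let restored: State = serde_json::from_str(&fs::read_to_string("state.json")?)?;
    assert_eq!(state, restored);
    Ok(())
}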
lib.rs
//! Write your own tests and benchmarks that look and behave like built-in tests! //! //! This is a simple and small test harness that mimics the original `libtest` //! (used by `cargo test`/`rustc --test`). That means: all output looks pretty //! much like `cargo test` and most CLI arguments are understood and used. With //! that plumbing work out of the way, your test runner can focus on the actual //! testing. //! //! For a small real world example, see [`examples/tidy.rs`][1]. //! //! [1]: https://github.com/LukasKalbertodt/libtest-mimic/blob/master/examples/tidy.rs //! //! # Usage //! //! To use this, you most likely want to add a manual `[[test]]` section to //! `Cargo.toml` and set `harness = false`. For example: //! //! ```toml //! [[test]] //! name = "mytest" //! path = "tests/mytest.rs" //! harness = false //! ``` //! //! And in `tests/mytest.rs` you would call [`run`] in the `main` function: //! //! ```no_run //! use libtest_mimic::{Arguments, Trial}; //! //! //! // Parse command line arguments //! let args = Arguments::from_args(); //! //! // Create a list of tests and/or benchmarks (in this case: two dummy tests). //! let tests = vec![ //! Trial::test("succeeding_test", move || Ok(())), //! Trial::test("failing_test", move || Err("Woops".into())), //! ]; //! //! // Run all tests and exit the application appropriatly. //! libtest_mimic::run(&args, tests).exit(); //! ``` //! //! Instead of returning `Ok` or `Err` directly, you want to actually perform //! your tests, of course. See [`Trial::test`] for more information on how to //! define a test. You can of course list all your tests manually. But in many //! cases it is useful to generate one test per file in a directory, for //! example. //! //! You can then run `cargo test --test mytest` to run it. To see the CLI //! arguments supported by this crate, run `cargo test --test mytest -- -h`. //! //! //! # Known limitations and differences to the official test harness //! //! `libtest-mimic` works on a best-effort basis: it tries to be as close to //! `libtest` as possible, but there are differences for a variety of reasons. //! For example, some rarely used features might not be implemented, some //! features are extremely difficult to implement, and removing minor, //! unimportant differences is just not worth the hassle. //! //! Some of the notable differences: //! //! - Output capture and `--nocapture`: simply not supported. The official //! `libtest` uses internal `std` functions to temporarily redirect output. //! `libtest-mimic` cannot use those. See [this issue][capture] for more //! information. //! - `--format=json|junit` //! //! [capture]: https://github.com/LukasKalbertodt/libtest-mimic/issues/9 #![forbid(unsafe_code)] use std::{process, sync::mpsc, fmt, time::Instant}; mod args; mod printer; use printer::Printer; use threadpool::ThreadPool; pub use crate::args::{Arguments, ColorSetting, FormatSetting}; /// A single test or benchmark. /// /// The original `libtest` often calls benchmarks "tests", which is a bit /// confusing. So in this library, it is called "trial". /// /// A trial is created via [`Trial::test`] or [`Trial::bench`]. The trial's /// `name` is printed and used for filtering. The `runner` is called when the /// test/benchmark is executed to determine its outcome. If `runner` panics, /// the trial is considered "failed". If you need the behavior of /// `#[should_panic]` you need to catch the panic yourself. You likely want to /// compare the panic payload to an expected value anyway. 
pub struct Trial { runner: Box<dyn FnOnce(bool) -> Outcome + Send>, info: TestInfo, } impl Trial { /// Creates a (non-benchmark) test with the given name and runner. /// /// The runner returning `Ok(())` is interpreted as the test passing. If the /// runner returns `Err(_)`, the test is considered failed. pub fn test<R>(name: impl Into<String>, runner: R) -> Self where R: FnOnce() -> Result<(), Failed> + Send +'static, { Self { runner: Box::new(move |_test_mode| match runner() { Ok(()) => Outcome::Passed, Err(failed) => Outcome::Failed(failed), }), info: TestInfo { name: name.into(), kind: String::new(), is_ignored: false, is_bench: false, }, } } /// Creates a benchmark with the given name and runner. /// /// If the runner's parameter `test_mode` is `true`, the runner function /// should run all code just once, without measuring, just to make sure it /// does not panic. If the parameter is `false`, it should perform the /// actual benchmark. If `test_mode` is `true` you may return `Ok(None)`, /// but if it's `false`, you have to return a `Measurement`, or else the /// benchmark is considered a failure. /// /// `test_mode` is `true` if neither `--bench` nor `--test` are set, and /// `false` when `--bench` is set. If `--test` is set, benchmarks are not /// ran at all, and both flags cannot be set at the same time. pub fn bench<R>(name: impl Into<String>, runner: R) -> Self where R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send +'static, { Self { runner: Box::new(move |test_mode| match runner(test_mode) { Err(failed) => Outcome::Failed(failed), Ok(_) if test_mode => Outcome::Passed, Ok(Some(measurement)) => Outcome::Measured(measurement), Ok(None) => Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into()), }), info: TestInfo { name: name.into(), kind: String::new(), is_ignored: false, is_bench: true, }, } } /// Sets the "kind" of this test/benchmark. If this string is not /// empty, it is printed in brackets before the test name (e.g. /// `test [my-kind] test_name`). (Default: *empty*) /// /// This is the only extension to the original libtest. pub fn with_kind(self, kind: impl Into<String>) -> Self { Self { info: TestInfo { kind: kind.into(), ..self.info }, ..self } } /// Sets whether or not this test is considered "ignored". (Default: `false`) /// /// With the built-in test suite, you can annotate `#[ignore]` on tests to /// not execute them by default (for example because they take a long time /// or require a special environment). If the `--ignored` flag is set, /// ignored tests are executed, too. pub fn with_ignored_flag(self, is_ignored: bool) -> Self { Self { info: TestInfo { is_ignored, ..self.info }, ..self } } /// Returns the name of this trial. pub fn name(&self) -> &str { &self.info.name } /// Returns the kind of this trial. If you have not set a kind, this is an /// empty string. pub fn kind(&self) -> &str { &self.info.kind } /// Returns whether this trial has been marked as *ignored*. pub fn has_ignored_flag(&self) -> bool { self.info.is_ignored } /// Returns `true` iff this trial is a test (as opposed to a benchmark). pub fn is_test(&self) -> bool { !self.info.is_bench } /// Returns `true` iff this trial is a benchmark (as opposed to a test). 
pub fn is_bench(&self) -> bool { self.info.is_bench } } impl fmt::Debug for Trial { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { struct OpaqueRunner; impl fmt::Debug for OpaqueRunner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("<runner>") } } f.debug_struct("Test") .field("runner", &OpaqueRunner) .field("name", &self.info.name) .field("kind", &self.info.kind) .field("is_ignored", &self.info.is_ignored) .field("is_bench", &self.info.is_bench) .finish() } } #[derive(Debug)] struct TestInfo { name: String, kind: String, is_ignored: bool, is_bench: bool, } /// Output of a benchmark. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Measurement { /// Average time in ns. pub avg: u64, /// Variance in ns. pub variance: u64, } /// Indicates that a test/benchmark has failed. Optionally carries a message. /// /// You usually want to use the `From` impl of this type, which allows you to /// convert any `T: fmt::Display` (e.g. `String`, `&str`,...) into `Failed`. #[derive(Debug, Clone)] pub struct Failed { msg: Option<String>, } impl Failed { /// Creates an instance without message. pub fn without_message() -> Self { Self { msg: None } } /// Returns the message of this instance. pub fn message(&self) -> Option<&str> { self.msg.as_deref() } } impl<M: std::fmt::Display> From<M> for Failed { fn from(msg: M) -> Self { Self { msg: Some(msg.to_string()) } } } /// The outcome of performing a test/benchmark. #[derive(Debug, Clone)] enum Outcome { /// The test passed. Passed, /// The test or benchmark failed. Failed(Failed), /// The test or benchmark was ignored. Ignored, /// The benchmark was successfully run. Measured(Measurement), } /// Contains information about the entire test run. Is returned by[`run`]. /// /// This type is marked as `#[must_use]`. Usually, you just call /// [`exit()`][Conclusion::exit] on the result of `run` to exit the application /// with the correct exit code. But you can also store this value and inspect /// its data. #[derive(Clone, Debug, PartialEq, Eq)] #[must_use = "Call `exit()` or `exit_if_failed()` to set the correct return code"] pub struct Conclusion { /// Number of tests and benchmarks that were filtered out (either by the /// filter-in pattern or by `--skip` arguments). pub num_filtered_out: u64, /// Number of passed tests. pub num_passed: u64, /// Number of failed tests and benchmarks. pub num_failed: u64, /// Number of ignored tests and benchmarks. pub num_ignored: u64, /// Number of benchmarks that successfully ran. pub num_measured: u64, } impl Conclusion { /// Exits the application with an appropriate error code (0 if all tests /// have passed, 101 if there have been failures). pub fn exit(&self) ->! { self.exit_if_failed(); process::exit(0); } /// Exits the application with error code 101 if there were any failures. /// Otherwise, returns normally. pub fn exit_if_failed(&self) { if self.has_failed() { process::exit(101) } } /// Returns whether there have been any failures. pub fn has_failed(&self) -> bool { self.num_failed > 0 } fn empty() -> Self { Self { num_filtered_out: 0, num_passed: 0, num_failed: 0, num_ignored: 0, num_measured: 0, } } } impl Arguments { /// Returns `true` if the given test should be ignored. 
fn is_ignored(&self, test: &Trial) -> bool { (test.info.is_ignored && !self.ignored && !self.include_ignored) || (test.info.is_bench && self.test) || (!test.info.is_bench && self.bench) } fn is_filtered_out(&self, test: &Trial) -> bool { let test_name = &test.info.name; // If a filter was specified, apply this if let Some(filter) = &self.filter { match self.exact { true if test_name != filter => return true, false if !test_name.contains(filter) => return true,
for skip_filter in &self.skip { match self.exact { true if test_name == skip_filter => return true, false if test_name.contains(skip_filter) => return true, _ => {} } } if self.ignored &&!test.info.is_ignored { return true; } false } } /// Runs all given trials (tests & benchmarks). /// /// This is the central function of this crate. It provides the framework for /// the testing harness. It does all the printing and house keeping. /// /// The returned value contains a couple of useful information. See /// [`Conclusion`] for more information. If `--list` was specified, a list is /// printed and a dummy `Conclusion` is returned. pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion { let start_instant = Instant::now(); let mut conclusion = Conclusion::empty(); // Apply filtering if args.filter.is_some() ||!args.skip.is_empty() || args.ignored { let len_before = tests.len() as u64; tests.retain(|test|!args.is_filtered_out(test)); conclusion.num_filtered_out = len_before - tests.len() as u64; } let tests = tests; // Create printer which is used for all output. let mut printer = printer::Printer::new(args, &tests); // If `--list` is specified, just print the list and return. if args.list { printer.print_list(&tests, args.ignored); return Conclusion::empty(); } // Print number of tests printer.print_title(tests.len() as u64); let mut failed_tests = Vec::new(); let mut handle_outcome = |outcome: Outcome, test: TestInfo, printer: &mut Printer| { printer.print_single_outcome(&outcome); // Handle outcome match outcome { Outcome::Passed => conclusion.num_passed += 1, Outcome::Failed(failed) => { failed_tests.push((test, failed.msg)); conclusion.num_failed += 1; }, Outcome::Ignored => conclusion.num_ignored += 1, Outcome::Measured(_) => conclusion.num_measured += 1, } }; // Execute all tests. let test_mode =!args.bench; if args.test_threads == Some(1) { // Run test sequentially in main thread for test in tests { // Print `test foo ...`, run the test, then print the outcome in // the same line. printer.print_test(&test.info); let outcome = if args.is_ignored(&test) { Outcome::Ignored } else { run_single(test.runner, test_mode) }; handle_outcome(outcome, test.info, &mut printer); } } else { // Run test in thread pool. let pool = match args.test_threads { Some(num_threads) => ThreadPool::new(num_threads), None => ThreadPool::default() }; let (sender, receiver) = mpsc::channel(); let num_tests = tests.len(); for test in tests { if args.is_ignored(&test) { sender.send((Outcome::Ignored, test.info)).unwrap(); } else { let sender = sender.clone(); pool.execute(move || { // It's fine to ignore the result of sending. If the // receiver has hung up, everything will wind down soon // anyway. let outcome = run_single(test.runner, test_mode); let _ = sender.send((outcome, test.info)); }); } } for (outcome, test_info) in receiver.iter().take(num_tests) { // In multithreaded mode, we do only print the start of the line // after the test ran, as otherwise it would lead to terribly // interleaved output. printer.print_test(&test_info); handle_outcome(outcome, test_info, &mut printer); } } // Print failures if there were any, and the final summary. if!failed_tests.is_empty() { printer.print_failures(&failed_tests); } printer.print_summary(&conclusion, start_instant.elapsed()); conclusion } /// Runs the given runner, catching any panics and treating them as a failed test. 
fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome { use std::panic::{catch_unwind, AssertUnwindSafe}; catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| { // The `panic` information is just an `Any` object representing the // value the panic was invoked with. For most panics (which use // `panic!` like `println!`), this is either `&str` or `String`. let payload = e.downcast_ref::<String>() .map(|s| s.as_str()) .or_else(|| e.downcast_ref::<&str>().copied()); let msg = match payload { Some(payload) => format!("test panicked: {payload}"), None => "test panicked".to_string(), }; Outcome::Failed(msg.into()) }) }
_ => {} }; } // If any skip patterns were specified, test against each of them.
random_line_split
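The `Trial::bench` contract documented above (run once in test mode, return a `Measurement` in bench mode) is easiest to see in a harness binary. A usage sketch under those rules; the workload, iteration count, and crude timing are invented for illustration, and variance reporting is omitted:

use libtest_mimic::{Arguments, Measurement, Trial};
use std::time::Instant;

fn main() {
    let args = Arguments::from_args();
    let bench = Trial::bench("sum_bench", |test_mode| {
        let work = || (0u64..10_000).sum::<u64>();
        if test_mode {
            // `--test` style run: execute once, no measurement required.
            work();
            return Ok(None);
        }
        // Bench mode (`--bench`): measure and report an average in ns.
        let iters = 1_000u32;
        let start = Instant::now();
        let mut acc = 0u64;
        for _ in 0..iters {
            acc = acc.wrapping_add(work());
        }
        let avg = start.elapsed().as_nanos() as u64 / u64::from(iters);
        assert!(acc > 0); // keep the work observable so it is not optimized out
        Ok(Some(Measurement { avg, variance: 0 }))
    });
    libtest_mimic::run(&args, vec![bench]).exit();
}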
lib.rs
//! Write your own tests and benchmarks that look and behave like built-in tests! //! //! This is a simple and small test harness that mimics the original `libtest` //! (used by `cargo test`/`rustc --test`). That means: all output looks pretty //! much like `cargo test` and most CLI arguments are understood and used. With //! that plumbing work out of the way, your test runner can focus on the actual //! testing. //! //! For a small real world example, see [`examples/tidy.rs`][1]. //! //! [1]: https://github.com/LukasKalbertodt/libtest-mimic/blob/master/examples/tidy.rs //! //! # Usage //! //! To use this, you most likely want to add a manual `[[test]]` section to //! `Cargo.toml` and set `harness = false`. For example: //! //! ```toml //! [[test]] //! name = "mytest" //! path = "tests/mytest.rs" //! harness = false //! ``` //! //! And in `tests/mytest.rs` you would call [`run`] in the `main` function: //! //! ```no_run //! use libtest_mimic::{Arguments, Trial}; //! //! //! // Parse command line arguments //! let args = Arguments::from_args(); //! //! // Create a list of tests and/or benchmarks (in this case: two dummy tests). //! let tests = vec![ //! Trial::test("succeeding_test", move || Ok(())), //! Trial::test("failing_test", move || Err("Woops".into())), //! ]; //! //! // Run all tests and exit the application appropriatly. //! libtest_mimic::run(&args, tests).exit(); //! ``` //! //! Instead of returning `Ok` or `Err` directly, you want to actually perform //! your tests, of course. See [`Trial::test`] for more information on how to //! define a test. You can of course list all your tests manually. But in many //! cases it is useful to generate one test per file in a directory, for //! example. //! //! You can then run `cargo test --test mytest` to run it. To see the CLI //! arguments supported by this crate, run `cargo test --test mytest -- -h`. //! //! //! # Known limitations and differences to the official test harness //! //! `libtest-mimic` works on a best-effort basis: it tries to be as close to //! `libtest` as possible, but there are differences for a variety of reasons. //! For example, some rarely used features might not be implemented, some //! features are extremely difficult to implement, and removing minor, //! unimportant differences is just not worth the hassle. //! //! Some of the notable differences: //! //! - Output capture and `--nocapture`: simply not supported. The official //! `libtest` uses internal `std` functions to temporarily redirect output. //! `libtest-mimic` cannot use those. See [this issue][capture] for more //! information. //! - `--format=json|junit` //! //! [capture]: https://github.com/LukasKalbertodt/libtest-mimic/issues/9 #![forbid(unsafe_code)] use std::{process, sync::mpsc, fmt, time::Instant}; mod args; mod printer; use printer::Printer; use threadpool::ThreadPool; pub use crate::args::{Arguments, ColorSetting, FormatSetting}; /// A single test or benchmark. /// /// The original `libtest` often calls benchmarks "tests", which is a bit /// confusing. So in this library, it is called "trial". /// /// A trial is created via [`Trial::test`] or [`Trial::bench`]. The trial's /// `name` is printed and used for filtering. The `runner` is called when the /// test/benchmark is executed to determine its outcome. If `runner` panics, /// the trial is considered "failed". If you need the behavior of /// `#[should_panic]` you need to catch the panic yourself. You likely want to /// compare the panic payload to an expected value anyway. 
pub struct Trial { runner: Box<dyn FnOnce(bool) -> Outcome + Send>, info: TestInfo, } impl Trial { /// Creates a (non-benchmark) test with the given name and runner. /// /// The runner returning `Ok(())` is interpreted as the test passing. If the /// runner returns `Err(_)`, the test is considered failed. pub fn test<R>(name: impl Into<String>, runner: R) -> Self where R: FnOnce() -> Result<(), Failed> + Send +'static, { Self { runner: Box::new(move |_test_mode| match runner() { Ok(()) => Outcome::Passed, Err(failed) => Outcome::Failed(failed), }), info: TestInfo { name: name.into(), kind: String::new(), is_ignored: false, is_bench: false, }, } } /// Creates a benchmark with the given name and runner. /// /// If the runner's parameter `test_mode` is `true`, the runner function /// should run all code just once, without measuring, just to make sure it /// does not panic. If the parameter is `false`, it should perform the /// actual benchmark. If `test_mode` is `true` you may return `Ok(None)`, /// but if it's `false`, you have to return a `Measurement`, or else the /// benchmark is considered a failure. /// /// `test_mode` is `true` if neither `--bench` nor `--test` are set, and /// `false` when `--bench` is set. If `--test` is set, benchmarks are not /// ran at all, and both flags cannot be set at the same time. pub fn bench<R>(name: impl Into<String>, runner: R) -> Self where R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send +'static, { Self { runner: Box::new(move |test_mode| match runner(test_mode) { Err(failed) => Outcome::Failed(failed), Ok(_) if test_mode => Outcome::Passed, Ok(Some(measurement)) => Outcome::Measured(measurement), Ok(None) => Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into()), }), info: TestInfo { name: name.into(), kind: String::new(), is_ignored: false, is_bench: true, }, } } /// Sets the "kind" of this test/benchmark. If this string is not /// empty, it is printed in brackets before the test name (e.g. /// `test [my-kind] test_name`). (Default: *empty*) /// /// This is the only extension to the original libtest. pub fn with_kind(self, kind: impl Into<String>) -> Self { Self { info: TestInfo { kind: kind.into(), ..self.info }, ..self } } /// Sets whether or not this test is considered "ignored". (Default: `false`) /// /// With the built-in test suite, you can annotate `#[ignore]` on tests to /// not execute them by default (for example because they take a long time /// or require a special environment). If the `--ignored` flag is set, /// ignored tests are executed, too. pub fn with_ignored_flag(self, is_ignored: bool) -> Self { Self { info: TestInfo { is_ignored, ..self.info }, ..self } } /// Returns the name of this trial. pub fn name(&self) -> &str { &self.info.name } /// Returns the kind of this trial. If you have not set a kind, this is an /// empty string. pub fn kind(&self) -> &str { &self.info.kind } /// Returns whether this trial has been marked as *ignored*. pub fn has_ignored_flag(&self) -> bool { self.info.is_ignored } /// Returns `true` iff this trial is a test (as opposed to a benchmark). pub fn is_test(&self) -> bool { !self.info.is_bench } /// Returns `true` iff this trial is a benchmark (as opposed to a test). 
pub fn is_bench(&self) -> bool { self.info.is_bench } } impl fmt::Debug for Trial { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { struct OpaqueRunner; impl fmt::Debug for OpaqueRunner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("<runner>") } } f.debug_struct("Test") .field("runner", &OpaqueRunner) .field("name", &self.info.name) .field("kind", &self.info.kind) .field("is_ignored", &self.info.is_ignored) .field("is_bench", &self.info.is_bench) .finish() } } #[derive(Debug)] struct TestInfo { name: String, kind: String, is_ignored: bool, is_bench: bool, } /// Output of a benchmark. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Measurement { /// Average time in ns. pub avg: u64, /// Variance in ns. pub variance: u64, } /// Indicates that a test/benchmark has failed. Optionally carries a message. /// /// You usually want to use the `From` impl of this type, which allows you to /// convert any `T: fmt::Display` (e.g. `String`, `&str`,...) into `Failed`. #[derive(Debug, Clone)] pub struct Failed { msg: Option<String>, } impl Failed { /// Creates an instance without message. pub fn without_message() -> Self { Self { msg: None } } /// Returns the message of this instance. pub fn message(&self) -> Option<&str> { self.msg.as_deref() } } impl<M: std::fmt::Display> From<M> for Failed { fn from(msg: M) -> Self { Self { msg: Some(msg.to_string()) } } } /// The outcome of performing a test/benchmark. #[derive(Debug, Clone)] enum
{ /// The test passed. Passed, /// The test or benchmark failed. Failed(Failed), /// The test or benchmark was ignored. Ignored, /// The benchmark was successfully run. Measured(Measurement), } /// Contains information about the entire test run. Is returned by[`run`]. /// /// This type is marked as `#[must_use]`. Usually, you just call /// [`exit()`][Conclusion::exit] on the result of `run` to exit the application /// with the correct exit code. But you can also store this value and inspect /// its data. #[derive(Clone, Debug, PartialEq, Eq)] #[must_use = "Call `exit()` or `exit_if_failed()` to set the correct return code"] pub struct Conclusion { /// Number of tests and benchmarks that were filtered out (either by the /// filter-in pattern or by `--skip` arguments). pub num_filtered_out: u64, /// Number of passed tests. pub num_passed: u64, /// Number of failed tests and benchmarks. pub num_failed: u64, /// Number of ignored tests and benchmarks. pub num_ignored: u64, /// Number of benchmarks that successfully ran. pub num_measured: u64, } impl Conclusion { /// Exits the application with an appropriate error code (0 if all tests /// have passed, 101 if there have been failures). pub fn exit(&self) ->! { self.exit_if_failed(); process::exit(0); } /// Exits the application with error code 101 if there were any failures. /// Otherwise, returns normally. pub fn exit_if_failed(&self) { if self.has_failed() { process::exit(101) } } /// Returns whether there have been any failures. pub fn has_failed(&self) -> bool { self.num_failed > 0 } fn empty() -> Self { Self { num_filtered_out: 0, num_passed: 0, num_failed: 0, num_ignored: 0, num_measured: 0, } } } impl Arguments { /// Returns `true` if the given test should be ignored. fn is_ignored(&self, test: &Trial) -> bool { (test.info.is_ignored &&!self.ignored &&!self.include_ignored) || (test.info.is_bench && self.test) || (!test.info.is_bench && self.bench) } fn is_filtered_out(&self, test: &Trial) -> bool { let test_name = &test.info.name; // If a filter was specified, apply this if let Some(filter) = &self.filter { match self.exact { true if test_name!= filter => return true, false if!test_name.contains(filter) => return true, _ => {} }; } // If any skip pattern were specified, test for all patterns. for skip_filter in &self.skip { match self.exact { true if test_name == skip_filter => return true, false if test_name.contains(skip_filter) => return true, _ => {} } } if self.ignored &&!test.info.is_ignored { return true; } false } } /// Runs all given trials (tests & benchmarks). /// /// This is the central function of this crate. It provides the framework for /// the testing harness. It does all the printing and house keeping. /// /// The returned value contains a couple of useful information. See /// [`Conclusion`] for more information. If `--list` was specified, a list is /// printed and a dummy `Conclusion` is returned. pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion { let start_instant = Instant::now(); let mut conclusion = Conclusion::empty(); // Apply filtering if args.filter.is_some() ||!args.skip.is_empty() || args.ignored { let len_before = tests.len() as u64; tests.retain(|test|!args.is_filtered_out(test)); conclusion.num_filtered_out = len_before - tests.len() as u64; } let tests = tests; // Create printer which is used for all output. let mut printer = printer::Printer::new(args, &tests); // If `--list` is specified, just print the list and return. 
if args.list { printer.print_list(&tests, args.ignored); return Conclusion::empty(); } // Print number of tests printer.print_title(tests.len() as u64); let mut failed_tests = Vec::new(); let mut handle_outcome = |outcome: Outcome, test: TestInfo, printer: &mut Printer| { printer.print_single_outcome(&outcome); // Handle outcome match outcome { Outcome::Passed => conclusion.num_passed += 1, Outcome::Failed(failed) => { failed_tests.push((test, failed.msg)); conclusion.num_failed += 1; }, Outcome::Ignored => conclusion.num_ignored += 1, Outcome::Measured(_) => conclusion.num_measured += 1, } }; // Execute all tests. let test_mode =!args.bench; if args.test_threads == Some(1) { // Run test sequentially in main thread for test in tests { // Print `test foo ...`, run the test, then print the outcome in // the same line. printer.print_test(&test.info); let outcome = if args.is_ignored(&test) { Outcome::Ignored } else { run_single(test.runner, test_mode) }; handle_outcome(outcome, test.info, &mut printer); } } else { // Run test in thread pool. let pool = match args.test_threads { Some(num_threads) => ThreadPool::new(num_threads), None => ThreadPool::default() }; let (sender, receiver) = mpsc::channel(); let num_tests = tests.len(); for test in tests { if args.is_ignored(&test) { sender.send((Outcome::Ignored, test.info)).unwrap(); } else { let sender = sender.clone(); pool.execute(move || { // It's fine to ignore the result of sending. If the // receiver has hung up, everything will wind down soon // anyway. let outcome = run_single(test.runner, test_mode); let _ = sender.send((outcome, test.info)); }); } } for (outcome, test_info) in receiver.iter().take(num_tests) { // In multithreaded mode, we do only print the start of the line // after the test ran, as otherwise it would lead to terribly // interleaved output. printer.print_test(&test_info); handle_outcome(outcome, test_info, &mut printer); } } // Print failures if there were any, and the final summary. if!failed_tests.is_empty() { printer.print_failures(&failed_tests); } printer.print_summary(&conclusion, start_instant.elapsed()); conclusion } /// Runs the given runner, catching any panics and treating them as a failed test. fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome { use std::panic::{catch_unwind, AssertUnwindSafe}; catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| { // The `panic` information is just an `Any` object representing the // value the panic was invoked with. For most panics (which use // `panic!` like `println!`), this is either `&str` or `String`. let payload = e.downcast_ref::<String>() .map(|s| s.as_str()) .or(e.downcast_ref::<&str>().map(|s| *s)); let msg = match payload { Some(payload) => format!("test panicked: {payload}"), None => format!("test panicked"), }; Outcome::Failed(msg.into()) }) }
Outcome
identifier_name
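`run_single` above recovers a message from a panic by downcasting the `Any` payload that `catch_unwind` hands back; for the common `panic!` forms the payload is a `String` or `&str`. The same pattern in isolation:

use std::panic::{catch_unwind, AssertUnwindSafe};

fn main() {
    let result: Result<(), _> = catch_unwind(AssertUnwindSafe(|| panic!("boom: {}", 42)));
    let msg = match result {
        Ok(()) => "no panic".to_string(),
        Err(payload) => payload
            .downcast_ref::<String>()
            .map(String::as_str)
            .or_else(|| payload.downcast_ref::<&str>().copied())
            .map(|s| format!("test panicked: {s}"))
            .unwrap_or_else(|| "test panicked".to_string()),
    };
    println!("{msg}"); // prints "test panicked: boom: 42"
}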
lib.rs
//! Write your own tests and benchmarks that look and behave like built-in tests! //! //! This is a simple and small test harness that mimics the original `libtest` //! (used by `cargo test`/`rustc --test`). That means: all output looks pretty //! much like `cargo test` and most CLI arguments are understood and used. With //! that plumbing work out of the way, your test runner can focus on the actual //! testing. //! //! For a small real world example, see [`examples/tidy.rs`][1]. //! //! [1]: https://github.com/LukasKalbertodt/libtest-mimic/blob/master/examples/tidy.rs //! //! # Usage //! //! To use this, you most likely want to add a manual `[[test]]` section to //! `Cargo.toml` and set `harness = false`. For example: //! //! ```toml //! [[test]] //! name = "mytest" //! path = "tests/mytest.rs" //! harness = false //! ``` //! //! And in `tests/mytest.rs` you would call [`run`] in the `main` function: //! //! ```no_run //! use libtest_mimic::{Arguments, Trial}; //! //! //! // Parse command line arguments //! let args = Arguments::from_args(); //! //! // Create a list of tests and/or benchmarks (in this case: two dummy tests). //! let tests = vec![ //! Trial::test("succeeding_test", move || Ok(())), //! Trial::test("failing_test", move || Err("Woops".into())), //! ]; //! //! // Run all tests and exit the application appropriatly. //! libtest_mimic::run(&args, tests).exit(); //! ``` //! //! Instead of returning `Ok` or `Err` directly, you want to actually perform //! your tests, of course. See [`Trial::test`] for more information on how to //! define a test. You can of course list all your tests manually. But in many //! cases it is useful to generate one test per file in a directory, for //! example. //! //! You can then run `cargo test --test mytest` to run it. To see the CLI //! arguments supported by this crate, run `cargo test --test mytest -- -h`. //! //! //! # Known limitations and differences to the official test harness //! //! `libtest-mimic` works on a best-effort basis: it tries to be as close to //! `libtest` as possible, but there are differences for a variety of reasons. //! For example, some rarely used features might not be implemented, some //! features are extremely difficult to implement, and removing minor, //! unimportant differences is just not worth the hassle. //! //! Some of the notable differences: //! //! - Output capture and `--nocapture`: simply not supported. The official //! `libtest` uses internal `std` functions to temporarily redirect output. //! `libtest-mimic` cannot use those. See [this issue][capture] for more //! information. //! - `--format=json|junit` //! //! [capture]: https://github.com/LukasKalbertodt/libtest-mimic/issues/9 #![forbid(unsafe_code)] use std::{process, sync::mpsc, fmt, time::Instant}; mod args; mod printer; use printer::Printer; use threadpool::ThreadPool; pub use crate::args::{Arguments, ColorSetting, FormatSetting}; /// A single test or benchmark. /// /// The original `libtest` often calls benchmarks "tests", which is a bit /// confusing. So in this library, it is called "trial". /// /// A trial is created via [`Trial::test`] or [`Trial::bench`]. The trial's /// `name` is printed and used for filtering. The `runner` is called when the /// test/benchmark is executed to determine its outcome. If `runner` panics, /// the trial is considered "failed". If you need the behavior of /// `#[should_panic]` you need to catch the panic yourself. You likely want to /// compare the panic payload to an expected value anyway. 
pub struct Trial { runner: Box<dyn FnOnce(bool) -> Outcome + Send>, info: TestInfo, } impl Trial { /// Creates a (non-benchmark) test with the given name and runner. /// /// The runner returning `Ok(())` is interpreted as the test passing. If the /// runner returns `Err(_)`, the test is considered failed. pub fn test<R>(name: impl Into<String>, runner: R) -> Self where R: FnOnce() -> Result<(), Failed> + Send +'static,
/// Creates a benchmark with the given name and runner. /// /// If the runner's parameter `test_mode` is `true`, the runner function /// should run all code just once, without measuring, just to make sure it /// does not panic. If the parameter is `false`, it should perform the /// actual benchmark. If `test_mode` is `true` you may return `Ok(None)`, /// but if it's `false`, you have to return a `Measurement`, or else the /// benchmark is considered a failure. /// /// `test_mode` is `true` if neither `--bench` nor `--test` are set, and /// `false` when `--bench` is set. If `--test` is set, benchmarks are not /// ran at all, and both flags cannot be set at the same time. pub fn bench<R>(name: impl Into<String>, runner: R) -> Self where R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send +'static, { Self { runner: Box::new(move |test_mode| match runner(test_mode) { Err(failed) => Outcome::Failed(failed), Ok(_) if test_mode => Outcome::Passed, Ok(Some(measurement)) => Outcome::Measured(measurement), Ok(None) => Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into()), }), info: TestInfo { name: name.into(), kind: String::new(), is_ignored: false, is_bench: true, }, } } /// Sets the "kind" of this test/benchmark. If this string is not /// empty, it is printed in brackets before the test name (e.g. /// `test [my-kind] test_name`). (Default: *empty*) /// /// This is the only extension to the original libtest. pub fn with_kind(self, kind: impl Into<String>) -> Self { Self { info: TestInfo { kind: kind.into(), ..self.info }, ..self } } /// Sets whether or not this test is considered "ignored". (Default: `false`) /// /// With the built-in test suite, you can annotate `#[ignore]` on tests to /// not execute them by default (for example because they take a long time /// or require a special environment). If the `--ignored` flag is set, /// ignored tests are executed, too. pub fn with_ignored_flag(self, is_ignored: bool) -> Self { Self { info: TestInfo { is_ignored, ..self.info }, ..self } } /// Returns the name of this trial. pub fn name(&self) -> &str { &self.info.name } /// Returns the kind of this trial. If you have not set a kind, this is an /// empty string. pub fn kind(&self) -> &str { &self.info.kind } /// Returns whether this trial has been marked as *ignored*. pub fn has_ignored_flag(&self) -> bool { self.info.is_ignored } /// Returns `true` iff this trial is a test (as opposed to a benchmark). pub fn is_test(&self) -> bool { !self.info.is_bench } /// Returns `true` iff this trial is a benchmark (as opposed to a test). pub fn is_bench(&self) -> bool { self.info.is_bench } } impl fmt::Debug for Trial { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { struct OpaqueRunner; impl fmt::Debug for OpaqueRunner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("<runner>") } } f.debug_struct("Test") .field("runner", &OpaqueRunner) .field("name", &self.info.name) .field("kind", &self.info.kind) .field("is_ignored", &self.info.is_ignored) .field("is_bench", &self.info.is_bench) .finish() } } #[derive(Debug)] struct TestInfo { name: String, kind: String, is_ignored: bool, is_bench: bool, } /// Output of a benchmark. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Measurement { /// Average time in ns. pub avg: u64, /// Variance in ns. pub variance: u64, } /// Indicates that a test/benchmark has failed. Optionally carries a message. 
/// /// You usually want to use the `From` impl of this type, which allows you to /// convert any `T: fmt::Display` (e.g. `String`, `&str`,...) into `Failed`. #[derive(Debug, Clone)] pub struct Failed { msg: Option<String>, } impl Failed { /// Creates an instance without message. pub fn without_message() -> Self { Self { msg: None } } /// Returns the message of this instance. pub fn message(&self) -> Option<&str> { self.msg.as_deref() } } impl<M: std::fmt::Display> From<M> for Failed { fn from(msg: M) -> Self { Self { msg: Some(msg.to_string()) } } } /// The outcome of performing a test/benchmark. #[derive(Debug, Clone)] enum Outcome { /// The test passed. Passed, /// The test or benchmark failed. Failed(Failed), /// The test or benchmark was ignored. Ignored, /// The benchmark was successfully run. Measured(Measurement), } /// Contains information about the entire test run. Is returned by[`run`]. /// /// This type is marked as `#[must_use]`. Usually, you just call /// [`exit()`][Conclusion::exit] on the result of `run` to exit the application /// with the correct exit code. But you can also store this value and inspect /// its data. #[derive(Clone, Debug, PartialEq, Eq)] #[must_use = "Call `exit()` or `exit_if_failed()` to set the correct return code"] pub struct Conclusion { /// Number of tests and benchmarks that were filtered out (either by the /// filter-in pattern or by `--skip` arguments). pub num_filtered_out: u64, /// Number of passed tests. pub num_passed: u64, /// Number of failed tests and benchmarks. pub num_failed: u64, /// Number of ignored tests and benchmarks. pub num_ignored: u64, /// Number of benchmarks that successfully ran. pub num_measured: u64, } impl Conclusion { /// Exits the application with an appropriate error code (0 if all tests /// have passed, 101 if there have been failures). pub fn exit(&self) ->! { self.exit_if_failed(); process::exit(0); } /// Exits the application with error code 101 if there were any failures. /// Otherwise, returns normally. pub fn exit_if_failed(&self) { if self.has_failed() { process::exit(101) } } /// Returns whether there have been any failures. pub fn has_failed(&self) -> bool { self.num_failed > 0 } fn empty() -> Self { Self { num_filtered_out: 0, num_passed: 0, num_failed: 0, num_ignored: 0, num_measured: 0, } } } impl Arguments { /// Returns `true` if the given test should be ignored. fn is_ignored(&self, test: &Trial) -> bool { (test.info.is_ignored &&!self.ignored &&!self.include_ignored) || (test.info.is_bench && self.test) || (!test.info.is_bench && self.bench) } fn is_filtered_out(&self, test: &Trial) -> bool { let test_name = &test.info.name; // If a filter was specified, apply this if let Some(filter) = &self.filter { match self.exact { true if test_name!= filter => return true, false if!test_name.contains(filter) => return true, _ => {} }; } // If any skip pattern were specified, test for all patterns. for skip_filter in &self.skip { match self.exact { true if test_name == skip_filter => return true, false if test_name.contains(skip_filter) => return true, _ => {} } } if self.ignored &&!test.info.is_ignored { return true; } false } } /// Runs all given trials (tests & benchmarks). /// /// This is the central function of this crate. It provides the framework for /// the testing harness. It does all the printing and house keeping. /// /// The returned value contains a couple of useful information. See /// [`Conclusion`] for more information. 
If `--list` was specified, a list is /// printed and a dummy `Conclusion` is returned. pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion { let start_instant = Instant::now(); let mut conclusion = Conclusion::empty(); // Apply filtering if args.filter.is_some() ||!args.skip.is_empty() || args.ignored { let len_before = tests.len() as u64; tests.retain(|test|!args.is_filtered_out(test)); conclusion.num_filtered_out = len_before - tests.len() as u64; } let tests = tests; // Create printer which is used for all output. let mut printer = printer::Printer::new(args, &tests); // If `--list` is specified, just print the list and return. if args.list { printer.print_list(&tests, args.ignored); return Conclusion::empty(); } // Print number of tests printer.print_title(tests.len() as u64); let mut failed_tests = Vec::new(); let mut handle_outcome = |outcome: Outcome, test: TestInfo, printer: &mut Printer| { printer.print_single_outcome(&outcome); // Handle outcome match outcome { Outcome::Passed => conclusion.num_passed += 1, Outcome::Failed(failed) => { failed_tests.push((test, failed.msg)); conclusion.num_failed += 1; }, Outcome::Ignored => conclusion.num_ignored += 1, Outcome::Measured(_) => conclusion.num_measured += 1, } }; // Execute all tests. let test_mode =!args.bench; if args.test_threads == Some(1) { // Run test sequentially in main thread for test in tests { // Print `test foo ...`, run the test, then print the outcome in // the same line. printer.print_test(&test.info); let outcome = if args.is_ignored(&test) { Outcome::Ignored } else { run_single(test.runner, test_mode) }; handle_outcome(outcome, test.info, &mut printer); } } else { // Run test in thread pool. let pool = match args.test_threads { Some(num_threads) => ThreadPool::new(num_threads), None => ThreadPool::default() }; let (sender, receiver) = mpsc::channel(); let num_tests = tests.len(); for test in tests { if args.is_ignored(&test) { sender.send((Outcome::Ignored, test.info)).unwrap(); } else { let sender = sender.clone(); pool.execute(move || { // It's fine to ignore the result of sending. If the // receiver has hung up, everything will wind down soon // anyway. let outcome = run_single(test.runner, test_mode); let _ = sender.send((outcome, test.info)); }); } } for (outcome, test_info) in receiver.iter().take(num_tests) { // In multithreaded mode, we do only print the start of the line // after the test ran, as otherwise it would lead to terribly // interleaved output. printer.print_test(&test_info); handle_outcome(outcome, test_info, &mut printer); } } // Print failures if there were any, and the final summary. if!failed_tests.is_empty() { printer.print_failures(&failed_tests); } printer.print_summary(&conclusion, start_instant.elapsed()); conclusion } /// Runs the given runner, catching any panics and treating them as a failed test. fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome { use std::panic::{catch_unwind, AssertUnwindSafe}; catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| { // The `panic` information is just an `Any` object representing the // value the panic was invoked with. For most panics (which use // `panic!` like `println!`), this is either `&str` or `String`. let payload = e.downcast_ref::<String>() .map(|s| s.as_str()) .or(e.downcast_ref::<&str>().map(|s| *s)); let msg = match payload { Some(payload) => format!("test panicked: {payload}"), None => format!("test panicked"), }; Outcome::Failed(msg.into()) }) }
{ Self { runner: Box::new(move |_test_mode| match runner() { Ok(()) => Outcome::Passed, Err(failed) => Outcome::Failed(failed), }), info: TestInfo { name: name.into(), kind: String::new(), is_ignored: false, is_bench: false, }, } }
identifier_body
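The record above is the core of a libtest-mimic-style harness: `run` filters and executes `Trial`s, `run_single` catches panics, and `Conclusion::exit` maps failures to exit code 101. A minimal sketch of how such a harness is driven, assuming a `Trial::test(name, runner)` constructor shaped like the completed body above and an `Arguments::from_args` parser (both assumed; neither full signature appears in this sample, and the `libtest_mimic` crate name is inferred from the API shape):

// Hypothetical driver for the harness above; `Trial::test` and
// `Arguments::from_args` are assumed constructors.
use libtest_mimic::{run, Arguments, Trial};

fn main() {
    let args = Arguments::from_args();
    let tests = vec![
        // A passing test: the runner returns Ok(()).
        Trial::test("math_still_works", || {
            assert_eq!(2 + 2, 4);
            Ok(())
        }),
        // A failing test: any `T: fmt::Display` converts into `Failed`
        // through the `From` impl shown above.
        Trial::test("always_fails", || Err("expected failure".into())),
    ];
    // Executes the trials (sequentially or on a thread pool, depending on
    // `--test-threads`) and exits with code 0 or 101.
    run(&args, tests).exit();
}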
websocket.rs
use serde_json; use ws::{listen, Handler, Factory, Sender, Handshake, Request, Response as WsResponse, Message, CloseCode}; use ws::{Error as WsError, ErrorKind as WsErrorKind, Result as WsResult}; use graph::{PossibleErr as GraphErr, *}; use std::thread; use std::thread::{JoinHandle}; use std::fmt; use std::result; enum PossibleErr{ Ws(WsError), String(String), GraphErr(GraphErr), JsonErr(serde_json::Error), Disp(Box<fmt::Display>), None } impl fmt::Display for PossibleErr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::PossibleErr::*; match *self{ Ws(ref w) => w.fmt(f), String(ref s) => s.fmt(f), GraphErr(ref g) => g.fmt(f), JsonErr(ref j) => j.fmt(f), Disp(ref d) => d.fmt(f), None => (None).fmt(f) } } } type Result<T> = result::Result<T, PossibleErr>; impl From<::std::option::NoneError> for PossibleErr{ fn from(_: ::std::option::NoneError) -> PossibleErr{ PossibleErr::None } } impl From<WsError> for PossibleErr{ fn from(g: WsError) -> PossibleErr{ PossibleErr::Ws(g) } } impl From<GraphErr> for PossibleErr{ fn from(g: GraphErr) -> PossibleErr{ PossibleErr::GraphErr(g) } } impl From<serde_json::Error> for PossibleErr{ fn from(j: serde_json::Error) -> PossibleErr{ PossibleErr::JsonErr(j) } } fn to_ws_err(e: PossibleErr, kind: WsErrorKind) -> WsError{ use self::GraphErr::Ws as GWs; use self::PossibleErr::*; match e{ Ws(w) | GraphErr(GWs(w)) => w, _ => WsError::new(kind, format!("{}", e)) } } fn to_ws(e: PossibleErr) -> WsError{ to_ws_err(e, WsErrorKind::Internal) } impl Into<WsError> for PossibleErr{ fn into(self) -> WsError{ to_ws(self) } } fn decode_command(msg: Message) -> Result<Command>{ match msg{ Message::Text(t) => { Ok(serde_json::from_str(&t[..])?) }, Message::Binary(..) => Err(WsError::new( WsErrorKind::Protocol, format!("binary message received where expecting text JSON")).into()) } } fn encode_response(response: Response) -> WsResult<Message>
fn encode_update<T: Into<Update>>(update: T) -> WsResult<Message>{ match serde_json::to_string(&update.into()){ Ok(s) => Ok(Message::Text(s)), Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_update failed {:?}", e))) } } struct ClientCommon; impl ClientCommon{ fn on_open(out: &Sender, store: &GraphStore, id: GraphId, client_type: ClientType) -> Result<()>{ if let Ok(_) = store.attach(id, client_type, out.clone()){ trace!("Client supplied valid GraphId {}", id); Ok(()) } else{ let err = format!("GraphId {} does not exist", id); out.send(encode_response(Response::Error(DataValue::from(err.clone())))?)?; Ok(()) //Err(WsError::new(WsErrorKind::Protocol, err)) } } fn on_command(_out: &Sender, store: &GraphStore, command: &Command, graph: GraphId, _client_type: ClientType) -> Result<Option<Response>> { use graph::Command::*; let response = match *command{ AddLink{ source: ref from, target: ref to } => { let graph = store.get(graph)?; let result = graph.add_link(from, to); match result{ Response::Ok => { graph.repeat_to(ClientType::Both, encode_update(command.clone())?); Response::Ok } _ => result } }, _ => {return Ok(None)} }; Ok(Some(response)) } } #[derive(Copy,Clone)] struct FrontendClient{ graph: GraphId } impl FrontendClient{ fn on_open(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{ ClientCommon::on_open(out, store, id, ClientType::Frontend)?; trace!("Frontend attached to GraphId {}", id); out.send( encode_update( Command::SetGraph{ graph: store.get(id)?.data.clone() } )? )?; out.send( encode_update( Response::Warning("Test Warning".into()) )? )?; Ok(FrontendClient{ graph: id }) } fn on_command(&self, out: &Sender, store: &GraphStore, command: &Command) -> Result<Response> { //use graph::Command::*; if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, ClientType::Frontend)?{ return Ok(common); } match *command{ _ => Err(WsError::new( WsErrorKind::Protocol, format!("Expected Frontend command, got {:?}", command)).into()) } } } #[derive(Copy,Clone)] struct BackendClient{ graph: GraphId } impl BackendClient{ fn on_open(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{ ClientCommon::on_open(out, store, id, ClientType::Backend)?; trace!("Backend attached to GraphId {}", id); Ok(BackendClient{ graph: id }) } fn on_command(&self, out: &Sender, store: &GraphStore, command: &Command) -> Result<Response> { use graph::Command::*; let client_type = ClientType::Backend; if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, client_type.clone())?{ return Ok(common); } match *command{ SetGraph{ ref graph } => Ok({ trace!("set graph {:?}", graph); store.set_graph(self.graph, graph.clone())?; store.repeat_to(self.graph, client_type.opposite(), encode_update(command.clone())?)?; Response::Ok }), SetData{ ref id, ref value } => Ok({ trace!("set data {:?} = {:?}", id, value); //store.set_data(self.graph, id, value)?; store.repeat_to(self.graph, client_type.opposite(), encode_update(command.clone())?)?; Response::Ok }), _ => Err(WsError::new(WsErrorKind::Protocol, format!("Expected Backend command, got {:?}", command)).into()) } } } #[derive(Copy,Clone)] enum ClientState{ Frontend(FrontendClient), Backend(BackendClient), AwaitingType } struct ServerHandler{ out: Sender, store: GraphStore, state: ClientState, addr: String } impl ServerHandler{ fn on_open_inner(&mut self, hs: Handshake) -> Result<()>{ if let Some(ip_addr) = hs.peer_addr { let ip_string = format!("{}", ip_addr); info!("{:>20} - connection {:?} 
established", ip_string, self.out.token()); self.addr = ip_string; } else{ debug!("Connection without IP address?"); } self.out.send( serde_json::to_string( &self.store.list())? )?; Ok(()) } fn on_message_inner(&mut self, msg: Message) -> Result<()> { use self::ClientState::*; use graph::Command::{FrontendAttach, BackendAttach}; let command = decode_command(msg)?; let response = match self.state.clone() { Frontend(client) => client.on_command(&self.out, &self.store, &command)?, Backend(client) => // different type for client than the above match client.on_command(&self.out, &self.store, &command)?, AwaitingType => { let out = &self.out; let store = &self.store; let state = match command{ FrontendAttach{ id } => Frontend(FrontendClient::on_open(out, store, id)?), BackendAttach { id } => { let id = match id{ Some(id) if self.store.contains_key(id) => id, Some(id) => self.store.empty_at(id), None => self.store.new_empty() }; Backend(BackendClient::on_open(out, store, id)?) }, _ => return Err(WsError::new(WsErrorKind::Protocol, "Expected FrontendAttach or BackendAttach, got something else").into()) }; self.state = state; Response::Ok } }; if response!= Response::Ok{ // don't generate Ok messages, they're pointless and hard to coordinate self.out.send(encode_response(response)?)? } Ok(()) } } impl Handler for ServerHandler{ fn on_open(&mut self, hs: Handshake) -> WsResult<()>{ self.on_open_inner(hs).map_err(|e|e.into()) } fn on_request(&mut self, req: &Request) -> WsResult<WsResponse> { let mut res = WsResponse::from_request(req)?; let protocol_name = "selenologist-node-editor"; res.set_protocol(protocol_name); Ok(res) } fn on_message(&mut self, msg: Message) -> WsResult<()> { self.on_message_inner(msg).map_err(|e|e.into()) } fn on_close(&mut self, code: CloseCode, reason: &str){ use self::ClientState::*; trace!("Closing connection {:?} because {:?} {}", self.addr, code, reason); match self.state{ Backend(BackendClient{ graph }) | Frontend(FrontendClient{ graph }) => { self.store.remove_listener(graph, self.out.token().0) .unwrap(); } _ => {} } } } #[derive(Default)] struct ServerFactory{ store: GraphStore, } impl Factory for ServerFactory{ type Handler = ServerHandler; fn connection_made(&mut self, out: Sender) -> Self::Handler{ ServerHandler{ out, store: self.store.clone(), state: ClientState::AwaitingType, addr: "0.0.0.0:0".into() } } } pub fn launch_thread() -> JoinHandle<()> { thread::Builder::new() .name("websocket".into()) .spawn(move || { let mut factory = ServerFactory::default(); let listen_addr = "127.0.0.1:3001"; info!("Attempting to listen on {}", listen_addr); listen(listen_addr, |out| factory.connection_made(out)).unwrap() }).unwrap() }
{ match serde_json::to_string(&response){ Ok(s) => Ok(Message::Text(s)), Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_response failed {:?}", e))) } }
identifier_body
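`encode_response` (the completed body above) and `encode_update` are plain serde_json serialization wrapped in `ws::Message::Text`. The same round trip, sketched self-contained with a stand-in `Response` type (the real one lives in the unshown `graph` module):

// Stand-in for `graph::Response`; the real definition is not shown above.
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
enum Response {
    Ok,
    Error(String),
}

fn encode(response: &Response) -> Result<String, serde_json::Error> {
    serde_json::to_string(response)
}

fn main() -> Result<(), serde_json::Error> {
    let wire = encode(&Response::Error("GraphId 7 does not exist".into()))?;
    // With serde's default externally-tagged representation this prints:
    // {"Error":"GraphId 7 does not exist"}
    println!("{wire}");
    let back: Response = serde_json::from_str(&wire)?;
    assert_eq!(back, Response::Error("GraphId 7 does not exist".into()));
    Ok(())
}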
websocket.rs
use serde_json; use ws::{listen, Handler, Factory, Sender, Handshake, Request, Response as WsResponse, Message, CloseCode}; use ws::{Error as WsError, ErrorKind as WsErrorKind, Result as WsResult}; use graph::{PossibleErr as GraphErr, *}; use std::thread; use std::thread::{JoinHandle}; use std::fmt; use std::result; enum PossibleErr{ Ws(WsError), String(String), GraphErr(GraphErr), JsonErr(serde_json::Error), Disp(Box<fmt::Display>), None } impl fmt::Display for PossibleErr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::PossibleErr::*; match *self{ Ws(ref w) => w.fmt(f), String(ref s) => s.fmt(f), GraphErr(ref g) => g.fmt(f), JsonErr(ref j) => j.fmt(f), Disp(ref d) => d.fmt(f), None => (None).fmt(f) } } } type Result<T> = result::Result<T, PossibleErr>; impl From<::std::option::NoneError> for PossibleErr{ fn from(_: ::std::option::NoneError) -> PossibleErr{ PossibleErr::None } } impl From<WsError> for PossibleErr{ fn from(g: WsError) -> PossibleErr{ PossibleErr::Ws(g) } } impl From<GraphErr> for PossibleErr{ fn from(g: GraphErr) -> PossibleErr{ PossibleErr::GraphErr(g) } } impl From<serde_json::Error> for PossibleErr{ fn from(j: serde_json::Error) -> PossibleErr{ PossibleErr::JsonErr(j) } } fn to_ws_err(e: PossibleErr, kind: WsErrorKind) -> WsError{ use self::GraphErr::Ws as GWs; use self::PossibleErr::*; match e{ Ws(w) | GraphErr(GWs(w)) => w, _ => WsError::new(kind, format!("{}", e)) } } fn to_ws(e: PossibleErr) -> WsError{ to_ws_err(e, WsErrorKind::Internal) } impl Into<WsError> for PossibleErr{ fn into(self) -> WsError{ to_ws(self) } } fn decode_command(msg: Message) -> Result<Command>{ match msg{ Message::Text(t) => { Ok(serde_json::from_str(&t[..])?) }, Message::Binary(..) => Err(WsError::new( WsErrorKind::Protocol, format!("binary message received where expecting text JSON")).into()) } } fn encode_response(response: Response) -> WsResult<Message>{ match serde_json::to_string(&response){ Ok(s) => Ok(Message::Text(s)), Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_response failed {:?}", e))) } } fn encode_update<T: Into<Update>>(update: T) -> WsResult<Message>{ match serde_json::to_string(&update.into()){ Ok(s) => Ok(Message::Text(s)), Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_update failed {:?}", e))) } } struct ClientCommon; impl ClientCommon{ fn on_open(out: &Sender, store: &GraphStore, id: GraphId, client_type: ClientType) -> Result<()>{ if let Ok(_) = store.attach(id, client_type, out.clone()){ trace!("Client supplied valid GraphId {}", id); Ok(()) } else{ let err = format!("GraphId {} does not exist", id); out.send(encode_response(Response::Error(DataValue::from(err.clone())))?)?; Ok(()) //Err(WsError::new(WsErrorKind::Protocol, err)) } } fn on_command(_out: &Sender, store: &GraphStore, command: &Command, graph: GraphId, _client_type: ClientType) -> Result<Option<Response>> { use graph::Command::*; let response = match *command{ AddLink{ source: ref from, target: ref to } => { let graph = store.get(graph)?; let result = graph.add_link(from, to); match result{ Response::Ok => { graph.repeat_to(ClientType::Both, encode_update(command.clone())?); Response::Ok } _ => result } }, _ => {return Ok(None)} }; Ok(Some(response)) } } #[derive(Copy,Clone)] struct FrontendClient{ graph: GraphId } impl FrontendClient{ fn on_open(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{ ClientCommon::on_open(out, store, id, ClientType::Frontend)?; trace!("Frontend attached to GraphId {}", id); out.send( encode_update( 
Command::SetGraph{ graph: store.get(id)?.data.clone() } )? )?; out.send( encode_update( Response::Warning("Test Warning".into()) )? )?; Ok(FrontendClient{ graph: id }) } fn on_command(&self, out: &Sender, store: &GraphStore, command: &Command) -> Result<Response> { //use graph::Command::*; if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, ClientType::Frontend)?{ return Ok(common); } match *command{ _ => Err(WsError::new( WsErrorKind::Protocol, format!("Expected Frontend command, got {:?}", command)).into()) } } } #[derive(Copy,Clone)] struct BackendClient{ graph: GraphId } impl BackendClient{ fn on_open(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{ ClientCommon::on_open(out, store, id, ClientType::Backend)?; trace!("Backend attached to GraphId {}", id); Ok(BackendClient{ graph: id }) } fn on_command(&self, out: &Sender, store: &GraphStore, command: &Command) -> Result<Response> { use graph::Command::*; let client_type = ClientType::Backend; if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, client_type.clone())?{ return Ok(common); } match *command{ SetGraph{ ref graph } => Ok({ trace!("set graph {:?}", graph); store.set_graph(self.graph, graph.clone())?; store.repeat_to(self.graph, client_type.opposite(), encode_update(command.clone())?)?; Response::Ok }), SetData{ ref id, ref value } => Ok({ trace!("set data {:?} = {:?}", id, value); //store.set_data(self.graph, id, value)?; store.repeat_to(self.graph, client_type.opposite(), encode_update(command.clone())?)?; Response::Ok }), _ => Err(WsError::new(WsErrorKind::Protocol, format!("Expected Backend command, got {:?}", command)).into()) } } } #[derive(Copy,Clone)] enum ClientState{ Frontend(FrontendClient), Backend(BackendClient), AwaitingType } struct ServerHandler{ out: Sender, store: GraphStore, state: ClientState, addr: String } impl ServerHandler{ fn on_open_inner(&mut self, hs: Handshake) -> Result<()>{ if let Some(ip_addr) = hs.peer_addr { let ip_string = format!("{}", ip_addr); info!("{:>20} - connection {:?} established", ip_string, self.out.token()); self.addr = ip_string; } else{ debug!("Connection without IP address?"); } self.out.send( serde_json::to_string( &self.store.list())? )?; Ok(()) } fn on_message_inner(&mut self, msg: Message) -> Result<()> { use self::ClientState::*; use graph::Command::{FrontendAttach, BackendAttach}; let command = decode_command(msg)?; let response = match self.state.clone() { Frontend(client) => client.on_command(&self.out, &self.store, &command)?, Backend(client) => // different type for client than the above match client.on_command(&self.out, &self.store, &command)?, AwaitingType => { let out = &self.out; let store = &self.store; let state = match command{ FrontendAttach{ id } => Frontend(FrontendClient::on_open(out, store, id)?), BackendAttach { id } => { let id = match id{ Some(id) if self.store.contains_key(id) => id, Some(id) => self.store.empty_at(id), None => self.store.new_empty() }; Backend(BackendClient::on_open(out, store, id)?) }, _ => return Err(WsError::new(WsErrorKind::Protocol, "Expected FrontendAttach or BackendAttach, got something else").into()) }; self.state = state; Response::Ok } }; if response!= Response::Ok{ // don't generate Ok messages, they're pointless and hard to coordinate self.out.send(encode_response(response)?)? 
} Ok(()) } } impl Handler for ServerHandler{ fn on_open(&mut self, hs: Handshake) -> WsResult<()>{ self.on_open_inner(hs).map_err(|e|e.into()) } fn on_request(&mut self, req: &Request) -> WsResult<WsResponse> { let mut res = WsResponse::from_request(req)?; let protocol_name = "selenologist-node-editor"; res.set_protocol(protocol_name); Ok(res) } fn on_message(&mut self, msg: Message) -> WsResult<()> { self.on_message_inner(msg).map_err(|e|e.into()) } fn on_close(&mut self, code: CloseCode, reason: &str){ use self::ClientState::*; trace!("Closing connection {:?} because {:?} {}", self.addr, code, reason); match self.state{ Backend(BackendClient{ graph }) | Frontend(FrontendClient{ graph }) => { self.store.remove_listener(graph, self.out.token().0) .unwrap(); } _ => {} } } } #[derive(Default)]
} impl Factory for ServerFactory{ type Handler = ServerHandler; fn connection_made(&mut self, out: Sender) -> Self::Handler{ ServerHandler{ out, store: self.store.clone(), state: ClientState::AwaitingType, addr: "0.0.0.0:0".into() } } } pub fn launch_thread() -> JoinHandle<()> { thread::Builder::new() .name("websocket".into()) .spawn(move || { let mut factory = ServerFactory::default(); let listen_addr = "127.0.0.1:3001"; info!("Attempting to listen on {}", listen_addr); listen(listen_addr, |out| factory.connection_made(out)).unwrap() }).unwrap() }
struct ServerFactory{ store: GraphStore,
random_line_split
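For the `launch_thread` server above, a client only needs to open a websocket to 127.0.0.1:3001 and send an attach command as a JSON text frame. A minimal ws-rs client sketch; the exact JSON shape of `Command::FrontendAttach` depends on serde attributes in the unshown `graph` module, so the payload below is an assumption:

// Minimal ws-rs client for the server above (assumes the `ws` crate).
fn main() {
    ws::connect("ws://127.0.0.1:3001", |out: ws::Sender| {
        // On connect, attach as a frontend for graph 0 (hypothetical JSON shape).
        out.send(r#"{"FrontendAttach":{"id":0}}"#).unwrap();
        // The returned closure acts as the message handler.
        move |msg: ws::Message| {
            println!("server said: {}", msg);
            Ok(())
        }
    })
    .unwrap();
}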
websocket.rs
use serde_json; use ws::{listen, Handler, Factory, Sender, Handshake, Request, Response as WsResponse, Message, CloseCode}; use ws::{Error as WsError, ErrorKind as WsErrorKind, Result as WsResult}; use graph::{PossibleErr as GraphErr, *}; use std::thread; use std::thread::{JoinHandle}; use std::fmt; use std::result; enum PossibleErr{ Ws(WsError), String(String), GraphErr(GraphErr), JsonErr(serde_json::Error), Disp(Box<fmt::Display>), None } impl fmt::Display for PossibleErr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::PossibleErr::*; match *self{ Ws(ref w) => w.fmt(f), String(ref s) => s.fmt(f), GraphErr(ref g) => g.fmt(f), JsonErr(ref j) => j.fmt(f), Disp(ref d) => d.fmt(f), None => (None).fmt(f) } } } type Result<T> = result::Result<T, PossibleErr>; impl From<::std::option::NoneError> for PossibleErr{ fn from(_: ::std::option::NoneError) -> PossibleErr{ PossibleErr::None } } impl From<WsError> for PossibleErr{ fn from(g: WsError) -> PossibleErr{ PossibleErr::Ws(g) } } impl From<GraphErr> for PossibleErr{ fn from(g: GraphErr) -> PossibleErr{ PossibleErr::GraphErr(g) } } impl From<serde_json::Error> for PossibleErr{ fn from(j: serde_json::Error) -> PossibleErr{ PossibleErr::JsonErr(j) } } fn to_ws_err(e: PossibleErr, kind: WsErrorKind) -> WsError{ use self::GraphErr::Ws as GWs; use self::PossibleErr::*; match e{ Ws(w) | GraphErr(GWs(w)) => w, _ => WsError::new(kind, format!("{}", e)) } } fn to_ws(e: PossibleErr) -> WsError{ to_ws_err(e, WsErrorKind::Internal) } impl Into<WsError> for PossibleErr{ fn into(self) -> WsError{ to_ws(self) } } fn decode_command(msg: Message) -> Result<Command>{ match msg{ Message::Text(t) => { Ok(serde_json::from_str(&t[..])?) }, Message::Binary(..) => Err(WsError::new( WsErrorKind::Protocol, format!("binary message received where expecting text JSON")).into()) } } fn encode_response(response: Response) -> WsResult<Message>{ match serde_json::to_string(&response){ Ok(s) => Ok(Message::Text(s)), Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_response failed {:?}", e))) } } fn encode_update<T: Into<Update>>(update: T) -> WsResult<Message>{ match serde_json::to_string(&update.into()){ Ok(s) => Ok(Message::Text(s)), Err(e) => Err(WsError::new(WsErrorKind::Internal, format!("encode_update failed {:?}", e))) } } struct ClientCommon; impl ClientCommon{ fn on_open(out: &Sender, store: &GraphStore, id: GraphId, client_type: ClientType) -> Result<()>{ if let Ok(_) = store.attach(id, client_type, out.clone()){ trace!("Client supplied valid GraphId {}", id); Ok(()) } else{ let err = format!("GraphId {} does not exist", id); out.send(encode_response(Response::Error(DataValue::from(err.clone())))?)?; Ok(()) //Err(WsError::new(WsErrorKind::Protocol, err)) } } fn on_command(_out: &Sender, store: &GraphStore, command: &Command, graph: GraphId, _client_type: ClientType) -> Result<Option<Response>> { use graph::Command::*; let response = match *command{ AddLink{ source: ref from, target: ref to } => { let graph = store.get(graph)?; let result = graph.add_link(from, to); match result{ Response::Ok => { graph.repeat_to(ClientType::Both, encode_update(command.clone())?); Response::Ok } _ => result } }, _ => {return Ok(None)} }; Ok(Some(response)) } } #[derive(Copy,Clone)] struct FrontendClient{ graph: GraphId } impl FrontendClient{ fn on_open(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{ ClientCommon::on_open(out, store, id, ClientType::Frontend)?; trace!("Frontend attached to GraphId {}", id); out.send( encode_update( 
Command::SetGraph{ graph: store.get(id)?.data.clone() } )? )?; out.send( encode_update( Response::Warning("Test Warning".into()) )? )?; Ok(FrontendClient{ graph: id }) } fn on_command(&self, out: &Sender, store: &GraphStore, command: &Command) -> Result<Response> { //use graph::Command::*; if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, ClientType::Frontend)?{ return Ok(common); } match *command{ _ => Err(WsError::new( WsErrorKind::Protocol, format!("Expected Frontend command, got {:?}", command)).into()) } } } #[derive(Copy,Clone)] struct BackendClient{ graph: GraphId } impl BackendClient{ fn
(out: &Sender, store: &GraphStore, id: GraphId) -> Result<Self>{ ClientCommon::on_open(out, store, id, ClientType::Backend)?; trace!("Backend attached to GraphId {}", id); Ok(BackendClient{ graph: id }) } fn on_command(&self, out: &Sender, store: &GraphStore, command: &Command) -> Result<Response> { use graph::Command::*; let client_type = ClientType::Backend; if let Some(common) = ClientCommon::on_command(out, store, command, self.graph, client_type.clone())?{ return Ok(common); } match *command{ SetGraph{ ref graph } => Ok({ trace!("set graph {:?}", graph); store.set_graph(self.graph, graph.clone())?; store.repeat_to(self.graph, client_type.opposite(), encode_update(command.clone())?)?; Response::Ok }), SetData{ ref id, ref value } => Ok({ trace!("set data {:?} = {:?}", id, value); //store.set_data(self.graph, id, value)?; store.repeat_to(self.graph, client_type.opposite(), encode_update(command.clone())?)?; Response::Ok }), _ => Err(WsError::new(WsErrorKind::Protocol, format!("Expected Backend command, got {:?}", command)).into()) } } } #[derive(Copy,Clone)] enum ClientState{ Frontend(FrontendClient), Backend(BackendClient), AwaitingType } struct ServerHandler{ out: Sender, store: GraphStore, state: ClientState, addr: String } impl ServerHandler{ fn on_open_inner(&mut self, hs: Handshake) -> Result<()>{ if let Some(ip_addr) = hs.peer_addr { let ip_string = format!("{}", ip_addr); info!("{:>20} - connection {:?} established", ip_string, self.out.token()); self.addr = ip_string; } else{ debug!("Connection without IP address?"); } self.out.send( serde_json::to_string( &self.store.list())? )?; Ok(()) } fn on_message_inner(&mut self, msg: Message) -> Result<()> { use self::ClientState::*; use graph::Command::{FrontendAttach, BackendAttach}; let command = decode_command(msg)?; let response = match self.state.clone() { Frontend(client) => client.on_command(&self.out, &self.store, &command)?, Backend(client) => // different type for client than the above match client.on_command(&self.out, &self.store, &command)?, AwaitingType => { let out = &self.out; let store = &self.store; let state = match command{ FrontendAttach{ id } => Frontend(FrontendClient::on_open(out, store, id)?), BackendAttach { id } => { let id = match id{ Some(id) if self.store.contains_key(id) => id, Some(id) => self.store.empty_at(id), None => self.store.new_empty() }; Backend(BackendClient::on_open(out, store, id)?) }, _ => return Err(WsError::new(WsErrorKind::Protocol, "Expected FrontendAttach or BackendAttach, got something else").into()) }; self.state = state; Response::Ok } }; if response!= Response::Ok{ // don't generate Ok messages, they're pointless and hard to coordinate self.out.send(encode_response(response)?)? 
} Ok(()) } } impl Handler for ServerHandler{ fn on_open(&mut self, hs: Handshake) -> WsResult<()>{ self.on_open_inner(hs).map_err(|e|e.into()) } fn on_request(&mut self, req: &Request) -> WsResult<WsResponse> { let mut res = WsResponse::from_request(req)?; let protocol_name = "selenologist-node-editor"; res.set_protocol(protocol_name); Ok(res) } fn on_message(&mut self, msg: Message) -> WsResult<()> { self.on_message_inner(msg).map_err(|e|e.into()) } fn on_close(&mut self, code: CloseCode, reason: &str){ use self::ClientState::*; trace!("Closing connection {:?} because {:?} {}", self.addr, code, reason); match self.state{ Backend(BackendClient{ graph }) | Frontend(FrontendClient{ graph }) => { self.store.remove_listener(graph, self.out.token().0) .unwrap(); } _ => {} } } } #[derive(Default)] struct ServerFactory{ store: GraphStore, } impl Factory for ServerFactory{ type Handler = ServerHandler; fn connection_made(&mut self, out: Sender) -> Self::Handler{ ServerHandler{ out, store: self.store.clone(), state: ClientState::AwaitingType, addr: "0.0.0.0:0".into() } } } pub fn launch_thread() -> JoinHandle<()> { thread::Builder::new() .name("websocket".into()) .spawn(move || { let mut factory = ServerFactory::default(); let listen_addr = "127.0.0.1:3001"; info!("Attempting to listen on {}", listen_addr); listen(listen_addr, |out| factory.connection_made(out)).unwrap() }).unwrap() }
on_open
identifier_name
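The three websocket.rs records above all funnel errors the same way: the `*_inner` methods return the module-local `Result<T, PossibleErr>`, `?` converts each library error through a `From` impl, and the `Handler` trait methods convert back at the boundary with `map_err(|e| e.into())`. The pattern in miniature, with a reduced stand-in error type:

// Reduced sketch of the error-funneling pattern above; the real
// `PossibleErr` also carries ws and graph errors.
#[derive(Debug)]
enum PossibleErr {
    Json(serde_json::Error),
}

impl From<serde_json::Error> for PossibleErr {
    fn from(e: serde_json::Error) -> Self {
        PossibleErr::Json(e)
    }
}

fn inner(payload: &str) -> Result<u64, PossibleErr> {
    // `?` auto-converts serde_json::Error into PossibleErr via `From`.
    let v: u64 = serde_json::from_str(payload)?;
    Ok(v)
}

fn boundary(payload: &str) -> Result<u64, String> {
    // At the trait boundary the local error is mapped into the foreign
    // error type, mirroring `self.on_message_inner(msg).map_err(|e| e.into())`.
    inner(payload).map_err(|e| format!("{e:?}"))
}

fn main() {
    assert_eq!(boundary("42"), Ok(42));
    assert!(boundary("not json").is_err());
}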
v2.rs
use std::fs::{File, OpenOptions}; use std::io::Write; use std::path::{Path, PathBuf}; use serde_json::{Deserializer, Value}; use tempfile::NamedTempFile; use crate::index_controller::dump_actor::loaders::compat::{asc_ranking_rule, desc_ranking_rule}; use crate::index_controller::dump_actor::Metadata; use crate::index_controller::updates::status::{ Aborted, Enqueued, Failed, Processed, Processing, UpdateResult, UpdateStatus, }; use crate::index_controller::updates::store::dump::UpdateEntry; use crate::index_controller::updates::store::Update; use crate::options::IndexerOpts; use super::v3; /// The dump v2 reads the dump folder and patches all the needed files to make them compatible with a /// dump v3, then calls the dump v3 to actually handle the dump. pub fn load_dump( meta: Metadata, src: impl AsRef<Path>, dst: impl AsRef<Path>, index_db_size: usize, update_db_size: usize, indexing_options: &IndexerOpts, ) -> anyhow::Result<()>
patch_updates(update_dir, update_path)?; v3::load_dump( meta, src, dst, index_db_size, update_db_size, indexing_options, ) } fn patch_index_uuid_path(path: &Path) -> Option<PathBuf> { let uuid = path.file_name()?.to_str()?.trim_start_matches("index-"); let new_path = path.parent()?.join(uuid); Some(new_path) } fn patch_settings(path: impl AsRef<Path>) -> anyhow::Result<()> { let mut meta_file = File::open(&path)?; let mut meta: Value = serde_json::from_reader(&mut meta_file)?; // We first deserialize the dump meta into a serde_json::Value and change // the custom ranking rules settings from the old format to the new format. if let Some(ranking_rules) = meta.pointer_mut("/settings/rankingRules") { patch_custom_ranking_rules(ranking_rules); } let mut meta_file = OpenOptions::new().truncate(true).write(true).open(path)?; serde_json::to_writer(&mut meta_file, &meta)?; Ok(()) } fn patch_updates(dir: impl AsRef<Path>, path: impl AsRef<Path>) -> anyhow::Result<()> { let mut output_update_file = NamedTempFile::new_in(&dir)?; let update_file = File::open(&path)?; let stream = Deserializer::from_reader(update_file).into_iter::<compat::UpdateEntry>(); for update in stream { let update_entry = update?; let update_entry = UpdateEntry::from(update_entry); serde_json::to_writer(&mut output_update_file, &update_entry)?; output_update_file.write_all(b"\n")?; } output_update_file.flush()?; output_update_file.persist(path)?; Ok(()) } /// Converts the ranking rules from the format `asc(_)`, `desc(_)` to the format `_:asc`, `_:desc`. /// /// This is done for compatibility reasons, and to avoid a new dump version, /// since the new syntax was introduced soon after the new dump version. fn patch_custom_ranking_rules(ranking_rules: &mut Value) { *ranking_rules = match ranking_rules.take() { Value::Array(values) => values .into_iter() .filter_map(|value| match value { Value::String(s) if s.starts_with("asc") => asc_ranking_rule(&s) .map(|f| format!("{}:asc", f)) .map(Value::String), Value::String(s) if s.starts_with("desc") => desc_ranking_rule(&s) .map(|f| format!("{}:desc", f)) .map(Value::String), otherwise => Some(otherwise), }) .collect(), otherwise => otherwise, } } impl From<compat::UpdateEntry> for UpdateEntry { fn from(compat::UpdateEntry { uuid, update }: compat::UpdateEntry) -> Self { let update = match update { compat::UpdateStatus::Processing(meta) => UpdateStatus::Processing(meta.into()), compat::UpdateStatus::Enqueued(meta) => UpdateStatus::Enqueued(meta.into()), compat::UpdateStatus::Processed(meta) => UpdateStatus::Processed(meta.into()), compat::UpdateStatus::Aborted(meta) => UpdateStatus::Aborted(meta.into()), compat::UpdateStatus::Failed(meta) => UpdateStatus::Failed(meta.into()), }; Self { uuid, update } } } impl From<compat::Failed> for Failed { fn from(other: compat::Failed) -> Self { let compat::Failed { from, error, failed_at, } = other; Self { from: from.into(), msg: error.message, code: compat::error_code_from_str(&error.error_code) .expect("Invalid update: Invalid error code"), failed_at, } } } impl From<compat::Aborted> for Aborted { fn from(other: compat::Aborted) -> Self { let compat::Aborted { from, aborted_at } = other; Self { from: from.into(), aborted_at, } } } impl From<compat::Processing> for Processing { fn from(other: compat::Processing) -> Self { let compat::Processing { from, started_processing_at, } = other; Self { from: from.into(), started_processing_at, } } } impl From<compat::Enqueued> for Enqueued { fn from(other: compat::Enqueued) -> Self { let compat::Enqueued { 
update_id, meta, enqueued_at, content, } = other; let meta = match meta { compat::UpdateMeta::DocumentsAddition { method, primary_key, .. } => { Update::DocumentAddition { primary_key, method, // Just ignore if the uuid is not present. If it is needed later, an error will // be thrown. content_uuid: content.unwrap_or_default(), } } compat::UpdateMeta::ClearDocuments => Update::ClearDocuments, compat::UpdateMeta::DeleteDocuments { ids } => Update::DeleteDocuments(ids), compat::UpdateMeta::Settings(settings) => Update::Settings(settings), }; Self { update_id, meta, enqueued_at, } } } impl From<compat::Processed> for Processed { fn from(other: compat::Processed) -> Self { let compat::Processed { from, success, processed_at, } = other; Self { success: success.into(), processed_at, from: from.into(), } } } impl From<compat::UpdateResult> for UpdateResult { fn from(other: compat::UpdateResult) -> Self { match other { compat::UpdateResult::DocumentsAddition(r) => Self::DocumentsAddition(r), compat::UpdateResult::DocumentDeletion { deleted } => { Self::DocumentDeletion { deleted } } compat::UpdateResult::Other => Self::Other, } } } /// compat structures from pre-dump-v3 meilisearch mod compat { use anyhow::bail; use chrono::{DateTime, Utc}; use meilisearch_error::Code; use milli::update::{DocumentAdditionResult, IndexDocumentsMethod}; use serde::{Deserialize, Serialize}; use uuid::Uuid; use crate::index::{Settings, Unchecked}; #[derive(Serialize, Deserialize)] pub struct UpdateEntry { pub uuid: Uuid, pub update: UpdateStatus, } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum UpdateFormat { Json, } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum UpdateResult { DocumentsAddition(DocumentAdditionResult), DocumentDeletion { deleted: u64 }, Other, } #[allow(clippy::large_enum_variant)] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(tag = "type")] pub enum UpdateMeta { DocumentsAddition { method: IndexDocumentsMethod, format: UpdateFormat, primary_key: Option<String>, }, ClearDocuments, DeleteDocuments { ids: Vec<String>, }, Settings(Settings<Unchecked>), } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Enqueued { pub update_id: u64, pub meta: UpdateMeta, pub enqueued_at: DateTime<Utc>, pub content: Option<Uuid>, } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Processed { pub success: UpdateResult, pub processed_at: DateTime<Utc>, #[serde(flatten)] pub from: Processing, } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Processing { #[serde(flatten)] pub from: Enqueued, pub started_processing_at: DateTime<Utc>, } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Aborted { #[serde(flatten)] pub from: Enqueued, pub aborted_at: DateTime<Utc>, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Failed { #[serde(flatten)] pub from: Processing, pub error: ResponseError, pub failed_at: DateTime<Utc>, } #[derive(Debug, Serialize, Deserialize)] #[serde(tag = "status", rename_all = "camelCase")] pub enum UpdateStatus { Processing(Processing), Enqueued(Enqueued), Processed(Processed), Aborted(Aborted), Failed(Failed), } type StatusCode = (); #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct ResponseError { #[serde(skip)] pub code: StatusCode, pub message: String, pub error_code: String, pub error_type: String, pub error_link:
String, } pub fn error_code_from_str(s: &str) -> anyhow::Result<Code> { let code = match s { "index_creation_failed" => Code::CreateIndex, "index_already_exists" => Code::IndexAlreadyExists, "index_not_found" => Code::IndexNotFound, "invalid_index_uid" => Code::InvalidIndexUid, "index_not_accessible" => Code::OpenIndex, "invalid_state" => Code::InvalidState, "missing_primary_key" => Code::MissingPrimaryKey, "primary_key_already_present" => Code::PrimaryKeyAlreadyPresent, "invalid_request" => Code::InvalidRankingRule, "max_fields_limit_exceeded" => Code::MaxFieldsLimitExceeded, "missing_document_id" => Code::MissingDocumentId, "invalid_facet" => Code::Facet, "invalid_filter" => Code::Filter, "invalid_sort" => Code::Sort, "bad_parameter" => Code::BadParameter, "bad_request" => Code::BadRequest, "document_not_found" => Code::DocumentNotFound, "internal" => Code::Internal, "invalid_geo_field" => Code::InvalidGeoField, "invalid_token" => Code::InvalidToken, "missing_authorization_header" => Code::MissingAuthorizationHeader, "not_found" => Code::NotFound, "payload_too_large" => Code::PayloadTooLarge, "unretrievable_document" => Code::RetrieveDocument, "search_error" => Code::SearchDocuments, "unsupported_media_type" => Code::UnsupportedMediaType, "dump_already_in_progress" => Code::DumpAlreadyInProgress, "dump_process_failed" => Code::DumpProcessFailed, _ => bail!("unknown error code."), }; Ok(code) } }
{ let indexes_path = src.as_ref().join("indexes"); let dir_entries = std::fs::read_dir(indexes_path)?; for entry in dir_entries { let entry = entry?; // rename the index folder let path = entry.path(); let new_path = patch_index_uuid_path(&path).expect("invalid index folder."); std::fs::rename(path, &new_path)?; let settings_path = new_path.join("meta.json"); patch_settings(settings_path)?; } let update_dir = src.as_ref().join("updates"); let update_path = update_dir.join("data.jsonl");
identifier_body
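The completed `load_dump` body above renames each `index-<uuid>` folder, patches its settings, and rewrites the updates file. The settings patch hinges on `patch_custom_ranking_rules`; its effect, sketched standalone with a stand-in `asc_ranking_rule` parser (the real one lives in an unshown compat module):

// Standalone sketch of the ranking-rule rewrite above. `asc_ranking_rule`
// here is a stand-in that extracts the field name from `asc(field)`.
use serde_json::{json, Value};

fn asc_ranking_rule(s: &str) -> Option<&str> {
    s.strip_prefix("asc(")?.strip_suffix(")")
}

fn main() {
    let rules = json!(["words", "asc(price)", "typo"]);
    let patched: Value = rules
        .as_array()
        .unwrap()
        .iter()
        .map(|v| match v.as_str() {
            Some(s) if s.starts_with("asc") => {
                json!(format!("{}:asc", asc_ranking_rule(s).unwrap()))
            }
            _ => v.clone(),
        })
        .collect();
    // `asc(price)` becomes `price:asc`; everything else passes through.
    assert_eq!(patched, json!(["words", "price:asc", "typo"]));
}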
v2.rs
use std::fs::{File, OpenOptions}; use std::io::Write; use std::path::{Path, PathBuf}; use serde_json::{Deserializer, Value}; use tempfile::NamedTempFile; use crate::index_controller::dump_actor::loaders::compat::{asc_ranking_rule, desc_ranking_rule}; use crate::index_controller::dump_actor::Metadata; use crate::index_controller::updates::status::{ Aborted, Enqueued, Failed, Processed, Processing, UpdateResult, UpdateStatus, }; use crate::index_controller::updates::store::dump::UpdateEntry; use crate::index_controller::updates::store::Update; use crate::options::IndexerOpts; use super::v3; /// The dump v2 reads the dump folder and patches all the needed file to make it compatible with a /// dump v3, then calls the dump v3 to actually handle the dump. pub fn load_dump( meta: Metadata, src: impl AsRef<Path>, dst: impl AsRef<Path>, index_db_size: usize, update_db_size: usize, indexing_options: &IndexerOpts, ) -> anyhow::Result<()> { let indexes_path = src.as_ref().join("indexes"); let dir_entries = std::fs::read_dir(indexes_path)?; for entry in dir_entries { let entry = entry?; // rename the index folder let path = entry.path(); let new_path = patch_index_uuid_path(&path).expect("invalid index folder."); std::fs::rename(path, &new_path)?; let settings_path = new_path.join("meta.json"); patch_settings(settings_path)?; } let update_dir = src.as_ref().join("updates"); let update_path = update_dir.join("data.jsonl"); patch_updates(update_dir, update_path)?; v3::load_dump( meta, src, dst, index_db_size, update_db_size, indexing_options, ) } fn
(path: &Path) -> Option<PathBuf> { let uuid = path.file_name()?.to_str()?.trim_start_matches("index-"); let new_path = path.parent()?.join(uuid); Some(new_path) } fn patch_settings(path: impl AsRef<Path>) -> anyhow::Result<()> { let mut meta_file = File::open(&path)?; let mut meta: Value = serde_json::from_reader(&mut meta_file)?; // We first deserialize the dump meta into a serde_json::Value and change // the custom ranking rules settings from the old format to the new format. if let Some(ranking_rules) = meta.pointer_mut("/settings/rankingRules") { patch_custom_ranking_rules(ranking_rules); } let mut meta_file = OpenOptions::new().truncate(true).write(true).open(path)?; serde_json::to_writer(&mut meta_file, &meta)?; Ok(()) } fn patch_updates(dir: impl AsRef<Path>, path: impl AsRef<Path>) -> anyhow::Result<()> { let mut output_update_file = NamedTempFile::new_in(&dir)?; let update_file = File::open(&path)?; let stream = Deserializer::from_reader(update_file).into_iter::<compat::UpdateEntry>(); for update in stream { let update_entry = update?; let update_entry = UpdateEntry::from(update_entry); serde_json::to_writer(&mut output_update_file, &update_entry)?; output_update_file.write_all(b"\n")?; } output_update_file.flush()?; output_update_file.persist(path)?; Ok(()) } /// Converts the ranking rules from the format `asc(_)`, `desc(_)` to the format `_:asc`, `_:desc`. /// /// This is done for compatibility reasons, and to avoid a new dump version, /// since the new syntax was introduced soon after the new dump version. fn patch_custom_ranking_rules(ranking_rules: &mut Value) { *ranking_rules = match ranking_rules.take() { Value::Array(values) => values .into_iter() .filter_map(|value| match value { Value::String(s) if s.starts_with("asc") => asc_ranking_rule(&s) .map(|f| format!("{}:asc", f)) .map(Value::String), Value::String(s) if s.starts_with("desc") => desc_ranking_rule(&s) .map(|f| format!("{}:desc", f)) .map(Value::String), otherwise => Some(otherwise), }) .collect(), otherwise => otherwise, } } impl From<compat::UpdateEntry> for UpdateEntry { fn from(compat::UpdateEntry { uuid, update }: compat::UpdateEntry) -> Self { let update = match update { compat::UpdateStatus::Processing(meta) => UpdateStatus::Processing(meta.into()), compat::UpdateStatus::Enqueued(meta) => UpdateStatus::Enqueued(meta.into()), compat::UpdateStatus::Processed(meta) => UpdateStatus::Processed(meta.into()), compat::UpdateStatus::Aborted(meta) => UpdateStatus::Aborted(meta.into()), compat::UpdateStatus::Failed(meta) => UpdateStatus::Failed(meta.into()), }; Self { uuid, update } } } impl From<compat::Failed> for Failed { fn from(other: compat::Failed) -> Self { let compat::Failed { from, error, failed_at, } = other; Self { from: from.into(), msg: error.message, code: compat::error_code_from_str(&error.error_code) .expect("Invalid update: Invalid error code"), failed_at, } } } impl From<compat::Aborted> for Aborted { fn from(other: compat::Aborted) -> Self { let compat::Aborted { from, aborted_at } = other; Self { from: from.into(), aborted_at, } } } impl From<compat::Processing> for Processing { fn from(other: compat::Processing) -> Self { let compat::Processing { from, started_processing_at, } = other; Self { from: from.into(), started_processing_at, } } } impl From<compat::Enqueued> for Enqueued { fn from(other: compat::Enqueued) -> Self { let compat::Enqueued { update_id, meta, enqueued_at, content, } = other; let meta = match meta { compat::UpdateMeta::DocumentsAddition { method, primary_key, .. 
} => { Update::DocumentAddition { primary_key, method, // Just ignore if the uuid is no present. If it is needed later, an error will // be thrown. content_uuid: content.unwrap_or_default(), } } compat::UpdateMeta::ClearDocuments => Update::ClearDocuments, compat::UpdateMeta::DeleteDocuments { ids } => Update::DeleteDocuments(ids), compat::UpdateMeta::Settings(settings) => Update::Settings(settings), }; Self { update_id, meta, enqueued_at, } } } impl From<compat::Processed> for Processed { fn from(other: compat::Processed) -> Self { let compat::Processed { from, success, processed_at, } = other; Self { success: success.into(), processed_at, from: from.into(), } } } impl From<compat::UpdateResult> for UpdateResult { fn from(other: compat::UpdateResult) -> Self { match other { compat::UpdateResult::DocumentsAddition(r) => Self::DocumentsAddition(r), compat::UpdateResult::DocumentDeletion { deleted } => { Self::DocumentDeletion { deleted } } compat::UpdateResult::Other => Self::Other, } } } /// compat structure from pre-dumpv3 meilisearch mod compat { use anyhow::bail; use chrono::{DateTime, Utc}; use meilisearch_error::Code; use milli::update::{DocumentAdditionResult, IndexDocumentsMethod}; use serde::{Deserialize, Serialize}; use uuid::Uuid; use crate::index::{Settings, Unchecked}; #[derive(Serialize, Deserialize)] pub struct UpdateEntry { pub uuid: Uuid, pub update: UpdateStatus, } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum UpdateFormat { Json, } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum UpdateResult { DocumentsAddition(DocumentAdditionResult), DocumentDeletion { deleted: u64 }, Other, } #[allow(clippy::large_enum_variant)] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(tag = "type")] pub enum UpdateMeta { DocumentsAddition { method: IndexDocumentsMethod, format: UpdateFormat, primary_key: Option<String>, }, ClearDocuments, DeleteDocuments { ids: Vec<String>, }, Settings(Settings<Unchecked>), } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Enqueued { pub update_id: u64, pub meta: UpdateMeta, pub enqueued_at: DateTime<Utc>, pub content: Option<Uuid>, } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Processed { pub success: UpdateResult, pub processed_at: DateTime<Utc>, #[serde(flatten)] pub from: Processing, } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Processing { #[serde(flatten)] pub from: Enqueued, pub started_processing_at: DateTime<Utc>, } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Aborted { #[serde(flatten)] pub from: Enqueued, pub aborted_at: DateTime<Utc>, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Failed { #[serde(flatten)] pub from: Processing, pub error: ResponseError, pub failed_at: DateTime<Utc>, } #[derive(Debug, Serialize, Deserialize)] #[serde(tag = "status", rename_all = "camelCase")] pub enum UpdateStatus { Processing(Processing), Enqueued(Enqueued), Processed(Processed), Aborted(Aborted), Failed(Failed), } type StatusCode = (); #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct ResponseError { #[serde(skip)] pub code: StatusCode, pub message: String, pub error_code: String, pub error_type: String, pub error_link: String, } pub fn error_code_from_str(s: &str) -> anyhow::Result<Code> { let code = match s { "index_creation_failed" => Code::CreateIndex, 
"index_already_exists" => Code::IndexAlreadyExists, "index_not_found" => Code::IndexNotFound, "invalid_index_uid" => Code::InvalidIndexUid, "index_not_accessible" => Code::OpenIndex, "invalid_state" => Code::InvalidState, "missing_primary_key" => Code::MissingPrimaryKey, "primary_key_already_present" => Code::PrimaryKeyAlreadyPresent, "invalid_request" => Code::InvalidRankingRule, "max_fields_limit_exceeded" => Code::MaxFieldsLimitExceeded, "missing_document_id" => Code::MissingDocumentId, "invalid_facet" => Code::Facet, "invalid_filter" => Code::Filter, "invalid_sort" => Code::Sort, "bad_parameter" => Code::BadParameter, "bad_request" => Code::BadRequest, "document_not_found" => Code::DocumentNotFound, "internal" => Code::Internal, "invalid_geo_field" => Code::InvalidGeoField, "invalid_token" => Code::InvalidToken, "missing_authorization_header" => Code::MissingAuthorizationHeader, "not_found" => Code::NotFound, "payload_too_large" => Code::PayloadTooLarge, "unretrievable_document" => Code::RetrieveDocument, "search_error" => Code::SearchDocuments, "unsupported_media_type" => Code::UnsupportedMediaType, "dump_already_in_progress" => Code::DumpAlreadyInProgress, "dump_process_failed" => Code::DumpProcessFailed, _ => bail!("unknow error code."), }; Ok(code) } }
patch_index_uuid_path
identifier_name
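The identifier completed above, `patch_index_uuid_path`, is a pure path rewrite, so it is easy to exercise in isolation: the v2 layout's `indexes/index-<uuid>` directories become the bare `<uuid>` expected by v3.

// Standalone demonstration of the path rewrite above (hypothetical uuid).
use std::path::{Path, PathBuf};

fn patch_index_uuid_path(path: &Path) -> Option<PathBuf> {
    // Strip the `index-` prefix from the directory name, keep the parent.
    let uuid = path.file_name()?.to_str()?.trim_start_matches("index-");
    Some(path.parent()?.join(uuid))
}

fn main() {
    let old = Path::new("dump/indexes/index-3e7a-42");
    assert_eq!(
        patch_index_uuid_path(old),
        Some(PathBuf::from("dump/indexes/3e7a-42"))
    );
}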
v2.rs
use std::fs::{File, OpenOptions}; use std::io::Write; use std::path::{Path, PathBuf}; use serde_json::{Deserializer, Value}; use tempfile::NamedTempFile; use crate::index_controller::dump_actor::loaders::compat::{asc_ranking_rule, desc_ranking_rule}; use crate::index_controller::dump_actor::Metadata; use crate::index_controller::updates::status::{ Aborted, Enqueued, Failed, Processed, Processing, UpdateResult, UpdateStatus, }; use crate::index_controller::updates::store::dump::UpdateEntry; use crate::index_controller::updates::store::Update; use crate::options::IndexerOpts; use super::v3; /// The dump v2 reads the dump folder and patches all the needed file to make it compatible with a /// dump v3, then calls the dump v3 to actually handle the dump. pub fn load_dump( meta: Metadata, src: impl AsRef<Path>, dst: impl AsRef<Path>, index_db_size: usize, update_db_size: usize, indexing_options: &IndexerOpts, ) -> anyhow::Result<()> { let indexes_path = src.as_ref().join("indexes"); let dir_entries = std::fs::read_dir(indexes_path)?; for entry in dir_entries { let entry = entry?; // rename the index folder let path = entry.path(); let new_path = patch_index_uuid_path(&path).expect("invalid index folder."); std::fs::rename(path, &new_path)?; let settings_path = new_path.join("meta.json"); patch_settings(settings_path)?; } let update_dir = src.as_ref().join("updates"); let update_path = update_dir.join("data.jsonl"); patch_updates(update_dir, update_path)?; v3::load_dump( meta, src, dst, index_db_size, update_db_size, indexing_options, ) } fn patch_index_uuid_path(path: &Path) -> Option<PathBuf> { let uuid = path.file_name()?.to_str()?.trim_start_matches("index-"); let new_path = path.parent()?.join(uuid); Some(new_path) } fn patch_settings(path: impl AsRef<Path>) -> anyhow::Result<()> { let mut meta_file = File::open(&path)?; let mut meta: Value = serde_json::from_reader(&mut meta_file)?; // We first deserialize the dump meta into a serde_json::Value and change // the custom ranking rules settings from the old format to the new format. if let Some(ranking_rules) = meta.pointer_mut("/settings/rankingRules") { patch_custom_ranking_rules(ranking_rules); } let mut meta_file = OpenOptions::new().truncate(true).write(true).open(path)?; serde_json::to_writer(&mut meta_file, &meta)?; Ok(()) } fn patch_updates(dir: impl AsRef<Path>, path: impl AsRef<Path>) -> anyhow::Result<()> { let mut output_update_file = NamedTempFile::new_in(&dir)?; let update_file = File::open(&path)?; let stream = Deserializer::from_reader(update_file).into_iter::<compat::UpdateEntry>(); for update in stream { let update_entry = update?; let update_entry = UpdateEntry::from(update_entry); serde_json::to_writer(&mut output_update_file, &update_entry)?; output_update_file.write_all(b"\n")?; } output_update_file.flush()?; output_update_file.persist(path)?; Ok(()) } /// Converts the ranking rules from the format `asc(_)`, `desc(_)` to the format `_:asc`, `_:desc`. /// /// This is done for compatibility reasons, and to avoid a new dump version, /// since the new syntax was introduced soon after the new dump version. 
fn patch_custom_ranking_rules(ranking_rules: &mut Value) { *ranking_rules = match ranking_rules.take() { Value::Array(values) => values .into_iter() .filter_map(|value| match value { Value::String(s) if s.starts_with("asc") => asc_ranking_rule(&s) .map(|f| format!("{}:asc", f)) .map(Value::String), Value::String(s) if s.starts_with("desc") => desc_ranking_rule(&s) .map(|f| format!("{}:desc", f)) .map(Value::String), otherwise => Some(otherwise), }) .collect(), otherwise => otherwise, } } impl From<compat::UpdateEntry> for UpdateEntry { fn from(compat::UpdateEntry { uuid, update }: compat::UpdateEntry) -> Self { let update = match update { compat::UpdateStatus::Processing(meta) => UpdateStatus::Processing(meta.into()), compat::UpdateStatus::Enqueued(meta) => UpdateStatus::Enqueued(meta.into()), compat::UpdateStatus::Processed(meta) => UpdateStatus::Processed(meta.into()), compat::UpdateStatus::Aborted(meta) => UpdateStatus::Aborted(meta.into()), compat::UpdateStatus::Failed(meta) => UpdateStatus::Failed(meta.into()), }; Self { uuid, update } } } impl From<compat::Failed> for Failed { fn from(other: compat::Failed) -> Self { let compat::Failed { from, error, failed_at, } = other; Self { from: from.into(), msg: error.message, code: compat::error_code_from_str(&error.error_code) .expect("Invalid update: Invalid error code"), failed_at, } } } impl From<compat::Aborted> for Aborted { fn from(other: compat::Aborted) -> Self { let compat::Aborted { from, aborted_at } = other; Self { from: from.into(), aborted_at, } } } impl From<compat::Processing> for Processing { fn from(other: compat::Processing) -> Self { let compat::Processing { from, started_processing_at, } = other; Self { from: from.into(), started_processing_at, } } } impl From<compat::Enqueued> for Enqueued { fn from(other: compat::Enqueued) -> Self { let compat::Enqueued { update_id, meta, enqueued_at, content, } = other; let meta = match meta { compat::UpdateMeta::DocumentsAddition { method, primary_key, .. } => { Update::DocumentAddition { primary_key, method, // Just ignore if the uuid is no present. If it is needed later, an error will // be thrown. content_uuid: content.unwrap_or_default(), } } compat::UpdateMeta::ClearDocuments => Update::ClearDocuments, compat::UpdateMeta::DeleteDocuments { ids } => Update::DeleteDocuments(ids), compat::UpdateMeta::Settings(settings) => Update::Settings(settings), }; Self { update_id, meta, enqueued_at, } } } impl From<compat::Processed> for Processed { fn from(other: compat::Processed) -> Self { let compat::Processed { from, success, processed_at, } = other; Self { success: success.into(), processed_at, from: from.into(), } } }
compat::UpdateResult::DocumentsAddition(r) => Self::DocumentsAddition(r), compat::UpdateResult::DocumentDeletion { deleted } => { Self::DocumentDeletion { deleted } } compat::UpdateResult::Other => Self::Other, } } } /// compat structure from pre-dumpv3 meilisearch mod compat { use anyhow::bail; use chrono::{DateTime, Utc}; use meilisearch_error::Code; use milli::update::{DocumentAdditionResult, IndexDocumentsMethod}; use serde::{Deserialize, Serialize}; use uuid::Uuid; use crate::index::{Settings, Unchecked}; #[derive(Serialize, Deserialize)] pub struct UpdateEntry { pub uuid: Uuid, pub update: UpdateStatus, } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum UpdateFormat { Json, } #[derive(Debug, Clone, Serialize, Deserialize)] pub enum UpdateResult { DocumentsAddition(DocumentAdditionResult), DocumentDeletion { deleted: u64 }, Other, } #[allow(clippy::large_enum_variant)] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(tag = "type")] pub enum UpdateMeta { DocumentsAddition { method: IndexDocumentsMethod, format: UpdateFormat, primary_key: Option<String>, }, ClearDocuments, DeleteDocuments { ids: Vec<String>, }, Settings(Settings<Unchecked>), } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Enqueued { pub update_id: u64, pub meta: UpdateMeta, pub enqueued_at: DateTime<Utc>, pub content: Option<Uuid>, } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Processed { pub success: UpdateResult, pub processed_at: DateTime<Utc>, #[serde(flatten)] pub from: Processing, } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Processing { #[serde(flatten)] pub from: Enqueued, pub started_processing_at: DateTime<Utc>, } #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct Aborted { #[serde(flatten)] pub from: Enqueued, pub aborted_at: DateTime<Utc>, } #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Failed { #[serde(flatten)] pub from: Processing, pub error: ResponseError, pub failed_at: DateTime<Utc>, } #[derive(Debug, Serialize, Deserialize)] #[serde(tag = "status", rename_all = "camelCase")] pub enum UpdateStatus { Processing(Processing), Enqueued(Enqueued), Processed(Processed), Aborted(Aborted), Failed(Failed), } type StatusCode = (); #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct ResponseError { #[serde(skip)] pub code: StatusCode, pub message: String, pub error_code: String, pub error_type: String, pub error_link: String, } pub fn error_code_from_str(s: &str) -> anyhow::Result<Code> { let code = match s { "index_creation_failed" => Code::CreateIndex, "index_already_exists" => Code::IndexAlreadyExists, "index_not_found" => Code::IndexNotFound, "invalid_index_uid" => Code::InvalidIndexUid, "index_not_accessible" => Code::OpenIndex, "invalid_state" => Code::InvalidState, "missing_primary_key" => Code::MissingPrimaryKey, "primary_key_already_present" => Code::PrimaryKeyAlreadyPresent, "invalid_request" => Code::InvalidRankingRule, "max_fields_limit_exceeded" => Code::MaxFieldsLimitExceeded, "missing_document_id" => Code::MissingDocumentId, "invalid_facet" => Code::Facet, "invalid_filter" => Code::Filter, "invalid_sort" => Code::Sort, "bad_parameter" => Code::BadParameter, "bad_request" => Code::BadRequest, "document_not_found" => Code::DocumentNotFound, "internal" => Code::Internal, "invalid_geo_field" => Code::InvalidGeoField, 
"invalid_token" => Code::InvalidToken, "missing_authorization_header" => Code::MissingAuthorizationHeader, "not_found" => Code::NotFound, "payload_too_large" => Code::PayloadTooLarge, "unretrievable_document" => Code::RetrieveDocument, "search_error" => Code::SearchDocuments, "unsupported_media_type" => Code::UnsupportedMediaType, "dump_already_in_progress" => Code::DumpAlreadyInProgress, "dump_process_failed" => Code::DumpProcessFailed, _ => bail!("unknow error code."), }; Ok(code) } }
impl From<compat::UpdateResult> for UpdateResult { fn from(other: compat::UpdateResult) -> Self { match other {
random_line_split
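`patch_updates` in the records above streams the v2 `data.jsonl` with serde_json's `Deserializer::into_iter`, which yields one value per concatenated JSON document. The reading half of that pattern, self-contained:

// The JSONL streaming pattern used by `patch_updates`, reduced to a
// runnable example over untyped `Value`s.
use serde_json::{Deserializer, Value};

fn main() {
    let data = r#"{"uuid":"a","update":1}
{"uuid":"b","update":2}"#;
    // One item per concatenated JSON document in the input.
    let stream = Deserializer::from_str(data).into_iter::<Value>();
    for entry in stream {
        let entry = entry.expect("malformed update entry");
        println!("uuid = {}", entry["uuid"]);
    }
}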
lib.rs
//! This crate contains structures and generators for specifying how to generate //! historical and real-time test data for Delorean. The rules for how to //! generate data and what shape it should take can be specified in a TOML file. //! //! Generators can output in line protocol, Parquet, or can be used to generate //! real-time load on a server that implements the [InfluxDB 2.0 write //! path][write-api]. //! //! [write-api]: https://v2.docs.influxdata.com/v2.0/api/#tag/Write //! //! While this generator could be compared to [the Go based one that creates TSM //! data][go-gen], its purpose is meant to be more far reaching. In addition to //! generating historical data, it should be useful for generating data in a //! sequence as you would expect it to arrive in a production environment. That //! means many agents sending data with their different tags and timestamps. //! //! [go-gen]: https://github.com/influxdata/influxdb/pull/12710 #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] #![warn( missing_copy_implementations, missing_debug_implementations, missing_docs, clippy::explicit_iter_loop, clippy::future_not_send, clippy::use_self, clippy::clone_on_ref_ptr )] use crate::substitution::Substitute; use rand::Rng; use rand_seeder::Seeder; use snafu::{ResultExt, Snafu}; use std::{ convert::TryFrom, time::{SystemTime, UNIX_EPOCH}, }; pub mod agent; pub mod field; pub mod measurement; pub mod specification; pub mod substitution; pub mod tag; mod tag_set; pub mod write; /// Errors that may happen while generating points. #[derive(Snafu, Debug)] pub enum Error { /// Error that may happen when waiting on a tokio task #[snafu(display("Could not join tokio task: {}", source))] TokioError { /// Underlying tokio error that caused this problem source: tokio::task::JoinError, }, /// Error that may happen when constructing an agent name #[snafu(display("Could not create agent name, caused by:\n{}", source))] CouldNotCreateAgentName { /// Underlying `substitution` module error that caused this problem source: substitution::Error, }, /// Error that may happen when an agent generates points #[snafu(display("Agent could not generate points, caused by:\n{}", source))] AgentCouldNotGeneratePoints { /// Underlying `agent` module error that caused this problem source: agent::Error, }, /// Error that may happen when creating agents #[snafu(display("Could not create agent `{}`, caused by:\n{}", name, source))] CouldNotCreateAgent { /// The name of the relevant agent name: String, /// Underlying `agent` module error that caused this problem source: agent::Error, }, /// Error that may happen when constructing an agent's writer #[snafu(display("Could not create writer for agent `{}`, caused by:\n{}", name, source))] CouldNotCreateAgentWriter { /// The name of the relevant agent name: String, /// Underlying `write` module error that caused this problem source: write::Error, }, } type Result<T, E = Error> = std::result::Result<T, E>; /// Generate data from the configuration in the spec. /// /// Provide a writer that the line protocol should be written to. /// /// If `start_datetime` or `end_datetime` are `None`, the current datetime will /// be used. 
pub async fn generate<T: DataGenRng>( spec: &specification::DataSpec, points_writer_builder: &mut write::PointsWriterBuilder, start_datetime: Option<i64>, end_datetime: Option<i64>, execution_start_time: i64, continue_on: bool, batch_size: usize, ) -> Result<usize> { let seed = spec.base_seed.to_owned().unwrap_or_else(|| { let mut rng = rand::thread_rng(); format!("{:04}", rng.gen_range(0..10000)) }); let mut handles = vec![]; // for each agent specification for agent_spec in &spec.agents { // create iterators to `cycle` through for `agent_spec.tags` let tag_set_iterator = tag::AgentTagIterator::new(&agent_spec.tags); // create `count` number of agent instances, or 1 agent if no count is specified let n_agents = agent_spec.count.unwrap_or(1); for (agent_id, mut agent_tags) in tag_set_iterator.take(n_agents).enumerate() { let agent_name = Substitute::once(&agent_spec.name, &[("agent_id", &agent_id.to_string())]) .context(CouldNotCreateAgentName)?; agent_tags.push(tag::Tag::new("data_spec", &spec.name)); if let Some(name_tag_key) = &agent_spec.name_tag_key { agent_tags.push(tag::Tag::new(name_tag_key, &agent_name)); } let mut agent = agent::Agent::<T>::new( agent_spec, &agent_name, agent_id, &seed, agent_tags, start_datetime, end_datetime, execution_start_time, continue_on, ) .context(CouldNotCreateAgent { name: &agent_name })?; let agent_points_writer = points_writer_builder .build_for_agent(&agent_name) .context(CouldNotCreateAgentWriter { name: &agent_name })?; handles.push(tokio::task::spawn(async move { agent.generate_all(agent_points_writer, batch_size).await })); } } let mut total_points = 0; for handle in handles { total_points += handle .await .context(TokioError)? .context(AgentCouldNotGeneratePoints)?; } Ok(total_points) } /// Shorthand trait for the functionality this crate needs a random number generator to have pub trait DataGenRng: rand::Rng + rand::SeedableRng + Send +'static {} impl<T: rand::Rng + rand::SeedableRng + Send +'static> DataGenRng for T {} /// Encapsulating the creation of an optionally-seedable random number generator /// to make this easy to change. Uses a 4-digit number expressed as a `String` /// as the seed type to enable easy creation of another instance using the same /// seed. #[derive(Debug)] pub struct RandomNumberGenerator<T: DataGenRng> { rng: T, /// The seed used for this instance. pub seed: String, } impl<T: DataGenRng> Default for RandomNumberGenerator<T> { fn default() -> Self { let mut rng = rand::thread_rng(); let seed = format!("{:04}", rng.gen_range(0..10000)); Self::new(seed) } } impl<T: DataGenRng> RandomNumberGenerator<T> { /// Create a new instance using the specified seed. 
pub fn new(seed: impl Into<String>) -> Self { let seed = seed.into(); Self { rng: Seeder::from(&seed).make_rng(), seed, } } /// Generate a random GUID pub fn guid(&mut self) -> uuid::Uuid { let mut bytes = [0u8; 16]; self.rng.fill_bytes(&mut bytes); uuid::Builder::from_bytes(bytes) .set_variant(uuid::Variant::RFC4122) .set_version(uuid::Version::Random) .build() } } impl<T: DataGenRng> rand::RngCore for RandomNumberGenerator<T> { fn next_u32(&mut self) -> u32 { self.rng.next_u32() } fn next_u64(&mut self) -> u64 { self.rng.next_u64() } fn fill_bytes(&mut self, dest: &mut [u8]) { self.rng.fill_bytes(dest); } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> { self.rng.try_fill_bytes(dest) } } /// Gets the current time in nanoseconds since the epoch pub fn now_ns() -> i64 { let since_the_epoch = SystemTime::now() .duration_since(UNIX_EPOCH) .expect("Time went backwards"); i64::try_from(since_the_epoch.as_nanos()).expect("Time does not fit") } // Always returns 0. #[cfg(test)] #[derive(Default)] struct ZeroRng; #[cfg(test)] impl rand::RngCore for ZeroRng { fn next_u32(&mut self) -> u32 { self.next_u64() as u32 } fn next_u64(&mut self) -> u64 { 0 } fn fill_bytes(&mut self, dest: &mut [u8]) { rand_core::impls::fill_bytes_via_next(self, dest) } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> { self.fill_bytes(dest); Ok(()) } } #[cfg(test)] impl rand::SeedableRng for ZeroRng { type Seed = Vec<u8>; // Ignore the seed value fn from_seed(_seed: Self::Seed) -> Self { Self } } // The test rng ignores the seed anyway, so the seed specified doesn't matter. #[cfg(test)] const TEST_SEED: &str = ""; #[cfg(test)] fn test_rng() -> RandomNumberGenerator<ZeroRng> { RandomNumberGenerator::<ZeroRng>::new(TEST_SEED) } // A random number type that does *not* have a predictable sequence of values for use in tests // that assert on properties rather than exact values. Aliased for convenience in changing to // a different Rng type. 
#[cfg(test)] type DynamicRng = rand::rngs::SmallRng; #[cfg(test)] mod test { use super::*; use crate::specification::*; use influxdb2_client::models::WriteDataPoint; use std::str::FromStr; type Error = Box<dyn std::error::Error>; type Result<T = (), E = Error> = std::result::Result<T, E>; #[tokio::test] async fn historical_data_sampling_interval() -> Result<()> { let toml = r#" name = "demo_schema" [[agents]] name = "basic" sampling_interval = "10s" # seconds [[agents.measurements]] name = "cpu" [[agents.measurements.fields]] name = "up" bool = true"#; let data_spec = DataSpec::from_str(toml).unwrap(); let agent_id = 0; let agent_spec = &data_spec.agents[0]; // Take agent_tags out of the equation for the purposes of this test let agent_tags = vec![]; let execution_start_time = now_ns(); // imagine we've specified at the command line that we want to generate metrics // for 1970 let start_datetime = Some(0); // for the first 15 seconds of the year let end_datetime = Some(15 * 1_000_000_000); let mut agent = agent::Agent::<ZeroRng>::new( agent_spec, &agent_spec.name, agent_id, TEST_SEED, agent_tags, start_datetime, end_datetime, execution_start_time, false, )?; let data_points = agent.generate().await?; let mut v = Vec::new(); for data_point in data_points { data_point.write_data_point_to(&mut v).unwrap(); } let line_protocol = String::from_utf8(v).unwrap(); // Get a point for time 0 let expected_line_protocol = "cpu up=f 0\n"; assert_eq!(line_protocol, expected_line_protocol); let data_points = agent.generate().await?; let mut v = Vec::new(); for data_point in data_points { data_point.write_data_point_to(&mut v).unwrap(); } let line_protocol = String::from_utf8(v).unwrap(); // Get a point for time 10s
let data_points = agent.generate().await?; assert!( data_points.is_empty(), "expected no data points, got {:?}", data_points ); Ok(()) } }
let expected_line_protocol = "cpu up=f 10000000000\n"; assert_eq!(line_protocol, expected_line_protocol); // Don't get any points anymore because we're past the ending datetime
random_line_split
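The `RandomNumberGenerator` wrapper in the row above keeps its 4-digit string seed precisely so a second instance can replay the same stream. A minimal usage sketch, assuming the wrapper is in scope, instantiated with rand's `SmallRng` (behind rand's `small_rng` feature), which satisfies the `DataGenRng` bounds:

// Hedged usage sketch: same string seed => same random stream, which is
// what makes generated datasets reproducible across runs. Assumes the
// crate's `RandomNumberGenerator` shown above is in scope.
use rand::rngs::SmallRng;
use rand::RngCore;

fn demo() {
    let mut a = RandomNumberGenerator::<SmallRng>::new("0042");
    let mut b = RandomNumberGenerator::<SmallRng>::new("0042");
    assert_eq!(a.next_u64(), b.next_u64());
    assert_eq!(a.next_u64(), b.next_u64());
}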
lib.rs
//! This crate contains structures and generators for specifying how to generate //! historical and real-time test data for Delorean. The rules for how to //! generate data and what shape it should take can be specified in a TOML file. //! //! Generators can output in line protocol, Parquet, or can be used to generate //! real-time load on a server that implements the [InfluxDB 2.0 write //! path][write-api]. //! //! [write-api]: https://v2.docs.influxdata.com/v2.0/api/#tag/Write //! //! While this generator could be compared to [the Go based one that creates TSM //! data][go-gen], its purpose is meant to be more far reaching. In addition to //! generating historical data, it should be useful for generating data in a //! sequence as you would expect it to arrive in a production environment. That //! means many agents sending data with their different tags and timestamps. //! //! [go-gen]: https://github.com/influxdata/influxdb/pull/12710 #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] #![warn( missing_copy_implementations, missing_debug_implementations, missing_docs, clippy::explicit_iter_loop, clippy::future_not_send, clippy::use_self, clippy::clone_on_ref_ptr )] use crate::substitution::Substitute; use rand::Rng; use rand_seeder::Seeder; use snafu::{ResultExt, Snafu}; use std::{ convert::TryFrom, time::{SystemTime, UNIX_EPOCH}, }; pub mod agent; pub mod field; pub mod measurement; pub mod specification; pub mod substitution; pub mod tag; mod tag_set; pub mod write; /// Errors that may happen while generating points. #[derive(Snafu, Debug)] pub enum Error { /// Error that may happen when waiting on a tokio task #[snafu(display("Could not join tokio task: {}", source))] TokioError { /// Underlying tokio error that caused this problem source: tokio::task::JoinError, }, /// Error that may happen when constructing an agent name #[snafu(display("Could not create agent name, caused by:\n{}", source))] CouldNotCreateAgentName { /// Underlying `substitution` module error that caused this problem source: substitution::Error, }, /// Error that may happen when an agent generates points #[snafu(display("Agent could not generate points, caused by:\n{}", source))] AgentCouldNotGeneratePoints { /// Underlying `agent` module error that caused this problem source: agent::Error, }, /// Error that may happen when creating agents #[snafu(display("Could not create agent `{}`, caused by:\n{}", name, source))] CouldNotCreateAgent { /// The name of the relevant agent name: String, /// Underlying `agent` module error that caused this problem source: agent::Error, }, /// Error that may happen when constructing an agent's writer #[snafu(display("Could not create writer for agent `{}`, caused by:\n{}", name, source))] CouldNotCreateAgentWriter { /// The name of the relevant agent name: String, /// Underlying `write` module error that caused this problem source: write::Error, }, } type Result<T, E = Error> = std::result::Result<T, E>; /// Generate data from the configuration in the spec. /// /// Provide a writer that the line protocol should be written to. /// /// If `start_datetime` or `end_datetime` are `None`, the current datetime will /// be used. 
pub async fn generate<T: DataGenRng>( spec: &specification::DataSpec, points_writer_builder: &mut write::PointsWriterBuilder, start_datetime: Option<i64>, end_datetime: Option<i64>, execution_start_time: i64, continue_on: bool, batch_size: usize, ) -> Result<usize> { let seed = spec.base_seed.to_owned().unwrap_or_else(|| { let mut rng = rand::thread_rng(); format!("{:04}", rng.gen_range(0..10000)) }); let mut handles = vec![]; // for each agent specification for agent_spec in &spec.agents { // create iterators to `cycle` through for `agent_spec.tags` let tag_set_iterator = tag::AgentTagIterator::new(&agent_spec.tags); // create `count` number of agent instances, or 1 agent if no count is specified let n_agents = agent_spec.count.unwrap_or(1); for (agent_id, mut agent_tags) in tag_set_iterator.take(n_agents).enumerate() { let agent_name = Substitute::once(&agent_spec.name, &[("agent_id", &agent_id.to_string())]) .context(CouldNotCreateAgentName)?; agent_tags.push(tag::Tag::new("data_spec", &spec.name)); if let Some(name_tag_key) = &agent_spec.name_tag_key { agent_tags.push(tag::Tag::new(name_tag_key, &agent_name)); } let mut agent = agent::Agent::<T>::new( agent_spec, &agent_name, agent_id, &seed, agent_tags, start_datetime, end_datetime, execution_start_time, continue_on, ) .context(CouldNotCreateAgent { name: &agent_name })?; let agent_points_writer = points_writer_builder .build_for_agent(&agent_name) .context(CouldNotCreateAgentWriter { name: &agent_name })?; handles.push(tokio::task::spawn(async move { agent.generate_all(agent_points_writer, batch_size).await })); } } let mut total_points = 0; for handle in handles { total_points += handle .await .context(TokioError)? .context(AgentCouldNotGeneratePoints)?; } Ok(total_points) } /// Shorthand trait for the functionality this crate needs a random number generator to have pub trait DataGenRng: rand::Rng + rand::SeedableRng + Send +'static {} impl<T: rand::Rng + rand::SeedableRng + Send +'static> DataGenRng for T {} /// Encapsulating the creation of an optionally-seedable random number generator /// to make this easy to change. Uses a 4-digit number expressed as a `String` /// as the seed type to enable easy creation of another instance using the same /// seed. #[derive(Debug)] pub struct RandomNumberGenerator<T: DataGenRng> { rng: T, /// The seed used for this instance. pub seed: String, } impl<T: DataGenRng> Default for RandomNumberGenerator<T> { fn default() -> Self { let mut rng = rand::thread_rng(); let seed = format!("{:04}", rng.gen_range(0..10000)); Self::new(seed) } } impl<T: DataGenRng> RandomNumberGenerator<T> { /// Create a new instance using the specified seed. pub fn new(seed: impl Into<String>) -> Self { let seed = seed.into(); Self { rng: Seeder::from(&seed).make_rng(), seed, } } /// Generate a random GUID pub fn guid(&mut self) -> uuid::Uuid { let mut bytes = [0u8; 16]; self.rng.fill_bytes(&mut bytes); uuid::Builder::from_bytes(bytes) .set_variant(uuid::Variant::RFC4122) .set_version(uuid::Version::Random) .build() } } impl<T: DataGenRng> rand::RngCore for RandomNumberGenerator<T> { fn next_u32(&mut self) -> u32 { self.rng.next_u32() } fn next_u64(&mut self) -> u64 { self.rng.next_u64() } fn
(&mut self, dest: &mut [u8]) { self.rng.fill_bytes(dest); } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> { self.rng.try_fill_bytes(dest) } } /// Gets the current time in nanoseconds since the epoch pub fn now_ns() -> i64 { let since_the_epoch = SystemTime::now() .duration_since(UNIX_EPOCH) .expect("Time went backwards"); i64::try_from(since_the_epoch.as_nanos()).expect("Time does not fit") } // Always returns 0. #[cfg(test)] #[derive(Default)] struct ZeroRng; #[cfg(test)] impl rand::RngCore for ZeroRng { fn next_u32(&mut self) -> u32 { self.next_u64() as u32 } fn next_u64(&mut self) -> u64 { 0 } fn fill_bytes(&mut self, dest: &mut [u8]) { rand_core::impls::fill_bytes_via_next(self, dest) } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> { self.fill_bytes(dest); Ok(()) } } #[cfg(test)] impl rand::SeedableRng for ZeroRng { type Seed = Vec<u8>; // Ignore the seed value fn from_seed(_seed: Self::Seed) -> Self { Self } } // The test rng ignores the seed anyway, so the seed specified doesn't matter. #[cfg(test)] const TEST_SEED: &str = ""; #[cfg(test)] fn test_rng() -> RandomNumberGenerator<ZeroRng> { RandomNumberGenerator::<ZeroRng>::new(TEST_SEED) } // A random number type that does *not* have a predictable sequence of values for use in tests // that assert on properties rather than exact values. Aliased for convenience in changing to // a different Rng type. #[cfg(test)] type DynamicRng = rand::rngs::SmallRng; #[cfg(test)] mod test { use super::*; use crate::specification::*; use influxdb2_client::models::WriteDataPoint; use std::str::FromStr; type Error = Box<dyn std::error::Error>; type Result<T = (), E = Error> = std::result::Result<T, E>; #[tokio::test] async fn historical_data_sampling_interval() -> Result<()> { let toml = r#" name = "demo_schema" [[agents]] name = "basic" sampling_interval = "10s" # seconds [[agents.measurements]] name = "cpu" [[agents.measurements.fields]] name = "up" bool = true"#; let data_spec = DataSpec::from_str(toml).unwrap(); let agent_id = 0; let agent_spec = &data_spec.agents[0]; // Take agent_tags out of the equation for the purposes of this test let agent_tags = vec![]; let execution_start_time = now_ns(); // imagine we've specified at the command line that we want to generate metrics // for 1970 let start_datetime = Some(0); // for the first 15 seconds of the year let end_datetime = Some(15 * 1_000_000_000); let mut agent = agent::Agent::<ZeroRng>::new( agent_spec, &agent_spec.name, agent_id, TEST_SEED, agent_tags, start_datetime, end_datetime, execution_start_time, false, )?; let data_points = agent.generate().await?; let mut v = Vec::new(); for data_point in data_points { data_point.write_data_point_to(&mut v).unwrap(); } let line_protocol = String::from_utf8(v).unwrap(); // Get a point for time 0 let expected_line_protocol = "cpu up=f 0\n"; assert_eq!(line_protocol, expected_line_protocol); let data_points = agent.generate().await?; let mut v = Vec::new(); for data_point in data_points { data_point.write_data_point_to(&mut v).unwrap(); } let line_protocol = String::from_utf8(v).unwrap(); // Get a point for time 10s let expected_line_protocol = "cpu up=f 10000000000\n"; assert_eq!(line_protocol, expected_line_protocol); // Don't get any points anymore because we're past the ending datetime let data_points = agent.generate().await?; assert!( data_points.is_empty(), "expected no data points, got {:?}", data_points ); Ok(()) } }
fill_bytes
identifier_name
lib.rs
//! This crate contains structures and generators for specifying how to generate //! historical and real-time test data for Delorean. The rules for how to //! generate data and what shape it should take can be specified in a TOML file. //! //! Generators can output in line protocol, Parquet, or can be used to generate //! real-time load on a server that implements the [InfluxDB 2.0 write //! path][write-api]. //! //! [write-api]: https://v2.docs.influxdata.com/v2.0/api/#tag/Write //! //! While this generator could be compared to [the Go based one that creates TSM //! data][go-gen], its purpose is meant to be more far reaching. In addition to //! generating historical data, it should be useful for generating data in a //! sequence as you would expect it to arrive in a production environment. That //! means many agents sending data with their different tags and timestamps. //! //! [go-gen]: https://github.com/influxdata/influxdb/pull/12710 #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] #![warn( missing_copy_implementations, missing_debug_implementations, missing_docs, clippy::explicit_iter_loop, clippy::future_not_send, clippy::use_self, clippy::clone_on_ref_ptr )] use crate::substitution::Substitute; use rand::Rng; use rand_seeder::Seeder; use snafu::{ResultExt, Snafu}; use std::{ convert::TryFrom, time::{SystemTime, UNIX_EPOCH}, }; pub mod agent; pub mod field; pub mod measurement; pub mod specification; pub mod substitution; pub mod tag; mod tag_set; pub mod write; /// Errors that may happen while generating points. #[derive(Snafu, Debug)] pub enum Error { /// Error that may happen when waiting on a tokio task #[snafu(display("Could not join tokio task: {}", source))] TokioError { /// Underlying tokio error that caused this problem source: tokio::task::JoinError, }, /// Error that may happen when constructing an agent name #[snafu(display("Could not create agent name, caused by:\n{}", source))] CouldNotCreateAgentName { /// Underlying `substitution` module error that caused this problem source: substitution::Error, }, /// Error that may happen when an agent generates points #[snafu(display("Agent could not generate points, caused by:\n{}", source))] AgentCouldNotGeneratePoints { /// Underlying `agent` module error that caused this problem source: agent::Error, }, /// Error that may happen when creating agents #[snafu(display("Could not create agent `{}`, caused by:\n{}", name, source))] CouldNotCreateAgent { /// The name of the relevant agent name: String, /// Underlying `agent` module error that caused this problem source: agent::Error, }, /// Error that may happen when constructing an agent's writer #[snafu(display("Could not create writer for agent `{}`, caused by:\n{}", name, source))] CouldNotCreateAgentWriter { /// The name of the relevant agent name: String, /// Underlying `write` module error that caused this problem source: write::Error, }, } type Result<T, E = Error> = std::result::Result<T, E>; /// Generate data from the configuration in the spec. /// /// Provide a writer that the line protocol should be written to. /// /// If `start_datetime` or `end_datetime` are `None`, the current datetime will /// be used. 
pub async fn generate<T: DataGenRng>( spec: &specification::DataSpec, points_writer_builder: &mut write::PointsWriterBuilder, start_datetime: Option<i64>, end_datetime: Option<i64>, execution_start_time: i64, continue_on: bool, batch_size: usize, ) -> Result<usize> { let seed = spec.base_seed.to_owned().unwrap_or_else(|| { let mut rng = rand::thread_rng(); format!("{:04}", rng.gen_range(0..10000)) }); let mut handles = vec![]; // for each agent specification for agent_spec in &spec.agents { // create iterators to `cycle` through for `agent_spec.tags` let tag_set_iterator = tag::AgentTagIterator::new(&agent_spec.tags); // create `count` number of agent instances, or 1 agent if no count is specified let n_agents = agent_spec.count.unwrap_or(1); for (agent_id, mut agent_tags) in tag_set_iterator.take(n_agents).enumerate() { let agent_name = Substitute::once(&agent_spec.name, &[("agent_id", &agent_id.to_string())]) .context(CouldNotCreateAgentName)?; agent_tags.push(tag::Tag::new("data_spec", &spec.name)); if let Some(name_tag_key) = &agent_spec.name_tag_key { agent_tags.push(tag::Tag::new(name_tag_key, &agent_name)); } let mut agent = agent::Agent::<T>::new( agent_spec, &agent_name, agent_id, &seed, agent_tags, start_datetime, end_datetime, execution_start_time, continue_on, ) .context(CouldNotCreateAgent { name: &agent_name })?; let agent_points_writer = points_writer_builder .build_for_agent(&agent_name) .context(CouldNotCreateAgentWriter { name: &agent_name })?; handles.push(tokio::task::spawn(async move { agent.generate_all(agent_points_writer, batch_size).await })); } } let mut total_points = 0; for handle in handles { total_points += handle .await .context(TokioError)? .context(AgentCouldNotGeneratePoints)?; } Ok(total_points) } /// Shorthand trait for the functionality this crate needs a random number generator to have pub trait DataGenRng: rand::Rng + rand::SeedableRng + Send +'static {} impl<T: rand::Rng + rand::SeedableRng + Send +'static> DataGenRng for T {} /// Encapsulating the creation of an optionally-seedable random number generator /// to make this easy to change. Uses a 4-digit number expressed as a `String` /// as the seed type to enable easy creation of another instance using the same /// seed. #[derive(Debug)] pub struct RandomNumberGenerator<T: DataGenRng> { rng: T, /// The seed used for this instance. pub seed: String, } impl<T: DataGenRng> Default for RandomNumberGenerator<T> { fn default() -> Self { let mut rng = rand::thread_rng(); let seed = format!("{:04}", rng.gen_range(0..10000)); Self::new(seed) } } impl<T: DataGenRng> RandomNumberGenerator<T> { /// Create a new instance using the specified seed. pub fn new(seed: impl Into<String>) -> Self
/// Generate a random GUID pub fn guid(&mut self) -> uuid::Uuid { let mut bytes = [0u8; 16]; self.rng.fill_bytes(&mut bytes); uuid::Builder::from_bytes(bytes) .set_variant(uuid::Variant::RFC4122) .set_version(uuid::Version::Random) .build() } } impl<T: DataGenRng> rand::RngCore for RandomNumberGenerator<T> { fn next_u32(&mut self) -> u32 { self.rng.next_u32() } fn next_u64(&mut self) -> u64 { self.rng.next_u64() } fn fill_bytes(&mut self, dest: &mut [u8]) { self.rng.fill_bytes(dest); } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> { self.rng.try_fill_bytes(dest) } } /// Gets the current time in nanoseconds since the epoch pub fn now_ns() -> i64 { let since_the_epoch = SystemTime::now() .duration_since(UNIX_EPOCH) .expect("Time went backwards"); i64::try_from(since_the_epoch.as_nanos()).expect("Time does not fit") } // Always returns 0. #[cfg(test)] #[derive(Default)] struct ZeroRng; #[cfg(test)] impl rand::RngCore for ZeroRng { fn next_u32(&mut self) -> u32 { self.next_u64() as u32 } fn next_u64(&mut self) -> u64 { 0 } fn fill_bytes(&mut self, dest: &mut [u8]) { rand_core::impls::fill_bytes_via_next(self, dest) } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> { self.fill_bytes(dest); Ok(()) } } #[cfg(test)] impl rand::SeedableRng for ZeroRng { type Seed = Vec<u8>; // Ignore the seed value fn from_seed(_seed: Self::Seed) -> Self { Self } } // The test rng ignores the seed anyway, so the seed specified doesn't matter. #[cfg(test)] const TEST_SEED: &str = ""; #[cfg(test)] fn test_rng() -> RandomNumberGenerator<ZeroRng> { RandomNumberGenerator::<ZeroRng>::new(TEST_SEED) } // A random number type that does *not* have a predictable sequence of values for use in tests // that assert on properties rather than exact values. Aliased for convenience in changing to // a different Rng type. 
#[cfg(test)] type DynamicRng = rand::rngs::SmallRng; #[cfg(test)] mod test { use super::*; use crate::specification::*; use influxdb2_client::models::WriteDataPoint; use std::str::FromStr; type Error = Box<dyn std::error::Error>; type Result<T = (), E = Error> = std::result::Result<T, E>; #[tokio::test] async fn historical_data_sampling_interval() -> Result<()> { let toml = r#" name = "demo_schema" [[agents]] name = "basic" sampling_interval = "10s" # seconds [[agents.measurements]] name = "cpu" [[agents.measurements.fields]] name = "up" bool = true"#; let data_spec = DataSpec::from_str(toml).unwrap(); let agent_id = 0; let agent_spec = &data_spec.agents[0]; // Take agent_tags out of the equation for the purposes of this test let agent_tags = vec![]; let execution_start_time = now_ns(); // imagine we've specified at the command line that we want to generate metrics // for 1970 let start_datetime = Some(0); // for the first 15 seconds of the year let end_datetime = Some(15 * 1_000_000_000); let mut agent = agent::Agent::<ZeroRng>::new( agent_spec, &agent_spec.name, agent_id, TEST_SEED, agent_tags, start_datetime, end_datetime, execution_start_time, false, )?; let data_points = agent.generate().await?; let mut v = Vec::new(); for data_point in data_points { data_point.write_data_point_to(&mut v).unwrap(); } let line_protocol = String::from_utf8(v).unwrap(); // Get a point for time 0 let expected_line_protocol = "cpu up=f 0\n"; assert_eq!(line_protocol, expected_line_protocol); let data_points = agent.generate().await?; let mut v = Vec::new(); for data_point in data_points { data_point.write_data_point_to(&mut v).unwrap(); } let line_protocol = String::from_utf8(v).unwrap(); // Get a point for time 10s let expected_line_protocol = "cpu up=f 10000000000\n"; assert_eq!(line_protocol, expected_line_protocol); // Don't get any points anymore because we're past the ending datetime let data_points = agent.generate().await?; assert!( data_points.is_empty(), "expected no data points, got {:?}", data_points ); Ok(()) } }
{ let seed = seed.into(); Self { rng: Seeder::from(&seed).make_rng(), seed, } }
identifier_body
lib.rs
//! This crate contains structures and generators for specifying how to generate //! historical and real-time test data for Delorean. The rules for how to //! generate data and what shape it should take can be specified in a TOML file. //! //! Generators can output in line protocol, Parquet, or can be used to generate //! real-time load on a server that implements the [InfluxDB 2.0 write //! path][write-api]. //! //! [write-api]: https://v2.docs.influxdata.com/v2.0/api/#tag/Write //! //! While this generator could be compared to [the Go based one that creates TSM //! data][go-gen], its purpose is meant to be more far reaching. In addition to //! generating historical data, it should be useful for generating data in a //! sequence as you would expect it to arrive in a production environment. That //! means many agents sending data with their different tags and timestamps. //! //! [go-gen]: https://github.com/influxdata/influxdb/pull/12710 #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] #![warn( missing_copy_implementations, missing_debug_implementations, missing_docs, clippy::explicit_iter_loop, clippy::future_not_send, clippy::use_self, clippy::clone_on_ref_ptr )] use crate::substitution::Substitute; use rand::Rng; use rand_seeder::Seeder; use snafu::{ResultExt, Snafu}; use std::{ convert::TryFrom, time::{SystemTime, UNIX_EPOCH}, }; pub mod agent; pub mod field; pub mod measurement; pub mod specification; pub mod substitution; pub mod tag; mod tag_set; pub mod write; /// Errors that may happen while generating points. #[derive(Snafu, Debug)] pub enum Error { /// Error that may happen when waiting on a tokio task #[snafu(display("Could not join tokio task: {}", source))] TokioError { /// Underlying tokio error that caused this problem source: tokio::task::JoinError, }, /// Error that may happen when constructing an agent name #[snafu(display("Could not create agent name, caused by:\n{}", source))] CouldNotCreateAgentName { /// Underlying `substitution` module error that caused this problem source: substitution::Error, }, /// Error that may happen when an agent generates points #[snafu(display("Agent could not generate points, caused by:\n{}", source))] AgentCouldNotGeneratePoints { /// Underlying `agent` module error that caused this problem source: agent::Error, }, /// Error that may happen when creating agents #[snafu(display("Could not create agent `{}`, caused by:\n{}", name, source))] CouldNotCreateAgent { /// The name of the relevant agent name: String, /// Underlying `agent` module error that caused this problem source: agent::Error, }, /// Error that may happen when constructing an agent's writer #[snafu(display("Could not create writer for agent `{}`, caused by:\n{}", name, source))] CouldNotCreateAgentWriter { /// The name of the relevant agent name: String, /// Underlying `write` module error that caused this problem source: write::Error, }, } type Result<T, E = Error> = std::result::Result<T, E>; /// Generate data from the configuration in the spec. /// /// Provide a writer that the line protocol should be written to. /// /// If `start_datetime` or `end_datetime` are `None`, the current datetime will /// be used. 
pub async fn generate<T: DataGenRng>( spec: &specification::DataSpec, points_writer_builder: &mut write::PointsWriterBuilder, start_datetime: Option<i64>, end_datetime: Option<i64>, execution_start_time: i64, continue_on: bool, batch_size: usize, ) -> Result<usize> { let seed = spec.base_seed.to_owned().unwrap_or_else(|| { let mut rng = rand::thread_rng(); format!("{:04}", rng.gen_range(0..10000)) }); let mut handles = vec![]; // for each agent specification for agent_spec in &spec.agents { // create iterators to `cycle` through for `agent_spec.tags` let tag_set_iterator = tag::AgentTagIterator::new(&agent_spec.tags); // create `count` number of agent instances, or 1 agent if no count is specified let n_agents = agent_spec.count.unwrap_or(1); for (agent_id, mut agent_tags) in tag_set_iterator.take(n_agents).enumerate() { let agent_name = Substitute::once(&agent_spec.name, &[("agent_id", &agent_id.to_string())]) .context(CouldNotCreateAgentName)?; agent_tags.push(tag::Tag::new("data_spec", &spec.name)); if let Some(name_tag_key) = &agent_spec.name_tag_key
let mut agent = agent::Agent::<T>::new( agent_spec, &agent_name, agent_id, &seed, agent_tags, start_datetime, end_datetime, execution_start_time, continue_on, ) .context(CouldNotCreateAgent { name: &agent_name })?; let agent_points_writer = points_writer_builder .build_for_agent(&agent_name) .context(CouldNotCreateAgentWriter { name: &agent_name })?; handles.push(tokio::task::spawn(async move { agent.generate_all(agent_points_writer, batch_size).await })); } } let mut total_points = 0; for handle in handles { total_points += handle .await .context(TokioError)? .context(AgentCouldNotGeneratePoints)?; } Ok(total_points) } /// Shorthand trait for the functionality this crate needs a random number generator to have pub trait DataGenRng: rand::Rng + rand::SeedableRng + Send +'static {} impl<T: rand::Rng + rand::SeedableRng + Send +'static> DataGenRng for T {} /// Encapsulating the creation of an optionally-seedable random number generator /// to make this easy to change. Uses a 4-digit number expressed as a `String` /// as the seed type to enable easy creation of another instance using the same /// seed. #[derive(Debug)] pub struct RandomNumberGenerator<T: DataGenRng> { rng: T, /// The seed used for this instance. pub seed: String, } impl<T: DataGenRng> Default for RandomNumberGenerator<T> { fn default() -> Self { let mut rng = rand::thread_rng(); let seed = format!("{:04}", rng.gen_range(0..10000)); Self::new(seed) } } impl<T: DataGenRng> RandomNumberGenerator<T> { /// Create a new instance using the specified seed. pub fn new(seed: impl Into<String>) -> Self { let seed = seed.into(); Self { rng: Seeder::from(&seed).make_rng(), seed, } } /// Generate a random GUID pub fn guid(&mut self) -> uuid::Uuid { let mut bytes = [0u8; 16]; self.rng.fill_bytes(&mut bytes); uuid::Builder::from_bytes(bytes) .set_variant(uuid::Variant::RFC4122) .set_version(uuid::Version::Random) .build() } } impl<T: DataGenRng> rand::RngCore for RandomNumberGenerator<T> { fn next_u32(&mut self) -> u32 { self.rng.next_u32() } fn next_u64(&mut self) -> u64 { self.rng.next_u64() } fn fill_bytes(&mut self, dest: &mut [u8]) { self.rng.fill_bytes(dest); } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> { self.rng.try_fill_bytes(dest) } } /// Gets the current time in nanoseconds since the epoch pub fn now_ns() -> i64 { let since_the_epoch = SystemTime::now() .duration_since(UNIX_EPOCH) .expect("Time went backwards"); i64::try_from(since_the_epoch.as_nanos()).expect("Time does not fit") } // Always returns 0. #[cfg(test)] #[derive(Default)] struct ZeroRng; #[cfg(test)] impl rand::RngCore for ZeroRng { fn next_u32(&mut self) -> u32 { self.next_u64() as u32 } fn next_u64(&mut self) -> u64 { 0 } fn fill_bytes(&mut self, dest: &mut [u8]) { rand_core::impls::fill_bytes_via_next(self, dest) } fn try_fill_bytes(&mut self, dest: &mut [u8]) -> std::result::Result<(), rand::Error> { self.fill_bytes(dest); Ok(()) } } #[cfg(test)] impl rand::SeedableRng for ZeroRng { type Seed = Vec<u8>; // Ignore the seed value fn from_seed(_seed: Self::Seed) -> Self { Self } } // The test rng ignores the seed anyway, so the seed specified doesn't matter. #[cfg(test)] const TEST_SEED: &str = ""; #[cfg(test)] fn test_rng() -> RandomNumberGenerator<ZeroRng> { RandomNumberGenerator::<ZeroRng>::new(TEST_SEED) } // A random number type that does *not* have a predictable sequence of values for use in tests // that assert on properties rather than exact values. Aliased for convenience in changing to // a different Rng type. 
#[cfg(test)] type DynamicRng = rand::rngs::SmallRng; #[cfg(test)] mod test { use super::*; use crate::specification::*; use influxdb2_client::models::WriteDataPoint; use std::str::FromStr; type Error = Box<dyn std::error::Error>; type Result<T = (), E = Error> = std::result::Result<T, E>; #[tokio::test] async fn historical_data_sampling_interval() -> Result<()> { let toml = r#" name = "demo_schema" [[agents]] name = "basic" sampling_interval = "10s" # seconds [[agents.measurements]] name = "cpu" [[agents.measurements.fields]] name = "up" bool = true"#; let data_spec = DataSpec::from_str(toml).unwrap(); let agent_id = 0; let agent_spec = &data_spec.agents[0]; // Take agent_tags out of the equation for the purposes of this test let agent_tags = vec![]; let execution_start_time = now_ns(); // imagine we've specified at the command line that we want to generate metrics // for 1970 let start_datetime = Some(0); // for the first 15 seconds of the year let end_datetime = Some(15 * 1_000_000_000); let mut agent = agent::Agent::<ZeroRng>::new( agent_spec, &agent_spec.name, agent_id, TEST_SEED, agent_tags, start_datetime, end_datetime, execution_start_time, false, )?; let data_points = agent.generate().await?; let mut v = Vec::new(); for data_point in data_points { data_point.write_data_point_to(&mut v).unwrap(); } let line_protocol = String::from_utf8(v).unwrap(); // Get a point for time 0 let expected_line_protocol = "cpu up=f 0\n"; assert_eq!(line_protocol, expected_line_protocol); let data_points = agent.generate().await?; let mut v = Vec::new(); for data_point in data_points { data_point.write_data_point_to(&mut v).unwrap(); } let line_protocol = String::from_utf8(v).unwrap(); // Get a point for time 10s let expected_line_protocol = "cpu up=f 10000000000\n"; assert_eq!(line_protocol, expected_line_protocol); // Don't get any points anymore because we're past the ending datetime let data_points = agent.generate().await?; assert!( data_points.is_empty(), "expected no data points, got {:?}", data_points ); Ok(()) } }
{ agent_tags.push(tag::Tag::new(name_tag_key, &agent_name)); }
conditional_block
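All four lib.rs rows above split the same `generate` function, whose core shape is: spawn one tokio task per agent, then sum the returned point counts as the join handles resolve. A self-contained sketch of just that fan-out/join structure, with the per-agent work faked by a constant (only the task structure is the point):

// Runnable with tokio's "macros" and "rt-multi-thread" features enabled.
#[tokio::main]
async fn main() {
    let mut handles = Vec::new();
    for agent_id in 0..4u64 {
        handles.push(tokio::task::spawn(async move {
            // Stand-in for `agent.generate_all(writer, batch_size).await`:
            // pretend each agent wrote `agent_id + 1` points.
            agent_id + 1
        }));
    }
    let mut total_points = 0;
    for handle in handles {
        // JoinError only occurs if a task panicked or was cancelled.
        total_points += handle.await.expect("task panicked");
    }
    assert_eq!(total_points, 10);
}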
main.rs
message)); } } } else { PickYourAuth::None(NoToken) }; let bindle_client = Client::new(&opts.server_url, token)?; let local = bindle::provider::file::FileProvider::new( bindle_dir, bindle::search::NoopEngine::default(), ) .await; let cache = DumbCache::new(bindle_client.clone(), local); // We don't verify locally yet, but we will need the keyring to do so let _keyring = load_keyring(opts.keyring) .await .unwrap_or_else(|_| KeyRing::default()); match opts.subcmd { SubCommand::Info(info_opts) => { let inv = match info_opts.yanked { true => cache.get_invoice(info_opts.bindle_id), false => cache.get_yanked_invoice(info_opts.bindle_id), } .await .map_err(map_storage_error)?; match info_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&inv)?) .await? } Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await?, } } SubCommand::GetInvoice(gi_opts) => { let inv = match gi_opts.yanked { true => cache.get_invoice(&gi_opts.bindle_id), false => cache.get_yanked_invoice(&gi_opts.bindle_id), } .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&gi_opts.output) .await?; file.write_all(&toml::to_vec(&inv)?).await?; file.flush().await?; println!( "Wrote invoice {} to {}", gi_opts.bindle_id, gi_opts.output.display() ); } SubCommand::GetParcel(gp_opts) => get_parcel(cache, gp_opts).await?, SubCommand::Yank(yank_opts) => { bindle_client.yank_invoice(&yank_opts.bindle_id).await?; println!("Bindle {} yanked", yank_opts.bindle_id); } SubCommand::Search(search_opts) => { // TODO: Do we want to use the cache for searching? let matches = bindle_client .query_invoices(search_opts.clone().into()) .await?; match search_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout() .write_all(&toml::to_vec(&matches)?) .await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&matches)?) .await? } Some(format) if &format == "table" => tablify(&matches), Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tablify(&matches), } } SubCommand::Get(get_opts) => get_all(cache, get_opts).await?, SubCommand::Push(push_opts) => push_all(bindle_client, push_opts).await?, SubCommand::PushInvoice(push_opts) => { let resp = bindle_client .create_invoice_from_file(push_opts.path) .await?; println!("Invoice {} created", resp.invoice.bindle.id); } SubCommand::SignInvoice(sign_opts) => { // Role let role = if let Some(r) = sign_opts.role { role_from_name(r)? } else { SignatureRole::Creator }; // Keyfile let keyfile = match sign_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; // Signing key let key = first_matching_key(keyfile, &role).await?; // Load the invoice and sign it. let mut inv: Invoice = bindle::client::load::toml(sign_opts.invoice.as_str()).await?; inv.sign(role.clone(), &key)?; // Write the signed invoice to a file. 
let outfile = sign_opts .destination .unwrap_or_else(|| format!("./invoice-{}.toml", inv.canonical_name())); println!( "Signed as {} with role {} and wrote to {}", sign_opts.invoice, role, outfile ); tokio::fs::write(outfile, toml::to_string(&inv)?).await?; } SubCommand::PushFile(push_opts) => { let label = generate_label(&push_opts.path, push_opts.name, push_opts.media_type).await?; println!("Uploading file {} to server", push_opts.path.display()); bindle_client .create_parcel_from_file(push_opts.bindle_id, &label.sha256, push_opts.path) .await?; println!("File successfully uploaded"); } SubCommand::GenerateLabel(generate_opts) => { let label = generate_label( generate_opts.path, generate_opts.name, generate_opts.media_type, ) .await?; println!("{}", toml::to_string_pretty(&label)?); } SubCommand::PrintKey(print_key_opts) => { let dir = match print_key_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; let keyfile = SecretKeyFile::load_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let matches: Vec<KeyEntry> = match print_key_opts.label { Some(name) => keyfile .key .iter() .filter_map(|k| { if!k.label.contains(&name) { return None; } match k.try_into() { //Skip malformed keys. Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), } }) .collect(), None => keyfile .key .iter() .filter_map(|k| match k.try_into() { //Skip malformed keys. Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), }) .collect(), }; let keyring = KeyRing::new(matches); let out = toml::to_string(&keyring).map_err(|e| ClientError::Other(e.to_string()))?; println!("{}", out); } SubCommand::CreateKey(create_opts) => { let dir = match create_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; println!("Writing keys to {}", dir.display()); match tokio::fs::metadata(&dir).await { Err(e) if matches!(e.kind(), std::io::ErrorKind::NotFound) => { println!("File {} does not exist. 
Creating it.", dir.display()); let mut keyfile = SecretKeyFile::default(); let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Ok(info) => { if!info.is_file() { eprint!("Path must point to a file."); return Err(ClientError::Other( "Keyfile cannot be directory or symlink".to_owned(), )); } let mut keyfile = SecretKeyFile::load_file(&dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Err(e) => return Err(e.into()), } } SubCommand::Login(_login_opts) => { // TODO: We'll use login opts when we enable additional login providers OidcToken::login(&opts.server_url, token_file).await?; println!("Login successful"); } } Ok(()) } async fn generate_label( file_path: impl AsRef<Path>, name: Option<String>, media_type: Option<String>, ) -> Result<bindle::Label> { let path = file_path.as_ref().to_owned(); let mut file = tokio::fs::File::open(&path).await?; let media_type = media_type.unwrap_or_else(|| { mime_guess::from_path(&path) .first_or_octet_stream() .to_string() }); info!("Using media type {}", media_type); // Note: Should be able to unwrap here because the file opening step would have // failed in conditions where this returns `None` let name = name.unwrap_or_else(|| path.file_name().unwrap().to_string_lossy().to_string()); info!("Using name {}", name); let size = file.metadata().await?.len(); let mut sha = bindle::async_util::AsyncSha256::new(); tokio::io::copy(&mut file, &mut sha).await?; let result = sha.into_inner().expect("data lock error").finalize(); Ok(bindle::Label { sha256: format!("{:x}", result), media_type, size, name, annotations: None, // TODO: allow annotations from command line ..bindle::Label::default() }) } async fn get_parcel<C: Cache + Send + Sync + Clone>(cache: C, opts: GetParcel) -> Result<()> { let parcel = cache .get_parcel(opts.bindle_id, &opts.sha) .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&opts.output) .await?; tokio::io::copy( &mut StreamReader::new( parcel.map(|res| res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))), ), &mut file, ) .await?; println!("Wrote parcel {} to {}", opts.sha, opts.output.display()); Ok(()) } async fn push_all<T: TokenManager + Send + Sync + Clone +'static>( client: Client<T>, opts: Push, ) -> Result<()> { let standalone = StandaloneRead::new(opts.path, &opts.bindle_id).await?; standalone.push(&client).await?; println!("Pushed bindle {}", opts.bindle_id); Ok(()) } async fn get_all<C: Cache + Send + Sync + Clone>(cache: C, opts: Get) -> Result<()> { let inv = match opts.yanked { true => cache.get_invoice(opts.bindle_id), false => cache.get_yanked_invoice(opts.bindle_id), } .await .map_err(map_storage_error)?; println!("Fetched invoice. 
Starting fetch of parcels"); let parcels = Arc::new(Mutex::new(std::collections::HashMap::new())); let zero_vec = Vec::with_capacity(0); let is_export = opts.export.is_some(); let parcel_fetch = inv .parcel .as_ref() .unwrap_or(&zero_vec) .iter() .map(|p| { ( p.label.sha256.clone(), inv.bindle.id.clone(), cache.clone(), parcels.clone(), ) }) .map(|(sha, bindle_id, c, parcels)| async move { match c.get_parcel(bindle_id, &sha).await { Ok(p) => { println!("Fetched parcel {}", sha); if is_export { parcels.lock().await.insert( sha, StreamReader::new(p.map(|res| { res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) })), ); } } Err(e) => { match e { ProviderError::NotFound => warn!("Parcel {} does not exist", sha), ProviderError::ProxyError(err) if matches!(err, ClientError::ParcelNotFound) => { warn!("Parcel {} does not exist", sha) } // Only return an error if it isn't a not found error. By design, an invoice // can contain parcels that don't yet exist ProviderError::ProxyError(inner) => return Err(inner), _ => { return Err(ClientError::Other(format!( "Unable to get parcel {}: {:?}", sha, e ))) } } } } Ok(()) }); futures::future::join_all(parcel_fetch) .await .into_iter() .collect::<Result<Vec<_>>>()?; if let Some(p) = opts.export { let standalone = StandaloneWrite::new(p, &inv.bindle.id)?; standalone .write( inv, // All locks should be done at this point (as all futures exited), so panicking feels // right here as it is an unrecoverable condition Arc::try_unwrap(parcels) .map_err(|_| ClientError::Other("Unexpected lock error".to_string())) .unwrap()
.into_inner(), ) .await?; }
random_line_split
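`generate_label` in the row above derives the parcel's `sha256` by streaming the file through a hasher rather than reading it whole. A blocking sketch of that hashing step using the `sha2` crate directly (the original wraps the hasher in bindle's async `AsyncSha256` adapter):

// Stream a file through SHA-256 and hex-encode the digest.
use sha2::{Digest, Sha256};
use std::{fs::File, io};

fn sha256_hex(path: &str) -> io::Result<String> {
    let mut file = File::open(path)?;
    let mut hasher = Sha256::new();
    // Sha256 implements io::Write (with sha2's default `std` feature), so
    // io::copy streams the file through it without buffering it all in memory.
    io::copy(&mut file, &mut hasher)?;
    Ok(format!("{:x}", hasher.finalize()))
}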
main.rs
.await? } Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await?, } } SubCommand::GetInvoice(gi_opts) => { let inv = match gi_opts.yanked { true => cache.get_invoice(&gi_opts.bindle_id), false => cache.get_yanked_invoice(&gi_opts.bindle_id), } .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&gi_opts.output) .await?; file.write_all(&toml::to_vec(&inv)?).await?; file.flush().await?; println!( "Wrote invoice {} to {}", gi_opts.bindle_id, gi_opts.output.display() ); } SubCommand::GetParcel(gp_opts) => get_parcel(cache, gp_opts).await?, SubCommand::Yank(yank_opts) => { bindle_client.yank_invoice(&yank_opts.bindle_id).await?; println!("Bindle {} yanked", yank_opts.bindle_id); } SubCommand::Search(search_opts) => { // TODO: Do we want to use the cache for searching? let matches = bindle_client .query_invoices(search_opts.clone().into()) .await?; match search_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout() .write_all(&toml::to_vec(&matches)?) .await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&matches)?) .await? } Some(format) if &format == "table" => tablify(&matches), Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tablify(&matches), } } SubCommand::Get(get_opts) => get_all(cache, get_opts).await?, SubCommand::Push(push_opts) => push_all(bindle_client, push_opts).await?, SubCommand::PushInvoice(push_opts) => { let resp = bindle_client .create_invoice_from_file(push_opts.path) .await?; println!("Invoice {} created", resp.invoice.bindle.id); } SubCommand::SignInvoice(sign_opts) => { // Role let role = if let Some(r) = sign_opts.role { role_from_name(r)? } else { SignatureRole::Creator }; // Keyfile let keyfile = match sign_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; // Signing key let key = first_matching_key(keyfile, &role).await?; // Load the invoice and sign it. let mut inv: Invoice = bindle::client::load::toml(sign_opts.invoice.as_str()).await?; inv.sign(role.clone(), &key)?; // Write the signed invoice to a file. 
let outfile = sign_opts .destination .unwrap_or_else(|| format!("./invoice-{}.toml", inv.canonical_name())); println!( "Signed as {} with role {} and wrote to {}", sign_opts.invoice, role, outfile ); tokio::fs::write(outfile, toml::to_string(&inv)?).await?; } SubCommand::PushFile(push_opts) => { let label = generate_label(&push_opts.path, push_opts.name, push_opts.media_type).await?; println!("Uploading file {} to server", push_opts.path.display()); bindle_client .create_parcel_from_file(push_opts.bindle_id, &label.sha256, push_opts.path) .await?; println!("File successfully uploaded"); } SubCommand::GenerateLabel(generate_opts) => { let label = generate_label( generate_opts.path, generate_opts.name, generate_opts.media_type, ) .await?; println!("{}", toml::to_string_pretty(&label)?); } SubCommand::PrintKey(print_key_opts) => { let dir = match print_key_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; let keyfile = SecretKeyFile::load_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let matches: Vec<KeyEntry> = match print_key_opts.label { Some(name) => keyfile .key .iter() .filter_map(|k| { if!k.label.contains(&name) { return None; } match k.try_into() { //Skip malformed keys. Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), } }) .collect(), None => keyfile .key .iter() .filter_map(|k| match k.try_into() { //Skip malformed keys. Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), }) .collect(), }; let keyring = KeyRing::new(matches); let out = toml::to_string(&keyring).map_err(|e| ClientError::Other(e.to_string()))?; println!("{}", out); } SubCommand::CreateKey(create_opts) => { let dir = match create_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; println!("Writing keys to {}", dir.display()); match tokio::fs::metadata(&dir).await { Err(e) if matches!(e.kind(), std::io::ErrorKind::NotFound) => { println!("File {} does not exist. 
Creating it.", dir.display()); let mut keyfile = SecretKeyFile::default(); let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Ok(info) => { if!info.is_file() { eprint!("Path must point to a file."); return Err(ClientError::Other( "Keyfile cannot be directory or symlink".to_owned(), )); } let mut keyfile = SecretKeyFile::load_file(&dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Err(e) => return Err(e.into()), } } SubCommand::Login(_login_opts) => { // TODO: We'll use login opts when we enable additional login providers OidcToken::login(&opts.server_url, token_file).await?; println!("Login successful"); } } Ok(()) } async fn generate_label( file_path: impl AsRef<Path>, name: Option<String>, media_type: Option<String>, ) -> Result<bindle::Label> { let path = file_path.as_ref().to_owned(); let mut file = tokio::fs::File::open(&path).await?; let media_type = media_type.unwrap_or_else(|| { mime_guess::from_path(&path) .first_or_octet_stream() .to_string() }); info!("Using media type {}", media_type); // Note: Should be able to unwrap here because the file opening step would have // failed in conditions where this returns `None` let name = name.unwrap_or_else(|| path.file_name().unwrap().to_string_lossy().to_string()); info!("Using name {}", name); let size = file.metadata().await?.len(); let mut sha = bindle::async_util::AsyncSha256::new(); tokio::io::copy(&mut file, &mut sha).await?; let result = sha.into_inner().expect("data lock error").finalize(); Ok(bindle::Label { sha256: format!("{:x}", result), media_type, size, name, annotations: None, // TODO: allow annotations from command line ..bindle::Label::default() }) } async fn get_parcel<C: Cache + Send + Sync + Clone>(cache: C, opts: GetParcel) -> Result<()> { let parcel = cache .get_parcel(opts.bindle_id, &opts.sha) .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&opts.output) .await?; tokio::io::copy( &mut StreamReader::new( parcel.map(|res| res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))), ), &mut file, ) .await?; println!("Wrote parcel {} to {}", opts.sha, opts.output.display()); Ok(()) } async fn push_all<T: TokenManager + Send + Sync + Clone +'static>( client: Client<T>, opts: Push, ) -> Result<()> { let standalone = StandaloneRead::new(opts.path, &opts.bindle_id).await?; standalone.push(&client).await?; println!("Pushed bindle {}", opts.bindle_id); Ok(()) } async fn get_all<C: Cache + Send + Sync + Clone>(cache: C, opts: Get) -> Result<()> { let inv = match opts.yanked { true => cache.get_invoice(opts.bindle_id), false => cache.get_yanked_invoice(opts.bindle_id), } .await .map_err(map_storage_error)?; println!("Fetched invoice. 
Starting fetch of parcels"); let parcels = Arc::new(Mutex::new(std::collections::HashMap::new())); let zero_vec = Vec::with_capacity(0); let is_export = opts.export.is_some(); let parcel_fetch = inv .parcel .as_ref() .unwrap_or(&zero_vec) .iter() .map(|p| { ( p.label.sha256.clone(), inv.bindle.id.clone(), cache.clone(), parcels.clone(), ) }) .map(|(sha, bindle_id, c, parcels)| async move { match c.get_parcel(bindle_id, &sha).await { Ok(p) => { println!("Fetched parcel {}", sha); if is_export { parcels.lock().await.insert( sha, StreamReader::new(p.map(|res| { res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) })), ); } } Err(e) => { match e { ProviderError::NotFound => warn!("Parcel {} does not exist", sha), ProviderError::ProxyError(err) if matches!(err, ClientError::ParcelNotFound) => { warn!("Parcel {} does not exist", sha) } // Only return an error if it isn't a not found error. By design, an invoice // can contain parcels that don't yet exist ProviderError::ProxyError(inner) => return Err(inner), _ => { return Err(ClientError::Other(format!( "Unable to get parcel {}: {:?}", sha, e ))) } } } } Ok(()) }); futures::future::join_all(parcel_fetch) .await .into_iter() .collect::<Result<Vec<_>>>()?; if let Some(p) = opts.export { let standalone = StandaloneWrite::new(p, &inv.bindle.id)?; standalone .write( inv, // All locks should be done at this point (as all futures exited), so panicking feels // right here as it is an unrecoverable condition Arc::try_unwrap(parcels) .map_err(|_| ClientError::Other("Unexpected lock error".to_string())) .unwrap() .into_inner(), ) .await?; } Ok(()) } async fn load_keyring(keyring: Option<PathBuf>) -> anyhow::Result<KeyRing> { // This takes an Option<PathBuf> because we want to wrap all of the flag handling in this // function, including setting the default if the keyring is None. let dir = keyring .unwrap_or_else(default_config_dir) .join("keyring.toml"); let kr = bindle::client::load::toml(dir).await?; Ok(kr) } fn map_storage_error(e: ProviderError) -> ClientError { match e { ProviderError::Io(e) => ClientError::Io(e), ProviderError::ProxyError(inner) => inner, ProviderError::InvalidId(parse_err) => ClientError::InvalidId(parse_err), _ => ClientError::Other(format!("{}", e)), } } fn default_config_dir() -> PathBuf { dirs::config_dir() .map(|v| v.join("bindle/")) .unwrap_or_else(|| "./bindle".into()) } /// Get the config dir, ensuring that it exists. /// /// This will return the default config directory. If that directory does not /// exist, it will be created before the path is returned. /// /// If the system does not have a configuration directory, this will create a directory named /// `bindle/` in the local working directory. /// /// This will return an error if the directory cannot be created. async fn ensure_config_dir() -> Result<PathBuf> { let dir = default_config_dir(); tokio::fs::create_dir_all(&dir).await?; Ok(dir) } fn role_from_name(name: String) -> Result<SignatureRole>
{ match name.as_str() { "c" | "creator" => Ok(SignatureRole::Creator), "h" | "host" => Ok(SignatureRole::Host), "a" | "approver" => Ok(SignatureRole::Approver), "p" | "proxy" => Ok(SignatureRole::Proxy), _ => Err(ClientError::Other("Unknown role".to_owned())), } }
identifier_body
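The `role_from_name` body above maps short CLI aliases onto bindle's `SignatureRole` variants. A minimal self-contained sketch of the same dispatch pattern, using local stand-ins (`Role`, a plain `String` error) rather than bindle's actual `SignatureRole` and `ClientError` types:

// Hypothetical stand-ins for bindle's SignatureRole / ClientError.
#[derive(Debug, PartialEq)]
enum Role {
    Creator,
    Host,
    Approver,
    Proxy,
}

fn role_from_name(name: &str) -> Result<Role, String> {
    // Accept either the one-letter alias or the full role name.
    match name {
        "c" | "creator" => Ok(Role::Creator),
        "h" | "host" => Ok(Role::Host),
        "a" | "approver" => Ok(Role::Approver),
        "p" | "proxy" => Ok(Role::Proxy),
        other => Err(format!("Unknown role: {}", other)),
    }
}

fn main() {
    assert_eq!(role_from_name("c"), Ok(Role::Creator));
    assert!(role_from_name("x").is_err());
}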
main.rs
} } else { PickYourAuth::None(NoToken) }; let bindle_client = Client::new(&opts.server_url, token)?; let local = bindle::provider::file::FileProvider::new( bindle_dir, bindle::search::NoopEngine::default(), ) .await; let cache = DumbCache::new(bindle_client.clone(), local); // We don't verify locally yet, but we will need the keyring to do so let _keyring = load_keyring(opts.keyring) .await .unwrap_or_else(|_| KeyRing::default()); match opts.subcmd { SubCommand::Info(info_opts) => { let inv = match info_opts.yanked { true => cache.get_invoice(info_opts.bindle_id), false => cache.get_yanked_invoice(info_opts.bindle_id), } .await .map_err(map_storage_error)?; match info_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&inv)?) .await? } Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tokio::io::stdout().write_all(&toml::to_vec(&inv)?).await?, } } SubCommand::GetInvoice(gi_opts) => { let inv = match gi_opts.yanked { true => cache.get_invoice(&gi_opts.bindle_id), false => cache.get_yanked_invoice(&gi_opts.bindle_id), } .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&gi_opts.output) .await?; file.write_all(&toml::to_vec(&inv)?).await?; file.flush().await?; println!( "Wrote invoice {} to {}", gi_opts.bindle_id, gi_opts.output.display() ); } SubCommand::GetParcel(gp_opts) => get_parcel(cache, gp_opts).await?, SubCommand::Yank(yank_opts) => { bindle_client.yank_invoice(&yank_opts.bindle_id).await?; println!("Bindle {} yanked", yank_opts.bindle_id); } SubCommand::Search(search_opts) => { // TODO: Do we want to use the cache for searching? let matches = bindle_client .query_invoices(search_opts.clone().into()) .await?; match search_opts.output { Some(format) if &format == "toml" => { tokio::io::stdout() .write_all(&toml::to_vec(&matches)?) .await? } Some(format) if &format == "json" => { tokio::io::stdout() .write_all(&serde_json::to_vec_pretty(&matches)?) .await? } Some(format) if &format == "table" => tablify(&matches), Some(format) => { return Err(ClientError::Other(format!("Unknown format: {}", format))) } None => tablify(&matches), } } SubCommand::Get(get_opts) => get_all(cache, get_opts).await?, SubCommand::Push(push_opts) => push_all(bindle_client, push_opts).await?, SubCommand::PushInvoice(push_opts) => { let resp = bindle_client .create_invoice_from_file(push_opts.path) .await?; println!("Invoice {} created", resp.invoice.bindle.id); } SubCommand::SignInvoice(sign_opts) => { // Role let role = if let Some(r) = sign_opts.role { role_from_name(r)? } else { SignatureRole::Creator }; // Keyfile let keyfile = match sign_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; // Signing key let key = first_matching_key(keyfile, &role).await?; // Load the invoice and sign it. let mut inv: Invoice = bindle::client::load::toml(sign_opts.invoice.as_str()).await?; inv.sign(role.clone(), &key)?; // Write the signed invoice to a file. 
let outfile = sign_opts .destination .unwrap_or_else(|| format!("./invoice-{}.toml", inv.canonical_name())); println!( "Signed as {} with role {} and wrote to {}", sign_opts.invoice, role, outfile ); tokio::fs::write(outfile, toml::to_string(&inv)?).await?; } SubCommand::PushFile(push_opts) => { let label = generate_label(&push_opts.path, push_opts.name, push_opts.media_type).await?; println!("Uploading file {} to server", push_opts.path.display()); bindle_client .create_parcel_from_file(push_opts.bindle_id, &label.sha256, push_opts.path) .await?; println!("File successfully uploaded"); } SubCommand::GenerateLabel(generate_opts) => { let label = generate_label( generate_opts.path, generate_opts.name, generate_opts.media_type, ) .await?; println!("{}", toml::to_string_pretty(&label)?); } SubCommand::PrintKey(print_key_opts) => { let dir = match print_key_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; let keyfile = SecretKeyFile::load_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let matches: Vec<KeyEntry> = match print_key_opts.label { Some(name) => keyfile .key .iter() .filter_map(|k| { if!k.label.contains(&name) { return None; } match k.try_into() { //Skip malformed keys. Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), } }) .collect(), None => keyfile .key .iter() .filter_map(|k| match k.try_into() { //Skip malformed keys. Err(e) => { eprintln!("Warning: Malformed key: {} (skipping)", e); None } Ok(ke) => Some(ke), }) .collect(), }; let keyring = KeyRing::new(matches); let out = toml::to_string(&keyring).map_err(|e| ClientError::Other(e.to_string()))?; println!("{}", out); } SubCommand::CreateKey(create_opts) => { let dir = match create_opts.secret_file { Some(dir) => dir, None => ensure_config_dir().await?.join("secret_keys.toml"), }; println!("Writing keys to {}", dir.display()); match tokio::fs::metadata(&dir).await { Err(e) if matches!(e.kind(), std::io::ErrorKind::NotFound) => { println!("File {} does not exist. 
Creating it.", dir.display()); let mut keyfile = SecretKeyFile::default(); let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Ok(info) => { if!info.is_file() { eprint!("Path must point to a file."); return Err(ClientError::Other( "Keyfile cannot be directory or symlink".to_owned(), )); } let mut keyfile = SecretKeyFile::load_file(&dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; let newkey = SecretKeyEntry::new( create_opts.label, vec![bindle::SignatureRole::Creator], ); keyfile.key.push(newkey); keyfile .save_file(dir) .await .map_err(|e| ClientError::Other(e.to_string()))?; } Err(e) => return Err(e.into()), } } SubCommand::Login(_login_opts) => { // TODO: We'll use login opts when we enable additional login providers OidcToken::login(&opts.server_url, token_file).await?; println!("Login successful"); } } Ok(()) } async fn generate_label( file_path: impl AsRef<Path>, name: Option<String>, media_type: Option<String>, ) -> Result<bindle::Label> { let path = file_path.as_ref().to_owned(); let mut file = tokio::fs::File::open(&path).await?; let media_type = media_type.unwrap_or_else(|| { mime_guess::from_path(&path) .first_or_octet_stream() .to_string() }); info!("Using media type {}", media_type); // Note: Should be able to unwrap here because the file opening step would have // failed in conditions where this returns `None` let name = name.unwrap_or_else(|| path.file_name().unwrap().to_string_lossy().to_string()); info!("Using name {}", name); let size = file.metadata().await?.len(); let mut sha = bindle::async_util::AsyncSha256::new(); tokio::io::copy(&mut file, &mut sha).await?; let result = sha.into_inner().expect("data lock error").finalize(); Ok(bindle::Label { sha256: format!("{:x}", result), media_type, size, name, annotations: None, // TODO: allow annotations from command line ..bindle::Label::default() }) } async fn get_parcel<C: Cache + Send + Sync + Clone>(cache: C, opts: GetParcel) -> Result<()> { let parcel = cache .get_parcel(opts.bindle_id, &opts.sha) .await .map_err(map_storage_error)?; let mut file = tokio::fs::OpenOptions::new() .write(true) .create_new(true) // Make sure we aren't overwriting .open(&opts.output) .await?; tokio::io::copy( &mut StreamReader::new( parcel.map(|res| res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))), ), &mut file, ) .await?; println!("Wrote parcel {} to {}", opts.sha, opts.output.display()); Ok(()) } async fn push_all<T: TokenManager + Send + Sync + Clone +'static>( client: Client<T>, opts: Push, ) -> Result<()> { let standalone = StandaloneRead::new(opts.path, &opts.bindle_id).await?; standalone.push(&client).await?; println!("Pushed bindle {}", opts.bindle_id); Ok(()) } async fn get_all<C: Cache + Send + Sync + Clone>(cache: C, opts: Get) -> Result<()> { let inv = match opts.yanked { true => cache.get_invoice(opts.bindle_id), false => cache.get_yanked_invoice(opts.bindle_id), } .await .map_err(map_storage_error)?; println!("Fetched invoice. 
Starting fetch of parcels"); let parcels = Arc::new(Mutex::new(std::collections::HashMap::new())); let zero_vec = Vec::with_capacity(0); let is_export = opts.export.is_some(); let parcel_fetch = inv .parcel .as_ref() .unwrap_or(&zero_vec) .iter() .map(|p| { ( p.label.sha256.clone(), inv.bindle.id.clone(), cache.clone(), parcels.clone(), ) }) .map(|(sha, bindle_id, c, parcels)| async move { match c.get_parcel(bindle_id, &sha).await { Ok(p) => { println!("Fetched parcel {}", sha); if is_export { parcels.lock().await.insert( sha, StreamReader::new(p.map(|res| { res.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) })), ); } } Err(e) => { match e { ProviderError::NotFound => warn!("Parcel {} does not exist", sha), ProviderError::ProxyError(err) if matches!(err, ClientError::ParcelNotFound) => { warn!("Parcel {} does not exist", sha) } // Only return an error if it isn't a not found error. By design, an invoice // can contain parcels that don't yet exist ProviderError::ProxyError(inner) => return Err(inner), _ => { return Err(ClientError::Other(format!( "Unable to get parcel {}: {:?}", sha, e ))) } } } } Ok(()) }); futures::future::join_all(parcel_fetch) .await .into_iter() .collect::<Result<Vec<_>>>()?; if let Some(p) = opts.export { let standalone = StandaloneWrite::new(p, &inv.bindle.id)?; standalone .write( inv, // All locks should be done at this point (as all futures exited), so panicing feels // right here as it is an unrecoverable condition Arc::try_unwrap(parcels) .map_err(|_| ClientError::Other("Unexpected lock error".to_string())) .unwrap() .into_inner(), ) .await?; } Ok(()) } async fn
load_keyring
identifier_name
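`get_all` above fans out one future per parcel and then collects the results with `join_all`, so a single failed fetch only fails the command after every future has completed. A runnable sketch of that fan-out/fan-in shape, assuming the `futures` and `tokio` crates are available; `fetch` is a stand-in for `cache.get_parcel`:

use futures::future::join_all;

// Stand-in for cache.get_parcel(bindle_id, &sha).
async fn fetch(sha: String) -> Result<String, String> {
    Ok(format!("parcel {}", sha))
}

#[tokio::main]
async fn main() -> Result<(), String> {
    let shas = vec!["abc".to_string(), "def".to_string()];
    let tasks = shas.into_iter().map(fetch);
    // join_all never short-circuits; collecting into Result<Vec<_>, _>
    // surfaces the first error only after every future has run.
    let fetched: Result<Vec<_>, String> = join_all(tasks).await.into_iter().collect();
    println!("{:?}", fetched?);
    Ok(())
}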
fsevents.rs
#![allow(non_camel_case_types, non_uppercase_statics)] // C types use std::collections::{HashSet}; use std::c_str::CString; use std::io::{IoError, IoResult}; use std::io::fs::PathExtensions; use std::mem; use std::ptr; use std::raw::Slice; use std::os; use std::io::{Timer}; use std::time::Duration; //use super; use libc::{c_void, c_char, c_int, ENOENT}; use sync::{Arc, Mutex}; #[repr(C)] enum CFStringBuiltInEncodings { kCFStringEncodingUnicode = 0x01000000, kCFStringEncodingUTF8 = 0x08000100, } static kFSEventStreamCreateFlagNoDefer: u32 = 0x00000002; static kFSEventStreamCreateFlagFileEvents: u32 = 0x00000010; #[deriving(Show)] enum Event { Create(String), Remove(String), //ModifyMeta, Modify(String), RenameOld(String), RenameNew(String), } enum Control { Update(HashSet<String>), Exit, } #[repr(C)] struct FSEventStreamContext { version: c_int, info: *mut c_void, retain: *const c_void, release: *const c_void, desc: *const c_void, } type callback_t = extern "C" fn( stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64 ); #[repr(C)] enum FSEventStreamEventFlags { //kFSEventStreamEventFlagNone = 0x00000000, //kFSEventStreamEventFlagMustScanSubDirs = 0x00000001, //kFSEventStreamEventFlagUserDropped = 0x00000002, //kFSEventStreamEventFlagKernelDropped = 0x00000004, //kFSEventStreamEventFlagEventIdsWrapped = 0x00000008, //kFSEventStreamEventFlagHistoryDone = 0x00000010, //kFSEventStreamEventFlagRootChanged = 0x00000020, //kFSEventStreamEventFlagMount = 0x00000040, //kFSEventStreamEventFlagUnmount = 0x00000080, kFSEventStreamEventFlagItemCreated = 0x00000100, kFSEventStreamEventFlagItemRemoved = 0x00000200, //kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400, kFSEventStreamEventFlagItemRenamed = 0x00000800, //kFSEventStreamEventFlagItemModified = 0x00001000, //kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000, //kFSEventStreamEventFlagItemChangeOwner = 0x00004000, //kFSEventStreamEventFlagItemXattrMod = 0x00008000, kFSEventStreamEventFlagItemIsFile = 0x00010000, //kFSEventStreamEventFlagItemIsDir = 0x00020000, //kFSEventStreamEventFlagItemIsSymlink = 0x00040000, //kFSEventStreamEventFlagOwnEvent = 0x00080000 } static kFSEventStreamEventIdSinceNow: u64 = 0xFFFFFFFFFFFFFFFF; fn has_flag(event: u32, expected: FSEventStreamEventFlags) -> bool { event & expected as u32 == expected as u32 } extern "C" fn callback(_stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64) { let tx: &mut Sender<Event> = unsafe { &mut *(info as *mut Sender<Event>) };
data: events, len: size as uint, }) }; let ids: &[u64] = unsafe { mem::transmute(Slice { data: ids, len: size as uint, }) }; let paths: &[*const i8] = unsafe { mem::transmute(Slice { data: paths, len: size as uint, }) }; let paths = Vec::from_fn(size as uint, |id| { unsafe { CString::new(paths[id], false) } }); let mut renamed = false; for id in range(0, size as uint) { debug!("Received filesystem event: [id: {}, ev: {}] from '{}'", ids[id], events[id], paths[id]); let event = events[id]; let path = String::from_str(paths[id].as_str().unwrap()); if event & kFSEventStreamEventFlagItemIsFile as u32 == 0 { continue; } let path_ = Path::new(path.as_slice()); if has_flag(event, kFSEventStreamEventFlagItemCreated) && path_.exists() { tx.send(Create(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRemoved) &&!path_.exists() { tx.send(Remove(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRenamed) { if renamed { tx.send(RenameOld(path)); } else { tx.send(RenameNew(path)); } renamed =!renamed; } } } struct CoreFoundationString { d: *const c_void, } impl CoreFoundationString { fn new(string: &str) -> CoreFoundationString { CoreFoundationString { d: unsafe { CFStringCreateWithCString( kCFAllocatorDefault, string.to_c_str().as_ptr(), kCFStringEncodingUTF8 ) } } } } impl Drop for CoreFoundationString { fn drop(&mut self) { unsafe { CFRelease(self.d) } } } struct CoreFoundationArray { d: *const c_void, items: Vec<CoreFoundationString>, // It's a RAII container. } impl CoreFoundationArray { fn new(collection: &HashSet<String>) -> CoreFoundationArray { let d = unsafe { CFArrayCreateMutable( kCFAllocatorDefault, collection.len() as i32, ptr::null::<c_void>() ) }; let mut items = Vec::new(); for item in collection.iter() { let item = CoreFoundationString::new(item.as_slice()); unsafe { CFArrayAppendValue(d, item.d); } items.push(item); } CoreFoundationArray { d: d, items: items, } } } impl Drop for CoreFoundationArray { fn drop(&mut self) { self.items.clear(); unsafe { CFRelease(self.d) } } } fn recreate_stream(eventloop: *mut c_void, context: *const FSEventStreamContext, paths: HashSet<String>) -> *mut c_void { let paths = CoreFoundationArray::new(&paths); let latency = 0.05f64; let stream = unsafe { FSEventStreamCreate( kCFAllocatorDefault, callback, context, paths.d, kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagFileEvents ) }; unsafe { FSEventStreamScheduleWithRunLoop(stream, eventloop, kCFRunLoopDefaultMode); FSEventStreamStart(stream); stream } } pub struct Watcher { pub rx: Receiver<Event>, ctx: SyncSender<Control>, paths: HashSet<String>, stream: Arc<Mutex<*mut c_void>>, eventloop: Arc<Mutex<*mut c_void>>, } impl Watcher { pub fn new() -> Watcher { let (mut tx, rx) = channel::<Event>(); let (ctx, crx) = sync_channel::<Control>(0); let eventloop = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let stream = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let watcher = Watcher { rx: rx, ctx: ctx, paths: HashSet::new(), stream: stream.clone(), eventloop: eventloop.clone(), }; spawn(proc() { debug!("Starting watcher thread..."); unsafe { *eventloop.lock() = CFRunLoopGetCurrent(); let tx: *mut c_void = &mut tx as *mut _ as *mut c_void; let context = FSEventStreamContext { version: 0, info: tx, retain: ptr::null::<c_void>(), release: ptr::null::<c_void>(), desc: ptr::null::<c_void>(), }; loop { debug!("New watcher loop iteration"); match crx.recv() { Update(paths) => { debug!("Updating watcher loop with {}", paths); *stream.lock() = 
recreate_stream(*eventloop.lock(), &context, paths); CFRunLoopRun(); } Exit => { debug!("Received watcher exit event - performing graceful shutdown"); break } } } } }); watcher } pub fn watch(&mut self, path: &Path) -> IoResult<()> { if path.exists() { debug!("Adding '{}' to the watch", path.display()); let path = os::make_absolute(path); let path = match path.as_str() { Some(path) => String::from_str(path), None => return Err(IoError::from_errno(ENOENT as uint, false)) }; self.paths.insert(path.clone()); self.update(); Ok(()) } else { Err(IoError::from_errno(ENOENT as uint, false)) } } pub fn unwatch(&mut self, path: &String) -> IoResult<()> { self.paths.remove(path); self.update(); Ok(()) } fn update(&self) { self.stop_stream(); self.ctx.send(Update(self.paths.clone())); } fn stop_stream(&self) { let mut stream = self.stream.lock(); if!(*stream).is_null() { unsafe { FSEventStreamStop(*stream); FSEventStreamInvalidate(*stream); FSEventStreamRelease(*stream); CFRunLoopWakeUp(*self.eventloop.lock()); } } } } impl Drop for Watcher { fn drop(&mut self) { debug!("dropping! {:p}", self); self.stop_stream(); self.ctx.send(Exit); } } #[link(name = "Carbon", kind = "framework")] #[link(name = "CoreFoundation", kind = "framework")] extern { static kCFAllocatorDefault: *mut c_void; static kCFRunLoopDefaultMode: *mut c_void; fn CFStringCreateWithCString(allocator: *mut c_void, string: *const c_char, encoding: CFStringBuiltInEncodings) -> *const c_void; fn CFArrayCreateMutable(allocator: *mut c_void, size: c_int, callbacks: *const c_void) -> *const c_void; fn CFArrayAppendValue(array: *const c_void, value: *const c_void); fn FSEventStreamCreate(allocator: *mut c_void, cb: callback_t, context: *const FSEventStreamContext, paths: *const c_void, since: u64, latency: f64, flags: u32) -> *mut c_void; fn FSEventStreamScheduleWithRunLoop(stream: *mut c_void, eventloop: *mut c_void, mode: *mut c_void); fn FSEventStreamStart(stream: *mut c_void); fn FSEventStreamStop(stream: *mut c_void); fn FSEventStreamInvalidate(stream: *mut c_void); fn FSEventStreamRelease(stream: *mut c_void); fn CFRunLoopGetCurrent() -> *mut c_void; fn CFRunLoopRun(); fn CFRunLoopWakeUp(ev: *mut c_void); fn CFRelease(p: *const c_void); } pub struct Backend { pub watcher: Watcher, } impl Backend { pub fn new(period: Duration) -> Backend { Backend { watcher: Watcher::new() } } pub fn register(&mut self, paths: HashSet<Path>) { for path in paths.iter() { self.watcher.watch(path); } } pub fn transform(&self, ev: Event) -> super::Event { super::Unknown } }
let events: &[u32] = unsafe { mem::transmute(Slice {
random_line_split
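`has_flag` above is a masked-equality test over the FSEvents bitflags. The sketch below shows the same check in isolation, with constants copied from the enum in the listing (modern Rust syntax for clarity):

const ITEM_CREATED: u32 = 0x0000_0100;
const ITEM_REMOVED: u32 = 0x0000_0200;
const ITEM_IS_FILE: u32 = 0x0001_0000;

// True iff every bit of `expected` is set in `event`.
fn has_flag(event: u32, expected: u32) -> bool {
    event & expected == expected
}

fn main() {
    let event = ITEM_CREATED | ITEM_IS_FILE;
    assert!(has_flag(event, ITEM_CREATED));
    assert!(!has_flag(event, ITEM_REMOVED));
}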
fsevents.rs
#![allow(non_camel_case_types, non_uppercase_statics)] // C types use std::collections::{HashSet}; use std::c_str::CString; use std::io::{IoError, IoResult}; use std::io::fs::PathExtensions; use std::mem; use std::ptr; use std::raw::Slice; use std::os; use std::io::{Timer}; use std::time::Duration; //use super; use libc::{c_void, c_char, c_int, ENOENT}; use sync::{Arc, Mutex}; #[repr(C)] enum CFStringBuiltInEncodings { kCFStringEncodingUnicode = 0x01000000, kCFStringEncodingUTF8 = 0x08000100, } static kFSEventStreamCreateFlagNoDefer: u32 = 0x00000002; static kFSEventStreamCreateFlagFileEvents: u32 = 0x00000010; #[deriving(Show)] enum Event { Create(String), Remove(String), //ModifyMeta, Modify(String), RenameOld(String), RenameNew(String), } enum Control { Update(HashSet<String>), Exit, } #[repr(C)] struct FSEventStreamContext { version: c_int, info: *mut c_void, retain: *const c_void, release: *const c_void, desc: *const c_void, } type callback_t = extern "C" fn( stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64 ); #[repr(C)] enum FSEventStreamEventFlags { //kFSEventStreamEventFlagNone = 0x00000000, //kFSEventStreamEventFlagMustScanSubDirs = 0x00000001, //kFSEventStreamEventFlagUserDropped = 0x00000002, //kFSEventStreamEventFlagKernelDropped = 0x00000004, //kFSEventStreamEventFlagEventIdsWrapped = 0x00000008, //kFSEventStreamEventFlagHistoryDone = 0x00000010, //kFSEventStreamEventFlagRootChanged = 0x00000020, //kFSEventStreamEventFlagMount = 0x00000040, //kFSEventStreamEventFlagUnmount = 0x00000080, kFSEventStreamEventFlagItemCreated = 0x00000100, kFSEventStreamEventFlagItemRemoved = 0x00000200, //kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400, kFSEventStreamEventFlagItemRenamed = 0x00000800, //kFSEventStreamEventFlagItemModified = 0x00001000, //kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000, //kFSEventStreamEventFlagItemChangeOwner = 0x00004000, //kFSEventStreamEventFlagItemXattrMod = 0x00008000, kFSEventStreamEventFlagItemIsFile = 0x00010000, //kFSEventStreamEventFlagItemIsDir = 0x00020000, //kFSEventStreamEventFlagItemIsSymlink = 0x00040000, //kFSEventStreamEventFlagOwnEvent = 0x00080000 } static kFSEventStreamEventIdSinceNow: u64 = 0xFFFFFFFFFFFFFFFF; fn has_flag(event: u32, expected: FSEventStreamEventFlags) -> bool { event & expected as u32 == expected as u32 } extern "C" fn callback(_stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64) { let tx: &mut Sender<Event> = unsafe { &mut *(info as *mut Sender<Event>) }; let events: &[u32] = unsafe { mem::transmute(Slice { data: events, len: size as uint, }) }; let ids: &[u64] = unsafe { mem::transmute(Slice { data: ids, len: size as uint, }) }; let paths: &[*const i8] = unsafe { mem::transmute(Slice { data: paths, len: size as uint, }) }; let paths = Vec::from_fn(size as uint, |id| { unsafe { CString::new(paths[id], false) } }); let mut renamed = false; for id in range(0, size as uint) { debug!("Received filesystem event: [id: {}, ev: {}] from '{}'", ids[id], events[id], paths[id]); let event = events[id]; let path = String::from_str(paths[id].as_str().unwrap()); if event & kFSEventStreamEventFlagItemIsFile as u32 == 0 { continue; } let path_ = Path::new(path.as_slice()); if has_flag(event, kFSEventStreamEventFlagItemCreated) && path_.exists() { tx.send(Create(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRemoved) &&!path_.exists() { tx.send(Remove(path.clone())); } if 
has_flag(event, kFSEventStreamEventFlagItemRenamed) { if renamed { tx.send(RenameOld(path)); } else { tx.send(RenameNew(path)); } renamed =!renamed; } } } struct CoreFoundationString { d: *const c_void, } impl CoreFoundationString { fn new(string: &str) -> CoreFoundationString { CoreFoundationString { d: unsafe { CFStringCreateWithCString( kCFAllocatorDefault, string.to_c_str().as_ptr(), kCFStringEncodingUTF8 ) } } } } impl Drop for CoreFoundationString { fn drop(&mut self) { unsafe { CFRelease(self.d) } } } struct CoreFoundationArray { d: *const c_void, items: Vec<CoreFoundationString>, // It's a RAII container. } impl CoreFoundationArray { fn new(collection: &HashSet<String>) -> CoreFoundationArray { let d = unsafe { CFArrayCreateMutable( kCFAllocatorDefault, collection.len() as i32, ptr::null::<c_void>() ) }; let mut items = Vec::new(); for item in collection.iter() { let item = CoreFoundationString::new(item.as_slice()); unsafe { CFArrayAppendValue(d, item.d); } items.push(item); } CoreFoundationArray { d: d, items: items, } } } impl Drop for CoreFoundationArray { fn drop(&mut self) { self.items.clear(); unsafe { CFRelease(self.d) } } } fn recreate_stream(eventloop: *mut c_void, context: *const FSEventStreamContext, paths: HashSet<String>) -> *mut c_void { let paths = CoreFoundationArray::new(&paths); let latency = 0.05f64; let stream = unsafe { FSEventStreamCreate( kCFAllocatorDefault, callback, context, paths.d, kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagFileEvents ) }; unsafe { FSEventStreamScheduleWithRunLoop(stream, eventloop, kCFRunLoopDefaultMode); FSEventStreamStart(stream); stream } } pub struct Watcher { pub rx: Receiver<Event>, ctx: SyncSender<Control>, paths: HashSet<String>, stream: Arc<Mutex<*mut c_void>>, eventloop: Arc<Mutex<*mut c_void>>, } impl Watcher { pub fn new() -> Watcher { let (mut tx, rx) = channel::<Event>(); let (ctx, crx) = sync_channel::<Control>(0); let eventloop = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let stream = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let watcher = Watcher { rx: rx, ctx: ctx, paths: HashSet::new(), stream: stream.clone(), eventloop: eventloop.clone(), }; spawn(proc() { debug!("Starting watcher thread..."); unsafe { *eventloop.lock() = CFRunLoopGetCurrent(); let tx: *mut c_void = &mut tx as *mut _ as *mut c_void; let context = FSEventStreamContext { version: 0, info: tx, retain: ptr::null::<c_void>(), release: ptr::null::<c_void>(), desc: ptr::null::<c_void>(), }; loop { debug!("New watcher loop iteration"); match crx.recv() { Update(paths) =>
Exit => { debug!("Received watcher exit event - performing graceful shutdown"); break } } } } }); watcher } pub fn watch(&mut self, path: &Path) -> IoResult<()> { if path.exists() { debug!("Adding '{}' to the watch", path.display()); let path = os::make_absolute(path); let path = match path.as_str() { Some(path) => String::from_str(path), None => return Err(IoError::from_errno(ENOENT as uint, false)) }; self.paths.insert(path.clone()); self.update(); Ok(()) } else { Err(IoError::from_errno(ENOENT as uint, false)) } } pub fn unwatch(&mut self, path: &String) -> IoResult<()> { self.paths.remove(path); self.update(); Ok(()) } fn update(&self) { self.stop_stream(); self.ctx.send(Update(self.paths.clone())); } fn stop_stream(&self) { let mut stream = self.stream.lock(); if!(*stream).is_null() { unsafe { FSEventStreamStop(*stream); FSEventStreamInvalidate(*stream); FSEventStreamRelease(*stream); CFRunLoopWakeUp(*self.eventloop.lock()); } } } } impl Drop for Watcher { fn drop(&mut self) { debug!("dropping! {:p}", self); self.stop_stream(); self.ctx.send(Exit); } } #[link(name = "Carbon", kind = "framework")] #[link(name = "CoreFoundation", kind = "framework")] extern { static kCFAllocatorDefault: *mut c_void; static kCFRunLoopDefaultMode: *mut c_void; fn CFStringCreateWithCString(allocator: *mut c_void, string: *const c_char, encoding: CFStringBuiltInEncodings) -> *const c_void; fn CFArrayCreateMutable(allocator: *mut c_void, size: c_int, callbacks: *const c_void) -> *const c_void; fn CFArrayAppendValue(array: *const c_void, value: *const c_void); fn FSEventStreamCreate(allocator: *mut c_void, cb: callback_t, context: *const FSEventStreamContext, paths: *const c_void, since: u64, latency: f64, flags: u32) -> *mut c_void; fn FSEventStreamScheduleWithRunLoop(stream: *mut c_void, eventloop: *mut c_void, mode: *mut c_void); fn FSEventStreamStart(stream: *mut c_void); fn FSEventStreamStop(stream: *mut c_void); fn FSEventStreamInvalidate(stream: *mut c_void); fn FSEventStreamRelease(stream: *mut c_void); fn CFRunLoopGetCurrent() -> *mut c_void; fn CFRunLoopRun(); fn CFRunLoopWakeUp(ev: *mut c_void); fn CFRelease(p: *const c_void); } pub struct Backend { pub watcher: Watcher, } impl Backend { pub fn new(period: Duration) -> Backend { Backend { watcher: Watcher::new() } } pub fn register(&mut self, paths: HashSet<Path>) { for path in paths.iter() { self.watcher.watch(path); } } pub fn transform(&self, ev: Event) -> super::Event { super::Unknown } }
{ debug!("Updating watcher loop with {}", paths); *stream.lock() = recreate_stream(*eventloop.lock(), &context, paths); CFRunLoopRun(); }
conditional_block
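The watcher thread above is a control loop fed by a zero-capacity (rendezvous) channel: an `Update` message tears down and recreates the FSEvents stream, `Exit` breaks the loop. A dependency-free sketch of that shape in current Rust — the original predates Rust 1.0, hence `spawn(proc() ...)` and the bare `sync_channel`:

use std::collections::HashSet;
use std::sync::mpsc::sync_channel;
use std::thread;

enum Control {
    Update(HashSet<String>),
    Exit,
}

fn main() {
    // Capacity 0 makes every send a rendezvous with the worker.
    let (ctx, crx) = sync_channel::<Control>(0);
    let worker = thread::spawn(move || {
        while let Ok(msg) = crx.recv() {
            match msg {
                // Stand-in for recreate_stream + CFRunLoopRun.
                Control::Update(paths) => println!("rebuilding stream for {:?}", paths),
                Control::Exit => break,
            }
        }
    });
    ctx.send(Control::Update(HashSet::from(["/tmp".to_string()]))).unwrap();
    ctx.send(Control::Exit).unwrap();
    worker.join().unwrap();
}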
fsevents.rs
#![allow(non_camel_case_types, non_uppercase_statics)] // C types use std::collections::{HashSet}; use std::c_str::CString; use std::io::{IoError, IoResult}; use std::io::fs::PathExtensions; use std::mem; use std::ptr; use std::raw::Slice; use std::os; use std::io::{Timer}; use std::time::Duration; //use super; use libc::{c_void, c_char, c_int, ENOENT}; use sync::{Arc, Mutex}; #[repr(C)] enum CFStringBuiltInEncodings { kCFStringEncodingUnicode = 0x01000000, kCFStringEncodingUTF8 = 0x08000100, } static kFSEventStreamCreateFlagNoDefer: u32 = 0x00000002; static kFSEventStreamCreateFlagFileEvents: u32 = 0x00000010; #[deriving(Show)] enum
{ Create(String), Remove(String), //ModifyMeta, Modify(String), RenameOld(String), RenameNew(String), } enum Control { Update(HashSet<String>), Exit, } #[repr(C)] struct FSEventStreamContext { version: c_int, info: *mut c_void, retain: *const c_void, release: *const c_void, desc: *const c_void, } type callback_t = extern "C" fn( stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64 ); #[repr(C)] enum FSEventStreamEventFlags { //kFSEventStreamEventFlagNone = 0x00000000, //kFSEventStreamEventFlagMustScanSubDirs = 0x00000001, //kFSEventStreamEventFlagUserDropped = 0x00000002, //kFSEventStreamEventFlagKernelDropped = 0x00000004, //kFSEventStreamEventFlagEventIdsWrapped = 0x00000008, //kFSEventStreamEventFlagHistoryDone = 0x00000010, //kFSEventStreamEventFlagRootChanged = 0x00000020, //kFSEventStreamEventFlagMount = 0x00000040, //kFSEventStreamEventFlagUnmount = 0x00000080, kFSEventStreamEventFlagItemCreated = 0x00000100, kFSEventStreamEventFlagItemRemoved = 0x00000200, //kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400, kFSEventStreamEventFlagItemRenamed = 0x00000800, //kFSEventStreamEventFlagItemModified = 0x00001000, //kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000, //kFSEventStreamEventFlagItemChangeOwner = 0x00004000, //kFSEventStreamEventFlagItemXattrMod = 0x00008000, kFSEventStreamEventFlagItemIsFile = 0x00010000, //kFSEventStreamEventFlagItemIsDir = 0x00020000, //kFSEventStreamEventFlagItemIsSymlink = 0x00040000, //kFSEventStreamEventFlagOwnEvent = 0x00080000 } static kFSEventStreamEventIdSinceNow: u64 = 0xFFFFFFFFFFFFFFFF; fn has_flag(event: u32, expected: FSEventStreamEventFlags) -> bool { event & expected as u32 == expected as u32 } extern "C" fn callback(_stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64) { let tx: &mut Sender<Event> = unsafe { &mut *(info as *mut Sender<Event>) }; let events: &[u32] = unsafe { mem::transmute(Slice { data: events, len: size as uint, }) }; let ids: &[u64] = unsafe { mem::transmute(Slice { data: ids, len: size as uint, }) }; let paths: &[*const i8] = unsafe { mem::transmute(Slice { data: paths, len: size as uint, }) }; let paths = Vec::from_fn(size as uint, |id| { unsafe { CString::new(paths[id], false) } }); let mut renamed = false; for id in range(0, size as uint) { debug!("Received filesystem event: [id: {}, ev: {}] from '{}'", ids[id], events[id], paths[id]); let event = events[id]; let path = String::from_str(paths[id].as_str().unwrap()); if event & kFSEventStreamEventFlagItemIsFile as u32 == 0 { continue; } let path_ = Path::new(path.as_slice()); if has_flag(event, kFSEventStreamEventFlagItemCreated) && path_.exists() { tx.send(Create(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRemoved) &&!path_.exists() { tx.send(Remove(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRenamed) { if renamed { tx.send(RenameOld(path)); } else { tx.send(RenameNew(path)); } renamed =!renamed; } } } struct CoreFoundationString { d: *const c_void, } impl CoreFoundationString { fn new(string: &str) -> CoreFoundationString { CoreFoundationString { d: unsafe { CFStringCreateWithCString( kCFAllocatorDefault, string.to_c_str().as_ptr(), kCFStringEncodingUTF8 ) } } } } impl Drop for CoreFoundationString { fn drop(&mut self) { unsafe { CFRelease(self.d) } } } struct CoreFoundationArray { d: *const c_void, items: Vec<CoreFoundationString>, // It's a RAII container. 
} impl CoreFoundationArray { fn new(collection: &HashSet<String>) -> CoreFoundationArray { let d = unsafe { CFArrayCreateMutable( kCFAllocatorDefault, collection.len() as i32, ptr::null::<c_void>() ) }; let mut items = Vec::new(); for item in collection.iter() { let item = CoreFoundationString::new(item.as_slice()); unsafe { CFArrayAppendValue(d, item.d); } items.push(item); } CoreFoundationArray { d: d, items: items, } } } impl Drop for CoreFoundationArray { fn drop(&mut self) { self.items.clear(); unsafe { CFRelease(self.d) } } } fn recreate_stream(eventloop: *mut c_void, context: *const FSEventStreamContext, paths: HashSet<String>) -> *mut c_void { let paths = CoreFoundationArray::new(&paths); let latency = 0.05f64; let stream = unsafe { FSEventStreamCreate( kCFAllocatorDefault, callback, context, paths.d, kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagFileEvents ) }; unsafe { FSEventStreamScheduleWithRunLoop(stream, eventloop, kCFRunLoopDefaultMode); FSEventStreamStart(stream); stream } } pub struct Watcher { pub rx: Receiver<Event>, ctx: SyncSender<Control>, paths: HashSet<String>, stream: Arc<Mutex<*mut c_void>>, eventloop: Arc<Mutex<*mut c_void>>, } impl Watcher { pub fn new() -> Watcher { let (mut tx, rx) = channel::<Event>(); let (ctx, crx) = sync_channel::<Control>(0); let eventloop = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let stream = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let watcher = Watcher { rx: rx, ctx: ctx, paths: HashSet::new(), stream: stream.clone(), eventloop: eventloop.clone(), }; spawn(proc() { debug!("Starting watcher thread..."); unsafe { *eventloop.lock() = CFRunLoopGetCurrent(); let tx: *mut c_void = &mut tx as *mut _ as *mut c_void; let context = FSEventStreamContext { version: 0, info: tx, retain: ptr::null::<c_void>(), release: ptr::null::<c_void>(), desc: ptr::null::<c_void>(), }; loop { debug!("New watcher loop iteration"); match crx.recv() { Update(paths) => { debug!("Updating watcher loop with {}", paths); *stream.lock() = recreate_stream(*eventloop.lock(), &context, paths); CFRunLoopRun(); } Exit => { debug!("Received watcher exit event - performing graceful shutdown"); break } } } } }); watcher } pub fn watch(&mut self, path: &Path) -> IoResult<()> { if path.exists() { debug!("Adding '{}' to the watch", path.display()); let path = os::make_absolute(path); let path = match path.as_str() { Some(path) => String::from_str(path), None => return Err(IoError::from_errno(ENOENT as uint, false)) }; self.paths.insert(path.clone()); self.update(); Ok(()) } else { Err(IoError::from_errno(ENOENT as uint, false)) } } pub fn unwatch(&mut self, path: &String) -> IoResult<()> { self.paths.remove(path); self.update(); Ok(()) } fn update(&self) { self.stop_stream(); self.ctx.send(Update(self.paths.clone())); } fn stop_stream(&self) { let mut stream = self.stream.lock(); if!(*stream).is_null() { unsafe { FSEventStreamStop(*stream); FSEventStreamInvalidate(*stream); FSEventStreamRelease(*stream); CFRunLoopWakeUp(*self.eventloop.lock()); } } } } impl Drop for Watcher { fn drop(&mut self) { debug!("dropping! 
{:p}", self); self.stop_stream(); self.ctx.send(Exit); } } #[link(name = "Carbon", kind = "framework")] #[link(name = "CoreFoundation", kind = "framework")] extern { static kCFAllocatorDefault: *mut c_void; static kCFRunLoopDefaultMode: *mut c_void; fn CFStringCreateWithCString(allocator: *mut c_void, string: *const c_char, encoding: CFStringBuiltInEncodings) -> *const c_void; fn CFArrayCreateMutable(allocator: *mut c_void, size: c_int, callbacks: *const c_void) -> *const c_void; fn CFArrayAppendValue(array: *const c_void, value: *const c_void); fn FSEventStreamCreate(allocator: *mut c_void, cb: callback_t, context: *const FSEventStreamContext, paths: *const c_void, since: u64, latency: f64, flags: u32) -> *mut c_void; fn FSEventStreamScheduleWithRunLoop(stream: *mut c_void, eventloop: *mut c_void, mode: *mut c_void); fn FSEventStreamStart(stream: *mut c_void); fn FSEventStreamStop(stream: *mut c_void); fn FSEventStreamInvalidate(stream: *mut c_void); fn FSEventStreamRelease(stream: *mut c_void); fn CFRunLoopGetCurrent() -> *mut c_void; fn CFRunLoopRun(); fn CFRunLoopWakeUp(ev: *mut c_void); fn CFRelease(p: *const c_void); } pub struct Backend { pub watcher: Watcher, } impl Backend { pub fn new(period: Duration) -> Backend { Backend { watcher: Watcher::new() } } pub fn register(&mut self, paths: HashSet<Path>) { for path in paths.iter() { self.watcher.watch(path); } } pub fn transform(&self, ev: Event) -> super::Event { super::Unknown } }
Event
identifier_name
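`CoreFoundationString` and `CoreFoundationArray` above are RAII wrappers: the `Drop` impl calls `CFRelease` exactly once, so the Core Foundation objects can neither leak nor be double-released. A minimal sketch of the idiom with a fake heap handle standing in for a CF object:

struct Handle(*mut u8);

// Stand-in for a C-side acquire (e.g. CFStringCreateWithCString).
fn fake_acquire() -> *mut u8 {
    Box::into_raw(Box::new(0u8))
}

impl Handle {
    fn new() -> Handle {
        Handle(fake_acquire())
    }
}

impl Drop for Handle {
    fn drop(&mut self) {
        // Stand-in for CFRelease: reclaim the allocation exactly once.
        unsafe { drop(Box::from_raw(self.0)) };
        println!("released");
    }
}

fn main() {
    let _h = Handle::new(); // "released" prints when _h leaves scope
}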
fsevents.rs
#![allow(non_camel_case_types, non_uppercase_statics)] // C types use std::collections::{HashSet}; use std::c_str::CString; use std::io::{IoError, IoResult}; use std::io::fs::PathExtensions; use std::mem; use std::ptr; use std::raw::Slice; use std::os; use std::io::{Timer}; use std::time::Duration; //use super; use libc::{c_void, c_char, c_int, ENOENT}; use sync::{Arc, Mutex}; #[repr(C)] enum CFStringBuiltInEncodings { kCFStringEncodingUnicode = 0x01000000, kCFStringEncodingUTF8 = 0x08000100, } static kFSEventStreamCreateFlagNoDefer: u32 = 0x00000002; static kFSEventStreamCreateFlagFileEvents: u32 = 0x00000010; #[deriving(Show)] enum Event { Create(String), Remove(String), //ModifyMeta, Modify(String), RenameOld(String), RenameNew(String), } enum Control { Update(HashSet<String>), Exit, } #[repr(C)] struct FSEventStreamContext { version: c_int, info: *mut c_void, retain: *const c_void, release: *const c_void, desc: *const c_void, } type callback_t = extern "C" fn( stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64 ); #[repr(C)] enum FSEventStreamEventFlags { //kFSEventStreamEventFlagNone = 0x00000000, //kFSEventStreamEventFlagMustScanSubDirs = 0x00000001, //kFSEventStreamEventFlagUserDropped = 0x00000002, //kFSEventStreamEventFlagKernelDropped = 0x00000004, //kFSEventStreamEventFlagEventIdsWrapped = 0x00000008, //kFSEventStreamEventFlagHistoryDone = 0x00000010, //kFSEventStreamEventFlagRootChanged = 0x00000020, //kFSEventStreamEventFlagMount = 0x00000040, //kFSEventStreamEventFlagUnmount = 0x00000080, kFSEventStreamEventFlagItemCreated = 0x00000100, kFSEventStreamEventFlagItemRemoved = 0x00000200, //kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400, kFSEventStreamEventFlagItemRenamed = 0x00000800, //kFSEventStreamEventFlagItemModified = 0x00001000, //kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000, //kFSEventStreamEventFlagItemChangeOwner = 0x00004000, //kFSEventStreamEventFlagItemXattrMod = 0x00008000, kFSEventStreamEventFlagItemIsFile = 0x00010000, //kFSEventStreamEventFlagItemIsDir = 0x00020000, //kFSEventStreamEventFlagItemIsSymlink = 0x00040000, //kFSEventStreamEventFlagOwnEvent = 0x00080000 } static kFSEventStreamEventIdSinceNow: u64 = 0xFFFFFFFFFFFFFFFF; fn has_flag(event: u32, expected: FSEventStreamEventFlags) -> bool { event & expected as u32 == expected as u32 } extern "C" fn callback(_stream: *const c_void, info: *const c_void, size: c_int, paths: *const *const i8, events: *const u32, ids: *const u64) { let tx: &mut Sender<Event> = unsafe { &mut *(info as *mut Sender<Event>) }; let events: &[u32] = unsafe { mem::transmute(Slice { data: events, len: size as uint, }) }; let ids: &[u64] = unsafe { mem::transmute(Slice { data: ids, len: size as uint, }) }; let paths: &[*const i8] = unsafe { mem::transmute(Slice { data: paths, len: size as uint, }) }; let paths = Vec::from_fn(size as uint, |id| { unsafe { CString::new(paths[id], false) } }); let mut renamed = false; for id in range(0, size as uint) { debug!("Received filesystem event: [id: {}, ev: {}] from '{}'", ids[id], events[id], paths[id]); let event = events[id]; let path = String::from_str(paths[id].as_str().unwrap()); if event & kFSEventStreamEventFlagItemIsFile as u32 == 0 { continue; } let path_ = Path::new(path.as_slice()); if has_flag(event, kFSEventStreamEventFlagItemCreated) && path_.exists() { tx.send(Create(path.clone())); } if has_flag(event, kFSEventStreamEventFlagItemRemoved) &&!path_.exists() { tx.send(Remove(path.clone())); } if 
has_flag(event, kFSEventStreamEventFlagItemRenamed) { if renamed { tx.send(RenameOld(path)); } else { tx.send(RenameNew(path)); } renamed =!renamed; } } } struct CoreFoundationString { d: *const c_void, } impl CoreFoundationString { fn new(string: &str) -> CoreFoundationString { CoreFoundationString { d: unsafe { CFStringCreateWithCString( kCFAllocatorDefault, string.to_c_str().as_ptr(), kCFStringEncodingUTF8 ) } } } } impl Drop for CoreFoundationString { fn drop(&mut self) { unsafe { CFRelease(self.d) } } } struct CoreFoundationArray { d: *const c_void, items: Vec<CoreFoundationString>, // It's a RAII container. } impl CoreFoundationArray { fn new(collection: &HashSet<String>) -> CoreFoundationArray { let d = unsafe { CFArrayCreateMutable( kCFAllocatorDefault, collection.len() as i32, ptr::null::<c_void>() ) }; let mut items = Vec::new(); for item in collection.iter() { let item = CoreFoundationString::new(item.as_slice()); unsafe { CFArrayAppendValue(d, item.d); } items.push(item); } CoreFoundationArray { d: d, items: items, } } } impl Drop for CoreFoundationArray { fn drop(&mut self) { self.items.clear(); unsafe { CFRelease(self.d) } } } fn recreate_stream(eventloop: *mut c_void, context: *const FSEventStreamContext, paths: HashSet<String>) -> *mut c_void { let paths = CoreFoundationArray::new(&paths); let latency = 0.05f64; let stream = unsafe { FSEventStreamCreate( kCFAllocatorDefault, callback, context, paths.d, kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagFileEvents ) }; unsafe { FSEventStreamScheduleWithRunLoop(stream, eventloop, kCFRunLoopDefaultMode); FSEventStreamStart(stream); stream } } pub struct Watcher { pub rx: Receiver<Event>, ctx: SyncSender<Control>, paths: HashSet<String>, stream: Arc<Mutex<*mut c_void>>, eventloop: Arc<Mutex<*mut c_void>>, } impl Watcher { pub fn new() -> Watcher { let (mut tx, rx) = channel::<Event>(); let (ctx, crx) = sync_channel::<Control>(0); let eventloop = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let stream = Arc::new(Mutex::new(ptr::null_mut::<c_void>())); let watcher = Watcher { rx: rx, ctx: ctx, paths: HashSet::new(), stream: stream.clone(), eventloop: eventloop.clone(), }; spawn(proc() { debug!("Starting watcher thread..."); unsafe { *eventloop.lock() = CFRunLoopGetCurrent(); let tx: *mut c_void = &mut tx as *mut _ as *mut c_void; let context = FSEventStreamContext { version: 0, info: tx, retain: ptr::null::<c_void>(), release: ptr::null::<c_void>(), desc: ptr::null::<c_void>(), }; loop { debug!("New watcher loop iteration"); match crx.recv() { Update(paths) => { debug!("Updating watcher loop with {}", paths); *stream.lock() = recreate_stream(*eventloop.lock(), &context, paths); CFRunLoopRun(); } Exit => { debug!("Received watcher exit event - performing graceful shutdown"); break } } } } }); watcher } pub fn watch(&mut self, path: &Path) -> IoResult<()> { if path.exists() { debug!("Adding '{}' to the watch", path.display()); let path = os::make_absolute(path); let path = match path.as_str() { Some(path) => String::from_str(path), None => return Err(IoError::from_errno(ENOENT as uint, false)) }; self.paths.insert(path.clone()); self.update(); Ok(()) } else { Err(IoError::from_errno(ENOENT as uint, false)) } } pub fn unwatch(&mut self, path: &String) -> IoResult<()> { self.paths.remove(path); self.update(); Ok(()) } fn update(&self) { self.stop_stream(); self.ctx.send(Update(self.paths.clone())); } fn stop_stream(&self) { let mut stream = self.stream.lock(); if!(*stream).is_null() { unsafe { 
FSEventStreamStop(*stream); FSEventStreamInvalidate(*stream); FSEventStreamRelease(*stream); CFRunLoopWakeUp(*self.eventloop.lock()); } } } } impl Drop for Watcher { fn drop(&mut self) { debug!("dropping! {:p}", self); self.stop_stream(); self.ctx.send(Exit); } } #[link(name = "Carbon", kind = "framework")] #[link(name = "CoreFoundation", kind = "framework")] extern { static kCFAllocatorDefault: *mut c_void; static kCFRunLoopDefaultMode: *mut c_void; fn CFStringCreateWithCString(allocator: *mut c_void, string: *const c_char, encoding: CFStringBuiltInEncodings) -> *const c_void; fn CFArrayCreateMutable(allocator: *mut c_void, size: c_int, callbacks: *const c_void) -> *const c_void; fn CFArrayAppendValue(array: *const c_void, value: *const c_void); fn FSEventStreamCreate(allocator: *mut c_void, cb: callback_t, context: *const FSEventStreamContext, paths: *const c_void, since: u64, latency: f64, flags: u32) -> *mut c_void; fn FSEventStreamScheduleWithRunLoop(stream: *mut c_void, eventloop: *mut c_void, mode: *mut c_void); fn FSEventStreamStart(stream: *mut c_void); fn FSEventStreamStop(stream: *mut c_void); fn FSEventStreamInvalidate(stream: *mut c_void); fn FSEventStreamRelease(stream: *mut c_void); fn CFRunLoopGetCurrent() -> *mut c_void; fn CFRunLoopRun(); fn CFRunLoopWakeUp(ev: *mut c_void); fn CFRelease(p: *const c_void); } pub struct Backend { pub watcher: Watcher, } impl Backend { pub fn new(period: Duration) -> Backend { Backend { watcher: Watcher::new() } } pub fn register(&mut self, paths: HashSet<Path>) { for path in paths.iter() { self.watcher.watch(path); } } pub fn transform(&self, ev: Event) -> super::Event
}
{ super::Unknown }
identifier_body
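The C callback above rebuilds Rust slices from the raw pointer/length pairs FSEvents hands it, via `mem::transmute` over the old `raw::Slice` layout. In modern Rust the same conversion is `std::slice::from_raw_parts`; a small self-contained sketch:

use std::slice;

fn main() {
    // Pretend these came across an FFI boundary as (*const u32, len).
    let events_buf: [u32; 3] = [0x100, 0x200, 0x800];
    let (ptr, len) = (events_buf.as_ptr(), events_buf.len());
    // Safety: ptr/len describe a live, correctly typed, correctly sized buffer.
    let events: &[u32] = unsafe { slice::from_raw_parts(ptr, len) };
    assert_eq!(events[2], 0x800);
}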
kmodules.rs
//! This file contains all the stuff needed by Kernel Modules use super::message::push_message; use super::process::get_file_content; use super::scheduler::Scheduler; use super::thread_group::Credentials; use super::vfs::{Path, VFS}; use super::{IpcResult, SysResult}; use alloc::boxed::Box; use alloc::vec::Vec; use ansi_escape_code::Colored; use elf_loader::{SegmentType, SymbolTable}; use fallible_collections::boxed::FallibleBox; use irq::Irq; use kernel_modules::{ ForeignAllocMethods, KernelEvent, KernelSymbolList, KeyboardConfig, ModConfig, ModResult, ModReturn, ModSpecificReturn, RTCConfig, SymbolList, }; use libc_binding::{Errno, FileType, OpenFlags}; use log::Record; use time::Date; use core::convert::{TryFrom, TryInto}; use core::slice; use core::sync::atomic::AtomicU32; use crate::drivers::PIC_8259; use crate::elf_loader::load_elf; use crate::memory::mmu::Entry; use crate::memory::tools::{AllocFlags, NbrPages, Page, Virt}; use crate::memory::HIGH_KERNEL_MEMORY; /// Main structure pub struct KernelModules { dummy: Option<Module>, rtc: Option<Module>, keyboard: Option<Module>, syslog: Option<Module>, pub second_cycle: Vec<fn()>, } #[allow(dead_code)] /// Stored structure of a given module struct Module { start_point: u32, symbol_table: Box<SymbolTable>, mod_return: ModReturn, alloc_table: AllocTable, } /// Main implementation impl KernelModules { pub fn new() -> Self { Self { dummy: None, rtc: None, keyboard: None, syslog: None, second_cycle: Vec::new(), } } } impl Scheduler { /// Try to insert a Kernel Module pub fn insert_module(&mut self, modname: &str) -> SysResult<u32> { let (module_opt, module_pathname, mod_config) = match modname { "dummy" => ( &mut self.kernel_modules.dummy, "/turbofish/mod/dummy.mod", ModConfig::Dummy, ), "rtc" => ( &mut self.kernel_modules.rtc, "/turbofish/mod/rtc.mod", ModConfig::RTC(RTCConfig { enable_irq, disable_irq, // May be set as volatile... 
current_unix_time: unsafe { &mut CURRENT_UNIX_TIME }, }), ), "keyboard" => ( &mut self.kernel_modules.keyboard, "/turbofish/mod/key.mod", ModConfig::Keyboard(KeyboardConfig { enable_irq, disable_irq, callback: push_message, }), ), "syslog" => ( &mut self.kernel_modules.syslog, "/turbofish/mod/syslog.mod", ModConfig::Syslog, ), _ => { log::warn!("Unknown module name"); return Ok(0); } }; if let Some(_) = module_opt { log::warn!("Module already active"); return Ok(0); } // Generate content from disk let content = get_module_raw_content(module_pathname)?; // Try to parse ELF let (eip, symbol_table, alloc_table) = load_module(&content)?; let symbol_table = match symbol_table { Some(s) => s, None => { log::error!("No Symtab for that Module"); return Err(Errno::EINVAL); } }; // Launch the module with its particular context let start_point: u32 = eip as u32; let p: fn(SymbolList) -> ModResult = unsafe { core::mem::transmute(start_point) }; let mod_return = p(SymbolList { write, emergency_write, alloc_tools: ForeignAllocMethods { kmalloc, kcalloc, kfree, krealloc, }, kernel_callback: mod_config, kernel_symbol_list: KernelSymbolList::new(), }) .map_err(|_e| Errno::EINVAL)?; if let Some(configurable_callbacks) = &mod_return.configurable_callbacks_opt { // Ensure we have sufficient memory before binding anything let mut second_cycle_chunk_reserved = 0; for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Second => second_cycle_chunk_reserved += 1, _ => {} } } self.kernel_modules .second_cycle .try_reserve(second_cycle_chunk_reserved)?; // Bind callbacks for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Log => { // We assume that a function bindable to the Log event has a fn(&Record) prototype. // Yes, it is really really unsafe... But Louis is asking for that // LOGGER is on a direct binding. Not passing through Scheduler let p: fn(&Record) = unsafe { core::mem::transmute(elem.what) }; unsafe { // It is a shame that only one module can be bound to the log! terminal::log::LOGGER.bind(p); } } KernelEvent::Second => { // We assume that a function bindable to the Second event has a fn() prototype. 
let p: fn() = unsafe { core::mem::transmute(elem.what) }; self.kernel_modules.second_cycle.push(p); } } } } *module_opt = Some(Module { start_point, symbol_table, mod_return, alloc_table, }); Ok(0) } /// Try to remove a kernel module pub fn remove_module(&mut self, modname: &str) -> SysResult<u32> { let module_opt = match modname { "dummy" => &mut self.kernel_modules.dummy, "rtc" => &mut self.kernel_modules.rtc, "keyboard" => &mut self.kernel_modules.keyboard, "syslog" => &mut self.kernel_modules.syslog, _ => { log::warn!("Unknown module name"); return Ok(0); } }; match module_opt { None => { log::warn!("Module already inactive"); return Ok(0); } Some(module) => { // Disable callbacks if let Some(configurable_callbacks) = &module.mod_return.configurable_callbacks_opt { for elem in configurable_callbacks.iter() { match elem.when { KernelEvent::Log => unsafe { terminal::log::LOGGER.unbind(); }, KernelEvent::Second => { let p: fn() = unsafe { core::mem::transmute(elem.what) }; let _r = self .kernel_modules .second_cycle .drain_filter(|elem| *elem == p) .collect::<Vec<_>>(); } } } } // Halt the module (module.mod_return.stop)(); } } *module_opt = None; Ok(0) } /// List all loaded modules pub fn list_modules(&self) -> SysResult<u32> { if self.kernel_modules.dummy.is_some() { println!("- module loaded: {}", "DUMMY".yellow()); } if self.kernel_modules.keyboard.is_some() { println!("- module loaded: {}", "KEYBOARD".yellow()); } if self.kernel_modules.rtc.is_some() { println!("- module loaded: {}", "RTC".yellow()); } if self.kernel_modules.syslog.is_some() { println!("- module loaded: {}", "SYSLOG".yellow()); } Ok(0) } /// Keyboard-driver-specific method pub fn reboot_computer(&self) { if let Some(keyboard) = &self.kernel_modules.keyboard { if let ModSpecificReturn::Keyboard(keyboard_return) = &keyboard.mod_return.spec { (keyboard_return.reboot_computer)(); } else { panic!("Unexpected error"); } } else { log::error!("ps2_controler/Keyboard handler not loaded"); } } /// RTC-driver-specific method pub fn read_date(&self) -> Date { if let Some(rtc) = &self.kernel_modules.rtc { if let ModSpecificReturn::RTC(rtc_return) = &rtc.mod_return.spec { (rtc_return.read_date)() } else { panic!("Unexpected error"); } } else { Date::default() } } } /// RTC-driver-specific global pub static mut CURRENT_UNIX_TIME: AtomicU32 = AtomicU32::new(0); /// Set IDT ENTRY fn: Usable by modules fn enable_irq(idt_gate: Irq, func: unsafe extern "C" fn()) { unsafe { PIC_8259.lock().enable_irq(idt_gate, Some(func)); } } /// Unset IDT ENTRY fn: Usable by modules fn disable_irq(idt_gate: Irq) { unsafe { PIC_8259.lock().disable_irq(idt_gate); } } /// Common Write method for modules fn write(s: &str) { log::info!("{}", s); } /// Emergency Write method for modules fn
(s: &str) { eprint!("{}", s); } /// Just used for a symbol list test #[no_mangle] #[link_section = ".kernel_exported_functions"] pub fn symbol_list_test() { log::info!("symbol_list_test function successfully called by a module!"); } #[no_mangle] #[link_section = ".kernel_exported_functions"] pub fn add_syslog_entry(entry: &str) -> Result<(), Errno> { let cwd = Path::try_from("/")?; let path = Path::try_from("/var/syslog")?; let mode = FileType::from_bits(0o600).expect("Cannot set FileType"); let flags = OpenFlags::O_WRONLY | OpenFlags::O_CREAT | OpenFlags::O_APPEND; let creds = &Credentials::ROOT; VFS.force_unlock(); /* just in case. This mutex could become very problematic */ let file_operator = match VFS.lock().open(&cwd, creds, path, flags, mode)? { IpcResult::Done(file_operator) => file_operator, IpcResult::Wait(file_operator, _) => file_operator, }; let mut m = file_operator.lock(); m.write(unsafe { core::slice::from_raw_parts(entry as *const _ as *const u8, entry.len()) })?; Ok(()) } /// Common allocator methods for modules extern "C" { fn kmalloc(len: usize) -> *mut u8; fn kcalloc(count: usize, size: usize) -> *mut u8; fn kfree(ptr: *mut u8); fn krealloc(addr: *mut u8, new_size: usize) -> *mut u8; } struct AllocTable(Vec<AllocEntry>); struct AllocEntry { page_index: Page<Virt>, nbr_pages: NbrPages, } impl AllocEntry { fn new(page_index: Page<Virt>, nbr_pages: NbrPages) -> Self { Self { page_index, nbr_pages, } } } impl Drop for AllocEntry { fn drop(&mut self) { unsafe { HIGH_KERNEL_MEMORY .as_mut() .unwrap() .dealloc_on(self.page_index, self.nbr_pages) .expect("Unexpected memory error"); } } } /// Load a module from ELF fn load_module(content: &[u8]) -> SysResult<(u32, Option<Box<SymbolTable>>, AllocTable)> { let mut alloc_table: AllocTable = AllocTable(Vec::new()); // Parse the ELF and generate the needed structures let elf = load_elf(content)?; for h in &elf.program_header_table { if h.segment_type == SegmentType::Load { let segment = unsafe { let page_index: Page<Virt> = Virt(h.vaddr as usize).into(); let nbr_pages: NbrPages = (h.memsz as usize).into(); alloc_table.0.try_reserve(1)?; HIGH_KERNEL_MEMORY.as_mut().unwrap().alloc_on( page_index, nbr_pages, AllocFlags::KERNEL_MEMORY, )?; alloc_table.0.push(AllocEntry::new(page_index, nbr_pages)); slice::from_raw_parts_mut(h.vaddr as usize as *mut u8, h.memsz as usize) }; segment[0..h.filez as usize] .copy_from_slice(&content[h.offset as usize..h.offset as usize + h.filez as usize]); unsafe { // With BSS (a NOBITS section), the memsz value exceeds the filesz: set the trailing bytes to 0 segment[h.filez as usize..h.memsz as usize] .as_mut_ptr() .write_bytes(0, h.memsz as usize - h.filez as usize); // Modify the rights on the pages following the ELF-specific restrictions HIGH_KERNEL_MEMORY .as_mut() .unwrap() .change_range_page_entry( Page::containing(Virt(h.vaddr as usize)), (h.memsz as usize).into(), &mut |entry: &mut Entry| { *entry |= Entry::from( Into::<AllocFlags>::into(h.flags) | AllocFlags::KERNEL_MEMORY, ) }, )?; } } } Ok(( elf.header.entry_point as u32, match SymbolTable::try_new(content).ok() { Some(elem) => Some(Box::try_new(elem)?), None => None, }, alloc_table, )) } /// Get the raw data of a module fn get_module_raw_content(mod_pathname: &str) -> SysResult<Vec<u8>> { let path = mod_pathname.try_into()?; get_file_content( &Path::try_from("/").expect("no root"), &Credentials::ROOT, path, ) }
emergency_write
identifier_name
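`insert_module` above turns the ELF entry point — a bare `u32` address — into a callable function pointer with `core::mem::transmute`. A self-contained sketch of that cast; it is sound here because the address comes from a real Rust function, and it assumes the usual targets where `usize` and a fn pointer have the same size:

fn entry() -> u32 {
    42
}

fn main() {
    // Take the function's address as a plain integer...
    let start_point: usize = entry as usize;
    // ...and transmute it back into a typed function pointer, as the
    // module loader does with the ELF entry point.
    let p: fn() -> u32 = unsafe { std::mem::transmute(start_point) };
    assert_eq!(p(), 42);
}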
kmodules.rs
//! This file contains all the stuff needed by Kernel Modules use super::message::push_message; use super::process::get_file_content; use super::scheduler::Scheduler; use super::thread_group::Credentials; use super::vfs::{Path, VFS}; use super::{IpcResult, SysResult}; use alloc::boxed::Box; use alloc::vec::Vec; use ansi_escape_code::Colored; use elf_loader::{SegmentType, SymbolTable}; use fallible_collections::boxed::FallibleBox; use irq::Irq; use kernel_modules::{ ForeignAllocMethods, KernelEvent, KernelSymbolList, KeyboardConfig, ModConfig, ModResult, ModReturn, ModSpecificReturn, RTCConfig, SymbolList, }; use libc_binding::{Errno, FileType, OpenFlags}; use log::Record; use time::Date; use core::convert::{TryFrom, TryInto}; use core::slice; use core::sync::atomic::AtomicU32; use crate::drivers::PIC_8259; use crate::elf_loader::load_elf; use crate::memory::mmu::Entry; use crate::memory::tools::{AllocFlags, NbrPages, Page, Virt}; use crate::memory::HIGH_KERNEL_MEMORY; /// Main structure pub struct KernelModules { dummy: Option<Module>, rtc: Option<Module>, keyboard: Option<Module>, syslog: Option<Module>, pub second_cycle: Vec<fn()>, } #[allow(dead_code)] /// Stored structure of a given module struct Module { start_point: u32, symbol_table: Box<SymbolTable>, mod_return: ModReturn, alloc_table: AllocTable, } /// Main implementation impl KernelModules { pub fn new() -> Self { Self { dummy: None, rtc: None, keyboard: None, syslog: None, second_cycle: Vec::new(), } } } impl Scheduler { /// Try to insert a Kernel Module pub fn insert_module(&mut self, modname: &str) -> SysResult<u32> { let (module_opt, module_pathname, mod_config) = match modname { "dummy" => ( &mut self.kernel_modules.dummy, "/turbofish/mod/dummy.mod", ModConfig::Dummy, ), "rtc" => ( &mut self.kernel_modules.rtc, "/turbofish/mod/rtc.mod", ModConfig::RTC(RTCConfig { enable_irq, disable_irq, // May be set as volatile... 
                    current_unix_time: unsafe { &mut CURRENT_UNIX_TIME },
                }),
            ),
            "keyboard" => (
                &mut self.kernel_modules.keyboard,
                "/turbofish/mod/key.mod",
                ModConfig::Keyboard(KeyboardConfig {
                    enable_irq,
                    disable_irq,
                    callback: push_message,
                }),
            ),
            "syslog" => (
                &mut self.kernel_modules.syslog,
                "/turbofish/mod/syslog.mod",
                ModConfig::Syslog,
            ),
            _ => {
                log::warn!("Unknown module name");
                return Ok(0);
            }
        };
        if let Some(_) = module_opt {
            log::warn!("Module already active");
            return Ok(0);
        }
        // Read the module content from disk
        let content = get_module_raw_content(module_pathname)?;
        // Try to parse the ELF
        let (eip, symbol_table, alloc_table) = load_module(&content)?;
        let symbol_table = match symbol_table {
            Some(s) => s,
            None => {
                log::error!("No Symtab for that Module");
                return Err(Errno::EINVAL);
            }
        };
        // Launch the module with its particular context
        let start_point: u32 = eip as u32;
        let p: fn(SymbolList) -> ModResult = unsafe { core::mem::transmute(start_point) };
        let mod_return = p(SymbolList {
            write,
            emergency_write,
            alloc_tools: ForeignAllocMethods {
                kmalloc,
                kcalloc,
                kfree,
                krealloc,
            },
            kernel_callback: mod_config,
            kernel_symbol_list: KernelSymbolList::new(),
        })
        .map_err(|_e| Errno::EINVAL)?;
        if let Some(configurable_callbacks) = &mod_return.configurable_callbacks_opt {
            // Ensure we have sufficient memory before binding anything
            let mut second_cycle_chunk_reserved = 0;
            for elem in configurable_callbacks.iter() {
                match elem.when {
                    KernelEvent::Second => second_cycle_chunk_reserved += 1,
                    _ => {}
                }
            }
            self.kernel_modules
                .second_cycle
                .try_reserve(second_cycle_chunk_reserved)?;
            // Bind callbacks
            for elem in configurable_callbacks.iter() {
                match elem.when {
                    KernelEvent::Log => {
                        // We assume that a function bindable to the Log event has a fn(&Record) prototype.
                        // Yes, it is really, really unsafe... But Louis is asking for that
                        // LOGGER is bound directly, not through the Scheduler
                        let p: fn(&Record) = unsafe { core::mem::transmute(elem.what) };
                        unsafe {
                            // It is a shame that only one module can be bound to the log!
                            terminal::log::LOGGER.bind(p);
                        }
                    }
                    KernelEvent::Second => {
                        // We assume that a function bindable to the Second event has a fn() prototype.
                        let p: fn() = unsafe { core::mem::transmute(elem.what) };
                        self.kernel_modules.second_cycle.push(p);
                    }
                }
            }
        }
        *module_opt = Some(Module {
            start_point,
            symbol_table,
            mod_return,
            alloc_table,
        });
        Ok(0)
    }

    /// Try to remove a kernel module
    pub fn remove_module(&mut self, modname: &str) -> SysResult<u32> {
        let module_opt = match modname {
            "dummy" => &mut self.kernel_modules.dummy,
            "rtc" => &mut self.kernel_modules.rtc,
            "keyboard" => &mut self.kernel_modules.keyboard,
            "syslog" => &mut self.kernel_modules.syslog,
            _ => {
                log::warn!("Unknown module name");
                return Ok(0);
            }
        };
        match module_opt {
            None => {
                log::warn!("Module already inactive");
                return Ok(0);
            }
            Some(module) => {
                // Disable callbacks
                if let Some(configurable_callbacks) = &module.mod_return.configurable_callbacks_opt
                {
                    for elem in configurable_callbacks.iter() {
                        match elem.when {
                            KernelEvent::Log => unsafe {
                                terminal::log::LOGGER.unbind();
                            },
                            KernelEvent::Second => {
                                let p: fn() = unsafe { core::mem::transmute(elem.what) };
                                let _r = self
                                    .kernel_modules
                                    .second_cycle
                                    .drain_filter(|elem| *elem == p)
                                    .collect::<Vec<_>>();
                            }
                        }
                    }
                }
                // Halt the module
                (module.mod_return.stop)();
            }
        }
        *module_opt = None;
        Ok(0)
    }

    /// List all loaded modules
    pub fn list_modules(&self) -> SysResult<u32> {
        if self.kernel_modules.dummy.is_some() {
            println!("- module loaded: {}", "DUMMY".yellow());
        }
        if self.kernel_modules.keyboard.is_some() {
            println!("- module loaded: {}", "KEYBOARD".yellow());
        }
        if self.kernel_modules.rtc.is_some() {
            println!("- module loaded: {}", "RTC".yellow());
        }
        if self.kernel_modules.syslog.is_some() {
            println!("- module loaded: {}", "SYSLOG".yellow());
        }
        Ok(0)
    }

    /// Keyboard driver specific method
    pub fn reboot_computer(&self) {
        if let Some(keyboard) = &self.kernel_modules.keyboard {
            if let ModSpecificReturn::Keyboard(keyboard_return) = &keyboard.mod_return.spec {
                (keyboard_return.reboot_computer)();
            } else {
                panic!("Unexpected error");
            }
        } else {
            log::error!("ps2_controller/Keyboard handler not loaded");
        }
    }

    /// RTC driver specific method
    pub fn read_date(&self) -> Date {
        if let Some(rtc) = &self.kernel_modules.rtc {
            if let ModSpecificReturn::RTC(rtc_return) = &rtc.mod_return.spec {
                (rtc_return.read_date)()
            } else {
                panic!("Unexpected error");
            }
        } else {
            Date::default()
        }
    }
}

/// RTC driver specific global
pub static mut CURRENT_UNIX_TIME: AtomicU32 = AtomicU32::new(0);

/// Set IDT ENTRY fn: Usable by modules
fn enable_irq(idt_gate: Irq, func: unsafe extern "C" fn()) {
    unsafe {
        PIC_8259.lock().enable_irq(idt_gate, Some(func));
    }
}

/// Unset IDT ENTRY fn: Usable by modules
fn disable_irq(idt_gate: Irq) {
    unsafe {
        PIC_8259.lock().disable_irq(idt_gate);
    }
}

/// Common Write method for modules
fn write(s: &str) {
    log::info!("{}", s);
}

/// Emergency Write method for modules
fn emergency_write(s: &str) {
    eprint!("{}", s);
}

/// Just used for a symbol list test
#[no_mangle]
#[link_section = ".kernel_exported_functions"]
pub fn symbol_list_test() {
    log::info!("symbol_list_test function successfully called by a module!");
}

#[no_mangle]
#[link_section = ".kernel_exported_functions"]
pub fn add_syslog_entry(entry: &str) -> Result<(), Errno> {
    let cwd = Path::try_from("/")?;
    let path = Path::try_from("/var/syslog")?;
    let mode = FileType::from_bits(0o600).expect("Cannot set FileType");
    let flags = OpenFlags::O_WRONLY | OpenFlags::O_CREAT | OpenFlags::O_APPEND;
    let creds = &Credentials::ROOT;
    VFS.force_unlock(); /* just in case; this mutex could become very problematic */
    let file_operator = match VFS.lock().open(&cwd, creds, path, flags, mode)?
    {
        IpcResult::Done(file_operator) => file_operator,
        IpcResult::Wait(file_operator, _) => file_operator,
    };
    let mut m = file_operator.lock();
    m.write(unsafe { core::slice::from_raw_parts(entry as *const _ as *const u8, entry.len()) })?;
    Ok(())
}

/// Common allocator methods for modules
extern "C" {
    fn kmalloc(len: usize) -> *mut u8;
    fn kcalloc(count: usize, size: usize) -> *mut u8;
    fn kfree(ptr: *mut u8);
    fn krealloc(addr: *mut u8, new_size: usize) -> *mut u8;
}

struct AllocTable(Vec<AllocEntry>);

struct AllocEntry {
    page_index: Page<Virt>,
    nbr_pages: NbrPages,
}

impl AllocEntry {
    fn new(page_index: Page<Virt>, nbr_pages: NbrPages) -> Self {
        Self {
            page_index,
            nbr_pages,
        }
    }
}

impl Drop for AllocEntry {
    fn drop(&mut self) {
        unsafe {
            HIGH_KERNEL_MEMORY
                .as_mut()
                .unwrap()
                .dealloc_on(self.page_index, self.nbr_pages)
                .expect("Unexpected memory error");
        }
    }
}

/// Load a module from ELF
fn load_module(content: &[u8]) -> SysResult<(u32, Option<Box<SymbolTable>>, AllocTable)> {
    let mut alloc_table: AllocTable = AllocTable(Vec::new());
    // Parse the ELF and map its loadable segments
    let elf = load_elf(content)?;
    for h in &elf.program_header_table {
        if h.segment_type == SegmentType::Load {
            let segment = unsafe {
                let page_index: Page<Virt> = Virt(h.vaddr as usize).into();
                let nbr_pages: NbrPages = (h.memsz as usize).into();
                alloc_table.0.try_reserve(1)?;
                HIGH_KERNEL_MEMORY.as_mut().unwrap().alloc_on(
                    page_index,
                    nbr_pages,
                    AllocFlags::KERNEL_MEMORY,
                )?;
                alloc_table.0.push(AllocEntry::new(page_index, nbr_pages));
                slice::from_raw_parts_mut(h.vaddr as usize as *mut u8, h.memsz as usize)
            };
            segment[0..h.filez as usize]
                .copy_from_slice(&content[h.offset as usize..h.offset as usize + h.filez as usize]);
            unsafe {
                    .as_mut_ptr()
                    .write_bytes(0, h.memsz as usize - h.filez as usize);
                // Adjust the page permissions following the ELF-specific restrictions
                HIGH_KERNEL_MEMORY
                    .as_mut()
                    .unwrap()
                    .change_range_page_entry(
                        Page::containing(Virt(h.vaddr as usize)),
                        (h.memsz as usize).into(),
                        &mut |entry: &mut Entry| {
                            *entry |= Entry::from(
                                Into::<AllocFlags>::into(h.flags) | AllocFlags::KERNEL_MEMORY,
                            )
                        },
                    )?;
            }
        }
    }
    Ok((
        elf.header.entry_point as u32,
        match SymbolTable::try_new(content).ok() {
            Some(elem) => Some(Box::try_new(elem)?),
            None => None,
        },
        alloc_table,
    ))
}

/// Get the raw content of a module file
fn get_module_raw_content(mod_pathname: &str) -> SysResult<Vec<u8>> {
    let path = mod_pathname.try_into()?;
    get_file_content(
        &Path::try_from("/").expect("no root"),
        &Credentials::ROOT,
        path,
    )
}
                // With BSS (a NOBITS section), the memsz value exceeds the filesz: zero the remaining bytes
                segment[h.filez as usize..h.memsz as usize]
random_line_split
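// Illustration: `insert_module` above treats a module's entry point as
// `fn(SymbolList) -> ModResult`, and `remove_module` later calls the returned
// `stop` callback. Below is a sketch of that handshake with simplified stand-in
// types; the real `SymbolList`, `ModReturn` and `ModResult` live in the
// `kernel_modules` crate and carry more fields (allocator methods, kernel config,
// exported symbols). This demo uses `println!` from std, unlike the no_std kernel.

/// Stand-in for the symbol pack the kernel hands to a module on startup.
struct DemoSymbolList {
    write: fn(&str),
}

/// Stand-in for what a module hands back: at minimum, a way to stop it.
struct DemoModReturn {
    stop: fn(),
}

type DemoModResult = Result<DemoModReturn, ()>;

/// Shape of a module's entry point as seen from the module side: receive the
/// kernel's symbols, initialize, and return the module's own callbacks.
fn demo_module_start(syms: DemoSymbolList) -> DemoModResult {
    (syms.write)("module initialized");
    Ok(DemoModReturn {
        stop: || println!("module stopped"),
    })
}

fn demo_module_lifecycle() {
    // Kernel side: call the entry point, keep the returned callbacks, and
    // invoke `stop` when the module is removed.
    let mod_return = demo_module_start(DemoSymbolList {
        write: |s| println!("log: {}", s),
    })
    .expect("module failed to start");
    (mod_return.stop)();
}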
mod.rs
#![warn(missing_docs)]

//! Contains all structures and methods to create and manage scenes.
//!
//! A scene is a container for graph nodes, animations and physics.

pub mod base;
pub mod camera;
pub mod graph;
pub mod light;
pub mod mesh;
pub mod node;
pub mod particle_system;
pub mod sprite;
pub mod transform;

use crate::{
    animation::AnimationContainer,
    core::{
        math::vec2::Vec2,
        pool::{Handle, Pool, PoolIterator, PoolIteratorMut},
        visitor::{Visit, VisitError, VisitResult, Visitor},
    },
    engine::resource_manager::ResourceManager,
    physics::{rigid_body::RigidBody, Physics},
    resource::texture::Texture,
    scene::{graph::Graph, node::Node},
    utils::{lightmap::Lightmap, log::Log},
};
use std::{
    collections::HashMap,
    ops::{Index, IndexMut},
    path::Path,
    sync::{Arc, Mutex},
};

/// The physics binder is used to link graph nodes with rigid bodies. The scene will
/// sync the transform of a node with its associated rigid body.
#[derive(Clone, Debug)]
pub struct PhysicsBinder {
    node_rigid_body_map: HashMap<Handle<Node>, Handle<RigidBody>>,
}

impl Default for PhysicsBinder {
    fn default() -> Self {
        Self {
            node_rigid_body_map: Default::default(),
        }
    }
}

impl PhysicsBinder {
    /// Links the given graph node with the specified rigid body.
    pub fn bind(
        &mut self,
        node: Handle<Node>,
        rigid_body: Handle<RigidBody>,
    ) -> Option<Handle<RigidBody>> {
        self.node_rigid_body_map.insert(node, rigid_body)
    }

    /// Unlinks the given graph node from its associated rigid body (if any).
    pub fn unbind(&mut self, node: Handle<Node>) -> Option<Handle<RigidBody>> {
        self.node_rigid_body_map.remove(&node)
    }

    /// Unlinks the given body from the node that is linked with it.
    ///
    /// # Performance
    ///
    /// This method is slow for two reasons:
    ///
    /// 1) The search is linear
    /// 2) Additional memory is allocated
    ///
    /// So it is not advised to call it in performance critical places.
    pub fn unbind_by_body(&mut self, body: Handle<RigidBody>) -> Handle<Node> {
        let mut node = Handle::NONE;
        self.node_rigid_body_map = self
            .node_rigid_body_map
            .clone()
            .into_iter()
            .filter(|&(n, b)| {
                if b == body {
                    node = n;
                    false
                } else {
                    true
                }
            })
            .collect();
        node
    }

    /// Returns the handle of the rigid body associated with the given node. It will return
    /// Handle::NONE if the given node isn't linked to a rigid body.
    pub fn body_of(&self, node: Handle<Node>) -> Handle<RigidBody> {
        self.node_rigid_body_map
            .get(&node)
            .copied()
            .unwrap_or_default()
    }
}

impl Visit for PhysicsBinder {
    fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult {
        visitor.enter_region(name)?;

        self.node_rigid_body_map.visit("Map", visitor)?;

        visitor.leave_region()
    }
}

/// See module docs.
#[derive(Debug)]
pub struct Scene {
    /// The graph is the main container for all scene nodes. It calculates global transforms for
    /// nodes, updates them and performs all other important work. See `graph` module docs for
    /// more info.
    pub graph: Graph,

    /// The animations container controls all animations in the scene. Each animation can have
    /// tracks which hold handles to graph nodes. See `animation` module docs for more info.
    pub animations: AnimationContainer,

    /// Physics world. Allows you to create various physics objects such as static geometries and
    /// rigid bodies. Rigid bodies should then be linked with graph nodes using the binder.
    pub physics: Physics,

    /// The physics binder is a bridge between the physics world and the scene graph. If a rigid
    /// body is linked to a graph node, then the rigid body will control the node's local transform.
    pub physics_binder: PhysicsBinder,

    /// Texture to draw the scene to. If empty, the scene will be drawn on screen directly.
    /// It is useful to "embed" one scene into another by drawing a quad with this
    /// texture. This can be used to make an in-game video conference - you can make
    /// a separate scene with your characters and draw that scene into a texture, then in
    /// the main scene attach this texture to some quad which will be used as a
    /// monitor. Another use could be a model previewer, like a character pictogram in
    /// real-time strategies; in other words, there are plenty of possible uses.
    pub render_target: Option<Arc<Mutex<Texture>>>,

    lightmap: Option<Lightmap>,
}

impl Default for Scene {
    fn default() -> Self {
        Self {
            graph: Default::default(),
            animations: Default::default(),
            physics: Default::default(),
            physics_binder: Default::default(),
            render_target: None,
            lightmap: None,
        }
    }
}

impl Scene {
    /// Creates a new scene with a single root node.
    ///
    /// # Notes
    ///
    /// This method differs from the Default trait implementation! Scene::default() creates
    /// an empty graph with no nodes.
    #[inline]
    pub fn new() -> Self {
        Self {
            // The graph must be created with the `new` method because it differs from `default`
            graph: Graph::new(),
            physics: Default::default(),
            animations: Default::default(),
            physics_binder: Default::default(),
            render_target: None,
            lightmap: None,
        }
    }

    /// Tries to load a scene from the given file. The file can contain any scene in the native
    /// engine format. Such scenes can be made in rusty editor.
    pub fn from_file<P: AsRef<Path>>(
        path: P,
        resource_manager: &mut ResourceManager,
    ) -> Result<Self, VisitError> {
        let mut scene = Scene::default();
        let mut visitor = Visitor::load_binary(path.as_ref())?;
        scene.visit("Scene", &mut visitor)?;

        // Restore pointers to resources. A scene saves only paths to resources, so here we must
        // find the real resources instead.
        for node in scene.graph.linear_iter_mut() {
            if let Some(shallow_resource) = node.resource.clone() {
                node.resource =
                    resource_manager.request_model(&shallow_resource.lock().unwrap().path);
            }
        }

        // And resolve to extract the correct graphical data and so on.
        scene.resolve();

        Ok(scene)
    }

    fn update_physics(&mut self, dt: f32) {
        self.physics.step(dt);

        // Keep a pair only while both the node and the body are alive.
        let graph = &self.graph;
        let physics = &self.physics;
        self.physics_binder
            .node_rigid_body_map
            .retain(|node, body| {
                graph.is_valid_handle(*node) && physics.is_valid_body_handle(*body)
            });

        // Sync node positions with assigned physics bodies
        for (node, body) in self.physics_binder.node_rigid_body_map.iter() {
            let body = physics.borrow_body(*body);
            self.graph[*node]
                .local_transform_mut()
                .set_position(body.get_position());
        }
    }

    /// Removes a node from the scene together with all associated entities, like animations.
    ///
    /// # Panics
    ///
    /// Panics if the handle is invalid.
    pub fn remove_node(&mut self, handle: Handle<Node>) {
        for descendant in self.graph.traverse_handle_iter(handle) {
            // Remove all associated animations.
            self.animations.retain(|animation| {
                for track in animation.get_tracks() {
                    if track.get_node() == descendant {
                        return false;
                    }
                }
                true
            });
        }

        self.graph.remove_node(handle)
    }

    pub(in crate) fn resolve(&mut self) {
        Log::writeln("Starting resolve...".to_owned());
        self.graph.resolve();
        self.animations.resolve(&self.graph);
        Log::writeln("Resolve succeeded!".to_owned());
    }

    /// Tries to set a new lightmap for the scene.
    pub fn set_lightmap(&mut self, lightmap: Lightmap) -> Result<Option<Lightmap>, &'static str> {
        // Assign textures to surfaces.
        for (handle, lightmaps) in lightmap.map.iter() {
            if let Node::Mesh(mesh) = &mut self.graph[*handle] {
                if mesh.surfaces().len() != lightmaps.len() {
                    return Err("failed to set lightmap, surface count mismatch");
                }
                for (surface, entry) in mesh.surfaces_mut().iter_mut().zip(lightmaps) {
                    // This unwrap() call must never panic in normal conditions, because the
                    // texture is wrapped in Option only to implement the Default trait for
                    // serialization.
                    let texture = entry.texture.clone().unwrap();
                    surface.set_lightmap_texture(texture)
                }
            }
        }
        Ok(std::mem::replace(&mut self.lightmap, Some(lightmap)))
    }

    /// Performs a single update tick with the given delta time since the last frame. Internally
    /// it updates physics, animations, and each graph node. In most cases there is no need to
    /// call it directly; the engine automatically updates all available scenes.
    pub fn update(&mut self, frame_size: Vec2, dt: f32) {
        self.update_physics(dt);
        self.animations.update_animations(dt);
        self.graph.update_nodes(frame_size, dt);
    }

    /// Creates a deep copy of the scene; the filter predicate allows you to filter out nodes
    /// by your own criteria.
    pub fn clone<F>(&self, filter: &mut F) -> Self
    where
        F: FnMut(Handle<Node>, &Node) -> bool,
    {
        let (graph, old_new_map) = self.graph.clone(filter);
        let mut animations = self.animations.clone();
        for animation in animations.iter_mut() {
            // Remove all tracks for nodes that were filtered out.
            animation.retain_tracks(|track| old_new_map.contains_key(&track.get_node()));
            // Remap track nodes.
            for track in animation.get_tracks_mut() {
                track.set_node(old_new_map[&track.get_node()]);
            }
        }
        let physics = self.physics.clone();
        let mut physics_binder = PhysicsBinder::default();
        for (node, &body) in self.physics_binder.node_rigid_body_map.iter() {
            // Make sure we bind the existing node to the new physical body.
            if let Some(&new_node) = old_new_map.get(node)
        }

        Self {
            graph,
            animations,
            physics,
            physics_binder,
            render_target: Default::default(),
            lightmap: self.lightmap.clone(),
        }
    }
}

impl Visit for Scene {
    fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult {
        visitor.enter_region(name)?;

        self.physics_binder.visit("PhysicsBinder", visitor)?;
        self.graph.visit("Graph", visitor)?;
        self.animations.visit("Animations", visitor)?;
        self.physics.visit("Physics", visitor)?;
        let _ = self.lightmap.visit("Lightmap", visitor);

        visitor.leave_region()
    }
}

/// Container for scenes in the engine. It is just a simple wrapper around Pool.
pub struct SceneContainer {
    pool: Pool<Scene>,
}

impl SceneContainer {
    pub(in crate) fn new() -> Self {
        Self { pool: Pool::new() }
    }

    /// Creates a new iterator over the scenes in the container.
    #[inline]
    pub fn iter(&self) -> PoolIterator<Scene> {
        self.pool.iter()
    }

    /// Creates a new mutable iterator over the scenes in the container.
    #[inline]
    pub fn iter_mut(&mut self) -> PoolIteratorMut<Scene> {
        self.pool.iter_mut()
    }

    /// Adds a new scene to the container.
    #[inline]
    pub fn add(&mut self, scene: Scene) -> Handle<Scene> {
        self.pool.spawn(scene)
    }

    /// Removes all scenes from the container.
    #[inline]
    pub fn clear(&mut self) {
        self.pool.clear()
    }

    /// Removes the given scene from the container.
    #[inline]
    pub fn remove(&mut self, handle: Handle<Scene>) {
        self.pool.free(handle);
    }
}

impl Index<Handle<Scene>> for SceneContainer {
    type Output = Scene;

    #[inline]
    fn index(&self, index: Handle<Scene>) -> &Self::Output {
        &self.pool[index]
    }
}

impl IndexMut<Handle<Scene>> for SceneContainer {
    #[inline]
    fn index_mut(&mut self, index: Handle<Scene>) -> &mut Self::Output {
        &mut self.pool[index]
    }
}

impl Default for SceneContainer {
    fn default() -> Self {
        Self { pool: Pool::new() }
    }
}

impl Visit for SceneContainer {
    fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult {
        visitor.enter_region(name)?;

        self.pool.visit("Pool", visitor)?;

        visitor.leave_region()
    }
}
            {
                // Re-using the body handle is fine here because physics copies bodies
                // directly, and handles from the previous pool are still valid for the copy.
                physics_binder.bind(new_node, body);
            }
conditional_block
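// Illustration: `Scene::clone` above filters animation tracks through `old_new_map`
// and remaps the surviving handles to the freshly cloned nodes. Below is a minimal
// sketch of that retain-then-remap pattern, with plain integers standing in for
// pool handles; `DemoNodeHandle` and `remap_tracks` are hypothetical names for
// illustration, not the engine's `Handle`/`Pool` types.

use std::collections::HashMap;

type DemoNodeHandle = u32;

/// Drop tracks whose nodes were filtered out of the copy, then remap the
/// survivors from old handles to the handles of the cloned nodes.
fn remap_tracks(
    tracks: &mut Vec<DemoNodeHandle>,
    old_new_map: &HashMap<DemoNodeHandle, DemoNodeHandle>,
) {
    tracks.retain(|node| old_new_map.contains_key(node));
    for node in tracks.iter_mut() {
        *node = old_new_map[node];
    }
}

fn remap_demo() {
    // Nodes 1 and 3 were cloned; node 2 was filtered out.
    let old_new_map = HashMap::from([(1, 10), (3, 30)]);
    let mut tracks: Vec<DemoNodeHandle> = vec![1, 2, 3];
    remap_tracks(&mut tracks, &old_new_map);
    assert_eq!(tracks, vec![10, 30]);
}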
mod.rs
#![warn(missing_docs)] //! Contains all structures and methods to create and manage scenes. //! //! Scene is container for graph nodes, animations and physics. pub mod base; pub mod camera; pub mod graph; pub mod light; pub mod mesh; pub mod node; pub mod particle_system; pub mod sprite; pub mod transform; use crate::{ animation::AnimationContainer, core::{ math::vec2::Vec2, pool::{Handle, Pool, PoolIterator, PoolIteratorMut}, visitor::{Visit, VisitError, VisitResult, Visitor}, }, engine::resource_manager::ResourceManager, physics::{rigid_body::RigidBody, Physics}, resource::texture::Texture, scene::{graph::Graph, node::Node}, utils::{lightmap::Lightmap, log::Log}, }; use std::{ collections::HashMap, ops::{Index, IndexMut}, path::Path, sync::{Arc, Mutex}, }; /// Physics binder is used to link graph nodes with rigid bodies. Scene will /// sync transform of node with its associated rigid body. #[derive(Clone, Debug)] pub struct PhysicsBinder { node_rigid_body_map: HashMap<Handle<Node>, Handle<RigidBody>>, } impl Default for PhysicsBinder { fn default() -> Self { Self { node_rigid_body_map: Default::default(), } } } impl PhysicsBinder { /// Links given graph node with specified rigid body. pub fn bind( &mut self, node: Handle<Node>, rigid_body: Handle<RigidBody>, ) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.insert(node, rigid_body) } /// Unlinks given graph node from its associated rigid body (if any). pub fn unbind(&mut self, node: Handle<Node>) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.remove(&node) } /// Unlinks given body from a node that is linked with the body. /// /// # Performance /// /// This method is slow because of two reasons: /// /// 1) Search is linear /// 2) Additional memory is allocated /// /// So it is not advised to call it in performance critical places. pub fn unbind_by_body(&mut self, body: Handle<RigidBody>) -> Handle<Node> { let mut node = Handle::NONE; self.node_rigid_body_map = self .node_rigid_body_map .clone() .into_iter() .filter(|&(n, b)| { if b == body { node = n; false } else { true } }) .collect(); node } /// Returns handle of rigid body associated with given node. It will return /// Handle::NONE if given node isn't linked to a rigid body. pub fn body_of(&self, node: Handle<Node>) -> Handle<RigidBody> { self.node_rigid_body_map .get(&node) .copied() .unwrap_or_default() } } impl Visit for PhysicsBinder { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.node_rigid_body_map.visit("Map", visitor)?; visitor.leave_region() } } /// See module docs. #[derive(Debug)] pub struct Scene { /// Graph is main container for all scene nodes. It calculates global transforms for nodes, /// updates them and performs all other important work. See `graph` module docs for more /// info. pub graph: Graph, /// Animations container controls all animation on scene. Each animation can have tracks which /// has handles to graph nodes. See `animation` module docs for more info. pub animations: AnimationContainer, /// Physics world. Allows you create various physics objects such as static geometries and /// rigid bodies. Rigid bodies then should be linked with graph nodes using binder. pub physics: Physics, /// Physics binder is a bridge between physics world and scene graph. If a rigid body is linked /// to a graph node, then rigid body will control local transform of node. pub physics_binder: PhysicsBinder, /// Texture to draw scene to. If empty, scene will be drawn on screen directly. 
/// It is useful to "embed" some scene into other by drawing a quad with this /// texture. This can be used to make in-game video conference - you can make /// separate scene with your characters and draw scene into texture, then in /// main scene you can attach this texture to some quad which will be used as /// monitor. Other usage could be previewer of models, like pictogram of character /// in real-time strategies, in other words there are plenty of possible uses. pub render_target: Option<Arc<Mutex<Texture>>>, lightmap: Option<Lightmap>, } impl Default for Scene { fn default() -> Self { Self { graph: Default::default(), animations: Default::default(), physics: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } } impl Scene { /// Creates new scene with single root node. /// /// # Notes /// /// This method differs from Default trait implementation! Scene::default() creates /// empty graph with no nodes. #[inline] pub fn new() -> Self { Self { // Graph must be created with `new` method because it differs from `default` graph: Graph::new(), physics: Default::default(), animations: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } /// Tries to load scene from given file. File can contain any scene in native engine format. /// Such scenes can be made in rusty editor. pub fn from_file<P: AsRef<Path>>( path: P, resource_manager: &mut ResourceManager, ) -> Result<Self, VisitError> { let mut scene = Scene::default(); let mut visitor = Visitor::load_binary(path.as_ref())?; scene.visit("Scene", &mut visitor)?; // Restore pointers to resources. Scene saves only paths to resources, here we must // find real resources instead. for node in scene.graph.linear_iter_mut() { if let Some(shallow_resource) = node.resource.clone() { node.resource = resource_manager.request_model(&shallow_resource.lock().unwrap().path); } } // And do resolve to extract correct graphical data and so on. scene.resolve(); Ok(scene) } fn update_physics(&mut self, dt: f32) { self.physics.step(dt); // Keep pair when node and body are both alive. let graph = &self.graph; let physics = &self.physics; self.physics_binder .node_rigid_body_map .retain(|node, body| { graph.is_valid_handle(*node) && physics.is_valid_body_handle(*body) }); // Sync node positions with assigned physics bodies for (node, body) in self.physics_binder.node_rigid_body_map.iter() { let body = physics.borrow_body(*body); self.graph[*node] .local_transform_mut() .set_position(body.get_position()); } } /// Removes node from scene with all associated entities, like animations etc. /// /// # Panics /// /// Panics if handle is invalid. pub fn remove_node(&mut self, handle: Handle<Node>)
pub(in crate) fn resolve(&mut self) { Log::writeln("Starting resolve...".to_owned()); self.graph.resolve(); self.animations.resolve(&self.graph); Log::writeln("Resolve succeeded!".to_owned()); } /// Tries to set new lightmap to scene. pub fn set_lightmap(&mut self, lightmap: Lightmap) -> Result<Option<Lightmap>, &'static str> { // Assign textures to surfaces. for (handle, lightmaps) in lightmap.map.iter() { if let Node::Mesh(mesh) = &mut self.graph[*handle] { if mesh.surfaces().len()!= lightmaps.len() { return Err("failed to set lightmap, surface count mismatch"); } for (surface, entry) in mesh.surfaces_mut().iter_mut().zip(lightmaps) { // This unwrap() call must never panic in normal conditions, because texture wrapped in Option // only to implement Default trait to be serializable. let texture = entry.texture.clone().unwrap(); surface.set_lightmap_texture(texture) } } } Ok(std::mem::replace(&mut self.lightmap, Some(lightmap))) } /// Performs single update tick with given delta time from last frame. Internally /// it updates physics, animations, and each graph node. In most cases there is /// no need to call it directly, engine automatically updates all available scenes. pub fn update(&mut self, frame_size: Vec2, dt: f32) { self.update_physics(dt); self.animations.update_animations(dt); self.graph.update_nodes(frame_size, dt); } /// Creates deep copy of a scene, filter predicate allows you to filter out nodes /// by your criteria. pub fn clone<F>(&self, filter: &mut F) -> Self where F: FnMut(Handle<Node>, &Node) -> bool, { let (graph, old_new_map) = self.graph.clone(filter); let mut animations = self.animations.clone(); for animation in animations.iter_mut() { // Remove all tracks for nodes that were filtered out. animation.retain_tracks(|track| old_new_map.contains_key(&track.get_node())); // Remap track nodes. for track in animation.get_tracks_mut() { track.set_node(old_new_map[&track.get_node()]); } } let physics = self.physics.clone(); let mut physics_binder = PhysicsBinder::default(); for (node, &body) in self.physics_binder.node_rigid_body_map.iter() { // Make sure we bind existing node with new physical body. if let Some(&new_node) = old_new_map.get(node) { // Re-use of body handle is fine here because physics copy bodies // directly and handles from previous pool is still suitable for copy. physics_binder.bind(new_node, body); } } Self { graph, animations, physics, physics_binder, render_target: Default::default(), lightmap: self.lightmap.clone(), } } } impl Visit for Scene { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.physics_binder.visit("PhysicsBinder", visitor)?; self.graph.visit("Graph", visitor)?; self.animations.visit("Animations", visitor)?; self.physics.visit("Physics", visitor)?; let _ = self.lightmap.visit("Lightmap", visitor); visitor.leave_region() } } /// Container for scenes in the engine. It just a simple wrapper around Pool. pub struct SceneContainer { pool: Pool<Scene>, } impl SceneContainer { pub(in crate) fn new() -> Self { Self { pool: Pool::new() } } /// Creates new iterator over scenes in container. #[inline] pub fn iter(&self) -> PoolIterator<Scene> { self.pool.iter() } /// Creates new mutable iterator over scenes in container. #[inline] pub fn iter_mut(&mut self) -> PoolIteratorMut<Scene> { self.pool.iter_mut() } /// Adds new scene into container. #[inline] pub fn add(&mut self, scene: Scene) -> Handle<Scene> { self.pool.spawn(scene) } /// Removes all scenes from container. 
#[inline] pub fn clear(&mut self) { self.pool.clear() } /// Removes given scene from container. #[inline] pub fn remove(&mut self, handle: Handle<Scene>) { self.pool.free(handle); } } impl Index<Handle<Scene>> for SceneContainer { type Output = Scene; #[inline] fn index(&self, index: Handle<Scene>) -> &Self::Output { &self.pool[index] } } impl IndexMut<Handle<Scene>> for SceneContainer { #[inline] fn index_mut(&mut self, index: Handle<Scene>) -> &mut Self::Output { &mut self.pool[index] } } impl Default for SceneContainer { fn default() -> Self { Self { pool: Pool::new() } } } impl Visit for SceneContainer { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.pool.visit("Pool", visitor)?; visitor.leave_region() } }
{ for descendant in self.graph.traverse_handle_iter(handle) { // Remove all associated animations. self.animations.retain(|animation| { for track in animation.get_tracks() { if track.get_node() == descendant { return false; } } true }); } self.graph.remove_node(handle) }
identifier_body
mod.rs
#![warn(missing_docs)] //! Contains all structures and methods to create and manage scenes. //! //! Scene is container for graph nodes, animations and physics. pub mod base; pub mod camera; pub mod graph; pub mod light; pub mod mesh; pub mod node; pub mod particle_system; pub mod sprite; pub mod transform; use crate::{ animation::AnimationContainer, core::{ math::vec2::Vec2, pool::{Handle, Pool, PoolIterator, PoolIteratorMut}, visitor::{Visit, VisitError, VisitResult, Visitor}, }, engine::resource_manager::ResourceManager, physics::{rigid_body::RigidBody, Physics}, resource::texture::Texture, scene::{graph::Graph, node::Node}, utils::{lightmap::Lightmap, log::Log}, }; use std::{ collections::HashMap, ops::{Index, IndexMut}, path::Path, sync::{Arc, Mutex}, }; /// Physics binder is used to link graph nodes with rigid bodies. Scene will /// sync transform of node with its associated rigid body. #[derive(Clone, Debug)] pub struct PhysicsBinder { node_rigid_body_map: HashMap<Handle<Node>, Handle<RigidBody>>, } impl Default for PhysicsBinder { fn default() -> Self { Self { node_rigid_body_map: Default::default(), } } } impl PhysicsBinder { /// Links given graph node with specified rigid body. pub fn bind( &mut self, node: Handle<Node>, rigid_body: Handle<RigidBody>, ) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.insert(node, rigid_body) } /// Unlinks given graph node from its associated rigid body (if any). pub fn unbind(&mut self, node: Handle<Node>) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.remove(&node) } /// Unlinks given body from a node that is linked with the body. /// /// # Performance /// /// This method is slow because of two reasons: /// /// 1) Search is linear /// 2) Additional memory is allocated /// /// So it is not advised to call it in performance critical places. pub fn unbind_by_body(&mut self, body: Handle<RigidBody>) -> Handle<Node> { let mut node = Handle::NONE; self.node_rigid_body_map = self .node_rigid_body_map .clone() .into_iter() .filter(|&(n, b)| { if b == body { node = n; false } else { true } }) .collect(); node } /// Returns handle of rigid body associated with given node. It will return /// Handle::NONE if given node isn't linked to a rigid body. pub fn body_of(&self, node: Handle<Node>) -> Handle<RigidBody> { self.node_rigid_body_map .get(&node) .copied() .unwrap_or_default() } } impl Visit for PhysicsBinder { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.node_rigid_body_map.visit("Map", visitor)?; visitor.leave_region() } } /// See module docs. #[derive(Debug)] pub struct Scene { /// Graph is main container for all scene nodes. It calculates global transforms for nodes, /// updates them and performs all other important work. See `graph` module docs for more /// info. pub graph: Graph, /// Animations container controls all animation on scene. Each animation can have tracks which /// has handles to graph nodes. See `animation` module docs for more info. pub animations: AnimationContainer, /// Physics world. Allows you create various physics objects such as static geometries and /// rigid bodies. Rigid bodies then should be linked with graph nodes using binder. pub physics: Physics, /// Physics binder is a bridge between physics world and scene graph. If a rigid body is linked /// to a graph node, then rigid body will control local transform of node. pub physics_binder: PhysicsBinder, /// Texture to draw scene to. If empty, scene will be drawn on screen directly. 
/// It is useful to "embed" some scene into other by drawing a quad with this /// texture. This can be used to make in-game video conference - you can make /// separate scene with your characters and draw scene into texture, then in /// main scene you can attach this texture to some quad which will be used as /// monitor. Other usage could be previewer of models, like pictogram of character /// in real-time strategies, in other words there are plenty of possible uses. pub render_target: Option<Arc<Mutex<Texture>>>, lightmap: Option<Lightmap>, } impl Default for Scene { fn default() -> Self { Self { graph: Default::default(), animations: Default::default(), physics: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } }
impl Scene { /// Creates new scene with single root node. /// /// # Notes /// /// This method differs from Default trait implementation! Scene::default() creates /// empty graph with no nodes. #[inline] pub fn new() -> Self { Self { // Graph must be created with `new` method because it differs from `default` graph: Graph::new(), physics: Default::default(), animations: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } /// Tries to load scene from given file. File can contain any scene in native engine format. /// Such scenes can be made in rusty editor. pub fn from_file<P: AsRef<Path>>( path: P, resource_manager: &mut ResourceManager, ) -> Result<Self, VisitError> { let mut scene = Scene::default(); let mut visitor = Visitor::load_binary(path.as_ref())?; scene.visit("Scene", &mut visitor)?; // Restore pointers to resources. Scene saves only paths to resources, here we must // find real resources instead. for node in scene.graph.linear_iter_mut() { if let Some(shallow_resource) = node.resource.clone() { node.resource = resource_manager.request_model(&shallow_resource.lock().unwrap().path); } } // And do resolve to extract correct graphical data and so on. scene.resolve(); Ok(scene) } fn update_physics(&mut self, dt: f32) { self.physics.step(dt); // Keep pair when node and body are both alive. let graph = &self.graph; let physics = &self.physics; self.physics_binder .node_rigid_body_map .retain(|node, body| { graph.is_valid_handle(*node) && physics.is_valid_body_handle(*body) }); // Sync node positions with assigned physics bodies for (node, body) in self.physics_binder.node_rigid_body_map.iter() { let body = physics.borrow_body(*body); self.graph[*node] .local_transform_mut() .set_position(body.get_position()); } } /// Removes node from scene with all associated entities, like animations etc. /// /// # Panics /// /// Panics if handle is invalid. pub fn remove_node(&mut self, handle: Handle<Node>) { for descendant in self.graph.traverse_handle_iter(handle) { // Remove all associated animations. self.animations.retain(|animation| { for track in animation.get_tracks() { if track.get_node() == descendant { return false; } } true }); } self.graph.remove_node(handle) } pub(in crate) fn resolve(&mut self) { Log::writeln("Starting resolve...".to_owned()); self.graph.resolve(); self.animations.resolve(&self.graph); Log::writeln("Resolve succeeded!".to_owned()); } /// Tries to set new lightmap to scene. pub fn set_lightmap(&mut self, lightmap: Lightmap) -> Result<Option<Lightmap>, &'static str> { // Assign textures to surfaces. for (handle, lightmaps) in lightmap.map.iter() { if let Node::Mesh(mesh) = &mut self.graph[*handle] { if mesh.surfaces().len()!= lightmaps.len() { return Err("failed to set lightmap, surface count mismatch"); } for (surface, entry) in mesh.surfaces_mut().iter_mut().zip(lightmaps) { // This unwrap() call must never panic in normal conditions, because texture wrapped in Option // only to implement Default trait to be serializable. let texture = entry.texture.clone().unwrap(); surface.set_lightmap_texture(texture) } } } Ok(std::mem::replace(&mut self.lightmap, Some(lightmap))) } /// Performs single update tick with given delta time from last frame. Internally /// it updates physics, animations, and each graph node. In most cases there is /// no need to call it directly, engine automatically updates all available scenes. 
pub fn update(&mut self, frame_size: Vec2, dt: f32) { self.update_physics(dt); self.animations.update_animations(dt); self.graph.update_nodes(frame_size, dt); } /// Creates deep copy of a scene, filter predicate allows you to filter out nodes /// by your criteria. pub fn clone<F>(&self, filter: &mut F) -> Self where F: FnMut(Handle<Node>, &Node) -> bool, { let (graph, old_new_map) = self.graph.clone(filter); let mut animations = self.animations.clone(); for animation in animations.iter_mut() { // Remove all tracks for nodes that were filtered out. animation.retain_tracks(|track| old_new_map.contains_key(&track.get_node())); // Remap track nodes. for track in animation.get_tracks_mut() { track.set_node(old_new_map[&track.get_node()]); } } let physics = self.physics.clone(); let mut physics_binder = PhysicsBinder::default(); for (node, &body) in self.physics_binder.node_rigid_body_map.iter() { // Make sure we bind existing node with new physical body. if let Some(&new_node) = old_new_map.get(node) { // Re-use of body handle is fine here because physics copy bodies // directly and handles from previous pool is still suitable for copy. physics_binder.bind(new_node, body); } } Self { graph, animations, physics, physics_binder, render_target: Default::default(), lightmap: self.lightmap.clone(), } } } impl Visit for Scene { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.physics_binder.visit("PhysicsBinder", visitor)?; self.graph.visit("Graph", visitor)?; self.animations.visit("Animations", visitor)?; self.physics.visit("Physics", visitor)?; let _ = self.lightmap.visit("Lightmap", visitor); visitor.leave_region() } } /// Container for scenes in the engine. It just a simple wrapper around Pool. pub struct SceneContainer { pool: Pool<Scene>, } impl SceneContainer { pub(in crate) fn new() -> Self { Self { pool: Pool::new() } } /// Creates new iterator over scenes in container. #[inline] pub fn iter(&self) -> PoolIterator<Scene> { self.pool.iter() } /// Creates new mutable iterator over scenes in container. #[inline] pub fn iter_mut(&mut self) -> PoolIteratorMut<Scene> { self.pool.iter_mut() } /// Adds new scene into container. #[inline] pub fn add(&mut self, scene: Scene) -> Handle<Scene> { self.pool.spawn(scene) } /// Removes all scenes from container. #[inline] pub fn clear(&mut self) { self.pool.clear() } /// Removes given scene from container. #[inline] pub fn remove(&mut self, handle: Handle<Scene>) { self.pool.free(handle); } } impl Index<Handle<Scene>> for SceneContainer { type Output = Scene; #[inline] fn index(&self, index: Handle<Scene>) -> &Self::Output { &self.pool[index] } } impl IndexMut<Handle<Scene>> for SceneContainer { #[inline] fn index_mut(&mut self, index: Handle<Scene>) -> &mut Self::Output { &mut self.pool[index] } } impl Default for SceneContainer { fn default() -> Self { Self { pool: Pool::new() } } } impl Visit for SceneContainer { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.pool.visit("Pool", visitor)?; visitor.leave_region() } }
}
random_line_split
mod.rs
#![warn(missing_docs)] //! Contains all structures and methods to create and manage scenes. //! //! Scene is container for graph nodes, animations and physics. pub mod base; pub mod camera; pub mod graph; pub mod light; pub mod mesh; pub mod node; pub mod particle_system; pub mod sprite; pub mod transform; use crate::{ animation::AnimationContainer, core::{ math::vec2::Vec2, pool::{Handle, Pool, PoolIterator, PoolIteratorMut}, visitor::{Visit, VisitError, VisitResult, Visitor}, }, engine::resource_manager::ResourceManager, physics::{rigid_body::RigidBody, Physics}, resource::texture::Texture, scene::{graph::Graph, node::Node}, utils::{lightmap::Lightmap, log::Log}, }; use std::{ collections::HashMap, ops::{Index, IndexMut}, path::Path, sync::{Arc, Mutex}, }; /// Physics binder is used to link graph nodes with rigid bodies. Scene will /// sync transform of node with its associated rigid body. #[derive(Clone, Debug)] pub struct PhysicsBinder { node_rigid_body_map: HashMap<Handle<Node>, Handle<RigidBody>>, } impl Default for PhysicsBinder { fn default() -> Self { Self { node_rigid_body_map: Default::default(), } } } impl PhysicsBinder { /// Links given graph node with specified rigid body. pub fn bind( &mut self, node: Handle<Node>, rigid_body: Handle<RigidBody>, ) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.insert(node, rigid_body) } /// Unlinks given graph node from its associated rigid body (if any). pub fn unbind(&mut self, node: Handle<Node>) -> Option<Handle<RigidBody>> { self.node_rigid_body_map.remove(&node) } /// Unlinks given body from a node that is linked with the body. /// /// # Performance /// /// This method is slow because of two reasons: /// /// 1) Search is linear /// 2) Additional memory is allocated /// /// So it is not advised to call it in performance critical places. pub fn unbind_by_body(&mut self, body: Handle<RigidBody>) -> Handle<Node> { let mut node = Handle::NONE; self.node_rigid_body_map = self .node_rigid_body_map .clone() .into_iter() .filter(|&(n, b)| { if b == body { node = n; false } else { true } }) .collect(); node } /// Returns handle of rigid body associated with given node. It will return /// Handle::NONE if given node isn't linked to a rigid body. pub fn body_of(&self, node: Handle<Node>) -> Handle<RigidBody> { self.node_rigid_body_map .get(&node) .copied() .unwrap_or_default() } } impl Visit for PhysicsBinder { fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult { visitor.enter_region(name)?; self.node_rigid_body_map.visit("Map", visitor)?; visitor.leave_region() } } /// See module docs. #[derive(Debug)] pub struct Scene { /// Graph is main container for all scene nodes. It calculates global transforms for nodes, /// updates them and performs all other important work. See `graph` module docs for more /// info. pub graph: Graph, /// Animations container controls all animation on scene. Each animation can have tracks which /// has handles to graph nodes. See `animation` module docs for more info. pub animations: AnimationContainer, /// Physics world. Allows you create various physics objects such as static geometries and /// rigid bodies. Rigid bodies then should be linked with graph nodes using binder. pub physics: Physics, /// Physics binder is a bridge between physics world and scene graph. If a rigid body is linked /// to a graph node, then rigid body will control local transform of node. pub physics_binder: PhysicsBinder, /// Texture to draw scene to. If empty, scene will be drawn on screen directly. 
/// It is useful to "embed" some scene into other by drawing a quad with this /// texture. This can be used to make in-game video conference - you can make /// separate scene with your characters and draw scene into texture, then in /// main scene you can attach this texture to some quad which will be used as /// monitor. Other usage could be previewer of models, like pictogram of character /// in real-time strategies, in other words there are plenty of possible uses. pub render_target: Option<Arc<Mutex<Texture>>>, lightmap: Option<Lightmap>, } impl Default for Scene { fn default() -> Self { Self { graph: Default::default(), animations: Default::default(), physics: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } } impl Scene { /// Creates new scene with single root node. /// /// # Notes /// /// This method differs from Default trait implementation! Scene::default() creates /// empty graph with no nodes. #[inline] pub fn new() -> Self { Self { // Graph must be created with `new` method because it differs from `default` graph: Graph::new(), physics: Default::default(), animations: Default::default(), physics_binder: Default::default(), render_target: None, lightmap: None, } } /// Tries to load scene from given file. File can contain any scene in native engine format. /// Such scenes can be made in rusty editor. pub fn from_file<P: AsRef<Path>>( path: P, resource_manager: &mut ResourceManager, ) -> Result<Self, VisitError> { let mut scene = Scene::default(); let mut visitor = Visitor::load_binary(path.as_ref())?; scene.visit("Scene", &mut visitor)?; // Restore pointers to resources. Scene saves only paths to resources, here we must // find real resources instead. for node in scene.graph.linear_iter_mut() { if let Some(shallow_resource) = node.resource.clone() { node.resource = resource_manager.request_model(&shallow_resource.lock().unwrap().path); } } // And do resolve to extract correct graphical data and so on. scene.resolve(); Ok(scene) } fn update_physics(&mut self, dt: f32) { self.physics.step(dt); // Keep pair when node and body are both alive. let graph = &self.graph; let physics = &self.physics; self.physics_binder .node_rigid_body_map .retain(|node, body| { graph.is_valid_handle(*node) && physics.is_valid_body_handle(*body) }); // Sync node positions with assigned physics bodies for (node, body) in self.physics_binder.node_rigid_body_map.iter() { let body = physics.borrow_body(*body); self.graph[*node] .local_transform_mut() .set_position(body.get_position()); } } /// Removes node from scene with all associated entities, like animations etc. /// /// # Panics /// /// Panics if handle is invalid. pub fn remove_node(&mut self, handle: Handle<Node>) { for descendant in self.graph.traverse_handle_iter(handle) { // Remove all associated animations. self.animations.retain(|animation| { for track in animation.get_tracks() { if track.get_node() == descendant { return false; } } true }); } self.graph.remove_node(handle) } pub(in crate) fn resolve(&mut self) { Log::writeln("Starting resolve...".to_owned()); self.graph.resolve(); self.animations.resolve(&self.graph); Log::writeln("Resolve succeeded!".to_owned()); } /// Tries to set new lightmap to scene. pub fn set_lightmap(&mut self, lightmap: Lightmap) -> Result<Option<Lightmap>, &'static str> { // Assign textures to surfaces. 
        for (handle, lightmaps) in lightmap.map.iter() {
            if let Node::Mesh(mesh) = &mut self.graph[*handle] {
                if mesh.surfaces().len() != lightmaps.len() {
                    return Err("failed to set lightmap, surface count mismatch");
                }
                for (surface, entry) in mesh.surfaces_mut().iter_mut().zip(lightmaps) {
                    // This unwrap() call must never panic in normal conditions, because the
                    // texture is wrapped in Option only to implement the Default trait so
                    // the type stays serializable.
                    let texture = entry.texture.clone().unwrap();
                    surface.set_lightmap_texture(texture)
                }
            }
        }
        Ok(std::mem::replace(&mut self.lightmap, Some(lightmap)))
    }

    /// Performs a single update tick with the given delta time from the last frame.
    /// Internally it updates physics, animations, and each graph node. In most cases
    /// there is no need to call it directly; the engine automatically updates all
    /// available scenes.
    pub fn update(&mut self, frame_size: Vec2, dt: f32) {
        self.update_physics(dt);
        self.animations.update_animations(dt);
        self.graph.update_nodes(frame_size, dt);
    }

    /// Creates a deep copy of the scene; the filter predicate allows you to filter
    /// out nodes by your own criteria.
    pub fn clone<F>(&self, filter: &mut F) -> Self
    where
        F: FnMut(Handle<Node>, &Node) -> bool,
    {
        let (graph, old_new_map) = self.graph.clone(filter);
        let mut animations = self.animations.clone();
        for animation in animations.iter_mut() {
            // Remove all tracks for nodes that were filtered out.
            animation.retain_tracks(|track| old_new_map.contains_key(&track.get_node()));
            // Remap track nodes.
            for track in animation.get_tracks_mut() {
                track.set_node(old_new_map[&track.get_node()]);
            }
        }
        let physics = self.physics.clone();
        let mut physics_binder = PhysicsBinder::default();
        for (node, &body) in self.physics_binder.node_rigid_body_map.iter() {
            // Make sure we bind an existing node with the new physical body.
            if let Some(&new_node) = old_new_map.get(node) {
                // Re-use of the body handle is fine here because physics copies bodies
                // directly, so handles from the previous pool are still suitable for
                // the copy.
                physics_binder.bind(new_node, body);
            }
        }
        Self {
            graph,
            animations,
            physics,
            physics_binder,
            render_target: Default::default(),
            lightmap: self.lightmap.clone(),
        }
    }
}

impl Visit for Scene {
    fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult {
        visitor.enter_region(name)?;

        self.physics_binder.visit("PhysicsBinder", visitor)?;
        self.graph.visit("Graph", visitor)?;
        self.animations.visit("Animations", visitor)?;
        self.physics.visit("Physics", visitor)?;
        let _ = self.lightmap.visit("Lightmap", visitor);

        visitor.leave_region()
    }
}

/// Container for scenes in the engine. It is just a simple wrapper around a pool.
pub struct SceneContainer {
    pool: Pool<Scene>,
}

impl SceneContainer {
    pub(in crate) fn new() -> Self {
        Self { pool: Pool::new() }
    }

    /// Creates a new iterator over the scenes in the container.
    #[inline]
    pub fn iter(&self) -> PoolIterator<Scene> {
        self.pool.iter()
    }

    /// Creates a new mutable iterator over the scenes in the container.
    #[inline]
    pub fn iter_mut(&mut self) -> PoolIteratorMut<Scene> {
        self.pool.iter_mut()
    }

    /// Adds a new scene to the container.
    #[inline]
    pub fn
(&mut self, scene: Scene) -> Handle<Scene> {
        self.pool.spawn(scene)
    }

    /// Removes all scenes from the container.
    #[inline]
    pub fn clear(&mut self) {
        self.pool.clear()
    }

    /// Removes the given scene from the container.
    #[inline]
    pub fn remove(&mut self, handle: Handle<Scene>) {
        self.pool.free(handle);
    }
}

impl Index<Handle<Scene>> for SceneContainer {
    type Output = Scene;

    #[inline]
    fn index(&self, index: Handle<Scene>) -> &Self::Output {
        &self.pool[index]
    }
}

impl IndexMut<Handle<Scene>> for SceneContainer {
    #[inline]
    fn index_mut(&mut self, index: Handle<Scene>) -> &mut Self::Output {
        &mut self.pool[index]
    }
}

impl Default for SceneContainer {
    fn default() -> Self {
        Self { pool: Pool::new() }
    }
}

impl Visit for SceneContainer {
    fn visit(&mut self, name: &str, visitor: &mut Visitor) -> VisitResult {
        visitor.enter_region(name)?;

        self.pool.visit("Pool", visitor)?;

        visitor.leave_region()
    }
}
add
identifier_name
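To make the `Scene` API above concrete, here is a minimal, hypothetical usage sketch. It assumes the crate layout shown in the file's own imports (`Scene`, `SceneContainer`, `PhysicsBinder`); the node and rigid-body handles are illustrative placeholders, not real engine calls.

// A minimal usage sketch of the scene API shown above. Paths follow the
// file's own imports; nothing here is a verbatim engine example.
use crate::{
    core::{math::vec2::Vec2, pool::Handle},
    scene::{Scene, SceneContainer},
};

fn spawn_scene(container: &mut SceneContainer, frame_size: Vec2, dt: f32) -> Handle<Scene> {
    // `Scene::new` creates a graph with a single root node, unlike
    // `Scene::default`, which creates an empty graph.
    let scene = Scene::new();

    // Nodes and rigid bodies would be created through `scene.graph` and
    // `scene.physics` here, then linked so the body drives the node:
    // scene.physics_binder.bind(node_handle, body_handle);

    // Move the scene into the container; the returned handle is used for
    // indexed access later.
    let handle = container.add(scene);

    // The engine normally ticks every scene itself; calling `update` manually
    // steps physics, animations, and the graph once.
    container[handle].update(frame_size, dt);
    handle
}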
index.rs
/*! Indexing within memory elements.

This module provides types which guarantee certain properties about selecting
bits within a memory element. These types enable their use sites to explicitly
declare the indexing behavior they require, and move safety checks from runtime
to compile time.

# Bit Indexing

The [`BitIdx`] type represents the semantic index of a bit within a memory
element. It does not perform bit positioning, and cannot be used to create a
shift instruction or mask value. It is transformed into a value which can do
these things – [`BitPos`] – through the [`BitOrder::at`] function.

# Region End Marker

`bitvec` uses “half-open” ranges, described by a starting point and a count of
members that are live. This means that the “end” of a range is not the last
member that is *in*cluded in the range, but rather the first member that is
*ex*cluded from it.

This requires the [`BitTail`] end marker to include in its range the width of
the element type (`8` for `u8`, etc.), in order to mark that a region includes
the very last bit in the element (index `7` for `u8`, etc.).

The starting number for a dead region cannot be used to perform bit selection,
but is used to provide range computation, so it is kept distinct from the
indexing types.

# Bit Positioning

The [`BitPos`] type corresponds directly to a bit position in a memory element.
Its value can be used to create shift instructions which select part of memory.
It is only ever created by the `BitOrder::at` function.

# Bit Selection

The [`BitSel`] type is a one-hot mask encoding for a memory element. Unlike the
previous types, which are range-limited integers, this type is a wrapper over a
memory element and guarantees that it can be used as a mask value in `&` and
`|` operations to modify exactly one bit. It is equivalent to
`1 << BitPos.value()`.

# Bit Masking

Lastly, the [`BitMask`] type is a bitmask that permits any number of bits to be
set or cleared. It is provided as a type rather than a bare value in order to
clearly communicate that there is no restriction on what this mask may affect.

[`BitIdx`]: struct.BitIdx.html
[`BitMask`]: struct.BitMask.html
[`BitOrder::at`]: ../order/trait.BitOrder.html#method.at
[`BitPos`]: struct.BitPos.html
[`BitSel`]: struct.BitSel.html
[`BitTail`]: struct.BitTail.html
!*/

use crate::mem::BitMemory;

use core::{
    fmt::{
        self,
        Binary,
        Formatter,
    },
    iter::{
        Product,
        Sum,
    },
    marker::PhantomData,
    ops::{
        BitAnd,
        BitOr,
        Deref,
        Not,
    },
};

#[cfg(feature = "serde")]
use core::convert::TryFrom;

/** Indicates a semantic index of a bit within a memory element.

This is a counter in the domain `0 .. M::BITS`, and marks a semantic position
in the ordering sequence described by a [`BitOrder`] implementation. It is used
for both position computation through `BitOrder` and range computation in
[`BitPtr`].

# Type Parameters

- `M`: The memory element type controlled by this index.

[`BitOrder`]: ../order/trait.BitOrder.html
[`BitPtr`]: ../pointer/struct.BitPtr.html
**/
// If Rust had user-provided ranged integers, this would be communicable to the
// compiler:
// #[rustc_layout_scalar_valid_range_end(M::BITS)]
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitIdx<M>
where M: BitMemory
{
    /// Semantic index within an element. Constrained to `0 .. M::BITS`.
    idx: u8,
    /// Marker for the indexed type.
    _ty: PhantomData<M>,
}

impl<M> BitIdx<M>
where M: BitMemory
{
    /// The zero index.
    pub const ZERO: Self = Self {
        idx: 0,
        _ty: PhantomData,
    };

    /// Wraps a counter value as a known-good index of the `M` element type.
    ///
    /// # Parameters
    ///
    /// - `idx`: A semantic index within a `M` memory element.
    ///
    /// # Returns
    ///
    /// If `idx` is within the range `0 .. M::BITS`, then this returns the
    /// index value wrapped in the index type; if `idx` exceeds this range,
    /// then this returns `None`.
    pub fn new(idx: u8) -> Option<Self> {
        if idx >= M::BITS {
            return None;
        }
        Some(unsafe { Self::new_unchecked(idx) })
    }

    /// Wraps a counter value as a known-good index of the `M` element type.
    ///
    /// # Parameters
    ///
    /// - `idx`: A semantic index within a `M` memory element. It must be in
    ///   the range `0 .. M::BITS`.
    ///
    /// # Safety
    ///
    /// If `idx` is outside the range, then the produced value will cause
    /// errors and memory unsafety when used.
    #[inline]
    pub unsafe fn new_unchecked(idx: u8) -> Self {
        debug_assert!(
            idx < M::BITS,
            "Bit index {} cannot exceed type width {}",
            idx,
            M::BITS,
        );
        Self {
            idx,
            _ty: PhantomData,
        }
    }

    /// Finds the destination bit a certain distance away from a starting bit.
    ///
    /// This produces the number of elements to move from the starting point,
    /// and then the bit index of the destination bit in the destination
    /// element.
    ///
    /// # Parameters
    ///
    /// - `self`: A bit index in some memory element, used as the starting
    ///   position for the offset calculation.
    /// - `by`: The number of bits by which to move. Negative values move
    ///   downwards in memory: towards index zero, then counting from index
    ///   `M::MASK` to index zero in the next element lower in memory,
    ///   repeating until arrival. Positive values move upwards in memory:
    ///   towards index `M::MASK`, then counting from index zero to index
    ///   `M::MASK` in the next element higher in memory, repeating until
    ///   arrival.
    ///
    /// # Returns
    ///
    /// - `.0`: The number of elements by which to offset the caller’s element
    ///   cursor. This value can be passed directly into [`ptr::offset`].
    /// - `.1`: The bit index of the destination bit in the element selected
    ///   by applying the `.0` pointer offset.
    ///
    /// # Safety
    ///
    /// `by` must not be far enough to cause the returned element offset
    /// value to, when applied to the original memory address via
    /// [`ptr::offset`], produce a reference out of bounds of the original
    /// allocation. This method has no way of checking this requirement.
    ///
    /// [`ptr::offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset
    pub(crate) fn offset(self, by: isize) -> (isize, Self) {
        let val = *self;

        /* Signed-add `*self` and the jump distance. Overflowing is the
        unlikely branch. The result is a bit index, and an overflow marker.
        `far` is permitted to be negative; this means that it is lower in
        memory than the origin bit. The number line has its origin at the
        front edge of the origin element, so `-1` is the *last* bit of the
        prior memory element. */
        let (far, ovf) = by.overflowing_add(val as isize);
        // If the `isize` addition does not overflow, then the sum can be
        // used directly.
        if !ovf {
            // If `far` is in the origin element, then the jump moves zero
            // elements and produces `far` as an absolute index directly.
            if (0 .. M::BITS as isize).contains(&far) {
                (0, (far as u8).idx())
            }
            /* Otherwise, downshift the bit distance to compute the number of
            elements moved in either direction, and mask to compute the
            absolute bit index in the destination element.
            */
            else {
                (far >> M::INDX, (far as u8 & M::MASK).idx())
            }
        }
        else {
            /* Overflowing `isize` addition happens to produce ordinary
            `usize` addition. In point of fact, `isize` addition and `usize`
            addition are the same machine instruction to perform the sum; it
            is merely the signed interpretation of the sum that differs. The
            sum can be recast back to `usize` without issue. */
            let far = far as usize;
            // This is really only needed in order to prevent sign-extension
            // of the downshift; once shifted, the value can be safely
            // re-signed.
            ((far >> M::INDX) as isize, (far as u8 & M::MASK).idx())
        }
    }

    /// Computes the size of a span from `self` for `len` bits.
    ///
    /// Spans always extend upwards in memory.
    ///
    /// # Parameters
    ///
    /// - `self`: The starting bit position of the span.
    /// - `len`: The number of bits to include in the span.
    ///
    /// # Returns
    ///
    /// - `.0`: The number of elements of `M` included in the span. If `len`
    ///   is `0`, this will be `0`; otherwise, it will be at least one.
    /// - `.1`: The index of the first dead bit *after* the span. If `self`
    ///   and `len` are both `0`, this will be `0`; otherwise, it will be in
    ///   the domain `1 ..= M::BITS`.
    ///
    /// # Notes
    ///
    /// This defers to [`BitTail::span`], because `BitTail` is a strict
    /// superset of `BitIdx` (it is `{ BitIdx | M::BITS }`), and spans
    /// frequently begin from the tail of a slice in this crate. The `offset`
    /// function is *not* implemented on `BitTail`, and remains on `BitIdx`
    /// because offsets can only be computed from bit addresses that exist.
    /// It does not make sense to compute the offset from a `M::BITS` tail.
    ///
    /// [`BitTail::span`]: struct.BitTail.html#method.span
    #[inline]
    pub(crate) fn span(self, len: usize) -> (usize, BitTail<M>) {
        unsafe { BitTail::new_unchecked(*self) }.span(len)
    }
}

impl<M> Binary for BitIdx<M>
where M: BitMemory
{
    fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
        write!(fmt, "0b{:0>1$b}", self.idx, M::INDX as usize)
    }
}

impl<M> Deref for BitIdx<M>
where M: BitMemory
{
    type Target = u8;

    fn deref(&self) -> &Self::Target {
        &self.idx
    }
}

#[cfg(feature = "serde")]
impl<M> TryFrom<u8> for BitIdx<M>
where M: BitMemory
{
    type Error = &'static str;

    fn try_from(idx: u8) -> Result<Self, Self::Error> {
        Self::new(idx).ok_or(
            "Attempted to construct a `BitIdx` with an index out of range",
        )
    }
}

/** Indicates a semantic index of a dead bit *beyond* a memory element.

This type is equivalent to `BitIdx<M>`, except that it includes `M::BITS` in
its domain. Instances of this type will only ever contain `0` when the span
they describe is *empty*. Non-empty spans always cycle through the domain
`1 ..= M::BITS`.

This type cannot be used for indexing, and does not translate to `BitPos<M>`.
This type has no behavior other than viewing its internal `u8` for arithmetic.

# Type Parameters

- `M`: The memory element type controlled by this tail.
**/
// #[rustc_layout_scalar_valid_range_end(M::BITS + 1)]
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitTail<M>
where M: BitMemory
{
    /// Semantic index *after* an element. Constrained to `0 ..= M::BITS`.
    end: u8,
    /// Marker for the tailed type.
    _ty: PhantomData<M>,
}

impl<M> BitTail<M>
where M: BitMemory
{
    /// The termination index.
    pub const END: Self = Self {
        end: M::BITS,
        _ty: PhantomData,
    };

    /// Mark that `end` is a tail index for a type.
    ///
    /// # Parameters
    ///
    /// - `end` must be in the range `0 ..= M::BITS`.
    pub(crate) unsafe fn new_unchecked(end: u8) -> Self {
        debug_assert!(
            end <= M::BITS,
            "Bit tail {} cannot surpass type width {}",
            end,
            M::BITS,
        );
        Self {
            end,
            _ty: PhantomData,
        }
    }

    pub(crate) fn span(self, len: usize) -> (usize, Self) {
        let val = *self;
        debug_assert!(
            val <= M::BITS,
            "Tail out of range: {} overflows type width {}",
            val,
            M::BITS,
        );

        if len == 0 {
            return (0, self);
        }

        let head = val & M::MASK;
        let bits_in_head = (M::BITS - head) as usize;

        if len <= bits_in_head {
            return (1, (head + len as u8).tail());
        }

        let bits_after_head = len - bits_in_head;
        let elts = bits_after_head >> M::INDX;
        let tail = bits_after_head as u8 & M::MASK;

        let is_zero = (tail == 0) as u8;
        let edges = 2 - is_zero as usize;
        (elts + edges, ((is_zero << M::INDX) | tail).tail())

        /* The above expression is the branchless equivalent of this
        structure:

        if tail == 0 {
            (elts + 1, M::BITS.tail())
        }
        else {
            (elts + 2, tail.tail())
        }
        */
    }
}

impl<M> Deref for BitTail<M>
where M: BitMemory
{
    type Target = u8;

    fn deref(&self) -> &Self::Target {
        &self.end
    }
}

/** Indicates a real electrical index within an element.

This type is produced by [`BitOrder`] implementors, and marks a specific
electrical bit within a memory element, rather than [`BitIdx`]’s semantic bit.

# Type Parameters

- `M`: A `BitMemory` element which provides bounds-checking information. The
  [`new`] constructor uses [`M::BITS`] to ensure that constructed `BitPos`
  instances are always valid to use within `M` elements.

[`BitIdx`]: struct.BitIdx.html
[`BitOrder`]: ../order/trait.BitOrder.html
[`M::BITS`]: ../mem/trait.BitMemory.html#associatedconstant.BITS
[`new`]: #method.new
**/
// #[rustc_layout_scalar_valid_range_end(M::BITS)]
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitPos<M>
where M: BitMemory
{
    /// Electrical position within an element. Constrained to `0 .. M::BITS`.
    pos: u8,
    /// Marker for the positioned type.
    _ty: PhantomData<M>,
}

impl<M> BitPos<M>
where M: BitMemory
{
    /// Produce a new bit position marker at a valid position value.
    ///
    /// `BitOrder` implementations should prefer this method, but *may* use
    /// [`::new_unchecked`] if they can guarantee that the range invariant is
    /// upheld.
    ///
    /// # Parameters
    ///
    /// - `pos`: The bit position value to encode. It must be in the range
    ///   `0 .. M::BITS`.
    ///
    /// # Panics
    ///
    /// This function panics if `pos` is greater than or equal to `M::BITS`.
    ///
    /// [`::new_unchecked`]: #method.new_unchecked
    #[inline]
    pub fn new(pos: u8) -> Self {
        assert!(
            pos < M::BITS,
            "Bit position {} cannot exceed type width {}",
            pos,
            M::BITS,
        );
        Self {
            pos,
            _ty: PhantomData,
        }
    }

    /// Produce a new bit position marker at any position value.
    ///
    /// # Safety
    ///
    /// The caller *must* ensure that `pos` is less than `M::BITS`. `BitOrder`
    /// implementations should prefer [`::new`], which panics on range
    /// failure.
    ///
    /// # Parameters
    ///
    /// - `pos`: The bit position value to encode. This must be in the range
    ///   `0 .. M::BITS`.
    ///
    /// # Returns
    ///
    /// `pos` wrapped in the `BitPos` marker type.
    ///
    /// # Panics
    ///
    /// This function panics if `pos` is greater than or equal to `M::BITS`,
    /// but only in debug builds. It does not inspect `pos` in release builds.
    ///
    /// [`::new`]: #method.new
    #[inline]
    pub unsafe fn new_unchecked(pos: u8) -> Self {
        debug_assert!(
            pos < M::BITS,
            "Bit position {} cannot exceed type width {}",
            pos,
            M::BITS,
        );
        Self {
            pos,
            _ty: PhantomData,
        }
    }

    /// Produces a one-hot selector mask from a position value.
    ///
    /// This is equivalent to `1 << *self`.
    ///
    /// # Parameters
    ///
    /// - `self`
    ///
    /// # Returns
    ///
    /// A one-hot selector mask with the bit at `*self` set.
    #[inline]
    pub fn select(self) -> BitSel<M> {
        unsafe { BitSel::new_unchecked(M::ONE << *self) }
    }
}

impl<M> Deref for BitPos<M>
where M: BitMemory
{
    type Target = u8;

    fn deref(&self) -> &Self::Target {
        &self.pos
    }
}

/** Wrapper type indicating a one-hot encoding of a bit mask for an element.

This type is produced by [`BitOrder`] implementations to speed up access to the
underlying memory. It ensures that masks have exactly one set bit, and can
safely be used as a mask for read/write access to memory.

# Type Parameters

- `M`: The storage type being masked.

[`BitOrder`]: ../order/trait.BitOrder.html
**/
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitSel<M>
where M: BitMemory
{
    /// Mask value.
    sel: M,
}

impl<M> BitSel<M>
where M: BitMemory
{
    /// Produce a new bit-mask wrapper around a one-hot mask value.
    ///
    /// `BitOrder` implementations should prefer this method, but *may* use
    /// [`::new_unchecked`] if they can guarantee that the one-hot invariant
    /// is upheld.
    ///
    /// # Parameters
    ///
    /// - `sel`: The mask value to encode. This **must** have exactly one bit
    ///   set high, and all others set low.
    ///
    /// # Returns
    ///
    /// `sel` wrapped in the `BitSel` marker type.
    ///
    /// # Panics
    ///
    /// This function unconditionally panics if `sel` has zero or multiple
    /// bits set high.
    ///
    /// [`::new_unchecked`]: #method.new_unchecked
    #[inline]
    pub fn new(sel: M) -> Self {
        assert!(
            sel.count_ones() == 1,
            "Masks are required to have exactly one set bit: {:0>1$b}",
            sel,
            M::BITS as usize,
        );
        Self { sel }
    }

    /// Produce a new bit-mask wrapper around any value.
    ///
    /// # Safety
    ///
    /// The caller *must* ensure that `sel` has exactly one bit set.
    /// `BitOrder` implementations should prefer [`::new`], which always
    /// panics on failure.
    ///
    /// # Parameters
    ///
    /// - `sel`: The mask value to encode. This must have exactly one bit
    ///   set. Failure to uphold this requirement will introduce uncontrolled
    ///   state contamination.
    ///
    /// # Returns
    ///
    /// `sel` wrapped in the `BitSel` marker type.
    ///
    /// # Panics
    ///
    /// This function panics if `sel` has zero or multiple bits set, only in
    /// debug builds. It does not inspect `sel` in release builds.
    ///
    /// [`::new`]: #method.new
    #[inline]
    pub unsafe fn new_unchecked(sel: M) -> Self {
        debug_assert!(
            sel.count_ones() == 1,
            "Masks are required to have exactly one set bit: {:0>1$b}",
            sel,
            M::BITS as usize,
        );
        Self { sel }
    }
}

impl<M> Deref for BitSel<M>
where M: BitMemory
{
    type Target = M;

    fn deref(&self) -> &Self::Target {
        &self.sel
    }
}

/** A multi-bit selector mask.

Unlike [`BitSel`], which enforces a strict one-hot mask encoding, this mask
type permits any number of bits to be set or unset. This is used to combine
batch operations in an element. It is only constructed by accumulating
[`BitPos`] or [`BitSel`] values. As `BitSel` is only constructed from
`BitPos`, and `BitPos` is only constructed from [`BitIdx`] and [`BitOrder`],
this enforces a chain of responsibility to prove that a given multimask is
safe.

[`BitIdx`]: struct.BitIdx.html
[`BitOrder`]: ../order/trait.BitOrder.html
[`BitPos`]: struct.BitPos.html
[`BitSel`]: struct.BitSel.html
**/
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitMask<M>
where M: BitMemory
{
    /// A mask of any number of bits to modify.
    mask: M,
}

impl<M> BitMask<M>
where M: BitMemory
{
    /// A full mask.
    pub const ALL: Self = Self { mask: M::ALL };

    /// An empty mask.
    pub const ZERO: Self = Self { mask: M::ZERO };

    /// Wraps a value as a bitmask.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the mask value is correct in the caller’s
    /// provenance.
    ///
    /// # Parameters
    ///
    /// - `mask`: Any integer, to be reïnterpreted as a bitmask.
    ///
    /// # Returns
    ///
    /// The `mask` value as a bitmask.
    pub fn new(mask: M) -> Self {
        Self { mask }
    }
}

impl<M> Product<BitPos<M>> for BitMask<M>
where M: BitMemory
{
    fn product<I>(iter: I) -> Self
    where I: Iterator<Item = BitPos<M>> {
        iter.map(BitPos::select).product()
    }
}

impl<M> Product<BitSel<M>> for BitMask<M>
where M: BitMemory
{
    fn product<I>(iter: I) -> Self
    where I: Iterator<Item = BitSel<M>> {
        iter.fold(Self::ALL, BitAnd::bitand)
    }
}

/// Enable accumulation of a multi-bit mask from a sequence of position values.
impl<M> Sum<BitPos<M>> for BitMask<M>
where M: BitMemory
{
    fn sum<I>(iter: I) -> Self
    where I: Iterator<Item = BitPos<M>> {
        iter.map(BitPos::select).sum()
    }
}

/// Enable accumulation of a multi-bit mask from a sequence of selector masks.
impl<M> Sum<BitSel<M>> for BitMask<M>
where M: BitMemory
{
    fn sum<I>(iter: I) -> Self
    where I: Iterator<Item = BitSel<M>> {
        iter.fold(Self::ZERO, BitOr::bitor)
    }
}

impl<M> BitAnd<M> for BitMask<M>
where M: BitMemory
{
    type Output = Self;

    fn bitand(self, rhs: M) -> Self {
        Self {
            mask: self.mask & rhs,
        }
    }
}

impl<M> BitAnd<BitPos<M>> for BitMask<M>
where M: BitMemory
{
    type Output = Self;

    fn bitand(self, rhs: BitPos<M>) -> Self {
        self & rhs.select()
    }
}

impl<M> BitAnd<BitSel<M>> for BitMask<M>
where M: BitMemory
{
    type Output = Self;

    fn bitand(self, rhs: BitSel<M>) -> Self {
        Self {
            mask: self.mask & rhs.sel,
        }
    }
}

impl<M> BitOr<M> for BitMask<M>
where M: BitMemory
{
    type Output = Self;

    fn bitor(self, rhs: M) -> Self {
        Self {
            mask: self.mask | rhs,
        }
    }
}

/// Insert a position value into a multimask.
impl<M> BitOr<BitPos<M>> for BitMask<M>
where M: BitMemory
{
    type Output = Self;

    fn bitor(self, rhs: BitPos<M>) -> Self {
        self | rhs.select()
    }
}

/// Insert a single selector into a multimask.
impl<M> BitOr<BitSel<M>> for BitMask<M>
where M: BitMemory
{
    type Output = Self;

    fn bitor(self, rhs: BitSel<M>) -> Self {
        Self {
            mask: self.mask | rhs.sel,
        }
    }
}

impl<M> Deref for BitMask<M>
where M: BitMemory
{
    type Target = M;

    fn deref(&self) -> &Self::Target {
        &self.mask
    }
}

impl<M> Not for BitMask<M>
where M: BitMemory
{
    type Output = Self;

    fn not(self) -> Self {
        Self { mask: !self.mask }
    }
}

/** Internal convenience trait for wrapping numbers with appropriate markers.

This trait must only be used on values that are known to be valid for their
context. It provides an internal-only shorthand for wrapping integer literals
and known-good values in marker types.

It is only implemented on `u8`.
**/
pub(crate) trait Indexable {
    /// Wraps a value as a `BitIdx<M>`.
    fn idx<M>(self) -> BitIdx<M>
    where M: BitMemory;

    /// Wraps a value as a `BitTail<M>`.
    fn tail<M>(self) -> BitTail<M>
    where M: BitMemory;

    /// Wraps a value as a `BitPos<M>`.
    fn pos<M>(self) -> BitPos<M>
    where M: BitMemory;
}

impl Indexable for u8 {
    fn idx<M>(self) -> BitIdx<M>
    where M: BitMemory {
        unsafe { BitIdx::<M>::new_unchecked(self) }
    }

    fn tail<M>(self) -> BitTail<M>
    where M: BitMemory {
        unsafe { BitTai
-> BitPos<M>
    where M: BitMemory {
        unsafe { BitPos::<M>::new_unchecked(self) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn jump_far_up() {
        // isize::max_value() is 0x7f...ff, so the result bit will be one
        // less than the start bit.
        for n in 1 .. 8 {
            let (elt, bit) = n.idx::<u8>().offset(isize::max_value());
            assert_eq!(elt, (isize::max_value() >> u8::INDX) + 1);
            assert_eq!(*bit, n - 1);
        }
        let (elt, bit) = 0u8.idx::<u8>().offset(isize::max_value());
        assert_eq!(elt, isize::max_value() >> u8::INDX);
        assert_eq!(*bit, 7);
    }

    #[test]
    fn jump_far_down() {
        // isize::min_value() is 0x80...00, so the result bit will be equal
        // to the start bit.
        for n in 0 .. 8 {
            let (elt, bit) = n.idx::<u8>().offset(isize::min_value());
            assert_eq!(elt, isize::min_value() >> u8::INDX);
            assert_eq!(*bit, n);
        }
    }
}
l::<M>::new_unchecked(self) } } fn pos<M>(self)
identifier_body
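The branchless `(elts + edges, ...)` expression in `BitTail::span` above is subtle enough to deserve a standalone check. The sketch below re-derives it specialized to `u8` (so `BITS = 8`, `INDX = 3`, `MASK = 7`) and asserts it against the branchy form given in the source comment; the constants and function names are local to this sketch, not part of the crate's API.

// Standalone re-derivation of the `BitTail::span` arithmetic for `u8`.
const BITS: u8 = 8; // width of u8
const INDX: u8 = 3; // log2(BITS)
const MASK: u8 = 7; // BITS - 1

/// The branchless form used in `BitTail::span`.
fn span_branchless(end: u8, len: usize) -> (usize, u8) {
    if len == 0 {
        return (0, end);
    }
    let head = end & MASK;
    let bits_in_head = (BITS - head) as usize;
    if len <= bits_in_head {
        return (1, head + len as u8);
    }
    let bits_after_head = len - bits_in_head;
    let elts = bits_after_head >> INDX;
    let tail = bits_after_head as u8 & MASK;
    let is_zero = (tail == 0) as u8;
    let edges = 2 - is_zero as usize;
    // `is_zero << INDX` turns a zero tail back into `BITS`.
    (elts + edges, (is_zero << INDX) | tail)
}

/// The branchy equivalent shown in the comment inside `span`.
fn span_branchy(end: u8, len: usize) -> (usize, u8) {
    if len == 0 {
        return (0, end);
    }
    let head = end & MASK;
    let bits_in_head = (BITS - head) as usize;
    if len <= bits_in_head {
        return (1, head + len as u8);
    }
    let bits_after_head = len - bits_in_head;
    let elts = bits_after_head >> INDX;
    let tail = bits_after_head as u8 & MASK;
    if tail == 0 {
        (elts + 1, BITS) // span ends exactly on an element boundary
    } else {
        (elts + 2, tail) // partial head element plus partial tail element
    }
}

fn main() {
    // Exhaustively compare both forms over every tail value and a range of
    // span lengths; they must agree everywhere.
    for end in 0..=BITS {
        for len in 0..4096 {
            assert_eq!(span_branchless(end, len), span_branchy(end, len));
        }
    }
    println!("branchless and branchy span computations agree");
}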
::Target { &self.sel } } /** A multi-bit selector mask. Unlike [`BitSel`], which enforces a strict one-hot mask encoding, this mask type permits any number of bits to be set or unset. This is used to combine batch operations in an element. It is only constructed by accumulating [`BitPos`] or [`BitSel`] values. As `BitSel` is only constructed from `BitPos`, and `BitPos` is only constructed from [`BitIdx`] and [`BitOrder`], this enforces a chain of responsibility to prove that a given multimask is safe. [`BitIdx`]: struct.BitIdx.html [`BitOrder`]:../order/trait.BitOrder.html [`BitPos`]: struct.BitPos.html [`BitSel`]: struct.BitSel.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitMask<M> where M: BitMemory { /// A mask of any number of bits to modify. mask: M, } impl<M> BitMask<M> where M: BitMemory { /// A full mask. pub const ALL: Self = Self { mask: M::ALL }; /// An empty mask. pub const ZERO: Self = Self { mask: M::ZERO }; /// Wraps a value as a bitmask. /// /// # Safety /// /// The caller must ensure that the mask value is correct in the caller’s /// provenance. /// /// # Parameters /// /// - `mask`: Any integer, to be reïnterpreted as a bitmask. /// /// # Returns /// /// The `mask` value as a bitmask. pub fn new(mask: M) -> Self { Self { mask } } } impl<M> Product<BitPos<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).product() } } impl<M> Product<BitSel<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ALL, BitAnd::bitand) } } /// Enable accumulation of a multi-bit mask from a sequence of position values. impl<M> Sum<BitPos<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).sum() } } /// Enable accumulation of a multi-bit mask from a sequence of selector masks. impl<M> Sum<BitSel<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ZERO, BitOr::bitor) } } impl<M> BitAnd<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: M) -> Self { Self { mask: self.mask & rhs, } } } impl<M> BitAnd<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitPos<M>) -> Self { self & rhs.select() } } impl<M> BitAnd<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask & rhs.sel, } } } impl<M> BitOr<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: M) -> Self { Self { mask: self.mask | rhs, } } } /// Insert a position value into a multimask. impl<M> BitOr<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitPos<M>) -> Self { self | rhs.select() } } /// Insert a single selector into a multimask. impl<M> BitOr<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask | rhs.sel, } } } impl<M> Deref for BitMask<M> where M: BitMemory { type Target = M; fn deref(&self) -> &Self::Target { &self.mask } } impl<M> Not for BitMask<M> where M: BitMemory { type Output = Self; fn not(self) -> Self { Self { mask:!self.mask } } } /** Internal convenience trait for wrapping numbers with appropriate markers. 
This trait must only be used on values that are known to be valid for their context. It provides an internal-only shorthand for wrapping integer literals and known-good values in marker types. It is only implemented on `u8`. **/ pub(crate) trait Indexable { /// Wraps a value as a `BitIdx<M>`. fn idx<M>(self) -> BitIdx<M> where M: BitMemory; /// Wraps a value as a `BitTail<M>`. fn tail<M>(self) -> BitTail<M> where M: BitMemory; /// Wraps a value as a `BitPos<M>`. fn pos<M>(self) -> BitPos<M> where M: BitMemory; } impl Indexable for u8 { fn idx<M>(self) -> BitIdx<M> where M: BitMemory { unsafe { BitIdx::<M>::new_unchecked(self) } } fn tail<M>(self) -> BitTail<M> where M: BitMemory { unsafe { BitTail::<M>::new_unchecked(self) } } fn pos<M>(self) -> BitPos<M> where M: BitMemory { unsafe { BitPos::<M>::new_unchecked(self) } } } #[cfg(test)] mod tests { use super::*; #[test] fn jump_far_up() { // isize::max_value() is 0x7f...ff, so the result bit will be one less // than the start bit. for n in 1.. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, (isize::max_value() >> u8::INDX) + 1); assert_eq!(*bit, n - 1); } let (elt, bit) = 0u8.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, isize::max_value() >> u8::INDX); assert_eq!(*bit, 7); } #[test] fn jump_far_down() { // isize::min_value() is 0x80...00, so the result bit will be equal to // the start bit for n in 0.. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::min_value()); assert_eq!(elt, isize::min_value() >> u8::INDX); assert_eq!(*bit, n); } } }
&Self
identifier_name
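
The `offset` arithmetic above generalizes over any `BitMemory` element: the element jump is the signed sum downshifted by `M::INDX`, and the destination bit is the sum masked by `M::MASK`. A minimal standalone sketch of that arithmetic for `u8` elements (where the shift is 3 and the mask is `0b111`), written as a hypothetical free function rather than the crate's `BitIdx` type:

fn offset_u8(idx: u8, by: isize) -> (isize, u8) {
    // Signed-add the starting bit index and the jump distance.
    let (far, ovf) = by.overflowing_add(idx as isize);
    if !ovf {
        if (0..8).contains(&far) {
            // Still inside the origin element: no element movement.
            (0, far as u8)
        } else {
            // Arithmetic downshift counts elements (negative moves down in
            // memory); the low three bits are the in-element index.
            (far >> 3, far as u8 & 0b111)
        }
    } else {
        // Overflowing signed addition is the same machine operation as
        // unsigned addition; recast and shift without sign-extension.
        let far = far as usize;
        ((far >> 3) as isize, far as u8 & 0b111)
    }
}

fn main() {
    assert_eq!(offset_u8(5, 0), (0, 5)); // stays in the origin element
    assert_eq!(offset_u8(5, 3), (1, 0)); // crosses into the next element up
    assert_eq!(offset_u8(0, -1), (-1, 7)); // last bit of the previous element
}

The negative case relies on arithmetic right shift of a negative `isize`, which is exactly why the non-overflow branch keeps `far` signed.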
index.rs
/*! Indexing within memory elements.

This module provides types which guarantee certain properties about selecting
bits within a memory element. These types enable their use sites to explicitly
declare the indexing behavior they require, and move safety checks from runtime
to compile time.

# Bit Indexing

The [`BitIdx`] type represents the semantic index of a bit within a memory
element. It does not perform bit positioning, and cannot be used to create a
shift instruction or mask value. It is transformed into a value which can do
these things – [`BitPos`] – through the [`BitOrder::at`] function.

# Region End Marker

`bitvec` uses “half-open” ranges, described by a starting point and a count of
members that are live. This means that the “end” of a range is not the last
member that is *in*cluded in the range, but rather the first member that is
*ex*cluded from it.

This requires the [`BitTail`] end marker to include in its range the width of
the element type (`8` for `u8`, etc.), in order to mark that a region includes
the very last bit in the element (index `7` for `u8`, etc.).

The starting number for a dead region cannot be used to perform bit selection,
but is used to provide range computation, so it is kept distinct from the
indexing types.

# Bit Positioning

The [`BitPos`] type corresponds directly to a bit position in a memory element.
Its value can be used to create shift instructions which select part of memory.
It is only ever created by the `BitOrder::at` function.

# Bit Selection

The [`BitSel`] type is a one-hot mask encoding for a memory element. Unlike the
previous types, which are range-limited integers, this type is a wrapper over a
memory element and guarantees that it can be used as a mask value in `&` and
`|` operations to modify exactly one bit. It is equivalent to
`1 << BitPos.value()`.

# Bit Masking

Lastly, the [`BitMask`] type is a bitmask that permits any number of bits to be
set or cleared. It is provided as a type rather than a bare value in order to
clearly communicate that there is no restriction on what this mask may affect.

[`BitIdx`]: struct.BitIdx.html
[`BitMask`]: struct.BitMask.html
[`BitOrder::at`]: ../order/trait.BitOrder.html#method.at
[`BitPos`]: struct.BitPos.html
[`BitSel`]: struct.BitSel.html
[`BitTail`]: struct.BitTail.html
!*/

use crate::mem::BitMemory;

use core::{
    fmt::{
        self,
        Binary,
        Formatter,
    },
    iter::{
        Product,
        Sum,
    },
    marker::PhantomData,
    ops::{
        BitAnd,
        BitOr,
        Deref,
        Not,
    },
};

#[cfg(feature = "serde")]
use core::convert::TryFrom;

/** Indicates a semantic index of a bit within a memory element.

This is a counter in the domain `0 .. M::BITS`, and marks a semantic position
in the ordering sequence described by a [`BitOrder`] implementation. It is used
for both position computation through `BitOrder` and range computation in
[`BitPtr`].

# Type Parameters

- `M`: The memory element type controlled by this index.

[`BitOrder`]: ../order/trait.BitOrder.html
[`BitPtr`]: ../pointer/struct.BitPtr.html
**/
// If Rust had user-provided ranged integers, this would be communicable to the
// compiler:
// #[rustc_layout_scalar_valid_range_end(M::BITS)]
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct BitIdx<M>
where M: BitMemory {
    /// Semantic index within an element. Constrained to `0 .. M::BITS`.
    idx: u8,
    /// Marker for the indexed type.
    _ty: PhantomData<M>,
}

impl<M> BitIdx<M>
where M: BitMemory {
    /// The zero index.
pub const ZERO: Self = Self { idx: 0, _ty: PhantomData, }; /// Wraps a counter value as a known-good index of the `M` element type. /// /// # Parameters /// /// - `idx`: A semantic index within a `M` memory element. /// /// # Returns /// /// If `idx` is within the range `0.. M::BITS`, then this returns the index /// value wrapped in the index type; if `idx` exceeds this range, then this /// returns `None`. pub fn new(idx: u8) -> Option<Self> { if idx >= M::BITS { return None; } Some(unsafe { Self::new_unchecked(idx) }) } /// Wraps a counter value as a known-good index of the `M` element type. /// /// # Parameters /// /// - `idx`: A semantic index within a `M` memory element. It must be in the /// range `0.. M::BITS`. /// /// # Safety /// /// If `idx` is outside the range, then the produced value will cause errors /// and memory unsafety when used. #[inline] pub unsafe fn new_unchecked(idx: u8) -> Self { debug_assert!( idx < M::BITS, "Bit index {} cannot exceed type width {}", idx, M::BITS, ); Self { idx, _ty: PhantomData, } } /// Finds the destination bit a certain distance away from a starting bit. /// /// This produces the number of elements to move from the starting point, /// and then the bit index of the destination bit in the destination /// element. /// /// # Parameters /// /// - `self`: A bit index in some memory element, used as the starting /// position for the offset calculation. /// - `by`: The number of bits by which to move. Negative values move /// downwards in memory: towards index zero, then counting from index /// `M::MASK` to index zero in the next element lower in memory, repeating /// until arrival. Positive values move upwards in memory: towards index /// `M::MASK`, then counting from index zero to index `M::MASK` in the /// next element higher in memory, repeating until arrival. /// /// # Returns /// /// - `.0`: The number of elements by which to offset the caller’s element /// cursor. This value can be passed directly into [`ptr::offset`]. /// - `.1`: The bit index of the destination bit in the element selected by /// applying the `.0` pointer offset. /// /// # Safety /// /// `by` must not be far enough to cause the returned element offset value /// to, when applied to the original memory address via [`ptr::offset`], /// produce a reference out of bounds of the original allocation. This /// method has no way of checking this requirement. /// /// [`ptr::offset`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub(crate) fn offset(self, by: isize) -> (isize, Self) { let val = *self; /* Signed-add `*self` and the jump distance. Overflowing is the unlikely branch. The result is a bit index, and an overflow marker. `far` is permitted to be negative; this means that it is lower in memory than the origin bit. The number line has its origin at the front edge of the origin element, so `-1` is the *last* bit of the prior memory element. */ let (far, ovf) = by.overflowing_add(val as isize); // If the `isize` addition does not overflow, then the sum can be used // directly. if!ovf { // If `far` is in the origin element, then the jump moves zero // elements and produces `far` as an absolute index directly. if (0.. M::BITS as isize).contains(&far) { (0, (far as u8).idx()) } /* Otherwise, downshift the bit distance to compute the number of elements moved in either direction, and mask to compute the absolute bit index in the destination element. 
*/ else { (far >> M::INDX, (far as u8 & M::MASK).idx()) } } else { /* Overflowing `isize` addition happens to produce ordinary `usize` addition. In point of fact, `isize` addition and `usize` addition are the same machine instruction to perform the sum; it is merely the signed interpretation of the sum that differs. The sum can be recast back to `usize` without issue. */ let far = far as usize; // This is really only needed in order to prevent sign-extension of // the downshift; once shifted, the value can be safely re-signed. ((far >> M::INDX) as isize, (far as u8 & M::MASK).idx()) } } /// Computes the size of a span from `self` for `len` bits. /// /// Spans always extend upwards in memory. /// /// # Parameters /// /// - `self`: The starting bit position of the span. /// - `len`: The number of bits to include in the span. /// /// # Returns /// /// - `.0`: The number of elements of `M` included in the span. If `len` is /// `0`, this will be `0`; otherwise, it will be at least one. /// - `.1`: The index of the first dead bit *after* the span. If `self` and /// `len` are both `0`, this will be `0`; otherwise, it will be in the /// domain `1..= M::BITS`. /// /// # Notes /// /// This defers to [`BitTail::span`], because `BitTail` is a strict superset /// of `BitIdx` (it is `{ BitIdx | M::BITS }`), and spans frequently begin /// from the tail of a slice in this crate. The `offset` function is *not* /// implemented on `BitTail`, and remains on `BitIdx` because offsets can /// only be computed from bit addresses that exist. It does not make sense /// to compute the offset from a `M::BITS` tail. /// /// [`BitTail::span`]: struct.BitTail.html#method.span #[inline] pub(crate) fn span(self, len: usize) -> (usize, BitTail<M>) { unsafe { BitTail::new_unchecked(*self) }.span(len) } } impl<M> Binary for BitIdx<M> where M: BitMemory { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { write!(fmt, "0b{:0>1$b}", self.idx, M::INDX as usize) } } impl<M> Deref for BitIdx<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.idx } } #[cfg(feature = "serde")] impl<M> TryFrom<u8> for BitIdx<M> where M: BitMemory { type Error = &'static str; fn try_from(idx: u8) -> Result<Self, Self::Error> { Self::new(idx).ok_or( "Attempted to construct a `BitIdx` with an index out of range", ) } } /** Indicates a semantic index of a dead bit *beyond* a memory element. This type is equivalent to `BitIdx<M>`, except that it includes `M::BITS` in its domain. Instances of this type will only ever contain `0` when the span they describe is *empty*. Non-empty spans always cycle through the domain `1..= M::BITS`. This type cannot be used for indexing, and does not translate to `BitPos<M>`. This type has no behavior other than viewing its internal `u8` for arithmetic. # Type Parameters - `M`: The memory element type controlled by this tail. **/ // #[rustc_layout_scalar_valid_range_end(M::BITS + 1)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitTail<M> where M: BitMemory { /// Semantic index *after* an element. Constrained to `0..= M::BITS`. end: u8, /// Marker for the tailed type. _ty: PhantomData<M>, } impl<M> BitTail<M> where M: BitMemory
{ /// The termination index. pub const END: Self = Self { end: M::BITS, _ty: PhantomData, }; /// Mark that `end` is a tail index for a type. /// /// # Parameters /// /// - `end` must be in the range `0..= M::BITS`. pub(crate) unsafe fn new_unchecked(end: u8) -> Self { debug_assert!( end <= M::BITS, "Bit tail {} cannot surpass type width {}", end, M::BITS, ); Self { end, _ty: PhantomData, } } pub(crate) fn span(self, len: usize) -> (usize, Self) { let val = *self; debug_assert!( val <= M::BITS, "Tail out of range: {} overflows type width {}", val, M::BITS, ); if len == 0 { return (0, self); } let head = val & M::MASK; let bits_in_head = (M::BITS - head) as usize; if len <= bits_in_head { return (1, (head + len as u8).tail()); } let bits_after_head = len - bits_in_head; let elts = bits_after_head >> M::INDX; let tail = bits_after_head as u8 & M::MASK; let is_zero = (tail == 0) as u8; let edges = 2 - is_zero as usize; (elts + edges, ((is_zero << M::INDX) | tail).tail()) /* The above expression is the branchless equivalent of this structure: if tail == 0 { (elts + 1, M::BITS.tail()) } else { (elts + 2, tail.tail()) } */ } } impl<M> Deref for BitTail<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.end } } /** Indicates a real electrical index within an element. This type is produced by [`BitOrder`] implementors, and marks a specific electrical bit within a memory element, rather than [`BitIdx`]’s semantic bit. # Type Parameters - `M`: A `BitMemory` element which provides bounds-checking information. The [`new`] constructor uses [`M::BITS`] to ensure that constructed `BitPos` instances are always valid to use within `M` elements. [`BitIdx`]: struct.BitIdx.html [`BitOrder`]:../order/trait.BitOrder.html [`M::BITS`]:../mem/trait.BitMemory.html#associatedconstant.BITS [`new`]: #method.new **/ // #[rustc_layout_scalar_valid_range_end(M::BITS)] #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitPos<M> where M: BitMemory { /// Electrical position within an element. Constrained to `0.. M::BITS`. pos: u8, /// Marker for the positioned type. _ty: PhantomData<M>, } impl<M> BitPos<M> where M: BitMemory { /// Produce a new bit position marker at a valid position value. /// /// `BitOrder` implementations should prefer this method, but *may* use /// [`::new_unchecked`] if they can guarantee that the range invariant is /// upheld. /// /// # Parameters /// /// - `pos`: The bit position value to encode. It must be in the range `0.. /// M::BITS`. /// /// # Panics /// /// This function panics if `pos` is greater than or equal to `M::BITS`. /// /// [`::new_unchecked`]: #method.new_unchecked #[inline] pub fn new(pos: u8) -> Self { assert!( pos < M::BITS, "Bit position {} cannot exceed type width {}", pos, M::BITS, ); Self { pos, _ty: PhantomData, } } /// Produce a new bit position marker at any position value. /// /// # Safety /// /// The caller *must* ensure that `pos` is less than `M::BITS`. `BitOrder` /// implementations should prefer [`::new`], which panics on range failure. /// /// # Parameters /// /// - `pos`: The bit position value to encode. This must be in the range `0 /// .. M::BITS`. /// /// # Returns /// /// `pos` wrapped in the `BitPos` marker type. /// /// # Panics /// /// This function panics if `pos` is greater than or equal to `M::BITS`, but /// only in debug builds. It does not inspect `pos` in release builds. 
/// /// [`::new`]: #method.new #[inline] pub unsafe fn new_unchecked(pos: u8) -> Self { debug_assert!( pos < M::BITS, "Bit position {} cannot exceed type width {}", pos, M::BITS, ); Self { pos, _ty: PhantomData, } } /// Produces a one-hot selector mask from a position value. /// /// This is equivalent to `1 << *self`. /// /// # Parameters /// /// - `self` /// /// # Returns /// /// A one-hot selector mask with the bit at `*self` set. #[inline] pub fn select(self) -> BitSel<M> { unsafe { BitSel::new_unchecked(M::ONE << *self) } } } impl<M> Deref for BitPos<M> where M: BitMemory { type Target = u8; fn deref(&self) -> &Self::Target { &self.pos } } /** Wrapper type indicating a one-hot encoding of a bit mask for an element. This type is produced by [`BitOrder`] implementations to speed up access to the underlying memory. It ensures that masks have exactly one set bit, and can safely be used as a mask for read/write access to memory. # Type Parameters - `M`: The storage type being masked. [`BitOrder`]:../order/trait.BitOrder.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitSel<M> where M: BitMemory { /// Mask value. sel: M, } impl<M> BitSel<M> where M: BitMemory { /// Produce a new bit-mask wrapper around a one-hot mask value. /// /// `BitOrder` implementations should prefer this method, but *may* use /// [`::new_unchecked`] if they can guarantee that the one-hot invariant is /// upheld. /// /// # Parameters /// /// - `mask`: The mask value to encode. This **must** have exactly one bit /// set high, and all others set low. /// /// # Returns /// /// `mask` wrapped in the `BitMask` marker type. /// /// # Panics /// /// This function unconditionally panics if `mask` has zero or multiple bits /// set high. /// /// [`::new_unchecked`]: #method.new_unchecked #[inline] pub fn new(sel: M) -> Self { assert!( sel.count_ones() == 1, "Masks are required to have exactly one set bit: {:0>1$b}", sel, M::BITS as usize, ); Self { sel } } /// Produce a new bit-mask wrapper around any value. /// /// # Safety /// /// The caller *must* ensure that `mask` has exactly one bit set. `BitOrder` /// implementations should prefer [`::new`], which always panics on failure. /// /// # Parameters /// /// - `mask`: The mask value to encode. This must have exactly one bit set. /// Failure to uphold this requirement will introduce uncontrolled state /// contamination. /// /// # Returns /// /// `mask` wrapped in the `BitMask` marker type. /// /// # Panics /// /// This function panics if `mask` has zero or multiple bits set, only in /// debug builds. It does not inspect `mask` in release builds. /// /// [`::new`]: #method.new #[inline] pub unsafe fn new_unchecked(sel: M) -> Self { debug_assert!( sel.count_ones() == 1, "Masks are required to have exactly one set bit: {:0>1$b}", sel, M::BITS as usize, ); Self { sel } } } impl<M> Deref for BitSel<M> where M: BitMemory { type Target = M; fn deref(&self) -> &Self::Target { &self.sel } } /** A multi-bit selector mask. Unlike [`BitSel`], which enforces a strict one-hot mask encoding, this mask type permits any number of bits to be set or unset. This is used to combine batch operations in an element. It is only constructed by accumulating [`BitPos`] or [`BitSel`] values. As `BitSel` is only constructed from `BitPos`, and `BitPos` is only constructed from [`BitIdx`] and [`BitOrder`], this enforces a chain of responsibility to prove that a given multimask is safe. 
[`BitIdx`]: struct.BitIdx.html [`BitOrder`]:../order/trait.BitOrder.html [`BitPos`]: struct.BitPos.html [`BitSel`]: struct.BitSel.html **/ #[repr(transparent)] #[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct BitMask<M> where M: BitMemory { /// A mask of any number of bits to modify. mask: M, } impl<M> BitMask<M> where M: BitMemory { /// A full mask. pub const ALL: Self = Self { mask: M::ALL }; /// An empty mask. pub const ZERO: Self = Self { mask: M::ZERO }; /// Wraps a value as a bitmask. /// /// # Safety /// /// The caller must ensure that the mask value is correct in the caller’s /// provenance. /// /// # Parameters /// /// - `mask`: Any integer, to be reïnterpreted as a bitmask. /// /// # Returns /// /// The `mask` value as a bitmask. pub fn new(mask: M) -> Self { Self { mask } } } impl<M> Product<BitPos<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).product() } } impl<M> Product<BitSel<M>> for BitMask<M> where M: BitMemory { fn product<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ALL, BitAnd::bitand) } } /// Enable accumulation of a multi-bit mask from a sequence of position values. impl<M> Sum<BitPos<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitPos<M>> { iter.map(BitPos::select).sum() } } /// Enable accumulation of a multi-bit mask from a sequence of selector masks. impl<M> Sum<BitSel<M>> for BitMask<M> where M: BitMemory { fn sum<I>(iter: I) -> Self where I: Iterator<Item = BitSel<M>> { iter.fold(Self::ZERO, BitOr::bitor) } } impl<M> BitAnd<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: M) -> Self { Self { mask: self.mask & rhs, } } } impl<M> BitAnd<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitPos<M>) -> Self { self & rhs.select() } } impl<M> BitAnd<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitand(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask & rhs.sel, } } } impl<M> BitOr<M> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: M) -> Self { Self { mask: self.mask | rhs, } } } /// Insert a position value into a multimask. impl<M> BitOr<BitPos<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitPos<M>) -> Self { self | rhs.select() } } /// Insert a single selector into a multimask. impl<M> BitOr<BitSel<M>> for BitMask<M> where M: BitMemory { type Output = Self; fn bitor(self, rhs: BitSel<M>) -> Self { Self { mask: self.mask | rhs.sel, } } } impl<M> Deref for BitMask<M> where M: BitMemory { type Target = M; fn deref(&self) -> &Self::Target { &self.mask } } impl<M> Not for BitMask<M> where M: BitMemory { type Output = Self; fn not(self) -> Self { Self { mask:!self.mask } } } /** Internal convenience trait for wrapping numbers with appropriate markers. This trait must only be used on values that are known to be valid for their context. It provides an internal-only shorthand for wrapping integer literals and known-good values in marker types. It is only implemented on `u8`. **/ pub(crate) trait Indexable { /// Wraps a value as a `BitIdx<M>`. fn idx<M>(self) -> BitIdx<M> where M: BitMemory; /// Wraps a value as a `BitTail<M>`. fn tail<M>(self) -> BitTail<M> where M: BitMemory; /// Wraps a value as a `BitPos<M>`. 
fn pos<M>(self) -> BitPos<M> where M: BitMemory; } impl Indexable for u8 { fn idx<M>(self) -> BitIdx<M> where M: BitMemory { unsafe { BitIdx::<M>::new_unchecked(self) } } fn tail<M>(self) -> BitTail<M> where M: BitMemory { unsafe { BitTail::<M>::new_unchecked(self) } } fn pos<M>(self) -> BitPos<M> where M: BitMemory { unsafe { BitPos::<M>::new_unchecked(self) } } } #[cfg(test)] mod tests { use super::*; #[test] fn jump_far_up() { // isize::max_value() is 0x7f...ff, so the result bit will be one less // than the start bit. for n in 1.. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, (isize::max_value() >> u8::INDX) + 1); assert_eq!(*bit, n - 1); } let (elt, bit) = 0u8.idx::<u8>().offset(isize::max_value()); assert_eq!(elt, isize::max_value() >> u8::INDX); assert_eq!(*bit, 7); } #[test] fn jump_far_down() { // isize::min_value() is 0x80...00, so the result bit will be equal to // the start bit for n in 0.. 8 { let (elt, bit) = n.idx::<u8>().offset(isize::min_value()); assert_eq!(elt, isize::min_value() >> u8::INDX); assert_eq!(*bit, n); } } }
random_line_split
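
The file above documents `BitPos::select` as equivalent to `1 << *self`, and builds multi-bit `BitMask` values by accumulating selectors through the `Sum` and `BitOr` impls. A minimal sketch of that idea for `u8`, using hypothetical helper functions rather than the crate's marker types:

fn select_u8(pos: u8) -> u8 {
    // One-hot selector: exactly one bit set, at position `pos`.
    assert!(pos < 8, "bit position {} cannot exceed type width 8", pos);
    1u8 << pos
}

fn main() {
    // Accumulate a multimask by OR-ing one-hot selectors, mirroring the
    // `Sum<BitPos<M>>` impl above.
    let mask = [0u8, 3, 7]
        .iter()
        .fold(0u8, |acc, &pos| acc | select_u8(pos));
    assert_eq!(mask, 0b1000_1001);
    assert_eq!(mask.count_ones(), 3);

    // Intersect with `&`, mirroring the `BitAnd` impls.
    assert_eq!(mask & select_u8(3), 0b0000_1000);
}

The `count_ones() == 1` assertion in `BitSel::new` is the same invariant that `select_u8` guarantees by construction here.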
transaction.rs
use std::{
    collections::HashMap,
    future::Future,
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
};

use futures::{
    stream::{FuturesUnordered, StreamExt},
    FutureExt,
};
use tower::{Service, ServiceExt};
use tracing::Instrument;

use zebra_chain::{
    block,
    parameters::{Network, NetworkUpgrade},
    transaction::{self, HashType, Transaction},
    transparent,
};
use zebra_script::CachedFfiTransaction;
use zebra_state as zs;

use crate::{error::TransactionError, primitives, script, BoxError};

mod check;

/// Asynchronous transaction verification.
#[derive(Debug, Clone)]
pub struct Verifier<ZS> {
    network: Network,
    script_verifier: script::Verifier<ZS>,
    // spend_verifier: groth16::Verifier,
    // output_verifier: groth16::Verifier,
    // joinsplit_verifier: groth16::Verifier,
}

impl<ZS> Verifier<ZS>
where
    ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
    ZS::Future: Send + 'static,
{
    // XXX: how should this struct be constructed?
    pub fn new(network: Network, script_verifier: script::Verifier<ZS>) -> Self {
        // let (spend_verifier, output_verifier, joinsplit_verifier) = todo!();
        Self {
            network,
            script_verifier,
            // spend_verifier,
            // output_verifier,
            // joinsplit_verifier,
        }
    }
}

/// Specifies whether a transaction should be verified as part of a block or as
/// part of the mempool.
///
/// Transaction verification has slightly different consensus rules, depending on
/// whether the transaction is to be included in a block or in the mempool.
#[allow(dead_code)]
pub enum Request {
    /// Verify the supplied transaction as part of a block.
    Block {
        /// The transaction itself.
        transaction: Arc<Transaction>,
        /// Additional UTXOs which are known at the time of verification.
        known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>,
        /// The height of the block containing this transaction, used to
        /// determine the applicable network upgrade.
        height: block::Height,
    },
    /// Verify the supplied transaction as part of the mempool.
    Mempool {
        /// The transaction itself.
        transaction: Arc<Transaction>,
        /// Additional UTXOs which are known at the time of verification.
        known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>,
        /// Bug: this field should be the next block height, because some
        /// consensus rules depend on the exact height. See #1683.
        upgrade: NetworkUpgrade,
    },
}

impl<ZS> Service<Request> for Verifier<ZS>
where
    ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
    ZS::Future: Send + 'static,
{
    type Response = transaction::Hash;
    type Error = TransactionError;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    // TODO: break up each chunk into its own method
    fn call(&mut self, req: Request) -> Self::Future
transaction, known_utxos, upgrade, } => (transaction, known_utxos, upgrade), }; let mut spend_verifier = primitives::groth16::SPEND_VERIFIER.clone(); let mut output_verifier = primitives::groth16::OUTPUT_VERIFIER.clone(); let mut ed25519_verifier = primitives::ed25519::VERIFIER.clone(); let mut redjubjub_verifier = primitives::redjubjub::VERIFIER.clone(); let mut script_verifier = self.script_verifier.clone(); let span = tracing::debug_span!("tx", hash = %tx.hash()); async move { tracing::trace!(?tx); match &*tx { Transaction::V1 {.. } | Transaction::V2 {.. } | Transaction::V3 {.. } => { tracing::debug!(?tx, "got transaction with wrong version"); Err(TransactionError::WrongVersion) } Transaction::V4 { inputs, // outputs, // lock_time, // expiry_height, joinsplit_data, sapling_shielded_data, .. } => { // A set of asynchronous checks which must all succeed. // We finish by waiting on these below. let mut async_checks = FuturesUnordered::new(); // Do basic checks first check::has_inputs_and_outputs(&tx)?; // Handle transparent inputs and outputs. if tx.is_coinbase() { check::coinbase_tx_no_joinsplit_or_spend(&tx)?; } else { // feed all of the inputs to the script and shielded verifiers // the script_verifier also checks transparent sighashes, using its own implementation let cached_ffi_transaction = Arc::new(CachedFfiTransaction::new(tx.clone())); for input_index in 0..inputs.len() { let rsp = script_verifier.ready_and().await?.call(script::Request { upgrade, known_utxos: known_utxos.clone(), cached_ffi_transaction: cached_ffi_transaction.clone(), input_index, }); async_checks.push(rsp); } } let shielded_sighash = tx.sighash( upgrade, HashType::ALL, None, ); if let Some(joinsplit_data) = joinsplit_data { // XXX create a method on JoinSplitData // that prepares groth16::Items with the correct proofs // and proof inputs, handling interstitial treestates // correctly. // Then, pass those items to self.joinsplit to verify them. // Consensus rule: The joinSplitSig MUST represent a // valid signature, under joinSplitPubKey, of the // sighash. // // Queue the validation of the JoinSplit signature while // adding the resulting future to our collection of // async checks that (at a minimum) must pass for the // transaction to verify. // // https://zips.z.cash/protocol/protocol.pdf#sproutnonmalleability // https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus let rsp = ed25519_verifier .ready_and() .await? .call((joinsplit_data.pub_key, joinsplit_data.sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } if let Some(shielded_data) = sapling_shielded_data { check::shielded_balances_match(&shielded_data)?; for spend in shielded_data.spends_per_anchor() { // Consensus rule: cv and rk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]rk // MUST NOT be 𝒪_J. // // https://zips.z.cash/protocol/protocol.pdf#spenddesc check::spend_cv_rk_not_small_order(&spend)?; // Consensus rule: The proof π_ZKSpend MUST be valid // given a primary input formed from the other // fields except spendAuthSig. // // Queue the verification of the Groth16 spend proof // for each Spend description while adding the // resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let spend_rsp = spend_verifier .ready_and() .await? 
.call(primitives::groth16::ItemWrapper::from(&spend).into()); async_checks.push(spend_rsp.boxed()); // Consensus rule: The spend authorization signature // MUST be a valid SpendAuthSig signature over // SigHash using rk as the validating key. // // Queue the validation of the RedJubjub spend // authorization signature for each Spend // description while adding the resulting future to // our collection of async checks that (at a // minimum) must pass for the transaction to verify. let rsp = redjubjub_verifier .ready_and() .await? .call((spend.rk, spend.spend_auth_sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } for output in shielded_data.outputs() { // Consensus rule: cv and wpk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]wpk // MUST NOT be 𝒪_J. // // https://zips.z.cash/protocol/protocol.pdf#outputdesc check::output_cv_epk_not_small_order(output)?; // Consensus rule: The proof π_ZKOutput MUST be // valid given a primary input formed from the other // fields except C^enc and C^out. // // Queue the verification of the Groth16 output // proof for each Output description while adding // the resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let output_rsp = output_verifier .ready_and() .await? .call(primitives::groth16::ItemWrapper::from(output).into()); async_checks.push(output_rsp.boxed()); } let bvk = shielded_data.binding_verification_key(); // TODO: enable async verification and remove this block - #1939 { let item: zebra_chain::primitives::redjubjub::batch::Item = (bvk, shielded_data.binding_sig, &shielded_sighash).into(); item.verify_single().unwrap_or_else(|binding_sig_error| { let binding_sig_error = binding_sig_error.to_string(); tracing::warn!(%binding_sig_error, "ignoring"); metrics::counter!("zebra.error.sapling.binding", 1, "kind" => binding_sig_error); }); // Ignore errors until binding signatures are fixed //.map_err(|e| BoxError::from(Box::new(e)))?; } let _rsp = redjubjub_verifier .ready_and() .await? .call((bvk, shielded_data.binding_sig, &shielded_sighash).into()) .boxed(); // TODO: stop ignoring binding signature errors - #1939 // async_checks.push(rsp); } // Finally, wait for all asynchronous checks to complete // successfully, or fail verification if they error. while let Some(check) = async_checks.next().await { tracing::trace!(?check, remaining = async_checks.len()); check?; } Ok(tx.hash()) } Transaction::V5 {.. } => { unimplemented!("v5 transaction validation as specified in ZIP-216, ZIP-224, ZIP-225, and ZIP-244") } } } .instrument(span) .boxed() } }
{ let is_mempool = match req { Request::Block { .. } => false, Request::Mempool { .. } => true, }; if is_mempool { // XXX determine exactly which rules apply to mempool transactions unimplemented!(); } let (tx, known_utxos, upgrade) = match req { Request::Block { transaction, known_utxos, height, } => { let upgrade = NetworkUpgrade::current(self.network, height); (transaction, known_utxos, upgrade) } Request::Mempool {
identifier_body
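
The verifier body above pushes every per-input script check and every signature or proof check into a `FuturesUnordered`, then drains the set with `while let Some(check) = async_checks.next().await { check?; }`, failing fast on the first error. A minimal sketch of that pattern, assuming only the `futures` crate and a stand-in check in place of real signature verification:

use futures::{
    future::{BoxFuture, FutureExt},
    stream::{FuturesUnordered, StreamExt},
};

async fn run_checks() -> Result<(), &'static str> {
    // Queue independent async checks; completion order does not matter.
    let mut async_checks: FuturesUnordered<BoxFuture<'static, Result<(), &'static str>>> =
        FuturesUnordered::new();
    for i in 0u32..3 {
        async_checks.push(
            async move {
                // Stand-in for a signature or proof verification.
                if i < 3 { Ok(()) } else { Err("check failed") }
            }
            .boxed(),
        );
    }
    // Drain the set, failing verification on the first error.
    while let Some(check) = async_checks.next().await {
        check?;
    }
    Ok(())
}

fn main() {
    futures::executor::block_on(run_checks()).expect("all checks pass");
}

Boxing each future (as the `.boxed()` calls in the source do) lets checks of different concrete types live in one collection.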
transaction.rs
use std::{ collections::HashMap, future::Future, pin::Pin, sync::Arc, task::{Context, Poll}, }; use futures::{ stream::{FuturesUnordered, StreamExt}, FutureExt, }; use tower::{Service, ServiceExt}; use tracing::Instrument; use zebra_chain::{ block, parameters::{Network, NetworkUpgrade}, transaction::{self, HashType, Transaction}, transparent, }; use zebra_script::CachedFfiTransaction; use zebra_state as zs; use crate::{error::TransactionError, primitives, script, BoxError}; mod check; /// Asynchronous transaction verification. #[derive(Debug, Clone)] pub struct Verifier<ZS> { network: Network, script_verifier: script::Verifier<ZS>, // spend_verifier: groth16::Verifier, // output_verifier: groth16::Verifier, // joinsplit_verifier: groth16::Verifier, } impl<ZS> Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone +'static, ZS::Future: Send +'static, { // XXX: how should this struct be constructed? pub fn new(network: Network, script_verifier: script::Verifier<ZS>) -> Self { // let (spend_verifier, output_verifier, joinsplit_verifier) = todo!(); Self { network, script_verifier, // spend_verifier, // output_verifier, // joinsplit_verifier, } } } /// Specifies whether a transaction should be verified as part of a block or as /// part of the mempool. /// /// Transaction verification has slightly different consensus rules, depending on /// whether the transaction is to be included in a block on in the mempool. #[allow(dead_code)] pub enum Request { /// Verify the supplied transaction as part of a block. Block { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>, /// The height of the block containing this transaction, used to /// determine the applicable network upgrade. height: block::Height, }, /// Verify the supplied transaction as part of the mempool. Mempool { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>, /// Bug: this field should be the next block height, because some /// consensus rules depend on the exact height. See #1683. upgrade: NetworkUpgrade, }, } impl<ZS> Service<Request> for Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone +'static, ZS::Future: Send +'static, { type Response = transaction::Hash; type Error = TransactionError; type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send +'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } // TODO: break up each chunk into its own method fn call(&mut self, req: Request) -> Self::Future { let is_mempool = match req { Request::Block {.. } => false, Request::Mempool {.. 
} => true, }; if is_mempool { // XXX determine exactly which rules apply to mempool transactions unimplemented!(); } let (tx, known_utxos, upgrade) = match req { Request::Block { transaction, known_utxos, height, } => { let upgrade = NetworkUpgrade::current(self.network, height); (transaction, known_utxos, upgrade) } Request::Mempool { transaction, known_utxos, upgrade, } => (transaction, known_utxos, upgrade), }; let mut spend_verifier = primitives::groth16::SPEND_VERIFIER.clone(); let mut output_verifier = primitives::groth16::OUTPUT_VERIFIER.clone(); let mut ed25519_verifier = primitives::ed25519::VERIFIER.clone(); let mut redjubjub_verifier = primitives::redjubjub::VERIFIER.clone(); let mut script_verifier = self.script_verifier.clone(); let span = tracing::debug_span!("tx", hash = %tx.hash()); async move { tracing::trace!(?tx); match &*tx { Transaction::V1 {.. } | Transaction::V2 {.. } | Transaction::V3 {.. } => { tracing::debug!(?tx, "got transaction with wrong version"); Err(TransactionError::WrongVersion) } Transaction::V4 { inputs, // outputs, // lock_time, // expiry_height, joinsplit_data, sapling_shielded_data, .. } => { // A set of asynchronous checks which must all succeed. // We finish by waiting on these below. let mut async_checks = FuturesUnordered::new(); // Do basic checks first check::has_inputs_and_outputs(&tx)?; // Handle transparent inputs and outputs. if tx.is_coinbase() { check::coinbase_tx_no_joinsplit_or_spend(&tx)?; } else { // feed all of the inputs to the script and shielded verifiers // the script_verifier also checks transparent sighashes, using its own implementation let cached_ffi_transaction = Arc::new(CachedFfiTransaction::new(tx.clone())); for input_index in 0..inputs.len() { let rsp = script_verifier.ready_and().await?.call(script::Request { upgrade, known_utxos: known_utxos.clone(), cached_ffi_transaction: cached_ffi_transaction.clone(), input_index, }); async_checks.push(rsp); } } let shielded_sighash = tx.sighash( upgrade, HashType::ALL, None, ); if let Some(joinsplit_data) = joinsplit_data { // XXX create a method on JoinSplitData // that prepares groth16::Items with the correct proofs // and proof inputs, handling interstitial treestates // correctly. // Then, pass those items to self.joinsplit to verify them. // Consensus rule: The joinSplitSig MUST represent a // valid signature, under joinSplitPubKey, of the // sighash. // // Queue the validation of the JoinSplit signature while
// https://zips.z.cash/protocol/protocol.pdf#sproutnonmalleability // https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus let rsp = ed25519_verifier .ready_and() .await? .call((joinsplit_data.pub_key, joinsplit_data.sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } if let Some(shielded_data) = sapling_shielded_data { check::shielded_balances_match(&shielded_data)?; for spend in shielded_data.spends_per_anchor() { // Consensus rule: cv and rk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]rk // MUST NOT be 𝒪_J. // // https://zips.z.cash/protocol/protocol.pdf#spenddesc check::spend_cv_rk_not_small_order(&spend)?; // Consensus rule: The proof π_ZKSpend MUST be valid // given a primary input formed from the other // fields except spendAuthSig. // // Queue the verification of the Groth16 spend proof // for each Spend description while adding the // resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let spend_rsp = spend_verifier .ready_and() .await? .call(primitives::groth16::ItemWrapper::from(&spend).into()); async_checks.push(spend_rsp.boxed()); // Consensus rule: The spend authorization signature // MUST be a valid SpendAuthSig signature over // SigHash using rk as the validating key. // // Queue the validation of the RedJubjub spend // authorization signature for each Spend // description while adding the resulting future to // our collection of async checks that (at a // minimum) must pass for the transaction to verify. let rsp = redjubjub_verifier .ready_and() .await? .call((spend.rk, spend.spend_auth_sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } for output in shielded_data.outputs() { // Consensus rule: cv and wpk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]wpk // MUST NOT be 𝒪_J. // // https://zips.z.cash/protocol/protocol.pdf#outputdesc check::output_cv_epk_not_small_order(output)?; // Consensus rule: The proof π_ZKOutput MUST be // valid given a primary input formed from the other // fields except C^enc and C^out. // // Queue the verification of the Groth16 output // proof for each Output description while adding // the resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let output_rsp = output_verifier .ready_and() .await? .call(primitives::groth16::ItemWrapper::from(output).into()); async_checks.push(output_rsp.boxed()); } let bvk = shielded_data.binding_verification_key(); // TODO: enable async verification and remove this block - #1939 { let item: zebra_chain::primitives::redjubjub::batch::Item = (bvk, shielded_data.binding_sig, &shielded_sighash).into(); item.verify_single().unwrap_or_else(|binding_sig_error| { let binding_sig_error = binding_sig_error.to_string(); tracing::warn!(%binding_sig_error, "ignoring"); metrics::counter!("zebra.error.sapling.binding", 1, "kind" => binding_sig_error); }); // Ignore errors until binding signatures are fixed //.map_err(|e| BoxError::from(Box::new(e)))?; } let _rsp = redjubjub_verifier .ready_and() .await? .call((bvk, shielded_data.binding_sig, &shielded_sighash).into()) .boxed(); // TODO: stop ignoring binding signature errors - #1939 // async_checks.push(rsp); } // Finally, wait for all asynchronous checks to complete // successfully, or fail verification if they error. 
while let Some(check) = async_checks.next().await { tracing::trace!(?check, remaining = async_checks.len()); check?; } Ok(tx.hash()) } Transaction::V5 {.. } => { unimplemented!("v5 transaction validation as specified in ZIP-216, ZIP-224, ZIP-225, and ZIP-244") } } } .instrument(span) .boxed() } }
// adding the resulting future to our collection of // async checks that (at a minimum) must pass for the // transaction to verify. //
random_line_split
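
The `Service` implementation above follows the standard tower shape: `poll_ready` reports backpressure and `call` returns a boxed future that does the real work. A minimal sketch of that shape with a hypothetical `Shout` service (the zebra code itself calls `ready_and().await?` on downstream services before `call`, which is the readiness half of this same contract in older tower releases):

use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

use tower::Service;

/// A hypothetical service that uppercases its request.
struct Shout;

impl Service<String> for Shout {
    type Response = String;
    type Error = std::convert::Infallible;
    type Future =
        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;

    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Always ready; a real verifier might gate on resource limits here.
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: String) -> Self::Future {
        // Return a boxed future, matching the `async move { ... }.boxed()`
        // structure of the verifier above.
        Box::pin(async move { Ok(req.to_uppercase()) })
    }
}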
transaction.rs
use std::{
    collections::HashMap,
    future::Future,
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
};

use futures::{
    stream::{FuturesUnordered, StreamExt},
    FutureExt,
};
use tower::{Service, ServiceExt};
use tracing::Instrument;

use zebra_chain::{
    block,
    parameters::{Network, NetworkUpgrade},
    transaction::{self, HashType, Transaction},
    transparent,
};
use zebra_script::CachedFfiTransaction;
use zebra_state as zs;

use crate::{error::TransactionError, primitives, script, BoxError};

mod check;

/// Asynchronous transaction verification.
#[derive(Debug, Clone)]
pub struct Verifier<ZS> {
    network: Network,
    script_verifier: script::Verifier<ZS>,
    // spend_verifier: groth16::Verifier,
    // output_verifier: groth16::Verifier,
    // joinsplit_verifier: groth16::Verifier,
}

impl<ZS> Verifier<ZS>
where
    ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone + 'static,
    ZS::Future: Send + 'static,
{
    // XXX: how should this struct be constructed?
    pub fn new(network: Network, script_verifier: script::Verifier<ZS>) -> Self {
        // let (spend_verifier, output_verifier, joinsplit_verifier) = todo!();
        Self {
            network,
            script_verifier,
            // spend_verifier,
            // output_verifier,
            // joinsplit_verifier,
        }
    }
}

/// Specifies whether a transaction should be verified as part of a block or as
/// part of the mempool.
///
/// Transaction verification has slightly different consensus rules, depending on
/// whether the transaction is to be included in a block or in the mempool.
#[allow(dead_code)]
pub enum
{ /// Verify the supplied transaction as part of a block. Block { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>, /// The height of the block containing this transaction, used to /// determine the applicable network upgrade. height: block::Height, }, /// Verify the supplied transaction as part of the mempool. Mempool { /// The transaction itself. transaction: Arc<Transaction>, /// Additional UTXOs which are known at the time of verification. known_utxos: Arc<HashMap<transparent::OutPoint, zs::Utxo>>, /// Bug: this field should be the next block height, because some /// consensus rules depend on the exact height. See #1683. upgrade: NetworkUpgrade, }, } impl<ZS> Service<Request> for Verifier<ZS> where ZS: Service<zs::Request, Response = zs::Response, Error = BoxError> + Send + Clone +'static, ZS::Future: Send +'static, { type Response = transaction::Hash; type Error = TransactionError; type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send +'static>>; fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } // TODO: break up each chunk into its own method fn call(&mut self, req: Request) -> Self::Future { let is_mempool = match req { Request::Block {.. } => false, Request::Mempool {.. } => true, }; if is_mempool { // XXX determine exactly which rules apply to mempool transactions unimplemented!(); } let (tx, known_utxos, upgrade) = match req { Request::Block { transaction, known_utxos, height, } => { let upgrade = NetworkUpgrade::current(self.network, height); (transaction, known_utxos, upgrade) } Request::Mempool { transaction, known_utxos, upgrade, } => (transaction, known_utxos, upgrade), }; let mut spend_verifier = primitives::groth16::SPEND_VERIFIER.clone(); let mut output_verifier = primitives::groth16::OUTPUT_VERIFIER.clone(); let mut ed25519_verifier = primitives::ed25519::VERIFIER.clone(); let mut redjubjub_verifier = primitives::redjubjub::VERIFIER.clone(); let mut script_verifier = self.script_verifier.clone(); let span = tracing::debug_span!("tx", hash = %tx.hash()); async move { tracing::trace!(?tx); match &*tx { Transaction::V1 {.. } | Transaction::V2 {.. } | Transaction::V3 {.. } => { tracing::debug!(?tx, "got transaction with wrong version"); Err(TransactionError::WrongVersion) } Transaction::V4 { inputs, // outputs, // lock_time, // expiry_height, joinsplit_data, sapling_shielded_data, .. } => { // A set of asynchronous checks which must all succeed. // We finish by waiting on these below. let mut async_checks = FuturesUnordered::new(); // Do basic checks first check::has_inputs_and_outputs(&tx)?; // Handle transparent inputs and outputs. 
if tx.is_coinbase() { check::coinbase_tx_no_joinsplit_or_spend(&tx)?; } else { // feed all of the inputs to the script and shielded verifiers // the script_verifier also checks transparent sighashes, using its own implementation let cached_ffi_transaction = Arc::new(CachedFfiTransaction::new(tx.clone())); for input_index in 0..inputs.len() { let rsp = script_verifier.ready_and().await?.call(script::Request { upgrade, known_utxos: known_utxos.clone(), cached_ffi_transaction: cached_ffi_transaction.clone(), input_index, }); async_checks.push(rsp); } } let shielded_sighash = tx.sighash( upgrade, HashType::ALL, None, ); if let Some(joinsplit_data) = joinsplit_data { // XXX create a method on JoinSplitData // that prepares groth16::Items with the correct proofs // and proof inputs, handling interstitial treestates // correctly. // Then, pass those items to self.joinsplit to verify them. // Consensus rule: The joinSplitSig MUST represent a // valid signature, under joinSplitPubKey, of the // sighash. // // Queue the validation of the JoinSplit signature while // adding the resulting future to our collection of // async checks that (at a minimum) must pass for the // transaction to verify. // // https://zips.z.cash/protocol/protocol.pdf#sproutnonmalleability // https://zips.z.cash/protocol/protocol.pdf#txnencodingandconsensus let rsp = ed25519_verifier .ready_and() .await? .call((joinsplit_data.pub_key, joinsplit_data.sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } if let Some(shielded_data) = sapling_shielded_data { check::shielded_balances_match(&shielded_data)?; for spend in shielded_data.spends_per_anchor() { // Consensus rule: cv and rk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]rk // MUST NOT be 𝒪_J. // // https://zips.z.cash/protocol/protocol.pdf#spenddesc check::spend_cv_rk_not_small_order(&spend)?; // Consensus rule: The proof π_ZKSpend MUST be valid // given a primary input formed from the other // fields except spendAuthSig. // // Queue the verification of the Groth16 spend proof // for each Spend description while adding the // resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. let spend_rsp = spend_verifier .ready_and() .await? .call(primitives::groth16::ItemWrapper::from(&spend).into()); async_checks.push(spend_rsp.boxed()); // Consensus rule: The spend authorization signature // MUST be a valid SpendAuthSig signature over // SigHash using rk as the validating key. // // Queue the validation of the RedJubjub spend // authorization signature for each Spend // description while adding the resulting future to // our collection of async checks that (at a // minimum) must pass for the transaction to verify. let rsp = redjubjub_verifier .ready_and() .await? .call((spend.rk, spend.spend_auth_sig, &shielded_sighash).into()); async_checks.push(rsp.boxed()); } for output in shielded_data.outputs() { // Consensus rule: cv and wpk MUST NOT be of small // order, i.e. [h_J]cv MUST NOT be 𝒪_J and [h_J]wpk // MUST NOT be 𝒪_J. // // https://zips.z.cash/protocol/protocol.pdf#outputdesc check::output_cv_epk_not_small_order(output)?; // Consensus rule: The proof π_ZKOutput MUST be // valid given a primary input formed from the other // fields except C^enc and C^out. // // Queue the verification of the Groth16 output // proof for each Output description while adding // the resulting future to our collection of async // checks that (at a minimum) must pass for the // transaction to verify. 
                            let output_rsp = output_verifier
                                .ready_and()
                                .await?
                                .call(primitives::groth16::ItemWrapper::from(output).into());

                            async_checks.push(output_rsp.boxed());
                        }

                        let bvk = shielded_data.binding_verification_key();

                        // TODO: enable async verification and remove this block - #1939
                        {
                            let item: zebra_chain::primitives::redjubjub::batch::Item =
                                (bvk, shielded_data.binding_sig, &shielded_sighash).into();
                            item.verify_single().unwrap_or_else(|binding_sig_error| {
                                let binding_sig_error = binding_sig_error.to_string();
                                tracing::warn!(%binding_sig_error, "ignoring");
                                metrics::counter!("zebra.error.sapling.binding", 1, "kind" => binding_sig_error);
                            });
                            // Ignore errors until binding signatures are fixed
                            // .map_err(|e| BoxError::from(Box::new(e)))?;
                        }

                        let _rsp = redjubjub_verifier
                            .ready_and()
                            .await?
                            .call((bvk, shielded_data.binding_sig, &shielded_sighash).into())
                            .boxed();

                        // TODO: stop ignoring binding signature errors - #1939
                        // async_checks.push(rsp);
                    }

                    // Finally, wait for all asynchronous checks to complete
                    // successfully, or fail verification if they error.
                    while let Some(check) = async_checks.next().await {
                        tracing::trace!(?check, remaining = async_checks.len());
                        check?;
                    }

                    Ok(tx.hash())
                }
                Transaction::V5 { .. } => {
                    unimplemented!("v5 transaction validation as specified in ZIP-216, ZIP-224, ZIP-225, and ZIP-244")
                }
            }
        }
        .instrument(span)
        .boxed()
    }
}
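// The Block/Mempool split above drives the whole entry point: `call` first asks
// whether the request came from the mempool and bails out, then destructures
// either variant into the same (transaction, UTXOs, upgrade) triple. A minimal,
// self-contained sketch of that dispatch shape (the String transaction, u32
// height, and &'static str upgrade are stand-ins, not zebra types):
use std::sync::Arc;

#[derive(Debug)]
enum Request {
    Block { transaction: Arc<String>, height: u32 },
    Mempool { transaction: Arc<String>, upgrade: &'static str },
}

fn is_mempool(req: &Request) -> bool {
    // Same question the verifier's `call` answers with its first match.
    matches!(req, Request::Mempool { .. })
}

fn main() {
    let tx = Arc::new("raw tx bytes".to_string());
    let block = Request::Block { transaction: tx.clone(), height: 1_046_400 };
    let pool = Request::Mempool { transaction: tx, upgrade: "Canopy" };
    println!("{:?} -> mempool? {}", block, is_mempool(&block));
    println!("{:?} -> mempool? {}", pool, is_mempool(&pool));
}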
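// The verifier queues every script, Groth16, and signature check into a single
// FuturesUnordered and only then drains it, so independent checks run
// concurrently and the first error fails the transaction. A minimal sketch of
// that queue-then-drain pattern (assuming the `futures` and `tokio` crates;
// `check` stands in for the real verifier calls):
use futures::stream::{FuturesUnordered, StreamExt};

async fn check(id: u32) -> Result<(), String> {
    if id == 0 { Err("check 0 failed".into()) } else { Ok(()) }
}

#[tokio::main]
async fn main() {
    let mut async_checks = FuturesUnordered::new();
    for id in 0..4 {
        async_checks.push(check(id)); // queue work without awaiting it yet
    }
    // Results arrive in completion order, not push order.
    while let Some(result) = async_checks.next().await {
        if let Err(e) = result {
            println!("verification failed: {}", e);
            return;
        }
    }
    println!("all checks passed");
}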
main.rs
use sdl2::event::Event;
use sdl2::event::WindowEvent;
use sdl2::keyboard::Keycode;
use sdl2::gfx::primitives::DrawRenderer;
use sdl2::pixels::Color;
use sdl2::rect::Rect;
use sdl2::render::Renderer;
use sdl2::render::TextureQuery;
use sdl2::rwops::RWops;
use sdl2::ttf::Font;
use std::f32;
use std::f32::consts::PI;
use std::collections::HashMap;
use std::thread;
use std::time::{Duration, Instant};
use keyboard::Keyboard;
use keyboard::HexAddr;
use keyboard::HexKey;
use keyboard::HarmonicKeyboard;
use keyboard::JammerKeyboard;
use midir::{MidiOutput, MidiOutputConnection};
use std::error::Error;

const INCREMENT_ANGLE: f32 = 2.0 * PI / 6.0; // 60 degrees in radians
const MOUSE_OID: i64 = -1;
const NOTE_ON_MSG: u8 = 0x90;
const NOTE_OFF_MSG: u8 = 0x80;

/* TODO
 * Octave Shifting
 * Keyboard Rotation
 * Multi Key Highlighting
 * Readme for github
 * Factor out midi
 * Better error handling/remove .unwraps
 * Add Guitar Layout
 * Consider changing draw_keyboard so that it draws only the key changes and not the whole board
 *   every time.
 * Correct velocity controls?
 */

fn get_hexagon(x: i16, y: i16, radius: i16) -> ([i16; 6], [i16; 6]) {
    // TODO this function needs to be broken up into a calculate and translate section, we don't
    // need to redo the sin math every time.
    let r: f32 = radius as f32;
    let mut angle: f32 = INCREMENT_ANGLE / 2.0;

    let mut xs: [i16; 6] = [0; 6];
    let mut ys: [i16; 6] = [0; 6];

    for i in 0..6 {
        let xo = angle.sin() * r;
        let yo = angle.cos() * r;
        angle += INCREMENT_ANGLE;
        xs[i] = x + xo.round() as i16;
        ys[i] = y + yo.round() as i16;
    }
    (xs, ys)
}

fn translate_hexagon(xlist: [i16; 6], ylist: [i16; 6], x: i16, y: i16) -> ([i16; 6], [i16; 6]) {
    let mut xs: [i16; 6] = [0; 6];
    let mut ys: [i16; 6] = [0; 6];
    for i in 0..6 {
        xs[i] = xlist[i] + x;
        ys[i] = ylist[i] + y;
    }
    (xs, ys)
}

/// Given the x and y locations of a click, return the address of the hexagon.
/// The logic I'm doing in here is a little crazy.
/// By rotating the coordinate grid three times I can figure out the "index" in terms of number of
/// hexagons from the starting point.
/// This effectively tessellates the hexagons into 6 triangles; this algorithm gets the location of
/// the triangle clicked, then figures out which hexagon that triangle belongs in.
fn get_hex_address(xo: f32, yo: f32, hexagon: &HexagonDescription) -> HexAddr {
    let hex_height = hexagon.half_height as f32;
    let plane1 = yo / hex_height;

    let incangle = INCREMENT_ANGLE * -2.0; // -120 degrees
    //let x = xo * incangle.cos() + yo * incangle.sin();
    let y = xo * incangle.sin() + yo * incangle.cos();
    let plane2 = -y / hex_height;
    // TODO why did I need to multiply this by two??
    let incangle = INCREMENT_ANGLE * -4.0; // -240 degrees
    //let x = xo * incangle.cos() + yo * incangle.sin();
    let y = xo * incangle.sin() + yo * incangle.cos();
    let plane3 = y / hex_height;

    let cord1 = plane1.floor() as i16;
    let mut cord2 = plane2.floor() as i16;
    let mut cord3 = plane3.floor() as i16;

    // left justify the coordinate system for my own sanity while doing this modulo math
    cord2 -= cord1 / 2;
    cord3 += cord1 / 2 + 1;

    let mut y = cord1;
    let mut x = cord2 / 3;
    //println!("a: {} b:{} c:{}", cord1, cord2, cord3);
    if cord1 % 2 == 0 {
        // white down
        if cord2 % 3 == 0 {
            //println!("white");
            y += 1;
        } else if cord3 % 3 == 1 && cord2 % 3 == 1 {
            //println!("white");
            y += 1;
        } else {
            //println!("purple");
            x += 1;
        }
    } else {
        // white up
        if cord2 % 3 == 1 {
            //println!("white");
        } else if cord3 % 3 == 0 && cord2 % 3 == 0 {
            //println!("white");
        } else {
            //println!("purple");
            y += 1;
            if cord2 % 3 != 0 {
                x += 1;
            }
        }
    }
    //println!("x:{}, y:{}", x, y);
    HexAddr { x: x, y: y }
}

fn note_to_color(note: u8, config: &Config) -> Color {
    // 0 for root, 1 for in key, 2 for sharp or flat
    let colors = &config.colors;
    let key = config.root_note;
    //C C# D D# E F F# G G# A A# B
    let major_color_mask = [
        colors.root,             // c
        colors.out_of_key,       // c#
        colors.in_key_and_penta, // d
        colors.out_of_key,       // d#
        colors.in_key_and_penta, // e
        colors.in_key,           // f
        colors.out_of_key,       // f#
        colors.in_key_and_penta, // g
        colors.out_of_key,       // g#
        colors.in_key_and_penta, // a
        colors.out_of_key,       // a#
        colors.in_key,           // b
    ];
    // computed c relative
    let minor_color_mask = [
        colors.in_key_and_penta, // c
        colors.out_of_key,       // c#
        colors.in_key_and_penta, // d
        colors.out_of_key,       // d#
        colors.in_key_and_penta, // e
        colors.in_key,           // f
        colors.out_of_key,       // f#
        colors.in_key_and_penta, // g
        colors.out_of_key,       // g#
        colors.root,             // a
        colors.out_of_key,       // a#
        colors.in_key,           // b
    ];
    let index = (note + key) % 12;
    if config.is_major {
        major_color_mask[index as usize]
    } else {
        minor_color_mask[index as usize]
    }
}

struct Config {
    colors: ColorProfile,
    root_note: u8, // 0 for c
    is_major: bool,
    hexagon: HexagonDescription,
    width: u32,
    height: u32,
    rows: i16,
    cols: i16,
    velocity: u8,
}

struct ColorProfile {
    line_color: Color,
    root: Color,
    out_of_key: Color,
    in_key_and_penta: Color,
    in_key: Color,
    white: Color,
}

#[derive(Debug)]
struct HexagonDescription {
    width: i16,
    height: i16,
    half_height: i16,
    radius: i16,
    x_vec: [i16; 6],
    y_vec: [i16; 6],
}

fn draw_keyboard(
    renderer: &mut Renderer,
    font: &Font,
    config: &Config,
    keyboard: &Keyboard,
    pressed_keys: Vec<HexAddr>,
) -> Result<(), String> {
    let rows = config.rows;
    let cols = config.cols;

    for row in 0..rows {
        for col in 0..cols {
            let addr = HexAddr { x: col, y: row };
            let is_even = row % 2 == 0;
            let (mut x_offset, y_offset) = match is_even {
                true => (
                    (config.hexagon.width + config.hexagon.radius) * col,
                    row * config.hexagon.half_height,
                ),
                false => (
                    (config.hexagon.width + config.hexagon.radius) * col
                        + config.hexagon.radius
                        + config.hexagon.radius / 2,
                    row * config.hexagon.half_height,
                ),
            };
            x_offset -= config.hexagon.width / 2;

            let (xs, ys) =
                translate_hexagon(config.hexagon.x_vec, config.hexagon.y_vec, x_offset, y_offset);
            let key_info = keyboard.get_key_info(addr);
            let (color, label) = if let Some(key_info) = key_info {
                (note_to_color(key_info.note, config), key_info.label)
            } else {
                (config.colors.line_color, " ".to_string())
            };
            let polygon_color = match pressed_keys.contains(&addr) {
                true => config.colors.white,
                false => color,
            };
            try!(renderer.filled_polygon(&xs, &ys, polygon_color));
            try!(renderer.polygon(&xs, &ys, config.colors.line_color));

            // TODO cache textures for the hex labels
            // if we don't have a keyboard then just print the row and column numbers
            let surface = font
                .render(label.as_str())
                .blended(config.colors.line_color)
                .unwrap();
            let mut texture = renderer.create_texture_from_surface(&surface).unwrap();
            let TextureQuery { width, height, .. } = texture.query();
            let label_x = (x_offset as i32 - width as i32 / 2) as i32;
            let label_y = (y_offset as i32 - height as i32 / 2) as i32;
            let target = Rect::new(label_x, label_y, width, height);
            try!(renderer.copy(&mut texture, None, Some(target)));
        }
    }
    Ok(())
}

struct KeyboardState<'a> {
    active_presses_map: HashMap<i64, HexAddr>,
    config: &'a Config,
    connection_out: &'a mut MidiOutputConnection,
}

impl<'a> KeyboardState<'a> {
    fn start_note(&mut self, addr: HexAddr, keyboard: &mut Keyboard) {
        let key = keyboard.get_key_info(addr);
        if let Some(x) = key {
            let res = self
                .connection_out
                .send(&[NOTE_ON_MSG, x.note, self.config.velocity]);
            if let Err(err) = res {
                println!("Error Sending Midi Note {}", err);
            };
        };
    }
    fn end_note(&mut self, addr: HexAddr, keyboard: &mut Keyboard) {
        let key = keyboard.get_key_info(addr);
        if let Some(x) = key {
            let res = self
                .connection_out
                .send(&[NOTE_OFF_MSG, x.note, self.config.velocity]);
            if let Err(err) = res {
                println!("Error Sending Midi Note {}", err);
            };
        };
    }
    fn on_press(&mut self, oid: i64, x: f32, y: f32, keyboard: &mut Keyboard) {
        let addr = get_hex_address(x, y, &self.config.hexagon);
        self.active_presses_map.insert(oid, addr);
        self.start_note(addr, keyboard);
    }
    fn on_release(&mut self, oid: i64, keyboard: &mut Keyboard) {
        match self.active_presses_map.remove(&oid) {
            Some(addr) => self.end_note(addr, keyboard),
            None => (),
        }
    }
    fn on_move(&mut self, oid: i64, x: f32, y: f32, keyboard: &mut Keyboard) {
        let addr = get_hex_address(x, y, &self.config.hexagon);
        match self.active_presses_map.get(&oid) {
            None => self.start_note(addr, keyboard),
            Some(&old_addr) => {
                if addr != old_addr {
                    self.start_note(addr, keyboard);
                    self.end_note(old_addr, keyboard);
                }
            }
        };
        self.active_presses_map.insert(oid, addr);
    }
    fn get_pressed(&self) -> Vec<HexAddr> {
        // TODO this iteration is SLOW and this function is called once per hexagon
        // TODO make this function FAST!
        let mut vec = Vec::new();
        for (_, &value) in &self.active_presses_map {
            vec.push(value);
        }
        vec
    }
}

fn get_midi_connection() -> Result<MidiOutputConnection, Box<Error>> {
    // TODO improve midi selection criteria, maybe pick off of command line.
    let midi_out = try!(MidiOutput::new("Isomidi"));
    let out_port: u32 = match midi_out.port_count() {
        0 => return Err("no output port found".into()),
        _ => {
            println!(
                "Choosing the last available output port: {}",
                midi_out.port_name(midi_out.port_count() - 1).unwrap()
            );
            midi_out.port_count() - 1
        }
    };
    println!("\nOpening connection");
    Ok(try!(midi_out.connect(out_port, "isomidi").map_err(|e| e.kind())))
}

fn main() {
    /////////////////////////
    ///// CONSTANTS
    /////////////////////////
    // https://coolors.co/f4d06f-ff8811-9dd9d2-fff8f0-392f5a
    let radius = 75;
    let screen_height = 1200;
    let screen_width = 1800;
    let ttf_font_bytes = include_bytes!("FantasqueSansMono-Regular.ttf");

    let mut connection_out = get_midi_connection().unwrap();

    /////////////////////////
    //// SDL Setup
    /////////////////////////
    let sdl_context = sdl2::init().unwrap();
    let video_subsystem = sdl_context.video().unwrap();
    video_subsystem.gl_attr().set_multisample_samples(8);
    let ttf_context = sdl2::ttf::init().unwrap();
    let window = video_subsystem
        .window("Isomidi", screen_width, screen_height)
        .position_centered()
        .opengl()
        .resizable()
        .build()
        .unwrap();
    let mut renderer = window.renderer().build().unwrap();

    let font_rwop = RWops::from_bytes(ttf_font_bytes).unwrap();
    let keyboard_font = ttf_context.load_font_from_rwops(font_rwop, 20).unwrap();
    // be bold
    // keyboard_font.set_style(sdl2::ttf::STYLE_BOLD);

    // Draw a black screen
    renderer.set_draw_color(Color::RGB(0, 0, 0));
    renderer.clear();
    renderer.present();

    let mut event_pump = sdl_context.event_pump().unwrap();

    /////////////////////////
    //// Load the keyboard
    /////////////////////////
    //let mut keyboard = JammerKeyboard {};

    'config: loop {
        /////////////////////////
        ///// Derived Constants
        /////////////////////////
        let colors = ColorProfile {
            line_color: Color::RGB(0, 0, 0),
            root: Color::RGB(0xf4, 0xD0, 0x6F),             // root
            out_of_key: Color::RGB(0xff, 0x88, 0x11),       // sharp/flat
            in_key_and_penta: Color::RGB(0x9D, 0x9D, 0xD2), // in key & pentatonic
            in_key: Color::RGB(0xba, 0xba, 0xdf),           // in key
            white: Color::RGB(0x39, 0x2F, 0x5A),
        };

        let (hexagon_x, hexagon_y) = get_hexagon(0, 0, radius);
        let half_height = ((INCREMENT_ANGLE).sin() * radius as f32).round() as i16;
        let hexagon = HexagonDescription {
            width: (radius * 2) as i16,
            half_height: half_height,
            height: half_height * 2,
            radius: radius,
            x_vec: hexagon_x,
            y_vec: hexagon_y,
        };
        println!("hexagon: {:?}", hexagon);

        let size = {
            let mut window = renderer.window_mut().unwrap();
            window.size()
        };
        let rows = (size.1 as i16 / hexagon.height as i16) * 2 + 3;
        let cols = (size.0 as i16 / (hexagon.width + hexagon.radius)) as i16 + 2;

        let mut config = Config {
            colors: colors,
            root_note: 0, // C
            is_major: true,
            hexagon: hexagon,
            width: size.0,
            height: size.1,
            rows: rows,
            cols: cols,
            velocity: 70,
        };

        let mut keyboard = HarmonicKeyboard {};
        let mut keyboard_state = KeyboardState {
            config: &config,
            active_presses_map: HashMap::new(),
            connection_out: &mut connection_out,
        };

        /////////////////////////
        //// Main loop
        /////////////////////////
        let mut frame_count = 0;
        let mut last_time = Instant::now();
        let mut first_run = true;
        'render: loop {
            // TODO sleep till next event?
            let sleep_time = Duration::from_millis(10);
            thread::sleep(sleep_time);

            // TODO: How are we going to do multi finger tracking and mouse tracking?
            // list of active fingerids / mouse id plus the current hex addr.
            // on hex addr change fire on_key_press
            let mut trigger_draw = false;
            if first_run {
                trigger_draw = true;
                first_run = false
            }
            for event in event_pump.poll_iter() {
                match event {
                    Event::Quit { .. }
                    | Event::KeyDown {
                        keycode: Some(Keycode::Escape),
                        ..
                    } => {
                        println!("Exiting");
                        break 'config
                    },
                    Event::MouseButtonDown { x, y, .. } => {
                        keyboard_state.on_press(MOUSE_OID, x as f32, y as f32, &mut keyboard);
                        trigger_draw = true;
                    },
                    Event::MouseButtonUp { .. } => {
                        keyboard_state.on_release(MOUSE_OID, &mut keyboard);
                        trigger_draw = true;
                    },
                    Event::MouseMotion { x, y, mousestate, .. } => {
                        // track only if left mouse button is down
                        if mousestate.left() {
                            keyboard_state.on_move(MOUSE_OID, x as f32, y as f32, &mut keyboard);
                            trigger_draw = true;
                        }
                    },
                    Event::FingerDown { x, y, finger_id, .. } => {
                        keyboard_state.on_press(finger_id, x as f32, y as f32, &mut keyboard);
                        trigger_draw = true;
                    },
                    Event::FingerMotion { x, y, finger_id, .. } => {
                        keyboard_state.on_move(finger_id, x as f32, y as f32, &mut keyboard);
                        trigger_draw = true;
                    },
                    Event::FingerUp { finger_id, .. } => {
                        keyboard_state.on_release(finger_id, &mut keyboard);
                        trigger_draw = true;
                    },
                    Event::Window { win_event, .. } => {
                        match win_event {
                            WindowEvent::SizeChanged(width, height) => {
                                // breaks out of the render loop and reconfigures the application
                                break 'render
                            }
                            _ => {}
                        }
                    },
                    _ => {}
                }
            }
            if trigger_draw {
                renderer.set_draw_color(config.colors.line_color);
                renderer.clear();
                draw_keyboard(
                    &mut renderer,
                    &keyboard_font,
                    &config,
                    &keyboard,
                    keyboard_state.get_pressed(),
                )
                .unwrap();
            }
            renderer.present();
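// The HexagonDescription fields derived in main() (width = 2 * radius,
// half_height = radius * sin(60°)) follow directly from the vertex math in
// get_hexagon. A standalone sketch that recomputes the six vertices and checks
// those spans (hexagon_vertices is an illustrative copy, not part of main.rs):
use std::f32::consts::PI;

const INCREMENT_ANGLE: f32 = 2.0 * PI / 6.0; // 60 degrees, as above

fn hexagon_vertices(radius: f32) -> [(f32, f32); 6] {
    let mut angle = INCREMENT_ANGLE / 2.0;
    let mut verts = [(0.0, 0.0); 6];
    for v in verts.iter_mut() {
        *v = (angle.sin() * radius, angle.cos() * radius);
        angle += INCREMENT_ANGLE;
    }
    verts
}

fn main() {
    let r = 75.0_f32;
    let verts = hexagon_vertices(r);
    let xs: Vec<f32> = verts.iter().map(|v| v.0).collect();
    let ys: Vec<f32> = verts.iter().map(|v| v.1).collect();
    let width = xs.iter().cloned().fold(f32::MIN, f32::max)
        - xs.iter().cloned().fold(f32::MAX, f32::min);
    let height = ys.iter().cloned().fold(f32::MIN, f32::max)
        - ys.iter().cloned().fold(f32::MAX, f32::min);
    // width == 2 * r; height == 2 * r * sin(60°) == r * sqrt(3)
    println!("width = {:.1}, height = {:.1}, r*sqrt(3) = {:.1}", width, height, r * 3f32.sqrt());
}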
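// start_note/end_note speak raw MIDI: a channel-voice message is three bytes,
// (status, note number, velocity), and 0x90/0x80 are note-on/note-off on
// channel 1. A hypothetical standalone sketch of the bytes the midir
// connection sends (note_on/note_off are illustrations, not main.rs helpers):
const NOTE_ON_MSG: u8 = 0x90; // note-on, channel 1 (low nibble selects channel)
const NOTE_OFF_MSG: u8 = 0x80; // note-off, channel 1

fn note_on(note: u8, velocity: u8) -> [u8; 3] {
    [NOTE_ON_MSG, note, velocity]
}

fn note_off(note: u8) -> [u8; 3] {
    // Most synths ignore note-off velocity, but the byte must still be sent.
    [NOTE_OFF_MSG, note, 0]
}

fn main() {
    // Middle C is MIDI note 60; concert A (440 Hz) is 69.
    println!("{:02x?}", note_on(60, 70)); // [90, 3c, 46]
    println!("{:02x?}", note_off(60));    // [80, 3c, 00]
}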
render.rs
use crate::{
    html::{Attribute, Children, Element, EventListener, EventToMessage, Html},
    program::Program,
};
use itertools::{EitherOrBoth, Itertools};
use std::fmt::Debug;
use std::rc::Rc;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;

pub struct Renderer<Model, Msg> {
    program: Rc<Program<Model, Msg>>,
    to_remove: Vec<(web_sys::Node, web_sys::Node)>,
}

fn either_or_both_to_option_tuple<T>(pair: EitherOrBoth<T, T>) -> (Option<T>, Option<T>) {
    use itertools::EitherOrBoth::{Both, Left, Right};
    match pair {
        Both(a, b) => (Some(a), Some(b)),
        Left(a) => (Some(a), None),
        Right(b) => (None, Some(b)),
    }
}

impl<Model, Msg> Renderer<Model, Msg>
where
    Msg: PartialEq + Debug + Clone + 'static,
    Model: Debug + Clone + 'static,
{
    pub fn render(
        root: &web_sys::Node,
        program: &Rc<Program<Model, Msg>>,
        new_tree: &Html<Msg>,
        old_tree: &Option<Html<Msg>>,
    ) -> Result<(), JsValue> {
        let mut renderer = Renderer {
            program: program.clone(),
            to_remove: vec![],
        };

        // TODO: We should probably not assume that the number here is 0
        renderer.update_element(root, Some(new_tree), old_tree.as_ref(), 0)?;

        for (parent, child) in &renderer.to_remove {
            parent.remove_child(&child)?;
        }
        Ok(())
    }

    fn update_element(
        &mut self,
        parent: &web_sys::Node,
        new: Option<&Html<Msg>>,
        old: Option<&Html<Msg>>,
        index: u32,
    ) -> Result<(), JsValue> {
        match (old, new) {
            (None, Some(new_html)) => {
                // Node is added
                parent.append_child(&self.create_node(new_html)?)?;
            }
            (Some(_removed), None) => {
                // Node is removed
                if let Some(child) = parent.child_nodes().item(index) {
                    // Don't remove children until after every iteration is finished. If not, the
                    // indexes will not point to the correct nodes anymore
                    self.to_remove.push((parent.clone(), child));
                } else {
                    // console_log!(
                    //     "Could not find node with index {} when removing {}",
                    //     index,
                    //     removed.to_html_text(0)
                    // );
                }
            }
            (Some(old), Some(new)) => match (old, new) {
                (Html::Element(old_tag), Html::Element(new_tag))
                    if old_tag.name == new_tag.name && old_tag.key() == new_tag.key() =>
                {
                    let current_node: web_sys::Element = match parent.child_nodes().item(index) {
                        Some(n) => n.dyn_into()?,
                        None => {
                            return Err(JsValue::from_str(&format!(
                                "ERROR: Could not find node at index {}",
                                index
                            )));
                        }
                    };
                    // We have a node (current_node) that has changed from old_tag to new_tag,
                    // though the tag is still the same.
This means we need to diff children and attributes // First we diff attributes // We start by removing the ones that are no longer active for old_attr in &old_tag.attrs { let new_attr = new_tag.attrs.iter().find(|e| e == &old_attr); if new_attr.is_none() { remove_attribute(&current_node, old_attr)?; } else if let Attribute::Event(old_listener) = old_attr { if let Some(Attribute::Event(new_listener)) = new_attr { if let Some(js_closure) = old_listener.js_closure.0.borrow_mut().take() { new_listener.js_closure.0.replace(Some(js_closure)); } } } } // Then we add the ones that are added for attr in &new_tag.attrs { if!old_tag.attrs.contains(attr) { self.add_attribute(&current_node, attr)?; } } if let (Children::Nodes(old_children), Children::Nodes(new_children)) = (&old_tag.children, &new_tag.children) { for (child_index, pair) in old_children .iter() .zip_longest(new_children.iter()) .enumerate() { let (old_child, new_child) = eiter_or_both_to_option_tuple(pair); self.update_element( &current_node, new_child, old_child, child_index as u32, )?; } } } (Html::Text(s1), Html::Text(s2)) => { if s1!= s2 { if let Some(child) = parent.child_nodes().item(index) { child.set_text_content(Some(&s2)); } else { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index, ))); } } } _ => { if let Some(child) = parent.child_nodes().item(index) { parent.replace_child(&self.create_node(new)?, &child)?; } else { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index, ))); } } }, (None, None) => { // Should never happen, but if it happens we can just do nothing and it will be okay } } Ok(()) } fn create_node(&self, input: &Html<Msg>) -> Result<web_sys::Node, JsValue> { match input { Html::Element(Element { name, attrs, children, .. }) => { let el = self.program.browser.document.create_element(&name)?; for attr in attrs { self.add_attribute(&el, attr)?; } let node: web_sys::Node = el.into(); if let Children::Nodes(children) = children { for child in children { let child_node = self.create_node(&child)?; node.append_child(&child_node)?; } } Ok(node) } Html::Text(text) => { let node = self.program.browser.document.create_text_node(&text); Ok(node.into()) } } } fn add_attribute( &self, node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => Ok(()), Attribute::Text(key, value) => node.set_attribute(&key, &value), Attribute::Bool(key) => node.set_attribute(&key, "true"), Attribute::Event(EventListener { type_, to_message, stop_propagation, prevent_default, js_closure, }) => { let to_message = to_message.clone(); let program = self.program.clone(); let stop_propagation = *stop_propagation; let prevent_default = *prevent_default; let closure = Closure::wrap(Box::new(move |event: web_sys::Event| { if prevent_default { event.prevent_default(); } if stop_propagation { event.stop_propagation(); } let result = match &to_message { EventToMessage::StaticMsg(msg) => Program::dispatch(&program, msg), }; if let Err(error) = result { log::error!("{:#?}", error); } }) as Box<Fn(_)>);
node_et
                    .add_event_listener_with_callback(&type_, closure.as_ref().unchecked_ref())?;
                let ret = js_closure.0.replace(Some(closure));
                if ret.is_some() {
                    log::warn!("to_message did already have a closure???");
                }
                Ok(())
            }
        }
    }
}

fn remove_attribute<Msg>(
    node: &web_sys::Element,
    attribute: &Attribute<Msg>,
) -> Result<(), JsValue> {
    match attribute {
        // TODO: I think I know why elm normalizes before adding and removing attributes.
        // We should probably do the same
        Attribute::Key(_) => {}
        Attribute::Text(key, _) => {
            node.remove_attribute(key)?;
        }
        Attribute::Bool(key) => {
            node.remove_attribute(key)?;
        }
        Attribute::Event(EventListener { type_, js_closure, .. }) => {
            if let Some(closure) = js_closure.0.replace(None) {
                let node_et: &web_sys::EventTarget = &node;
                node_et.remove_event_listener_with_callback(
                    &type_,
                    closure.as_ref().unchecked_ref(),
                )?;
            } else {
                log::warn!("Could not get a function to remove listener");
            }
        }
    }
    Ok(())
}
let node_et: &web_sys::EventTarget = &node;
random_line_split
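The masked line is an upcast, not a conversion: web_sys models the DOM class hierarchy with Deref chains (Element derefs to Node, Node to EventTarget), so a type annotation alone walks the chain. A dependency-free sketch of the same mechanism with stand-in types:

use std::ops::Deref;

struct EventTarget;
struct Node(EventTarget);
struct Element(Node);

impl Deref for Node {
    type Target = EventTarget;
    fn deref(&self) -> &EventTarget { &self.0 }
}

impl Deref for Element {
    type Target = Node;
    fn deref(&self) -> &Node { &self.0 }
}

fn main() {
    let node = Element(Node(EventTarget));
    // The annotation drives two deref-coercion steps, exactly like the record's
    // `let node_et: &web_sys::EventTarget = &node;`
    let _node_et: &EventTarget = &node;
}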
render.rs
use crate::{ html::{Attribute, Children, Element, EventListener, EventToMessage, Html}, program::Program, }; use itertools::{EitherOrBoth, Itertools}; use std::fmt::Debug; use std::rc::Rc; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; pub struct Renderer<Model, Msg> { program: Rc<Program<Model, Msg>>, to_remove: Vec<(web_sys::Node, web_sys::Node)>, } fn eiter_or_both_to_option_tuple<T>(pair: EitherOrBoth<T, T>) -> (Option<T>, Option<T>) { use itertools::EitherOrBoth::{Both, Left, Right}; match pair { Both(a, b) => (Some(a), Some(b)), Left(a) => (Some(a), None), Right(b) => (None, Some(b)), } } impl<Model, Msg> Renderer<Model, Msg> where Msg: PartialEq + Debug + Clone +'static, Model: Debug + Clone +'static, { pub fn render( root: &web_sys::Node, program: &Rc<Program<Model, Msg>>, new_tree: &Html<Msg>, old_tree: &Option<Html<Msg>>, ) -> Result<(), JsValue> { let mut renderer = Renderer { program: program.clone(), to_remove: vec![], }; // TODO: We should probably not assume that the number here is 0 renderer.update_element(root, Some(new_tree), old_tree.as_ref(), 0)?; for (parent, child) in &renderer.to_remove { parent.remove_child(&child)?; } Ok(()) } fn
( &mut self, parent: &web_sys::Node, new: Option<&Html<Msg>>, old: Option<&Html<Msg>>, index: u32, ) -> Result<(), JsValue> { match (old, new) { (None, Some(new_html)) => { // Node is added parent.append_child(&self.create_node(new_html)?)?; } (Some(_removed), None) => { // Node is removed if let Some(child) = parent.child_nodes().item(index) { // Don't remove childs until after every iteration is finished. If not, the // indexes will not point to the correct nodes anymore self.to_remove.push((parent.clone(), child)); } else { // console_log!( // "Could not find node with index {} when removing {}", // index, // removed.to_html_text(0) // ); } } (Some(old), Some(new)) => match (old, new) { (Html::Element(old_tag), Html::Element(new_tag)) if old_tag.name == new_tag.name && old_tag.key() == new_tag.key() => { let current_node: web_sys::Element = match parent.child_nodes().item(index) { Some(n) => n.dyn_into()?, None => { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index ))); } }; // We have a node (current_node) that has changed from old_tag to new_tag, though // the tag is still the same. This means we need to diff children and attributes // First we diff attributes // We start by removing the ones that are no longer active for old_attr in &old_tag.attrs { let new_attr = new_tag.attrs.iter().find(|e| e == &old_attr); if new_attr.is_none() { remove_attribute(&current_node, old_attr)?; } else if let Attribute::Event(old_listener) = old_attr { if let Some(Attribute::Event(new_listener)) = new_attr { if let Some(js_closure) = old_listener.js_closure.0.borrow_mut().take() { new_listener.js_closure.0.replace(Some(js_closure)); } } } } // Then we add the ones that are added for attr in &new_tag.attrs { if!old_tag.attrs.contains(attr) { self.add_attribute(&current_node, attr)?; } } if let (Children::Nodes(old_children), Children::Nodes(new_children)) = (&old_tag.children, &new_tag.children) { for (child_index, pair) in old_children .iter() .zip_longest(new_children.iter()) .enumerate() { let (old_child, new_child) = eiter_or_both_to_option_tuple(pair); self.update_element( &current_node, new_child, old_child, child_index as u32, )?; } } } (Html::Text(s1), Html::Text(s2)) => { if s1!= s2 { if let Some(child) = parent.child_nodes().item(index) { child.set_text_content(Some(&s2)); } else { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index, ))); } } } _ => { if let Some(child) = parent.child_nodes().item(index) { parent.replace_child(&self.create_node(new)?, &child)?; } else { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index, ))); } } }, (None, None) => { // Should never happen, but if it happens we can just do nothing and it will be okay } } Ok(()) } fn create_node(&self, input: &Html<Msg>) -> Result<web_sys::Node, JsValue> { match input { Html::Element(Element { name, attrs, children, .. 
}) => { let el = self.program.browser.document.create_element(&name)?; for attr in attrs { self.add_attribute(&el, attr)?; } let node: web_sys::Node = el.into(); if let Children::Nodes(children) = children { for child in children { let child_node = self.create_node(&child)?; node.append_child(&child_node)?; } } Ok(node) } Html::Text(text) => { let node = self.program.browser.document.create_text_node(&text); Ok(node.into()) } } } fn add_attribute( &self, node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => Ok(()), Attribute::Text(key, value) => node.set_attribute(&key, &value), Attribute::Bool(key) => node.set_attribute(&key, "true"), Attribute::Event(EventListener { type_, to_message, stop_propagation, prevent_default, js_closure, }) => { let to_message = to_message.clone(); let program = self.program.clone(); let stop_propagation = *stop_propagation; let prevent_default = *prevent_default; let closure = Closure::wrap(Box::new(move |event: web_sys::Event| { if prevent_default { event.prevent_default(); } if stop_propagation { event.stop_propagation(); } let result = match &to_message { EventToMessage::StaticMsg(msg) => Program::dispatch(&program, msg), }; if let Err(error) = result { log::error!("{:#?}", error); } }) as Box<Fn(_)>); let node_et: &web_sys::EventTarget = &node; node_et .add_event_listener_with_callback(&type_, closure.as_ref().unchecked_ref())?; let ret = js_closure.0.replace(Some(closure)); if ret.is_some() { log::warn!("to_message did already have a closure???"); } Ok(()) } } } } fn remove_attribute<Msg>( node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => {} // TODO: I think I know why elm normalizes before adding and removing attributes. We should probably do the same Attribute::Text(key, _) => { node.remove_attribute(key)?; } Attribute::Bool(key) => { node.remove_attribute(key)?; } Attribute::Event(EventListener { type_, js_closure,.. }) => { if let Some(closure) = js_closure.0.replace(None) { let node_et: &web_sys::EventTarget = &node; node_et.remove_event_listener_with_callback( &type_, closure.as_ref().unchecked_ref(), )?; } else { log::warn!("Could not get a function to remove listener"); } } } Ok(()) }
update_element
identifier_name
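The masked identifier is the diff entry point; its child walk is the interesting part. zip_longest pads the shorter child list, so one loop body sees additions (old side None), removals (new side None), and updates (both Some). A standalone demo of that pairing, using the same itertools calls as the record:

use itertools::{EitherOrBoth, Itertools};

// Same shape as the record's eiter_or_both_to_option_tuple helper.
fn to_options<T>(pair: EitherOrBoth<T, T>) -> (Option<T>, Option<T>) {
    match pair {
        EitherOrBoth::Both(a, b) => (Some(a), Some(b)),
        EitherOrBoth::Left(a) => (Some(a), None),
        EitherOrBoth::Right(b) => (None, Some(b)),
    }
}

fn main() {
    let old = ["a", "b", "c"];
    let new = ["a", "x"];
    for (i, pair) in old.iter().zip_longest(new.iter()).enumerate() {
        let (old_child, new_child) = to_options(pair);
        // index 2 yields old=Some("c"), new=None -> a removal in the real diff
        println!("index {}: old={:?} new={:?}", i, old_child, new_child);
    }
}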
render.rs
use crate::{ html::{Attribute, Children, Element, EventListener, EventToMessage, Html}, program::Program, }; use itertools::{EitherOrBoth, Itertools}; use std::fmt::Debug; use std::rc::Rc; use wasm_bindgen::prelude::*; use wasm_bindgen::JsCast; pub struct Renderer<Model, Msg> { program: Rc<Program<Model, Msg>>, to_remove: Vec<(web_sys::Node, web_sys::Node)>, } fn eiter_or_both_to_option_tuple<T>(pair: EitherOrBoth<T, T>) -> (Option<T>, Option<T>) { use itertools::EitherOrBoth::{Both, Left, Right}; match pair { Both(a, b) => (Some(a), Some(b)), Left(a) => (Some(a), None), Right(b) => (None, Some(b)), } } impl<Model, Msg> Renderer<Model, Msg> where Msg: PartialEq + Debug + Clone +'static, Model: Debug + Clone +'static, { pub fn render( root: &web_sys::Node, program: &Rc<Program<Model, Msg>>, new_tree: &Html<Msg>, old_tree: &Option<Html<Msg>>, ) -> Result<(), JsValue> { let mut renderer = Renderer { program: program.clone(), to_remove: vec![], }; // TODO: We should probably not assume that the number here is 0 renderer.update_element(root, Some(new_tree), old_tree.as_ref(), 0)?; for (parent, child) in &renderer.to_remove { parent.remove_child(&child)?; } Ok(()) } fn update_element( &mut self, parent: &web_sys::Node, new: Option<&Html<Msg>>, old: Option<&Html<Msg>>, index: u32, ) -> Result<(), JsValue> { match (old, new) { (None, Some(new_html)) => { // Node is added parent.append_child(&self.create_node(new_html)?)?; } (Some(_removed), None) => { // Node is removed if let Some(child) = parent.child_nodes().item(index) { // Don't remove childs until after every iteration is finished. If not, the // indexes will not point to the correct nodes anymore self.to_remove.push((parent.clone(), child)); } else { // console_log!( // "Could not find node with index {} when removing {}", // index, // removed.to_html_text(0) // ); } } (Some(old), Some(new)) => match (old, new) { (Html::Element(old_tag), Html::Element(new_tag)) if old_tag.name == new_tag.name && old_tag.key() == new_tag.key() => { let current_node: web_sys::Element = match parent.child_nodes().item(index) { Some(n) => n.dyn_into()?, None => { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index ))); } }; // We have a node (current_node) that has changed from old_tag to new_tag, though // the tag is still the same. 
This means we need to diff children and attributes // First we diff attributes // We start by removing the ones that are no longer active for old_attr in &old_tag.attrs { let new_attr = new_tag.attrs.iter().find(|e| e == &old_attr); if new_attr.is_none() { remove_attribute(&current_node, old_attr)?; } else if let Attribute::Event(old_listener) = old_attr { if let Some(Attribute::Event(new_listener)) = new_attr { if let Some(js_closure) = old_listener.js_closure.0.borrow_mut().take() { new_listener.js_closure.0.replace(Some(js_closure)); } } } } // Then we add the ones that are added for attr in &new_tag.attrs { if!old_tag.attrs.contains(attr) { self.add_attribute(&current_node, attr)?; } } if let (Children::Nodes(old_children), Children::Nodes(new_children)) = (&old_tag.children, &new_tag.children) { for (child_index, pair) in old_children .iter() .zip_longest(new_children.iter()) .enumerate() { let (old_child, new_child) = eiter_or_both_to_option_tuple(pair); self.update_element( &current_node, new_child, old_child, child_index as u32, )?; } } } (Html::Text(s1), Html::Text(s2)) => { if s1!= s2 { if let Some(child) = parent.child_nodes().item(index) { child.set_text_content(Some(&s2)); } else { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index, ))); } } } _ => { if let Some(child) = parent.child_nodes().item(index) { parent.replace_child(&self.create_node(new)?, &child)?; } else { return Err(JsValue::from_str(&format!( "ERROR: Could not find node at index {}", index, ))); } } }, (None, None) => { // Should never happen, but if it happens we can just do nothing and it will be okay } } Ok(()) } fn create_node(&self, input: &Html<Msg>) -> Result<web_sys::Node, JsValue> { match input { Html::Element(Element { name, attrs, children, .. 
}) => { let el = self.program.browser.document.create_element(&name)?; for attr in attrs { self.add_attribute(&el, attr)?; } let node: web_sys::Node = el.into(); if let Children::Nodes(children) = children { for child in children { let child_node = self.create_node(&child)?; node.append_child(&child_node)?; } } Ok(node) } Html::Text(text) => { let node = self.program.browser.document.create_text_node(&text); Ok(node.into()) } } } fn add_attribute( &self, node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => Ok(()), Attribute::Text(key, value) => node.set_attribute(&key, &value), Attribute::Bool(key) => node.set_attribute(&key, "true"), Attribute::Event(EventListener { type_, to_message, stop_propagation, prevent_default, js_closure, }) => { let to_message = to_message.clone(); let program = self.program.clone(); let stop_propagation = *stop_propagation; let prevent_default = *prevent_default; let closure = Closure::wrap(Box::new(move |event: web_sys::Event| { if prevent_default { event.prevent_default(); } if stop_propagation { event.stop_propagation(); } let result = match &to_message { EventToMessage::StaticMsg(msg) => Program::dispatch(&program, msg), }; if let Err(error) = result { log::error!("{:#?}", error); } }) as Box<Fn(_)>); let node_et: &web_sys::EventTarget = &node; node_et .add_event_listener_with_callback(&type_, closure.as_ref().unchecked_ref())?; let ret = js_closure.0.replace(Some(closure)); if ret.is_some() { log::warn!("to_message did already have a closure???"); } Ok(()) } } } } fn remove_attribute<Msg>( node: &web_sys::Element, attribute: &Attribute<Msg>, ) -> Result<(), JsValue> { match attribute { Attribute::Key(_) => {} // TODO: I think I know why elm normalizes before adding and removing attributes. We should probably do the same Attribute::Text(key, _) => { node.remove_attribute(key)?; } Attribute::Bool(key) => { node.remove_attribute(key)?; } Attribute::Event(EventListener { type_, js_closure,.. }) =>
} Ok(()) }
{
    if let Some(closure) = js_closure.0.replace(None) {
        let node_et: &web_sys::EventTarget = &node;
        node_et.remove_event_listener_with_callback(
            &type_,
            closure.as_ref().unchecked_ref(),
        )?;
    } else {
        log::warn!("Could not get a function to remove listener");
    }
}
conditional_block
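What the conditional block protects is the closure handoff: removeEventListener only unregisters a callback when handed the same function that was added, so the Closure stored at add time has to be taken back out of its RefCell. A dependency-free sketch of that take-and-use move (a String stands in for the real wasm-bindgen Closure):

use std::cell::RefCell;

struct Listener {
    // The real code stores a wasm_bindgen::closure::Closure here.
    stored: RefCell<Option<String>>,
}

fn main() {
    let listener = Listener {
        stored: RefCell::new(Some("callback".to_string())),
    };
    // replace(None) takes ownership of the stored value, like js_closure.0.replace(None).
    if let Some(closure) = listener.stored.replace(None) {
        println!("unregistering {}", closure); // analogous to remove_event_listener_with_callback
    } else {
        println!("Could not get a function to remove listener");
    }
}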
main.rs
// index is now at qtype let qtype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if qtype!= 1 && qtype!= 28 { log::error!("Problem parsing qname, qtype is not 1 or 28: {}", qtype); return Err(FormatError); } index += 2; let qclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if qclass!= 1 { log::error!("Problem parsing qname, qclass is not 1: {}", qclass); return Err(FormatError); } index += 2; } // index is now at the aname section for aname in 0..ancount { log::trace!("parsing aname{}, index {}", aname, index); // first check to see if we're dealing with a pointer or a name if self.datagram[index] >= 0xc0 { // pointer index += 1; if self.datagram[index]!= 0xc { log::error!( "Found aname pointer, but value does not conform to length of aname header" ); return Err(FormatError); } index += 1; } else { // name, fast forward past the name index = self.fast_foward_name(index)?; log::trace!("fast forward aname to {}", index); } // index is now at type let atype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if atype!= 1 && atype!= 28 { log::error!("Problem parsing aname, type is not 1 or 28: {}", atype); return Err(FormatError); } index += 2; let aclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if aclass!= 1 { log::error!("Problem parsing aname, aclass is not 1: {}", aclass); return Err(FormatError); } index += 2; // this is our TTL let ttl = u32::from_be_bytes(self.datagram[index..index + 4].try_into().unwrap()); log::trace!("got ttl: {}", ttl); index += 4; // this is the payload length let addr_len = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); index += 2; match addr_len { // ipv4 4 => { if atype!= 1 { log::error!("Got a 4-byte address, but ATYPE!= A (1)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv4Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 4] = [0; 4]; for (&src, dst) in self.datagram[index..index + 4].iter().zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V4(Ipv4Addr::from(rdata)); index += 4; map.insert(addr, ttl); } // ipv6 16 => { if atype!= 28 { log::error!("Got a 16-byte address, but ATYPE!= AAAA (28)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv6Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 16] = [0; 16]; for (&src, dst) in self.datagram[index..index + 16] .iter() .zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V6(Ipv6Addr::from(rdata)); index += 16; map.insert(addr, ttl); } _ => { log::error!("Length field does not match a known record type"); return Err(FormatError); } } } Ok(map) } /* example response for: betrusted.io->185.199.111.153 Header: 61, ca, id 81, 80, header 0, 1, qdcount 0, 4, ancount 0, 0, nscount 0, 0, arcount qname: 9, length 9 62, 65, 74, 72, 75, 73, 74, 65, 64, "betrusted" 2, length 2 69, 6f, "io" 0, end of name qtype: 0, 1, type A qclass: 0, 1, type IN aname0: c0, name is a pointer (any value > 192 is a pointer) c, offset of 12 from start of aname0 0, 1, type A 0, 1, class IN 0, 0, e, 10, 0xe10 = 3600 seconds TTL 0, 4, 4 bytes address b9, c7, 6c, 99, address aname1: c0, name is a pointer c, 0, 1, type A 0, 1, class IN 0, 0, e, 10, TTL 0, 4, 4 byte address b9, c7, 6d, 99, address aname2: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6e, 99, aname3: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6f, 99 */ /* pub fn 
is_query(&self) -> bool { !self.is_response() } */ pub fn rcode(&self) -> DnsResponseCode { match (self.header() >> 11) & 0xF { 0 => DnsResponseCode::NoError, 1 => DnsResponseCode::FormatError, 2 => DnsResponseCode::ServerFailure, 3 => DnsResponseCode::NameError, 4 => DnsResponseCode::NotImplemented, 5 => DnsResponseCode::Refused, _ => DnsResponseCode::UnknownError, } } } pub struct Resolver { /// DnsServerManager is a service of the Net crate that automatically updates the DNS server list mgr: net::protocols::DnsServerManager, socket: UdpSocket, buf: [u8; DNS_PKT_MAX_LEN], trng: trng::Trng, freeze: bool, } impl Resolver { pub fn new(xns: &xous_names::XousNames) -> Resolver { let trng = trng::Trng::new(&xns).unwrap(); let local_port = (49152 + trng.get_u32().unwrap() % 16384) as u16; let socket = UdpSocket::bind( format!("0.0.0.0:{}", local_port), ) .expect("couldn't create socket for DNS resolver"); let timeout = Duration::from_millis(10_000); // 10 seconds for DNS to resolve by default socket.set_read_timeout(Some(timeout)).unwrap(); socket.set_nonblocking(false).unwrap(); // we want this to block. // we /could/ do a non-blocking DNS resolver, but...what would you do in the meantime?? // blocking is probably what we actually want this time. Resolver { mgr: net::protocols::DnsServerManager::register(&xns) .expect("Couldn't register the DNS server list auto-manager"), socket, buf: [0; DNS_PKT_MAX_LEN], trng, freeze: false, } } pub fn add_server(&mut self, addr: IpAddr) { self.mgr.add_server(addr); } pub fn remove_server(&mut self, addr: IpAddr) { self.mgr.remove_server(addr); } pub fn clear_all_servers(&mut self) { self.mgr.clear(); } pub fn set_freeze_config(&mut self, freeze: bool) { self.freeze = freeze; self.mgr.set_freeze(freeze); } pub fn get_freeze(&self) -> bool { self.freeze } /// this allows us to re-use the TRNG object pub fn trng_u32(&self) -> u32 { self.trng.get_u32().unwrap() } pub fn resolve(&mut self, name: &str) -> Result<HashMap<IpAddr, u32>, DnsResponseCode> { if let Some(dns_address) = self.mgr.get_random() { let dns_port = 53; let server = SocketAddr::new(dns_address, dns_port); let qname = name; let qtype = QueryType::A; let qclass = QueryClass::IN; let query = Message::query(qname, qtype, qclass, self.trng.get_u32().unwrap() as u16); self.socket .send_to(&query.datagram, &server) .map_err(|_| DnsResponseCode::NetworkError)?; match self.socket.recv(&mut self.buf) { Ok(len) => { let message = Message::from(&self.buf[..len]); if message.id() == query.id() && message.is_response() { return match message.rcode() { DnsResponseCode::NoError => message.parse_response(), rcode => Err(rcode), }; } else { Err(DnsResponseCode::NetworkError) } } Err(e) => match e.kind() { ErrorKind::WouldBlock => Err(DnsResponseCode::NetworkError), _ => Err(DnsResponseCode::UnknownError), }, } } else { Err(DnsResponseCode::NoServerSpecified) } } } #[derive(PartialEq, Debug)] #[repr(C)] enum NameConversionError { /// The length of the memory buffer was invalid InvalidMemoryBuffer = 1, /// The specified nameserver string was not UTF-8 InvalidString = 3, /// The message was not a mutable memory message InvalidMessageType = 4, } fn name_from_msg(env: &xous::MessageEnvelope) -> Result<&str, NameConversionError> { let msg = env .body .memory_message() .ok_or(NameConversionError::InvalidMessageType)?; let valid_bytes = msg.valid.map(|v| v.get()).unwrap_or_else(|| msg.buf.len()); if valid_bytes > DNS_NAME_LENGTH_LIMIT || valid_bytes < 1 { log::error!("valid bytes exceeded DNS name limit"); return 
Err(NameConversionError::InvalidMemoryBuffer); } // Safe because we've already validated that it's a valid range let str_slice = unsafe { core::slice::from_raw_parts(msg.buf.as_ptr(), valid_bytes) }; let name_string = core::str::from_utf8(str_slice).map_err(|_| NameConversionError::InvalidString)?; Ok(name_string) } fn fill_response(mut env: xous::MessageEnvelope, entries: &HashMap<IpAddr, u32>) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); // First tag = 1 for "Error" -- we'll fill this in at the end when it's successful *i.next()? = 1; // Limit the number of entries to 128, which is a nice number. Given that an IPv6 // address is 17 bytes, that means that ~240 IPv6 addresses will fit in a 4 kB page.
let mut entry_count = entries.len(); if entry_count > 128 { entry_count = 128; } *i.next()? = entry_count.try_into().ok()?; // Start filling in the addreses for addr in entries.keys() { match addr { &IpAddr::V4(a) => { // IPv4 *i.next()? = 4; for entry in a.octets() { *i.next()? = entry; } } &IpAddr::V6(a) => { // IPv6 for entry in a.octets() { *i.next()? = entry; } *i.next()? = 6; } } } // Convert the entry to a "Success" message drop(i); s[0] = 0; None } fn fill_error(mut env: xous::MessageEnvelope, code: DnsResponseCode) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); *i.next()? = 1; *i.next()? = code as u8; None } fn main() ->! { log_server::init_wait().unwrap(); log::set_max_level(log::LevelFilter::Info); log::info!("my PID is {}", xous::process::id()); // Time is stuck in the DNS crate because the status crate is out of resources, and the DNS // crate is fairly under-utilized and ideal for sticking a service like time in it. // // this kicks off the thread that services the `libstd` calls for time-related things. // we want this started really early, because it sanity checks the RTC and a bunch of other stuff. time::start_time_server(); time::start_time_ux(); let xns = xous_names::XousNames::new().unwrap(); let dns_sid = xns .register_name(api::SERVER_NAME_DNS, None) .expect("can't register server"); log::trace!("registered with NS -- {:?}", dns_sid); // this will magically populate a list of DNS servers when they become available let mut resolver = Resolver::new(&xns); // if you wanted to force a server into the initial config, you can do it here, for example: // resolver.add_server(IpAddr::V4(Ipv4Addr::new(1,1,1,1))); // the `u32` value is the TTL of the IpAddr let mut dns_cache = HashMap::<std::string::String, HashMap<IpAddr, u32>>::new(); // build a thread that pings the UpdateTtl function once every few minutes to expire the DNS cache thread::spawn({ let local_cid = xous::connect(dns_sid).unwrap(); move || { const TTL_INTERVAL_SECS: usize = 300; // every 5 minutes update the map let tt = ticktimer_server::Ticktimer::new().unwrap(); loop { tt.sleep_ms(TTL_INTERVAL_SECS * 1000).unwrap(); xous::send_message( local_cid, xous::Message::new_scalar( Opcode::UpdateTtl.to_usize().unwrap(), TTL_INTERVAL_SECS, 0, 0, 0, ), ) .expect("couldn't increment DNS cache"); } } }); log::trace!("ready to accept requests"); loop { let mut msg = xous::receive_message(dns_sid).unwrap(); match FromPrimitive::from_usize(msg.body.id()) { Some(Opcode::RawLookup) => { match name_from_msg(&msg).map(|s| s.to_owned()) { Ok(owned_name) => { // handle the special case of "localhost" as a string if owned_name == "localhost" { let mut local = HashMap::<IpAddr, u32>::new(); local.insert(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 86400); fill_response(msg, &local); continue; } log::trace!("performing a lookup of {}", owned_name); // Try to get the result out of the DNS cache if let Some(entries) = dns_cache.get(&owned_name) { fill_response(msg, entries); continue; } // This entry is not in the cache, so perform a lookup match resolver.resolve(&owned_name) { Ok(cache_entry) => { fill_response(msg, &cache_entry); dns_cache.insert(owned_name, cache_entry); continue; }
// 128 is just a conservative value rounded down.
random_line_split
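The answer-section parser above re-derives every field with self.datagram[index..index + 2].try_into().unwrap(), which panics if a truncated datagram runs out of bytes. A bounds-checked variant of the same idiom (the helper name is illustrative, not from the source):

// Read a big-endian u16 without panicking on short input.
fn be_u16(buf: &[u8], at: usize) -> Option<u16> {
    let bytes = buf.get(at..at.checked_add(2)?)?;
    Some(u16::from_be_bytes([bytes[0], bytes[1]]))
}

fn main() {
    let datagram = [0x00, 0x01, 0x00, 0x1c];
    assert_eq!(be_u16(&datagram, 0), Some(1));  // QTYPE A
    assert_eq!(be_u16(&datagram, 2), Some(28)); // QTYPE AAAA
    assert_eq!(be_u16(&datagram, 3), None);     // truncated: no panic
    println!("ok");
}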
main.rs
// index is now at qtype let qtype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if qtype!= 1 && qtype!= 28 { log::error!("Problem parsing qname, qtype is not 1 or 28: {}", qtype); return Err(FormatError); } index += 2; let qclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if qclass!= 1 { log::error!("Problem parsing qname, qclass is not 1: {}", qclass); return Err(FormatError); } index += 2; } // index is now at the aname section for aname in 0..ancount { log::trace!("parsing aname{}, index {}", aname, index); // first check to see if we're dealing with a pointer or a name if self.datagram[index] >= 0xc0 { // pointer index += 1; if self.datagram[index]!= 0xc { log::error!( "Found aname pointer, but value does not conform to length of aname header" ); return Err(FormatError); } index += 1; } else { // name, fast forward past the name index = self.fast_foward_name(index)?; log::trace!("fast forward aname to {}", index); } // index is now at type let atype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if atype!= 1 && atype!= 28 { log::error!("Problem parsing aname, type is not 1 or 28: {}", atype); return Err(FormatError); } index += 2; let aclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if aclass!= 1 { log::error!("Problem parsing aname, aclass is not 1: {}", aclass); return Err(FormatError); } index += 2; // this is our TTL let ttl = u32::from_be_bytes(self.datagram[index..index + 4].try_into().unwrap()); log::trace!("got ttl: {}", ttl); index += 4; // this is the payload length let addr_len = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); index += 2; match addr_len { // ipv4 4 => { if atype!= 1 { log::error!("Got a 4-byte address, but ATYPE!= A (1)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv4Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 4] = [0; 4]; for (&src, dst) in self.datagram[index..index + 4].iter().zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V4(Ipv4Addr::from(rdata)); index += 4; map.insert(addr, ttl); } // ipv6 16 => { if atype!= 28 { log::error!("Got a 16-byte address, but ATYPE!= AAAA (28)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv6Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 16] = [0; 16]; for (&src, dst) in self.datagram[index..index + 16] .iter() .zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V6(Ipv6Addr::from(rdata)); index += 16; map.insert(addr, ttl); } _ => { log::error!("Length field does not match a known record type"); return Err(FormatError); } } } Ok(map) } /* example response for: betrusted.io->185.199.111.153 Header: 61, ca, id 81, 80, header 0, 1, qdcount 0, 4, ancount 0, 0, nscount 0, 0, arcount qname: 9, length 9 62, 65, 74, 72, 75, 73, 74, 65, 64, "betrusted" 2, length 2 69, 6f, "io" 0, end of name qtype: 0, 1, type A qclass: 0, 1, type IN aname0: c0, name is a pointer (any value > 192 is a pointer) c, offset of 12 from start of aname0 0, 1, type A 0, 1, class IN 0, 0, e, 10, 0xe10 = 3600 seconds TTL 0, 4, 4 bytes address b9, c7, 6c, 99, address aname1: c0, name is a pointer c, 0, 1, type A 0, 1, class IN 0, 0, e, 10, TTL 0, 4, 4 byte address b9, c7, 6d, 99, address aname2: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6e, 99, aname3: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6f, 99 */ /* pub fn 
is_query(&self) -> bool { !self.is_response() } */ pub fn rcode(&self) -> DnsResponseCode
} pub struct Resolver { /// DnsServerManager is a service of the Net crate that automatically updates the DNS server list mgr: net::protocols::DnsServerManager, socket: UdpSocket, buf: [u8; DNS_PKT_MAX_LEN], trng: trng::Trng, freeze: bool, } impl Resolver { pub fn new(xns: &xous_names::XousNames) -> Resolver { let trng = trng::Trng::new(&xns).unwrap(); let local_port = (49152 + trng.get_u32().unwrap() % 16384) as u16; let socket = UdpSocket::bind( format!("0.0.0.0:{}", local_port), ) .expect("couldn't create socket for DNS resolver"); let timeout = Duration::from_millis(10_000); // 10 seconds for DNS to resolve by default socket.set_read_timeout(Some(timeout)).unwrap(); socket.set_nonblocking(false).unwrap(); // we want this to block. // we /could/ do a non-blocking DNS resolver, but...what would you do in the meantime?? // blocking is probably what we actually want this time. Resolver { mgr: net::protocols::DnsServerManager::register(&xns) .expect("Couldn't register the DNS server list auto-manager"), socket, buf: [0; DNS_PKT_MAX_LEN], trng, freeze: false, } } pub fn add_server(&mut self, addr: IpAddr) { self.mgr.add_server(addr); } pub fn remove_server(&mut self, addr: IpAddr) { self.mgr.remove_server(addr); } pub fn clear_all_servers(&mut self) { self.mgr.clear(); } pub fn set_freeze_config(&mut self, freeze: bool) { self.freeze = freeze; self.mgr.set_freeze(freeze); } pub fn get_freeze(&self) -> bool { self.freeze } /// this allows us to re-use the TRNG object pub fn trng_u32(&self) -> u32 { self.trng.get_u32().unwrap() } pub fn resolve(&mut self, name: &str) -> Result<HashMap<IpAddr, u32>, DnsResponseCode> { if let Some(dns_address) = self.mgr.get_random() { let dns_port = 53; let server = SocketAddr::new(dns_address, dns_port); let qname = name; let qtype = QueryType::A; let qclass = QueryClass::IN; let query = Message::query(qname, qtype, qclass, self.trng.get_u32().unwrap() as u16); self.socket .send_to(&query.datagram, &server) .map_err(|_| DnsResponseCode::NetworkError)?; match self.socket.recv(&mut self.buf) { Ok(len) => { let message = Message::from(&self.buf[..len]); if message.id() == query.id() && message.is_response() { return match message.rcode() { DnsResponseCode::NoError => message.parse_response(), rcode => Err(rcode), }; } else { Err(DnsResponseCode::NetworkError) } } Err(e) => match e.kind() { ErrorKind::WouldBlock => Err(DnsResponseCode::NetworkError), _ => Err(DnsResponseCode::UnknownError), }, } } else { Err(DnsResponseCode::NoServerSpecified) } } } #[derive(PartialEq, Debug)] #[repr(C)] enum NameConversionError { /// The length of the memory buffer was invalid InvalidMemoryBuffer = 1, /// The specified nameserver string was not UTF-8 InvalidString = 3, /// The message was not a mutable memory message InvalidMessageType = 4, } fn name_from_msg(env: &xous::MessageEnvelope) -> Result<&str, NameConversionError> { let msg = env .body .memory_message() .ok_or(NameConversionError::InvalidMessageType)?; let valid_bytes = msg.valid.map(|v| v.get()).unwrap_or_else(|| msg.buf.len()); if valid_bytes > DNS_NAME_LENGTH_LIMIT || valid_bytes < 1 { log::error!("valid bytes exceeded DNS name limit"); return Err(NameConversionError::InvalidMemoryBuffer); } // Safe because we've already validated that it's a valid range let str_slice = unsafe { core::slice::from_raw_parts(msg.buf.as_ptr(), valid_bytes) }; let name_string = core::str::from_utf8(str_slice).map_err(|_| NameConversionError::InvalidString)?; Ok(name_string) } fn fill_response(mut env: xous::MessageEnvelope, entries: 
&HashMap<IpAddr, u32>) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); // First tag = 1 for "Error" -- we'll fill this in at the end when it's successful *i.next()? = 1; // Limit the number of entries to 128, which is a nice number. Given that an IPv6 // address is 17 bytes, that means that ~240 IPv6 addresses will fit in a 4 kB page. // 128 is just a conservative value rounded down. let mut entry_count = entries.len(); if entry_count > 128 { entry_count = 128; } *i.next()? = entry_count.try_into().ok()?; // Start filling in the addreses for addr in entries.keys() { match addr { &IpAddr::V4(a) => { // IPv4 *i.next()? = 4; for entry in a.octets() { *i.next()? = entry; } } &IpAddr::V6(a) => { // IPv6 for entry in a.octets() { *i.next()? = entry; } *i.next()? = 6; } } } // Convert the entry to a "Success" message drop(i); s[0] = 0; None } fn fill_error(mut env: xous::MessageEnvelope, code: DnsResponseCode) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); *i.next()? = 1; *i.next()? = code as u8; None } fn main() ->! { log_server::init_wait().unwrap(); log::set_max_level(log::LevelFilter::Info); log::info!("my PID is {}", xous::process::id()); // Time is stuck in the DNS crate because the status crate is out of resources, and the DNS // crate is fairly under-utilized and ideal for sticking a service like time in it. // // this kicks off the thread that services the `libstd` calls for time-related things. // we want this started really early, because it sanity checks the RTC and a bunch of other stuff. time::start_time_server(); time::start_time_ux(); let xns = xous_names::XousNames::new().unwrap(); let dns_sid = xns .register_name(api::SERVER_NAME_DNS, None) .expect("can't register server"); log::trace!("registered with NS -- {:?}", dns_sid); // this will magically populate a list of DNS servers when they become available let mut resolver = Resolver::new(&xns); // if you wanted to force a server into the initial config, you can do it here, for example: // resolver.add_server(IpAddr::V4(Ipv4Addr::new(1,1,1,1))); // the `u32` value is the TTL of the IpAddr let mut dns_cache = HashMap::<std::string::String, HashMap<IpAddr, u32>>::new(); // build a thread that pings the UpdateTtl function once every few minutes to expire the DNS cache thread::spawn({ let local_cid = xous::connect(dns_sid).unwrap(); move || { const TTL_INTERVAL_SECS: usize = 300; // every 5 minutes update the map let tt = ticktimer_server::Ticktimer::new().unwrap(); loop { tt.sleep_ms(TTL_INTERVAL_SECS * 1000).unwrap(); xous::send_message( local_cid, xous::Message::new_scalar( Opcode::UpdateTtl.to_usize().unwrap(), TTL_INTERVAL_SECS, 0, 0, 0, ), ) .expect("couldn't increment DNS cache"); } } }); log::trace!("ready to accept requests"); loop { let mut msg = xous::receive_message(dns_sid).unwrap(); match FromPrimitive::from_usize(msg.body.id()) { Some(Opcode::RawLookup) => { match name_from_msg(&msg).map(|s| s.to_owned()) { Ok(owned_name) => { // handle the special case of "localhost" as a string if owned_name == "localhost" { let mut local = HashMap::<IpAddr, u32>::new(); local.insert(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 86400); fill_response(msg, &local); continue; } log::trace!("performing a lookup of {}", owned_name); // Try to get the result out of the DNS cache if let Some(entries) = dns_cache.get(&owned_name) { fill_response(msg, entries); continue; } // This entry is 
not in the cache, so perform a lookup match resolver.resolve(&owned_name) { Ok(cache_entry) => { fill_response(msg, &cache_entry); dns_cache.insert(owned_name, cache_entry); continue; }
{
    match (self.header() >> 11) & 0xF {
        0 => DnsResponseCode::NoError,
        1 => DnsResponseCode::FormatError,
        2 => DnsResponseCode::ServerFailure,
        3 => DnsResponseCode::NameError,
        4 => DnsResponseCode::NotImplemented,
        5 => DnsResponseCode::Refused,
        _ => DnsResponseCode::UnknownError,
    }
}
identifier_body
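The masked body is a four-bit field decode: shift the 16-bit flags word right by 11 and mask with 0xF. Note that in RFC 1035's wire layout a shift of 11 would select OPCODE rather than RCODE, so header() here presumably returns the flags in a crate-specific bit order; the extraction itself is generic:

fn four_bit_field(header: u16, shift: u32) -> u8 {
    ((header >> shift) & 0xF) as u8
}

fn main() {
    let header = 0b0001_1000_0000_0000u16;
    // With the record's shift of 11 this decodes to 3, i.e. DnsResponseCode::NameError.
    assert_eq!(four_bit_field(header, 11), 3);
    println!("ok");
}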
main.rs
// index is now at qtype let qtype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if qtype!= 1 && qtype!= 28 { log::error!("Problem parsing qname, qtype is not 1 or 28: {}", qtype); return Err(FormatError); } index += 2; let qclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if qclass!= 1 { log::error!("Problem parsing qname, qclass is not 1: {}", qclass); return Err(FormatError); } index += 2; } // index is now at the aname section for aname in 0..ancount { log::trace!("parsing aname{}, index {}", aname, index); // first check to see if we're dealing with a pointer or a name if self.datagram[index] >= 0xc0 { // pointer index += 1; if self.datagram[index]!= 0xc { log::error!( "Found aname pointer, but value does not conform to length of aname header" ); return Err(FormatError); } index += 1; } else { // name, fast forward past the name index = self.fast_foward_name(index)?; log::trace!("fast forward aname to {}", index); } // index is now at type let atype = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); // A = 1, AAAA = 28 if atype!= 1 && atype!= 28 { log::error!("Problem parsing aname, type is not 1 or 28: {}", atype); return Err(FormatError); } index += 2; let aclass = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); if aclass!= 1 { log::error!("Problem parsing aname, aclass is not 1: {}", aclass); return Err(FormatError); } index += 2; // this is our TTL let ttl = u32::from_be_bytes(self.datagram[index..index + 4].try_into().unwrap()); log::trace!("got ttl: {}", ttl); index += 4; // this is the payload length let addr_len = u16::from_be_bytes(self.datagram[index..index + 2].try_into().unwrap()); index += 2; match addr_len { // ipv4 4 => { if atype!= 1 { log::error!("Got a 4-byte address, but ATYPE!= A (1)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv4Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 4] = [0; 4]; for (&src, dst) in self.datagram[index..index + 4].iter().zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V4(Ipv4Addr::from(rdata)); index += 4; map.insert(addr, ttl); } // ipv6 16 => { if atype!= 28 { log::error!("Got a 16-byte address, but ATYPE!= AAAA (28)"); return Err(FormatError); } // this copy happens because I can't figure out how to get Ipv6Addr::from() to realize it's casting from a [u8;4] let mut rdata: [u8; 16] = [0; 16]; for (&src, dst) in self.datagram[index..index + 16] .iter() .zip(rdata.iter_mut()) { *dst = src; } let addr = IpAddr::V6(Ipv6Addr::from(rdata)); index += 16; map.insert(addr, ttl); } _ => { log::error!("Length field does not match a known record type"); return Err(FormatError); } } } Ok(map) } /* example response for: betrusted.io->185.199.111.153 Header: 61, ca, id 81, 80, header 0, 1, qdcount 0, 4, ancount 0, 0, nscount 0, 0, arcount qname: 9, length 9 62, 65, 74, 72, 75, 73, 74, 65, 64, "betrusted" 2, length 2 69, 6f, "io" 0, end of name qtype: 0, 1, type A qclass: 0, 1, type IN aname0: c0, name is a pointer (any value > 192 is a pointer) c, offset of 12 from start of aname0 0, 1, type A 0, 1, class IN 0, 0, e, 10, 0xe10 = 3600 seconds TTL 0, 4, 4 bytes address b9, c7, 6c, 99, address aname1: c0, name is a pointer c, 0, 1, type A 0, 1, class IN 0, 0, e, 10, TTL 0, 4, 4 byte address b9, c7, 6d, 99, address aname2: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6e, 99, aname3: c0, c, 0, 1, 0, 1, 0, 0, e, 10, 0, 4, b9, c7, 6f, 99 */ /* pub fn 
is_query(&self) -> bool { !self.is_response() } */ pub fn rcode(&self) -> DnsResponseCode { match (self.header() >> 11) & 0xF { 0 => DnsResponseCode::NoError, 1 => DnsResponseCode::FormatError, 2 => DnsResponseCode::ServerFailure, 3 => DnsResponseCode::NameError, 4 => DnsResponseCode::NotImplemented, 5 => DnsResponseCode::Refused, _ => DnsResponseCode::UnknownError, } } } pub struct Resolver { /// DnsServerManager is a service of the Net crate that automatically updates the DNS server list mgr: net::protocols::DnsServerManager, socket: UdpSocket, buf: [u8; DNS_PKT_MAX_LEN], trng: trng::Trng, freeze: bool, } impl Resolver { pub fn new(xns: &xous_names::XousNames) -> Resolver { let trng = trng::Trng::new(&xns).unwrap(); let local_port = (49152 + trng.get_u32().unwrap() % 16384) as u16; let socket = UdpSocket::bind( format!("0.0.0.0:{}", local_port), ) .expect("couldn't create socket for DNS resolver"); let timeout = Duration::from_millis(10_000); // 10 seconds for DNS to resolve by default socket.set_read_timeout(Some(timeout)).unwrap(); socket.set_nonblocking(false).unwrap(); // we want this to block. // we /could/ do a non-blocking DNS resolver, but...what would you do in the meantime?? // blocking is probably what we actually want this time. Resolver { mgr: net::protocols::DnsServerManager::register(&xns) .expect("Couldn't register the DNS server list auto-manager"), socket, buf: [0; DNS_PKT_MAX_LEN], trng, freeze: false, } } pub fn add_server(&mut self, addr: IpAddr) { self.mgr.add_server(addr); } pub fn remove_server(&mut self, addr: IpAddr) { self.mgr.remove_server(addr); } pub fn clear_all_servers(&mut self) { self.mgr.clear(); } pub fn
(&mut self, freeze: bool) { self.freeze = freeze; self.mgr.set_freeze(freeze); } pub fn get_freeze(&self) -> bool { self.freeze } /// this allows us to re-use the TRNG object pub fn trng_u32(&self) -> u32 { self.trng.get_u32().unwrap() } pub fn resolve(&mut self, name: &str) -> Result<HashMap<IpAddr, u32>, DnsResponseCode> { if let Some(dns_address) = self.mgr.get_random() { let dns_port = 53; let server = SocketAddr::new(dns_address, dns_port); let qname = name; let qtype = QueryType::A; let qclass = QueryClass::IN; let query = Message::query(qname, qtype, qclass, self.trng.get_u32().unwrap() as u16); self.socket .send_to(&query.datagram, &server) .map_err(|_| DnsResponseCode::NetworkError)?; match self.socket.recv(&mut self.buf) { Ok(len) => { let message = Message::from(&self.buf[..len]); if message.id() == query.id() && message.is_response() { return match message.rcode() { DnsResponseCode::NoError => message.parse_response(), rcode => Err(rcode), }; } else { Err(DnsResponseCode::NetworkError) } } Err(e) => match e.kind() { ErrorKind::WouldBlock => Err(DnsResponseCode::NetworkError), _ => Err(DnsResponseCode::UnknownError), }, } } else { Err(DnsResponseCode::NoServerSpecified) } } } #[derive(PartialEq, Debug)] #[repr(C)] enum NameConversionError { /// The length of the memory buffer was invalid InvalidMemoryBuffer = 1, /// The specified nameserver string was not UTF-8 InvalidString = 3, /// The message was not a mutable memory message InvalidMessageType = 4, } fn name_from_msg(env: &xous::MessageEnvelope) -> Result<&str, NameConversionError> { let msg = env .body .memory_message() .ok_or(NameConversionError::InvalidMessageType)?; let valid_bytes = msg.valid.map(|v| v.get()).unwrap_or_else(|| msg.buf.len()); if valid_bytes > DNS_NAME_LENGTH_LIMIT || valid_bytes < 1 { log::error!("valid bytes exceeded DNS name limit"); return Err(NameConversionError::InvalidMemoryBuffer); } // Safe because we've already validated that it's a valid range let str_slice = unsafe { core::slice::from_raw_parts(msg.buf.as_ptr(), valid_bytes) }; let name_string = core::str::from_utf8(str_slice).map_err(|_| NameConversionError::InvalidString)?; Ok(name_string) } fn fill_response(mut env: xous::MessageEnvelope, entries: &HashMap<IpAddr, u32>) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); // First tag = 1 for "Error" -- we'll fill this in at the end when it's successful *i.next()? = 1; // Limit the number of entries to 128, which is a nice number. Given that an IPv6 // address is 17 bytes, that means that ~240 IPv6 addresses will fit in a 4 kB page. // 128 is just a conservative value rounded down. let mut entry_count = entries.len(); if entry_count > 128 { entry_count = 128; } *i.next()? = entry_count.try_into().ok()?; // Start filling in the addreses for addr in entries.keys() { match addr { &IpAddr::V4(a) => { // IPv4 *i.next()? = 4; for entry in a.octets() { *i.next()? = entry; } } &IpAddr::V6(a) => { // IPv6 for entry in a.octets() { *i.next()? = entry; } *i.next()? = 6; } } } // Convert the entry to a "Success" message drop(i); s[0] = 0; None } fn fill_error(mut env: xous::MessageEnvelope, code: DnsResponseCode) -> Option<()> { let mem = env.body.memory_message_mut()?; let s: &mut [u8] = mem.buf.as_slice_mut(); let mut i = s.iter_mut(); *i.next()? = 1; *i.next()? = code as u8; None } fn main() ->! 
{ log_server::init_wait().unwrap(); log::set_max_level(log::LevelFilter::Info); log::info!("my PID is {}", xous::process::id()); // Time is stuck in the DNS crate because the status crate is out of resources, and the DNS // crate is fairly under-utilized and ideal for sticking a service like time in it. // // this kicks off the thread that services the `libstd` calls for time-related things. // we want this started really early, because it sanity checks the RTC and a bunch of other stuff. time::start_time_server(); time::start_time_ux(); let xns = xous_names::XousNames::new().unwrap(); let dns_sid = xns .register_name(api::SERVER_NAME_DNS, None) .expect("can't register server"); log::trace!("registered with NS -- {:?}", dns_sid); // this will magically populate a list of DNS servers when they become available let mut resolver = Resolver::new(&xns); // if you wanted to force a server into the initial config, you can do it here, for example: // resolver.add_server(IpAddr::V4(Ipv4Addr::new(1,1,1,1))); // the `u32` value is the TTL of the IpAddr let mut dns_cache = HashMap::<std::string::String, HashMap<IpAddr, u32>>::new(); // build a thread that pings the UpdateTtl function once every few minutes to expire the DNS cache thread::spawn({ let local_cid = xous::connect(dns_sid).unwrap(); move || { const TTL_INTERVAL_SECS: usize = 300; // every 5 minutes update the map let tt = ticktimer_server::Ticktimer::new().unwrap(); loop { tt.sleep_ms(TTL_INTERVAL_SECS * 1000).unwrap(); xous::send_message( local_cid, xous::Message::new_scalar( Opcode::UpdateTtl.to_usize().unwrap(), TTL_INTERVAL_SECS, 0, 0, 0, ), ) .expect("couldn't increment DNS cache"); } } }); log::trace!("ready to accept requests"); loop { let mut msg = xous::receive_message(dns_sid).unwrap(); match FromPrimitive::from_usize(msg.body.id()) { Some(Opcode::RawLookup) => { match name_from_msg(&msg).map(|s| s.to_owned()) { Ok(owned_name) => { // handle the special case of "localhost" as a string if owned_name == "localhost" { let mut local = HashMap::<IpAddr, u32>::new(); local.insert(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 86400); fill_response(msg, &local); continue; } log::trace!("performing a lookup of {}", owned_name); // Try to get the result out of the DNS cache if let Some(entries) = dns_cache.get(&owned_name) { fill_response(msg, entries); continue; } // This entry is not in the cache, so perform a lookup match resolver.resolve(&owned_name) { Ok(cache_entry) => { fill_response(msg, &cache_entry); dns_cache.insert(owned_name, cache_entry); continue; }
set_freeze_config
identifier_name
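One line of Resolver::new above deserves a note: 49152 + (trng % 16384) picks a source port uniformly from 49152..=65535, which is exactly the IANA dynamic/ephemeral range. A quick bounds check of that arithmetic (the helper name is illustrative):

fn ephemeral_port(r: u32) -> u16 {
    (49152 + r % 16384) as u16
}

fn main() {
    assert_eq!(ephemeral_port(0), 49152);     // bottom of the dynamic range
    assert_eq!(ephemeral_port(16383), 65535); // top of the dynamic range
    println!("ok");
}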
sudoku_server_basic.rs
use std::future::Future; use bytes::Buf; use bytes::BytesMut; use log::error; use log::info; use recipes::shutdown::Shutdown; use subslice::SubsliceExt; use thiserror::Error; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::io::BufWriter; use tokio::runtime::Builder; use tokio::signal; use tokio::sync::mpsc; use tokio::time; use tokio::{ net::{TcpListener, TcpStream}, sync::broadcast, }; fn main() -> anyhow::Result<()> { let thread_rt = Builder::new_multi_thread() .worker_threads(4) .thread_name("sudoku-server") .enable_io() .enable_time() .build()?; thread_rt.block_on(async move { env_logger::init(); let port = 9981; let listener = TcpListener::bind(&format!("0.0.0.0:{}", port)).await; info!("sudoku server start listening: {}", port); // if let Ok(listener) = listener { // let _ = run(listener, signal::ctrl_c()).await; // } match listener { Ok(l) => { let _ = run(l, signal::ctrl_c()).await; } Err(err) => { error!("bind address[0.0.0.0:{}] error, cause: {}", port, err); } } }); Ok(()) } #[derive(Error, Debug)] enum Error { #[error("ProtocolError")] ProtocolError, #[error("IOError: {0}")] IOError(#[from] std::io::Error), #[error("ConnectionError: {0}")] ConnectionError(&'static str), } #[derive(Debug)] struct Listener { listener: TcpListener, notify_shutdown: broadcast::Sender<()>, shutdown_complete_rx: mpsc::Receiver<()>, shutdown_complete_tx: mpsc::Sender<()>, } impl Listener { async fn run(&mut self) -> anyhow::Result<()> { info!("accepting inbound connections"); loop { let socket = self.accept().await?; let mut handler = Handler { connection: Connection::new(socket), shutdown: Shutdown::new(self.notify_shutdown.subscribe()), _shutdown_complete: self.shutdown_complete_tx.clone(), }; // handler reader tokio::spawn(async move { if let Err(err) = handler.run().await { error!("read error: {}", err); } }); } } async fn accept(&mut self) -> anyhow::Result<TcpStream> { let mut backoff = 1; // Try to accept a few times loop { // Perform the accept operation. If a socket is successfully // accepted, return it. Otherwise, save the error. match self.listener.accept().await { Ok((socket, _peer)) => { // info!("peer: {} connected.", peer); return Ok(socket); } Err(err) => { if backoff > 64 { // Accept has failed too many times. Return the error. return Err(err.into()); } } } // Pause execution until the back off period elapses. time::sleep(time::Duration::from_secs(backoff)).await; // Double the back off backoff *= 2; } } } #[derive(Debug)] struct Connection { stream: BufWriter<TcpStream>, buffer: BytesMut, } impl Connection { /// Create a new `Connection`, backed by `socket`. Read and write buffers /// are initialized. pub fn new(socket: TcpStream) -> Connection { // let (reader, writer) = tokio::io::split(socket); Connection { stream: BufWriter::new(socket), // writer: Arc::new(Mutex::new(BufWriter::new(writer))), // Default to a 4KB read buffer. For the use case of mini redis, // this is fine. However, real applications will want to tune this // value to their specific use case. There is a high likelihood that // a larger read buffer will work better. buffer: BytesMut::with_capacity(4 * 1024), } } async fn
(&mut self) -> anyhow::Result<Option<Frame>> { loop { // Attempt to parse a frame from the buffered data. If enough data // has been buffered, the frame is returned. if let Some(frame) = self.parse_frame()? { return Ok(Some(frame)); } // There is not enough buffered data to read a frame. Attempt to // read more data from the socket. // // On success, the number of bytes is returned. `0` indicates "end // of stream". if 0 == self.stream.read_buf(&mut self.buffer).await? { // The remote closed the connection. For this to be a clean // shutdown, there should be no data in the read buffer. If // there is, this means that the peer closed the socket while // sending a frame. if self.buffer.is_empty() { return Ok(None); } else { return Err(Error::ConnectionError("connection reset by peer").into()); } } } } async fn send_result(&mut self, id: Option<&str>, ans: &str) -> anyhow::Result<()> { if let Some(id) = id { self.stream.write_all(id.as_bytes()).await?; self.stream.write_u8(b':').await?; } self.stream.write_all(ans.as_bytes()).await?; self.stream.write_all(b"\r\n").await?; self.stream.flush().await?; Ok(()) } // frame id:puzzle\r\n or puzzle\r\n fn parse_frame(&mut self) -> anyhow::Result<Option<Frame>> { // let mut buf = Cursor::new(&self.buffer[..]); let line_end = match self.buffer.find(b"\r\n") { Some(end) => end, None => return Ok(None), }; let mut parts = self.buffer[..line_end].split(|c| c == &b':'); // let vec = parts.into_iter().collect(); let maybe_id_or_puzzle = parts.next().ok_or(Error::ProtocolError)?; let maybe_id_or_puzzle = std::str::from_utf8(maybe_id_or_puzzle)?.to_string(); let maybe_puzzle = parts.next(); if maybe_puzzle.is_none() { return Ok(Some(Frame { id: None, puzzle: maybe_id_or_puzzle, })); } let puzzle = std::str::from_utf8(maybe_puzzle.unwrap())?.to_string(); if!parts.next().is_none() { return Err(Error::ProtocolError.into()); } self.buffer.advance(line_end + 2); Ok(Some(Frame { id: Some(maybe_id_or_puzzle), puzzle, })) } } #[derive(Debug)] struct Result { id: Option<String>, ans: String, } #[derive(Debug)] struct Frame { id: Option<String>, puzzle: String, } #[derive(Debug)] struct Handler { connection: Connection, shutdown: Shutdown, _shutdown_complete: mpsc::Sender<()>, } impl Handler { async fn run(&mut self) -> anyhow::Result<()> { // As long as the shutdown signal has not been received, try to read a // new request frame. while!self.shutdown.is_shutdown() { let maybe_frame = tokio::select! { res = self.connection.read_frame() => res?, _ = self.shutdown.recv() => { // If a shutdown signal is received, return from `run`. // This will result in the task terminating. return Ok(()); } }; // If `None` is returned from `read_frame()` then the peer closed // the socket. There is no further work to do and the task can be // terminated. let frame = match maybe_frame { Some(frame) => frame, None => return Ok(()), }; // get ans let ans = sudoku_resolve(&frame.puzzle); // let id = frame.puzzle.clone(); // let id = frame.id.clone(); // let ans = task::spawn_blocking(move || { // sudoku_resolve(&frame.puzzle) // }).await?; // self.connection.send_result(id.as_deref(), &ans).await?; self.connection.send_result(frame.id.as_deref(), &ans).await?; } Ok(()) } } pub async fn run(listener: TcpListener, shutdown: impl Future) -> anyhow::Result<()> { let (notify_shutdown, _) = broadcast::channel(1); let (shutdown_complete_tx, shutdown_complete_rx) = mpsc::channel(1); let mut server = Listener { listener, notify_shutdown, shutdown_complete_tx, shutdown_complete_rx, }; tokio::select! 
{
        res = server.run() => {
            // If an error is received here, accepting connections from the TCP
            // listener failed multiple times and the server is giving up and
            // shutting down.
            //
            // Errors encountered when handling individual connections do not
            // bubble up to this point.
            if let Err(err) = res {
                error!("failed to accept, cause: {}", err);
            }
        }
        _ = shutdown => {
            // The shutdown signal has been received.
            info!("shutting down");
        }
    }

    let Listener {
        mut shutdown_complete_rx,
        shutdown_complete_tx,
        notify_shutdown,
        ..
    } = server;

    // When `notify_shutdown` is dropped, all tasks which have `subscribe`d will
    // receive the shutdown signal and can exit.
    drop(notify_shutdown);

    // Drop the final `Sender` so the `Receiver` below can complete.
    drop(shutdown_complete_tx);

    // Wait for all active connections to finish processing. As the `Sender`
    // handle held by the listener has been dropped above, the only remaining
    // `Sender` instances are held by connection handler tasks. When those drop,
    // the `mpsc` channel will close and `recv()` will return `None`.
    let _ = shutdown_complete_rx.recv().await;

    Ok(())
}

#[inline]
fn sudoku_resolve(req: &str) -> String {
    recipes::sudoku::sudoku_resolve(req)
}
read_frame
identifier_name
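The records above all exercise the same small line-oriented protocol: a request is `puzzle\r\n` or `id:puzzle\r\n`, and the reply echoes the optional id before the answer. The client below is an illustrative sketch, not part of the dataset; the address and port match the server's `main` (0.0.0.0:9981), while the request id and the all-zeros placeholder puzzle are assumptions.

// Hypothetical client for the `id:puzzle\r\n` line protocol.
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut stream = TcpStream::connect("127.0.0.1:9981").await?;

    // Tag the request with an id so the reply can be correlated.
    let puzzle = "0".repeat(81); // placeholder puzzle string
    stream.write_all(format!("req-1:{}\r\n", puzzle).as_bytes()).await?;

    // Read one response line. A robust client would buffer and re-read
    // until it sees the terminating CRLF, like `Connection::read_frame`.
    let mut buf = vec![0u8; 4096];
    let n = stream.read(&mut buf).await?;
    println!("{}", String::from_utf8_lossy(&buf[..n]));
    Ok(())
}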
sudoku_server_basic.rs
use std::future::Future; use bytes::Buf; use bytes::BytesMut; use log::error; use log::info; use recipes::shutdown::Shutdown; use subslice::SubsliceExt; use thiserror::Error; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::io::BufWriter; use tokio::runtime::Builder; use tokio::signal; use tokio::sync::mpsc; use tokio::time; use tokio::{ net::{TcpListener, TcpStream}, sync::broadcast, }; fn main() -> anyhow::Result<()> { let thread_rt = Builder::new_multi_thread() .worker_threads(4) .thread_name("sudoku-server") .enable_io() .enable_time() .build()?; thread_rt.block_on(async move { env_logger::init(); let port = 9981; let listener = TcpListener::bind(&format!("0.0.0.0:{}", port)).await; info!("sudoku server start listening: {}", port); // if let Ok(listener) = listener { // let _ = run(listener, signal::ctrl_c()).await; // } match listener { Ok(l) => { let _ = run(l, signal::ctrl_c()).await; } Err(err) => { error!("bind address[0.0.0.0:{}] error, cause: {}", port, err); } } }); Ok(()) } #[derive(Error, Debug)] enum Error { #[error("ProtocolError")] ProtocolError, #[error("IOError: {0}")] IOError(#[from] std::io::Error), #[error("ConnectionError: {0}")] ConnectionError(&'static str), } #[derive(Debug)] struct Listener { listener: TcpListener, notify_shutdown: broadcast::Sender<()>, shutdown_complete_rx: mpsc::Receiver<()>, shutdown_complete_tx: mpsc::Sender<()>, } impl Listener { async fn run(&mut self) -> anyhow::Result<()> { info!("accepting inbound connections"); loop { let socket = self.accept().await?; let mut handler = Handler { connection: Connection::new(socket), shutdown: Shutdown::new(self.notify_shutdown.subscribe()), _shutdown_complete: self.shutdown_complete_tx.clone(), }; // handler reader tokio::spawn(async move { if let Err(err) = handler.run().await { error!("read error: {}", err); } }); } } async fn accept(&mut self) -> anyhow::Result<TcpStream> { let mut backoff = 1; // Try to accept a few times loop { // Perform the accept operation. If a socket is successfully // accepted, return it. Otherwise, save the error. match self.listener.accept().await { Ok((socket, _peer)) => { // info!("peer: {} connected.", peer); return Ok(socket); } Err(err) => { if backoff > 64 { // Accept has failed too many times. Return the error. return Err(err.into()); } } } // Pause execution until the back off period elapses. time::sleep(time::Duration::from_secs(backoff)).await; // Double the back off backoff *= 2; } } } #[derive(Debug)] struct Connection { stream: BufWriter<TcpStream>, buffer: BytesMut, } impl Connection { /// Create a new `Connection`, backed by `socket`. Read and write buffers /// are initialized. pub fn new(socket: TcpStream) -> Connection { // let (reader, writer) = tokio::io::split(socket); Connection { stream: BufWriter::new(socket), // writer: Arc::new(Mutex::new(BufWriter::new(writer))), // Default to a 4KB read buffer. For the use case of mini redis, // this is fine. However, real applications will want to tune this // value to their specific use case. There is a high likelihood that // a larger read buffer will work better. buffer: BytesMut::with_capacity(4 * 1024), } } async fn read_frame(&mut self) -> anyhow::Result<Option<Frame>> { loop { // Attempt to parse a frame from the buffered data. If enough data // has been buffered, the frame is returned. if let Some(frame) = self.parse_frame()? { return Ok(Some(frame)); } // There is not enough buffered data to read a frame. Attempt to // read more data from the socket. 
//
            // On success, the number of bytes is returned. `0` indicates "end
            // of stream".
            if 0 == self.stream.read_buf(&mut self.buffer).await? {
                // The remote closed the connection. For this to be a clean
                // shutdown, there should be no data in the read buffer. If
                // there is, this means that the peer closed the socket while
                // sending a frame.
                if self.buffer.is_empty() {
                    return Ok(None);
                } else {
                    return Err(Error::ConnectionError("connection reset by peer").into());
                }
            }
        }
    }

    async fn send_result(&mut self, id: Option<&str>, ans: &str) -> anyhow::Result<()> {
        if let Some(id) = id {
            self.stream.write_all(id.as_bytes()).await?;
            self.stream.write_u8(b':').await?;
        }
        self.stream.write_all(ans.as_bytes()).await?;
        self.stream.write_all(b"\r\n").await?;
        self.stream.flush().await?;
        Ok(())
    }

    // A frame is either `id:puzzle\r\n` or `puzzle\r\n`.
    fn parse_frame(&mut self) -> anyhow::Result<Option<Frame>> {
        let line_end = match self.buffer.find(b"\r\n") {
            Some(end) => end,
            None => return Ok(None),
        };

        let mut parts = self.buffer[..line_end].split(|c| c == &b':');

        let maybe_id_or_puzzle = parts.next().ok_or(Error::ProtocolError)?;
        let maybe_id_or_puzzle = std::str::from_utf8(maybe_id_or_puzzle)?.to_string();

        let maybe_puzzle = parts.next();
        if maybe_puzzle.is_none() {
            // Consume the line before returning the frame; without this the
            // same un-tagged frame would be parsed again on every call.
            self.buffer.advance(line_end + 2);
            return Ok(Some(Frame {
                id: None,
                puzzle: maybe_id_or_puzzle,
            }));
        }

        let puzzle = std::str::from_utf8(maybe_puzzle.unwrap())?.to_string();
        if parts.next().is_some() {
            return Err(Error::ProtocolError.into());
        }

        self.buffer.advance(line_end + 2);
        Ok(Some(Frame {
            id: Some(maybe_id_or_puzzle),
            puzzle,
        }))
    }
}

// Unused here; note it shadows `std::result::Result` within this module.
#[derive(Debug)]
struct Result {
    id: Option<String>,
    ans: String,
}

#[derive(Debug)]
struct Frame {
    id: Option<String>,
    puzzle: String,
}

#[derive(Debug)]
struct Handler {
    connection: Connection,
    shutdown: Shutdown,
    _shutdown_complete: mpsc::Sender<()>,
}

impl Handler {
    async fn run(&mut self) -> anyhow::Result<()> {
        // As long as the shutdown signal has not been received, try to read a
        // new request frame.
        while !self.shutdown.is_shutdown() {
            let maybe_frame = tokio::select! {
                res = self.connection.read_frame() => res?,
                _ = self.shutdown.recv() => {
                    // If a shutdown signal is received, return from `run`.
                    // This will result in the task terminating.
                    return Ok(());
                }
            };

            // If `None` is returned from `read_frame()` then the peer closed
            // the socket. There is no further work to do and the task can be
            // terminated.
            let frame = match maybe_frame {
                Some(frame) => frame,
                None => return Ok(()),
            };

            // Get the answer. A CPU-heavy solver could be offloaded instead:
            // let ans = task::spawn_blocking(move || {
            //     sudoku_resolve(&frame.puzzle)
            // }).await?;
            let ans = sudoku_resolve(&frame.puzzle);

            self.connection.send_result(frame.id.as_deref(), &ans).await?;
        }

        Ok(())
    }
}

pub async fn run(listener: TcpListener, shutdown: impl Future) -> anyhow::Result<()> {
    let (notify_shutdown, _) = broadcast::channel(1);
    let (shutdown_complete_tx, shutdown_complete_rx) = mpsc::channel(1);

    let mut server = Listener {
        listener,
        notify_shutdown,
        shutdown_complete_tx,
        shutdown_complete_rx,
    };

    tokio::select! {
        res = server.run() => {
            // If an error is received here, accepting connections from the TCP
            // listener failed multiple times and the server is giving up and
            // shutting down.
            //
            // Errors encountered when handling individual connections do not
            // bubble up to this point.
if let Err(err) = res {
                error!("failed to accept, cause: {}", err);
            }
        }
        _ = shutdown => {
            // The shutdown signal has been received.
            info!("shutting down");
        }
    }

    let Listener {
        mut shutdown_complete_rx,
        shutdown_complete_tx,
        notify_shutdown,
        ..
    } = server;

    // When `notify_shutdown` is dropped, all tasks which have `subscribe`d will
    // receive the shutdown signal and can exit.
    drop(notify_shutdown);

    // Drop the final `Sender` so the `Receiver` below can complete.
    drop(shutdown_complete_tx);

    // Wait for all active connections to finish processing. As the `Sender`
    // handle held by the listener has been dropped above, the only remaining
    // `Sender` instances are held by connection handler tasks. When those drop,
#[inline]
fn sudoku_resolve(req: &str) -> String {
    recipes::sudoku::sudoku_resolve(req)
}
// the `mpsc` channel will close and `recv()` will return `None`.
    let _ = shutdown_complete_rx.recv().await;

    Ok(())
}
random_line_split
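For reference, the framing rules implemented by `parse_frame` can be checked in isolation: a line holds at most one `:`, with anything before it treated as the id. This standalone sketch (names are illustrative, not from the dataset) mirrors the `ProtocolError` case for extra separators.

// Split one CRLF-delimited line into (optional id, puzzle).
fn split_line(line: &[u8]) -> Result<(Option<&str>, &str), &'static str> {
    let mut parts = line.split(|c| *c == b':');
    let first = parts.next().ok_or("empty line")?;
    let first = std::str::from_utf8(first).map_err(|_| "invalid utf8")?;
    match parts.next() {
        None => Ok((None, first)),
        Some(second) => {
            if parts.next().is_some() {
                return Err("more than one ':'"); // like Error::ProtocolError
            }
            let second = std::str::from_utf8(second).map_err(|_| "invalid utf8")?;
            Ok((Some(first), second))
        }
    }
}

fn main() {
    assert_eq!(split_line(b"530070000").unwrap(), (None, "530070000"));
    assert_eq!(split_line(b"req-1:530070000").unwrap(), (Some("req-1"), "530070000"));
    assert!(split_line(b"a:b:c").is_err());
}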
sudoku_server_basic.rs
use std::future::Future; use bytes::Buf; use bytes::BytesMut; use log::error; use log::info; use recipes::shutdown::Shutdown; use subslice::SubsliceExt; use thiserror::Error; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::io::BufWriter; use tokio::runtime::Builder; use tokio::signal; use tokio::sync::mpsc; use tokio::time; use tokio::{ net::{TcpListener, TcpStream}, sync::broadcast, }; fn main() -> anyhow::Result<()> { let thread_rt = Builder::new_multi_thread() .worker_threads(4) .thread_name("sudoku-server") .enable_io() .enable_time() .build()?; thread_rt.block_on(async move { env_logger::init(); let port = 9981; let listener = TcpListener::bind(&format!("0.0.0.0:{}", port)).await; info!("sudoku server start listening: {}", port); // if let Ok(listener) = listener { // let _ = run(listener, signal::ctrl_c()).await; // } match listener { Ok(l) => { let _ = run(l, signal::ctrl_c()).await; } Err(err) => { error!("bind address[0.0.0.0:{}] error, cause: {}", port, err); } } }); Ok(()) } #[derive(Error, Debug)] enum Error { #[error("ProtocolError")] ProtocolError, #[error("IOError: {0}")] IOError(#[from] std::io::Error), #[error("ConnectionError: {0}")] ConnectionError(&'static str), } #[derive(Debug)] struct Listener { listener: TcpListener, notify_shutdown: broadcast::Sender<()>, shutdown_complete_rx: mpsc::Receiver<()>, shutdown_complete_tx: mpsc::Sender<()>, } impl Listener { async fn run(&mut self) -> anyhow::Result<()> { info!("accepting inbound connections"); loop { let socket = self.accept().await?; let mut handler = Handler { connection: Connection::new(socket), shutdown: Shutdown::new(self.notify_shutdown.subscribe()), _shutdown_complete: self.shutdown_complete_tx.clone(), }; // handler reader tokio::spawn(async move { if let Err(err) = handler.run().await { error!("read error: {}", err); } }); } } async fn accept(&mut self) -> anyhow::Result<TcpStream> { let mut backoff = 1; // Try to accept a few times loop { // Perform the accept operation. If a socket is successfully // accepted, return it. Otherwise, save the error. match self.listener.accept().await { Ok((socket, _peer)) => { // info!("peer: {} connected.", peer); return Ok(socket); } Err(err) => { if backoff > 64 { // Accept has failed too many times. Return the error. return Err(err.into()); } } } // Pause execution until the back off period elapses. time::sleep(time::Duration::from_secs(backoff)).await; // Double the back off backoff *= 2; } } } #[derive(Debug)] struct Connection { stream: BufWriter<TcpStream>, buffer: BytesMut, } impl Connection { /// Create a new `Connection`, backed by `socket`. Read and write buffers /// are initialized. pub fn new(socket: TcpStream) -> Connection { // let (reader, writer) = tokio::io::split(socket); Connection { stream: BufWriter::new(socket), // writer: Arc::new(Mutex::new(BufWriter::new(writer))), // Default to a 4KB read buffer. For the use case of mini redis, // this is fine. However, real applications will want to tune this // value to their specific use case. There is a high likelihood that // a larger read buffer will work better. buffer: BytesMut::with_capacity(4 * 1024), } } async fn read_frame(&mut self) -> anyhow::Result<Option<Frame>> { loop { // Attempt to parse a frame from the buffered data. If enough data // has been buffered, the frame is returned. if let Some(frame) = self.parse_frame()? { return Ok(Some(frame)); } // There is not enough buffered data to read a frame. Attempt to // read more data from the socket. 
// // On success, the number of bytes is returned. `0` indicates "end // of stream". if 0 == self.stream.read_buf(&mut self.buffer).await? { // The remote closed the connection. For this to be a clean // shutdown, there should be no data in the read buffer. If // there is, this means that the peer closed the socket while // sending a frame. if self.buffer.is_empty() { return Ok(None); } else { return Err(Error::ConnectionError("connection reset by peer").into()); } } } } async fn send_result(&mut self, id: Option<&str>, ans: &str) -> anyhow::Result<()> { if let Some(id) = id
self.stream.write_all(ans.as_bytes()).await?;
        self.stream.write_all(b"\r\n").await?;
        self.stream.flush().await?;
        Ok(())
    }

    // A frame is either `id:puzzle\r\n` or `puzzle\r\n`.
    fn parse_frame(&mut self) -> anyhow::Result<Option<Frame>> {
        let line_end = match self.buffer.find(b"\r\n") {
            Some(end) => end,
            None => return Ok(None),
        };

        let mut parts = self.buffer[..line_end].split(|c| c == &b':');

        let maybe_id_or_puzzle = parts.next().ok_or(Error::ProtocolError)?;
        let maybe_id_or_puzzle = std::str::from_utf8(maybe_id_or_puzzle)?.to_string();

        let maybe_puzzle = parts.next();
        if maybe_puzzle.is_none() {
            // Consume the line before returning the frame; without this the
            // same un-tagged frame would be parsed again on every call.
            self.buffer.advance(line_end + 2);
            return Ok(Some(Frame {
                id: None,
                puzzle: maybe_id_or_puzzle,
            }));
        }

        let puzzle = std::str::from_utf8(maybe_puzzle.unwrap())?.to_string();
        if parts.next().is_some() {
            return Err(Error::ProtocolError.into());
        }

        self.buffer.advance(line_end + 2);
        Ok(Some(Frame {
            id: Some(maybe_id_or_puzzle),
            puzzle,
        }))
    }
}

// Unused here; note it shadows `std::result::Result` within this module.
#[derive(Debug)]
struct Result {
    id: Option<String>,
    ans: String,
}

#[derive(Debug)]
struct Frame {
    id: Option<String>,
    puzzle: String,
}

#[derive(Debug)]
struct Handler {
    connection: Connection,
    shutdown: Shutdown,
    _shutdown_complete: mpsc::Sender<()>,
}

impl Handler {
    async fn run(&mut self) -> anyhow::Result<()> {
        // As long as the shutdown signal has not been received, try to read a
        // new request frame.
        while !self.shutdown.is_shutdown() {
            let maybe_frame = tokio::select! {
                res = self.connection.read_frame() => res?,
                _ = self.shutdown.recv() => {
                    // If a shutdown signal is received, return from `run`.
                    // This will result in the task terminating.
                    return Ok(());
                }
            };

            // If `None` is returned from `read_frame()` then the peer closed
            // the socket. There is no further work to do and the task can be
            // terminated.
            let frame = match maybe_frame {
                Some(frame) => frame,
                None => return Ok(()),
            };

            // Get the answer. A CPU-heavy solver could be offloaded instead:
            // let ans = task::spawn_blocking(move || {
            //     sudoku_resolve(&frame.puzzle)
            // }).await?;
            let ans = sudoku_resolve(&frame.puzzle);

            self.connection.send_result(frame.id.as_deref(), &ans).await?;
        }

        Ok(())
    }
}

pub async fn run(listener: TcpListener, shutdown: impl Future) -> anyhow::Result<()> {
    let (notify_shutdown, _) = broadcast::channel(1);
    let (shutdown_complete_tx, shutdown_complete_rx) = mpsc::channel(1);

    let mut server = Listener {
        listener,
        notify_shutdown,
        shutdown_complete_tx,
        shutdown_complete_rx,
    };

    tokio::select! {
        res = server.run() => {
            // If an error is received here, accepting connections from the TCP
            // listener failed multiple times and the server is giving up and
            // shutting down.
            //
            // Errors encountered when handling individual connections do not
            // bubble up to this point.
            if let Err(err) = res {
                error!("failed to accept, cause: {}", err);
            }
        }
        _ = shutdown => {
            // The shutdown signal has been received.
            info!("shutting down");
        }
    }

    let Listener {
        mut shutdown_complete_rx,
        shutdown_complete_tx,
        notify_shutdown,
        ..
    } = server;

    // When `notify_shutdown` is dropped, all tasks which have `subscribe`d will
    // receive the shutdown signal and can exit.
    drop(notify_shutdown);

    // Drop the final `Sender` so the `Receiver` below can complete.
    drop(shutdown_complete_tx);

    // Wait for all active connections to finish processing. As the `Sender`
    // handle held by the listener has been dropped above, the only remaining
    // `Sender` instances are held by connection handler tasks.
When those drop,
    // the `mpsc` channel will close and `recv()` will return `None`.
    let _ = shutdown_complete_rx.recv().await;

    Ok(())
}

#[inline]
fn sudoku_resolve(req: &str) -> String {
    recipes::sudoku::sudoku_resolve(req)
}
{
            self.stream.write_all(id.as_bytes()).await?;
            self.stream.write_u8(b':').await?;
        }
conditional_block
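`Listener::accept` retries failed accepts with exponential backoff, doubling the pause and giving up once it exceeds a cap. The same shape in isolation, as a generic helper: this is an illustrative sketch, and the one-second base delay, the cap of 64, and the demo operation are assumptions, not part of the original file.

use std::time::Duration;

// Retry an async fallible operation with exponential backoff.
async fn retry_backoff<T, E, F, Fut>(mut op: F) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let mut backoff = 1u64;
    loop {
        match op().await {
            Ok(v) => return Ok(v),
            // Failed too many times: surface the error, like accept().
            Err(e) if backoff > 64 => return Err(e),
            Err(_) => {
                tokio::time::sleep(Duration::from_secs(backoff)).await;
                backoff *= 2; // double the pause between attempts
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let mut tries = 0u32;
    let res: Result<u32, &str> = retry_backoff(|| {
        tries += 1;
        let ok = tries >= 3; // hypothetical op that succeeds on the third try
        async move { if ok { Ok(42) } else { Err("try again") } }
    })
    .await;
    println!("{:?}", res);
}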
sudoku_server_basic.rs
use std::future::Future; use bytes::Buf; use bytes::BytesMut; use log::error; use log::info; use recipes::shutdown::Shutdown; use subslice::SubsliceExt; use thiserror::Error; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::io::BufWriter; use tokio::runtime::Builder; use tokio::signal; use tokio::sync::mpsc; use tokio::time; use tokio::{ net::{TcpListener, TcpStream}, sync::broadcast, }; fn main() -> anyhow::Result<()> { let thread_rt = Builder::new_multi_thread() .worker_threads(4) .thread_name("sudoku-server") .enable_io() .enable_time() .build()?; thread_rt.block_on(async move { env_logger::init(); let port = 9981; let listener = TcpListener::bind(&format!("0.0.0.0:{}", port)).await; info!("sudoku server start listening: {}", port); // if let Ok(listener) = listener { // let _ = run(listener, signal::ctrl_c()).await; // } match listener { Ok(l) => { let _ = run(l, signal::ctrl_c()).await; } Err(err) => { error!("bind address[0.0.0.0:{}] error, cause: {}", port, err); } } }); Ok(()) } #[derive(Error, Debug)] enum Error { #[error("ProtocolError")] ProtocolError, #[error("IOError: {0}")] IOError(#[from] std::io::Error), #[error("ConnectionError: {0}")] ConnectionError(&'static str), } #[derive(Debug)] struct Listener { listener: TcpListener, notify_shutdown: broadcast::Sender<()>, shutdown_complete_rx: mpsc::Receiver<()>, shutdown_complete_tx: mpsc::Sender<()>, } impl Listener { async fn run(&mut self) -> anyhow::Result<()> { info!("accepting inbound connections"); loop { let socket = self.accept().await?; let mut handler = Handler { connection: Connection::new(socket), shutdown: Shutdown::new(self.notify_shutdown.subscribe()), _shutdown_complete: self.shutdown_complete_tx.clone(), }; // handler reader tokio::spawn(async move { if let Err(err) = handler.run().await { error!("read error: {}", err); } }); } } async fn accept(&mut self) -> anyhow::Result<TcpStream> { let mut backoff = 1; // Try to accept a few times loop { // Perform the accept operation. If a socket is successfully // accepted, return it. Otherwise, save the error. match self.listener.accept().await { Ok((socket, _peer)) => { // info!("peer: {} connected.", peer); return Ok(socket); } Err(err) => { if backoff > 64 { // Accept has failed too many times. Return the error. return Err(err.into()); } } } // Pause execution until the back off period elapses. time::sleep(time::Duration::from_secs(backoff)).await; // Double the back off backoff *= 2; } } } #[derive(Debug)] struct Connection { stream: BufWriter<TcpStream>, buffer: BytesMut, } impl Connection { /// Create a new `Connection`, backed by `socket`. Read and write buffers /// are initialized. pub fn new(socket: TcpStream) -> Connection { // let (reader, writer) = tokio::io::split(socket); Connection { stream: BufWriter::new(socket), // writer: Arc::new(Mutex::new(BufWriter::new(writer))), // Default to a 4KB read buffer. For the use case of mini redis, // this is fine. However, real applications will want to tune this // value to their specific use case. There is a high likelihood that // a larger read buffer will work better. buffer: BytesMut::with_capacity(4 * 1024), } } async fn read_frame(&mut self) -> anyhow::Result<Option<Frame>> { loop { // Attempt to parse a frame from the buffered data. If enough data // has been buffered, the frame is returned. if let Some(frame) = self.parse_frame()? { return Ok(Some(frame)); } // There is not enough buffered data to read a frame. Attempt to // read more data from the socket. 
//
            // On success, the number of bytes is returned. `0` indicates "end
            // of stream".
            if 0 == self.stream.read_buf(&mut self.buffer).await? {
                // The remote closed the connection. For this to be a clean
                // shutdown, there should be no data in the read buffer. If
                // there is, this means that the peer closed the socket while
                // sending a frame.
                if self.buffer.is_empty() {
                    return Ok(None);
                } else {
                    return Err(Error::ConnectionError("connection reset by peer").into());
                }
            }
        }
    }

    async fn send_result(&mut self, id: Option<&str>, ans: &str) -> anyhow::Result<()> {
        if let Some(id) = id {
            self.stream.write_all(id.as_bytes()).await?;
            self.stream.write_u8(b':').await?;
        }
        self.stream.write_all(ans.as_bytes()).await?;
        self.stream.write_all(b"\r\n").await?;
        self.stream.flush().await?;
        Ok(())
    }

    // A frame is either `id:puzzle\r\n` or `puzzle\r\n`.
    fn parse_frame(&mut self) -> anyhow::Result<Option<Frame>> {
        let line_end = match self.buffer.find(b"\r\n") {
            Some(end) => end,
            None => return Ok(None),
        };

        let mut parts = self.buffer[..line_end].split(|c| c == &b':');

        let maybe_id_or_puzzle = parts.next().ok_or(Error::ProtocolError)?;
        let maybe_id_or_puzzle = std::str::from_utf8(maybe_id_or_puzzle)?.to_string();

        let maybe_puzzle = parts.next();
        if maybe_puzzle.is_none() {
            // Consume the line before returning the frame; without this the
            // same un-tagged frame would be parsed again on every call.
            self.buffer.advance(line_end + 2);
            return Ok(Some(Frame {
                id: None,
                puzzle: maybe_id_or_puzzle,
            }));
        }

        let puzzle = std::str::from_utf8(maybe_puzzle.unwrap())?.to_string();
        if parts.next().is_some() {
            return Err(Error::ProtocolError.into());
        }

        self.buffer.advance(line_end + 2);
        Ok(Some(Frame {
            id: Some(maybe_id_or_puzzle),
            puzzle,
        }))
    }
}

// Unused here; note it shadows `std::result::Result` within this module.
#[derive(Debug)]
struct Result {
    id: Option<String>,
    ans: String,
}

#[derive(Debug)]
struct Frame {
    id: Option<String>,
    puzzle: String,
}

#[derive(Debug)]
struct Handler {
    connection: Connection,
    shutdown: Shutdown,
    _shutdown_complete: mpsc::Sender<()>,
}

impl Handler {
    async fn run(&mut self) -> anyhow::Result<()> {
        // As long as the shutdown signal has not been received, try to read a
        // new request frame.
        while !self.shutdown.is_shutdown() {
            let maybe_frame = tokio::select! {
                res = self.connection.read_frame() => res?,
                _ = self.shutdown.recv() => {
                    // If a shutdown signal is received, return from `run`.
                    // This will result in the task terminating.
                    return Ok(());
                }
            };

            // If `None` is returned from `read_frame()` then the peer closed
            // the socket. There is no further work to do and the task can be
            // terminated.
            let frame = match maybe_frame {
                Some(frame) => frame,
                None => return Ok(()),
            };

            // Get the answer. A CPU-heavy solver could be offloaded instead:
            // let ans = task::spawn_blocking(move || {
            //     sudoku_resolve(&frame.puzzle)
            // }).await?;
            let ans = sudoku_resolve(&frame.puzzle);

            self.connection.send_result(frame.id.as_deref(), &ans).await?;
        }

        Ok(())
    }
}

pub async fn run(listener: TcpListener, shutdown: impl Future) -> anyhow::Result<()>
}
        }
        _ = shutdown => {
            // The shutdown signal has been received.
            info!("shutting down");
        }
    }

    let Listener {
        mut shutdown_complete_rx,
        shutdown_complete_tx,
        notify_shutdown,
        ..
    } = server;

    // When `notify_shutdown` is dropped, all tasks which have `subscribe`d will
    // receive the shutdown signal and can exit.
    drop(notify_shutdown);

    // Drop the final `Sender` so the `Receiver` below can complete.
    drop(shutdown_complete_tx);

    // Wait for all active connections to finish processing. As the `Sender`
    // handle held by the listener has been dropped above, the only remaining
    // `Sender` instances are held by connection handler tasks. When those drop,
    // the `mpsc` channel will close and `recv()` will return `None`.
    let _ = shutdown_complete_rx.recv().await;

    Ok(())
}

#[inline]
fn sudoku_resolve(req: &str) -> String {
    recipes::sudoku::sudoku_resolve(req)
}
{
    let (notify_shutdown, _) = broadcast::channel(1);
    let (shutdown_complete_tx, shutdown_complete_rx) = mpsc::channel(1);

    let mut server = Listener {
        listener,
        notify_shutdown,
        shutdown_complete_tx,
        shutdown_complete_rx,
    };

    tokio::select! {
        res = server.run() => {
            // If an error is received here, accepting connections from the TCP
            // listener failed multiple times and the server is giving up and
            // shutting down.
            //
            // Errors encountered when handling individual connections do not
            // bubble up to this point.
            if let Err(err) = res {
                error!("failed to accept, cause: {}", err);
identifier_body
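The shutdown wiring in `run` relies on two channels: a `broadcast::Sender` whose drop fans the signal out to every subscribed handler, and an `mpsc` channel whose closure proves that every handler has finished and dropped its `Sender` clone. A minimal sketch of that pattern on its own (task count and prints are illustrative):

use tokio::sync::{broadcast, mpsc};

#[tokio::main]
async fn main() {
    let (notify_shutdown, _) = broadcast::channel::<()>(1);
    let (done_tx, mut done_rx) = mpsc::channel::<()>(1);

    for i in 0..3 {
        let mut shutdown = notify_shutdown.subscribe();
        let _done = done_tx.clone(); // dropped when this task ends
        tokio::spawn(async move {
            // recv() returns Err(Closed) once the sender is dropped.
            let _ = shutdown.recv().await;
            println!("task {} exiting", i);
        });
    }

    drop(notify_shutdown); // signal shutdown by closing the broadcast
    drop(done_tx);         // keep only the clones held by the tasks
    let _ = done_rx.recv().await; // None once every clone is dropped
}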
rule_builder.rs
use { crate::{ mir::{self, BlockRef, LValLink}, mir_ext::mark_persistent_recursive, optimizer::{ BuildGroups, ClosureBuilder, GroupBuilder, GroupDyn, GroupSet, MatchMap, OptimizerAspect, SuccBuilder, closure::{Closure, ClosureId, ClosureSeed}, closure_interner::{ClosureInterner}, dfa_builder::{DFABuilder}, }, runtime::{ Grammar, ReduceId, options::Options, }, }, lang_mir::Cursor, north_core::{ compiler::Compiler, iter::ModelIterator, model::{Child, Link, ModelCell}, node_id::{NodeId, ToNodeId}, structure::ForestExt, visitor::{VisitorAspect, VisitCtxCore}, }, std::{ any::Any, cell::RefCell, collections::{HashMap, VecDeque, hash_map::Entry::*}, mem::replace, rc::Rc, } }; //////////////////////////////////////////////////////////////////////////////////////////////// crate type GroupPair = (Box<dyn GroupDyn>, Box<dyn Any>); //////////////////////////////////////////////////////////////////////////////////////////////// crate enum BuildJob { BuildOne { cursor: Cursor, closure: Closure, pair: GroupPair, }, BuildSet { cursor: Cursor, closure: Closure, }, } //////////////////////////////////////////////////////////////////////////////////////////////// // ClosureInterner // Closure -> TransitionTable pub struct RuleBuilder { crate _optimizer: Rc<OptimizerAspect>, crate visitor: Rc<VisitorAspect>, crate output_model: ModelCell, crate options: Options, crate closure_builder: Rc<ClosureBuilder>, crate closure_interner: Rc<ClosureInterner>, crate dfa_builder: DFABuilder, crate grammar: Option<Rc<Grammar>>, crate reduce_ids: Vec<ReduceId>, crate closure_map: HashMap<Closure, Option<BlockRef>>, // IndexMap crate value_map: HashMap<NodeId, NodeId>, crate build_queue: VecDeque<BuildJob>, crate fail_block: Option<BlockRef>, crate num_built: usize, crate origin_matches: MatchMap, crate origin_stmts: Rc<RefCell<Vec<GroupPair>>>, crate rule_ty: Option<Link<mir::TypeFn>>, crate iter_locals: Vec<LValLink>, crate locals: Vec<Child<mir::RuleLocal>>, crate blocks: Vec<Child<mir::Block>>, } impl RuleBuilder { pub fn new(comp: &Compiler, options: Options) -> Self { let visitor = comp.aspect_mut::<VisitorAspect>(); let closure_builder = Rc::new(ClosureBuilder::new(visitor.clone())); let closure_interner = ClosureInterner::new(); let dfa_builder = DFABuilder::new( closure_builder.clone(), closure_interner.clone(), visitor.clone(), ); Self { _optimizer: comp.aspect_mut::<OptimizerAspect>(), visitor: visitor.clone(), output_model: comp.model_cell.clone(), options, closure_builder, closure_interner, dfa_builder, grammar: None, reduce_ids: Vec::new(), closure_map: HashMap::new(), value_map: HashMap::new(), build_queue: VecDeque::new(), fail_block: None, num_built: 0, origin_matches: MatchMap::default(), origin_stmts: <_>::default(), rule_ty: None, iter_locals: Vec::new(), locals: Vec::new(), blocks: Vec::new(), } } pub fn add_rules(&mut self, rules: &Vec<NodeId>) { let seed = { let model = self.visitor.input_model.borrow(); let first_rule = model.get::<_, mir::ItemRule>(rules[0]).unwrap(); self.rule_ty = Some(first_rule.rule_ty.clone()); model.iter(rules) .borrow_cast_nodes_to::<mir::ItemRule>() .filter_map(|r| r.blocks.first().cloned()) .map(|b| model.node(b).first_node()) .collect::<Vec<_>>() }; let mut seed = ClosureSeed::new(seed); seed.follow_calls = true; let _main_block = self.resolve_closure_seed(seed); let fail_seed = ClosureSeed::default(); let fail_block = self.resolve_closure_seed(fail_seed); self.fail_block = Some(fail_block); } pub fn add_value_map<A: ToNodeId, B: ToNodeId>(&mut self, from: A, to: B) { 
self.value_map.insert(from.to_top(), to.to_top()); } pub fn at_origin(&self) -> bool { self.num_built <= 2 } pub fn build( &mut self, grammar: Rc<Grammar>, rules: &Vec<NodeId>, reduce_ids: Vec<ReduceId>, ) -> (String, NodeId<mir::ItemRule>) { assert!(rules.len() >= 1); self.closure_builder.set_grammar(Some(grammar.clone())); self.grammar = Some(grammar); self.reduce_ids = reduce_ids; self.add_rules(rules); while let Some(job) = self.build_queue.pop_front() { match job { BuildJob::BuildOne { cursor, closure, pair } => { // println!("BUILD_ONE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_one(cursor, closure, pair); }, BuildJob::BuildSet { cursor, closure } => { // println!("BUILD_CLOSURE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_set(cursor, closure); self.num_built += 1; }, } } // self.dump_info(); self.closure_builder.set_grammar(None); self.closure_map.clear(); self.grammar = None; self.iter_locals.clear(); self.num_built = 0; self.origin_matches.clear(); self.origin_stmts.borrow_mut().clear(); self.value_map.clear(); let mut model = self.output_model.borrow_mut(); let node_id = model.new_node(); let name = format!("opt_{}", node_id.idx()); //println!("BUILD LOCALS {:?}", self.locals.len()); model.complete_node(node_id, mir::ItemRule { name: name.clone(), rule_ty: self.rule_ty.take().unwrap(), params: Vec::new(), locals: replace(&mut self.locals, Vec::new()), blocks: replace(&mut self.blocks, Vec::new()), }); drop(model); // Mark persistent blocks mark_persistent_recursive(&*self.visitor, node_id); (name, node_id) } pub fn build_one(&mut self, mut cursor: Cursor, closure: Closure, pair: GroupPair) { let (group_k, group_v) = pair; let succ_seed = self.successors(closure); let succ_closure = self.closure(succ_seed); group_k.build_mir_dyn(&*group_v, self, &mut cursor); match (cursor.is_completed(), succ_closure.is_empty()) { (true, true) => { }, (true, false) => { panic!("completed group with successors"); }, (false, true) => { self.fuse_block(cursor); }, (false, false) =>
,
        }
    }

    crate fn build_origin_stmts(&mut self, cursor: &mut Cursor) {
        let stmts = self.origin_stmts.clone();
        let stmts = stmts.borrow();
        for (k, v) in stmts.iter() {
            k.build_mir_dyn(&**v, self, cursor);
        }
    }

    pub fn build_set(&mut self, mut cursor: Cursor, closure: Closure) {
        if closure.is_empty() {
            cursor.build_ctl(mir::CtlFail { });
            return;
        }
        let group_set = self.group(&closure);
        group_set.build_mir(self, cursor);
    }

    pub fn closure(&self, seed: ClosureSeed) -> Closure {
        self.closure_builder.build(seed)
    }

    pub fn dump_info(&self) {
        let mut map = HashMap::new();
        for (closure, _) in &self.closure_map {
            let mut closure_ids = closure.entries.clone();
            closure_ids.sort();
            map.insert(closure_ids, ());
        }
        println!("CLOSURES {:?} vs {:?}", map.len(), self.closure_map.len());
    }

    pub fn fail_block(&self) -> BlockRef {
        self.fail_block.clone().unwrap()
    }

    pub fn fuse_block(&self, mut cur: Cursor) {
        if !cur.is_completed() {
            let fail_block = self.fail_block();
            cur.build_ctl(mir::CtlBr { block: fail_block });
        }
    }

    pub fn group(&mut self, closure: &Closure) -> GroupSet {
        let mut group_builder = GroupBuilder::new();
        let visitor = self.visitor.clone();
        for entry in &closure.entries {
            let node_id = *entry;
            let core = VisitCtxCore::<BuildGroups> { aspect: &*visitor, node_id, imp_args: self };
            let _ = visitor.visit(core, &mut group_builder);
        }
        group_builder.complete()
    }

    pub fn intern_closure(&self, closure: Closure) -> ClosureId {
        self.closure_interner.intern(closure)
    }

    pub fn is_origin_pure(&self) -> bool {
        let incorporate_reductions = self.options.incorporate_reductions;
        let is_pure = self.origin_stmts.borrow().is_empty();
        incorporate_reductions && is_pure
    }

    pub fn is_reduce_external(&self, reduce_id: ReduceId) -> bool {
        self.reduce_ids.contains(&reduce_id)
    }

    pub fn map_local(&mut self, src: NodeId<mir::RuleLocal>) -> NodeId<mir::RuleLocal> {
        let ty = {
            let model = self.visitor.input_model.borrow();
            let node = model.node(src);
            node.ty.clone() // XXX: BAD: type needs to be deep copied/transferred
        };
        let mut model_out = self.output_model.borrow_mut();
        let local_out = model_out.build_node(mir::RuleLocal { ty, index: self.locals.len() });
        self.locals.push(local_out.into());
        local_out
    }

    pub fn map_local_iter(&mut self, src: &LValLink, depth: usize) -> LValLink {
        if let Some(local) = self.iter_locals.get(depth).cloned() {
            self.add_value_map(src, &local);
            return local;
        }
        if depth != self.iter_locals.len() {
            panic!("invalid depth");
        }
        let local = self.map_lval(src);
        self.iter_locals.push(local.clone());
        local
    }

    pub fn map_lval(&mut self, src: &LValLink) -> LValLink {
        let node_id = src.to_top();
        if let Some(result) = self.value_map.get(&node_id) {
            return result.cast().into();
        }
        let new_local = self.map_local(node_id.cast());
        self.value_map.insert(node_id, new_local.to_top());
        new_local.cast().into()
    }

    pub fn map_lval_lookup(&mut self, src: &LValLink) -> LValLink {
        let node_id = src.to_top();
        match self.value_map.get(&node_id) {
            Some(result) => result.cast().into(),
            None => panic!("unknown value: {:?}", node_id),
        }
    }

    crate fn origin_matches(&self, reduce_id: ReduceId) -> ClosureSeed {
        let mut result = ClosureSeed::default();
        if let Some(grammar) = &self.grammar {
            let match_map = &grammar.match_map;
            for (match_id, prec_map) in &self.origin_matches {
                let prec = match match_map.get(*match_id, reduce_id) {
                    Some(prec) => prec,
                    None => continue,
                };
                if let Some(closure) = prec_map.get(&prec) {
                    result.merge(closure);
                }
            }
        }
        result
    }

    crate fn queue_build_one(&mut self, cursor: Cursor, closure: Closure, pair: GroupPair) {
        let job =
BuildJob::BuildOne { cursor, closure, pair };
        self.build_queue.push_back(job);
    }

    crate fn resolve<F>(&mut self, closure: Closure, job_ctor: F) -> BlockRef
        where F: FnOnce(Cursor, Closure) -> BuildJob
    {
        match self.closure_map.entry(closure) {
            Occupied(occupied) => {
                occupied.get().clone().unwrap()
            }
            Vacant(vacant) => {
                let mut model = self.output_model.borrow_mut();
                let block_id = model.new_node::<mir::Block>();
                self.blocks.push(block_id.into());
                let cursor = Cursor::new(self.output_model.clone(), block_id);
                let job = job_ctor(cursor, vacant.key().clone());
                self.build_queue.push_back(job);
                vacant.insert(Some(block_id.into()));
                block_id.into()
            }
        }
    }

    crate fn resolve_closure(&mut self, closure: Closure) -> BlockRef {
        self.resolve(closure, |cursor, closure| {
            BuildJob::BuildSet { cursor, closure }
        })
    }

    crate fn resolve_closure_seed(&mut self, seed: ClosureSeed) -> BlockRef {
        let closure = self.closure(seed);
        self.resolve_closure(closure)
    }

    crate fn resolve_one(&mut self, seed: ClosureSeed, pair: GroupPair) -> BlockRef {
        let closure = seed.into_closure();
        self.resolve(closure, |cursor, closure| {
            BuildJob::BuildOne { cursor, closure, pair }
        })
    }

    crate fn successors(&self, seed: Closure) -> ClosureSeed {
        let mut builder = SuccBuilder::new(seed.entries);
        builder.build(&*self.visitor);
        builder.complete()
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////
{
                let job = BuildJob::BuildSet { cursor, closure: succ_closure };
                self.build_queue.push_back(job);
            }
conditional_block
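`RuleBuilder` drives construction with a worklist: `build` pops `BuildJob`s off a `VecDeque` until it drains, and building one block may enqueue more jobs, while `closure_map` ensures each closure is only ever built once. A toy sketch of that shape (the numeric "jobs" and the successor rule are illustrative assumptions):

use std::collections::{HashSet, VecDeque};

// Process each discovered item once; processing may discover new work.
fn drain_worklist(start: u32) {
    let mut queue = VecDeque::from([start]);
    let mut seen = HashSet::new();
    while let Some(job) = queue.pop_front() {
        if !seen.insert(job) {
            continue; // already built, like a hit in `closure_map`
        }
        // "Building" a job enqueues its successors, like BuildSet jobs.
        for succ in [job * 2, job * 2 + 1] {
            if succ < 16 {
                queue.push_back(succ);
            }
        }
    }
    println!("built {} blocks", seen.len());
}

fn main() {
    drain_worklist(1);
}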
rule_builder.rs
use { crate::{ mir::{self, BlockRef, LValLink}, mir_ext::mark_persistent_recursive, optimizer::{ BuildGroups, ClosureBuilder, GroupBuilder, GroupDyn, GroupSet, MatchMap, OptimizerAspect, SuccBuilder, closure::{Closure, ClosureId, ClosureSeed}, closure_interner::{ClosureInterner}, dfa_builder::{DFABuilder}, }, runtime::{ Grammar, ReduceId, options::Options, }, }, lang_mir::Cursor, north_core::{ compiler::Compiler, iter::ModelIterator, model::{Child, Link, ModelCell}, node_id::{NodeId, ToNodeId}, structure::ForestExt, visitor::{VisitorAspect, VisitCtxCore}, }, std::{ any::Any, cell::RefCell, collections::{HashMap, VecDeque, hash_map::Entry::*}, mem::replace, rc::Rc, } }; //////////////////////////////////////////////////////////////////////////////////////////////// crate type GroupPair = (Box<dyn GroupDyn>, Box<dyn Any>); //////////////////////////////////////////////////////////////////////////////////////////////// crate enum BuildJob { BuildOne { cursor: Cursor, closure: Closure, pair: GroupPair, }, BuildSet { cursor: Cursor, closure: Closure, }, } //////////////////////////////////////////////////////////////////////////////////////////////// // ClosureInterner // Closure -> TransitionTable pub struct RuleBuilder { crate _optimizer: Rc<OptimizerAspect>, crate visitor: Rc<VisitorAspect>, crate output_model: ModelCell, crate options: Options, crate closure_builder: Rc<ClosureBuilder>, crate closure_interner: Rc<ClosureInterner>, crate dfa_builder: DFABuilder, crate grammar: Option<Rc<Grammar>>, crate reduce_ids: Vec<ReduceId>, crate closure_map: HashMap<Closure, Option<BlockRef>>, // IndexMap crate value_map: HashMap<NodeId, NodeId>, crate build_queue: VecDeque<BuildJob>, crate fail_block: Option<BlockRef>, crate num_built: usize, crate origin_matches: MatchMap, crate origin_stmts: Rc<RefCell<Vec<GroupPair>>>, crate rule_ty: Option<Link<mir::TypeFn>>, crate iter_locals: Vec<LValLink>, crate locals: Vec<Child<mir::RuleLocal>>, crate blocks: Vec<Child<mir::Block>>, } impl RuleBuilder { pub fn new(comp: &Compiler, options: Options) -> Self { let visitor = comp.aspect_mut::<VisitorAspect>(); let closure_builder = Rc::new(ClosureBuilder::new(visitor.clone())); let closure_interner = ClosureInterner::new(); let dfa_builder = DFABuilder::new( closure_builder.clone(), closure_interner.clone(), visitor.clone(), ); Self { _optimizer: comp.aspect_mut::<OptimizerAspect>(), visitor: visitor.clone(), output_model: comp.model_cell.clone(), options, closure_builder, closure_interner, dfa_builder, grammar: None, reduce_ids: Vec::new(), closure_map: HashMap::new(), value_map: HashMap::new(), build_queue: VecDeque::new(), fail_block: None, num_built: 0, origin_matches: MatchMap::default(), origin_stmts: <_>::default(), rule_ty: None, iter_locals: Vec::new(), locals: Vec::new(), blocks: Vec::new(), } } pub fn add_rules(&mut self, rules: &Vec<NodeId>) { let seed = { let model = self.visitor.input_model.borrow(); let first_rule = model.get::<_, mir::ItemRule>(rules[0]).unwrap(); self.rule_ty = Some(first_rule.rule_ty.clone()); model.iter(rules) .borrow_cast_nodes_to::<mir::ItemRule>() .filter_map(|r| r.blocks.first().cloned()) .map(|b| model.node(b).first_node()) .collect::<Vec<_>>() }; let mut seed = ClosureSeed::new(seed); seed.follow_calls = true; let _main_block = self.resolve_closure_seed(seed); let fail_seed = ClosureSeed::default(); let fail_block = self.resolve_closure_seed(fail_seed); self.fail_block = Some(fail_block); } pub fn add_value_map<A: ToNodeId, B: ToNodeId>(&mut self, from: A, to: B) { 
self.value_map.insert(from.to_top(), to.to_top());
    }

    pub fn at_origin(&self) -> bool {
        self.num_built <= 2
    }

    pub fn build(
        &mut self,
        grammar: Rc<Grammar>,
        rules: &Vec<NodeId>,
        reduce_ids: Vec<ReduceId>,
    ) -> (String, NodeId<mir::ItemRule>) {
        assert!(rules.len() >= 1);
        self.closure_builder.set_grammar(Some(grammar.clone()));
        self.grammar = Some(grammar);
        self.reduce_ids = reduce_ids;
        self.add_rules(rules);

        while let Some(job) = self.build_queue.pop_front() {
            match job {
                BuildJob::BuildOne { cursor, closure, pair } => {
                    // println!("BUILD_ONE {:?} => {:?}", closure, cursor.orig_block_id());
                    self.build_one(cursor, closure, pair);
                },
                BuildJob::BuildSet { cursor, closure } => {
                    // println!("BUILD_CLOSURE {:?} => {:?}", closure, cursor.orig_block_id());
                    self.build_set(cursor, closure);
                    self.num_built += 1;
                },
            }
        }

        // self.dump_info();
        self.closure_builder.set_grammar(None);
        self.closure_map.clear();
        self.grammar = None;
        self.iter_locals.clear();
        self.num_built = 0;
        self.origin_matches.clear();
        self.origin_stmts.borrow_mut().clear();
        self.value_map.clear();

        let mut model = self.output_model.borrow_mut();
        let node_id = model.new_node();
        let name = format!("opt_{}", node_id.idx());

        // println!("BUILD LOCALS {:?}", self.locals.len());
        model.complete_node(node_id, mir::ItemRule {
            name: name.clone(),
            rule_ty: self.rule_ty.take().unwrap(),
            params: Vec::new(),
            locals: replace(&mut self.locals, Vec::new()),
            blocks: replace(&mut self.blocks, Vec::new()),
        });
        drop(model);

        // Mark persistent blocks
        mark_persistent_recursive(&*self.visitor, node_id);

        (name, node_id)
    }

    pub fn build_one(&mut self, mut cursor: Cursor, closure: Closure, pair: GroupPair) {
        let (group_k, group_v) = pair;
        let succ_seed = self.successors(closure);
        let succ_closure = self.closure(succ_seed);
        group_k.build_mir_dyn(&*group_v, self, &mut cursor);
        match (cursor.is_completed(), succ_closure.is_empty()) {
            (true, true) => { },
            (true, false) => { panic!("completed group with successors"); },
            (false, true) => { self.fuse_block(cursor); },
            (false, false) => {
                let job = BuildJob::BuildSet { cursor, closure: succ_closure };
                self.build_queue.push_back(job);
            },
        }
    }

    crate fn build_origin_stmts(&mut self, cursor: &mut Cursor) {
        let stmts = self.origin_stmts.clone();
        let stmts = stmts.borrow();
        for (k, v) in stmts.iter() {
            k.build_mir_dyn(&**v, self, cursor);
        }
    }

    pub fn build_set(&mut self, mut cursor: Cursor, closure: Closure) {
        if closure.is_empty() {
            cursor.build_ctl(mir::CtlFail { });
            return;
        }
        let group_set = self.group(&closure);
        group_set.build_mir(self, cursor);
    }

    pub fn closure(&self, seed: ClosureSeed) -> Closure {
        self.closure_builder.build(seed)
    }

    pub fn dump_info(&self) {
        let mut map = HashMap::new();
        for (closure, _) in &self.closure_map {
            let mut closure_ids = closure.entries.clone();
            closure_ids.sort();
            map.insert(closure_ids, ());
        }
        println!("CLOSURES {:?} vs {:?}", map.len(), self.closure_map.len());
    }

    pub fn fail_block(&self) -> BlockRef {
        self.fail_block.clone().unwrap()
    }

    pub fn fuse_block(&self, mut cur: Cursor) {
        if !cur.is_completed() {
            let fail_block = self.fail_block();
            cur.build_ctl(mir::CtlBr { block: fail_block });
        }
    }

    pub fn group(&mut self, closure: &Closure) -> GroupSet {
        let mut group_builder = GroupBuilder::new();
        let visitor = self.visitor.clone();
        for entry in &closure.entries {
            let node_id = *entry;
            let core = VisitCtxCore::<BuildGroups> { aspect: &*visitor, node_id, imp_args: self };
            let _ = visitor.visit(core, &mut group_builder);
        }
        group_builder.complete()
    }

    pub fn intern_closure(&self, closure: Closure) ->
ClosureId {
        self.closure_interner.intern(closure)
    }

    pub fn is_origin_pure(&self) -> bool {
        let incorporate_reductions = self.options.incorporate_reductions;
        let is_pure = self.origin_stmts.borrow().is_empty();
        incorporate_reductions && is_pure
    }

    pub fn is_reduce_external(&self, reduce_id: ReduceId) -> bool {
        self.reduce_ids.contains(&reduce_id)
    }

    pub fn map_local(&mut self, src: NodeId<mir::RuleLocal>) -> NodeId<mir::RuleLocal> {
        let ty = {
            let model = self.visitor.input_model.borrow();
            let node = model.node(src);
            node.ty.clone() // XXX: BAD: type needs to be deep copied/transferred
        };
        let mut model_out = self.output_model.borrow_mut();
        let local_out = model_out.build_node(mir::RuleLocal { ty, index: self.locals.len() });
        self.locals.push(local_out.into());
        local_out
    }

    pub fn map_local_iter(&mut self, src: &LValLink, depth: usize) -> LValLink {
        if let Some(local) = self.iter_locals.get(depth).cloned() {
            self.add_value_map(src, &local);
            return local;
        }
        if depth != self.iter_locals.len() {
            panic!("invalid depth");
        }
        let local = self.map_lval(src);
        self.iter_locals.push(local.clone());
        local
    }

    pub fn map_lval(&mut self, src: &LValLink) -> LValLink {
        let node_id = src.to_top();
        if let Some(result) = self.value_map.get(&node_id) {
            return result.cast().into();
        }
        let new_local = self.map_local(node_id.cast());
        self.value_map.insert(node_id, new_local.to_top());
        new_local.cast().into()
    }

    pub fn map_lval_lookup(&mut self, src: &LValLink) -> LValLink {
        let node_id = src.to_top();
        match self.value_map.get(&node_id) {
            Some(result) => result.cast().into(),
            None => panic!("unknown value: {:?}", node_id),
        }
    }

    crate fn origin_matches(&self, reduce_id: ReduceId) -> ClosureSeed {
        let mut result = ClosureSeed::default();
        if let Some(grammar) = &self.grammar {
            let match_map = &grammar.match_map;
            for (match_id, prec_map) in &self.origin_matches {
                let prec = match match_map.get(*match_id, reduce_id) {
                    Some(prec) => prec,
                    None => continue,
                };
                if let Some(closure) = prec_map.get(&prec) {
                    result.merge(closure);
                }
            }
        }
        result
    }

    crate fn queue_build_one(&mut self, cursor: Cursor, closure: Closure, pair: GroupPair) {
        let job = BuildJob::BuildOne { cursor, closure, pair };
        self.build_queue.push_back(job);
    }

    crate fn resolve<F>(&mut self, closure: Closure, job_ctor: F) -> BlockRef
        where F: FnOnce(Cursor, Closure) -> BuildJob
    {
        match self.closure_map.entry(closure) {
            Occupied(occupied) => {
                occupied.get().clone().unwrap()
            }
            Vacant(vacant) => {
                let mut model = self.output_model.borrow_mut();
                let block_id = model.new_node::<mir::Block>();
                self.blocks.push(block_id.into());
                let cursor = Cursor::new(self.output_model.clone(), block_id);
                let job = job_ctor(cursor, vacant.key().clone());
                self.build_queue.push_back(job);
                vacant.insert(Some(block_id.into()));
                block_id.into()
            }
        }
    }

    crate fn resolve_closure(&mut self, closure: Closure) -> BlockRef {
        self.resolve(closure, |cursor, closure| {
            BuildJob::BuildSet { cursor, closure }
        })
    }

    crate fn resolve_closure_seed(&mut self, seed: ClosureSeed) -> BlockRef {
        let closure = self.closure(seed);
        self.resolve_closure(closure)
    }

    crate fn
(&mut self, seed: ClosureSeed, pair: GroupPair) -> BlockRef {
        let closure = seed.into_closure();
        self.resolve(closure, |cursor, closure| {
            BuildJob::BuildOne { cursor, closure, pair }
        })
    }

    crate fn successors(&self, seed: Closure) -> ClosureSeed {
        let mut builder = SuccBuilder::new(seed.entries);
        builder.build(&*self.visitor);
        builder.complete()
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////
resolve_one
identifier_name
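`resolve` memoizes closure-to-block mapping through the `HashMap` entry API: an occupied entry reuses the already-allocated block, while a vacant entry allocates a fresh block and enqueues a build job in one pass, avoiding a separate lookup-then-insert. The same shape in isolation, with deliberately simplified types (strings for closures, integers for block ids):

use std::collections::hash_map::{Entry, HashMap};

// Simplified memoizing resolver: one output id per distinct key,
// allocating (and recording pending work) only on first sight.
fn resolve(
    map: &mut HashMap<String, u32>,
    next_id: &mut u32,
    pending: &mut Vec<u32>,
    key: &str,
) -> u32 {
    match map.entry(key.to_string()) {
        Entry::Occupied(occupied) => *occupied.get(),
        Entry::Vacant(vacant) => {
            let id = *next_id;
            *next_id += 1;
            pending.push(id); // queue a build job for the new block
            *vacant.insert(id)
        }
    }
}

fn main() {
    let (mut map, mut next_id, mut pending) = (HashMap::new(), 0u32, Vec::new());
    let a = resolve(&mut map, &mut next_id, &mut pending, "closure-a");
    let b = resolve(&mut map, &mut next_id, &mut pending, "closure-a");
    assert_eq!(a, b);              // second lookup reuses the block
    assert_eq!(pending.len(), 1);  // only one build job was queued
}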
rule_builder.rs
use { crate::{ mir::{self, BlockRef, LValLink}, mir_ext::mark_persistent_recursive, optimizer::{ BuildGroups, ClosureBuilder, GroupBuilder, GroupDyn, GroupSet, MatchMap, OptimizerAspect, SuccBuilder, closure::{Closure, ClosureId, ClosureSeed}, closure_interner::{ClosureInterner}, dfa_builder::{DFABuilder}, }, runtime::{ Grammar, ReduceId, options::Options, }, }, lang_mir::Cursor, north_core::{ compiler::Compiler, iter::ModelIterator, model::{Child, Link, ModelCell}, node_id::{NodeId, ToNodeId}, structure::ForestExt, visitor::{VisitorAspect, VisitCtxCore}, }, std::{ any::Any, cell::RefCell, collections::{HashMap, VecDeque, hash_map::Entry::*}, mem::replace, rc::Rc, } }; //////////////////////////////////////////////////////////////////////////////////////////////// crate type GroupPair = (Box<dyn GroupDyn>, Box<dyn Any>); //////////////////////////////////////////////////////////////////////////////////////////////// crate enum BuildJob { BuildOne { cursor: Cursor, closure: Closure, pair: GroupPair, }, BuildSet { cursor: Cursor, closure: Closure, }, } //////////////////////////////////////////////////////////////////////////////////////////////// // ClosureInterner // Closure -> TransitionTable pub struct RuleBuilder { crate _optimizer: Rc<OptimizerAspect>, crate visitor: Rc<VisitorAspect>, crate output_model: ModelCell, crate options: Options, crate closure_builder: Rc<ClosureBuilder>, crate closure_interner: Rc<ClosureInterner>, crate dfa_builder: DFABuilder, crate grammar: Option<Rc<Grammar>>, crate reduce_ids: Vec<ReduceId>, crate closure_map: HashMap<Closure, Option<BlockRef>>, // IndexMap crate value_map: HashMap<NodeId, NodeId>, crate build_queue: VecDeque<BuildJob>, crate fail_block: Option<BlockRef>, crate num_built: usize, crate origin_matches: MatchMap, crate origin_stmts: Rc<RefCell<Vec<GroupPair>>>, crate rule_ty: Option<Link<mir::TypeFn>>, crate iter_locals: Vec<LValLink>, crate locals: Vec<Child<mir::RuleLocal>>, crate blocks: Vec<Child<mir::Block>>, } impl RuleBuilder { pub fn new(comp: &Compiler, options: Options) -> Self { let visitor = comp.aspect_mut::<VisitorAspect>(); let closure_builder = Rc::new(ClosureBuilder::new(visitor.clone())); let closure_interner = ClosureInterner::new(); let dfa_builder = DFABuilder::new( closure_builder.clone(), closure_interner.clone(), visitor.clone(), ); Self { _optimizer: comp.aspect_mut::<OptimizerAspect>(), visitor: visitor.clone(), output_model: comp.model_cell.clone(), options, closure_builder, closure_interner, dfa_builder, grammar: None, reduce_ids: Vec::new(), closure_map: HashMap::new(), value_map: HashMap::new(), build_queue: VecDeque::new(), fail_block: None, num_built: 0, origin_matches: MatchMap::default(), origin_stmts: <_>::default(), rule_ty: None, iter_locals: Vec::new(), locals: Vec::new(), blocks: Vec::new(), } } pub fn add_rules(&mut self, rules: &Vec<NodeId>)
self.fail_block = Some(fail_block);
    }

    pub fn add_value_map<A: ToNodeId, B: ToNodeId>(&mut self, from: A, to: B) {
        self.value_map.insert(from.to_top(), to.to_top());
    }

    pub fn at_origin(&self) -> bool {
        self.num_built <= 2
    }

    pub fn build(
        &mut self,
        grammar: Rc<Grammar>,
        rules: &Vec<NodeId>,
        reduce_ids: Vec<ReduceId>,
    ) -> (String, NodeId<mir::ItemRule>) {
        assert!(rules.len() >= 1);
        self.closure_builder.set_grammar(Some(grammar.clone()));
        self.grammar = Some(grammar);
        self.reduce_ids = reduce_ids;
        self.add_rules(rules);

        while let Some(job) = self.build_queue.pop_front() {
            match job {
                BuildJob::BuildOne { cursor, closure, pair } => {
                    // println!("BUILD_ONE {:?} => {:?}", closure, cursor.orig_block_id());
                    self.build_one(cursor, closure, pair);
                },
                BuildJob::BuildSet { cursor, closure } => {
                    // println!("BUILD_CLOSURE {:?} => {:?}", closure, cursor.orig_block_id());
                    self.build_set(cursor, closure);
                    self.num_built += 1;
                },
            }
        }

        // self.dump_info();
        self.closure_builder.set_grammar(None);
        self.closure_map.clear();
        self.grammar = None;
        self.iter_locals.clear();
        self.num_built = 0;
        self.origin_matches.clear();
        self.origin_stmts.borrow_mut().clear();
        self.value_map.clear();

        let mut model = self.output_model.borrow_mut();
        let node_id = model.new_node();
        let name = format!("opt_{}", node_id.idx());

        // println!("BUILD LOCALS {:?}", self.locals.len());
        model.complete_node(node_id, mir::ItemRule {
            name: name.clone(),
            rule_ty: self.rule_ty.take().unwrap(),
            params: Vec::new(),
            locals: replace(&mut self.locals, Vec::new()),
            blocks: replace(&mut self.blocks, Vec::new()),
        });
        drop(model);

        // Mark persistent blocks
        mark_persistent_recursive(&*self.visitor, node_id);

        (name, node_id)
    }

    pub fn build_one(&mut self, mut cursor: Cursor, closure: Closure, pair: GroupPair) {
        let (group_k, group_v) = pair;
        let succ_seed = self.successors(closure);
        let succ_closure = self.closure(succ_seed);
        group_k.build_mir_dyn(&*group_v, self, &mut cursor);
        match (cursor.is_completed(), succ_closure.is_empty()) {
            (true, true) => { },
            (true, false) => { panic!("completed group with successors"); },
            (false, true) => { self.fuse_block(cursor); },
            (false, false) => {
                let job = BuildJob::BuildSet { cursor, closure: succ_closure };
                self.build_queue.push_back(job);
            },
        }
    }

    crate fn build_origin_stmts(&mut self, cursor: &mut Cursor) {
        let stmts = self.origin_stmts.clone();
        let stmts = stmts.borrow();
        for (k, v) in stmts.iter() {
            k.build_mir_dyn(&**v, self, cursor);
        }
    }

    pub fn build_set(&mut self, mut cursor: Cursor, closure: Closure) {
        if closure.is_empty() {
            cursor.build_ctl(mir::CtlFail { });
            return;
        }
        let group_set = self.group(&closure);
        group_set.build_mir(self, cursor);
    }

    pub fn closure(&self, seed: ClosureSeed) -> Closure {
        self.closure_builder.build(seed)
    }

    pub fn dump_info(&self) {
        let mut map = HashMap::new();
        for (closure, _) in &self.closure_map {
            let mut closure_ids = closure.entries.clone();
            closure_ids.sort();
            map.insert(closure_ids, ());
        }
        println!("CLOSURES {:?} vs {:?}", map.len(), self.closure_map.len());
    }

    pub fn fail_block(&self) -> BlockRef {
        self.fail_block.clone().unwrap()
    }

    pub fn fuse_block(&self, mut cur: Cursor) {
        if !cur.is_completed() {
            let fail_block = self.fail_block();
            cur.build_ctl(mir::CtlBr { block: fail_block });
        }
    }

    pub fn group(&mut self, closure: &Closure) -> GroupSet {
        let mut group_builder = GroupBuilder::new();
        let visitor = self.visitor.clone();
        for entry in &closure.entries {
            let node_id = *entry;
            let core = VisitCtxCore::<BuildGroups> { aspect: &*visitor, node_id, imp_args: self };
            let _ =
visitor.visit(core, &mut group_builder);
        }
        group_builder.complete()
    }

    pub fn intern_closure(&self, closure: Closure) -> ClosureId {
        self.closure_interner.intern(closure)
    }

    pub fn is_origin_pure(&self) -> bool {
        let incorporate_reductions = self.options.incorporate_reductions;
        let is_pure = self.origin_stmts.borrow().is_empty();
        incorporate_reductions && is_pure
    }

    pub fn is_reduce_external(&self, reduce_id: ReduceId) -> bool {
        self.reduce_ids.contains(&reduce_id)
    }

    pub fn map_local(&mut self, src: NodeId<mir::RuleLocal>) -> NodeId<mir::RuleLocal> {
        let ty = {
            let model = self.visitor.input_model.borrow();
            let node = model.node(src);
            node.ty.clone() // XXX: BAD: type needs to be deep copied/transferred
        };
        let mut model_out = self.output_model.borrow_mut();
        let local_out = model_out.build_node(mir::RuleLocal { ty, index: self.locals.len() });
        self.locals.push(local_out.into());
        local_out
    }

    pub fn map_local_iter(&mut self, src: &LValLink, depth: usize) -> LValLink {
        if let Some(local) = self.iter_locals.get(depth).cloned() {
            self.add_value_map(src, &local);
            return local;
        }
        if depth != self.iter_locals.len() {
            panic!("invalid depth");
        }
        let local = self.map_lval(src);
        self.iter_locals.push(local.clone());
        local
    }

    pub fn map_lval(&mut self, src: &LValLink) -> LValLink {
        let node_id = src.to_top();
        if let Some(result) = self.value_map.get(&node_id) {
            return result.cast().into();
        }
        let new_local = self.map_local(node_id.cast());
        self.value_map.insert(node_id, new_local.to_top());
        new_local.cast().into()
    }

    pub fn map_lval_lookup(&mut self, src: &LValLink) -> LValLink {
        let node_id = src.to_top();
        match self.value_map.get(&node_id) {
            Some(result) => result.cast().into(),
            None => panic!("unknown value: {:?}", node_id),
        }
    }

    crate fn origin_matches(&self, reduce_id: ReduceId) -> ClosureSeed {
        let mut result = ClosureSeed::default();
        if let Some(grammar) = &self.grammar {
            let match_map = &grammar.match_map;
            for (match_id, prec_map) in &self.origin_matches {
                let prec = match match_map.get(*match_id, reduce_id) {
                    Some(prec) => prec,
                    None => continue,
                };
                if let Some(closure) = prec_map.get(&prec) {
                    result.merge(closure);
                }
            }
        }
        result
    }

    crate fn queue_build_one(&mut self, cursor: Cursor, closure: Closure, pair: GroupPair) {
        let job = BuildJob::BuildOne { cursor, closure, pair };
        self.build_queue.push_back(job);
    }

    crate fn resolve<F>(&mut self, closure: Closure, job_ctor: F) -> BlockRef
        where F: FnOnce(Cursor, Closure) -> BuildJob
    {
        match self.closure_map.entry(closure) {
            Occupied(occupied) => {
                occupied.get().clone().unwrap()
            }
            Vacant(vacant) => {
                let mut model = self.output_model.borrow_mut();
                let block_id = model.new_node::<mir::Block>();
                self.blocks.push(block_id.into());
                let cursor = Cursor::new(self.output_model.clone(), block_id);
                let job = job_ctor(cursor, vacant.key().clone());
                self.build_queue.push_back(job);
                vacant.insert(Some(block_id.into()));
                block_id.into()
            }
        }
    }

    crate fn resolve_closure(&mut self, closure: Closure) -> BlockRef {
        self.resolve(closure, |cursor, closure| {
            BuildJob::BuildSet { cursor, closure }
        })
    }

    crate fn resolve_closure_seed(&mut self, seed: ClosureSeed) -> BlockRef {
        let closure = self.closure(seed);
        self.resolve_closure(closure)
    }

    crate fn resolve_one(&mut self, seed: ClosureSeed, pair: GroupPair) -> BlockRef {
        let closure = seed.into_closure();
        self.resolve(closure, |cursor, closure| {
            BuildJob::BuildOne { cursor, closure, pair }
        })
    }

    crate fn successors(&self, seed: Closure) -> ClosureSeed {
        let mut builder = SuccBuilder::new(seed.entries);
        builder.build(&*self.visitor);
builder.complete()
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////
{
        let seed = {
            let model = self.visitor.input_model.borrow();
            let first_rule = model.get::<_, mir::ItemRule>(rules[0]).unwrap();
            self.rule_ty = Some(first_rule.rule_ty.clone());
            model.iter(rules)
                .borrow_cast_nodes_to::<mir::ItemRule>()
                .filter_map(|r| r.blocks.first().cloned())
                .map(|b| model.node(b).first_node())
                .collect::<Vec<_>>()
        };

        let mut seed = ClosureSeed::new(seed);
        seed.follow_calls = true;
        let _main_block = self.resolve_closure_seed(seed);

        let fail_seed = ClosureSeed::default();
        let fail_block = self.resolve_closure_seed(fail_seed);
identifier_body
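Aside: the `resolve` family in this file memoizes one output block per distinct closure and defers the actual lowering through `build_queue`. Below is a minimal, self-contained sketch of that memoize-and-enqueue pattern, using hypothetical stand-in types (`Closure` as a plain Vec<u32>, `BlockId` as usize) rather than the crate's real `Closure`/`BlockRef`:

use std::collections::{hash_map::Entry, HashMap, VecDeque};

type Closure = Vec<u32>; // stand-in for the interned entry set
type BlockId = usize;    // stand-in for BlockRef

struct Builder {
    closure_map: HashMap<Closure, BlockId>,
    build_queue: VecDeque<(BlockId, Closure)>,
    next_block: BlockId,
}

impl Builder {
    // Return the block already mapped to `closure`, or allocate a fresh
    // block, queue a build job for it, and memoize the mapping so every
    // equal closure shares a single block.
    fn resolve(&mut self, closure: Closure) -> BlockId {
        match self.closure_map.entry(closure) {
            Entry::Occupied(o) => *o.get(),
            Entry::Vacant(v) => {
                let id = self.next_block;
                self.next_block += 1;
                self.build_queue.push_back((id, v.key().clone()));
                v.insert(id);
                id
            }
        }
    }
}

This is why `closure_map` is keyed by the whole closure: hitting the same closure twice must reuse the existing block instead of lowering it again.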
rule_builder.rs
use { crate::{ mir::{self, BlockRef, LValLink}, mir_ext::mark_persistent_recursive, optimizer::{ BuildGroups, ClosureBuilder, GroupBuilder, GroupDyn, GroupSet, MatchMap, OptimizerAspect, SuccBuilder, closure::{Closure, ClosureId, ClosureSeed}, closure_interner::{ClosureInterner}, dfa_builder::{DFABuilder}, }, runtime::{ Grammar, ReduceId, options::Options, }, }, lang_mir::Cursor, north_core::{ compiler::Compiler, iter::ModelIterator, model::{Child, Link, ModelCell}, node_id::{NodeId, ToNodeId}, structure::ForestExt, visitor::{VisitorAspect, VisitCtxCore}, }, std::{ any::Any, cell::RefCell, collections::{HashMap, VecDeque, hash_map::Entry::*}, mem::replace, rc::Rc, } }; //////////////////////////////////////////////////////////////////////////////////////////////// crate type GroupPair = (Box<dyn GroupDyn>, Box<dyn Any>); //////////////////////////////////////////////////////////////////////////////////////////////// crate enum BuildJob { BuildOne { cursor: Cursor, closure: Closure, pair: GroupPair, }, BuildSet { cursor: Cursor, closure: Closure, }, } //////////////////////////////////////////////////////////////////////////////////////////////// // ClosureInterner // Closure -> TransitionTable pub struct RuleBuilder { crate _optimizer: Rc<OptimizerAspect>, crate visitor: Rc<VisitorAspect>, crate output_model: ModelCell, crate options: Options, crate closure_builder: Rc<ClosureBuilder>, crate closure_interner: Rc<ClosureInterner>, crate dfa_builder: DFABuilder, crate grammar: Option<Rc<Grammar>>, crate reduce_ids: Vec<ReduceId>, crate closure_map: HashMap<Closure, Option<BlockRef>>, // IndexMap crate value_map: HashMap<NodeId, NodeId>, crate build_queue: VecDeque<BuildJob>, crate fail_block: Option<BlockRef>, crate num_built: usize, crate origin_matches: MatchMap,
crate rule_ty: Option<Link<mir::TypeFn>>, crate iter_locals: Vec<LValLink>, crate locals: Vec<Child<mir::RuleLocal>>, crate blocks: Vec<Child<mir::Block>>, } impl RuleBuilder { pub fn new(comp: &Compiler, options: Options) -> Self { let visitor = comp.aspect_mut::<VisitorAspect>(); let closure_builder = Rc::new(ClosureBuilder::new(visitor.clone())); let closure_interner = ClosureInterner::new(); let dfa_builder = DFABuilder::new( closure_builder.clone(), closure_interner.clone(), visitor.clone(), ); Self { _optimizer: comp.aspect_mut::<OptimizerAspect>(), visitor: visitor.clone(), output_model: comp.model_cell.clone(), options, closure_builder, closure_interner, dfa_builder, grammar: None, reduce_ids: Vec::new(), closure_map: HashMap::new(), value_map: HashMap::new(), build_queue: VecDeque::new(), fail_block: None, num_built: 0, origin_matches: MatchMap::default(), origin_stmts: <_>::default(), rule_ty: None, iter_locals: Vec::new(), locals: Vec::new(), blocks: Vec::new(), } } pub fn add_rules(&mut self, rules: &Vec<NodeId>) { let seed = { let model = self.visitor.input_model.borrow(); let first_rule = model.get::<_, mir::ItemRule>(rules[0]).unwrap(); self.rule_ty = Some(first_rule.rule_ty.clone()); model.iter(rules) .borrow_cast_nodes_to::<mir::ItemRule>() .filter_map(|r| r.blocks.first().cloned()) .map(|b| model.node(b).first_node()) .collect::<Vec<_>>() }; let mut seed = ClosureSeed::new(seed); seed.follow_calls = true; let _main_block = self.resolve_closure_seed(seed); let fail_seed = ClosureSeed::default(); let fail_block = self.resolve_closure_seed(fail_seed); self.fail_block = Some(fail_block); } pub fn add_value_map<A: ToNodeId, B: ToNodeId>(&mut self, from: A, to: B) { self.value_map.insert(from.to_top(), to.to_top()); } pub fn at_origin(&self) -> bool { self.num_built <= 2 } pub fn build( &mut self, grammar: Rc<Grammar>, rules: &Vec<NodeId>, reduce_ids: Vec<ReduceId>, ) -> (String, NodeId<mir::ItemRule>) { assert!(rules.len() >= 1); self.closure_builder.set_grammar(Some(grammar.clone())); self.grammar = Some(grammar); self.reduce_ids = reduce_ids; self.add_rules(rules); while let Some(job) = self.build_queue.pop_front() { match job { BuildJob::BuildOne { cursor, closure, pair } => { // println!("BUILD_ONE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_one(cursor, closure, pair); }, BuildJob::BuildSet { cursor, closure } => { // println!("BUILD_CLOSURE {:?} => {:?}", closure, cursor.orig_block_id()); self.build_set(cursor, closure); self.num_built += 1; }, } } // self.dump_info(); self.closure_builder.set_grammar(None); self.closure_map.clear(); self.grammar = None; self.iter_locals.clear(); self.num_built = 0; self.origin_matches.clear(); self.origin_stmts.borrow_mut().clear(); self.value_map.clear(); let mut model = self.output_model.borrow_mut(); let node_id = model.new_node(); let name = format!("opt_{}", node_id.idx()); //println!("BUILD LOCALS {:?}", self.locals.len()); model.complete_node(node_id, mir::ItemRule { name: name.clone(), rule_ty: self.rule_ty.take().unwrap(), params: Vec::new(), locals: replace(&mut self.locals, Vec::new()), blocks: replace(&mut self.blocks, Vec::new()), }); drop(model); // Mark persistent blocks mark_persistent_recursive(&*self.visitor, node_id); (name, node_id) } pub fn build_one(&mut self, mut cursor: Cursor, closure: Closure, pair: GroupPair) { let (group_k, group_v) = pair; let succ_seed = self.successors(closure); let succ_closure = self.closure(succ_seed); group_k.build_mir_dyn(&*group_v, self, &mut cursor); match 
(cursor.is_completed(), succ_closure.is_empty()) { (true, true) => { }, (true, false) => { panic!("completed group with successors"); }, (false, true) => { self.fuse_block(cursor); }, (false, false) => { let job = BuildJob::BuildSet { cursor, closure: succ_closure }; self.build_queue.push_back(job); }, } } crate fn build_origin_stmts(&mut self, cursor: &mut Cursor) { let stmts = self.origin_stmts.clone(); let stmts = stmts.borrow(); for (k, v) in stmts.iter() { k.build_mir_dyn(&**v, self, cursor); } } pub fn build_set(&mut self, mut cursor: Cursor, closure: Closure) { if closure.is_empty() { cursor.build_ctl(mir::CtlFail { }); return; } let group_set = self.group(&closure); group_set.build_mir(self, cursor); } pub fn closure(&self, seed: ClosureSeed) -> Closure { self.closure_builder.build(seed) } pub fn dump_info(&self) { let mut map = HashMap::new(); for (closure, _) in &self.closure_map { let mut closure_ids = closure.entries.clone(); closure_ids.sort(); map.insert(closure_ids, ()); } println!("CLOSURES {:?} vs {:?}", map.len(), self.closure_map.len()); } pub fn fail_block(&self) -> BlockRef { self.fail_block.clone().unwrap() } pub fn fuse_block(&self, mut cur: Cursor) { if !cur.is_completed() { let fail_block = self.fail_block(); cur.build_ctl(mir::CtlBr { block: fail_block }); } } pub fn group(&mut self, closure: &Closure) -> GroupSet { let mut group_builder = GroupBuilder::new(); let visitor = self.visitor.clone(); for entry in &closure.entries { let node_id = *entry; let core = VisitCtxCore::<BuildGroups> { aspect: &*visitor, node_id, imp_args: self }; let _ = visitor.visit(core, &mut group_builder); } group_builder.complete() } pub fn intern_closure(&self, closure: Closure) -> ClosureId { self.closure_interner.intern(closure) } pub fn is_origin_pure(&self) -> bool { let incorporate_reductions = self.options.incorporate_reductions; let is_pure = self.origin_stmts.borrow().is_empty(); incorporate_reductions && is_pure } pub fn is_reduce_external(&self, reduce_id: ReduceId) -> bool { self.reduce_ids.contains(&reduce_id) } pub fn map_local(&mut self, src: NodeId<mir::RuleLocal>) -> NodeId<mir::RuleLocal> { let ty = { let model = self.visitor.input_model.borrow(); let node = model.node(src); node.ty.clone() // XXX: BAD: type needs to be deep copied/transferred }; let mut model_out = self.output_model.borrow_mut(); let local_out = model_out.build_node(mir::RuleLocal { ty, index: self.locals.len() }); self.locals.push(local_out.into()); local_out } pub fn map_local_iter(&mut self, src: &LValLink, depth: usize) -> LValLink { if let Some(local) = self.iter_locals.get(depth).cloned() { self.add_value_map(src, &local); return local; } if depth != self.iter_locals.len() { panic!("invalid depth"); } let local = self.map_lval(src); self.iter_locals.push(local.clone()); local } pub fn map_lval(&mut self, src: &LValLink) -> LValLink { let node_id = src.to_top(); if let Some(result) = self.value_map.get(&node_id) { return result.cast().into(); } let new_local = self.map_local(node_id.cast()); self.value_map.insert(node_id, new_local.to_top()); new_local.cast().into() } pub fn map_lval_lookup(&mut self, src: &LValLink) -> LValLink { let node_id = src.to_top(); match self.value_map.get(&node_id) { Some(result) => result.cast().into(), None => panic!("unknown value: {:?}", node_id), } } crate fn origin_matches(&self, reduce_id: ReduceId) -> ClosureSeed { let mut result = ClosureSeed::default(); if let Some(grammar) = &self.grammar { let match_map = &grammar.match_map; for (match_id, prec_map) in
&self.origin_matches { let prec = match match_map.get(*match_id, reduce_id) { Some(prec) => prec, None => continue, }; if let Some(closure) = prec_map.get(&prec) { result.merge(closure); } } } result } crate fn queue_build_one(&mut self, cursor: Cursor, closure: Closure, pair: GroupPair) { let job = BuildJob::BuildOne { cursor, closure, pair }; self.build_queue.push_back(job); } crate fn resolve<F>(&mut self, closure: Closure, job_ctor: F) -> BlockRef where F: FnOnce(Cursor, Closure) -> BuildJob { match self.closure_map.entry(closure) { Occupied(occupied) => { occupied.get().clone().unwrap() } Vacant(vacant) => { let mut model = self.output_model.borrow_mut(); let block_id = model.new_node::<mir::Block>(); self.blocks.push(block_id.into()); let cursor = Cursor::new(self.output_model.clone(), block_id); let job = job_ctor(cursor, vacant.key().clone()); self.build_queue.push_back(job); vacant.insert(Some(block_id.into())); block_id.into() } } } crate fn resolve_closure(&mut self, closure: Closure) -> BlockRef { self.resolve(closure, |cursor, closure| { BuildJob::BuildSet { cursor, closure } }) } crate fn resolve_closure_seed(&mut self, seed: ClosureSeed) -> BlockRef { let closure = self.closure(seed); self.resolve_closure(closure) } crate fn resolve_one(&mut self, seed: ClosureSeed, pair: GroupPair) -> BlockRef { let closure = seed.into_closure(); self.resolve(closure, |cursor, closure| { BuildJob::BuildOne { cursor, closure, pair } }) } crate fn successors(&self, seed: Closure) -> ClosureSeed { let mut builder = SuccBuilder::new(seed.entries); builder.build(&*self.visitor); builder.complete() } } ////////////////////////////////////////////////////////////////////////////////////////////////
crate origin_stmts: Rc<RefCell<Vec<GroupPair>>>,
random_line_split
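Worth noting: `build_origin_stmts` clones the `Rc` before borrowing the statement list, which is what lets it call `&mut self` methods while iterating the shared list. A tiny sketch of that pattern, with a hypothetical `Ctx` type that is not the crate's API:

use std::{cell::RefCell, rc::Rc};

struct Ctx {
    stmts: Rc<RefCell<Vec<String>>>,
}

impl Ctx {
    fn emit(&mut self, s: &str) {
        println!("emit {}", s);
    }

    fn build_all(&mut self) {
        // Cloning the Rc detaches the list from `self`, so the loop body
        // may take `&mut self` without fighting the borrow checker.
        let stmts = self.stmts.clone();
        for s in stmts.borrow().iter() {
            self.emit(s);
        }
    }
}

The `RefCell` borrow is still held across `emit`, so `emit` must not call `borrow_mut` on the same cell, which mirrors the implicit constraint on `build_origin_stmts`.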
trainer.rs
// p.inc(1); // } Some((score, string)) }) .collect(); // Fill seed_sentencepieces for (count, character) in sall_chars { seed_sentencepieces.push((character.to_string(), count.into())); } // sort by decreasing score substr_index.sort_by_key(|&a| Reverse(a)); for (score, char_string) in substr_index { // Just in case assert!(self.is_valid_sentencepiece(char_string)); let string: String = char_string.iter().collect(); seed_sentencepieces.push((string, score.into())); if seed_sentencepieces.len() >= self.seed_size { break; } } to_log_prob(&mut seed_sentencepieces); seed_sentencepieces } fn prune_sentence_pieces( &self, model: &Unigram, pieces: &[SentencePiece], sentences: &[Sentence], ) -> Vec<SentencePiece> { let mut always_keep = vec![true; pieces.len()]; let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; let bos_id = pieces.len() + 1; let eos_id = pieces.len() + 2; // First, segments the current sentencepieces to know // how each sentencepiece is resegmented if this sentencepiece is removed // from the vocabulary. // To do so, we take the second best segmentation of sentencepiece[i]. // alternatives[i] stores the sequence of second best sentencepieces. for (id, (token, _score)) in pieces.iter().enumerate() { // Always keep unk. if id == 0 { always_keep[id] = false; continue; } let mut lattice = Lattice::from(token, bos_id, eos_id); model.populate_nodes(&mut lattice); let nbests = lattice.nbest(2); if nbests.len() == 1 { always_keep[id] = true; } else if nbests[0].len() >= 2 { always_keep[id] = false; } else if nbests[0].len() == 1 { always_keep[id] = true; for node in &nbests[1] { let alt_id = node.borrow().id; alternatives[id].push(alt_id); } } } // Second, segments all sentences to compute likelihood // with a unigram language model. inverted[i] stores // the set of sentence index where the sentencepieces[i] appears. let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect(); let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences .maybe_par_chunks(chunk_size) .map(|enumerated_sentence_count_chunk| { let mut vsum = 0.0; let mut freq: Vec<f64> = vec![0.0; pieces.len()]; let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; for (i, (sentence, count)) in enumerated_sentence_count_chunk { let mut lattice = Lattice::from(sentence, bos_id, eos_id); model.populate_nodes(&mut lattice); vsum += *count as f64; for node_ref in lattice.viterbi() { let id = node_ref.borrow().id; freq[id] += *count as f64; inverted[id].push(*i); } } (vsum, freq, inverted) }) .reduce( || (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]), |(vsum, freq, inverted), (lvsum, lfreq, linverted)| { ( vsum + lvsum, freq.iter() .zip(lfreq) .map(|(global_el, local_el)| global_el + local_el) .collect(), inverted .iter() .zip(linverted) .map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat()) .collect(), ) }, ); let (vsum, freq, inverted) = collected; let sum: f64 = freq.iter().sum(); let logsum = sum.ln(); let mut candidates: Vec<(usize, f64)> = vec![]; let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize); new_pieces.push(pieces[0].clone()); // Finally, computes how likely the LM likelihood is reduced if // the sentencepiece[i] is removed from the vocabulary. 
// Since the exact computation of loss is difficult, we compute the // loss approximately by assuming that all sentencepiece[i] in the sentences // are replaced with alternatives[i] when sentencepiece[i] is removed. for (id, (token, score)) in pieces.iter().enumerate() { if id == 0 { continue; } if freq[id] == 0.0 && !always_keep[id] { // not found in Viterbi path. Can remove this entry safely. continue; } else if alternatives[id].is_empty() { // no alternatives. Keeps this entry. new_pieces.push((token.to_string(), *score)); } else { let mut f = 0.0; // the frequency of pieces[i]; for n in &inverted[id] { let score = sentences[*n].1 as f64; f += score; } // TODO: Temporary hack to avoid NaNs. if f == 0.0 || f.is_nan() { // new_pieces.push((token.to_string(), *score)); continue; } f /= vsum; // normalizes by all sentence frequency. let logprob_sp = freq[id].ln() - logsum; // After removing the sentencepiece[i], its frequency freq[i] is // re-assigned to alternatives. // new_sum = current_sum - freq[i] + freq[i] * alternatives.size() // = current_sum + freq[i] (alternatives - 1) let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln(); // The frequencies of alternatives are increased by freq[i]. let mut logprob_alt = 0.0; for n in &alternatives[id] { logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt; } // loss: the diff of likelihood after removing the sentencepieces[i]. let loss = f * (logprob_sp - logprob_alt); if loss.is_nan() { panic!(""); } candidates.push((id, loss)); } } let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1 let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize; let pruned_size = desired_vocab_size.max(pruned_size); candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); for (id, _score) in candidates { if new_pieces.len() == pruned_size { break; } new_pieces.push(pieces[id].clone()); } new_pieces.to_vec() } /// Update the progress bar with the new provided length and message fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) { if let Some(p) = p { p.set_message(message); p.set_length(len as u64); p.set_draw_delta(len as u64 / 100); p.reset(); } } /// Set the progress bar in the finish state fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) { if let Some(p) = p { p.set_length(final_len as u64); p.finish(); println!(); } } fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) { let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum(); let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let collected: (f64, u32, Vec<f64>) = sentences .maybe_par_chunks(chunk_size) .map(|sentences_chunk| { let mut expected: Vec<f64> = vec![0.0; model.len()]; let mut objs: f64 = 0.0; let mut ntokens: u32 = 0; for (string, freq) in sentences_chunk { let mut lattice = Lattice::from(string, model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected); if z.is_nan() { panic!("likelihood is NAN. 
Input sentence may be too long."); } ntokens += lattice.viterbi().len() as u32; objs -= z / (all_sentence_freq as f64); } (objs, ntokens, expected) }) .reduce( || (0.0, 0, vec![0.0; model.len()]), |(objs, ntokens, expected), (lobjs, lntokens, lexpected)| { ( objs + lobjs, ntokens + lntokens, expected .iter() .zip(lexpected) .map(|(global_el, local_el)| global_el + local_el) .collect(), ) }, ); collected } fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> { if pieces.len()!= expected.len() { panic!( "Those two iterators are supposed to be the same length ({} vs {})", pieces.len(), expected.len() ); } let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size.try_into().unwrap()); let mut sum = 0.0; let expected_frequency_threshold = 0.5; for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() { // Always keep unk. if i == 0 { new_pieces.push((piece.clone(), f64::NAN)); continue; } if *freq < expected_frequency_threshold { continue; } new_pieces.push((piece.clone(), *freq)); sum += freq; } // // Here we do not use the original EM, but use the // // Bayesianified/DPified EM algorithm. // // https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf // // This modification will act as a sparse prior. let logsum = digamma(sum); let new_pieces: Vec<_> = new_pieces .into_iter() .map(|(s, c)| (s, digamma(c) - logsum)) .collect(); new_pieces } pub fn do_train( &self, sentences: Vec<Sentence>, model: &mut Unigram, ) -> Result<Vec<AddedToken>> { let progress = self.setup_progress(); // // 1. Compute frequent substrings // TODO Should be able to upgrade to u64 when needed self.update_progress(&progress, sentences.len(), "Suffix array seeds"); let mut pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size.try_into().unwrap()); // We use a UNK token when training, whatever the `self.unk_token` pieces.push(("<UNK>".into(), f64::NAN)); pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress)); self.finalize_progress(&progress, sentences.len()); // Useful to check compatibility with spm. debug!( "Using {} pieces on {} sentences for EM training", pieces.len(), sentences.len() ); let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1 // 2. Run E-M Loops to fine grain the pieces. // We will shrink the vocab by shrinking_factor every loop on average // Some other pieces are dropped if logprob is too small // V = N * (f)**k // k = log(V / N) / log(f) let expected_loops = (((desired_vocab_size as f64).ln() - (pieces.len() as f64).ln()) / self.shrinking_factor.ln()) as usize + 1; let expected_updates = expected_loops * self.n_sub_iterations as usize; self.update_progress(&progress, expected_updates, "EM training"); let required_chars = self.required_chars(&sentences); if required_chars.len() as u32 > self.vocab_size { return Err(Box::new(UnigramTrainerError::VocabularyTooSmall)); } let mut new_model = Unigram::from(pieces.clone(), Some(0), false)?; loop { // Sub-EM iteration. for _iter in 0..self.n_sub_iterations { // Executes E step let (_objective, _num_tokens, expected) = self.run_e_step(&new_model, &sentences); // Executes M step. 
pieces = self.run_m_step(&pieces, &expected); new_model = Unigram::from(pieces.clone(), Some(0), false)?; // Useful comment for checking compatibility with spm debug!( "Em iter={} size={} obj={} num_tokens={} num_tokens/piece={}", _iter, new_model.len(), _objective, _num_tokens, _num_tokens as f64 / model.len() as f64 ); if let Some(p) = &progress { p.inc(1); } } // end of Sub EM iteration // Stops the iteration when the number of pieces reaches the // desired symbol size. if pieces.len() <= desired_vocab_size { break; } // Prunes pieces. pieces = self.prune_sentence_pieces(&new_model, &pieces, &sentences); new_model = Unigram::from(pieces.clone(), Some(0), false)?; } self.finalize_progress(&progress, expected_updates); // Finally, adjusts the size of sentencepieces to be |vocab_size|. *model = self.finalize(new_model, required_chars)?; Ok(self.special_tokens.clone()) } } impl Trainer for UnigramTrainer { type Model = Unigram; /// Train a Unigram model fn train(&self, model: &mut Unigram) -> Result<Vec<AddedToken>> { let sentences: Vec<_> = self.words.iter().map(|(s, i)| (s.to_owned(), *i)).collect(); self.do_train(sentences, model) } /// Whether we should show progress fn should_show_progress(&self) -> bool { self.show_progress } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { let words: Result<HashMap<String, u32>> = iterator .maybe_par_bridge() .map(|sequence| { let words = process(sequence.as_ref())?; let mut map = HashMap::new(); for word in words { map.entry(word).and_modify(|c| *c += 1).or_insert(1); } Ok(map) }) .reduce( || Ok(HashMap::new()), |acc, ws| { let mut acc = acc?; for (k, v) in ws? { acc.entry(k).and_modify(|c| *c += v).or_insert(v); }
random_line_split
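The `expected_loops` computation in `do_train` comes from modeling each pruning round as keeping a fixed fraction of pieces: with N seed pieces, a target of V, and shrinking factor f, V = N * f^k gives k = ln(V / N) / ln(f). A small standalone check of that arithmetic, using hypothetical numbers rather than anything from a real corpus:

fn expected_loops(n_seed: usize, target: usize, shrink: f64) -> usize {
    // V = N * f^k  =>  k = ln(V / N) / ln(f); the +1 mirrors the code above.
    (((target as f64).ln() - (n_seed as f64).ln()) / shrink.ln()) as usize + 1
}

fn main() {
    // e.g. 1_000_000 seeds shrinking by 0.75 toward 8_800 (= 8000 * 1.1):
    assert_eq!(expected_loops(1_000_000, 8_800, 0.75), 17);
}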
trainer.rs
#[derive(thiserror::Error, Debug)] pub enum UnigramTrainerError { #[error("The vocabulary is not large enough to contain all chars")] VocabularyTooSmall, } fn to_log_prob(pieces: &mut [SentencePiece]) { let sum: f64 = pieces.iter().map(|(_, score)| score).sum(); let logsum = sum.ln(); for (_, score) in pieces.iter_mut() { *score = score.ln() - logsum; } } /// A `UnigramTrainer` can train a `Unigram` model from `word_counts`. #[non_exhaustive] #[derive(Builder, Debug, Clone, Serialize, Deserialize)] pub struct UnigramTrainer { #[builder(default = "true")] pub show_progress: bool, #[builder(default = "8000")] pub vocab_size: u32, #[builder(default = "2")] pub n_sub_iterations: u32, #[builder(default = "0.75")] pub shrinking_factor: f64, #[builder(default = "vec![]")] pub special_tokens: Vec<AddedToken>, #[builder(default = "HashSet::new()")] pub initial_alphabet: HashSet<char>, #[builder(default = "None")] pub unk_token: Option<String>, #[builder(default = "16")] pub max_piece_length: usize, #[builder(default = "1_000_000")] seed_size: usize, #[builder(default = "HashMap::new()")] words: HashMap<String, u32>, } impl Default for UnigramTrainer { fn default() -> Self { Self::builder().build().unwrap() } } impl UnigramTrainer { pub fn builder() -> UnigramTrainerBuilder { UnigramTrainerBuilder::default() } /// Setup a progress bar if asked to show progress fn setup_progress(&self) -> Option<ProgressBar> { if self.show_progress { let p = ProgressBar::new(0); p.set_style( ProgressStyle::default_bar() .template("[{elapsed_precise}] {msg:<40!} {wide_bar} {pos:<9!}/{len:>9!}"), ); Some(p) } else { None } } fn is_valid_sentencepiece(&self, char_string: &[char]) -> bool { // Checks string length // Space not in the substring, numbers, hiragana and more should be taken // care of within pre_tokenizers. 
// https://github.com/google/sentencepiece/blob/26be9516cd81d5315ee31c48d2438018e0eab879/src/trainer_interface.cc#L203 let n = char_string.len(); if char_string.is_empty() || n > self.max_piece_length { return false; } true } fn finalize(&self, model: Unigram, required_chars: HashSet<String>) -> Result<Unigram> { let mut min_score_penalty = 0.0; let min_score_penalty_delta = 0.0001; let mut pieces: Vec<(String, f64)> = vec![]; let mut inserted: HashSet<String> = HashSet::new(); // We don't want to include the <UNK> that was used to train inserted.insert("<UNK>".into()); let existing_pieces: HashMap<String, f64> = model.iter().cloned().collect(); for c in required_chars { if let Some(t) = existing_pieces.get(&c) { inserted.insert(c.clone()); pieces.push((c, *t)); } else { let score = model.min_score + min_score_penalty; inserted.insert(c.clone()); pieces.push((c, score)); min_score_penalty += min_score_penalty_delta; } } let (unk_id, need_add_unk) = if let Some(ref unk) = self.unk_token { let unk_id = self.special_tokens.iter().enumerate().find_map(|(i, t)| { if t.content == *unk { Some(i) } else { None } }); match unk_id { Some(id) => (Some(id), false), None => (Some(0), true), } } else { (None, false) }; let vocab_size_without_special_tokens = if need_add_unk { self.vocab_size as usize - self.special_tokens.len() - 1 } else { self.vocab_size as usize - self.special_tokens.len() }; for (token, score) in model.iter() { if inserted.contains::<str>(token) { continue; } inserted.insert(token.to_string()); pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score })); if pieces.len() == vocab_size_without_special_tokens { break; } } pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); // Insert the necessary tokens let mut special_tokens = self .special_tokens .iter() .map(|t| (t.content.clone(), 0.0)) .collect::<Vec<_>>(); if need_add_unk { special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0)); } Unigram::from( special_tokens.into_iter().chain(pieces).collect(), unk_id, model.byte_fallback(), ) } fn required_chars(&self, word_counts: &[Sentence]) -> HashSet<String> { word_counts .iter() .flat_map(|(s, _count)| s.chars()) .chain(self.initial_alphabet.iter().copied()) .map(|c| c.to_string()) .collect() } fn make_seed_sentence_pieces( &self, sentences: &[Sentence], _progress: &Option<ProgressBar>, ) -> Vec<SentencePiece> { // Put all sentences in a string, separated by \0 let total: usize = sentences .iter() .map(|(s, _)| s.chars().count()) .sum::<usize>() + sentences.len(); let mut flat_string = String::with_capacity(total); let mut all_chars: HashMap<char, u32> = HashMap::new(); let c_sentence_boundary = '\0'; let k_sentence_boundary = '\0'.to_string(); for (string, n) in sentences { if string.is_empty() { continue; } flat_string.push_str(string); // XXX // Comment suggests we add sentence boundary, but it seems to be missing from actual // code in spm. flat_string.push_str(&k_sentence_boundary); for c in string.chars() { if c!= c_sentence_boundary { *all_chars.entry(c).or_insert(0) += n; } } } flat_string.shrink_to_fit(); #[cfg(feature = "esaxx_fast")] let suffix = esaxx_rs::suffix(&flat_string).unwrap(); #[cfg(not(feature = "esaxx_fast"))] let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap(); // Basic chars need to be in sentence pieces. 
let mut seed_sentencepieces: Vec<SentencePiece> = vec![]; let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect(); // Reversed order sall_chars.sort_by_key(|&a| Reverse(a)); let mut substr_index: Vec<_> = suffix .iter() .filter_map(|(string, freq)| { if string.len() <= 1 { return None; } if string.contains(&c_sentence_boundary) { return None; } if!self.is_valid_sentencepiece(string) { return None; } let score = freq * string.len() as u32; // if let Some(p) = &progress { // p.inc(1); // } Some((score, string)) }) .collect(); // Fill seed_sentencepieces for (count, character) in sall_chars { seed_sentencepieces.push((character.to_string(), count.into())); } // sort by decreasing score substr_index.sort_by_key(|&a| Reverse(a)); for (score, char_string) in substr_index { // Just in case assert!(self.is_valid_sentencepiece(char_string)); let string: String = char_string.iter().collect(); seed_sentencepieces.push((string, score.into())); if seed_sentencepieces.len() >= self.seed_size { break; } } to_log_prob(&mut seed_sentencepieces); seed_sentencepieces } fn prune_sentence_pieces( &self, model: &Unigram, pieces: &[SentencePiece], sentences: &[Sentence], ) -> Vec<SentencePiece> { let mut always_keep = vec![true; pieces.len()]; let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; let bos_id = pieces.len() + 1; let eos_id = pieces.len() + 2; // First, segments the current sentencepieces to know // how each sentencepiece is resegmented if this sentencepiece is removed // from the vocabulary. // To do so, we take the second best segmentation of sentencepiece[i]. // alternatives[i] stores the sequence of second best sentencepieces. for (id, (token, _score)) in pieces.iter().enumerate() { // Always keep unk. if id == 0 { always_keep[id] = false; continue; } let mut lattice = Lattice::from(token, bos_id, eos_id); model.populate_nodes(&mut lattice); let nbests = lattice.nbest(2); if nbests.len() == 1 { always_keep[id] = true; } else if nbests[0].len() >= 2 { always_keep[id] = false; } else if nbests[0].len() == 1 { always_keep[id] = true; for node in &nbests[1] { let alt_id = node.borrow().id; alternatives[id].push(alt_id); } } } // Second, segments all sentences to compute likelihood // with a unigram language model. inverted[i] stores // the set of sentence index where the sentencepieces[i] appears. 
let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect(); let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences .maybe_par_chunks(chunk_size) .map(|enumerated_sentence_count_chunk| { let mut vsum = 0.0; let mut freq: Vec<f64> = vec![0.0; pieces.len()]; let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; for (i, (sentence, count)) in enumerated_sentence_count_chunk { let mut lattice = Lattice::from(sentence, bos_id, eos_id); model.populate_nodes(&mut lattice); vsum += *count as f64; for node_ref in lattice.viterbi() { let id = node_ref.borrow().id; freq[id] += *count as f64; inverted[id].push(*i); } } (vsum, freq, inverted) }) .reduce( || (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]), |(vsum, freq, inverted), (lvsum, lfreq, linverted)| { ( vsum + lvsum, freq.iter() .zip(lfreq) .map(|(global_el, local_el)| global_el + local_el) .collect(), inverted .iter() .zip(linverted) .map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat()) .collect(), ) }, ); let (vsum, freq, inverted) = collected; let sum: f64 = freq.iter().sum(); let logsum = sum.ln(); let mut candidates: Vec<(usize, f64)> = vec![]; let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize); new_pieces.push(pieces[0].clone()); // Finally, computes how likely the LM likelihood is reduced if // the sentencepiece[i] is removed from the vocabulary. // Since the exact computation of loss is difficult, we compute the // loss approximately by assuming that all sentencepiece[i] in the sentences // are replaced with alternatives[i] when sentencepiece[i] is removed. for (id, (token, score)) in pieces.iter().enumerate() { if id == 0 { continue; } if freq[id] == 0.0 &&!always_keep[id] { // not found in Viterbi path. Can remove this entry safely. continue; } else if alternatives[id].is_empty() { // no alternatives. Keeps this entry. new_pieces.push((token.to_string(), *score)); } else { let mut f = 0.0; // the frequency of pieces[i]; for n in &inverted[id] { let score = sentences[*n].1 as f64; f += score; } // TODO: Temporary hack to avoid Nans. if f == 0.0 || f.is_nan() { // new_pieces.push((token.to_string(), *score)); continue; } f /= vsum; // normalizes by all sentence frequency. let logprob_sp = freq[id].ln() - logsum; // After removing the sentencepiece[i], its frequency freq[i] is // re-assigned to alternatives. // new_sum = current_sum - freq[i] + freq[i] * alternatives.size() // = current_sum + freq[i] (alternatives - 1) let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln(); // The frequencies of altenatives are increased by freq[i]. let mut logprob_alt = 0.0; for n in &alternatives[id] { logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt; } // loss: the diff of likelihood after removing the sentencepieces[i]. 
let loss = f * (logprob_sp - logprob_alt); if loss.is_nan() { panic!(""); } candidates.push((id, loss)); } } let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1 let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize; let pruned_size = desired_vocab_size.max(pruned_size); candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); for (id, _score) in candidates { if new_pieces.len() == pruned_size { break; } new_pieces.push(pieces[id].clone()); } new_pieces.to_vec() } /// Update the progress bar with the new provided length and message fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) { if let Some(p) = p { p.set_message(message); p.set_length(len as u64); p.set_draw_delta(len as u64 / 100); p.reset(); } } /// Set the progress bar in the finish state fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) { if let Some(p) = p { p.set_length(final_len as u64); p.finish(); println!(); } } fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) { let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum(); let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let collected: (f64, u32, Vec<f64>) = sentences .maybe_par_chunks(chunk_size) .map(|sentences_chunk| {
{ let mut result = 0.0; while x < 7.0 { result -= 1.0 / x; x += 1.0; } x -= 1.0 / 2.0; let xx = 1.0 / x; let xx2 = xx * xx; let xx4 = xx2 * xx2; result += x.ln() + (1.0 / 24.0) * xx2 - 7.0 / 960.0 * xx4 + (31.0 / 8064.0) * xx4 * xx2 - (127.0 / 30720.0) * xx4 * xx4; result }
identifier_body
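The `identifier_body` above is the digamma approximation used by the M-step: push x upward with the recurrence psi(x) = psi(x + 1) - 1/x until x >= 7, then evaluate an asymptotic series around t = x - 1/2. Restated here with indentation, plus a quick sanity check against psi(1) = -gamma (the Euler-Mascheroni constant), which this recurrence-plus-series reproduces to well under 1e-6:

fn digamma(mut x: f64) -> f64 {
    let mut result = 0.0;
    // Recurrence: psi(x) = psi(x + 1) - 1/x, applied until the series is valid.
    while x < 7.0 {
        result -= 1.0 / x;
        x += 1.0;
    }
    // Asymptotic expansion around t = x - 1/2.
    x -= 1.0 / 2.0;
    let xx = 1.0 / x;
    let xx2 = xx * xx;
    let xx4 = xx2 * xx2;
    result += x.ln() + (1.0 / 24.0) * xx2 - 7.0 / 960.0 * xx4
        + (31.0 / 8064.0) * xx4 * xx2
        - (127.0 / 30720.0) * xx4 * xx4;
    result
}

fn main() {
    // psi(1) = -0.5772156649... (negative Euler-Mascheroni constant)
    assert!((digamma(1.0) + 0.577_215_664_9).abs() < 1e-6);
}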
trainer.rs
}; let vocab_size_without_special_tokens = if need_add_unk { self.vocab_size as usize - self.special_tokens.len() - 1 } else { self.vocab_size as usize - self.special_tokens.len() }; for (token, score) in model.iter() { if inserted.contains::<str>(token) { continue; } inserted.insert(token.to_string()); pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score })); if pieces.len() == vocab_size_without_special_tokens { break; } } pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); // Insert the necessary tokens let mut special_tokens = self .special_tokens .iter() .map(|t| (t.content.clone(), 0.0)) .collect::<Vec<_>>(); if need_add_unk { special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0)); } Unigram::from( special_tokens.into_iter().chain(pieces).collect(), unk_id, model.byte_fallback(), ) } fn
(&self, word_counts: &[Sentence]) -> HashSet<String> { word_counts .iter() .flat_map(|(s, _count)| s.chars()) .chain(self.initial_alphabet.iter().copied()) .map(|c| c.to_string()) .collect() } fn make_seed_sentence_pieces( &self, sentences: &[Sentence], _progress: &Option<ProgressBar>, ) -> Vec<SentencePiece> { // Put all sentences in a string, separated by \0 let total: usize = sentences .iter() .map(|(s, _)| s.chars().count()) .sum::<usize>() + sentences.len(); let mut flat_string = String::with_capacity(total); let mut all_chars: HashMap<char, u32> = HashMap::new(); let c_sentence_boundary = '\0'; let k_sentence_boundary = '\0'.to_string(); for (string, n) in sentences { if string.is_empty() { continue; } flat_string.push_str(string); // XXX // Comment suggests we add sentence boundary, but it seems to be missing from actual // code in spm. flat_string.push_str(&k_sentence_boundary); for c in string.chars() { if c!= c_sentence_boundary { *all_chars.entry(c).or_insert(0) += n; } } } flat_string.shrink_to_fit(); #[cfg(feature = "esaxx_fast")] let suffix = esaxx_rs::suffix(&flat_string).unwrap(); #[cfg(not(feature = "esaxx_fast"))] let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap(); // Basic chars need to be in sentence pieces. let mut seed_sentencepieces: Vec<SentencePiece> = vec![]; let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect(); // Reversed order sall_chars.sort_by_key(|&a| Reverse(a)); let mut substr_index: Vec<_> = suffix .iter() .filter_map(|(string, freq)| { if string.len() <= 1 { return None; } if string.contains(&c_sentence_boundary) { return None; } if!self.is_valid_sentencepiece(string) { return None; } let score = freq * string.len() as u32; // if let Some(p) = &progress { // p.inc(1); // } Some((score, string)) }) .collect(); // Fill seed_sentencepieces for (count, character) in sall_chars { seed_sentencepieces.push((character.to_string(), count.into())); } // sort by decreasing score substr_index.sort_by_key(|&a| Reverse(a)); for (score, char_string) in substr_index { // Just in case assert!(self.is_valid_sentencepiece(char_string)); let string: String = char_string.iter().collect(); seed_sentencepieces.push((string, score.into())); if seed_sentencepieces.len() >= self.seed_size { break; } } to_log_prob(&mut seed_sentencepieces); seed_sentencepieces } fn prune_sentence_pieces( &self, model: &Unigram, pieces: &[SentencePiece], sentences: &[Sentence], ) -> Vec<SentencePiece> { let mut always_keep = vec![true; pieces.len()]; let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; let bos_id = pieces.len() + 1; let eos_id = pieces.len() + 2; // First, segments the current sentencepieces to know // how each sentencepiece is resegmented if this sentencepiece is removed // from the vocabulary. // To do so, we take the second best segmentation of sentencepiece[i]. // alternatives[i] stores the sequence of second best sentencepieces. for (id, (token, _score)) in pieces.iter().enumerate() { // Always keep unk. if id == 0 { always_keep[id] = false; continue; } let mut lattice = Lattice::from(token, bos_id, eos_id); model.populate_nodes(&mut lattice); let nbests = lattice.nbest(2); if nbests.len() == 1 { always_keep[id] = true; } else if nbests[0].len() >= 2 { always_keep[id] = false; } else if nbests[0].len() == 1 { always_keep[id] = true; for node in &nbests[1] { let alt_id = node.borrow().id; alternatives[id].push(alt_id); } } } // Second, segments all sentences to compute likelihood // with a unigram language model. 
inverted[i] stores // the set of sentence index where the sentencepieces[i] appears. let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect(); let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences .maybe_par_chunks(chunk_size) .map(|enumerated_sentence_count_chunk| { let mut vsum = 0.0; let mut freq: Vec<f64> = vec![0.0; pieces.len()]; let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; for (i, (sentence, count)) in enumerated_sentence_count_chunk { let mut lattice = Lattice::from(sentence, bos_id, eos_id); model.populate_nodes(&mut lattice); vsum += *count as f64; for node_ref in lattice.viterbi() { let id = node_ref.borrow().id; freq[id] += *count as f64; inverted[id].push(*i); } } (vsum, freq, inverted) }) .reduce( || (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]), |(vsum, freq, inverted), (lvsum, lfreq, linverted)| { ( vsum + lvsum, freq.iter() .zip(lfreq) .map(|(global_el, local_el)| global_el + local_el) .collect(), inverted .iter() .zip(linverted) .map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat()) .collect(), ) }, ); let (vsum, freq, inverted) = collected; let sum: f64 = freq.iter().sum(); let logsum = sum.ln(); let mut candidates: Vec<(usize, f64)> = vec![]; let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize); new_pieces.push(pieces[0].clone()); // Finally, computes how likely the LM likelihood is reduced if // the sentencepiece[i] is removed from the vocabulary. // Since the exact computation of loss is difficult, we compute the // loss approximately by assuming that all sentencepiece[i] in the sentences // are replaced with alternatives[i] when sentencepiece[i] is removed. for (id, (token, score)) in pieces.iter().enumerate() { if id == 0 { continue; } if freq[id] == 0.0 &&!always_keep[id] { // not found in Viterbi path. Can remove this entry safely. continue; } else if alternatives[id].is_empty() { // no alternatives. Keeps this entry. new_pieces.push((token.to_string(), *score)); } else { let mut f = 0.0; // the frequency of pieces[i]; for n in &inverted[id] { let score = sentences[*n].1 as f64; f += score; } // TODO: Temporary hack to avoid Nans. if f == 0.0 || f.is_nan() { // new_pieces.push((token.to_string(), *score)); continue; } f /= vsum; // normalizes by all sentence frequency. let logprob_sp = freq[id].ln() - logsum; // After removing the sentencepiece[i], its frequency freq[i] is // re-assigned to alternatives. // new_sum = current_sum - freq[i] + freq[i] * alternatives.size() // = current_sum + freq[i] (alternatives - 1) let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln(); // The frequencies of altenatives are increased by freq[i]. let mut logprob_alt = 0.0; for n in &alternatives[id] { logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt; } // loss: the diff of likelihood after removing the sentencepieces[i]. 
let loss = f * (logprob_sp - logprob_alt); if loss.is_nan() { panic!(""); } candidates.push((id, loss)); } } let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1 let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize; let pruned_size = desired_vocab_size.max(pruned_size); candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); for (id, _score) in candidates { if new_pieces.len() == pruned_size { break; } new_pieces.push(pieces[id].clone()); } new_pieces.to_vec() } /// Update the progress bar with the new provided length and message fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) { if let Some(p) = p { p.set_message(message); p.set_length(len as u64); p.set_draw_delta(len as u64 / 100); p.reset(); } } /// Set the progress bar in the finish state fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) { if let Some(p) = p { p.set_length(final_len as u64); p.finish(); println!(); } } fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) { let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum(); let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let collected: (f64, u32, Vec<f64>) = sentences .maybe_par_chunks(chunk_size) .map(|sentences_chunk| { let mut expected: Vec<f64> = vec![0.0; model.len()]; let mut objs: f64 = 0.0; let mut ntokens: u32 = 0; for (string, freq) in sentences_chunk { let mut lattice = Lattice::from(string, model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected); if z.is_nan() { panic!("likelihood is NAN. Input sentence may be too long."); } ntokens += lattice.viterbi().len() as u32; objs -= z / (all_sentence_freq as f64); } (objs, ntokens, expected) }) .reduce( || (0.0, 0, vec![0.0; model.len()]), |(objs, ntokens, expected), (lobjs, lntokens, lexpected)| { ( objs + lobjs, ntokens + lntokens, expected .iter() .zip(lexpected) .map(|(global_el, local_el)| global_el + local_el) .collect(), ) }, ); collected } fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> { if pieces.len()!= expected.len() { panic!( "Those two iterators are supposed to be the same length ({} vs {})", pieces.len(), expected.len() ); } let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size.try_into().unwrap()); let mut sum = 0.0; let expected_frequency_threshold = 0.5; for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() { // Always keep unk. if i == 0 { new_pieces.push((piece.clone(), f64::NAN)); continue; } if *freq < expected_frequency_threshold { continue; } new_pieces.push((piece.clone(), *freq)); sum += freq; } // // Here we do not use the original EM, but use the // // Bayesianified/DPified EM algorithm. // // https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf // // This modification will act as a sparse prior. let logsum = digamma(sum); let new_pieces: Vec<_> = new_pieces .into_iter() .map(|(s, c)| (s, digamma(c) - logsum)) .collect(); new_pieces } pub fn do_train( &self, sentences: Vec<Sentence>, model: &mut Unigram, ) -> Result<Vec<AddedToken>> { let progress = self.setup_progress(); // // 1. 
Compute frequent substrings // TODO Should be able to upgrade to u64 when needed self.update_progress(&progress, sentences.len(), "Suffix array seeds"); let mut pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size.try_into().unwrap()); // We use a UNK token when training, whatever the `self.unk_token` pieces.push(("<UNK>".into(), f64::NAN)); pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress)); self.finalize_progress(&progress, sentences.len()); // Useful to check compatibility with spm. debug!( "Using {} pieces on {} sentences for EM training", pieces.len(), sentences.len()
required_chars
identifier_name
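`required_chars` (the `identifier_name` answer above) is the guarantee that no character seen in the corpus can be pruned away. A self-contained sketch of the same shape, using a plain `(String, u32)` tuple in place of the crate's `Sentence` alias:

use std::collections::HashSet;

fn required_chars(word_counts: &[(String, u32)], initial_alphabet: &HashSet<char>) -> HashSet<String> {
    word_counts
        .iter()
        .flat_map(|(s, _count)| s.chars())
        .chain(initial_alphabet.iter().copied())
        .map(|c| c.to_string())
        .collect()
}

fn main() {
    let words = vec![("abca".to_string(), 3)];
    let extra: HashSet<char> = ['x'].into_iter().collect();
    let req = required_chars(&words, &extra);
    assert_eq!(req.len(), 4); // "a", "b", "c", "x"
}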
trainer.rs
}; let vocab_size_without_special_tokens = if need_add_unk { self.vocab_size as usize - self.special_tokens.len() - 1 } else { self.vocab_size as usize - self.special_tokens.len() }; for (token, score) in model.iter() { if inserted.contains::<str>(token) { continue; } inserted.insert(token.to_string()); pieces.push((token.to_string(), if score.is_nan() { 0.0 } else { *score })); if pieces.len() == vocab_size_without_special_tokens { break; } } pieces.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); // Insert the necessary tokens let mut special_tokens = self .special_tokens .iter() .map(|t| (t.content.clone(), 0.0)) .collect::<Vec<_>>(); if need_add_unk { special_tokens.insert(0, (self.unk_token.clone().unwrap(), 0.0)); } Unigram::from( special_tokens.into_iter().chain(pieces).collect(), unk_id, model.byte_fallback(), ) } fn required_chars(&self, word_counts: &[Sentence]) -> HashSet<String> { word_counts .iter() .flat_map(|(s, _count)| s.chars()) .chain(self.initial_alphabet.iter().copied()) .map(|c| c.to_string()) .collect() } fn make_seed_sentence_pieces( &self, sentences: &[Sentence], _progress: &Option<ProgressBar>, ) -> Vec<SentencePiece> { // Put all sentences in a string, separated by \0 let total: usize = sentences .iter() .map(|(s, _)| s.chars().count()) .sum::<usize>() + sentences.len(); let mut flat_string = String::with_capacity(total); let mut all_chars: HashMap<char, u32> = HashMap::new(); let c_sentence_boundary = '\0'; let k_sentence_boundary = '\0'.to_string(); for (string, n) in sentences { if string.is_empty() { continue; } flat_string.push_str(string); // XXX // Comment suggests we add sentence boundary, but it seems to be missing from actual // code in spm. flat_string.push_str(&k_sentence_boundary); for c in string.chars() { if c!= c_sentence_boundary { *all_chars.entry(c).or_insert(0) += n; } } } flat_string.shrink_to_fit(); #[cfg(feature = "esaxx_fast")] let suffix = esaxx_rs::suffix(&flat_string).unwrap(); #[cfg(not(feature = "esaxx_fast"))] let suffix = esaxx_rs::suffix_rs(&flat_string).unwrap(); // Basic chars need to be in sentence pieces. 
let mut seed_sentencepieces: Vec<SentencePiece> = vec![]; let mut sall_chars: Vec<_> = all_chars.into_iter().map(|(a, b)| (b, a)).collect(); // Reversed order sall_chars.sort_by_key(|&a| Reverse(a)); let mut substr_index: Vec<_> = suffix .iter() .filter_map(|(string, freq)| { if string.len() <= 1 { return None; } if string.contains(&c_sentence_boundary) { return None; } if!self.is_valid_sentencepiece(string) { return None; } let score = freq * string.len() as u32; // if let Some(p) = &progress { // p.inc(1); // } Some((score, string)) }) .collect(); // Fill seed_sentencepieces for (count, character) in sall_chars { seed_sentencepieces.push((character.to_string(), count.into())); } // sort by decreasing score substr_index.sort_by_key(|&a| Reverse(a)); for (score, char_string) in substr_index { // Just in case assert!(self.is_valid_sentencepiece(char_string)); let string: String = char_string.iter().collect(); seed_sentencepieces.push((string, score.into())); if seed_sentencepieces.len() >= self.seed_size { break; } } to_log_prob(&mut seed_sentencepieces); seed_sentencepieces } fn prune_sentence_pieces( &self, model: &Unigram, pieces: &[SentencePiece], sentences: &[Sentence], ) -> Vec<SentencePiece> { let mut always_keep = vec![true; pieces.len()]; let mut alternatives: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; let bos_id = pieces.len() + 1; let eos_id = pieces.len() + 2; // First, segments the current sentencepieces to know // how each sentencepiece is resegmented if this sentencepiece is removed // from the vocabulary. // To do so, we take the second best segmentation of sentencepiece[i]. // alternatives[i] stores the sequence of second best sentencepieces. for (id, (token, _score)) in pieces.iter().enumerate() { // Always keep unk. if id == 0 { always_keep[id] = false; continue; } let mut lattice = Lattice::from(token, bos_id, eos_id); model.populate_nodes(&mut lattice); let nbests = lattice.nbest(2); if nbests.len() == 1 { always_keep[id] = true; } else if nbests[0].len() >= 2 { always_keep[id] = false; } else if nbests[0].len() == 1 { always_keep[id] = true; for node in &nbests[1] { let alt_id = node.borrow().id; alternatives[id].push(alt_id); } } } // Second, segments all sentences to compute likelihood // with a unigram language model. inverted[i] stores // the set of sentence index where the sentencepieces[i] appears. 
let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let indexed_sentences: Vec<(usize, &Sentence)> = sentences.iter().enumerate().collect(); let collected: (f64, Vec<f64>, Vec<Vec<usize>>) = indexed_sentences .maybe_par_chunks(chunk_size) .map(|enumerated_sentence_count_chunk| { let mut vsum = 0.0; let mut freq: Vec<f64> = vec![0.0; pieces.len()]; let mut inverted: Vec<Vec<usize>> = vec![Vec::new(); pieces.len()]; for (i, (sentence, count)) in enumerated_sentence_count_chunk { let mut lattice = Lattice::from(sentence, bos_id, eos_id); model.populate_nodes(&mut lattice); vsum += *count as f64; for node_ref in lattice.viterbi() { let id = node_ref.borrow().id; freq[id] += *count as f64; inverted[id].push(*i); } } (vsum, freq, inverted) }) .reduce( || (0.0, vec![0.0; pieces.len()], vec![Vec::new(); pieces.len()]), |(vsum, freq, inverted), (lvsum, lfreq, linverted)| { ( vsum + lvsum, freq.iter() .zip(lfreq) .map(|(global_el, local_el)| global_el + local_el) .collect(), inverted .iter() .zip(linverted) .map(|(global_el, local_el)| [&global_el[..], &local_el[..]].concat()) .collect(), ) }, ); let (vsum, freq, inverted) = collected; let sum: f64 = freq.iter().sum(); let logsum = sum.ln(); let mut candidates: Vec<(usize, f64)> = vec![]; let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size as usize); new_pieces.push(pieces[0].clone()); // Finally, computes how likely the LM likelihood is reduced if // the sentencepiece[i] is removed from the vocabulary. // Since the exact computation of loss is difficult, we compute the // loss approximately by assuming that all sentencepiece[i] in the sentences // are replaced with alternatives[i] when sentencepiece[i] is removed. for (id, (token, score)) in pieces.iter().enumerate() { if id == 0 { continue; } if freq[id] == 0.0 &&!always_keep[id] { // not found in Viterbi path. Can remove this entry safely. continue; } else if alternatives[id].is_empty() { // no alternatives. Keeps this entry. new_pieces.push((token.to_string(), *score)); } else { let mut f = 0.0; // the frequency of pieces[i]; for n in &inverted[id] { let score = sentences[*n].1 as f64; f += score; } // TODO: Temporary hack to avoid Nans. if f == 0.0 || f.is_nan()
f /= vsum; // normalizes by all sentence frequency. let logprob_sp = freq[id].ln() - logsum; // After removing the sentencepiece[i], its frequency freq[i] is // re-assigned to alternatives. // new_sum = current_sum - freq[i] + freq[i] * alternatives.size() // = current_sum + freq[i] (alternatives - 1) let logsum_alt = (sum + freq[id] * (alternatives.len() - 1) as f64).ln(); // The frequencies of altenatives are increased by freq[i]. let mut logprob_alt = 0.0; for n in &alternatives[id] { logprob_alt += (freq[*n] + freq[id]).ln() - logsum_alt; } // loss: the diff of likelihood after removing the sentencepieces[i]. let loss = f * (logprob_sp - logprob_alt); if loss.is_nan() { panic!(""); } candidates.push((id, loss)); } } let desired_vocab_size: usize = (self.vocab_size as usize * 11) / 10; // * 1.1 let pruned_size: usize = ((pieces.len() as f64) * self.shrinking_factor) as usize; let pruned_size = desired_vocab_size.max(pruned_size); candidates.sort_by(|(_, a), (_, b)| b.partial_cmp(a).unwrap()); for (id, _score) in candidates { if new_pieces.len() == pruned_size { break; } new_pieces.push(pieces[id].clone()); } new_pieces.to_vec() } /// Update the progress bar with the new provided length and message fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &str) { if let Some(p) = p { p.set_message(message); p.set_length(len as u64); p.set_draw_delta(len as u64 / 100); p.reset(); } } /// Set the progress bar in the finish state fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) { if let Some(p) = p { p.set_length(final_len as u64); p.finish(); println!(); } } fn run_e_step(&self, model: &Unigram, sentences: &[Sentence]) -> (f64, u32, Vec<f64>) { let all_sentence_freq: u32 = sentences.iter().map(|(_a, b)| *b).sum(); let chunk_size = std::cmp::max(sentences.len() / current_num_threads(), 1); let collected: (f64, u32, Vec<f64>) = sentences .maybe_par_chunks(chunk_size) .map(|sentences_chunk| { let mut expected: Vec<f64> = vec![0.0; model.len()]; let mut objs: f64 = 0.0; let mut ntokens: u32 = 0; for (string, freq) in sentences_chunk { let mut lattice = Lattice::from(string, model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); let z: f64 = lattice.populate_marginal(*freq as f64, &mut expected); if z.is_nan() { panic!("likelihood is NAN. Input sentence may be too long."); } ntokens += lattice.viterbi().len() as u32; objs -= z / (all_sentence_freq as f64); } (objs, ntokens, expected) }) .reduce( || (0.0, 0, vec![0.0; model.len()]), |(objs, ntokens, expected), (lobjs, lntokens, lexpected)| { ( objs + lobjs, ntokens + lntokens, expected .iter() .zip(lexpected) .map(|(global_el, local_el)| global_el + local_el) .collect(), ) }, ); collected } fn run_m_step(&self, pieces: &[SentencePiece], expected: &[f64]) -> Vec<SentencePiece> { if pieces.len()!= expected.len() { panic!( "Those two iterators are supposed to be the same length ({} vs {})", pieces.len(), expected.len() ); } let mut new_pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size.try_into().unwrap()); let mut sum = 0.0; let expected_frequency_threshold = 0.5; for (i, (freq, (piece, _score))) in expected.iter().zip(pieces).enumerate() { // Always keep unk. if i == 0 { new_pieces.push((piece.clone(), f64::NAN)); continue; } if *freq < expected_frequency_threshold { continue; } new_pieces.push((piece.clone(), *freq)); sum += freq; } // // Here we do not use the original EM, but use the // // Bayesianified/DPified EM algorithm. 
// // https://cs.stanford.edu/~pliang/papers/tutorial-acl2007-talk.pdf // // This modification will act as a sparse prior. let logsum = digamma(sum); let new_pieces: Vec<_> = new_pieces .into_iter() .map(|(s, c)| (s, digamma(c) - logsum)) .collect(); new_pieces } pub fn do_train( &self, sentences: Vec<Sentence>, model: &mut Unigram, ) -> Result<Vec<AddedToken>> { let progress = self.setup_progress(); // // 1. Compute frequent substrings // TODO Should be able to upgrade to u64 when needed self.update_progress(&progress, sentences.len(), "Suffix array seeds"); let mut pieces: Vec<SentencePiece> = Vec::with_capacity(self.vocab_size.try_into().unwrap()); // We use a UNK token when training, whatever the `self.unk_token` pieces.push(("<UNK>".into(), f64::NAN)); pieces.extend(self.make_seed_sentence_pieces(&sentences, &progress)); self.finalize_progress(&progress, sentences.len()); // Useful to check compatibility with spm. debug!( "Using {} pieces on {} sentences for EM training", pieces.len(), sentences.len()
{ // new_pieces.push((token.to_string(), *score)); continue; }
conditional_block
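A minimal sketch of the Bayesianified M-step used in the trainer above: expected counts are turned into log-probabilities via digamma(c) - digamma(sum), which acts as a sparse prior. The digamma implementation below (recurrence plus asymptotic expansion) is an assumption added for self-containment, not the crate's own helper.

// Sketch: digamma-normalized M-step (assumed digamma implementation).
fn digamma(mut x: f64) -> f64 {
    // Recurrence digamma(x) = digamma(x + 1) - 1/x until x is large enough.
    let mut result = 0.0;
    while x < 7.0 {
        result -= 1.0 / x;
        x += 1.0;
    }
    // Asymptotic expansion for large x.
    let inv = 1.0 / x;
    let inv2 = inv * inv;
    result + x.ln() - 0.5 * inv - inv2 * (1.0 / 12.0 - inv2 * (1.0 / 120.0 - inv2 / 252.0))
}

fn m_step_logprobs(counts: &[f64]) -> Vec<f64> {
    // Mirrors the `digamma(c) - logsum` mapping in the listing above.
    let logsum = digamma(counts.iter().sum());
    counts.iter().map(|&c| digamma(c) - logsum).collect()
}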
manager.rs
use crate::{ asset::{Asset, AssetHandle}, loaders::{LoadStatus, Loader}, sources::Source, }; use std::path::{Path, PathBuf}; use std::sync::mpsc::{Receiver, Sender}; use std::{collections::HashMap, io::ErrorKind, sync::Arc}; /// Manages the loading and unloading of one struct that implements the Asset trait. /// Regular calls to maintain support lazy loading, auto unload(optional default:off) and auto drop(optional default:off). pub struct Manager<A, L> where A: Asset<L>, L: Loader, { drop: bool, unload: bool, loader_id: usize, load_send: Sender<(usize, PathBuf, L::TransferSupplement)>, load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>, asset_handles: HashMap<PathBuf, AssetHandle<A, L>>, loaded_once: Vec<PathBuf>, data: A::ManagerSupplement, } unsafe impl<A, L> Sync for Manager<A, L> where A: Asset<L>, L: Loader, { } //channels are unsafe to send but are only used internally. impl<A, L> Manager<A, L> where A: Asset<L>, L: Loader, { /// Construct a new, empty `Manager`. /// /// The function does not allocate and the returned Managers main storage will have no /// capacity until `insert` is called. pub(crate) fn new( loader_id: usize, load_send: Sender<(usize, PathBuf, L::TransferSupplement)>, load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>, data: A::ManagerSupplement, ) -> Self { Self { drop: false, unload: false, loader_id, load_send, load_recv, asset_handles: HashMap::new(), loaded_once: Vec::new(), data, } } pub fn capacity(&self) -> usize { self.asset_handles.capacity() } /// Set the `auto_dropout` of the Manager to `true`. /// /// The Manager will drop the AssetHandle on the next call of its `maintain` function /// if the asset is not loaded. /// /// After dropping the AssetHandle the `key` may be reused! /// pub fn auto_dropout(mut self) -> Self { self.drop = true; self } /// Set the `auto_unload` of the Manager to `true`. /// /// The Manager will drop its reference to the Asset on the next call of its `maintain` function /// if its strong_refcount is equal to 1. /// pub fn auto_unload(mut self) -> Self { self.unload = true; self } /// Insert an Assets Path into the Manager and return its key without loading the asset. /// If the specified path is already known to the Manager it will return the known paths key. /// /// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting /// or it will be dropped in the next call to maintain. /// pub fn insert<P: AsRef<Path>>(&mut self, path: P, data: A::AssetSupplement) { let path: PathBuf = path.as_ref().into(); self.asset_handles .entry(path.clone()) .or_insert(AssetHandle::new(path, data)); } /// Insert an Assets Path and the loaded Asset into the Manager and return its key. /// If the specified path is already known to the Manager it will return the known paths key. /// /// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting /// or it will be dropped in the next call to maintain. /// pub fn insert_raw<P: AsRef<Path>>(&mut self, path: P, asset: A::Structure, data: A::AssetSupplement) { let path: PathBuf = path.as_ref().into(); let mut handle = AssetHandle::new(path.clone(), data); handle.set(asset); self.asset_handles.insert(path, handle); } /// Loads an unloaded Asset known to the the Manager and returns its Arc<T>. /// If the asset is already loaded it will just return the Asset. /// /// If there is no valid file found at the specified path it will return an io::Error. /// If the key is not found it will return None. 
/// pub fn load<P: AsRef<Path>>(&mut self, path: P, supp: L::TransferSupplement) -> Result<(), std::io::Error> { let mut a = self .asset_handles .get_mut(path.as_ref()) .ok_or(std::io::Error::new( ErrorKind::NotFound, format!("Entry not found! {:?}", path.as_ref()), ))?; if!path.as_ref().exists() { Err(std::io::Error::new( ErrorKind::NotFound, format!("File not found! {:?}", path.as_ref()), )) } else if a.status.eq(&LoadStatus::Loading){ Err(std::io::Error::new( ErrorKind::AlreadyExists, format!("Image already loading! {:?}", path.as_ref()), )) } else { a.status = LoadStatus::Loading; let package = (self.loader_id, path.as_ref().into(), supp); self .load_send .send(package) .map_err(|e| std::io::Error::new( ErrorKind::ConnectionReset, format!("Error sending! {:?}", e), )) } } /// Unloads an Asset known to the the Manager. The Asset can be reloaded with the same key. /// /// The Arc of the Asset will be dropped. The Asset may still be used but the Manager wont know about it anymore. /// If the key is not found it will do nothing. /// pub fn unload<P: AsRef<Path>>(&mut self, path: P) { if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) { handle.unload() } } /// Drops an Asset known to the the Manager. The key may be reused by another Asset. /// /// If the key is not found it will do nothing. /// pub fn drop<P: AsRef<Path>>(&mut self, path: P) { self.asset_handles.remove(path.as_ref()); } /// Returns an Asset known to the the Manager. /// /// If the key is not found it will return None. /// If the Asset is not loaded it will return None. /// Call status() to get detailed information. /// pub fn get<P: AsRef<Path>>(&self, path: P) -> Option<Arc<A::Structure>> { Some(self.asset_handles.get(path.as_ref())?.get()?.clone()) } /// Returns an Asset known to the the Manager. /// /// If the key is not found it will return None. /// If the Asset is not loading it will return None. /// Will wait for the Asset to become available on the receiver and then returning it. /// pub fn get_blocking<P: AsRef<Path>>(&mut self, path: P) -> Option<Arc<A::Structure>> { match self.asset_handles.get(path.as_ref())?.get() { None => { if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) { if handle.status.eq(&LoadStatus::Loading) { while let Ok((p, out)) = self.load_recv.recv() { if let Ok(a) = A::construct(out, &handle.data, &self.data) { handle.set(a); self.loaded_once.push(path.as_ref().into()); if p.eq(path.as_ref()) { return Some(handle.get()?.clone()); } } } } } None } Some(a) => Some(a.clone()), } } /// Returns loaded assets once as soon as they have the LoadStatus::Loaded. pub fn get_loaded_once(&mut self) -> Vec<PathBuf> { let mut list = Vec::new(); if!self.loaded_once.is_empty() { std::mem::swap(&mut list, &mut self.loaded_once); } list } /// Returns the LoadStatus of an Asset known to the the Manager. /// /// If the key is not found it will return None. /// pub fn status<P: AsRef<Path>>(&self, path: P) -> Option<LoadStatus> { Some(self.asset_handles.get(path.as_ref())?.status) } pub fn data_asset<P: AsRef<Path>>(&self, path: P) -> Option<&A::AssetSupplement>{ Some(&self.asset_handles.get(path.as_ref())?.data) } pub fn data_manager<P: AsRef<Path>>(&self) -> Option<&A::ManagerSupplement>{ Some(&self.data) } /// Maintains the manager. Needs to be called for lazy loading, to unload unused Assets and maybe even drop them. /// The default Manager will not drop or unload any Assets. So maintain will just load Assets. 
/// Will be slow if used with a large initial capacity + min_drop + min_unload as it will iterate over every Asset. /// pub fn maintain(&mut self) { if self.unload { self.asset_handles .values_mut() .filter(|h| h.status.eq(&LoadStatus::Loaded)) .filter(|h| Arc::strong_count(h.get().unwrap()).eq(&1)) .for_each(|h| h.unload()); } if self.drop { let mut paths_to_drop = Vec::new(); for (path, handle) in self.asset_handles.iter() { if self.drop && handle.status!= LoadStatus::Loading { paths_to_drop.push(path.clone()); } } for path in paths_to_drop { self.drop(path); } } for (p, b) in self.load_recv.try_iter() { if let Some(handle) = self.asset_handles.get_mut(p.as_path()) { if let Ok(a) = A::construct(b, &handle.data, &self.data) { handle.set(a); self.loaded_once.push(p); } }
)) } } impl<A, L> Iterator for Manager<A, L> where A: Asset<L>, L: Loader, { type Item = Option<Arc<A::Structure>>; fn next(&mut self) -> Option<Self::Item> { self.asset_handles .iter() .next() .map(|(_, a)| a.get().map(|a| a.clone())) } }
} } pub fn strong_count<P: AsRef<Path>>(&mut self, path: P) -> Option<usize> { Some(Arc::strong_count( self.asset_handles.get(path.as_ref())?.get()?,
random_line_split
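A hedged sketch of how the Manager above is driven. TextureAsset, TextureLoader, the unit supplements, and the way the Manager instance is obtained are all assumptions; only the call sequence insert -> load -> maintain -> get follows the listing.

fn demo(mut manager: Manager<TextureAsset, TextureLoader>) {
    // Register the path only; no I/O happens until `load` is called.
    manager.insert("img/stone.png", ());
    // Flip the handle to LoadStatus::Loading and queue it on the loader channel.
    manager.load("img/stone.png", ()).unwrap();
    // `maintain` drains the receive channel and constructs finished assets.
    while manager.get("img/stone.png").is_none() {
        manager.maintain();
    }
    let _texture = manager.get("img/stone.png").unwrap(); // Arc<A::Structure>
}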
manager.rs
use crate::{ asset::{Asset, AssetHandle}, loaders::{LoadStatus, Loader}, sources::Source, }; use std::path::{Path, PathBuf}; use std::sync::mpsc::{Receiver, Sender}; use std::{collections::HashMap, io::ErrorKind, sync::Arc}; /// Manages the loading and unloading of one struct that implements the Asset trait. /// Regular calls to maintain support lazy loading, auto unload(optional default:off) and auto drop(optional default:off). pub struct
<A, L> where A: Asset<L>, L: Loader, { drop: bool, unload: bool, loader_id: usize, load_send: Sender<(usize, PathBuf, L::TransferSupplement)>, load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>, asset_handles: HashMap<PathBuf, AssetHandle<A, L>>, loaded_once: Vec<PathBuf>, data: A::ManagerSupplement, } unsafe impl<A, L> Sync for Manager<A, L> where A: Asset<L>, L: Loader, { } //channels are unsafe to send but are only used internally. impl<A, L> Manager<A, L> where A: Asset<L>, L: Loader, { /// Construct a new, empty `Manager`. /// /// The function does not allocate and the returned Managers main storage will have no /// capacity until `insert` is called. pub(crate) fn new( loader_id: usize, load_send: Sender<(usize, PathBuf, L::TransferSupplement)>, load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>, data: A::ManagerSupplement, ) -> Self { Self { drop: false, unload: false, loader_id, load_send, load_recv, asset_handles: HashMap::new(), loaded_once: Vec::new(), data, } } pub fn capacity(&self) -> usize { self.asset_handles.capacity() } /// Set the `auto_dropout` of the Manager to `true`. /// /// The Manager will drop the AssetHandle on the next call of its `maintain` function /// if the asset is not loaded. /// /// After dropping the AssetHandle the `key` may be reused! /// pub fn auto_dropout(mut self) -> Self { self.drop = true; self } /// Set the `auto_unload` of the Manager to `true`. /// /// The Manager will drop its reference to the Asset on the next call of its `maintain` function /// if its strong_refcount is equal to 1. /// pub fn auto_unload(mut self) -> Self { self.unload = true; self } /// Insert an Assets Path into the Manager and return its key without loading the asset. /// If the specified path is already known to the Manager it will return the known paths key. /// /// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting /// or it will be dropped in the next call to maintain. /// pub fn insert<P: AsRef<Path>>(&mut self, path: P, data: A::AssetSupplement) { let path: PathBuf = path.as_ref().into(); self.asset_handles .entry(path.clone()) .or_insert(AssetHandle::new(path, data)); } /// Insert an Assets Path and the loaded Asset into the Manager and return its key. /// If the specified path is already known to the Manager it will return the known paths key. /// /// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting /// or it will be dropped in the next call to maintain. /// pub fn insert_raw<P: AsRef<Path>>(&mut self, path: P, asset: A::Structure, data: A::AssetSupplement) { let path: PathBuf = path.as_ref().into(); let mut handle = AssetHandle::new(path.clone(), data); handle.set(asset); self.asset_handles.insert(path, handle); } /// Loads an unloaded Asset known to the the Manager and returns its Arc<T>. /// If the asset is already loaded it will just return the Asset. /// /// If there is no valid file found at the specified path it will return an io::Error. /// If the key is not found it will return None. /// pub fn load<P: AsRef<Path>>(&mut self, path: P, supp: L::TransferSupplement) -> Result<(), std::io::Error> { let mut a = self .asset_handles .get_mut(path.as_ref()) .ok_or(std::io::Error::new( ErrorKind::NotFound, format!("Entry not found! {:?}", path.as_ref()), ))?; if!path.as_ref().exists() { Err(std::io::Error::new( ErrorKind::NotFound, format!("File not found! 
{:?}", path.as_ref()), )) } else if a.status.eq(&LoadStatus::Loading){ Err(std::io::Error::new( ErrorKind::AlreadyExists, format!("Image already loading! {:?}", path.as_ref()), )) } else { a.status = LoadStatus::Loading; let package = (self.loader_id, path.as_ref().into(), supp); self .load_send .send(package) .map_err(|e| std::io::Error::new( ErrorKind::ConnectionReset, format!("Error sending! {:?}", e), )) } } /// Unloads an Asset known to the the Manager. The Asset can be reloaded with the same key. /// /// The Arc of the Asset will be dropped. The Asset may still be used but the Manager wont know about it anymore. /// If the key is not found it will do nothing. /// pub fn unload<P: AsRef<Path>>(&mut self, path: P) { if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) { handle.unload() } } /// Drops an Asset known to the the Manager. The key may be reused by another Asset. /// /// If the key is not found it will do nothing. /// pub fn drop<P: AsRef<Path>>(&mut self, path: P) { self.asset_handles.remove(path.as_ref()); } /// Returns an Asset known to the the Manager. /// /// If the key is not found it will return None. /// If the Asset is not loaded it will return None. /// Call status() to get detailed information. /// pub fn get<P: AsRef<Path>>(&self, path: P) -> Option<Arc<A::Structure>> { Some(self.asset_handles.get(path.as_ref())?.get()?.clone()) } /// Returns an Asset known to the the Manager. /// /// If the key is not found it will return None. /// If the Asset is not loading it will return None. /// Will wait for the Asset to become available on the receiver and then returning it. /// pub fn get_blocking<P: AsRef<Path>>(&mut self, path: P) -> Option<Arc<A::Structure>> { match self.asset_handles.get(path.as_ref())?.get() { None => { if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) { if handle.status.eq(&LoadStatus::Loading) { while let Ok((p, out)) = self.load_recv.recv() { if let Ok(a) = A::construct(out, &handle.data, &self.data) { handle.set(a); self.loaded_once.push(path.as_ref().into()); if p.eq(path.as_ref()) { return Some(handle.get()?.clone()); } } } } } None } Some(a) => Some(a.clone()), } } /// Returns loaded assets once as soon as they have the LoadStatus::Loaded. pub fn get_loaded_once(&mut self) -> Vec<PathBuf> { let mut list = Vec::new(); if!self.loaded_once.is_empty() { std::mem::swap(&mut list, &mut self.loaded_once); } list } /// Returns the LoadStatus of an Asset known to the the Manager. /// /// If the key is not found it will return None. /// pub fn status<P: AsRef<Path>>(&self, path: P) -> Option<LoadStatus> { Some(self.asset_handles.get(path.as_ref())?.status) } pub fn data_asset<P: AsRef<Path>>(&self, path: P) -> Option<&A::AssetSupplement>{ Some(&self.asset_handles.get(path.as_ref())?.data) } pub fn data_manager<P: AsRef<Path>>(&self) -> Option<&A::ManagerSupplement>{ Some(&self.data) } /// Maintains the manager. Needs to be called for lazy loading, to unload unused Assets and maybe even drop them. /// The default Manager will not drop or unload any Assets. So maintain will just load Assets. /// Will be slow if used with a large initial capacity + min_drop + min_unload as it will iterate over every Asset. 
/// pub fn maintain(&mut self) { if self.unload { self.asset_handles .values_mut() .filter(|h| h.status.eq(&LoadStatus::Loaded)) .filter(|h| Arc::strong_count(h.get().unwrap()).eq(&1)) .for_each(|h| h.unload()); } if self.drop { let mut paths_to_drop = Vec::new(); for (path, handle) in self.asset_handles.iter() { if self.drop && handle.status!= LoadStatus::Loading { paths_to_drop.push(path.clone()); } } for path in paths_to_drop { self.drop(path); } } for (p, b) in self.load_recv.try_iter() { if let Some(handle) = self.asset_handles.get_mut(p.as_path()) { if let Ok(a) = A::construct(b, &handle.data, &self.data) { handle.set(a); self.loaded_once.push(p); } } } } pub fn strong_count<P: AsRef<Path>>(&mut self, path: P) -> Option<usize> { Some(Arc::strong_count( self.asset_handles.get(path.as_ref())?.get()?, )) } } impl<A, L> Iterator for Manager<A, L> where A: Asset<L>, L: Loader, { type Item = Option<Arc<A::Structure>>; fn next(&mut self) -> Option<Self::Item> { self.asset_handles .iter() .next() .map(|(_, a)| a.get().map(|a| a.clone())) } }
Manager
identifier_name
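The Manager only holds the two channel endpoints; below is a minimal, runnable sketch of the wiring it expects. The real Loader side lives elsewhere in the crate, so the worker thread here — with Vec<u8> standing in for the Source output and () for the transfer supplement — is an assumption that just echoes the (loader_id, path, supplement) -> (path, output) protocol.

use std::path::PathBuf;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

fn spawn_loader() -> (Sender<(usize, PathBuf, ())>, Receiver<(PathBuf, Vec<u8>)>) {
    let (req_tx, req_rx) = channel::<(usize, PathBuf, ())>();
    let (res_tx, res_rx) = channel::<(PathBuf, Vec<u8>)>();
    thread::spawn(move || {
        for (_loader_id, path, _supp) in req_rx {
            // A real Loader would dispatch on loader_id and go through its
            // Source; reading the file directly is a stand-in.
            if let Ok(bytes) = std::fs::read(&path) {
                let _ = res_tx.send((path, bytes));
            }
        }
    });
    (req_tx, res_rx)
}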
manager.rs
use crate::{ asset::{Asset, AssetHandle}, loaders::{LoadStatus, Loader}, sources::Source, }; use std::path::{Path, PathBuf}; use std::sync::mpsc::{Receiver, Sender}; use std::{collections::HashMap, io::ErrorKind, sync::Arc}; /// Manages the loading and unloading of one struct that implements the Asset trait. /// Regular calls to maintain support lazy loading, auto unload(optional default:off) and auto drop(optional default:off). pub struct Manager<A, L> where A: Asset<L>, L: Loader, { drop: bool, unload: bool, loader_id: usize, load_send: Sender<(usize, PathBuf, L::TransferSupplement)>, load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>, asset_handles: HashMap<PathBuf, AssetHandle<A, L>>, loaded_once: Vec<PathBuf>, data: A::ManagerSupplement, } unsafe impl<A, L> Sync for Manager<A, L> where A: Asset<L>, L: Loader, { } //channels are unsafe to send but are only used internally. impl<A, L> Manager<A, L> where A: Asset<L>, L: Loader, { /// Construct a new, empty `Manager`. /// /// The function does not allocate and the returned Managers main storage will have no /// capacity until `insert` is called. pub(crate) fn new( loader_id: usize, load_send: Sender<(usize, PathBuf, L::TransferSupplement)>, load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>, data: A::ManagerSupplement, ) -> Self { Self { drop: false, unload: false, loader_id, load_send, load_recv, asset_handles: HashMap::new(), loaded_once: Vec::new(), data, } } pub fn capacity(&self) -> usize { self.asset_handles.capacity() } /// Set the `auto_dropout` of the Manager to `true`. /// /// The Manager will drop the AssetHandle on the next call of its `maintain` function /// if the asset is not loaded. /// /// After dropping the AssetHandle the `key` may be reused! /// pub fn auto_dropout(mut self) -> Self { self.drop = true; self } /// Set the `auto_unload` of the Manager to `true`. /// /// The Manager will drop its reference to the Asset on the next call of its `maintain` function /// if its strong_refcount is equal to 1. /// pub fn auto_unload(mut self) -> Self { self.unload = true; self } /// Insert an Assets Path into the Manager and return its key without loading the asset. /// If the specified path is already known to the Manager it will return the known paths key. /// /// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting /// or it will be dropped in the next call to maintain. /// pub fn insert<P: AsRef<Path>>(&mut self, path: P, data: A::AssetSupplement) { let path: PathBuf = path.as_ref().into(); self.asset_handles .entry(path.clone()) .or_insert(AssetHandle::new(path, data)); } /// Insert an Assets Path and the loaded Asset into the Manager and return its key. /// If the specified path is already known to the Manager it will return the known paths key. /// /// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting /// or it will be dropped in the next call to maintain. /// pub fn insert_raw<P: AsRef<Path>>(&mut self, path: P, asset: A::Structure, data: A::AssetSupplement) { let path: PathBuf = path.as_ref().into(); let mut handle = AssetHandle::new(path.clone(), data); handle.set(asset); self.asset_handles.insert(path, handle); } /// Loads an unloaded Asset known to the the Manager and returns its Arc<T>. /// If the asset is already loaded it will just return the Asset. /// /// If there is no valid file found at the specified path it will return an io::Error. /// If the key is not found it will return None. 
/// pub fn load<P: AsRef<Path>>(&mut self, path: P, supp: L::TransferSupplement) -> Result<(), std::io::Error> { let mut a = self .asset_handles .get_mut(path.as_ref()) .ok_or(std::io::Error::new( ErrorKind::NotFound, format!("Entry not found! {:?}", path.as_ref()), ))?; if!path.as_ref().exists() { Err(std::io::Error::new( ErrorKind::NotFound, format!("File not found! {:?}", path.as_ref()), )) } else if a.status.eq(&LoadStatus::Loading)
else { a.status = LoadStatus::Loading; let package = (self.loader_id, path.as_ref().into(), supp); self .load_send .send(package) .map_err(|e| std::io::Error::new( ErrorKind::ConnectionReset, format!("Error sending! {:?}", e), )) } } /// Unloads an Asset known to the the Manager. The Asset can be reloaded with the same key. /// /// The Arc of the Asset will be dropped. The Asset may still be used but the Manager wont know about it anymore. /// If the key is not found it will do nothing. /// pub fn unload<P: AsRef<Path>>(&mut self, path: P) { if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) { handle.unload() } } /// Drops an Asset known to the the Manager. The key may be reused by another Asset. /// /// If the key is not found it will do nothing. /// pub fn drop<P: AsRef<Path>>(&mut self, path: P) { self.asset_handles.remove(path.as_ref()); } /// Returns an Asset known to the the Manager. /// /// If the key is not found it will return None. /// If the Asset is not loaded it will return None. /// Call status() to get detailed information. /// pub fn get<P: AsRef<Path>>(&self, path: P) -> Option<Arc<A::Structure>> { Some(self.asset_handles.get(path.as_ref())?.get()?.clone()) } /// Returns an Asset known to the the Manager. /// /// If the key is not found it will return None. /// If the Asset is not loading it will return None. /// Will wait for the Asset to become available on the receiver and then returning it. /// pub fn get_blocking<P: AsRef<Path>>(&mut self, path: P) -> Option<Arc<A::Structure>> { match self.asset_handles.get(path.as_ref())?.get() { None => { if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) { if handle.status.eq(&LoadStatus::Loading) { while let Ok((p, out)) = self.load_recv.recv() { if let Ok(a) = A::construct(out, &handle.data, &self.data) { handle.set(a); self.loaded_once.push(path.as_ref().into()); if p.eq(path.as_ref()) { return Some(handle.get()?.clone()); } } } } } None } Some(a) => Some(a.clone()), } } /// Returns loaded assets once as soon as they have the LoadStatus::Loaded. pub fn get_loaded_once(&mut self) -> Vec<PathBuf> { let mut list = Vec::new(); if!self.loaded_once.is_empty() { std::mem::swap(&mut list, &mut self.loaded_once); } list } /// Returns the LoadStatus of an Asset known to the the Manager. /// /// If the key is not found it will return None. /// pub fn status<P: AsRef<Path>>(&self, path: P) -> Option<LoadStatus> { Some(self.asset_handles.get(path.as_ref())?.status) } pub fn data_asset<P: AsRef<Path>>(&self, path: P) -> Option<&A::AssetSupplement>{ Some(&self.asset_handles.get(path.as_ref())?.data) } pub fn data_manager<P: AsRef<Path>>(&self) -> Option<&A::ManagerSupplement>{ Some(&self.data) } /// Maintains the manager. Needs to be called for lazy loading, to unload unused Assets and maybe even drop them. /// The default Manager will not drop or unload any Assets. So maintain will just load Assets. /// Will be slow if used with a large initial capacity + min_drop + min_unload as it will iterate over every Asset. 
/// pub fn maintain(&mut self) { if self.unload { self.asset_handles .values_mut() .filter(|h| h.status.eq(&LoadStatus::Loaded)) .filter(|h| Arc::strong_count(h.get().unwrap()).eq(&1)) .for_each(|h| h.unload()); } if self.drop { let mut paths_to_drop = Vec::new(); for (path, handle) in self.asset_handles.iter() { if self.drop && handle.status!= LoadStatus::Loading { paths_to_drop.push(path.clone()); } } for path in paths_to_drop { self.drop(path); } } for (p, b) in self.load_recv.try_iter() { if let Some(handle) = self.asset_handles.get_mut(p.as_path()) { if let Ok(a) = A::construct(b, &handle.data, &self.data) { handle.set(a); self.loaded_once.push(p); } } } } pub fn strong_count<P: AsRef<Path>>(&mut self, path: P) -> Option<usize> { Some(Arc::strong_count( self.asset_handles.get(path.as_ref())?.get()?, )) } } impl<A, L> Iterator for Manager<A, L> where A: Asset<L>, L: Loader, { type Item = Option<Arc<A::Structure>>; fn next(&mut self) -> Option<Self::Item> { self.asset_handles .iter() .next() .map(|(_, a)| a.get().map(|a| a.clone())) } }
{ Err(std::io::Error::new( ErrorKind::AlreadyExists, format!("Image already loading! {:?}", path.as_ref()), )) }
conditional_block
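The middle above is the AlreadyExists branch of `load`'s guard chain; condensed into a free function for illustration (the `loading` flag stands in for the handle's LoadStatus check, an assumption for self-containment):

use std::io::{Error, ErrorKind};
use std::path::Path;

fn guard_load(path: &Path, loading: bool) -> Result<(), Error> {
    if !path.exists() {
        Err(Error::new(ErrorKind::NotFound, format!("File not found! {:?}", path)))
    } else if loading {
        Err(Error::new(ErrorKind::AlreadyExists, format!("Already loading! {:?}", path)))
    } else {
        Ok(()) // the real method flips the status to Loading and sends on the channel
    }
}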
manager.rs
use crate::{ asset::{Asset, AssetHandle}, loaders::{LoadStatus, Loader}, sources::Source, }; use std::path::{Path, PathBuf}; use std::sync::mpsc::{Receiver, Sender}; use std::{collections::HashMap, io::ErrorKind, sync::Arc}; /// Manages the loading and unloading of one struct that implements the Asset trait. /// Regular calls to maintain support lazy loading, auto unload(optional default:off) and auto drop(optional default:off). pub struct Manager<A, L> where A: Asset<L>, L: Loader, { drop: bool, unload: bool, loader_id: usize, load_send: Sender<(usize, PathBuf, L::TransferSupplement)>, load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>, asset_handles: HashMap<PathBuf, AssetHandle<A, L>>, loaded_once: Vec<PathBuf>, data: A::ManagerSupplement, } unsafe impl<A, L> Sync for Manager<A, L> where A: Asset<L>, L: Loader, { } //channels are unsafe to send but are only used internally. impl<A, L> Manager<A, L> where A: Asset<L>, L: Loader, { /// Construct a new, empty `Manager`. /// /// The function does not allocate and the returned Managers main storage will have no /// capacity until `insert` is called. pub(crate) fn new( loader_id: usize, load_send: Sender<(usize, PathBuf, L::TransferSupplement)>, load_recv: Receiver<(PathBuf, <L::Source as Source>::Output)>, data: A::ManagerSupplement, ) -> Self { Self { drop: false, unload: false, loader_id, load_send, load_recv, asset_handles: HashMap::new(), loaded_once: Vec::new(), data, } } pub fn capacity(&self) -> usize { self.asset_handles.capacity() } /// Set the `auto_dropout` of the Manager to `true`. /// /// The Manager will drop the AssetHandle on the next call of its `maintain` function /// if the asset is not loaded. /// /// After dropping the AssetHandle the `key` may be reused! /// pub fn auto_dropout(mut self) -> Self { self.drop = true; self } /// Set the `auto_unload` of the Manager to `true`. /// /// The Manager will drop its reference to the Asset on the next call of its `maintain` function /// if its strong_refcount is equal to 1. /// pub fn auto_unload(mut self) -> Self { self.unload = true; self } /// Insert an Assets Path into the Manager and return its key without loading the asset. /// If the specified path is already known to the Manager it will return the known paths key. /// /// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting /// or it will be dropped in the next call to maintain. /// pub fn insert<P: AsRef<Path>>(&mut self, path: P, data: A::AssetSupplement) { let path: PathBuf = path.as_ref().into(); self.asset_handles .entry(path.clone()) .or_insert(AssetHandle::new(path, data)); } /// Insert an Assets Path and the loaded Asset into the Manager and return its key. /// If the specified path is already known to the Manager it will return the known paths key. /// /// If auto_dropout is activated the Asset has to be explicitly loaded with the given key after inserting /// or it will be dropped in the next call to maintain. /// pub fn insert_raw<P: AsRef<Path>>(&mut self, path: P, asset: A::Structure, data: A::AssetSupplement) { let path: PathBuf = path.as_ref().into(); let mut handle = AssetHandle::new(path.clone(), data); handle.set(asset); self.asset_handles.insert(path, handle); } /// Loads an unloaded Asset known to the the Manager and returns its Arc<T>. /// If the asset is already loaded it will just return the Asset. /// /// If there is no valid file found at the specified path it will return an io::Error. /// If the key is not found it will return None. 
/// pub fn load<P: AsRef<Path>>(&mut self, path: P, supp: L::TransferSupplement) -> Result<(), std::io::Error> { let mut a = self .asset_handles .get_mut(path.as_ref()) .ok_or(std::io::Error::new( ErrorKind::NotFound, format!("Entry not found! {:?}", path.as_ref()), ))?; if!path.as_ref().exists() { Err(std::io::Error::new( ErrorKind::NotFound, format!("File not found! {:?}", path.as_ref()), )) } else if a.status.eq(&LoadStatus::Loading){ Err(std::io::Error::new( ErrorKind::AlreadyExists, format!("Image already loading! {:?}", path.as_ref()), )) } else { a.status = LoadStatus::Loading; let package = (self.loader_id, path.as_ref().into(), supp); self .load_send .send(package) .map_err(|e| std::io::Error::new( ErrorKind::ConnectionReset, format!("Error sending! {:?}", e), )) } } /// Unloads an Asset known to the the Manager. The Asset can be reloaded with the same key. /// /// The Arc of the Asset will be dropped. The Asset may still be used but the Manager wont know about it anymore. /// If the key is not found it will do nothing. /// pub fn unload<P: AsRef<Path>>(&mut self, path: P) { if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) { handle.unload() } } /// Drops an Asset known to the the Manager. The key may be reused by another Asset. /// /// If the key is not found it will do nothing. /// pub fn drop<P: AsRef<Path>>(&mut self, path: P) { self.asset_handles.remove(path.as_ref()); } /// Returns an Asset known to the the Manager. /// /// If the key is not found it will return None. /// If the Asset is not loaded it will return None. /// Call status() to get detailed information. /// pub fn get<P: AsRef<Path>>(&self, path: P) -> Option<Arc<A::Structure>> { Some(self.asset_handles.get(path.as_ref())?.get()?.clone()) } /// Returns an Asset known to the the Manager. /// /// If the key is not found it will return None. /// If the Asset is not loading it will return None. /// Will wait for the Asset to become available on the receiver and then returning it. /// pub fn get_blocking<P: AsRef<Path>>(&mut self, path: P) -> Option<Arc<A::Structure>> { match self.asset_handles.get(path.as_ref())?.get() { None => { if let Some(handle) = self.asset_handles.get_mut(path.as_ref()) { if handle.status.eq(&LoadStatus::Loading) { while let Ok((p, out)) = self.load_recv.recv() { if let Ok(a) = A::construct(out, &handle.data, &self.data) { handle.set(a); self.loaded_once.push(path.as_ref().into()); if p.eq(path.as_ref()) { return Some(handle.get()?.clone()); } } } } } None } Some(a) => Some(a.clone()), } } /// Returns loaded assets once as soon as they have the LoadStatus::Loaded. pub fn get_loaded_once(&mut self) -> Vec<PathBuf> { let mut list = Vec::new(); if!self.loaded_once.is_empty() { std::mem::swap(&mut list, &mut self.loaded_once); } list } /// Returns the LoadStatus of an Asset known to the the Manager. /// /// If the key is not found it will return None. /// pub fn status<P: AsRef<Path>>(&self, path: P) -> Option<LoadStatus> { Some(self.asset_handles.get(path.as_ref())?.status) } pub fn data_asset<P: AsRef<Path>>(&self, path: P) -> Option<&A::AssetSupplement>{ Some(&self.asset_handles.get(path.as_ref())?.data) } pub fn data_manager<P: AsRef<Path>>(&self) -> Option<&A::ManagerSupplement>{ Some(&self.data) } /// Maintains the manager. Needs to be called for lazy loading, to unload unused Assets and maybe even drop them. /// The default Manager will not drop or unload any Assets. So maintain will just load Assets. 
/// Will be slow if used with a large initial capacity + min_drop + min_unload as it will iterate over every Asset. /// pub fn maintain(&mut self) { if self.unload { self.asset_handles .values_mut() .filter(|h| h.status.eq(&LoadStatus::Loaded)) .filter(|h| Arc::strong_count(h.get().unwrap()).eq(&1)) .for_each(|h| h.unload()); } if self.drop { let mut paths_to_drop = Vec::new(); for (path, handle) in self.asset_handles.iter() { if self.drop && handle.status!= LoadStatus::Loading { paths_to_drop.push(path.clone()); } } for path in paths_to_drop { self.drop(path); } } for (p, b) in self.load_recv.try_iter() { if let Some(handle) = self.asset_handles.get_mut(p.as_path()) { if let Ok(a) = A::construct(b, &handle.data, &self.data) { handle.set(a); self.loaded_once.push(p); } } } } pub fn strong_count<P: AsRef<Path>>(&mut self, path: P) -> Option<usize> { Some(Arc::strong_count( self.asset_handles.get(path.as_ref())?.get()?, )) } } impl<A, L> Iterator for Manager<A, L> where A: Asset<L>, L: Loader, { type Item = Option<Arc<A::Structure>>; fn next(&mut self) -> Option<Self::Item>
}
{ self.asset_handles .iter() .next() .map(|(_, a)| a.get().map(|a| a.clone())) }
identifier_body
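Note that the `Iterator` body above builds a fresh `self.asset_handles.iter()` on every call to `next`, so it keeps yielding the first entry rather than advancing. A sketch of the usual alternative — a borrowing iterator over the loaded assets — where the `Option<Arc<T>>` slot is a simplification of `AssetHandle::get`, an assumption:

use std::collections::HashMap;
use std::sync::Arc;

fn loaded_assets<K, T>(handles: &HashMap<K, Option<Arc<T>>>) -> impl Iterator<Item = Arc<T>> + '_ {
    // Skip unloaded slots and hand out cheap Arc clones.
    handles.values().filter_map(|slot| slot.clone())
}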
metadata.rs
// Copyright 2018 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use cargo::{ core::{ dependency::Kind as CargoKind, resolver::ResolveOpts, summary::FeatureValue as CargoFeatureValue, Workspace, }, ops::{self, Packages}, util::Config, CargoResult, }; use serde_json; use std::{collections::HashMap, env, fs, path::PathBuf, process::Command}; use crate::util::{self, RazeError}; use tempdir::TempDir; use serde_derive::{Deserialize, Serialize}; pub type PackageId = String; pub type Kind = String; pub type TargetSpec = String; /** * An entity that can retrive deserialized metadata for a Cargo Workspace. * * The `CargoInternalsMetadataFetcher` is probably the one you want. * * Usage of..Subcommand.. is waiting on a cargo release containing * <https://github.com/rust-lang/cargo/pull/5122> */ pub trait MetadataFetcher { fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata>; } /** The local Cargo workspace files to be used for build planning.*/ pub struct CargoWorkspaceFiles { pub toml_path: PathBuf, pub lock_path_opt: Option<PathBuf>, } /** * The metadata for a whole Cargo workspace. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`ExportInfo`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L78-L85) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Metadata { pub packages: Vec<Package>, pub resolve: Resolve, pub workspace_members: Vec<PackageId>, pub target_directory: String, pub version: i64, } /** * The metadata for an individual Cargo crate. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`SerializedPackage`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/package.rs#L32-L50) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Package { pub name: String, pub version: String, pub id: PackageId, pub license: Option<String>, pub license_file: Option<String>, pub description: Option<String>, pub source: Option<String>, pub dependencies: Vec<Dependency>, pub targets: Vec<Target>, pub features: HashMap<String, Vec<String>>, pub manifest_path: String, pub edition: String, pub sha256: Option<String>, } /** * The metadata for a dependency (a reference connecting a crate to another crate). * * WARNING: Cargo-raze does not control the definition of this struct. 
* This struct mirrors Cargo's own [`SerializedDependency`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/dependency.rs#L49-L60) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Dependency { pub name: String, pub source: String, pub req: String, pub kind: Option<Kind>, #[serde(default = "default_dependency_field_optional")] pub optional: bool, #[serde(default = "default_dependency_field_uses_default_features")] pub uses_default_features: bool, pub features: Vec<String>, pub target: Option<TargetSpec>, } /** * The metadata for a compileable target. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`SerializedTarget`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/manifest.rs#L188-L197) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Target { pub name: String, pub kind: Vec<String>, pub crate_types: Vec<String>, pub src_path: String, pub edition: String, } /** * The metadata for a fully resolved dependency tree. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`MetadataResolve`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L91-L95) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Resolve { pub nodes: Vec<ResolveNode>, pub root: PackageId, } /** * The metadata for a single resolved entry in the full dependency tree. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`Node`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L102-L106) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct ResolveNode { pub id: PackageId, pub dependencies: Vec<PackageId>, // Optional due to recent feature addition in Cargo. pub features: Option<Vec<String>>, } /** A workspace metadata fetcher that uses the Cargo Metadata subcommand. */ #[allow(dead_code)] pub struct CargoSubcommandMetadataFetcher; /** * A workspace metadata fetcher that uses Cargo's internals. * *!DANGER DANGER! * This struct is very hard to test as it uses Cargo's stateful internals, please take care when * changing it. *!DANGER DANGER! 
*/ pub struct CargoInternalsMetadataFetcher<'config> { cargo_config: &'config Config, } impl MetadataFetcher for CargoSubcommandMetadataFetcher { fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> { assert!(files.toml_path.is_file()); assert!(files.lock_path_opt.as_ref().map_or(true, |p| p.is_file())); // Copy files into a temp directory // UNWRAP: Guarded by function assertion let cargo_tempdir = { let dir = TempDir::new("cargo_raze_metadata_dir")?; let dir_path = dir.path(); let new_toml_path = dir_path.join(files.toml_path.file_name().unwrap()); fs::copy(files.toml_path, new_toml_path)?; if let Some(lock_path) = files.lock_path_opt { let new_lock_path = dir_path.join(lock_path.file_name().unwrap()); fs::copy(lock_path, new_lock_path)?; } dir }; // Shell out to cargo let exec_output = Command::new("cargo") .current_dir(cargo_tempdir.path()) .args(&["metadata", "--format-version", "1"]) .output()?; // Handle command errs let stdout_str = String::from_utf8(exec_output.stdout).unwrap_or_else(|_| "[unparsable bytes]".to_owned()); if!exec_output.status.success() { let stderr_str = String::from_utf8(exec_output.stderr).unwrap_or_else(|_| "[unparsable bytes]".to_owned()); println!("`cargo metadata` failed. Inspect Cargo.toml for issues!"); println!("stdout: {}", stdout_str); println!("stderr: {}", stderr_str); return Err(RazeError::Generic("Failed to run `cargo metadata`".to_owned()).into()); } // Parse and yield metadata serde_json::from_str::<Metadata>(&stdout_str).map_err(|e| e.into()) } } impl<'config> MetadataFetcher for CargoInternalsMetadataFetcher<'config> { fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> { let manifest = if files.toml_path.is_relative() { env::current_dir().unwrap().join(&files.toml_path) } else { files.toml_path }; let ws = Workspace::new(&manifest, &self.cargo_config)?; let specs = Packages::All.to_package_id_specs(&ws)?; let root_name = specs.iter().next().unwrap().name(); let resolve_opts = ResolveOpts::new(true, &[], false, false); let (resolved_packages, cargo_resolve) = ops::resolve_ws_with_opts(&ws, resolve_opts, &specs)?; let root = cargo_resolve .iter() .find(|dep| dep.name() == root_name) .ok_or_else(|| RazeError::Internal("root crate should be in cargo resolve".to_owned()))? .to_string(); let nodes = cargo_resolve .iter() .map(|id| ResolveNode { id: id.to_string(), features: Some( cargo_resolve .features_sorted(id) .iter() .map(|s| s.to_string()) .collect(), ), dependencies: cargo_resolve.deps(id).map(|(p, _)| p.to_string()).collect(), }) .collect(); let resolve = Resolve { nodes, root }; let packages = resolved_packages .package_ids() // TODO(acmcarther): Justify this unwrap .map(|package_id| (package_id, resolved_packages.get_one(package_id).unwrap())) .map(|(package_id, package)| { let manifest_metadata = package.manifest().metadata(); let dependencies = package .dependencies() .iter() .map(|dependency| Dependency {
source: serde_json::to_string(&dependency.source_id()).unwrap(), req: dependency.version_req().to_string(), kind: match dependency.kind() { CargoKind::Normal => None, CargoKind::Development => Some("dev".to_owned()), CargoKind::Build => Some("build".to_owned()), }, optional: dependency.is_optional(), uses_default_features: dependency.uses_default_features(), features: dependency .features() .iter() .map(|s| s.to_string()) .collect(), target: dependency.platform().map(|p| p.to_string()), }) .collect(); let targets = package .targets() .iter() .map(|target| Target { name: target.name().to_owned(), kind: util::kind_to_kinds(target.kind()), src_path: target.src_path().path().unwrap().display().to_string(), edition: target.edition().to_string(), crate_types: target .rustc_crate_types() .iter() .map(|t| t.to_string()) .collect(), }) .collect(); let features = package .summary() .features() .iter() .map(|(feature, feature_values)| { let our_feature_values = feature_values .iter() .map(|value| match value { CargoFeatureValue::Feature(name) | CargoFeatureValue::Crate(name) => { name.to_string() } // This matches the current Serialize impl for CargoFeatureValue CargoFeatureValue::CrateFeature(crate_name, feature_name) => { format!("{}/{}", crate_name.as_str(), feature_name.as_str()) } }) .collect(); (feature.to_string(), our_feature_values) }) .collect(); // UNWRAP: It's cargo's responsibility to ensure a serializable source_id let pkg_source = serde_json::to_string(&package_id.source_id()).unwrap(); // Cargo use SHA256 for checksum so we can use them directly let sha256 = package .manifest() .summary() .checksum() .map(ToString::to_string); Package { name: package.name().to_string(), version: package.version().to_string(), id: package_id.to_string(), license: manifest_metadata.license.clone(), license_file: manifest_metadata.license_file.clone(), description: manifest_metadata.description.clone(), source: Some(pkg_source), manifest_path: package.manifest_path().display().to_string(), edition: package.manifest().edition().to_string(), dependencies, targets, features, sha256, } }) .collect(); let workspace_members = ws .members() .map(|pkg| pkg.package_id().to_string()) .collect(); Ok(Metadata { target_directory: ws.target_dir().display().to_string(), version: 0, /* not generated via subcomand */ packages, resolve, workspace_members, }) } } impl<'config> CargoInternalsMetadataFetcher<'config> { pub fn new(cargo_config: &'config Config) -> CargoInternalsMetadataFetcher<'config> { CargoInternalsMetadataFetcher { cargo_config } } } fn default_dependency_field_optional() -> bool { // Dependencies are implicitly required. // TODO(acmcarther): Citation? 
false } fn default_dependency_field_uses_default_features() -> bool { // Default features are used by default // Citation: https://doc.rust-lang.org/cargo/reference/manifest.html#rules true } #[cfg(test)] pub mod testing { use super::*; pub struct StubMetadataFetcher { metadata: Metadata, } impl MetadataFetcher for StubMetadataFetcher { fn fetch_metadata(&mut self, _: CargoWorkspaceFiles) -> CargoResult<Metadata> { Ok(self.metadata.clone()) } } impl StubMetadataFetcher { pub fn with_metadata(metadata: Metadata) -> StubMetadataFetcher { StubMetadataFetcher { metadata } } } pub fn dummy_package() -> Package { Package { name: String::new(), version: String::new(), id: String::new(), license: None, license_file: None, description: None, source: None, dependencies: Vec::new(), targets: Vec::new(), features: HashMap::new(), manifest_path: String::new(), edition: String::new(), sha256: None, } } pub fn dummy_metadata() -> Metadata { Metadata { packages: Vec::new(), resolve: dummy_resolve(), workspace_members: Vec::new(), target_directory: String::new(), version: 1, } } pub fn dummy_resolve() -> Resolve { Resolve { nodes: Vec::new(), root: String::new(), } } } #[cfg(test)] mod tests { use super::*; use serde_json; use std::fs::File; use std::io::Write; fn basic_toml() -> &'static str { " [package] name = \"test\" version = \"0.0.1\" [lib] path = \"not_a_file.rs\" " } fn basic_lock() -> &'static str { " [[package]] name = \"test\" version = \"0.0.1\" dependencies = [ ] " } #[test] fn test_metadata_deserializes_correctly() { let metadata_file_contents = include_str!("../test_fixtures/metadata.txt"); serde_json::from_str::<Metadata>(metadata_file_contents).unwrap(); } #[test] fn test_cargo_subcommand_metadata_fetcher_works_without_lock() { let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap(); let toml_path = dir.path().join("Cargo.toml"); let mut toml = File::create(&toml_path).unwrap(); toml.write_all(basic_toml().as_bytes()).unwrap(); let files = CargoWorkspaceFiles { lock_path_opt: None, toml_path, }; let mut fetcher = CargoSubcommandMetadataFetcher; fetcher.fetch_metadata(files).unwrap(); } #[test] fn test_cargo_subcommand_metadata_fetcher_works_with_lock() { let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap(); let toml_path = { let path = dir.path().join("Cargo.toml"); let mut toml = File::create(&path).unwrap(); toml.write_all(basic_toml().as_bytes()).unwrap(); path }; let lock_path = { let path = dir.path().join("Cargo.lock"); let mut lock = File::create(&path).unwrap(); lock.write_all(basic_lock().as_bytes()).unwrap(); path }; let files = CargoWorkspaceFiles { lock_path_opt: Some(lock_path), toml_path, }; let mut fetcher = CargoSubcommandMetadataFetcher; fetcher.fetch_metadata(files).unwrap(); } #[test] fn test_cargo_subcommand_metadata_fetcher_handles_bad_files() { let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap(); let toml_path = { let path = dir.path().join("Cargo.toml"); let mut toml = File::create(&path).unwrap(); toml.write_all(b"hello").unwrap(); path }; let files = CargoWorkspaceFiles { lock_path_opt: None, toml_path, }; let mut fetcher = CargoSubcommandMetadataFetcher; assert!(fetcher.fetch_metadata(files).is_err()); } }
name: dependency.package_name().to_string(), // UNWRAP: It's cargo's responsibility to ensure a serializable source_id
random_line_split
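A minimal sketch of the subcommand path shown above: shell out to `cargo metadata --format-version 1` and deserialize the JSON into the `Metadata` struct from this file. Error handling is trimmed to a boxed error, and the function name is ours.

use std::process::Command;

fn fetch(dir: &std::path::Path) -> Result<Metadata, Box<dyn std::error::Error>> {
    let out = Command::new("cargo")
        .current_dir(dir)
        .args(&["metadata", "--format-version", "1"])
        .output()?;
    if !out.status.success() {
        return Err(format!(
            "`cargo metadata` failed: {}",
            String::from_utf8_lossy(&out.stderr)
        )
        .into());
    }
    // Parse the JSON emitted on stdout into the mirrored Metadata struct.
    Ok(serde_json::from_slice::<Metadata>(&out.stdout)?)
}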
metadata.rs
// Copyright 2018 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use cargo::{ core::{ dependency::Kind as CargoKind, resolver::ResolveOpts, summary::FeatureValue as CargoFeatureValue, Workspace, }, ops::{self, Packages}, util::Config, CargoResult, }; use serde_json; use std::{collections::HashMap, env, fs, path::PathBuf, process::Command}; use crate::util::{self, RazeError}; use tempdir::TempDir; use serde_derive::{Deserialize, Serialize}; pub type PackageId = String; pub type Kind = String; pub type TargetSpec = String; /** * An entity that can retrive deserialized metadata for a Cargo Workspace. * * The `CargoInternalsMetadataFetcher` is probably the one you want. * * Usage of..Subcommand.. is waiting on a cargo release containing * <https://github.com/rust-lang/cargo/pull/5122> */ pub trait MetadataFetcher { fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata>; } /** The local Cargo workspace files to be used for build planning.*/ pub struct CargoWorkspaceFiles { pub toml_path: PathBuf, pub lock_path_opt: Option<PathBuf>, } /** * The metadata for a whole Cargo workspace. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`ExportInfo`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L78-L85) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Metadata { pub packages: Vec<Package>, pub resolve: Resolve, pub workspace_members: Vec<PackageId>, pub target_directory: String, pub version: i64, } /** * The metadata for an individual Cargo crate. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`SerializedPackage`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/package.rs#L32-L50) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Package { pub name: String, pub version: String, pub id: PackageId, pub license: Option<String>, pub license_file: Option<String>, pub description: Option<String>, pub source: Option<String>, pub dependencies: Vec<Dependency>, pub targets: Vec<Target>, pub features: HashMap<String, Vec<String>>, pub manifest_path: String, pub edition: String, pub sha256: Option<String>, } /** * The metadata for a dependency (a reference connecting a crate to another crate). * * WARNING: Cargo-raze does not control the definition of this struct. 
* This struct mirrors Cargo's own [`SerializedDependency`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/dependency.rs#L49-L60) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Dependency { pub name: String, pub source: String, pub req: String, pub kind: Option<Kind>, #[serde(default = "default_dependency_field_optional")] pub optional: bool, #[serde(default = "default_dependency_field_uses_default_features")] pub uses_default_features: bool, pub features: Vec<String>, pub target: Option<TargetSpec>, } /** * The metadata for a compileable target. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`SerializedTarget`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/manifest.rs#L188-L197) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Target { pub name: String, pub kind: Vec<String>, pub crate_types: Vec<String>, pub src_path: String, pub edition: String, } /** * The metadata for a fully resolved dependency tree. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`MetadataResolve`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L91-L95) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Resolve { pub nodes: Vec<ResolveNode>, pub root: PackageId, } /** * The metadata for a single resolved entry in the full dependency tree. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`Node`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L102-L106) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct ResolveNode { pub id: PackageId, pub dependencies: Vec<PackageId>, // Optional due to recent feature addition in Cargo. pub features: Option<Vec<String>>, } /** A workspace metadata fetcher that uses the Cargo Metadata subcommand. */ #[allow(dead_code)] pub struct CargoSubcommandMetadataFetcher; /** * A workspace metadata fetcher that uses Cargo's internals. * *!DANGER DANGER! * This struct is very hard to test as it uses Cargo's stateful internals, please take care when * changing it. *!DANGER DANGER! 
*/ pub struct CargoInternalsMetadataFetcher<'config> { cargo_config: &'config Config, } impl MetadataFetcher for CargoSubcommandMetadataFetcher { fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> { assert!(files.toml_path.is_file()); assert!(files.lock_path_opt.as_ref().map_or(true, |p| p.is_file())); // Copy files into a temp directory // UNWRAP: Guarded by function assertion let cargo_tempdir = { let dir = TempDir::new("cargo_raze_metadata_dir")?; let dir_path = dir.path(); let new_toml_path = dir_path.join(files.toml_path.file_name().unwrap()); fs::copy(files.toml_path, new_toml_path)?; if let Some(lock_path) = files.lock_path_opt { let new_lock_path = dir_path.join(lock_path.file_name().unwrap()); fs::copy(lock_path, new_lock_path)?; } dir }; // Shell out to cargo let exec_output = Command::new("cargo") .current_dir(cargo_tempdir.path()) .args(&["metadata", "--format-version", "1"]) .output()?; // Handle command errs let stdout_str = String::from_utf8(exec_output.stdout).unwrap_or_else(|_| "[unparsable bytes]".to_owned()); if!exec_output.status.success() { let stderr_str = String::from_utf8(exec_output.stderr).unwrap_or_else(|_| "[unparsable bytes]".to_owned()); println!("`cargo metadata` failed. Inspect Cargo.toml for issues!"); println!("stdout: {}", stdout_str); println!("stderr: {}", stderr_str); return Err(RazeError::Generic("Failed to run `cargo metadata`".to_owned()).into()); } // Parse and yield metadata serde_json::from_str::<Metadata>(&stdout_str).map_err(|e| e.into()) } } impl<'config> MetadataFetcher for CargoInternalsMetadataFetcher<'config> { fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> { let manifest = if files.toml_path.is_relative() { env::current_dir().unwrap().join(&files.toml_path) } else { files.toml_path }; let ws = Workspace::new(&manifest, &self.cargo_config)?; let specs = Packages::All.to_package_id_specs(&ws)?; let root_name = specs.iter().next().unwrap().name(); let resolve_opts = ResolveOpts::new(true, &[], false, false); let (resolved_packages, cargo_resolve) = ops::resolve_ws_with_opts(&ws, resolve_opts, &specs)?; let root = cargo_resolve .iter() .find(|dep| dep.name() == root_name) .ok_or_else(|| RazeError::Internal("root crate should be in cargo resolve".to_owned()))? 
.to_string(); let nodes = cargo_resolve .iter() .map(|id| ResolveNode { id: id.to_string(), features: Some( cargo_resolve .features_sorted(id) .iter() .map(|s| s.to_string()) .collect(), ), dependencies: cargo_resolve.deps(id).map(|(p, _)| p.to_string()).collect(), }) .collect(); let resolve = Resolve { nodes, root }; let packages = resolved_packages .package_ids() // TODO(acmcarther): Justify this unwrap .map(|package_id| (package_id, resolved_packages.get_one(package_id).unwrap())) .map(|(package_id, package)| { let manifest_metadata = package.manifest().metadata(); let dependencies = package .dependencies() .iter() .map(|dependency| Dependency { name: dependency.package_name().to_string(), // UNWRAP: It's cargo's responsibility to ensure a serializable source_id source: serde_json::to_string(&dependency.source_id()).unwrap(), req: dependency.version_req().to_string(), kind: match dependency.kind() { CargoKind::Normal => None, CargoKind::Development => Some("dev".to_owned()), CargoKind::Build => Some("build".to_owned()), }, optional: dependency.is_optional(), uses_default_features: dependency.uses_default_features(), features: dependency .features() .iter() .map(|s| s.to_string()) .collect(), target: dependency.platform().map(|p| p.to_string()), }) .collect(); let targets = package .targets() .iter() .map(|target| Target { name: target.name().to_owned(), kind: util::kind_to_kinds(target.kind()), src_path: target.src_path().path().unwrap().display().to_string(), edition: target.edition().to_string(), crate_types: target .rustc_crate_types() .iter() .map(|t| t.to_string()) .collect(), }) .collect(); let features = package .summary() .features() .iter() .map(|(feature, feature_values)| { let our_feature_values = feature_values .iter() .map(|value| match value { CargoFeatureValue::Feature(name) | CargoFeatureValue::Crate(name) => { name.to_string() } // This matches the current Serialize impl for CargoFeatureValue CargoFeatureValue::CrateFeature(crate_name, feature_name) => { format!("{}/{}", crate_name.as_str(), feature_name.as_str()) } }) .collect(); (feature.to_string(), our_feature_values) }) .collect(); // UNWRAP: It's cargo's responsibility to ensure a serializable source_id let pkg_source = serde_json::to_string(&package_id.source_id()).unwrap(); // Cargo use SHA256 for checksum so we can use them directly let sha256 = package .manifest() .summary() .checksum() .map(ToString::to_string); Package { name: package.name().to_string(), version: package.version().to_string(), id: package_id.to_string(), license: manifest_metadata.license.clone(), license_file: manifest_metadata.license_file.clone(), description: manifest_metadata.description.clone(), source: Some(pkg_source), manifest_path: package.manifest_path().display().to_string(), edition: package.manifest().edition().to_string(), dependencies, targets, features, sha256, } }) .collect(); let workspace_members = ws .members() .map(|pkg| pkg.package_id().to_string()) .collect(); Ok(Metadata { target_directory: ws.target_dir().display().to_string(), version: 0, /* not generated via subcomand */ packages, resolve, workspace_members, }) } } impl<'config> CargoInternalsMetadataFetcher<'config> { pub fn new(cargo_config: &'config Config) -> CargoInternalsMetadataFetcher<'config> { CargoInternalsMetadataFetcher { cargo_config } } } fn default_dependency_field_optional() -> bool { // Dependencies are implicitly required. // TODO(acmcarther): Citation? 
false } fn default_dependency_field_uses_default_features() -> bool { // Default features are used by default // Citation: https://doc.rust-lang.org/cargo/reference/manifest.html#rules true } #[cfg(test)] pub mod testing { use super::*; pub struct StubMetadataFetcher { metadata: Metadata, } impl MetadataFetcher for StubMetadataFetcher { fn fetch_metadata(&mut self, _: CargoWorkspaceFiles) -> CargoResult<Metadata> { Ok(self.metadata.clone()) } } impl StubMetadataFetcher { pub fn with_metadata(metadata: Metadata) -> StubMetadataFetcher { StubMetadataFetcher { metadata } } } pub fn dummy_package() -> Package { Package { name: String::new(), version: String::new(), id: String::new(), license: None, license_file: None, description: None, source: None, dependencies: Vec::new(), targets: Vec::new(), features: HashMap::new(), manifest_path: String::new(), edition: String::new(), sha256: None, } } pub fn dummy_metadata() -> Metadata { Metadata { packages: Vec::new(), resolve: dummy_resolve(), workspace_members: Vec::new(), target_directory: String::new(), version: 1, } } pub fn dummy_resolve() -> Resolve { Resolve { nodes: Vec::new(), root: String::new(), } } } #[cfg(test)] mod tests { use super::*; use serde_json; use std::fs::File; use std::io::Write; fn basic_toml() -> &'static str { " [package] name = \"test\" version = \"0.0.1\" [lib] path = \"not_a_file.rs\" " } fn basic_lock() -> &'static str { " [[package]] name = \"test\" version = \"0.0.1\" dependencies = [ ] " } #[test] fn test_metadata_deserializes_correctly() { let metadata_file_contents = include_str!("../test_fixtures/metadata.txt"); serde_json::from_str::<Metadata>(metadata_file_contents).unwrap(); } #[test] fn test_cargo_subcommand_metadata_fetcher_works_without_lock() { let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap(); let toml_path = dir.path().join("Cargo.toml"); let mut toml = File::create(&toml_path).unwrap(); toml.write_all(basic_toml().as_bytes()).unwrap(); let files = CargoWorkspaceFiles { lock_path_opt: None, toml_path, }; let mut fetcher = CargoSubcommandMetadataFetcher; fetcher.fetch_metadata(files).unwrap(); } #[test] fn test_cargo_subcommand_metadata_fetcher_works_with_lock() { let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap(); let toml_path = { let path = dir.path().join("Cargo.toml"); let mut toml = File::create(&path).unwrap(); toml.write_all(basic_toml().as_bytes()).unwrap(); path }; let lock_path = { let path = dir.path().join("Cargo.lock"); let mut lock = File::create(&path).unwrap(); lock.write_all(basic_lock().as_bytes()).unwrap(); path }; let files = CargoWorkspaceFiles { lock_path_opt: Some(lock_path), toml_path, }; let mut fetcher = CargoSubcommandMetadataFetcher; fetcher.fetch_metadata(files).unwrap(); } #[test] fn test_cargo_subcommand_metadata_fetcher_handles_bad_files()
}
{ let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap(); let toml_path = { let path = dir.path().join("Cargo.toml"); let mut toml = File::create(&path).unwrap(); toml.write_all(b"hello").unwrap(); path }; let files = CargoWorkspaceFiles { lock_path_opt: None, toml_path, }; let mut fetcher = CargoSubcommandMetadataFetcher; assert!(fetcher.fetch_metadata(files).is_err()); }
identifier_body
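The row above captures CargoSubcommandMetadataFetcher's subprocess strategy. As a standalone illustration (not the crate's code), here is a minimal sketch of the same pattern, assuming `cargo` is on PATH and parsing into a generic `serde_json::Value` rather than the crate's `Metadata` struct:

```rust
// Sketch only: shell out to `cargo metadata` the way the fetcher above does.
// Assumes the current directory contains a Cargo.toml and serde_json is a dependency.
use std::process::Command;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let output = Command::new("cargo")
        .args(&["metadata", "--format-version", "1"])
        .output()?;
    if !output.status.success() {
        // Surface cargo's own diagnostics instead of swallowing them.
        return Err(format!(
            "`cargo metadata` failed: {}",
            String::from_utf8_lossy(&output.stderr)
        )
        .into());
    }
    // Parse into a generic value; the real fetcher targets its own `Metadata` struct.
    let metadata: serde_json::Value = serde_json::from_slice(&output.stdout)?;
    let n_packages = metadata["packages"].as_array().map_or(0, |p| p.len());
    println!("{} packages in the workspace graph", n_packages);
    Ok(())
}
```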
metadata.rs
// Copyright 2018 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use cargo::{ core::{ dependency::Kind as CargoKind, resolver::ResolveOpts, summary::FeatureValue as CargoFeatureValue, Workspace, }, ops::{self, Packages}, util::Config, CargoResult, }; use serde_json; use std::{collections::HashMap, env, fs, path::PathBuf, process::Command}; use crate::util::{self, RazeError}; use tempdir::TempDir; use serde_derive::{Deserialize, Serialize}; pub type PackageId = String; pub type Kind = String; pub type TargetSpec = String; /** * An entity that can retrieve deserialized metadata for a Cargo Workspace. * * The `CargoInternalsMetadataFetcher` is probably the one you want. * * Usage of ..Subcommand.. is waiting on a cargo release containing * <https://github.com/rust-lang/cargo/pull/5122> */ pub trait MetadataFetcher { fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata>; } /** The local Cargo workspace files to be used for build planning. */ pub struct CargoWorkspaceFiles { pub toml_path: PathBuf, pub lock_path_opt: Option<PathBuf>, } /** * The metadata for a whole Cargo workspace. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`ExportInfo`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L78-L85) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Metadata { pub packages: Vec<Package>, pub resolve: Resolve, pub workspace_members: Vec<PackageId>, pub target_directory: String, pub version: i64, } /** * The metadata for an individual Cargo crate. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`SerializedPackage`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/package.rs#L32-L50) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Package { pub name: String, pub version: String, pub id: PackageId, pub license: Option<String>, pub license_file: Option<String>, pub description: Option<String>, pub source: Option<String>, pub dependencies: Vec<Dependency>, pub targets: Vec<Target>, pub features: HashMap<String, Vec<String>>, pub manifest_path: String, pub edition: String, pub sha256: Option<String>, } /** * The metadata for a dependency (a reference connecting a crate to another crate). * * WARNING: Cargo-raze does not control the definition of this struct.
* This struct mirrors Cargo's own [`SerializedDependency`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/dependency.rs#L49-L60) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Dependency { pub name: String, pub source: String, pub req: String, pub kind: Option<Kind>, #[serde(default = "default_dependency_field_optional")] pub optional: bool, #[serde(default = "default_dependency_field_uses_default_features")] pub uses_default_features: bool, pub features: Vec<String>, pub target: Option<TargetSpec>, } /** * The metadata for a compilable target. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`SerializedTarget`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/core/manifest.rs#L188-L197) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Target { pub name: String, pub kind: Vec<String>, pub crate_types: Vec<String>, pub src_path: String, pub edition: String, } /** * The metadata for a fully resolved dependency tree. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`MetadataResolve`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L91-L95) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct Resolve { pub nodes: Vec<ResolveNode>, pub root: PackageId, } /** * The metadata for a single resolved entry in the full dependency tree. * * WARNING: Cargo-raze does not control the definition of this struct. * This struct mirrors Cargo's own [`Node`]( * https://github.com/rust-lang/cargo/blob/0.40.0/src/cargo/ops/cargo_output_metadata.rs#L102-L106) */ #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct ResolveNode { pub id: PackageId, pub dependencies: Vec<PackageId>, // Optional due to recent feature addition in Cargo. pub features: Option<Vec<String>>, } /** A workspace metadata fetcher that uses the Cargo Metadata subcommand. */ #[allow(dead_code)] pub struct CargoSubcommandMetadataFetcher; /** * A workspace metadata fetcher that uses Cargo's internals. * * !DANGER DANGER! * This struct is very hard to test as it uses Cargo's stateful internals, please take care when * changing it. * !DANGER DANGER!
*/ pub struct CargoInternalsMetadataFetcher<'config> { cargo_config: &'config Config, } impl MetadataFetcher for CargoSubcommandMetadataFetcher { fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> { assert!(files.toml_path.is_file()); assert!(files.lock_path_opt.as_ref().map_or(true, |p| p.is_file())); // Copy files into a temp directory // UNWRAP: Guarded by function assertion let cargo_tempdir = { let dir = TempDir::new("cargo_raze_metadata_dir")?; let dir_path = dir.path(); let new_toml_path = dir_path.join(files.toml_path.file_name().unwrap()); fs::copy(files.toml_path, new_toml_path)?; if let Some(lock_path) = files.lock_path_opt { let new_lock_path = dir_path.join(lock_path.file_name().unwrap()); fs::copy(lock_path, new_lock_path)?; } dir }; // Shell out to cargo let exec_output = Command::new("cargo") .current_dir(cargo_tempdir.path()) .args(&["metadata", "--format-version", "1"]) .output()?; // Handle command errors let stdout_str = String::from_utf8(exec_output.stdout).unwrap_or_else(|_| "[unparsable bytes]".to_owned()); if !exec_output.status.success() { let stderr_str = String::from_utf8(exec_output.stderr).unwrap_or_else(|_| "[unparsable bytes]".to_owned()); println!("`cargo metadata` failed. Inspect Cargo.toml for issues!"); println!("stdout: {}", stdout_str); println!("stderr: {}", stderr_str); return Err(RazeError::Generic("Failed to run `cargo metadata`".to_owned()).into()); } // Parse and yield metadata serde_json::from_str::<Metadata>(&stdout_str).map_err(|e| e.into()) } } impl<'config> MetadataFetcher for CargoInternalsMetadataFetcher<'config> { fn fetch_metadata(&mut self, files: CargoWorkspaceFiles) -> CargoResult<Metadata> { let manifest = if files.toml_path.is_relative() { env::current_dir().unwrap().join(&files.toml_path) } else { files.toml_path }; let ws = Workspace::new(&manifest, &self.cargo_config)?; let specs = Packages::All.to_package_id_specs(&ws)?; let root_name = specs.iter().next().unwrap().name(); let resolve_opts = ResolveOpts::new(true, &[], false, false); let (resolved_packages, cargo_resolve) = ops::resolve_ws_with_opts(&ws, resolve_opts, &specs)?; let root = cargo_resolve .iter() .find(|dep| dep.name() == root_name) .ok_or_else(|| RazeError::Internal("root crate should be in cargo resolve".to_owned()))?
.to_string(); let nodes = cargo_resolve .iter() .map(|id| ResolveNode { id: id.to_string(), features: Some( cargo_resolve .features_sorted(id) .iter() .map(|s| s.to_string()) .collect(), ), dependencies: cargo_resolve.deps(id).map(|(p, _)| p.to_string()).collect(), }) .collect(); let resolve = Resolve { nodes, root }; let packages = resolved_packages .package_ids() // TODO(acmcarther): Justify this unwrap .map(|package_id| (package_id, resolved_packages.get_one(package_id).unwrap())) .map(|(package_id, package)| { let manifest_metadata = package.manifest().metadata(); let dependencies = package .dependencies() .iter() .map(|dependency| Dependency { name: dependency.package_name().to_string(), // UNWRAP: It's cargo's responsibility to ensure a serializable source_id source: serde_json::to_string(&dependency.source_id()).unwrap(), req: dependency.version_req().to_string(), kind: match dependency.kind() { CargoKind::Normal => None, CargoKind::Development => Some("dev".to_owned()), CargoKind::Build => Some("build".to_owned()), }, optional: dependency.is_optional(), uses_default_features: dependency.uses_default_features(), features: dependency .features() .iter() .map(|s| s.to_string()) .collect(), target: dependency.platform().map(|p| p.to_string()), }) .collect(); let targets = package .targets() .iter() .map(|target| Target { name: target.name().to_owned(), kind: util::kind_to_kinds(target.kind()), src_path: target.src_path().path().unwrap().display().to_string(), edition: target.edition().to_string(), crate_types: target .rustc_crate_types() .iter() .map(|t| t.to_string()) .collect(), }) .collect(); let features = package .summary() .features() .iter() .map(|(feature, feature_values)| { let our_feature_values = feature_values .iter() .map(|value| match value { CargoFeatureValue::Feature(name) | CargoFeatureValue::Crate(name) => { name.to_string() } // This matches the current Serialize impl for CargoFeatureValue CargoFeatureValue::CrateFeature(crate_name, feature_name) => { format!("{}/{}", crate_name.as_str(), feature_name.as_str()) } }) .collect(); (feature.to_string(), our_feature_values) }) .collect(); // UNWRAP: It's cargo's responsibility to ensure a serializable source_id let pkg_source = serde_json::to_string(&package_id.source_id()).unwrap(); // Cargo uses SHA-256 for checksums, so we can use them directly let sha256 = package .manifest() .summary() .checksum() .map(ToString::to_string); Package { name: package.name().to_string(), version: package.version().to_string(), id: package_id.to_string(), license: manifest_metadata.license.clone(), license_file: manifest_metadata.license_file.clone(), description: manifest_metadata.description.clone(), source: Some(pkg_source), manifest_path: package.manifest_path().display().to_string(), edition: package.manifest().edition().to_string(), dependencies, targets, features, sha256, } }) .collect(); let workspace_members = ws .members() .map(|pkg| pkg.package_id().to_string()) .collect(); Ok(Metadata { target_directory: ws.target_dir().display().to_string(), version: 0, /* not generated via subcommand */ packages, resolve, workspace_members, }) } } impl<'config> CargoInternalsMetadataFetcher<'config> { pub fn new(cargo_config: &'config Config) -> CargoInternalsMetadataFetcher<'config> { CargoInternalsMetadataFetcher { cargo_config } } } fn default_dependency_field_optional() -> bool { // Dependencies are implicitly required. // TODO(acmcarther): Citation?
false } fn default_dependency_field_uses_default_features() -> bool { // Default features are used by default // Citation: https://doc.rust-lang.org/cargo/reference/manifest.html#rules true } #[cfg(test)] pub mod testing { use super::*; pub struct StubMetadataFetcher { metadata: Metadata, } impl MetadataFetcher for StubMetadataFetcher { fn fetch_metadata(&mut self, _: CargoWorkspaceFiles) -> CargoResult<Metadata> { Ok(self.metadata.clone()) } } impl StubMetadataFetcher { pub fn with_metadata(metadata: Metadata) -> StubMetadataFetcher { StubMetadataFetcher { metadata } } } pub fn dummy_package() -> Package { Package { name: String::new(), version: String::new(), id: String::new(), license: None, license_file: None, description: None, source: None, dependencies: Vec::new(), targets: Vec::new(), features: HashMap::new(), manifest_path: String::new(), edition: String::new(), sha256: None, } } pub fn dummy_metadata() -> Metadata { Metadata { packages: Vec::new(), resolve: dummy_resolve(), workspace_members: Vec::new(), target_directory: String::new(), version: 1, } } pub fn dummy_resolve() -> Resolve { Resolve { nodes: Vec::new(), root: String::new(), } } } #[cfg(test)] mod tests { use super::*; use serde_json; use std::fs::File; use std::io::Write; fn basic_toml() -> &'static str { " [package] name = \"test\" version = \"0.0.1\" [lib] path = \"not_a_file.rs\" " } fn basic_lock() -> &'static str { " [[package]] name = \"test\" version = \"0.0.1\" dependencies = [ ] " } #[test] fn test_metadata_deserializes_correctly() { let metadata_file_contents = include_str!("../test_fixtures/metadata.txt"); serde_json::from_str::<Metadata>(metadata_file_contents).unwrap(); } #[test] fn test_cargo_subcommand_metadata_fetcher_works_without_lock() { let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap(); let toml_path = dir.path().join("Cargo.toml"); let mut toml = File::create(&toml_path).unwrap(); toml.write_all(basic_toml().as_bytes()).unwrap(); let files = CargoWorkspaceFiles { lock_path_opt: None, toml_path, }; let mut fetcher = CargoSubcommandMetadataFetcher; fetcher.fetch_metadata(files).unwrap(); } #[test] fn
() { let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap(); let toml_path = { let path = dir.path().join("Cargo.toml"); let mut toml = File::create(&path).unwrap(); toml.write_all(basic_toml().as_bytes()).unwrap(); path }; let lock_path = { let path = dir.path().join("Cargo.lock"); let mut lock = File::create(&path).unwrap(); lock.write_all(basic_lock().as_bytes()).unwrap(); path }; let files = CargoWorkspaceFiles { lock_path_opt: Some(lock_path), toml_path, }; let mut fetcher = CargoSubcommandMetadataFetcher; fetcher.fetch_metadata(files).unwrap(); } #[test] fn test_cargo_subcommand_metadata_fetcher_handles_bad_files() { let dir = TempDir::new("test_cargo_raze_metadata_dir").unwrap(); let toml_path = { let path = dir.path().join("Cargo.toml"); let mut toml = File::create(&path).unwrap(); toml.write_all(b"hello").unwrap(); path }; let files = CargoWorkspaceFiles { lock_path_opt: None, toml_path, }; let mut fetcher = CargoSubcommandMetadataFetcher; assert!(fetcher.fetch_metadata(files).is_err()); } }
test_cargo_subcommand_metadata_fetcher_works_with_lock
identifier_name
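The `Dependency` struct in the rows above leans on `#[serde(default = "...")]` so that fields absent from `cargo metadata` output fall back to the two helper functions instead of failing deserialization. A minimal self-contained sketch of that serde pattern (the `Dep` type here is hypothetical, not the crate's):

```rust
// Sketch of the serde default-function pattern used by `Dependency` above.
use serde::Deserialize;

fn default_optional() -> bool { false }               // deps are required by default
fn default_uses_default_features() -> bool { true }   // default features on by default

#[derive(Debug, Deserialize)]
struct Dep {
    name: String,
    #[serde(default = "default_optional")]
    optional: bool,
    #[serde(default = "default_uses_default_features")]
    uses_default_features: bool,
}

fn main() {
    // Neither boolean field is present, so the named functions supply them.
    let dep: Dep = serde_json::from_str(r#"{ "name": "serde" }"#).unwrap();
    assert!(!dep.optional);
    assert!(dep.uses_default_features);
    println!("{:?}", dep);
}
```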
non_copy_const.rs
/// ### Why is this bad? /// Consts are copied everywhere they are referenced, i.e., /// every time you refer to the const a fresh instance of the `Cell` or `Mutex` /// or `AtomicXxxx` will be created, which defeats the whole purpose of using /// these types in the first place. /// /// The `const` is better replaced by a `static` item if a global /// variable is wanted, or by a `const fn` if a constructor is wanted. /// /// ### Known problems /// A "non-constant" const item is a legacy way to supply an /// initialized value to downstream `static` items (e.g., the /// `std::sync::ONCE_INIT` constant). In this case the use of `const` is legit, /// and this lint should be suppressed. /// /// Even though the lint avoids triggering on a constant whose type has enums that have variants /// with interior mutability, and its value uses non interior mutable variants (see /// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962) and /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825) for examples), /// it complains about associated constants without default values based only on their types, /// which might not be preferable. /// There are other enum-plus-associated-constant cases that the lint cannot handle. /// /// Types that have underlying or potential interior mutability trigger the lint whether /// the interior mutable field is used or not. See issues /// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825). /// /// ### Example /// ```rust /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; /// /// // Bad. /// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12); /// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct /// /// // Good. /// static STATIC_ATOM: AtomicUsize = AtomicUsize::new(15); /// STATIC_ATOM.store(9, SeqCst); /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance /// ``` #[clippy::version = "pre 1.29.0"] pub DECLARE_INTERIOR_MUTABLE_CONST, style, "declaring `const` with interior mutability" } // FIXME: this is a correctness problem but there's no suitable // warn-by-default category. declare_clippy_lint! { /// ### What it does /// Checks if `const` items which are interior mutable (e.g., /// contain a `Cell`, `Mutex`, `AtomicXxxx`, etc.) have been borrowed directly. /// /// ### Why is this bad? /// Consts are copied everywhere they are referenced, i.e., /// every time you refer to the const a fresh instance of the `Cell` or `Mutex` /// or `AtomicXxxx` will be created, which defeats the whole purpose of using /// these types in the first place. /// /// The `const` value should be stored inside a `static` item. /// /// ### Known problems /// When an enum has variants with interior mutability, use of its non /// interior mutable variants can generate false positives. See issue /// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962) /// /// Types that have underlying or potential interior mutability trigger the lint whether /// the interior mutable field is used or not. See issues /// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825) /// /// ### Example /// ```rust /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; /// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12); /// /// // Bad.
/// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct /// /// // Good. /// static STATIC_ATOM: AtomicUsize = CONST_ATOM; /// STATIC_ATOM.store(9, SeqCst); /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance /// ``` #[clippy::version = "pre 1.29.0"] pub BORROW_INTERIOR_MUTABLE_CONST, style, "referencing `const` with interior mutability" } fn is_unfrozen<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool { // Ignore types whose layout is unknown since `is_freeze` reports every generic type as `!Freeze`, // making it indistinguishable from `UnsafeCell`. i.e. it isn't a tool to prove a type is // 'unfrozen'. However, this code causes a false negative in which // a type contains a layout-unknown type, but also an unsafe cell like `const CELL: Cell<T>`. // Yet, it's better than `ty.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_PROJECTION)` // since it works when pointer indirection is involved (`Cell<*const T>`). // Making up a `ParamEnv` where every generic param and assoc type is `Freeze` is another option; // but I'm not sure whether it's a decent way, if possible. cx.tcx.layout_of(cx.param_env.and(ty)).is_ok() && !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env) } fn is_value_unfrozen_raw<'tcx>( cx: &LateContext<'tcx>, result: Result<ConstValue<'tcx>, ErrorHandled>, ty: Ty<'tcx>, ) -> bool { fn inner<'tcx>(cx: &LateContext<'tcx>, val: &'tcx Const<'tcx>) -> bool { match val.ty.kind() { // the fact that we have to dig into every struct to search for enums // leads us to the point where checking `UnsafeCell` directly is the only option. ty::Adt(ty_def, ..) if Some(ty_def.did) == cx.tcx.lang_items().unsafe_cell_type() => true, ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => { let val = cx.tcx.destructure_const(cx.param_env.and(val)); val.fields.iter().any(|field| inner(cx, field)) }, _ => false, } } result.map_or_else( |err| { // Consider `TooGeneric` cases as being unfrozen. // This causes a false positive where an assoc const whose type is unfrozen // has a value that is a frozen variant with a generic param (an example is // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::GENERIC_VARIANT`). // However, it prevents a number of false negatives that are, I think, important: // 1. assoc consts in trait defs referring to consts of themselves // (an example is `declare_interior_mutable_const::traits::ConcreteTypes::ANOTHER_ATOMIC`). // 2. a path expr referring to assoc consts whose type doesn't have // any frozen variants in trait defs (i.e. without a substitute for `Self`). // (e.g. borrowing `borrow_interior_mutable_const::trait::ConcreteTypes::ATOMIC`) // 3. similar to the false positive above; // but the value is an unfrozen variant, or the type has no enums. (An example is // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::UNFROZEN_VARIANT` // and `declare_interior_mutable_const::enums::BothOfCellAndGeneric::NO_ENUM`). // One might be able to prevent these FNs correctly, and replace this with `false`; // e.g. implementing `has_frozen_variant` described above, and not running this function // when the type doesn't have any frozen variants would be the 'correct' way for the 2nd // case (that actually removes another suboptimal behavior (I won't say 'false positive') where, // similar to 2., but with a frozen variant) (e.g.
borrowing // `borrow_interior_mutable_const::enums::AssocConsts::TO_BE_FROZEN_VARIANT`). // I chose this way because unfrozen enums as assoc consts are rare (or, hopefully, none). err == ErrorHandled::TooGeneric }, |val| inner(cx, Const::from_value(cx.tcx, val, ty)), ) } fn is_value_unfrozen_poly<'tcx>(cx: &LateContext<'tcx>, body_id: BodyId, ty: Ty<'tcx>) -> bool { let result = cx.tcx.const_eval_poly(body_id.hir_id.owner.to_def_id()); is_value_unfrozen_raw(cx, result, ty) } fn is_value_unfrozen_expr<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId, def_id: DefId, ty: Ty<'tcx>) -> bool { let substs = cx.typeck_results().node_substs(hir_id); let result = cx.tcx.const_eval_resolve( cx.param_env, ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs), None, ); is_value_unfrozen_raw(cx, result, ty) } #[derive(Copy, Clone)] enum Source { Item { item: Span }, Assoc { item: Span }, Expr { expr: Span }, } impl Source { #[must_use] fn lint(&self) -> (&'static Lint, &'static str, Span) { match self { Self::Item { item } | Self::Assoc { item, .. } => ( DECLARE_INTERIOR_MUTABLE_CONST, "a `const` item should never be interior mutable", *item, ), Self::Expr { expr } => ( BORROW_INTERIOR_MUTABLE_CONST, "a `const` item with interior mutability should not be borrowed", *expr, ), } } } fn lint(cx: &LateContext<'_>, source: Source) { let (lint, msg, span) = source.lint(); span_lint_and_then(cx, lint, span, msg, |diag| { if span.from_expansion() { return; // Don't give suggestions into macros. } match source { Source::Item { .. } => { let const_kw_span = span.from_inner(InnerSpan::new(0, 5)); diag.span_label(const_kw_span, "make this a static item (maybe with lazy_static)"); }, Source::Assoc { .. } => (), Source::Expr { .. } => { diag.help("assign this const to a local or static variable, and use the variable here"); }, } }); } declare_lint_pass!(NonCopyConst => [DECLARE_INTERIOR_MUTABLE_CONST, BORROW_INTERIOR_MUTABLE_CONST]); impl<'tcx> LateLintPass<'tcx> for NonCopyConst { fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx Item<'_>) { if let ItemKind::Const(hir_ty, body_id) = it.kind { let ty = hir_ty_to_ty(cx.tcx, hir_ty); if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, body_id, ty) { lint(cx, Source::Item { item: it.span }); } } } fn check_trait_item(&mut self, cx: &LateContext<'tcx>, trait_item: &'tcx TraitItem<'_>) { if let TraitItemKind::Const(hir_ty, body_id_opt) = &trait_item.kind { let ty = hir_ty_to_ty(cx.tcx, hir_ty); // Normalize assoc types because ones originated from generic params // bounded other traits could have their bound. let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty); if is_unfrozen(cx, normalized) // When there's no default value, lint it only according to its type; // in other words, lint consts whose value *could* be unfrozen, not ones that definitely are. // This feels inconsistent with how the lint treats generic types, // which avoids linting types which potentially become unfrozen. // One could check whether an unfrozen type has a *frozen variant* // (like `body_id_opt.map_or_else(|| !has_frozen_variant(...), ...)`), // and do the same as the case of generic types at impl items. // Note that it isn't sufficient to check if it has an enum // since all of that enum's variants can be unfrozen: // i.e. having an enum doesn't necessarily mean a type has a frozen variant. // And, implementing it isn't a trivial task; it'll probably end up // re-implementing the trait predicate evaluation specific to `Freeze`.
&& body_id_opt.map_or(true, |body_id| is_value_unfrozen_poly(cx, body_id, normalized)) { lint(cx, Source::Assoc { item: trait_item.span }); } } } fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx ImplItem<'_>)
// bounded other traits could have their bound at the trait defs; // and, in that case, the definition is *not* generic. cx.tcx.normalize_erasing_regions( cx.tcx.param_env(of_trait_def_id), cx.tcx.type_of(of_assoc_item.def_id), ), )) .is_err(); // If there were a function like `has_frozen_variant` described above, // we should use it here, as a frozen variant has the potential to be frozen, // similar to unknown layouts. // e.g. `layout_of(...).is_err() || has_frozen_variant(...);` let ty = hir_ty_to_ty(cx.tcx, hir_ty); let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty); if is_unfrozen(cx, normalized); if is_value_unfrozen_poly(cx, *body_id, normalized); then { lint( cx, Source::Assoc { item: impl_item.span, }, ); } } }, ItemKind::Impl(Impl { of_trait: None, .. }) => { let ty = hir_ty_to_ty(cx.tcx, hir_ty); // Normalize assoc types originated from generic params. let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty); if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, *body_id, normalized) { lint(cx, Source::Assoc { item: impl_item.span }); } }, _ => (), } } } fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) { if let ExprKind::Path(qpath) = &expr.kind { // Only lint if we use the const item inside a function. if in_constant(cx, expr.hir_id) { return; } // Make sure it is a const item. let item_def_id = match cx.qpath_res(qpath, expr.hir_id) { Res::Def(DefKind::Const | DefKind::AssocConst, did) => did, _ => return, }; // Climb up to resolve any field access and explicit referencing. let mut cur_expr = expr; let mut dereferenced_expr = expr; let mut needs_check_adjustment = true; loop { let parent_id = cx.tcx.hir().get_parent_node(cur_expr.hir_id); if parent_id == cur_expr.hir_id { break; } if let Some(Node::Expr(parent_expr)) = cx.tcx.hir().find(parent_id) { match &parent_expr.kind { ExprKind::AddrOf(..) => { // `&e` => `e` must be referenced. needs_check_adjustment = false; }, ExprKind::Field(..) => { needs_check_adjustment = true; // Check whether implicit dereferences happened; // if so, no need to go further up // because of the same reason as the `ExprKind::Unary` case. if cx .typeck_results() .expr_adjustments(dereferenced_expr) .iter() .any(|adj| matches!(adj.kind, Adjust::Deref(_))) { break; } dereferenced_expr = parent_expr; }, ExprKind::Index(e, _) if ptr::eq(&**e, cur_expr) => { // `e[i]` => desugared to `*Index::index(&e, i)`, // meaning `e` must be referenced. // no need to go further up since a method call is involved now. needs_check_adjustment = false; break; }, ExprKind::Unary(UnOp::Deref, _) => { // `*e` => desugared to `*Deref::deref(&e)`,
{ if let ImplItemKind::Const(hir_ty, body_id) = &impl_item.kind { let item_hir_id = cx.tcx.hir().get_parent_node(impl_item.hir_id()); let item = cx.tcx.hir().expect_item(item_hir_id); match &item.kind { ItemKind::Impl(Impl { of_trait: Some(of_trait_ref), .. }) => { if_chain! { // Lint a trait impl item only when the definition is a generic type, // assuming an assoc const is not meant to be an interior mutable type. if let Some(of_trait_def_id) = of_trait_ref.trait_def_id(); if let Some(of_assoc_item) = specialization_graph::Node::Trait(of_trait_def_id) .item(cx.tcx, impl_item.ident, AssocKind::Const, of_trait_def_id); if cx .tcx .layout_of(cx.tcx.param_env(of_trait_def_id).and( // Normalize assoc types because ones originated from generic params
identifier_body
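Both lint docs in the rows above describe the same pitfall; here it is as a runnable demonstration (plain std, adapted from the doc examples, not part of the lint's code): writes through a `const` atomic vanish because each use site gets a fresh copy, while a `static` names one shared instance.

```rust
// Demonstrates why DECLARE_INTERIOR_MUTABLE_CONST exists.
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
static STATIC_ATOM: AtomicUsize = AtomicUsize::new(15);

fn main() {
    CONST_ATOM.store(6, SeqCst);             // stores into a temporary copy of the const
    assert_eq!(CONST_ATOM.load(SeqCst), 12); // a fresh copy still reads the initial value

    STATIC_ATOM.store(9, SeqCst);            // stores into the one shared instance
    assert_eq!(STATIC_ATOM.load(SeqCst), 9); // the write is observable everywhere
}
```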
non_copy_const.rs
// the content of the atomic is unchanged /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct /// /// // Good. /// static STATIC_ATOM: AtomicUsize = AtomicUsize::new(15); /// STATIC_ATOM.store(9, SeqCst); /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance /// ``` #[clippy::version = "pre 1.29.0"] pub DECLARE_INTERIOR_MUTABLE_CONST, style, "declaring `const` with interior mutability" } // FIXME: this is a correctness problem but there's no suitable // warn-by-default category. declare_clippy_lint! { /// ### What it does /// Checks if `const` items which are interior mutable (e.g., /// contain a `Cell`, `Mutex`, `AtomicXxxx`, etc.) have been borrowed directly. /// /// ### Why is this bad? /// Consts are copied everywhere they are referenced, i.e., /// every time you refer to the const a fresh instance of the `Cell` or `Mutex` /// or `AtomicXxxx` will be created, which defeats the whole purpose of using /// these types in the first place. /// /// The `const` value should be stored inside a `static` item. /// /// ### Known problems /// When an enum has variants with interior mutability, use of its non /// interior mutable variants can generate false positives. See issue /// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962) /// /// Types that have underlying or potential interior mutability trigger the lint whether /// the interior mutable field is used or not. See issues /// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825) /// /// ### Example /// ```rust /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; /// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12); /// /// // Bad. /// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct /// /// // Good. /// static STATIC_ATOM: AtomicUsize = CONST_ATOM; /// STATIC_ATOM.store(9, SeqCst); /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance /// ``` #[clippy::version = "pre 1.29.0"] pub BORROW_INTERIOR_MUTABLE_CONST, style, "referencing `const` with interior mutability" } fn is_unfrozen<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool { // Ignore types whose layout is unknown since `is_freeze` reports every generic type as `!Freeze`, // making it indistinguishable from `UnsafeCell`. i.e. it isn't a tool to prove a type is // 'unfrozen'. However, this code causes a false negative in which // a type contains a layout-unknown type, but also an unsafe cell like `const CELL: Cell<T>`. // Yet, it's better than `ty.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_PROJECTION)` // since it works when pointer indirection is involved (`Cell<*const T>`). // Making up a `ParamEnv` where every generic param and assoc type is `Freeze` is another option; // but I'm not sure whether it's a decent way, if possible. cx.tcx.layout_of(cx.param_env.and(ty)).is_ok() && !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env) } fn is_value_unfrozen_raw<'tcx>( cx: &LateContext<'tcx>, result: Result<ConstValue<'tcx>, ErrorHandled>, ty: Ty<'tcx>, ) -> bool { fn inner<'tcx>(cx: &LateContext<'tcx>, val: &'tcx Const<'tcx>) -> bool { match val.ty.kind() { // the fact that we have to dig into every struct to search for enums // leads us to the point where checking `UnsafeCell` directly is the only option. ty::Adt(ty_def, ..)
if Some(ty_def.did) == cx.tcx.lang_items().unsafe_cell_type() => true, ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => { let val = cx.tcx.destructure_const(cx.param_env.and(val)); val.fields.iter().any(|field| inner(cx, field)) }, _ => false, } } result.map_or_else( |err| { // Consider `TooGeneric` cases as being unfrozen. // This causes a false positive where an assoc const whose type is unfrozen // has a value that is a frozen variant with a generic param (an example is // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::GENERIC_VARIANT`). // However, it prevents a number of false negatives that are, I think, important: // 1. assoc consts in trait defs referring to consts of themselves // (an example is `declare_interior_mutable_const::traits::ConcreteTypes::ANOTHER_ATOMIC`). // 2. a path expr referring to assoc consts whose type doesn't have // any frozen variants in trait defs (i.e. without a substitute for `Self`). // (e.g. borrowing `borrow_interior_mutable_const::trait::ConcreteTypes::ATOMIC`) // 3. similar to the false positive above; // but the value is an unfrozen variant, or the type has no enums. (An example is // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::UNFROZEN_VARIANT` // and `declare_interior_mutable_const::enums::BothOfCellAndGeneric::NO_ENUM`). // One might be able to prevent these FNs correctly, and replace this with `false`; // e.g. implementing `has_frozen_variant` described above, and not running this function // when the type doesn't have any frozen variants would be the 'correct' way for the 2nd // case (that actually removes another suboptimal behavior (I won't say 'false positive') where, // similar to 2., but with a frozen variant) (e.g. borrowing // `borrow_interior_mutable_const::enums::AssocConsts::TO_BE_FROZEN_VARIANT`). // I chose this way because unfrozen enums as assoc consts are rare (or, hopefully, none). err == ErrorHandled::TooGeneric }, |val| inner(cx, Const::from_value(cx.tcx, val, ty)), ) } fn is_value_unfrozen_poly<'tcx>(cx: &LateContext<'tcx>, body_id: BodyId, ty: Ty<'tcx>) -> bool { let result = cx.tcx.const_eval_poly(body_id.hir_id.owner.to_def_id()); is_value_unfrozen_raw(cx, result, ty) } fn is_value_unfrozen_expr<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId, def_id: DefId, ty: Ty<'tcx>) -> bool { let substs = cx.typeck_results().node_substs(hir_id); let result = cx.tcx.const_eval_resolve( cx.param_env, ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs), None, ); is_value_unfrozen_raw(cx, result, ty) } #[derive(Copy, Clone)] enum Source { Item { item: Span }, Assoc { item: Span }, Expr { expr: Span }, } impl Source { #[must_use] fn lint(&self) -> (&'static Lint, &'static str, Span) { match self { Self::Item { item } | Self::Assoc { item, .. } => ( DECLARE_INTERIOR_MUTABLE_CONST, "a `const` item should never be interior mutable", *item, ), Self::Expr { expr } => ( BORROW_INTERIOR_MUTABLE_CONST, "a `const` item with interior mutability should not be borrowed", *expr, ), } } } fn lint(cx: &LateContext<'_>, source: Source) { let (lint, msg, span) = source.lint(); span_lint_and_then(cx, lint, span, msg, |diag| { if span.from_expansion() { return; // Don't give suggestions into macros. } match source { Source::Item { .. } => { let const_kw_span = span.from_inner(InnerSpan::new(0, 5)); diag.span_label(const_kw_span, "make this a static item (maybe with lazy_static)"); }, Source::Assoc { .. } => (), Source::Expr { ..
} => { diag.help("assign this const to a local or static variable, and use the variable here"); }, } }); } declare_lint_pass!(NonCopyConst => [DECLARE_INTERIOR_MUTABLE_CONST, BORROW_INTERIOR_MUTABLE_CONST]); impl<'tcx> LateLintPass<'tcx> for NonCopyConst { fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx Item<'_>) { if let ItemKind::Const(hir_ty, body_id) = it.kind { let ty = hir_ty_to_ty(cx.tcx, hir_ty); if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, body_id, ty) { lint(cx, Source::Item { item: it.span }); } } } fn check_trait_item(&mut self, cx: &LateContext<'tcx>, trait_item: &'tcx TraitItem<'_>) { if let TraitItemKind::Const(hir_ty, body_id_opt) = &trait_item.kind { let ty = hir_ty_to_ty(cx.tcx, hir_ty); // Normalize assoc types because ones originated from generic params // bounded other traits could have their bound. let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty); if is_unfrozen(cx, normalized) // When there's no default value, lint it only according to its type; // in other words, lint consts whose value *could* be unfrozen, not ones that definitely are. // This feels inconsistent with how the lint treats generic types, // which avoids linting types which potentially become unfrozen. // One could check whether an unfrozen type has a *frozen variant* // (like `body_id_opt.map_or_else(|| !has_frozen_variant(...), ...)`), // and do the same as the case of generic types at impl items. // Note that it isn't sufficient to check if it has an enum // since all of that enum's variants can be unfrozen: // i.e. having an enum doesn't necessarily mean a type has a frozen variant. // And, implementing it isn't a trivial task; it'll probably end up // re-implementing the trait predicate evaluation specific to `Freeze`. && body_id_opt.map_or(true, |body_id| is_value_unfrozen_poly(cx, body_id, normalized)) { lint(cx, Source::Assoc { item: trait_item.span }); } } } fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx ImplItem<'_>) { if let ImplItemKind::Const(hir_ty, body_id) = &impl_item.kind { let item_hir_id = cx.tcx.hir().get_parent_node(impl_item.hir_id()); let item = cx.tcx.hir().expect_item(item_hir_id); match &item.kind { ItemKind::Impl(Impl { of_trait: Some(of_trait_ref), .. }) => { if_chain! { // Lint a trait impl item only when the definition is a generic type, // assuming an assoc const is not meant to be an interior mutable type. if let Some(of_trait_def_id) = of_trait_ref.trait_def_id(); if let Some(of_assoc_item) = specialization_graph::Node::Trait(of_trait_def_id) .item(cx.tcx, impl_item.ident, AssocKind::Const, of_trait_def_id); if cx .tcx .layout_of(cx.tcx.param_env(of_trait_def_id).and( // Normalize assoc types because ones originated from generic params // bounded other traits could have their bound at the trait defs; // and, in that case, the definition is *not* generic. cx.tcx.normalize_erasing_regions( cx.tcx.param_env(of_trait_def_id), cx.tcx.type_of(of_assoc_item.def_id), ), )) .is_err(); // If there were a function like `has_frozen_variant` described above, // we should use it here, as a frozen variant has the potential to be frozen, // similar to unknown layouts. // e.g. `layout_of(...).is_err() || has_frozen_variant(...);` let ty = hir_ty_to_ty(cx.tcx, hir_ty); let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty); if is_unfrozen(cx, normalized); if is_value_unfrozen_poly(cx, *body_id, normalized); then { lint( cx, Source::Assoc { item: impl_item.span, }, ); } } }, ItemKind::Impl(Impl { of_trait: None, ..
}) => { let ty = hir_ty_to_ty(cx.tcx, hir_ty); // Normalize assoc types originated from generic params. let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty); if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, *body_id, normalized) { lint(cx, Source::Assoc { item: impl_item.span }); } }, _ => (), } } } fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) { if let ExprKind::Path(qpath) = &expr.kind { // Only lint if we use the const item inside a function. if in_constant(cx, expr.hir_id) { return; } // Make sure it is a const item. let item_def_id = match cx.qpath_res(qpath, expr.hir_id) { Res::Def(DefKind::Const | DefKind::AssocConst, did) => did, _ => return, }; // Climb up to resolve any field access and explicit referencing. let mut cur_expr = expr; let mut dereferenced_expr = expr; let mut needs_check_adjustment = true; loop { let parent_id = cx.tcx.hir().get_parent_node(cur_expr.hir_id); if parent_id == cur_expr.hir_id { break; } if let Some(Node::Expr(parent_expr)) = cx.tcx.hir().find(parent_id) { match &parent_expr.kind { ExprKind::AddrOf(..) => { // `&e` => `e` must be referenced. needs_check_adjustment = false; }, ExprKind::Field(..) => { needs_check_adjustment = true; // Check whether implicit dereferences happened; // if so, no need to go further up // because of the same reason as the `ExprKind::Unary` case. if cx .typeck_results() .expr_adjustments(dereferenced_expr) .iter() .any(|adj| matches!(adj.kind, Adjust::Deref(_))) { break; } dereferenced_expr = parent_expr; }, ExprKind::Index(e, _) if ptr::eq(&**e, cur_expr) => { // `e[i]` => desugared to `*Index::index(&e, i)`, // meaning `e` must be referenced. // no need to go further up since a method call is involved now. needs_check_adjustment = false; break; }, ExprKind::Unary(UnOp::Deref, _) => { // `*e` => desugared to `*Deref::deref(&e)`, // meaning `e` must be referenced. // no need to go further up since a method call is involved now. needs_check_adjustment = false; break; }, _ => break, } cur_expr = parent_expr; } else { break; } } let ty = if needs_check_adjustment { let adjustments = cx.typeck_results().expr_adjustments(dereferenced_expr); if let Some(i) = adjustments .iter() .position(|adj| matches!(adj.kind, Adjust::Borrow(_) | Adjust::Deref(_))) { if i == 0
{ cx.typeck_results().expr_ty(dereferenced_expr) }
conditional_block
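The `BORROW_INTERIOR_MUTABLE_CONST` help text in the rows above suggests assigning the const to a local or static variable before using it. A small runnable sketch of that remediation (my own illustration, reusing the `CONST_ATOM` shape from the doc examples):

```rust
// Bind the interior mutable const once, then mutate through the binding.
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);

fn main() {
    let local = CONST_ATOM;            // exactly one copy, named once
    local.store(6, SeqCst);            // the write lands in the binding...
    assert_eq!(local.load(SeqCst), 6); // ...and is observable through it
}
```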
non_copy_const.rs
/// ### Why is this bad? /// Consts are copied everywhere they are referenced, i.e., /// every time you refer to the const a fresh instance of the `Cell` or `Mutex` /// or `AtomicXxxx` will be created, which defeats the whole purpose of using /// these types in the first place. /// /// The `const` is better replaced by a `static` item if a global /// variable is wanted, or by a `const fn` if a constructor is wanted. /// /// ### Known problems /// A "non-constant" const item is a legacy way to supply an /// initialized value to downstream `static` items (e.g., the /// `std::sync::ONCE_INIT` constant). In this case the use of `const` is legit, /// and this lint should be suppressed. /// /// Even though the lint avoids triggering on a constant whose type has enums that have variants /// with interior mutability, and its value uses non interior mutable variants (see /// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962) and /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825) for examples), /// it complains about associated constants without default values based only on their types, /// which might not be preferable. /// There are other enum-plus-associated-constant cases that the lint cannot handle. /// /// Types that have underlying or potential interior mutability trigger the lint whether /// the interior mutable field is used or not. See issues /// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825). /// /// ### Example /// ```rust /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; /// /// // Bad. /// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12); /// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct /// /// // Good. /// static STATIC_ATOM: AtomicUsize = AtomicUsize::new(15); /// STATIC_ATOM.store(9, SeqCst); /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance /// ``` #[clippy::version = "pre 1.29.0"] pub DECLARE_INTERIOR_MUTABLE_CONST, style, "declaring `const` with interior mutability" } // FIXME: this is a correctness problem but there's no suitable // warn-by-default category. declare_clippy_lint! { /// ### What it does /// Checks if `const` items which are interior mutable (e.g., /// contain a `Cell`, `Mutex`, `AtomicXxxx`, etc.) have been borrowed directly. /// /// ### Why is this bad? /// Consts are copied everywhere they are referenced, i.e., /// every time you refer to the const a fresh instance of the `Cell` or `Mutex` /// or `AtomicXxxx` will be created, which defeats the whole purpose of using /// these types in the first place. /// /// The `const` value should be stored inside a `static` item. /// /// ### Known problems /// When an enum has variants with interior mutability, use of its non /// interior mutable variants can generate false positives. See issue /// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962) /// /// Types that have underlying or potential interior mutability trigger the lint whether /// the interior mutable field is used or not. See issues /// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825) /// /// ### Example /// ```rust /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst}; /// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12); /// /// // Bad.
/// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOM in these lines are distinct /// /// // Good. /// static STATIC_ATOM: AtomicUsize = CONST_ATOM; /// STATIC_ATOM.store(9, SeqCst); /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance /// ``` #[clippy::version = "pre 1.29.0"] pub BORROW_INTERIOR_MUTABLE_CONST, style, "referencing `const` with interior mutability" } fn
<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool { // Ignore types whose layout is unknown since `is_freeze` reports every generic type as `!Freeze`, // making it indistinguishable from `UnsafeCell`. i.e. it isn't a tool to prove a type is // 'unfrozen'. However, this code causes a false negative in which // a type contains a layout-unknown type, but also an unsafe cell like `const CELL: Cell<T>`. // Yet, it's better than `ty.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_PROJECTION)` // since it works when pointer indirection is involved (`Cell<*const T>`). // Making up a `ParamEnv` where every generic param and assoc type is `Freeze` is another option; // but I'm not sure whether it's a decent way, if possible. cx.tcx.layout_of(cx.param_env.and(ty)).is_ok() && !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env) } fn is_value_unfrozen_raw<'tcx>( cx: &LateContext<'tcx>, result: Result<ConstValue<'tcx>, ErrorHandled>, ty: Ty<'tcx>, ) -> bool { fn inner<'tcx>(cx: &LateContext<'tcx>, val: &'tcx Const<'tcx>) -> bool { match val.ty.kind() { // the fact that we have to dig into every struct to search for enums // leads us to the point where checking `UnsafeCell` directly is the only option. ty::Adt(ty_def, ..) if Some(ty_def.did) == cx.tcx.lang_items().unsafe_cell_type() => true, ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => { let val = cx.tcx.destructure_const(cx.param_env.and(val)); val.fields.iter().any(|field| inner(cx, field)) }, _ => false, } } result.map_or_else( |err| { // Consider `TooGeneric` cases as being unfrozen. // This causes a false positive where an assoc const whose type is unfrozen // has a value that is a frozen variant with a generic param (an example is // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::GENERIC_VARIANT`). // However, it prevents a number of false negatives that are, I think, important: // 1. assoc consts in trait defs referring to consts of themselves // (an example is `declare_interior_mutable_const::traits::ConcreteTypes::ANOTHER_ATOMIC`). // 2. a path expr referring to assoc consts whose type doesn't have // any frozen variants in trait defs (i.e. without a substitute for `Self`). // (e.g. borrowing `borrow_interior_mutable_const::trait::ConcreteTypes::ATOMIC`) // 3. similar to the false positive above; // but the value is an unfrozen variant, or the type has no enums. (An example is // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::UNFROZEN_VARIANT` // and `declare_interior_mutable_const::enums::BothOfCellAndGeneric::NO_ENUM`). // One might be able to prevent these FNs correctly, and replace this with `false`; // e.g. implementing `has_frozen_variant` described above, and not running this function // when the type doesn't have any frozen variants would be the 'correct' way for the 2nd // case (that actually removes another suboptimal behavior (I won't say 'false positive') where, // similar to 2., but with a frozen variant) (e.g. borrowing // `borrow_interior_mutable_const::enums::AssocConsts::TO_BE_FROZEN_VARIANT`). // I chose this way because unfrozen enums as assoc consts are rare (or, hopefully, none).
err == ErrorHandled::TooGeneric }, |val| inner(cx, Const::from_value(cx.tcx, val, ty)), ) } fn is_value_unfrozen_poly<'tcx>(cx: &LateContext<'tcx>, body_id: BodyId, ty: Ty<'tcx>) -> bool { let result = cx.tcx.const_eval_poly(body_id.hir_id.owner.to_def_id()); is_value_unfrozen_raw(cx, result, ty) } fn is_value_unfrozen_expr<'tcx>(cx: &LateContext<'tcx>, hir_id: HirId, def_id: DefId, ty: Ty<'tcx>) -> bool { let substs = cx.typeck_results().node_substs(hir_id); let result = cx.tcx.const_eval_resolve( cx.param_env, ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs), None, ); is_value_unfrozen_raw(cx, result, ty) } #[derive(Copy, Clone)] enum Source { Item { item: Span }, Assoc { item: Span }, Expr { expr: Span }, } impl Source { #[must_use] fn lint(&self) -> (&'static Lint, &'static str, Span) { match self { Self::Item { item } | Self::Assoc { item, .. } => ( DECLARE_INTERIOR_MUTABLE_CONST, "a `const` item should never be interior mutable", *item, ), Self::Expr { expr } => ( BORROW_INTERIOR_MUTABLE_CONST, "a `const` item with interior mutability should not be borrowed", *expr, ), } } } fn lint(cx: &LateContext<'_>, source: Source) { let (lint, msg, span) = source.lint(); span_lint_and_then(cx, lint, span, msg, |diag| { if span.from_expansion() { return; // Don't give suggestions into macros. } match source { Source::Item { .. } => { let const_kw_span = span.from_inner(InnerSpan::new(0, 5)); diag.span_label(const_kw_span, "make this a static item (maybe with lazy_static)"); }, Source::Assoc { .. } => (), Source::Expr { .. } => { diag.help("assign this const to a local or static variable, and use the variable here"); }, } }); } declare_lint_pass!(NonCopyConst => [DECLARE_INTERIOR_MUTABLE_CONST, BORROW_INTERIOR_MUTABLE_CONST]); impl<'tcx> LateLintPass<'tcx> for NonCopyConst { fn check_item(&mut self, cx: &LateContext<'tcx>, it: &'tcx Item<'_>) { if let ItemKind::Const(hir_ty, body_id) = it.kind { let ty = hir_ty_to_ty(cx.tcx, hir_ty); if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, body_id, ty) { lint(cx, Source::Item { item: it.span }); } } } fn check_trait_item(&mut self, cx: &LateContext<'tcx>, trait_item: &'tcx TraitItem<'_>) { if let TraitItemKind::Const(hir_ty, body_id_opt) = &trait_item.kind { let ty = hir_ty_to_ty(cx.tcx, hir_ty); // Normalize assoc types because ones originated from generic params // bounded other traits could have their bound. let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty); if is_unfrozen(cx, normalized) // When there's no default value, lint it only according to its type; // in other words, lint consts whose value *could* be unfrozen, not ones that definitely are. // This feels inconsistent with how the lint treats generic types, // which avoids linting types which potentially become unfrozen. // One could check whether an unfrozen type has a *frozen variant* // (like `body_id_opt.map_or_else(|| !has_frozen_variant(...), ...)`), // and do the same as the case of generic types at impl items. // Note that it isn't sufficient to check if it has an enum // since all of that enum's variants can be unfrozen: // i.e. having an enum doesn't necessarily mean a type has a frozen variant. // And, implementing it isn't a trivial task; it'll probably end up // re-implementing the trait predicate evaluation specific to `Freeze`.
&& body_id_opt.map_or(true, |body_id| is_value_unfrozen_poly(cx, body_id, normalized)) { lint(cx, Source::Assoc { item: trait_item.span }); } } } fn check_impl_item(&mut self, cx: &LateContext<'tcx>, impl_item: &'tcx ImplItem<'_>) { if let ImplItemKind::Const(hir_ty, body_id) = &impl_item.kind { let item_hir_id = cx.tcx.hir().get_parent_node(impl_item.hir_id()); let item = cx.tcx.hir().expect_item(item_hir_id); match &item.kind { ItemKind::Impl(Impl { of_trait: Some(of_trait_ref), .. }) => { if_chain! { // Lint a trait impl item only when the definition is a generic type, // assuming an assoc const is not meant to be an interior mutable type. if let Some(of_trait_def_id) = of_trait_ref.trait_def_id(); if let Some(of_assoc_item) = specialization_graph::Node::Trait(of_trait_def_id) .item(cx.tcx, impl_item.ident, AssocKind::Const, of_trait_def_id); if cx .tcx .layout_of(cx.tcx.param_env(of_trait_def_id).and( // Normalize assoc types because ones originated from generic params // bounded other traits could have their bound at the trait defs; // and, in that case, the definition is *not* generic. cx.tcx.normalize_erasing_regions( cx.tcx.param_env(of_trait_def_id), cx.tcx.type_of(of_assoc_item.def_id), ), )) .is_err(); // If there were a function like `has_frozen_variant` described above, // we should use it here, as a frozen variant has the potential to be frozen, // similar to unknown layouts. // e.g. `layout_of(...).is_err() || has_frozen_variant(...);` let ty = hir_ty_to_ty(cx.tcx, hir_ty); let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty); if is_unfrozen(cx, normalized); if is_value_unfrozen_poly(cx, *body_id, normalized); then { lint( cx, Source::Assoc { item: impl_item.span, }, ); } } }, ItemKind::Impl(Impl { of_trait: None, .. }) => { let ty = hir_ty_to_ty(cx.tcx, hir_ty); // Normalize assoc types originated from generic params. let normalized = cx.tcx.normalize_erasing_regions(cx.param_env, ty); if is_unfrozen(cx, ty) && is_value_unfrozen_poly(cx, *body_id, normalized) { lint(cx, Source::Assoc { item: impl_item.span }); } }, _ => (), } } } fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) { if let ExprKind::Path(qpath) = &expr.kind { // Only lint if we use the const item inside a function. if in_constant(cx, expr.hir_id) { return; } // Make sure it is a const item. let item_def_id = match cx.qpath_res(qpath, expr.hir_id) { Res::Def(DefKind::Const | DefKind::AssocConst, did) => did, _ => return, }; // Climb up to resolve any field access and explicit referencing. let mut cur_expr = expr; let mut dereferenced_expr = expr; let mut needs_check_adjustment = true; loop { let parent_id = cx.tcx.hir().get_parent_node(cur_expr.hir_id); if parent_id == cur_expr.hir_id { break; } if let Some(Node::Expr(parent_expr)) = cx.tcx.hir().find(parent_id) { match &parent_expr.kind { ExprKind::AddrOf(..) => { // `&e` => `e` must be referenced. needs_check_adjustment = false; }, ExprKind::Field(..) => { needs_check_adjustment = true; // Check whether implicit dereferences happened; // if so, no need to go further up // because of the same reason as the `ExprKind::Unary` case. if cx .typeck_results() .expr_adjustments(dereferenced_expr) .iter() .any(|adj| matches!(adj.kind, Adjust::Deref(_))) { break; } dereferenced_expr = parent_expr; }, ExprKind::Index(e, _) if ptr::eq(&**e, cur_expr) => { // `e[i]` => desugared to `*Index::index(&e, i)`, // meaning `e` must be referenced. // no need to go further up since a method call is involved now.
needs_check_adjustment = false; break; }, ExprKind::Unary(UnOp::Deref, _) => { // `*e` => desugared to `*Deref::deref(&e)`,
is_unfrozen
identifier_name
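// For context, a minimal sketch (not taken from the lint's test suite; the
// trait and names below are invented) of the trait-associated-constant case
// that `check_trait_item` above reasons about: with no default value the lint
// can only judge by the type, while a defaulted constant can also be judged
// by its value.
use std::sync::atomic::AtomicUsize;

trait Harness {
    // No default value: `DECLARE_INTERIOR_MUTABLE_CONST` can only look at the
    // type, so this would be linted based on `AtomicUsize` alone.
    const JUST_A_TYPE: AtomicUsize;

    // With a default value, `is_value_unfrozen_poly` can also inspect the
    // value itself.
    const WITH_DEFAULT: AtomicUsize = AtomicUsize::new(0);
}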
non_copy_const.rs
/// ### Why is this bad?
///
/// The `const` should be replaced by a `static` item if a global
/// variable is wanted, or by a `const fn` if a constructor is wanted.
///
/// ### Known problems
/// A "non-constant" const item is a legacy way to supply an
/// initialized value to downstream `static` items (e.g., the
/// `std::sync::ONCE_INIT` constant). In this case the use of `const` is legit,
/// and this lint should be suppressed.
///
/// Even though the lint avoids triggering on a constant whose type has enums that have variants
/// with interior mutability, and whose value uses non interior mutable variants (see
/// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962) and
/// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825) for examples),
/// it complains about associated constants without default values based only on their types,
/// which might not be preferable.
/// There are other cases involving enums plus associated constants that the lint cannot handle.
///
/// Types that have underlying or potential interior mutability trigger the lint whether
/// the interior mutable field is used or not. See issues
/// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and
/// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825)
///
/// ### Example
/// ```rust
/// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
///
/// // Bad.
/// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
/// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged
/// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOMs in these lines are distinct
///
/// // Good.
/// static STATIC_ATOM: AtomicUsize = AtomicUsize::new(15);
/// STATIC_ATOM.store(9, SeqCst);
/// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance
/// ```
#[clippy::version = "pre 1.29.0"]
pub DECLARE_INTERIOR_MUTABLE_CONST,
style,
"declaring `const` with interior mutability"
}

// FIXME: this is a correctness problem but there's no suitable
// warn-by-default category.
declare_clippy_lint! {
    /// ### What it does
    /// Checks whether a `const` item with interior mutability (e.g., one that
    /// contains a `Cell`, `Mutex`, `AtomicXxxx`, etc.) has been borrowed directly.
    ///
    /// ### Why is this bad?
    /// Consts are copied everywhere they are referenced, i.e.,
    /// every time you refer to the const a fresh instance of the `Cell` or `Mutex`
    /// or `AtomicXxxx` will be created, which defeats the whole purpose of using
    /// these types in the first place.
    ///
    /// The `const` value should be stored inside a `static` item.
    ///
    /// ### Known problems
    /// When an enum has variants with interior mutability, use of its non
    /// interior mutable variants can generate false positives. See issue
    /// [#3962](https://github.com/rust-lang/rust-clippy/issues/3962)
    ///
    /// Types that have underlying or potential interior mutability trigger the lint whether
    /// the interior mutable field is used or not. See issues
    /// [#5812](https://github.com/rust-lang/rust-clippy/issues/5812) and
    /// [#3825](https://github.com/rust-lang/rust-clippy/issues/3825)
    ///
    /// ### Example
    /// ```rust
    /// use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
    /// const CONST_ATOM: AtomicUsize = AtomicUsize::new(12);
    ///
    /// // Bad.
    /// CONST_ATOM.store(6, SeqCst); // the content of the atomic is unchanged
    /// assert_eq!(CONST_ATOM.load(SeqCst), 12); // because the CONST_ATOMs in these lines are distinct
    ///
    /// // Good.
    /// static STATIC_ATOM: AtomicUsize = CONST_ATOM;
    /// STATIC_ATOM.store(9, SeqCst);
    /// assert_eq!(STATIC_ATOM.load(SeqCst), 9); // use a `static` item to refer to the same instance
    /// ```
    #[clippy::version = "pre 1.29.0"]
    pub BORROW_INTERIOR_MUTABLE_CONST,
    style,
    "referencing `const` with interior mutability"
}

fn is_unfrozen<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
    // Ignore types whose layout is unknown, since `is_freeze` reports every generic type as `!Freeze`,
    // making it indistinguishable from `UnsafeCell`; i.e. it isn't a tool to prove a type is
    // 'unfrozen'. However, this code causes a false negative in which
    // a type contains a layout-unknown type, but also an unsafe cell like `const CELL: Cell<T>`.
    // Yet, it's better than `ty.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_PROJECTION)`
    // since it works when pointer indirection is involved (`Cell<*const T>`).
    // Making up a `ParamEnv` where every generic param and assoc type is `Freeze` is another option,
    // but I'm not sure whether that's a decent way, if it is possible at all.
    cx.tcx.layout_of(cx.param_env.and(ty)).is_ok() && !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
}

fn is_value_unfrozen_raw<'tcx>(
    cx: &LateContext<'tcx>,
    result: Result<ConstValue<'tcx>, ErrorHandled>,
    ty: Ty<'tcx>,
) -> bool {
    fn inner<'tcx>(cx: &LateContext<'tcx>, val: &'tcx Const<'tcx>) -> bool {
        match val.ty.kind() {
            // The fact that we have to dig into every struct to search for enums
            // leads us to the point where checking for `UnsafeCell` directly is the only option.
            ty::Adt(ty_def, ..) if Some(ty_def.did) == cx.tcx.lang_items().unsafe_cell_type() => true,
            ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => {
                let val = cx.tcx.destructure_const(cx.param_env.and(val));
                val.fields.iter().any(|field| inner(cx, field))
            },
            _ => false,
        }
    }
    result.map_or_else(
        |err| {
            // Consider `TooGeneric` cases as being unfrozen.
            // This causes a false positive where an assoc const whose type is unfrozen
            // has a value that is a frozen variant with a generic param (an example is
            // `declare_interior_mutable_const::enums::BothOfCellAndGeneric::GENERIC_VARIANT`).
            // However, it prevents a number of false negatives that are, I think, important:
            // 1. assoc consts in trait defs referring to consts of themselves
            //    (an example is `declare_interior_mutable_const::traits::ConcreteTypes::ANOTHER_ATOMIC`).
            // 2. a path expr referring to assoc consts whose type doesn't have
            //    any frozen variants in trait defs (i.e. without a substitute for `Self`)
            //    (e.g. borrowing `borrow_interior_mutable_const::trait::ConcreteTypes::ATOMIC`).
            // 3. similar to the false positive above,
            //    but the value is an unfrozen variant, or the type has no enums. (An example is
            //    `declare_interior_mutable_const::enums::BothOfCellAndGeneric::UNFROZEN_VARIANT`
            //    and `declare_interior_mutable_const::enums::BothOfCellAndGeneric::NO_ENUM`.)
            // One might be able to prevent these FNs correctly, and replace this with `false`;
            // e.g. implementing the `has_frozen_variant` function described above, and not running
            // this function when the type doesn't have any frozen variants, would be the 'correct'
            // way for the 2nd case (that actually removes another suboptimal behavior (I won't say
            // 'false positive') which is similar to 2., but with a frozen variant) (e.g. borrowing
            // `borrow_interior_mutable_const::enums::AssocConsts::TO_BE_FROZEN_VARIANT`).
            // I chose this way because unfrozen enums as assoc consts are rare (or, hopefully, none).
/// Consts are copied everywhere they are referenced, i.e., /// every time you refer to the const a fresh instance of the `Cell` or `Mutex` /// or `AtomicXxxx` will be created, which defeats the whole purpose of using /// these types in the first place.
random_line_split
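// To make the frozen/unfrozen-variant discussion above concrete, here is a
// small self-contained sketch (the enum is invented, loosely modeled on the
// `BothOfCellAndGeneric` tests referenced in the comments): only the variant
// that actually contains an `UnsafeCell` (via `Cell`) is interior mutable,
// which is why the value-based check matters.
use std::cell::Cell;

enum Both {
    Frozen(usize),
    Unfrozen(Cell<usize>),
}

// This value uses the frozen variant; judged by value it is not interior
// mutable, even though the *type* has an unfrozen variant.
const FROZEN_VARIANT: Both = Both::Frozen(1);

// This value really is interior mutable: every use site gets its own copy of
// the const, so mutations through it are silently lost.
const UNFROZEN_VARIANT: Both = Both::Unfrozen(Cell::new(1));

fn main() {
    if let Both::Unfrozen(cell) = &UNFROZEN_VARIANT {
        cell.set(2); // mutates a temporary copy, not shared state
    }
    let _ = &FROZEN_VARIANT;
}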
lib.rs
/* r-datatables */

/* Diesel, Rocket and serde imports */
use diesel::*;
use rocket::request::{FormItems, FromForm};
use serde::Serialize;

use diesel::sql_types::BigInt;

/* This one stands for the r-datatables counting struct */
#[derive(QueryableByName, Serialize)]
pub struct Count {
    #[sql_type = "BigInt"]
    pub count: i64,
}

/*
"Tables" explanation:
===================
-> The data structure comes in like:
    (JoinType, (dest_table_name, dest_table_key), (origin_table_name, origin_table_key))
-> The implemented struct will return something like:
    " `JoinType` JOIN `dest_table_name` ON `origin_table_name`.`origin_table_key` = `table2`.`common_field` " *(n-th)
*/
#[derive(Debug, Clone)]
pub struct Tables<'a> {
    pub origin: (&'a str, &'a str), /* From */
    pub fields: Vec<&'a str>,       /* Fields to seek for */
    pub join_targets: Option<Vec<(&'a str, (&'a str, &'a str), (&'a str, &'a str))>>, /* Join targets, explained above */
    pub datatables_post_query: DataTableQuery, /* Incoming query */
    pub query: Option<String>,      /* Our built query holder */
    pub condition: Option<Vec<(&'a str, &'a str, &'a str)>>, /* (And/Or, Field_Name, Value) */
    pub distinct: Option<bool>,
}

impl<'a> Tables<'a> {
    pub fn generate(&mut self) -> String {
        match self.datatables_post_query.order[0].0 {
            Some(column_index_to_order) => format!(
                "{} ORDER BY {} {}",
                self.select().join().where_like().condition().query.to_owned().unwrap(),
                self.fields[column_index_to_order as usize],
                &self.datatables_post_query.order[0]
                    .1
                    .as_ref()
                    .unwrap()
                    .to_uppercase()
            ),
            None => self.select().join().where_like().condition().query.to_owned().unwrap(),
        }
    }

    /* Returns the fields for the query */
    pub fn select(&mut self) -> Self {
        let stmt = &self
            .fields
            .iter()
            .map(|field| format!("{}, ", field))
            .collect::<String>();
        self.query = Some(
            format!(
                "SELECT {} {} FROM {}",
                // Only emit DISTINCT when it was explicitly requested;
                // matching on `Some(_)` would also treat `Some(false)` as distinct.
                match self.distinct {
                    Some(true) => "DISTINCT",
                    _ => "",
                },
                stmt[..(stmt.len() - 2)].to_owned(),
                self.origin.0
            )
            .to_owned(),
        );
        self.to_owned()
    }

    pub fn where_like(&mut self) -> Self {
        /*
        # Where like:
        ## This function receives self (as all of the SQL generators do) and turns
        the search value of the incoming DataTables query into a WHERE clause that
        looks for the desired information across all table fields.
        Returns... guess what? Self!

        NOTE: the search value is interpolated directly into the SQL string; with
        untrusted input, bind parameters should be preferred to avoid SQL injection.
        */
        let stmt = self
            .fields
            .iter()
            .map(|field| {
                format!(
                    " CAST({} as TEXT) LIKE '%{}%' OR",
                    field,
                    self.datatables_post_query.search[0].0.as_ref().unwrap()
                )
            })
            .collect::<String>();
        self.query = Some(
            format!(
                "{} WHERE ({})",
                self.query.to_owned().unwrap(),
                stmt[..(stmt.len() - 2)].to_owned()
            )
            .to_owned(),
        );
        self.to_owned()
    }

    pub fn join(&mut self) -> Self {
        /*
        # How does this work?
        ## We match on whether the "join statement" needs to be appended or not.
        As in the other SQL generator functions, we opt not to use an if statement
        to find the "last" target and cut the string to append exactly.
        Returns self.
*/ match self.join_targets { Some(_) => { let stmt = self .join_targets .as_ref() .unwrap() .iter() .map(|(join_type, (target, target_key), (origin, origin_key))| { format!( "{} JOIN {} ON {}.{} = {}.{} ", join_type.to_uppercase(), target, origin, origin_key, target, target_key, ) }) .collect::<String>(); self.query = Some( format!("{} {}", self.query.to_owned().unwrap(), stmt.to_owned()).to_owned(), ); self.to_owned() } None => self.to_owned(), } } pub fn condition(&mut self) -> Self { match self.condition { Some(_) => { let stmt = self.condition.as_ref().unwrap().iter().map(|(sub_cond, target, value)| { format!("{} {} = '{}'", sub_cond.to_uppercase(), target, &value.to_string()) }).collect::<String>(); self.query = Some( format!("{} {}", self.query.to_owned().unwrap(), stmt.to_owned()).to_owned(), ); self.to_owned() } None => { self.to_owned()
} } } } #[allow(non_snake_case)] #[derive(Debug, Clone)] pub struct DataTableQuery { pub draw: i32, /* Stands for the n-th time that we're drawing */ pub columns: Vec<( Option<i32>, Option<String>, Option<bool>, Option<bool>, Option<String>, Option<bool>, )>, pub order: Vec<(Option<i32>, Option<String>)>, pub start: i32, /* How much to skip */ pub length: i32, /* How much to retrieve */ pub search: Vec<(Option<String>, bool)>, pub info: Option<i32>, } impl<'f> FromForm<'f> for DataTableQuery { // In practice, we'd use a more descriptive error type. type Error = (); fn from_form(items: &mut FormItems<'f>, strict: bool) -> Result<DataTableQuery, ()> { let mut draw: Option<i32> = None; let mut start: Option<i32> = None; let mut length: Option<i32> = None; let tmp_columns: Vec<( Option<i32>, Option<String>, Option<bool>, Option<bool>, Option<String>, Option<bool>, )> = vec![(None, None, None, None, None, None)]; let mut order_tuple: (Option<i32>, Option<String>) = (None, None); let mut search_value: Option<String> = None; let mut time_stamp: Option<i32> = None; for item in items { match item.key.as_str() { "draw" if draw.is_none() => { let decoded = item.value.url_decode().map_err(|_| ())?; draw = Some(match decoded.parse::<i32>() { Ok(item_val) => item_val, Err(_err_msg) => 0, }); } "start" if start.is_none() => { let decoded = item.value.url_decode().map_err(|_| ())?; start = Some(match decoded.parse::<i32>() { Ok(item_val) => item_val, Err(_err_msg) => 0, }); } "length" if length.is_none() => { let decoded = item.value.url_decode().map_err(|_| ())?; length = Some(match decoded.parse::<i32>() { Ok(item_val) => item_val, Err(_err_msg) => 0, }); } "search%5Bvalue%5D" if search_value.is_none() => { let decoded = Some(item.value.url_decode().map_err(|_| ())?); search_value = decoded; } key if key.contains("order%5B0%5D") => { if key.contains("order%5B0%5D%5Bcolumn%5D") { order_tuple.0 = Some( item.value .url_decode() .map_err(|_| ())? .parse::<i32>() .unwrap(), ); } else { order_tuple.1 = Some(item.value.url_decode().map_err(|_| ())?); } } "_" => { time_stamp = Some( item.value .url_decode() .map_err(|_| ())? 
.parse::<i32>() .unwrap(), ); } _ if strict => return Err(()), _ => {} } } Ok(DataTableQuery { draw: match draw { Some(value) => value, None => 0, }, columns: tmp_columns[1..].to_owned(), order: vec![order_tuple], start: match start { Some(value) => value, None => 0, }, length: match length { Some(value) => value, None => 0, }, search: vec![(search_value, false)], info: time_stamp.to_owned(), }) } } #[allow(non_snake_case)] #[derive(Debug, Serialize)] pub struct OutcomeData<T> { pub draw: i32, pub recordsTotal: i64, pub recordsFiltered: i32, pub data: Vec<T>, } pub fn datatables_query< T: diesel::deserialize::QueryableByName<diesel::pg::Pg> + std::fmt::Debug + std::clone::Clone, >( table: Tables, conn: PgConnection, ) -> OutcomeData<T> { println!("{}", table.clone().generate()); let (data_results, total_data): (Vec<T>, Count) = ( sql_query(table.clone().generate()) .load(&conn) .expect("Failed to retrieve information"), sql_query(format!("SELECT COUNT(*) FROM {}", table.origin.0)) .load::<Count>(&conn) .expect("Query failed") .pop() .expect("No rows"), ); let tmp_results = data_results[(table.datatables_post_query.start as usize)..].to_vec(); OutcomeData::<T> { draw: table.datatables_post_query.draw, /* N-th draw */ recordsTotal: total_data.count, /* How much we have on this table */ recordsFiltered: data_results.len() as i32, /* How much query has returned */ data: if tmp_results.len() >= (table.datatables_post_query.length as usize) { tmp_results[..(table.datatables_post_query.length as usize)].to_vec() } else { tmp_results.to_vec() }, } }
random_line_split
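// Hypothetical usage of the builder above (table and column names are invented
// for illustration; only the `Tables` and `DataTableQuery` types from this file
// are assumed). `generate()` chains `select`, `join`, `where_like` and
// `condition`, then appends the ORDER BY clause when an ordering was requested.
fn demo_generate() {
    let post = DataTableQuery {
        draw: 1,
        columns: vec![],
        order: vec![(Some(0), Some("asc".to_string()))],
        start: 0,
        length: 10,
        search: vec![(Some("alice".to_string()), false)],
        info: None,
    };

    let mut table = Tables {
        origin: ("users", "id"),
        fields: vec!["users.id", "users.name", "roles.label"],
        join_targets: Some(vec![("inner", ("roles", "id"), ("users", "role_id"))]),
        datatables_post_query: post,
        query: None,
        condition: Some(vec![("and", "users.active", "true")]),
        distinct: None,
    };

    // Prints roughly:
    // SELECT users.id, users.name, roles.label FROM users
    //   INNER JOIN roles ON users.role_id = roles.id
    //   WHERE ( CAST(users.id as TEXT) LIKE '%alice%' OR ... )
    //   AND users.active = 'true'
    //   ORDER BY users.id ASC
    println!("{}", table.generate());
}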
main.rs
use alis_bot_rs::*;
use clap::{App, Arg, ArgMatches};
use failure::Error;
use futures::prelude::*;
use glob::glob;
use irc::client::prelude::*;
use log::{debug, error, info};
use std::path::PathBuf;
use std::sync::mpsc::channel;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use tokio::runtime::Runtime;

#[macro_use]
extern crate failure;

const CONFIG_FILE_OPT: &str = "config";
const CONFIG_DIR_OPT: &str = "conf-dir";
const CONFIG_FILE_EXT: &str = "toml";
const DEFAULT_CONFIG_FILE: &str = "example_config.toml";

fn build_app() -> App<'static> {
    App::new("alis-bot-rs")
        .version("1.0")
        .about("alis-unofficial IRC bot")
        .arg(
            Arg::new(CONFIG_FILE_OPT)
                .about("configuration file(s) to use")
                .takes_value(true)
                .short('c')
                .long("config")
                .value_name("FILE")
                .multiple(true)
                .conflicts_with(CONFIG_DIR_OPT),
        )
        .arg("-d, --conf-dir=[DIR] 'configuration directory to use'")
}

fn main() {
    let matches = build_app().get_matches();
    env_logger::init();
    let configs = match get_config_paths_from_cli(matches) {
        Ok(c) => c,
        Err(e) => {
            error!("{}", e);
            match get_config_path_from_default() {
                Ok(c) => c,
                Err(e) => {
                    error!("{}", e);
                    return;
                }
            }
        }
    };
    let rt = Runtime::new().unwrap();
    /* one spawned task per configured instance */
    rt.block_on(async move {
        for config in configs {
            tokio::spawn(async move { run_instance(&config).await });
        }
    });
    // Keep the main thread alive while the runtime's worker threads run the
    // instances; parking avoids the 100% CPU busy-wait of a bare `loop {}`.
    loop {
        thread::park();
    }
}

fn get_config_paths_from_cli(matches: ArgMatches) -> Result<Vec<PathBuf>, Error> {
    let paths: Vec<PathBuf> = {
        if matches.is_present(CONFIG_FILE_OPT) {
            matches
                .values_of(CONFIG_FILE_OPT)
                .unwrap()
                .filter_map(|s| config_file_is_valid(PathBuf::from(s)).ok())
                .collect()
        } else if matches.is_present(CONFIG_DIR_OPT) {
            if let Some(user_glob) = matches.value_of(CONFIG_DIR_OPT) {
                let user_glob = format!("{}/*.{}", user_glob, CONFIG_FILE_EXT);
                glob(&user_glob)
                    .expect("Failed to read glob pattern")
                    .filter_map(|s| s.ok())
                    .filter_map(|s| config_file_is_valid(s).ok())
                    .collect()
            } else {
                return Err(format_err!("No directory value specified"));
            }
        } else {
            return Err(format_err!(
                "No configuration file specified, using default."
            ));
        }
    };
    if paths.is_empty() {
        return Err(format_err!("No valid configuration files found"));
    }
    Ok(paths)
}

fn config_file_is_valid(path: PathBuf) -> Result<PathBuf, Error> {
    let error;
    if let Ok(config) = Config::load(&path) {
        if let Some(_server) = config.server {
            return Ok(path);
        } else {
            error = format_err!(
                "Configuration file: {}, no server specified",
                path.as_path().display().to_string()
            );
        }
    } else {
        error = format_err!("File not found: {}", path.as_path().display().to_string());
    }
    error!("{}", error);
    Err(error)
}

fn get_config_path_from_default() -> Result<Vec<PathBuf>, Error>
async fn run_instance(config: &PathBuf) -> irc::error::Result<()> { let config = Config::load(&config)?; let mut client = Client::from_config(config.clone()).await?; client.identify()?; let mut stream = client.stream()?; if let Some(server) = config.server { info!("Connected to {}", server); } let mut server_name: Option<String> = None; let listing = ChannelListing::new(); // private messages mpsc channel let (ms, mr) = channel::<Message>(); // shared client let client = Arc::new(client); let privmsg_client = Arc::clone(&client); // Mutex with condition for listing access let mutcond: Arc<(Mutex<(bool, ChannelListing)>, Condvar)> = Arc::new((Mutex::new((false, listing)), Condvar::new())); let c_mutcond = Arc::clone(&mutcond); let privmsg_thread = thread::spawn(move || loop { let message = mr.recv().unwrap(); if let Command::PRIVMSG(_target, msg) = &message.command { let source = match message.source_nickname() { Some(s) => s, None => continue, }; privmsg_parse(&privmsg_client, &c_mutcond, &source, &msg); } }); while let Some(message) = stream.next().await.transpose()? { match &message.command { Command::PRIVMSG(target, _msg) => { // responds only to private message, ignoring unspecified source and server messages if target.eq(&client.current_nickname()) { let source = if let Some(s) = message.source_nickname() { s } else { continue; }; match &server_name { Some(server_name) if source.eq(server_name) => continue, _ => ms.send(message).unwrap(), } } } Command::Response(rpl_type, v) if *rpl_type == Response::RPL_LIST => { /* updating channel list */ let &(ref mtx, ref _cnd) = &*mutcond; let mut guard = mtx.lock().unwrap(); let listing = &mut guard.1; listing.add_channel(v); } Command::Response(rpl_type, _v) if *rpl_type == Response::RPL_LISTEND => { let &(ref mtx, ref cnd) = &*mutcond; let mut guard = mtx.lock().unwrap(); let listing = &mut guard.1; listing.set_timestamp(); debug!( "Channel list request...done. {} channels received", &listing.len() ); /* listing made available from now */ guard.0 = true; cnd.notify_all(); } Command::Response(rpl_type, _) if *rpl_type == Response::RPL_WELCOME => { if let Some(Prefix::ServerName(name)) = &message.prefix { server_name = Some(name.to_string()); } send_list_command(&client); } _ => (), } } let _ = privmsg_thread.join(); Ok(()) } #[cfg(test)] mod tests { use super::*; use std::fs::{rename, File}; use std::io::Write; use tempfile::Builder; #[test] fn conflicting_args() { let cmd = ["alis-bot-rs", "-c", "some_file", "-d", "some_dir"].iter(); let matches = build_app().try_get_matches_from(cmd); assert!(matches.is_err()); } #[test] fn multiple_files_on_c_option() { let mut expected: Vec<_> = Vec::new(); let dir = Builder::new() .prefix("test") .rand_bytes(0) .tempdir() .unwrap(); for i in 1..3 { let file_path = dir.path().join(format! 
{"{}_file.toml", i});
            let mut file = File::create(&file_path).unwrap();
            writeln!(file, "server = \"test\"").unwrap();
            expected.push(file_path);
        }
        let cmd = [
            "alis-bot-rs",
            "-c",
            "/tmp/test/1_file.toml",
            "/tmp/test/2_file.toml",
        ]
        .iter();
        let matches = build_app().get_matches_from(cmd);
        let result = get_config_paths_from_cli(matches).unwrap();
        assert_eq!(result, expected);

        let invalid_file = dir.path().join("error_file.toml");
        let _file = File::create(&invalid_file).unwrap();
        let cmd = [
            "alis-bot-rs",
            "-c",
            "/tmp/test/1_file.toml",
            "/tmp/test/2_file.toml",
            "/tmp/test/error_file.toml",
        ]
        .iter();
        let matches = build_app().get_matches_from(cmd);
        let result = get_config_paths_from_cli(matches).unwrap();
        assert_eq!(result, expected);
    }

    #[test]
    fn multiple_files_in_directory() {
        let mut expected: Vec<_> = Vec::new();
        let dir = Builder::new()
            .prefix("dir")
            .rand_bytes(0)
            .tempdir()
            .unwrap();
        for i in 1..4 {
            let file_path = dir.path().join(format! {"{}_file.toml", i});
            let mut file = File::create(&file_path).unwrap();
            writeln!(file, "server = \"test\"").unwrap();
            expected.push(file_path);
        }
        let cmd = ["alis-bot-rs", "-d", "/tmp/dir"].iter();
        let matches = build_app().get_matches_from(cmd);
        let result = get_config_paths_from_cli(matches).unwrap();
        assert_eq!(result, expected);
    }

    #[test]
    fn directory_failures_errors() {
        let cmd = ["alis-bot-rs", "-d", "/unaccessible/path"].iter();
        let matches = build_app().get_matches_from(cmd);
        assert!(get_config_paths_from_cli(matches).is_err());

        let _dir = Builder::new()
            .prefix("empty")
            .rand_bytes(0)
            .tempdir()
            .unwrap();
        // Point at the tempdir actually created above (/tmp/empty), not a
        // nonexistent /empty/ path, so the empty-directory case is exercised.
        let cmd = ["alis-bot-rs", "-d", "/tmp/empty"].iter();
        let matches = build_app().get_matches_from(cmd);
        assert!(
            get_config_paths_from_cli(matches).is_err(),
            "No valid configuration files found"
        );
    }

    #[test]
    fn use_default_config() {
        let cmd = ["alis-bot-rs"].iter();
        let matches = build_app().get_matches_from(cmd);
        assert!(
            get_config_paths_from_cli(matches).is_err(),
            "No configuration file specified"
        )
    }

    #[test]
    fn no_default_config_file() {
        rename("example_config.toml", "tmp_test.toml").unwrap();
        assert!(get_config_path_from_default().is_err());
        rename("tmp_test.toml", "example_config.toml").unwrap();
    }
}
{ let path = match config_file_is_valid(PathBuf::from(DEFAULT_CONFIG_FILE)) { Ok(p) => p, Err(e) => return Err(e), }; info!( "Using default configuration file: {}", path.as_path().display().to_string() ); Ok(vec![path]) }
identifier_body
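// A consumer of the channel listing would wait on the
// `(Mutex<(bool, ChannelListing)>, Condvar)` pair built in `run_instance`
// roughly like this hedged sketch (generic over the listing type, since
// `ChannelListing` and `privmsg_parse` live in the library crate and their
// definitions aren't shown here):
use std::sync::{Arc, Condvar, Mutex};

fn wait_until_ready<T: Clone>(mutcond: &Arc<(Mutex<(bool, T)>, Condvar)>) -> T {
    let (mtx, cnd) = &**mutcond;
    let mut guard = mtx.lock().unwrap();
    // Loop to tolerate spurious wakeups: only proceed once the RPL_LISTEND
    // handler has flipped the readiness flag and called `notify_all`.
    while !guard.0 {
        guard = cnd.wait(guard).unwrap();
    }
    guard.1.clone()
}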
main.rs
use alis_bot_rs::*; use clap::{App, Arg, ArgMatches}; use failure::Error; use futures::prelude::*; use glob::glob; use irc::client::prelude::*; use log::{debug, error, info}; use std::path::PathBuf; use std::sync::mpsc::channel; use std::sync::{Arc, Condvar, Mutex}; use std::thread; use tokio::runtime::Runtime; #[macro_use] extern crate failure; const CONFIG_FILE_OPT: &str = "config"; const CONFIG_DIR_OPT: &str = "conf-dir"; const CONFIG_FILE_EXT: &str = "toml"; const DEFAULT_CONFIG_FILE: &str = "example_config.toml"; fn build_app() -> App<'static> { App::new("alis-bot-rs") .version("1.0") .about("alis-unofficial IRC bot") .arg( Arg::new("config") .about("configuration file(s) to use") .takes_value(true) .short('c') .long("config") .value_name("FILE") .multiple(true) .conflicts_with("conf-dir"), ) .arg("-d, --conf-dir=[DIR] 'configuration directory to use'") } fn main() { let matches = build_app().get_matches(); env_logger::init(); let configs = match get_config_paths_from_cli(matches) { Ok(c) => c, Err(e) => { error!("{}", e); match get_config_path_from_default() { Ok(c) => c, Err(e) => { error!("{}", e); return; } } } }; let rt = Runtime::new().unwrap(); /* tasked instances */ rt.block_on(async move { for config in configs { tokio::spawn(async move { run_instance(&config).await }); } }); loop {} } fn get_config_paths_from_cli(matches: ArgMatches) -> Result<Vec<PathBuf>, Error> { let paths: Vec<PathBuf> = { if matches.is_present(CONFIG_FILE_OPT) { matches .values_of(CONFIG_FILE_OPT) .unwrap() .filter_map(|s| config_file_is_valid(PathBuf::from(s)).ok()) .collect() } else if matches.is_present(CONFIG_DIR_OPT) { if let Some(user_glob) = matches.value_of(CONFIG_DIR_OPT) { let user_glob = format!("{}/*.{}", user_glob, CONFIG_FILE_EXT); glob(&user_glob) .expect("Failed to read glob pattern") .filter_map(|s| s.ok()) .filter_map(|s| config_file_is_valid(s).ok()) .collect() } else { return Err(format_err!("No directory value specified")); } } else { return Err(format_err!( "No configuration file specified, using default." )); } }; if paths.len() == 0
Ok(paths) } fn config_file_is_valid(path: PathBuf) -> Result<PathBuf, Error> { let error; if let Ok(config) = Config::load(&path) { if let Some(_server) = config.server { return Ok(path); } else { error = format_err!( "Configuration file: {}, no server specified", path.as_path().display().to_string() ); } } else { error = format_err!("File not found: {}", path.as_path().display().to_string()); } error!("{}", error); Err(error) } fn get_config_path_from_default() -> Result<Vec<PathBuf>, Error> { let path = match config_file_is_valid(PathBuf::from(DEFAULT_CONFIG_FILE)) { Ok(p) => p, Err(e) => return Err(e), }; info!( "Using default configuration file: {}", path.as_path().display().to_string() ); Ok(vec![path]) } async fn run_instance(config: &PathBuf) -> irc::error::Result<()> { let config = Config::load(&config)?; let mut client = Client::from_config(config.clone()).await?; client.identify()?; let mut stream = client.stream()?; if let Some(server) = config.server { info!("Connected to {}", server); } let mut server_name: Option<String> = None; let listing = ChannelListing::new(); // private messages mpsc channel let (ms, mr) = channel::<Message>(); // shared client let client = Arc::new(client); let privmsg_client = Arc::clone(&client); // Mutex with condition for listing access let mutcond: Arc<(Mutex<(bool, ChannelListing)>, Condvar)> = Arc::new((Mutex::new((false, listing)), Condvar::new())); let c_mutcond = Arc::clone(&mutcond); let privmsg_thread = thread::spawn(move || loop { let message = mr.recv().unwrap(); if let Command::PRIVMSG(_target, msg) = &message.command { let source = match message.source_nickname() { Some(s) => s, None => continue, }; privmsg_parse(&privmsg_client, &c_mutcond, &source, &msg); } }); while let Some(message) = stream.next().await.transpose()? { match &message.command { Command::PRIVMSG(target, _msg) => { // responds only to private message, ignoring unspecified source and server messages if target.eq(&client.current_nickname()) { let source = if let Some(s) = message.source_nickname() { s } else { continue; }; match &server_name { Some(server_name) if source.eq(server_name) => continue, _ => ms.send(message).unwrap(), } } } Command::Response(rpl_type, v) if *rpl_type == Response::RPL_LIST => { /* updating channel list */ let &(ref mtx, ref _cnd) = &*mutcond; let mut guard = mtx.lock().unwrap(); let listing = &mut guard.1; listing.add_channel(v); } Command::Response(rpl_type, _v) if *rpl_type == Response::RPL_LISTEND => { let &(ref mtx, ref cnd) = &*mutcond; let mut guard = mtx.lock().unwrap(); let listing = &mut guard.1; listing.set_timestamp(); debug!( "Channel list request...done. {} channels received", &listing.len() ); /* listing made available from now */ guard.0 = true; cnd.notify_all(); } Command::Response(rpl_type, _) if *rpl_type == Response::RPL_WELCOME => { if let Some(Prefix::ServerName(name)) = &message.prefix { server_name = Some(name.to_string()); } send_list_command(&client); } _ => (), } } let _ = privmsg_thread.join(); Ok(()) } #[cfg(test)] mod tests { use super::*; use std::fs::{rename, File}; use std::io::Write; use tempfile::Builder; #[test] fn conflicting_args() { let cmd = ["alis-bot-rs", "-c", "some_file", "-d", "some_dir"].iter(); let matches = build_app().try_get_matches_from(cmd); assert!(matches.is_err()); } #[test] fn multiple_files_on_c_option() { let mut expected: Vec<_> = Vec::new(); let dir = Builder::new() .prefix("test") .rand_bytes(0) .tempdir() .unwrap(); for i in 1..3 { let file_path = dir.path().join(format! 
{"{}_file.toml", i}); let mut file = File::create(&file_path).unwrap(); writeln!(file, "server = \"test\"").unwrap(); expected.push(file_path); } let cmd = [ "alis-bot-rs", "-c", "/tmp/test/1_file.toml", "/tmp/test/2_file.toml", ] .iter(); let matches = build_app().get_matches_from(cmd); let result = get_config_paths_from_cli(matches).unwrap(); assert_eq!(result, expected); let unvalid_file = dir.path().join("error_file.toml"); let _file = File::create(&unvalid_file).unwrap(); let cmd = [ "alis-bot-rs", "-c", "/tmp/test/1_file.toml", "/tmp/test/2_file.toml", "/tmp/test/error_file.toml", ] .iter(); let matches = build_app().get_matches_from(cmd); let result = get_config_paths_from_cli(matches).unwrap(); assert_eq!(result, expected); } #[test] fn multiple_files_in_directory() { let mut expected: Vec<_> = Vec::new(); let dir = Builder::new() .prefix("dir") .rand_bytes(0) .tempdir() .unwrap(); for i in 1..4 { let file_path = dir.path().join(format! {"{}_file.toml", i}); let mut file = File::create(&file_path).unwrap(); writeln!(file, "server = \"test\"").unwrap(); expected.push(file_path); } let cmd = ["alis-bot-rs", "-d", "/tmp/dir"].iter(); let matches = build_app().get_matches_from(cmd); let result = get_config_paths_from_cli(matches).unwrap(); assert_eq!(result, expected); } #[test] fn directory_failures_errors() { let cmd = ["alis-bot-rs", "-d", "/unaccessible/path"].iter(); let matches = build_app().get_matches_from(cmd); assert!(get_config_paths_from_cli(matches).is_err()); let _dir = Builder::new() .prefix("empty") .rand_bytes(0) .tempdir() .unwrap(); let cmd = ["alis-bot-rs", "-d", "/empty/"].iter(); let matches = build_app().get_matches_from(cmd); assert!( get_config_paths_from_cli(matches).is_err(), "No valid configuration files found" ); } #[test] fn use_default_config() { let cmd = ["alis-bot-rs"].iter(); let matches = build_app().get_matches_from(cmd); assert!( get_config_paths_from_cli(matches).is_err(), "No configuration file specified" ) } #[test] fn no_default_config_file() { rename("example_config.toml", "tmp_test.toml").unwrap(); assert!(get_config_path_from_default().is_err()); rename("tmp_test.toml", "example_config.toml").unwrap(); } }
{ return Err(format_err!("No valid configuration files found")); }
conditional_block
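The record above discovers configuration files either from explicit -c values or by globbing a -d directory for *.toml files. A minimal standalone sketch of that glob-discovery step, using the same glob crate (the /etc/alis directory is a hypothetical example, not taken from the source):

use glob::glob;
use std::path::PathBuf;

// Collect every *.toml file under `dir`, silently skipping entries that
// failed to match or could not be read, mirroring the -d code path above.
fn toml_files_in(dir: &str) -> Vec<PathBuf> {
    let pattern = format!("{}/*.toml", dir);
    glob(&pattern)
        .expect("Failed to read glob pattern")
        .filter_map(Result::ok)
        .collect()
}

fn main() {
    for path in toml_files_in("/etc/alis") {
        println!("candidate config: {}", path.display());
    }
}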
main.rs
use alis_bot_rs::*; use clap::{App, Arg, ArgMatches}; use failure::Error; use futures::prelude::*; use glob::glob; use irc::client::prelude::*; use log::{debug, error, info}; use std::path::PathBuf; use std::sync::mpsc::channel; use std::sync::{Arc, Condvar, Mutex}; use std::thread; use tokio::runtime::Runtime; #[macro_use] extern crate failure; const CONFIG_FILE_OPT: &str = "config"; const CONFIG_DIR_OPT: &str = "conf-dir"; const CONFIG_FILE_EXT: &str = "toml"; const DEFAULT_CONFIG_FILE: &str = "example_config.toml"; fn build_app() -> App<'static> { App::new("alis-bot-rs") .version("1.0") .about("alis-unofficial IRC bot") .arg( Arg::new("config") .about("configuration file(s) to use") .takes_value(true) .short('c') .long("config") .value_name("FILE") .multiple(true) .conflicts_with("conf-dir"), ) .arg("-d, --conf-dir=[DIR] 'configuration directory to use'") } fn main() { let matches = build_app().get_matches(); env_logger::init(); let configs = match get_config_paths_from_cli(matches) { Ok(c) => c, Err(e) => { error!("{}", e); match get_config_path_from_default() { Ok(c) => c, Err(e) => { error!("{}", e); return; } } } }; let rt = Runtime::new().unwrap(); /* tasked instances */ rt.block_on(async move { for config in configs { tokio::spawn(async move { run_instance(&config).await }); } }); loop {} } fn get_config_paths_from_cli(matches: ArgMatches) -> Result<Vec<PathBuf>, Error> { let paths: Vec<PathBuf> = { if matches.is_present(CONFIG_FILE_OPT) { matches .values_of(CONFIG_FILE_OPT) .unwrap() .filter_map(|s| config_file_is_valid(PathBuf::from(s)).ok()) .collect() } else if matches.is_present(CONFIG_DIR_OPT) { if let Some(user_glob) = matches.value_of(CONFIG_DIR_OPT) { let user_glob = format!("{}/*.{}", user_glob, CONFIG_FILE_EXT); glob(&user_glob) .expect("Failed to read glob pattern") .filter_map(|s| s.ok()) .filter_map(|s| config_file_is_valid(s).ok()) .collect() } else { return Err(format_err!("No directory value specified")); } } else { return Err(format_err!( "No configuration file specified, using default." 
)); } }; if paths.len() == 0 { return Err(format_err!("No valid configuration files found")); } Ok(paths) } fn config_file_is_valid(path: PathBuf) -> Result<PathBuf, Error> { let error; if let Ok(config) = Config::load(&path) { if let Some(_server) = config.server { return Ok(path); } else { error = format_err!( "Configuration file: {}, no server specified", path.as_path().display().to_string() ); } } else { error = format_err!("File not found: {}", path.as_path().display().to_string()); } error!("{}", error); Err(error) } fn get_config_path_from_default() -> Result<Vec<PathBuf>, Error> { let path = match config_file_is_valid(PathBuf::from(DEFAULT_CONFIG_FILE)) { Ok(p) => p, Err(e) => return Err(e), }; info!( "Using default configuration file: {}", path.as_path().display().to_string() ); Ok(vec![path]) } async fn run_instance(config: &PathBuf) -> irc::error::Result<()> { let config = Config::load(&config)?; let mut client = Client::from_config(config.clone()).await?; client.identify()?; let mut stream = client.stream()?; if let Some(server) = config.server { info!("Connected to {}", server); } let mut server_name: Option<String> = None; let listing = ChannelListing::new(); // private messages mpsc channel let (ms, mr) = channel::<Message>(); // shared client let client = Arc::new(client); let privmsg_client = Arc::clone(&client); // Mutex with condition for listing access let mutcond: Arc<(Mutex<(bool, ChannelListing)>, Condvar)> = Arc::new((Mutex::new((false, listing)), Condvar::new())); let c_mutcond = Arc::clone(&mutcond); let privmsg_thread = thread::spawn(move || loop { let message = mr.recv().unwrap(); if let Command::PRIVMSG(_target, msg) = &message.command { let source = match message.source_nickname() { Some(s) => s, None => continue, }; privmsg_parse(&privmsg_client, &c_mutcond, &source, &msg); } }); while let Some(message) = stream.next().await.transpose()? { match &message.command { Command::PRIVMSG(target, _msg) => { // responds only to private message, ignoring unspecified source and server messages if target.eq(&client.current_nickname()) { let source = if let Some(s) = message.source_nickname() { s } else { continue; }; match &server_name { Some(server_name) if source.eq(server_name) => continue, _ => ms.send(message).unwrap(), } } } Command::Response(rpl_type, v) if *rpl_type == Response::RPL_LIST => { /* updating channel list */ let &(ref mtx, ref _cnd) = &*mutcond; let mut guard = mtx.lock().unwrap(); let listing = &mut guard.1; listing.add_channel(v); } Command::Response(rpl_type, _v) if *rpl_type == Response::RPL_LISTEND => { let &(ref mtx, ref cnd) = &*mutcond; let mut guard = mtx.lock().unwrap(); let listing = &mut guard.1; listing.set_timestamp(); debug!( "Channel list request...done. 
{} channels received", &listing.len() ); /* listing made available from now */ guard.0 = true; cnd.notify_all(); } Command::Response(rpl_type, _) if *rpl_type == Response::RPL_WELCOME => { if let Some(Prefix::ServerName(name)) = &message.prefix { server_name = Some(name.to_string()); } send_list_command(&client); } _ => (), } } let _ = privmsg_thread.join(); Ok(()) } #[cfg(test)] mod tests { use super::*; use std::fs::{rename, File}; use std::io::Write; use tempfile::Builder; #[test] fn conflicting_args() { let cmd = ["alis-bot-rs", "-c", "some_file", "-d", "some_dir"].iter(); let matches = build_app().try_get_matches_from(cmd); assert!(matches.is_err()); } #[test] fn multiple_files_on_c_option() { let mut expected: Vec<_> = Vec::new(); let dir = Builder::new() .prefix("test") .rand_bytes(0) .tempdir() .unwrap(); for i in 1..3 { let file_path = dir.path().join(format! {"{}_file.toml", i}); let mut file = File::create(&file_path).unwrap(); writeln!(file, "server = \"test\"").unwrap(); expected.push(file_path); } let cmd = [ "alis-bot-rs", "-c", "/tmp/test/1_file.toml", "/tmp/test/2_file.toml", ] .iter(); let matches = build_app().get_matches_from(cmd); let result = get_config_paths_from_cli(matches).unwrap(); assert_eq!(result, expected); let unvalid_file = dir.path().join("error_file.toml"); let _file = File::create(&unvalid_file).unwrap(); let cmd = [ "alis-bot-rs",
"/tmp/test/1_file.toml", "/tmp/test/2_file.toml", "/tmp/test/error_file.toml", ] .iter(); let matches = build_app().get_matches_from(cmd); let result = get_config_paths_from_cli(matches).unwrap(); assert_eq!(result, expected); } #[test] fn multiple_files_in_directory() { let mut expected: Vec<_> = Vec::new(); let dir = Builder::new() .prefix("dir") .rand_bytes(0) .tempdir() .unwrap(); for i in 1..4 { let file_path = dir.path().join(format! {"{}_file.toml", i}); let mut file = File::create(&file_path).unwrap(); writeln!(file, "server = \"test\"").unwrap(); expected.push(file_path); } let cmd = ["alis-bot-rs", "-d", "/tmp/dir"].iter(); let matches = build_app().get_matches_from(cmd); let result = get_config_paths_from_cli(matches).unwrap(); assert_eq!(result, expected); } #[test] fn directory_failures_errors() { let cmd = ["alis-bot-rs", "-d", "/unaccessible/path"].iter(); let matches = build_app().get_matches_from(cmd); assert!(get_config_paths_from_cli(matches).is_err()); let _dir = Builder::new() .prefix("empty") .rand_bytes(0) .tempdir() .unwrap(); let cmd = ["alis-bot-rs", "-d", "/empty/"].iter(); let matches = build_app().get_matches_from(cmd); assert!( get_config_paths_from_cli(matches).is_err(), "No valid configuration files found" ); } #[test] fn use_default_config() { let cmd = ["alis-bot-rs"].iter(); let matches = build_app().get_matches_from(cmd); assert!( get_config_paths_from_cli(matches).is_err(), "No configuration file specified" ) } #[test] fn no_default_config_file() { rename("example_config.toml", "tmp_test.toml").unwrap(); assert!(get_config_path_from_default().is_err()); rename("tmp_test.toml", "example_config.toml").unwrap(); } }
"-c",
random_line_split
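run_instance in the record above shares the channel listing between threads through a Mutex<(bool, ChannelListing)> paired with a Condvar: the stream loop fills the listing, flips the flag, and notifies on RPL_LISTEND, while readers wait until the flag is set. A self-contained sketch of that wait/notify handshake, with a plain Vec<String> standing in for ChannelListing:

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    // (ready flag, payload) guarded together, like the (bool, ChannelListing) pair.
    let pair = Arc::new((Mutex::new((false, Vec::<String>::new())), Condvar::new()));
    let producer = Arc::clone(&pair);

    thread::spawn(move || {
        let (mtx, cnd) = (&producer.0, &producer.1);
        let mut guard = mtx.lock().unwrap();
        guard.1.push("#rust".to_string()); // fill the listing
        guard.0 = true;                    // mark it available
        cnd.notify_all();
    });

    let (mtx, cnd) = (&pair.0, &pair.1);
    let mut guard = mtx.lock().unwrap();
    while !guard.0 {
        // wait() releases the lock while blocked and reacquires it on wake
        guard = cnd.wait(guard).unwrap();
    }
    println!("listing ready: {:?}", guard.1);
}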
main.rs
use alis_bot_rs::*; use clap::{App, Arg, ArgMatches}; use failure::Error; use futures::prelude::*; use glob::glob; use irc::client::prelude::*; use log::{debug, error, info}; use std::path::PathBuf; use std::sync::mpsc::channel; use std::sync::{Arc, Condvar, Mutex}; use std::thread; use tokio::runtime::Runtime; #[macro_use] extern crate failure; const CONFIG_FILE_OPT: &str = "config"; const CONFIG_DIR_OPT: &str = "conf-dir"; const CONFIG_FILE_EXT: &str = "toml"; const DEFAULT_CONFIG_FILE: &str = "example_config.toml"; fn build_app() -> App<'static> { App::new("alis-bot-rs") .version("1.0") .about("alis-unofficial IRC bot") .arg( Arg::new("config") .about("configuration file(s) to use") .takes_value(true) .short('c') .long("config") .value_name("FILE") .multiple(true) .conflicts_with("conf-dir"), ) .arg("-d, --conf-dir=[DIR] 'configuration directory to use'") } fn main() { let matches = build_app().get_matches(); env_logger::init(); let configs = match get_config_paths_from_cli(matches) { Ok(c) => c, Err(e) => { error!("{}", e); match get_config_path_from_default() { Ok(c) => c, Err(e) => { error!("{}", e); return; } } } }; let rt = Runtime::new().unwrap(); /* tasked instances */ rt.block_on(async move { for config in configs { tokio::spawn(async move { run_instance(&config).await }); } }); loop {} } fn get_config_paths_from_cli(matches: ArgMatches) -> Result<Vec<PathBuf>, Error> { let paths: Vec<PathBuf> = { if matches.is_present(CONFIG_FILE_OPT) { matches .values_of(CONFIG_FILE_OPT) .unwrap() .filter_map(|s| config_file_is_valid(PathBuf::from(s)).ok()) .collect() } else if matches.is_present(CONFIG_DIR_OPT) { if let Some(user_glob) = matches.value_of(CONFIG_DIR_OPT) { let user_glob = format!("{}/*.{}", user_glob, CONFIG_FILE_EXT); glob(&user_glob) .expect("Failed to read glob pattern") .filter_map(|s| s.ok()) .filter_map(|s| config_file_is_valid(s).ok()) .collect() } else { return Err(format_err!("No directory value specified")); } } else { return Err(format_err!( "No configuration file specified, using default." 
)); } }; if paths.len() == 0 { return Err(format_err!("No valid configuration files found")); } Ok(paths) } fn config_file_is_valid(path: PathBuf) -> Result<PathBuf, Error> { let error; if let Ok(config) = Config::load(&path) { if let Some(_server) = config.server { return Ok(path); } else { error = format_err!( "Configuration file: {}, no server specified", path.as_path().display().to_string() ); } } else { error = format_err!("File not found: {}", path.as_path().display().to_string()); } error!("{}", error); Err(error) } fn get_config_path_from_default() -> Result<Vec<PathBuf>, Error> { let path = match config_file_is_valid(PathBuf::from(DEFAULT_CONFIG_FILE)) { Ok(p) => p, Err(e) => return Err(e), }; info!( "Using default configuration file: {}", path.as_path().display().to_string() ); Ok(vec![path]) } async fn run_instance(config: &PathBuf) -> irc::error::Result<()> { let config = Config::load(&config)?; let mut client = Client::from_config(config.clone()).await?; client.identify()?; let mut stream = client.stream()?; if let Some(server) = config.server { info!("Connected to {}", server); } let mut server_name: Option<String> = None; let listing = ChannelListing::new(); // private messages mpsc channel let (ms, mr) = channel::<Message>(); // shared client let client = Arc::new(client); let privmsg_client = Arc::clone(&client); // Mutex with condition for listing access let mutcond: Arc<(Mutex<(bool, ChannelListing)>, Condvar)> = Arc::new((Mutex::new((false, listing)), Condvar::new())); let c_mutcond = Arc::clone(&mutcond); let privmsg_thread = thread::spawn(move || loop { let message = mr.recv().unwrap(); if let Command::PRIVMSG(_target, msg) = &message.command { let source = match message.source_nickname() { Some(s) => s, None => continue, }; privmsg_parse(&privmsg_client, &c_mutcond, &source, &msg); } }); while let Some(message) = stream.next().await.transpose()? { match &message.command { Command::PRIVMSG(target, _msg) => { // responds only to private message, ignoring unspecified source and server messages if target.eq(&client.current_nickname()) { let source = if let Some(s) = message.source_nickname() { s } else { continue; }; match &server_name { Some(server_name) if source.eq(server_name) => continue, _ => ms.send(message).unwrap(), } } } Command::Response(rpl_type, v) if *rpl_type == Response::RPL_LIST => { /* updating channel list */ let &(ref mtx, ref _cnd) = &*mutcond; let mut guard = mtx.lock().unwrap(); let listing = &mut guard.1; listing.add_channel(v); } Command::Response(rpl_type, _v) if *rpl_type == Response::RPL_LISTEND => { let &(ref mtx, ref cnd) = &*mutcond; let mut guard = mtx.lock().unwrap(); let listing = &mut guard.1; listing.set_timestamp(); debug!( "Channel list request...done. 
{} channels received", &listing.len() ); /* listing made available from now */ guard.0 = true; cnd.notify_all(); } Command::Response(rpl_type, _) if *rpl_type == Response::RPL_WELCOME => { if let Some(Prefix::ServerName(name)) = &message.prefix { server_name = Some(name.to_string()); } send_list_command(&client); } _ => (), } } let _ = privmsg_thread.join(); Ok(()) } #[cfg(test)] mod tests { use super::*; use std::fs::{rename, File}; use std::io::Write; use tempfile::Builder; #[test] fn conflicting_args() { let cmd = ["alis-bot-rs", "-c", "some_file", "-d", "some_dir"].iter(); let matches = build_app().try_get_matches_from(cmd); assert!(matches.is_err()); } #[test] fn multiple_files_on_c_option() { let mut expected: Vec<_> = Vec::new(); let dir = Builder::new() .prefix("test") .rand_bytes(0) .tempdir() .unwrap(); for i in 1..3 { let file_path = dir.path().join(format! {"{}_file.toml", i}); let mut file = File::create(&file_path).unwrap(); writeln!(file, "server = \"test\"").unwrap(); expected.push(file_path); } let cmd = [ "alis-bot-rs", "-c", "/tmp/test/1_file.toml", "/tmp/test/2_file.toml", ] .iter(); let matches = build_app().get_matches_from(cmd); let result = get_config_paths_from_cli(matches).unwrap(); assert_eq!(result, expected); let unvalid_file = dir.path().join("error_file.toml"); let _file = File::create(&unvalid_file).unwrap(); let cmd = [ "alis-bot-rs", "-c", "/tmp/test/1_file.toml", "/tmp/test/2_file.toml", "/tmp/test/error_file.toml", ] .iter(); let matches = build_app().get_matches_from(cmd); let result = get_config_paths_from_cli(matches).unwrap(); assert_eq!(result, expected); } #[test] fn multiple_files_in_directory() { let mut expected: Vec<_> = Vec::new(); let dir = Builder::new() .prefix("dir") .rand_bytes(0) .tempdir() .unwrap(); for i in 1..4 { let file_path = dir.path().join(format! {"{}_file.toml", i}); let mut file = File::create(&file_path).unwrap(); writeln!(file, "server = \"test\"").unwrap(); expected.push(file_path); } let cmd = ["alis-bot-rs", "-d", "/tmp/dir"].iter(); let matches = build_app().get_matches_from(cmd); let result = get_config_paths_from_cli(matches).unwrap(); assert_eq!(result, expected); } #[test] fn
() { let cmd = ["alis-bot-rs", "-d", "/unaccessible/path"].iter(); let matches = build_app().get_matches_from(cmd); assert!(get_config_paths_from_cli(matches).is_err()); let _dir = Builder::new() .prefix("empty") .rand_bytes(0) .tempdir() .unwrap(); let cmd = ["alis-bot-rs", "-d", "/empty/"].iter(); let matches = build_app().get_matches_from(cmd); assert!( get_config_paths_from_cli(matches).is_err(), "No valid configuration files found" ); } #[test] fn use_default_config() { let cmd = ["alis-bot-rs"].iter(); let matches = build_app().get_matches_from(cmd); assert!( get_config_paths_from_cli(matches).is_err(), "No configuration file specified" ) } #[test] fn no_default_config_file() { rename("example_config.toml", "tmp_test.toml").unwrap(); assert!(get_config_path_from_default().is_err()); rename("tmp_test.toml", "example_config.toml").unwrap(); } }
directory_failures_errors
identifier_name
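The tests in the record above build fixtures with tempfile::Builder, using rand_bytes(0) so the directory name is deterministic (the hard-coded /tmp/... paths then assume the platform temp dir is /tmp). A minimal sketch of that fixture pattern:

use std::fs::File;
use std::io::Write;
use tempfile::Builder;

fn main() -> std::io::Result<()> {
    // rand_bytes(0) drops the random suffix, so the path is exactly <tmpdir>/fixture
    let dir = Builder::new().prefix("fixture").rand_bytes(0).tempdir()?;
    let path = dir.path().join("1_file.toml");
    let mut file = File::create(&path)?;
    writeln!(file, "server = \"test\"")?;
    println!("wrote {}", path.display());
    Ok(()) // the directory is deleted when `dir` drops
}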
encode.rs
match n_down { 1 => { for t in text.split_whitespace() { grams.push(vec![t]); } // for some reason the ngrams crate has difficulty when n == 1, // so in this case we generate the n gram ourselves by a simple whitespace // delimiter. }, _ => { grams = text.split_whitespace().ngrams(n_down).collect(); } } for v in grams { if n_down == 1 { total_words += 1; } if let Some(val) = hash.get_mut(&v) { *val += 1; } else { if n_down == 1 { unique_words.push(v[0]); // if we are on the last n-depth (n == 1), // that means the vectors only contain one word. // if the hash does not have this vector of one word yet, then // this is the first time we are seeing it, so we will add it to // a vector of unique words. } hash.insert(v, 1); // if the hash does not have this vector yet, add it // with occurance 1, and next time we see this vector, // increment the occurance } } n_down -= 1; } (hash, unique_words, total_words) } pub fn get_restricted_chars(char_map: &HashMap<char, usize>, gib_word: &String) -> Vec<char> { let mut restricted_chars = vec![]; for key in char_map.keys() { restricted_chars.push(*key); } let word_chars: Vec<char> = gib_word.chars().collect(); for c in word_chars { if restricted_chars.contains(&c) { let char_index = restricted_chars.iter().position(|&r| r == c).unwrap(); restricted_chars.remove(char_index); } } restricted_chars } pub fn can_use_word(word: &str, good_chars: &Vec<char>, restricted_chars: &Vec<char>) -> bool { let word_chars: Vec<char> = word.chars().collect(); let mut good_chars_used = vec![0; good_chars.len()]; for c in word_chars { if restricted_chars.contains(&c) { return false; } if good_chars.contains(&c) { let char_index = good_chars.iter().position(|&r| r == c).unwrap(); good_chars_used[char_index] += 1; } } for i in good_chars_used { if i == 0 { return false; } } true } pub fn get_initial_words<'a>(hashmap: &'a HashMap<Vec<&str>, usize>, n: usize) -> Vec<&'a str> { let mut vecs_with_n_items = vec![]; for key in hashmap.keys() { if key.len() == n { vecs_with_n_items.push(key); } } let mut count_hash = HashMap::new(); for vec in vecs_with_n_items { let mut n_minus_1_slice = vec![]; let mut counter = 1; for word in vec { if counter < n { n_minus_1_slice.push(*word); } else { break; } counter += 1; } if let Some(val) = count_hash.get_mut(&n_minus_1_slice) { *val += 1; } else { count_hash.insert(n_minus_1_slice, 1); } } let mut best_vec_count = 0; let mut best_vec = vec![]; for vec in count_hash.keys() { let vec_count = count_hash.get(vec).unwrap(); if vec_count > &best_vec_count { best_vec_count = *vec_count; best_vec = vec.to_vec(); } } best_vec } pub fn get_probability_of(word: &str, given: &Vec<&str>, hashmap: &HashMap<Vec<&str>, usize>, num_words: f64) -> f64 { let count_of_given = match given.len() { 0 => num_words, _ => { if let Some(count) = hashmap.get(given) { *count as f64 } else { return 0.0; } }, }; let mut word_vec = given.clone(); word_vec.push(word); let count_of_sequence = if let Some(count) = hashmap.get(&word_vec) { *count as f64 } else { return 0.0; }; count_of_sequence / count_of_given } pub fn get_best_word<'a>( gram: &HashMap<Vec<&str>, usize>, usable_words: &Vec<&'a str>, current_words: &Vec<&str>, n: usize, total_words: f64, ) -> (&'a str, usize) { let mut all_p_zero = true; let mut use_n = n; let mut max_p_index = 0; while all_p_zero { let mut ngram_slice = vec![]; let mut counter = 1; for word in current_words.iter().rev() { if counter < use_n { ngram_slice.push(*word); } counter += 1; } ngram_slice.reverse(); let last_word = 
ngram_slice.last().unwrap(); let mut max_p = 0.0; max_p_index = 0; for i in 0..usable_words.len() { let w = &usable_words[i]; if w == last_word { continue; } let p = get_probability_of(w, &ngram_slice, gram, total_words); // let p = get_interpolated_probability(w, &ngram_slice, gram, total_words); // println!("P({} | {:?}) = {}", w, ngram_slice, p); if p > max_p { all_p_zero = false; max_p_index = i; max_p = p; } } if all_p_zero { use_n -= 1; } // comment this if using interpolation if use_n == 1 { // no point in picking the word that appears the most... // take our chances and break, and pick a random word from the list. // println!("reached bottom of use n!"); break; } } use_n -= 1; if use_n == 0 { let mut rng = rand::thread_rng(); max_p_index = rng.gen_range(0, usable_words.len()); } (usable_words[max_p_index], use_n) } pub fn wordify( gram: &HashMap<Vec<&str>, usize>, n: usize, file_words: Vec<String>, rng: &mut StdRng, bit_to_char_map: &mut HashMap<usize, char>, unique_words: &Vec<&str>, total_words: f64, consecutive_skips: usize, depth_skip_threshold: usize, use_shuffle: bool, ) -> Result<String, String> { let mut char_to_bit_map = HashMap::new(); let mut num_bits = 0; for bit_val in bit_to_char_map.keys() { num_bits += 1; char_to_bit_map.insert(*bit_to_char_map.get(&bit_val).unwrap(), *bit_val); } num_bits -= 1; // let num_words = unique_words.len(); let mut succ_count = 0; let mut fail_count = 0; let mut skip_count = 0; let mut n_gram_used = vec![0; n]; let mut text_data = String::from(""); let mut current_words = get_initial_words(gram, n); let mut i = 0; let mut consecutive_skips_used = 0; let mut skip_words = vec![]; if!use_shuffle { for w in unique_words { if utils::is_skip_word(w, &char_to_bit_map) { skip_words.push(*w); } } // if not shuffling, skip words get filled in once before // iterating. } while i < file_words.len() { let gibberish_word = &file_words[i]; let mut used_skip_word = false; let mut use_keys = vec![]; for key in char_to_bit_map.keys() { use_keys.push(*key); } let restricted_chars = get_restricted_chars(&char_to_bit_map, gibberish_word); let mut usable_words = vec![]; if use_shuffle { skip_words = vec![]; // if shuffling the bit to char map, // we have to reset the skip words every time because they might // be different } // let mut value_num_list = vec![0; max_value]; for w in unique_words { // let word_val = get_value_from_word(w, char_value_map, max_value); // // println!("value for {}: {}", w, get_value_from_word(w, char_value_map, 2)); // value_num_list[word_val] += 1; if use_shuffle && utils::is_skip_word(w, &char_to_bit_map) { skip_words.push(*w); } if can_use_word(w, &gibberish_word.chars().collect(), &restricted_chars) { usable_words.push(*w); } } match usable_words.len() { 0 => { fail_count += 1; text_data.push_str(&gibberish_word); text_data.push_str(" "); current_words.push("."); consecutive_skips_used = 0; // if there are NO usable words at all then we 'failed' // to encode this word. we push the gibberish word as is to // the text data output because we still need to be able to decode it. // we add a. to current words to stimulate the ngram probability // for the next word. }, 1 => { succ_count += 1; let best_word = &usable_words[0]; text_data.push_str(best_word); current_words.push(best_word); text_data.push_str(" "); n_gram_used[0] += 1; consecutive_skips_used = 0; // there is only one usable word, so use it without // estimating any probabilities. ngram used a depth // of 0 since we are not evaluating ngrams here. 
}, _ => { let (best_word, n_used) = get_best_word( gram, &usable_words, &current_words, n, total_words, ); // user can fine-tune the quality of the text output using depth_skip_threshold // and consecutive skips allowed. The higher both are, the more skip words are used // which can potentially make the output look more like real text, at the // expense of encoding less bits per word on average. // consecutive skips used sets a limit to this such that it forces // the program to eventually encode a word, otherwise it might // loop forever in certain situations. // depth skip threshold allows user to say which n-depths are acceptable. // lower n-depths produce less realistic. if n_used <= depth_skip_threshold && consecutive_skips_used < consecutive_skips && skip_words.len() > 0 { let (best_word2, n_used2) = get_best_word( gram, &skip_words, &current_words, n, total_words ); n_gram_used[n_used2] += 1; current_words.push(best_word2); text_data.push_str(best_word2); text_data.push_str(" "); skip_count += 1; used_skip_word = true; consecutive_skips_used += 1; i -= 1; // we used a skip word, make sure to keep i at its current // level so that we try to encode this word again } else { succ_count += 1; n_gram_used[n_used] += 1; text_data.push_str(best_word); current_words.push(best_word); text_data.push_str(" "); consecutive_skips_used = 0; // if not using a skip word, we encoded the best possible word according // to ngrams. add the best word to the text output, as well as the current // words vec which is used to determine word probabilities for the next // iteration } } }; if!used_skip_word && use_shuffle { // only shuffle the bit to char map if we encoded a word // if we used a skip word, we do NOT want to shuffle as we // will not be able to properly decode utils::fill_bit_to_char_map(rng, bit_to_char_map); char_to_bit_map = utils::make_char_to_bit_map(bit_to_char_map); } i += 1; } text_data.pop(); // remove trailing space let num_bytes = (file_words.len() * num_bits) / 8; // print summary println!("\nencoding using {} bits per word. 
file had {} bytes, ie: {} words to wordify", num_bits, num_bytes, file_words.len()); println!("succesfully filled {} words", (succ_count + skip_count)); println!("of the {} words, {} were skip words", (succ_count + skip_count), skip_count); println!("failed to find a word {} times", fail_count); println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / (succ_count + skip_count) as f64)); println!("\nN-depth summary: {:?}", n_gram_used); Ok(text_data) } pub fn get_value_vec_from_char_value_mode( file_contents: &Vec<u8>, num_bits: usize, use_shuffle: bool, rng: &mut StdRng, char_to_value_map: &mut HashMap<char, usize>, ) -> Vec<u8> { let mut cursor = Cursor::new(&file_contents); let mut num_bits_remain = file_contents.len() * 8; let mut bitreader = BitReader::endian(&mut cursor, BigEndian); let mut value_vec = vec![]; while num_bits_remain > 0 { let num_bits_to_read = if num_bits_remain < num_bits as usize { num_bits_remain as u32 } else { num_bits as u32 }; let value: u8 = bitreader.read(num_bits_to_read).unwrap(); if use_shuffle { utils::shuffle_char_value_map(rng, char_to_value_map); } value_vec.push(value); num_bits_remain -= num_bits_to_read as usize; } value_vec } pub fn get_value_vec( bit_to_char_map: &mut HashMap<usize, char>, file_contents: &Vec<u8>, num_bits: usize, use_shuffle: bool, rng: &mut StdRng, ) -> Vec<String> { let mut cursor = Cursor::new(&file_contents); let mut num_bits_remain = file_contents.len() * 8; let mut bitreader = BitReader::endian(&mut cursor, BigEndian); let mut sorted_keys = vec![]; let mut value_vec = vec![]; for byte_val in bit_to_char_map.keys() { sorted_keys.push(*byte_val); } sorted_keys.sort_by(|a, b| b.cmp(a)); // sort keys once so you dont need to do it in the iteration. // the bit to char map maps bit positions: (0, 1, 2, 4, 8, 16, 32, etc) // to characters. we iterate over the bit position values, and push the value // to a sorted_keys vec, and then sort in descending order // (ie: 0th element is largest) // we do this because the user provides the number of bits. // so if the user says number // of bits is 3, then the sorted keys will look like: [4, 2, 1, 0] while num_bits_remain > 0 { let num_bits_to_read = if num_bits_remain < num_bits as usize { num_bits_remain as u32 } else { num_bits as u32 }; let value: u8 = bitreader.read(num_bits_to_read).unwrap(); let char_str = utils::get_chars_from_value(value, bit_to_char_map, &sorted_keys); if use_shuffle { utils::fill_bit_to_char_map(rng, bit_to_char_map); } value_vec.push(char_str); num_bits_remain -= num_bits_to_read as usize; } // iterate the file that you wish to encode, reading num_bits at a time. // for each value you read, generate characters that map to the value using the bit to char map // if using shuffle, the bit to char map gets shuffled according to a seeded rng. // at the end you have a vector of gibberish strings that you will try to hide // in words using ngrams. value_vec } pub fn wordify_from_char_value_mode( gram: &HashMap<Vec<&str>, usize>, char_to_value_map: &mut HashMap<char, usize>, n: usize, file_values: Vec<u8>, num_bits: usize, unique_words: &Vec<&str>, total_words: f64, use_shuffle: bool, value_mode: utils::ValueMode, rng: &mut StdRng, ) -> Result<String, String> { let mut succ_count = 0; let mut n_gram_used = vec![0; n]; let mut text_data = String::from(""); let mut current_words = get_initial_words(gram, n); let mut i = 0; while i < file_values.len() { let current_val = file_values[i]; let mut usable_words = vec![]; for w in unique_words { if *w == "." 
|| *w == "," || *w == "?" || *w == ";" || *w == "!" { // dont use punctuation in char_value mode because // punctuation isnt ignored by the decoder. if you want // to leave punctuation in, you would also have to leave // the spaces around them which would result in a stego text // like: he likes cars, toys, and trucks. // for that reason, I chose to ignore punctuation continue; } let w_val = utils::get_value_from_chars(w, &char_to_value_map, &value_mode); if w_val == current_val as usize { usable_words.push(*w); } } match usable_words.len() { 0 => { panic!("NOT ENOUGH WORDS WITH VALUE {}", current_val); }, 1 => { succ_count += 1; let best_word = &usable_words[0]; text_data.push_str(best_word); current_words.push(best_word); text_data.push_str(" "); n_gram_used[0] += 1; }, _ => { let (best_word, n_used) = get_best_word( gram, &usable_words, &current_words, n, total_words, ); succ_count += 1; n_gram_used[n_used] += 1; text_data.push_str(best_word); current_words.push(best_word); text_data.push_str(" "); } }; if use_shuffle { utils::shuffle_char_value_map(rng, char_to_value_map); } i += 1; } text_data.pop(); // remove trailing space let num_bytes = (file_values.len() * num_bits) / 8; // print summary println!("\nencoding using {} bits per word. file had {} bytes, ie: {} words to wordify", num_bits, num_bytes, file_values.len()); println!("succesfully filled {} words", succ_count); println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / succ_count as f64)); println!("\nN-depth summary: {:?}", n_gram_used); Ok(text_data) } pub fn encode_char_bit_map( file: &str, output: &str, seed_str: &str, word_file_name: &str, n_depth: usize, consecutive_skips: usize, depth_skip_threshold: usize, num_bits: usize, use_shuffle: bool, ) -> Result<(), String> { let mut rng = utils::create_rng_from_seed(seed_str); let mut original_rng = utils::create_rng_from_seed(seed_str); let contents = utils::get_file_contents(file)?; let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?; let mut bit_to_char_map = utils::make_bit_to_char_map(num_bits); let mut original_bit_to_char_map = bit_to_char_map.clone(); utils::fill_bit_to_char_map(&mut rng, &mut bit_to_char_map); utils::fill_bit_to_char_map(&mut original_rng, &mut original_bit_to_char_map); let value_vec = get_value_vec(&mut bit_to_char_map, &contents, num_bits, use_shuffle, &mut rng); word_file_data = word_file_data.to_lowercase(); word_file_data = utils::format_text_for_ngrams(&word_file_data); let ( gram_hash, unique_words, total_words, ) = generate_ngrams(&word_file_data, n_depth); let text_data = wordify( &gram_hash, n_depth, value_vec, &mut original_rng, &mut original_bit_to_char_map, &unique_words, total_words as f64, consecutive_skips, depth_skip_threshold, use_shuffle, )?; fs::write(output, text_data).unwrap(); Ok(()) } pub fn encode_char_value_map( file: &str, output: &str, seed_str: &str, word_file_name: &str, n_depth: usize, consecutive_skips: usize, depth_skip_threshold: usize, num_bits: usize, use_shuffle: bool, value_mode: utils::ValueMode, ) -> Result<(), String> { let mut rng = utils::create_rng_from_seed(seed_str); let mut original_rng = utils::create_rng_from_seed(seed_str); let contents = utils::get_file_contents(file)?; let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?; let mut char_to_value_map = utils::make_char_to_value_map(num_bits); let mut original_char_to_value_map = char_to_value_map.clone(); utils::shuffle_char_value_map(&mut rng, &mut char_to_value_map); 
utils::shuffle_char_value_map(&mut original_rng, &mut original_char_to_value_map); // panic!("dsa"); let mut value_vec = get_value_vec_from_char_value_mode( &contents, num_bits, use_shuffle, &mut rng, &mut char_to_value_map, ); word_file_data = word_file_data.to_lowercase(); word_file_data = utils::format_text_for_ngrams(&word_file_data); let ( gram_hash, unique_words, total_words, ) = generate_ngrams(&word_file_data, n_depth); let text_data = wordify_from_char_value_mode( &gram_hash, &mut original_char_to_value_map, n_depth, value_vec, num_bits, &unique_words, total_words as f64, use_shuffle, value_mode, &mut original_rng, )?; fs::write(output, text_data).unwrap(); Ok(()) } pub fn
(matches: &ArgMatches) -> Result<(), String> { let file =
encode
identifier_name
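get_probability_of in the record above is a plain maximum-likelihood conditional estimate over raw ngram counts: P(word | given) = count(given ++ word) / count(given). A self-contained sketch of the same computation (it omits the empty-context branch, which the original backs with the total word count):

use std::collections::HashMap;

// P(word | given) from raw ngram counts; 0.0 when either count is missing.
fn probability_of<'a>(
    word: &'a str,
    given: &[&'a str],
    counts: &HashMap<Vec<&'a str>, usize>,
) -> f64 {
    let denom = match counts.get(given) {
        Some(c) => *c as f64,
        None => return 0.0,
    };
    let mut seq = given.to_vec();
    seq.push(word);
    match counts.get(&seq) {
        Some(c) => *c as f64 / denom,
        None => 0.0,
    }
}

fn main() {
    let mut counts: HashMap<Vec<&str>, usize> = HashMap::new();
    counts.insert(vec!["the"], 2);        // "the" seen twice
    counts.insert(vec!["the", "cat"], 1); // "the cat" seen once
    println!("{}", probability_of("cat", &["the"], &counts)); // 0.5
}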
encode.rs
match n_down { 1 => { for t in text.split_whitespace() { grams.push(vec![t]); } // for some reason the ngrams crate has difficulty when n == 1, // so in this case we generate the n gram ourselves by a simple whitespace // delimiter. }, _ => { grams = text.split_whitespace().ngrams(n_down).collect(); } } for v in grams { if n_down == 1 { total_words += 1; } if let Some(val) = hash.get_mut(&v) { *val += 1; } else { if n_down == 1 { unique_words.push(v[0]); // if we are on the last n-depth (n == 1), // that means the vectors only contain one word. // if the hash does not have this vector of one word yet, then // this is the first time we are seeing it, so we will add it to // a vector of unique words. } hash.insert(v, 1); // if the hash does not have this vector yet, add it // with occurance 1, and next time we see this vector, // increment the occurance } } n_down -= 1; } (hash, unique_words, total_words) } pub fn get_restricted_chars(char_map: &HashMap<char, usize>, gib_word: &String) -> Vec<char> { let mut restricted_chars = vec![]; for key in char_map.keys() { restricted_chars.push(*key); } let word_chars: Vec<char> = gib_word.chars().collect(); for c in word_chars { if restricted_chars.contains(&c) { let char_index = restricted_chars.iter().position(|&r| r == c).unwrap(); restricted_chars.remove(char_index); } } restricted_chars } pub fn can_use_word(word: &str, good_chars: &Vec<char>, restricted_chars: &Vec<char>) -> bool { let word_chars: Vec<char> = word.chars().collect(); let mut good_chars_used = vec![0; good_chars.len()]; for c in word_chars { if restricted_chars.contains(&c) { return false; } if good_chars.contains(&c) { let char_index = good_chars.iter().position(|&r| r == c).unwrap(); good_chars_used[char_index] += 1; } } for i in good_chars_used { if i == 0 { return false; } } true } pub fn get_initial_words<'a>(hashmap: &'a HashMap<Vec<&str>, usize>, n: usize) -> Vec<&'a str> { let mut vecs_with_n_items = vec![]; for key in hashmap.keys() { if key.len() == n { vecs_with_n_items.push(key); } } let mut count_hash = HashMap::new(); for vec in vecs_with_n_items { let mut n_minus_1_slice = vec![]; let mut counter = 1; for word in vec { if counter < n { n_minus_1_slice.push(*word); } else { break; } counter += 1; } if let Some(val) = count_hash.get_mut(&n_minus_1_slice) { *val += 1; } else { count_hash.insert(n_minus_1_slice, 1); } } let mut best_vec_count = 0; let mut best_vec = vec![]; for vec in count_hash.keys() { let vec_count = count_hash.get(vec).unwrap(); if vec_count > &best_vec_count { best_vec_count = *vec_count; best_vec = vec.to_vec(); } } best_vec } pub fn get_probability_of(word: &str, given: &Vec<&str>, hashmap: &HashMap<Vec<&str>, usize>, num_words: f64) -> f64 { let count_of_given = match given.len() { 0 => num_words, _ => { if let Some(count) = hashmap.get(given) { *count as f64 } else { return 0.0; } }, }; let mut word_vec = given.clone(); word_vec.push(word); let count_of_sequence = if let Some(count) = hashmap.get(&word_vec) { *count as f64 } else { return 0.0; }; count_of_sequence / count_of_given } pub fn get_best_word<'a>( gram: &HashMap<Vec<&str>, usize>, usable_words: &Vec<&'a str>, current_words: &Vec<&str>, n: usize, total_words: f64, ) -> (&'a str, usize) { let mut all_p_zero = true; let mut use_n = n; let mut max_p_index = 0; while all_p_zero { let mut ngram_slice = vec![]; let mut counter = 1; for word in current_words.iter().rev() { if counter < use_n { ngram_slice.push(*word); } counter += 1; } ngram_slice.reverse(); let last_word = 
ngram_slice.last().unwrap(); let mut max_p = 0.0; max_p_index = 0; for i in 0..usable_words.len() { let w = &usable_words[i]; if w == last_word { continue; } let p = get_probability_of(w, &ngram_slice, gram, total_words); // let p = get_interpolated_probability(w, &ngram_slice, gram, total_words); // println!("P({} | {:?}) = {}", w, ngram_slice, p); if p > max_p { all_p_zero = false; max_p_index = i; max_p = p; } } if all_p_zero { use_n -= 1; } // comment this if using interpolation if use_n == 1 { // no point in picking the word that appears the most... // take our chances and break, and pick a random word from the list. // println!("reached bottom of use n!"); break; } } use_n -= 1; if use_n == 0 { let mut rng = rand::thread_rng(); max_p_index = rng.gen_range(0, usable_words.len()); } (usable_words[max_p_index], use_n) } pub fn wordify( gram: &HashMap<Vec<&str>, usize>, n: usize, file_words: Vec<String>, rng: &mut StdRng, bit_to_char_map: &mut HashMap<usize, char>, unique_words: &Vec<&str>, total_words: f64, consecutive_skips: usize, depth_skip_threshold: usize, use_shuffle: bool, ) -> Result<String, String> { let mut char_to_bit_map = HashMap::new(); let mut num_bits = 0; for bit_val in bit_to_char_map.keys() { num_bits += 1; char_to_bit_map.insert(*bit_to_char_map.get(&bit_val).unwrap(), *bit_val); } num_bits -= 1; // let num_words = unique_words.len(); let mut succ_count = 0; let mut fail_count = 0; let mut skip_count = 0; let mut n_gram_used = vec![0; n]; let mut text_data = String::from(""); let mut current_words = get_initial_words(gram, n); let mut i = 0; let mut consecutive_skips_used = 0; let mut skip_words = vec![]; if!use_shuffle { for w in unique_words { if utils::is_skip_word(w, &char_to_bit_map) { skip_words.push(*w); } } // if not shuffling, skip words get filled in once before // iterating. } while i < file_words.len() { let gibberish_word = &file_words[i]; let mut used_skip_word = false; let mut use_keys = vec![]; for key in char_to_bit_map.keys() { use_keys.push(*key); } let restricted_chars = get_restricted_chars(&char_to_bit_map, gibberish_word); let mut usable_words = vec![]; if use_shuffle { skip_words = vec![]; // if shuffling the bit to char map, // we have to reset the skip words every time because they might // be different } // let mut value_num_list = vec![0; max_value]; for w in unique_words { // let word_val = get_value_from_word(w, char_value_map, max_value); // // println!("value for {}: {}", w, get_value_from_word(w, char_value_map, 2)); // value_num_list[word_val] += 1; if use_shuffle && utils::is_skip_word(w, &char_to_bit_map) { skip_words.push(*w); } if can_use_word(w, &gibberish_word.chars().collect(), &restricted_chars) { usable_words.push(*w); } } match usable_words.len() { 0 => { fail_count += 1; text_data.push_str(&gibberish_word); text_data.push_str(" "); current_words.push("."); consecutive_skips_used = 0; // if there are NO usable words at all then we 'failed' // to encode this word. we push the gibberish word as is to // the text data output because we still need to be able to decode it. // we add a. to current words to stimulate the ngram probability // for the next word. }, 1 => { succ_count += 1; let best_word = &usable_words[0]; text_data.push_str(best_word); current_words.push(best_word); text_data.push_str(" "); n_gram_used[0] += 1; consecutive_skips_used = 0; // there is only one usable word, so use it without // estimating any probabilities. ngram used a depth // of 0 since we are not evaluating ngrams here. 
}, _ => { let (best_word, n_used) = get_best_word( gram, &usable_words, &current_words, n, total_words, ); // user can fine-tune the quality of the text output using depth_skip_threshold // and consecutive skips allowed. The higher both are, the more skip words are used // which can potentially make the output look more like real text, at the // expense of encoding less bits per word on average. // consecutive skips used sets a limit to this such that it forces // the program to eventually encode a word, otherwise it might // loop forever in certain situations. // depth skip threshold allows user to say which n-depths are acceptable. // lower n-depths produce less realistic. if n_used <= depth_skip_threshold && consecutive_skips_used < consecutive_skips && skip_words.len() > 0 { let (best_word2, n_used2) = get_best_word( gram, &skip_words, &current_words, n, total_words ); n_gram_used[n_used2] += 1; current_words.push(best_word2); text_data.push_str(best_word2); text_data.push_str(" "); skip_count += 1; used_skip_word = true; consecutive_skips_used += 1; i -= 1; // we used a skip word, make sure to keep i at its current // level so that we try to encode this word again } else { succ_count += 1; n_gram_used[n_used] += 1; text_data.push_str(best_word); current_words.push(best_word); text_data.push_str(" "); consecutive_skips_used = 0; // if not using a skip word, we encoded the best possible word according // to ngrams. add the best word to the text output, as well as the current // words vec which is used to determine word probabilities for the next // iteration } } }; if!used_skip_word && use_shuffle { // only shuffle the bit to char map if we encoded a word // if we used a skip word, we do NOT want to shuffle as we // will not be able to properly decode utils::fill_bit_to_char_map(rng, bit_to_char_map); char_to_bit_map = utils::make_char_to_bit_map(bit_to_char_map); } i += 1; } text_data.pop(); // remove trailing space let num_bytes = (file_words.len() * num_bits) / 8; // print summary println!("\nencoding using {} bits per word. file had {} bytes, ie: {} words to wordify", num_bits, num_bytes, file_words.len()); println!("succesfully filled {} words", (succ_count + skip_count)); println!("of the {} words, {} were skip words", (succ_count + skip_count), skip_count); println!("failed to find a word {} times", fail_count); println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / (succ_count + skip_count) as f64)); println!("\nN-depth summary: {:?}", n_gram_used); Ok(text_data) } pub fn get_value_vec_from_char_value_mode( file_contents: &Vec<u8>, num_bits: usize, use_shuffle: bool, rng: &mut StdRng, char_to_value_map: &mut HashMap<char, usize>, ) -> Vec<u8> { let mut cursor = Cursor::new(&file_contents); let mut num_bits_remain = file_contents.len() * 8; let mut bitreader = BitReader::endian(&mut cursor, BigEndian); let mut value_vec = vec![]; while num_bits_remain > 0 { let num_bits_to_read = if num_bits_remain < num_bits as usize { num_bits_remain as u32 } else { num_bits as u32 }; let value: u8 = bitreader.read(num_bits_to_read).unwrap(); if use_shuffle { utils::shuffle_char_value_map(rng, char_to_value_map); } value_vec.push(value); num_bits_remain -= num_bits_to_read as usize; } value_vec } pub fn get_value_vec( bit_to_char_map: &mut HashMap<usize, char>, file_contents: &Vec<u8>, num_bits: usize, use_shuffle: bool, rng: &mut StdRng, ) -> Vec<String>
while num_bits_remain > 0 { let num_bits_to_read = if num_bits_remain < num_bits as usize { num_bits_remain as u32 } else { num_bits as u32 }; let value: u8 = bitreader.read(num_bits_to_read).unwrap(); let char_str = utils::get_chars_from_value(value, bit_to_char_map, &sorted_keys); if use_shuffle { utils::fill_bit_to_char_map(rng, bit_to_char_map); } value_vec.push(char_str); num_bits_remain -= num_bits_to_read as usize; } // iterate the file that you wish to encode, reading num_bits at a time. // for each value you read, generate characters that map to the value using the bit to char map // if using shuffle, the bit to char map gets shuffled according to a seeded rng. // at the end you have a vector of gibberish strings that you will try to hide // in words using ngrams. value_vec } pub fn wordify_from_char_value_mode( gram: &HashMap<Vec<&str>, usize>, char_to_value_map: &mut HashMap<char, usize>, n: usize, file_values: Vec<u8>, num_bits: usize, unique_words: &Vec<&str>, total_words: f64, use_shuffle: bool, value_mode: utils::ValueMode, rng: &mut StdRng, ) -> Result<String, String> { let mut succ_count = 0; let mut n_gram_used = vec![0; n]; let mut text_data = String::from(""); let mut current_words = get_initial_words(gram, n); let mut i = 0; while i < file_values.len() { let current_val = file_values[i]; let mut usable_words = vec![]; for w in unique_words { if *w == "." || *w == "," || *w == "?" || *w == ";" || *w == "!" { // don't use punctuation in char_value mode because // punctuation isn't ignored by the decoder. if you want // to leave punctuation in, you would also have to leave // the spaces around them, which would result in a stego text // like: he likes cars, toys, and trucks. // for that reason, I chose to ignore punctuation continue; } let w_val = utils::get_value_from_chars(w, &char_to_value_map, &value_mode); if w_val == current_val as usize { usable_words.push(*w); } } match usable_words.len() { 0 => { panic!("NOT ENOUGH WORDS WITH VALUE {}", current_val); }, 1 => { succ_count += 1; let best_word = &usable_words[0]; text_data.push_str(best_word); current_words.push(best_word); text_data.push_str(" "); n_gram_used[0] += 1; }, _ => { let (best_word, n_used) = get_best_word( gram, &usable_words, &current_words, n, total_words, ); succ_count += 1; n_gram_used[n_used] += 1; text_data.push_str(best_word); current_words.push(best_word); text_data.push_str(" "); } }; if use_shuffle { utils::shuffle_char_value_map(rng, char_to_value_map); } i += 1; } text_data.pop(); // remove trailing space let num_bytes = (file_values.len() * num_bits) / 8; // print summary println!("\nencoding using {} bits per word. 
file had {} bytes, ie: {} words to wordify", num_bits, num_bytes, file_values.len()); println!("successfully filled {} words", succ_count); println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / succ_count as f64)); println!("\nN-depth summary: {:?}", n_gram_used); Ok(text_data) } pub fn encode_char_bit_map( file: &str, output: &str, seed_str: &str, word_file_name: &str, n_depth: usize, consecutive_skips: usize, depth_skip_threshold: usize, num_bits: usize, use_shuffle: bool, ) -> Result<(), String> { let mut rng = utils::create_rng_from_seed(seed_str); let mut original_rng = utils::create_rng_from_seed(seed_str); let contents = utils::get_file_contents(file)?; let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?; let mut bit_to_char_map = utils::make_bit_to_char_map(num_bits); let mut original_bit_to_char_map = bit_to_char_map.clone(); utils::fill_bit_to_char_map(&mut rng, &mut bit_to_char_map); utils::fill_bit_to_char_map(&mut original_rng, &mut original_bit_to_char_map); let value_vec = get_value_vec(&mut bit_to_char_map, &contents, num_bits, use_shuffle, &mut rng); word_file_data = word_file_data.to_lowercase(); word_file_data = utils::format_text_for_ngrams(&word_file_data); let ( gram_hash, unique_words, total_words, ) = generate_ngrams(&word_file_data, n_depth); let text_data = wordify( &gram_hash, n_depth, value_vec, &mut original_rng, &mut original_bit_to_char_map, &unique_words, total_words as f64, consecutive_skips, depth_skip_threshold, use_shuffle, )?; fs::write(output, text_data).unwrap(); Ok(()) } pub fn encode_char_value_map( file: &str, output: &str, seed_str: &str, word_file_name: &str, n_depth: usize, consecutive_skips: usize, depth_skip_threshold: usize, num_bits: usize, use_shuffle: bool, value_mode: utils::ValueMode, ) -> Result<(), String> { let mut rng = utils::create_rng_from_seed(seed_str); let mut original_rng = utils::create_rng_from_seed(seed_str); let contents = utils::get_file_contents(file)?; let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?; let mut char_to_value_map = utils::make_char_to_value_map(num_bits); let mut original_char_to_value_map = char_to_value_map.clone(); utils::shuffle_char_value_map(&mut rng, &mut char_to_value_map); utils::shuffle_char_value_map(&mut original_rng, &mut original_char_to_value_map); let value_vec = get_value_vec_from_char_value_mode( &contents, num_bits, use_shuffle, &mut rng, &mut char_to_value_map, ); word_file_data = word_file_data.to_lowercase(); word_file_data = utils::format_text_for_ngrams(&word_file_data); let ( gram_hash, unique_words, total_words, ) = generate_ngrams(&word_file_data, n_depth); let text_data = wordify_from_char_value_mode( &gram_hash, &mut original_char_to_value_map, n_depth, value_vec, num_bits, &unique_words, total_words as f64, use_shuffle, value_mode, &mut original_rng, )?; fs::write(output, text_data).unwrap(); Ok(()) } pub fn encode(matches: &ArgMatches) -> Result<(), String> { let file =
{ let mut cursor = Cursor::new(&file_contents); let mut num_bits_remain = file_contents.len() * 8; let mut bitreader = BitReader::endian(&mut cursor, BigEndian); let mut sorted_keys = vec![]; let mut value_vec = vec![]; for byte_val in bit_to_char_map.keys() { sorted_keys.push(*byte_val); } sorted_keys.sort_by(|a, b| b.cmp(a)); // sort the keys once so we don't need to sort them on every iteration. // the bit to char map maps bit positions (0, 1, 2, 4, 8, 16, 32, etc.) // to characters. we iterate over the bit position values, push each one // into sorted_keys, and then sort in descending order // (i.e. the 0th element is the largest). // we do this because the user provides the number of bits, // so if the user says the number of bits is 3, the sorted keys will look like: [4, 2, 1, 0]
identifier_body
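The sorted_keys comment in the middle above implies that utils::get_chars_from_value decomposes each value against descending bit positions, emitting one mapped character per set bit. The sketch below is a hypothetical reconstruction of that decomposition from those comments alone; chars_from_value, the map contents, and the zero-key fallback are assumptions, not the project's actual utils code:

use std::collections::HashMap;

// Walk bit positions in descending order (e.g. [4, 2, 1, 0] for 3 bits) and
// emit the mapped char for every bit set in `value`.
fn chars_from_value(value: u8, map: &HashMap<usize, char>, sorted_keys: &[usize]) -> String {
    let mut remaining = value as usize;
    let mut out = String::new();
    for &k in sorted_keys {
        if k != 0 && remaining >= k {
            out.push(map[&k]); // this bit position is set
            remaining -= k;
        }
    }
    if out.is_empty() {
        out.push(map[&0]); // assumed: the 0 key stands for "no bits set"
    }
    out
}

fn main() {
    // hypothetical 3-bit map; the real maps are filled by fill_bit_to_char_map from a seeded RNG
    let map: HashMap<usize, char> = HashMap::from([(4, 'q'), (2, 'x'), (1, 'z'), (0, 'j')]);
    let sorted_keys: [usize; 4] = [4, 2, 1, 0];
    println!("{}", chars_from_value(5, &map, &sorted_keys)); // 4 + 1 -> "qz"
}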
encode.rs
match n_down { 1 => { for t in text.split_whitespace() { grams.push(vec![t]); } // for some reason the ngrams crate has difficulty when n == 1, // so in this case we generate the n gram ourselves by a simple whitespace // delimiter. }, _ => { grams = text.split_whitespace().ngrams(n_down).collect(); } } for v in grams { if n_down == 1 { total_words += 1; } if let Some(val) = hash.get_mut(&v) { *val += 1; } else { if n_down == 1 { unique_words.push(v[0]); // if we are on the last n-depth (n == 1), // that means the vectors only contain one word. // if the hash does not have this vector of one word yet, then // this is the first time we are seeing it, so we will add it to // a vector of unique words. } hash.insert(v, 1); // if the hash does not have this vector yet, add it // with occurance 1, and next time we see this vector, // increment the occurance } } n_down -= 1; } (hash, unique_words, total_words) } pub fn get_restricted_chars(char_map: &HashMap<char, usize>, gib_word: &String) -> Vec<char> { let mut restricted_chars = vec![]; for key in char_map.keys() { restricted_chars.push(*key); } let word_chars: Vec<char> = gib_word.chars().collect(); for c in word_chars { if restricted_chars.contains(&c) { let char_index = restricted_chars.iter().position(|&r| r == c).unwrap(); restricted_chars.remove(char_index); } } restricted_chars } pub fn can_use_word(word: &str, good_chars: &Vec<char>, restricted_chars: &Vec<char>) -> bool { let word_chars: Vec<char> = word.chars().collect(); let mut good_chars_used = vec![0; good_chars.len()]; for c in word_chars { if restricted_chars.contains(&c) { return false; } if good_chars.contains(&c) { let char_index = good_chars.iter().position(|&r| r == c).unwrap(); good_chars_used[char_index] += 1; } } for i in good_chars_used { if i == 0 { return false; } } true } pub fn get_initial_words<'a>(hashmap: &'a HashMap<Vec<&str>, usize>, n: usize) -> Vec<&'a str> { let mut vecs_with_n_items = vec![]; for key in hashmap.keys() { if key.len() == n { vecs_with_n_items.push(key); } } let mut count_hash = HashMap::new(); for vec in vecs_with_n_items { let mut n_minus_1_slice = vec![]; let mut counter = 1; for word in vec { if counter < n { n_minus_1_slice.push(*word); } else { break; } counter += 1; } if let Some(val) = count_hash.get_mut(&n_minus_1_slice) { *val += 1; } else { count_hash.insert(n_minus_1_slice, 1); } } let mut best_vec_count = 0; let mut best_vec = vec![]; for vec in count_hash.keys() { let vec_count = count_hash.get(vec).unwrap(); if vec_count > &best_vec_count { best_vec_count = *vec_count; best_vec = vec.to_vec(); } } best_vec } pub fn get_probability_of(word: &str, given: &Vec<&str>, hashmap: &HashMap<Vec<&str>, usize>, num_words: f64) -> f64 { let count_of_given = match given.len() { 0 => num_words, _ => { if let Some(count) = hashmap.get(given) { *count as f64 } else { return 0.0; } }, }; let mut word_vec = given.clone(); word_vec.push(word); let count_of_sequence = if let Some(count) = hashmap.get(&word_vec) { *count as f64 } else { return 0.0; }; count_of_sequence / count_of_given } pub fn get_best_word<'a>( gram: &HashMap<Vec<&str>, usize>, usable_words: &Vec<&'a str>, current_words: &Vec<&str>, n: usize, total_words: f64, ) -> (&'a str, usize) { let mut all_p_zero = true; let mut use_n = n; let mut max_p_index = 0; while all_p_zero { let mut ngram_slice = vec![]; let mut counter = 1; for word in current_words.iter().rev() { if counter < use_n { ngram_slice.push(*word); } counter += 1; } ngram_slice.reverse(); let last_word = 
ngram_slice.last().unwrap(); let mut max_p = 0.0; max_p_index = 0; for i in 0..usable_words.len() { let w = &usable_words[i]; if w == last_word { continue; } let p = get_probability_of(w, &ngram_slice, gram, total_words); // let p = get_interpolated_probability(w, &ngram_slice, gram, total_words); // println!("P({} | {:?}) = {}", w, ngram_slice, p); if p > max_p { all_p_zero = false; max_p_index = i; max_p = p; } } if all_p_zero { use_n -= 1; } // comment this if using interpolation if use_n == 1 { // no point in picking the word that appears the most... // take our chances and break, and pick a random word from the list. // println!("reached bottom of use n!"); break; } } use_n -= 1; if use_n == 0 { let mut rng = rand::thread_rng(); max_p_index = rng.gen_range(0, usable_words.len()); } (usable_words[max_p_index], use_n) } pub fn wordify( gram: &HashMap<Vec<&str>, usize>, n: usize, file_words: Vec<String>, rng: &mut StdRng, bit_to_char_map: &mut HashMap<usize, char>, unique_words: &Vec<&str>, total_words: f64, consecutive_skips: usize, depth_skip_threshold: usize, use_shuffle: bool, ) -> Result<String, String> { let mut char_to_bit_map = HashMap::new(); let mut num_bits = 0; for bit_val in bit_to_char_map.keys() { num_bits += 1; char_to_bit_map.insert(*bit_to_char_map.get(&bit_val).unwrap(), *bit_val); } num_bits -= 1; // let num_words = unique_words.len(); let mut succ_count = 0; let mut fail_count = 0; let mut skip_count = 0; let mut n_gram_used = vec![0; n]; let mut text_data = String::from(""); let mut current_words = get_initial_words(gram, n); let mut i = 0; let mut consecutive_skips_used = 0; let mut skip_words = vec![]; if!use_shuffle { for w in unique_words { if utils::is_skip_word(w, &char_to_bit_map) { skip_words.push(*w); } } // if not shuffling, skip words get filled in once before // iterating. } while i < file_words.len() { let gibberish_word = &file_words[i]; let mut used_skip_word = false; let mut use_keys = vec![]; for key in char_to_bit_map.keys() { use_keys.push(*key); } let restricted_chars = get_restricted_chars(&char_to_bit_map, gibberish_word); let mut usable_words = vec![]; if use_shuffle { skip_words = vec![]; // if shuffling the bit to char map, // we have to reset the skip words every time because they might // be different } // let mut value_num_list = vec![0; max_value]; for w in unique_words { // let word_val = get_value_from_word(w, char_value_map, max_value); // // println!("value for {}: {}", w, get_value_from_word(w, char_value_map, 2)); // value_num_list[word_val] += 1; if use_shuffle && utils::is_skip_word(w, &char_to_bit_map) { skip_words.push(*w); } if can_use_word(w, &gibberish_word.chars().collect(), &restricted_chars) { usable_words.push(*w); } } match usable_words.len() { 0 => { fail_count += 1; text_data.push_str(&gibberish_word); text_data.push_str(" "); current_words.push("."); consecutive_skips_used = 0; // if there are NO usable words at all then we 'failed' // to encode this word. we push the gibberish word as is to // the text data output because we still need to be able to decode it. // we add a. to current words to stimulate the ngram probability // for the next word. }, 1 => { succ_count += 1; let best_word = &usable_words[0]; text_data.push_str(best_word); current_words.push(best_word); text_data.push_str(" "); n_gram_used[0] += 1; consecutive_skips_used = 0; // there is only one usable word, so use it without // estimating any probabilities. ngram used a depth // of 0 since we are not evaluating ngrams here. 
            },
            _ => {
                let (best_word, n_used) = get_best_word(
                    gram,
                    &usable_words,
                    &current_words,
                    n,
                    total_words,
                );
                // the user can fine-tune the quality of the text output using
                // depth_skip_threshold and the number of consecutive skips allowed.
                // The higher both are, the more skip words are used, which can
                // potentially make the output look more like real text, at the
                // expense of encoding fewer bits per word on average.
                // consecutive_skips_used sets a limit to this such that it forces
                // the program to eventually encode a word, otherwise it might
                // loop forever in certain situations.
                // depth_skip_threshold lets the user say which n-depths are
                // acceptable; lower n-depths produce less realistic text.
                if n_used <= depth_skip_threshold && consecutive_skips_used < consecutive_skips && !skip_words.is_empty() {
                    let (best_word2, n_used2) = get_best_word(
                        gram,
                        &skip_words,
                        &current_words,
                        n,
                        total_words
                    );
                    n_gram_used[n_used2] += 1;
                    current_words.push(best_word2);
                    text_data.push_str(best_word2);
                    text_data.push_str(" ");
                    skip_count += 1;
                    used_skip_word = true;
                    consecutive_skips_used += 1;
                    // we used a skip word: i is deliberately not advanced below,
                    // so we try to encode this word again on the next pass
                } else {
                    succ_count += 1;
                    n_gram_used[n_used] += 1;
                    text_data.push_str(best_word);
                    current_words.push(best_word);
                    text_data.push_str(" ");
                    consecutive_skips_used = 0;
                    // if not using a skip word, we encoded the best possible word
                    // according to ngrams. add the best word to the text output, as
                    // well as the current words vec, which is used to determine word
                    // probabilities for the next iteration
                }
            }
        };
        if !used_skip_word && use_shuffle {
            // only shuffle the bit to char map if we encoded a word.
            // if we used a skip word, we do NOT want to shuffle as we
            // will not be able to properly decode
            utils::fill_bit_to_char_map(rng, bit_to_char_map);
            char_to_bit_map = utils::make_char_to_bit_map(bit_to_char_map);
        }
        if !used_skip_word {
            // only advance i after a real encode; the old unconditional
            // `i -= 1` in the skip branch could underflow when i == 0
            i += 1;
        }
    }
    text_data.pop(); // remove trailing space
    let num_bytes = (file_words.len() * num_bits) / 8;
    // print summary
    println!("\nencoding using {} bits per word. file had {} bytes, i.e. {} words to wordify", num_bits, num_bytes, file_words.len());
    println!("successfully filled {} words", (succ_count + skip_count));
    println!("of the {} words, {} were skip words", (succ_count + skip_count), skip_count);
    println!("failed to find a word {} times", fail_count);
    println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / (succ_count + skip_count) as f64));
    println!("\nN-depth summary: {:?}", n_gram_used);
    Ok(text_data)
}

pub fn get_value_vec_from_char_value_mode(
    file_contents: &Vec<u8>,
    num_bits: usize,
    let mut cursor = Cursor::new(&file_contents);
    let mut num_bits_remain = file_contents.len() * 8;
    let mut bitreader = BitReader::endian(&mut cursor, BigEndian);
    let mut value_vec = vec![];
    while num_bits_remain > 0 {
        let num_bits_to_read = if num_bits_remain < num_bits {
            num_bits_remain as u32
        } else {
            num_bits as u32
        };
        let value: u8 = bitreader.read(num_bits_to_read).unwrap();
        if use_shuffle {
            utils::shuffle_char_value_map(rng, char_to_value_map);
        }
        value_vec.push(value);
        num_bits_remain -= num_bits_to_read as usize;
    }
    value_vec
}

pub fn get_value_vec(
    bit_to_char_map: &mut HashMap<usize, char>,
    file_contents: &Vec<u8>,
    num_bits: usize,
    use_shuffle: bool,
    rng: &mut StdRng,
) -> Vec<String> {
    let mut cursor = Cursor::new(&file_contents);
    let mut num_bits_remain = file_contents.len() * 8;
    let mut bitreader = BitReader::endian(&mut cursor, BigEndian);
    let mut sorted_keys = vec![];
    let mut value_vec = vec![];
    for byte_val in bit_to_char_map.keys() {
        sorted_keys.push(*byte_val);
    }
    sorted_keys.sort_by(|a, b| b.cmp(a));
    // sort keys once so you don't need to re-sort in every iteration.
    // the bit to char map maps bit positions (0, 1, 2, 4, 8, 16, 32, etc.)
    // to characters. we iterate over the bit position values, push each value
    // to a sorted_keys vec, and then sort in descending order
    // (i.e. the 0th element is the largest).
    // we do this because the user provides the number of bits, so if the user
    // says the number of bits is 3, the sorted keys will look like: [4, 2, 1, 0]
    while num_bits_remain > 0 {
        let num_bits_to_read = if num_bits_remain < num_bits {
            num_bits_remain as u32
        } else {
            num_bits as u32
        };
        let value: u8 = bitreader.read(num_bits_to_read).unwrap();
        let char_str = utils::get_chars_from_value(value, bit_to_char_map, &sorted_keys);
        if use_shuffle {
            utils::fill_bit_to_char_map(rng, bit_to_char_map);
        }
        value_vec.push(char_str);
        num_bits_remain -= num_bits_to_read as usize;
    }
    // iterate the file that you wish to encode, reading num_bits at a time.
    // for each value you read, generate characters that map to the value using
    // the bit to char map. if using shuffle, the bit to char map gets shuffled
    // according to a seeded rng. at the end you have a vector of gibberish
    // strings that you will try to hide in words using ngrams.
    value_vec
}

pub fn wordify_from_char_value_mode(
    gram: &HashMap<Vec<&str>, usize>,
    char_to_value_map: &mut HashMap<char, usize>,
    n: usize,
    file_values: Vec<u8>,
    num_bits: usize,
    unique_words: &Vec<&str>,
    total_words: f64,
    use_shuffle: bool,
    value_mode: utils::ValueMode,
    rng: &mut StdRng,
) -> Result<String, String> {
    let mut succ_count = 0;
    let mut n_gram_used = vec![0; n];
    let mut text_data = String::from("");
    let mut current_words = get_initial_words(gram, n);
    let mut i = 0;
    while i < file_values.len() {
        let current_val = file_values[i];
        let mut usable_words = vec![];
        for w in unique_words {
            if *w == "." || *w == "," || *w == "?" || *w == ";" || *w == "!" {
                // don't use punctuation in char_value mode because
                // punctuation isn't ignored by the decoder. if you wanted
                // to leave punctuation in, you would also have to leave
                // the spaces around it, which would result in a stego text
                // like: he likes cars , toys , and trucks .
                // for that reason, I chose to ignore punctuation
                continue;
            }
            let w_val = utils::get_value_from_chars(w, &char_to_value_map, &value_mode);
            if w_val == current_val as usize {
                usable_words.push(*w);
            }
        }
        match usable_words.len() {
            0 => {
                panic!("NOT ENOUGH WORDS WITH VALUE {}", current_val);
            },
            1 => {
                succ_count += 1;
                let best_word = &usable_words[0];
                text_data.push_str(best_word);
                current_words.push(best_word);
                text_data.push_str(" ");
                n_gram_used[0] += 1;
            },
            _ => {
                let (best_word, n_used) = get_best_word(
                    gram,
                    &usable_words,
                    &current_words,
                    n,
                    total_words,
                );
                succ_count += 1;
                n_gram_used[n_used] += 1;
                text_data.push_str(best_word);
                current_words.push(best_word);
                text_data.push_str(" ");
            }
        };
        if use_shuffle {
            utils::shuffle_char_value_map(rng, char_to_value_map);
        }
        i += 1;
    }
    text_data.pop(); // remove trailing space
    let num_bytes = (file_values.len() * num_bits) / 8;
    // print summary
    println!("\nencoding using {} bits per word. file had {} bytes, i.e. {} words to wordify", num_bits, num_bytes, file_values.len());
    println!("successfully filled {} words", succ_count);
    println!("average bits per word: {}\n", ((num_bytes * 8) as f64 / succ_count as f64));
    println!("\nN-depth summary: {:?}", n_gram_used);
    Ok(text_data)
}

pub fn encode_char_bit_map(
    file: &str,
    output: &str,
    seed_str: &str,
    word_file_name: &str,
    n_depth: usize,
    consecutive_skips: usize,
    depth_skip_threshold: usize,
    num_bits: usize,
    use_shuffle: bool,
) -> Result<(), String> {
    let mut rng = utils::create_rng_from_seed(seed_str);
    let mut original_rng = utils::create_rng_from_seed(seed_str);
    let contents = utils::get_file_contents(file)?;
    let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?;
    let mut bit_to_char_map = utils::make_bit_to_char_map(num_bits);
    let mut original_bit_to_char_map = bit_to_char_map.clone();
    utils::fill_bit_to_char_map(&mut rng, &mut bit_to_char_map);
    utils::fill_bit_to_char_map(&mut original_rng, &mut original_bit_to_char_map);
    let value_vec = get_value_vec(&mut bit_to_char_map, &contents, num_bits, use_shuffle, &mut rng);
    word_file_data = word_file_data.to_lowercase();
    word_file_data = utils::format_text_for_ngrams(&word_file_data);
    let (
        gram_hash,
        unique_words,
        total_words,
    ) = generate_ngrams(&word_file_data, n_depth);
    let text_data = wordify(
        &gram_hash,
        n_depth,
        value_vec,
        &mut original_rng,
        &mut original_bit_to_char_map,
        &unique_words,
        total_words as f64,
        consecutive_skips,
        depth_skip_threshold,
        use_shuffle,
    )?;
    fs::write(output, text_data).unwrap();
    Ok(())
}

pub fn encode_char_value_map(
    file: &str,
    output: &str,
    seed_str: &str,
    word_file_name: &str,
    n_depth: usize,
    consecutive_skips: usize,
    depth_skip_threshold: usize,
    num_bits: usize,
    use_shuffle: bool,
    value_mode: utils::ValueMode,
) -> Result<(), String> {
    let mut rng = utils::create_rng_from_seed(seed_str);
    let mut original_rng = utils::create_rng_from_seed(seed_str);
    let contents = utils::get_file_contents(file)?;
    let mut word_file_data = utils::get_file_contents_as_string(word_file_name)?;
    let mut char_to_value_map = utils::make_char_to_value_map(num_bits);
    let mut original_char_to_value_map = char_to_value_map.clone();
    utils::shuffle_char_value_map(&mut rng, &mut char_to_value_map);
    utils::shuffle_char_value_map(&mut original_rng, &mut original_char_to_value_map);
    let value_vec = get_value_vec_from_char_value_mode(
        &contents,
        num_bits,
        use_shuffle,
        &mut rng,
        &mut char_to_value_map,
    );
    word_file_data = word_file_data.to_lowercase();
    word_file_data = utils::format_text_for_ngrams(&word_file_data);
    let
( gram_hash, unique_words, total_words, ) = generate_ngrams(&word_file_data, n_depth); let text_data = wordify_from_char_value_mode( &gram_hash, &mut original_char_to_value_map, n_depth, value_vec, num_bits, &unique_words, total_words as f64, use_shuffle, value_mode, &mut original_rng, )?; fs::write(output, text_data).unwrap(); Ok(()) } pub fn encode(matches: &ArgMatches) -> Result<(), String> { let file = utils::
use_shuffle: bool, rng: &mut StdRng, char_to_value_map: &mut HashMap<char, usize>, ) -> Vec<u8> {
random_line_split
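A minimal, self-contained sketch of the probability machinery this record relies on: n-gram counts stored in a HashMap<Vec<&str>, usize> and P(word | context) = count(context + word) / count(context), with a zero score signalling the caller to back off to a shorter context, as get_best_word does above. The corpus, names, and numbers below are illustrative only, not part of the original crate.

use std::collections::HashMap;

// Count every 1..=n gram of a whitespace-tokenized corpus.
fn count_ngrams<'a>(text: &'a str, n: usize) -> HashMap<Vec<&'a str>, usize> {
    let tokens: Vec<&str> = text.split_whitespace().collect();
    let mut counts = HashMap::new();
    for k in 1..=n {
        for window in tokens.windows(k) {
            *counts.entry(window.to_vec()).or_insert(0) += 1;
        }
    }
    counts
}

// P(word | given) = count(given + word) / count(given); unseen sequences score 0.0.
fn probability_of<'a>(
    word: &'a str,
    given: &[&'a str],
    counts: &HashMap<Vec<&'a str>, usize>,
    total_words: f64,
) -> f64 {
    let denom = if given.is_empty() {
        total_words
    } else {
        match counts.get(given) {
            Some(c) => *c as f64,
            None => return 0.0,
        }
    };
    let mut seq = given.to_vec();
    seq.push(word);
    match counts.get(&seq) {
        Some(c) => *c as f64 / denom,
        None => 0.0,
    }
}

fn main() {
    let corpus = "the cat sat on the mat the cat ran";
    let counts = count_ngrams(corpus, 2);
    let total = corpus.split_whitespace().count() as f64;
    // "the" occurs 3 times and "the cat" twice, so P = 2/3.
    println!("P(cat | the) = {}", probability_of("cat", &["the"], &counts, total));
    // An unseen continuation scores 0.0, which is what triggers the n-depth backoff.
    println!("P(dog | the) = {}", probability_of("dog", &["the"], &counts, total));
}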
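The get_value_vec functions in the suffix read the input file num_bits at a time, shortening the final read when fewer bits remain. A dependency-free sketch of that chunking, with manual big-endian bit extraction standing in for the BitReader used above:

// Split a byte slice into big-endian groups of `num_bits` bits,
// shortening the final group when fewer bits remain.
fn bit_chunks(data: &[u8], num_bits: usize) -> Vec<u8> {
    assert!(num_bits >= 1 && num_bits <= 8);
    let total_bits = data.len() * 8;
    let mut out = Vec::new();
    let mut pos = 0; // absolute bit position, MSB-first
    while pos < total_bits {
        let take = num_bits.min(total_bits - pos);
        let mut value = 0u8;
        for _ in 0..take {
            let bit = (data[pos / 8] >> (7 - pos % 8)) & 1;
            value = (value << 1) | bit;
            pos += 1;
        }
        out.push(value);
    }
    out
}

fn main() {
    // 0b10110100 in 3-bit groups: 101, 101, 00 -> [5, 5, 0]
    assert_eq!(bit_chunks(&[0b1011_0100], 3), vec![5, 5, 0]);
    println!("bit chunking ok");
}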
rcu.rs
//! Reset and clock unit

use crate::pac::RCU;
use riscv::interrupt;
use crate::time::Hertz;
use core::cmp;

/// Extension trait that sets up the `RCU` peripheral
pub trait RcuExt {
    /// Configure the clocks of the `RCU` peripheral
    fn configure(self) -> UnconfiguredRcu;
}

impl RcuExt for RCU {
    fn configure(self) -> UnconfiguredRcu {
        UnconfiguredRcu::new(self)
    }
}

/// Configured RCU peripheral
pub struct Rcu {
    /// Frozen clock frequencies
    pub clocks: Clocks,
    pub(crate) regs: RCU,
}

pub struct UnconfiguredRcu {
    hxtal: Option<u32>,
    sysclk: Option<u32>,
    regs: RCU,
}

impl UnconfiguredRcu {
    fn new(rcu: RCU) -> Self {
        Self {
            hxtal: None,
            sysclk: None,
            regs: rcu,
        }
    }

    /// Uses an external oscillator instead of IRC8M (internal RC oscillator) as the high-speed
    /// clock source. Will result in a hang if an external oscillator is not connected or it fails
    /// to start.
    pub fn ext_hf_clock(mut self, freq: impl Into<Hertz>) -> Self {
        let freq = freq.into().0;
        assert!(4_000_000 <= freq && freq <= 32_000_000);
        self.hxtal = Some(freq);
        self
    }

    /// Sets the desired frequency for the SYSCLK clock
    pub fn sysclk(mut self, freq: impl Into<Hertz>) -> Self {
        let freq = freq.into().0;
        assert!(freq <= 108_000_000);
        self.sysclk = Some(freq);
        self
    }

    /// Freezes clock configuration, making it effective
    pub fn freeze(self) -> Rcu {
        const IRC8M: u32 = 8_000_000;
        let target_sysclk = self.sysclk.unwrap_or(IRC8M);
        let (scs_bits, use_pll) = match (self.hxtal, target_sysclk) {
            (Some(freq), sysclk) if freq == sysclk => (0b01, false),
            (None, sysclk) if IRC8M == sysclk => (0b00, false),
            _ => (0b10, true),
        };
        let pllsel_bit;
        let predv0_bits;
        let pllmf_bits;
        if use_pll {
            let pllmf;
            if let Some(hxtal_freq) = self.hxtal {
                // Use external clock + divider
                pllsel_bit = true;
                let calculate_pll = |source: u32, target: u32| -> Option<(u8, u8)> {
                    const PLL_IN_MIN: u32 = 600_000;
                    let div_max = cmp::min(16, source / PLL_IN_MIN);
                    for d in 1..=div_max {
                        let pllsource = source / d;
                        let pllm = target / pllsource;
                        if pllm < 2 || pllm == 15 || pllm > 32 {
                            continue;
                        }
                        let actual_freq = pllsource * pllm;
                        if actual_freq == target {
                            return Some((d as u8, pllm as u8));
                        }
                    }
                    None
                };
                let (d, m) = calculate_pll(hxtal_freq, target_sysclk).expect("invalid sysclk value");
                predv0_bits = d - 1;
                pllmf = m;
            } else {
                // IRC8M/2 is used as an input clock
                pllsel_bit = false;
                let pllsource = IRC8M / 2;
                let m = target_sysclk / pllsource;
                let m = cmp::max(2, cmp::min(m, 32));
                assert_ne!(m, 15, "invalid sysclk value");
                let actual_sysclk = pllsource * m;
                assert_eq!(target_sysclk, actual_sysclk, "invalid sysclk value");
                predv0_bits = 0;
                pllmf = m as u8;
            }
            pllmf_bits = match pllmf {
                2..=14 => pllmf - 2,
                16..=32 => pllmf - 1,
                _ => unreachable!("invalid pll multiplier"),
            };
        } else {
            pllsel_bit = false;
            predv0_bits = 0;
            pllmf_bits = 0;
        }
        // Switch to the internal clock
        let rcu = unsafe { &*crate::pac::RCU::ptr() };
        rcu.ctl.modify(|_, w| w.irc8men().set_bit()); // Enable IRC8M oscillator
        while rcu.ctl.read().irc8mstb().bit_is_clear() {} // Wait for oscillator to stabilize
        rcu.cfg0.modify(|_, w| unsafe { w.scs().bits(0b00) }); // Switch to the internal oscillator
        rcu.ctl.modify(|_, w| w.pllen().clear_bit()); // Disable PLL
        // Set bus prescalers
        rcu.cfg0.modify(|_, w| unsafe { w.ahbpsc().bits(0b0000) }); // CK_SYS
        rcu.cfg0.modify(|_, w| unsafe { w.apb1psc().bits(0b100) }); // CK_AHB / 2
        rcu.cfg0.modify(|_, w| unsafe { w.apb2psc().bits(0b000) }); // CK_AHB
        let apb1_psc = 2;
        let apb2_psc = 1;
        if self.hxtal.is_some() {
            // Enable external oscillator
            rcu.ctl.modify(|_, w|
w.hxtalen().set_bit());
            // Wait for oscillator to stabilize
            while rcu.ctl.read().hxtalstb().bit_is_clear() {}
            // Select HXTAL as prescaler input source clock
            rcu.cfg1.modify(|_, w| w.predv0sel().clear_bit());
            // Configure the prescaler
            rcu.cfg1.modify(|_, w| unsafe { w.predv0().bits(predv0_bits) });
        }
        if use_pll {
            // Configure PLL input selector
            rcu.cfg0.modify(|_, w| w.pllsel().bit(pllsel_bit));
            // Configure PLL multiplier
            rcu.cfg0.modify(|_, w| unsafe {
                w
                    .pllmf_4().bit(pllmf_bits & 0x10 != 0)
                    .pllmf_3_0().bits(pllmf_bits & 0xf)
            });
            // Enable PLL
            rcu.ctl.modify(|_, w| w.pllen().set_bit());
            // Wait for PLL to stabilize
            while rcu.ctl.read().pllstb().bit_is_clear() {}
        } else {
            // Disable PLL
            rcu.ctl.modify(|_, w| w.pllen().clear_bit());
        }
        // Switch to the configured clock source
        rcu.cfg0.modify(|_, w| unsafe { w.scs().bits(scs_bits) });
        let usbclk_valid;
        if use_pll {
            let pllclk = target_sysclk;
            let (valid, pr) = match pllclk {
                48_000_000 => (true, 0b01), // pllclk / 1
                72_000_000 => (true, 0b00), // pllclk / 1.5
                96_000_000 => (true, 0b11), // pllclk / 2
                _ => (false, 0),
            };
            usbclk_valid = valid;
            // Configure USB prescaler
            rcu.cfg0.modify(|_, w| unsafe { w.usbfspsc().bits(pr) });
        } else {
            usbclk_valid = false;
        }
        let clocks = Clocks { sysclk: Hertz(target_sysclk), apb1_psc, apb2_psc, usbclk_valid };
        Rcu { clocks, regs: self.regs }
    }
}

#[derive(Copy, Clone)]
pub struct Clocks {
    sysclk: Hertz,
    apb1_psc: u8,
    apb2_psc: u8,
    usbclk_valid: bool,
}

impl Clocks {
    /// Returns the system (core) frequency
    pub const fn sysclk(&self) -> Hertz {
        self.sysclk
    }

    /// Returns the frequency of the AHB
    pub const fn hclk(&self) -> Hertz {
        self.sysclk
    }

    /// Returns the frequency of the APB1
    pub const fn pclk1(&self) -> Hertz {
        Hertz(self.sysclk.0 / self.apb1_psc as u32)
    }

    /// Returns the frequency of the APB2
    pub const fn pclk2(&self) -> Hertz {
        Hertz(self.sysclk.0 / self.apb2_psc as u32)
    }

    /// Returns the frequency of the SysTick timer
    pub const fn systick(&self) -> Hertz {
        Hertz(self.sysclk.0 / 4)
    }

    /// Returns the frequency of the TIMER0 base clock
    pub fn timer0(&self) -> Hertz {
        let pclk2 = self.pclk2();
        if self.apb2_psc == 1 {
            pclk2
        } else {
            Hertz(pclk2.0 * 2)
        }
    }

    /// Returns the frequency of the TIMER1..6 base clock
    pub fn timerx(&self) -> Hertz {
        let pclk1 = self.pclk1();
        if self.apb1_psc == 1 {
            pclk1
        } else {
            Hertz(pclk1.0 * 2)
        }
    }

    /// Returns whether the USBCLK clock frequency is valid for the USB peripheral
    pub const fn usbclk_valid(&self) -> bool {
        self.usbclk_valid
    }
}

macro_rules! base_freq {
    ($($PER:ident => $func:ident,)+) => {
        $(
            impl BaseFrequency for crate::pac::$PER {
                #[inline(always)]
                fn base_frequency(rcu: &Rcu) -> Hertz {
                    rcu.clocks.$func()
                }
            }
        )+
    }
}

base_freq! {
    ADC0 => pclk2,
    ADC1 => pclk2,
    I2C0 => pclk1,
    I2C1 => pclk1,
    SPI0 => pclk2,
    SPI1 => pclk1,
    SPI2 => pclk1,
    TIMER0 => timer0,
    TIMER1 => timerx,
    TIMER2 => timerx,
    TIMER3 => timerx,
    TIMER4 => timerx,
    TIMER5 => timerx,
    TIMER6 => timerx,
    UART3 => pclk1,
    UART4 => pclk1,
    USART0 => pclk2,
    USART1 => pclk1,
    USART2 => pclk1,
}

pub(crate) mod closed_traits {
    use super::Rcu;
    use crate::time::Hertz;

    /// Enable/disable peripheral
    pub trait Enable {
        fn enable(rcu: &mut Rcu);
        fn disable(rcu: &mut Rcu);
    }

    /// Reset peripheral
    pub trait Reset {
        fn reset(rcu: &mut Rcu);
    }

    pub trait BaseFrequency {
        fn base_frequency(rcu: &Rcu) -> Hertz;
    }
}
pub(crate) use closed_traits::*;

macro_rules!
bus_enable { ($PER:ident => ($apben:ident, $peren:ident)) => { impl Enable for crate::pac::$PER { #[inline(always)] fn enable(rcu: &mut Rcu) { interrupt::free(|_| { rcu.regs.$apben.modify(|_, w| w.$peren().set_bit()); }); } #[inline(always)] fn disable(rcu: &mut Rcu) { interrupt::free(|_| { rcu.regs.$apben.modify(|_, w| w.$peren().clear_bit()); }); } } } } macro_rules! bus { ($($PER:ident => ($apben:ident, $apbrst:ident, $peren:ident, $perrst:ident),)+) => { $( bus_enable!($PER => ($apben, $peren)); impl Reset for crate::pac::$PER { #[inline(always)] fn reset(rcu: &mut Rcu) { interrupt::free(|_| { rcu.regs.$apbrst.modify(|_, w| w.$perrst().set_bit()); rcu.regs.$apbrst.modify(|_, w| w.$perrst().clear_bit()); }); } } )+ } } bus! { ADC0 => (apb2en, apb2rst, adc0en, adc0rst), ADC1 => (apb2en, apb2rst, adc1en, adc1rst), AFIO => (apb2en, apb2rst, afen, afrst), BKP => (apb1en, apb1rst, bkpien, bkpirst), CAN0 => (apb1en, apb1rst, can0en, can0rst), CAN1 => (apb1en, apb1rst, can1en, can1rst), DAC => (apb1en, apb1rst, dacen, dacrst), GPIOA => (apb2en, apb2rst, paen, parst), GPIOB => (apb2en, apb2rst, pben, pbrst), GPIOC => (apb2en, apb2rst, pcen, pcrst), GPIOD => (apb2en, apb2rst, pden, pdrst), GPIOE => (apb2en, apb2rst, peen, perst), I2C0 => (apb1en, apb1rst, i2c0en, i2c0rst), I2C1 => (apb1en, apb1rst, i2c1en, i2c1rst), PMU => (apb1en, apb1rst, pmuen, pmurst), SPI0 => (apb2en, apb2rst, spi0en, spi0rst), SPI1 => (apb1en, apb1rst, spi1en, spi1rst), SPI2 => (apb1en, apb1rst, spi2en, spi2rst), TIMER0 => (apb2en, apb2rst, timer0en, timer0rst), TIMER1 => (apb1en, apb1rst, timer1en, timer1rst), TIMER2 => (apb1en, apb1rst, timer2en, timer2rst), TIMER3 => (apb1en, apb1rst, timer3en, timer3rst), TIMER4 => (apb1en, apb1rst, timer4en, timer4rst), TIMER5 => (apb1en, apb1rst, timer5en, timer5rst), TIMER6 => (apb1en, apb1rst, timer6en, timer6rst), UART3 => (apb1en, apb1rst, uart3en, uart3rst), UART4 => (apb1en, apb1rst, uart4en, uart4rst), USART0 => (apb2en, apb2rst, usart0en, usart0rst), USART1 => (apb1en, apb1rst, usart1en, usart1rst), USART2 => (apb1en, apb1rst, usart2en, usart2rst), USBFS_GLOBAL => (ahben, ahbrst, usbfsen, usbfsrst), WWDGT => (apb1en, apb1rst, wwdgten, wwdgtrst),
bus_enable!(DMA1 => (ahben, dma1en)); bus_enable!(EXMC => (ahben, exmcen));
} bus_enable!(CRC => (ahben, crcen)); bus_enable!(DMA0 => (ahben, dma0en));
random_line_split
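The calculate_pll closure in the prefix above is a brute-force search over the external-clock predivider and the PLL multiplier. A host-runnable restatement of the same search, with the constraints from the surrounding code inlined (PLL input at least 600 kHz, divider 1..=16, multiplier 2..=32 with 15 unsupported); the frequencies in main are illustrative:

/// Find (predivider, multiplier) such that (source / d) * m == target.
fn calculate_pll(source: u32, target: u32) -> Option<(u8, u8)> {
    const PLL_IN_MIN: u32 = 600_000;
    // The divider must not push the PLL input below its minimum.
    let div_max = (source / PLL_IN_MIN).min(16);
    for d in 1..=div_max {
        let pll_in = source / d;
        let m = target / pll_in;
        if m < 2 || m == 15 || m > 32 {
            continue;
        }
        if pll_in * m == target {
            return Some((d as u8, m as u8));
        }
    }
    None
}

fn main() {
    // 8 MHz HXTAL to 96 MHz sysclk: 8 MHz / 1 * 12.
    assert_eq!(calculate_pll(8_000_000, 96_000_000), Some((1, 12)));
    // A 25 MHz source has no exact (d, m) solution for 108 MHz under these rules,
    // which is why freeze() would panic with "invalid sysclk value".
    assert_eq!(calculate_pll(25_000_000, 108_000_000), None);
    println!("pll search ok");
}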
rcu.rs
//! Reset and clock unit use crate::pac::RCU; use riscv::interrupt; use crate::time::Hertz; use core::cmp; /// Extension trait that sets up the `RCU` peripheral pub trait RcuExt { /// Configure the clocks of the `RCU` peripheral fn configure(self) -> UnconfiguredRcu; } impl RcuExt for RCU { fn configure(self) -> UnconfiguredRcu { UnconfiguredRcu::new(self) } } /// Configured RCU peripheral pub struct Rcu { /// Frozen clock frequencies pub clocks: Clocks, pub(crate) regs: RCU, } pub struct UnconfiguredRcu { hxtal: Option<u32>, sysclk: Option<u32>, regs: RCU, } impl UnconfiguredRcu { fn new(rcu: RCU) -> Self { Self { hxtal: None, sysclk: None, regs: rcu, } } /// Uses an external oscillator instead of IRC8M (internal RC oscillator) as the high-speed /// clock source. Will result in a hang if an external oscillator is not connected or it fails /// to start. pub fn ext_hf_clock(mut self, freq: impl Into<Hertz>) -> Self { let freq = freq.into().0; assert!(4_000_000 <= freq && freq <= 32_000_000); self.hxtal = Some(freq); self } /// Sets the desired frequency for the SYSCLK clock pub fn sysclk(mut self, freq: impl Into<Hertz>) -> Self { let freq = freq.into().0; assert!(freq <= 108_000_000); self.sysclk = Some(freq); self } /// Freezes clock configuration, making it effective pub fn freeze(self) -> Rcu { const IRC8M: u32 = 8_000_000; let target_sysclk = self.sysclk.unwrap_or(IRC8M); let (scs_bits, use_pll) = match (self.hxtal, target_sysclk) { (Some(freq), sysclk) if freq == sysclk => (0b01, false), (None, sysclk) if IRC8M == sysclk => (0b00, false), _ => (0b10, true), }; let pllsel_bit; let predv0_bits; let pllmf_bits; if use_pll { let pllmf; if let Some(hxtal_freq) = self.hxtal { // Use external clock + divider pllsel_bit = true; let calculate_pll = |source: u32, target: u32| -> Option<(u8, u8)> { const PLL_IN_MIN: u32 = 600_000; let div_max = cmp::min(16, source / PLL_IN_MIN); for d in 1..=div_max { let pllsource = source / d; let pllm = target / pllsource; if pllm < 2 || pllm == 15 || pllm > 32{ continue; } let actual_freq = pllsource * pllm; if actual_freq == target { return Some((d as u8, pllm as u8)); } } None }; let (d, m) = calculate_pll(hxtal_freq, target_sysclk).expect("invalid sysclk value"); predv0_bits = d - 1; pllmf = m; } else { // IRC8M/2 is used as an input clock pllsel_bit = false; let pllsource = IRC8M / 2; let m = target_sysclk / pllsource; let m = cmp::max(2, cmp::min(m, 32)); assert_ne!(m, 15, "invalid sysclk value"); let actual_sysclk = pllsource * m; assert_eq!(target_sysclk, actual_sysclk, "invalid sysclk value"); predv0_bits = 0; pllmf = m as u8; } pllmf_bits = match pllmf { 2..=14 => pllmf - 2, 16..=32 => pllmf - 1, _ => unreachable!("invalid pll multiplier"), }; } else { pllsel_bit = false; predv0_bits = 0; pllmf_bits = 0; } // Switch to the internal clock let rcu = unsafe { &*crate::pac::RCU::ptr() }; rcu.ctl.modify(|_, w| w.irc8men().set_bit()); // Enable IRC8M oscillator while rcu.ctl.read().irc8mstb().bit_is_clear() {} // Wait for oscillator to stabilize rcu.cfg0.modify(|_, w| unsafe { w.scs().bits(0b00) }); // Switch to the internal oscillator rcu.ctl.modify(|_, w| w.pllen().clear_bit()); // Disable PLL // Set bus prescalers rcu.cfg0.modify(|_, w| unsafe { w.ahbpsc().bits(0b0000) }); // CK_SYS rcu.cfg0.modify(|_, w| unsafe { w.apb1psc().bits(0b100) }); // CK_AHB / 2 rcu.cfg0.modify(|_, w| unsafe { w.apb2psc().bits(0b000) }); // CK_AHB let apb1_psc = 2; let apb2_psc = 1; if self.hxtal.is_some() { // Enable external oscillator rcu.ctl.modify(|_, w| 
w.hxtalen().set_bit()); // Wait for oscillator to stabilize while rcu.ctl.read().hxtalstb().bit_is_clear() {} // Select HXTAL as prescaler input source clock rcu.cfg1.modify(|_, w| w.predv0sel().clear_bit()); // Configure the prescaler rcu.cfg1.modify(|_, w| unsafe { w.predv0().bits(predv0_bits) }); } if use_pll { // Configure PLL input selector rcu.cfg0.modify(|_, w| w.pllsel().bit(pllsel_bit)); // Configure PLL multiplier rcu.cfg0.modify(|_, w| unsafe { w .pllmf_4().bit(pllmf_bits & 0x10!= 0) .pllmf_3_0().bits(pllmf_bits & 0xf) }); // Enable PLL rcu.ctl.modify(|_, w| w.pllen().set_bit()); // Wait for PLL to stabilize while rcu.ctl.read().pllstb().bit_is_clear() {} } else { // Disable PLL rcu.ctl.modify(|_, w| w.pllen().clear_bit()); } // Switch to the configured clock source rcu.cfg0.modify(|_, w| unsafe { w.scs().bits(scs_bits) }); let usbclk_valid; if use_pll { let pllclk = target_sysclk; let (valid, pr) = match pllclk { 48_000_000 => (true, 0b01), // pllclk / 1 72_000_000 => (true, 0b00), // pllclk / 1.5 96_000_000 => (true, 0b11), // pllclk / 2 _ => (false, 0), }; usbclk_valid = valid; // Configure USB prescaler rcu.cfg0.modify(|_, w| unsafe { w.usbfspsc().bits(pr) }); } else { usbclk_valid = false; } let clocks = Clocks { sysclk: Hertz(target_sysclk), apb1_psc, apb2_psc, usbclk_valid }; Rcu { clocks, regs: self.regs } } } #[derive(Copy, Clone)] pub struct Clocks { sysclk: Hertz, apb1_psc: u8, apb2_psc: u8, usbclk_valid: bool, } impl Clocks { /// Returns the system (core) frequency pub const fn sysclk(&self) -> Hertz { self.sysclk } /// Returns the frequency of the AHB pub const fn hclk(&self) -> Hertz { self.sysclk } /// Returns the frequency of the APB1 pub const fn pclk1(&self) -> Hertz { Hertz(self.sysclk.0 / self.apb1_psc as u32) } /// Returns the frequency of the APB2 pub const fn pclk2(&self) -> Hertz { Hertz(self.sysclk.0 / self.apb2_psc as u32) } /// Returns the frequency of the SysTick timer pub const fn systick(&self) -> Hertz { Hertz(self.sysclk.0 / 4) } /// Returns the frequency of the TIMER0 base clock pub fn timer0(&self) -> Hertz
/// Returns the frequency of the TIMER1..6 base clock pub fn timerx(&self) -> Hertz { let pclk1 = self.pclk1(); if self.apb1_psc == 1 { pclk1 } else { Hertz(pclk1.0 * 2) } } /// Returns whether the USBCLK clock frequency is valid for the USB peripheral pub const fn usbclk_valid(&self) -> bool { self.usbclk_valid } } macro_rules! base_freq { ($($PER:ident => $func:ident,)+) => { $( impl BaseFrequency for crate::pac::$PER { #[inline(always)] fn base_frequency(rcu: &Rcu) -> Hertz { rcu.clocks.$func() } } )+ } } base_freq! { ADC0 => pclk2, ADC1 => pclk2, I2C0 => pclk1, I2C1 => pclk1, SPI0 => pclk2, SPI1 => pclk1, SPI2 => pclk1, TIMER0 => timer0, TIMER1 => timerx, TIMER2 => timerx, TIMER3 => timerx, TIMER4 => timerx, TIMER5 => timerx, TIMER6 => timerx, UART3 => pclk1, UART4 => pclk1, USART0 => pclk2, USART1 => pclk1, USART2 => pclk1, } pub(crate) mod closed_traits { use super::Rcu; use crate::time::Hertz; /// Enable/disable peripheral pub trait Enable { fn enable(rcu: &mut Rcu); fn disable(rcu: &mut Rcu); } /// Reset peripheral pub trait Reset { fn reset(rcu: &mut Rcu); } pub trait BaseFrequency { fn base_frequency(rcu: &Rcu) -> Hertz; } } pub(crate) use closed_traits::*; macro_rules! bus_enable { ($PER:ident => ($apben:ident, $peren:ident)) => { impl Enable for crate::pac::$PER { #[inline(always)] fn enable(rcu: &mut Rcu) { interrupt::free(|_| { rcu.regs.$apben.modify(|_, w| w.$peren().set_bit()); }); } #[inline(always)] fn disable(rcu: &mut Rcu) { interrupt::free(|_| { rcu.regs.$apben.modify(|_, w| w.$peren().clear_bit()); }); } } } } macro_rules! bus { ($($PER:ident => ($apben:ident, $apbrst:ident, $peren:ident, $perrst:ident),)+) => { $( bus_enable!($PER => ($apben, $peren)); impl Reset for crate::pac::$PER { #[inline(always)] fn reset(rcu: &mut Rcu) { interrupt::free(|_| { rcu.regs.$apbrst.modify(|_, w| w.$perrst().set_bit()); rcu.regs.$apbrst.modify(|_, w| w.$perrst().clear_bit()); }); } } )+ } } bus! { ADC0 => (apb2en, apb2rst, adc0en, adc0rst), ADC1 => (apb2en, apb2rst, adc1en, adc1rst), AFIO => (apb2en, apb2rst, afen, afrst), BKP => (apb1en, apb1rst, bkpien, bkpirst), CAN0 => (apb1en, apb1rst, can0en, can0rst), CAN1 => (apb1en, apb1rst, can1en, can1rst), DAC => (apb1en, apb1rst, dacen, dacrst), GPIOA => (apb2en, apb2rst, paen, parst), GPIOB => (apb2en, apb2rst, pben, pbrst), GPIOC => (apb2en, apb2rst, pcen, pcrst), GPIOD => (apb2en, apb2rst, pden, pdrst), GPIOE => (apb2en, apb2rst, peen, perst), I2C0 => (apb1en, apb1rst, i2c0en, i2c0rst), I2C1 => (apb1en, apb1rst, i2c1en, i2c1rst), PMU => (apb1en, apb1rst, pmuen, pmurst), SPI0 => (apb2en, apb2rst, spi0en, spi0rst), SPI1 => (apb1en, apb1rst, spi1en, spi1rst), SPI2 => (apb1en, apb1rst, spi2en, spi2rst), TIMER0 => (apb2en, apb2rst, timer0en, timer0rst), TIMER1 => (apb1en, apb1rst, timer1en, timer1rst), TIMER2 => (apb1en, apb1rst, timer2en, timer2rst), TIMER3 => (apb1en, apb1rst, timer3en, timer3rst), TIMER4 => (apb1en, apb1rst, timer4en, timer4rst), TIMER5 => (apb1en, apb1rst, timer5en, timer5rst), TIMER6 => (apb1en, apb1rst, timer6en, timer6rst), UART3 => (apb1en, apb1rst, uart3en, uart3rst), UART4 => (apb1en, apb1rst, uart4en, uart4rst), USART0 => (apb2en, apb2rst, usart0en, usart0rst), USART1 => (apb1en, apb1rst, usart1en, usart1rst), USART2 => (apb1en, apb1rst, usart2en, usart2rst), USBFS_GLOBAL => (ahben, ahbrst, usbfsen, usbfsrst), WWDGT => (apb1en, apb1rst, wwdgten, wwdgtrst), } bus_enable!(CRC => (ahben, crcen)); bus_enable!(DMA0 => (ahben, dma0en)); bus_enable!(DMA1 => (ahben, dma1en)); bus_enable!(EXMC => (ahben, exmcen));
{ let pclk2 = self.pclk2(); if self.apb2_psc == 1 { pclk2 } else { Hertz(pclk2.0 * 2) } }
identifier_body
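The middle of this record is the body of timer0, which encodes the usual APB rule: a timer is clocked at its APB frequency when the bus prescaler is 1, and at twice that frequency otherwise. A stand-alone sketch of that rule with a stand-in Hertz newtype (the real one lives in crate::time):

#[derive(Copy, Clone, Debug, PartialEq)]
struct Hertz(u32);

// APB clock: sysclk divided by the bus prescaler.
fn pclk(sysclk: Hertz, psc: u8) -> Hertz {
    Hertz(sysclk.0 / psc as u32)
}

// Timer base clock: equal to the APB clock when the prescaler is 1,
// otherwise twice the APB clock.
fn timer_clk(sysclk: Hertz, psc: u8) -> Hertz {
    let p = pclk(sysclk, psc);
    if psc == 1 { p } else { Hertz(p.0 * 2) }
}

fn main() {
    let sysclk = Hertz(108_000_000);
    // freeze() fixes APB1 to CK_AHB / 2, so TIMER1..6 run at sysclk again.
    assert_eq!(pclk(sysclk, 2), Hertz(54_000_000));
    assert_eq!(timer_clk(sysclk, 2), Hertz(108_000_000));
    // APB2 keeps prescaler 1, so TIMER0 runs at pclk2 directly.
    assert_eq!(timer_clk(sysclk, 1), Hertz(108_000_000));
    println!("clock math ok");
}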
rcu.rs
//! Reset and clock unit use crate::pac::RCU; use riscv::interrupt; use crate::time::Hertz; use core::cmp; /// Extension trait that sets up the `RCU` peripheral pub trait RcuExt { /// Configure the clocks of the `RCU` peripheral fn configure(self) -> UnconfiguredRcu; } impl RcuExt for RCU { fn configure(self) -> UnconfiguredRcu { UnconfiguredRcu::new(self) } } /// Configured RCU peripheral pub struct Rcu { /// Frozen clock frequencies pub clocks: Clocks, pub(crate) regs: RCU, } pub struct UnconfiguredRcu { hxtal: Option<u32>, sysclk: Option<u32>, regs: RCU, } impl UnconfiguredRcu { fn new(rcu: RCU) -> Self { Self { hxtal: None, sysclk: None, regs: rcu, } } /// Uses an external oscillator instead of IRC8M (internal RC oscillator) as the high-speed /// clock source. Will result in a hang if an external oscillator is not connected or it fails /// to start. pub fn ext_hf_clock(mut self, freq: impl Into<Hertz>) -> Self { let freq = freq.into().0; assert!(4_000_000 <= freq && freq <= 32_000_000); self.hxtal = Some(freq); self } /// Sets the desired frequency for the SYSCLK clock pub fn sysclk(mut self, freq: impl Into<Hertz>) -> Self { let freq = freq.into().0; assert!(freq <= 108_000_000); self.sysclk = Some(freq); self } /// Freezes clock configuration, making it effective pub fn freeze(self) -> Rcu { const IRC8M: u32 = 8_000_000; let target_sysclk = self.sysclk.unwrap_or(IRC8M); let (scs_bits, use_pll) = match (self.hxtal, target_sysclk) { (Some(freq), sysclk) if freq == sysclk => (0b01, false), (None, sysclk) if IRC8M == sysclk => (0b00, false), _ => (0b10, true), }; let pllsel_bit; let predv0_bits; let pllmf_bits; if use_pll { let pllmf; if let Some(hxtal_freq) = self.hxtal { // Use external clock + divider pllsel_bit = true; let calculate_pll = |source: u32, target: u32| -> Option<(u8, u8)> { const PLL_IN_MIN: u32 = 600_000; let div_max = cmp::min(16, source / PLL_IN_MIN); for d in 1..=div_max { let pllsource = source / d; let pllm = target / pllsource; if pllm < 2 || pllm == 15 || pllm > 32{ continue; } let actual_freq = pllsource * pllm; if actual_freq == target { return Some((d as u8, pllm as u8)); } } None }; let (d, m) = calculate_pll(hxtal_freq, target_sysclk).expect("invalid sysclk value"); predv0_bits = d - 1; pllmf = m; } else { // IRC8M/2 is used as an input clock pllsel_bit = false; let pllsource = IRC8M / 2; let m = target_sysclk / pllsource; let m = cmp::max(2, cmp::min(m, 32)); assert_ne!(m, 15, "invalid sysclk value"); let actual_sysclk = pllsource * m; assert_eq!(target_sysclk, actual_sysclk, "invalid sysclk value"); predv0_bits = 0; pllmf = m as u8; } pllmf_bits = match pllmf { 2..=14 => pllmf - 2, 16..=32 => pllmf - 1, _ => unreachable!("invalid pll multiplier"), }; } else { pllsel_bit = false; predv0_bits = 0; pllmf_bits = 0; } // Switch to the internal clock let rcu = unsafe { &*crate::pac::RCU::ptr() }; rcu.ctl.modify(|_, w| w.irc8men().set_bit()); // Enable IRC8M oscillator while rcu.ctl.read().irc8mstb().bit_is_clear() {} // Wait for oscillator to stabilize rcu.cfg0.modify(|_, w| unsafe { w.scs().bits(0b00) }); // Switch to the internal oscillator rcu.ctl.modify(|_, w| w.pllen().clear_bit()); // Disable PLL // Set bus prescalers rcu.cfg0.modify(|_, w| unsafe { w.ahbpsc().bits(0b0000) }); // CK_SYS rcu.cfg0.modify(|_, w| unsafe { w.apb1psc().bits(0b100) }); // CK_AHB / 2 rcu.cfg0.modify(|_, w| unsafe { w.apb2psc().bits(0b000) }); // CK_AHB let apb1_psc = 2; let apb2_psc = 1; if self.hxtal.is_some() { // Enable external oscillator rcu.ctl.modify(|_, w| 
w.hxtalen().set_bit()); // Wait for oscillator to stabilize while rcu.ctl.read().hxtalstb().bit_is_clear() {} // Select HXTAL as prescaler input source clock rcu.cfg1.modify(|_, w| w.predv0sel().clear_bit()); // Configure the prescaler rcu.cfg1.modify(|_, w| unsafe { w.predv0().bits(predv0_bits) }); } if use_pll { // Configure PLL input selector rcu.cfg0.modify(|_, w| w.pllsel().bit(pllsel_bit)); // Configure PLL multiplier rcu.cfg0.modify(|_, w| unsafe { w .pllmf_4().bit(pllmf_bits & 0x10!= 0) .pllmf_3_0().bits(pllmf_bits & 0xf) }); // Enable PLL rcu.ctl.modify(|_, w| w.pllen().set_bit()); // Wait for PLL to stabilize while rcu.ctl.read().pllstb().bit_is_clear() {} } else { // Disable PLL rcu.ctl.modify(|_, w| w.pllen().clear_bit()); } // Switch to the configured clock source rcu.cfg0.modify(|_, w| unsafe { w.scs().bits(scs_bits) }); let usbclk_valid; if use_pll { let pllclk = target_sysclk; let (valid, pr) = match pllclk { 48_000_000 => (true, 0b01), // pllclk / 1 72_000_000 => (true, 0b00), // pllclk / 1.5 96_000_000 => (true, 0b11), // pllclk / 2 _ => (false, 0), }; usbclk_valid = valid; // Configure USB prescaler rcu.cfg0.modify(|_, w| unsafe { w.usbfspsc().bits(pr) }); } else { usbclk_valid = false; } let clocks = Clocks { sysclk: Hertz(target_sysclk), apb1_psc, apb2_psc, usbclk_valid }; Rcu { clocks, regs: self.regs } } } #[derive(Copy, Clone)] pub struct Clocks { sysclk: Hertz, apb1_psc: u8, apb2_psc: u8, usbclk_valid: bool, } impl Clocks { /// Returns the system (core) frequency pub const fn sysclk(&self) -> Hertz { self.sysclk } /// Returns the frequency of the AHB pub const fn hclk(&self) -> Hertz { self.sysclk } /// Returns the frequency of the APB1 pub const fn pclk1(&self) -> Hertz { Hertz(self.sysclk.0 / self.apb1_psc as u32) } /// Returns the frequency of the APB2 pub const fn pclk2(&self) -> Hertz { Hertz(self.sysclk.0 / self.apb2_psc as u32) } /// Returns the frequency of the SysTick timer pub const fn
(&self) -> Hertz { Hertz(self.sysclk.0 / 4) } /// Returns the frequency of the TIMER0 base clock pub fn timer0(&self) -> Hertz { let pclk2 = self.pclk2(); if self.apb2_psc == 1 { pclk2 } else { Hertz(pclk2.0 * 2) } } /// Returns the frequency of the TIMER1..6 base clock pub fn timerx(&self) -> Hertz { let pclk1 = self.pclk1(); if self.apb1_psc == 1 { pclk1 } else { Hertz(pclk1.0 * 2) } } /// Returns whether the USBCLK clock frequency is valid for the USB peripheral pub const fn usbclk_valid(&self) -> bool { self.usbclk_valid } } macro_rules! base_freq { ($($PER:ident => $func:ident,)+) => { $( impl BaseFrequency for crate::pac::$PER { #[inline(always)] fn base_frequency(rcu: &Rcu) -> Hertz { rcu.clocks.$func() } } )+ } } base_freq! { ADC0 => pclk2, ADC1 => pclk2, I2C0 => pclk1, I2C1 => pclk1, SPI0 => pclk2, SPI1 => pclk1, SPI2 => pclk1, TIMER0 => timer0, TIMER1 => timerx, TIMER2 => timerx, TIMER3 => timerx, TIMER4 => timerx, TIMER5 => timerx, TIMER6 => timerx, UART3 => pclk1, UART4 => pclk1, USART0 => pclk2, USART1 => pclk1, USART2 => pclk1, } pub(crate) mod closed_traits { use super::Rcu; use crate::time::Hertz; /// Enable/disable peripheral pub trait Enable { fn enable(rcu: &mut Rcu); fn disable(rcu: &mut Rcu); } /// Reset peripheral pub trait Reset { fn reset(rcu: &mut Rcu); } pub trait BaseFrequency { fn base_frequency(rcu: &Rcu) -> Hertz; } } pub(crate) use closed_traits::*; macro_rules! bus_enable { ($PER:ident => ($apben:ident, $peren:ident)) => { impl Enable for crate::pac::$PER { #[inline(always)] fn enable(rcu: &mut Rcu) { interrupt::free(|_| { rcu.regs.$apben.modify(|_, w| w.$peren().set_bit()); }); } #[inline(always)] fn disable(rcu: &mut Rcu) { interrupt::free(|_| { rcu.regs.$apben.modify(|_, w| w.$peren().clear_bit()); }); } } } } macro_rules! bus { ($($PER:ident => ($apben:ident, $apbrst:ident, $peren:ident, $perrst:ident),)+) => { $( bus_enable!($PER => ($apben, $peren)); impl Reset for crate::pac::$PER { #[inline(always)] fn reset(rcu: &mut Rcu) { interrupt::free(|_| { rcu.regs.$apbrst.modify(|_, w| w.$perrst().set_bit()); rcu.regs.$apbrst.modify(|_, w| w.$perrst().clear_bit()); }); } } )+ } } bus! 
{ ADC0 => (apb2en, apb2rst, adc0en, adc0rst), ADC1 => (apb2en, apb2rst, adc1en, adc1rst), AFIO => (apb2en, apb2rst, afen, afrst), BKP => (apb1en, apb1rst, bkpien, bkpirst), CAN0 => (apb1en, apb1rst, can0en, can0rst), CAN1 => (apb1en, apb1rst, can1en, can1rst), DAC => (apb1en, apb1rst, dacen, dacrst), GPIOA => (apb2en, apb2rst, paen, parst), GPIOB => (apb2en, apb2rst, pben, pbrst), GPIOC => (apb2en, apb2rst, pcen, pcrst), GPIOD => (apb2en, apb2rst, pden, pdrst), GPIOE => (apb2en, apb2rst, peen, perst), I2C0 => (apb1en, apb1rst, i2c0en, i2c0rst), I2C1 => (apb1en, apb1rst, i2c1en, i2c1rst), PMU => (apb1en, apb1rst, pmuen, pmurst), SPI0 => (apb2en, apb2rst, spi0en, spi0rst), SPI1 => (apb1en, apb1rst, spi1en, spi1rst), SPI2 => (apb1en, apb1rst, spi2en, spi2rst), TIMER0 => (apb2en, apb2rst, timer0en, timer0rst), TIMER1 => (apb1en, apb1rst, timer1en, timer1rst), TIMER2 => (apb1en, apb1rst, timer2en, timer2rst), TIMER3 => (apb1en, apb1rst, timer3en, timer3rst), TIMER4 => (apb1en, apb1rst, timer4en, timer4rst), TIMER5 => (apb1en, apb1rst, timer5en, timer5rst), TIMER6 => (apb1en, apb1rst, timer6en, timer6rst), UART3 => (apb1en, apb1rst, uart3en, uart3rst), UART4 => (apb1en, apb1rst, uart4en, uart4rst), USART0 => (apb2en, apb2rst, usart0en, usart0rst), USART1 => (apb1en, apb1rst, usart1en, usart1rst), USART2 => (apb1en, apb1rst, usart2en, usart2rst), USBFS_GLOBAL => (ahben, ahbrst, usbfsen, usbfsrst), WWDGT => (apb1en, apb1rst, wwdgten, wwdgtrst), } bus_enable!(CRC => (ahben, crcen)); bus_enable!(DMA0 => (ahben, dma0en)); bus_enable!(DMA1 => (ahben, dma1en)); bus_enable!(EXMC => (ahben, exmcen));
systick
identifier_name
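The suffix of the last two records ends with the bus_enable!/bus! macros, which stamp out one trait impl per peripheral; the trailing bus_enable!-only invocations cover AHB peripherals that have an enable bit but no reset bit. A toy, host-runnable version of that macro_rules! pattern, using a mock register file in place of the PAC:

use std::collections::HashSet;

// Mock register file: just a set of "enabled" peripheral names.
#[derive(Default)]
struct Rcu {
    enabled: HashSet<&'static str>,
}

trait Enable {
    fn enable(rcu: &mut Rcu);
    fn disable(rcu: &mut Rcu);
}

// One macro arm generates the whole impl, in the same spirit as bus_enable!
// does against the real enable registers.
macro_rules! bus_enable {
    ($PER:ident) => {
        struct $PER;
        impl Enable for $PER {
            fn enable(rcu: &mut Rcu) {
                rcu.enabled.insert(stringify!($PER));
            }
            fn disable(rcu: &mut Rcu) {
                rcu.enabled.remove(stringify!($PER));
            }
        }
    };
}

bus_enable!(DMA0);
bus_enable!(CRC);

fn main() {
    let mut rcu = Rcu::default();
    DMA0::enable(&mut rcu);
    CRC::enable(&mut rcu);
    DMA0::disable(&mut rcu);
    assert!(rcu.enabled.contains("CRC"));
    assert!(!rcu.enabled.contains("DMA0"));
    println!("macro sketch ok");
}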