file_name (large_string, lengths 4-69) | prefix (large_string, lengths 0-26.7k) | suffix (large_string, lengths 0-24.8k) | middle (large_string, lengths 0-2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
graph.rs | //! A container for audio devices in an acyclic graph.
//!
//! A graph can be used when many audio devices need to connect in complex
//! topologies. It can connect each output channel of a device to any input
//! channel, provided that connection does not create a cycle.
//!
//! A graph is initialized by adding each device as a node in the graph, and
//! then specifying the edges between devices. The graph will automatically
//! process the devices in order of their dependencies.
//!
//! # Example
//!
//! The following example creates a graph with two different branches into
//! a stereo output. It feeds the microphone to the left channel, and
//! a low-passed oscillator into the right channel.
//!
//! ```no_run
//! use oxcable::filters::first_order::{Filter, LowPass};
//! use oxcable::graph::{DeviceGraph, Tick};
//! use oxcable::io::audio::AudioEngine;
//! use oxcable::oscillator::*;
//!
//! let engine = AudioEngine::with_buffer_size(256).unwrap();
//! let mut graph = DeviceGraph::new();
//!
//! // Add nodes to graph
//! let microphone = graph.add_node(engine.default_input(1).unwrap());
//! let oscillator = graph.add_node(Oscillator::new(Sine).freq(440.0));
//! let filter = graph.add_node(Filter::new(LowPass(8000f32), 1));
//! let speaker = graph.add_node(engine.default_output(2).unwrap());
//!
//! // Connect devices together
//! graph.add_edge(microphone, 0, speaker, 0);
//! graph.add_edge(oscillator, 0, filter, 0);
//! graph.add_edge(filter, 0, speaker, 1);
//!
//! // Play audio ad nauseam.
//! graph.tick_forever();
//! ```
use std::collections::VecDeque;
use error::{Error, Result};
use types::{AudioDevice, Sample, Time};
pub use tick::Tick;
/// An acyclic graph for audio devices.
pub struct DeviceGraph {
nodes: Vec<AudioNode>, // the actual nodes
topology: Vec<usize>, // the order to tick the nodes
bus: Vec<Sample>, // the audio bus to write samples to
time: Time // the next timestep
}
impl DeviceGraph {
/// Creates an empty graph.
pub fn new() -> Self {
DeviceGraph {
nodes: Vec::new(),
topology: Vec::new(),
bus: Vec::new(),
time: 0
}
}
/// Adds a new device into the graph, with no connections. Returns
/// an identifier that refers back to this device.
pub fn add_node<D>(&mut self, device: D) -> AudioNodeIdx
where D:'static+AudioDevice {
let node = AudioNode::new(device, &mut self.bus);
let idx = self.nodes.len();
self.nodes.push(node);
self.topology.push(idx);
AudioNodeIdx(idx)
}
/// Connects two devices in the graph.
///
/// * `src` and `dest` are identifiers for the actual devices to connect.
/// * `src_ch` and `dest_ch` are the channel indices of the two devices.
///
/// If invalid indices are provided, or if the specified edge would create
/// a cycle in the graph, an Err is returned and no changes to the graph are
/// made.
pub fn add_edge(&mut self, src: AudioNodeIdx, src_ch: usize,
dest: AudioNodeIdx, dest_ch: usize) -> Result<()> {
// Check device indices
let AudioNodeIdx(src_i) = src;
let AudioNodeIdx(dest_i) = dest;
if src_i >= self.nodes.len() {
return Err(Error::OutOfRange("src"));
} else if dest_i >= self.nodes.len() {
return Err(Error::OutOfRange("dest"));
}
// Check channels
if self.nodes[src_i].device.num_outputs() <= src_ch {
return Err(Error::OutOfRange("src_ch"));
}
if self.nodes[dest_i].device.num_inputs() <= dest_ch {
return Err(Error::OutOfRange("dest_ch"));
}
while self.nodes[dest_i].inputs.len() < dest_ch {
self.nodes[dest_i].inputs.push(None);
}
// Set input
let (start,_) = self.nodes[src_i].outputs;
self.nodes[dest_i].inputs[dest_ch] = Some(start+src_ch);
self.topological_sort(dest_i, dest_ch)
}
/// Determines the topology of our device graph. If the graph has a cycle,
/// then we remove the last edge. Otherwise, we set self.topology to
/// a topologically sorted order.
fn topological_sort(&mut self, dest_i: usize, dest_ch: usize) -> Result<()> {
// Initialize our set of input edges, and our set of edgeless nodes
let mut topology = Vec::new();
let mut inputs: Vec<Vec<_>> = self.nodes.iter().map(
|node| node.inputs.iter().filter_map(|&o| o).collect()
).collect();
let mut no_inputs: VecDeque<_> = inputs.iter().enumerate().filter_map(
|(i, ins)| if ins.len() == 0 { Some(i) } else { None }
).collect();
// While there are nodes with no input, we choose one, add it as the
// next node in our topology, and remove all edges from that node. Any
// nodes that lose their final edge are added to the edgeless set.
loop {
match no_inputs.pop_front() {
Some(i) => {
topology.push(i);
let (out_start, out_end) = self.nodes[i].outputs;
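// Remove every edge leaving this node (Kahn's algorithm); any node that
// loses its last remaining input edge becomes ready to schedule next.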
for out in out_start..out_end {
for (j, ins) in inputs.iter_mut().enumerate() {
let mut idx = None;
for k in 0..ins.len() {
if ins[k] == out {
idx = Some(k);
break;
}
}
match idx {
Some(k) => {
ins.swap_remove(k);
if ins.len() == 0 {
no_inputs.push_back(j);
}
},
None => ()
}
}
}
},
None => break
}
}
if topology.len() == self.nodes.len() {
self.topology = topology;
Ok(())
} else {
self.nodes[dest_i].inputs[dest_ch] = None;
Err(Error::CreatesCycle)
}
}
}
impl Tick for DeviceGraph {
fn tick(&mut self) {
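// Nodes are processed in topologically sorted order, so every device
// reads inputs already written to the bus earlier in this timestep.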
for &i in self.topology.iter() {
self.nodes[i].tick(self.time, &mut self.bus);
}
self.time += 1;
}
}
/// An identifier used to refer back to a node in the graph.
#[derive(Copy, Clone, Debug)]
pub struct AudioNodeIdx(usize);
/// A wrapper for a node in the graph.
///
/// Management of indices in the bus is handled in the graph itself.
struct AudioNode {
device: Box<AudioDevice>, // wraps the device
inputs: Vec<Option<usize>>, // bus indices of the inputs
input_buf: Vec<Sample>, // an allocated buffer for containing inputs
outputs: (usize, usize) // the range of outputs in the bus
}
impl AudioNode {
/// Wraps the device in a new node
fn new<D>(device: D, bus: &mut Vec<Sample>) -> AudioNode
where D:'static+AudioDevice {
let num_in = device.num_inputs();
let num_out = device.num_outputs();
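// Reserve a contiguous block of bus slots, one per output channel,
// initialized to silence; the device writes its outputs here each tick.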
let start = bus.len();
for _ in 0..num_out {
bus.push(0.0);
}
let end = bus.len();
AudioNode {
device: Box::new(device),
inputs: vec![None; num_in],
input_buf: vec![0.0; num_in],
outputs: (start, end)
}
}
/// Extracts the inputs from the bus, ticks the device and places the outputs
/// back into the bus.
fn tick(&mut self, t: Time, bus: &mut[Sample]) {
for (i, ch) in self.inputs.iter().enumerate() {
self.input_buf[i] = ch.map_or(0.0, |j| bus[j]);
}
let (start, end) = self.outputs;
self.device.tick(t, &self.input_buf, &mut bus[start..end]);
}
}
#[cfg(test)]
mod test {
use testing::MockAudioDevice;
use super::{DeviceGraph, Tick};
#[test]
fn test_empty_graph() {
DeviceGraph::new().tick();
}
#[test]
fn test_one_node() {
let mut mock = MockAudioDevice::new("mock", 1, 1);
mock.will_tick(&[0.0], &[1.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock);
graph.tick();
}
#[test]
fn test_disconnected() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[0.0], &[2.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock1);
graph.add_node(mock2);
graph.tick();
}
#[test]
fn test_linear() {
let mut mock1 = MockAudioDevice::new("mock1", 0, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 0);
mock1.will_tick(&[], &[1.0]);
mock2.will_tick(&[1.0], &[]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.tick();
}
#[test]
fn test_complex() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut mock3 = MockAudioDevice::new("mock3", 2, 1);
let mut mock4 = MockAudioDevice::new("mock4", 1, 1);
let mut mock5 = MockAudioDevice::new("mock5", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[4.0], &[2.0]);
mock3.will_tick(&[2.0, 4.0], &[3.0]);
mock4.will_tick(&[1.0], &[4.0]);
mock5.will_tick(&[0.0], &[5.0]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
let mock4 = graph.add_node(mock4);
let _mock5 = graph.add_node(mock5);
graph.add_edge(mock1, 0, mock4, 0).unwrap();
graph.add_edge(mock4, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock4, 0, mock3, 1).unwrap();
graph.tick();
}
#[test]
#[should_panic]
fn test_direct_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock1, 0).unwrap();
}
#[test]
#[should_panic]
fn test_indirect_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mock3 = MockAudioDevice::new("mock3", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock3, 0, mock1, 0).unwrap();
}
}
| { None } | conditional_block |
graph.rs | //! channel, provided that connection does not create a cycle.
//!
//! A graph is initialized by adding each device as a node in the graph, and
//! then specifying the edges between devices. The graph will automatically
//! process the devices in order of their dependencies.
//!
//! # Example
//!
//! The following example creates a graph with two different branches into
//! a stereo output. It feeds the microphone to the left channel, and
//! a low-passed oscillator into the right channel.
//!
//! ```no_run
//! use oxcable::filters::first_order::{Filter, LowPass};
//! use oxcable::graph::{DeviceGraph, Tick};
//! use oxcable::io::audio::AudioEngine;
//! use oxcable::oscillator::*;
//!
//! let engine = AudioEngine::with_buffer_size(256).unwrap();
//! let mut graph = DeviceGraph::new();
//!
//! // Add nodes to graph
//! let microphone = graph.add_node(engine.default_input(1).unwrap());
//! let oscillator = graph.add_node(Oscillator::new(Sine).freq(440.0));
//! let filter = graph.add_node(Filter::new(LowPass(8000f32), 1));
//! let speaker = graph.add_node(engine.default_output(2).unwrap());
//!
//! // Connect devices together
//! graph.add_edge(microphone, 0, speaker, 0);
//! graph.add_edge(oscillator, 0, filter, 0);
//! graph.add_edge(filter, 0, speaker, 1);
//!
//! // Play audio ad nauseam.
//! graph.tick_forever();
//! ```
use std::collections::VecDeque;
use error::{Error, Result};
use types::{AudioDevice, Sample, Time};
pub use tick::Tick;
/// An acyclic graph for audio devices.
pub struct DeviceGraph {
nodes: Vec<AudioNode>, // the actual nodes
topology: Vec<usize>, // the order to tick the nodes
bus: Vec<Sample>, // the audio bus to write samples to
time: Time // the next timestep
}
impl DeviceGraph {
/// Creates an empty graph.
pub fn new() -> Self {
DeviceGraph {
nodes: Vec::new(),
topology: Vec::new(),
bus: Vec::new(),
time: 0
}
}
/// Adds a new device into the graph, with no connections. Returns
/// an identifier that refers back to this device.
pub fn add_node<D>(&mut self, device: D) -> AudioNodeIdx
where D:'static+AudioDevice {
let node = AudioNode::new(device, &mut self.bus);
let idx = self.nodes.len();
self.nodes.push(node);
self.topology.push(idx);
AudioNodeIdx(idx)
}
/// Connects two devices in the graph.
///
/// * `src` and `dest` are identifiers for the actual devices to connect.
/// * `src_ch` and `dest_ch` are the channel indices of the two devices.
///
/// If invalid indices are provided, or if the specified edge would create
/// a cycle in the graph, an Err is returned and no changes to the graph are
/// made.
pub fn add_edge(&mut self, src: AudioNodeIdx, src_ch: usize,
dest: AudioNodeIdx, dest_ch: usize) -> Result<()> {
// Check device indices
let AudioNodeIdx(src_i) = src;
let AudioNodeIdx(dest_i) = dest;
if src_i >= self.nodes.len() {
return Err(Error::OutOfRange("src"));
} else if dest_i >= self.nodes.len() {
return Err(Error::OutOfRange("dest"));
}
// Check channels
if self.nodes[src_i].device.num_outputs() <= src_ch {
return Err(Error::OutOfRange("src_ch"));
}
if self.nodes[dest_i].device.num_inputs() <= dest_ch {
return Err(Error::OutOfRange("dest_ch"));
}
while self.nodes[dest_i].inputs.len() < dest_ch {
self.nodes[dest_i].inputs.push(None);
}
// Set input
let (start,_) = self.nodes[src_i].outputs;
self.nodes[dest_i].inputs[dest_ch] = Some(start+src_ch);
self.topological_sort(dest_i, dest_ch)
}
/// Determines the topology of our device graph. If the graph has a cycle,
/// then we remove the last edge. Otherwise, we set self.topology to
/// a topologically sorted order.
fn topological_sort(&mut self, dest_i: usize, dest_ch: usize) -> Result<()> {
// Initialize our set of input edges, and our set of edgeless nodes
let mut topology = Vec::new();
let mut inputs: Vec<Vec<_>> = self.nodes.iter().map(
|node| node.inputs.iter().filter_map(|&o| o).collect()
).collect();
let mut no_inputs: VecDeque<_> = inputs.iter().enumerate().filter_map(
|(i, ins)| if ins.len() == 0 { Some(i) } else { None }
).collect();
// While there are nodes with no input, we choose one, add it as the
// next node in our topology, and remove all edges from that node. Any
// nodes that lose their final edge are added to the edgeless set.
loop {
match no_inputs.pop_front() {
Some(i) => {
topology.push(i);
let (out_start, out_end) = self.nodes[i].outputs;
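// Remove every edge leaving this node (Kahn's algorithm); any node that
// loses its last remaining input edge becomes ready to schedule next.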
for out in out_start..out_end {
for (j, ins) in inputs.iter_mut().enumerate() {
let mut idx = None;
for k in 0..ins.len() {
if ins[k] == out {
idx = Some(k);
break;
}
}
match idx {
Some(k) => {
ins.swap_remove(k);
if ins.len() == 0 {
no_inputs.push_back(j);
}
},
None => ()
}
}
}
},
None => break
}
}
if topology.len() == self.nodes.len() {
self.topology = topology;
Ok(())
} else {
self.nodes[dest_i].inputs[dest_ch] = None;
Err(Error::CreatesCycle)
}
}
}
impl Tick for DeviceGraph {
fn tick(&mut self) {
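// Nodes are processed in topologically sorted order, so every device
// reads inputs already written to the bus earlier in this timestep.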
for &i in self.topology.iter() {
self.nodes[i].tick(self.time, &mut self.bus);
}
self.time += 1;
}
}
/// An identifier used to refer back to a node in the graph.
#[derive(Copy, Clone, Debug)]
pub struct AudioNodeIdx(usize);
/// A wrapper for a node in the graph.
///
/// Management of indices in the bus is handled in the graph itself.
struct AudioNode {
device: Box<AudioDevice>, // wraps the device
inputs: Vec<Option<usize>>, // bus indices of the inputs
input_buf: Vec<Sample>, // an allocated buffer for containing inputs
outputs: (usize, usize) // the range of outputs in the bus
}
impl AudioNode {
/// Wraps the device in a new node
fn new<D>(device: D, bus: &mut Vec<Sample>) -> AudioNode
where D:'static+AudioDevice {
let num_in = device.num_inputs();
let num_out = device.num_outputs();
let start = bus.len();
for _ in 0..num_out {
bus.push(0.0);
}
let end = bus.len();
AudioNode {
device: Box::new(device),
inputs: vec![None; num_in],
input_buf: vec![0.0; num_in],
outputs: (start, end)
}
}
/// Extracts the inputs from the bus, ticks the device and places the outputs
/// back into the bus.
fn tick(&mut self, t: Time, bus: &mut[Sample]) {
for (i, ch) in self.inputs.iter().enumerate() {
self.input_buf[i] = ch.map_or(0.0, |j| bus[j]);
}
let (start, end) = self.outputs;
self.device.tick(t, &self.input_buf, &mut bus[start..end]);
}
}
#[cfg(test)]
mod test {
use testing::MockAudioDevice;
use super::{DeviceGraph, Tick};
#[test]
fn test_empty_graph() {
DeviceGraph::new().tick();
}
#[test]
fn test_one_node() {
let mut mock = MockAudioDevice::new("mock", 1, 1);
mock.will_tick(&[0.0], &[1.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock);
graph.tick();
}
#[test]
fn test_disconnected() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[0.0], &[2.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock1);
graph.add_node(mock2);
graph.tick();
}
#[test]
fn test_linear() {
let mut mock1 = MockAudioDevice::new("mock1", 0, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 0);
mock1.will_tick(&[], &[1.0]);
mock2.will_tick(&[1.0], &[]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.tick();
}
#[test]
fn test_complex() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut mock3 = MockAudioDevice::new("mock3", 2, 1);
let mut mock4 = MockAudioDevice::new("mock4", 1, 1);
let mut mock5 = MockAudioDevice::new("mock5", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[4.0], &[2.0]);
mock3.will_tick(&[2.0, 4.0], &[3.0]);
mock4.will_tick(&[1.0], &[4.0]);
mock5.will_tick(&[0.0], &[5.0]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
let mock4 = graph.add_node(mock4);
let _mock5 = graph.add_node(mock5);
graph.add_edge(mock1, 0, mock4, 0).unwrap();
graph.add_edge(mock4, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock4, 0, mock3, 1).unwrap();
graph.tick();
}
#[test]
#[should_panic]
fn test_direct_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock1, 0).unwrap();
}
#[test]
#[should_panic]
fn test_indirect_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mock3 = MockAudioDevice::new("mock3", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock3, 0, mock1, 0).unwrap();
}
} | //! A container for audio devices in an acyclic graph.
//!
//! A graph can be used when many audio devices need to connect in complex
//! topologies. It can connect each output channel of a device to any input | random_line_split |
|
graph.rs | //! A container for audio devices in an acyclic graph.
//!
//! A graph can be used when many audio devices need to connect in complex
//! topologies. It can connect each output channel of a device to any input
//! channel, provided that connection does not create a cycle.
//!
//! A graph is initialized by adding each device as a node in the graph, and
//! then specifying the edges between devices. The graph will automatically
//! process the devices in order of their dependencies.
//!
//! # Example
//!
//! The following example creates a graph with two different branches into
//! a stereo output. It feeds the microphone to the left channel, and
//! a low-passed oscillator into the right channel.
//!
//! ```no_run
//! use oxcable::filters::first_order::{Filter, LowPass};
//! use oxcable::graph::{DeviceGraph, Tick};
//! use oxcable::io::audio::AudioEngine;
//! use oxcable::oscillator::*;
//!
//! let engine = AudioEngine::with_buffer_size(256).unwrap();
//! let mut graph = DeviceGraph::new();
//!
//! // Add nodes to graph
//! let microphone = graph.add_node(engine.default_input(1).unwrap());
//! let oscillator = graph.add_node(Oscillator::new(Sine).freq(440.0));
//! let filter = graph.add_node(Filter::new(LowPass(8000f32), 1));
//! let speaker = graph.add_node(engine.default_output(2).unwrap());
//!
//! // Connect devices together
//! graph.add_edge(microphone, 0, speaker, 0);
//! graph.add_edge(oscillator, 0, filter, 0);
//! graph.add_edge(filter, 0, speaker, 1);
//!
//! // Play audio ad nauseam.
//! graph.tick_forever();
//! ```
use std::collections::VecDeque;
use error::{Error, Result};
use types::{AudioDevice, Sample, Time};
pub use tick::Tick;
/// An acyclic graph for audio devices.
pub struct DeviceGraph {
nodes: Vec<AudioNode>, // the actual nodes
topology: Vec<usize>, // the order to tick the nodes
bus: Vec<Sample>, // the audio bus to write samples to
time: Time // the next timestep
}
impl DeviceGraph {
/// Creates an empty graph.
pub fn new() -> Self {
DeviceGraph {
nodes: Vec::new(),
topology: Vec::new(),
bus: Vec::new(),
time: 0
}
}
/// Adds a new device into the graph, with no connections. Returns
/// an identifier that refers back to this device.
pub fn add_node<D>(&mut self, device: D) -> AudioNodeIdx
where D:'static+AudioDevice {
let node = AudioNode::new(device, &mut self.bus);
let idx = self.nodes.len();
self.nodes.push(node);
self.topology.push(idx);
AudioNodeIdx(idx)
}
/// Connects two devices in the graph.
///
/// * `src` and `dest` are identifiers for the actual devices to connect.
/// * `src_ch` and `dest_ch` are the channel indices of the two devices.
///
/// If invalid indices are provided, or if the specified edge would create
/// a cycle in the graph, an Err is returned and no changes to the graph are
/// made.
pub fn add_edge(&mut self, src: AudioNodeIdx, src_ch: usize,
dest: AudioNodeIdx, dest_ch: usize) -> Result<()> {
// Check device indices
let AudioNodeIdx(src_i) = src;
let AudioNodeIdx(dest_i) = dest;
if src_i >= self.nodes.len() {
return Err(Error::OutOfRange("src"));
} else if dest_i >= self.nodes.len() {
return Err(Error::OutOfRange("dest"));
}
// Check channels
if self.nodes[src_i].device.num_outputs() <= src_ch {
return Err(Error::OutOfRange("src_ch"));
}
if self.nodes[dest_i].device.num_inputs() <= dest_ch {
return Err(Error::OutOfRange("dest_ch"));
}
while self.nodes[dest_i].inputs.len() < dest_ch {
self.nodes[dest_i].inputs.push(None);
}
// Set input
let (start,_) = self.nodes[src_i].outputs;
self.nodes[dest_i].inputs[dest_ch] = Some(start+src_ch);
self.topological_sort(dest_i, dest_ch)
}
/// Determines the topology of our device graph. If the graph has a cycle,
/// then we remove the last edge. Otherwise, we set self.topology to
/// a topologically sorted order.
fn topological_sort(&mut self, dest_i: usize, dest_ch: usize) -> Result<()> {
// Initialize our set of input edges, and our set of edgeless nodes
let mut topology = Vec::new();
let mut inputs: Vec<Vec<_>> = self.nodes.iter().map(
|node| node.inputs.iter().filter_map(|&o| o).collect()
).collect();
let mut no_inputs: VecDeque<_> = inputs.iter().enumerate().filter_map(
|(i, ins)| if ins.len() == 0 { Some(i) } else { None }
).collect();
// While there are nodes with no input, we choose one, add it as the
// next node in our topology, and remove all edges from that node. Any
// nodes that lose their final edge are added to the edgeless set.
loop {
match no_inputs.pop_front() {
Some(i) => {
topology.push(i);
let (out_start, out_end) = self.nodes[i].outputs;
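// Remove every edge leaving this node (Kahn's algorithm); any node that
// loses its last remaining input edge becomes ready to schedule next.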
for out in out_start..out_end {
for (j, ins) in inputs.iter_mut().enumerate() {
let mut idx = None;
for k in 0..ins.len() {
if ins[k] == out {
idx = Some(k);
break;
}
}
match idx {
Some(k) => {
ins.swap_remove(k);
if ins.len() == 0 {
no_inputs.push_back(j);
}
},
None => ()
}
}
}
},
None => break
}
}
if topology.len() == self.nodes.len() {
self.topology = topology;
Ok(())
} else {
self.nodes[dest_i].inputs[dest_ch] = None;
Err(Error::CreatesCycle)
}
}
}
impl Tick for DeviceGraph {
fn tick(&mut self) {
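// Nodes are processed in topologically sorted order, so every device
// reads inputs already written to the bus earlier in this timestep.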
for &i in self.topology.iter() {
self.nodes[i].tick(self.time, &mut self.bus);
}
self.time += 1;
}
}
/// An identifier used to refer back to a node in the graph.
#[derive(Copy, Clone, Debug)]
pub struct AudioNodeIdx(usize);
/// A wrapper for a node in the graph.
///
/// Management of indices in the bus is handled in the graph itself.
struct AudioNode {
device: Box<AudioDevice>, // wraps the device
inputs: Vec<Option<usize>>, // bus indices of the inputs
input_buf: Vec<Sample>, // an allocated buffer for containing inputs
outputs: (usize, usize) // the range of outputs in the bus
}
impl AudioNode {
/// Wraps the device in a new node
fn new<D>(device: D, bus: &mut Vec<Sample>) -> AudioNode
where D:'static+AudioDevice {
let num_in = device.num_inputs();
let num_out = device.num_outputs();
let start = bus.len();
for _ in 0..num_out {
bus.push(0.0);
}
let end = bus.len();
AudioNode {
device: Box::new(device),
inputs: vec![None; num_in],
input_buf: vec![0.0; num_in],
outputs: (start, end)
}
}
/// Extracts the inputs from the bus, ticks the device and places the outputs
/// back into the bus.
fn tick(&mut self, t: Time, bus: &mut[Sample]) {
for (i, ch) in self.inputs.iter().enumerate() {
self.input_buf[i] = ch.map_or(0.0, |j| bus[j]);
}
let (start, end) = self.outputs;
self.device.tick(t, &self.input_buf, &mut bus[start..end]);
}
}
#[cfg(test)]
mod test {
use testing::MockAudioDevice;
use super::{DeviceGraph, Tick};
#[test]
fn test_empty_graph() {
DeviceGraph::new().tick();
}
#[test]
fn test_one_node() {
let mut mock = MockAudioDevice::new("mock", 1, 1);
mock.will_tick(&[0.0], &[1.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock);
graph.tick();
}
#[test]
fn test_disconnected() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[0.0], &[2.0]);
let mut graph = DeviceGraph::new();
graph.add_node(mock1);
graph.add_node(mock2);
graph.tick();
}
#[test]
fn test_linear() {
let mut mock1 = MockAudioDevice::new("mock1", 0, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 0);
mock1.will_tick(&[], &[1.0]);
mock2.will_tick(&[1.0], &[]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.tick();
}
#[test]
fn test_complex() {
let mut mock1 = MockAudioDevice::new("mock1", 1, 1);
let mut mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut mock3 = MockAudioDevice::new("mock3", 2, 1);
let mut mock4 = MockAudioDevice::new("mock4", 1, 1);
let mut mock5 = MockAudioDevice::new("mock5", 1, 1);
mock1.will_tick(&[0.0], &[1.0]);
mock2.will_tick(&[4.0], &[2.0]);
mock3.will_tick(&[2.0, 4.0], &[3.0]);
mock4.will_tick(&[1.0], &[4.0]);
mock5.will_tick(&[0.0], &[5.0]);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
let mock4 = graph.add_node(mock4);
let _mock5 = graph.add_node(mock5);
graph.add_edge(mock1, 0, mock4, 0).unwrap();
graph.add_edge(mock4, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock4, 0, mock3, 1).unwrap();
graph.tick();
}
#[test]
#[should_panic]
fn test_direct_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock1, 0).unwrap();
}
#[test]
#[should_panic]
fn test_indirect_cycle() {
let mock1 = MockAudioDevice::new("mock1", 1, 1);
let mock2 = MockAudioDevice::new("mock2", 1, 1);
let mock3 = MockAudioDevice::new("mock3", 1, 1);
let mut graph = DeviceGraph::new();
let mock1 = graph.add_node(mock1);
let mock2 = graph.add_node(mock2);
let mock3 = graph.add_node(mock3);
graph.add_edge(mock1, 0, mock2, 0).unwrap();
graph.add_edge(mock2, 0, mock3, 0).unwrap();
graph.add_edge(mock3, 0, mock1, 0).unwrap();
}
}
| test_direct_cycle | identifier_name |
bwt.rs | use sa::{insert, suffix_array};
use std::ops::Index;
/// Generate the [Burrows-Wheeler Transform](https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform)
/// of the given input.
///
/// ``` rust
/// let text = String::from("The quick brown fox jumps over the lazy dog");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// assert_eq!(String::from("gkynxeser\u{0}l i hhv otTu c uwd rfm ebp qjoooza"),
/// String::from_utf8(bw).unwrap());
/// ```
/// The output can then be used for compression or FM-index'ing.
pub fn bwt(input: &[u8]) -> Vec<u8> {
suffix_array(input).into_iter().map(|i| {
// BWT[i] = S[SA[i] - 1]
if i == 0 { 0 } else { input[(i - 1) as usize] }
}).collect()
}
// Takes a frequency map of bytes and generates the index of first occurrence
// of each byte.
fn generate_occurrence_index(map: &mut Vec<u32>) {
let mut idx = 0;
for i in 0..map.len() {
let c = map[i];
map[i] = idx;
idx += c;
}
}
/// Invert the BWT and generate the original data.
///
/// ``` rust
/// let text = String::from("Hello, world!");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// let ibw = nucleic_acid::ibwt(&bw);
/// assert_eq!(text, String::from_utf8(ibw).unwrap());
/// ```
pub fn ibwt(input: &[u8]) -> Vec<u8> {
// get the byte distribution
let mut map = Vec::new();
for i in input {
insert(&mut map, *i);
}
generate_occurrence_index(&mut map);
// generate the LF vector
let mut lf = vec![0; input.len()];
for (i, c) in input.iter().enumerate() {
let byte = *c as usize;
let val = map[byte];
lf[i] = val;
map[byte] = val + 1;
}
let mut idx = 0;
// construct the sequence by traversing through the LF vector
let mut output = vec![0; input.len()];
for i in (0..(input.len() - 1)).rev() {
output[i] = input[idx];
idx = lf[idx] as usize;
}
output.pop();
output
}
/// [Ferragina-Manzini index](https://en.wikipedia.org/wiki/FM-index)
/// (or Full-text index in Minute space) for finding occurrences of substrings
/// in O(1) time.
///
/// ``` rust
/// use nucleic_acid::FMIndex;
///
/// let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
/// let index = FMIndex::new(text.as_bytes());
///
/// // count the occurrences
/// assert_eq!(0, index.count("CCCCC"));
/// assert_eq!(3, index.count("TG"));
///
/// //... or get their positions
/// assert_eq!(index.search("GCGT"), vec![46, 26, 0]);
/// ```
///
/// The current implementation of FM-index is a memory killer, since it stores positions
/// of **all bytes** in the given data. For the human genome (~3 GB), it consumed
/// ~27 GB of RAM to build the index (in ~4 mins).
///
/// That said, it still returns the match results in a few microseconds.
#[derive(Clone, Debug)]
pub struct FMIndex {
/// BW-transformed data
data: Vec<u8>,
/// forward frequency of each character in the BWT data
cache: Vec<u32>,
/// incremental character frequencies
occ_map: Vec<u32>,
/// LF-mapping for backward search
lf_vec: Vec<u32>,
}
impl FMIndex {
/// Generate an FM-index for the input data.
#[inline]
pub fn new(data: &[u8]) -> FMIndex {
FMIndex::new_from_bwt(bwt(data))
}
/// Get the reference to the inner BWT data.
///
/// Note that the length of BWT is one more than the length of the actual text,
/// since it has a null byte to indicate the empty string.
pub fn bwt(&self) -> &[u8] {
&self.data
}
/// Generate the FM-index from the BWT data.
///
/// It's not a good idea to generate FM-index from scratch all the time, especially for large inputs.
/// This would be very useful when your data is large and remains constant for a while.
///
/// FM-index internally uses BWT, and BWT is generated from the suffix array, which takes a lot of time.
/// If your input doesn't change, then it's better to get the BWT data (using `bwt` method), write it
/// to a file and generate the index from that in the future.
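///
/// A minimal sketch of that save-and-reload flow (the file name below is
/// just a placeholder for illustration):
///
/// ```no_run
/// use nucleic_acid::FMIndex;
/// use std::fs::File;
/// use std::io::{Read, Write};
///
/// // build the index once and persist only the BWT
/// let index = FMIndex::new(b"GATTACA" as &[u8]);
/// File::create("text.bwt").unwrap().write_all(index.bwt()).unwrap();
///
/// // later runs can skip the suffix array and rebuild from the saved BWT
/// let mut saved = Vec::new();
/// File::open("text.bwt").unwrap().read_to_end(&mut saved).unwrap();
/// let index = FMIndex::new_from_bwt(saved);
/// assert_eq!(1, index.count("ATTA"));
/// ```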
pub fn new_from_bwt(bwt_data: Vec<u8>) -> FMIndex {
let mut map = Vec::new();
let mut count = vec![0u32; bwt_data.len()];
let mut idx = 0;
// generate the frequency map and forward frequency vector from BWT
for i in &bwt_data {
let value = insert(&mut map, *i);
count[idx] = value;
idx += 1;
}
generate_occurrence_index(&mut map);
let mut lf_vec = count.clone();
let mut lf_occ_map = map.clone();
// generate the LF vector (just like inverting the BWT)
for (i, c) in bwt_data.iter().enumerate() {
let idx = *c as usize;
lf_vec[i] = lf_occ_map[idx];
lf_occ_map[idx] += 1;
}
let mut i = lf_vec[0] as usize;
lf_vec[0] = 0;
let mut counter = bwt_data.len() as u32 - 1;
// Only difference is that we replace the LF indices with the lengths of prefix
// from a particular position (in other words, the number of times
// it would take us to get to the start of string).
for _ in 0..(bwt_data.len() - 1) {
let next = lf_vec[i];
lf_vec[i] = counter;
i = next as usize;
counter -= 1;
}
FMIndex {
data: bwt_data,
cache: count,
occ_map: map,
lf_vec: lf_vec,
}
}
/// Get the nearest position of a character in the internal BWT data.
///
/// The `count` and `search` methods rely on this method for finding occurrences.
/// For example, we can do something like this,
///
/// ``` rust
/// use nucleic_acid::FMIndex;
/// let fm = FMIndex::new(b"Hello, Hello, Hello" as &[u8]);
///
/// // initially, the range should be the length of the BWT
/// let mut top = 0;
/// let mut bottom = fm.bwt().len();
/// let query = b"llo";
///
/// // feed the characters in the reverse
/// for ch in query.iter().rev() {
/// top = fm.nearest(top, *ch);
/// bottom = fm.nearest(bottom, *ch);
/// if top >= bottom {
/// return
/// }
/// }
///
/// // If we get a valid range, then everything in that range is a valid match.
/// // This way, we can get both the count and positions...
/// assert_eq!(3, bottom - top);
/// assert_eq!(vec![17, 10, 3], (top..bottom).map(|i| fm[i]).collect::<Vec<_>>());
/// ```
///
/// This is backward searching. As you feed in the characters along with a position, `nearest` will
/// give you a new position in the index. Once the range becomes invalid (which happens when the
/// substring doesn't exist), we can bail out. On the contrary, if the range remains valid after
/// you've fed in all the characters, then every value within that range is an occurrence.
///
/// So, this is useful when you want to cache the repeating ranges. With this, you can build your own
/// count/search functions with caching. It's also useful for making custom approximate matching functions
/// by backtracking whenever there's an invalid range.
pub fn nearest(&self, idx: usize, ch: u8) -> usize {
match self.occ_map.get(ch as usize) {
Some(res) if *res > 0 => {
*res as usize + (0..idx).rev()
.find(|&i| self.data[i] == ch)
.map(|i| self.cache[i] as usize)
.unwrap_or(0)
},
_ => 0,
}
}
fn get_range(&self, query: &str) -> Option<(usize, usize)> {
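// Backward search: feed the query in reverse, narrowing the matching
// range of the BWT one character at a time; an empty range means no match.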
let mut top = 0;
let mut bottom = self.data.len();
for ch in query.as_bytes().iter().rev() {
top = self.nearest(top, *ch);
bottom = self.nearest(bottom, *ch);
if top >= bottom {
return None
}
}
if top >= bottom {
None
} else {
Some((top, bottom))
}
}
/// Count the occurrences of the substring in the original data.
pub fn count(&self, query: &str) -> usize {
match self.get_range(query) {
Some((top, bottom)) => bottom - top,
None => 0,
}
}
/// Get the positions of occurrences of substring in the original data.
pub fn search(&self, query: &str) -> Vec<usize> {
match self.get_range(query) {
Some((top, bottom)) => (top..bottom).map(|idx| {
let i = self.nearest(idx, self.data[idx]);
self.lf_vec[i] as usize
}).collect(),
None => Vec::new(),
}
}
}
impl Index<usize> for FMIndex {
type Output = u32;
fn index(&self, i: usize) -> &u32 {
self.lf_vec.get(i).expect("index out of range")
}
}
#[cfg(test)]
mod tests {
use super::{FMIndex, bwt, ibwt};
#[test]
fn test_bwt_and_ibwt() {
let text = String::from("ATCTAGGAGATCTGAATCTAGTTCAACTAGCTAGATCTAGAGACAGCTAA");
let bw = bwt(text.as_bytes());
let ibw = ibwt(&bw);
assert_eq!(String::from("AATCGGAGTTGCTTTG\u{0}AGTAGTGATTTTAAGAAAAAACCCCCCTAAAACG"),
String::from_utf8(bw).unwrap());
assert_eq!(text, String::from_utf8(ibw).unwrap());
}
#[test]
fn test_fm_index() {
let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
let index = FMIndex::new(text.as_bytes());
assert_eq!(0, index.count("CCCCC"));
let mut result = index.search("TG");
result.sort();
assert_eq!(result, vec![3, 15, 21]);
let mut result = index.search("GCGT");
result.sort();
assert_eq!(result, vec![0, 26, 46]);
assert_eq!(vec![1], index.search("CGTGCCC"));
}
}
| {
let mut map = Vec::new();
let mut count = vec![0u32; bwt_data.len()];
let mut idx = 0;
// generate the frequency map and forward frequency vector from BWT
for i in &bwt_data {
let value = insert(&mut map, *i);
count[idx] = value;
idx += 1;
}
generate_occurrence_index(&mut map);
let mut lf_vec = count.clone();
let mut lf_occ_map = map.clone();
// generate the LF vector (just like inverting the BWT)
for (i, c) in bwt_data.iter().enumerate() {
let idx = *c as usize;
lf_vec[i] = lf_occ_map[idx];
lf_occ_map[idx] += 1; | identifier_body |
bwt.rs | use sa::{insert, suffix_array};
use std::ops::Index;
/// Generate the [Burrows-Wheeler Transform](https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform)
/// of the given input.
///
/// ``` rust
/// let text = String::from("The quick brown fox jumps over the lazy dog");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// assert_eq!(String::from("gkynxeser\u{0}l i hhv otTu c uwd rfm ebp qjoooza"),
/// String::from_utf8(bw).unwrap());
/// ```
/// The output can then be used for compression or FM-index'ing.
pub fn bwt(input: &[u8]) -> Vec<u8> {
suffix_array(input).into_iter().map(|i| {
// BWT[i] = S[SA[i] - 1]
if i == 0 { 0 } else { input[(i - 1) as usize] }
}).collect()
}
// Takes a frequency map of bytes and generates the index of first occurrence
// of each byte.
fn generate_occurrence_index(map: &mut Vec<u32>) {
let mut idx = 0;
for i in 0..map.len() {
let c = map[i];
map[i] = idx;
idx += c;
}
}
/// Invert the BWT and generate the original data.
///
/// ``` rust
/// let text = String::from("Hello, world!");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// let ibw = nucleic_acid::ibwt(&bw);
/// assert_eq!(text, String::from_utf8(ibw).unwrap());
/// ```
pub fn ibwt(input: &[u8]) -> Vec<u8> {
// get the byte distribution
let mut map = Vec::new();
for i in input {
insert(&mut map, *i);
}
generate_occurrence_index(&mut map);
// generate the LF vector
let mut lf = vec![0; input.len()];
for (i, c) in input.iter().enumerate() {
let byte = *c as usize;
let val = map[byte];
lf[i] = val;
map[byte] = val + 1;
}
let mut idx = 0;
// construct the sequence by traversing through the LF vector
let mut output = vec![0; input.len()];
for i in (0..(input.len() - 1)).rev() {
output[i] = input[idx];
idx = lf[idx] as usize;
}
output.pop();
output
}
/// [Ferragina-Manzini index](https://en.wikipedia.org/wiki/FM-index)
/// (or Full-text index in Minute space) for finding occurrences of substrings
/// in O(1) time.
///
/// ``` rust
/// use nucleic_acid::FMIndex;
///
/// let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
/// let index = FMIndex::new(text.as_bytes());
///
/// // count the occurrences
/// assert_eq!(0, index.count("CCCCC"));
/// assert_eq!(3, index.count("TG"));
///
/// //... or get their positions
/// assert_eq!(index.search("GCGT"), vec![46, 26, 0]);
/// ```
///
/// The current implementation of FM-index is a memory killer, since it stores positions
/// of **all bytes** in the given data. For the human genome (~3 GB), it consumed
/// ~27 GB of RAM to build the index (in ~4 mins).
///
/// That said, it still returns the match results in a few microseconds.
#[derive(Clone, Debug)]
pub struct FMIndex {
/// BW-transformed data
data: Vec<u8>,
/// forward frequency of each character in the BWT data
cache: Vec<u32>,
/// incremental character frequencies
occ_map: Vec<u32>,
/// LF-mapping for backward search
lf_vec: Vec<u32>,
}
impl FMIndex {
/// Generate an FM-index for the input data.
#[inline]
pub fn new(data: &[u8]) -> FMIndex {
FMIndex::new_from_bwt(bwt(data))
}
/// Get the reference to the inner BWT data.
///
/// Note that the length of BWT is one more than the length of the actual text,
/// since it has a null byte to indicate the empty string.
pub fn bwt(&self) -> &[u8] {
&self.data
}
/// Generate the FM-index from the BWT data.
///
/// It's not a good idea to generate FM-index from scratch all the time, especially for large inputs.
/// This would be very useful when your data is large and remains constant for a while.
///
/// FM-index internally uses BWT, and BWT is generated from the suffix array, which takes a lot of time.
/// If your input doesn't change, then it's better to get the BWT data (using `bwt` method), write it
/// to a file and generate the index from that in the future.
pub fn new_from_bwt(bwt_data: Vec<u8>) -> FMIndex {
let mut map = Vec::new();
let mut count = vec![0u32; bwt_data.len()];
let mut idx = 0;
// generate the frequency map and forward frequency vector from BWT
for i in &bwt_data {
let value = insert(&mut map, *i);
count[idx] = value;
idx += 1;
}
generate_occurrence_index(&mut map);
let mut lf_vec = count.clone();
let mut lf_occ_map = map.clone();
// generate the LF vector (just like inverting the BWT)
for (i, c) in bwt_data.iter().enumerate() {
let idx = *c as usize;
lf_vec[i] = lf_occ_map[idx];
lf_occ_map[idx] += 1;
}
let mut i = lf_vec[0] as usize;
lf_vec[0] = 0;
let mut counter = bwt_data.len() as u32 - 1;
// Only difference is that we replace the LF indices with the lengths of prefix
// from a particular position (in other words, the number of times
// it would take us to get to the start of string).
for _ in 0..(bwt_data.len() - 1) {
let next = lf_vec[i];
lf_vec[i] = counter;
i = next as usize;
counter -= 1;
}
FMIndex {
data: bwt_data,
cache: count,
occ_map: map,
lf_vec: lf_vec,
}
}
/// Get the nearest position of a character in the internal BWT data.
///
/// The `count` and `search` methods rely on this method for finding occurrences.
/// For example, we can do something like this,
///
/// ``` rust
/// use nucleic_acid::FMIndex;
/// let fm = FMIndex::new(b"Hello, Hello, Hello" as &[u8]);
///
/// // initially, the range should be the length of the BWT
/// let mut top = 0;
/// let mut bottom = fm.bwt().len();
/// let query = b"llo";
///
/// // feed the characters in the reverse
/// for ch in query.iter().rev() {
/// top = fm.nearest(top, *ch);
/// bottom = fm.nearest(bottom, *ch);
/// if top >= bottom {
/// return
/// }
/// }
///
/// // If we get a valid range, then everything in that range is a valid match.
/// // This way, we can get both the count and positions...
/// assert_eq!(3, bottom - top);
/// assert_eq!(vec![17, 10, 3], (top..bottom).map(|i| fm[i]).collect::<Vec<_>>());
/// ```
///
/// This is backward searching. As you feed in the characters along with a position, `nearest` will
/// give you a new position in the index. Once the range becomes invalid (which happens when the
/// substring doesn't exist), we can bail out. On the contrary, if the range remains valid after
/// you've fed in all the characters, then every value within that range is an occurrence.
///
/// So, this is useful when you want to cache the repeating ranges. With this, you can build your own
/// count/search functions with caching. It's also useful for making custom approximate matching functions
/// by backtracking whenever there's an invalid range.
pub fn nearest(&self, idx: usize, ch: u8) -> usize {
match self.occ_map.get(ch as usize) {
Some(res) if *res > 0 => {
*res as usize + (0..idx).rev()
.find(|&i| self.data[i] == ch)
.map(|i| self.cache[i] as usize)
.unwrap_or(0)
},
_ => 0,
}
}
fn get_range(&self, query: &str) -> Option<(usize, usize)> {
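// Backward search: feed the query in reverse, narrowing the matching
// range of the BWT one character at a time; an empty range means no match.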
let mut top = 0;
let mut bottom = self.data.len();
for ch in query.as_bytes().iter().rev() {
top = self.nearest(top, *ch);
bottom = self.nearest(bottom, *ch);
if top >= bottom {
return None
}
}
if top >= bottom {
None
} else {
Some((top, bottom))
}
}
/// Count the occurrences of the substring in the original data.
pub fn count(&self, query: &str) -> usize {
match self.get_range(query) {
Some((top, bottom)) => bottom - top,
None => 0,
}
}
/// Get the positions of occurrences of substring in the original data.
pub fn search(&self, query: &str) -> Vec<usize> {
match self.get_range(query) {
Some((top, bottom)) => (top..bottom).map(|idx| {
let i = self.nearest(idx, self.data[idx]);
self.lf_vec[i] as usize
}).collect(),
None => Vec::new(),
}
}
}
impl Index<usize> for FMIndex {
type Output = u32;
fn index(&self, i: usize) -> &u32 {
self.lf_vec.get(i).expect("index out of range")
}
}
#[cfg(test)]
mod tests {
use super::{FMIndex, bwt, ibwt};
#[test]
fn test_bwt_and_ibwt() {
let text = String::from("ATCTAGGAGATCTGAATCTAGTTCAACTAGCTAGATCTAGAGACAGCTAA");
let bw = bwt(text.as_bytes());
let ibw = ibwt(&bw);
assert_eq!(String::from("AATCGGAGTTGCTTTG\u{0}AGTAGTGATTTTAAGAAAAAACCCCCCTAAAACG"),
String::from_utf8(bw).unwrap());
assert_eq!(text, String::from_utf8(ibw).unwrap());
}
#[test]
fn test_fm_index() {
let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
let index = FMIndex::new(text.as_bytes());
assert_eq!(0, index.count("CCCCC"));
let mut result = index.search("TG");
result.sort();
assert_eq!(result, vec![3, 15, 21]);
let mut result = index.search("GCGT");
result.sort();
assert_eq!(result, vec![0, 26, 46]);
assert_eq!(vec![1], index.search("CGTGCCC"));
} | } | random_line_split |
|
bwt.rs | use sa::{insert, suffix_array};
use std::ops::Index;
/// Generate the [Burrows-Wheeler Transform](https://en.wikipedia.org/wiki/Burrows%E2%80%93Wheeler_transform)
/// of the given input.
///
/// ``` rust
/// let text = String::from("The quick brown fox jumps over the lazy dog");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// assert_eq!(String::from("gkynxeser\u{0}l i hhv otTu c uwd rfm ebp qjoooza"),
/// String::from_utf8(bw).unwrap());
/// ```
/// The output can then be used for compression or FM-index'ing.
pub fn bwt(input: &[u8]) -> Vec<u8> {
suffix_array(input).into_iter().map(|i| {
// BWT[i] = S[SA[i] - 1]
if i == 0 { 0 } else { input[(i - 1) as usize] }
}).collect()
}
// Takes a frequency map of bytes and generates the index of first occurrence
// of each byte.
fn generate_occurrence_index(map: &mut Vec<u32>) {
let mut idx = 0;
for i in 0..map.len() {
let c = map[i];
map[i] = idx;
idx += c;
}
}
/// Invert the BWT and generate the original data.
///
/// ``` rust
/// let text = String::from("Hello, world!");
/// let bw = nucleic_acid::bwt(text.as_bytes());
/// let ibw = nucleic_acid::ibwt(&bw);
/// assert_eq!(text, String::from_utf8(ibw).unwrap());
/// ```
pub fn ibwt(input: &[u8]) -> Vec<u8> {
// get the byte distribution
let mut map = Vec::new();
for i in input {
insert(&mut map, *i);
}
generate_occurrence_index(&mut map);
// generate the LF vector
let mut lf = vec![0; input.len()];
for (i, c) in input.iter().enumerate() {
let byte = *c as usize;
let val = map[byte];
lf[i] = val;
map[byte] = val + 1;
}
let mut idx = 0;
// construct the sequence by traversing through the LF vector
let mut output = vec![0; input.len()];
for i in (0..(input.len() - 1)).rev() {
output[i] = input[idx];
idx = lf[idx] as usize;
}
output.pop();
output
}
/// [Ferragina-Manzini index](https://en.wikipedia.org/wiki/FM-index)
/// (or Full-text index in Minute space) for finding occurrences of substrings
/// in O(1) time.
///
/// ``` rust
/// use nucleic_acid::FMIndex;
///
/// let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
/// let index = FMIndex::new(text.as_bytes());
///
/// // count the occurrences
/// assert_eq!(0, index.count("CCCCC"));
/// assert_eq!(3, index.count("TG"));
///
/// //... or get their positions
/// assert_eq!(index.search("GCGT"), vec![46, 26, 0]);
/// ```
///
/// The current implementation of FM-index is a memory killer, since it stores positions
/// of **all bytes** in the given data. For the human genome (~3 GB), it consumed
/// ~27 GB of RAM to build the index (in ~4 mins).
///
/// That said, it still returns the match results in a few microseconds.
#[derive(Clone, Debug)]
pub struct FMIndex {
/// BW-transformed data
data: Vec<u8>,
/// forward frequency of each character in the BWT data
cache: Vec<u32>,
/// incremental character frequencies
occ_map: Vec<u32>,
/// LF-mapping for backward search
lf_vec: Vec<u32>,
}
impl FMIndex {
/// Generate an FM-index for the input data.
#[inline]
pub fn new(data: &[u8]) -> FMIndex {
FMIndex::new_from_bwt(bwt(data))
}
/// Get the reference to the inner BWT data.
///
/// Note that the length of BWT is one more than the length of the actual text,
/// since it has a null byte to indicate the empty string.
pub fn bwt(&self) -> &[u8] {
&self.data
}
/// Generate the FM-index from the BWT data.
///
/// It's not a good idea to generate FM-index from scratch all the time, especially for large inputs.
/// This would be very useful when your data is large and remains constant for a while.
///
/// FM-index internally uses BWT, and BWT is generated from the suffix array, which takes a lot of time.
/// If your input doesn't change, then it's better to get the BWT data (using `bwt` method), write it
/// to a file and generate the index from that in the future.
pub fn new_from_bwt(bwt_data: Vec<u8>) -> FMIndex {
let mut map = Vec::new();
let mut count = vec![0u32; bwt_data.len()];
let mut idx = 0;
// generate the frequency map and forward frequency vector from BWT
for i in &bwt_data {
let value = insert(&mut map, *i);
count[idx] = value;
idx += 1;
}
generate_occurrence_index(&mut map);
let mut lf_vec = count.clone();
let mut lf_occ_map = map.clone();
// generate the LF vector (just like inverting the BWT)
for (i, c) in bwt_data.iter().enumerate() {
let idx = *c as usize;
lf_vec[i] = lf_occ_map[idx];
lf_occ_map[idx] += 1;
}
let mut i = lf_vec[0] as usize;
lf_vec[0] = 0;
let mut counter = bwt_data.len() as u32 - 1;
// Only difference is that we replace the LF indices with the lengths of prefix
// from a particular position (in other words, the number of times
// it would take us to get to the start of string).
for _ in 0..(bwt_data.len() - 1) {
let next = lf_vec[i];
lf_vec[i] = counter;
i = next as usize;
counter -= 1;
}
FMIndex {
data: bwt_data,
cache: count,
occ_map: map,
lf_vec: lf_vec,
}
}
/// Get the nearest position of a character in the internal BWT data.
///
/// The `count` and `search` methods rely on this method for finding occurrences.
/// For example, we can do something like this,
///
/// ``` rust
/// use nucleic_acid::FMIndex;
/// let fm = FMIndex::new(b"Hello, Hello, Hello" as &[u8]);
///
/// // initially, the range should be the length of the BWT
/// let mut top = 0;
/// let mut bottom = fm.bwt().len();
/// let query = b"llo";
///
/// // feed the characters in the reverse
/// for ch in query.iter().rev() {
/// top = fm.nearest(top, *ch);
/// bottom = fm.nearest(bottom, *ch);
/// if top >= bottom {
/// return
/// }
/// }
///
/// // If we get a valid range, then everything in that range is a valid match.
/// // This way, we can get both the count and positions...
/// assert_eq!(3, bottom - top);
/// assert_eq!(vec![17, 10, 3], (top..bottom).map(|i| fm[i]).collect::<Vec<_>>());
/// ```
///
/// This is backward searching. As you feed in the characters along with a position, `nearest` will
/// give you a new position in the index. Once the range becomes invalid (which happens when the
/// substring doesn't exist), we can bail out. On the contrary, if the range remains valid after
/// you've fed in all the characters, then every value within that range is an occurrence.
///
/// So, this is useful when you want to cache the repeating ranges. With this, you can build your own
/// count/search functions with caching. It's also useful for making custom approximate matching functions
/// by backtracking whenever there's an invalid range.
pub fn nearest(&self, idx: usize, ch: u8) -> usize {
match self.occ_map.get(ch as usize) {
Some(res) if *res > 0 => {
*res as usize + (0..idx).rev()
.find(|&i| self.data[i] == ch)
.map(|i| self.cache[i] as usize)
.unwrap_or(0)
},
_ => 0,
}
}
fn get_range(&self, query: &str) -> Option<(usize, usize)> {
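// Backward search: feed the query in reverse, narrowing the matching
// range of the BWT one character at a time; an empty range means no match.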
let mut top = 0;
let mut bottom = self.data.len();
for ch in query.as_bytes().iter().rev() {
top = self.nearest(top, *ch);
bottom = self.nearest(bottom, *ch);
if top >= bottom {
return None
}
}
if top >= bottom {
None
} else {
Some((top, bottom))
}
}
/// Count the occurrences of the substring in the original data.
pub fn count(&self, query: &str) -> usize {
match self.get_range(query) {
Some((top, bottom)) => bottom - top,
None => 0,
}
}
/// Get the positions of occurrences of substring in the original data.
pub fn search(&self, query: &str) -> Vec<usize> {
match self.get_range(query) {
Some((top, bottom)) => (top..bottom).map(|idx| {
let i = self.nearest(idx, self.data[idx]);
self.lf_vec[i] as usize
}).collect(),
None => Vec::new(),
}
}
}
impl Index<usize> for FMIndex {
type Output = u32;
fn index(&self, i: usize) -> &u32 {
self.lf_vec.get(i).expect("index out of range")
}
}
#[cfg(test)]
mod tests {
use super::{FMIndex, bwt, ibwt};
#[test]
fn test_bwt_and_ibwt() {
let text = String::from("ATCTAGGAGATCTGAATCTAGTTCAACTAGCTAGATCTAGAGACAGCTAA");
let bw = bwt(text.as_bytes());
let ibw = ibwt(&bw);
assert_eq!(String::from("AATCGGAGTTGCTTTG\u{0}AGTAGTGATTTTAAGAAAAAACCCCCCTAAAACG"),
String::from_utf8(bw).unwrap());
assert_eq!(text, String::from_utf8(ibw).unwrap());
}
#[test]
fn test_fm_index() {
let text = String::from("GCGTGCCCAGGGCACTGCCGCTGCAGGCGTAGGCATCGCATCACACGCGT");
let index = FMIndex::new(text.as_bytes());
assert_eq!(0, index.count("CCCCC"));
let mut result = index.search("TG");
result.sort();
assert_eq!(result, vec![3, 15, 21]);
let mut result = index.search("GCGT");
result.sort();
assert_eq!(result, vec![0, 26, 46]);
assert_eq!(vec![1], index.search("CGTGCCC"));
}
}
| generate_occurrence_index | identifier_name |
albums.rs | use std::collections::HashMap;
use postgres::GenericConnection as PostgresConnection;
use db::pg::RowsExtension;
use library::Result;
const MAX_TOP_ALBUMS: i64 = 10;
#[derive(RustcEncodable)]
struct Track {
name: String,
is_favorite: bool,
scrobbles: Vec<i32>,
}
#[derive(RustcEncodable)]
pub struct Album {
name: String,
tracks: HashMap<i32, Track>,
}
#[derive(RustcEncodable)]
pub struct TopAlbum {
label: String,
value: i32,
}
pub type TopAlbums = Vec<TopAlbum>;
pub fn load_album(conn: &PostgresConnection, album_id: i32) -> Result<Option<Album>> {
let rows = try!(conn.query("SELECT name FROM albums WHERE id = $1", &[&album_id]));
if rows.len() == 0 {
return Ok(None);
}
let name: String = rows.get(0).get("name");
let mut tracks = HashMap::<i32, Track>::new();
let query = "SELECT id, name, is_favorite FROM tracks WHERE album_id = $1";
for row in &try!(conn.query(query, &[&album_id])) {
tracks.insert(row.get("id"),
Track {
name: row.get("name"),
is_favorite: row.get("is_favorite"),
scrobbles: Vec::new(),
});
}
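// Build a comma-separated id list for the IN (...) clause; the trailing
// comma is dropped by the pop() below.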
let mut ids = tracks.keys().fold(String::new(), |acc, id| format!("{}{},", acc, id));
ids.pop();
let query = format!("SELECT track_id, timestamp FROM scrobbles WHERE track_id IN({})",
ids);
let scrobbles = try!(conn.query(&query, &[]));
for row in &scrobbles {
let track_id: i32 = row.get("track_id");
let timestamp: i32 = row.get("timestamp");
if let Some(track) = tracks.get_mut(&track_id) {
track.scrobbles.push(timestamp)
}
}
Ok(Some(Album {
name: name,
tracks: tracks,
}))
}
pub fn load_top_albums(conn: &PostgresConnection) -> Result<TopAlbums> {
let query = r#"
SELECT
artists.name as artist,
albums.name as album,
albums.plays as plays
FROM albums
LEFT JOIN artists ON artists.id = albums.artist_id
ORDER BY albums.plays DESC
OFFSET 0 LIMIT $1
"#;
let rows = try!(conn.query(query, &[&MAX_TOP_ALBUMS]));
let mut albums = TopAlbums::with_capacity(MAX_TOP_ALBUMS as usize);
for row in &rows {
let artist: String = row.get("artist");
let album: String = row.get("album");
albums.push(TopAlbum {
label: format!("{} - {}", artist, album),
value: row.get("plays"),
});
}
Ok(albums)
} |
pub fn total_albums(conn: &PostgresConnection) -> Result<i64> {
Ok(try!(conn.query("SELECT COUNT(*) FROM albums", &[])).fetch_column())
} | random_line_split |
|
albums.rs | use std::collections::HashMap;
use postgres::GenericConnection as PostgresConnection;
use db::pg::RowsExtension;
use library::Result;
const MAX_TOP_ALBUMS: i64 = 10;
#[derive(RustcEncodable)]
struct Track {
name: String,
is_favorite: bool,
scrobbles: Vec<i32>,
}
#[derive(RustcEncodable)]
pub struct Album {
name: String,
tracks: HashMap<i32, Track>,
}
#[derive(RustcEncodable)]
pub struct TopAlbum {
label: String,
value: i32,
}
pub type TopAlbums = Vec<TopAlbum>;
pub fn load_album(conn: &PostgresConnection, album_id: i32) -> Result<Option<Album>> {
let rows = try!(conn.query("SELECT name FROM albums WHERE id = $1", &[&album_id]));
if rows.len() == 0 {
return Ok(None);
}
let name: String = rows.get(0).get("name");
let mut tracks = HashMap::<i32, Track>::new();
let query = "SELECT id, name, is_favorite FROM tracks WHERE album_id = $1";
for row in &try!(conn.query(query, &[&album_id])) {
tracks.insert(row.get("id"),
Track {
name: row.get("name"),
is_favorite: row.get("is_favorite"),
scrobbles: Vec::new(),
});
}
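// Build a comma-separated id list for the IN (...) clause; the trailing
// comma is dropped by the pop() below.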
let mut ids = tracks.keys().fold(String::new(), |acc, id| format!("{}{},", acc, id));
ids.pop();
let query = format!("SELECT track_id, timestamp FROM scrobbles WHERE track_id IN({})",
ids);
let scrobbles = try!(conn.query(&query, &[]));
for row in &scrobbles {
let track_id: i32 = row.get("track_id");
let timestamp: i32 = row.get("timestamp");
if let Some(track) = tracks.get_mut(&track_id) {
track.scrobbles.push(timestamp)
}
}
Ok(Some(Album {
name: name,
tracks: tracks,
}))
}
pub fn load_top_albums(conn: &PostgresConnection) -> Result<TopAlbums> {
let query = r#"
SELECT
artists.name as artist,
albums.name as album,
albums.plays as plays
FROM albums
LEFT JOIN artists ON artists.id = albums.artist_id
ORDER BY albums.plays DESC
OFFSET 0 LIMIT $1
"#;
let rows = try!(conn.query(query, &[&MAX_TOP_ALBUMS]));
let mut albums = TopAlbums::with_capacity(MAX_TOP_ALBUMS as usize);
for row in &rows {
let artist: String = row.get("artist");
let album: String = row.get("album");
albums.push(TopAlbum {
label: format!("{} - {}", artist, album),
value: row.get("plays"),
});
}
Ok(albums)
}
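/// Counts how many albums are stored in the library.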
pub fn total_albums(conn: &PostgresConnection) -> Result<i64> {
Ok(try!(conn.query("SELECT COUNT(*) FROM albums", &[])).fetch_column())
}
| {
return Ok(None);
} | conditional_block |
albums.rs | use std::collections::HashMap;
use postgres::GenericConnection as PostgresConnection;
use db::pg::RowsExtension;
use library::Result;
const MAX_TOP_ALBUMS: i64 = 10;
#[derive(RustcEncodable)]
struct Track {
name: String,
is_favorite: bool,
scrobbles: Vec<i32>,
}
#[derive(RustcEncodable)]
pub struct Album {
name: String,
tracks: HashMap<i32, Track>,
}
#[derive(RustcEncodable)]
pub struct TopAlbum {
label: String,
value: i32,
}
pub type TopAlbums = Vec<TopAlbum>;
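/// Loads one album by id, together with its tracks and each track's scrobble
/// timestamps. Returns Ok(None) when no album has the given id; the scrobble
/// query assumes the album has at least one track.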
pub fn load_album(conn: &PostgresConnection, album_id: i32) -> Result<Option<Album>> {
let rows = try!(conn.query("SELECT name FROM albums WHERE id = $1", &[&album_id]));
if rows.len() == 0 {
return Ok(None);
}
let name: String = rows.get(0).get("name");
let mut tracks = HashMap::<i32, Track>::new();
let query = "SELECT id, name, is_favorite FROM tracks WHERE album_id = $1";
for row in &try!(conn.query(query, &[&album_id])) {
tracks.insert(row.get("id"),
Track {
name: row.get("name"),
is_favorite: row.get("is_favorite"),
scrobbles: Vec::new(),
});
}
let mut ids = tracks.keys().fold(String::new(), |acc, id| format!("{}{},", acc, id));
ids.pop();
let query = format!("SELECT track_id, timestamp FROM scrobbles WHERE track_id IN({})",
ids);
let scrobbles = try!(conn.query(&query, &[]));
for row in &scrobbles {
let track_id: i32 = row.get("track_id");
let timestamp: i32 = row.get("timestamp");
if let Some(track) = tracks.get_mut(&track_id) {
track.scrobbles.push(timestamp)
}
}
Ok(Some(Album {
name: name,
tracks: tracks,
}))
}
pub fn | (conn: &PostgresConnection) -> Result<TopAlbums> {
let query = r#"
SELECT
artists.name as artist,
albums.name as album,
albums.plays as plays
FROM albums
LEFT JOIN artists ON artists.id = albums.artist_id
ORDER BY albums.plays DESC
OFFSET 0 LIMIT $1
"#;
let rows = try!(conn.query(query, &[&MAX_TOP_ALBUMS]));
let mut albums = TopAlbums::with_capacity(MAX_TOP_ALBUMS as usize);
for row in &rows {
let artist: String = row.get("artist");
let album: String = row.get("album");
albums.push(TopAlbum {
label: format!("{} - {}", artist, album),
value: row.get("plays"),
});
}
Ok(albums)
}
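/// Counts how many albums are stored in the library.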
pub fn total_albums(conn: &PostgresConnection) -> Result<i64> {
Ok(try!(conn.query("SELECT COUNT(*) FROM albums", &[])).fetch_column())
}
| load_top_albums | identifier_name |
log-knows-the-names-of-variants.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
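// Check that the `%?` formatter prints enum values using their variant names.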
enum foo {
a(uint),
b(~str),
c,
}
enum bar {
d, e, f
}
pub fn main() {
assert_eq!(~"a(22)", fmt!("%?", a(22u)));
assert_eq!(~"b(~\"hi\")", fmt!("%?", b(~"hi")));
assert_eq!(~"c", fmt!("%?", c)); | assert_eq!(~"d", fmt!("%?", d));
} | random_line_split |
|
log-knows-the-names-of-variants.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum foo {
a(uint),
b(~str),
c,
}
enum bar {
d, e, f
}
pub fn main() | {
assert_eq!(~"a(22)", fmt!("%?", a(22u)));
assert_eq!(~"b(~\"hi\")", fmt!("%?", b(~"hi")));
assert_eq!(~"c", fmt!("%?", c));
assert_eq!(~"d", fmt!("%?", d));
} | identifier_body |
|
log-knows-the-names-of-variants.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum | {
a(uint),
b(~str),
c,
}
enum bar {
d, e, f
}
pub fn main() {
assert_eq!(~"a(22)", fmt!("%?", a(22u)));
assert_eq!(~"b(~\"hi\")", fmt!("%?", b(~"hi")));
assert_eq!(~"c", fmt!("%?", c));
assert_eq!(~"d", fmt!("%?", d));
}
| foo | identifier_name |
borrowck-field-sensitivity.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
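// Check that the borrow checker tracks moves, borrows and initialization of
// individual struct fields instead of treating the whole struct as one unit.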
#![feature(box_syntax)]
struct A { a: int, b: Box<int> }
fn deref_after_move() {
let x = A { a: 1, b: box 2 };
drop(x.b);
drop(*x.b); //~ ERROR use of moved value: `*x.b`
}
fn deref_after_fu_move() {
let x = A { a: 1, b: box 2 };
let y = A { a: 3, .. x };
drop(*x.b); //~ ERROR use of moved value: `*x.b`
}
fn borrow_after_move() {
let x = A { a: 1, b: box 2 };
drop(x.b);
let p = &x.b; //~ ERROR use of moved value: `x.b`
drop(**p);
}
fn borrow_after_fu_move() {
let x = A { a: 1, b: box 2 };
let _y = A { a: 3, .. x };
let p = &x.b; //~ ERROR use of moved value: `x.b`
drop(**p);
}
fn move_after_borrow() {
let x = A { a: 1, b: box 2 };
let p = &x.b;
drop(x.b); //~ ERROR cannot move out of `x.b` because it is borrowed
drop(**p);
}
fn fu_move_after_borrow() {
let x = A { a: 1, b: box 2 };
let p = &x.b;
let _y = A { a: 3, .. x }; //~ ERROR cannot move out of `x.b` because it is borrowed
drop(**p);
}
fn mut_borrow_after_mut_borrow() {
let mut x = A { a: 1, b: box 2 };
let p = &mut x.a;
let q = &mut x.a; //~ ERROR cannot borrow `x.a` as mutable more than once at a time
drop(*p);
drop(*q);
}
| drop(x.b);
drop(x.b); //~ ERROR use of moved value: `x.b`
}
fn move_after_fu_move() {
let x = A { a: 1, b: box 2 };
let _y = A { a: 3, .. x };
drop(x.b); //~ ERROR use of moved value: `x.b`
}
fn fu_move_after_move() {
let x = A { a: 1, b: box 2 };
drop(x.b);
let _z = A { a: 3, .. x }; //~ ERROR use of moved value: `x.b`
}
fn fu_move_after_fu_move() {
let x = A { a: 1, b: box 2 };
let _y = A { a: 3, .. x };
let _z = A { a: 4, .. x }; //~ ERROR use of moved value: `x.b`
}
// The following functions aren't yet accepted, but they should be.
fn copy_after_field_assign_after_uninit() {
let mut x: A;
x.a = 1;
drop(x.a); //~ ERROR use of possibly uninitialized variable: `x.a`
}
fn borrow_after_field_assign_after_uninit() {
let mut x: A;
x.a = 1;
let p = &x.a; //~ ERROR use of possibly uninitialized variable: `x.a`
drop(*p);
}
fn move_after_field_assign_after_uninit() {
let mut x: A;
x.b = box 1;
drop(x.b); //~ ERROR use of possibly uninitialized variable: `x.b`
}
fn main() {
deref_after_move();
deref_after_fu_move();
borrow_after_move();
borrow_after_fu_move();
move_after_borrow();
fu_move_after_borrow();
mut_borrow_after_mut_borrow();
move_after_move();
move_after_fu_move();
fu_move_after_move();
fu_move_after_fu_move();
copy_after_field_assign_after_uninit();
borrow_after_field_assign_after_uninit();
move_after_field_assign_after_uninit();
} | fn move_after_move() {
let x = A { a: 1, b: box 2 }; | random_line_split |
borrowck-field-sensitivity.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
struct A { a: int, b: Box<int> }
fn deref_after_move() {
let x = A { a: 1, b: box 2 };
drop(x.b);
drop(*x.b); //~ ERROR use of moved value: `*x.b`
}
fn deref_after_fu_move() {
let x = A { a: 1, b: box 2 };
let y = A { a: 3, .. x };
drop(*x.b); //~ ERROR use of moved value: `*x.b`
}
fn borrow_after_move() {
let x = A { a: 1, b: box 2 };
drop(x.b);
let p = &x.b; //~ ERROR use of moved value: `x.b`
drop(**p);
}
fn borrow_after_fu_move() {
let x = A { a: 1, b: box 2 };
let _y = A { a: 3, .. x };
let p = &x.b; //~ ERROR use of moved value: `x.b`
drop(**p);
}
fn move_after_borrow() {
let x = A { a: 1, b: box 2 };
let p = &x.b;
drop(x.b); //~ ERROR cannot move out of `x.b` because it is borrowed
drop(**p);
}
fn fu_move_after_borrow() {
let x = A { a: 1, b: box 2 };
let p = &x.b;
let _y = A { a: 3, .. x }; //~ ERROR cannot move out of `x.b` because it is borrowed
drop(**p);
}
fn mut_borrow_after_mut_borrow() {
let mut x = A { a: 1, b: box 2 };
let p = &mut x.a;
let q = &mut x.a; //~ ERROR cannot borrow `x.a` as mutable more than once at a time
drop(*p);
drop(*q);
}
fn move_after_move() {
let x = A { a: 1, b: box 2 };
drop(x.b);
drop(x.b); //~ ERROR use of moved value: `x.b`
}
fn move_after_fu_move() {
let x = A { a: 1, b: box 2 };
let _y = A { a: 3, .. x };
drop(x.b); //~ ERROR use of moved value: `x.b`
}
fn | () {
let x = A { a: 1, b: box 2 };
drop(x.b);
let _z = A { a: 3, .. x }; //~ ERROR use of moved value: `x.b`
}
fn fu_move_after_fu_move() {
let x = A { a: 1, b: box 2 };
let _y = A { a: 3, .. x };
let _z = A { a: 4, .. x }; //~ ERROR use of moved value: `x.b`
}
// The following functions aren't yet accepted, but they should be.
fn copy_after_field_assign_after_uninit() {
let mut x: A;
x.a = 1;
drop(x.a); //~ ERROR use of possibly uninitialized variable: `x.a`
}
fn borrow_after_field_assign_after_uninit() {
let mut x: A;
x.a = 1;
let p = &x.a; //~ ERROR use of possibly uninitialized variable: `x.a`
drop(*p);
}
fn move_after_field_assign_after_uninit() {
let mut x: A;
x.b = box 1;
drop(x.b); //~ ERROR use of possibly uninitialized variable: `x.b`
}
fn main() {
deref_after_move();
deref_after_fu_move();
borrow_after_move();
borrow_after_fu_move();
move_after_borrow();
fu_move_after_borrow();
mut_borrow_after_mut_borrow();
move_after_move();
move_after_fu_move();
fu_move_after_move();
fu_move_after_fu_move();
copy_after_field_assign_after_uninit();
borrow_after_field_assign_after_uninit();
move_after_field_assign_after_uninit();
}
| fu_move_after_move | identifier_name |
borrowck-field-sensitivity.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
struct A { a: int, b: Box<int> }
fn deref_after_move() {
let x = A { a: 1, b: box 2 };
drop(x.b);
drop(*x.b); //~ ERROR use of moved value: `*x.b`
}
fn deref_after_fu_move() {
let x = A { a: 1, b: box 2 };
let y = A { a: 3, .. x };
drop(*x.b); //~ ERROR use of moved value: `*x.b`
}
fn borrow_after_move() {
let x = A { a: 1, b: box 2 };
drop(x.b);
let p = &x.b; //~ ERROR use of moved value: `x.b`
drop(**p);
}
fn borrow_after_fu_move() |
fn move_after_borrow() {
let x = A { a: 1, b: box 2 };
let p = &x.b;
drop(x.b); //~ ERROR cannot move out of `x.b` because it is borrowed
drop(**p);
}
fn fu_move_after_borrow() {
let x = A { a: 1, b: box 2 };
let p = &x.b;
let _y = A { a: 3, .. x }; //~ ERROR cannot move out of `x.b` because it is borrowed
drop(**p);
}
fn mut_borrow_after_mut_borrow() {
let mut x = A { a: 1, b: box 2 };
let p = &mut x.a;
let q = &mut x.a; //~ ERROR cannot borrow `x.a` as mutable more than once at a time
drop(*p);
drop(*q);
}
fn move_after_move() {
let x = A { a: 1, b: box 2 };
drop(x.b);
drop(x.b); //~ ERROR use of moved value: `x.b`
}
fn move_after_fu_move() {
let x = A { a: 1, b: box 2 };
let _y = A { a: 3, .. x };
drop(x.b); //~ ERROR use of moved value: `x.b`
}
fn fu_move_after_move() {
let x = A { a: 1, b: box 2 };
drop(x.b);
let _z = A { a: 3, .. x }; //~ ERROR use of moved value: `x.b`
}
fn fu_move_after_fu_move() {
let x = A { a: 1, b: box 2 };
let _y = A { a: 3, .. x };
let _z = A { a: 4, .. x }; //~ ERROR use of moved value: `x.b`
}
// The following functions aren't yet accepted, but they should be.
fn copy_after_field_assign_after_uninit() {
let mut x: A;
x.a = 1;
drop(x.a); //~ ERROR use of possibly uninitialized variable: `x.a`
}
fn borrow_after_field_assign_after_uninit() {
let mut x: A;
x.a = 1;
let p = &x.a; //~ ERROR use of possibly uninitialized variable: `x.a`
drop(*p);
}
fn move_after_field_assign_after_uninit() {
let mut x: A;
x.b = box 1;
drop(x.b); //~ ERROR use of possibly uninitialized variable: `x.b`
}
fn main() {
deref_after_move();
deref_after_fu_move();
borrow_after_move();
borrow_after_fu_move();
move_after_borrow();
fu_move_after_borrow();
mut_borrow_after_mut_borrow();
move_after_move();
move_after_fu_move();
fu_move_after_move();
fu_move_after_fu_move();
copy_after_field_assign_after_uninit();
borrow_after_field_assign_after_uninit();
move_after_field_assign_after_uninit();
}
| {
let x = A { a: 1, b: box 2 };
let _y = A { a: 3, .. x };
let p = &x.b; //~ ERROR use of moved value: `x.b`
drop(**p);
} | identifier_body |
linux_base.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::TargetOptions;
use std::default::Default;
pub fn opts() -> TargetOptions | }
}
| {
TargetOptions {
linker: "cc".to_string(),
dynamic_linking: true,
executables: true,
morestack: true,
linker_is_gnu: true,
has_rpath: true,
pre_link_args: vec![
// We want to be able to strip as much executable code as possible
// from the linker command line, and this flag indicates to the
// linker that it can avoid linking in dynamic libraries that don't
// actually satisfy any symbols up to that point (as with many other
// resolutions the linker does). This option only applies to all
// following libraries so we're sure to pass it as one of the first
// arguments.
"-Wl,--as-needed".to_string(),
],
position_independent_executables: true,
.. Default::default() | identifier_body |
linux_base.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::TargetOptions;
use std::default::Default;
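/// Base TargetOptions shared by Linux targets: the system C compiler as
/// linker, dynamic linking, GNU-style linker behaviour, rpath support and
/// position-independent executables.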
pub fn opts() -> TargetOptions {
TargetOptions {
linker: "cc".to_string(),
dynamic_linking: true,
executables: true,
morestack: true,
linker_is_gnu: true,
has_rpath: true,
pre_link_args: vec![
// We want to be able to strip as much executable code as possible
// from the linker command line, and this flag indicates to the
// linker that it can avoid linking in dynamic libraries that don't
// actually satisfy any symbols up to that point (as with many other
// resolutions the linker does). This option only applies to all
// following libraries so we're sure to pass it as one of the first
// arguments.
"-Wl,--as-needed".to_string(), | ],
position_independent_executables: true,
.. Default::default()
}
} | random_line_split |
|
linux_base.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::TargetOptions;
use std::default::Default;
pub fn | () -> TargetOptions {
TargetOptions {
linker: "cc".to_string(),
dynamic_linking: true,
executables: true,
morestack: true,
linker_is_gnu: true,
has_rpath: true,
pre_link_args: vec![
// We want to be able to strip as much executable code as possible
// from the linker command line, and this flag indicates to the
// linker that it can avoid linking in dynamic libraries that don't
// actually satisfy any symbols up to that point (as with many other
// resolutions the linker does). This option only applies to all
// following libraries so we're sure to pass it as one of the first
// arguments.
"-Wl,--as-needed".to_string(),
],
position_independent_executables: true,
.. Default::default()
}
}
| opts | identifier_name |
backend.rs | use std::env;
use dotenv::dotenv;
use diesel;
use diesel::prelude::*;
use diesel::pg::PgConnection;
use super::models::*;
use ::services::schema;
pub struct Backend {
connection: PgConnection
}
impl Backend {
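/// Opens a connection to the database named by the DATABASE_URL environment
/// variable (loaded via dotenv), panicking if the variable is missing or the
/// connection cannot be established.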
pub fn new() -> Self {
dotenv().ok();
let database_url = env::var("DATABASE_URL")
.expect("DATABASE_URL must be set");
let connection = PgConnection::establish(&database_url)
.expect(&format!("Error connecting to {}", database_url));
Self {
connection
}
}
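/// Records a vote by `user` on `entity` (stored lowercased): the user,
/// voteable and vote rows are created on demand, then both the aggregate
/// counters on the voteable and the per-user counters on the vote are updated.
///
/// A hypothetical call site (names are illustrative only):
/// `backend.vote("some_user", "rust", 1, 0);`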
pub fn vote(&self, user: &str, entity: &str, up: i32, down: i32) {
use self::schema::{users, voteables, votes};
let entity = &entity.to_lowercase();
use self::schema::users::dsl as us;
let mut res: Vec<User> = us::users.filter(us::user_id.eq(user))
.load(&self.connection).unwrap();
let user = match res.len() {
0 => {
let new_user = NewUser { user_id: user };
diesel::insert(&new_user).into(users::table)
.get_result(&self.connection)
.expect("Error creating new user")
},
_ => res.pop().unwrap(),
};
use self::schema::voteables::dsl::*;
let mut res: Vec<Voteable> = voteables.filter(value.eq(entity))
.load(&self.connection).unwrap();
let mut voteable = match res.len() {
0 => {
let new_voteable = NewVoteable {
value: entity,
total_up: 0,
total_down: 0,
};
let res = diesel::insert(&new_voteable)
.into(voteables::table)
.get_result(&self.connection);
if let Err(e) = res {
return;
}
res.unwrap()
},
_ => res.pop().unwrap(),
};
voteable.total_up += up;
voteable.total_down += down;
voteable.save_changes::<Voteable>(&self.connection);
use ::services::schema::votes::dsl as vts;
let mut res: Vec<Vote> = vts::votes.filter(vts::user_id.eq(user.id))
.filter(vts::voteable_id.eq(voteable.id))
.load(&self.connection).unwrap();
let mut vote = match res.len() {
0 => {
let new_vote = NewVote{
user_id: user.id,
voteable_id: voteable.id,
up: 0,
down: 0,
};
diesel::insert(&new_vote).into(votes::table)
.get_result(&self.connection)
.expect("Error creating new vote")
},
_ => res.pop().unwrap(),
};
vote.up += up;
vote.down += down;
vote.save_changes::<Vote>(&self.connection);
}
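/// Looks up the aggregate vote counts for `entity` (case-insensitive),
/// returning None if nothing has been voted on under that name.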
pub fn get_upvotes(&self, entity: &str) -> Option<Voteable> |
}
| {
use self::schema::voteables::dsl::*;
let entity = &entity.to_lowercase();
let mut res = voteables.filter(value.eq(entity))
.load(&self.connection).unwrap();
match res.len() {
0 => None,
_ => Some(res.pop().unwrap()),
}
} | identifier_body |
backend.rs | use std::env;
use dotenv::dotenv;
use diesel;
use diesel::prelude::*;
use diesel::pg::PgConnection;
use super::models::*;
use ::services::schema;
pub struct Backend {
connection: PgConnection
}
impl Backend {
pub fn new() -> Self {
dotenv().ok();
let database_url = env::var("DATABASE_URL")
.expect("DATABASE_URL must be set");
let connection = PgConnection::establish(&database_url)
.expect(&format!("Error connecting to {}", database_url));
Self {
connection
}
}
pub fn | (&self, user: &str, entity: &str, up: i32, down: i32) {
use self::schema::{users, voteables, votes};
let entity = &entity.to_lowercase();
use self::schema::users::dsl as us;
let mut res: Vec<User> = us::users.filter(us::user_id.eq(user))
.load(&self.connection).unwrap();
let user = match res.len() {
0 => {
let new_user = NewUser { user_id: user };
diesel::insert(&new_user).into(users::table)
.get_result(&self.connection)
.expect("Error creating new user")
},
_ => res.pop().unwrap(),
};
use self::schema::voteables::dsl::*;
let mut res: Vec<Voteable> = voteables.filter(value.eq(entity))
.load(&self.connection).unwrap();
let mut voteable = match res.len() {
0 => {
let new_voteable = NewVoteable {
value: entity,
total_up: 0,
total_down: 0,
};
let res = diesel::insert(&new_voteable)
.into(voteables::table)
.get_result(&self.connection);
if let Err(e) = res {
return;
}
res.unwrap()
},
_ => res.pop().unwrap(),
};
voteable.total_up += up;
voteable.total_down += down;
voteable.save_changes::<Voteable>(&self.connection);
use ::services::schema::votes::dsl as vts;
let mut res: Vec<Vote> = vts::votes.filter(vts::user_id.eq(user.id))
.filter(vts::voteable_id.eq(voteable.id))
.load(&self.connection).unwrap();
let mut vote = match res.len() {
0 => {
let new_vote = NewVote{
user_id: user.id,
voteable_id: voteable.id,
up: 0,
down: 0,
};
diesel::insert(&new_vote).into(votes::table)
.get_result(&self.connection)
.expect("Error creating new vote")
},
_ => res.pop().unwrap(),
};
vote.up += up;
vote.down += down;
vote.save_changes::<Vote>(&self.connection);
}
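/// Looks up the aggregate vote counts for `entity` (case-insensitive),
/// returning None if nothing has been voted on under that name.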
pub fn get_upvotes(&self, entity: &str) -> Option<Voteable> {
use self::schema::voteables::dsl::*;
let entity = &entity.to_lowercase();
let mut res = voteables.filter(value.eq(entity))
.load(&self.connection).unwrap();
match res.len() {
0 => None,
_ => Some(res.pop().unwrap()),
}
}
}
| vote | identifier_name |
backend.rs | use std::env;
use dotenv::dotenv;
use diesel;
use diesel::prelude::*;
use diesel::pg::PgConnection;
use super::models::*;
use ::services::schema;
pub struct Backend {
connection: PgConnection
}
impl Backend {
pub fn new() -> Self {
dotenv().ok();
let database_url = env::var("DATABASE_URL")
.expect("DATABASE_URL must be set");
let connection = PgConnection::establish(&database_url)
.expect(&format!("Error connecting to {}", database_url));
Self {
connection
}
}
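/// Records a vote by `user` on `entity` (stored lowercased), creating the
/// user, voteable and vote rows on demand and updating both the aggregate
/// and the per-user counters.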
pub fn vote(&self, user: &str, entity: &str, up: i32, down: i32) {
use self::schema::{users, voteables, votes};
let entity = &entity.to_lowercase();
use self::schema::users::dsl as us;
let mut res: Vec<User> = us::users.filter(us::user_id.eq(user))
.load(&self.connection).unwrap();
let user = match res.len() {
0 => {
let new_user = NewUser { user_id: user };
diesel::insert(&new_user).into(users::table)
.get_result(&self.connection)
.expect("Error creating new user")
},
_ => res.pop().unwrap(),
};
use self::schema::voteables::dsl::*;
let mut res: Vec<Voteable> = voteables.filter(value.eq(entity))
.load(&self.connection).unwrap();
let mut voteable = match res.len() {
0 => {
let new_voteable = NewVoteable {
value: entity,
total_up: 0,
total_down: 0,
};
let res = diesel::insert(&new_voteable)
.into(voteables::table)
.get_result(&self.connection);
if let Err(e) = res {
return;
}
res.unwrap()
},
_ => res.pop().unwrap(),
};
voteable.total_up += up; | voteable.save_changes::<Voteable>(&self.connection);
use ::services::schema::votes::dsl as vts;
let mut res: Vec<Vote> = vts::votes.filter(vts::user_id.eq(user.id))
.filter(vts::voteable_id.eq(voteable.id))
.load(&self.connection).unwrap();
let mut vote = match res.len() {
0 => {
let new_vote = NewVote{
user_id: user.id,
voteable_id: voteable.id,
up: 0,
down: 0,
};
diesel::insert(&new_vote).into(votes::table)
.get_result(&self.connection)
.expect("Error creating new vote")
},
_ => res.pop().unwrap(),
};
vote.up += up;
vote.down += down;
vote.save_changes::<Vote>(&self.connection);
}
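/// Looks up the aggregate vote counts for `entity` (case-insensitive),
/// returning None if nothing has been voted on under that name.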
pub fn get_upvotes(&self, entity: &str) -> Option<Voteable> {
use self::schema::voteables::dsl::*;
let entity = &entity.to_lowercase();
let mut res = voteables.filter(value.eq(entity))
.load(&self.connection).unwrap();
match res.len() {
0 => None,
_ => Some(res.pop().unwrap()),
}
}
} | voteable.total_down += down; | random_line_split |
selector_parser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Gecko-specific bits for selector-parsing.
use cssparser::{Parser, ToCss};
use element_state::ElementState;
use gecko_bindings::structs::CSSPseudoClassType;
use selector_parser::{SelectorParser, PseudoElementCascadeType};
use selectors::parser::{ComplexSelector, SelectorMethods};
use selectors::visitor::SelectorVisitor;
use std::borrow::Cow;
use std::fmt;
use string_cache::{Atom, Namespace, WeakAtom, WeakNamespace};
pub use gecko::pseudo_element::{PseudoElement, EAGER_PSEUDOS, EAGER_PSEUDO_COUNT};
pub use gecko::snapshot::SnapshotMap;
bitflags! {
flags NonTSPseudoClassFlag: u8 {
// See NonTSPseudoClass::is_internal()
const PSEUDO_CLASS_INTERNAL = 0x01,
}
}
macro_rules! pseudo_class_name {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
#[doc = "Our representation of a non tree-structural pseudo-class."]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NonTSPseudoClass {
$(
#[doc = $css]
$name,
)*
$(
#[doc = $s_css]
$s_name(Box<[u16]>),
)*
/// The non-standard `:-moz-any` pseudo-class.
///
/// TODO(emilio): We disallow combinators and pseudos here, so we
/// should use SimpleSelector instead
MozAny(Box<[ComplexSelector<SelectorImpl>]>),
}
}
}
apply_non_ts_list!(pseudo_class_name);
impl ToCss for NonTSPseudoClass {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
use cssparser::CssStringWriter;
use fmt::Write;
macro_rules! pseudo_class_serialize {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => concat!(":", $css),)*
$(NonTSPseudoClass::$s_name(ref s) => {
write!(dest, ":{}(", $s_css)?;
{
// FIXME(emilio): Avoid the extra allocation!
let mut css = CssStringWriter::new(dest);
// Discount the null char in the end from the
// string.
css.write_str(&String::from_utf16(&s[..s.len() - 1]).unwrap())?;
}
return dest.write_str(")")
}, )*
NonTSPseudoClass::MozAny(ref selectors) => {
dest.write_str(":-moz-any(")?;
let mut iter = selectors.iter();
let first = iter.next().expect(":-moz-any must have at least 1 selector");
first.to_css(dest)?;
for selector in iter {
dest.write_str(", ")?;
selector.to_css(dest)?;
}
return dest.write_str(")")
}
}
}
}
let ser = apply_non_ts_list!(pseudo_class_serialize);
dest.write_str(ser)
}
}
impl SelectorMethods for NonTSPseudoClass {
type Impl = SelectorImpl;
fn visit<V>(&self, visitor: &mut V) -> bool
where V: SelectorVisitor<Impl = Self::Impl>,
{
if let NonTSPseudoClass::MozAny(ref selectors) = *self |
true
}
}
impl NonTSPseudoClass {
/// A pseudo-class is internal if it can only be used inside
/// user agent style sheets.
pub fn is_internal(&self) -> bool {
macro_rules! check_flag {
(_) => (false);
($flags:expr) => ($flags.contains(PSEUDO_CLASS_INTERNAL));
}
macro_rules! pseudo_class_check_internal {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => check_flag!($flags),)*
$(NonTSPseudoClass::$s_name(..) => check_flag!($s_flags),)*
NonTSPseudoClass::MozAny(_) => false,
}
}
}
apply_non_ts_list!(pseudo_class_check_internal)
}
/// https://drafts.csswg.org/selectors-4/#useraction-pseudos
///
/// We intentionally skip the link-related ones.
pub fn is_safe_user_action_state(&self) -> bool {
matches!(*self, NonTSPseudoClass::Hover |
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus)
}
/// Get the state flag associated with a pseudo-class, if any.
pub fn state_flag(&self) -> ElementState {
macro_rules! flag {
(_) => (ElementState::empty());
($state:ident) => (::element_state::$state);
}
macro_rules! pseudo_class_state {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => flag!($state),)*
$(NonTSPseudoClass::$s_name(..) => flag!($s_state),)*
NonTSPseudoClass::MozAny(..) => ElementState::empty(),
}
}
}
apply_non_ts_list!(pseudo_class_state)
}
/// Returns true if the given pseudoclass should trigger style sharing cache revalidation.
pub fn needs_cache_revalidation(&self) -> bool {
// :dir() depends on state only, but doesn't use state_flag because its
// semantics don't quite match. Nevertheless, it doesn't need cache
// revalidation, because we already compare states for elements and
// candidates.
self.state_flag().is_empty() &&
!matches!(*self,
NonTSPseudoClass::MozAny(_) |
NonTSPseudoClass::Dir(_) |
NonTSPseudoClass::MozIsHTML |
NonTSPseudoClass::MozPlaceholder)
}
/// Convert NonTSPseudoClass to Gecko's CSSPseudoClassType.
pub fn to_gecko_pseudoclasstype(&self) -> Option<CSSPseudoClassType> {
macro_rules! gecko_type {
(_) => (None);
($gecko_type:ident) =>
(Some(::gecko_bindings::structs::CSSPseudoClassType::$gecko_type));
}
macro_rules! pseudo_class_geckotype {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => gecko_type!($gecko_type),)*
$(NonTSPseudoClass::$s_name(..) => gecko_type!($s_gecko_type),)*
NonTSPseudoClass::MozAny(_) => gecko_type!(any),
}
}
}
apply_non_ts_list!(pseudo_class_geckotype)
}
}
/// The dummy struct we use to implement our selector parsing.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SelectorImpl;
impl ::selectors::SelectorImpl for SelectorImpl {
type AttrValue = Atom;
type Identifier = Atom;
type ClassName = Atom;
type LocalName = Atom;
type NamespacePrefix = Atom;
type NamespaceUrl = Namespace;
type BorrowedNamespaceUrl = WeakNamespace;
type BorrowedLocalName = WeakAtom;
type PseudoElement = PseudoElement;
type NonTSPseudoClass = NonTSPseudoClass;
}
impl<'a> ::selectors::Parser for SelectorParser<'a> {
type Impl = SelectorImpl;
fn parse_non_ts_pseudo_class(&self, name: Cow<str>) -> Result<NonTSPseudoClass, ()> {
macro_rules! pseudo_class_parse {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match_ignore_ascii_case! { &name,
$($css => NonTSPseudoClass::$name,)*
_ => return Err(())
}
}
}
let pseudo_class = apply_non_ts_list!(pseudo_class_parse);
if !pseudo_class.is_internal() || self.in_user_agent_stylesheet() {
Ok(pseudo_class)
} else {
Err(())
}
}
fn parse_non_ts_functional_pseudo_class(&self,
name: Cow<str>,
parser: &mut Parser)
-> Result<NonTSPseudoClass, ()> {
macro_rules! pseudo_class_string_parse {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match_ignore_ascii_case! { &name,
$($s_css => {
let name = parser.expect_ident_or_string()?;
// convert to null terminated utf16 string
// since that's what Gecko deals with
let utf16: Vec<u16> = name.encode_utf16().chain(Some(0u16)).collect();
NonTSPseudoClass::$s_name(utf16.into_boxed_slice())
}, )*
"-moz-any" => {
let selectors = parser.parse_comma_separated(|input| {
ComplexSelector::parse(self, input)
})?;
// Selectors inside `:-moz-any` may not include combinators.
if selectors.iter().flat_map(|x| x.iter_raw()).any(|s| s.is_combinator()) {
return Err(())
}
NonTSPseudoClass::MozAny(selectors.into_boxed_slice())
}
_ => return Err(())
}
}
}
let pseudo_class = apply_non_ts_list!(pseudo_class_string_parse);
if !pseudo_class.is_internal() || self.in_user_agent_stylesheet() {
Ok(pseudo_class)
} else {
Err(())
}
}
fn parse_pseudo_element(&self, name: Cow<str>) -> Result<PseudoElement, ()> {
PseudoElement::from_slice(&name, self.in_user_agent_stylesheet())
.ok_or(())
}
fn default_namespace(&self) -> Option<Namespace> {
self.namespaces.default.clone()
}
fn namespace_for_prefix(&self, prefix: &Atom) -> Option<Namespace> {
self.namespaces.prefixes.get(prefix).cloned()
}
}
impl SelectorImpl {
#[inline]
/// Legacy alias for PseudoElement::cascade_type.
pub fn pseudo_element_cascade_type(pseudo: &PseudoElement) -> PseudoElementCascadeType {
pseudo.cascade_type()
}
/// A helper to traverse each eagerly cascaded pseudo-element, executing
/// `fun` on it.
#[inline]
pub fn each_eagerly_cascaded_pseudo_element<F>(mut fun: F)
where F: FnMut(PseudoElement),
{
for pseudo in &EAGER_PSEUDOS {
fun(pseudo.clone())
}
}
#[inline]
/// Executes a function for each pseudo-element.
pub fn each_pseudo_element<F>(fun: F)
where F: FnMut(PseudoElement),
{
PseudoElement::each(fun)
}
#[inline]
/// Returns the relevant state flag for a given non-tree-structural
/// pseudo-class.
pub fn pseudo_class_state_flag(pc: &NonTSPseudoClass) -> ElementState {
pc.state_flag()
}
}
| {
for selector in selectors.iter() {
if !selector.visit(visitor) {
return false;
}
}
} | conditional_block |
selector_parser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Gecko-specific bits for selector-parsing.
use cssparser::{Parser, ToCss};
use element_state::ElementState;
use gecko_bindings::structs::CSSPseudoClassType;
use selector_parser::{SelectorParser, PseudoElementCascadeType};
use selectors::parser::{ComplexSelector, SelectorMethods};
use selectors::visitor::SelectorVisitor;
use std::borrow::Cow;
use std::fmt;
use string_cache::{Atom, Namespace, WeakAtom, WeakNamespace};
pub use gecko::pseudo_element::{PseudoElement, EAGER_PSEUDOS, EAGER_PSEUDO_COUNT};
pub use gecko::snapshot::SnapshotMap;
bitflags! {
flags NonTSPseudoClassFlag: u8 {
// See NonTSPseudoClass::is_internal()
const PSEUDO_CLASS_INTERNAL = 0x01,
}
}
macro_rules! pseudo_class_name {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
#[doc = "Our representation of a non tree-structural pseudo-class."]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NonTSPseudoClass {
$(
#[doc = $css]
$name,
)*
$(
#[doc = $s_css]
$s_name(Box<[u16]>),
)*
/// The non-standard `:-moz-any` pseudo-class.
///
/// TODO(emilio): We disallow combinators and pseudos here, so we
/// should use SimpleSelector instead
MozAny(Box<[ComplexSelector<SelectorImpl>]>),
}
}
}
apply_non_ts_list!(pseudo_class_name);
impl ToCss for NonTSPseudoClass {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
use cssparser::CssStringWriter;
use fmt::Write;
macro_rules! pseudo_class_serialize {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => concat!(":", $css),)*
$(NonTSPseudoClass::$s_name(ref s) => {
write!(dest, ":{}(", $s_css)?;
{
// FIXME(emilio): Avoid the extra allocation!
let mut css = CssStringWriter::new(dest);
// Discount the null char in the end from the
// string.
css.write_str(&String::from_utf16(&s[..s.len() - 1]).unwrap())?;
}
return dest.write_str(")")
}, )*
NonTSPseudoClass::MozAny(ref selectors) => {
dest.write_str(":-moz-any(")?;
let mut iter = selectors.iter();
let first = iter.next().expect(":-moz-any must have at least 1 selector");
first.to_css(dest)?;
for selector in iter {
dest.write_str(", ")?;
selector.to_css(dest)?;
}
return dest.write_str(")")
}
}
}
}
let ser = apply_non_ts_list!(pseudo_class_serialize);
dest.write_str(ser)
}
}
impl SelectorMethods for NonTSPseudoClass {
type Impl = SelectorImpl;
fn visit<V>(&self, visitor: &mut V) -> bool
where V: SelectorVisitor<Impl = Self::Impl>,
{
if let NonTSPseudoClass::MozAny(ref selectors) = *self {
for selector in selectors.iter() {
if !selector.visit(visitor) {
return false;
}
}
}
true
}
}
impl NonTSPseudoClass {
/// A pseudo-class is internal if it can only be used inside
/// user agent style sheets.
pub fn is_internal(&self) -> bool {
macro_rules! check_flag {
(_) => (false);
($flags:expr) => ($flags.contains(PSEUDO_CLASS_INTERNAL));
}
macro_rules! pseudo_class_check_internal {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => check_flag!($flags),)*
$(NonTSPseudoClass::$s_name(..) => check_flag!($s_flags),)*
NonTSPseudoClass::MozAny(_) => false,
}
}
}
apply_non_ts_list!(pseudo_class_check_internal)
}
/// https://drafts.csswg.org/selectors-4/#useraction-pseudos
///
/// We intentionally skip the link-related ones.
pub fn | (&self) -> bool {
matches!(*self, NonTSPseudoClass::Hover |
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus)
}
/// Get the state flag associated with a pseudo-class, if any.
pub fn state_flag(&self) -> ElementState {
macro_rules! flag {
(_) => (ElementState::empty());
($state:ident) => (::element_state::$state);
}
macro_rules! pseudo_class_state {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => flag!($state),)*
$(NonTSPseudoClass::$s_name(..) => flag!($s_state),)*
NonTSPseudoClass::MozAny(..) => ElementState::empty(),
}
}
}
apply_non_ts_list!(pseudo_class_state)
}
/// Returns true if the given pseudoclass should trigger style sharing cache revalidation.
pub fn needs_cache_revalidation(&self) -> bool {
// :dir() depends on state only, but doesn't use state_flag because its
// semantics don't quite match. Nevertheless, it doesn't need cache
// revalidation, because we already compare states for elements and
// candidates.
self.state_flag().is_empty() &&
!matches!(*self,
NonTSPseudoClass::MozAny(_) |
NonTSPseudoClass::Dir(_) |
NonTSPseudoClass::MozIsHTML |
NonTSPseudoClass::MozPlaceholder)
}
/// Convert NonTSPseudoClass to Gecko's CSSPseudoClassType.
pub fn to_gecko_pseudoclasstype(&self) -> Option<CSSPseudoClassType> {
macro_rules! gecko_type {
(_) => (None);
($gecko_type:ident) =>
(Some(::gecko_bindings::structs::CSSPseudoClassType::$gecko_type));
}
macro_rules! pseudo_class_geckotype {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => gecko_type!($gecko_type),)*
$(NonTSPseudoClass::$s_name(..) => gecko_type!($s_gecko_type),)*
NonTSPseudoClass::MozAny(_) => gecko_type!(any),
}
}
}
apply_non_ts_list!(pseudo_class_geckotype)
}
}
/// The dummy struct we use to implement our selector parsing.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SelectorImpl;
impl ::selectors::SelectorImpl for SelectorImpl {
type AttrValue = Atom;
type Identifier = Atom;
type ClassName = Atom;
type LocalName = Atom;
type NamespacePrefix = Atom;
type NamespaceUrl = Namespace;
type BorrowedNamespaceUrl = WeakNamespace;
type BorrowedLocalName = WeakAtom;
type PseudoElement = PseudoElement;
type NonTSPseudoClass = NonTSPseudoClass;
}
impl<'a> ::selectors::Parser for SelectorParser<'a> {
type Impl = SelectorImpl;
fn parse_non_ts_pseudo_class(&self, name: Cow<str>) -> Result<NonTSPseudoClass, ()> {
macro_rules! pseudo_class_parse {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match_ignore_ascii_case! { &name,
$($css => NonTSPseudoClass::$name,)*
_ => return Err(())
}
}
}
let pseudo_class = apply_non_ts_list!(pseudo_class_parse);
if !pseudo_class.is_internal() || self.in_user_agent_stylesheet() {
Ok(pseudo_class)
} else {
Err(())
}
}
fn parse_non_ts_functional_pseudo_class(&self,
name: Cow<str>,
parser: &mut Parser)
-> Result<NonTSPseudoClass, ()> {
macro_rules! pseudo_class_string_parse {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match_ignore_ascii_case! { &name,
$($s_css => {
let name = parser.expect_ident_or_string()?;
// convert to null terminated utf16 string
// since that's what Gecko deals with
let utf16: Vec<u16> = name.encode_utf16().chain(Some(0u16)).collect();
NonTSPseudoClass::$s_name(utf16.into_boxed_slice())
}, )*
"-moz-any" => {
let selectors = parser.parse_comma_separated(|input| {
ComplexSelector::parse(self, input)
})?;
// Selectors inside `:-moz-any` may not include combinators.
if selectors.iter().flat_map(|x| x.iter_raw()).any(|s| s.is_combinator()) {
return Err(())
}
NonTSPseudoClass::MozAny(selectors.into_boxed_slice())
}
_ => return Err(())
}
}
}
let pseudo_class = apply_non_ts_list!(pseudo_class_string_parse);
if !pseudo_class.is_internal() || self.in_user_agent_stylesheet() {
Ok(pseudo_class)
} else {
Err(())
}
}
fn parse_pseudo_element(&self, name: Cow<str>) -> Result<PseudoElement, ()> {
PseudoElement::from_slice(&name, self.in_user_agent_stylesheet())
.ok_or(())
}
fn default_namespace(&self) -> Option<Namespace> {
self.namespaces.default.clone()
}
fn namespace_for_prefix(&self, prefix: &Atom) -> Option<Namespace> {
self.namespaces.prefixes.get(prefix).cloned()
}
}
impl SelectorImpl {
#[inline]
/// Legacy alias for PseudoElement::cascade_type.
pub fn pseudo_element_cascade_type(pseudo: &PseudoElement) -> PseudoElementCascadeType {
pseudo.cascade_type()
}
/// A helper to traverse each eagerly cascaded pseudo-element, executing
/// `fun` on it.
#[inline]
pub fn each_eagerly_cascaded_pseudo_element<F>(mut fun: F)
where F: FnMut(PseudoElement),
{
for pseudo in &EAGER_PSEUDOS {
fun(pseudo.clone())
}
}
#[inline]
/// Executes a function for each pseudo-element.
pub fn each_pseudo_element<F>(fun: F)
where F: FnMut(PseudoElement),
{
PseudoElement::each(fun)
}
#[inline]
/// Returns the relevant state flag for a given non-tree-structural
/// pseudo-class.
pub fn pseudo_class_state_flag(pc: &NonTSPseudoClass) -> ElementState {
pc.state_flag()
}
}
| is_safe_user_action_state | identifier_name |
selector_parser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Gecko-specific bits for selector-parsing.
use cssparser::{Parser, ToCss};
use element_state::ElementState;
use gecko_bindings::structs::CSSPseudoClassType;
use selector_parser::{SelectorParser, PseudoElementCascadeType};
use selectors::parser::{ComplexSelector, SelectorMethods};
use selectors::visitor::SelectorVisitor;
use std::borrow::Cow;
use std::fmt;
use string_cache::{Atom, Namespace, WeakAtom, WeakNamespace};
pub use gecko::pseudo_element::{PseudoElement, EAGER_PSEUDOS, EAGER_PSEUDO_COUNT};
pub use gecko::snapshot::SnapshotMap;
bitflags! {
flags NonTSPseudoClassFlag: u8 {
// See NonTSPseudoClass::is_internal()
const PSEUDO_CLASS_INTERNAL = 0x01,
}
}
macro_rules! pseudo_class_name {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
#[doc = "Our representation of a non tree-structural pseudo-class."]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NonTSPseudoClass {
$(
#[doc = $css]
$name,
)*
$(
#[doc = $s_css]
$s_name(Box<[u16]>),
)*
/// The non-standard `:-moz-any` pseudo-class.
///
/// TODO(emilio): We disallow combinators and pseudos here, so we
/// should use SimpleSelector instead
MozAny(Box<[ComplexSelector<SelectorImpl>]>),
}
}
}
apply_non_ts_list!(pseudo_class_name);
impl ToCss for NonTSPseudoClass {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
use cssparser::CssStringWriter;
use fmt::Write;
macro_rules! pseudo_class_serialize {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => concat!(":", $css),)*
$(NonTSPseudoClass::$s_name(ref s) => {
write!(dest, ":{}(", $s_css)?;
{
// FIXME(emilio): Avoid the extra allocation!
let mut css = CssStringWriter::new(dest);
// Discount the null char in the end from the
// string.
css.write_str(&String::from_utf16(&s[..s.len() - 1]).unwrap())?;
}
return dest.write_str(")")
}, )*
NonTSPseudoClass::MozAny(ref selectors) => {
dest.write_str(":-moz-any(")?;
let mut iter = selectors.iter();
let first = iter.next().expect(":-moz-any must have at least 1 selector");
first.to_css(dest)?;
for selector in iter {
dest.write_str(", ")?;
selector.to_css(dest)?;
}
return dest.write_str(")")
}
}
}
}
let ser = apply_non_ts_list!(pseudo_class_serialize);
dest.write_str(ser)
}
}
impl SelectorMethods for NonTSPseudoClass {
type Impl = SelectorImpl;
fn visit<V>(&self, visitor: &mut V) -> bool
where V: SelectorVisitor<Impl = Self::Impl>,
{
if let NonTSPseudoClass::MozAny(ref selectors) = *self {
for selector in selectors.iter() {
if !selector.visit(visitor) {
return false;
}
}
}
true
}
}
impl NonTSPseudoClass {
/// A pseudo-class is internal if it can only be used inside
/// user agent style sheets.
pub fn is_internal(&self) -> bool {
macro_rules! check_flag {
(_) => (false);
($flags:expr) => ($flags.contains(PSEUDO_CLASS_INTERNAL));
}
macro_rules! pseudo_class_check_internal {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => check_flag!($flags),)*
$(NonTSPseudoClass::$s_name(..) => check_flag!($s_flags),)*
NonTSPseudoClass::MozAny(_) => false,
}
}
}
apply_non_ts_list!(pseudo_class_check_internal)
}
/// https://drafts.csswg.org/selectors-4/#useraction-pseudos
///
/// We intentionally skip the link-related ones.
pub fn is_safe_user_action_state(&self) -> bool |
/// Get the state flag associated with a pseudo-class, if any.
pub fn state_flag(&self) -> ElementState {
macro_rules! flag {
(_) => (ElementState::empty());
($state:ident) => (::element_state::$state);
}
macro_rules! pseudo_class_state {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => flag!($state),)*
$(NonTSPseudoClass::$s_name(..) => flag!($s_state),)*
NonTSPseudoClass::MozAny(..) => ElementState::empty(),
}
}
}
apply_non_ts_list!(pseudo_class_state)
}
/// Returns true if the given pseudoclass should trigger style sharing cache revalidation.
pub fn needs_cache_revalidation(&self) -> bool {
// :dir() depends on state only, but doesn't use state_flag because its
// semantics don't quite match. Nevertheless, it doesn't need cache
// revalidation, because we already compare states for elements and
// candidates.
self.state_flag().is_empty() &&
!matches!(*self,
NonTSPseudoClass::MozAny(_) |
NonTSPseudoClass::Dir(_) |
NonTSPseudoClass::MozIsHTML |
NonTSPseudoClass::MozPlaceholder)
}
/// Convert NonTSPseudoClass to Gecko's CSSPseudoClassType.
pub fn to_gecko_pseudoclasstype(&self) -> Option<CSSPseudoClassType> {
macro_rules! gecko_type {
(_) => (None);
($gecko_type:ident) =>
(Some(::gecko_bindings::structs::CSSPseudoClassType::$gecko_type));
}
macro_rules! pseudo_class_geckotype {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => gecko_type!($gecko_type),)*
$(NonTSPseudoClass::$s_name(..) => gecko_type!($s_gecko_type),)*
NonTSPseudoClass::MozAny(_) => gecko_type!(any),
}
}
}
apply_non_ts_list!(pseudo_class_geckotype)
}
}
/// The dummy struct we use to implement our selector parsing.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SelectorImpl;
impl ::selectors::SelectorImpl for SelectorImpl {
type AttrValue = Atom;
type Identifier = Atom;
type ClassName = Atom;
type LocalName = Atom;
type NamespacePrefix = Atom;
type NamespaceUrl = Namespace;
type BorrowedNamespaceUrl = WeakNamespace;
type BorrowedLocalName = WeakAtom;
type PseudoElement = PseudoElement;
type NonTSPseudoClass = NonTSPseudoClass;
}
impl<'a> ::selectors::Parser for SelectorParser<'a> {
type Impl = SelectorImpl;
fn parse_non_ts_pseudo_class(&self, name: Cow<str>) -> Result<NonTSPseudoClass, ()> {
macro_rules! pseudo_class_parse {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match_ignore_ascii_case! { &name,
$($css => NonTSPseudoClass::$name,)*
_ => return Err(())
}
}
}
let pseudo_class = apply_non_ts_list!(pseudo_class_parse);
if !pseudo_class.is_internal() || self.in_user_agent_stylesheet() {
Ok(pseudo_class)
} else {
Err(())
}
}
fn parse_non_ts_functional_pseudo_class(&self,
name: Cow<str>,
parser: &mut Parser)
-> Result<NonTSPseudoClass, ()> {
macro_rules! pseudo_class_string_parse {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match_ignore_ascii_case! { &name,
$($s_css => {
let name = parser.expect_ident_or_string()?;
// convert to null terminated utf16 string
// since that's what Gecko deals with
let utf16: Vec<u16> = name.encode_utf16().chain(Some(0u16)).collect();
NonTSPseudoClass::$s_name(utf16.into_boxed_slice())
}, )*
"-moz-any" => {
let selectors = parser.parse_comma_separated(|input| {
ComplexSelector::parse(self, input)
})?;
// Selectors inside `:-moz-any` may not include combinators.
if selectors.iter().flat_map(|x| x.iter_raw()).any(|s| s.is_combinator()) {
return Err(())
}
NonTSPseudoClass::MozAny(selectors.into_boxed_slice())
}
_ => return Err(())
}
}
}
let pseudo_class = apply_non_ts_list!(pseudo_class_string_parse);
if !pseudo_class.is_internal() || self.in_user_agent_stylesheet() {
Ok(pseudo_class)
} else {
Err(())
}
}
fn parse_pseudo_element(&self, name: Cow<str>) -> Result<PseudoElement, ()> {
PseudoElement::from_slice(&name, self.in_user_agent_stylesheet())
.ok_or(())
}
fn default_namespace(&self) -> Option<Namespace> {
self.namespaces.default.clone()
}
fn namespace_for_prefix(&self, prefix: &Atom) -> Option<Namespace> {
self.namespaces.prefixes.get(prefix).cloned()
}
}
impl SelectorImpl {
#[inline]
/// Legacy alias for PseudoElement::cascade_type.
pub fn pseudo_element_cascade_type(pseudo: &PseudoElement) -> PseudoElementCascadeType {
pseudo.cascade_type()
}
/// A helper to traverse each eagerly cascaded pseudo-element, executing
/// `fun` on it.
#[inline]
pub fn each_eagerly_cascaded_pseudo_element<F>(mut fun: F)
where F: FnMut(PseudoElement),
{
for pseudo in &EAGER_PSEUDOS {
fun(pseudo.clone())
}
}
#[inline]
/// Executes a function for each pseudo-element.
pub fn each_pseudo_element<F>(fun: F)
where F: FnMut(PseudoElement),
{
PseudoElement::each(fun)
}
#[inline]
/// Returns the relevant state flag for a given non-tree-structural
/// pseudo-class.
pub fn pseudo_class_state_flag(pc: &NonTSPseudoClass) -> ElementState {
pc.state_flag()
}
}
| {
matches!(*self, NonTSPseudoClass::Hover |
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus)
} | identifier_body |
selector_parser.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Gecko-specific bits for selector-parsing.
use cssparser::{Parser, ToCss};
use element_state::ElementState;
use gecko_bindings::structs::CSSPseudoClassType;
use selector_parser::{SelectorParser, PseudoElementCascadeType};
use selectors::parser::{ComplexSelector, SelectorMethods};
use selectors::visitor::SelectorVisitor;
use std::borrow::Cow;
use std::fmt;
use string_cache::{Atom, Namespace, WeakAtom, WeakNamespace};
pub use gecko::pseudo_element::{PseudoElement, EAGER_PSEUDOS, EAGER_PSEUDO_COUNT};
pub use gecko::snapshot::SnapshotMap;
bitflags! {
flags NonTSPseudoClassFlag: u8 {
// See NonTSPseudoClass::is_internal()
const PSEUDO_CLASS_INTERNAL = 0x01,
}
}
macro_rules! pseudo_class_name {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
#[doc = "Our representation of a non tree-structural pseudo-class."]
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NonTSPseudoClass {
$(
#[doc = $css]
$name,
)*
$(
#[doc = $s_css]
$s_name(Box<[u16]>),
)*
/// The non-standard `:-moz-any` pseudo-class.
///
/// TODO(emilio): We disallow combinators and pseudos here, so we
/// should use SimpleSelector instead
MozAny(Box<[ComplexSelector<SelectorImpl>]>),
}
}
}
apply_non_ts_list!(pseudo_class_name);
impl ToCss for NonTSPseudoClass {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
use cssparser::CssStringWriter;
use fmt::Write;
macro_rules! pseudo_class_serialize {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => concat!(":", $css),)*
$(NonTSPseudoClass::$s_name(ref s) => {
write!(dest, ":{}(", $s_css)?;
{
// FIXME(emilio): Avoid the extra allocation!
let mut css = CssStringWriter::new(dest);
// Discount the null char in the end from the
// string.
css.write_str(&String::from_utf16(&s[..s.len() - 1]).unwrap())?;
}
return dest.write_str(")")
}, )*
NonTSPseudoClass::MozAny(ref selectors) => {
dest.write_str(":-moz-any(")?;
let mut iter = selectors.iter();
let first = iter.next().expect(":-moz-any must have at least 1 selector");
first.to_css(dest)?;
for selector in iter {
dest.write_str(", ")?;
selector.to_css(dest)?;
}
return dest.write_str(")")
}
}
}
}
let ser = apply_non_ts_list!(pseudo_class_serialize);
dest.write_str(ser)
}
}
impl SelectorMethods for NonTSPseudoClass {
type Impl = SelectorImpl;
fn visit<V>(&self, visitor: &mut V) -> bool
where V: SelectorVisitor<Impl = Self::Impl>,
{
if let NonTSPseudoClass::MozAny(ref selectors) = *self {
for selector in selectors.iter() {
if !selector.visit(visitor) {
return false;
}
}
}
true
}
}
impl NonTSPseudoClass {
/// A pseudo-class is internal if it can only be used inside
/// user agent style sheets.
pub fn is_internal(&self) -> bool {
macro_rules! check_flag {
(_) => (false);
($flags:expr) => ($flags.contains(PSEUDO_CLASS_INTERNAL));
}
macro_rules! pseudo_class_check_internal {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => check_flag!($flags),)*
$(NonTSPseudoClass::$s_name(..) => check_flag!($s_flags),)*
NonTSPseudoClass::MozAny(_) => false,
}
}
}
apply_non_ts_list!(pseudo_class_check_internal)
}
/// https://drafts.csswg.org/selectors-4/#useraction-pseudos
///
/// We intentionally skip the link-related ones.
pub fn is_safe_user_action_state(&self) -> bool {
matches!(*self, NonTSPseudoClass::Hover |
NonTSPseudoClass::Active |
NonTSPseudoClass::Focus)
}
/// Get the state flag associated with a pseudo-class, if any.
pub fn state_flag(&self) -> ElementState {
macro_rules! flag {
(_) => (ElementState::empty());
($state:ident) => (::element_state::$state);
}
macro_rules! pseudo_class_state {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => flag!($state),)*
$(NonTSPseudoClass::$s_name(..) => flag!($s_state),)*
NonTSPseudoClass::MozAny(..) => ElementState::empty(),
}
}
}
apply_non_ts_list!(pseudo_class_state)
}
/// Returns true if the given pseudoclass should trigger style sharing cache revalidation.
pub fn needs_cache_revalidation(&self) -> bool {
// :dir() depends on state only, but doesn't use state_flag because its
// semantics don't quite match. Nevertheless, it doesn't need cache
// revalidation, because we already compare states for elements and
// candidates.
self.state_flag().is_empty() &&
!matches!(*self,
NonTSPseudoClass::MozAny(_) |
NonTSPseudoClass::Dir(_) |
NonTSPseudoClass::MozIsHTML |
NonTSPseudoClass::MozPlaceholder)
}
/// Convert NonTSPseudoClass to Gecko's CSSPseudoClassType.
pub fn to_gecko_pseudoclasstype(&self) -> Option<CSSPseudoClassType> {
macro_rules! gecko_type {
(_) => (None);
($gecko_type:ident) =>
(Some(::gecko_bindings::structs::CSSPseudoClassType::$gecko_type));
}
macro_rules! pseudo_class_geckotype {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match *self {
$(NonTSPseudoClass::$name => gecko_type!($gecko_type),)*
$(NonTSPseudoClass::$s_name(..) => gecko_type!($s_gecko_type),)*
NonTSPseudoClass::MozAny(_) => gecko_type!(any),
}
}
}
apply_non_ts_list!(pseudo_class_geckotype)
}
}
/// The dummy struct we use to implement our selector parsing.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SelectorImpl;
impl ::selectors::SelectorImpl for SelectorImpl {
type AttrValue = Atom;
type Identifier = Atom;
type ClassName = Atom;
type LocalName = Atom;
type NamespacePrefix = Atom;
type NamespaceUrl = Namespace;
type BorrowedNamespaceUrl = WeakNamespace;
type BorrowedLocalName = WeakAtom;
type PseudoElement = PseudoElement;
type NonTSPseudoClass = NonTSPseudoClass;
}
impl<'a> ::selectors::Parser for SelectorParser<'a> {
type Impl = SelectorImpl;
fn parse_non_ts_pseudo_class(&self, name: Cow<str>) -> Result<NonTSPseudoClass, ()> {
macro_rules! pseudo_class_parse {
(bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => {
match_ignore_ascii_case! { &name,
$($css => NonTSPseudoClass::$name,)*
_ => return Err(())
}
}
}
let pseudo_class = apply_non_ts_list!(pseudo_class_parse);
if!pseudo_class.is_internal() || self.in_user_agent_stylesheet() {
Ok(pseudo_class)
} else {
Err(())
}
}
fn parse_non_ts_functional_pseudo_class(&self,
name: Cow<str>,
parser: &mut Parser)
-> Result<NonTSPseudoClass, ()> {
macro_rules! pseudo_class_string_parse { | match_ignore_ascii_case! { &name,
$($s_css => {
let name = parser.expect_ident_or_string()?;
// convert to null terminated utf16 string
// since that's what Gecko deals with
let utf16: Vec<u16> = name.encode_utf16().chain(Some(0u16)).collect();
NonTSPseudoClass::$s_name(utf16.into_boxed_slice())
}, )*
"-moz-any" => {
let selectors = parser.parse_comma_separated(|input| {
ComplexSelector::parse(self, input)
})?;
// Selectors inside `:-moz-any` may not include combinators.
if selectors.iter().flat_map(|x| x.iter_raw()).any(|s| s.is_combinator()) {
return Err(())
}
NonTSPseudoClass::MozAny(selectors.into_boxed_slice())
}
_ => return Err(())
}
}
}
let pseudo_class = apply_non_ts_list!(pseudo_class_string_parse);
if!pseudo_class.is_internal() || self.in_user_agent_stylesheet() {
Ok(pseudo_class)
} else {
Err(())
}
}
fn parse_pseudo_element(&self, name: Cow<str>) -> Result<PseudoElement, ()> {
PseudoElement::from_slice(&name, self.in_user_agent_stylesheet())
.ok_or(())
}
fn default_namespace(&self) -> Option<Namespace> {
self.namespaces.default.clone()
}
fn namespace_for_prefix(&self, prefix: &Atom) -> Option<Namespace> {
self.namespaces.prefixes.get(prefix).cloned()
}
}
impl SelectorImpl {
#[inline]
/// Legacy alias for PseudoElement::cascade_type.
pub fn pseudo_element_cascade_type(pseudo: &PseudoElement) -> PseudoElementCascadeType {
pseudo.cascade_type()
}
/// A helper to traverse each eagerly cascaded pseudo-element, executing
/// `fun` on it.
#[inline]
pub fn each_eagerly_cascaded_pseudo_element<F>(mut fun: F)
where F: FnMut(PseudoElement),
{
for pseudo in &EAGER_PSEUDOS {
fun(pseudo.clone())
}
}
#[inline]
/// Executes a function for each pseudo-element.
pub fn each_pseudo_element<F>(fun: F)
where F: FnMut(PseudoElement),
{
PseudoElement::each(fun)
}
#[inline]
/// Returns the relevant state flag for a given non-tree-structural
/// pseudo-class.
pub fn pseudo_class_state_flag(pc: &NonTSPseudoClass) -> ElementState {
pc.state_flag()
}
} | (bare: [$(($css:expr, $name:ident, $gecko_type:tt, $state:tt, $flags:tt),)*],
string: [$(($s_css:expr, $s_name:ident, $s_gecko_type:tt, $s_state:tt, $s_flags:tt),)*]) => { | random_line_split |
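The row above captures Servo's higher-order macro pattern: `apply_non_ts_list!` owns the single list of pseudo-classes and is invoked once per consumer macro (`pseudo_class_name!`, `pseudo_class_serialize!`, `pseudo_class_parse!`, and so on), so the generated enum, serializer and parser can never drift apart. A minimal standalone sketch of the same callback-macro idea — all names here are invented, nothing below comes from the row — looks like this:

```rust
// One macro owns the variant list; each consumer macro is passed that list.
macro_rules! apply_color_list {
    ($callback:ident) => {
        $callback! {
            ("red", Red),
            ("green", Green),
            ("blue", Blue),
        }
    };
}

// A consumer: generates the enum and a serializer from the shared list.
macro_rules! color_enum {
    ($(($css:expr, $name:ident),)*) => {
        #[derive(Clone, Copy, Debug, PartialEq, Eq)]
        pub enum Color {
            $($name,)*
        }

        impl Color {
            pub fn to_css(self) -> &'static str {
                match self {
                    $(Color::$name => $css,)*
                }
            }
        }
    };
}

apply_color_list!(color_enum);

fn main() {
    assert_eq!(Color::Green.to_css(), "green");
}
```

The point of the indirection is that adding a variant means editing only the list macro; every generated `match` stays exhaustive.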
overloaded-index-in-field.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test using overloaded indexing when the "map" is stored in a
// field. This caused problems at some point.
#![feature(core)]
use std::ops::Index;
struct Foo {
x: isize,
y: isize,
}
struct Bar {
foo: Foo
}
impl Index<isize> for Foo {
type Output = isize;
fn index(&self, z: isize) -> &isize {
if z == 0 | else {
&self.y
}
}
}
trait Int {
fn get(self) -> isize;
fn get_from_ref(&self) -> isize;
fn inc(&mut self);
}
impl Int for isize {
fn get(self) -> isize { self }
fn get_from_ref(&self) -> isize { *self }
fn inc(&mut self) { *self += 1; }
}
fn main() {
let f = Bar { foo: Foo {
x: 1,
y: 2,
} };
assert_eq!(f.foo[1].get(), 2);
}
| {
&self.x
} | conditional_block |
overloaded-index-in-field.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | //
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test using overloaded indexing when the "map" is stored in a
// field. This caused problems at some point.
#![feature(core)]
use std::ops::Index;
struct Foo {
x: isize,
y: isize,
}
struct Bar {
foo: Foo
}
impl Index<isize> for Foo {
type Output = isize;
fn index(&self, z: isize) -> &isize {
if z == 0 {
&self.x
} else {
&self.y
}
}
}
trait Int {
fn get(self) -> isize;
fn get_from_ref(&self) -> isize;
fn inc(&mut self);
}
impl Int for isize {
fn get(self) -> isize { self }
fn get_from_ref(&self) -> isize { *self }
fn inc(&mut self) { *self += 1; }
}
fn main() {
let f = Bar { foo: Foo {
x: 1,
y: 2,
} };
assert_eq!(f.foo[1].get(), 2);
} | random_line_split |
|
overloaded-index-in-field.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test using overloaded indexing when the "map" is stored in a
// field. This caused problems at some point.
#![feature(core)]
use std::ops::Index;
struct | {
x: isize,
y: isize,
}
struct Bar {
foo: Foo
}
impl Index<isize> for Foo {
type Output = isize;
fn index(&self, z: isize) -> &isize {
if z == 0 {
&self.x
} else {
&self.y
}
}
}
trait Int {
fn get(self) -> isize;
fn get_from_ref(&self) -> isize;
fn inc(&mut self);
}
impl Int for isize {
fn get(self) -> isize { self }
fn get_from_ref(&self) -> isize { *self }
fn inc(&mut self) { *self += 1; }
}
fn main() {
let f = Bar { foo: Foo {
x: 1,
y: 2,
} };
assert_eq!(f.foo[1].get(), 2);
}
| Foo | identifier_name |
overloaded-index-in-field.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test using overloaded indexing when the "map" is stored in a
// field. This caused problems at some point.
#![feature(core)]
use std::ops::Index;
struct Foo {
x: isize,
y: isize,
}
struct Bar {
foo: Foo
}
impl Index<isize> for Foo {
type Output = isize;
fn index(&self, z: isize) -> &isize |
}
trait Int {
fn get(self) -> isize;
fn get_from_ref(&self) -> isize;
fn inc(&mut self);
}
impl Int for isize {
fn get(self) -> isize { self }
fn get_from_ref(&self) -> isize { *self }
fn inc(&mut self) { *self += 1; }
}
fn main() {
let f = Bar { foo: Foo {
x: 1,
y: 2,
} };
assert_eq!(f.foo[1].get(), 2);
}
| {
if z == 0 {
&self.x
} else {
&self.y
}
} | identifier_body |
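The four rows above are different splits of the same 2014-era rustc regression test, written against pre-1.0 Rust (`#![feature(core)]`, `isize` indices, `Index::index` taking the index by value in spirit). For orientation only — this sketch is not part of the dataset, and its names are invented — a rough equivalent of that test in current stable Rust would be:

```rust
use std::ops::Index;

struct Point {
    x: i32,
    y: i32,
}

struct Holder {
    point: Point,
}

impl Index<usize> for Point {
    type Output = i32;

    fn index(&self, i: usize) -> &i32 {
        match i {
            0 => &self.x,
            1 => &self.y,
            _ => panic!("Point index out of range: {}", i),
        }
    }
}

fn main() {
    // Overloaded indexing reached through a struct field, as in the original test.
    let h = Holder { point: Point { x: 1, y: 2 } };
    assert_eq!(h.point[1], 2);
}
```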
associated-types-normalize-in-bounds-ufcs.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT. | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we normalize associated types that appear in bounds; if
// we didn't, the call to `self.split2()` fails to type check.
// pretty-expanded FIXME #23616
use std::marker::PhantomData;
struct Splits<'a, T:'a, P>(PhantomData<(&'a T, P)>);
struct SplitsN<I>(PhantomData<I>);
trait SliceExt2 {
type Item;
fn split2<'a, P>(&'a self, pred: P) -> Splits<'a, Self::Item, P>
where P: FnMut(&Self::Item) -> bool;
fn splitn2<'a, P>(&'a self, n: u32, pred: P) -> SplitsN<Splits<'a, Self::Item, P>>
where P: FnMut(&Self::Item) -> bool;
}
impl<T> SliceExt2 for [T] {
type Item = T;
fn split2<P>(&self, pred: P) -> Splits<T, P> where P: FnMut(&T) -> bool {
loop {}
}
fn splitn2<P>(&self, n: u32, pred: P) -> SplitsN<Splits<T, P>> where P: FnMut(&T) -> bool {
SliceExt2::split2(self, pred);
loop {}
}
}
fn main() { } | // | random_line_split |
associated-types-normalize-in-bounds-ufcs.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that we normalize associated types that appear in bounds; if
// we didn't, the call to `self.split2()` fails to type check.
// pretty-expanded FIXME #23616
use std::marker::PhantomData;
struct Splits<'a, T:'a, P>(PhantomData<(&'a T, P)>);
struct SplitsN<I>(PhantomData<I>);
trait SliceExt2 {
type Item;
fn split2<'a, P>(&'a self, pred: P) -> Splits<'a, Self::Item, P>
where P: FnMut(&Self::Item) -> bool;
fn splitn2<'a, P>(&'a self, n: u32, pred: P) -> SplitsN<Splits<'a, Self::Item, P>>
where P: FnMut(&Self::Item) -> bool;
}
impl<T> SliceExt2 for [T] {
type Item = T;
fn split2<P>(&self, pred: P) -> Splits<T, P> where P: FnMut(&T) -> bool {
loop {}
}
fn splitn2<P>(&self, n: u32, pred: P) -> SplitsN<Splits<T, P>> where P: FnMut(&T) -> bool {
SliceExt2::split2(self, pred);
loop {}
}
}
fn | () { }
| main | identifier_name |
serviceworkercontainer.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ServiceWorkerContainerBinding::{ServiceWorkerContainerMethods, Wrap};
use dom::bindings::codegen::Bindings::ServiceWorkerContainerBinding::RegistrationOptions;
use dom::bindings::error::Error;
use dom::bindings::reflector::{DomObject, reflect_dom_object};
use dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use dom::bindings::str::USVString; | use dom::client::Client;
use dom::eventtarget::EventTarget;
use dom::globalscope::GlobalScope;
use dom::promise::Promise;
use dom::serviceworker::ServiceWorker;
use dom_struct::dom_struct;
use script_thread::ScriptThread;
use serviceworkerjob::{Job, JobType};
#[allow(unused_imports)] use std::ascii::AsciiExt;
use std::default::Default;
use std::rc::Rc;
#[dom_struct]
pub struct ServiceWorkerContainer {
eventtarget: EventTarget,
controller: MutNullableDom<ServiceWorker>,
client: Dom<Client>
}
impl ServiceWorkerContainer {
fn new_inherited(client: &Client) -> ServiceWorkerContainer {
ServiceWorkerContainer {
eventtarget: EventTarget::new_inherited(),
controller: Default::default(),
client: Dom::from_ref(client),
}
}
#[allow(unrooted_must_root)]
pub fn new(global: &GlobalScope) -> DomRoot<ServiceWorkerContainer> {
let client = Client::new(&global.as_window());
let container = ServiceWorkerContainer::new_inherited(&*client);
reflect_dom_object(Box::new(container), global, Wrap)
}
}
impl ServiceWorkerContainerMethods for ServiceWorkerContainer {
// https://w3c.github.io/ServiceWorker/#service-worker-container-controller-attribute
fn GetController(&self) -> Option<DomRoot<ServiceWorker>> {
self.client.get_controller()
}
#[allow(unrooted_must_root)]
// https://w3c.github.io/ServiceWorker/#service-worker-container-register-method and - A
// https://w3c.github.io/ServiceWorker/#start-register-algorithm - B
fn Register(&self,
script_url: USVString,
options: &RegistrationOptions) -> Rc<Promise> {
// A: Step 1
let promise = Promise::new(&*self.global());
let USVString(ref script_url) = script_url;
let api_base_url = self.global().api_base_url();
// A: Step 3-5
let script_url = match api_base_url.join(script_url) {
Ok(url) => url,
Err(_) => {
promise.reject_error(Error::Type("Invalid script URL".to_owned()));
return promise;
}
};
// B: Step 2
match script_url.scheme() {
"https" | "http" => {},
_ => {
promise.reject_error(Error::Type("Only secure origins are allowed".to_owned()));
return promise;
}
}
// B: Step 3
if script_url.path().to_ascii_lowercase().contains("%2f") ||
script_url.path().to_ascii_lowercase().contains("%5c") {
promise.reject_error(Error::Type("Script URL contains forbidden characters".to_owned()));
return promise;
}
// B: Step 4-5
let scope = match options.scope {
Some(ref scope) => {
let &USVString(ref inner_scope) = scope;
match api_base_url.join(inner_scope) {
Ok(url) => url,
Err(_) => {
promise.reject_error(Error::Type("Invalid scope URL".to_owned()));
return promise;
}
}
},
None => script_url.join("./").unwrap()
};
// B: Step 6
match scope.scheme() {
"https" | "http" => {},
_ => {
promise.reject_error(Error::Type("Only secure origins are allowed".to_owned()));
return promise;
}
}
// B: Step 7
if scope.path().to_ascii_lowercase().contains("%2f") ||
scope.path().to_ascii_lowercase().contains("%5c") {
promise.reject_error(Error::Type("Scope URL contains forbidden characters".to_owned()));
return promise;
}
// B: Step 8
let job = Job::create_job(JobType::Register, scope, script_url, promise.clone(), &*self.client);
ScriptThread::schedule_job(job);
promise
}
} | random_line_split |
|
serviceworkercontainer.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ServiceWorkerContainerBinding::{ServiceWorkerContainerMethods, Wrap};
use dom::bindings::codegen::Bindings::ServiceWorkerContainerBinding::RegistrationOptions;
use dom::bindings::error::Error;
use dom::bindings::reflector::{DomObject, reflect_dom_object};
use dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use dom::bindings::str::USVString;
use dom::client::Client;
use dom::eventtarget::EventTarget;
use dom::globalscope::GlobalScope;
use dom::promise::Promise;
use dom::serviceworker::ServiceWorker;
use dom_struct::dom_struct;
use script_thread::ScriptThread;
use serviceworkerjob::{Job, JobType};
#[allow(unused_imports)] use std::ascii::AsciiExt;
use std::default::Default;
use std::rc::Rc;
#[dom_struct]
pub struct ServiceWorkerContainer {
eventtarget: EventTarget,
controller: MutNullableDom<ServiceWorker>,
client: Dom<Client>
}
impl ServiceWorkerContainer {
fn | (client: &Client) -> ServiceWorkerContainer {
ServiceWorkerContainer {
eventtarget: EventTarget::new_inherited(),
controller: Default::default(),
client: Dom::from_ref(client),
}
}
#[allow(unrooted_must_root)]
pub fn new(global: &GlobalScope) -> DomRoot<ServiceWorkerContainer> {
let client = Client::new(&global.as_window());
let container = ServiceWorkerContainer::new_inherited(&*client);
reflect_dom_object(Box::new(container), global, Wrap)
}
}
impl ServiceWorkerContainerMethods for ServiceWorkerContainer {
// https://w3c.github.io/ServiceWorker/#service-worker-container-controller-attribute
fn GetController(&self) -> Option<DomRoot<ServiceWorker>> {
self.client.get_controller()
}
#[allow(unrooted_must_root)]
// https://w3c.github.io/ServiceWorker/#service-worker-container-register-method and - A
// https://w3c.github.io/ServiceWorker/#start-register-algorithm - B
fn Register(&self,
script_url: USVString,
options: &RegistrationOptions) -> Rc<Promise> {
// A: Step 1
let promise = Promise::new(&*self.global());
let USVString(ref script_url) = script_url;
let api_base_url = self.global().api_base_url();
// A: Step 3-5
let script_url = match api_base_url.join(script_url) {
Ok(url) => url,
Err(_) => {
promise.reject_error(Error::Type("Invalid script URL".to_owned()));
return promise;
}
};
// B: Step 2
match script_url.scheme() {
"https" | "http" => {},
_ => {
promise.reject_error(Error::Type("Only secure origins are allowed".to_owned()));
return promise;
}
}
// B: Step 3
if script_url.path().to_ascii_lowercase().contains("%2f") ||
script_url.path().to_ascii_lowercase().contains("%5c") {
promise.reject_error(Error::Type("Script URL contains forbidden characters".to_owned()));
return promise;
}
// B: Step 4-5
let scope = match options.scope {
Some(ref scope) => {
let &USVString(ref inner_scope) = scope;
match api_base_url.join(inner_scope) {
Ok(url) => url,
Err(_) => {
promise.reject_error(Error::Type("Invalid scope URL".to_owned()));
return promise;
}
}
},
None => script_url.join("./").unwrap()
};
// B: Step 6
match scope.scheme() {
"https" | "http" => {},
_ => {
promise.reject_error(Error::Type("Only secure origins are allowed".to_owned()));
return promise;
}
}
// B: Step 7
if scope.path().to_ascii_lowercase().contains("%2f") ||
scope.path().to_ascii_lowercase().contains("%5c") {
promise.reject_error(Error::Type("Scope URL contains forbidden characters".to_owned()));
return promise;
}
// B: Step 8
let job = Job::create_job(JobType::Register, scope, script_url, promise.clone(), &*self.client);
ScriptThread::schedule_job(job);
promise
}
}
| new_inherited | identifier_name |
serviceworkercontainer.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::ServiceWorkerContainerBinding::{ServiceWorkerContainerMethods, Wrap};
use dom::bindings::codegen::Bindings::ServiceWorkerContainerBinding::RegistrationOptions;
use dom::bindings::error::Error;
use dom::bindings::reflector::{DomObject, reflect_dom_object};
use dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use dom::bindings::str::USVString;
use dom::client::Client;
use dom::eventtarget::EventTarget;
use dom::globalscope::GlobalScope;
use dom::promise::Promise;
use dom::serviceworker::ServiceWorker;
use dom_struct::dom_struct;
use script_thread::ScriptThread;
use serviceworkerjob::{Job, JobType};
#[allow(unused_imports)] use std::ascii::AsciiExt;
use std::default::Default;
use std::rc::Rc;
#[dom_struct]
pub struct ServiceWorkerContainer {
eventtarget: EventTarget,
controller: MutNullableDom<ServiceWorker>,
client: Dom<Client>
}
impl ServiceWorkerContainer {
fn new_inherited(client: &Client) -> ServiceWorkerContainer {
ServiceWorkerContainer {
eventtarget: EventTarget::new_inherited(),
controller: Default::default(),
client: Dom::from_ref(client),
}
}
#[allow(unrooted_must_root)]
pub fn new(global: &GlobalScope) -> DomRoot<ServiceWorkerContainer> {
let client = Client::new(&global.as_window());
let container = ServiceWorkerContainer::new_inherited(&*client);
reflect_dom_object(Box::new(container), global, Wrap)
}
}
impl ServiceWorkerContainerMethods for ServiceWorkerContainer {
// https://w3c.github.io/ServiceWorker/#service-worker-container-controller-attribute
fn GetController(&self) -> Option<DomRoot<ServiceWorker>> |
#[allow(unrooted_must_root)]
// https://w3c.github.io/ServiceWorker/#service-worker-container-register-method and - A
// https://w3c.github.io/ServiceWorker/#start-register-algorithm - B
fn Register(&self,
script_url: USVString,
options: &RegistrationOptions) -> Rc<Promise> {
// A: Step 1
let promise = Promise::new(&*self.global());
let USVString(ref script_url) = script_url;
let api_base_url = self.global().api_base_url();
// A: Step 3-5
let script_url = match api_base_url.join(script_url) {
Ok(url) => url,
Err(_) => {
promise.reject_error(Error::Type("Invalid script URL".to_owned()));
return promise;
}
};
// B: Step 2
match script_url.scheme() {
"https" | "http" => {},
_ => {
promise.reject_error(Error::Type("Only secure origins are allowed".to_owned()));
return promise;
}
}
// B: Step 3
if script_url.path().to_ascii_lowercase().contains("%2f") ||
script_url.path().to_ascii_lowercase().contains("%5c") {
promise.reject_error(Error::Type("Script URL contains forbidden characters".to_owned()));
return promise;
}
// B: Step 4-5
let scope = match options.scope {
Some(ref scope) => {
let &USVString(ref inner_scope) = scope;
match api_base_url.join(inner_scope) {
Ok(url) => url,
Err(_) => {
promise.reject_error(Error::Type("Invalid scope URL".to_owned()));
return promise;
}
}
},
None => script_url.join("./").unwrap()
};
// B: Step 6
match scope.scheme() {
"https" | "http" => {},
_ => {
promise.reject_error(Error::Type("Only secure origins are allowed".to_owned()));
return promise;
}
}
// B: Step 7
if scope.path().to_ascii_lowercase().contains("%2f") ||
scope.path().to_ascii_lowercase().contains("%5c") {
promise.reject_error(Error::Type("Scope URL contains forbidden characters".to_owned()));
return promise;
}
// B: Step 8
let job = Job::create_job(JobType::Register, scope, script_url, promise.clone(), &*self.client);
ScriptThread::schedule_job(job);
promise
}
}
| {
self.client.get_controller()
} | identifier_body |
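The `Register` method in the rows above joins the script and scope URLs against the API base URL, accepts only `http`/`https`, and rejects paths containing `%2f` or `%5c`, following the Service Worker spec steps cited in its comments. A rough standalone sketch of that validation using the `url` crate — an assumption on my part; Servo's actual `ServoUrl`, `Promise` and error types are not used here — could look like:

```rust
// Hypothetical helper mirroring the scheme and forbidden-character checks above.
use url::Url;

fn validate_sw_url(base: &Url, raw: &str) -> Result<Url, String> {
    // Resolve the raw string against the API base URL.
    let joined = base.join(raw).map_err(|_| "invalid URL".to_string())?;
    // Only secure (or http, for localhost-style development) origins are allowed.
    match joined.scheme() {
        "https" | "http" => {}
        _ => return Err("only secure origins are allowed".to_string()),
    }
    // Reject encoded slashes and backslashes in the path.
    let path = joined.path().to_ascii_lowercase();
    if path.contains("%2f") || path.contains("%5c") {
        return Err("URL path contains forbidden characters".to_string());
    }
    Ok(joined)
}

fn main() {
    let base = Url::parse("https://example.com/app/").unwrap();
    assert!(validate_sw_url(&base, "sw.js").is_ok());
    assert!(validate_sw_url(&base, "sw%2fjs").is_err());
}
```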
testworkletglobalscope.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::TestWorkletGlobalScopeBinding;
use crate::dom::bindings::codegen::Bindings::TestWorkletGlobalScopeBinding::TestWorkletGlobalScopeMethods;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::worklet::WorkletExecutor;
use crate::dom::workletglobalscope::WorkletGlobalScope;
use crate::dom::workletglobalscope::WorkletGlobalScopeInit;
use dom_struct::dom_struct;
use js::rust::Runtime;
use msg::constellation_msg::PipelineId;
use servo_channel::Sender;
use servo_url::ServoUrl;
use std::collections::HashMap;
// check-tidy: no specs after this line
#[dom_struct]
pub struct TestWorkletGlobalScope {
// The worklet global for this object
worklet_global: WorkletGlobalScope,
// The key/value pairs
lookup_table: DomRefCell<HashMap<String, String>>,
}
impl TestWorkletGlobalScope {
#[allow(unsafe_code)]
pub fn new(
runtime: &Runtime,
pipeline_id: PipelineId,
base_url: ServoUrl,
executor: WorkletExecutor,
init: &WorkletGlobalScopeInit,
) -> DomRoot<TestWorkletGlobalScope> {
debug!(
"Creating test worklet global scope for pipeline {}.",
pipeline_id
);
let global = Box::new(TestWorkletGlobalScope {
worklet_global: WorkletGlobalScope::new_inherited(
pipeline_id,
base_url,
executor,
init,
),
lookup_table: Default::default(),
});
unsafe { TestWorkletGlobalScopeBinding::Wrap(runtime.cx(), global) }
}
pub fn perform_a_worklet_task(&self, task: TestWorkletTask) {
match task {
TestWorkletTask::Lookup(key, sender) => {
debug!("Looking up key {}.", key);
let result = self.lookup_table.borrow().get(&key).cloned();
let _ = sender.send(result);
},
}
}
}
impl TestWorkletGlobalScopeMethods for TestWorkletGlobalScope {
fn RegisterKeyValue(&self, key: DOMString, value: DOMString) |
}
/// Tasks which can be performed by test worklets.
pub enum TestWorkletTask {
Lookup(String, Sender<Option<String>>),
}
| {
debug!("Registering test worklet key/value {}/{}.", key, value);
self.lookup_table
.borrow_mut()
.insert(String::from(key), String::from(value));
} | identifier_body |
testworkletglobalscope.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::TestWorkletGlobalScopeBinding;
use crate::dom::bindings::codegen::Bindings::TestWorkletGlobalScopeBinding::TestWorkletGlobalScopeMethods;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::worklet::WorkletExecutor;
use crate::dom::workletglobalscope::WorkletGlobalScope;
use crate::dom::workletglobalscope::WorkletGlobalScopeInit;
use dom_struct::dom_struct;
use js::rust::Runtime;
use msg::constellation_msg::PipelineId;
use servo_channel::Sender;
use servo_url::ServoUrl;
use std::collections::HashMap;
// check-tidy: no specs after this line
#[dom_struct]
pub struct TestWorkletGlobalScope {
// The worklet global for this object
worklet_global: WorkletGlobalScope,
// The key/value pairs
lookup_table: DomRefCell<HashMap<String, String>>,
}
impl TestWorkletGlobalScope {
#[allow(unsafe_code)]
pub fn new(
runtime: &Runtime,
pipeline_id: PipelineId,
base_url: ServoUrl,
executor: WorkletExecutor,
init: &WorkletGlobalScopeInit,
) -> DomRoot<TestWorkletGlobalScope> {
debug!(
"Creating test worklet global scope for pipeline {}.",
pipeline_id
);
let global = Box::new(TestWorkletGlobalScope {
worklet_global: WorkletGlobalScope::new_inherited(
pipeline_id,
base_url,
executor,
init,
),
lookup_table: Default::default(),
});
unsafe { TestWorkletGlobalScopeBinding::Wrap(runtime.cx(), global) }
}
pub fn perform_a_worklet_task(&self, task: TestWorkletTask) {
match task {
TestWorkletTask::Lookup(key, sender) => {
debug!("Looking up key {}.", key);
let result = self.lookup_table.borrow().get(&key).cloned();
let _ = sender.send(result);
},
}
}
}
impl TestWorkletGlobalScopeMethods for TestWorkletGlobalScope {
fn RegisterKeyValue(&self, key: DOMString, value: DOMString) {
debug!("Registering test worklet key/value {}/{}.", key, value);
self.lookup_table
.borrow_mut()
.insert(String::from(key), String::from(value));
}
}
/// Tasks which can be performed by test worklets.
pub enum | {
Lookup(String, Sender<Option<String>>),
}
| TestWorkletTask | identifier_name |
testworkletglobalscope.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::bindings::codegen::Bindings::TestWorkletGlobalScopeBinding;
use crate::dom::bindings::codegen::Bindings::TestWorkletGlobalScopeBinding::TestWorkletGlobalScopeMethods;
use crate::dom::bindings::root::DomRoot;
use crate::dom::bindings::str::DOMString;
use crate::dom::worklet::WorkletExecutor;
use crate::dom::workletglobalscope::WorkletGlobalScope;
use crate::dom::workletglobalscope::WorkletGlobalScopeInit;
use dom_struct::dom_struct;
use js::rust::Runtime;
use msg::constellation_msg::PipelineId;
use servo_channel::Sender;
use servo_url::ServoUrl;
use std::collections::HashMap;
// check-tidy: no specs after this line
#[dom_struct]
pub struct TestWorkletGlobalScope {
// The worklet global for this object
worklet_global: WorkletGlobalScope,
// The key/value pairs
lookup_table: DomRefCell<HashMap<String, String>>,
}
impl TestWorkletGlobalScope {
#[allow(unsafe_code)]
pub fn new(
runtime: &Runtime,
pipeline_id: PipelineId,
base_url: ServoUrl,
executor: WorkletExecutor,
init: &WorkletGlobalScopeInit,
) -> DomRoot<TestWorkletGlobalScope> {
debug!(
"Creating test worklet global scope for pipeline {}.",
pipeline_id
);
let global = Box::new(TestWorkletGlobalScope {
worklet_global: WorkletGlobalScope::new_inherited(
pipeline_id,
base_url,
executor,
init,
),
lookup_table: Default::default(), | });
unsafe { TestWorkletGlobalScopeBinding::Wrap(runtime.cx(), global) }
}
pub fn perform_a_worklet_task(&self, task: TestWorkletTask) {
match task {
TestWorkletTask::Lookup(key, sender) => {
debug!("Looking up key {}.", key);
let result = self.lookup_table.borrow().get(&key).cloned();
let _ = sender.send(result);
},
}
}
}
impl TestWorkletGlobalScopeMethods for TestWorkletGlobalScope {
fn RegisterKeyValue(&self, key: DOMString, value: DOMString) {
debug!("Registering test worklet key/value {}/{}.", key, value);
self.lookup_table
.borrow_mut()
.insert(String::from(key), String::from(value));
}
}
/// Tasks which can be performed by test worklets.
pub enum TestWorkletTask {
Lookup(String, Sender<Option<String>>),
} | random_line_split |
|
dom_blob.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use DOMObject;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::SignalHandlerId;
use glib::signal::connect_raw;
use glib::translate::*;
use glib_sys;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
use webkit2_webextension_sys;
glib_wrapper! {
pub struct DOMBlob(Object<webkit2_webextension_sys::WebKitDOMBlob, webkit2_webextension_sys::WebKitDOMBlobClass, DOMBlobClass>) @extends DOMObject;
match fn {
get_type => || webkit2_webextension_sys::webkit_dom_blob_get_type(),
}
}
pub const NONE_DOM_BLOB: Option<&DOMBlob> = None;
pub trait DOMBlobExt:'static {
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_size(&self) -> u64;
fn connect_property_size_notify<F: Fn(&Self) +'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<DOMBlob>> DOMBlobExt for O {
fn get_size(&self) -> u64 {
unsafe {
webkit2_webextension_sys::webkit_dom_blob_get_size(self.as_ref().to_glib_none().0)
}
}
fn connect_property_size_notify<F: Fn(&Self) +'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_size_trampoline<P, F: Fn(&P) +'static>(this: *mut webkit2_webextension_sys::WebKitDOMBlob, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMBlob>
{
let f: &F = &*(f as *const F);
f(&DOMBlob::from_glib_borrow(this).unsafe_cast())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::size\0".as_ptr() as *const _,
Some(transmute(notify_size_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
}
| impl fmt::Display for DOMBlob {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "DOMBlob")
}
} | random_line_split |
|
dom_blob.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use DOMObject;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::SignalHandlerId;
use glib::signal::connect_raw;
use glib::translate::*;
use glib_sys;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
use webkit2_webextension_sys;
glib_wrapper! {
pub struct DOMBlob(Object<webkit2_webextension_sys::WebKitDOMBlob, webkit2_webextension_sys::WebKitDOMBlobClass, DOMBlobClass>) @extends DOMObject;
match fn {
get_type => || webkit2_webextension_sys::webkit_dom_blob_get_type(),
}
}
pub const NONE_DOM_BLOB: Option<&DOMBlob> = None;
pub trait DOMBlobExt:'static {
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_size(&self) -> u64;
fn connect_property_size_notify<F: Fn(&Self) +'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<DOMBlob>> DOMBlobExt for O {
fn get_size(&self) -> u64 {
unsafe {
webkit2_webextension_sys::webkit_dom_blob_get_size(self.as_ref().to_glib_none().0)
}
}
fn connect_property_size_notify<F: Fn(&Self) +'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_size_trampoline<P, F: Fn(&P) +'static>(this: *mut webkit2_webextension_sys::WebKitDOMBlob, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMBlob>
{
let f: &F = &*(f as *const F);
f(&DOMBlob::from_glib_borrow(this).unsafe_cast())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::size\0".as_ptr() as *const _,
Some(transmute(notify_size_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
}
impl fmt::Display for DOMBlob {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
| {
write!(f, "DOMBlob")
} | identifier_body |
dom_blob.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use DOMObject;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::SignalHandlerId;
use glib::signal::connect_raw;
use glib::translate::*;
use glib_sys;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
use webkit2_webextension_sys;
glib_wrapper! {
pub struct DOMBlob(Object<webkit2_webextension_sys::WebKitDOMBlob, webkit2_webextension_sys::WebKitDOMBlobClass, DOMBlobClass>) @extends DOMObject;
match fn {
get_type => || webkit2_webextension_sys::webkit_dom_blob_get_type(),
}
}
pub const NONE_DOM_BLOB: Option<&DOMBlob> = None;
pub trait DOMBlobExt:'static {
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_size(&self) -> u64;
fn connect_property_size_notify<F: Fn(&Self) +'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<DOMBlob>> DOMBlobExt for O {
fn get_size(&self) -> u64 {
unsafe {
webkit2_webextension_sys::webkit_dom_blob_get_size(self.as_ref().to_glib_none().0)
}
}
fn connect_property_size_notify<F: Fn(&Self) +'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn | <P, F: Fn(&P) +'static>(this: *mut webkit2_webextension_sys::WebKitDOMBlob, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer)
where P: IsA<DOMBlob>
{
let f: &F = &*(f as *const F);
f(&DOMBlob::from_glib_borrow(this).unsafe_cast())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(self.as_ptr() as *mut _, b"notify::size\0".as_ptr() as *const _,
Some(transmute(notify_size_trampoline::<Self, F> as usize)), Box_::into_raw(f))
}
}
}
impl fmt::Display for DOMBlob {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "DOMBlob")
}
}
| notify_size_trampoline | identifier_name |
vec.rs | #[no_std];
#[no_core];
use zero;
pub trait OwnedVector<T> {
unsafe fn push_fast(&mut self, t: T);
unsafe fn len(&self) -> uint;
unsafe fn set_len(&mut self, newlen: uint);
unsafe fn as_mut_buf<U>(&self, f: &fn(*mut T, uint) -> U) -> U;
unsafe fn data(&self) -> *u8;
}
pub struct Vec<T> {
fill: uint,
alloc: uint,
data: T
}
impl<T> OwnedVector<T> for ~[T] {
//FIXME: Does not check to see if we have space
// See: https://github.com/mozilla/rust/blob/master/src/libstd/vec.rs#L1317
unsafe fn | (&mut self, t: T) {
let repr: **mut Vec<u8> = zero::transmute(self);
let fill = (**repr).fill;
(**repr).fill += zero::size_of::<T>();
let p = &(**repr).data as *u8 as uint;
let mut i = 0;
while i < zero::size_of::<T>() {
*((p+fill+i) as *mut u8) = *((&t as *T as uint + i) as *mut u8);
i += 1;
}
}
unsafe fn len(&self) -> uint {
let repr: **Vec<u8> = zero::transmute(self);
((**repr).fill / zero::size_of::<T>()) as uint
}
unsafe fn set_len(&mut self, newlen: uint) {
let repr: **mut Vec<u8> = zero::transmute(self);
(**repr).fill = zero::size_of::<T>() * newlen;
}
unsafe fn as_mut_buf<U>(&self, f: &fn(*mut T, uint) -> U) -> U {
let repr: **mut Vec<T> = zero::transmute(self);
f(&mut (**repr).data as *mut T, (**repr).fill / zero::size_of::<T>())
}
unsafe fn data(&self) -> *u8 {
let repr: **mut Vec<u8> = zero::transmute(self);
&(**repr).data as *u8
}
}
| push_fast | identifier_name |
vec.rs | #[no_std];
#[no_core];
use zero;
pub trait OwnedVector<T> {
unsafe fn push_fast(&mut self, t: T);
unsafe fn len(&self) -> uint;
unsafe fn set_len(&mut self, newlen: uint);
unsafe fn as_mut_buf<U>(&self, f: &fn(*mut T, uint) -> U) -> U;
unsafe fn data(&self) -> *u8;
}
pub struct Vec<T> {
fill: uint,
alloc: uint,
data: T
}
impl<T> OwnedVector<T> for ~[T] {
//FIXME: Does not check to see if we have space
// See: https://github.com/mozilla/rust/blob/master/src/libstd/vec.rs#L1317
unsafe fn push_fast(&mut self, t: T) |
unsafe fn len(&self) -> uint {
let repr: **Vec<u8> = zero::transmute(self);
((**repr).fill / zero::size_of::<T>()) as uint
}
unsafe fn set_len(&mut self, newlen: uint) {
let repr: **mut Vec<u8> = zero::transmute(self);
(**repr).fill = zero::size_of::<T>() * newlen;
}
unsafe fn as_mut_buf<U>(&self, f: &fn(*mut T, uint) -> U) -> U {
let repr: **mut Vec<T> = zero::transmute(self);
f(&mut (**repr).data as *mut T, (**repr).fill / zero::size_of::<T>())
}
unsafe fn data(&self) -> *u8 {
let repr: **mut Vec<u8> = zero::transmute(self);
&(**repr).data as *u8
}
}
| {
let repr: **mut Vec<u8> = zero::transmute(self);
let fill = (**repr).fill;
(**repr).fill += zero::size_of::<T>();
let p = &(**repr).data as *u8 as uint;
let mut i = 0;
while i < zero::size_of::<T>() {
*((p+fill+i) as *mut u8) = *((&t as *T as uint + i) as *mut u8);
i += 1;
}
} | identifier_body |
vec.rs | #[no_std];
#[no_core];
use zero;
pub trait OwnedVector<T> {
unsafe fn push_fast(&mut self, t: T);
unsafe fn len(&self) -> uint;
unsafe fn set_len(&mut self, newlen: uint);
unsafe fn as_mut_buf<U>(&self, f: &fn(*mut T, uint) -> U) -> U;
unsafe fn data(&self) -> *u8;
}
pub struct Vec<T> {
fill: uint,
alloc: uint,
data: T
}
impl<T> OwnedVector<T> for ~[T] {
//FIXME: Does not check to see if we have space
// See: https://github.com/mozilla/rust/blob/master/src/libstd/vec.rs#L1317
unsafe fn push_fast(&mut self, t: T) {
let repr: **mut Vec<u8> = zero::transmute(self);
let fill = (**repr).fill;
(**repr).fill += zero::size_of::<T>();
let p = &(**repr).data as *u8 as uint;
let mut i = 0;
while i < zero::size_of::<T>() {
*((p+fill+i) as *mut u8) = *((&t as *T as uint + i) as *mut u8);
i += 1;
}
}
unsafe fn len(&self) -> uint {
let repr: **Vec<u8> = zero::transmute(self);
((**repr).fill / zero::size_of::<T>()) as uint | }
unsafe fn set_len(&mut self, newlen: uint) {
let repr: **mut Vec<u8> = zero::transmute(self);
(**repr).fill = zero::size_of::<T>() * newlen;
}
unsafe fn as_mut_buf<U>(&self, f: &fn(*mut T, uint) -> U) -> U {
let repr: **mut Vec<T> = zero::transmute(self);
f(&mut (**repr).data as *mut T, (**repr).fill / zero::size_of::<T>())
}
unsafe fn data(&self) -> *u8 {
let repr: **mut Vec<u8> = zero::transmute(self);
&(**repr).data as *u8
}
} | random_line_split |
|
issue-17651.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that moves of unsized values within closures are caught
// and rejected.
fn main() | {
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
(|| Box::new(*(&[0][..])))();
//~^ ERROR the trait `core::marker::Sized` is not implemented for the type `[_]`
} | identifier_body |
|
issue-17651.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that moves of unsized values within closures are caught
// and rejected.
fn | () {
// FIXME (#22405): Replace `Box::new` with `box` here when/if possible.
(|| Box::new(*(&[0][..])))();
//~^ ERROR the trait `core::marker::Sized` is not implemented for the type `[_]`
}
| main | identifier_name |
datastructures.rs | use std::iter;
use std::ops::{Index, IndexMut};
pub type Idx = [u32; 2];
#[derive(Debug)]
pub struct Matrix<T> {
shape: Idx,
data: Vec<Vec<T>>
}
impl<T: Copy> Matrix<T> {
pub fn fill(shape: Idx, value: T) -> Matrix<T> {
let data = (0..shape[0]).map(|_| {
iter::repeat(value).take(shape[1] as usize)
.collect::<Vec<_>>()
}).collect::<Vec<_>>();
Matrix {
shape: shape,
data: data
}
}
pub fn iter<'a>(&'a self) -> Box<Iterator<Item=(Idx, T)> + 'a> {
Box::new((0..self.height()).flat_map(move |y| {
(0..self.width()).map(move |x| ([x, y], self[[x, y]]))
}))
}
}
impl<T> Matrix<T> {
pub fn width(&self) -> u32 {
self.shape[0]
}
pub fn height(&self) -> u32 {
self.shape[1]
}
}
impl<T> Index<Idx> for Matrix<T> {
type Output = T;
fn index(&self, index: Idx) -> &T { | }
}
impl<T> IndexMut<Idx> for Matrix<T> {
fn index_mut(&mut self, index: Idx) -> &mut T {
let (x, y) = (index[0], index[1]);
assert!(x < self.width() && y < self.height());
&mut self.data[x as usize][y as usize]
}
} | let (x, y) = (index[0], index[1]);
assert!(x < self.width() && y < self.height());
&self.data[x as usize][y as usize] | random_line_split |
datastructures.rs | use std::iter;
use std::ops::{Index, IndexMut};
pub type Idx = [u32; 2];
#[derive(Debug)]
pub struct | <T> {
shape: Idx,
data: Vec<Vec<T>>
}
impl<T: Copy> Matrix<T> {
pub fn fill(shape: Idx, value: T) -> Matrix<T> {
let data = (0..shape[0]).map(|_| {
iter::repeat(value).take(shape[1] as usize)
.collect::<Vec<_>>()
}).collect::<Vec<_>>();
Matrix {
shape: shape,
data: data
}
}
pub fn iter<'a>(&'a self) -> Box<Iterator<Item=(Idx, T)> + 'a> {
Box::new((0..self.height()).flat_map(move |y| {
(0..self.width()).map(move |x| ([x, y], self[[x, y]]))
}))
}
}
impl<T> Matrix<T> {
pub fn width(&self) -> u32 {
self.shape[0]
}
pub fn height(&self) -> u32 {
self.shape[1]
}
}
impl<T> Index<Idx> for Matrix<T> {
type Output = T;
fn index(&self, index: Idx) -> &T {
let (x, y) = (index[0], index[1]);
assert!(x < self.width() && y < self.height());
&self.data[x as usize][y as usize]
}
}
impl<T> IndexMut<Idx> for Matrix<T> {
fn index_mut(&mut self, index: Idx) -> &mut T {
let (x, y) = (index[0], index[1]);
assert!(x < self.width() && y < self.height());
&mut self.data[x as usize][y as usize]
}
}
| Matrix | identifier_name |
datastructures.rs | use std::iter;
use std::ops::{Index, IndexMut};
pub type Idx = [u32; 2];
#[derive(Debug)]
pub struct Matrix<T> {
shape: Idx,
data: Vec<Vec<T>>
}
impl<T: Copy> Matrix<T> {
pub fn fill(shape: Idx, value: T) -> Matrix<T> |
pub fn iter<'a>(&'a self) -> Box<Iterator<Item=(Idx, T)> + 'a> {
Box::new((0..self.height()).flat_map(move |y| {
(0..self.width()).map(move |x| ([x, y], self[[x, y]]))
}))
}
}
impl<T> Matrix<T> {
pub fn width(&self) -> u32 {
self.shape[0]
}
pub fn height(&self) -> u32 {
self.shape[1]
}
}
impl<T> Index<Idx> for Matrix<T> {
type Output = T;
fn index(&self, index: Idx) -> &T {
let (x, y) = (index[0], index[1]);
assert!(x < self.width() && y < self.height());
&self.data[x as usize][y as usize]
}
}
impl<T> IndexMut<Idx> for Matrix<T> {
fn index_mut(&mut self, index: Idx) -> &mut T {
let (x, y) = (index[0], index[1]);
assert!(x < self.width() && y < self.height());
&mut self.data[x as usize][y as usize]
}
}
| {
let data = (0..shape[0]).map(|_| {
iter::repeat(value).take(shape[1] as usize)
.collect::<Vec<_>>()
}).collect::<Vec<_>>();
Matrix {
shape: shape,
data: data
}
} | identifier_body |
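Assuming the `Matrix<T>` definition in the rows above compiles as written (it is 2015-edition-style code, e.g. `Box<Iterator<...>>` without `dyn`), typical usage of its `fill`, indexing and `iter` API would be roughly:

```rust
// Assumes the Matrix<T>, Idx, Index and IndexMut definitions from the rows above are in scope.
fn main() {
    let mut grid = Matrix::fill([3, 2], 0u32); // width 3, height 2, filled with zeros
    grid[[2, 1]] = 7;                          // IndexMut on an [x, y] pair
    assert_eq!(grid[[2, 1]], 7);

    // iter() yields every ([x, y], value) pair.
    let total: u32 = grid.iter().map(|(_, v)| v).sum();
    assert_eq!(total, 7);
}
```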
partial_cmp_natural.rs | use malachite_base::num::arithmetic::traits::Sign;
use malachite_base::num::basic::traits::One;
use malachite_base::num::logic::traits::SignificantBits;
use malachite_nz::natural::Natural;
use std::cmp::Ordering;
use Rational;
impl PartialOrd<Natural> for Rational {
/// Compares a `Rational` to a `Natural`.
///
/// # Worst-case complexity
/// TODO
///
/// # Examples
/// ```
/// extern crate malachite_nz;
///
/// use malachite_nz::natural::Natural;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert!(Rational::from_str("22/7").unwrap() > Natural::from(3u32));
/// assert!(Rational::from_str("22/7").unwrap() < Natural::from(4u32));
/// ```
fn partial_cmp(&self, other: &Natural) -> Option<Ordering> {
// First check signs
let self_sign = self.sign();
let other_sign = other.sign();
let sign_cmp = self_sign.cmp(&other_sign);
if sign_cmp!= Ordering::Equal || self_sign == Ordering::Equal {
return Some(sign_cmp);
}
// Then check if one is < 1 and the other is > 1
let self_cmp_one = self.numerator.cmp(&self.denominator);
let other_cmp_one = other.cmp(&Natural::ONE);
let one_cmp = self_cmp_one.cmp(&other_cmp_one);
if one_cmp!= Ordering::Equal {
return Some(one_cmp);
}
// Then compare numerators and denominators
let n_cmp = self.numerator.cmp(other);
let d_cmp = self.denominator.cmp(&Natural::ONE);
if n_cmp == Ordering::Equal && d_cmp == Ordering::Equal {
return Some(Ordering::Equal);
} else {
let nd_cmp = n_cmp.cmp(&d_cmp);
if nd_cmp!= Ordering::Equal {
return Some(nd_cmp);
}
}
let first_prod_bits = self.numerator.significant_bits();
let second_prod_bits = self.denominator.significant_bits() + other.significant_bits();
if first_prod_bits < second_prod_bits - 1 {
return Some(Ordering::Less);
} else if first_prod_bits > second_prod_bits {
return Some(Ordering::Greater);
}
// Finally, cross-multiply.
Some(self.numerator.cmp(&(&self.denominator * other)))
}
}
impl PartialOrd<Rational> for Natural {
/// Compares a `Natural` to a `Rational`.
///
/// # Worst-case complexity
/// TODO
///
/// # Examples
/// ```
/// extern crate malachite_nz;
///
/// use malachite_nz::natural::Natural;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert!(Natural::from(3u32) < Rational::from_str("22/7").unwrap());
/// assert!(Natural::from(4u32) > Rational::from_str("22/7").unwrap());
/// ```
#[inline]
fn | (&self, other: &Rational) -> Option<Ordering> {
other.partial_cmp(self).map(Ordering::reverse)
}
}
| partial_cmp | identifier_name |
partial_cmp_natural.rs | use malachite_base::num::arithmetic::traits::Sign;
use malachite_base::num::basic::traits::One;
use malachite_base::num::logic::traits::SignificantBits;
use malachite_nz::natural::Natural;
use std::cmp::Ordering;
use Rational;
impl PartialOrd<Natural> for Rational {
/// Compares a `Rational` to a `Natural`.
///
/// # Worst-case complexity
/// TODO
///
/// # Examples
/// ```
/// extern crate malachite_nz;
///
/// use malachite_nz::natural::Natural;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert!(Rational::from_str("22/7").unwrap() > Natural::from(3u32));
/// assert!(Rational::from_str("22/7").unwrap() < Natural::from(4u32));
/// ```
fn partial_cmp(&self, other: &Natural) -> Option<Ordering> {
// First check signs
let self_sign = self.sign(); | let sign_cmp = self_sign.cmp(&other_sign);
if sign_cmp!= Ordering::Equal || self_sign == Ordering::Equal {
return Some(sign_cmp);
}
// Then check if one is < 1 and the other is > 1
let self_cmp_one = self.numerator.cmp(&self.denominator);
let other_cmp_one = other.cmp(&Natural::ONE);
let one_cmp = self_cmp_one.cmp(&other_cmp_one);
if one_cmp!= Ordering::Equal {
return Some(one_cmp);
}
// Then compare numerators and denominators
let n_cmp = self.numerator.cmp(other);
let d_cmp = self.denominator.cmp(&Natural::ONE);
if n_cmp == Ordering::Equal && d_cmp == Ordering::Equal {
return Some(Ordering::Equal);
} else {
let nd_cmp = n_cmp.cmp(&d_cmp);
if nd_cmp!= Ordering::Equal {
return Some(nd_cmp);
}
}
let first_prod_bits = self.numerator.significant_bits();
let second_prod_bits = self.denominator.significant_bits() + other.significant_bits();
if first_prod_bits < second_prod_bits - 1 {
return Some(Ordering::Less);
} else if first_prod_bits > second_prod_bits {
return Some(Ordering::Greater);
}
// Finally, cross-multiply.
Some(self.numerator.cmp(&(&self.denominator * other)))
}
}
impl PartialOrd<Rational> for Natural {
/// Compares a `Natural` to a `Rational`.
///
/// # Worst-case complexity
/// TODO
///
/// # Examples
/// ```
/// extern crate malachite_nz;
///
/// use malachite_nz::natural::Natural;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert!(Natural::from(3u32) < Rational::from_str("22/7").unwrap());
/// assert!(Natural::from(4u32) > Rational::from_str("22/7").unwrap());
/// ```
#[inline]
fn partial_cmp(&self, other: &Rational) -> Option<Ordering> {
other.partial_cmp(self).map(Ordering::reverse)
}
} | let other_sign = other.sign(); | random_line_split |
partial_cmp_natural.rs | use malachite_base::num::arithmetic::traits::Sign;
use malachite_base::num::basic::traits::One;
use malachite_base::num::logic::traits::SignificantBits;
use malachite_nz::natural::Natural;
use std::cmp::Ordering;
use Rational;
impl PartialOrd<Natural> for Rational {
/// Compares a `Rational` to a `Natural`.
///
/// # Worst-case complexity
/// TODO
///
/// # Examples
/// ```
/// extern crate malachite_nz;
///
/// use malachite_nz::natural::Natural;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert!(Rational::from_str("22/7").unwrap() > Natural::from(3u32));
/// assert!(Rational::from_str("22/7").unwrap() < Natural::from(4u32));
/// ```
fn partial_cmp(&self, other: &Natural) -> Option<Ordering> {
// First check signs
let self_sign = self.sign();
let other_sign = other.sign();
let sign_cmp = self_sign.cmp(&other_sign);
if sign_cmp!= Ordering::Equal || self_sign == Ordering::Equal {
return Some(sign_cmp);
}
// Then check if one is < 1 and the other is > 1
let self_cmp_one = self.numerator.cmp(&self.denominator);
let other_cmp_one = other.cmp(&Natural::ONE);
let one_cmp = self_cmp_one.cmp(&other_cmp_one);
if one_cmp!= Ordering::Equal {
return Some(one_cmp);
}
// Then compare numerators and denominators
let n_cmp = self.numerator.cmp(other);
let d_cmp = self.denominator.cmp(&Natural::ONE);
if n_cmp == Ordering::Equal && d_cmp == Ordering::Equal {
return Some(Ordering::Equal);
} else |
let first_prod_bits = self.numerator.significant_bits();
let second_prod_bits = self.denominator.significant_bits() + other.significant_bits();
if first_prod_bits < second_prod_bits - 1 {
return Some(Ordering::Less);
} else if first_prod_bits > second_prod_bits {
return Some(Ordering::Greater);
}
// Finally, cross-multiply.
Some(self.numerator.cmp(&(&self.denominator * other)))
}
}
impl PartialOrd<Rational> for Natural {
/// Compares a `Natural` to a `Rational`.
///
/// # Worst-case complexity
/// TODO
///
/// # Examples
/// ```
/// extern crate malachite_nz;
///
/// use malachite_nz::natural::Natural;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert!(Natural::from(3u32) < Rational::from_str("22/7").unwrap());
/// assert!(Natural::from(4u32) > Rational::from_str("22/7").unwrap());
/// ```
#[inline]
fn partial_cmp(&self, other: &Rational) -> Option<Ordering> {
other.partial_cmp(self).map(Ordering::reverse)
}
}
| {
let nd_cmp = n_cmp.cmp(&d_cmp);
if nd_cmp != Ordering::Equal {
return Some(nd_cmp);
}
} | conditional_block |
partial_cmp_natural.rs | use malachite_base::num::arithmetic::traits::Sign;
use malachite_base::num::basic::traits::One;
use malachite_base::num::logic::traits::SignificantBits;
use malachite_nz::natural::Natural;
use std::cmp::Ordering;
use Rational;
impl PartialOrd<Natural> for Rational {
/// Compares a `Rational` to a `Natural`.
///
/// # Worst-case complexity
/// TODO
///
/// # Examples
/// ```
/// extern crate malachite_nz;
///
/// use malachite_nz::natural::Natural;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert!(Rational::from_str("22/7").unwrap() > Natural::from(3u32));
/// assert!(Rational::from_str("22/7").unwrap() < Natural::from(4u32));
/// ```
fn partial_cmp(&self, other: &Natural) -> Option<Ordering> | } else {
let nd_cmp = n_cmp.cmp(&d_cmp);
if nd_cmp!= Ordering::Equal {
return Some(nd_cmp);
}
}
let first_prod_bits = self.numerator.significant_bits();
let second_prod_bits = self.denominator.significant_bits() + other.significant_bits();
if first_prod_bits < second_prod_bits - 1 {
return Some(Ordering::Less);
} else if first_prod_bits > second_prod_bits {
return Some(Ordering::Greater);
}
// Finally, cross-multiply.
Some(self.numerator.cmp(&(&self.denominator * other)))
}
}
impl PartialOrd<Rational> for Natural {
/// Compares a `Natural` to a `Rational`.
///
/// # Worst-case complexity
/// TODO
///
/// # Examples
/// ```
/// extern crate malachite_nz;
///
/// use malachite_nz::natural::Natural;
/// use malachite_q::Rational;
/// use std::str::FromStr;
///
/// assert!(Natural::from(3u32) < Rational::from_str("22/7").unwrap());
/// assert!(Natural::from(4u32) > Rational::from_str("22/7").unwrap());
/// ```
#[inline]
fn partial_cmp(&self, other: &Rational) -> Option<Ordering> {
other.partial_cmp(self).map(Ordering::reverse)
}
}
| {
// First check signs
let self_sign = self.sign();
let other_sign = other.sign();
let sign_cmp = self_sign.cmp(&other_sign);
if sign_cmp != Ordering::Equal || self_sign == Ordering::Equal {
return Some(sign_cmp);
}
// Then check if one is < 1 and the other is > 1
let self_cmp_one = self.numerator.cmp(&self.denominator);
let other_cmp_one = other.cmp(&Natural::ONE);
let one_cmp = self_cmp_one.cmp(&other_cmp_one);
if one_cmp != Ordering::Equal {
return Some(one_cmp);
}
// Then compare numerators and denominators
let n_cmp = self.numerator.cmp(other);
let d_cmp = self.denominator.cmp(&Natural::ONE);
if n_cmp == Ordering::Equal && d_cmp == Ordering::Equal {
return Some(Ordering::Equal); | identifier_body |
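The `partial_cmp` rows above avoid the full cross-multiplication whenever a sign check, a comparison against 1, or a bit-length estimate already decides the ordering, and only multiply as a last resort. The underlying cross-multiplication idea, reduced to plain machine-integer fractions — a standalone sketch, not malachite code — is:

```rust
use std::cmp::Ordering;

// Compare a/b with c/d for b, d > 0 by comparing a*d with c*b.
fn cmp_fractions((a, b): (u64, u64), (c, d): (u64, u64)) -> Ordering {
    assert!(b > 0 && d > 0, "denominators must be positive");
    // Widen to u128 so the cross products cannot overflow.
    (a as u128 * d as u128).cmp(&(c as u128 * b as u128))
}

fn main() {
    // 22/7 > 3/1 and 22/7 < 4/1, mirroring the doc-tests in the rows above.
    assert_eq!(cmp_fractions((22, 7), (3, 1)), Ordering::Greater);
    assert_eq!(cmp_fractions((22, 7), (4, 1)), Ordering::Less);
}
```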
svh-a-change-trait-bound.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| //! The `svh-a-*.rs` files are all deviations from the base file
//! svh-a-base.rs with some difference (usually in `fn foo`) that
//! should not affect the strict version hash (SVH) computation
//! (#14132).
#![crate_name = "a"]
macro_rules! three {
() => { 3 }
}
pub trait U {}
pub trait V {}
impl U for () {}
impl V for () {}
static A_CONSTANT : int = 2;
pub fn foo<T:V>(_: int) -> int {
3
}
pub fn an_unused_name() -> int {
4
} | random_line_split |
|
svh-a-change-trait-bound.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `svh-a-*.rs` files are all deviations from the base file
//! svh-a-base.rs with some difference (usually in `fn foo`) that
//! should not affect the strict version hash (SVH) computation
//! (#14132).
#![crate_name = "a"]
macro_rules! three {
() => { 3 }
}
pub trait U {}
pub trait V {}
impl U for () {}
impl V for () {}
static A_CONSTANT : int = 2;
pub fn | <T:V>(_: int) -> int {
3
}
pub fn an_unused_name() -> int {
4
}
| foo | identifier_name |
svh-a-change-trait-bound.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The `svh-a-*.rs` files are all deviations from the base file
//! svh-a-base.rs with some difference (usually in `fn foo`) that
//! should not affect the strict version hash (SVH) computation
//! (#14132).
#![crate_name = "a"]
macro_rules! three {
() => { 3 }
}
pub trait U {}
pub trait V {}
impl U for () {}
impl V for () {}
static A_CONSTANT : int = 2;
pub fn foo<T:V>(_: int) -> int {
3
}
pub fn an_unused_name() -> int | {
4
} | identifier_body |
|
main.rs | #[macro_use]
extern crate rocket;
#[macro_use]
extern crate rocket_contrib;
mod airport_data;
mod api;
mod path;
use airport_data::our_airports::OurAirports; | use anyhow::{Context, Result};
use rocket::config::{Config, Environment};
use rocket_contrib::serve::StaticFiles;
#[rocket::main]
async fn main() -> Result<()> {
let config = {
let env = Environment::active().context("failed to get Rocket config")?;
Config::build(env)
.workers(1)
.finalize()
.context("failed to build Rocket config")?
};
let mut airports_source = OurAirports::init().context("failed to init OurAirports data")?;
if airports_source.is_up_to_date() {
println!("loading OurAirports data..");
} else {
println!("updating OurAirports data..");
airports_source.update().context("update failed")?;
};
let airports = airports_source
.load()
.context("failed to load OurAirports data")?;
println!("finished loading OurAirports data");
rocket::custom(config)
.manage(airports)
.mount("/", StaticFiles::from("frontend/public/"))
.mount("/api", routes![api::search_routes::search_routes])
.launch()
.await
.context("failed to initialize Rocket")
} | use airport_data::AirportData; | random_line_split |
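The `AirportData` trait used above is defined elsewhere in the crate and is not part of this row; judging only from the calls in `main` (`OurAirports::init`, `is_up_to_date`, `update`, `load`), its shape might be roughly the following sketch (the method receivers and the associated output type are assumptions):

use anyhow::Result;

// Hypothetical reconstruction of airport_data::AirportData, inferred from
// how main() uses it; the real trait may differ.
trait AirportData: Sized {
    type Output;

    fn init() -> Result<Self>;
    fn is_up_to_date(&self) -> bool;
    fn update(&mut self) -> Result<()>;
    fn load(&self) -> Result<Self::Output>;
}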
main.rs | #[macro_use]
extern crate rocket;
#[macro_use]
extern crate rocket_contrib;
mod airport_data;
mod api;
mod path;
use airport_data::our_airports::OurAirports;
use airport_data::AirportData;
use anyhow::{Context, Result};
use rocket::config::{Config, Environment};
use rocket_contrib::serve::StaticFiles;
#[rocket::main]
async fn | () -> Result<()> {
let config = {
let env = Environment::active().context("failed to get Rocket config")?;
Config::build(env)
.workers(1)
.finalize()
.context("failed to build Rocket config")?
};
let mut airports_source = OurAirports::init().context("failed to init OurAirports data")?;
if airports_source.is_up_to_date() {
println!("loading OurAirports data..");
} else {
println!("updating OurAirports data..");
airports_source.update().context("update failed")?;
};
let airports = airports_source
.load()
.context("failed to load OurAirports data")?;
println!("finished loading OurAirports data");
rocket::custom(config)
.manage(airports)
.mount("/", StaticFiles::from("frontend/public/"))
.mount("/api", routes![api::search_routes::search_routes])
.launch()
.await
.context("failed to initialize Rocket")
}
| main | identifier_name |
main.rs | #[macro_use]
extern crate rocket;
#[macro_use]
extern crate rocket_contrib;
mod airport_data;
mod api;
mod path;
use airport_data::our_airports::OurAirports;
use airport_data::AirportData;
use anyhow::{Context, Result};
use rocket::config::{Config, Environment};
use rocket_contrib::serve::StaticFiles;
#[rocket::main]
async fn main() -> Result<()> | .load()
.context("failed to load OurAirports data")?;
println!("finished loading OurAirports data");
rocket::custom(config)
.manage(airports)
.mount("/", StaticFiles::from("frontend/public/"))
.mount("/api", routes![api::search_routes::search_routes])
.launch()
.await
.context("failed to initialize Rocket")
}
| {
let config = {
let env = Environment::active().context("failed to get Rocket config")?;
Config::build(env)
.workers(1)
.finalize()
.context("failed to build Rocket config")?
};
let mut airports_source = OurAirports::init().context("failed to init OurAirports data")?;
if airports_source.is_up_to_date() {
println!("loading OurAirports data..");
} else {
println!("updating OurAirports data..");
airports_source.update().context("update failed")?;
};
let airports = airports_source | identifier_body |
main.rs | #[macro_use]
extern crate rocket;
#[macro_use]
extern crate rocket_contrib;
mod airport_data;
mod api;
mod path;
use airport_data::our_airports::OurAirports;
use airport_data::AirportData;
use anyhow::{Context, Result};
use rocket::config::{Config, Environment};
use rocket_contrib::serve::StaticFiles;
#[rocket::main]
async fn main() -> Result<()> {
let config = {
let env = Environment::active().context("failed to get Rocket config")?;
Config::build(env)
.workers(1)
.finalize()
.context("failed to build Rocket config")?
};
let mut airports_source = OurAirports::init().context("failed to init OurAirports data")?;
if airports_source.is_up_to_date() {
println!("loading OurAirports data..");
} else | ;
let airports = airports_source
.load()
.context("failed to load OurAirports data")?;
println!("finished loading OurAirports data");
rocket::custom(config)
.manage(airports)
.mount("/", StaticFiles::from("frontend/public/"))
.mount("/api", routes![api::search_routes::search_routes])
.launch()
.await
.context("failed to initialize Rocket")
}
| {
println!("updating OurAirports data..");
airports_source.update().context("update failed")?;
} | conditional_block |
config.rs | // Copyright (c) 2016 Nikita Pekin and the smexybot contributors
// See the README.md file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use error::Result;
use serde_json;
use std::collections::HashSet;
use std::fs::File;
use std::io::{ErrorKind, Read};
#[cfg(feature = "nightly")]
include!("config.in.rs");
#[cfg(feature = "with-syntex")]
include!(concat!(env!("OUT_DIR"), "/config.rs"));
impl Config {
pub fn | (name: Option<&str>) -> Self {
if let Some(name) = name {
match Config::load_from_file(name) {
Ok(config) => {
return config;
},
Err(err) => warn!("Failed to load config from \"{}\": {}", name, err),
}
}
info!("Using default config");
Default::default()
}
pub fn load_from_file(name: &str) -> Result<Self> {
let mut file = match File::open(name) {
Ok(file) => file,
// If no file is present, assume this is a fresh config.
Err(ref err) if err.kind() == ErrorKind::NotFound => return Ok(Default::default()),
Err(_) => panic!("Failed to open file: {}", name),
};
let mut config = String::new();
file.read_to_string(&mut config)
.expect(&format!("Failed to read from file: {}", name));
let config = serde_json::from_str(&config).expect("Failed to deserialize Config");
info!("Loaded config from: \"{}\"", name);
Ok(config)
}
}
impl Default for Config {
fn default() -> Config {
Config {
bot_name: "smexybot".to_owned(),
command_prefix: ";".to_owned(),
owners: HashSet::new(),
source_url: "https://github.com/indiv0/smexybot".to_owned(),
}
}
}
| new | identifier_name |
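The fields of `Config` come from the `include!`d `config.in.rs`, which is not shown in this row; only the `Default` impl hints at them. Under that assumption, loading a hand-written config might look like this sketch (the file name and the JSON shape are guesses based on the defaults):

// Illustrative only: field names mirror the Default impl above.
fn load_example() -> Config {
    // A config.json along these lines...
    //
    // {
    //   "bot_name": "smexybot",
    //   "command_prefix": ";",
    //   "owners": [],
    //   "source_url": "https://github.com/indiv0/smexybot"
    // }
    //
    // ...would be picked up by:
    Config::new(Some("config.json"))
}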
config.rs | // Copyright (c) 2016 Nikita Pekin and the smexybot contributors
// See the README.md file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use error::Result;
use serde_json;
use std::collections::HashSet;
use std::fs::File;
use std::io::{ErrorKind, Read};
#[cfg(feature = "nightly")]
include!("config.in.rs");
#[cfg(feature = "with-syntex")]
include!(concat!(env!("OUT_DIR"), "/config.rs"));
impl Config {
pub fn new(name: Option<&str>) -> Self {
if let Some(name) = name {
match Config::load_from_file(name) {
Ok(config) => {
return config;
},
Err(err) => warn!("Failed to load config from \"{}\": {}", name, err),
}
}
info!("Using default config");
Default::default()
}
pub fn load_from_file(name: &str) -> Result<Self> {
let mut file = match File::open(name) {
Ok(file) => file,
// If no file is present, assume this is a fresh config.
Err(ref err) if err.kind() == ErrorKind::NotFound => return Ok(Default::default()),
Err(_) => panic!("Failed to open file: {}", name),
};
let mut config = String::new();
file.read_to_string(&mut config)
.expect(&format!("Failed to read from file: {}", name));
let config = serde_json::from_str(&config).expect("Failed to deserialize Config");
info!("Loaded config from: \"{}\"", name);
Ok(config)
}
}
impl Default for Config {
fn default() -> Config |
}
| {
Config {
bot_name: "smexybot".to_owned(),
command_prefix: ";".to_owned(),
owners: HashSet::new(),
source_url: "https://github.com/indiv0/smexybot".to_owned(),
}
} | identifier_body |
config.rs | // Copyright (c) 2016 Nikita Pekin and the smexybot contributors
// See the README.md file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use error::Result;
use serde_json;
use std::collections::HashSet;
use std::fs::File;
use std::io::{ErrorKind, Read};
#[cfg(feature = "nightly")]
include!("config.in.rs");
#[cfg(feature = "with-syntex")]
include!(concat!(env!("OUT_DIR"), "/config.rs"));
impl Config {
pub fn new(name: Option<&str>) -> Self {
if let Some(name) = name {
match Config::load_from_file(name) {
Ok(config) => {
return config;
},
Err(err) => warn!("Failed to load config from \"{}\": {}", name, err),
}
}
info!("Using default config");
Default::default()
}
pub fn load_from_file(name: &str) -> Result<Self> {
let mut file = match File::open(name) {
Ok(file) => file,
// If no file is present, assume this is a fresh config.
Err(ref err) if err.kind() == ErrorKind::NotFound => return Ok(Default::default()),
Err(_) => panic!("Failed to open file: {}", name),
};
let mut config = String::new();
file.read_to_string(&mut config)
.expect(&format!("Failed to read from file: {}", name));
let config = serde_json::from_str(&config).expect("Failed to deserialize Config");
info!("Loaded config from: \"{}\"", name);
Ok(config)
}
} | command_prefix: ";".to_owned(),
owners: HashSet::new(),
source_url: "https://github.com/indiv0/smexybot".to_owned(),
}
}
} |
impl Default for Config {
fn default() -> Config {
Config {
bot_name: "smexybot".to_owned(), | random_line_split |
coerce-unify-return.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that coercions unify the expected return type of a polymorphic
// function call, instead of leaving the type variables as they were.
// pretty-expanded FIXME #23616
struct | ;
impl Foo {
fn foo<T>(self, x: T) -> Option<T> { Some(x) }
}
pub fn main() {
let _: Option<fn()> = Some(main);
let _: Option<fn()> = Foo.foo(main);
// The same two cases, with implicit type variables made explicit.
let _: Option<fn()> = Some::<_>(main);
let _: Option<fn()> = Foo.foo::<_>(main);
}
| Foo | identifier_name |
coerce-unify-return.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that coercions unify the expected return type of a polymorphic
// function call, instead of leaving the type variables as they were.
// pretty-expanded FIXME #23616
struct Foo;
impl Foo {
fn foo<T>(self, x: T) -> Option<T> { Some(x) }
}
pub fn main() { | let _: Option<fn()> = Foo.foo(main);
// The same two cases, with implicit type variables made explicit.
let _: Option<fn()> = Some::<_>(main);
let _: Option<fn()> = Foo.foo::<_>(main);
} | let _: Option<fn()> = Some(main); | random_line_split |
coerce-unify-return.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Check that coercions unify the expected return type of a polymorphic
// function call, instead of leaving the type variables as they were.
// pretty-expanded FIXME #23616
struct Foo;
impl Foo {
fn foo<T>(self, x: T) -> Option<T> |
}
pub fn main() {
let _: Option<fn()> = Some(main);
let _: Option<fn()> = Foo.foo(main);
// The same two cases, with implicit type variables made explicit.
let _: Option<fn()> = Some::<_>(main);
let _: Option<fn()> = Foo.foo::<_>(main);
}
| { Some(x) } | identifier_body |
parser.rs | extern crate dirs;
use regex::Regex;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use {Creds, Result};
pub fn get_credentials(conf: String) -> Result<Creds> {
let mut path = dirs::home_dir().ok_or("Can't get home dir")?;
// Build path to config file
path.push(conf);
let content = read_config_file(path.as_path())?;
let user = extract_info(r"set imap_user=(\w*)", &content)?;
let pass = extract_info(r"set imap_pass=(\w*)", &content)?;
let host = extract_info(r"set folder=imaps?://(.+):\d+", &content)?;
let port = extract_info(r"set folder=imaps?://.+:(\d+)", &content)?;
let port = port.parse()?;
Ok(Creds {
user: user,
pass: pass,
host: host,
port: port,
})
}
pub fn | (pattern: &str, text: &str) -> Result<String> {
let re = Regex::new(pattern)?;
let cap = re.captures(text).ok_or("Couldn't match")?;
let xtr = cap.get(1).ok_or("No captures")?;
Ok(xtr.as_str().to_string())
}
fn read_config_file(path: &Path) -> Result<String> {
let mut content = String::new();
let mut file = File::open(&path)?;
file.read_to_string(&mut content)?;
Ok(content)
}
pub fn get_db_path() -> Result<String> {
let mut path = dirs::home_dir().ok_or("Can't get home dir")?;
path.push(::DB);
let path_str = path.to_str()
.ok_or("Can't convert path into string")?;
Ok(path_str.to_string())
}
| extract_info | identifier_name |
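The masked helper here is `extract_info`, a single-capture regex fetch over the mutt-style config text. The same logic as a standalone sketch (simplified to `Option` so it compiles outside this crate's `Result` alias), with two worked inputs:

use regex::Regex;

// Same idea as extract_info above, but with Option instead of the
// crate-local Result alias.
fn extract_first_capture(pattern: &str, text: &str) -> Option<String> {
    let re = Regex::new(pattern).ok()?;
    let cap = re.captures(text)?;
    Some(cap.get(1)?.as_str().to_string())
}

fn main() {
    assert_eq!(
        extract_first_capture(r"set imap_user=(\w*)", "set imap_user=alice").as_deref(),
        Some("alice")
    );
    // The greedy `.+` backtracks to the last ':' so the port is not captured.
    assert_eq!(
        extract_first_capture(
            r"set folder=imaps?://(.+):\d+",
            "set folder=imaps://mail.example.com:993"
        )
        .as_deref(),
        Some("mail.example.com")
    );
}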
parser.rs | extern crate dirs;
use regex::Regex;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use {Creds, Result};
pub fn get_credentials(conf: String) -> Result<Creds> {
let mut path = dirs::home_dir().ok_or("Can't get home dir")?;
// Build path to config file
path.push(conf);
let content = read_config_file(path.as_path())?;
let user = extract_info(r"set imap_user=(\w*)", &content)?;
let pass = extract_info(r"set imap_pass=(\w*)", &content)?;
let host = extract_info(r"set folder=imaps?://(.+):\d+", &content)?;
let port = extract_info(r"set folder=imaps?://.+:(\d+)", &content)?;
let port = port.parse()?;
Ok(Creds {
user: user,
pass: pass,
host: host,
port: port,
})
}
pub fn extract_info(pattern: &str, text: &str) -> Result<String> {
let re = Regex::new(pattern)?;
let cap = re.captures(text).ok_or("Couldn't match")?;
let xtr = cap.get(1).ok_or("No captures")?;
Ok(xtr.as_str().to_string())
}
fn read_config_file(path: &Path) -> Result<String> {
let mut content = String::new();
let mut file = File::open(&path)?;
file.read_to_string(&mut content)?;
Ok(content)
}
pub fn get_db_path() -> Result<String> {
let mut path = dirs::home_dir().ok_or("Can't get home dir")?; | path.push(::DB);
let path_str = path.to_str()
.ok_or("Can't convert path into string")?;
Ok(path_str.to_string())
} | random_line_split |
|
parser.rs | extern crate dirs;
use regex::Regex;
use std::fs::File;
use std::io::prelude::*;
use std::path::Path;
use {Creds, Result};
pub fn get_credentials(conf: String) -> Result<Creds> {
let mut path = dirs::home_dir().ok_or("Can't get home dir")?;
// Build path to config file
path.push(conf);
let content = read_config_file(path.as_path())?;
let user = extract_info(r"set imap_user=(\w*)", &content)?;
let pass = extract_info(r"set imap_pass=(\w*)", &content)?;
let host = extract_info(r"set folder=imaps?://(.+):\d+", &content)?;
let port = extract_info(r"set folder=imaps?://.+:(\d+)", &content)?;
let port = port.parse()?;
Ok(Creds {
user: user,
pass: pass,
host: host,
port: port,
})
}
pub fn extract_info(pattern: &str, text: &str) -> Result<String> |
fn read_config_file(path: &Path) -> Result<String> {
let mut content = String::new();
let mut file = File::open(&path)?;
file.read_to_string(&mut content)?;
Ok(content)
}
pub fn get_db_path() -> Result<String> {
let mut path = dirs::home_dir().ok_or("Can't get home dir")?;
path.push(::DB);
let path_str = path.to_str()
.ok_or("Can't convert path into string")?;
Ok(path_str.to_string())
}
| {
let re = Regex::new(pattern)?;
let cap = re.captures(text).ok_or("Couldn't match")?;
let xtr = cap.get(1).ok_or("No captures")?;
Ok(xtr.as_str().to_string())
} | identifier_body |
ctypes.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use syn::Ident;
use crate::conversion::api::ApiName;
use crate::types::Namespace;
use crate::{conversion::api::Api, known_types::known_types, types::QualifiedName};
use super::fun::FnPhase;
/// Spot any variable-length C types (e.g. unsigned long)
/// used in the [Api]s and append those as extra APIs.
pub(crate) fn append_ctype_information(apis: &mut Vec<Api<FnPhase>>) {
let ctypes: HashMap<Ident, QualifiedName> = apis
.iter()
.flat_map(|api| api.deps())
.filter(|ty| known_types().is_ctype(ty))
.map(|ty| (ty.get_final_ident(), ty))
.collect();
for (id, typename) in ctypes {
apis.push(Api::CType {
name: ApiName::new(&Namespace::new(), id),
typename,
});
} | } | random_line_split |
|
ctypes.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use syn::Ident;
use crate::conversion::api::ApiName;
use crate::types::Namespace;
use crate::{conversion::api::Api, known_types::known_types, types::QualifiedName};
use super::fun::FnPhase;
/// Spot any variable-length C types (e.g. unsigned long)
/// used in the [Api]s and append those as extra APIs.
pub(crate) fn | (apis: &mut Vec<Api<FnPhase>>) {
let ctypes: HashMap<Ident, QualifiedName> = apis
.iter()
.flat_map(|api| api.deps())
.filter(|ty| known_types().is_ctype(ty))
.map(|ty| (ty.get_final_ident(), ty))
.collect();
for (id, typename) in ctypes {
apis.push(Api::CType {
name: ApiName::new(&Namespace::new(), id),
typename,
});
}
}
| append_ctype_information | identifier_name |
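The function reconstructed in these rows collects `(final ident, qualified name)` pairs into a `HashMap`, which both deduplicates C types referenced by several APIs and keys them by their short name. The same collect-to-deduplicate pattern with plain strings standing in for the autocxx types (illustrative only):

use std::collections::HashMap;

fn main() {
    // Two APIs that both depend on the same variable-length C type.
    let deps = vec![vec!["ns::c_ulong", "ns::Foo"], vec!["ns::c_ulong"]];
    let ctypes: HashMap<&str, &str> = deps
        .iter()
        .flat_map(|d| d.iter().copied())
        .filter(|ty| ty.starts_with("ns::c_"))            // stand-in for is_ctype()
        .map(|ty| (ty.rsplit("::").next().unwrap(), ty))  // final ident -> full name
        .collect();
    // The duplicate dependency collapses into a single entry.
    assert_eq!(ctypes.len(), 1);
    assert_eq!(ctypes["c_ulong"], "ns::c_ulong");
}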
ctypes.rs | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use syn::Ident;
use crate::conversion::api::ApiName;
use crate::types::Namespace;
use crate::{conversion::api::Api, known_types::known_types, types::QualifiedName};
use super::fun::FnPhase;
/// Spot any variable-length C types (e.g. unsigned long)
/// used in the [Api]s and append those as extra APIs.
pub(crate) fn append_ctype_information(apis: &mut Vec<Api<FnPhase>>) | {
let ctypes: HashMap<Ident, QualifiedName> = apis
.iter()
.flat_map(|api| api.deps())
.filter(|ty| known_types().is_ctype(ty))
.map(|ty| (ty.get_final_ident(), ty))
.collect();
for (id, typename) in ctypes {
apis.push(Api::CType {
name: ApiName::new(&Namespace::new(), id),
typename,
});
}
} | identifier_body |
|
lexer.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{errors::*, parser::syntax::make_loc, FileCommentMap, MatchedFileCommentMap};
use codespan::{ByteIndex, Span};
use move_ir_types::location::Loc;
use std::{collections::BTreeMap, fmt};
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Tok {
EOF,
AddressValue,
NumValue,
U8Value,
U64Value,
U128Value,
ByteStringValue,
IdentifierValue,
Exclaim,
ExclaimEqual,
Percent,
Amp,
AmpAmp,
AmpMut,
LParen,
RParen,
LBracket,
RBracket,
Star,
Plus,
Comma,
Minus,
Period,
PeriodPeriod,
Slash,
Colon,
ColonColon,
Semicolon,
Less,
LessEqual,
LessLess,
Equal,
EqualEqual,
EqualEqualGreater,
Greater,
GreaterEqual,
GreaterGreater,
Caret,
Abort,
Acquires,
As,
Break,
Continue,
Copy,
Copyable,
Define,
Else,
False,
If,
Invariant,
Let,
Loop,
Module,
Move,
Native,
Public,
Resource,
Return,
Spec,
Struct,
True,
Use,
While,
LBrace,
Pipe,
PipePipe,
RBrace,
Fun,
Script,
Const,
Friend,
}
impl fmt::Display for Tok {
fn fmt<'f>(&self, formatter: &mut fmt::Formatter<'f>) -> Result<(), fmt::Error> {
use Tok::*;
let s = match *self {
EOF => "[end-of-file]",
AddressValue => "[Address]",
NumValue => "[Num]",
U8Value => "[U8]",
U64Value => "[U64]",
U128Value => "[U128]",
ByteStringValue => "[ByteString]",
IdentifierValue => "[Identifier]",
Exclaim => "!",
ExclaimEqual => "!=",
Percent => "%",
Amp => "&",
AmpAmp => "&&",
AmpMut => "&mut",
LParen => "(",
RParen => ")",
LBracket => "[",
RBracket => "]",
Star => "*",
Plus => "+",
Comma => ",",
Minus => "-",
Period => ".",
PeriodPeriod => "..",
Slash => "/",
Colon => ":",
ColonColon => "::",
Semicolon => ";",
Less => "<",
LessEqual => "<=",
LessLess => "<<",
Equal => "=",
EqualEqual => "==",
EqualEqualGreater => "==>",
Greater => ">",
GreaterEqual => ">=",
GreaterGreater => ">>",
Caret => "^",
Abort => "abort",
Acquires => "acquires",
As => "as",
Break => "break",
Continue => "continue",
Copy => "copy",
Copyable => "copyable",
Define => "define",
Else => "else",
False => "false",
If => "if",
Invariant => "invariant",
Let => "let",
Loop => "loop",
Module => "module",
Move => "move",
Native => "native",
Public => "public",
Resource => "resource",
Return => "return",
Spec => "spec",
Struct => "struct",
True => "true",
Use => "use",
While => "while",
LBrace => "{",
Pipe => "|",
PipePipe => "||",
RBrace => "}",
Fun => "fun",
Script => "script",
Const => "const",
Friend => "friend",
};
fmt::Display::fmt(s, formatter)
}
}
pub struct Lexer<'input> {
text: &'input str,
file: &'static str,
doc_comments: FileCommentMap,
matched_doc_comments: MatchedFileCommentMap,
prev_end: usize,
cur_start: usize,
cur_end: usize,
token: Tok,
}
impl<'input> Lexer<'input> {
pub fn new(
text: &'input str,
file: &'static str,
doc_comments: BTreeMap<Span, String>,
) -> Lexer<'input> {
Lexer {
text,
file,
doc_comments,
matched_doc_comments: BTreeMap::new(),
prev_end: 0,
cur_start: 0,
cur_end: 0,
token: Tok::EOF,
}
}
pub fn peek(&self) -> Tok {
self.token
}
pub fn content(&self) -> &str {
&self.text[self.cur_start..self.cur_end]
}
pub fn file_name(&self) -> &'static str {
self.file
}
pub fn start_loc(&self) -> usize {
self.cur_start
}
pub fn previous_end_loc(&self) -> usize {
self.prev_end
}
// Look ahead to the next token after the current one and return it without advancing
// the state of the lexer.
pub fn lookahead(&self) -> Result<Tok, Error> {
let text = self.text[self.cur_end..].trim_start();
let offset = self.text.len() - text.len();
let (tok, _) = find_token(self.file, text, offset)?;
Ok(tok)
}
// Look ahead to the next two tokens after the current one and return them without advancing
// the state of the lexer.
pub fn lookahead2(&self) -> Result<(Tok, Tok), Error> {
let text = self.text[self.cur_end..].trim_start();
let offset = self.text.len() - text.len();
let (first, length) = find_token(self.file, text, offset)?;
let text2 = self.text[offset + length..].trim_start();
let offset2 = self.text.len() - text2.len();
let (second, _) = find_token(self.file, text2, offset2)?;
Ok((first, second))
}
// Matches the doc comments after the last token (or the beginning of the file) to the position
// of the current token. This moves the comments out of `doc_comments` and
// into `matched_doc_comments`. At the end of parsing, if `doc_comments` is not empty, errors
// for stale doc comments will be produced.
//
// Calling this function during parsing effectively marks a valid point for documentation
// comments. The documentation comments are not stored in the AST, but can be retrieved by
// using the start position of an item as an index into `matched_doc_comments`.
pub fn match_doc_comments(&mut self) {
let start = self.previous_end_loc() as u32;
let end = self.cur_start as u32;
let mut matched = vec![];
let merged = self
.doc_comments
.range(Span::new(start, start)..Span::new(end, end))
.map(|(span, s)| {
matched.push(*span);
s.clone()
})
.collect::<Vec<String>>()
.join("\n");
for span in matched {
self.doc_comments.remove(&span);
}
self.matched_doc_comments.insert(ByteIndex(end), merged);
}
// At the end of parsing, checks whether there are any unmatched documentation comments,
// producing errors if so. Otherwise returns a map from file position to associated
// documentation.
pub fn check_and_get_doc_comments(&mut self) -> Result<MatchedFileCommentMap, Errors> |
pub fn advance(&mut self) -> Result<(), Error> {
self.prev_end = self.cur_end;
let text = self.text[self.cur_end..].trim_start();
self.cur_start = self.text.len() - text.len();
let (token, len) = find_token(self.file, text, self.cur_start)?;
self.cur_end = self.cur_start + len;
self.token = token;
Ok(())
}
// Replace the current token. The lexer will always match the longest token,
// but sometimes the parser will prefer to replace it with a shorter one,
// e.g., ">" instead of ">>".
pub fn replace_token(&mut self, token: Tok, len: usize) {
self.token = token;
self.cur_end = self.cur_start + len
}
}
// Find the next token and its length without changing the state of the lexer.
fn find_token(file: &'static str, text: &str, start_offset: usize) -> Result<(Tok, usize), Error> {
let c: char = match text.chars().next() {
Some(next_char) => next_char,
None => {
return Ok((Tok::EOF, 0));
}
};
let (tok, len) = match c {
'0'..='9' => {
if text.starts_with("0x") && text.len() > 2 {
let hex_len = get_hex_digits_len(&text[2..]);
if hex_len == 0 {
// Fall back to treating this as a "0" token.
(Tok::NumValue, 1)
} else {
(Tok::AddressValue, 2 + hex_len)
}
} else {
get_decimal_number(&text)
}
}
'A'..='Z' | 'a'..='z' | '_' => {
if text.starts_with("x\"") || text.starts_with("b\"") {
let line = &text.lines().next().unwrap()[2..];
match get_string_len(line) {
Some(last_quote) => (Tok::ByteStringValue, 2 + last_quote + 1),
None => {
return Err(vec![(
make_loc(file, start_offset, start_offset + line.len() + 2),
"Missing closing quote (\") after byte string".to_string(),
)])
}
}
} else {
let len = get_name_len(&text);
(get_name_token(&text[..len]), len)
}
}
'&' => {
if text.starts_with("&mut ") {
(Tok::AmpMut, 5)
} else if text.starts_with("&&") {
(Tok::AmpAmp, 2)
} else {
(Tok::Amp, 1)
}
}
'|' => {
if text.starts_with("||") {
(Tok::PipePipe, 2)
} else {
(Tok::Pipe, 1)
}
}
'=' => {
if text.starts_with("==>") {
(Tok::EqualEqualGreater, 3)
} else if text.starts_with("==") {
(Tok::EqualEqual, 2)
} else {
(Tok::Equal, 1)
}
}
'!' => {
if text.starts_with("!=") {
(Tok::ExclaimEqual, 2)
} else {
(Tok::Exclaim, 1)
}
}
'<' => {
if text.starts_with("<=") {
(Tok::LessEqual, 2)
} else if text.starts_with("<<") {
(Tok::LessLess, 2)
} else {
(Tok::Less, 1)
}
}
'>' => {
if text.starts_with(">=") {
(Tok::GreaterEqual, 2)
} else if text.starts_with(">>") {
(Tok::GreaterGreater, 2)
} else {
(Tok::Greater, 1)
}
}
':' => {
if text.starts_with("::") {
(Tok::ColonColon, 2)
} else {
(Tok::Colon, 1)
}
}
'%' => (Tok::Percent, 1),
'(' => (Tok::LParen, 1),
')' => (Tok::RParen, 1),
'[' => (Tok::LBracket, 1),
']' => (Tok::RBracket, 1),
'*' => (Tok::Star, 1),
'+' => (Tok::Plus, 1),
',' => (Tok::Comma, 1),
'-' => (Tok::Minus, 1),
'.' => {
if text.starts_with("..") {
(Tok::PeriodPeriod, 2)
} else {
(Tok::Period, 1)
}
}
'/' => (Tok::Slash, 1),
';' => (Tok::Semicolon, 1),
'^' => (Tok::Caret, 1),
'{' => (Tok::LBrace, 1),
'}' => (Tok::RBrace, 1),
_ => {
let loc = make_loc(file, start_offset, start_offset);
return Err(vec![(loc, format!("Invalid character: '{}'", c))]);
}
};
Ok((tok, len))
}
// Return the length of the substring matching [a-zA-Z0-9_]. Note that
// this does not do any special check for whether the first character
// starts with a number, so the caller is responsible for any additional
// checks on the first character.
fn get_name_len(text: &str) -> usize {
text.chars()
.position(|c| !matches!(c, 'a'..='z' | 'A'..='Z' | '_' | '0'..='9'))
.unwrap_or_else(|| text.len())
}
fn get_decimal_number(text: &str) -> (Tok, usize) {
let len = text
.chars()
.position(|c| !matches!(c, '0'..='9'))
.unwrap_or_else(|| text.len());
let rest = &text[len..];
if rest.starts_with("u8") {
(Tok::U8Value, len + 2)
} else if rest.starts_with("u64") {
(Tok::U64Value, len + 3)
} else if rest.starts_with("u128") {
(Tok::U128Value, len + 4)
} else {
(Tok::NumValue, len)
}
}
// Return the length of the substring containing characters in [0-9a-fA-F].
fn get_hex_digits_len(text: &str) -> usize {
text.find(|c| !matches!(c, 'a'..='f' | 'A'..='F' | '0'..='9'))
.unwrap_or_else(|| text.len())
}
// Return the length of the quoted string, or None if there is no closing quote.
fn get_string_len(text: &str) -> Option<usize> {
let mut pos = 0;
let mut iter = text.chars();
while let Some(chr) = iter.next() {
if chr == '\\' {
// Skip over the escaped character (e.g., a quote or another backslash)
if iter.next().is_some() {
pos += 1;
}
} else if chr == '"' {
return Some(pos);
}
pos += 1;
}
None
}
fn get_name_token(name: &str) -> Tok {
match name {
"abort" => Tok::Abort,
"acquires" => Tok::Acquires,
"as" => Tok::As,
"break" => Tok::Break,
"const" => Tok::Const,
"continue" => Tok::Continue,
"copy" => Tok::Copy,
"copyable" => Tok::Copyable,
"define" => Tok::Define,
"else" => Tok::Else,
"false" => Tok::False,
"fun" => Tok::Fun,
"friend" => Tok::Friend,
"if" => Tok::If,
"invariant" => Tok::Invariant,
"let" => Tok::Let,
"loop" => Tok::Loop,
"module" => Tok::Module,
"move" => Tok::Move,
"native" => Tok::Native,
"public" => Tok::Public,
"resource" => Tok::Resource,
"return" => Tok::Return,
"script" => Tok::Script,
"spec" => Tok::Spec,
"struct" => Tok::Struct,
"true" => Tok::True,
"use" => Tok::Use,
"while" => Tok::While,
_ => Tok::IdentifierValue,
}
}
| {
let errors = self
.doc_comments
.iter()
.map(|(span, _)| {
vec![(
Loc::new(self.file, *span),
"documentation comment cannot be matched to a language item".to_string(),
)]
})
.collect::<Errors>();
if errors.is_empty() {
Ok(std::mem::take(&mut self.matched_doc_comments))
} else {
Err(errors)
}
} | identifier_body |
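A minimal sketch of how this lexer is driven from outside (assuming the `Error` alias and imports already present in the file; the file name and source text are placeholders, and this is not a test from the Move tree):

fn dump_tokens(source: &'static str) -> Result<(), Error> {
    let mut lexer = Lexer::new(source, "example.move", BTreeMap::new());
    loop {
        lexer.advance()?;
        if lexer.peek() == Tok::EOF {
            break;
        }
        // Tok implements Display, so "{}" prints the token's surface form.
        println!("{} '{}'", lexer.peek(), lexer.content());
    }
    Ok(())
}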
lexer.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{errors::*, parser::syntax::make_loc, FileCommentMap, MatchedFileCommentMap};
use codespan::{ByteIndex, Span};
use move_ir_types::location::Loc;
use std::{collections::BTreeMap, fmt};
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Tok {
EOF,
AddressValue,
NumValue,
U8Value,
U64Value,
U128Value,
ByteStringValue,
IdentifierValue,
Exclaim,
ExclaimEqual,
Percent,
Amp,
AmpAmp,
AmpMut,
LParen,
RParen,
LBracket,
RBracket,
Star,
Plus,
Comma,
Minus,
Period,
PeriodPeriod,
Slash,
Colon,
ColonColon,
Semicolon,
Less,
LessEqual,
LessLess,
Equal,
EqualEqual,
EqualEqualGreater,
Greater,
GreaterEqual,
GreaterGreater,
Caret,
Abort,
Acquires,
As,
Break,
Continue,
Copy,
Copyable,
Define,
Else,
False,
If,
Invariant,
Let,
Loop,
Module,
Move,
Native,
Public,
Resource,
Return,
Spec,
Struct,
True,
Use,
While,
LBrace,
Pipe,
PipePipe,
RBrace,
Fun,
Script,
Const,
Friend,
}
impl fmt::Display for Tok {
fn fmt<'f>(&self, formatter: &mut fmt::Formatter<'f>) -> Result<(), fmt::Error> {
use Tok::*;
let s = match *self {
EOF => "[end-of-file]",
AddressValue => "[Address]",
NumValue => "[Num]",
U8Value => "[U8]",
U64Value => "[U64]",
U128Value => "[U128]",
ByteStringValue => "[ByteString]",
IdentifierValue => "[Identifier]",
Exclaim => "!",
ExclaimEqual => "!=",
Percent => "%",
Amp => "&",
AmpAmp => "&&",
AmpMut => "&mut",
LParen => "(",
RParen => ")",
LBracket => "[",
RBracket => "]",
Star => "*",
Plus => "+",
Comma => ",",
Minus => "-",
Period => ".",
PeriodPeriod => "..",
Slash => "/",
Colon => ":",
ColonColon => "::",
Semicolon => ";",
Less => "<",
LessEqual => "<=",
LessLess => "<<",
Equal => "=",
EqualEqual => "==",
EqualEqualGreater => "==>",
Greater => ">",
GreaterEqual => ">=",
GreaterGreater => ">>",
Caret => "^",
Abort => "abort",
Acquires => "acquires",
As => "as",
Break => "break",
Continue => "continue",
Copy => "copy",
Copyable => "copyable",
Define => "define",
Else => "else",
False => "false",
If => "if",
Invariant => "invariant",
Let => "let",
Loop => "loop",
Module => "module",
Move => "move",
Native => "native",
Public => "public",
Resource => "resource",
Return => "return", | True => "true",
Use => "use",
While => "while",
LBrace => "{",
Pipe => "|",
PipePipe => "||",
RBrace => "}",
Fun => "fun",
Script => "script",
Const => "const",
Friend => "friend",
};
fmt::Display::fmt(s, formatter)
}
}
pub struct Lexer<'input> {
text: &'input str,
file: &'static str,
doc_comments: FileCommentMap,
matched_doc_comments: MatchedFileCommentMap,
prev_end: usize,
cur_start: usize,
cur_end: usize,
token: Tok,
}
impl<'input> Lexer<'input> {
pub fn new(
text: &'input str,
file: &'static str,
doc_comments: BTreeMap<Span, String>,
) -> Lexer<'input> {
Lexer {
text,
file,
doc_comments,
matched_doc_comments: BTreeMap::new(),
prev_end: 0,
cur_start: 0,
cur_end: 0,
token: Tok::EOF,
}
}
pub fn peek(&self) -> Tok {
self.token
}
pub fn content(&self) -> &str {
&self.text[self.cur_start..self.cur_end]
}
pub fn file_name(&self) -> &'static str {
self.file
}
pub fn start_loc(&self) -> usize {
self.cur_start
}
pub fn previous_end_loc(&self) -> usize {
self.prev_end
}
// Look ahead to the next token after the current one and return it without advancing
// the state of the lexer.
pub fn lookahead(&self) -> Result<Tok, Error> {
let text = self.text[self.cur_end..].trim_start();
let offset = self.text.len() - text.len();
let (tok, _) = find_token(self.file, text, offset)?;
Ok(tok)
}
// Look ahead to the next two tokens after the current one and return them without advancing
// the state of the lexer.
pub fn lookahead2(&self) -> Result<(Tok, Tok), Error> {
let text = self.text[self.cur_end..].trim_start();
let offset = self.text.len() - text.len();
let (first, length) = find_token(self.file, text, offset)?;
let text2 = self.text[offset + length..].trim_start();
let offset2 = self.text.len() - text2.len();
let (second, _) = find_token(self.file, text2, offset2)?;
Ok((first, second))
}
// Matches the doc comments after the last token (or the beginning of the file) to the position
// of the current token. This moves the comments out of `doc_comments` and
// into `matched_doc_comments`. At the end of parsing, if `doc_comments` is not empty, errors
// for stale doc comments will be produced.
//
// Calling this function during parsing effectively marks a valid point for documentation
// comments. The documentation comments are not stored in the AST, but can be retrieved by
// using the start position of an item as an index into `matched_doc_comments`.
pub fn match_doc_comments(&mut self) {
let start = self.previous_end_loc() as u32;
let end = self.cur_start as u32;
let mut matched = vec![];
let merged = self
.doc_comments
.range(Span::new(start, start)..Span::new(end, end))
.map(|(span, s)| {
matched.push(*span);
s.clone()
})
.collect::<Vec<String>>()
.join("\n");
for span in matched {
self.doc_comments.remove(&span);
}
self.matched_doc_comments.insert(ByteIndex(end), merged);
}
// At the end of parsing, checks whether there are any unmatched documentation comments,
// producing errors if so. Otherwise returns a map from file position to associated
// documentation.
pub fn check_and_get_doc_comments(&mut self) -> Result<MatchedFileCommentMap, Errors> {
let errors = self
.doc_comments
.iter()
.map(|(span, _)| {
vec![(
Loc::new(self.file, *span),
"documentation comment cannot be matched to a language item".to_string(),
)]
})
.collect::<Errors>();
if errors.is_empty() {
Ok(std::mem::take(&mut self.matched_doc_comments))
} else {
Err(errors)
}
}
pub fn advance(&mut self) -> Result<(), Error> {
self.prev_end = self.cur_end;
let text = self.text[self.cur_end..].trim_start();
self.cur_start = self.text.len() - text.len();
let (token, len) = find_token(self.file, text, self.cur_start)?;
self.cur_end = self.cur_start + len;
self.token = token;
Ok(())
}
// Replace the current token. The lexer will always match the longest token,
// but sometimes the parser will prefer to replace it with a shorter one,
// e.g., ">" instead of ">>".
pub fn replace_token(&mut self, token: Tok, len: usize) {
self.token = token;
self.cur_end = self.cur_start + len
}
}
// Find the next token and its length without changing the state of the lexer.
fn find_token(file: &'static str, text: &str, start_offset: usize) -> Result<(Tok, usize), Error> {
let c: char = match text.chars().next() {
Some(next_char) => next_char,
None => {
return Ok((Tok::EOF, 0));
}
};
let (tok, len) = match c {
'0'..='9' => {
if text.starts_with("0x") && text.len() > 2 {
let hex_len = get_hex_digits_len(&text[2..]);
if hex_len == 0 {
// Fall back to treating this as a "0" token.
(Tok::NumValue, 1)
} else {
(Tok::AddressValue, 2 + hex_len)
}
} else {
get_decimal_number(&text)
}
}
'A'..='Z' | 'a'..='z' | '_' => {
if text.starts_with("x\"") || text.starts_with("b\"") {
let line = &text.lines().next().unwrap()[2..];
match get_string_len(line) {
Some(last_quote) => (Tok::ByteStringValue, 2 + last_quote + 1),
None => {
return Err(vec![(
make_loc(file, start_offset, start_offset + line.len() + 2),
"Missing closing quote (\") after byte string".to_string(),
)])
}
}
} else {
let len = get_name_len(&text);
(get_name_token(&text[..len]), len)
}
}
'&' => {
if text.starts_with("&mut ") {
(Tok::AmpMut, 5)
} else if text.starts_with("&&") {
(Tok::AmpAmp, 2)
} else {
(Tok::Amp, 1)
}
}
'|' => {
if text.starts_with("||") {
(Tok::PipePipe, 2)
} else {
(Tok::Pipe, 1)
}
}
'=' => {
if text.starts_with("==>") {
(Tok::EqualEqualGreater, 3)
} else if text.starts_with("==") {
(Tok::EqualEqual, 2)
} else {
(Tok::Equal, 1)
}
}
'!' => {
if text.starts_with("!=") {
(Tok::ExclaimEqual, 2)
} else {
(Tok::Exclaim, 1)
}
}
'<' => {
if text.starts_with("<=") {
(Tok::LessEqual, 2)
} else if text.starts_with("<<") {
(Tok::LessLess, 2)
} else {
(Tok::Less, 1)
}
}
'>' => {
if text.starts_with(">=") {
(Tok::GreaterEqual, 2)
} else if text.starts_with(">>") {
(Tok::GreaterGreater, 2)
} else {
(Tok::Greater, 1)
}
}
':' => {
if text.starts_with("::") {
(Tok::ColonColon, 2)
} else {
(Tok::Colon, 1)
}
}
'%' => (Tok::Percent, 1),
'(' => (Tok::LParen, 1),
')' => (Tok::RParen, 1),
'[' => (Tok::LBracket, 1),
']' => (Tok::RBracket, 1),
'*' => (Tok::Star, 1),
'+' => (Tok::Plus, 1),
',' => (Tok::Comma, 1),
'-' => (Tok::Minus, 1),
'.' => {
if text.starts_with("..") {
(Tok::PeriodPeriod, 2)
} else {
(Tok::Period, 1)
}
}
'/' => (Tok::Slash, 1),
';' => (Tok::Semicolon, 1),
'^' => (Tok::Caret, 1),
'{' => (Tok::LBrace, 1),
'}' => (Tok::RBrace, 1),
_ => {
let loc = make_loc(file, start_offset, start_offset);
return Err(vec![(loc, format!("Invalid character: '{}'", c))]);
}
};
Ok((tok, len))
}
// Return the length of the substring matching [a-zA-Z0-9_]. Note that
// this does not do any special check for whether the first character
// starts with a number, so the caller is responsible for any additional
// checks on the first character.
fn get_name_len(text: &str) -> usize {
text.chars()
.position(|c| !matches!(c, 'a'..='z' | 'A'..='Z' | '_' | '0'..='9'))
.unwrap_or_else(|| text.len())
}
fn get_decimal_number(text: &str) -> (Tok, usize) {
let len = text
.chars()
.position(|c| !matches!(c, '0'..='9'))
.unwrap_or_else(|| text.len());
let rest = &text[len..];
if rest.starts_with("u8") {
(Tok::U8Value, len + 2)
} else if rest.starts_with("u64") {
(Tok::U64Value, len + 3)
} else if rest.starts_with("u128") {
(Tok::U128Value, len + 4)
} else {
(Tok::NumValue, len)
}
}
// Return the length of the substring containing characters in [0-9a-fA-F].
fn get_hex_digits_len(text: &str) -> usize {
text.find(|c| !matches!(c, 'a'..='f' | 'A'..='F' | '0'..='9'))
.unwrap_or_else(|| text.len())
}
// Return the length of the quoted string, or None if there is no closing quote.
fn get_string_len(text: &str) -> Option<usize> {
let mut pos = 0;
let mut iter = text.chars();
while let Some(chr) = iter.next() {
if chr == '\\' {
// Skip over the escaped character (e.g., a quote or another backslash)
if iter.next().is_some() {
pos += 1;
}
} else if chr == '"' {
return Some(pos);
}
pos += 1;
}
None
}
fn get_name_token(name: &str) -> Tok {
match name {
"abort" => Tok::Abort,
"acquires" => Tok::Acquires,
"as" => Tok::As,
"break" => Tok::Break,
"const" => Tok::Const,
"continue" => Tok::Continue,
"copy" => Tok::Copy,
"copyable" => Tok::Copyable,
"define" => Tok::Define,
"else" => Tok::Else,
"false" => Tok::False,
"fun" => Tok::Fun,
"friend" => Tok::Friend,
"if" => Tok::If,
"invariant" => Tok::Invariant,
"let" => Tok::Let,
"loop" => Tok::Loop,
"module" => Tok::Module,
"move" => Tok::Move,
"native" => Tok::Native,
"public" => Tok::Public,
"resource" => Tok::Resource,
"return" => Tok::Return,
"script" => Tok::Script,
"spec" => Tok::Spec,
"struct" => Tok::Struct,
"true" => Tok::True,
"use" => Tok::Use,
"while" => Tok::While,
_ => Tok::IdentifierValue,
}
} | Spec => "spec",
Struct => "struct", | random_line_split |
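Some hand-worked consequences of the scanning rules above, written against the private `find_token` (so they only make sense inside this module; illustrative, not tests from the Move tree):

fn scanning_examples() -> Result<(), Error> {
    // "0x" followed by hex digits is an address literal; the length covers both.
    assert!(matches!(find_token("ex.move", "0x1af2", 0)?, (Tok::AddressValue, 6)));
    // "0x" with no hex digits falls back to the single "0" number token.
    assert!(matches!(find_token("ex.move", "0xzz", 0)?, (Tok::NumValue, 1)));
    // A decimal literal with a width suffix keeps the suffix in its length.
    assert!(matches!(find_token("ex.move", "128u8", 0)?, (Tok::U8Value, 5)));
    // "&mut " is matched as one token, trailing space included.
    assert!(matches!(find_token("ex.move", "&mut x", 0)?, (Tok::AmpMut, 5)));
    assert!(matches!(find_token("ex.move", "==> y", 0)?, (Tok::EqualEqualGreater, 3)));
    Ok(())
}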
lexer.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{errors::*, parser::syntax::make_loc, FileCommentMap, MatchedFileCommentMap};
use codespan::{ByteIndex, Span};
use move_ir_types::location::Loc;
use std::{collections::BTreeMap, fmt};
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Tok {
EOF,
AddressValue,
NumValue,
U8Value,
U64Value,
U128Value,
ByteStringValue,
IdentifierValue,
Exclaim,
ExclaimEqual,
Percent,
Amp,
AmpAmp,
AmpMut,
LParen,
RParen,
LBracket,
RBracket,
Star,
Plus,
Comma,
Minus,
Period,
PeriodPeriod,
Slash,
Colon,
ColonColon,
Semicolon,
Less,
LessEqual,
LessLess,
Equal,
EqualEqual,
EqualEqualGreater,
Greater,
GreaterEqual,
GreaterGreater,
Caret,
Abort,
Acquires,
As,
Break,
Continue,
Copy,
Copyable,
Define,
Else,
False,
If,
Invariant,
Let,
Loop,
Module,
Move,
Native,
Public,
Resource,
Return,
Spec,
Struct,
True,
Use,
While,
LBrace,
Pipe,
PipePipe,
RBrace,
Fun,
Script,
Const,
Friend,
}
impl fmt::Display for Tok {
fn fmt<'f>(&self, formatter: &mut fmt::Formatter<'f>) -> Result<(), fmt::Error> {
use Tok::*;
let s = match *self {
EOF => "[end-of-file]",
AddressValue => "[Address]",
NumValue => "[Num]",
U8Value => "[U8]",
U64Value => "[U64]",
U128Value => "[U128]",
ByteStringValue => "[ByteString]",
IdentifierValue => "[Identifier]",
Exclaim => "!",
ExclaimEqual => "!=",
Percent => "%",
Amp => "&",
AmpAmp => "&&",
AmpMut => "&mut",
LParen => "(",
RParen => ")",
LBracket => "[",
RBracket => "]",
Star => "*",
Plus => "+",
Comma => ",",
Minus => "-",
Period => ".",
PeriodPeriod => "..",
Slash => "/",
Colon => ":",
ColonColon => "::",
Semicolon => ";",
Less => "<",
LessEqual => "<=",
LessLess => "<<",
Equal => "=",
EqualEqual => "==",
EqualEqualGreater => "==>",
Greater => ">",
GreaterEqual => ">=",
GreaterGreater => ">>",
Caret => "^",
Abort => "abort",
Acquires => "acquires",
As => "as",
Break => "break",
Continue => "continue",
Copy => "copy",
Copyable => "copyable",
Define => "define",
Else => "else",
False => "false",
If => "if",
Invariant => "invariant",
Let => "let",
Loop => "loop",
Module => "module",
Move => "move",
Native => "native",
Public => "public",
Resource => "resource",
Return => "return",
Spec => "spec",
Struct => "struct",
True => "true",
Use => "use",
While => "while",
LBrace => "{",
Pipe => "|",
PipePipe => "||",
RBrace => "}",
Fun => "fun",
Script => "script",
Const => "const",
Friend => "friend",
};
fmt::Display::fmt(s, formatter)
}
}
pub struct Lexer<'input> {
text: &'input str,
file: &'static str,
doc_comments: FileCommentMap,
matched_doc_comments: MatchedFileCommentMap,
prev_end: usize,
cur_start: usize,
cur_end: usize,
token: Tok,
}
impl<'input> Lexer<'input> {
pub fn new(
text: &'input str,
file: &'static str,
doc_comments: BTreeMap<Span, String>,
) -> Lexer<'input> {
Lexer {
text,
file,
doc_comments,
matched_doc_comments: BTreeMap::new(),
prev_end: 0,
cur_start: 0,
cur_end: 0,
token: Tok::EOF,
}
}
pub fn peek(&self) -> Tok {
self.token
}
pub fn content(&self) -> &str {
&self.text[self.cur_start..self.cur_end]
}
pub fn file_name(&self) -> &'static str {
self.file
}
pub fn start_loc(&self) -> usize {
self.cur_start
}
pub fn previous_end_loc(&self) -> usize {
self.prev_end
}
// Look ahead to the next token after the current one and return it without advancing
// the state of the lexer.
pub fn lookahead(&self) -> Result<Tok, Error> {
let text = self.text[self.cur_end..].trim_start();
let offset = self.text.len() - text.len();
let (tok, _) = find_token(self.file, text, offset)?;
Ok(tok)
}
// Look ahead to the next two tokens after the current one and return them without advancing
// the state of the lexer.
pub fn lookahead2(&self) -> Result<(Tok, Tok), Error> {
let text = self.text[self.cur_end..].trim_start();
let offset = self.text.len() - text.len();
let (first, length) = find_token(self.file, text, offset)?;
let text2 = self.text[offset + length..].trim_start();
let offset2 = self.text.len() - text2.len();
let (second, _) = find_token(self.file, text2, offset2)?;
Ok((first, second))
}
// Matches the doc comments after the last token (or the beginning of the file) to the position
// of the current token. This moves the comments out of `doc_comments` and
// into `matched_doc_comments`. At the end of parsing, if `doc_comments` is not empty, errors
// for stale doc comments will be produced.
//
// Calling this function during parsing effectively marks a valid point for documentation
// comments. The documentation comments are not stored in the AST, but can be retrieved by
// using the start position of an item as an index into `matched_doc_comments`.
pub fn | (&mut self) {
let start = self.previous_end_loc() as u32;
let end = self.cur_start as u32;
let mut matched = vec![];
let merged = self
.doc_comments
.range(Span::new(start, start)..Span::new(end, end))
.map(|(span, s)| {
matched.push(*span);
s.clone()
})
.collect::<Vec<String>>()
.join("\n");
for span in matched {
self.doc_comments.remove(&span);
}
self.matched_doc_comments.insert(ByteIndex(end), merged);
}
// At the end of parsing, checks whether there are any unmatched documentation comments,
// producing errors if so. Otherwise returns a map from file position to associated
// documentation.
pub fn check_and_get_doc_comments(&mut self) -> Result<MatchedFileCommentMap, Errors> {
let errors = self
.doc_comments
.iter()
.map(|(span, _)| {
vec![(
Loc::new(self.file, *span),
"documentation comment cannot be matched to a language item".to_string(),
)]
})
.collect::<Errors>();
if errors.is_empty() {
Ok(std::mem::take(&mut self.matched_doc_comments))
} else {
Err(errors)
}
}
pub fn advance(&mut self) -> Result<(), Error> {
self.prev_end = self.cur_end;
let text = self.text[self.cur_end..].trim_start();
self.cur_start = self.text.len() - text.len();
let (token, len) = find_token(self.file, text, self.cur_start)?;
self.cur_end = self.cur_start + len;
self.token = token;
Ok(())
}
// Replace the current token. The lexer will always match the longest token,
// but sometimes the parser will prefer to replace it with a shorter one,
// e.g., ">" instead of ">>".
pub fn replace_token(&mut self, token: Tok, len: usize) {
self.token = token;
self.cur_end = self.cur_start + len
}
}
// Find the next token and its length without changing the state of the lexer.
fn find_token(file: &'static str, text: &str, start_offset: usize) -> Result<(Tok, usize), Error> {
let c: char = match text.chars().next() {
Some(next_char) => next_char,
None => {
return Ok((Tok::EOF, 0));
}
};
let (tok, len) = match c {
'0'..='9' => {
if text.starts_with("0x") && text.len() > 2 {
let hex_len = get_hex_digits_len(&text[2..]);
if hex_len == 0 {
// Fall back to treating this as a "0" token.
(Tok::NumValue, 1)
} else {
(Tok::AddressValue, 2 + hex_len)
}
} else {
get_decimal_number(&text)
}
}
'A'..='Z' | 'a'..='z' | '_' => {
if text.starts_with("x\"") || text.starts_with("b\"") {
let line = &text.lines().next().unwrap()[2..];
match get_string_len(line) {
Some(last_quote) => (Tok::ByteStringValue, 2 + last_quote + 1),
None => {
return Err(vec![(
make_loc(file, start_offset, start_offset + line.len() + 2),
"Missing closing quote (\") after byte string".to_string(),
)])
}
}
} else {
let len = get_name_len(&text);
(get_name_token(&text[..len]), len)
}
}
'&' => {
if text.starts_with("&mut ") {
(Tok::AmpMut, 5)
} else if text.starts_with("&&") {
(Tok::AmpAmp, 2)
} else {
(Tok::Amp, 1)
}
}
'|' => {
if text.starts_with("||") {
(Tok::PipePipe, 2)
} else {
(Tok::Pipe, 1)
}
}
'=' => {
if text.starts_with("==>") {
(Tok::EqualEqualGreater, 3)
} else if text.starts_with("==") {
(Tok::EqualEqual, 2)
} else {
(Tok::Equal, 1)
}
}
'!' => {
if text.starts_with("!=") {
(Tok::ExclaimEqual, 2)
} else {
(Tok::Exclaim, 1)
}
}
'<' => {
if text.starts_with("<=") {
(Tok::LessEqual, 2)
} else if text.starts_with("<<") {
(Tok::LessLess, 2)
} else {
(Tok::Less, 1)
}
}
'>' => {
if text.starts_with(">=") {
(Tok::GreaterEqual, 2)
} else if text.starts_with(">>") {
(Tok::GreaterGreater, 2)
} else {
(Tok::Greater, 1)
}
}
':' => {
if text.starts_with("::") {
(Tok::ColonColon, 2)
} else {
(Tok::Colon, 1)
}
}
'%' => (Tok::Percent, 1),
'(' => (Tok::LParen, 1),
')' => (Tok::RParen, 1),
'[' => (Tok::LBracket, 1),
']' => (Tok::RBracket, 1),
'*' => (Tok::Star, 1),
'+' => (Tok::Plus, 1),
',' => (Tok::Comma, 1),
'-' => (Tok::Minus, 1),
'.' => {
if text.starts_with("..") {
(Tok::PeriodPeriod, 2)
} else {
(Tok::Period, 1)
}
}
'/' => (Tok::Slash, 1),
';' => (Tok::Semicolon, 1),
'^' => (Tok::Caret, 1),
'{' => (Tok::LBrace, 1),
'}' => (Tok::RBrace, 1),
_ => {
let loc = make_loc(file, start_offset, start_offset);
return Err(vec![(loc, format!("Invalid character: '{}'", c))]);
}
};
Ok((tok, len))
}
// Return the length of the substring matching [a-zA-Z0-9_]. Note that
// this does not do any special check for whether the first character
// starts with a number, so the caller is responsible for any additional
// checks on the first character.
fn get_name_len(text: &str) -> usize {
text.chars()
.position(|c| !matches!(c, 'a'..='z' | 'A'..='Z' | '_' | '0'..='9'))
.unwrap_or_else(|| text.len())
}
fn get_decimal_number(text: &str) -> (Tok, usize) {
let len = text
.chars()
.position(|c| !matches!(c, '0'..='9'))
.unwrap_or_else(|| text.len());
let rest = &text[len..];
if rest.starts_with("u8") {
(Tok::U8Value, len + 2)
} else if rest.starts_with("u64") {
(Tok::U64Value, len + 3)
} else if rest.starts_with("u128") {
(Tok::U128Value, len + 4)
} else {
(Tok::NumValue, len)
}
}
// Return the length of the substring containing characters in [0-9a-fA-F].
fn get_hex_digits_len(text: &str) -> usize {
text.find(|c| !matches!(c, 'a'..='f' | 'A'..='F' | '0'..='9'))
.unwrap_or_else(|| text.len())
}
// Return the length of the quoted string, or None if there is no closing quote.
fn get_string_len(text: &str) -> Option<usize> {
let mut pos = 0;
let mut iter = text.chars();
while let Some(chr) = iter.next() {
if chr == '\\' {
// Skip over the escaped character (e.g., a quote or another backslash)
if iter.next().is_some() {
pos += 1;
}
} else if chr == '"' {
return Some(pos);
}
pos += 1;
}
None
}
fn get_name_token(name: &str) -> Tok {
match name {
"abort" => Tok::Abort,
"acquires" => Tok::Acquires,
"as" => Tok::As,
"break" => Tok::Break,
"const" => Tok::Const,
"continue" => Tok::Continue,
"copy" => Tok::Copy,
"copyable" => Tok::Copyable,
"define" => Tok::Define,
"else" => Tok::Else,
"false" => Tok::False,
"fun" => Tok::Fun,
"friend" => Tok::Friend,
"if" => Tok::If,
"invariant" => Tok::Invariant,
"let" => Tok::Let,
"loop" => Tok::Loop,
"module" => Tok::Module,
"move" => Tok::Move,
"native" => Tok::Native,
"public" => Tok::Public,
"resource" => Tok::Resource,
"return" => Tok::Return,
"script" => Tok::Script,
"spec" => Tok::Spec,
"struct" => Tok::Struct,
"true" => Tok::True,
"use" => Tok::Use,
"while" => Tok::While,
_ => Tok::IdentifierValue,
}
}
| match_doc_comments | identifier_name |
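The identifier reconstructed in this row is `match_doc_comments`; the comments in the code describe when it is meant to be called. Pieced together as a sketch (the surrounding parser functions are assumptions, not the real Move parser):

// Assumed calling pattern for the doc-comment hooks.
fn parse_item(lexer: &mut Lexer) {
    // Called wherever a doc comment may legally attach; comments seen since
    // the previous token are filed under the current token's start position.
    lexer.match_doc_comments();
    // ... parse the item itself ...
}

fn finish_file(lexer: &mut Lexer) -> Result<MatchedFileCommentMap, Errors> {
    // Any comment that never found an attachment point becomes an error here.
    lexer.check_and_get_doc_comments()
}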
dhcp.rs | /* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use applayer;
use core;
use core::{ALPROTO_UNKNOWN, AppProto, Flow};
use dhcp::parser::*;
use libc;
use log::*;
use nom;
use parser::*;
use std;
use std::ffi::{CStr,CString};
use std::mem::transmute;
static mut ALPROTO_DHCP: AppProto = ALPROTO_UNKNOWN;
static DHCP_MIN_FRAME_LEN: u32 = 232;
pub const BOOTP_REQUEST: u8 = 1;
pub const BOOTP_REPLY: u8 = 2;
// DHCP option types. Names based on IANA naming:
// https://www.iana.org/assignments/bootp-dhcp-parameters/bootp-dhcp-parameters.xhtml
pub const DHCP_OPT_SUBNET_MASK: u8 = 1;
pub const DHCP_OPT_ROUTERS: u8 = 3;
pub const DHCP_OPT_DNS_SERVER: u8 = 6;
pub const DHCP_OPT_HOSTNAME: u8 = 12;
pub const DHCP_OPT_REQUESTED_IP: u8 = 50;
pub const DHCP_OPT_ADDRESS_TIME: u8 = 51;
pub const DHCP_OPT_TYPE: u8 = 53;
pub const DHCP_OPT_SERVER_ID: u8 = 54;
pub const DHCP_OPT_PARAMETER_LIST: u8 = 55;
pub const DHCP_OPT_RENEWAL_TIME: u8 = 58;
pub const DHCP_OPT_REBINDING_TIME: u8 = 59;
pub const DHCP_OPT_CLIENT_ID: u8 = 61;
pub const DHCP_OPT_END: u8 = 255;
/// DHCP message types.
pub const DHCP_TYPE_DISCOVER: u8 = 1;
pub const DHCP_TYPE_OFFER: u8 = 2;
pub const DHCP_TYPE_REQUEST: u8 = 3;
pub const DHCP_TYPE_DECLINE: u8 = 4;
pub const DHCP_TYPE_ACK: u8 = 5;
pub const DHCP_TYPE_NAK: u8 = 6;
pub const DHCP_TYPE_RELEASE: u8 = 7;
pub const DHCP_TYPE_INFORM: u8 = 8;
/// DHCP parameter types.
/// https://www.iana.org/assignments/bootp-dhcp-parameters/bootp-dhcp-parameters.txt
pub const DHCP_PARAM_SUBNET_MASK: u8 = 1;
pub const DHCP_PARAM_ROUTER: u8 = 3;
pub const DHCP_PARAM_DNS_SERVER: u8 = 6;
pub const DHCP_PARAM_DOMAIN: u8 = 15;
pub const DHCP_PARAM_ARP_TIMEOUT: u8 = 35;
pub const DHCP_PARAM_NTP_SERVER: u8 = 42;
pub const DHCP_PARAM_TFTP_SERVER_NAME: u8 = 66;
pub const DHCP_PARAM_TFTP_SERVER_IP: u8 = 150;
#[repr(u32)]
pub enum DHCPEvent {
TruncatedOptions = 0,
MalformedOptions,
}
/// The concept of a transaction is more to satisfy the Suricata
/// app-layer. This DHCP parser is actually stateless where each
/// message is its own transaction.
pub struct DHCPTransaction {
tx_id: u64,
pub message: DHCPMessage,
logged: applayer::LoggerFlags,
de_state: Option<*mut core::DetectEngineState>,
events: *mut core::AppLayerDecoderEvents,
}
impl DHCPTransaction {
pub fn new(id: u64, message: DHCPMessage) -> DHCPTransaction {
DHCPTransaction {
tx_id: id,
message: message,
logged: applayer::LoggerFlags::new(),
de_state: None,
events: std::ptr::null_mut(),
}
}
}
export_tx_get_detect_state!(rs_dhcp_tx_get_detect_state, DHCPTransaction);
export_tx_set_detect_state!(rs_dhcp_tx_set_detect_state, DHCPTransaction);
pub struct DHCPState {
// Internal transaction ID.
tx_id: u64,
// List of transactions.
transactions: Vec<DHCPTransaction>,
events: u16,
}
impl DHCPState {
pub fn new() -> DHCPState {
return DHCPState {
tx_id: 0,
transactions: Vec::new(),
events: 0,
};
}
pub fn parse(&mut self, input: &[u8]) -> bool {
match dhcp_parse(input) {
nom::IResult::Done(_, message) => {
let malformed_options = message.malformed_options;
let truncated_options = message.truncated_options;
self.tx_id += 1;
let transaction = DHCPTransaction::new(self.tx_id, message);
self.transactions.push(transaction);
if malformed_options {
self.set_event(DHCPEvent::MalformedOptions);
}
if truncated_options {
self.set_event(DHCPEvent::TruncatedOptions);
}
return true;
}
_ => {
return false;
}
}
}
pub fn get_tx(&mut self, tx_id: u64) -> Option<&DHCPTransaction> {
for tx in &mut self.transactions {
if tx.tx_id == tx_id + 1 {
return Some(tx);
}
}
return None;
}
fn free_tx(&mut self, tx_id: u64) {
let len = self.transactions.len();
let mut found = false;
let mut index = 0;
for i in 0..len {
let tx = &self.transactions[i];
if tx.tx_id == tx_id + 1 {
found = true;
index = i;
break;
}
}
if found |
}
fn set_event(&mut self, event: DHCPEvent) {
if let Some(tx) = self.transactions.last_mut() {
core::sc_app_layer_decoder_events_set_event_raw(
&mut tx.events, event as u8);
self.events += 1;
}
}
fn get_tx_iterator(&mut self, min_tx_id: u64, state: &mut u64) ->
Option<(&DHCPTransaction, u64, bool)>
{
let mut index = *state as usize;
let len = self.transactions.len();
while index < len {
let tx = &self.transactions[index];
if tx.tx_id < min_tx_id + 1 {
index += 1;
continue;
}
*state = index as u64 + 1;
return Some((tx, tx.tx_id - 1, (len - index) > 1));
}
return None;
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_probing_parser(_flow: *const Flow,
input: *const libc::uint8_t,
input_len: u32,
_offset: *const u32) -> AppProto {
if input_len < DHCP_MIN_FRAME_LEN {
return ALPROTO_UNKNOWN;
}
let slice = build_slice!(input, input_len as usize);
match parse_header(slice) {
nom::IResult::Done(_, _) => {
return unsafe { ALPROTO_DHCP };
}
_ => {
return ALPROTO_UNKNOWN;
}
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_get_alstate_progress(_tx: *mut libc::c_void,
_direction: libc::uint8_t) -> libc::c_int {
// As this is a stateless parser, simply use 1.
return 1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_progress_completion_status(
_direction: libc::uint8_t) -> libc::c_int {
// The presence of a transaction means we are complete.
return 1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx(state: *mut libc::c_void,
tx_id: libc::uint64_t) -> *mut libc::c_void {
let state = cast_pointer!(state, DHCPState);
match state.get_tx(tx_id) {
Some(tx) => {
return unsafe { transmute(tx) };
}
None => {
return std::ptr::null_mut();
}
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx_count(state: *mut libc::c_void) -> libc::uint64_t {
let state = cast_pointer!(state, DHCPState);
return state.tx_id;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_parse(_flow: *const core::Flow,
state: *mut libc::c_void,
_pstate: *mut libc::c_void,
input: *const libc::uint8_t,
input_len: u32,
_data: *const libc::c_void,
_flags: u8) -> i8 {
let state = cast_pointer!(state, DHCPState);
let buf = build_slice!(input, input_len as usize);
if state.parse(buf) {
return 1;
}
return -1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_tx_free(
state: *mut libc::c_void,
tx_id: libc::uint64_t)
{
let state = cast_pointer!(state, DHCPState);
state.free_tx(tx_id);
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_new() -> *mut libc::c_void {
let state = DHCPState::new();
let boxed = Box::new(state);
return unsafe {
transmute(boxed)
};
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_free(state: *mut libc::c_void) {
// Just unbox...
let _drop: Box<DHCPState> = unsafe { transmute(state) };
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_get_logged(_state: *mut libc::c_void, tx: *mut libc::c_void) -> u32 {
let tx = cast_pointer!(tx, DHCPTransaction);
return tx.logged.get();
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_set_logged(_state: *mut libc::c_void,
tx: *mut libc::c_void,
logged: libc::uint32_t) {
let tx = cast_pointer!(tx, DHCPTransaction);
tx.logged.set(logged);
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_events(state: *mut libc::c_void,
tx_id: libc::uint64_t)
-> *mut core::AppLayerDecoderEvents
{
let state = cast_pointer!(state, DHCPState);
match state.get_tx(tx_id) {
Some(tx) => tx.events,
_ => std::ptr::null_mut(),
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_event_info(
event_name: *const libc::c_char,
event_id: *mut libc::c_int,
event_type: *mut core::AppLayerEventType)
-> libc::c_int
{
if event_name == std::ptr::null() {
return -1;
}
let c_event_name: &CStr = unsafe { CStr::from_ptr(event_name) };
let event = match c_event_name.to_str() {
Ok(s) => {
match s {
"malformed_options" => DHCPEvent::MalformedOptions as i32,
"truncated_options" => DHCPEvent::TruncatedOptions as i32,
_ => -1, // unknown event
}
},
Err(_) => -1, // UTF-8 conversion failed
};
unsafe{
*event_type = core::APP_LAYER_EVENT_TYPE_TRANSACTION;
*event_id = event as libc::c_int;
};
0
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx_iterator(
_ipproto: libc::uint8_t,
_alproto: AppProto,
state: *mut libc::c_void,
min_tx_id: libc::uint64_t,
_max_tx_id: libc::uint64_t,
istate: &mut libc::uint64_t)
-> applayer::AppLayerGetTxIterTuple
{
let state = cast_pointer!(state, DHCPState);
match state.get_tx_iterator(min_tx_id, istate) {
Some((tx, out_tx_id, has_next)) => {
let c_tx = unsafe { transmute(tx) };
let ires = applayer::AppLayerGetTxIterTuple::with_values(
c_tx, out_tx_id, has_next);
return ires;
}
None => {
return applayer::AppLayerGetTxIterTuple::not_found();
}
}
}
const PARSER_NAME: &'static [u8] = b"dhcp\0";
#[no_mangle]
pub unsafe extern "C" fn rs_dhcp_register_parser() {
SCLogDebug!("Registering DHCP parser.");
let ports = CString::new("[67,68]").unwrap();
let parser = RustParser {
name: PARSER_NAME.as_ptr() as *const libc::c_char,
default_port: ports.as_ptr(),
ipproto: libc::IPPROTO_UDP,
probe_ts: rs_dhcp_probing_parser,
probe_tc: rs_dhcp_probing_parser,
min_depth: 0,
max_depth: 16,
state_new: rs_dhcp_state_new,
state_free: rs_dhcp_state_free,
tx_free: rs_dhcp_state_tx_free,
parse_ts: rs_dhcp_parse,
parse_tc: rs_dhcp_parse,
get_tx_count: rs_dhcp_state_get_tx_count,
get_tx: rs_dhcp_state_get_tx,
tx_get_comp_st: rs_dhcp_state_progress_completion_status,
tx_get_progress: rs_dhcp_tx_get_alstate_progress,
get_tx_logged: Some(rs_dhcp_tx_get_logged),
set_tx_logged: Some(rs_dhcp_tx_set_logged),
get_de_state: rs_dhcp_tx_get_detect_state,
set_de_state: rs_dhcp_tx_set_detect_state,
get_events: Some(rs_dhcp_state_get_events),
get_eventinfo: Some(rs_dhcp_state_get_event_info),
localstorage_new: None,
localstorage_free: None,
get_tx_mpm_id: None,
set_tx_mpm_id: None,
get_files: None,
get_tx_iterator: Some(rs_dhcp_state_get_tx_iterator),
};
let ip_proto_str = CString::new("udp").unwrap();
if AppLayerProtoDetectConfProtoDetectionEnabled(ip_proto_str.as_ptr(), parser.name) != 0 {
let alproto = AppLayerRegisterProtocolDetection(&parser, 1);
ALPROTO_DHCP = alproto;
if AppLayerParserConfParserEnabled(ip_proto_str.as_ptr(), parser.name) != 0 {
let _ = AppLayerRegisterParser(&parser, alproto);
}
} else {
SCLogDebug!("Protocol detector and parser disabled for DHCP.");
}
}
| {
self.transactions.remove(index);
} | conditional_block |
dhcp.rs | /* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use applayer;
use core;
use core::{ALPROTO_UNKNOWN, AppProto, Flow};
use dhcp::parser::*;
use libc;
use log::*;
use nom;
use parser::*;
use std;
use std::ffi::{CStr,CString};
use std::mem::transmute;
static mut ALPROTO_DHCP: AppProto = ALPROTO_UNKNOWN;
static DHCP_MIN_FRAME_LEN: u32 = 232;
pub const BOOTP_REQUEST: u8 = 1;
pub const BOOTP_REPLY: u8 = 2;
// DHCP option types. Names based on IANA naming:
// https://www.iana.org/assignments/bootp-dhcp-parameters/bootp-dhcp-parameters.xhtml
pub const DHCP_OPT_SUBNET_MASK: u8 = 1;
pub const DHCP_OPT_ROUTERS: u8 = 3;
pub const DHCP_OPT_DNS_SERVER: u8 = 6;
pub const DHCP_OPT_HOSTNAME: u8 = 12;
pub const DHCP_OPT_REQUESTED_IP: u8 = 50;
pub const DHCP_OPT_ADDRESS_TIME: u8 = 51;
pub const DHCP_OPT_TYPE: u8 = 53;
pub const DHCP_OPT_SERVER_ID: u8 = 54;
pub const DHCP_OPT_PARAMETER_LIST: u8 = 55;
pub const DHCP_OPT_RENEWAL_TIME: u8 = 58;
pub const DHCP_OPT_REBINDING_TIME: u8 = 59;
pub const DHCP_OPT_CLIENT_ID: u8 = 61;
pub const DHCP_OPT_END: u8 = 255;
/// DHCP message types.
pub const DHCP_TYPE_DISCOVER: u8 = 1;
pub const DHCP_TYPE_OFFER: u8 = 2;
pub const DHCP_TYPE_REQUEST: u8 = 3;
pub const DHCP_TYPE_DECLINE: u8 = 4;
pub const DHCP_TYPE_ACK: u8 = 5;
pub const DHCP_TYPE_NAK: u8 = 6;
pub const DHCP_TYPE_RELEASE: u8 = 7;
pub const DHCP_TYPE_INFORM: u8 = 8;
/// DHCP parameter types.
/// https://www.iana.org/assignments/bootp-dhcp-parameters/bootp-dhcp-parameters.txt
pub const DHCP_PARAM_SUBNET_MASK: u8 = 1;
pub const DHCP_PARAM_ROUTER: u8 = 3;
pub const DHCP_PARAM_DNS_SERVER: u8 = 6;
pub const DHCP_PARAM_DOMAIN: u8 = 15;
pub const DHCP_PARAM_ARP_TIMEOUT: u8 = 35;
pub const DHCP_PARAM_NTP_SERVER: u8 = 42;
pub const DHCP_PARAM_TFTP_SERVER_NAME: u8 = 66;
pub const DHCP_PARAM_TFTP_SERVER_IP: u8 = 150;
#[repr(u32)]
pub enum DHCPEvent {
TruncatedOptions = 0,
MalformedOptions,
}
/// The concept of a transaction is more to satisfy the Suricata
/// app-layer. This DHCP parser is actually stateless where each
/// message is its own transaction.
pub struct DHCPTransaction {
tx_id: u64,
pub message: DHCPMessage,
logged: applayer::LoggerFlags,
de_state: Option<*mut core::DetectEngineState>,
events: *mut core::AppLayerDecoderEvents,
}
impl DHCPTransaction {
pub fn new(id: u64, message: DHCPMessage) -> DHCPTransaction {
DHCPTransaction {
tx_id: id,
message: message,
logged: applayer::LoggerFlags::new(),
de_state: None,
events: std::ptr::null_mut(),
}
}
}
export_tx_get_detect_state!(rs_dhcp_tx_get_detect_state, DHCPTransaction);
export_tx_set_detect_state!(rs_dhcp_tx_set_detect_state, DHCPTransaction);
pub struct DHCPState {
// Internal transaction ID.
tx_id: u64,
// List of transactions.
transactions: Vec<DHCPTransaction>,
events: u16,
}
impl DHCPState {
pub fn | () -> DHCPState {
return DHCPState {
tx_id: 0,
transactions: Vec::new(),
events: 0,
};
}
pub fn parse(&mut self, input: &[u8]) -> bool {
match dhcp_parse(input) {
nom::IResult::Done(_, message) => {
let malformed_options = message.malformed_options;
let truncated_options = message.truncated_options;
self.tx_id += 1;
let transaction = DHCPTransaction::new(self.tx_id, message);
self.transactions.push(transaction);
if malformed_options {
self.set_event(DHCPEvent::MalformedOptions);
}
if truncated_options {
self.set_event(DHCPEvent::TruncatedOptions);
}
return true;
}
_ => {
return false;
}
}
}
pub fn get_tx(&mut self, tx_id: u64) -> Option<&DHCPTransaction> {
for tx in &mut self.transactions {
if tx.tx_id == tx_id + 1 {
return Some(tx);
}
}
return None;
}
fn free_tx(&mut self, tx_id: u64) {
let len = self.transactions.len();
let mut found = false;
let mut index = 0;
for i in 0..len {
let tx = &self.transactions[i];
if tx.tx_id == tx_id + 1 {
found = true;
index = i;
break;
}
}
if found {
self.transactions.remove(index);
}
}
fn set_event(&mut self, event: DHCPEvent) {
if let Some(tx) = self.transactions.last_mut() {
core::sc_app_layer_decoder_events_set_event_raw(
&mut tx.events, event as u8);
self.events += 1;
}
}
fn get_tx_iterator(&mut self, min_tx_id: u64, state: &mut u64) ->
Option<(&DHCPTransaction, u64, bool)>
{
let mut index = *state as usize;
let len = self.transactions.len();
while index < len {
let tx = &self.transactions[index];
if tx.tx_id < min_tx_id + 1 {
index += 1;
continue;
}
*state = index as u64 + 1;
return Some((tx, tx.tx_id - 1, (len - index) > 1));
}
return None;
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_probing_parser(_flow: *const Flow,
input: *const libc::uint8_t,
input_len: u32,
_offset: *const u32) -> AppProto {
if input_len < DHCP_MIN_FRAME_LEN {
return ALPROTO_UNKNOWN;
}
let slice = build_slice!(input, input_len as usize);
match parse_header(slice) {
nom::IResult::Done(_, _) => {
return unsafe { ALPROTO_DHCP };
}
_ => {
return ALPROTO_UNKNOWN;
}
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_get_alstate_progress(_tx: *mut libc::c_void,
_direction: libc::uint8_t) -> libc::c_int {
// As this is a stateless parser, simply use 1.
return 1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_progress_completion_status(
_direction: libc::uint8_t) -> libc::c_int {
// The presence of a transaction means we are complete.
return 1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx(state: *mut libc::c_void,
tx_id: libc::uint64_t) -> *mut libc::c_void {
let state = cast_pointer!(state, DHCPState);
match state.get_tx(tx_id) {
Some(tx) => {
return unsafe { transmute(tx) };
}
None => {
return std::ptr::null_mut();
}
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx_count(state: *mut libc::c_void) -> libc::uint64_t {
let state = cast_pointer!(state, DHCPState);
return state.tx_id;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_parse(_flow: *const core::Flow,
state: *mut libc::c_void,
_pstate: *mut libc::c_void,
input: *const libc::uint8_t,
input_len: u32,
_data: *const libc::c_void,
_flags: u8) -> i8 {
let state = cast_pointer!(state, DHCPState);
let buf = build_slice!(input, input_len as usize);
if state.parse(buf) {
return 1;
}
return -1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_tx_free(
state: *mut libc::c_void,
tx_id: libc::uint64_t)
{
let state = cast_pointer!(state, DHCPState);
state.free_tx(tx_id);
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_new() -> *mut libc::c_void {
let state = DHCPState::new();
let boxed = Box::new(state);
return unsafe {
transmute(boxed)
};
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_free(state: *mut libc::c_void) {
// Just unbox...
let _drop: Box<DHCPState> = unsafe { transmute(state) };
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_get_logged(_state: *mut libc::c_void, tx: *mut libc::c_void) -> u32 {
let tx = cast_pointer!(tx, DHCPTransaction);
return tx.logged.get();
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_set_logged(_state: *mut libc::c_void,
tx: *mut libc::c_void,
logged: libc::uint32_t) {
let tx = cast_pointer!(tx, DHCPTransaction);
tx.logged.set(logged);
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_events(state: *mut libc::c_void,
tx_id: libc::uint64_t)
-> *mut core::AppLayerDecoderEvents
{
let state = cast_pointer!(state, DHCPState);
match state.get_tx(tx_id) {
Some(tx) => tx.events,
_ => std::ptr::null_mut(),
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_event_info(
event_name: *const libc::c_char,
event_id: *mut libc::c_int,
event_type: *mut core::AppLayerEventType)
-> libc::c_int
{
if event_name == std::ptr::null() {
return -1;
}
let c_event_name: &CStr = unsafe { CStr::from_ptr(event_name) };
let event = match c_event_name.to_str() {
Ok(s) => {
match s {
"malformed_options" => DHCPEvent::MalformedOptions as i32,
"truncated_options" => DHCPEvent::TruncatedOptions as i32,
_ => -1, // unknown event
}
},
Err(_) => -1, // UTF-8 conversion failed
};
unsafe{
*event_type = core::APP_LAYER_EVENT_TYPE_TRANSACTION;
*event_id = event as libc::c_int;
};
0
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx_iterator(
_ipproto: libc::uint8_t,
_alproto: AppProto,
state: *mut libc::c_void,
min_tx_id: libc::uint64_t,
_max_tx_id: libc::uint64_t,
istate: &mut libc::uint64_t)
-> applayer::AppLayerGetTxIterTuple
{
let state = cast_pointer!(state, DHCPState);
match state.get_tx_iterator(min_tx_id, istate) {
Some((tx, out_tx_id, has_next)) => {
let c_tx = unsafe { transmute(tx) };
let ires = applayer::AppLayerGetTxIterTuple::with_values(
c_tx, out_tx_id, has_next);
return ires;
}
None => {
return applayer::AppLayerGetTxIterTuple::not_found();
}
}
}
const PARSER_NAME: &'static [u8] = b"dhcp\0";
#[no_mangle]
pub unsafe extern "C" fn rs_dhcp_register_parser() {
SCLogDebug!("Registering DHCP parser.");
let ports = CString::new("[67,68]").unwrap();
let parser = RustParser {
name: PARSER_NAME.as_ptr() as *const libc::c_char,
default_port: ports.as_ptr(),
ipproto: libc::IPPROTO_UDP,
probe_ts: rs_dhcp_probing_parser,
probe_tc: rs_dhcp_probing_parser,
min_depth: 0,
max_depth: 16,
state_new: rs_dhcp_state_new,
state_free: rs_dhcp_state_free,
tx_free: rs_dhcp_state_tx_free,
parse_ts: rs_dhcp_parse,
parse_tc: rs_dhcp_parse,
get_tx_count: rs_dhcp_state_get_tx_count,
get_tx: rs_dhcp_state_get_tx,
tx_get_comp_st: rs_dhcp_state_progress_completion_status,
tx_get_progress: rs_dhcp_tx_get_alstate_progress,
get_tx_logged: Some(rs_dhcp_tx_get_logged),
set_tx_logged: Some(rs_dhcp_tx_set_logged),
get_de_state: rs_dhcp_tx_get_detect_state,
set_de_state: rs_dhcp_tx_set_detect_state,
get_events: Some(rs_dhcp_state_get_events),
get_eventinfo: Some(rs_dhcp_state_get_event_info),
localstorage_new: None,
localstorage_free: None,
get_tx_mpm_id: None,
set_tx_mpm_id: None,
get_files: None,
get_tx_iterator: Some(rs_dhcp_state_get_tx_iterator),
};
let ip_proto_str = CString::new("udp").unwrap();
if AppLayerProtoDetectConfProtoDetectionEnabled(ip_proto_str.as_ptr(), parser.name) != 0 {
let alproto = AppLayerRegisterProtocolDetection(&parser, 1);
ALPROTO_DHCP = alproto;
if AppLayerParserConfParserEnabled(ip_proto_str.as_ptr(), parser.name) != 0 {
let _ = AppLayerRegisterParser(&parser, alproto);
}
} else {
SCLogDebug!("Protocol detector and parser disabled for DHCP.");
}
}
| new | identifier_name |
dhcp.rs | /* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use applayer;
use core;
use core::{ALPROTO_UNKNOWN, AppProto, Flow};
use dhcp::parser::*;
use libc;
use log::*;
use nom;
use parser::*;
use std;
use std::ffi::{CStr,CString};
use std::mem::transmute;
static mut ALPROTO_DHCP: AppProto = ALPROTO_UNKNOWN;
static DHCP_MIN_FRAME_LEN: u32 = 232;
pub const BOOTP_REQUEST: u8 = 1;
pub const BOOTP_REPLY: u8 = 2;
// DHCP option types. Names based on IANA naming:
// https://www.iana.org/assignments/bootp-dhcp-parameters/bootp-dhcp-parameters.xhtml
pub const DHCP_OPT_SUBNET_MASK: u8 = 1;
pub const DHCP_OPT_ROUTERS: u8 = 3;
pub const DHCP_OPT_DNS_SERVER: u8 = 6;
pub const DHCP_OPT_HOSTNAME: u8 = 12;
pub const DHCP_OPT_REQUESTED_IP: u8 = 50;
pub const DHCP_OPT_ADDRESS_TIME: u8 = 51;
pub const DHCP_OPT_TYPE: u8 = 53;
pub const DHCP_OPT_SERVER_ID: u8 = 54;
pub const DHCP_OPT_PARAMETER_LIST: u8 = 55;
pub const DHCP_OPT_RENEWAL_TIME: u8 = 58;
pub const DHCP_OPT_REBINDING_TIME: u8 = 59;
pub const DHCP_OPT_CLIENT_ID: u8 = 61;
pub const DHCP_OPT_END: u8 = 255;
/// DHCP message types.
pub const DHCP_TYPE_DISCOVER: u8 = 1;
pub const DHCP_TYPE_OFFER: u8 = 2;
pub const DHCP_TYPE_REQUEST: u8 = 3;
pub const DHCP_TYPE_DECLINE: u8 = 4;
pub const DHCP_TYPE_ACK: u8 = 5;
pub const DHCP_TYPE_NAK: u8 = 6;
pub const DHCP_TYPE_RELEASE: u8 = 7;
pub const DHCP_TYPE_INFORM: u8 = 8;
/// DHCP parameter types.
/// https://www.iana.org/assignments/bootp-dhcp-parameters/bootp-dhcp-parameters.txt
pub const DHCP_PARAM_SUBNET_MASK: u8 = 1;
pub const DHCP_PARAM_ROUTER: u8 = 3;
pub const DHCP_PARAM_DNS_SERVER: u8 = 6;
pub const DHCP_PARAM_DOMAIN: u8 = 15;
pub const DHCP_PARAM_ARP_TIMEOUT: u8 = 35;
pub const DHCP_PARAM_NTP_SERVER: u8 = 42;
pub const DHCP_PARAM_TFTP_SERVER_NAME: u8 = 66;
pub const DHCP_PARAM_TFTP_SERVER_IP: u8 = 150;
#[repr(u32)]
pub enum DHCPEvent {
TruncatedOptions = 0,
MalformedOptions,
}
/// The concept of a transaction is more to satisfy the Suricata
/// app-layer. This DHCP parser is actually stateless where each
/// message is its own transaction.
pub struct DHCPTransaction {
tx_id: u64,
pub message: DHCPMessage,
logged: applayer::LoggerFlags,
de_state: Option<*mut core::DetectEngineState>,
events: *mut core::AppLayerDecoderEvents,
}
impl DHCPTransaction {
pub fn new(id: u64, message: DHCPMessage) -> DHCPTransaction {
DHCPTransaction {
tx_id: id,
message: message,
logged: applayer::LoggerFlags::new(),
de_state: None,
events: std::ptr::null_mut(),
}
}
}
export_tx_get_detect_state!(rs_dhcp_tx_get_detect_state, DHCPTransaction);
export_tx_set_detect_state!(rs_dhcp_tx_set_detect_state, DHCPTransaction);
pub struct DHCPState {
// Internal transaction ID.
tx_id: u64,
// List of transactions.
transactions: Vec<DHCPTransaction>,
events: u16,
}
impl DHCPState {
pub fn new() -> DHCPState {
return DHCPState {
tx_id: 0,
transactions: Vec::new(),
events: 0,
};
}
pub fn parse(&mut self, input: &[u8]) -> bool {
match dhcp_parse(input) {
nom::IResult::Done(_, message) => {
let malformed_options = message.malformed_options;
let truncated_options = message.truncated_options;
self.tx_id += 1;
let transaction = DHCPTransaction::new(self.tx_id, message);
self.transactions.push(transaction);
if malformed_options {
self.set_event(DHCPEvent::MalformedOptions);
}
if truncated_options {
self.set_event(DHCPEvent::TruncatedOptions);
}
return true;
}
_ => {
return false;
}
}
}
pub fn get_tx(&mut self, tx_id: u64) -> Option<&DHCPTransaction> {
for tx in &mut self.transactions {
if tx.tx_id == tx_id + 1 {
return Some(tx);
}
}
return None;
}
fn free_tx(&mut self, tx_id: u64) {
let len = self.transactions.len();
let mut found = false;
let mut index = 0;
for i in 0..len {
let tx = &self.transactions[i];
if tx.tx_id == tx_id + 1 {
found = true;
index = i;
break;
}
}
if found {
self.transactions.remove(index);
}
}
fn set_event(&mut self, event: DHCPEvent) {
if let Some(tx) = self.transactions.last_mut() {
core::sc_app_layer_decoder_events_set_event_raw(
&mut tx.events, event as u8);
self.events += 1;
}
}
fn get_tx_iterator(&mut self, min_tx_id: u64, state: &mut u64) ->
Option<(&DHCPTransaction, u64, bool)>
{
let mut index = *state as usize;
let len = self.transactions.len();
while index < len {
let tx = &self.transactions[index];
if tx.tx_id < min_tx_id + 1 {
index += 1;
continue;
}
*state = index as u64 + 1;
return Some((tx, tx.tx_id - 1, (len - index) > 1));
}
return None;
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_probing_parser(_flow: *const Flow,
input: *const libc::uint8_t,
input_len: u32,
_offset: *const u32) -> AppProto {
if input_len < DHCP_MIN_FRAME_LEN {
return ALPROTO_UNKNOWN;
}
let slice = build_slice!(input, input_len as usize);
match parse_header(slice) {
nom::IResult::Done(_, _) => {
return unsafe { ALPROTO_DHCP };
}
_ => {
return ALPROTO_UNKNOWN;
}
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_get_alstate_progress(_tx: *mut libc::c_void,
_direction: libc::uint8_t) -> libc::c_int {
// As this is a stateless parser, simply use 1.
return 1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_progress_completion_status(
_direction: libc::uint8_t) -> libc::c_int {
// The presence of a transaction means we are complete.
return 1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx(state: *mut libc::c_void,
tx_id: libc::uint64_t) -> *mut libc::c_void {
let state = cast_pointer!(state, DHCPState);
match state.get_tx(tx_id) {
Some(tx) => {
return unsafe { transmute(tx) };
}
None => {
return std::ptr::null_mut();
}
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx_count(state: *mut libc::c_void) -> libc::uint64_t {
let state = cast_pointer!(state, DHCPState);
return state.tx_id;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_parse(_flow: *const core::Flow,
state: *mut libc::c_void,
_pstate: *mut libc::c_void,
input: *const libc::uint8_t,
input_len: u32,
_data: *const libc::c_void,
_flags: u8) -> i8 {
let state = cast_pointer!(state, DHCPState);
let buf = build_slice!(input, input_len as usize);
if state.parse(buf) {
return 1;
}
return -1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_tx_free(
state: *mut libc::c_void,
tx_id: libc::uint64_t)
{
let state = cast_pointer!(state, DHCPState);
state.free_tx(tx_id);
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_new() -> *mut libc::c_void {
let state = DHCPState::new();
let boxed = Box::new(state);
return unsafe {
transmute(boxed)
};
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_free(state: *mut libc::c_void) |
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_get_logged(_state: *mut libc::c_void, tx: *mut libc::c_void) -> u32 {
let tx = cast_pointer!(tx, DHCPTransaction);
return tx.logged.get();
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_set_logged(_state: *mut libc::c_void,
tx: *mut libc::c_void,
logged: libc::uint32_t) {
let tx = cast_pointer!(tx, DHCPTransaction);
tx.logged.set(logged);
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_events(state: *mut libc::c_void,
tx_id: libc::uint64_t)
-> *mut core::AppLayerDecoderEvents
{
let state = cast_pointer!(state, DHCPState);
match state.get_tx(tx_id) {
Some(tx) => tx.events,
_ => std::ptr::null_mut(),
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_event_info(
event_name: *const libc::c_char,
event_id: *mut libc::c_int,
event_type: *mut core::AppLayerEventType)
-> libc::c_int
{
if event_name == std::ptr::null() {
return -1;
}
let c_event_name: &CStr = unsafe { CStr::from_ptr(event_name) };
let event = match c_event_name.to_str() {
Ok(s) => {
match s {
"malformed_options" => DHCPEvent::MalformedOptions as i32,
"truncated_options" => DHCPEvent::TruncatedOptions as i32,
_ => -1, // unknown event
}
},
Err(_) => -1, // UTF-8 conversion failed
};
unsafe{
*event_type = core::APP_LAYER_EVENT_TYPE_TRANSACTION;
*event_id = event as libc::c_int;
};
0
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx_iterator(
_ipproto: libc::uint8_t,
_alproto: AppProto,
state: *mut libc::c_void,
min_tx_id: libc::uint64_t,
_max_tx_id: libc::uint64_t,
istate: &mut libc::uint64_t)
-> applayer::AppLayerGetTxIterTuple
{
let state = cast_pointer!(state, DHCPState);
match state.get_tx_iterator(min_tx_id, istate) {
Some((tx, out_tx_id, has_next)) => {
let c_tx = unsafe { transmute(tx) };
let ires = applayer::AppLayerGetTxIterTuple::with_values(
c_tx, out_tx_id, has_next);
return ires;
}
None => {
return applayer::AppLayerGetTxIterTuple::not_found();
}
}
}
const PARSER_NAME: &'static [u8] = b"dhcp\0";
#[no_mangle]
pub unsafe extern "C" fn rs_dhcp_register_parser() {
SCLogDebug!("Registering DHCP parser.");
let ports = CString::new("[67,68]").unwrap();
let parser = RustParser {
name: PARSER_NAME.as_ptr() as *const libc::c_char,
default_port: ports.as_ptr(),
ipproto: libc::IPPROTO_UDP,
probe_ts: rs_dhcp_probing_parser,
probe_tc: rs_dhcp_probing_parser,
min_depth: 0,
max_depth: 16,
state_new: rs_dhcp_state_new,
state_free: rs_dhcp_state_free,
tx_free: rs_dhcp_state_tx_free,
parse_ts: rs_dhcp_parse,
parse_tc: rs_dhcp_parse,
get_tx_count: rs_dhcp_state_get_tx_count,
get_tx: rs_dhcp_state_get_tx,
tx_get_comp_st: rs_dhcp_state_progress_completion_status,
tx_get_progress: rs_dhcp_tx_get_alstate_progress,
get_tx_logged: Some(rs_dhcp_tx_get_logged),
set_tx_logged: Some(rs_dhcp_tx_set_logged),
get_de_state: rs_dhcp_tx_get_detect_state,
set_de_state: rs_dhcp_tx_set_detect_state,
get_events: Some(rs_dhcp_state_get_events),
get_eventinfo: Some(rs_dhcp_state_get_event_info),
localstorage_new: None,
localstorage_free: None,
get_tx_mpm_id: None,
set_tx_mpm_id: None,
get_files: None,
get_tx_iterator: Some(rs_dhcp_state_get_tx_iterator),
};
let ip_proto_str = CString::new("udp").unwrap();
if AppLayerProtoDetectConfProtoDetectionEnabled(ip_proto_str.as_ptr(), parser.name) != 0 {
let alproto = AppLayerRegisterProtocolDetection(&parser, 1);
ALPROTO_DHCP = alproto;
if AppLayerParserConfParserEnabled(ip_proto_str.as_ptr(), parser.name) != 0 {
let _ = AppLayerRegisterParser(&parser, alproto);
}
} else {
SCLogDebug!("Protocol detector and parser disabled for DHCP.");
}
}
| {
// Just unbox...
let _drop: Box<DHCPState> = unsafe { transmute(state) };
} | identifier_body |
dhcp.rs | /* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
use applayer;
use core;
use core::{ALPROTO_UNKNOWN, AppProto, Flow};
use dhcp::parser::*;
use libc;
use log::*;
use nom;
use parser::*;
use std;
use std::ffi::{CStr,CString};
use std::mem::transmute;
static mut ALPROTO_DHCP: AppProto = ALPROTO_UNKNOWN;
static DHCP_MIN_FRAME_LEN: u32 = 232;
pub const BOOTP_REQUEST: u8 = 1;
pub const BOOTP_REPLY: u8 = 2;
// DHCP option types. Names based on IANA naming:
// https://www.iana.org/assignments/bootp-dhcp-parameters/bootp-dhcp-parameters.xhtml
pub const DHCP_OPT_SUBNET_MASK: u8 = 1;
pub const DHCP_OPT_ROUTERS: u8 = 3;
pub const DHCP_OPT_DNS_SERVER: u8 = 6;
pub const DHCP_OPT_HOSTNAME: u8 = 12;
pub const DHCP_OPT_REQUESTED_IP: u8 = 50;
pub const DHCP_OPT_ADDRESS_TIME: u8 = 51;
pub const DHCP_OPT_TYPE: u8 = 53;
pub const DHCP_OPT_SERVER_ID: u8 = 54;
pub const DHCP_OPT_PARAMETER_LIST: u8 = 55;
pub const DHCP_OPT_RENEWAL_TIME: u8 = 58;
pub const DHCP_OPT_REBINDING_TIME: u8 = 59;
pub const DHCP_OPT_CLIENT_ID: u8 = 61;
pub const DHCP_OPT_END: u8 = 255;
/// DHCP message types.
pub const DHCP_TYPE_DISCOVER: u8 = 1;
pub const DHCP_TYPE_OFFER: u8 = 2;
pub const DHCP_TYPE_REQUEST: u8 = 3;
pub const DHCP_TYPE_DECLINE: u8 = 4;
pub const DHCP_TYPE_ACK: u8 = 5;
pub const DHCP_TYPE_NAK: u8 = 6;
pub const DHCP_TYPE_RELEASE: u8 = 7;
pub const DHCP_TYPE_INFORM: u8 = 8;
/// DHCP parameter types.
/// https://www.iana.org/assignments/bootp-dhcp-parameters/bootp-dhcp-parameters.txt
pub const DHCP_PARAM_SUBNET_MASK: u8 = 1;
pub const DHCP_PARAM_ROUTER: u8 = 3;
pub const DHCP_PARAM_DNS_SERVER: u8 = 6;
pub const DHCP_PARAM_DOMAIN: u8 = 15;
pub const DHCP_PARAM_ARP_TIMEOUT: u8 = 35;
pub const DHCP_PARAM_NTP_SERVER: u8 = 42;
pub const DHCP_PARAM_TFTP_SERVER_NAME: u8 = 66;
pub const DHCP_PARAM_TFTP_SERVER_IP: u8 = 150;
#[repr(u32)]
pub enum DHCPEvent {
TruncatedOptions = 0,
MalformedOptions,
}
/// The concept of a transaction is more to satisfy the Suricata
/// app-layer. This DHCP parser is actually stateless where each
/// message is its own transaction.
pub struct DHCPTransaction {
tx_id: u64,
pub message: DHCPMessage,
logged: applayer::LoggerFlags,
de_state: Option<*mut core::DetectEngineState>,
events: *mut core::AppLayerDecoderEvents,
}
impl DHCPTransaction {
pub fn new(id: u64, message: DHCPMessage) -> DHCPTransaction {
DHCPTransaction {
tx_id: id,
message: message,
logged: applayer::LoggerFlags::new(),
de_state: None,
events: std::ptr::null_mut(),
}
}
}
export_tx_get_detect_state!(rs_dhcp_tx_get_detect_state, DHCPTransaction);
export_tx_set_detect_state!(rs_dhcp_tx_set_detect_state, DHCPTransaction);
pub struct DHCPState {
// Internal transaction ID.
tx_id: u64,
// List of transactions.
transactions: Vec<DHCPTransaction>,
events: u16,
}
impl DHCPState {
pub fn new() -> DHCPState {
return DHCPState {
tx_id: 0,
transactions: Vec::new(),
events: 0,
};
}
pub fn parse(&mut self, input: &[u8]) -> bool {
match dhcp_parse(input) {
nom::IResult::Done(_, message) => {
let malformed_options = message.malformed_options;
let truncated_options = message.truncated_options;
self.tx_id += 1;
let transaction = DHCPTransaction::new(self.tx_id, message);
self.transactions.push(transaction);
if malformed_options {
self.set_event(DHCPEvent::MalformedOptions);
}
if truncated_options {
self.set_event(DHCPEvent::TruncatedOptions);
}
return true;
}
_ => {
return false;
}
}
}
pub fn get_tx(&mut self, tx_id: u64) -> Option<&DHCPTransaction> {
for tx in &mut self.transactions {
if tx.tx_id == tx_id + 1 {
return Some(tx);
}
}
return None;
}
fn free_tx(&mut self, tx_id: u64) {
let len = self.transactions.len();
let mut found = false;
let mut index = 0;
for i in 0..len {
let tx = &self.transactions[i];
if tx.tx_id == tx_id + 1 {
found = true;
index = i;
break;
}
}
if found {
self.transactions.remove(index);
}
}
fn set_event(&mut self, event: DHCPEvent) {
if let Some(tx) = self.transactions.last_mut() {
core::sc_app_layer_decoder_events_set_event_raw(
&mut tx.events, event as u8);
self.events += 1;
}
}
fn get_tx_iterator(&mut self, min_tx_id: u64, state: &mut u64) ->
Option<(&DHCPTransaction, u64, bool)>
{
let mut index = *state as usize;
let len = self.transactions.len();
while index < len {
let tx = &self.transactions[index];
if tx.tx_id < min_tx_id + 1 {
index += 1;
continue;
}
*state = index as u64 + 1;
return Some((tx, tx.tx_id - 1, (len - index) > 1));
}
return None;
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_probing_parser(_flow: *const Flow,
input: *const libc::uint8_t,
input_len: u32,
_offset: *const u32) -> AppProto {
if input_len < DHCP_MIN_FRAME_LEN {
return ALPROTO_UNKNOWN;
}
let slice = build_slice!(input, input_len as usize);
match parse_header(slice) {
nom::IResult::Done(_, _) => {
return unsafe { ALPROTO_DHCP };
}
_ => {
return ALPROTO_UNKNOWN;
}
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_get_alstate_progress(_tx: *mut libc::c_void,
_direction: libc::uint8_t) -> libc::c_int {
// As this is a stateless parser, simply use 1.
return 1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_progress_completion_status(
_direction: libc::uint8_t) -> libc::c_int {
// The presence of a transaction means we are complete.
return 1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx(state: *mut libc::c_void,
tx_id: libc::uint64_t) -> *mut libc::c_void {
let state = cast_pointer!(state, DHCPState);
match state.get_tx(tx_id) {
Some(tx) => {
return unsafe { transmute(tx) };
}
None => {
return std::ptr::null_mut();
}
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx_count(state: *mut libc::c_void) -> libc::uint64_t {
let state = cast_pointer!(state, DHCPState);
return state.tx_id;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_parse(_flow: *const core::Flow,
state: *mut libc::c_void,
_pstate: *mut libc::c_void,
input: *const libc::uint8_t,
input_len: u32,
_data: *const libc::c_void,
_flags: u8) -> i8 {
let state = cast_pointer!(state, DHCPState);
let buf = build_slice!(input, input_len as usize);
if state.parse(buf) {
return 1;
}
return -1;
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_tx_free(
state: *mut libc::c_void,
tx_id: libc::uint64_t)
{
let state = cast_pointer!(state, DHCPState);
state.free_tx(tx_id);
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_new() -> *mut libc::c_void {
let state = DHCPState::new();
let boxed = Box::new(state);
return unsafe {
transmute(boxed)
};
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_free(state: *mut libc::c_void) {
// Just unbox...
let _drop: Box<DHCPState> = unsafe { transmute(state) };
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_get_logged(_state: *mut libc::c_void, tx: *mut libc::c_void) -> u32 {
let tx = cast_pointer!(tx, DHCPTransaction);
return tx.logged.get();
}
#[no_mangle]
pub extern "C" fn rs_dhcp_tx_set_logged(_state: *mut libc::c_void, |
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_events(state: *mut libc::c_void,
tx_id: libc::uint64_t)
-> *mut core::AppLayerDecoderEvents
{
let state = cast_pointer!(state, DHCPState);
match state.get_tx(tx_id) {
Some(tx) => tx.events,
_ => std::ptr::null_mut(),
}
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_event_info(
event_name: *const libc::c_char,
event_id: *mut libc::c_int,
event_type: *mut core::AppLayerEventType)
-> libc::c_int
{
if event_name == std::ptr::null() {
return -1;
}
let c_event_name: &CStr = unsafe { CStr::from_ptr(event_name) };
let event = match c_event_name.to_str() {
Ok(s) => {
match s {
"malformed_options" => DHCPEvent::MalformedOptions as i32,
"truncated_options" => DHCPEvent::TruncatedOptions as i32,
_ => -1, // unknown event
}
},
Err(_) => -1, // UTF-8 conversion failed
};
unsafe{
*event_type = core::APP_LAYER_EVENT_TYPE_TRANSACTION;
*event_id = event as libc::c_int;
};
0
}
#[no_mangle]
pub extern "C" fn rs_dhcp_state_get_tx_iterator(
_ipproto: libc::uint8_t,
_alproto: AppProto,
state: *mut libc::c_void,
min_tx_id: libc::uint64_t,
_max_tx_id: libc::uint64_t,
istate: &mut libc::uint64_t)
-> applayer::AppLayerGetTxIterTuple
{
let state = cast_pointer!(state, DHCPState);
match state.get_tx_iterator(min_tx_id, istate) {
Some((tx, out_tx_id, has_next)) => {
let c_tx = unsafe { transmute(tx) };
let ires = applayer::AppLayerGetTxIterTuple::with_values(
c_tx, out_tx_id, has_next);
return ires;
}
None => {
return applayer::AppLayerGetTxIterTuple::not_found();
}
}
}
const PARSER_NAME: &'static [u8] = b"dhcp\0";
#[no_mangle]
pub unsafe extern "C" fn rs_dhcp_register_parser() {
SCLogDebug!("Registering DHCP parser.");
let ports = CString::new("[67,68]").unwrap();
let parser = RustParser {
name: PARSER_NAME.as_ptr() as *const libc::c_char,
default_port: ports.as_ptr(),
ipproto: libc::IPPROTO_UDP,
probe_ts: rs_dhcp_probing_parser,
probe_tc: rs_dhcp_probing_parser,
min_depth: 0,
max_depth: 16,
state_new: rs_dhcp_state_new,
state_free: rs_dhcp_state_free,
tx_free: rs_dhcp_state_tx_free,
parse_ts: rs_dhcp_parse,
parse_tc: rs_dhcp_parse,
get_tx_count: rs_dhcp_state_get_tx_count,
get_tx: rs_dhcp_state_get_tx,
tx_get_comp_st: rs_dhcp_state_progress_completion_status,
tx_get_progress: rs_dhcp_tx_get_alstate_progress,
get_tx_logged: Some(rs_dhcp_tx_get_logged),
set_tx_logged: Some(rs_dhcp_tx_set_logged),
get_de_state: rs_dhcp_tx_get_detect_state,
set_de_state: rs_dhcp_tx_set_detect_state,
get_events: Some(rs_dhcp_state_get_events),
get_eventinfo: Some(rs_dhcp_state_get_event_info),
localstorage_new: None,
localstorage_free: None,
get_tx_mpm_id: None,
set_tx_mpm_id: None,
get_files: None,
get_tx_iterator: Some(rs_dhcp_state_get_tx_iterator),
};
let ip_proto_str = CString::new("udp").unwrap();
if AppLayerProtoDetectConfProtoDetectionEnabled(ip_proto_str.as_ptr(), parser.name) != 0 {
let alproto = AppLayerRegisterProtocolDetection(&parser, 1);
ALPROTO_DHCP = alproto;
if AppLayerParserConfParserEnabled(ip_proto_str.as_ptr(), parser.name) != 0 {
let _ = AppLayerRegisterParser(&parser, alproto);
}
} else {
SCLogDebug!("Protocol detector and parser disabled for DHCP.");
}
} | tx: *mut libc::c_void,
logged: libc::uint32_t) {
let tx = cast_pointer!(tx, DHCPTransaction);
tx.logged.set(logged);
} | random_line_split |
extern-call.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::libc;
mod rustrt {
use std::libc;
pub extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
fact(data - 1u) * data
}
}
fn fact(n: uint) -> uint {
unsafe {
debug!("n = %?", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() | {
let result = fact(10u);
debug!("result = %?", result);
assert_eq!(result, 3628800u);
} | identifier_body |
|
extern-call.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::libc;
mod rustrt {
use std::libc;
pub extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u | else {
fact(data - 1u) * data
}
}
fn fact(n: uint) -> uint {
unsafe {
debug!("n = %?", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
let result = fact(10u);
debug!("result = %?", result);
assert_eq!(result, 3628800u);
}
| {
data
} | conditional_block |
extern-call.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::libc;
mod rustrt {
use std::libc;
pub extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data
} else {
fact(data - 1u) * data
}
}
fn | (n: uint) -> uint {
unsafe {
debug!("n = %?", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
let result = fact(10u);
debug!("result = %?", result);
assert_eq!(result, 3628800u);
}
| fact | identifier_name |
extern-call.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::libc;
mod rustrt {
use std::libc;
pub extern {
pub fn rust_dbg_call(cb: *u8, data: libc::uintptr_t)
-> libc::uintptr_t;
}
}
| } else {
fact(data - 1u) * data
}
}
fn fact(n: uint) -> uint {
unsafe {
debug!("n = %?", n);
rustrt::rust_dbg_call(cb, n)
}
}
pub fn main() {
let result = fact(10u);
debug!("result = %?", result);
assert_eq!(result, 3628800u);
} | extern fn cb(data: libc::uintptr_t) -> libc::uintptr_t {
if data == 1u {
data | random_line_split |
document.rs | use common::{ApiError, Body, Credentials, Query, discovery_api};
use hyper::method::Method::{Delete, Get, Post};
use serde_json::Value;
pub fn detail(creds: &Credentials,
env_id: &str,
collection_id: &str,
document_id: &str)
-> Result<Value, ApiError> {
let path = "/v1/environments/".to_string() + env_id + "/collections/" +
collection_id +
"/documents/" + document_id;
Ok(discovery_api(creds, Get, &path, Query::None, &Body::None)?)
}
pub fn delete(creds: &Credentials,
env_id: &str,
collection_id: &str,
document_id: &str)
-> Result<Value, ApiError> {
let path = "/v1/environments/".to_string() + env_id + "/collections/" +
collection_id +
"/documents/" + document_id;
Ok(discovery_api(creds, Delete, &path, Query::None, &Body::None)?)
}
pub fn create(creds: &Credentials,
env_id: &str,
collection_id: &str,
configuration_id: Option<&str>,
document_id: Option<&str>,
filename: &str)
-> Result<Value, ApiError> {
let path = match document_id {
Some(id) => {
"/v1/environments/".to_string() + env_id + "/collections/" +
collection_id + "/documents/" + id
}
None => {
"/v1/environments/".to_string() + env_id + "/collections/" +
collection_id + "/documents"
}
}; | };
Ok(discovery_api(creds, Post, &path, q, &Body::Filename(filename))?)
} | let q = match configuration_id {
Some(id) => Query::Config(id.to_string()),
None => Query::None, | random_line_split |
document.rs | use common::{ApiError, Body, Credentials, Query, discovery_api};
use hyper::method::Method::{Delete, Get, Post};
use serde_json::Value;
pub fn | (creds: &Credentials,
env_id: &str,
collection_id: &str,
document_id: &str)
-> Result<Value, ApiError> {
let path = "/v1/environments/".to_string() + env_id + "/collections/" +
collection_id +
"/documents/" + document_id;
Ok(discovery_api(creds, Get, &path, Query::None, &Body::None)?)
}
pub fn delete(creds: &Credentials,
env_id: &str,
collection_id: &str,
document_id: &str)
-> Result<Value, ApiError> {
let path = "/v1/environments/".to_string() + env_id + "/collections/" +
collection_id +
"/documents/" + document_id;
Ok(discovery_api(creds, Delete, &path, Query::None, &Body::None)?)
}
pub fn create(creds: &Credentials,
env_id: &str,
collection_id: &str,
configuration_id: Option<&str>,
document_id: Option<&str>,
filename: &str)
-> Result<Value, ApiError> {
let path = match document_id {
Some(id) => {
"/v1/environments/".to_string() + env_id + "/collections/" +
collection_id + "/documents/" + id
}
None => {
"/v1/environments/".to_string() + env_id + "/collections/" +
collection_id + "/documents"
}
};
let q = match configuration_id {
Some(id) => Query::Config(id.to_string()),
None => Query::None,
};
Ok(discovery_api(creds, Post, &path, q, &Body::Filename(filename))?)
}
| detail | identifier_name |
document.rs | use common::{ApiError, Body, Credentials, Query, discovery_api};
use hyper::method::Method::{Delete, Get, Post};
use serde_json::Value;
pub fn detail(creds: &Credentials,
env_id: &str,
collection_id: &str,
document_id: &str)
-> Result<Value, ApiError> {
let path = "/v1/environments/".to_string() + env_id + "/collections/" +
collection_id +
"/documents/" + document_id;
Ok(discovery_api(creds, Get, &path, Query::None, &Body::None)?)
}
pub fn delete(creds: &Credentials,
env_id: &str,
collection_id: &str,
document_id: &str)
-> Result<Value, ApiError> {
let path = "/v1/environments/".to_string() + env_id + "/collections/" +
collection_id +
"/documents/" + document_id;
Ok(discovery_api(creds, Delete, &path, Query::None, &Body::None)?)
}
pub fn create(creds: &Credentials,
env_id: &str,
collection_id: &str,
configuration_id: Option<&str>,
document_id: Option<&str>,
filename: &str)
-> Result<Value, ApiError> | {
let path = match document_id {
Some(id) => {
"/v1/environments/".to_string() + env_id + "/collections/" +
collection_id + "/documents/" + id
}
None => {
"/v1/environments/".to_string() + env_id + "/collections/" +
collection_id + "/documents"
}
};
let q = match configuration_id {
Some(id) => Query::Config(id.to_string()),
None => Query::None,
};
Ok(discovery_api(creds, Post, &path, q, &Body::Filename(filename))?)
} | identifier_body |
|
ui.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic values for UI properties.
use std::fmt::{self, Write};
use style_traits::cursor::CursorKind;
use style_traits::{CssWriter, ToCss};
/// A generic value for the `cursor` property.
///
/// https://drafts.csswg.org/css-ui/#cursor
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue)]
pub struct Cursor<Image> {
/// The parsed images for the cursor.
pub images: Box<[Image]>,
/// The kind of the cursor [default | help | ...].
pub keyword: CursorKind,
}
impl<Image> Cursor<Image> {
/// Set `cursor` to `auto`
#[inline]
pub fn auto() -> Self {
Self {
images: vec![].into_boxed_slice(),
keyword: CursorKind::Auto,
}
}
}
impl<Image: ToCss> ToCss for Cursor<Image> {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
for image in &*self.images {
image.to_css(dest)?;
dest.write_str(", ")?;
}
self.keyword.to_css(dest)
}
}
/// A generic value for item of `image cursors`.
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue)]
pub struct CursorImage<ImageUrl, Number> {
/// The url to parse images from.
pub url: ImageUrl,
/// The <x> and <y> coordinates.
pub hotspot: Option<(Number, Number)>,
}
impl<ImageUrl: ToCss, Number: ToCss> ToCss for CursorImage<ImageUrl, Number> {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
self.url.to_css(dest)?;
if let Some((ref x, ref y)) = self.hotspot |
Ok(())
}
}
/// A generic value for `scrollbar-color` property.
///
/// https://drafts.csswg.org/css-scrollbars-1/#scrollbar-color
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToAnimatedValue,
ToAnimatedZero,
ToComputedValue,
ToCss,
)]
pub enum ScrollbarColor<Color> {
/// `auto`
Auto,
/// `<color>{2}`
Colors {
/// First `<color>`, for color of the scrollbar thumb.
thumb: Color,
/// Second `<color>`, for color of the scrollbar track.
track: Color,
},
}
impl<Color> Default for ScrollbarColor<Color> {
#[inline]
fn default() -> Self {
ScrollbarColor::Auto
}
}
| {
dest.write_str(" ")?;
x.to_css(dest)?;
dest.write_str(" ")?;
y.to_css(dest)?;
} | conditional_block |
ui.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic values for UI properties.
use std::fmt::{self, Write};
use style_traits::cursor::CursorKind;
use style_traits::{CssWriter, ToCss};
/// A generic value for the `cursor` property.
///
/// https://drafts.csswg.org/css-ui/#cursor
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue)]
pub struct Cursor<Image> {
/// The parsed images for the cursor.
pub images: Box<[Image]>,
/// The kind of the cursor [default | help | ...].
pub keyword: CursorKind,
}
impl<Image> Cursor<Image> {
/// Set `cursor` to `auto`
#[inline]
pub fn auto() -> Self {
Self {
images: vec![].into_boxed_slice(),
keyword: CursorKind::Auto,
}
}
}
impl<Image: ToCss> ToCss for Cursor<Image> {
fn | <W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
for image in &*self.images {
image.to_css(dest)?;
dest.write_str(", ")?;
}
self.keyword.to_css(dest)
}
}
/// A generic value for item of `image cursors`.
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue)]
pub struct CursorImage<ImageUrl, Number> {
/// The url to parse images from.
pub url: ImageUrl,
/// The <x> and <y> coordinates.
pub hotspot: Option<(Number, Number)>,
}
impl<ImageUrl: ToCss, Number: ToCss> ToCss for CursorImage<ImageUrl, Number> {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
self.url.to_css(dest)?;
if let Some((ref x, ref y)) = self.hotspot {
dest.write_str(" ")?;
x.to_css(dest)?;
dest.write_str(" ")?;
y.to_css(dest)?;
}
Ok(())
}
}
/// A generic value for `scrollbar-color` property.
///
/// https://drafts.csswg.org/css-scrollbars-1/#scrollbar-color
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
PartialEq,
SpecifiedValueInfo,
ToAnimatedValue,
ToAnimatedZero,
ToComputedValue,
ToCss,
)]
pub enum ScrollbarColor<Color> {
/// `auto`
Auto,
/// `<color>{2}`
Colors {
/// First `<color>`, for color of the scrollbar thumb.
thumb: Color,
/// Second `<color>`, for color of the scrollbar track.
track: Color,
},
}
impl<Color> Default for ScrollbarColor<Color> {
#[inline]
fn default() -> Self {
ScrollbarColor::Auto
}
}
| to_css | identifier_name |
ui.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic values for UI properties.
use std::fmt::{self, Write};
use style_traits::cursor::CursorKind;
use style_traits::{CssWriter, ToCss};
/// A generic value for the `cursor` property.
///
/// https://drafts.csswg.org/css-ui/#cursor
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue)]
pub struct Cursor<Image> {
/// The parsed images for the cursor.
pub images: Box<[Image]>,
/// The kind of the cursor [default | help | ...].
pub keyword: CursorKind,
}
impl<Image> Cursor<Image> {
/// Set `cursor` to `auto`
#[inline]
pub fn auto() -> Self {
Self {
images: vec![].into_boxed_slice(),
keyword: CursorKind::Auto,
}
}
}
impl<Image: ToCss> ToCss for Cursor<Image> {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
for image in &*self.images {
image.to_css(dest)?;
dest.write_str(", ")?;
}
self.keyword.to_css(dest)
}
}
/// A generic value for item of `image cursors`.
#[derive(Clone, Debug, MallocSizeOf, PartialEq, SpecifiedValueInfo, ToComputedValue)]
pub struct CursorImage<ImageUrl, Number> {
/// The url to parse images from.
pub url: ImageUrl,
/// The <x> and <y> coordinates.
pub hotspot: Option<(Number, Number)>,
}
impl<ImageUrl: ToCss, Number: ToCss> ToCss for CursorImage<ImageUrl, Number> {
fn to_css<W>(&self, dest: &mut CssWriter<W>) -> fmt::Result
where
W: Write,
{
self.url.to_css(dest)?;
if let Some((ref x, ref y)) = self.hotspot {
dest.write_str(" ")?;
x.to_css(dest)?;
dest.write_str(" ")?;
y.to_css(dest)?;
}
Ok(())
}
}
/// A generic value for `scrollbar-color` property.
///
/// https://drafts.csswg.org/css-scrollbars-1/#scrollbar-color
#[derive(
Animate,
Clone,
ComputeSquaredDistance,
Copy,
Debug,
MallocSizeOf,
PartialEq, | ToAnimatedValue,
ToAnimatedZero,
ToComputedValue,
ToCss,
)]
pub enum ScrollbarColor<Color> {
/// `auto`
Auto,
/// `<color>{2}`
Colors {
/// First `<color>`, for color of the scrollbar thumb.
thumb: Color,
/// Second `<color>`, for color of the scrollbar track.
track: Color,
},
}
impl<Color> Default for ScrollbarColor<Color> {
#[inline]
fn default() -> Self {
ScrollbarColor::Auto
}
} | SpecifiedValueInfo, | random_line_split |
styled.rs | use super::*;
use ascii_canvas::AsciiView;
use std::fmt::{Debug, Error, Formatter};
use style::Style;
pub struct Styled {
style: Style,
content: Box<Content>,
}
impl Styled {
pub fn new(style: Style, content: Box<Content>) -> Self {
Styled {
style: style,
content: content,
}
}
}
impl Content for Styled {
fn min_width(&self) -> usize |
fn emit(&self, view: &mut AsciiView) {
self.content.emit(&mut view.styled(self.style))
}
fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<Content>>) {
let style = self.style;
super::into_wrap_items_map(self.content, wrap_items, |item| Styled::new(style, item))
}
}
impl Debug for Styled {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
fmt.debug_struct("Styled")
.field("content", &self.content)
.finish()
}
}
| {
self.content.min_width()
} | identifier_body |
styled.rs | use super::*;
use ascii_canvas::AsciiView;
use std::fmt::{Debug, Error, Formatter};
use style::Style;
pub struct Styled {
style: Style,
content: Box<Content>, | pub fn new(style: Style, content: Box<Content>) -> Self {
Styled {
style: style,
content: content,
}
}
}
impl Content for Styled {
fn min_width(&self) -> usize {
self.content.min_width()
}
fn emit(&self, view: &mut AsciiView) {
self.content.emit(&mut view.styled(self.style))
}
fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<Content>>) {
let style = self.style;
super::into_wrap_items_map(self.content, wrap_items, |item| Styled::new(style, item))
}
}
impl Debug for Styled {
fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
fmt.debug_struct("Styled")
.field("content", &self.content)
.finish()
}
} | }
impl Styled { | random_line_split |
styled.rs | use super::*;
use ascii_canvas::AsciiView;
use std::fmt::{Debug, Error, Formatter};
use style::Style;
pub struct Styled {
style: Style,
content: Box<Content>,
}
impl Styled {
pub fn new(style: Style, content: Box<Content>) -> Self {
Styled {
style: style,
content: content,
}
}
}
impl Content for Styled {
fn min_width(&self) -> usize {
self.content.min_width()
}
fn emit(&self, view: &mut AsciiView) {
self.content.emit(&mut view.styled(self.style))
}
fn into_wrap_items(self: Box<Self>, wrap_items: &mut Vec<Box<Content>>) {
let style = self.style;
super::into_wrap_items_map(self.content, wrap_items, |item| Styled::new(style, item))
}
}
impl Debug for Styled {
fn | (&self, fmt: &mut Formatter) -> Result<(), Error> {
fmt.debug_struct("Styled")
.field("content", &self.content)
.finish()
}
}
| fmt | identifier_name |
const-impl.rs | #![feature(adt_const_params)]
#![crate_name = "foo"]
#[derive(PartialEq, Eq)]
pub enum Order {
Sorted,
Unsorted,
}
// @has foo/struct.VSet.html '//pre[@class="rust struct"]' 'pub struct VSet<T, const ORDER: Order>'
// @has foo/struct.VSet.html '//div[@id="impl-Send"]/h3[@class="code-header in-band"]' 'impl<T, const ORDER: Order> Send for VSet<T, ORDER>'
// @has foo/struct.VSet.html '//div[@id="impl-Sync"]/h3[@class="code-header in-band"]' 'impl<T, const ORDER: Order> Sync for VSet<T, ORDER>'
pub struct VSet<T, const ORDER: Order> {
inner: Vec<T>,
}
// @has foo/struct.VSet.html '//div[@id="impl"]/h3[@class="code-header in-band"]' 'impl<T> VSet<T, {Order::Sorted}>'
impl <T> VSet<T, {Order::Sorted}> {
pub fn new() -> Self {
Self { inner: Vec::new() }
}
}
// @has foo/struct.VSet.html '//div[@id="impl-1"]/h3[@class="code-header in-band"]' 'impl<T> VSet<T, {Order::Unsorted}>'
impl <T> VSet<T, {Order::Unsorted}> {
pub fn new() -> Self {
Self { inner: Vec::new() }
}
}
pub struct | <const S: &'static str>;
// @has foo/struct.Escape.html '//div[@id="impl"]/h3[@class="code-header in-band"]' 'impl Escape<{ r#"<script>alert("Escape");</script>"# }>'
impl Escape<{ r#"<script>alert("Escape");</script>"# }> {
pub fn f() {}
}
| Escape | identifier_name |
const-impl.rs | #![feature(adt_const_params)]
#![crate_name = "foo"]
#[derive(PartialEq, Eq)]
pub enum Order {
Sorted,
Unsorted,
}
// @has foo/struct.VSet.html '//pre[@class="rust struct"]' 'pub struct VSet<T, const ORDER: Order>'
// @has foo/struct.VSet.html '//div[@id="impl-Send"]/h3[@class="code-header in-band"]' 'impl<T, const ORDER: Order> Send for VSet<T, ORDER>'
// @has foo/struct.VSet.html '//div[@id="impl-Sync"]/h3[@class="code-header in-band"]' 'impl<T, const ORDER: Order> Sync for VSet<T, ORDER>'
pub struct VSet<T, const ORDER: Order> {
inner: Vec<T>,
}
// @has foo/struct.VSet.html '//div[@id="impl"]/h3[@class="code-header in-band"]' 'impl<T> VSet<T, {Order::Sorted}>' | }
// @has foo/struct.VSet.html '//div[@id="impl-1"]/h3[@class="code-header in-band"]' 'impl<T> VSet<T, {Order::Unsorted}>'
impl <T> VSet<T, {Order::Unsorted}> {
pub fn new() -> Self {
Self { inner: Vec::new() }
}
}
pub struct Escape<const S: &'static str>;
// @has foo/struct.Escape.html '//div[@id="impl"]/h3[@class="code-header in-band"]' 'impl Escape<{ r#"<script>alert("Escape");</script>"# }>'
impl Escape<{ r#"<script>alert("Escape");</script>"# }> {
pub fn f() {}
} | impl <T> VSet<T, {Order::Sorted}> {
pub fn new() -> Self {
Self { inner: Vec::new() }
} | random_line_split |
instr_shufps.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
#[test]
fn shufps_1() {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM3)), operand3: Some(Literal8(82)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 251, 82], OperandSize::Dword)
}
#[test]
fn | () {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM7)), operand2: Some(IndirectScaledIndexedDisplaced(EBX, ESI, Eight, 909533252, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(46)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 188, 243, 68, 96, 54, 54, 46], OperandSize::Dword)
}
#[test]
fn shufps_3() {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM1)), operand3: Some(Literal8(66)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 241, 66], OperandSize::Qword)
}
#[test]
fn shufps_4() {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM6)), operand2: Some(Indirect(RBX, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(6)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 51, 6], OperandSize::Qword)
}
| shufps_2 | identifier_name |
instr_shufps.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
#[test]
fn shufps_1() {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM3)), operand3: Some(Literal8(82)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 251, 82], OperandSize::Dword)
}
#[test]
fn shufps_2() |
#[test]
fn shufps_3() {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM1)), operand3: Some(Literal8(66)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 241, 66], OperandSize::Qword)
}
#[test]
fn shufps_4() {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM6)), operand2: Some(Indirect(RBX, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(6)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 51, 6], OperandSize::Qword)
}
| {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM7)), operand2: Some(IndirectScaledIndexedDisplaced(EBX, ESI, Eight, 909533252, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(46)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 188, 243, 68, 96, 54, 54, 46], OperandSize::Dword)
} | identifier_body |
instr_shufps.rs | use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;
#[test]
fn shufps_1() {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM7)), operand2: Some(Direct(XMM3)), operand3: Some(Literal8(82)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 251, 82], OperandSize::Dword)
}
|
#[test]
fn shufps_3() {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM6)), operand2: Some(Direct(XMM1)), operand3: Some(Literal8(66)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 241, 66], OperandSize::Qword)
}
#[test]
fn shufps_4() {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM6)), operand2: Some(Indirect(RBX, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(6)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 51, 6], OperandSize::Qword)
} | #[test]
fn shufps_2() {
run_test(&Instruction { mnemonic: Mnemonic::SHUFPS, operand1: Some(Direct(XMM7)), operand2: Some(IndirectScaledIndexedDisplaced(EBX, ESI, Eight, 909533252, Some(OperandSize::Xmmword), None)), operand3: Some(Literal8(46)), operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 198, 188, 243, 68, 96, 54, 54, 46], OperandSize::Dword)
} | random_line_split |
parser.rs | use std::error::Error;
use std::fmt;
use unicode_width::UnicodeWidthStr;
#[derive(Clone)]
pub enum AST {
Output,
Input,
Loop(Vec<AST>),
Right,
Left,
Inc,
Dec,
}
#[derive(Debug)]
pub enum ParseErrorType {
UnclosedLoop,
ExtraCloseLoop,
}
use ParseErrorType::*;
#[derive(Debug)]
pub struct ParseError {
err: ParseErrorType,
line: Vec<u8>,
linenum: usize,
offset: usize,
}
impl ParseError {
fn new(err: ParseErrorType, code: &[u8], i: usize) -> Self {
let (line, linenum, offset) = find_line(code, i);
Self {
err,
line: line.into(),
linenum,
offset,
}
}
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let line = String::from_utf8_lossy(&self.line);
let width = UnicodeWidthStr::width(&line[0..self.offset]);
match self.err {
UnclosedLoop => {
writeln!(f, "reached EOF with unterminated loop")?;
writeln!(f, "Loop started at {}:{}", self.linenum, self.offset)?;
}
ExtraCloseLoop => {
writeln!(
f,
"[ found at {}:{} when not in a loop",
self.linenum, self.offset
)?;
}
};
writeln!(f, "{}", line)?;
write!(f, "{}^", " ".repeat(width))?;
Ok(())
}
}
impl Error for ParseError {}
/// Parses a string of brainfuck code to unoptimized AST
pub fn parse(code: &[u8]) -> Result<Vec<AST>, ParseError> {
let mut i = 0;
_parse(code, &mut i, 0)
}
fn _parse(code: &[u8], i: &mut usize, level: u32) -> Result<Vec<AST>, ParseError> {
// Starting [ of the loop
let start = i.saturating_sub(1);
let mut tokens = Vec::new();
while let Some(c) = code.get(*i) {
*i += 1; | b'<' => tokens.push(AST::Left),
b'[' => tokens.push(AST::Loop(_parse(code, i, level + 1)?)),
b']' => {
return if level == 0 {
Err(ParseError::new(ExtraCloseLoop, code, *i - 1))
} else {
Ok(tokens)
};
}
b',' => tokens.push(AST::Input),
b'.' => tokens.push(AST::Output),
_ => (),
};
}
    if level != 0 {
Err(ParseError::new(UnclosedLoop, code, start))
} else {
Ok(tokens)
}
}
fn find_line(code: &[u8], i: usize) -> (&[u8], usize, usize) {
    let offset = code[0..i].iter().rev().take_while(|x| **x != b'\n').count();
    let end = i + code[i..].iter().take_while(|x| **x != b'\n').count();
let linenum = code[0..(i - offset)]
.iter()
.filter(|x| **x == b'\n')
.count();
(&code[(i - offset)..end], linenum, offset)
} |
match c {
b'+' => tokens.push(AST::Inc),
b'-' => tokens.push(AST::Dec),
b'>' => tokens.push(AST::Right), | random_line_split |
parser.rs | use std::error::Error;
use std::fmt;
use unicode_width::UnicodeWidthStr;
#[derive(Clone)]
pub enum AST {
Output,
Input,
Loop(Vec<AST>),
Right,
Left,
Inc,
Dec,
}
#[derive(Debug)]
pub enum ParseErrorType {
UnclosedLoop,
ExtraCloseLoop,
}
use ParseErrorType::*;
#[derive(Debug)]
pub struct | {
err: ParseErrorType,
line: Vec<u8>,
linenum: usize,
offset: usize,
}
impl ParseError {
fn new(err: ParseErrorType, code: &[u8], i: usize) -> Self {
let (line, linenum, offset) = find_line(code, i);
Self {
err,
line: line.into(),
linenum,
offset,
}
}
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let line = String::from_utf8_lossy(&self.line);
let width = UnicodeWidthStr::width(&line[0..self.offset]);
match self.err {
UnclosedLoop => {
writeln!(f, "reached EOF with unterminated loop")?;
writeln!(f, "Loop started at {}:{}", self.linenum, self.offset)?;
}
ExtraCloseLoop => {
writeln!(
f,
"[ found at {}:{} when not in a loop",
self.linenum, self.offset
)?;
}
};
writeln!(f, "{}", line)?;
write!(f, "{}^", " ".repeat(width))?;
Ok(())
}
}
impl Error for ParseError {}
/// Parses a string of brainfuck code to unoptimized AST
pub fn parse(code: &[u8]) -> Result<Vec<AST>, ParseError> {
let mut i = 0;
_parse(code, &mut i, 0)
}
fn _parse(code: &[u8], i: &mut usize, level: u32) -> Result<Vec<AST>, ParseError> {
// Starting [ of the loop
let start = i.saturating_sub(1);
let mut tokens = Vec::new();
while let Some(c) = code.get(*i) {
*i += 1;
match c {
b'+' => tokens.push(AST::Inc),
b'-' => tokens.push(AST::Dec),
b'>' => tokens.push(AST::Right),
b'<' => tokens.push(AST::Left),
b'[' => tokens.push(AST::Loop(_parse(code, i, level + 1)?)),
b']' => {
return if level == 0 {
Err(ParseError::new(ExtraCloseLoop, code, *i - 1))
} else {
Ok(tokens)
};
}
b',' => tokens.push(AST::Input),
b'.' => tokens.push(AST::Output),
_ => (),
};
}
    if level != 0 {
Err(ParseError::new(UnclosedLoop, code, start))
} else {
Ok(tokens)
}
}
fn find_line(code: &[u8], i: usize) -> (&[u8], usize, usize) {
    let offset = code[0..i].iter().rev().take_while(|x| **x != b'\n').count();
    let end = i + code[i..].iter().take_while(|x| **x != b'\n').count();
let linenum = code[0..(i - offset)]
.iter()
.filter(|x| **x == b'\n')
.count();
(&code[(i - offset)..end], linenum, offset)
}
| ParseError | identifier_name |
parser.rs | use std::error::Error;
use std::fmt;
use unicode_width::UnicodeWidthStr;
#[derive(Clone)]
pub enum AST {
Output,
Input,
Loop(Vec<AST>),
Right,
Left,
Inc,
Dec,
}
#[derive(Debug)]
pub enum ParseErrorType {
UnclosedLoop,
ExtraCloseLoop,
}
use ParseErrorType::*;
#[derive(Debug)]
pub struct ParseError {
err: ParseErrorType,
line: Vec<u8>,
linenum: usize,
offset: usize,
}
impl ParseError {
fn new(err: ParseErrorType, code: &[u8], i: usize) -> Self {
let (line, linenum, offset) = find_line(code, i);
Self {
err,
line: line.into(),
linenum,
offset,
}
}
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let line = String::from_utf8_lossy(&self.line);
let width = UnicodeWidthStr::width(&line[0..self.offset]);
match self.err {
UnclosedLoop => {
writeln!(f, "reached EOF with unterminated loop")?;
writeln!(f, "Loop started at {}:{}", self.linenum, self.offset)?;
}
ExtraCloseLoop => {
writeln!(
f,
"[ found at {}:{} when not in a loop",
self.linenum, self.offset
)?;
}
};
writeln!(f, "{}", line)?;
write!(f, "{}^", " ".repeat(width))?;
Ok(())
}
}
impl Error for ParseError {}
/// Parses a string of brainfuck code to unoptimized AST
pub fn parse(code: &[u8]) -> Result<Vec<AST>, ParseError> {
let mut i = 0;
_parse(code, &mut i, 0)
}
fn _parse(code: &[u8], i: &mut usize, level: u32) -> Result<Vec<AST>, ParseError> {
// Starting [ of the loop
let start = i.saturating_sub(1);
let mut tokens = Vec::new();
while let Some(c) = code.get(*i) {
*i += 1;
match c {
b'+' => tokens.push(AST::Inc),
b'-' => tokens.push(AST::Dec),
b'>' => tokens.push(AST::Right),
b'<' => tokens.push(AST::Left),
b'[' => tokens.push(AST::Loop(_parse(code, i, level + 1)?)),
b']' => {
return if level == 0 {
Err(ParseError::new(ExtraCloseLoop, code, *i - 1))
} else {
Ok(tokens)
};
}
b',' => tokens.push(AST::Input),
b'.' => tokens.push(AST::Output),
_ => (),
};
}
    if level != 0 {
Err(ParseError::new(UnclosedLoop, code, start))
} else |
}
fn find_line(code: &[u8], i: usize) -> (&[u8], usize, usize) {
    let offset = code[0..i].iter().rev().take_while(|x| **x != b'\n').count();
    let end = i + code[i..].iter().take_while(|x| **x != b'\n').count();
let linenum = code[0..(i - offset)]
.iter()
.filter(|x| **x == b'\n')
.count();
(&code[(i - offset)..end], linenum, offset)
}
| {
Ok(tokens)
} | conditional_block |
parser.rs | use std::error::Error;
use std::fmt;
use unicode_width::UnicodeWidthStr;
#[derive(Clone)]
pub enum AST {
Output,
Input,
Loop(Vec<AST>),
Right,
Left,
Inc,
Dec,
}
#[derive(Debug)]
pub enum ParseErrorType {
UnclosedLoop,
ExtraCloseLoop,
}
use ParseErrorType::*;
#[derive(Debug)]
pub struct ParseError {
err: ParseErrorType,
line: Vec<u8>,
linenum: usize,
offset: usize,
}
impl ParseError {
fn new(err: ParseErrorType, code: &[u8], i: usize) -> Self {
let (line, linenum, offset) = find_line(code, i);
Self {
err,
line: line.into(),
linenum,
offset,
}
}
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let line = String::from_utf8_lossy(&self.line);
let width = UnicodeWidthStr::width(&line[0..self.offset]);
match self.err {
UnclosedLoop => {
writeln!(f, "reached EOF with unterminated loop")?;
writeln!(f, "Loop started at {}:{}", self.linenum, self.offset)?;
}
ExtraCloseLoop => {
writeln!(
f,
"[ found at {}:{} when not in a loop",
self.linenum, self.offset
)?;
}
};
writeln!(f, "{}", line)?;
write!(f, "{}^", " ".repeat(width))?;
Ok(())
}
}
impl Error for ParseError {}
/// Parses a string of brainfuck code to unoptimized AST
pub fn parse(code: &[u8]) -> Result<Vec<AST>, ParseError> |
fn _parse(code: &[u8], i: &mut usize, level: u32) -> Result<Vec<AST>, ParseError> {
// Starting [ of the loop
let start = i.saturating_sub(1);
let mut tokens = Vec::new();
while let Some(c) = code.get(*i) {
*i += 1;
match c {
b'+' => tokens.push(AST::Inc),
b'-' => tokens.push(AST::Dec),
b'>' => tokens.push(AST::Right),
b'<' => tokens.push(AST::Left),
b'[' => tokens.push(AST::Loop(_parse(code, i, level + 1)?)),
b']' => {
return if level == 0 {
Err(ParseError::new(ExtraCloseLoop, code, *i - 1))
} else {
Ok(tokens)
};
}
b',' => tokens.push(AST::Input),
b'.' => tokens.push(AST::Output),
_ => (),
};
}
    if level != 0 {
Err(ParseError::new(UnclosedLoop, code, start))
} else {
Ok(tokens)
}
}
fn find_line(code: &[u8], i: usize) -> (&[u8], usize, usize) {
    let offset = code[0..i].iter().rev().take_while(|x| **x != b'\n').count();
    let end = i + code[i..].iter().take_while(|x| **x != b'\n').count();
let linenum = code[0..(i - offset)]
.iter()
.filter(|x| **x == b'\n')
.count();
(&code[(i - offset)..end], linenum, offset)
}
| {
let mut i = 0;
_parse(code, &mut i, 0)
} | identifier_body |