| file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
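Each row below is a fill-in-the-middle (FIM) sample: a source file is split into a `prefix`, a `middle`, and a `suffix`, and `fim_type` records how the split was chosen. The following is a minimal sketch (not part of the dataset) of how the three text columns relate; the sentinel tokens in `build_fim_prompt` are illustrative placeholders only, since the preview does not define any prompt format.

```rust
// Minimal sketch (not part of the dataset): how the `prefix`, `middle`, and
// `suffix` columns of a single row relate. The sentinel tokens used in
// `build_fim_prompt` are illustrative placeholders; the preview does not
// define any particular prompt format.

/// Splicing the three columns back together reproduces the original file.
fn reassemble(prefix: &str, middle: &str, suffix: &str) -> String {
    format!("{prefix}{middle}{suffix}")
}

/// One common way to present a FIM sample to a model: show the prefix and the
/// suffix, then ask it to generate the `middle`.
fn build_fim_prompt(prefix: &str, suffix: &str) -> String {
    format!("<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>")
}

fn main() {
    // Tiny made-up row in the style of the `identifier_name` samples below,
    // where `middle` is just a function name.
    let (prefix, middle, suffix) = ("fn ", "main", "() {}\n");
    assert_eq!(reassemble(prefix, middle, suffix), "fn main() {}\n");
    println!("{}", build_fim_prompt(prefix, suffix));
}
```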
def.rs
|
#[test]
fn parse_macro_def_no_params() {
super::test_parser(
r#"
<marker type="NODE_MACRO_DEF">macro test<marker type="NODE_MACRO_PARAM_LIST">()</marker> {
}</marker>
"#,
)
}
#[test]
fn parse_macro_def() {
super::test_parser(
r#"
<marker type="NODE_MACRO_DEF">macro test<marker type="NODE_MACRO_PARAM_LIST">(
<marker type="NODE_MACRO_PARAM_LIST_ITEM">type t</marker>
)</marker> {
}</marker>
"#,
)
}
#[test]
#[ignore]
fn
|
() {
super::test_parser(
r#"
<marker type="KW_ABSTRACT">abstract</marker> block test {}
"#,
);
}
#[test]
fn parse_abstract_container_with_extends_list() {
super::test_parser(
r#"
abstract block test <marker type="NODE_EXTENDS_LIST">extends abc</marker> {}
"#,
);
}
#[test]
fn parse_var_def() {
super::test_parser(
r#"
<marker type="NODE_VARIABLE_DEF">type a;</marker>
"#,
)
}
#[test]
fn parse_var_with_initializer() {
super::test_parser(
r#"
<marker type="NODE_VARIABLE_DEF">type_attribute a = <marker type="NODE_BINARY_EXPR">a | b</marker>;</marker>
"#,
)
}
|
parse_abstract_container_def
|
identifier_name
|
def.rs
|
#[test]
fn parse_macro_def_no_params() {
super::test_parser(
r#"
<marker type="NODE_MACRO_DEF">macro test<marker type="NODE_MACRO_PARAM_LIST">()</marker> {
}</marker>
"#,
)
}
#[test]
fn parse_macro_def() {
super::test_parser(
r#"
<marker type="NODE_MACRO_DEF">macro test<marker type="NODE_MACRO_PARAM_LIST">(
<marker type="NODE_MACRO_PARAM_LIST_ITEM">type t</marker>
)</marker> {
}</marker>
"#,
)
}
#[test]
#[ignore]
fn parse_abstract_container_def() {
super::test_parser(
r#"
<marker type="KW_ABSTRACT">abstract</marker> block test {}
"#,
);
}
#[test]
fn parse_abstract_container_with_extends_list() {
super::test_parser(
r#"
abstract block test <marker type="NODE_EXTENDS_LIST">extends abc</marker> {}
"#,
);
}
#[test]
fn parse_var_def() {
super::test_parser(
r#"
<marker type="NODE_VARIABLE_DEF">type a;</marker>
"#,
)
}
#[test]
fn parse_var_with_initializer() {
super::test_parser(
r#"
<marker type="NODE_VARIABLE_DEF">type_attribute a = <marker type="NODE_BINARY_EXPR">a | b</marker>;</marker>
"#,
)
|
}
|
random_line_split
|
|
client_basics.rs
|
extern crate riak;
use riak::Client;
use riak::bucket::{BucketProps, BucketTypeProps};
use riak::object::{DeleteObjectReq, FetchObjectReq, ObjectContent, StoreObjectReq};
use riak::yokozuna::{SearchQuery, YokozunaIndex};
use std::fs::File;
use std::io::Read;
#[test]
fn test_basics() {
// connect and ping
let mut riak = Client::new("10.0.0.2:8087").unwrap();
riak.ping().unwrap();
// get the server info
let (node, version) = riak.server_info().unwrap();
println!("connected to node {} running Riak version {}",
node,
version);
// set bucket properties
let mut bucket_props = BucketProps::new("testbucket");
bucket_props.set_backend("leveldb");
riak.set_bucket_properties(bucket_props).unwrap();
// get the properties back from the server
let bucket_props = riak.get_bucket_properties("testbucket").unwrap();
let found_backend = bucket_props.get_backend().unwrap();
assert_eq!(found_backend, "leveldb".as_bytes());
// store an object
let contents = ObjectContent::new("this is a test".as_bytes());
let mut req = StoreObjectReq::new("testbucket", contents);
req.set_key("testkey");
riak.store_object(req).unwrap();
// fetch an object
let req = FetchObjectReq::new("testbucket", "testkey");
let object = riak.fetch_object(req).unwrap();
let contents = object.content;
let content = contents.first().unwrap();
assert_eq!(content.get_value(), "this is a test".as_bytes());
// delete an object
let req = DeleteObjectReq::new("testbucket", "testkey");
riak.delete_object(req).unwrap();
// make sure deleted object is gone
let req = FetchObjectReq::new("testbucket", "testkey");
let object = riak.fetch_object(req).unwrap();
assert_eq!(object.content.len(), 0);
// list the available buckets
let buckets = riak.list_buckets().unwrap();
let mut bucket_exists = false;
for bucket in buckets.iter() {
if *bucket == "testbucket".as_bytes() {
bucket_exists = true;
}
}
assert!(bucket_exists);
// list the available keys
let keys = riak.list_keys("testbucket").unwrap();
let mut key_exists = false;
for key in keys.iter() {
if *key == "testkey".as_bytes() {
key_exists = true;
}
}
assert!(key_exists);
// fetch the preflist for testbucket/testkey
let preflist = riak.fetch_preflist("testbucket", "testkey").unwrap();
let mut lives_on_nodes: u8 = 0;
let mut has_primary_node = false;
for preflist_item in preflist.iter() {
lives_on_nodes = lives_on_nodes + 1;
if preflist_item.is_primary
|
}
assert_eq!(lives_on_nodes, 3);
assert!(has_primary_node);
// set properties for a bucket type
let mut bucket_props = BucketTypeProps::new("testbuckettype");
bucket_props.set_backend("leveldb");
riak.set_bucket_type_properties(bucket_props).unwrap();
// get the properties back for a bucket type and verify them
let bucket_props = riak.get_bucket_type_properties("testbuckettype").unwrap();
assert_eq!(bucket_props.get_backend().expect("could not get backend"),
"leveldb".as_bytes());
// set a search schema
let mut xml: Vec<u8> = Vec::new();
let mut file = File::open("/tmp/riak-rust-client-default-schema.xml").unwrap();
let _ = file.read_to_end(&mut xml).unwrap();
let schema_name = "schedule".to_string().into_bytes();
riak.set_yokozuna_schema(schema_name.clone(), xml.clone()).unwrap();
// retrieve the search schema
let schema = riak.get_yokozuna_schema(schema_name.clone()).unwrap();
assert_eq!(schema, xml);
// set a search index
let index_name = "myindex".to_string().into_bytes();
let mut index = YokozunaIndex::new(index_name.clone());
index.set_schema(schema_name);
index.set_n_val(3);
riak.set_yokozuna_index(index).unwrap();
// get the search index
let index = riak.get_yokozuna_index(index_name.clone()).unwrap();
assert_eq!(index[0].get_name(), index_name);
// run a search
let mut query = SearchQuery::new("test*", "myindex");
query.set_df("_yz_id");
riak.search(query).unwrap();
// run a MapReduce job
let job = r#"
{"inputs": "bucket_501653", "query": [
{"map": {
"arg": null,
"name": "Riak.mapValuesJson",
"language": "javascript",
"keep": false
}},
{"reduce": {
"arg": null,
"name": "Riak.reduceSum",
"language": "javascript",
"keep": true
}}
]}
"#;
riak.mapreduce(job, "application/json").unwrap();
}
|
{
has_primary_node = true;
}
|
conditional_block
|
client_basics.rs
|
extern crate riak;
use riak::Client;
use riak::bucket::{BucketProps, BucketTypeProps};
use riak::object::{DeleteObjectReq, FetchObjectReq, ObjectContent, StoreObjectReq};
use riak::yokozuna::{SearchQuery, YokozunaIndex};
use std::fs::File;
use std::io::Read;
#[test]
fn test_basics()
|
// store an object
let contents = ObjectContent::new("this is a test".as_bytes());
let mut req = StoreObjectReq::new("testbucket", contents);
req.set_key("testkey");
riak.store_object(req).unwrap();
// fetch an object
let req = FetchObjectReq::new("testbucket", "testkey");
let object = riak.fetch_object(req).unwrap();
let contents = object.content;
let content = contents.first().unwrap();
assert_eq!(content.get_value(), "this is a test".as_bytes());
// delete an object
let req = DeleteObjectReq::new("testbucket", "testkey");
riak.delete_object(req).unwrap();
// make sure deleted object is gone
let req = FetchObjectReq::new("testbucket", "testkey");
let object = riak.fetch_object(req).unwrap();
assert_eq!(object.content.len(), 0);
// list the available buckets
let buckets = riak.list_buckets().unwrap();
let mut bucket_exists = false;
for bucket in buckets.iter() {
if *bucket == "testbucket".as_bytes() {
bucket_exists = true;
}
}
assert!(bucket_exists);
// list the available keys
let keys = riak.list_keys("testbucket").unwrap();
let mut key_exists = false;
for key in keys.iter() {
if *key == "testkey".as_bytes() {
key_exists = true;
}
}
assert!(key_exists);
// fetch the preflist for testbucket/testkey
let preflist = riak.fetch_preflist("testbucket", "testkey").unwrap();
let mut lives_on_nodes: u8 = 0;
let mut has_primary_node = false;
for preflist_item in preflist.iter() {
lives_on_nodes = lives_on_nodes + 1;
if preflist_item.is_primary {
has_primary_node = true;
}
}
assert_eq!(lives_on_nodes, 3);
assert!(has_primary_node);
// set properties for a bucket type
let mut bucket_props = BucketTypeProps::new("testbuckettype");
bucket_props.set_backend("leveldb");
riak.set_bucket_type_properties(bucket_props).unwrap();
// get the properties back for a bucket type and verify them
let bucket_props = riak.get_bucket_type_properties("testbuckettype").unwrap();
assert_eq!(bucket_props.get_backend().expect("could not get backend"),
"leveldb".as_bytes());
// set a search schema
let mut xml: Vec<u8> = Vec::new();
let mut file = File::open("/tmp/riak-rust-client-default-schema.xml").unwrap();
let _ = file.read_to_end(&mut xml).unwrap();
let schema_name = "schedule".to_string().into_bytes();
riak.set_yokozuna_schema(schema_name.clone(), xml.clone()).unwrap();
// retrieve the search schema
let schema = riak.get_yokozuna_schema(schema_name.clone()).unwrap();
assert_eq!(schema, xml);
// set a search index
let index_name = "myindex".to_string().into_bytes();
let mut index = YokozunaIndex::new(index_name.clone());
index.set_schema(schema_name);
index.set_n_val(3);
riak.set_yokozuna_index(index).unwrap();
// get the search index
let index = riak.get_yokozuna_index(index_name.clone()).unwrap();
assert_eq!(index[0].get_name(), index_name);
// run a search
let mut query = SearchQuery::new("test*", "myindex");
query.set_df("_yz_id");
riak.search(query).unwrap();
// run a MapReduce job
let job = r#"
{"inputs": "bucket_501653", "query": [
{"map": {
"arg": null,
"name": "Riak.mapValuesJson",
"language": "javascript",
"keep": false
}},
{"reduce": {
"arg": null,
"name": "Riak.reduceSum",
"language": "javascript",
"keep": true
}}
]}
"#;
riak.mapreduce(job, "application/json").unwrap();
}
|
{
// connect and ping
let mut riak = Client::new("10.0.0.2:8087").unwrap();
riak.ping().unwrap();
// get the server info
let (node, version) = riak.server_info().unwrap();
println!("connected to node {} running Riak version {}",
node,
version);
// set bucket properties
let mut bucket_props = BucketProps::new("testbucket");
bucket_props.set_backend("leveldb");
riak.set_bucket_properties(bucket_props).unwrap();
// get the properties back from the server
let bucket_props = riak.get_bucket_properties("testbucket").unwrap();
let found_backend = bucket_props.get_backend().unwrap();
assert_eq!(found_backend, "leveldb".as_bytes());
|
identifier_body
|
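All four `fim_type` classes have now appeared above: `identifier_name`, `random_line_split`, `conditional_block`, and `identifier_body`. The enum below is a purely illustrative sketch, based on the rows shown so far, of what kind of span each class removes as the `middle`; it is not a type defined by the dataset.

```rust
// Purely illustrative sketch (not part of the dataset): the four `fim_type`
// classes seen in the rows above and the kind of span each removes as `middle`.
#[derive(Debug)]
enum FimType {
    /// `middle` is a single identifier, e.g. `parse_abstract_container_def`.
    IdentifierName,
    /// `middle` is a whole function body, e.g. the `{ ... }` of `fn test_basics()`.
    IdentifierBody,
    /// `middle` is the block attached to a conditional, e.g.
    /// `{ has_primary_node = true; }` after `if preflist_item.is_primary`.
    ConditionalBlock,
    /// `middle` is an arbitrary contiguous run of source lines.
    RandomLineSplit,
}

impl FimType {
    /// The label exactly as it appears in the `fim_type` column.
    fn label(&self) -> &'static str {
        match self {
            FimType::IdentifierName => "identifier_name",
            FimType::IdentifierBody => "identifier_body",
            FimType::ConditionalBlock => "conditional_block",
            FimType::RandomLineSplit => "random_line_split",
        }
    }
}

fn main() {
    for t in [FimType::IdentifierName, FimType::IdentifierBody,
              FimType::ConditionalBlock, FimType::RandomLineSplit] {
        println!("{} -> {:?}", t.label(), t);
    }
}
```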
client_basics.rs
|
extern crate riak;
use riak::Client;
use riak::bucket::{BucketProps, BucketTypeProps};
use riak::object::{DeleteObjectReq, FetchObjectReq, ObjectContent, StoreObjectReq};
use riak::yokozuna::{SearchQuery, YokozunaIndex};
use std::fs::File;
use std::io::Read;
#[test]
fn
|
() {
// connect and ping
let mut riak = Client::new("10.0.0.2:8087").unwrap();
riak.ping().unwrap();
// get the server info
let (node, version) = riak.server_info().unwrap();
println!("connected to node {} running Riak version {}",
node,
version);
// set bucket properties
let mut bucket_props = BucketProps::new("testbucket");
bucket_props.set_backend("leveldb");
riak.set_bucket_properties(bucket_props).unwrap();
// get the properties back from the server
let bucket_props = riak.get_bucket_properties("testbucket").unwrap();
let found_backend = bucket_props.get_backend().unwrap();
assert_eq!(found_backend, "leveldb".as_bytes());
// store an object
let contents = ObjectContent::new("this is a test".as_bytes());
let mut req = StoreObjectReq::new("testbucket", contents);
req.set_key("testkey");
riak.store_object(req).unwrap();
// fetch an object
let req = FetchObjectReq::new("testbucket", "testkey");
let object = riak.fetch_object(req).unwrap();
let contents = object.content;
let content = contents.first().unwrap();
assert_eq!(content.get_value(), "this is a test".as_bytes());
// delete an object
let req = DeleteObjectReq::new("testbucket", "testkey");
riak.delete_object(req).unwrap();
// make sure deleted object is gone
let req = FetchObjectReq::new("testbucket", "testkey");
let object = riak.fetch_object(req).unwrap();
assert_eq!(object.content.len(), 0);
// list the available buckets
let buckets = riak.list_buckets().unwrap();
let mut bucket_exists = false;
for bucket in buckets.iter() {
if *bucket == "testbucket".as_bytes() {
bucket_exists = true;
}
}
assert!(bucket_exists);
// list the available keys
let keys = riak.list_keys("testbucket").unwrap();
let mut key_exists = false;
for key in keys.iter() {
if *key == "testkey".as_bytes() {
key_exists = true;
}
}
assert!(key_exists);
// fetch the preflist for testbucket/testkey
let preflist = riak.fetch_preflist("testbucket", "testkey").unwrap();
let mut lives_on_nodes: u8 = 0;
let mut has_primary_node = false;
for preflist_item in preflist.iter() {
lives_on_nodes = lives_on_nodes + 1;
if preflist_item.is_primary {
has_primary_node = true;
}
}
assert_eq!(lives_on_nodes, 3);
assert!(has_primary_node);
// set properties for a bucket type
let mut bucket_props = BucketTypeProps::new("testbuckettype");
bucket_props.set_backend("leveldb");
riak.set_bucket_type_properties(bucket_props).unwrap();
// get the properties back for a bucket type and verify them
let bucket_props = riak.get_bucket_type_properties("testbuckettype").unwrap();
assert_eq!(bucket_props.get_backend().expect("could not get backend"),
"leveldb".as_bytes());
// set a search schema
let mut xml: Vec<u8> = Vec::new();
let mut file = File::open("/tmp/riak-rust-client-default-schema.xml").unwrap();
let _ = file.read_to_end(&mut xml).unwrap();
let schema_name = "schedule".to_string().into_bytes();
riak.set_yokozuna_schema(schema_name.clone(), xml.clone()).unwrap();
// retrieve the search schema
let schema = riak.get_yokozuna_schema(schema_name.clone()).unwrap();
assert_eq!(schema, xml);
// set a search index
let index_name = "myindex".to_string().into_bytes();
let mut index = YokozunaIndex::new(index_name.clone());
index.set_schema(schema_name);
index.set_n_val(3);
riak.set_yokozuna_index(index).unwrap();
// get the search index
let index = riak.get_yokozuna_index(index_name.clone()).unwrap();
assert_eq!(index[0].get_name(), index_name);
// run a search
let mut query = SearchQuery::new("test*", "myindex");
query.set_df("_yz_id");
riak.search(query).unwrap();
// run a MapReduce job
let job = r#"
{"inputs": "bucket_501653", "query": [
{"map": {
"arg": null,
"name": "Riak.mapValuesJson",
"language": "javascript",
"keep": false
}},
{"reduce": {
"arg": null,
"name": "Riak.reduceSum",
"language": "javascript",
"keep": true
}}
]}
"#;
riak.mapreduce(job, "application/json").unwrap();
}
|
test_basics
|
identifier_name
|
client_basics.rs
|
extern crate riak;
|
use riak::yokozuna::{SearchQuery, YokozunaIndex};
use std::fs::File;
use std::io::Read;
#[test]
fn test_basics() {
// connect and ping
let mut riak = Client::new("10.0.0.2:8087").unwrap();
riak.ping().unwrap();
// get the server info
let (node, version) = riak.server_info().unwrap();
println!("connected to node {} running Riak version {}",
node,
version);
// set bucket properties
let mut bucket_props = BucketProps::new("testbucket");
bucket_props.set_backend("leveldb");
riak.set_bucket_properties(bucket_props).unwrap();
// get the properties back from the server
let bucket_props = riak.get_bucket_properties("testbucket").unwrap();
let found_backend = bucket_props.get_backend().unwrap();
assert_eq!(found_backend, "leveldb".as_bytes());
// store an object
let contents = ObjectContent::new("this is a test".as_bytes());
let mut req = StoreObjectReq::new("testbucket", contents);
req.set_key("testkey");
riak.store_object(req).unwrap();
// fetch an object
let req = FetchObjectReq::new("testbucket", "testkey");
let object = riak.fetch_object(req).unwrap();
let contents = object.content;
let content = contents.first().unwrap();
assert_eq!(content.get_value(), "this is a test".as_bytes());
// delete an object
let req = DeleteObjectReq::new("testbucket", "testkey");
riak.delete_object(req).unwrap();
// make sure deleted object is gone
let req = FetchObjectReq::new("testbucket", "testkey");
let object = riak.fetch_object(req).unwrap();
assert_eq!(object.content.len(), 0);
// list the available buckets
let buckets = riak.list_buckets().unwrap();
let mut bucket_exists = false;
for bucket in buckets.iter() {
if *bucket == "testbucket".as_bytes() {
bucket_exists = true;
}
}
assert!(bucket_exists);
// list the available keys
let keys = riak.list_keys("testbucket").unwrap();
let mut key_exists = false;
for key in keys.iter() {
if *key == "testkey".as_bytes() {
key_exists = true;
}
}
assert!(key_exists);
// fetch the preflist for testbucket/testkey
let preflist = riak.fetch_preflist("testbucket", "testkey").unwrap();
let mut lives_on_nodes: u8 = 0;
let mut has_primary_node = false;
for preflist_item in preflist.iter() {
lives_on_nodes = lives_on_nodes + 1;
if preflist_item.is_primary {
has_primary_node = true;
}
}
assert_eq!(lives_on_nodes, 3);
assert!(has_primary_node);
// set properties for a bucket type
let mut bucket_props = BucketTypeProps::new("testbuckettype");
bucket_props.set_backend("leveldb");
riak.set_bucket_type_properties(bucket_props).unwrap();
// get the properties back for a bucket type and verify them
let bucket_props = riak.get_bucket_type_properties("testbuckettype").unwrap();
assert_eq!(bucket_props.get_backend().expect("could not get backend"),
"leveldb".as_bytes());
// set a search schema
let mut xml: Vec<u8> = Vec::new();
let mut file = File::open("/tmp/riak-rust-client-default-schema.xml").unwrap();
let _ = file.read_to_end(&mut xml).unwrap();
let schema_name = "schedule".to_string().into_bytes();
riak.set_yokozuna_schema(schema_name.clone(), xml.clone()).unwrap();
// retrieve the search schema
let schema = riak.get_yokozuna_schema(schema_name.clone()).unwrap();
assert_eq!(schema, xml);
// set a search index
let index_name = "myindex".to_string().into_bytes();
let mut index = YokozunaIndex::new(index_name.clone());
index.set_schema(schema_name);
index.set_n_val(3);
riak.set_yokozuna_index(index).unwrap();
// get the search index
let index = riak.get_yokozuna_index(index_name.clone()).unwrap();
assert_eq!(index[0].get_name(), index_name);
// run a search
let mut query = SearchQuery::new("test*", "myindex");
query.set_df("_yz_id");
riak.search(query).unwrap();
// run a MapReduce job
let job = r#"
{"inputs": "bucket_501653", "query": [
{"map": {
"arg": null,
"name": "Riak.mapValuesJson",
"language": "javascript",
"keep": false
}},
{"reduce": {
"arg": null,
"name": "Riak.reduceSum",
"language": "javascript",
"keep": true
}}
]}
"#;
riak.mapreduce(job, "application/json").unwrap();
}
|
use riak::Client;
use riak::bucket::{BucketProps, BucketTypeProps};
use riak::object::{DeleteObjectReq, FetchObjectReq, ObjectContent, StoreObjectReq};
|
random_line_split
|
test_udp_socket.rs
|
use mio::*;
use mio::udp::*;
use bytes::{Buf, RingBuf, SliceBuf, MutBuf};
use std::str;
use localhost;
const LISTENER: Token = Token(0);
const SENDER: Token = Token(1);
pub struct UdpHandler {
tx: UdpSocket,
rx: UdpSocket,
msg: &'static str,
buf: SliceBuf<'static>,
rx_buf: RingBuf
}
impl UdpHandler {
fn new(tx: UdpSocket, rx: UdpSocket, msg: &'static str) -> UdpHandler {
UdpHandler {
tx: tx,
rx: rx,
msg: msg,
buf: SliceBuf::wrap(msg.as_bytes()),
rx_buf: RingBuf::new(1024)
}
}
}
impl Handler for UdpHandler {
type Timeout = usize;
type Message = ();
fn ready(&mut self, event_loop: &mut EventLoop<UdpHandler>, token: Token, events: EventSet) {
if events.is_readable()
|
if events.is_writable() {
match token {
SENDER => {
let addr = self.rx.local_addr().unwrap();
let cnt = self.tx.send_to(self.buf.bytes(), &addr).unwrap()
.unwrap();
self.buf.advance(cnt);
},
_ => {}
}
}
}
}
fn assert_send<T: Send>() {
}
#[test]
pub fn test_udp_socket() {
debug!("Starting TEST_UDP_SOCKETS");
let mut event_loop = EventLoop::new().unwrap();
let addr = localhost();
let any = str::FromStr::from_str("0.0.0.0:0").unwrap();
let tx = UdpSocket::bound(&any).unwrap();
let rx = UdpSocket::bound(&addr).unwrap();
assert_send::<UdpSocket>();
// ensure that the sockets are non-blocking
let mut buf = [0; 128];
assert!(rx.recv_from(&mut buf).unwrap().is_none());
info!("Registering SENDER");
event_loop.register(&tx, SENDER, EventSet::writable(), PollOpt::edge()).unwrap();
info!("Registering LISTENER");
event_loop.register(&rx, LISTENER, EventSet::readable(), PollOpt::edge()).unwrap();
info!("Starting event loop to test with...");
event_loop.run(&mut UdpHandler::new(tx, rx, "hello world")).unwrap();
}
|
{
match token {
LISTENER => {
debug!("We are receiving a datagram now...");
let (cnt, _) = unsafe {
self.rx.recv_from(self.rx_buf.mut_bytes()).unwrap()
.unwrap()
};
unsafe { MutBuf::advance(&mut self.rx_buf, cnt); }
assert!(str::from_utf8(self.rx_buf.bytes()).unwrap() == self.msg);
event_loop.shutdown();
},
_ => ()
}
}
|
conditional_block
|
test_udp_socket.rs
|
use mio::*;
use mio::udp::*;
use bytes::{Buf, RingBuf, SliceBuf, MutBuf};
use std::str;
use localhost;
const LISTENER: Token = Token(0);
const SENDER: Token = Token(1);
pub struct UdpHandler {
tx: UdpSocket,
rx: UdpSocket,
msg: &'static str,
buf: SliceBuf<'static>,
rx_buf: RingBuf
}
impl UdpHandler {
fn new(tx: UdpSocket, rx: UdpSocket, msg: &'static str) -> UdpHandler {
UdpHandler {
tx: tx,
rx: rx,
msg: msg,
buf: SliceBuf::wrap(msg.as_bytes()),
rx_buf: RingBuf::new(1024)
}
}
}
impl Handler for UdpHandler {
type Timeout = usize;
type Message = ();
fn
|
(&mut self, event_loop: &mut EventLoop<UdpHandler>, token: Token, events: EventSet) {
if events.is_readable() {
match token {
LISTENER => {
debug!("We are receiving a datagram now...");
let (cnt, _) = unsafe {
self.rx.recv_from(self.rx_buf.mut_bytes()).unwrap()
.unwrap()
};
unsafe { MutBuf::advance(&mut self.rx_buf, cnt); }
assert!(str::from_utf8(self.rx_buf.bytes()).unwrap() == self.msg);
event_loop.shutdown();
},
_ => ()
}
}
if events.is_writable() {
match token {
SENDER => {
let addr = self.rx.local_addr().unwrap();
let cnt = self.tx.send_to(self.buf.bytes(), &addr).unwrap()
.unwrap();
self.buf.advance(cnt);
},
_ => {}
}
}
}
}
fn assert_send<T: Send>() {
}
#[test]
pub fn test_udp_socket() {
debug!("Starting TEST_UDP_SOCKETS");
let mut event_loop = EventLoop::new().unwrap();
let addr = localhost();
let any = str::FromStr::from_str("0.0.0.0:0").unwrap();
let tx = UdpSocket::bound(&any).unwrap();
let rx = UdpSocket::bound(&addr).unwrap();
assert_send::<UdpSocket>();
// ensure that the sockets are non-blocking
let mut buf = [0; 128];
assert!(rx.recv_from(&mut buf).unwrap().is_none());
info!("Registering SENDER");
event_loop.register(&tx, SENDER, EventSet::writable(), PollOpt::edge()).unwrap();
info!("Registering LISTENER");
event_loop.register(&rx, LISTENER, EventSet::readable(), PollOpt::edge()).unwrap();
info!("Starting event loop to test with...");
event_loop.run(&mut UdpHandler::new(tx, rx, "hello world")).unwrap();
}
|
ready
|
identifier_name
|
test_udp_socket.rs
|
use mio::*;
use mio::udp::*;
use bytes::{Buf, RingBuf, SliceBuf, MutBuf};
use std::str;
use localhost;
|
pub struct UdpHandler {
tx: UdpSocket,
rx: UdpSocket,
msg: &'static str,
buf: SliceBuf<'static>,
rx_buf: RingBuf
}
impl UdpHandler {
fn new(tx: UdpSocket, rx: UdpSocket, msg: &'static str) -> UdpHandler {
UdpHandler {
tx: tx,
rx: rx,
msg: msg,
buf: SliceBuf::wrap(msg.as_bytes()),
rx_buf: RingBuf::new(1024)
}
}
}
impl Handler for UdpHandler {
type Timeout = usize;
type Message = ();
fn ready(&mut self, event_loop: &mut EventLoop<UdpHandler>, token: Token, events: EventSet) {
if events.is_readable() {
match token {
LISTENER => {
debug!("We are receiving a datagram now...");
let (cnt, _) = unsafe {
self.rx.recv_from(self.rx_buf.mut_bytes()).unwrap()
.unwrap()
};
unsafe { MutBuf::advance(&mut self.rx_buf, cnt); }
assert!(str::from_utf8(self.rx_buf.bytes()).unwrap() == self.msg);
event_loop.shutdown();
},
_ => ()
}
}
if events.is_writable() {
match token {
SENDER => {
let addr = self.rx.local_addr().unwrap();
let cnt = self.tx.send_to(self.buf.bytes(), &addr).unwrap()
.unwrap();
self.buf.advance(cnt);
},
_ => {}
}
}
}
}
fn assert_send<T: Send>() {
}
#[test]
pub fn test_udp_socket() {
debug!("Starting TEST_UDP_SOCKETS");
let mut event_loop = EventLoop::new().unwrap();
let addr = localhost();
let any = str::FromStr::from_str("0.0.0.0:0").unwrap();
let tx = UdpSocket::bound(&any).unwrap();
let rx = UdpSocket::bound(&addr).unwrap();
assert_send::<UdpSocket>();
// ensure that the sockets are non-blocking
let mut buf = [0; 128];
assert!(rx.recv_from(&mut buf).unwrap().is_none());
info!("Registering SENDER");
event_loop.register(&tx, SENDER, EventSet::writable(), PollOpt::edge()).unwrap();
info!("Registering LISTENER");
event_loop.register(&rx, LISTENER, EventSet::readable(), PollOpt::edge()).unwrap();
info!("Starting event loop to test with...");
event_loop.run(&mut UdpHandler::new(tx, rx, "hello world")).unwrap();
}
|
const LISTENER: Token = Token(0);
const SENDER: Token = Token(1);
|
random_line_split
|
lub-glb-with-unbound-infer-var.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test for a specific corner case: when we compute the LUB of two fn
// types and their parameters have unbound variables. In that case, we
// wind up relating those two variables. This was causing an ICE in an
// in-progress PR.
fn main()
|
{
let a_f: fn(_) = |_| ();
let b_f: fn(_) = |_| ();
let c_f = match 22 {
0 => a_f,
_ => b_f,
};
c_f(4);
}
|
identifier_body
|
|
lub-glb-with-unbound-infer-var.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test for a specific corner case: when we compute the LUB of two fn
// types and their parameters have unbound variables. In that case, we
// wind up relating those two variables. This was causing an ICE in an
|
// in-progress PR.
fn main() {
let a_f: fn(_) = |_| ();
let b_f: fn(_) = |_| ();
let c_f = match 22 {
0 => a_f,
_ => b_f,
};
c_f(4);
}
|
random_line_split
|
|
lub-glb-with-unbound-infer-var.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test for a specific corner case: when we compute the LUB of two fn
// types and their parameters have unbound variables. In that case, we
// wind up relating those two variables. This was causing an ICE in an
// in-progress PR.
fn
|
() {
let a_f: fn(_) = |_| ();
let b_f: fn(_) = |_| ();
let c_f = match 22 {
0 => a_f,
_ => b_f,
};
c_f(4);
}
|
main
|
identifier_name
|
main.rs
|
use std::io::Write;
use std::str::FromStr;
fn main()
|
fn gcd(mut n: u64, mut m: u64) -> u64 {
assert!(n != 0 && m != 0);
while m != 0 {
if m < n {
let t = m;
m = n;
n = t;
}
m = m % n;
}
n
}
#[test]
fn test_gcd() {
assert_eq!(gcd(2 * 3 * 5 * 11 * 17, 3 * 7 * 11 * 13 * 19), 3 * 11);
}
|
{
let mut numbers = Vec::new();
for arg in std::env::args().skip(1) {
numbers.push(u64::from_str(&arg).expect("error parsing argument"));
}
if numbers.len() == 0 {
writeln!(std::io::stderr(), "Usage: gcd NUMBER ...").unwrap();
std::process::exit(1);
}
let mut d = numbers[0];
for m in &numbers[1..] {
d = gcd(d, *m);
}
println!("The greatest common divisor of {:?} is {}", numbers, d);
}
|
identifier_body
|
main.rs
|
use std::io::Write;
use std::str::FromStr;
fn
|
() {
let mut numbers = Vec::new();
for arg in std::env::args().skip(1) {
numbers.push(u64::from_str(&arg).expect("error parsing argument"));
}
if numbers.len() == 0 {
writeln!(std::io::stderr(), "Usage: gcd NUMBER...").unwrap();
std::process::exit(1);
}
let mut d = numbers[0];
for m in &numbers[1..] {
d = gcd(d, *m);
}
println!("The greatest common divisor of {:?} is {}", numbers, d);
}
fn gcd(mut n: u64, mut m: u64) -> u64 {
assert!(n != 0 && m != 0);
while m != 0 {
if m < n {
let t = m;
m = n;
n = t;
}
m = m % n;
}
n
}
#[test]
fn test_gcd() {
assert_eq!(gcd(2 * 3 * 5 * 11 * 17, 3 * 7 * 11 * 13 * 19), 3 * 11);
}
|
main
|
identifier_name
|
main.rs
|
use std::io::Write;
use std::str::FromStr;
fn main() {
let mut numbers = Vec::new();
for arg in std::env::args().skip(1) {
numbers.push(u64::from_str(&arg).expect("error parsing argument"));
}
if numbers.len() == 0 {
writeln!(std::io::stderr(), "Usage: gcd NUMBER...").unwrap();
std::process::exit(1);
}
let mut d = numbers[0];
for m in &numbers[1..] {
d = gcd(d, *m);
}
println!("The greatest common divisor of {:?} is {}", numbers, d);
}
fn gcd(mut n: u64, mut m: u64) -> u64 {
assert!(n != 0 && m != 0);
while m != 0 {
if m < n
|
m = m % n;
}
n
}
#[test]
fn test_gcd() {
assert_eq!(gcd(2 * 3 * 5 * 11 * 17, 3 * 7 * 11 * 13 * 19), 3 * 11);
}
|
{
let t = m;
m = n;
n = t;
}
|
conditional_block
|
main.rs
|
use std::io::Write;
use std::str::FromStr;
fn main() {
let mut numbers = Vec::new();
for arg in std::env::args().skip(1) {
numbers.push(u64::from_str(&arg).expect("error parsing argument"));
}
if numbers.len() == 0 {
writeln!(std::io::stderr(), "Usage: gcd NUMBER...").unwrap();
std::process::exit(1);
}
|
}
println!("The greatest common divisor of {:?} is {}", numbers, d);
}
fn gcd(mut n: u64, mut m: u64) -> u64 {
assert!(n != 0 && m != 0);
while m != 0 {
if m < n {
let t = m;
m = n;
n = t;
}
m = m % n;
}
n
}
#[test]
fn test_gcd() {
assert_eq!(gcd(2 * 3 * 5 * 11 * 17, 3 * 7 * 11 * 13 * 19), 3 * 11);
}
|
let mut d = numbers[0];
for m in &numbers[1..] {
d = gcd(d, *m);
|
random_line_split
|
error.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::{io, net, fmt};
use io::IoError;
use {rlp, ethkey, crypto, snappy};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum DisconnectReason
{
DisconnectRequested,
TCPError,
BadProtocol,
UselessPeer,
TooManyPeers,
DuplicatePeer,
IncompatibleProtocol,
NullIdentity,
ClientQuit,
UnexpectedIdentity,
LocalIdentity,
PingTimeout,
Unknown,
}
impl DisconnectReason {
pub fn from_u8(n: u8) -> DisconnectReason {
match n {
0 => DisconnectReason::DisconnectRequested,
1 => DisconnectReason::TCPError,
2 => DisconnectReason::BadProtocol,
3 => DisconnectReason::UselessPeer,
4 => DisconnectReason::TooManyPeers,
5 => DisconnectReason::DuplicatePeer,
6 => DisconnectReason::IncompatibleProtocol,
7 => DisconnectReason::NullIdentity,
8 => DisconnectReason::ClientQuit,
9 => DisconnectReason::UnexpectedIdentity,
10 => DisconnectReason::LocalIdentity,
11 => DisconnectReason::PingTimeout,
_ => DisconnectReason::Unknown,
}
}
}
impl fmt::Display for DisconnectReason {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::DisconnectReason::*;
let msg = match *self {
DisconnectRequested => "disconnect requested",
TCPError => "TCP error",
BadProtocol => "bad protocol",
UselessPeer => "useless peer",
TooManyPeers => "too many peers",
DuplicatePeer => "duplicate peer",
IncompatibleProtocol => "incompatible protocol",
NullIdentity => "null identity",
ClientQuit => "client quit",
UnexpectedIdentity => "unexpected identity",
LocalIdentity => "local identity",
PingTimeout => "ping timeout",
Unknown => "unknown",
};
f.write_str(msg)
}
}
error_chain! {
foreign_links {
SocketIo(IoError) #[doc = "Socket IO error."];
Io(io::Error) #[doc = "Error concerning the Rust standard library's IO subsystem."];
AddressParse(net::AddrParseError) #[doc = "Error concerning the network address parsing subsystem."];
Decompression(snappy::InvalidInput) #[doc = "Decompression error."];
}
errors {
#[doc = "Error concerning the network address resolution subsystem."]
AddressResolve(err: Option<io::Error>) {
description("Failed to resolve network address"),
display("Failed to resolve network address {}", err.as_ref().map_or("".to_string(), |e| e.to_string())),
}
#[doc = "Authentication failure"]
Auth {
description("Authentication failure"),
display("Authentication failure"),
}
#[doc = "Unrecognised protocol"]
BadProtocol {
description("Bad protocol"),
display("Bad protocol"),
}
#[doc = "Expired message"]
Expired {
description("Expired message"),
display("Expired message"),
}
#[doc = "Peer not found"]
PeerNotFound {
description("Peer not found"),
display("Peer not found"),
}
#[doc = "Peer is disconnected"]
Disconnect(reason: DisconnectReason) {
description("Peer disconnected"),
display("Peer disconnected: {}", reason),
}
#[doc = "Invalid node id"]
InvalidNodeId {
description("Invalid node id"),
display("Invalid node id"),
}
#[doc = "Packet size is over the protocol limit"]
OversizedPacket {
description("Packet is too large"),
display("Packet is too large"),
}
}
}
impl From<rlp::DecoderError> for Error {
fn from(_err: rlp::DecoderError) -> Self {
ErrorKind::Auth.into()
}
}
impl From<ethkey::Error> for Error {
fn
|
(_err: ethkey::Error) -> Self {
ErrorKind::Auth.into()
}
}
impl From<crypto::Error> for Error {
fn from(_err: crypto::Error) -> Self {
ErrorKind::Auth.into()
}
}
#[test]
fn test_errors() {
assert_eq!(DisconnectReason::ClientQuit, DisconnectReason::from_u8(8));
let mut r = DisconnectReason::DisconnectRequested;
for i in 0..20 {
r = DisconnectReason::from_u8(i);
}
assert_eq!(DisconnectReason::Unknown, r);
match *<Error as From<rlp::DecoderError>>::from(rlp::DecoderError::RlpIsTooBig).kind() {
ErrorKind::Auth => {},
_ => panic!("Unexpected error"),
}
match *<Error as From<crypto::Error>>::from(crypto::Error::InvalidMessage).kind() {
ErrorKind::Auth => {},
_ => panic!("Unexpected error"),
}
}
|
from
|
identifier_name
|
error.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::{io, net, fmt};
use io::IoError;
use {rlp, ethkey, crypto, snappy};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum DisconnectReason
{
DisconnectRequested,
TCPError,
BadProtocol,
UselessPeer,
TooManyPeers,
DuplicatePeer,
IncompatibleProtocol,
NullIdentity,
ClientQuit,
UnexpectedIdentity,
LocalIdentity,
PingTimeout,
Unknown,
}
impl DisconnectReason {
pub fn from_u8(n: u8) -> DisconnectReason {
match n {
0 => DisconnectReason::DisconnectRequested,
1 => DisconnectReason::TCPError,
2 => DisconnectReason::BadProtocol,
3 => DisconnectReason::UselessPeer,
4 => DisconnectReason::TooManyPeers,
5 => DisconnectReason::DuplicatePeer,
6 => DisconnectReason::IncompatibleProtocol,
7 => DisconnectReason::NullIdentity,
8 => DisconnectReason::ClientQuit,
9 => DisconnectReason::UnexpectedIdentity,
10 => DisconnectReason::LocalIdentity,
11 => DisconnectReason::PingTimeout,
_ => DisconnectReason::Unknown,
}
}
}
impl fmt::Display for DisconnectReason {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::DisconnectReason::*;
let msg = match *self {
DisconnectRequested => "disconnect requested",
TCPError => "TCP error",
BadProtocol => "bad protocol",
UselessPeer => "useless peer",
TooManyPeers => "too many peers",
DuplicatePeer => "duplicate peer",
IncompatibleProtocol => "incompatible protocol",
NullIdentity => "null identity",
ClientQuit => "client quit",
UnexpectedIdentity => "unexpected identity",
LocalIdentity => "local identity",
PingTimeout => "ping timeout",
Unknown => "unknown",
};
f.write_str(msg)
}
}
error_chain! {
foreign_links {
SocketIo(IoError) #[doc = "Socket IO error."];
Io(io::Error) #[doc = "Error concerning the Rust standard library's IO subsystem."];
AddressParse(net::AddrParseError) #[doc = "Error concerning the network address parsing subsystem."];
Decompression(snappy::InvalidInput) #[doc = "Decompression error."];
}
errors {
#[doc = "Error concerning the network address resolution subsystem."]
AddressResolve(err: Option<io::Error>) {
description("Failed to resolve network address"),
display("Failed to resolve network address {}", err.as_ref().map_or("".to_string(), |e| e.to_string())),
}
#[doc = "Authentication failure"]
Auth {
description("Authentication failure"),
display("Authentication failure"),
}
#[doc = "Unrecognised protocol"]
BadProtocol {
description("Bad protocol"),
display("Bad protocol"),
}
#[doc = "Expired message"]
Expired {
description("Expired message"),
|
PeerNotFound {
description("Peer not found"),
display("Peer not found"),
}
#[doc = "Peer is disconnected"]
Disconnect(reason: DisconnectReason) {
description("Peer disconnected"),
display("Peer disconnected: {}", reason),
}
#[doc = "Invalid node id"]
InvalidNodeId {
description("Invalid node id"),
display("Invalid node id"),
}
#[doc = "Packet size is over the protocol limit"]
OversizedPacket {
description("Packet is too large"),
display("Packet is too large"),
}
}
}
impl From<rlp::DecoderError> for Error {
fn from(_err: rlp::DecoderError) -> Self {
ErrorKind::Auth.into()
}
}
impl From<ethkey::Error> for Error {
fn from(_err: ethkey::Error) -> Self {
ErrorKind::Auth.into()
}
}
impl From<crypto::Error> for Error {
fn from(_err: crypto::Error) -> Self {
ErrorKind::Auth.into()
}
}
#[test]
fn test_errors() {
assert_eq!(DisconnectReason::ClientQuit, DisconnectReason::from_u8(8));
let mut r = DisconnectReason::DisconnectRequested;
for i in 0..20 {
r = DisconnectReason::from_u8(i);
}
assert_eq!(DisconnectReason::Unknown, r);
match *<Error as From<rlp::DecoderError>>::from(rlp::DecoderError::RlpIsTooBig).kind() {
ErrorKind::Auth => {},
_ => panic!("Unexpected error"),
}
match *<Error as From<crypto::Error>>::from(crypto::Error::InvalidMessage).kind() {
ErrorKind::Auth => {},
_ => panic!("Unexpected error"),
}
}
|
display("Expired message"),
}
#[doc = "Peer not found"]
|
random_line_split
|
error.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::{io, net, fmt};
use io::IoError;
use {rlp, ethkey, crypto, snappy};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum DisconnectReason
{
DisconnectRequested,
TCPError,
BadProtocol,
UselessPeer,
TooManyPeers,
DuplicatePeer,
IncompatibleProtocol,
NullIdentity,
ClientQuit,
UnexpectedIdentity,
LocalIdentity,
PingTimeout,
Unknown,
}
impl DisconnectReason {
pub fn from_u8(n: u8) -> DisconnectReason {
match n {
0 => DisconnectReason::DisconnectRequested,
1 => DisconnectReason::TCPError,
2 => DisconnectReason::BadProtocol,
3 => DisconnectReason::UselessPeer,
4 => DisconnectReason::TooManyPeers,
5 => DisconnectReason::DuplicatePeer,
6 => DisconnectReason::IncompatibleProtocol,
7 => DisconnectReason::NullIdentity,
8 => DisconnectReason::ClientQuit,
9 => DisconnectReason::UnexpectedIdentity,
10 => DisconnectReason::LocalIdentity,
11 => DisconnectReason::PingTimeout,
_ => DisconnectReason::Unknown,
}
}
}
impl fmt::Display for DisconnectReason {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::DisconnectReason::*;
let msg = match *self {
DisconnectRequested => "disconnect requested",
TCPError => "TCP error",
BadProtocol => "bad protocol",
UselessPeer => "useless peer",
TooManyPeers => "too many peers",
DuplicatePeer => "duplicate peer",
IncompatibleProtocol => "incompatible protocol",
NullIdentity => "null identity",
ClientQuit => "client quit",
UnexpectedIdentity => "unexpected identity",
LocalIdentity => "local identity",
PingTimeout => "ping timeout",
Unknown => "unknown",
};
f.write_str(msg)
}
}
error_chain! {
foreign_links {
SocketIo(IoError) #[doc = "Socket IO error."];
Io(io::Error) #[doc = "Error concerning the Rust standard library's IO subsystem."];
AddressParse(net::AddrParseError) #[doc = "Error concerning the network address parsing subsystem."];
Decompression(snappy::InvalidInput) #[doc = "Decompression error."];
}
errors {
#[doc = "Error concerning the network address resolution subsystem."]
AddressResolve(err: Option<io::Error>) {
description("Failed to resolve network address"),
display("Failed to resolve network address {}", err.as_ref().map_or("".to_string(), |e| e.to_string())),
}
#[doc = "Authentication failure"]
Auth {
description("Authentication failure"),
display("Authentication failure"),
}
#[doc = "Unrecognised protocol"]
BadProtocol {
description("Bad protocol"),
display("Bad protocol"),
}
#[doc = "Expired message"]
Expired {
description("Expired message"),
display("Expired message"),
}
#[doc = "Peer not found"]
PeerNotFound {
description("Peer not found"),
display("Peer not found"),
}
#[doc = "Peer is disconnected"]
Disconnect(reason: DisconnectReason) {
description("Peer disconnected"),
display("Peer disconnected: {}", reason),
}
#[doc = "Invalid node id"]
InvalidNodeId {
description("Invalid node id"),
display("Invalid node id"),
}
#[doc = "Packet size is over the protocol limit"]
OversizedPacket {
description("Packet is too large"),
display("Packet is too large"),
}
}
}
impl From<rlp::DecoderError> for Error {
fn from(_err: rlp::DecoderError) -> Self {
ErrorKind::Auth.into()
}
}
impl From<ethkey::Error> for Error {
fn from(_err: ethkey::Error) -> Self
|
}
impl From<crypto::Error> for Error {
fn from(_err: crypto::Error) -> Self {
ErrorKind::Auth.into()
}
}
#[test]
fn test_errors() {
assert_eq!(DisconnectReason::ClientQuit, DisconnectReason::from_u8(8));
let mut r = DisconnectReason::DisconnectRequested;
for i in 0..20 {
r = DisconnectReason::from_u8(i);
}
assert_eq!(DisconnectReason::Unknown, r);
match *<Error as From<rlp::DecoderError>>::from(rlp::DecoderError::RlpIsTooBig).kind() {
ErrorKind::Auth => {},
_ => panic!("Unexpected error"),
}
match *<Error as From<crypto::Error>>::from(crypto::Error::InvalidMessage).kind() {
ErrorKind::Auth => {},
_ => panic!("Unexpected error"),
}
}
|
{
ErrorKind::Auth.into()
}
|
identifier_body
|
lib.rs
|
//! # Rocket - Code Generation
//!
//! This crate implements the code generation portions of Rocket. This includes
//! custom derives, custom attributes, procedural macros, and lints. The
//! documentation here is purely technical. The code generation facilities are
//! documented thoroughly in the [Rocket programming
//! guide](https://rocket.rs/guide).
//!
//! ## Custom Attributes
//!
//! This crate implements the following custom attributes:
//!
//! * **route**
//! * **get**
//! * **put**
//! * **post**
//! * **delete**
//! * **head**
//! * **patch**
//! * **options**
//! * **error**
//!
//! The grammar for all _route_ attributes, including **route**, **get**,
//! **put**, **post**, **delete**, **head**, **patch**, and **options** is
//! defined as:
//!
//! <pre>
//! route := METHOD? '(' ('path' '=')? path (',' kv_param)* ')'
//!
//! path := URI_SEG
//! | DYNAMIC_PARAM
//! | '?' DYNAMIC_PARAM
//! | path '/' path
//! (string literal)
//!
//! kv_param := 'rank' '=' INTEGER
//! | 'format' '=' STRING
//! | 'data' '=' DYNAMIC_PARAM
|
//! URI_SEG := Valid HTTP URI Segment
//! DYNAMIC_PARAM := '<' IDENT '..'? '>' (string literal)
//! </pre>
//!
//! Note that the **route** attribute takes a method as its first argument,
//! while the remaining do not. That is, **route** looks like:
//!
//! #[route(GET, path = "/hello")]
//!
//! while the equivalent using **get** looks like:
//!
//! #[get("/hello")]
//!
//! The syntax for the **error** attribute is:
//!
//! <pre>
//! error := INTEGER
//! </pre>
//!
//! A use of the `error` attribute looks like:
//!
//! #[error(404)]
//!
//! ## Custom Derives
//!
//! This crate implements the following custom derives:
//!
//! * **FromForm**
//!
//! ## Procedural Macros
//!
//! This crate implements the following procedural macros:
//!
//! * **routes**
//! * **errors**
//!
//! The syntax for both of these is defined as:
//!
//! <pre>
//! macro := PATH (',' macro)*
//!
//! PATH := a path, as defined by Rust
//! </pre>
//!
//! ## Lints
//!
//! This crate implements the following lints:
//!
//! * **unmounted_route**: defaults to _warn_
//!
//! emits a warning when a declared route is not mounted
//!
//! * **unmanaged_state**: defaults to _warn_
//!
//! emits a warning when a `State<T>` request guard is used in a mounted
//! route without managing a value for `T`
//!
//! # Debugging Codegen
//!
//! When the `ROCKET_CODEGEN_DEBUG` environment variable is set, this crate logs
//! the items it has generated to the console at compile-time. For example, you
//! might run the following to build a Rocket application with codegen logging
//! enabled:
//!
//! ```
//! ROCKET_CODEGEN_DEBUG=1 cargo build
//! ```
#![crate_type = "dylib"]
#![feature(quote, concat_idents, plugin_registrar, rustc_private, unicode)]
#![feature(custom_attribute)]
#![feature(i128_type)]
#![allow(unused_attributes)]
#![allow(deprecated)]
#[macro_use] extern crate log;
#[macro_use] extern crate rustc;
extern crate syntax;
extern crate syntax_ext;
extern crate syntax_pos;
extern crate rustc_plugin;
extern crate rocket;
#[macro_use] mod utils;
mod parser;
mod macros;
mod decorators;
mod lints;
use std::env;
use rustc_plugin::Registry;
use syntax::ext::base::SyntaxExtension;
use syntax::symbol::Symbol;
const DEBUG_ENV_VAR: &'static str = "ROCKET_CODEGEN_DEBUG";
const PARAM_PREFIX: &'static str = "rocket_param_";
const ROUTE_STRUCT_PREFIX: &'static str = "static_rocket_route_info_for_";
const CATCH_STRUCT_PREFIX: &'static str = "static_rocket_catch_info_for_";
const ROUTE_FN_PREFIX: &'static str = "rocket_route_fn_";
const CATCH_FN_PREFIX: &'static str = "rocket_catch_fn_";
const ROUTE_ATTR: &'static str = "rocket_route";
const ROUTE_INFO_ATTR: &'static str = "rocket_route_info";
const CATCHER_ATTR: &'static str = "rocket_catcher";
macro_rules! register_decorators {
($registry:expr, $($name:expr => $func:ident),+) => (
$($registry.register_syntax_extension(Symbol::intern($name),
SyntaxExtension::MultiModifier(Box::new(decorators::$func)));
)+
)
}
macro_rules! register_derives {
($registry:expr, $($name:expr => $func:ident),+) => (
$($registry.register_custom_derive(Symbol::intern($name),
SyntaxExtension::MultiDecorator(Box::new(decorators::$func)));
)+
)
}
macro_rules! register_lints {
($registry:expr, $($item:ident),+) => ($(
$registry.register_late_lint_pass(Box::new(lints::$item::default()));
)+)
}
/// Compiler hook for Rust to register plugins.
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
// Enable logging early if the DEBUG_ENV_VAR is set.
if env::var(DEBUG_ENV_VAR).is_ok() {
::rocket::logger::init(::rocket::LoggingLevel::Debug);
}
reg.register_macro("routes", macros::routes);
reg.register_macro("errors", macros::errors);
register_derives!(reg,
"derive_FromForm" => from_form_derive
);
register_decorators!(reg,
"error" => error_decorator,
"route" => route_decorator,
"get" => get_decorator,
"put" => put_decorator,
"post" => post_decorator,
"delete" => delete_decorator,
"head" => head_decorator,
"patch" => patch_decorator,
"options" => options_decorator
);
register_lints!(reg, RocketLint);
}
|
//!
//! INTEGER := isize, as defined by Rust
//! STRING := UTF-8 string literal, as defined by Rust
//! IDENT := Valid identifier, as defined by Rust
//!
|
random_line_split
|
lib.rs
|
//! # Rocket - Code Generation
//!
//! This crate implements the code generation portions of Rocket. This includes
//! custom derives, custom attributes, procedural macros, and lints. The
//! documentation here is purely technical. The code generation facilities are
//! documented thoroughly in the [Rocket programming
//! guide](https://rocket.rs/guide).
//!
//! ## Custom Attributes
//!
//! This crate implements the following custom attributes:
//!
//! * **route**
//! * **get**
//! * **put**
//! * **post**
//! * **delete**
//! * **head**
//! * **patch**
//! * **options**
//! * **error**
//!
//! The grammar for all _route_ attributes, including **route**, **get**,
//! **put**, **post**, **delete**, **head**, **patch**, and **options** is
//! defined as:
//!
//! <pre>
//! route := METHOD? '(' ('path' '=')? path (',' kv_param)* ')'
//!
//! path := URI_SEG
//! | DYNAMIC_PARAM
//! | '?' DYNAMIC_PARAM
//! | path '/' path
//! (string literal)
//!
//! kv_param := 'rank' '=' INTEGER
//! | 'format' '=' STRING
//! | 'data' '=' DYNAMIC_PARAM
//!
//! INTEGER := isize, as defined by Rust
//! STRING := UTF-8 string literal, as defined by Rust
//! IDENT := Valid identifier, as defined by Rust
//!
//! URI_SEG := Valid HTTP URI Segment
//! DYNAMIC_PARAM := '<' IDENT '..'? '>' (string literal)
//! </pre>
//!
//! Note that the **route** attribute takes a method as its first argument,
//! while the remaining do not. That is, **route** looks like:
//!
//! #[route(GET, path = "/hello")]
//!
//! while the equivalent using **get** looks like:
//!
//! #[get("/hello")]
//!
//! The syntax for the **error** attribute is:
//!
//! <pre>
//! error := INTEGER
//! </pre>
//!
//! A use of the `error` attribute looks like:
//!
//! #[error(404)]
//!
//! ## Custom Derives
//!
//! This crate implements the following custom derives:
//!
//! * **FromForm**
//!
//! ## Procedural Macros
//!
//! This crate implements the following procedural macros:
//!
//! * **routes**
//! * **errors**
//!
//! The syntax for both of these is defined as:
//!
//! <pre>
//! macro := PATH (',' macro)*
//!
//! PATH := a path, as defined by Rust
//! </pre>
//!
//! ## Lints
//!
//! This crate implements the following lints:
//!
//! * **unmounted_route**: defaults to _warn_
//!
//! emits a warning when a declared route is not mounted
//!
//! * **unmanaged_state**: defaults to _warn_
//!
//! emits a warning when a `State<T>` request guard is used in a mounted
//! route without managing a value for `T`
//!
//! # Debugging Codegen
//!
//! When the `ROCKET_CODEGEN_DEBUG` environment variable is set, this crate logs
//! the items it has generated to the console at compile-time. For example, you
//! might run the following to build a Rocket application with codegen logging
//! enabled:
//!
//! ```
//! ROCKET_CODEGEN_DEBUG=1 cargo build
//! ```
#![crate_type = "dylib"]
#![feature(quote, concat_idents, plugin_registrar, rustc_private, unicode)]
#![feature(custom_attribute)]
#![feature(i128_type)]
#![allow(unused_attributes)]
#![allow(deprecated)]
#[macro_use] extern crate log;
#[macro_use] extern crate rustc;
extern crate syntax;
extern crate syntax_ext;
extern crate syntax_pos;
extern crate rustc_plugin;
extern crate rocket;
#[macro_use] mod utils;
mod parser;
mod macros;
mod decorators;
mod lints;
use std::env;
use rustc_plugin::Registry;
use syntax::ext::base::SyntaxExtension;
use syntax::symbol::Symbol;
const DEBUG_ENV_VAR: &'static str = "ROCKET_CODEGEN_DEBUG";
const PARAM_PREFIX: &'static str = "rocket_param_";
const ROUTE_STRUCT_PREFIX: &'static str = "static_rocket_route_info_for_";
const CATCH_STRUCT_PREFIX: &'static str = "static_rocket_catch_info_for_";
const ROUTE_FN_PREFIX: &'static str = "rocket_route_fn_";
const CATCH_FN_PREFIX: &'static str = "rocket_catch_fn_";
const ROUTE_ATTR: &'static str = "rocket_route";
const ROUTE_INFO_ATTR: &'static str = "rocket_route_info";
const CATCHER_ATTR: &'static str = "rocket_catcher";
macro_rules! register_decorators {
($registry:expr, $($name:expr => $func:ident),+) => (
$($registry.register_syntax_extension(Symbol::intern($name),
SyntaxExtension::MultiModifier(Box::new(decorators::$func)));
)+
)
}
macro_rules! register_derives {
($registry:expr, $($name:expr => $func:ident),+) => (
$($registry.register_custom_derive(Symbol::intern($name),
SyntaxExtension::MultiDecorator(Box::new(decorators::$func)));
)+
)
}
macro_rules! register_lints {
($registry:expr, $($item:ident),+) => ($(
$registry.register_late_lint_pass(Box::new(lints::$item::default()));
)+)
}
/// Compiler hook for Rust to register plugins.
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
// Enable logging early if the DEBUG_ENV_VAR is set.
if env::var(DEBUG_ENV_VAR).is_ok()
|
reg.register_macro("routes", macros::routes);
reg.register_macro("errors", macros::errors);
register_derives!(reg,
"derive_FromForm" => from_form_derive
);
register_decorators!(reg,
"error" => error_decorator,
"route" => route_decorator,
"get" => get_decorator,
"put" => put_decorator,
"post" => post_decorator,
"delete" => delete_decorator,
"head" => head_decorator,
"patch" => patch_decorator,
"options" => options_decorator
);
register_lints!(reg, RocketLint);
}
|
{
::rocket::logger::init(::rocket::LoggingLevel::Debug);
}
|
conditional_block
|
lib.rs
|
//! # Rocket - Code Generation
//!
//! This crate implements the code generation portions of Rocket. This includes
//! custom derives, custom attributes, procedural macros, and lints. The
//! documentation here is purely technical. The code generation facilities are
//! documented thoroughly in the [Rocket programming
//! guide](https://rocket.rs/guide).
//!
//! ## Custom Attributes
//!
//! This crate implements the following custom attributes:
//!
//! * **route**
//! * **get**
//! * **put**
//! * **post**
//! * **delete**
//! * **head**
//! * **patch**
//! * **options**
//! * **error**
//!
//! The grammar for all _route_ attributes, including **route**, **get**,
//! **put**, **post**, **delete**, **head**, **patch**, and **options** is
//! defined as:
//!
//! <pre>
//! route := METHOD? '(' ('path' '=')? path (',' kv_param)* ')'
//!
//! path := URI_SEG
//! | DYNAMIC_PARAM
//! | '?' DYNAMIC_PARAM
//! | path '/' path
//! (string literal)
//!
//! kv_param := 'rank' '=' INTEGER
//! | 'format' '=' STRING
//! | 'data' '=' DYNAMIC_PARAM
//!
//! INTEGER := isize, as defined by Rust
//! STRING := UTF-8 string literal, as defined by Rust
//! IDENT := Valid identifier, as defined by Rust
//!
//! URI_SEG := Valid HTTP URI Segment
//! DYNAMIC_PARAM := '<' IDENT '..'? '>' (string literal)
//! </pre>
//!
//! Note that the **route** attribute takes a method as its first argument,
//! while the remaining do not. That is, **route** looks like:
//!
//! #[route(GET, path = "/hello")]
//!
//! while the equivalent using **get** looks like:
//!
//! #[get("/hello")]
//!
//! The syntax for the **error** attribute is:
//!
//! <pre>
//! error := INTEGER
//! </pre>
//!
//! A use of the `error` attribute looks like:
//!
//! #[error(404)]
//!
//! ## Custom Derives
//!
//! This crate implements the following custom derives:
//!
//! * **FromForm**
//!
//! ## Procedural Macros
//!
//! This crate implements the following procedural macros:
//!
//! * **routes**
//! * **errors**
//!
//! The syntax for both of these is defined as:
//!
//! <pre>
//! macro := PATH (',' macro)*
//!
//! PATH := a path, as defined by Rust
//! </pre>
//!
//! ## Lints
//!
//! This crate implements the following lints:
//!
//! * **unmounted_route**: defaults to _warn_
//!
//! emits a warning when a declared route is not mounted
//!
//! * **unmanaged_state**: defaults to _warn_
//!
//! emits a warning when a `State<T>` request guard is used in a mounted
//! route without managing a value for `T`
//!
//! # Debugging Codegen
//!
//! When the `ROCKET_CODEGEN_DEBUG` environment variable is set, this crate logs
//! the items it has generated to the console at compile-time. For example, you
//! might run the following to build a Rocket application with codegen logging
//! enabled:
//!
//! ```
//! ROCKET_CODEGEN_DEBUG=1 cargo build
//! ```
#![crate_type = "dylib"]
#![feature(quote, concat_idents, plugin_registrar, rustc_private, unicode)]
#![feature(custom_attribute)]
#![feature(i128_type)]
#![allow(unused_attributes)]
#![allow(deprecated)]
#[macro_use] extern crate log;
#[macro_use] extern crate rustc;
extern crate syntax;
extern crate syntax_ext;
extern crate syntax_pos;
extern crate rustc_plugin;
extern crate rocket;
#[macro_use] mod utils;
mod parser;
mod macros;
mod decorators;
mod lints;
use std::env;
use rustc_plugin::Registry;
use syntax::ext::base::SyntaxExtension;
use syntax::symbol::Symbol;
const DEBUG_ENV_VAR: &'static str = "ROCKET_CODEGEN_DEBUG";
const PARAM_PREFIX: &'static str = "rocket_param_";
const ROUTE_STRUCT_PREFIX: &'static str = "static_rocket_route_info_for_";
const CATCH_STRUCT_PREFIX: &'static str = "static_rocket_catch_info_for_";
const ROUTE_FN_PREFIX: &'static str = "rocket_route_fn_";
const CATCH_FN_PREFIX: &'static str = "rocket_catch_fn_";
const ROUTE_ATTR: &'static str = "rocket_route";
const ROUTE_INFO_ATTR: &'static str = "rocket_route_info";
const CATCHER_ATTR: &'static str = "rocket_catcher";
macro_rules! register_decorators {
($registry:expr, $($name:expr => $func:ident),+) => (
$($registry.register_syntax_extension(Symbol::intern($name),
SyntaxExtension::MultiModifier(Box::new(decorators::$func)));
)+
)
}
macro_rules! register_derives {
($registry:expr, $($name:expr => $func:ident),+) => (
$($registry.register_custom_derive(Symbol::intern($name),
SyntaxExtension::MultiDecorator(Box::new(decorators::$func)));
)+
)
}
macro_rules! register_lints {
($registry:expr, $($item:ident),+) => ($(
$registry.register_late_lint_pass(Box::new(lints::$item::default()));
)+)
}
/// Compiler hook for Rust to register plugins.
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry)
|
"head" => head_decorator,
"patch" => patch_decorator,
"options" => options_decorator
);
register_lints!(reg, RocketLint);
}
|
{
// Enable logging early if the DEBUG_ENV_VAR is set.
if env::var(DEBUG_ENV_VAR).is_ok() {
::rocket::logger::init(::rocket::LoggingLevel::Debug);
}
reg.register_macro("routes", macros::routes);
reg.register_macro("errors", macros::errors);
register_derives!(reg,
"derive_FromForm" => from_form_derive
);
register_decorators!(reg,
"error" => error_decorator,
"route" => route_decorator,
"get" => get_decorator,
"put" => put_decorator,
"post" => post_decorator,
"delete" => delete_decorator,
|
identifier_body
|
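The lib.rs record above documents Rocket's route-attribute grammar and the `routes!` macro. As an illustrative, hedged sketch only — the handler names and paths are invented, and the `ignite`/`mount`/`launch` calls are assumed from the nightly-era Rocket API rather than taken from the record — a minimal application using those attributes might look like this:

```
// Sketch only: assumes the nightly-era `rocket` + `rocket_codegen` crates.
#![feature(plugin)]
#![plugin(rocket_codegen)]

extern crate rocket;

// `get` is shorthand for `route` with the method fixed to GET.
#[get("/hello/<name>", rank = 2)]
fn hello(name: String) -> String {
    format!("Hello, {}!", name)
}

// The same kind of route spelled with the generic `route` attribute.
#[route(GET, path = "/bye/<name>")]
fn bye(name: String) -> String {
    format!("Bye, {}!", name)
}

fn main() {
    // `routes!` expands to references to the route metadata generated by the decorators.
    rocket::ignite().mount("/", routes![hello, bye]).launch();
}
```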
lib.rs
|
//! # Rocket - Code Generation
//!
//! This crate implements the code generation portions of Rocket. This includes
//! custom derives, custom attributes, procedural macros, and lints. The
//! documentation here is purely technical. The code generation facilities are
//! documented thoroughly in the [Rocket programming
//! guide](https://rocket.rs/guide).
//!
//! ## Custom Attributes
//!
//! This crate implements the following custom attributes:
//!
//! * **route**
//! * **get**
//! * **put**
//! * **post**
//! * **delete**
//! * **head**
//! * **patch**
//! * **options**
//! * **error**
//!
//! The grammar for all _route_ attributes, including **route**, **get**,
//! **put**, **post**, **delete**, **head**, **patch**, and **options** is
//! defined as:
//!
//! <pre>
//! route := METHOD? '(' ('path' '=')? path (',' kv_param)* ')'
//!
//! path := URI_SEG
//! | DYNAMIC_PARAM
//! | '?' DYNAMIC_PARAM
//! | path '/' path
//! (string literal)
//!
//! kv_param := 'rank' '=' INTEGER
//! | 'format' '=' STRING
//! | 'data' '=' DYNAMIC_PARAM
//!
//! INTEGER := isize, as defined by Rust
//! STRING := UTF-8 string literal, as defined by Rust
//! IDENT := Valid identifier, as defined by Rust
//!
//! URI_SEG := Valid HTTP URI Segment
//! DYNAMIC_PARAM := '<' IDENT '..'? '>' (string literal)
//! </pre>
//!
//! Note that the **route** attribute takes a method as its first argument,
//! while the remaining do not. That is, **route** looks like:
//!
//! #[route(GET, path = "/hello")]
//!
//! while the equivalent using **get** looks like:
//!
//! #[get("/hello")]
//!
//! The syntax for the **error** attribute is:
//!
//! <pre>
//! error := INTEGER
//! </pre>
//!
//! A use of the `error` attribute looks like:
//!
//! #[error(404)]
//!
//! ## Custom Derives
//!
//! This crate implements the following custom derives:
//!
//! * **FromForm**
//!
//! ## Procedural Macros
//!
//! This crate implements the following procedural macros:
//!
//! * **routes**
//! * **errors**
//!
//! The syntax for both of these is defined as:
//!
//! <pre>
//! macro := PATH (',' macro)*
//!
//! PATH := a path, as defined by Rust
//! </pre>
//!
//! ## Lints
//!
//! This crate implements the following lints:
//!
//! * **unmounted_route**: defaults to _warn_
//!
//! emits a warning when a declared route is not mounted
//!
//! * **unmanaged_state**: defaults to _warn_
//!
//! emits a warning when a `State<T>` request guard is used in a mounted
//! route without managing a value for `T`
//!
//! # Debugging Codegen
//!
//! When the `ROCKET_CODEGEN_DEBUG` environment variable is set, this crate logs
//! the items it has generated to the console at compile-time. For example, you
//! might run the following to build a Rocket application with codegen logging
//! enabled:
//!
//! ```
//! ROCKET_CODEGEN_DEBUG=1 cargo build
//! ```
#![crate_type = "dylib"]
#![feature(quote, concat_idents, plugin_registrar, rustc_private, unicode)]
#![feature(custom_attribute)]
#![feature(i128_type)]
#![allow(unused_attributes)]
#![allow(deprecated)]
#[macro_use] extern crate log;
#[macro_use] extern crate rustc;
extern crate syntax;
extern crate syntax_ext;
extern crate syntax_pos;
extern crate rustc_plugin;
extern crate rocket;
#[macro_use] mod utils;
mod parser;
mod macros;
mod decorators;
mod lints;
use std::env;
use rustc_plugin::Registry;
use syntax::ext::base::SyntaxExtension;
use syntax::symbol::Symbol;
const DEBUG_ENV_VAR: &'static str = "ROCKET_CODEGEN_DEBUG";
const PARAM_PREFIX: &'static str = "rocket_param_";
const ROUTE_STRUCT_PREFIX: &'static str = "static_rocket_route_info_for_";
const CATCH_STRUCT_PREFIX: &'static str = "static_rocket_catch_info_for_";
const ROUTE_FN_PREFIX: &'static str = "rocket_route_fn_";
const CATCH_FN_PREFIX: &'static str = "rocket_catch_fn_";
const ROUTE_ATTR: &'static str = "rocket_route";
const ROUTE_INFO_ATTR: &'static str = "rocket_route_info";
const CATCHER_ATTR: &'static str = "rocket_catcher";
macro_rules! register_decorators {
($registry:expr, $($name:expr => $func:ident),+) => (
$($registry.register_syntax_extension(Symbol::intern($name),
SyntaxExtension::MultiModifier(Box::new(decorators::$func)));
)+
)
}
macro_rules! register_derives {
($registry:expr, $($name:expr => $func:ident),+) => (
$($registry.register_custom_derive(Symbol::intern($name),
SyntaxExtension::MultiDecorator(Box::new(decorators::$func)));
)+
)
}
macro_rules! register_lints {
($registry:expr, $($item:ident),+) => ($(
$registry.register_late_lint_pass(Box::new(lints::$item::default()));
)+)
}
/// Compiler hook for Rust to register plugins.
#[plugin_registrar]
pub fn
|
(reg: &mut Registry) {
// Enable logging early if the DEBUG_ENV_VAR is set.
if env::var(DEBUG_ENV_VAR).is_ok() {
::rocket::logger::init(::rocket::LoggingLevel::Debug);
}
reg.register_macro("routes", macros::routes);
reg.register_macro("errors", macros::errors);
register_derives!(reg,
"derive_FromForm" => from_form_derive
);
register_decorators!(reg,
"error" => error_decorator,
"route" => route_decorator,
"get" => get_decorator,
"put" => put_decorator,
"post" => post_decorator,
"delete" => delete_decorator,
"head" => head_decorator,
"patch" => patch_decorator,
"options" => options_decorator
);
register_lints!(reg, RocketLint);
}
|
plugin_registrar
|
identifier_name
|
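The record above also documents the `error` attribute, the `FromForm` derive, and the `errors!` macro. Again as an assumed sketch (the form fields, routes, and the `catch` call reflect the nightly-era Rocket API and are not taken from the record), they might be combined like this:

```
// Sketch only: assumes the same nightly-era rocket + rocket_codegen setup.
#![feature(plugin, custom_derive)]
#![plugin(rocket_codegen)]

extern crate rocket;

use rocket::request::Form;

// The `FromForm` custom derive registered by the plugin.
#[derive(FromForm)]
struct Login {
    username: String,
    password: String,
}

#[post("/login", data = "<login>")]
fn login(login: Form<Login>) -> String {
    format!("Welcome, {}!", login.get().username)
}

// The `error` attribute registers a catcher for the given status code.
#[error(404)]
fn not_found() -> &'static str {
    "Nothing to see here."
}

fn main() {
    rocket::ignite()
        .mount("/", routes![login])
        .catch(errors![not_found])
        .launch();
}
```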
project.rs
|
use rustc_serialize::json;
use dao::ProtonDao;
use error::Error;
use project_types::{Project, SequenceData};
use utils;
/// Creates a new Proton project. Returns the public key of the root user.
pub fn new_project<PD: ProtonDao>(
dao: &PD,
name: &str,
layout_id: u32
) -> Result<String, Error> {
// Check that layout exists
let _ = try!(dao.get_layout(layout_id));
// Create keys
let (root_pub_key, root_private_key) = try!(utils::create_pub_priv_keys());
// Add project root user
let root_uid = try!(dao.add_initial_user(name, &root_private_key, &root_pub_key));
// Give initial user admin permissions
try!(dao.add_initial_permission(root_uid));
// Create new project
let _ = try!(dao.new_project(name, layout_id));
// Return root user's public key
Ok(root_pub_key)
}
/// Fetches and returns a project
pub fn get_project<PD: ProtonDao>(
dao: &PD,
proj_name: &str
) -> Result<Project, Error> {
dao.get_project(proj_name)
}
/// Finds and returns a project's layout id
pub fn get_layout_id<PD: ProtonDao>(
dao: &PD,
proj_name: &str
) -> Result<u32, Error> {
// Check that project name is valid
    if !Project::validate_name(proj_name) {
return Err(Error::InvalidProjectName(proj_name.to_owned()));
}
// Check that project exists
let project = try!(dao.get_project(proj_name));
// Return layout id
Ok(project.layout_id)
}
/// Gets all sequence data in the project's playlist
pub fn get_playlist_data<PD: ProtonDao> (
dao: &PD,
proj_name: &str
) -> Result<String, Error> {
// Check that project exists
let project = try!(dao.get_project(proj_name));
let mut playlist_data = Vec::with_capacity(project.playlist.len());
// Go through each sequence in the playlist
for seqid in project.playlist.iter() {
print!("Getting sequence {}...", seqid);
// Get sequence
let sequence = try!(dao.get_sequence(seqid.to_owned()));
println!("Sequence '{}' retrieved", &sequence.name);
print!("Getting channel ids...");
// Get the sequence's channel ids
let chan_ids = try!(dao.get_channel_ids(seqid.to_owned()));
if chan_ids.len() < 1
|
println!("Channel ids loaded.");
print!("Getting data...");
// Create vector for sequence data
// Up to 512 channels per universe, plus one because DMX starts at 1
let mut seq_data = vec![vec![0; sequence.num_frames as usize]; 513];
// Get each channel's data and put it in the correct vector slot
for chanid in chan_ids {
let channel = try!(dao.get_channel(chanid));
let chan_data = try!(dao.get_data(seqid.to_owned(), chanid.to_owned()));
seq_data[channel.channel_dmx as usize] = chan_data;
}
let sequence_data = SequenceData {
name: sequence.name,
frame_dur_ms: sequence.frame_duration_ms,
music_file: sequence.music_file_name,
num_frames: sequence.num_frames,
data: seq_data
};
playlist_data.push(sequence_data);
println!("done");
}
print!("Encoding playlist data..");
json::encode(&playlist_data).map_err(Error::JsonEncode)
}
|
{
// TODO: make error
println!("No channels found");
panic!("No channels found");
}
|
conditional_block
|
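`get_playlist_data` in the project.rs record above sizes each sequence buffer at 513 rows because DMX channel numbers run from 1 to 512, leaving row 0 unused. A standalone sketch of that indexing convention (the frame count and channel values below are invented for illustration):

```
// Illustration of the 513-row buffer convention used by `get_playlist_data`:
// DMX channels are numbered 1..=512, so index 0 is deliberately left empty.
fn main() {
    let num_frames = 4usize;
    // One row per possible DMX channel, plus the unused row 0.
    let mut seq_data = vec![vec![0u8; num_frames]; 513];

    // Pretend channel 10 carries data for this sequence.
    let channel_dmx = 10usize;
    seq_data[channel_dmx] = vec![255, 128, 64, 0];

    assert_eq!(seq_data.len(), 513);
    assert_eq!(seq_data[0], vec![0, 0, 0, 0]); // row 0 stays untouched
    assert_eq!(seq_data[10], vec![255, 128, 64, 0]);
}
```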
project.rs
|
use rustc_serialize::json;
use dao::ProtonDao;
use error::Error;
use project_types::{Project, SequenceData};
use utils;
/// Creates a new Proton project. Returns the public key of the root user.
pub fn new_project<PD: ProtonDao>(
dao: &PD,
name: &str,
layout_id: u32
) -> Result<String, Error> {
// Check that layout exists
let _ = try!(dao.get_layout(layout_id));
// Create keys
let (root_pub_key, root_private_key) = try!(utils::create_pub_priv_keys());
// Add project root user
let root_uid = try!(dao.add_initial_user(name, &root_private_key, &root_pub_key));
// Give initial user admin permissions
try!(dao.add_initial_permission(root_uid));
// Create new project
let _ = try!(dao.new_project(name, layout_id));
// Return root user's public key
Ok(root_pub_key)
}
/// Fetches and returns a project
pub fn get_project<PD: ProtonDao>(
dao: &PD,
proj_name: &str
) -> Result<Project, Error> {
dao.get_project(proj_name)
}
/// Finds and returns a project's layout id
pub fn get_layout_id<PD: ProtonDao>(
dao: &PD,
proj_name: &str
) -> Result<u32, Error> {
// Check that project name is valid
    if !Project::validate_name(proj_name) {
return Err(Error::InvalidProjectName(proj_name.to_owned()));
}
// Check that project exists
let project = try!(dao.get_project(proj_name));
// Return layout id
Ok(project.layout_id)
}
/// Gets all sequence data in the project's playlist
pub fn
|
<PD: ProtonDao> (
dao: &PD,
proj_name: &str
) -> Result<String, Error> {
// Check that project exists
let project = try!(dao.get_project(proj_name));
let mut playlist_data = Vec::with_capacity(project.playlist.len());
// Go through each sequence in the playlist
for seqid in project.playlist.iter() {
print!("Getting sequence {}...", seqid);
// Get sequence
let sequence = try!(dao.get_sequence(seqid.to_owned()));
println!("Sequence '{}' retrieved", &sequence.name);
print!("Getting channel ids...");
// Get the sequence's channel ids
let chan_ids = try!(dao.get_channel_ids(seqid.to_owned()));
if chan_ids.len() < 1 {
// TODO: make error
println!("No channels found");
panic!("No channels found");
}
println!("Channel ids loaded.");
print!("Getting data...");
// Create vector for sequence data
// Up to 512 channels per universe, plus one because DMX starts at 1
let mut seq_data = vec![vec![0; sequence.num_frames as usize]; 513];
// Get each channel's data and put it in the correct vector slot
for chanid in chan_ids {
let channel = try!(dao.get_channel(chanid));
let chan_data = try!(dao.get_data(seqid.to_owned(), chanid.to_owned()));
seq_data[channel.channel_dmx as usize] = chan_data;
}
let sequence_data = SequenceData {
name: sequence.name,
frame_dur_ms: sequence.frame_duration_ms,
music_file: sequence.music_file_name,
num_frames: sequence.num_frames,
data: seq_data
};
playlist_data.push(sequence_data);
println!("done");
}
print!("Encoding playlist data..");
json::encode(&playlist_data).map_err(Error::JsonEncode)
}
|
get_playlist_data
|
identifier_name
|
project.rs
|
use rustc_serialize::json;
use dao::ProtonDao;
use error::Error;
use project_types::{Project, SequenceData};
use utils;
/// Creates a new Proton project. Returns the public key of the root user.
pub fn new_project<PD: ProtonDao>(
dao: &PD,
name: &str,
layout_id: u32
) -> Result<String, Error> {
// Check that layout exists
let _ = try!(dao.get_layout(layout_id));
// Create keys
let (root_pub_key, root_private_key) = try!(utils::create_pub_priv_keys());
// Add project root user
let root_uid = try!(dao.add_initial_user(name, &root_private_key, &root_pub_key));
// Give initial user admin permissions
try!(dao.add_initial_permission(root_uid));
// Create new project
let _ = try!(dao.new_project(name, layout_id));
// Return root user's public key
Ok(root_pub_key)
}
/// Fetches and returns a project
pub fn get_project<PD: ProtonDao>(
dao: &PD,
proj_name: &str
) -> Result<Project, Error> {
dao.get_project(proj_name)
}
/// Finds and returns a project's layout id
pub fn get_layout_id<PD: ProtonDao>(
dao: &PD,
proj_name: &str
) -> Result<u32, Error> {
// Check that project name is valid
    if !Project::validate_name(proj_name) {
return Err(Error::InvalidProjectName(proj_name.to_owned()));
}
// Check that project exists
let project = try!(dao.get_project(proj_name));
// Return layout id
Ok(project.layout_id)
}
/// Gets all sequence data in the project's playlist
pub fn get_playlist_data<PD: ProtonDao> (
dao: &PD,
proj_name: &str
) -> Result<String, Error> {
// Check that project exists
let project = try!(dao.get_project(proj_name));
let mut playlist_data = Vec::with_capacity(project.playlist.len());
// Go through each sequence in the playlist
for seqid in project.playlist.iter() {
print!("Getting sequence {}...", seqid);
// Get sequence
let sequence = try!(dao.get_sequence(seqid.to_owned()));
println!("Sequence '{}' retrieved", &sequence.name);
print!("Getting channel ids...");
// Get the sequence's channel ids
let chan_ids = try!(dao.get_channel_ids(seqid.to_owned()));
if chan_ids.len() < 1 {
// TODO: make error
println!("No channels found");
panic!("No channels found");
}
println!("Channel ids loaded.");
print!("Getting data...");
|
// Get each channel's data and put it in the correct vector slot
for chanid in chan_ids {
let channel = try!(dao.get_channel(chanid));
let chan_data = try!(dao.get_data(seqid.to_owned(), chanid.to_owned()));
seq_data[channel.channel_dmx as usize] = chan_data;
}
let sequence_data = SequenceData {
name: sequence.name,
frame_dur_ms: sequence.frame_duration_ms,
music_file: sequence.music_file_name,
num_frames: sequence.num_frames,
data: seq_data
};
playlist_data.push(sequence_data);
println!("done");
}
print!("Encoding playlist data..");
json::encode(&playlist_data).map_err(Error::JsonEncode)
}
|
// Create vector for sequence data
// Up to 512 channels per universe, plus one because DMX starts at 1
let mut seq_data = vec![vec![0; sequence.num_frames as usize]; 513];
|
random_line_split
|
project.rs
|
use rustc_serialize::json;
use dao::ProtonDao;
use error::Error;
use project_types::{Project, SequenceData};
use utils;
/// Creates a new Proton project. Returns the public key of the root user.
pub fn new_project<PD: ProtonDao>(
dao: &PD,
name: &str,
layout_id: u32
) -> Result<String, Error> {
// Check that layout exists
let _ = try!(dao.get_layout(layout_id));
// Create keys
let (root_pub_key, root_private_key) = try!(utils::create_pub_priv_keys());
// Add project root user
let root_uid = try!(dao.add_initial_user(name, &root_private_key, &root_pub_key));
// Give initial user admin permissions
try!(dao.add_initial_permission(root_uid));
// Create new project
let _ = try!(dao.new_project(name, layout_id));
// Return root user's public key
Ok(root_pub_key)
}
/// Fetches and returns a project
pub fn get_project<PD: ProtonDao>(
dao: &PD,
proj_name: &str
) -> Result<Project, Error> {
dao.get_project(proj_name)
}
/// Finds and returns a project's layout id
pub fn get_layout_id<PD: ProtonDao>(
dao: &PD,
proj_name: &str
) -> Result<u32, Error> {
// Check that project name is valid
    if !Project::validate_name(proj_name) {
return Err(Error::InvalidProjectName(proj_name.to_owned()));
}
// Check that project exists
let project = try!(dao.get_project(proj_name));
// Return layout id
Ok(project.layout_id)
}
/// Gets all sequence data in the project's playlist
pub fn get_playlist_data<PD: ProtonDao> (
dao: &PD,
proj_name: &str
) -> Result<String, Error>
|
if chan_ids.len() < 1 {
// TODO: make error
println!("No channels found");
panic!("No channels found");
}
println!("Channel ids loaded.");
print!("Getting data...");
// Create vector for sequence data
// Up to 512 channels per universe, plus one because DMX starts at 1
let mut seq_data = vec![vec![0; sequence.num_frames as usize]; 513];
// Get each channel's data and put it in the correct vector slot
for chanid in chan_ids {
let channel = try!(dao.get_channel(chanid));
let chan_data = try!(dao.get_data(seqid.to_owned(), chanid.to_owned()));
seq_data[channel.channel_dmx as usize] = chan_data;
}
let sequence_data = SequenceData {
name: sequence.name,
frame_dur_ms: sequence.frame_duration_ms,
music_file: sequence.music_file_name,
num_frames: sequence.num_frames,
data: seq_data
};
playlist_data.push(sequence_data);
println!("done");
}
print!("Encoding playlist data..");
json::encode(&playlist_data).map_err(Error::JsonEncode)
}
|
{
// Check that project exists
let project = try!(dao.get_project(proj_name));
let mut playlist_data = Vec::with_capacity(project.playlist.len());
// Go through each sequence in the playlist
for seqid in project.playlist.iter() {
print!("Getting sequence {}...", seqid);
// Get sequence
let sequence = try!(dao.get_sequence(seqid.to_owned()));
println!("Sequence '{}' retrieved", &sequence.name);
print!("Getting channel ids...");
// Get the sequence's channel ids
let chan_ids = try!(dao.get_channel_ids(seqid.to_owned()));
|
identifier_body
|
transaction.rs
|
// The transaction model for handling surface states in Smithay
//
// The caching logic in `cache.rs` provides surfaces with a queue of
// pending states identified with numeric commit ids, allowing the compositor
// to precisely control *when* a state becomes active. This file is the second
// half: these identified states are grouped into transactions, which allow the
// synchronization of updates across surfaces.
//
// There are 2 main cases when the state of multiple surfaces must be updated
// atomically:
// - synchronized subsurfaces must have their state updated at the same time as their parents
// - The upcoming `wp_transaction` protocol
//
// In these situations, the individual states in a surface queue are grouped into a transaction
// and are all applied atomically when the transaction itself is applied. The logic for creating
// new transactions is currently the following:
//
// - Each surface has an implicit "pending" transaction, into which its newly committed state is
// recorded
// - Furthermore, on commit, the pending transaction of all synchronized child subsurfaces is merged
// into the current surface's pending transaction, and a new implicit transaction is started for those
// children (logic is implemented in `handlers.rs`, in `PrivateSurfaceData::commit`).
// - Then, still on commit, if the surface is not a synchronized subsurface, its pending transaction is
// directly applied
//
// This last step will change once we have support for explicit synchronization (and further in the future,
// of the wp_transaction protocol). Explicit synchronization introduces a notion of blockers: the transaction
// cannot be applied before all blockers are released, and thus must wait for it to be the case.
//
// For those situations, the (currently unused) `TransactionQueue` will come into play. It is a per-client
// queue of transactions that stores and applies them, both respecting their topological order
// (ensuring that for each surface, states are applied in the correct order) and ensuring that all transactions
// wait until all their blockers are resolved before being applied. If a blocker is cancelled, the whole transaction
// it blocks is cancelled as well, and simply dropped. Thanks to the logic of `Cache::apply_state`, the
// associated state will be applied automatically when the next valid transaction is applied, ensuring
// global coherence.
// A significant part of the logic of this module is not yet used,
// but will be once proper transaction & blockers support is
// added to smithay
#![allow(dead_code)]
use std::{
collections::HashSet,
sync::{Arc, Mutex},
};
use wayland_server::protocol::wl_surface::WlSurface;
use crate::wayland::Serial;
use super::tree::PrivateSurfaceData;
pub trait Blocker {
fn state(&self) -> BlockerState;
}
pub enum BlockerState {
Pending,
Released,
Cancelled,
}
struct TransactionState {
surfaces: Vec<(WlSurface, Serial)>,
blockers: Vec<Box<dyn Blocker + Send>>,
}
impl Default for TransactionState {
fn default() -> Self {
TransactionState {
surfaces: Vec::new(),
|
impl TransactionState {
fn insert(&mut self, surface: WlSurface, id: Serial) {
if let Some(place) = self.surfaces.iter_mut().find(|place| place.0 == surface) {
// the surface is already in the list, update the serial
if place.1 < id {
place.1 = id;
}
} else {
// the surface is not in the list, insert it
self.surfaces.push((surface, id));
}
}
}
enum TransactionInner {
Data(TransactionState),
Fused(Arc<Mutex<TransactionInner>>),
}
pub(crate) struct PendingTransaction {
inner: Arc<Mutex<TransactionInner>>,
}
impl Default for PendingTransaction {
fn default() -> Self {
PendingTransaction {
inner: Arc::new(Mutex::new(TransactionInner::Data(Default::default()))),
}
}
}
impl PendingTransaction {
fn with_inner_state<T, F: FnOnce(&mut TransactionState) -> T>(&self, f: F) -> T {
let mut next = self.inner.clone();
loop {
let tmp = match *next.lock().unwrap() {
TransactionInner::Data(ref mut state) => return f(state),
TransactionInner::Fused(ref into) => into.clone(),
};
next = tmp;
}
}
pub(crate) fn insert_state(&self, surface: WlSurface, id: Serial) {
self.with_inner_state(|state| state.insert(surface, id))
}
    pub(crate) fn add_blocker<B: Blocker + Send + 'static>(&self, blocker: B) {
self.with_inner_state(|state| state.blockers.push(Box::new(blocker) as Box<_>))
}
pub(crate) fn is_same_as(&self, other: &PendingTransaction) -> bool {
let ptr1 = self.with_inner_state(|state| state as *const _);
let ptr2 = other.with_inner_state(|state| state as *const _);
ptr1 == ptr2
}
pub(crate) fn merge_into(&self, into: &PendingTransaction) {
if self.is_same_as(into) {
// nothing to do
return;
}
// extract our pending surfaces and change our link
let mut next = self.inner.clone();
let my_state;
loop {
let tmp = {
let mut guard = next.lock().unwrap();
match *guard {
TransactionInner::Data(ref mut state) => {
my_state = std::mem::take(state);
*guard = TransactionInner::Fused(into.inner.clone());
break;
}
TransactionInner::Fused(ref into) => into.clone(),
}
};
next = tmp;
}
// fuse our surfaces into our new transaction state
self.with_inner_state(|state| {
for (surface, id) in my_state.surfaces {
state.insert(surface, id);
}
state.blockers.extend(my_state.blockers);
});
}
pub(crate) fn finalize(mut self) -> Transaction {
// When finalizing a transaction, this *must* be the last handle to this transaction
loop {
let inner = match Arc::try_unwrap(self.inner) {
Ok(mutex) => mutex.into_inner().unwrap(),
Err(_) => panic!("Attempting to finalize a transaction but handle is not the last."),
};
match inner {
TransactionInner::Data(TransactionState {
surfaces, blockers,..
}) => return Transaction { surfaces, blockers },
TransactionInner::Fused(into) => self.inner = into,
}
}
}
}
pub(crate) struct Transaction {
surfaces: Vec<(WlSurface, Serial)>,
blockers: Vec<Box<dyn Blocker + Send>>,
}
impl Transaction {
/// Computes the global state of the transaction with regard to its blockers
///
/// The logic is:
///
/// - if at least one blocker is cancelled, the transaction is cancelled
/// - otherwise, if at least one blocker is pending, the transaction is pending
/// - otherwise, all blockers are released, and the transaction is also released
pub(crate) fn state(&self) -> BlockerState {
use BlockerState::*;
self.blockers
.iter()
.fold(Released, |acc, blocker| match (acc, blocker.state()) {
(Cancelled, _) | (_, Cancelled) => Cancelled,
(Pending, _) | (_, Pending) => Pending,
(Released, Released) => Released,
})
}
pub(crate) fn apply(self) {
for (surface, id) in self.surfaces {
PrivateSurfaceData::with_states(&surface, |states| {
states.cached_state.apply_state(id);
})
}
}
}
// This queue should be per-client
pub(crate) struct TransactionQueue {
transactions: Vec<Transaction>,
// we keep the hashset around to reuse allocations
seen_surfaces: HashSet<u32>,
}
impl Default for TransactionQueue {
fn default() -> Self {
TransactionQueue {
transactions: Vec::new(),
seen_surfaces: HashSet::new(),
}
}
}
impl TransactionQueue {
pub(crate) fn append(&mut self, t: Transaction) {
self.transactions.push(t);
}
pub(crate) fn apply_ready(&mut self) {
// this is a very non-optimized implementation
// we just iterate over the queue of transactions, keeping track of which
// surface we have seen as they encode transaction dependencies
self.seen_surfaces.clear();
// manually iterate as we're going to modify the Vec while iterating on it
let mut i = 0;
// the loop will terminate, as at every iteration either i is incremented by 1
        // or the length of self.transactions is reduced by 1.
        while i < self.transactions.len() {
let mut skip = false;
// does the transaction have any active blocker?
match self.transactions[i].state() {
BlockerState::Cancelled => {
// this transaction is cancelled, remove it without further processing
self.transactions.remove(i);
continue;
}
BlockerState::Pending => {
skip = true;
}
BlockerState::Released => {}
}
// if not, does this transaction depend on any previous transaction?
            if !skip {
for (s, _) in &self.transactions[i].surfaces {
                    if !s.as_ref().is_alive() {
continue;
}
if self.seen_surfaces.contains(&s.as_ref().id()) {
skip = true;
break;
}
}
}
if skip {
// this transaction is not yet ready and should be skipped, add its surfaces to our
// seen list
for (s, _) in &self.transactions[i].surfaces {
                    if !s.as_ref().is_alive() {
continue;
}
self.seen_surfaces.insert(s.as_ref().id());
}
i += 1;
} else {
// this transaction is to be applied, yay!
self.transactions.remove(i).apply();
}
}
}
}
|
blockers: Vec::new(),
}
}
}
|
random_line_split
|
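The transaction.rs record above introduces the `Blocker` trait that gates when a transaction may be applied. As a hedged sketch — the trait and `BlockerState` enum are restated locally so the snippet compiles on its own, and `FlagBlocker` is an invented example, not part of Smithay — a trivial blocker driven by a shared atomic flag could look like this:

```
use std::sync::{
    atomic::{AtomicU8, Ordering},
    Arc,
};

// Local restatement of the types from the record above, for a self-contained example.
pub enum BlockerState {
    Pending,
    Released,
    Cancelled,
}

pub trait Blocker {
    fn state(&self) -> BlockerState;
}

/// A blocker driven by a shared atomic flag: 0 = pending, 1 = released, anything else = cancelled.
pub struct FlagBlocker {
    flag: Arc<AtomicU8>,
}

impl Blocker for FlagBlocker {
    fn state(&self) -> BlockerState {
        match self.flag.load(Ordering::Acquire) {
            0 => BlockerState::Pending,
            1 => BlockerState::Released,
            _ => BlockerState::Cancelled,
        }
    }
}

fn main() {
    let flag = Arc::new(AtomicU8::new(0));
    let blocker = FlagBlocker { flag: flag.clone() };
    assert!(matches!(blocker.state(), BlockerState::Pending));

    // Releasing the flag releases the blocker, which would let the transaction apply.
    flag.store(1, Ordering::Release);
    assert!(matches!(blocker.state(), BlockerState::Released));
}
```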
transaction.rs
|
// The transaction model for handling surface states in Smithay
//
// The caching logic in `cache.rs` provides surfaces with a queue of
// pending states identified with numeric commit ids, allowing the compositor
// to precisely control *when* a state becomes active. This file is the second
// half: these identified states are grouped into transactions, which allow the
// synchronization of updates across surfaces.
//
// There are 2 main cases when the state of multiple surfaces must be updated
// atomically:
// - synchronized subsurfaces must have their state updated at the same time as their parents
// - The upcoming `wp_transaction` protocol
//
// In these situations, the individual states in a surface queue are grouped into a transaction
// and are all applied atomically when the transaction itself is applied. The logic for creating
// new transactions is currently the following:
//
// - Each surface has an implicit "pending" transaction, into which its newly committed state is
// recorded
// - Furthermore, on commit, the pending transaction of all synchronized child subsurfaces is merged
// into the current surface's pending transaction, and a new implicit transaction is started for those
// children (logic is implemented in `handlers.rs`, in `PrivateSurfaceData::commit`).
// - Then, still on commit, if the surface is not a synchronized subsurface, its pending transaction is
// directly applied
//
// This last step will change once we have support for explicit synchronization (and further in the future,
// of the wp_transaction protocol). Explicit synchronization introduces a notion of blockers: the transaction
// cannot be applied before all blockers are released, and thus must wait for it to be the case.
//
// For those situations, the (currently unused) `TransactionQueue` will come into play. It is a per-client
// queue of transactions that stores and applies them, both respecting their topological order
// (ensuring that for each surface, states are applied in the correct order) and ensuring that all transactions
// wait until all their blockers are resolved before being applied. If a blocker is cancelled, the whole transaction
// it blocks is cancelled as well, and simply dropped. Thanks to the logic of `Cache::apply_state`, the
// associated state will be applied automatically when the next valid transaction is applied, ensuring
// global coherence.
// A significant part of the logic of this module is not yet used,
// but will be once proper transaction & blockers support is
// added to smithay
#![allow(dead_code)]
use std::{
collections::HashSet,
sync::{Arc, Mutex},
};
use wayland_server::protocol::wl_surface::WlSurface;
use crate::wayland::Serial;
use super::tree::PrivateSurfaceData;
pub trait Blocker {
fn state(&self) -> BlockerState;
}
pub enum BlockerState {
Pending,
Released,
Cancelled,
}
struct TransactionState {
surfaces: Vec<(WlSurface, Serial)>,
blockers: Vec<Box<dyn Blocker + Send>>,
}
impl Default for TransactionState {
fn default() -> Self {
TransactionState {
surfaces: Vec::new(),
blockers: Vec::new(),
}
}
}
impl TransactionState {
fn insert(&mut self, surface: WlSurface, id: Serial) {
if let Some(place) = self.surfaces.iter_mut().find(|place| place.0 == surface) {
// the surface is already in the list, update the serial
if place.1 < id {
place.1 = id;
}
} else {
// the surface is not in the list, insert it
self.surfaces.push((surface, id));
}
}
}
enum TransactionInner {
Data(TransactionState),
Fused(Arc<Mutex<TransactionInner>>),
}
pub(crate) struct PendingTransaction {
inner: Arc<Mutex<TransactionInner>>,
}
impl Default for PendingTransaction {
fn default() -> Self {
PendingTransaction {
inner: Arc::new(Mutex::new(TransactionInner::Data(Default::default()))),
}
}
}
impl PendingTransaction {
fn with_inner_state<T, F: FnOnce(&mut TransactionState) -> T>(&self, f: F) -> T {
let mut next = self.inner.clone();
loop {
let tmp = match *next.lock().unwrap() {
TransactionInner::Data(ref mut state) => return f(state),
TransactionInner::Fused(ref into) => into.clone(),
};
next = tmp;
}
}
pub(crate) fn insert_state(&self, surface: WlSurface, id: Serial) {
self.with_inner_state(|state| state.insert(surface, id))
}
    pub(crate) fn add_blocker<B: Blocker + Send + 'static>(&self, blocker: B) {
self.with_inner_state(|state| state.blockers.push(Box::new(blocker) as Box<_>))
}
pub(crate) fn is_same_as(&self, other: &PendingTransaction) -> bool {
let ptr1 = self.with_inner_state(|state| state as *const _);
let ptr2 = other.with_inner_state(|state| state as *const _);
ptr1 == ptr2
}
pub(crate) fn merge_into(&self, into: &PendingTransaction) {
if self.is_same_as(into) {
// nothing to do
return;
}
// extract our pending surfaces and change our link
let mut next = self.inner.clone();
let my_state;
loop {
let tmp = {
let mut guard = next.lock().unwrap();
match *guard {
TransactionInner::Data(ref mut state) => {
my_state = std::mem::take(state);
*guard = TransactionInner::Fused(into.inner.clone());
break;
}
TransactionInner::Fused(ref into) => into.clone(),
}
};
next = tmp;
}
// fuse our surfaces into our new transaction state
self.with_inner_state(|state| {
for (surface, id) in my_state.surfaces {
state.insert(surface, id);
}
state.blockers.extend(my_state.blockers);
});
}
pub(crate) fn finalize(mut self) -> Transaction {
// When finalizing a transaction, this *must* be the last handle to this transaction
loop {
let inner = match Arc::try_unwrap(self.inner) {
Ok(mutex) => mutex.into_inner().unwrap(),
Err(_) => panic!("Attempting to finalize a transaction but handle is not the last."),
};
match inner {
TransactionInner::Data(TransactionState {
surfaces, blockers,..
}) => return Transaction { surfaces, blockers },
TransactionInner::Fused(into) => self.inner = into,
}
}
}
}
pub(crate) struct Transaction {
surfaces: Vec<(WlSurface, Serial)>,
blockers: Vec<Box<dyn Blocker + Send>>,
}
impl Transaction {
/// Computes the global state of the transaction with regard to its blockers
///
/// The logic is:
///
/// - if at least one blocker is cancelled, the transaction is cancelled
/// - otherwise, if at least one blocker is pending, the transaction is pending
/// - otherwise, all blockers are released, and the transaction is also released
pub(crate) fn state(&self) -> BlockerState {
use BlockerState::*;
self.blockers
.iter()
.fold(Released, |acc, blocker| match (acc, blocker.state()) {
(Cancelled, _) | (_, Cancelled) => Cancelled,
(Pending, _) | (_, Pending) => Pending,
(Released, Released) => Released,
})
}
pub(crate) fn apply(self) {
for (surface, id) in self.surfaces {
PrivateSurfaceData::with_states(&surface, |states| {
states.cached_state.apply_state(id);
})
}
}
}
// This queue should be per-client
pub(crate) struct TransactionQueue {
transactions: Vec<Transaction>,
// we keep the hashset around to reuse allocations
seen_surfaces: HashSet<u32>,
}
impl Default for TransactionQueue {
fn default() -> Self {
TransactionQueue {
transactions: Vec::new(),
seen_surfaces: HashSet::new(),
}
}
}
impl TransactionQueue {
pub(crate) fn append(&mut self, t: Transaction) {
self.transactions.push(t);
}
pub(crate) fn apply_ready(&mut self) {
// this is a very non-optimized implementation
// we just iterate over the queue of transactions, keeping track of which
// surface we have seen as they encode transaction dependencies
self.seen_surfaces.clear();
// manually iterate as we're going to modify the Vec while iterating on it
let mut i = 0;
// the loop will terminate, as at every iteration either i is incremented by 1
        // or the length of self.transactions is reduced by 1.
        while i < self.transactions.len() {
let mut skip = false;
// does the transaction have any active blocker?
match self.transactions[i].state() {
BlockerState::Cancelled => {
// this transaction is cancelled, remove it without further processing
self.transactions.remove(i);
continue;
}
BlockerState::Pending => {
skip = true;
}
BlockerState::Released => {}
}
// if not, does this transaction depend on any previous transaction?
            if !skip {
for (s, _) in &self.transactions[i].surfaces {
                    if !s.as_ref().is_alive() {
continue;
}
if self.seen_surfaces.contains(&s.as_ref().id()) {
skip = true;
break;
}
}
}
if skip {
// this transaction is not yet ready and should be skipped, add its surfaces to our
// seen list
for (s, _) in &self.transactions[i].surfaces {
                    if !s.as_ref().is_alive() {
continue;
}
self.seen_surfaces.insert(s.as_ref().id());
}
i += 1;
} else
|
}
}
}
|
{
// this transaction is to be applied, yay!
self.transactions.remove(i).apply();
}
|
conditional_block
|
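`TransactionQueue::apply_ready` in these records skips a transaction whenever one of its surfaces was already touched by an earlier transaction that is still waiting, so per-surface ordering is preserved. A standalone sketch of that rule, with surfaces reduced to plain `u32` ids (an invented simplification, not Smithay's types):

```
use std::collections::HashSet;

// A transaction may be applied only if none of its surfaces were touched by an
// earlier, still-pending transaction.
fn ready_to_apply(surfaces: &[u32], seen: &HashSet<u32>) -> bool {
    surfaces.iter().all(|id| !seen.contains(id))
}

fn main() {
    let mut seen = HashSet::new();

    // An earlier transaction touching surface 1 is still blocked: record its surface.
    seen.insert(1u32);

    // A later transaction touching surface 1 must wait for the earlier one...
    assert!(!ready_to_apply(&[1, 2], &seen));
    // ...while one touching only unrelated surfaces may be applied now.
    assert!(ready_to_apply(&[3], &seen));
}
```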
transaction.rs
|
// The transaction model for handling surface states in Smithay
//
// The caching logic in `cache.rs` provides surfaces with a queue of
// pending states identified with numeric commit ids, allowing the compositor
// to precisely control *when* a state becomes active. This file is the second
// half: these identified states are grouped into transactions, which allow the
// synchronization of updates across surfaces.
//
// There are 2 main cases when the state of multiple surfaces must be updated
// atomically:
// - synchronized subsurfaces must have their state updated at the same time as their parents
// - The upcoming `wp_transaction` protocol
//
// In these situations, the individual states in a surface queue are grouped into a transaction
// and are all applied atomically when the transaction itself is applied. The logic for creating
// new transactions is currently the following:
//
// - Each surface has an implicit "pending" transaction, into which its newly committed state is
// recorded
// - Furthermore, on commit, the pending transaction of all synchronized child subsurfaces is merged
// into the current surface's pending transaction, and a new implicit transaction is started for those
// children (logic is implemented in `handlers.rs`, in `PrivateSurfaceData::commit`).
// - Then, still on commit, if the surface is not a synchronized subsurface, its pending transaction is
// directly applied
//
// This last step will change once we have support for explicit synchronization (and further in the future,
// of the wp_transaction protocol). Explicit synchronization introduces a notion of blockers: the transaction
// cannot be applied before all blockers are released, and thus must wait for it to be the case.
//
// For those situations, the (currently unused) `TransactionQueue` will come into play. It is a per-client
// queue of transactions that stores and applies them, both respecting their topological order
// (ensuring that for each surface, states are applied in the correct order) and ensuring that all transactions
// wait until all their blockers are resolved before being applied. If a blocker is cancelled, the whole transaction
// it blocks is cancelled as well, and simply dropped. Thanks to the logic of `Cache::apply_state`, the
// associated state will be applied automatically when the next valid transaction is applied, ensuring
// global coherence.
// A significant part of the logic of this module is not yet used,
// but will be once proper transaction & blockers support is
// added to smithay
#![allow(dead_code)]
use std::{
collections::HashSet,
sync::{Arc, Mutex},
};
use wayland_server::protocol::wl_surface::WlSurface;
use crate::wayland::Serial;
use super::tree::PrivateSurfaceData;
pub trait Blocker {
fn state(&self) -> BlockerState;
}
pub enum BlockerState {
Pending,
Released,
Cancelled,
}
struct TransactionState {
surfaces: Vec<(WlSurface, Serial)>,
blockers: Vec<Box<dyn Blocker + Send>>,
}
impl Default for TransactionState {
fn default() -> Self {
TransactionState {
surfaces: Vec::new(),
blockers: Vec::new(),
}
}
}
impl TransactionState {
fn insert(&mut self, surface: WlSurface, id: Serial) {
if let Some(place) = self.surfaces.iter_mut().find(|place| place.0 == surface) {
// the surface is already in the list, update the serial
if place.1 < id {
place.1 = id;
}
} else {
// the surface is not in the list, insert it
self.surfaces.push((surface, id));
}
}
}
enum TransactionInner {
Data(TransactionState),
Fused(Arc<Mutex<TransactionInner>>),
}
pub(crate) struct PendingTransaction {
inner: Arc<Mutex<TransactionInner>>,
}
impl Default for PendingTransaction {
fn default() -> Self {
PendingTransaction {
inner: Arc::new(Mutex::new(TransactionInner::Data(Default::default()))),
}
}
}
impl PendingTransaction {
fn with_inner_state<T, F: FnOnce(&mut TransactionState) -> T>(&self, f: F) -> T {
let mut next = self.inner.clone();
loop {
let tmp = match *next.lock().unwrap() {
TransactionInner::Data(ref mut state) => return f(state),
TransactionInner::Fused(ref into) => into.clone(),
};
next = tmp;
}
}
pub(crate) fn insert_state(&self, surface: WlSurface, id: Serial) {
self.with_inner_state(|state| state.insert(surface, id))
}
    pub(crate) fn add_blocker<B: Blocker + Send + 'static>(&self, blocker: B)
|
pub(crate) fn is_same_as(&self, other: &PendingTransaction) -> bool {
let ptr1 = self.with_inner_state(|state| state as *const _);
let ptr2 = other.with_inner_state(|state| state as *const _);
ptr1 == ptr2
}
pub(crate) fn merge_into(&self, into: &PendingTransaction) {
if self.is_same_as(into) {
// nothing to do
return;
}
// extract our pending surfaces and change our link
let mut next = self.inner.clone();
let my_state;
loop {
let tmp = {
let mut guard = next.lock().unwrap();
match *guard {
TransactionInner::Data(ref mut state) => {
my_state = std::mem::take(state);
*guard = TransactionInner::Fused(into.inner.clone());
break;
}
TransactionInner::Fused(ref into) => into.clone(),
}
};
next = tmp;
}
// fuse our surfaces into our new transaction state
self.with_inner_state(|state| {
for (surface, id) in my_state.surfaces {
state.insert(surface, id);
}
state.blockers.extend(my_state.blockers);
});
}
pub(crate) fn finalize(mut self) -> Transaction {
// When finalizing a transaction, this *must* be the last handle to this transaction
loop {
let inner = match Arc::try_unwrap(self.inner) {
Ok(mutex) => mutex.into_inner().unwrap(),
Err(_) => panic!("Attempting to finalize a transaction but handle is not the last."),
};
match inner {
TransactionInner::Data(TransactionState {
surfaces, blockers,..
}) => return Transaction { surfaces, blockers },
TransactionInner::Fused(into) => self.inner = into,
}
}
}
}
pub(crate) struct Transaction {
surfaces: Vec<(WlSurface, Serial)>,
blockers: Vec<Box<dyn Blocker + Send>>,
}
impl Transaction {
/// Computes the global state of the transaction with regard to its blockers
///
/// The logic is:
///
/// - if at least one blocker is cancelled, the transaction is cancelled
/// - otherwise, if at least one blocker is pending, the transaction is pending
/// - otherwise, all blockers are released, and the transaction is also released
pub(crate) fn state(&self) -> BlockerState {
use BlockerState::*;
self.blockers
.iter()
.fold(Released, |acc, blocker| match (acc, blocker.state()) {
(Cancelled, _) | (_, Cancelled) => Cancelled,
(Pending, _) | (_, Pending) => Pending,
(Released, Released) => Released,
})
}
pub(crate) fn apply(self) {
for (surface, id) in self.surfaces {
PrivateSurfaceData::with_states(&surface, |states| {
states.cached_state.apply_state(id);
})
}
}
}
// This queue should be per-client
pub(crate) struct TransactionQueue {
transactions: Vec<Transaction>,
// we keep the hashset around to reuse allocations
seen_surfaces: HashSet<u32>,
}
impl Default for TransactionQueue {
fn default() -> Self {
TransactionQueue {
transactions: Vec::new(),
seen_surfaces: HashSet::new(),
}
}
}
impl TransactionQueue {
pub(crate) fn append(&mut self, t: Transaction) {
self.transactions.push(t);
}
pub(crate) fn apply_ready(&mut self) {
// this is a very non-optimized implementation
// we just iterate over the queue of transactions, keeping track of which
// surface we have seen as they encode transaction dependencies
self.seen_surfaces.clear();
// manually iterate as we're going to modify the Vec while iterating on it
let mut i = 0;
// the loop will terminate, as at every iteration either i is incremented by 1
// or the lenght of self.transactions is reduced by 1.
while i <= self.transactions.len() {
let mut skip = false;
// does the transaction have any active blocker?
match self.transactions[i].state() {
BlockerState::Cancelled => {
// this transaction is cancelled, remove it without further processing
self.transactions.remove(i);
continue;
}
BlockerState::Pending => {
skip = true;
}
BlockerState::Released => {}
}
// if not, does this transaction depend on any previous transaction?
            if !skip {
for (s, _) in &self.transactions[i].surfaces {
                    if !s.as_ref().is_alive() {
continue;
}
if self.seen_surfaces.contains(&s.as_ref().id()) {
skip = true;
break;
}
}
}
if skip {
// this transaction is not yet ready and should be skipped, add its surfaces to our
// seen list
for (s, _) in &self.transactions[i].surfaces {
                    if !s.as_ref().is_alive() {
continue;
}
self.seen_surfaces.insert(s.as_ref().id());
}
i += 1;
} else {
// this transaction is to be applied, yay!
self.transactions.remove(i).apply();
}
}
}
}
|
{
self.with_inner_state(|state| state.blockers.push(Box::new(blocker) as Box<_>))
}
|
identifier_body
|
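`Transaction::state` in these records folds the states of all blockers with the precedence Cancelled > Pending > Released (an empty blocker list counts as Released). The same fold, restated as a standalone function over a local copy of the enum (an illustration, not Smithay's API):

```
#[derive(Clone, Copy, Debug, PartialEq)]
enum BlockerState {
    Pending,
    Released,
    Cancelled,
}

// Cancelled dominates Pending, which dominates Released; no blockers means Released.
fn combined(states: &[BlockerState]) -> BlockerState {
    use BlockerState::*;
    states.iter().fold(Released, |acc, s| match (acc, *s) {
        (Cancelled, _) | (_, Cancelled) => Cancelled,
        (Pending, _) | (_, Pending) => Pending,
        (Released, Released) => Released,
    })
}

fn main() {
    use BlockerState::*;
    assert_eq!(combined(&[]), Released);
    assert_eq!(combined(&[Released, Pending, Released]), Pending);
    assert_eq!(combined(&[Pending, Cancelled]), Cancelled);
}
```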
transaction.rs
|
// The transaction model for handling surface states in Smithay
//
// The caching logic in `cache.rs` provides surfaces with a queue of
// pending states identified with numeric commit ids, allowing the compositor
// to precisely control *when* a state becomes active. This file is the second
// half: these identified states are grouped into transactions, which allow the
// synchronization of updates across surfaces.
//
// There are 2 main cases when the state of multiple surfaces must be updated
// atomically:
// - synchronized subsurfaces must have their state updated at the same time as their parents
// - The upcoming `wp_transaction` protocol
//
// In these situations, the individual states in a surface queue are grouped into a transaction
// and are all applied atomically when the transaction itself is applied. The logic for creating
// new transactions is currently the following:
//
// - Each surface has an implicit "pending" transaction, into which its newly committed state is
// recorded
// - Furthermore, on commit, the pending transaction of all synchronized child subsurfaces is merged
// into the current surface's pending transaction, and a new implicit transaction is started for those
// children (logic is implemented in `handlers.rs`, in `PrivateSurfaceData::commit`).
// - Then, still on commit, if the surface is not a synchronized subsurface, its pending transaction is
// directly applied
//
// This last step will change once we have support for explicit synchronization (and further in the future,
// of the wp_transaction protocol). Explicit synchronization introduces a notion of blockers: the transaction
// cannot be applied before all blockers are released, and thus must wait for it to be the case.
//
// For those situations, the (currently unused) `TransactionQueue` will come into play. It is a per-client
// queue of transactions that stores and applies them, both respecting their topological order
// (ensuring that for each surface, states are applied in the correct order) and ensuring that all transactions
// wait until all their blockers are resolved before being applied. If a blocker is cancelled, the whole transaction
// it blocks is cancelled as well, and simply dropped. Thanks to the logic of `Cache::apply_state`, the
// associated state will be applied automatically when the next valid transaction is applied, ensuring
// global coherence.
// A significant part of the logic of this module is not yet used,
// but will be once proper transaction & blockers support is
// added to smithay
#![allow(dead_code)]
use std::{
collections::HashSet,
sync::{Arc, Mutex},
};
use wayland_server::protocol::wl_surface::WlSurface;
use crate::wayland::Serial;
use super::tree::PrivateSurfaceData;
pub trait Blocker {
fn state(&self) -> BlockerState;
}
pub enum BlockerState {
Pending,
Released,
Cancelled,
}
struct
|
{
surfaces: Vec<(WlSurface, Serial)>,
blockers: Vec<Box<dyn Blocker + Send>>,
}
impl Default for TransactionState {
fn default() -> Self {
TransactionState {
surfaces: Vec::new(),
blockers: Vec::new(),
}
}
}
impl TransactionState {
fn insert(&mut self, surface: WlSurface, id: Serial) {
if let Some(place) = self.surfaces.iter_mut().find(|place| place.0 == surface) {
// the surface is already in the list, update the serial
if place.1 < id {
place.1 = id;
}
} else {
// the surface is not in the list, insert it
self.surfaces.push((surface, id));
}
}
}
enum TransactionInner {
Data(TransactionState),
Fused(Arc<Mutex<TransactionInner>>),
}
pub(crate) struct PendingTransaction {
inner: Arc<Mutex<TransactionInner>>,
}
impl Default for PendingTransaction {
fn default() -> Self {
PendingTransaction {
inner: Arc::new(Mutex::new(TransactionInner::Data(Default::default()))),
}
}
}
impl PendingTransaction {
fn with_inner_state<T, F: FnOnce(&mut TransactionState) -> T>(&self, f: F) -> T {
let mut next = self.inner.clone();
loop {
let tmp = match *next.lock().unwrap() {
TransactionInner::Data(ref mut state) => return f(state),
TransactionInner::Fused(ref into) => into.clone(),
};
next = tmp;
}
}
pub(crate) fn insert_state(&self, surface: WlSurface, id: Serial) {
self.with_inner_state(|state| state.insert(surface, id))
}
    pub(crate) fn add_blocker<B: Blocker + Send + 'static>(&self, blocker: B) {
self.with_inner_state(|state| state.blockers.push(Box::new(blocker) as Box<_>))
}
pub(crate) fn is_same_as(&self, other: &PendingTransaction) -> bool {
let ptr1 = self.with_inner_state(|state| state as *const _);
let ptr2 = other.with_inner_state(|state| state as *const _);
ptr1 == ptr2
}
pub(crate) fn merge_into(&self, into: &PendingTransaction) {
if self.is_same_as(into) {
// nothing to do
return;
}
// extract our pending surfaces and change our link
let mut next = self.inner.clone();
let my_state;
loop {
let tmp = {
let mut guard = next.lock().unwrap();
match *guard {
TransactionInner::Data(ref mut state) => {
my_state = std::mem::take(state);
*guard = TransactionInner::Fused(into.inner.clone());
break;
}
TransactionInner::Fused(ref into) => into.clone(),
}
};
next = tmp;
}
// fuse our surfaces into our new transaction state
self.with_inner_state(|state| {
for (surface, id) in my_state.surfaces {
state.insert(surface, id);
}
state.blockers.extend(my_state.blockers);
});
}
pub(crate) fn finalize(mut self) -> Transaction {
// When finalizing a transaction, this *must* be the last handle to this transaction
loop {
let inner = match Arc::try_unwrap(self.inner) {
Ok(mutex) => mutex.into_inner().unwrap(),
Err(_) => panic!("Attempting to finalize a transaction but handle is not the last."),
};
match inner {
TransactionInner::Data(TransactionState {
surfaces, blockers,..
}) => return Transaction { surfaces, blockers },
TransactionInner::Fused(into) => self.inner = into,
}
}
}
}
pub(crate) struct Transaction {
surfaces: Vec<(WlSurface, Serial)>,
blockers: Vec<Box<dyn Blocker + Send>>,
}
impl Transaction {
/// Computes the global state of the transaction with regard to its blockers
///
/// The logic is:
///
/// - if at least one blocker is cancelled, the transaction is cancelled
/// - otherwise, if at least one blocker is pending, the transaction is pending
/// - otherwise, all blockers are released, and the transaction is also released
pub(crate) fn state(&self) -> BlockerState {
use BlockerState::*;
self.blockers
.iter()
.fold(Released, |acc, blocker| match (acc, blocker.state()) {
(Cancelled, _) | (_, Cancelled) => Cancelled,
(Pending, _) | (_, Pending) => Pending,
(Released, Released) => Released,
})
}
pub(crate) fn apply(self) {
for (surface, id) in self.surfaces {
PrivateSurfaceData::with_states(&surface, |states| {
states.cached_state.apply_state(id);
})
}
}
}
// This queue should be per-client
pub(crate) struct TransactionQueue {
transactions: Vec<Transaction>,
// we keep the hashset around to reuse allocations
seen_surfaces: HashSet<u32>,
}
impl Default for TransactionQueue {
fn default() -> Self {
TransactionQueue {
transactions: Vec::new(),
seen_surfaces: HashSet::new(),
}
}
}
impl TransactionQueue {
pub(crate) fn append(&mut self, t: Transaction) {
self.transactions.push(t);
}
pub(crate) fn apply_ready(&mut self) {
// this is a very non-optimized implementation
// we just iterate over the queue of transactions, keeping track of which
// surface we have seen as they encode transaction dependencies
self.seen_surfaces.clear();
// manually iterate as we're going to modify the Vec while iterating on it
let mut i = 0;
// the loop will terminate, as at every iteration either i is incremented by 1
        // or the length of self.transactions is reduced by 1.
        while i < self.transactions.len() {
let mut skip = false;
// does the transaction have any active blocker?
match self.transactions[i].state() {
BlockerState::Cancelled => {
// this transaction is cancelled, remove it without further processing
self.transactions.remove(i);
continue;
}
BlockerState::Pending => {
skip = true;
}
BlockerState::Released => {}
}
// if not, does this transaction depend on any previous transaction?
            if !skip {
for (s, _) in &self.transactions[i].surfaces {
                    if !s.as_ref().is_alive() {
continue;
}
if self.seen_surfaces.contains(&s.as_ref().id()) {
skip = true;
break;
}
}
}
if skip {
// this transaction is not yet ready and should be skipped, add its surfaces to our
// seen list
for (s, _) in &self.transactions[i].surfaces {
                    if !s.as_ref().is_alive() {
continue;
}
self.seen_surfaces.insert(s.as_ref().id());
}
i += 1;
} else {
// this transaction is to be applied, yay!
self.transactions.remove(i).apply();
}
}
}
}
|
TransactionState
|
identifier_name
|
get_variable_length_field.rs
|
// Copyright (c) 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(custom_attribute, plugin, slice_bytes, vec_push_all)]
#![plugin(pnet_macros_plugin)]
extern crate pnet;
extern crate pnet_macros_support;
use pnet_macros_support::types::*;
#[packet]
pub struct WithVariableLengthField {
banana: u32be,
#[length = "3"]
var_length: Vec<u8>,
#[payload]
|
fn main() {
let data = [1, 1, 1, 1, 2, 3, 4, 5, 6];
let packet = WithVariableLengthFieldPacket::new(&data[..]).unwrap();
assert_eq!(packet.get_var_length(), vec![2, 3, 4]);
}
|
payload: Vec<u8>
}
|
random_line_split
|
get_variable_length_field.rs
|
// Copyright (c) 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(custom_attribute, plugin, slice_bytes, vec_push_all)]
#![plugin(pnet_macros_plugin)]
extern crate pnet;
extern crate pnet_macros_support;
use pnet_macros_support::types::*;
#[packet]
pub struct WithVariableLengthField {
banana: u32be,
#[length = "3"]
var_length: Vec<u8>,
#[payload]
payload: Vec<u8>
}
fn
|
() {
let data = [1, 1, 1, 1, 2, 3, 4, 5, 6];
let packet = WithVariableLengthFieldPacket::new(&data[..]).unwrap();
assert_eq!(packet.get_var_length(), vec![2, 3, 4]);
}
|
main
|
identifier_name
|
get_variable_length_field.rs
|
// Copyright (c) 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(custom_attribute, plugin, slice_bytes, vec_push_all)]
#![plugin(pnet_macros_plugin)]
extern crate pnet;
extern crate pnet_macros_support;
use pnet_macros_support::types::*;
#[packet]
pub struct WithVariableLengthField {
banana: u32be,
#[length = "3"]
var_length: Vec<u8>,
#[payload]
payload: Vec<u8>
}
fn main()
|
{
let data = [1, 1, 1, 1, 2, 3, 4, 5, 6];
let packet = WithVariableLengthFieldPacket::new(&data[..]).unwrap();
assert_eq!(packet.get_var_length(), vec![2, 3, 4]);
}
|
identifier_body
|
|
attrib.rs
|
// rustfmt-wrap_comments: true
// Test attributes and doc comments are preserved.
//! Doc comment
#![attribute]
//! Crate doc comment
// Comment
// Comment on attribute
#![the(attribute)]
// Another comment
#[invalid attribute]
fn foo() {}
/// Blah blah blah.
/// Blah blah blah.
/// Blah blah blah.
/// Blah blah blah.
/// Blah blah blah.
impl Bar {
/// Blah blah blooo.
/// Blah blah blooo.
/// Blah blah blooo.
/// Blah blah blooo.
#[an_attribute]
fn foo(&mut self) -> isize {
}
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
pub fn f2(self) {
(foo, bar)
}
#[another_attribute]
fn f3(self) -> Dog
|
/// Blah blah bing.
#[attrib1]
/// Blah blah bing.
#[attrib2]
// Another comment that needs rewrite because it's tooooooooooooooooooooooooooooooo loooooooooooong.
/// Blah blah bing.
fn f4(self) -> Cat {
}
// We want spaces around `=`
#[cfg(feature="nightly")]
fn f5(self) -> Monkey {}
}
// #984
struct Foo {
# [ derive ( Clone, PartialEq, Debug, Deserialize, Serialize ) ]
foo: usize,
}
// #1668
/// Default path (*nix)
#[cfg(all(unix, not(target_os = "macos"), not(target_os = "ios"), not(target_os = "android")))]
fn foo() {
#[cfg(target_os = "freertos")]
match port_id {
'a' | 'A' => GpioPort { port_address: GPIO_A },
'b' | 'B' => GpioPort { port_address: GPIO_B },
_ => panic!(),
}
#[cfg_attr(not(target_os = "freertos"), allow(unused_variables))]
let x = 3;
}
// #1777
#[test]
#[should_panic(expected = "(")]
#[should_panic(expected = /* ( */ "(")]
#[should_panic(/* ((((( */expected /* ((((( */= /* ((((( */ "("/* ((((( */)]
#[should_panic(
/* (((((((( *//*
(((((((((()(((((((( */
expected = "("
// ((((((((
)]
fn foo() {}
// #1799
fn issue_1799() {
#[allow(unreachable_code)] // https://github.com/rust-lang/rust/issues/43336
Some( Err(error) ) ;
#[allow(unreachable_code)]
// https://github.com/rust-lang/rust/issues/43336
Some( Err(error) ) ;
}
// Formatting inner attributes
fn inner_attributes() {
#![ this_is_an_inner_attribute ( foo ) ]
foo();
}
impl InnerAttributes() {
#![ this_is_an_inner_attribute ( foo ) ]
fn foo() {}
}
mod InnerAttributes {
#![ this_is_an_inner_attribute ( foo ) ]
}
fn attributes_on_statements() {
// Local
# [ attr ( on ( local ) ) ]
let x = 3;
// Item
# [ attr ( on ( item ) ) ]
use foo;
// Expr
# [ attr ( on ( expr ) ) ]
{}
// Semi
# [ attr ( on ( semi ) ) ]
foo();
// Mac
# [ attr ( on ( mac ) ) ]
foo!();
}
// Large derive
#[derive(Add, Sub, Mul, Div, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash, Serialize, Deserialize)]
pub struct HP(pub u8);
|
{
}
|
identifier_body
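For readers skimming the fixture above, the distinction it keeps exercising between outer (#[...]) and inner (#![...]) attributes comes down to what they annotate; a plain-Rust illustration unrelated to rustfmt's own configuration:

// An outer attribute annotates the item that follows it.
#[allow(dead_code)]
fn unused() {}

mod with_inner {
    // An inner attribute annotates the enclosing item (this module), so it has
    // to appear before any other contents of the module.
    #![allow(dead_code)]

    fn also_unused() {}
}

fn main() {}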
|
attrib.rs
|
// rustfmt-wrap_comments: true
// Test attributes and doc comments are preserved.
//! Doc comment
#![attribute]
//! Crate doc comment
// Comment
// Comment on attribute
#![the(attribute)]
// Another comment
#[invalid attribute]
fn foo() {}
/// Blah blah blah.
/// Blah blah blah.
/// Blah blah blah.
/// Blah blah blah.
/// Blah blah blah.
impl Bar {
/// Blah blah blooo.
/// Blah blah blooo.
/// Blah blah blooo.
/// Blah blah blooo.
#[an_attribute]
fn foo(&mut self) -> isize {
}
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
pub fn f2(self) {
(foo, bar)
}
#[another_attribute]
fn f3(self) -> Dog {
}
/// Blah blah bing.
#[attrib1]
/// Blah blah bing.
#[attrib2]
// Another comment that needs rewrite because it's tooooooooooooooooooooooooooooooo loooooooooooong.
/// Blah blah bing.
fn f4(self) -> Cat {
}
// We want spaces around `=`
#[cfg(feature="nightly")]
fn f5(self) -> Monkey {}
}
// #984
struct Foo {
# [ derive ( Clone, PartialEq, Debug, Deserialize, Serialize ) ]
foo: usize,
}
// #1668
/// Default path (*nix)
#[cfg(all(unix, not(target_os = "macos"), not(target_os = "ios"), not(target_os = "android")))]
fn foo() {
#[cfg(target_os = "freertos")]
match port_id {
'a' | 'A' => GpioPort { port_address: GPIO_A },
'b' | 'B' => GpioPort { port_address: GPIO_B },
_ => panic!(),
}
#[cfg_attr(not(target_os = "freertos"), allow(unused_variables))]
let x = 3;
}
|
#[test]
#[should_panic(expected = "(")]
#[should_panic(expected = /* ( */ "(")]
#[should_panic(/* ((((( */expected /* ((((( */= /* ((((( */ "("/* ((((( */)]
#[should_panic(
/* (((((((( *//*
(((((((((()(((((((( */
expected = "("
// ((((((((
)]
fn foo() {}
// #1799
fn issue_1799() {
#[allow(unreachable_code)] // https://github.com/rust-lang/rust/issues/43336
Some( Err(error) ) ;
#[allow(unreachable_code)]
// https://github.com/rust-lang/rust/issues/43336
Some( Err(error) ) ;
}
// Formatting inner attributes
fn inner_attributes() {
#![ this_is_an_inner_attribute ( foo ) ]
foo();
}
impl InnerAttributes() {
#![ this_is_an_inner_attribute ( foo ) ]
fn foo() {}
}
mod InnerAttributes {
#![ this_is_an_inner_attribute ( foo ) ]
}
fn attributes_on_statements() {
// Local
# [ attr ( on ( local ) ) ]
let x = 3;
// Item
# [ attr ( on ( item ) ) ]
use foo;
// Expr
# [ attr ( on ( expr ) ) ]
{}
// Semi
# [ attr ( on ( semi ) ) ]
foo();
// Mac
# [ attr ( on ( mac ) ) ]
foo!();
}
// Large derive
#[derive(Add, Sub, Mul, Div, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash, Serialize, Deserialize)]
pub struct HP(pub u8);
|
// #1777
|
random_line_split
|
attrib.rs
|
// rustfmt-wrap_comments: true
// Test attributes and doc comments are preserved.
//! Doc comment
#![attribute]
//! Crate doc comment
// Comment
// Comment on attribute
#![the(attribute)]
// Another comment
#[invalid attribute]
fn foo() {}
/// Blah blah blah.
/// Blah blah blah.
/// Blah blah blah.
/// Blah blah blah.
/// Blah blah blah.
impl Bar {
/// Blah blah blooo.
/// Blah blah blooo.
/// Blah blah blooo.
/// Blah blah blooo.
#[an_attribute]
fn foo(&mut self) -> isize {
}
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
/// Blah blah bing.
pub fn f2(self) {
(foo, bar)
}
#[another_attribute]
fn f3(self) -> Dog {
}
/// Blah blah bing.
#[attrib1]
/// Blah blah bing.
#[attrib2]
// Another comment that needs rewrite because it's tooooooooooooooooooooooooooooooo loooooooooooong.
/// Blah blah bing.
fn f4(self) -> Cat {
}
// We want spaces around `=`
#[cfg(feature="nightly")]
fn f5(self) -> Monkey {}
}
// #984
struct Foo {
# [ derive ( Clone, PartialEq, Debug, Deserialize, Serialize ) ]
foo: usize,
}
// #1668
/// Default path (*nix)
#[cfg(all(unix, not(target_os = "macos"), not(target_os = "ios"), not(target_os = "android")))]
fn foo() {
#[cfg(target_os = "freertos")]
match port_id {
'a' | 'A' => GpioPort { port_address: GPIO_A },
'b' | 'B' => GpioPort { port_address: GPIO_B },
_ => panic!(),
}
#[cfg_attr(not(target_os = "freertos"), allow(unused_variables))]
let x = 3;
}
// #1777
#[test]
#[should_panic(expected = "(")]
#[should_panic(expected = /* ( */ "(")]
#[should_panic(/* ((((( */expected /* ((((( */= /* ((((( */ "("/* ((((( */)]
#[should_panic(
/* (((((((( *//*
(((((((((()(((((((( */
expected = "("
// ((((((((
)]
fn foo() {}
// #1799
fn issue_1799() {
#[allow(unreachable_code)] // https://github.com/rust-lang/rust/issues/43336
Some( Err(error) ) ;
#[allow(unreachable_code)]
// https://github.com/rust-lang/rust/issues/43336
Some( Err(error) ) ;
}
// Formatting inner attributes
fn inner_attributes() {
#![ this_is_an_inner_attribute ( foo ) ]
foo();
}
impl InnerAttributes() {
#![ this_is_an_inner_attribute ( foo ) ]
fn foo() {}
}
mod InnerAttributes {
#![ this_is_an_inner_attribute ( foo ) ]
}
fn attributes_on_statements() {
// Local
# [ attr ( on ( local ) ) ]
let x = 3;
// Item
# [ attr ( on ( item ) ) ]
use foo;
// Expr
# [ attr ( on ( expr ) ) ]
{}
// Semi
# [ attr ( on ( semi ) ) ]
foo();
// Mac
# [ attr ( on ( mac ) ) ]
foo!();
}
// Large derive
#[derive(Add, Sub, Mul, Div, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Debug, Hash, Serialize, Deserialize)]
pub struct
|
(pub u8);
|
HP
|
identifier_name
|
regions-infer-borrow-scope-within-loop.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn
|
<'r, T>(x: &'r T) -> &'r T {x}
fn foo(cond: &fn() -> bool, box: &fn() -> @int) {
    let mut y: &int;
loop {
let x = box();
// Here we complain because the resulting region
// of this borrow is the fn body as a whole.
y = borrow(x); //~ ERROR illegal borrow: cannot root managed value long enough
assert!(*x == *y);
if cond() { break; }
}
    assert!(*y != 0);
}
fn main() {}
|
borrow
|
identifier_name
|
regions-infer-borrow-scope-within-loop.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn borrow<'r, T>(x: &'r T) -> &'r T {x}
fn foo(cond: &fn() -> bool, box: &fn() -> @int) {
    let mut y: &int;
loop {
|
y = borrow(x); //~ ERROR illegal borrow: cannot root managed value long enough
assert!(*x == *y);
if cond() { break; }
}
    assert!(*y != 0);
}
fn main() {}
|
let x = box();
// Here we complain because the resulting region
// of this borrow is the fn body as a whole.
|
random_line_split
|
regions-infer-borrow-scope-within-loop.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn borrow<'r, T>(x: &'r T) -> &'r T {x}
fn foo(cond: &fn() -> bool, box: &fn() -> @int)
|
fn main() {}
|
{
    let mut y: &int;
loop {
let x = box();
// Here we complain because the resulting region
// of this borrow is the fn body as a whole.
y = borrow(x); //~ ERROR illegal borrow: cannot root managed value long enough
assert!(*x == *y);
if cond() { break; }
}
assert!(*y != 0);
}
|
identifier_body
|
regions-infer-borrow-scope-within-loop.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn borrow<'r, T>(x: &'r T) -> &'r T {x}
fn foo(cond: &fn() -> bool, box: &fn() -> @int) {
    let mut y: &int;
loop {
let x = box();
// Here we complain because the resulting region
// of this borrow is the fn body as a whole.
y = borrow(x); //~ ERROR illegal borrow: cannot root managed value long enough
assert!(*x == *y);
if cond()
|
}
    assert!(*y != 0);
}
fn main() {}
|
{ break; }
|
conditional_block
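The shape of error this fixture tests still exists in current Rust, minus the obsolete @int and &fn syntax; a hedged modern sketch, where the commented-out assignment is the one the borrow checker rejects:

fn borrow<T>(x: &T) -> &T { x }

fn main() {
    let outside = 5;
    let mut y: &i32 = &0;
    for _ in 0..3 {
        let _inner = Box::new(5);
        // y = borrow(&*_inner); // error[E0597]: `_inner` does not live long enough
        y = borrow(&outside); // fine: `outside` outlives both the loop and `y`
    }
    assert!(*y != 0);
}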
|
issue-13058.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ops::Range;
trait Itble<'r, T, I: Iterator<Item=T>> { fn iter(&'r self) -> I; }
impl<'r> Itble<'r, usize, Range<usize>> for (usize, usize) {
fn iter(&'r self) -> Range<usize> {
let &(min, max) = self;
min..max
}
}
fn check<'r, I: Iterator<Item=usize>, T: Itble<'r, usize, I>>(cont: &T) -> bool
|
fn main() {
check(&(3, 5));
}
|
{
let cont_iter = cont.iter();
//~^ ERROR 24:26: 24:30: explicit lifetime required in the type of `cont` [E0621]
let result = cont_iter.fold(Some(0), |state, val| {
state.map_or(None, |mask| {
let bit = 1 << val;
if mask & bit == 0 {Some(mask|bit)} else {None}
})
});
result.is_some()
}
|
identifier_body
|
issue-13058.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ops::Range;
trait Itble<'r, T, I: Iterator<Item=T>> { fn iter(&'r self) -> I; }
impl<'r> Itble<'r, usize, Range<usize>> for (usize, usize) {
fn iter(&'r self) -> Range<usize> {
let &(min, max) = self;
min..max
}
}
fn
|
<'r, I: Iterator<Item=usize>, T: Itble<'r, usize, I>>(cont: &T) -> bool
{
let cont_iter = cont.iter();
//~^ ERROR 24:26: 24:30: explicit lifetime required in the type of `cont` [E0621]
let result = cont_iter.fold(Some(0), |state, val| {
state.map_or(None, |mask| {
let bit = 1 << val;
if mask & bit == 0 {Some(mask|bit)} else {None}
})
});
result.is_some()
}
fn main() {
check(&(3, 5));
}
|
check
|
identifier_name
|
issue-13058.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ops::Range;
trait Itble<'r, T, I: Iterator<Item=T>> { fn iter(&'r self) -> I; }
impl<'r> Itble<'r, usize, Range<usize>> for (usize, usize) {
fn iter(&'r self) -> Range<usize> {
let &(min, max) = self;
min..max
}
}
fn check<'r, I: Iterator<Item=usize>, T: Itble<'r, usize, I>>(cont: &T) -> bool
{
let cont_iter = cont.iter();
//~^ ERROR 24:26: 24:30: explicit lifetime required in the type of `cont` [E0621]
let result = cont_iter.fold(Some(0), |state, val| {
state.map_or(None, |mask| {
let bit = 1 << val;
if mask & bit == 0 {Some(mask|bit)} else {None}
})
});
result.is_some()
}
|
fn main() {
check(&(3, 5));
}
|
random_line_split
|
|
issue-13058.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::ops::Range;
trait Itble<'r, T, I: Iterator<Item=T>> { fn iter(&'r self) -> I; }
impl<'r> Itble<'r, usize, Range<usize>> for (usize, usize) {
fn iter(&'r self) -> Range<usize> {
let &(min, max) = self;
min..max
}
}
fn check<'r, I: Iterator<Item=usize>, T: Itble<'r, usize, I>>(cont: &T) -> bool
{
let cont_iter = cont.iter();
//~^ ERROR 24:26: 24:30: explicit lifetime required in the type of `cont` [E0621]
let result = cont_iter.fold(Some(0), |state, val| {
state.map_or(None, |mask| {
let bit = 1 << val;
if mask & bit == 0
|
else {None}
})
});
result.is_some()
}
fn main() {
check(&(3, 5));
}
|
{Some(mask|bit)}
|
conditional_block
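The E0621 this test expects goes away once the borrow of cont is tied to the 'r lifetime the bounds already name; a sketch of the corrected signature, mirroring the snippet above rather than quoting the rustc test suite:

use std::ops::Range;

trait Itble<'r, T, I: Iterator<Item = T>> {
    fn iter(&'r self) -> I;
}

impl<'r> Itble<'r, usize, Range<usize>> for (usize, usize) {
    fn iter(&'r self) -> Range<usize> {
        let &(min, max) = self;
        min..max
    }
}

// Writing `cont: &'r T` supplies the explicit lifetime the compiler asked for.
fn check<'r, I: Iterator<Item = usize>, T: Itble<'r, usize, I>>(cont: &'r T) -> bool {
    let result = cont.iter().fold(Some(0u64), |state, val| {
        state.and_then(|mask| {
            let bit = 1u64 << val;
            if mask & bit == 0 { Some(mask | bit) } else { None }
        })
    });
    result.is_some()
}

fn main() {
    assert!(check(&(3, 5)));
}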
|
mod.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
//! Useful synchronization primitives
//!
//! This module contains useful safe and unsafe synchronization primitives.
//! Most of the primitives in this module do not provide any sort of locking
//! and/or blocking at all, but rather provide the necessary tools to build
//! other types of concurrent primitives.
#![stable]
pub use alloc::arc::{Arc, Weak};
pub use core::atomic;
/*
pub use self::mutex::{Mutex, MutexGuard, StaticMutex};
pub use self::mutex::MUTEX_INIT;
pub use self::rwlock::{RwLock, StaticRwLock, RW_LOCK_INIT};
pub use self::rwlock::{RwLockReadGuard, RwLockWriteGuard};
pub use self::condvar::{Condvar, StaticCondvar, CONDVAR_INIT};
pub use self::once::{Once, ONCE_INIT};
pub use self::semaphore::{Semaphore, SemaphoreGuard};
pub use self::barrier::{Barrier, BarrierWaitResult};
pub use self::poison::{PoisonError, TryLockError, TryLockResult, LockResult};
pub use self::future::Future;
pub use self::task_pool::TaskPool;
pub mod mpsc;
mod barrier;
mod condvar;
mod future;
mod mutex;
mod once;
mod poison;
mod rwlock;
mod semaphore;
mod task_pool;
*/
|
random_line_split
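A small usage sketch of the two re-exports that remain active in the module above, Arc and the atomics, written against today's std paths rather than the historical core::atomic re-export:

use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;

fn main() {
    // Arc gives shared ownership across threads; AtomicUsize gives lock-free updates.
    let counter = Arc::new(AtomicUsize::new(0));
    let handles: Vec<_> = (0..4)
        .map(|_| {
            let counter = Arc::clone(&counter);
            thread::spawn(move || {
                for _ in 0..1000 {
                    counter.fetch_add(1, Ordering::Relaxed);
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    assert_eq!(counter.load(Ordering::Relaxed), 4000);
}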
|
|
timers.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::callback::ExceptionHandling::Report;
use dom::bindings::cell::DomRefCell;
use dom::bindings::codegen::Bindings::FunctionBinding::Function;
use dom::bindings::reflector::DomObject;
use dom::bindings::str::DOMString;
use dom::document::FakeRequestAnimationFrameCallback;
use dom::eventsource::EventSourceTimeoutCallback;
use dom::globalscope::GlobalScope;
use dom::testbinding::TestBindingCallback;
use dom::xmlhttprequest::XHRTimeoutCallback;
use euclid::Length;
use ipc_channel::ipc::IpcSender;
use js::jsapi::Heap;
use js::jsval::{JSVal, UndefinedValue};
use js::rust::HandleValue;
use script_traits::{MsDuration, precise_time_ms};
use script_traits::{TimerEvent, TimerEventId, TimerEventRequest};
use script_traits::{TimerSchedulerMsg, TimerSource};
use servo_config::prefs::PREFS;
use std::cell::Cell;
use std::cmp::{self, Ord, Ordering};
use std::collections::HashMap;
use std::default::Default;
use std::rc::Rc;
#[derive(Clone, Copy, Debug, Eq, Hash, JSTraceable, MallocSizeOf, Ord, PartialEq, PartialOrd)]
pub struct OneshotTimerHandle(i32);
#[derive(DenyPublicFields, JSTraceable, MallocSizeOf)]
pub struct OneshotTimers {
js_timers: JsTimers,
#[ignore_malloc_size_of = "Defined in std"]
timer_event_chan: IpcSender<TimerEvent>,
#[ignore_malloc_size_of = "Defined in std"]
scheduler_chan: IpcSender<TimerSchedulerMsg>,
next_timer_handle: Cell<OneshotTimerHandle>,
timers: DomRefCell<Vec<OneshotTimer>>,
suspended_since: Cell<Option<MsDuration>>,
/// Initially 0, increased whenever the associated document is reactivated
/// by the amount of ms the document was inactive. The current time can be
/// offset back by this amount for a coherent time across document
/// activations.
suspension_offset: Cell<MsDuration>,
/// Calls to `fire_timer` with a different argument than this get ignored.
/// They were previously scheduled and got invalidated when
/// - timers were suspended,
/// - the timer it was scheduled for got canceled or
/// - a timer was added with an earlier callback time. In this case the
/// original timer is rescheduled when it is the next one to get called.
expected_event_id: Cell<TimerEventId>,
}
#[derive(DenyPublicFields, JSTraceable, MallocSizeOf)]
struct OneshotTimer {
handle: OneshotTimerHandle,
source: TimerSource,
callback: OneshotTimerCallback,
scheduled_for: MsDuration,
}
// This enum is required to work around the fact that trait objects do not support generic methods.
// A replacement trait would have a method such as
// `invoke<T: DomObject>(self: Box<Self>, this: &T, js_timers: &JsTimers);`.
#[derive(JSTraceable, MallocSizeOf)]
pub enum OneshotTimerCallback {
XhrTimeout(XHRTimeoutCallback),
EventSourceTimeout(EventSourceTimeoutCallback),
JsTimer(JsTimerTask),
TestBindingCallback(TestBindingCallback),
FakeRequestAnimationFrame(FakeRequestAnimationFrameCallback),
}
impl OneshotTimerCallback {
fn invoke<T: DomObject>(self, this: &T, js_timers: &JsTimers) {
match self {
OneshotTimerCallback::XhrTimeout(callback) => callback.invoke(),
OneshotTimerCallback::EventSourceTimeout(callback) => callback.invoke(),
OneshotTimerCallback::JsTimer(task) => task.invoke(this, js_timers),
OneshotTimerCallback::TestBindingCallback(callback) => callback.invoke(),
OneshotTimerCallback::FakeRequestAnimationFrame(callback) => callback.invoke(),
}
}
}
impl Ord for OneshotTimer {
fn cmp(&self, other: &OneshotTimer) -> Ordering {
match self.scheduled_for.cmp(&other.scheduled_for).reverse() {
Ordering::Equal => self.handle.cmp(&other.handle).reverse(),
res => res
}
}
}
impl PartialOrd for OneshotTimer {
fn partial_cmp(&self, other: &OneshotTimer) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Eq for OneshotTimer {}
impl PartialEq for OneshotTimer {
fn eq(&self, other: &OneshotTimer) -> bool {
self as *const OneshotTimer == other as *const OneshotTimer
}
}
impl OneshotTimers {
pub fn new(timer_event_chan: IpcSender<TimerEvent>,
scheduler_chan: IpcSender<TimerSchedulerMsg>)
-> OneshotTimers {
OneshotTimers {
js_timers: JsTimers::new(),
timer_event_chan: timer_event_chan,
scheduler_chan: scheduler_chan,
next_timer_handle: Cell::new(OneshotTimerHandle(1)),
timers: DomRefCell::new(Vec::new()),
suspended_since: Cell::new(None),
suspension_offset: Cell::new(Length::new(0)),
expected_event_id: Cell::new(TimerEventId(0)),
}
}
pub fn schedule_callback(&self,
callback: OneshotTimerCallback,
duration: MsDuration,
source: TimerSource)
-> OneshotTimerHandle {
let new_handle = self.next_timer_handle.get();
self.next_timer_handle.set(OneshotTimerHandle(new_handle.0 + 1));
let scheduled_for = self.base_time() + duration;
let timer = OneshotTimer {
handle: new_handle,
source: source,
callback: callback,
scheduled_for: scheduled_for,
};
{
let mut timers = self.timers.borrow_mut();
let insertion_index = timers.binary_search(&timer).err().unwrap();
timers.insert(insertion_index, timer);
}
if self.is_next_timer(new_handle) {
self.schedule_timer_call();
}
new_handle
}
pub fn unschedule_callback(&self, handle: OneshotTimerHandle) {
let was_next = self.is_next_timer(handle);
        self.timers.borrow_mut().retain(|t| t.handle != handle);
if was_next {
self.invalidate_expected_event_id();
self.schedule_timer_call();
}
}
fn is_next_timer(&self, handle: OneshotTimerHandle) -> bool {
match self.timers.borrow().last() {
None => false,
Some(ref max_timer) => max_timer.handle == handle
}
}
pub fn fire_timer(&self, id: TimerEventId, global: &GlobalScope) {
let expected_id = self.expected_event_id.get();
        if expected_id != id {
debug!("ignoring timer fire event {:?} (expected {:?})", id, expected_id);
return;
}
assert!(self.suspended_since.get().is_none());
let base_time = self.base_time();
// Since the event id was the expected one, at least one timer should be due.
if base_time < self.timers.borrow().last().unwrap().scheduled_for {
warn!("Unexpected timing!");
return;
}
// select timers to run to prevent firing timers
// that were installed during fire of another timer
let mut timers_to_run = Vec::new();
loop {
let mut timers = self.timers.borrow_mut();
if timers.is_empty() || timers.last().unwrap().scheduled_for > base_time {
break;
}
timers_to_run.push(timers.pop().unwrap());
}
for timer in timers_to_run {
let callback = timer.callback;
callback.invoke(global, &self.js_timers);
}
self.schedule_timer_call();
}
fn base_time(&self) -> MsDuration {
let offset = self.suspension_offset.get();
match self.suspended_since.get() {
Some(time) => time - offset,
None => precise_time_ms() - offset,
}
}
pub fn slow_down(&self) {
let duration = PREFS.get("js.timers.minimum_duration").as_u64().unwrap_or(1000);
self.js_timers.set_min_duration(MsDuration::new(duration));
}
pub fn speed_up(&self) {
self.js_timers.remove_min_duration();
}
pub fn suspend(&self) {
// Suspend is idempotent: do nothing if the timers are already suspended.
if self.suspended_since.get().is_some() {
return warn!("Suspending an already suspended timer.");
}
debug!("Suspending timers.");
self.suspended_since.set(Some(precise_time_ms()));
self.invalidate_expected_event_id();
}
pub fn resume(&self) {
// Resume is idempotent: do nothing if the timers are already resumed.
let additional_offset = match self.suspended_since.get() {
Some(suspended_since) => precise_time_ms() - suspended_since,
None => return warn!("Resuming an already resumed timer."),
};
debug!("Resuming timers.");
self.suspension_offset.set(self.suspension_offset.get() + additional_offset);
self.suspended_since.set(None);
self.schedule_timer_call();
}
fn schedule_timer_call(&self) {
if self.suspended_since.get().is_some() {
// The timer will be scheduled when the pipeline is fully activated.
return;
}
let timers = self.timers.borrow();
if let Some(timer) = timers.last() {
let expected_event_id = self.invalidate_expected_event_id();
let delay = Length::new(timer.scheduled_for.get().saturating_sub(precise_time_ms().get()));
let request = TimerEventRequest(self.timer_event_chan.clone(), timer.source,
expected_event_id, delay);
self.scheduler_chan.send(TimerSchedulerMsg::Request(request)).unwrap();
}
}
fn invalidate_expected_event_id(&self) -> TimerEventId {
let TimerEventId(currently_expected) = self.expected_event_id.get();
let next_id = TimerEventId(currently_expected + 1);
debug!("invalidating expected timer (was {:?}, now {:?}", currently_expected, next_id);
self.expected_event_id.set(next_id);
next_id
}
pub fn set_timeout_or_interval(&self,
global: &GlobalScope,
callback: TimerCallback,
arguments: Vec<HandleValue>,
timeout: i32,
is_interval: IsInterval,
source: TimerSource)
-> i32 {
self.js_timers.set_timeout_or_interval(global,
callback,
arguments,
timeout,
is_interval,
source)
}
pub fn clear_timeout_or_interval(&self, global: &GlobalScope, handle: i32) {
self.js_timers.clear_timeout_or_interval(global, handle)
}
}
#[derive(Clone, Copy, Eq, Hash, JSTraceable, MallocSizeOf, Ord, PartialEq, PartialOrd)]
pub struct JsTimerHandle(i32);
#[derive(DenyPublicFields, JSTraceable, MallocSizeOf)]
pub struct JsTimers {
next_timer_handle: Cell<JsTimerHandle>,
active_timers: DomRefCell<HashMap<JsTimerHandle, JsTimerEntry>>,
/// The nesting level of the currently executing timer task or 0.
nesting_level: Cell<u32>,
/// Used to introduce a minimum delay in event intervals
min_duration: Cell<Option<MsDuration>>,
}
#[derive(JSTraceable, MallocSizeOf)]
struct JsTimerEntry {
oneshot_handle: OneshotTimerHandle,
}
// Holder for the various JS values associated with setTimeout
// (ie. function value to invoke and all arguments to pass
// to the function when calling it)
// TODO: Handle rooting during invocation when movable GC is turned on
#[derive(JSTraceable, MallocSizeOf)]
pub struct JsTimerTask {
#[ignore_malloc_size_of = "Because it is non-owning"]
handle: JsTimerHandle,
source: TimerSource,
callback: InternalTimerCallback,
is_interval: IsInterval,
nesting_level: u32,
duration: MsDuration,
}
// Enum allowing more descriptive values for the is_interval field
#[derive(Clone, Copy, JSTraceable, MallocSizeOf, PartialEq)]
pub enum IsInterval {
Interval,
NonInterval,
}
#[derive(Clone)]
pub enum TimerCallback {
StringTimerCallback(DOMString),
FunctionTimerCallback(Rc<Function>),
}
#[derive(Clone, JSTraceable, MallocSizeOf)]
enum InternalTimerCallback {
StringTimerCallback(DOMString),
FunctionTimerCallback(
#[ignore_malloc_size_of = "Rc"]
Rc<Function>,
#[ignore_malloc_size_of = "Rc"]
Rc<Box<[Heap<JSVal>]>>),
}
impl JsTimers {
pub fn new() -> JsTimers {
JsTimers {
next_timer_handle: Cell::new(JsTimerHandle(1)),
active_timers: DomRefCell::new(HashMap::new()),
nesting_level: Cell::new(0),
min_duration: Cell::new(None),
}
}
// see https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
pub fn set_timeout_or_interval(&self,
global: &GlobalScope,
callback: TimerCallback,
arguments: Vec<HandleValue>,
timeout: i32,
is_interval: IsInterval,
source: TimerSource)
-> i32 {
let callback = match callback {
TimerCallback::StringTimerCallback(code_str) =>
InternalTimerCallback::StringTimerCallback(code_str),
TimerCallback::FunctionTimerCallback(function) => {
// This is a bit complicated, but this ensures that the vector's
// buffer isn't reallocated (and moved) after setting the Heap values
let mut args = Vec::with_capacity(arguments.len());
for _ in 0..arguments.len() {
args.push(Heap::default());
}
for (i, item) in arguments.iter().enumerate() {
args.get_mut(i).unwrap().set(item.get());
}
InternalTimerCallback::FunctionTimerCallback(function, Rc::new(args.into_boxed_slice()))
}
};
// step 2
let JsTimerHandle(new_handle) = self.next_timer_handle.get();
self.next_timer_handle.set(JsTimerHandle(new_handle + 1));
// step 3 as part of initialize_and_schedule below
// step 4
let mut task = JsTimerTask {
handle: JsTimerHandle(new_handle),
source: source,
callback: callback,
is_interval: is_interval,
nesting_level: 0,
duration: Length::new(0),
};
// step 5
task.duration = Length::new(cmp::max(0, timeout) as u64);
// step 3, 6-9, 11-14
self.initialize_and_schedule(global, task);
// step 10
new_handle
}
pub fn clear_timeout_or_interval(&self, global: &GlobalScope, handle: i32) {
let mut active_timers = self.active_timers.borrow_mut();
if let Some(entry) = active_timers.remove(&JsTimerHandle(handle)) {
global.unschedule_callback(entry.oneshot_handle);
}
}
pub fn set_min_duration(&self, duration: MsDuration) {
self.min_duration.set(Some(duration));
}
pub fn
|
(&self) {
self.min_duration.set(None);
}
// see step 13 of https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
fn user_agent_pad(&self, current_duration: MsDuration) -> MsDuration {
match self.min_duration.get() {
Some(min_duration) => {
cmp::max(min_duration, current_duration)
},
None => current_duration
}
}
// see https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
fn initialize_and_schedule(&self, global: &GlobalScope, mut task: JsTimerTask) {
let handle = task.handle;
let mut active_timers = self.active_timers.borrow_mut();
// step 6
let nesting_level = self.nesting_level.get();
// step 7, 13
let duration = self.user_agent_pad(clamp_duration(nesting_level, task.duration));
// step 8, 9
task.nesting_level = nesting_level + 1;
// essentially step 11, 12, and 14
let callback = OneshotTimerCallback::JsTimer(task);
let oneshot_handle = global.schedule_callback(callback, duration);
// step 3
let entry = active_timers.entry(handle).or_insert(JsTimerEntry {
oneshot_handle: oneshot_handle,
});
entry.oneshot_handle = oneshot_handle;
}
}
// see step 7 of https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
fn clamp_duration(nesting_level: u32, unclamped: MsDuration) -> MsDuration {
let lower_bound = if nesting_level > 5 {
4
} else {
0
};
cmp::max(Length::new(lower_bound), unclamped)
}
impl JsTimerTask {
// see https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
pub fn invoke<T: DomObject>(self, this: &T, timers: &JsTimers) {
// step 4.1 can be ignored, because we proactively prevent execution
// of this task when its scheduled execution is canceled.
// prep for step 6 in nested set_timeout_or_interval calls
timers.nesting_level.set(self.nesting_level);
// step 4.2
match self.callback {
InternalTimerCallback::StringTimerCallback(ref code_str) => {
let global = this.global();
let cx = global.get_cx();
rooted!(in(cx) let mut rval = UndefinedValue());
global.evaluate_js_on_global_with_result(
code_str, rval.handle_mut());
},
InternalTimerCallback::FunctionTimerCallback(ref function, ref arguments) => {
let arguments = self.collect_heap_args(arguments);
let _ = function.Call_(this, arguments, Report);
},
};
// reset nesting level (see above)
timers.nesting_level.set(0);
// step 4.3
        // Since we choose to proactively prevent execution (see 4.1 above), we must only
// reschedule repeating timers when they were not canceled as part of step 4.2.
if self.is_interval == IsInterval::Interval &&
timers.active_timers.borrow().contains_key(&self.handle) {
timers.initialize_and_schedule(&this.global(), self);
}
}
// Returning Handles directly from Heap values is inherently unsafe, but here it's
// always done via rooted JsTimers, which is safe.
#[allow(unsafe_code)]
fn collect_heap_args<'b>(&self, args: &'b [Heap<JSVal>]) -> Vec<HandleValue<'b>> {
args.iter().map(|arg| unsafe { arg.handle() }).collect()
}
}
|
remove_min_duration
|
identifier_name
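A standalone restatement of the nesting-level clamp that clamp_duration implements in the listing above, using plain u64 milliseconds in place of the crate's MsDuration:

// Clamp a requested timeout exactly as the snippet above does: once the timer
// nesting level exceeds 5, the timeout gets a 4 ms floor.
fn clamp_duration_ms(nesting_level: u32, unclamped_ms: u64) -> u64 {
    let lower_bound = if nesting_level > 5 { 4 } else { 0 };
    unclamped_ms.max(lower_bound)
}

fn main() {
    assert_eq!(clamp_duration_ms(0, 0), 0);   // shallow nesting: no floor
    assert_eq!(clamp_duration_ms(6, 0), 4);   // deep nesting: 4 ms floor applies
    assert_eq!(clamp_duration_ms(6, 10), 10); // the floor only raises, never lowers
}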
|
timers.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::callback::ExceptionHandling::Report;
use dom::bindings::cell::DomRefCell;
use dom::bindings::codegen::Bindings::FunctionBinding::Function;
use dom::bindings::reflector::DomObject;
use dom::bindings::str::DOMString;
use dom::document::FakeRequestAnimationFrameCallback;
use dom::eventsource::EventSourceTimeoutCallback;
use dom::globalscope::GlobalScope;
use dom::testbinding::TestBindingCallback;
use dom::xmlhttprequest::XHRTimeoutCallback;
use euclid::Length;
use ipc_channel::ipc::IpcSender;
use js::jsapi::Heap;
use js::jsval::{JSVal, UndefinedValue};
use js::rust::HandleValue;
use script_traits::{MsDuration, precise_time_ms};
use script_traits::{TimerEvent, TimerEventId, TimerEventRequest};
use script_traits::{TimerSchedulerMsg, TimerSource};
use servo_config::prefs::PREFS;
use std::cell::Cell;
use std::cmp::{self, Ord, Ordering};
use std::collections::HashMap;
use std::default::Default;
use std::rc::Rc;
#[derive(Clone, Copy, Debug, Eq, Hash, JSTraceable, MallocSizeOf, Ord, PartialEq, PartialOrd)]
pub struct OneshotTimerHandle(i32);
#[derive(DenyPublicFields, JSTraceable, MallocSizeOf)]
pub struct OneshotTimers {
js_timers: JsTimers,
#[ignore_malloc_size_of = "Defined in std"]
timer_event_chan: IpcSender<TimerEvent>,
#[ignore_malloc_size_of = "Defined in std"]
scheduler_chan: IpcSender<TimerSchedulerMsg>,
next_timer_handle: Cell<OneshotTimerHandle>,
timers: DomRefCell<Vec<OneshotTimer>>,
suspended_since: Cell<Option<MsDuration>>,
/// Initially 0, increased whenever the associated document is reactivated
/// by the amount of ms the document was inactive. The current time can be
/// offset back by this amount for a coherent time across document
/// activations.
suspension_offset: Cell<MsDuration>,
/// Calls to `fire_timer` with a different argument than this get ignored.
/// They were previously scheduled and got invalidated when
/// - timers were suspended,
/// - the timer it was scheduled for got canceled or
/// - a timer was added with an earlier callback time. In this case the
/// original timer is rescheduled when it is the next one to get called.
expected_event_id: Cell<TimerEventId>,
}
#[derive(DenyPublicFields, JSTraceable, MallocSizeOf)]
struct OneshotTimer {
handle: OneshotTimerHandle,
source: TimerSource,
callback: OneshotTimerCallback,
scheduled_for: MsDuration,
}
// This enum is required to work around the fact that trait objects do not support generic methods.
// A replacement trait would have a method such as
// `invoke<T: DomObject>(self: Box<Self>, this: &T, js_timers: &JsTimers);`.
#[derive(JSTraceable, MallocSizeOf)]
pub enum OneshotTimerCallback {
XhrTimeout(XHRTimeoutCallback),
EventSourceTimeout(EventSourceTimeoutCallback),
JsTimer(JsTimerTask),
TestBindingCallback(TestBindingCallback),
FakeRequestAnimationFrame(FakeRequestAnimationFrameCallback),
}
impl OneshotTimerCallback {
fn invoke<T: DomObject>(self, this: &T, js_timers: &JsTimers) {
match self {
OneshotTimerCallback::XhrTimeout(callback) => callback.invoke(),
OneshotTimerCallback::EventSourceTimeout(callback) => callback.invoke(),
OneshotTimerCallback::JsTimer(task) => task.invoke(this, js_timers),
OneshotTimerCallback::TestBindingCallback(callback) => callback.invoke(),
OneshotTimerCallback::FakeRequestAnimationFrame(callback) => callback.invoke(),
}
}
}
impl Ord for OneshotTimer {
fn cmp(&self, other: &OneshotTimer) -> Ordering {
match self.scheduled_for.cmp(&other.scheduled_for).reverse() {
Ordering::Equal => self.handle.cmp(&other.handle).reverse(),
res => res
}
}
}
impl PartialOrd for OneshotTimer {
fn partial_cmp(&self, other: &OneshotTimer) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Eq for OneshotTimer {}
impl PartialEq for OneshotTimer {
fn eq(&self, other: &OneshotTimer) -> bool {
self as *const OneshotTimer == other as *const OneshotTimer
}
}
impl OneshotTimers {
pub fn new(timer_event_chan: IpcSender<TimerEvent>,
scheduler_chan: IpcSender<TimerSchedulerMsg>)
-> OneshotTimers {
OneshotTimers {
js_timers: JsTimers::new(),
timer_event_chan: timer_event_chan,
scheduler_chan: scheduler_chan,
next_timer_handle: Cell::new(OneshotTimerHandle(1)),
timers: DomRefCell::new(Vec::new()),
suspended_since: Cell::new(None),
suspension_offset: Cell::new(Length::new(0)),
expected_event_id: Cell::new(TimerEventId(0)),
}
}
pub fn schedule_callback(&self,
callback: OneshotTimerCallback,
duration: MsDuration,
source: TimerSource)
-> OneshotTimerHandle {
let new_handle = self.next_timer_handle.get();
self.next_timer_handle.set(OneshotTimerHandle(new_handle.0 + 1));
let scheduled_for = self.base_time() + duration;
let timer = OneshotTimer {
handle: new_handle,
source: source,
callback: callback,
scheduled_for: scheduled_for,
};
{
let mut timers = self.timers.borrow_mut();
let insertion_index = timers.binary_search(&timer).err().unwrap();
timers.insert(insertion_index, timer);
}
if self.is_next_timer(new_handle) {
self.schedule_timer_call();
}
new_handle
}
pub fn unschedule_callback(&self, handle: OneshotTimerHandle) {
let was_next = self.is_next_timer(handle);
        self.timers.borrow_mut().retain(|t| t.handle != handle);
if was_next {
self.invalidate_expected_event_id();
self.schedule_timer_call();
}
}
fn is_next_timer(&self, handle: OneshotTimerHandle) -> bool {
match self.timers.borrow().last() {
None => false,
Some(ref max_timer) => max_timer.handle == handle
}
}
pub fn fire_timer(&self, id: TimerEventId, global: &GlobalScope) {
let expected_id = self.expected_event_id.get();
        if expected_id != id {
debug!("ignoring timer fire event {:?} (expected {:?})", id, expected_id);
return;
}
assert!(self.suspended_since.get().is_none());
let base_time = self.base_time();
// Since the event id was the expected one, at least one timer should be due.
if base_time < self.timers.borrow().last().unwrap().scheduled_for {
warn!("Unexpected timing!");
return;
}
// select timers to run to prevent firing timers
// that were installed during fire of another timer
let mut timers_to_run = Vec::new();
loop {
let mut timers = self.timers.borrow_mut();
if timers.is_empty() || timers.last().unwrap().scheduled_for > base_time {
break;
}
timers_to_run.push(timers.pop().unwrap());
}
for timer in timers_to_run {
let callback = timer.callback;
callback.invoke(global, &self.js_timers);
}
self.schedule_timer_call();
}
fn base_time(&self) -> MsDuration {
let offset = self.suspension_offset.get();
match self.suspended_since.get() {
Some(time) => time - offset,
None => precise_time_ms() - offset,
}
}
pub fn slow_down(&self) {
let duration = PREFS.get("js.timers.minimum_duration").as_u64().unwrap_or(1000);
self.js_timers.set_min_duration(MsDuration::new(duration));
}
pub fn speed_up(&self) {
self.js_timers.remove_min_duration();
}
pub fn suspend(&self) {
// Suspend is idempotent: do nothing if the timers are already suspended.
if self.suspended_since.get().is_some() {
return warn!("Suspending an already suspended timer.");
}
debug!("Suspending timers.");
self.suspended_since.set(Some(precise_time_ms()));
self.invalidate_expected_event_id();
}
pub fn resume(&self) {
// Resume is idempotent: do nothing if the timers are already resumed.
let additional_offset = match self.suspended_since.get() {
Some(suspended_since) => precise_time_ms() - suspended_since,
None => return warn!("Resuming an already resumed timer."),
};
debug!("Resuming timers.");
self.suspension_offset.set(self.suspension_offset.get() + additional_offset);
self.suspended_since.set(None);
self.schedule_timer_call();
}
|
if self.suspended_since.get().is_some() {
// The timer will be scheduled when the pipeline is fully activated.
return;
}
let timers = self.timers.borrow();
if let Some(timer) = timers.last() {
let expected_event_id = self.invalidate_expected_event_id();
let delay = Length::new(timer.scheduled_for.get().saturating_sub(precise_time_ms().get()));
let request = TimerEventRequest(self.timer_event_chan.clone(), timer.source,
expected_event_id, delay);
self.scheduler_chan.send(TimerSchedulerMsg::Request(request)).unwrap();
}
}
fn invalidate_expected_event_id(&self) -> TimerEventId {
let TimerEventId(currently_expected) = self.expected_event_id.get();
let next_id = TimerEventId(currently_expected + 1);
debug!("invalidating expected timer (was {:?}, now {:?}", currently_expected, next_id);
self.expected_event_id.set(next_id);
next_id
}
pub fn set_timeout_or_interval(&self,
global: &GlobalScope,
callback: TimerCallback,
arguments: Vec<HandleValue>,
timeout: i32,
is_interval: IsInterval,
source: TimerSource)
-> i32 {
self.js_timers.set_timeout_or_interval(global,
callback,
arguments,
timeout,
is_interval,
source)
}
pub fn clear_timeout_or_interval(&self, global: &GlobalScope, handle: i32) {
self.js_timers.clear_timeout_or_interval(global, handle)
}
}
#[derive(Clone, Copy, Eq, Hash, JSTraceable, MallocSizeOf, Ord, PartialEq, PartialOrd)]
pub struct JsTimerHandle(i32);
#[derive(DenyPublicFields, JSTraceable, MallocSizeOf)]
pub struct JsTimers {
next_timer_handle: Cell<JsTimerHandle>,
active_timers: DomRefCell<HashMap<JsTimerHandle, JsTimerEntry>>,
/// The nesting level of the currently executing timer task or 0.
nesting_level: Cell<u32>,
/// Used to introduce a minimum delay in event intervals
min_duration: Cell<Option<MsDuration>>,
}
#[derive(JSTraceable, MallocSizeOf)]
struct JsTimerEntry {
oneshot_handle: OneshotTimerHandle,
}
// Holder for the various JS values associated with setTimeout
// (ie. function value to invoke and all arguments to pass
// to the function when calling it)
// TODO: Handle rooting during invocation when movable GC is turned on
#[derive(JSTraceable, MallocSizeOf)]
pub struct JsTimerTask {
#[ignore_malloc_size_of = "Because it is non-owning"]
handle: JsTimerHandle,
source: TimerSource,
callback: InternalTimerCallback,
is_interval: IsInterval,
nesting_level: u32,
duration: MsDuration,
}
// Enum allowing more descriptive values for the is_interval field
#[derive(Clone, Copy, JSTraceable, MallocSizeOf, PartialEq)]
pub enum IsInterval {
Interval,
NonInterval,
}
#[derive(Clone)]
pub enum TimerCallback {
StringTimerCallback(DOMString),
FunctionTimerCallback(Rc<Function>),
}
#[derive(Clone, JSTraceable, MallocSizeOf)]
enum InternalTimerCallback {
StringTimerCallback(DOMString),
FunctionTimerCallback(
#[ignore_malloc_size_of = "Rc"]
Rc<Function>,
#[ignore_malloc_size_of = "Rc"]
Rc<Box<[Heap<JSVal>]>>),
}
impl JsTimers {
pub fn new() -> JsTimers {
JsTimers {
next_timer_handle: Cell::new(JsTimerHandle(1)),
active_timers: DomRefCell::new(HashMap::new()),
nesting_level: Cell::new(0),
min_duration: Cell::new(None),
}
}
// see https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
pub fn set_timeout_or_interval(&self,
global: &GlobalScope,
callback: TimerCallback,
arguments: Vec<HandleValue>,
timeout: i32,
is_interval: IsInterval,
source: TimerSource)
-> i32 {
let callback = match callback {
TimerCallback::StringTimerCallback(code_str) =>
InternalTimerCallback::StringTimerCallback(code_str),
TimerCallback::FunctionTimerCallback(function) => {
// This is a bit complicated, but this ensures that the vector's
// buffer isn't reallocated (and moved) after setting the Heap values
let mut args = Vec::with_capacity(arguments.len());
for _ in 0..arguments.len() {
args.push(Heap::default());
}
for (i, item) in arguments.iter().enumerate() {
args.get_mut(i).unwrap().set(item.get());
}
InternalTimerCallback::FunctionTimerCallback(function, Rc::new(args.into_boxed_slice()))
}
};
// step 2
let JsTimerHandle(new_handle) = self.next_timer_handle.get();
self.next_timer_handle.set(JsTimerHandle(new_handle + 1));
// step 3 as part of initialize_and_schedule below
// step 4
let mut task = JsTimerTask {
handle: JsTimerHandle(new_handle),
source: source,
callback: callback,
is_interval: is_interval,
nesting_level: 0,
duration: Length::new(0),
};
// step 5
task.duration = Length::new(cmp::max(0, timeout) as u64);
// step 3, 6-9, 11-14
self.initialize_and_schedule(global, task);
// step 10
new_handle
}
pub fn clear_timeout_or_interval(&self, global: &GlobalScope, handle: i32) {
let mut active_timers = self.active_timers.borrow_mut();
if let Some(entry) = active_timers.remove(&JsTimerHandle(handle)) {
global.unschedule_callback(entry.oneshot_handle);
}
}
pub fn set_min_duration(&self, duration: MsDuration) {
self.min_duration.set(Some(duration));
}
pub fn remove_min_duration(&self) {
self.min_duration.set(None);
}
// see step 13 of https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
fn user_agent_pad(&self, current_duration: MsDuration) -> MsDuration {
match self.min_duration.get() {
Some(min_duration) => {
cmp::max(min_duration, current_duration)
},
None => current_duration
}
}
// see https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
fn initialize_and_schedule(&self, global: &GlobalScope, mut task: JsTimerTask) {
let handle = task.handle;
let mut active_timers = self.active_timers.borrow_mut();
// step 6
let nesting_level = self.nesting_level.get();
// step 7, 13
let duration = self.user_agent_pad(clamp_duration(nesting_level, task.duration));
// step 8, 9
task.nesting_level = nesting_level + 1;
// essentially step 11, 12, and 14
let callback = OneshotTimerCallback::JsTimer(task);
let oneshot_handle = global.schedule_callback(callback, duration);
// step 3
let entry = active_timers.entry(handle).or_insert(JsTimerEntry {
oneshot_handle: oneshot_handle,
});
entry.oneshot_handle = oneshot_handle;
}
}
// see step 7 of https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
fn clamp_duration(nesting_level: u32, unclamped: MsDuration) -> MsDuration {
let lower_bound = if nesting_level > 5 {
4
} else {
0
};
cmp::max(Length::new(lower_bound), unclamped)
}
impl JsTimerTask {
// see https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
pub fn invoke<T: DomObject>(self, this: &T, timers: &JsTimers) {
// step 4.1 can be ignored, because we proactively prevent execution
// of this task when its scheduled execution is canceled.
// prep for step 6 in nested set_timeout_or_interval calls
timers.nesting_level.set(self.nesting_level);
// step 4.2
match self.callback {
InternalTimerCallback::StringTimerCallback(ref code_str) => {
let global = this.global();
let cx = global.get_cx();
rooted!(in(cx) let mut rval = UndefinedValue());
global.evaluate_js_on_global_with_result(
code_str, rval.handle_mut());
},
InternalTimerCallback::FunctionTimerCallback(ref function, ref arguments) => {
let arguments = self.collect_heap_args(arguments);
let _ = function.Call_(this, arguments, Report);
},
};
// reset nesting level (see above)
timers.nesting_level.set(0);
// step 4.3
        // Since we choose to proactively prevent execution (see 4.1 above), we must only
// reschedule repeating timers when they were not canceled as part of step 4.2.
if self.is_interval == IsInterval::Interval &&
timers.active_timers.borrow().contains_key(&self.handle) {
timers.initialize_and_schedule(&this.global(), self);
}
}
// Returning Handles directly from Heap values is inherently unsafe, but here it's
// always done via rooted JsTimers, which is safe.
#[allow(unsafe_code)]
fn collect_heap_args<'b>(&self, args: &'b [Heap<JSVal>]) -> Vec<HandleValue<'b>> {
args.iter().map(|arg| unsafe { arg.handle() }).collect()
}
}
|
fn schedule_timer_call(&self) {
|
random_line_split
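The reversed Ord in the listing above keeps the timer that is due soonest at the back of the sorted Vec, so it can be read with last() and removed with pop(); a minimal sketch of that invariant with an illustrative Timer struct:

use std::cmp::Ordering;

// Same reversed ordering as OneshotTimer: earlier deadlines compare as greater,
// so after sorted insertion they end up at the back of the Vec.
#[derive(Debug, Eq, PartialEq)]
struct Timer {
    handle: u32,
    scheduled_for: u64,
}

impl Ord for Timer {
    fn cmp(&self, other: &Self) -> Ordering {
        match self.scheduled_for.cmp(&other.scheduled_for).reverse() {
            Ordering::Equal => self.handle.cmp(&other.handle).reverse(),
            res => res,
        }
    }
}

impl PartialOrd for Timer {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut timers: Vec<Timer> = Vec::new();
    for (handle, scheduled_for) in [(1, 30), (2, 10), (3, 20)] {
        let timer = Timer { handle, scheduled_for };
        let idx = timers.binary_search(&timer).err().unwrap();
        timers.insert(idx, timer);
    }
    // The timer with the earliest deadline sits at the back.
    assert_eq!(timers.last().unwrap().scheduled_for, 10);
    assert_eq!(timers.pop().unwrap().handle, 2);
}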
|
data_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use hyper::mime::{Attr, Mime, SubLevel, TopLevel, Value};
use mime_classifier::MimeClassifier;
use net_traits::{LoadData, Metadata, NetworkError};
use net_traits::LoadConsumer;
use net_traits::ProgressMsg::{Done, Payload};
use resource_thread::{CancellationListener, send_error, start_sending_sniffed_opt};
use rustc_serialize::base64::FromBase64;
use std::sync::Arc;
use url::{Position, Url};
use url::percent_encoding::percent_decode;
pub fn factory(load_data: LoadData,
senders: LoadConsumer,
classifier: Arc<MimeClassifier>,
cancel_listener: CancellationListener) {
// NB: we don't spawn a new thread.
// Hypothesis: data URLs are too small for parallel base64 etc. to be worth it.
// Should be tested at some point.
// Left in separate function to allow easy moving to a thread, if desired.
load(load_data, senders, classifier, cancel_listener)
}
pub enum DecodeError {
InvalidDataUri,
NonBase64DataUri,
}
pub type DecodeData = (Mime, Vec<u8>);
pub fn decode(url: &Url) -> Result<DecodeData, DecodeError> {
assert!(url.scheme() == "data");
// Split out content type and data.
let parts: Vec<&str> = url[Position::BeforePath..Position::AfterQuery].splitn(2, ',').collect();
    if parts.len() != 2 {
return Err(DecodeError::InvalidDataUri);
}
// ";base64" must come at the end of the content type, per RFC 2397.
// rust-http will fail to parse it because there's no =value part.
let mut ct_str = parts[0];
let is_base64 = ct_str.ends_with(";base64");
if is_base64 {
ct_str = &ct_str[..ct_str.len() - ";base64".len()];
}
let ct_str = if ct_str.starts_with(";charset=") {
format!("text/plain{}", ct_str)
} else {
ct_str.to_owned()
};
let content_type = ct_str.parse().unwrap_or_else(|_| {
Mime(TopLevel::Text, SubLevel::Plain,
vec![(Attr::Charset, Value::Ext("US-ASCII".to_owned()))])
});
let mut bytes = percent_decode(parts[1].as_bytes()).collect::<Vec<_>>();
if is_base64 {
// FIXME(#2909): It’s unclear what to do with non-alphabet characters,
// but Acid 3 apparently depends on spaces being ignored.
        bytes = bytes.into_iter().filter(|&b| b != ' ' as u8).collect::<Vec<u8>>();
match bytes.from_base64() {
Err(..) => return Err(DecodeError::NonBase64DataUri),
Ok(data) => bytes = data,
}
}
Ok((content_type, bytes))
}
pub fn load(load_data: LoadData,
start_chan: LoadConsumer,
classifier: Arc<MimeClassifier>,
cancel_listener: CancellationListener) {
let url = load_data.url;
if cancel_listener.is_cancelled() {
return;
}
match decode(&url) {
Ok((content_type, bytes)) => {
let mut metadata = Metadata::default(url);
metadata.set_content_type(Some(content_type).as_ref());
if let Ok(chan) = start_sending_sniffed_opt(start_chan,
metadata,
classifier,
&bytes,
load_data.context) {
|
},
Err(DecodeError::InvalidDataUri) =>
send_error(url, NetworkError::Internal("invalid data uri".to_owned()), start_chan),
Err(DecodeError::NonBase64DataUri) =>
send_error(url, NetworkError::Internal("non-base64 data uri".to_owned()), start_chan),
}
}
|
let _ = chan.send(Payload(bytes));
let _ = chan.send(Done(Ok(())));
}
|
conditional_block
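A std-only sketch of the splitting step that decode performs above on a data: URL, simplified to a plain &str and leaving out the percent-decoding and base64 work the real function also does:

// Split a data: URL into (content type, is_base64, raw data part).
fn split_data_url(url: &str) -> Option<(&str, bool, &str)> {
    let rest = url.strip_prefix("data:")?;
    let (mut content_type, data) = rest.split_once(',')?;
    // ";base64" must come at the end of the content type, per RFC 2397.
    let is_base64 = content_type.ends_with(";base64");
    if is_base64 {
        content_type = &content_type[..content_type.len() - ";base64".len()];
    }
    Some((content_type, is_base64, data))
}

fn main() {
    let (ct, is_base64, data) = split_data_url("data:text/plain;base64,SGVsbG8=").unwrap();
    assert_eq!(ct, "text/plain");
    assert!(is_base64);
    assert_eq!(data, "SGVsbG8="); // base64 for "Hello"
    // An empty content type is allowed; the listing then falls back to text/plain.
    assert_eq!(split_data_url("data:,plain%20text").unwrap().0, "");
}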
|
data_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use hyper::mime::{Attr, Mime, SubLevel, TopLevel, Value};
use mime_classifier::MimeClassifier;
use net_traits::{LoadData, Metadata, NetworkError};
use net_traits::LoadConsumer;
use net_traits::ProgressMsg::{Done, Payload};
use resource_thread::{CancellationListener, send_error, start_sending_sniffed_opt};
use rustc_serialize::base64::FromBase64;
use std::sync::Arc;
use url::{Position, Url};
use url::percent_encoding::percent_decode;
pub fn factory(load_data: LoadData,
senders: LoadConsumer,
classifier: Arc<MimeClassifier>,
cancel_listener: CancellationListener) {
// NB: we don't spawn a new thread.
// Hypothesis: data URLs are too small for parallel base64 etc. to be worth it.
// Should be tested at some point.
// Left in separate function to allow easy moving to a thread, if desired.
load(load_data, senders, classifier, cancel_listener)
}
pub enum DecodeError {
InvalidDataUri,
NonBase64DataUri,
}
|
pub fn decode(url: &Url) -> Result<DecodeData, DecodeError> {
assert!(url.scheme() == "data");
// Split out content type and data.
let parts: Vec<&str> = url[Position::BeforePath..Position::AfterQuery].splitn(2, ',').collect();
    if parts.len() != 2 {
return Err(DecodeError::InvalidDataUri);
}
// ";base64" must come at the end of the content type, per RFC 2397.
// rust-http will fail to parse it because there's no =value part.
let mut ct_str = parts[0];
let is_base64 = ct_str.ends_with(";base64");
if is_base64 {
ct_str = &ct_str[..ct_str.len() - ";base64".len()];
}
let ct_str = if ct_str.starts_with(";charset=") {
format!("text/plain{}", ct_str)
} else {
ct_str.to_owned()
};
let content_type = ct_str.parse().unwrap_or_else(|_| {
Mime(TopLevel::Text, SubLevel::Plain,
vec![(Attr::Charset, Value::Ext("US-ASCII".to_owned()))])
});
let mut bytes = percent_decode(parts[1].as_bytes()).collect::<Vec<_>>();
if is_base64 {
// FIXME(#2909): It’s unclear what to do with non-alphabet characters,
// but Acid 3 apparently depends on spaces being ignored.
        bytes = bytes.into_iter().filter(|&b| b != ' ' as u8).collect::<Vec<u8>>();
match bytes.from_base64() {
Err(..) => return Err(DecodeError::NonBase64DataUri),
Ok(data) => bytes = data,
}
}
Ok((content_type, bytes))
}
pub fn load(load_data: LoadData,
start_chan: LoadConsumer,
classifier: Arc<MimeClassifier>,
cancel_listener: CancellationListener) {
let url = load_data.url;
if cancel_listener.is_cancelled() {
return;
}
match decode(&url) {
Ok((content_type, bytes)) => {
let mut metadata = Metadata::default(url);
metadata.set_content_type(Some(content_type).as_ref());
if let Ok(chan) = start_sending_sniffed_opt(start_chan,
metadata,
classifier,
&bytes,
load_data.context) {
let _ = chan.send(Payload(bytes));
let _ = chan.send(Done(Ok(())));
}
},
Err(DecodeError::InvalidDataUri) =>
send_error(url, NetworkError::Internal("invalid data uri".to_owned()), start_chan),
Err(DecodeError::NonBase64DataUri) =>
send_error(url, NetworkError::Internal("non-base64 data uri".to_owned()), start_chan),
}
}
|
pub type DecodeData = (Mime, Vec<u8>);
|
random_line_split
|
data_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use hyper::mime::{Attr, Mime, SubLevel, TopLevel, Value};
use mime_classifier::MimeClassifier;
use net_traits::{LoadData, Metadata, NetworkError};
use net_traits::LoadConsumer;
use net_traits::ProgressMsg::{Done, Payload};
use resource_thread::{CancellationListener, send_error, start_sending_sniffed_opt};
use rustc_serialize::base64::FromBase64;
use std::sync::Arc;
use url::{Position, Url};
use url::percent_encoding::percent_decode;
pub fn factory(load_data: LoadData,
senders: LoadConsumer,
classifier: Arc<MimeClassifier>,
cancel_listener: CancellationListener) {
// NB: we don't spawn a new thread.
// Hypothesis: data URLs are too small for parallel base64 etc. to be worth it.
// Should be tested at some point.
// Left in separate function to allow easy moving to a thread, if desired.
load(load_data, senders, classifier, cancel_listener)
}
pub enum
|
{
InvalidDataUri,
NonBase64DataUri,
}
pub type DecodeData = (Mime, Vec<u8>);
pub fn decode(url: &Url) -> Result<DecodeData, DecodeError> {
assert!(url.scheme() == "data");
// Split out content type and data.
let parts: Vec<&str> = url[Position::BeforePath..Position::AfterQuery].splitn(2, ',').collect();
    if parts.len() != 2 {
return Err(DecodeError::InvalidDataUri);
}
// ";base64" must come at the end of the content type, per RFC 2397.
// rust-http will fail to parse it because there's no =value part.
let mut ct_str = parts[0];
let is_base64 = ct_str.ends_with(";base64");
if is_base64 {
ct_str = &ct_str[..ct_str.len() - ";base64".len()];
}
let ct_str = if ct_str.starts_with(";charset=") {
format!("text/plain{}", ct_str)
} else {
ct_str.to_owned()
};
let content_type = ct_str.parse().unwrap_or_else(|_| {
Mime(TopLevel::Text, SubLevel::Plain,
vec![(Attr::Charset, Value::Ext("US-ASCII".to_owned()))])
});
let mut bytes = percent_decode(parts[1].as_bytes()).collect::<Vec<_>>();
if is_base64 {
// FIXME(#2909): It’s unclear what to do with non-alphabet characters,
// but Acid 3 apparently depends on spaces being ignored.
        bytes = bytes.into_iter().filter(|&b| b != ' ' as u8).collect::<Vec<u8>>();
match bytes.from_base64() {
Err(..) => return Err(DecodeError::NonBase64DataUri),
Ok(data) => bytes = data,
}
}
Ok((content_type, bytes))
}
pub fn load(load_data: LoadData,
start_chan: LoadConsumer,
classifier: Arc<MimeClassifier>,
cancel_listener: CancellationListener) {
let url = load_data.url;
if cancel_listener.is_cancelled() {
return;
}
match decode(&url) {
Ok((content_type, bytes)) => {
let mut metadata = Metadata::default(url);
metadata.set_content_type(Some(content_type).as_ref());
if let Ok(chan) = start_sending_sniffed_opt(start_chan,
metadata,
classifier,
&bytes,
load_data.context) {
let _ = chan.send(Payload(bytes));
let _ = chan.send(Done(Ok(())));
}
},
Err(DecodeError::InvalidDataUri) =>
send_error(url, NetworkError::Internal("invalid data uri".to_owned()), start_chan),
Err(DecodeError::NonBase64DataUri) =>
send_error(url, NetworkError::Internal("non-base64 data uri".to_owned()), start_chan),
}
}
|
DecodeError
|
identifier_name
|
data_loader.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use hyper::mime::{Attr, Mime, SubLevel, TopLevel, Value};
use mime_classifier::MimeClassifier;
use net_traits::{LoadData, Metadata, NetworkError};
use net_traits::LoadConsumer;
use net_traits::ProgressMsg::{Done, Payload};
use resource_thread::{CancellationListener, send_error, start_sending_sniffed_opt};
use rustc_serialize::base64::FromBase64;
use std::sync::Arc;
use url::{Position, Url};
use url::percent_encoding::percent_decode;
pub fn factory(load_data: LoadData,
senders: LoadConsumer,
classifier: Arc<MimeClassifier>,
cancel_listener: CancellationListener) {
// NB: we don't spawn a new thread.
// Hypothesis: data URLs are too small for parallel base64 etc. to be worth it.
// Should be tested at some point.
// Left in separate function to allow easy moving to a thread, if desired.
load(load_data, senders, classifier, cancel_listener)
}
pub enum DecodeError {
InvalidDataUri,
NonBase64DataUri,
}
pub type DecodeData = (Mime, Vec<u8>);
pub fn decode(url: &Url) -> Result<DecodeData, DecodeError> {
assert!(url.scheme() == "data");
// Split out content type and data.
let parts: Vec<&str> = url[Position::BeforePath..Position::AfterQuery].splitn(2, ',').collect();
    if parts.len() != 2 {
return Err(DecodeError::InvalidDataUri);
}
// ";base64" must come at the end of the content type, per RFC 2397.
// rust-http will fail to parse it because there's no =value part.
let mut ct_str = parts[0];
let is_base64 = ct_str.ends_with(";base64");
if is_base64 {
ct_str = &ct_str[..ct_str.len() - ";base64".len()];
}
let ct_str = if ct_str.starts_with(";charset=") {
format!("text/plain{}", ct_str)
} else {
ct_str.to_owned()
};
let content_type = ct_str.parse().unwrap_or_else(|_| {
Mime(TopLevel::Text, SubLevel::Plain,
vec![(Attr::Charset, Value::Ext("US-ASCII".to_owned()))])
});
let mut bytes = percent_decode(parts[1].as_bytes()).collect::<Vec<_>>();
if is_base64 {
// FIXME(#2909): It’s unclear what to do with non-alphabet characters,
// but Acid 3 apparently depends on spaces being ignored.
        bytes = bytes.into_iter().filter(|&b| b != ' ' as u8).collect::<Vec<u8>>();
match bytes.from_base64() {
Err(..) => return Err(DecodeError::NonBase64DataUri),
Ok(data) => bytes = data,
}
}
Ok((content_type, bytes))
}
pub fn load(load_data: LoadData,
start_chan: LoadConsumer,
classifier: Arc<MimeClassifier>,
cancel_listener: CancellationListener) {
|
send_error(url, NetworkError::Internal("invalid data uri".to_owned()), start_chan),
Err(DecodeError::NonBase64DataUri) =>
send_error(url, NetworkError::Internal("non-base64 data uri".to_owned()), start_chan),
}
}
|
let url = load_data.url;
if cancel_listener.is_cancelled() {
return;
}
match decode(&url) {
Ok((content_type, bytes)) => {
let mut metadata = Metadata::default(url);
metadata.set_content_type(Some(content_type).as_ref());
if let Ok(chan) = start_sending_sniffed_opt(start_chan,
metadata,
classifier,
&bytes,
load_data.context) {
let _ = chan.send(Payload(bytes));
let _ = chan.send(Done(Ok(())));
}
},
Err(DecodeError::InvalidDataUri) =>
|
identifier_body
|
mod.rs
|
//! **Canonicalization** is the key to constructing a query in the
//! middle of type inference. Ordinarily, it is not possible to store
//! types from type inference in query keys, because they contain
//! references to inference variables whose lifetimes are too short
//! and so forth. Canonicalizing a value T1 using `canonicalize_query`
//! produces two things:
//!
//! - a value T2 where each unbound inference variable has been
//! replaced with a **canonical variable**;
//! - a map M (of type `CanonicalVarValues`) from those canonical
//! variables back to the original.
//!
//! We can then do queries using T2. These will give back constraints
//! on the canonical variables which can be translated, using the map
//! M, into constraints in our source context. This process of
//! translating the results back is done by the
//! `instantiate_query_result` method.
//!
//! For a more detailed look at what is happening here, check
//! out the [chapter in the rustc dev guide][c].
//!
//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
use crate::infer::{ConstVariableOrigin, ConstVariableOriginKind};
use crate::infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin, TypeVariableOriginKind};
use rustc_index::vec::IndexVec;
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::subst::GenericArg;
use rustc_middle::ty::{self, BoundVar, List};
use rustc_span::source_map::Span;
pub use rustc_middle::infer::canonical::*;
use substitute::CanonicalExt;
mod canonicalizer;
pub mod query_response;
mod substitute;
impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
/// Creates a substitution S for the canonical value with fresh
/// inference variables and applies it to the canonical value.
/// Returns both the instantiated result *and* the substitution S.
///
/// This is only meant to be invoked as part of constructing an
/// inference context at the start of a query (see
/// `InferCtxtBuilder::enter_with_canonical`). It basically
/// brings the canonical value "into scope" within your new infcx.
///
/// At the end of processing, the substitution S (once
/// canonicalized) then represents the values that you computed
/// for each of the canonical inputs to your query.
pub fn
|
<T>(
&self,
span: Span,
canonical: &Canonical<'tcx, T>,
) -> (T, CanonicalVarValues<'tcx>)
where
T: TypeFoldable<'tcx>,
{
// For each universe that is referred to in the incoming
// query, create a universe in our local inference context. In
// practice, as of this writing, all queries have no universes
// in them, so this code has no effect, but it is looking
// forward to the day when we *do* want to carry universes
// through into queries.
let universes: IndexVec<ty::UniverseIndex, _> = std::iter::once(ty::UniverseIndex::ROOT)
.chain((0..canonical.max_universe.as_u32()).map(|_| self.create_next_universe()))
.collect();
let canonical_inference_vars =
self.instantiate_canonical_vars(span, canonical.variables, |ui| universes[ui]);
let result = canonical.substitute(self.tcx, &canonical_inference_vars);
(result, canonical_inference_vars)
}
/// Given the "infos" about the canonical variables from some
/// canonical, creates fresh variables with the same
/// characteristics (see `instantiate_canonical_var` for
/// details). You can then use `substitute` to instantiate the
/// canonical variable with these inference variables.
fn instantiate_canonical_vars(
&self,
span: Span,
variables: &List<CanonicalVarInfo<'tcx>>,
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> CanonicalVarValues<'tcx> {
let var_values: IndexVec<BoundVar, GenericArg<'tcx>> = variables
.iter()
.map(|info| self.instantiate_canonical_var(span, info, &universe_map))
.collect();
CanonicalVarValues { var_values }
}
/// Given the "info" about a canonical variable, creates a fresh
/// variable for it. If this is an existentially quantified
/// variable, then you'll get a new inference variable; if it is a
/// universally quantified variable, you get a placeholder.
fn instantiate_canonical_var(
&self,
span: Span,
cv_info: CanonicalVarInfo<'tcx>,
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> GenericArg<'tcx> {
match cv_info.kind {
CanonicalVarKind::Ty(ty_kind) => {
let ty = match ty_kind {
CanonicalTyVarKind::General(ui) => self.next_ty_var_in_universe(
TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span },
universe_map(ui),
),
CanonicalTyVarKind::Int => self.next_int_var(),
CanonicalTyVarKind::Float => self.next_float_var(),
};
ty.into()
}
CanonicalVarKind::PlaceholderTy(ty::PlaceholderType { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderType { universe: universe_mapped, name };
self.tcx.mk_ty(ty::Placeholder(placeholder_mapped)).into()
}
CanonicalVarKind::Region(ui) => self
.next_region_var_in_universe(
RegionVariableOrigin::MiscVariable(span),
universe_map(ui),
)
.into(),
CanonicalVarKind::PlaceholderRegion(ty::PlaceholderRegion { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderRegion { universe: universe_mapped, name };
self.tcx.mk_region(ty::RePlaceholder(placeholder_mapped)).into()
}
CanonicalVarKind::Const(ui) => self
.next_const_var_in_universe(
self.next_ty_var_in_universe(
TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span },
universe_map(ui),
),
ConstVariableOrigin { kind: ConstVariableOriginKind::MiscVariable, span },
universe_map(ui),
)
.into(),
CanonicalVarKind::PlaceholderConst(ty::PlaceholderConst { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderConst { universe: universe_mapped, name };
self.tcx
.mk_const(ty::Const {
val: ty::ConstKind::Placeholder(placeholder_mapped),
ty: name.ty,
})
.into()
}
}
}
}
|
instantiate_canonical_with_fresh_inference_vars
|
identifier_name
|
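The doc comment above can be made concrete with a toy model: replace each distinct unbound variable with a canonical index in order of first appearance, and keep a map from canonical indices back to the originals. The sketch below is self-contained and uses invented types (Term, canonicalize); it does not use rustc's Canonical or CanonicalVarValues machinery and is purely illustrative of the idea.

// Toy model of canonicalization as described above: unresolved inference
// variables (here just `Var(usize)`) are replaced by canonical indices in
// order of first appearance, and `map` records how to translate back.
#[derive(Clone, Debug, PartialEq)]
enum Term {
    Int(i64),
    Var(usize), // an inference variable from some local context
    Pair(Box<Term>, Box<Term>),
}

fn canonicalize(t: &Term, map: &mut Vec<usize>) -> Term {
    match t {
        Term::Int(n) => Term::Int(*n),
        Term::Var(v) => {
            let existing = map.iter().position(|&m| m == *v);
            let idx = match existing {
                Some(i) => i,
                None => {
                    map.push(*v);
                    map.len() - 1
                }
            };
            Term::Var(idx) // canonical variable: its index in `map`
        }
        Term::Pair(a, b) => Term::Pair(
            Box::new(canonicalize(a, map)),
            Box::new(canonicalize(b, map)),
        ),
    }
}

fn main() {
    let t = Term::Pair(Box::new(Term::Var(42)), Box::new(Term::Var(42)));
    let mut map = Vec::new();
    let canonical = canonicalize(&t, &mut map);
    // Both uses of variable 42 become canonical variable 0, and `map`
    // remembers that canonical 0 maps back to variable 42.
    assert_eq!(
        canonical,
        Term::Pair(Box::new(Term::Var(0)), Box::new(Term::Var(0)))
    );
    assert_eq!(map, vec![42]);
}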
mod.rs
|
//! **Canonicalization** is the key to constructing a query in the
//! middle of type inference. Ordinarily, it is not possible to store
//! types from type inference in query keys, because they contain
//! references to inference variables whose lifetimes are too short
//! and so forth. Canonicalizing a value T1 using `canonicalize_query`
//! produces two things:
//!
//! - a value T2 where each unbound inference variable has been
//! replaced with a **canonical variable**;
//! - a map M (of type `CanonicalVarValues`) from those canonical
//! variables back to the original.
//!
//! We can then do queries using T2. These will give back constraints
//! on the canonical variables which can be translated, using the map
//! M, into constraints in our source context. This process of
//! translating the results back is done by the
//! `instantiate_query_result` method.
//!
//! For a more detailed look at what is happening here, check
//! out the [chapter in the rustc dev guide][c].
//!
//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
use crate::infer::{ConstVariableOrigin, ConstVariableOriginKind};
use crate::infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin, TypeVariableOriginKind};
use rustc_index::vec::IndexVec;
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::subst::GenericArg;
use rustc_middle::ty::{self, BoundVar, List};
use rustc_span::source_map::Span;
pub use rustc_middle::infer::canonical::*;
use substitute::CanonicalExt;
mod canonicalizer;
pub mod query_response;
mod substitute;
impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
/// Creates a substitution S for the canonical value with fresh
/// inference variables and applies it to the canonical value.
/// Returns both the instantiated result *and* the substitution S.
///
/// This is only meant to be invoked as part of constructing an
/// inference context at the start of a query (see
/// `InferCtxtBuilder::enter_with_canonical`). It basically
/// brings the canonical value "into scope" within your new infcx.
///
/// At the end of processing, the substitution S (once
/// canonicalized) then represents the values that you computed
/// for each of the canonical inputs to your query.
pub fn instantiate_canonical_with_fresh_inference_vars<T>(
&self,
span: Span,
canonical: &Canonical<'tcx, T>,
) -> (T, CanonicalVarValues<'tcx>)
where
T: TypeFoldable<'tcx>,
|
/// Given the "infos" about the canonical variables from some
/// canonical, creates fresh variables with the same
/// characteristics (see `instantiate_canonical_var` for
/// details). You can then use `substitute` to instantiate the
/// canonical variable with these inference variables.
fn instantiate_canonical_vars(
&self,
span: Span,
variables: &List<CanonicalVarInfo<'tcx>>,
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> CanonicalVarValues<'tcx> {
let var_values: IndexVec<BoundVar, GenericArg<'tcx>> = variables
.iter()
.map(|info| self.instantiate_canonical_var(span, info, &universe_map))
.collect();
CanonicalVarValues { var_values }
}
/// Given the "info" about a canonical variable, creates a fresh
/// variable for it. If this is an existentially quantified
/// variable, then you'll get a new inference variable; if it is a
/// universally quantified variable, you get a placeholder.
fn instantiate_canonical_var(
&self,
span: Span,
cv_info: CanonicalVarInfo<'tcx>,
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> GenericArg<'tcx> {
match cv_info.kind {
CanonicalVarKind::Ty(ty_kind) => {
let ty = match ty_kind {
CanonicalTyVarKind::General(ui) => self.next_ty_var_in_universe(
TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span },
universe_map(ui),
),
CanonicalTyVarKind::Int => self.next_int_var(),
CanonicalTyVarKind::Float => self.next_float_var(),
};
ty.into()
}
CanonicalVarKind::PlaceholderTy(ty::PlaceholderType { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderType { universe: universe_mapped, name };
self.tcx.mk_ty(ty::Placeholder(placeholder_mapped)).into()
}
CanonicalVarKind::Region(ui) => self
.next_region_var_in_universe(
RegionVariableOrigin::MiscVariable(span),
universe_map(ui),
)
.into(),
CanonicalVarKind::PlaceholderRegion(ty::PlaceholderRegion { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderRegion { universe: universe_mapped, name };
self.tcx.mk_region(ty::RePlaceholder(placeholder_mapped)).into()
}
CanonicalVarKind::Const(ui) => self
.next_const_var_in_universe(
self.next_ty_var_in_universe(
TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span },
universe_map(ui),
),
ConstVariableOrigin { kind: ConstVariableOriginKind::MiscVariable, span },
universe_map(ui),
)
.into(),
CanonicalVarKind::PlaceholderConst(ty::PlaceholderConst { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderConst { universe: universe_mapped, name };
self.tcx
.mk_const(ty::Const {
val: ty::ConstKind::Placeholder(placeholder_mapped),
ty: name.ty,
})
.into()
}
}
}
}
|
{
// For each universe that is referred to in the incoming
// query, create a universe in our local inference context. In
// practice, as of this writing, all queries have no universes
// in them, so this code has no effect, but it is looking
// forward to the day when we *do* want to carry universes
// through into queries.
let universes: IndexVec<ty::UniverseIndex, _> = std::iter::once(ty::UniverseIndex::ROOT)
.chain((0..canonical.max_universe.as_u32()).map(|_| self.create_next_universe()))
.collect();
let canonical_inference_vars =
self.instantiate_canonical_vars(span, canonical.variables, |ui| universes[ui]);
let result = canonical.substitute(self.tcx, &canonical_inference_vars);
(result, canonical_inference_vars)
}
|
identifier_body
|
mod.rs
|
//! **Canonicalization** is the key to constructing a query in the
//! middle of type inference. Ordinarily, it is not possible to store
//! types from type inference in query keys, because they contain
//! references to inference variables whose lifetimes are too short
|
//! - a value T2 where each unbound inference variable has been
//! replaced with a **canonical variable**;
//! - a map M (of type `CanonicalVarValues`) from those canonical
//! variables back to the original.
//!
//! We can then do queries using T2. These will give back constraints
//! on the canonical variables which can be translated, using the map
//! M, into constraints in our source context. This process of
//! translating the results back is done by the
//! `instantiate_query_result` method.
//!
//! For a more detailed look at what is happening here, check
//! out the [chapter in the rustc dev guide][c].
//!
//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
use crate::infer::{ConstVariableOrigin, ConstVariableOriginKind};
use crate::infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin, TypeVariableOriginKind};
use rustc_index::vec::IndexVec;
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::subst::GenericArg;
use rustc_middle::ty::{self, BoundVar, List};
use rustc_span::source_map::Span;
pub use rustc_middle::infer::canonical::*;
use substitute::CanonicalExt;
mod canonicalizer;
pub mod query_response;
mod substitute;
impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
/// Creates a substitution S for the canonical value with fresh
/// inference variables and applies it to the canonical value.
/// Returns both the instantiated result *and* the substitution S.
///
/// This is only meant to be invoked as part of constructing an
/// inference context at the start of a query (see
/// `InferCtxtBuilder::enter_with_canonical`). It basically
/// brings the canonical value "into scope" within your new infcx.
///
/// At the end of processing, the substitution S (once
/// canonicalized) then represents the values that you computed
/// for each of the canonical inputs to your query.
pub fn instantiate_canonical_with_fresh_inference_vars<T>(
&self,
span: Span,
canonical: &Canonical<'tcx, T>,
) -> (T, CanonicalVarValues<'tcx>)
where
T: TypeFoldable<'tcx>,
{
// For each universe that is referred to in the incoming
// query, create a universe in our local inference context. In
// practice, as of this writing, all queries have no universes
// in them, so this code has no effect, but it is looking
// forward to the day when we *do* want to carry universes
// through into queries.
let universes: IndexVec<ty::UniverseIndex, _> = std::iter::once(ty::UniverseIndex::ROOT)
.chain((0..canonical.max_universe.as_u32()).map(|_| self.create_next_universe()))
.collect();
let canonical_inference_vars =
self.instantiate_canonical_vars(span, canonical.variables, |ui| universes[ui]);
let result = canonical.substitute(self.tcx, &canonical_inference_vars);
(result, canonical_inference_vars)
}
/// Given the "infos" about the canonical variables from some
/// canonical, creates fresh variables with the same
/// characteristics (see `instantiate_canonical_var` for
/// details). You can then use `substitute` to instantiate the
/// canonical variable with these inference variables.
fn instantiate_canonical_vars(
&self,
span: Span,
variables: &List<CanonicalVarInfo<'tcx>>,
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> CanonicalVarValues<'tcx> {
let var_values: IndexVec<BoundVar, GenericArg<'tcx>> = variables
.iter()
.map(|info| self.instantiate_canonical_var(span, info, &universe_map))
.collect();
CanonicalVarValues { var_values }
}
/// Given the "info" about a canonical variable, creates a fresh
/// variable for it. If this is an existentially quantified
/// variable, then you'll get a new inference variable; if it is a
/// universally quantified variable, you get a placeholder.
fn instantiate_canonical_var(
&self,
span: Span,
cv_info: CanonicalVarInfo<'tcx>,
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> GenericArg<'tcx> {
match cv_info.kind {
CanonicalVarKind::Ty(ty_kind) => {
let ty = match ty_kind {
CanonicalTyVarKind::General(ui) => self.next_ty_var_in_universe(
TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span },
universe_map(ui),
),
CanonicalTyVarKind::Int => self.next_int_var(),
CanonicalTyVarKind::Float => self.next_float_var(),
};
ty.into()
}
CanonicalVarKind::PlaceholderTy(ty::PlaceholderType { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderType { universe: universe_mapped, name };
self.tcx.mk_ty(ty::Placeholder(placeholder_mapped)).into()
}
CanonicalVarKind::Region(ui) => self
.next_region_var_in_universe(
RegionVariableOrigin::MiscVariable(span),
universe_map(ui),
)
.into(),
CanonicalVarKind::PlaceholderRegion(ty::PlaceholderRegion { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderRegion { universe: universe_mapped, name };
self.tcx.mk_region(ty::RePlaceholder(placeholder_mapped)).into()
}
CanonicalVarKind::Const(ui) => self
.next_const_var_in_universe(
self.next_ty_var_in_universe(
TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span },
universe_map(ui),
),
ConstVariableOrigin { kind: ConstVariableOriginKind::MiscVariable, span },
universe_map(ui),
)
.into(),
CanonicalVarKind::PlaceholderConst(ty::PlaceholderConst { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderConst { universe: universe_mapped, name };
self.tcx
.mk_const(ty::Const {
val: ty::ConstKind::Placeholder(placeholder_mapped),
ty: name.ty,
})
.into()
}
}
}
}
|
//! and so forth. Canonicalizing a value T1 using `canonicalize_query`
//! produces two things:
//!
|
random_line_split
|
mod.rs
|
//! **Canonicalization** is the key to constructing a query in the
//! middle of type inference. Ordinarily, it is not possible to store
//! types from type inference in query keys, because they contain
//! references to inference variables whose lifetimes are too short
//! and so forth. Canonicalizing a value T1 using `canonicalize_query`
//! produces two things:
//!
//! - a value T2 where each unbound inference variable has been
//! replaced with a **canonical variable**;
//! - a map M (of type `CanonicalVarValues`) from those canonical
//! variables back to the original.
//!
//! We can then do queries using T2. These will give back constraints
//! on the canonical variables which can be translated, using the map
//! M, into constraints in our source context. This process of
//! translating the results back is done by the
//! `instantiate_query_result` method.
//!
//! For a more detailed look at what is happening here, check
//! out the [chapter in the rustc dev guide][c].
//!
//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
use crate::infer::{ConstVariableOrigin, ConstVariableOriginKind};
use crate::infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin, TypeVariableOriginKind};
use rustc_index::vec::IndexVec;
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::subst::GenericArg;
use rustc_middle::ty::{self, BoundVar, List};
use rustc_span::source_map::Span;
pub use rustc_middle::infer::canonical::*;
use substitute::CanonicalExt;
mod canonicalizer;
pub mod query_response;
mod substitute;
impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
/// Creates a substitution S for the canonical value with fresh
/// inference variables and applies it to the canonical value.
/// Returns both the instantiated result *and* the substitution S.
///
/// This is only meant to be invoked as part of constructing an
/// inference context at the start of a query (see
/// `InferCtxtBuilder::enter_with_canonical`). It basically
/// brings the canonical value "into scope" within your new infcx.
///
/// At the end of processing, the substitution S (once
/// canonicalized) then represents the values that you computed
/// for each of the canonical inputs to your query.
pub fn instantiate_canonical_with_fresh_inference_vars<T>(
&self,
span: Span,
canonical: &Canonical<'tcx, T>,
) -> (T, CanonicalVarValues<'tcx>)
where
T: TypeFoldable<'tcx>,
{
// For each universe that is referred to in the incoming
// query, create a universe in our local inference context. In
// practice, as of this writing, all queries have no universes
// in them, so this code has no effect, but it is looking
// forward to the day when we *do* want to carry universes
// through into queries.
let universes: IndexVec<ty::UniverseIndex, _> = std::iter::once(ty::UniverseIndex::ROOT)
.chain((0..canonical.max_universe.as_u32()).map(|_| self.create_next_universe()))
.collect();
let canonical_inference_vars =
self.instantiate_canonical_vars(span, canonical.variables, |ui| universes[ui]);
let result = canonical.substitute(self.tcx, &canonical_inference_vars);
(result, canonical_inference_vars)
}
/// Given the "infos" about the canonical variables from some
/// canonical, creates fresh variables with the same
/// characteristics (see `instantiate_canonical_var` for
/// details). You can then use `substitute` to instantiate the
/// canonical variable with these inference variables.
fn instantiate_canonical_vars(
&self,
span: Span,
variables: &List<CanonicalVarInfo<'tcx>>,
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> CanonicalVarValues<'tcx> {
let var_values: IndexVec<BoundVar, GenericArg<'tcx>> = variables
.iter()
.map(|info| self.instantiate_canonical_var(span, info, &universe_map))
.collect();
CanonicalVarValues { var_values }
}
/// Given the "info" about a canonical variable, creates a fresh
/// variable for it. If this is an existentially quantified
/// variable, then you'll get a new inference variable; if it is a
/// universally quantified variable, you get a placeholder.
fn instantiate_canonical_var(
&self,
span: Span,
cv_info: CanonicalVarInfo<'tcx>,
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> GenericArg<'tcx> {
match cv_info.kind {
CanonicalVarKind::Ty(ty_kind) => {
let ty = match ty_kind {
CanonicalTyVarKind::General(ui) => self.next_ty_var_in_universe(
TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span },
universe_map(ui),
),
CanonicalTyVarKind::Int => self.next_int_var(),
CanonicalTyVarKind::Float => self.next_float_var(),
};
ty.into()
}
CanonicalVarKind::PlaceholderTy(ty::PlaceholderType { universe, name }) =>
|
CanonicalVarKind::Region(ui) => self
.next_region_var_in_universe(
RegionVariableOrigin::MiscVariable(span),
universe_map(ui),
)
.into(),
CanonicalVarKind::PlaceholderRegion(ty::PlaceholderRegion { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderRegion { universe: universe_mapped, name };
self.tcx.mk_region(ty::RePlaceholder(placeholder_mapped)).into()
}
CanonicalVarKind::Const(ui) => self
.next_const_var_in_universe(
self.next_ty_var_in_universe(
TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span },
universe_map(ui),
),
ConstVariableOrigin { kind: ConstVariableOriginKind::MiscVariable, span },
universe_map(ui),
)
.into(),
CanonicalVarKind::PlaceholderConst(ty::PlaceholderConst { universe, name }) => {
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderConst { universe: universe_mapped, name };
self.tcx
.mk_const(ty::Const {
val: ty::ConstKind::Placeholder(placeholder_mapped),
ty: name.ty,
})
.into()
}
}
}
}
|
{
let universe_mapped = universe_map(universe);
let placeholder_mapped = ty::PlaceholderType { universe: universe_mapped, name };
self.tcx.mk_ty(ty::Placeholder(placeholder_mapped)).into()
}
|
conditional_block
|
distinct.rs
|
extern crate timely;
use std::fmt::Debug;
use std::hash::Hash;
use timely::communication::Data;
use timely::progress::timestamp::RootTimestamp;
use timely::progress::nested::Summary::Local;
use timely::construction::*;
use timely::construction::operators::*;
fn main() {
timely::execute(std::env::args(), |root| {
let (mut input1, mut input2) = root.subcomputation(|graph| {
// try building some input scopes
let (input1, stream1) = graph.new_input::<u64>();
let (input2, stream2) = graph.new_input::<u64>();
// prepare some feedback edges
let (loop1_source, loop1) = graph.loop_variable(RootTimestamp::new(100), Local(1));
let (loop2_source, loop2) = graph.loop_variable(RootTimestamp::new(100), Local(1));
let concat1 = stream1.concat(&loop1);//graph.concatenate(vec![stream1, loop1]);
let concat2 = stream2.concat(&loop2);//(&mut graph.concatenate(vec![stream2, loop2]);
// build up a subgraph using the concatenated inputs/feedbacks
let (egress1, egress2) = create_subgraph(graph, &concat1, &concat2);
// connect feedback sources. notice that we have swapped indices...
egress1.connect_loop(loop2_source);
egress2.connect_loop(loop1_source);
(input1, input2)
});
root.step();
// move some data into the dataflow graph.
input1.give(0);
input2.give(1);
// see what everyone thinks about that...
|
input2.advance_to(1000000);
input1.close();
input2.close();
// spin
while root.step() { }
});
}
fn create_subgraph<G: GraphBuilder, D>(builder: &G, source1: &Stream<G, D>, source2: &Stream<G, D>) -> (Stream<G, D>, Stream<G, D>)
where D: Data+Hash+Eq+Debug, G::Timestamp: Hash {
builder.clone().subcomputation::<u64,_,_>(|subgraph| {
(subgraph.enter(source1).leave(),
subgraph.enter(source2).leave())
})
}
|
root.step();
input1.advance_to(1000000);
|
random_line_split
|
distinct.rs
|
extern crate timely;
use std::fmt::Debug;
use std::hash::Hash;
use timely::communication::Data;
use timely::progress::timestamp::RootTimestamp;
use timely::progress::nested::Summary::Local;
use timely::construction::*;
use timely::construction::operators::*;
fn main() {
timely::execute(std::env::args(), |root| {
let (mut input1, mut input2) = root.subcomputation(|graph| {
// try building some input scopes
let (input1, stream1) = graph.new_input::<u64>();
let (input2, stream2) = graph.new_input::<u64>();
// prepare some feedback edges
let (loop1_source, loop1) = graph.loop_variable(RootTimestamp::new(100), Local(1));
let (loop2_source, loop2) = graph.loop_variable(RootTimestamp::new(100), Local(1));
let concat1 = stream1.concat(&loop1);//graph.concatenate(vec![stream1, loop1]);
let concat2 = stream2.concat(&loop2);//(&mut graph.concatenate(vec![stream2, loop2]);
// build up a subgraph using the concatenated inputs/feedbacks
let (egress1, egress2) = create_subgraph(graph, &concat1, &concat2);
// connect feedback sources. notice that we have swapped indices...
egress1.connect_loop(loop2_source);
egress2.connect_loop(loop1_source);
(input1, input2)
});
root.step();
// move some data into the dataflow graph.
input1.give(0);
input2.give(1);
// see what everyone thinks about that...
root.step();
input1.advance_to(1000000);
input2.advance_to(1000000);
input1.close();
input2.close();
// spin
while root.step() { }
});
}
fn
|
<G: GraphBuilder, D>(builder: &G, source1: &Stream<G, D>, source2: &Stream<G, D>) -> (Stream<G, D>, Stream<G, D>)
where D: Data+Hash+Eq+Debug, G::Timestamp: Hash {
builder.clone().subcomputation::<u64,_,_>(|subgraph| {
(subgraph.enter(source1).leave(),
subgraph.enter(source2).leave())
})
}
|
create_subgraph
|
identifier_name
|
distinct.rs
|
extern crate timely;
use std::fmt::Debug;
use std::hash::Hash;
use timely::communication::Data;
use timely::progress::timestamp::RootTimestamp;
use timely::progress::nested::Summary::Local;
use timely::construction::*;
use timely::construction::operators::*;
fn main() {
timely::execute(std::env::args(), |root| {
let (mut input1, mut input2) = root.subcomputation(|graph| {
// try building some input scopes
let (input1, stream1) = graph.new_input::<u64>();
let (input2, stream2) = graph.new_input::<u64>();
// prepare some feedback edges
let (loop1_source, loop1) = graph.loop_variable(RootTimestamp::new(100), Local(1));
let (loop2_source, loop2) = graph.loop_variable(RootTimestamp::new(100), Local(1));
let concat1 = stream1.concat(&loop1);//graph.concatenate(vec![stream1, loop1]);
let concat2 = stream2.concat(&loop2);//(&mut graph.concatenate(vec![stream2, loop2]);
// build up a subgraph using the concatenated inputs/feedbacks
let (egress1, egress2) = create_subgraph(graph, &concat1, &concat2);
// connect feedback sources. notice that we have swapped indices...
egress1.connect_loop(loop2_source);
egress2.connect_loop(loop1_source);
(input1, input2)
});
root.step();
// move some data into the dataflow graph.
input1.give(0);
input2.give(1);
// see what everyone thinks about that...
root.step();
input1.advance_to(1000000);
input2.advance_to(1000000);
input1.close();
input2.close();
// spin
while root.step() { }
});
}
fn create_subgraph<G: GraphBuilder, D>(builder: &G, source1: &Stream<G, D>, source2: &Stream<G, D>) -> (Stream<G, D>, Stream<G, D>)
where D: Data+Hash+Eq+Debug, G::Timestamp: Hash
|
{
builder.clone().subcomputation::<u64,_,_>(|subgraph| {
(subgraph.enter(source1).leave(),
subgraph.enter(source2).leave())
})
}
|
identifier_body
|
|
lib.rs
|
// This is a part of rust-encoding.
//
// Any copyright is dedicated to the Public Domain.
// https://creativecommons.org/publicdomain/zero/1.0/
//! Single-byte index tables for
//! [rust-encoding](https://github.com/lifthrasiir/rust-encoding).
#![cfg_attr(test, feature(test))]
#[cfg(test)]
#[macro_use]
extern crate encoding_index_tests;
/// ARMSCII-8
pub mod armscii_8;
/// IBM code page 866.
pub mod ibm866;
/// ISO 8859-2.
pub mod iso_8859_2;
/// ISO 8859-3.
pub mod iso_8859_3;
/// ISO 8859-4.
pub mod iso_8859_4;
/// ISO 8859-5.
pub mod iso_8859_5;
/// ISO 8859-6.
pub mod iso_8859_6;
/// ISO 8859-7.
pub mod iso_8859_7;
/// ISO 8859-8 (either visual or logical).
pub mod iso_8859_8;
/// ISO 8859-10.
pub mod iso_8859_10;
/// ISO 8859-13.
pub mod iso_8859_13;
/// ISO 8859-14.
pub mod iso_8859_14;
/// ISO 8859-15.
pub mod iso_8859_15;
/// ISO 8859-16.
pub mod iso_8859_16;
|
pub mod koi8_r;
/// KOI8-U.
pub mod koi8_u;
/// MacRoman.
pub mod macintosh;
/// Windows code page 874.
pub mod windows_874;
/// Windows code page 1250.
pub mod windows_1250;
/// Windows code page 1251.
pub mod windows_1251;
/// Windows code page 1252.
pub mod windows_1252;
/// Windows code page 1253.
pub mod windows_1253;
/// Windows code page 1254.
pub mod windows_1254;
/// Windows code page 1254.
pub mod windows_1255;
/// Windows code page 1256.
pub mod windows_1256;
/// Windows code page 1257.
pub mod windows_1257;
/// Windows code page 1258.
pub mod windows_1258;
/// MacCyrillic.
pub mod x_mac_cyrillic;
|
/// KOI8-R.
|
random_line_split
|
lib.rs
|
pub fn bottle_or_bottles(n: i32) -> &'static str {
match n {
1 => "bottle",
_ => "bottles",
}
}
pub fn sing(n: i32) {
for i in (1..n + 1).rev() {
println!(
"{0} {1} of beer on the wall, {0} {1} of beer.",
i,
bottle_or_bottles(i)
);
println!(
"Take one down and pass it around, {0} {1} of beer on the wall!",
i - 1,
bottle_or_bottles(i - 1)
);
println!();
}
println!("No more bottles of beer on the wall, no more bottles of beer.");
println!(
"Go to the store and buy some more, {0} bottles of beer on the wall.",
|
n
);
}
|
random_line_split
|
|
lib.rs
|
pub fn bottle_or_bottles(n: i32) -> &'static str {
match n {
1 => "bottle",
_ => "bottles",
}
}
pub fn
|
(n: i32) {
for i in (1..n + 1).rev() {
println!(
"{0} {1} of beer on the wall, {0} {1} of beer.",
i,
bottle_or_bottles(i)
);
println!(
"Take one down and pass it around, {0} {1} of beer on the wall!",
i - 1,
bottle_or_bottles(i - 1)
);
println!();
}
println!("No more bottles of beer on the wall, no more bottles of beer.");
println!(
"Go to the store and buy some more, {0} bottles of beer on the wall.",
n
);
}
|
sing
|
identifier_name
|
lib.rs
|
pub fn bottle_or_bottles(n: i32) -> &'static str {
match n {
1 => "bottle",
_ => "bottles",
}
}
pub fn sing(n: i32)
|
{
for i in (1..n + 1).rev() {
println!(
"{0} {1} of beer on the wall, {0} {1} of beer.",
i,
bottle_or_bottles(i)
);
println!(
"Take one down and pass it around, {0} {1} of beer on the wall!",
i - 1,
bottle_or_bottles(i - 1)
);
println!();
}
println!("No more bottles of beer on the wall, no more bottles of beer.");
println!(
"Go to the store and buy some more, {0} bottles of beer on the wall.",
n
);
}
|
identifier_body
|
|
puback.rs
|
//! PUBACK
use std::io::Read;
use crate::control::variable_header::PacketIdentifier;
use crate::control::{ControlType, FixedHeader, PacketType};
use crate::packet::{DecodablePacket, PacketError};
use crate::Decodable;
/// `PUBACK` packet
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct PubackPacket {
fixed_header: FixedHeader,
packet_identifier: PacketIdentifier,
}
encodable_packet!(PubackPacket(packet_identifier));
impl PubackPacket {
pub fn new(pkid: u16) -> PubackPacket {
PubackPacket {
fixed_header: FixedHeader::new(PacketType::with_default(ControlType::PublishAcknowledgement), 2),
packet_identifier: PacketIdentifier(pkid),
|
}
}
pub fn packet_identifier(&self) -> u16 {
self.packet_identifier.0
}
pub fn set_packet_identifier(&mut self, pkid: u16) {
self.packet_identifier.0 = pkid;
}
}
impl DecodablePacket for PubackPacket {
type DecodePacketError = std::convert::Infallible;
fn decode_packet<R: Read>(reader: &mut R, fixed_header: FixedHeader) -> Result<Self, PacketError<Self>> {
let packet_identifier: PacketIdentifier = PacketIdentifier::decode(reader)?;
Ok(PubackPacket {
fixed_header,
packet_identifier,
})
}
}
|
random_line_split
|
|
puback.rs
|
//! PUBACK
use std::io::Read;
use crate::control::variable_header::PacketIdentifier;
use crate::control::{ControlType, FixedHeader, PacketType};
use crate::packet::{DecodablePacket, PacketError};
use crate::Decodable;
/// `PUBACK` packet
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct PubackPacket {
fixed_header: FixedHeader,
packet_identifier: PacketIdentifier,
}
encodable_packet!(PubackPacket(packet_identifier));
impl PubackPacket {
pub fn new(pkid: u16) -> PubackPacket
|
pub fn packet_identifier(&self) -> u16 {
self.packet_identifier.0
}
pub fn set_packet_identifier(&mut self, pkid: u16) {
self.packet_identifier.0 = pkid;
}
}
impl DecodablePacket for PubackPacket {
type DecodePacketError = std::convert::Infallible;
fn decode_packet<R: Read>(reader: &mut R, fixed_header: FixedHeader) -> Result<Self, PacketError<Self>> {
let packet_identifier: PacketIdentifier = PacketIdentifier::decode(reader)?;
Ok(PubackPacket {
fixed_header,
packet_identifier,
})
}
}
|
{
PubackPacket {
fixed_header: FixedHeader::new(PacketType::with_default(ControlType::PublishAcknowledgement), 2),
packet_identifier: PacketIdentifier(pkid),
}
}
|
identifier_body
|
puback.rs
|
//! PUBACK
use std::io::Read;
use crate::control::variable_header::PacketIdentifier;
use crate::control::{ControlType, FixedHeader, PacketType};
use crate::packet::{DecodablePacket, PacketError};
use crate::Decodable;
/// `PUBACK` packet
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct PubackPacket {
fixed_header: FixedHeader,
packet_identifier: PacketIdentifier,
}
encodable_packet!(PubackPacket(packet_identifier));
impl PubackPacket {
pub fn
|
(pkid: u16) -> PubackPacket {
PubackPacket {
fixed_header: FixedHeader::new(PacketType::with_default(ControlType::PublishAcknowledgement), 2),
packet_identifier: PacketIdentifier(pkid),
}
}
pub fn packet_identifier(&self) -> u16 {
self.packet_identifier.0
}
pub fn set_packet_identifier(&mut self, pkid: u16) {
self.packet_identifier.0 = pkid;
}
}
impl DecodablePacket for PubackPacket {
type DecodePacketError = std::convert::Infallible;
fn decode_packet<R: Read>(reader: &mut R, fixed_header: FixedHeader) -> Result<Self, PacketError<Self>> {
let packet_identifier: PacketIdentifier = PacketIdentifier::decode(reader)?;
Ok(PubackPacket {
fixed_header,
packet_identifier,
})
}
}
|
new
|
identifier_name
|
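For reference, an MQTT 3.1.1 PUBACK on the wire is four bytes: a fixed header byte 0x40 (packet type 4, flags 0), a remaining length of 2, and the packet identifier as a big-endian u16, which is what the FixedHeader::new(..., 2) above encodes. The hand-rolled sketch below illustrates only the wire format; encode_puback and decode_puback are invented names and are not the crate's encoder.

// Hand-rolled MQTT 3.1.1 PUBACK bytes: type 4 shifted into the high nibble
// (0x40), remaining length 2, then the packet identifier as big-endian u16.
fn encode_puback(pkid: u16) -> [u8; 4] {
    let id = pkid.to_be_bytes();
    [0x40, 0x02, id[0], id[1]]
}

fn decode_puback(buf: &[u8]) -> Option<u16> {
    if buf.len() == 4 && buf[0] == 0x40 && buf[1] == 0x02 {
        Some(u16::from_be_bytes([buf[2], buf[3]]))
    } else {
        None
    }
}

fn main() {
    let bytes = encode_puback(10);
    assert_eq!(bytes, [0x40, 0x02, 0x00, 0x0a]);
    assert_eq!(decode_puback(&bytes), Some(10));
}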
dedicatedworkerglobalscope.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::DedicatedWorkerGlobalScopeBinding;
use dom::bindings::codegen::Bindings::DedicatedWorkerGlobalScopeBinding::DedicatedWorkerGlobalScopeMethods;
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::InheritTypes::DedicatedWorkerGlobalScopeDerived;
use dom::bindings::codegen::InheritTypes::{EventTargetCast, WorkerGlobalScopeCast};
use dom::bindings::error::{ErrorResult, DataClone};
use dom::bindings::global;
use dom::bindings::js::{JSRef, Temporary, RootCollection};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::eventtarget::{EventTarget, EventTargetHelpers};
use dom::eventtarget::WorkerGlobalScopeTypeId;
use dom::messageevent::MessageEvent;
use dom::worker::{Worker, TrustedWorkerAddress};
use dom::workerglobalscope::DedicatedGlobalScope;
use dom::workerglobalscope::{WorkerGlobalScope, WorkerGlobalScopeHelpers};
use dom::xmlhttprequest::XMLHttpRequest;
use script_task::{ScriptTask, ScriptChan};
use script_task::{ScriptMsg, FromWorker, DOMMessage, FireTimerMsg, XHRProgressMsg, XHRReleaseMsg, WorkerRelease};
use script_task::WorkerPostMessage;
use script_task::StackRootTLS;
use servo_net::resource_task::{ResourceTask, load_whole_resource};
use servo_util::task::spawn_named_native;
use servo_util::task_state;
use servo_util::task_state::{SCRIPT, IN_WORKER};
use js::glue::JS_STRUCTURED_CLONE_VERSION;
use js::jsapi::{JSContext, JS_ReadStructuredClone, JS_WriteStructuredClone, JS_ClearPendingException};
use js::jsval::{JSVal, UndefinedValue};
use js::rust::Cx;
use std::rc::Rc;
use std::ptr;
use url::Url;
#[dom_struct]
pub struct DedicatedWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope,
receiver: Receiver<ScriptMsg>,
/// Sender to the parent thread.
parent_sender: ScriptChan,
worker: TrustedWorkerAddress,
}
impl DedicatedWorkerGlobalScope {
fn
|
(worker_url: Url,
worker: TrustedWorkerAddress,
cx: Rc<Cx>,
resource_task: ResourceTask,
parent_sender: ScriptChan,
own_sender: ScriptChan,
receiver: Receiver<ScriptMsg>)
-> DedicatedWorkerGlobalScope {
DedicatedWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope::new_inherited(
DedicatedGlobalScope, worker_url, cx, resource_task,
own_sender),
receiver: receiver,
parent_sender: parent_sender,
worker: worker,
}
}
pub fn new(worker_url: Url,
worker: TrustedWorkerAddress,
cx: Rc<Cx>,
resource_task: ResourceTask,
parent_sender: ScriptChan,
own_sender: ScriptChan,
receiver: Receiver<ScriptMsg>)
-> Temporary<DedicatedWorkerGlobalScope> {
let scope = box DedicatedWorkerGlobalScope::new_inherited(
worker_url, worker, cx.clone(), resource_task, parent_sender,
own_sender, receiver);
DedicatedWorkerGlobalScopeBinding::Wrap(cx.ptr, scope)
}
}
impl DedicatedWorkerGlobalScope {
pub fn run_worker_scope(worker_url: Url,
worker: TrustedWorkerAddress,
resource_task: ResourceTask,
parent_sender: ScriptChan,
own_sender: ScriptChan,
receiver: Receiver<ScriptMsg>) {
spawn_named_native(format!("WebWorker for {}", worker_url.serialize()), proc() {
task_state::initialize(SCRIPT | IN_WORKER);
let roots = RootCollection::new();
let _stack_roots_tls = StackRootTLS::new(&roots);
let (url, source) = match load_whole_resource(&resource_task, worker_url.clone()) {
Err(_) => {
println!("error loading script {}", worker_url.serialize());
return;
}
Ok((metadata, bytes)) => {
(metadata.final_url, String::from_utf8(bytes).unwrap())
}
};
let (_js_runtime, js_context) = ScriptTask::new_rt_and_cx();
let global = DedicatedWorkerGlobalScope::new(
worker_url, worker, js_context.clone(), resource_task,
parent_sender, own_sender, receiver).root();
match js_context.evaluate_script(
global.reflector().get_jsobject(), source, url.serialize(), 1) {
Ok(_) => (),
Err(_) => println!("evaluate_script failed")
}
global.delayed_release_worker();
let scope: JSRef<WorkerGlobalScope> =
WorkerGlobalScopeCast::from_ref(*global);
let target: JSRef<EventTarget> =
EventTargetCast::from_ref(*global);
loop {
match global.receiver.recv_opt() {
Ok(DOMMessage(data, nbytes)) => {
let mut message = UndefinedValue();
unsafe {
assert!(JS_ReadStructuredClone(
js_context.ptr, data as *const u64, nbytes,
JS_STRUCTURED_CLONE_VERSION, &mut message,
                                ptr::null(), ptr::null_mut()) != 0);
}
MessageEvent::dispatch_jsval(target, &global::Worker(scope), message);
global.delayed_release_worker();
},
Ok(XHRProgressMsg(addr, progress)) => {
XMLHttpRequest::handle_progress(addr, progress)
},
Ok(XHRReleaseMsg(addr)) => {
XMLHttpRequest::handle_release(addr)
},
Ok(WorkerPostMessage(addr, data, nbytes)) => {
Worker::handle_message(addr, data, nbytes);
},
Ok(WorkerRelease(addr)) => {
Worker::handle_release(addr)
},
Ok(FireTimerMsg(FromWorker, timer_id)) => {
scope.handle_fire_timer(timer_id, js_context.ptr);
}
Ok(_) => panic!("Unexpected message"),
Err(_) => break,
}
}
});
}
}
impl<'a> DedicatedWorkerGlobalScopeMethods for JSRef<'a, DedicatedWorkerGlobalScope> {
fn PostMessage(self, cx: *mut JSContext, message: JSVal) -> ErrorResult {
let mut data = ptr::null_mut();
let mut nbytes = 0;
let result = unsafe {
JS_WriteStructuredClone(cx, message, &mut data, &mut nbytes,
ptr::null(), ptr::null_mut())
};
if result == 0 {
unsafe { JS_ClearPendingException(cx); }
return Err(DataClone);
}
let ScriptChan(ref sender) = self.parent_sender;
sender.send(WorkerPostMessage(self.worker, data, nbytes));
Ok(())
}
event_handler!(message, GetOnmessage, SetOnmessage)
}
trait PrivateDedicatedWorkerGlobalScopeHelpers {
fn delayed_release_worker(self);
}
impl<'a> PrivateDedicatedWorkerGlobalScopeHelpers for JSRef<'a, DedicatedWorkerGlobalScope> {
fn delayed_release_worker(self) {
let ScriptChan(ref sender) = self.parent_sender;
sender.send(WorkerRelease(self.worker));
}
}
impl Reflectable for DedicatedWorkerGlobalScope {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.workerglobalscope.reflector()
}
}
impl DedicatedWorkerGlobalScopeDerived for EventTarget {
fn is_dedicatedworkerglobalscope(&self) -> bool {
match *self.type_id() {
WorkerGlobalScopeTypeId(DedicatedGlobalScope) => true,
_ => false
}
}
}
|
new_inherited
|
identifier_name
|
dedicatedworkerglobalscope.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::DedicatedWorkerGlobalScopeBinding;
use dom::bindings::codegen::Bindings::DedicatedWorkerGlobalScopeBinding::DedicatedWorkerGlobalScopeMethods;
use dom::bindings::codegen::Bindings::EventHandlerBinding::EventHandlerNonNull;
use dom::bindings::codegen::InheritTypes::DedicatedWorkerGlobalScopeDerived;
use dom::bindings::codegen::InheritTypes::{EventTargetCast, WorkerGlobalScopeCast};
use dom::bindings::error::{ErrorResult, DataClone};
use dom::bindings::global;
use dom::bindings::js::{JSRef, Temporary, RootCollection};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::eventtarget::{EventTarget, EventTargetHelpers};
use dom::eventtarget::WorkerGlobalScopeTypeId;
use dom::messageevent::MessageEvent;
use dom::worker::{Worker, TrustedWorkerAddress};
use dom::workerglobalscope::DedicatedGlobalScope;
use dom::workerglobalscope::{WorkerGlobalScope, WorkerGlobalScopeHelpers};
use dom::xmlhttprequest::XMLHttpRequest;
use script_task::{ScriptTask, ScriptChan};
use script_task::{ScriptMsg, FromWorker, DOMMessage, FireTimerMsg, XHRProgressMsg, XHRReleaseMsg, WorkerRelease};
use script_task::WorkerPostMessage;
use script_task::StackRootTLS;
use servo_net::resource_task::{ResourceTask, load_whole_resource};
use servo_util::task::spawn_named_native;
use servo_util::task_state;
use servo_util::task_state::{SCRIPT, IN_WORKER};
use js::glue::JS_STRUCTURED_CLONE_VERSION;
use js::jsapi::{JSContext, JS_ReadStructuredClone, JS_WriteStructuredClone, JS_ClearPendingException};
use js::jsval::{JSVal, UndefinedValue};
use js::rust::Cx;
use std::rc::Rc;
use std::ptr;
use url::Url;
#[dom_struct]
pub struct DedicatedWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope,
receiver: Receiver<ScriptMsg>,
/// Sender to the parent thread.
parent_sender: ScriptChan,
worker: TrustedWorkerAddress,
}
impl DedicatedWorkerGlobalScope {
fn new_inherited(worker_url: Url,
worker: TrustedWorkerAddress,
cx: Rc<Cx>,
resource_task: ResourceTask,
parent_sender: ScriptChan,
own_sender: ScriptChan,
receiver: Receiver<ScriptMsg>)
-> DedicatedWorkerGlobalScope {
DedicatedWorkerGlobalScope {
workerglobalscope: WorkerGlobalScope::new_inherited(
DedicatedGlobalScope, worker_url, cx, resource_task,
own_sender),
receiver: receiver,
parent_sender: parent_sender,
worker: worker,
}
}
pub fn new(worker_url: Url,
worker: TrustedWorkerAddress,
cx: Rc<Cx>,
resource_task: ResourceTask,
parent_sender: ScriptChan,
own_sender: ScriptChan,
receiver: Receiver<ScriptMsg>)
-> Temporary<DedicatedWorkerGlobalScope> {
let scope = box DedicatedWorkerGlobalScope::new_inherited(
worker_url, worker, cx.clone(), resource_task, parent_sender,
own_sender, receiver);
DedicatedWorkerGlobalScopeBinding::Wrap(cx.ptr, scope)
}
}
impl DedicatedWorkerGlobalScope {
pub fn run_worker_scope(worker_url: Url,
worker: TrustedWorkerAddress,
resource_task: ResourceTask,
parent_sender: ScriptChan,
own_sender: ScriptChan,
receiver: Receiver<ScriptMsg>) {
spawn_named_native(format!("WebWorker for {}", worker_url.serialize()), proc() {
task_state::initialize(SCRIPT | IN_WORKER);
let roots = RootCollection::new();
let _stack_roots_tls = StackRootTLS::new(&roots);
let (url, source) = match load_whole_resource(&resource_task, worker_url.clone()) {
Err(_) => {
println!("error loading script {}", worker_url.serialize());
return;
}
Ok((metadata, bytes)) => {
(metadata.final_url, String::from_utf8(bytes).unwrap())
}
};
let (_js_runtime, js_context) = ScriptTask::new_rt_and_cx();
let global = DedicatedWorkerGlobalScope::new(
worker_url, worker, js_context.clone(), resource_task,
parent_sender, own_sender, receiver).root();
match js_context.evaluate_script(
global.reflector().get_jsobject(), source, url.serialize(), 1) {
Ok(_) => (),
Err(_) => println!("evaluate_script failed")
}
global.delayed_release_worker();
let scope: JSRef<WorkerGlobalScope> =
WorkerGlobalScopeCast::from_ref(*global);
let target: JSRef<EventTarget> =
EventTargetCast::from_ref(*global);
loop {
match global.receiver.recv_opt() {
Ok(DOMMessage(data, nbytes)) => {
let mut message = UndefinedValue();
unsafe {
assert!(JS_ReadStructuredClone(
js_context.ptr, data as *const u64, nbytes,
JS_STRUCTURED_CLONE_VERSION, &mut message,
                                ptr::null(), ptr::null_mut()) != 0);
}
MessageEvent::dispatch_jsval(target, &global::Worker(scope), message);
global.delayed_release_worker();
},
Ok(XHRProgressMsg(addr, progress)) => {
XMLHttpRequest::handle_progress(addr, progress)
},
Ok(XHRReleaseMsg(addr)) => {
XMLHttpRequest::handle_release(addr)
},
Ok(WorkerPostMessage(addr, data, nbytes)) => {
Worker::handle_message(addr, data, nbytes);
},
Ok(WorkerRelease(addr)) => {
Worker::handle_release(addr)
},
Ok(FireTimerMsg(FromWorker, timer_id)) => {
scope.handle_fire_timer(timer_id, js_context.ptr);
}
Ok(_) => panic!("Unexpected message"),
Err(_) => break,
}
}
});
}
}
impl<'a> DedicatedWorkerGlobalScopeMethods for JSRef<'a, DedicatedWorkerGlobalScope> {
fn PostMessage(self, cx: *mut JSContext, message: JSVal) -> ErrorResult {
let mut data = ptr::null_mut();
let mut nbytes = 0;
let result = unsafe {
JS_WriteStructuredClone(cx, message, &mut data, &mut nbytes,
|
unsafe { JS_ClearPendingException(cx); }
return Err(DataClone);
}
let ScriptChan(ref sender) = self.parent_sender;
sender.send(WorkerPostMessage(self.worker, data, nbytes));
Ok(())
}
event_handler!(message, GetOnmessage, SetOnmessage)
}
trait PrivateDedicatedWorkerGlobalScopeHelpers {
fn delayed_release_worker(self);
}
impl<'a> PrivateDedicatedWorkerGlobalScopeHelpers for JSRef<'a, DedicatedWorkerGlobalScope> {
fn delayed_release_worker(self) {
let ScriptChan(ref sender) = self.parent_sender;
sender.send(WorkerRelease(self.worker));
}
}
impl Reflectable for DedicatedWorkerGlobalScope {
fn reflector<'a>(&'a self) -> &'a Reflector {
self.workerglobalscope.reflector()
}
}
impl DedicatedWorkerGlobalScopeDerived for EventTarget {
fn is_dedicatedworkerglobalscope(&self) -> bool {
match *self.type_id() {
WorkerGlobalScopeTypeId(DedicatedGlobalScope) => true,
_ => false
}
}
}
|
ptr::null(), ptr::null_mut())
};
if result == 0 {
|
random_line_split
|
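run_worker_scope above boils down to a dedicated task that owns a receiver and dispatches on each incoming message until the channel is closed. The sketch below is a heavily simplified std-only analogue of that receive-and-dispatch loop; the Msg enum and its variants are invented for illustration and are not Servo types.

use std::sync::mpsc;
use std::thread;

// Simplified analogue of the worker event loop above: a dedicated thread owns
// the receiving end of a channel and dispatches on each message until every
// sender is gone (the rough equivalent of recv_opt() failing above).
enum Msg {
    Data(String),
    FireTimer(u32),
}

fn main() {
    let (sender, receiver) = mpsc::channel::<Msg>();
    let worker = thread::spawn(move || {
        loop {
            match receiver.recv() {
                Ok(Msg::Data(s)) => println!("dispatching message event: {}", s),
                Ok(Msg::FireTimer(id)) => println!("firing timer {}", id),
                Err(_) => break, // all senders dropped; shut the worker down
            }
        }
    });
    sender.send(Msg::Data("hello".to_owned())).unwrap();
    sender.send(Msg::FireTimer(1)).unwrap();
    drop(sender);
    worker.join().unwrap();
}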
add_metadata.rs
|
use super::Batch;
use super::act::Act;
use super::iterator::*;
use super::packet_batch::PacketBatch;
use common::*;
use interface::Packet;
use interface::PacketTx;
use std::marker::PhantomData;
pub type MetadataFn<T, M, M2> = Box<FnMut(&Packet<T, M>) -> M2 + Send>;
pub struct AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
parent: V,
generator: MetadataFn<V::Header, V::Metadata, M>,
applied: bool,
_phantom_m: PhantomData<M>,
}
impl<M, V> AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
pub fn new(parent: V, generator: MetadataFn<V::Header, V::Metadata, M>) -> AddMetadataBatch<M, V> {
AddMetadataBatch {
parent: parent,
generator: generator,
applied: false,
_phantom_m: PhantomData,
}
}
}
impl<M, V> Batch for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
}
impl<M, V> BatchIterator for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
type Header = V::Header;
type Metadata = M;
#[inline]
fn start(&mut self) -> usize {
self.parent.start()
}
#[inline]
unsafe fn next_payload(&mut self, idx: usize) -> Option<PacketDescriptor<V::Header, M>> {
self.parent.next_payload(idx).map(|p| PacketDescriptor {
packet: p.packet.reinterpret_metadata(),
})
}
}
impl<M, V> Act for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
#[inline]
fn act(&mut self) {
        if !self.applied {
self.parent.act();
{
let iter = PayloadEnumerator::<V::Header, V::Metadata>::new(&mut self.parent);
                while let Some(ParsedDescriptor { mut packet, .. }) = iter.next(&mut self.parent) {
let metadata = (self.generator)(&packet);
                    packet.write_metadata(&metadata).unwrap(); // FIXME: What to do on error?
}
}
self.applied = true;
}
}
#[inline]
fn done(&mut self) {
self.applied = false;
self.parent.done();
}
#[inline]
fn send_q(&mut self, port: &PacketTx) -> Result<u32> {
self.parent.send_q(port)
}
#[inline]
fn capacity(&self) -> i32 {
self.parent.capacity()
}
#[inline]
fn drop_packets(&mut self, idxes: &[usize]) -> Option<usize> {
self.parent.drop_packets(idxes)
}
#[inline]
fn clear_packets(&mut self) {
self.parent.clear_packets()
}
#[inline]
fn get_packet_batch(&mut self) -> &mut PacketBatch {
self.parent.get_packet_batch()
}
#[inline]
fn get_task_dependencies(&self) -> Vec<usize>
|
}
|
{
self.parent.get_task_dependencies()
}
|
identifier_body
|
add_metadata.rs
|
use super::Batch;
use super::act::Act;
use super::iterator::*;
use super::packet_batch::PacketBatch;
use common::*;
use interface::Packet;
use interface::PacketTx;
use std::marker::PhantomData;
pub type MetadataFn<T, M, M2> = Box<FnMut(&Packet<T, M>) -> M2 + Send>;
pub struct AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
parent: V,
generator: MetadataFn<V::Header, V::Metadata, M>,
applied: bool,
_phantom_m: PhantomData<M>,
}
impl<M, V> AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
pub fn new(parent: V, generator: MetadataFn<V::Header, V::Metadata, M>) -> AddMetadataBatch<M, V> {
AddMetadataBatch {
parent: parent,
generator: generator,
applied: false,
_phantom_m: PhantomData,
}
}
}
impl<M, V> Batch for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
}
impl<M, V> BatchIterator for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
type Header = V::Header;
type Metadata = M;
#[inline]
fn
|
(&mut self) -> usize {
self.parent.start()
}
#[inline]
unsafe fn next_payload(&mut self, idx: usize) -> Option<PacketDescriptor<V::Header, M>> {
self.parent.next_payload(idx).map(|p| PacketDescriptor {
packet: p.packet.reinterpret_metadata(),
})
}
}
impl<M, V> Act for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
#[inline]
fn act(&mut self) {
if !self.applied {
self.parent.act();
{
let iter = PayloadEnumerator::<V::Header, V::Metadata>::new(&mut self.parent);
while let Some(ParsedDescriptor { mut packet, .. }) = iter.next(&mut self.parent) {
let metadata = (self.generator)(&packet);
packet.write_metadata(&metadata).unwrap(); // FIXME: What to do on error?
}
}
self.applied = true;
}
}
#[inline]
fn done(&mut self) {
self.applied = false;
self.parent.done();
}
#[inline]
fn send_q(&mut self, port: &PacketTx) -> Result<u32> {
self.parent.send_q(port)
}
#[inline]
fn capacity(&self) -> i32 {
self.parent.capacity()
}
#[inline]
fn drop_packets(&mut self, idxes: &[usize]) -> Option<usize> {
self.parent.drop_packets(idxes)
}
#[inline]
fn clear_packets(&mut self) {
self.parent.clear_packets()
}
#[inline]
fn get_packet_batch(&mut self) -> &mut PacketBatch {
self.parent.get_packet_batch()
}
#[inline]
fn get_task_dependencies(&self) -> Vec<usize> {
self.parent.get_task_dependencies()
}
}
|
start
|
identifier_name
|
add_metadata.rs
|
use super::Batch;
use super::act::Act;
use super::iterator::*;
use super::packet_batch::PacketBatch;
use common::*;
use interface::Packet;
use interface::PacketTx;
use std::marker::PhantomData;
pub type MetadataFn<T, M, M2> = Box<FnMut(&Packet<T, M>) -> M2 + Send>;
pub struct AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
parent: V,
generator: MetadataFn<V::Header, V::Metadata, M>,
applied: bool,
_phantom_m: PhantomData<M>,
}
impl<M, V> AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
pub fn new(parent: V, generator: MetadataFn<V::Header, V::Metadata, M>) -> AddMetadataBatch<M, V> {
AddMetadataBatch {
parent: parent,
generator: generator,
applied: false,
_phantom_m: PhantomData,
}
}
}
impl<M, V> Batch for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
}
impl<M, V> BatchIterator for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
type Header = V::Header;
type Metadata = M;
#[inline]
fn start(&mut self) -> usize {
self.parent.start()
}
#[inline]
unsafe fn next_payload(&mut self, idx: usize) -> Option<PacketDescriptor<V::Header, M>> {
self.parent.next_payload(idx).map(|p| PacketDescriptor {
packet: p.packet.reinterpret_metadata(),
})
}
}
impl<M, V> Act for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
#[inline]
fn act(&mut self) {
if !self.applied {
self.parent.act();
{
let iter = PayloadEnumerator::<V::Header, V::Metadata>::new(&mut self.parent);
while let Some(ParsedDescriptor { mut packet, .. }) = iter.next(&mut self.parent) {
let metadata = (self.generator)(&packet);
packet.write_metadata(&metadata).unwrap(); // FIXME: What to do on error?
}
}
self.applied = true;
}
}
#[inline]
fn done(&mut self) {
self.applied = false;
self.parent.done();
}
#[inline]
fn send_q(&mut self, port: &PacketTx) -> Result<u32> {
self.parent.send_q(port)
}
#[inline]
fn capacity(&self) -> i32 {
self.parent.capacity()
}
#[inline]
fn drop_packets(&mut self, idxes: &[usize]) -> Option<usize> {
self.parent.drop_packets(idxes)
}
#[inline]
fn clear_packets(&mut self) {
self.parent.clear_packets()
}
#[inline]
fn get_packet_batch(&mut self) -> &mut PacketBatch {
self.parent.get_packet_batch()
}
|
}
}
|
#[inline]
fn get_task_dependencies(&self) -> Vec<usize> {
self.parent.get_task_dependencies()
|
random_line_split
|
add_metadata.rs
|
use super::Batch;
use super::act::Act;
use super::iterator::*;
use super::packet_batch::PacketBatch;
use common::*;
use interface::Packet;
use interface::PacketTx;
use std::marker::PhantomData;
pub type MetadataFn<T, M, M2> = Box<FnMut(&Packet<T, M>) -> M2 + Send>;
pub struct AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
parent: V,
generator: MetadataFn<V::Header, V::Metadata, M>,
applied: bool,
_phantom_m: PhantomData<M>,
}
impl<M, V> AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
pub fn new(parent: V, generator: MetadataFn<V::Header, V::Metadata, M>) -> AddMetadataBatch<M, V> {
AddMetadataBatch {
parent: parent,
generator: generator,
applied: false,
_phantom_m: PhantomData,
}
}
}
impl<M, V> Batch for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
}
impl<M, V> BatchIterator for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
type Header = V::Header;
type Metadata = M;
#[inline]
fn start(&mut self) -> usize {
self.parent.start()
}
#[inline]
unsafe fn next_payload(&mut self, idx: usize) -> Option<PacketDescriptor<V::Header, M>> {
self.parent.next_payload(idx).map(|p| PacketDescriptor {
packet: p.packet.reinterpret_metadata(),
})
}
}
impl<M, V> Act for AddMetadataBatch<M, V>
where
M: Send + Sized,
V: Batch + BatchIterator + Act,
{
#[inline]
fn act(&mut self) {
if !self.applied
|
}
#[inline]
fn done(&mut self) {
self.applied = false;
self.parent.done();
}
#[inline]
fn send_q(&mut self, port: &PacketTx) -> Result<u32> {
self.parent.send_q(port)
}
#[inline]
fn capacity(&self) -> i32 {
self.parent.capacity()
}
#[inline]
fn drop_packets(&mut self, idxes: &[usize]) -> Option<usize> {
self.parent.drop_packets(idxes)
}
#[inline]
fn clear_packets(&mut self) {
self.parent.clear_packets()
}
#[inline]
fn get_packet_batch(&mut self) -> &mut PacketBatch {
self.parent.get_packet_batch()
}
#[inline]
fn get_task_dependencies(&self) -> Vec<usize> {
self.parent.get_task_dependencies()
}
}
|
{
self.parent.act();
{
let iter = PayloadEnumerator::<V::Header, V::Metadata>::new(&mut self.parent);
while let Some(ParsedDescriptor { mut packet, .. }) = iter.next(&mut self.parent) {
let metadata = (self.generator)(&packet);
packet.write_metadata(&metadata).unwrap(); // FIXME: What to do on error?
}
}
self.applied = true;
}
|
conditional_block
|
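The add_metadata.rs rows above all revolve around one pattern: a batch wrapper holding a user-supplied generator closure, an `applied` flag so that `act()` runs the closure over the packets at most once per cycle, and `done()` resetting the flag while everything else is delegated to the parent batch. The following is a minimal self-contained sketch of that pattern; the types here (`Packet`, `PacketBatch`) are illustrative stand-ins, not the NetBricks API.

```rust
// Minimal sketch (stand-in types, not the NetBricks API) of the
// "apply a metadata generator once per batch" pattern shown above.
struct Packet {
    payload: Vec<u8>,
    metadata: Option<u32>, // stand-in for the written metadata
}

// Stand-in for the parent batch the wrapper delegates to.
struct PacketBatch {
    packets: Vec<Packet>,
}

// Wrapper that applies `generator` to each packet exactly once per act/done cycle.
struct AddMetadataBatch<F: FnMut(&Packet) -> u32> {
    parent: PacketBatch,
    generator: F,
    applied: bool,
}

impl<F: FnMut(&Packet) -> u32> AddMetadataBatch<F> {
    fn new(parent: PacketBatch, generator: F) -> Self {
        AddMetadataBatch { parent, generator, applied: false }
    }

    // Mirrors `act()`: run the generator over every packet, but only once
    // until `done()` resets the flag.
    fn act(&mut self) {
        if !self.applied {
            for p in &mut self.parent.packets {
                let m = (self.generator)(&*p);
                p.metadata = Some(m);
            }
            self.applied = true;
        }
    }

    // Mirrors `done()`: clear the flag so the next batch is processed again.
    fn done(&mut self) {
        self.applied = false;
    }
}

fn main() {
    let batch = PacketBatch {
        packets: vec![Packet { payload: vec![1, 2, 3], metadata: None }],
    };
    let mut wrapped = AddMetadataBatch::new(batch, |p: &Packet| p.payload.len() as u32);
    wrapped.act();
    wrapped.act(); // no-op: `applied` is still set
    assert_eq!(wrapped.parent.packets[0].metadata, Some(3));
    wrapped.done();
}
```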
lib.rs
|
//! Prisma - The Rust Color Library
//! ===============================
//!
//! ## Table of Contents:
//! * [**Overview**](#overview)
//! - [**Color Models**](#color-models)
//! - [**Why Prisma?**](#why-prisma)
//! - [**A Tour by Example**](#a-tour-by-example)
//! * [**Details**](#details)
//! * [**Definitions**](#definitions)
//!
//! <a name="overview"></a>
//! ## Overview:
//! Prisma is a Rust library that aims to be a comprehensive set of color representations, manipulations,
//! conversions, and algorithms that are easy to use for projects of all levels. Prisma follows a model
//! of "opt-in" complexity, meaning that if you just want a library to convert from Rgb to Hsv and back,
//! prisma will let you do it with minimal knowledge of color science. If you need to access the CIE spaces
//! or do color space conversions however, prisma also provides that functionality with wrappers to
//! use the type system to enforce validity.
//!
//! Prisma aims to be the go-to source for color conversions and color science in Rust. It is currently
//! a work in progress, and any contributions or feature requests are appreciated.
//!
//! <a name="color-models"></a>
//! ### Color Models:
//!
//! Currently prisma supports the following color models:
//!
//! #### Device Dependent:
//! * **[`Rgb`](struct.Rgb.html)** - The standard color model for displays
//! * **[`Rgi`](struct.Rgi.html)** - A chromaticity model constructed from Rgb that decouples chromaticity and lightness
//! * **[`Hsv`](struct.Hsv.html)** - Hue, saturation, value: a more intuitive polar Rgb model
//! * **[`Hsl`](struct.Hsl.html)** - Hue, saturation, lightness: an alternate to Hsv fulfilling similar roles
//! * **[`Hsi`](struct.Hsi.html)** - Hue, saturation, intensity: a hue-based model without distortion
//! * **[`eHsi`](struct.eHsi.html)** - An extension to `Hsi` that rescales saturation to avoid going out of gamut in Rgb
//! * **[`Hwb`](struct.Hwb.html)** - Hue, whiteness, blackness: a hue-based model made to be easy for users to select colors in
//! * **[`YCbCr`](ycbcr/struct.YCbCr.html)** - A representation of the various YUV and YIQ models used in display and broadcast
//!
//! #### Device Independent:
|
//! * **[`Xyz`](struct.Xyz.html)** - The "parent" absolute color space other color spaces are defined in terms of
//! * **[`Lms`](lms/struct.Lms.html)** - A color space simulating human cone response
//! * **[`Lab`](struct.Lab.html)** - A uniform perception color space transformation of XYZ
//! * **[`Lchab`](struct.Lchab.html)** - A polar transformation of Lab. A uniform perception analog of Hsl
//! * **[`Luv`](struct.Luv.html)** - An alternative uniform perception color space useful in lighting calculations
//! * **[`Lchuv`](struct.Lchuv.html)** - A polar transformation of Luv
//!
//! Prisma also supports these color spaces with an alpha channel via the [`Alpha`](struct.Alpha.html) type.
//!
//! <a name="why-prisma"></a>
//! ### Why Prisma?
//! Currently, there are two main color libraries for rust:
//!
//! * **color** -- `color` is a very old library that hasn't been updated in several years. While it
//! is easy to use and handles conversion through a few color spaces, it has a very minimal set of features.
//!
//! * **palette** -- `palette` has significantly more features and can go into a few of the CIE spaces,
//! but requiring all computations to be done in linear encoding is a serious drawback: if you just
//! want a nice-looking gradient in a game, linear Hsv will *not* get you that. It is also built on
//! predefined models and doesn't support dynamic configuration. `prisma` supports
//! considerably more color spaces, as well as multiple encodings and spaces which can be built
//! at runtime. `prisma` also does not require you to specify a color space, as most applications
//! don't really care and use the device color space or sRgb.
//!
//! Prisma aims to support all the features of the above libraries, while making it up to the user how
//! much complexity they need.
//!
//! <a name="a-tour-by-example"></a>
//! ### A Tour by Example:
//!
//! ##### Converting from Rgb to Hsv, manipulating hue, and converting back
//!
//! ```rust
//! #[macro_use] extern crate approx;
//! extern crate angular_units as angle;
//! # extern crate prisma;
//!
//! use prisma::{Rgb, Hsv, FromColor};
//! use angle::Deg;
//!
//! let rgb = Rgb::new(0.5, 0.75, 1.0);
//! let mut hsv = Hsv::from_color(&rgb);
//! hsv.set_hue(Deg(180.0));
//! let rgb = Rgb::from_color(&hsv);
//! assert_relative_eq!(rgb, Rgb::new(0.5, 1.0, 1.0), epsilon=1e-6);
//! ```
//!
//! ##### Interpolating between two colors in Hsl.
//!
//! ```rust
//! #[macro_use] extern crate approx;
//! extern crate angular_units as angle;
//! # extern crate prisma;
//!
//! use prisma::{Rgb, Hsl, FromColor, Lerp};
//! use angle::Deg;
//!
//! let rgb1 = Rgb::new(0.8, 0.25, 0.0f32);
//! let rgb2 = Rgb::new(0.5, 0.66, 1.0);
//! // Specify the hue channel should use degrees
//! let hsl1: Hsl<_, Deg<f32>> = Hsl::from_color(&rgb1);
//! let hsl2 = Hsl::from_color(&rgb2);
//! // Note that hue channels will interpolate in the shortest direction. This is usually
//! // the expected behavior, but you can always go forward with `lerp_flat`.
//! let rgb_out = Rgb::from_color(&hsl1.lerp(&hsl2, 0.35));
//! assert_relative_eq!(rgb_out, Rgb::new(1.0, 0.045, 0.62648), epsilon=1e-4);
//! ```
//!
//! ##### Converting from Rgb<u8> to Rgb<f32>
//!
//! ```rust
//! #[macro_use] extern crate approx;
//! # extern crate prisma;
//!
//! use prisma::Rgb;
//!
//! let rgb_in = Rgb::new(100, 200, 255u8);
//! let rgb_out: Rgb<f32> = rgb_in.color_cast();
//! assert_relative_eq!(rgb_out, Rgb::new(0.39216, 0.78431, 1.0), epsilon=1e-4);
//! ```
//!
//! ##### Convert from sRgb encoded to linear encoded Rgb
//!
//! ```rust
//! #[macro_use] extern crate approx;
//! # extern crate prisma;
//!
//! use prisma::Rgb;
//! use prisma::encoding::{EncodableColor, TranscodableColor, SrgbEncoding};
//!
//! // This returns a `EncodedColor<Rgb<f32>, SrgbEncoding>`
//! // Note: no encoding is done. `srgb_encoded` says that this value is already in sRgb encoding.
//! let rgb_srgb = Rgb::new(0.5, 1.0, 0.25f32).srgb_encoded();
//! // Decode goes from an encoding to linear.
//! let rgb_linear = rgb_srgb.clone().decode();
//! assert_relative_eq!(rgb_linear, Rgb::new(0.21404, 1.0, 0.05088).linear(), epsilon=1e-4);
//! // We can then go back with `encode`
//! let rgb_out = rgb_linear.encode(SrgbEncoding);
//! assert_relative_eq!(rgb_out, rgb_srgb, epsilon=1e-6);
//! ```
//!
//! ##### Going to XYZ
//!
//! ```rust
//! #[macro_use] extern crate approx;
//! # extern crate prisma;
//!
//! use prisma::{Rgb, Xyz};
//! use prisma::encoding::{EncodableColor, TranscodableColor};
//! use prisma::color_space::{ColorSpace, EncodedColorSpace, ConvertToXyz};
//! use prisma::color_space::named::SRgb;
//!
//! let rgb = Rgb::new(0.25, 0.5, 0.75f32).srgb_encoded();
//! let color_space = SRgb::new();
//! // In this case, since rgb and color_space know their own encodings, the conversion to linear
//! // is automatic.
//! let xyz = color_space.convert_to_xyz(&rgb);
//! assert_relative_eq!(xyz, Xyz::new(0.191803, 0.201605, 0.523050), epsilon=1e-5);
//! ```
//! <a name="definitions"></a>
#![allow(clippy::unreadable_literal)]
#![allow(clippy::module_inception)]
#![allow(clippy::clone_on_copy)]
#![allow(clippy::useless_transmute)]
#![warn(missing_docs)]
extern crate angular_units as angle;
#[macro_use]
mod impl_macros;
pub mod channel;
mod linalg;
pub mod color_space;
pub mod encoding;
pub mod tags;
pub mod white_point;
mod alpha;
mod chromaticity;
mod color;
mod convert;
mod ehsi;
mod hsi;
mod hsl;
mod hsv;
mod hwb;
mod lab;
mod lchab;
mod lchuv;
pub mod lms;
mod luv;
mod rgb;
mod rgi;
mod xyy;
mod xyz;
pub mod ycbcr;
#[cfg(test)]
pub mod test;
pub use crate::color::{
Bounded, Broadcast, Color, Color3, Color4, DeviceDependentColor, Flatten, FromTuple,
HomogeneousColor, Invert, Lerp, PolarColor,
};
pub use crate::alpha::{
eHsia, Alpha, Hsia, Hsla, Hsva, Hwba, Laba, Lchaba, Lchauv, Lmsa, Luva, Rgba, Rgia, XyYa, Xyza,
YCbCra,
};
pub use crate::chromaticity::ChromaticityCoordinates;
pub use crate::convert::{FromColor, FromHsi, FromYCbCr};
pub use crate::ehsi::eHsi;
pub use crate::hsi::{Hsi, HsiOutOfGamutMode};
pub use crate::hsl::Hsl;
pub use crate::hsv::Hsv;
pub use crate::hwb::{Hwb, HwbBoundedChannelTraits};
pub use crate::lab::Lab;
pub use crate::lchab::Lchab;
pub use crate::lchuv::Lchuv;
pub use crate::linalg::Matrix3;
pub use crate::luv::Luv;
pub use crate::rgb::Rgb;
pub use crate::rgi::Rgi;
pub use crate::xyy::XyY;
pub use crate::xyz::Xyz;
|
random_line_split
|
|
conic.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Conic gradients
//!
//! Specification: https://drafts.csswg.org/css-images-4/#conic-gradients
//!
//! Conic gradients are rendered via cached render tasks and composited with the image brush.
use euclid::vec2;
use api::{ExtendMode, GradientStop, PremultipliedColorF};
use api::units::*;
use crate::scene_building::IsVisible;
use crate::frame_builder::FrameBuildingState;
use crate::gpu_cache::{GpuCache, GpuCacheHandle};
use crate::intern::{Internable, InternDebug, Handle as InternHandle};
use crate::internal_types::LayoutPrimitiveInfo;
use crate::prim_store::{BrushSegment, GradientTileRange};
use crate::prim_store::{PrimitiveInstanceKind, PrimitiveOpacity, FloatKey};
use crate::prim_store::{PrimKeyCommonData, PrimTemplateCommonData, PrimitiveStore};
use crate::prim_store::{NinePatchDescriptor, PointKey, SizeKey, InternablePrimitive};
use crate::render_task::{RenderTask, RenderTaskKind};
use crate::render_task_graph::RenderTaskId;
use crate::render_task_cache::{RenderTaskCacheKeyKind, RenderTaskCacheKey, RenderTaskParent};
use crate::picture::{SurfaceIndex};
use std::{hash, ops::{Deref, DerefMut}};
use super::{stops_and_min_alpha, GradientStopKey, GradientGpuBlockBuilder};
/// Hashable conic gradient parameters, for use during prim interning.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Clone, MallocSizeOf, PartialEq)]
pub struct ConicGradientParams {
pub angle: f32, // in radians
pub start_offset: f32,
pub end_offset: f32,
}
impl Eq for ConicGradientParams {}
impl hash::Hash for ConicGradientParams {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
|
}
}
/// Identifying key for a conic gradient.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Clone, Eq, PartialEq, Hash, MallocSizeOf)]
pub struct ConicGradientKey {
pub common: PrimKeyCommonData,
pub extend_mode: ExtendMode,
pub center: PointKey,
pub params: ConicGradientParams,
pub stretch_size: SizeKey,
pub stops: Vec<GradientStopKey>,
pub tile_spacing: SizeKey,
pub nine_patch: Option<Box<NinePatchDescriptor>>,
}
impl ConicGradientKey {
pub fn new(
info: &LayoutPrimitiveInfo,
conic_grad: ConicGradient,
) -> Self {
ConicGradientKey {
common: info.into(),
extend_mode: conic_grad.extend_mode,
center: conic_grad.center,
params: conic_grad.params,
stretch_size: conic_grad.stretch_size,
stops: conic_grad.stops,
tile_spacing: conic_grad.tile_spacing,
nine_patch: conic_grad.nine_patch,
}
}
}
impl InternDebug for ConicGradientKey {}
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
pub struct ConicGradientTemplate {
pub common: PrimTemplateCommonData,
pub extend_mode: ExtendMode,
pub center: DevicePoint,
pub params: ConicGradientParams,
pub task_size: DeviceIntSize,
pub scale: DeviceVector2D,
pub stretch_size: LayoutSize,
pub tile_spacing: LayoutSize,
pub brush_segments: Vec<BrushSegment>,
pub stops_opacity: PrimitiveOpacity,
pub stops: Vec<GradientStop>,
pub stops_handle: GpuCacheHandle,
pub src_color: Option<RenderTaskId>,
}
impl Deref for ConicGradientTemplate {
type Target = PrimTemplateCommonData;
fn deref(&self) -> &Self::Target {
&self.common
}
}
impl DerefMut for ConicGradientTemplate {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.common
}
}
impl From<ConicGradientKey> for ConicGradientTemplate {
fn from(item: ConicGradientKey) -> Self {
let common = PrimTemplateCommonData::with_key_common(item.common);
let mut brush_segments = Vec::new();
if let Some(ref nine_patch) = item.nine_patch {
brush_segments = nine_patch.create_segments(common.prim_rect.size());
}
let (stops, min_alpha) = stops_and_min_alpha(&item.stops);
// Save opacity of the stops for use in
// selecting which pass this gradient
// should be drawn in.
let stops_opacity = PrimitiveOpacity::from_alpha(min_alpha);
let mut stretch_size: LayoutSize = item.stretch_size.into();
stretch_size.width = stretch_size.width.min(common.prim_rect.width());
stretch_size.height = stretch_size.height.min(common.prim_rect.height());
fn approx_eq(a: f32, b: f32) -> bool { (a - b).abs() < 0.01 }
// Attempt to detect some of the common configurations with hard gradient stops. Allow
// those a higher maximum resolution to avoid the worst cases of aliasing artifacts with
// large conic gradients. A better solution would be to go back to rendering very large
// conic gradients via a brush shader instead of caching all of them (unclear whether
// it is important enough to warrant the better solution).
let mut has_hard_stops = false;
let mut prev_stop = None;
let offset_range = item.params.end_offset - item.params.start_offset;
for stop in &stops {
if offset_range <= 0.0 {
break;
}
if let Some(prev_offset) = prev_stop {
// Check whether two consecutive stops are very close (hard stops).
if stop.offset < prev_offset + 0.005 / offset_range {
// a is the angle of the stop normalized into 0-1 space and repeating in the 0-0.25 range.
// If close to 0.0 or 0.25 it means the stop is vertical or horizontal. For those, the lower
// resolution isn't a big issue.
let a = item.params.angle / (2.0 * std::f32::consts::PI)
+ item.params.start_offset
+ stop.offset / offset_range;
let a = a.rem_euclid(0.25);
if !approx_eq(a, 0.0) && !approx_eq(a, 0.25) {
has_hard_stops = true;
break;
}
}
}
prev_stop = Some(stop.offset);
}
let max_size = if has_hard_stops {
2048.0
} else {
1024.0
};
// Avoid rendering enormous gradients. Radial gradients are mostly made of soft transitions,
// so it is unlikely that rendering at a higher resolution than 1024 would produce noticeable
// differences, especially with 8 bits per channel.
let mut task_size: DeviceSize = stretch_size.cast_unit();
let mut scale = vec2(1.0, 1.0);
if task_size.width > max_size {
scale.x = task_size.width / max_size;
task_size.width = max_size;
}
if task_size.height > max_size {
scale.y = task_size.height / max_size;
task_size.height = max_size;
}
ConicGradientTemplate {
common,
center: DevicePoint::new(item.center.x, item.center.y),
extend_mode: item.extend_mode,
params: item.params,
stretch_size,
task_size: task_size.ceil().to_i32(),
scale,
tile_spacing: item.tile_spacing.into(),
brush_segments,
stops_opacity,
stops,
stops_handle: GpuCacheHandle::new(),
src_color: None,
}
}
}
impl ConicGradientTemplate {
/// Update the GPU cache for a given primitive template. This may be called multiple
/// times per frame, by each primitive reference that refers to this interned
/// template. The initial request call to the GPU cache ensures that work is only
/// done if the cache entry is invalid (due to first use or eviction).
pub fn update(
&mut self,
frame_state: &mut FrameBuildingState,
parent_surface: SurfaceIndex,
) {
if let Some(mut request) =
frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
// write_prim_gpu_blocks
request.push(PremultipliedColorF::WHITE);
request.push(PremultipliedColorF::WHITE);
request.push([
self.stretch_size.width,
self.stretch_size.height,
0.0,
0.0,
]);
// write_segment_gpu_blocks
for segment in &self.brush_segments {
// has to match VECS_PER_SEGMENT
request.write_segment(
segment.local_rect,
segment.extra_data,
);
}
}
if let Some(mut request) = frame_state.gpu_cache.request(&mut self.stops_handle) {
GradientGpuBlockBuilder::build(
false,
&mut request,
&self.stops,
);
}
let cache_key = ConicGradientCacheKey {
size: self.task_size,
center: PointKey { x: self.center.x, y: self.center.y },
scale: PointKey { x: self.scale.x, y: self.scale.y },
start_offset: FloatKey(self.params.start_offset),
end_offset: FloatKey(self.params.end_offset),
angle: FloatKey(self.params.angle),
extend_mode: self.extend_mode,
stops: self.stops.iter().map(|stop| (*stop).into()).collect(),
};
let task_id = frame_state.resource_cache.request_render_task(
RenderTaskCacheKey {
size: self.task_size,
kind: RenderTaskCacheKeyKind::ConicGradient(cache_key),
},
frame_state.gpu_cache,
frame_state.rg_builder,
None,
false,
RenderTaskParent::Surface(parent_surface),
frame_state.surfaces,
|rg_builder| {
rg_builder.add().init(RenderTask::new_dynamic(
self.task_size,
RenderTaskKind::ConicGradient(ConicGradientTask {
extend_mode: self.extend_mode,
scale: self.scale,
center: self.center,
params: self.params.clone(),
stops: self.stops_handle,
}),
))
}
);
self.src_color = Some(task_id);
// Tile spacing is always handled by decomposing into separate draw calls so the
// primitive opacity is equivalent to stops opacity. This might change to being
// set to non-opaque in the presence of tile spacing if/when tile spacing is handled
// in the same way as with the image primitive.
self.opacity = self.stops_opacity;
}
}
pub type ConicGradientDataHandle = InternHandle<ConicGradient>;
#[derive(Debug, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradient {
pub extend_mode: ExtendMode,
pub center: PointKey,
pub params: ConicGradientParams,
pub stretch_size: SizeKey,
pub stops: Vec<GradientStopKey>,
pub tile_spacing: SizeKey,
pub nine_patch: Option<Box<NinePatchDescriptor>>,
}
impl Internable for ConicGradient {
type Key = ConicGradientKey;
type StoreData = ConicGradientTemplate;
type InternData = ();
const PROFILE_COUNTER: usize = crate::profiler::INTERNED_CONIC_GRADIENTS;
}
impl InternablePrimitive for ConicGradient {
fn into_key(
self,
info: &LayoutPrimitiveInfo,
) -> ConicGradientKey {
ConicGradientKey::new(info, self)
}
fn make_instance_kind(
_key: ConicGradientKey,
data_handle: ConicGradientDataHandle,
_prim_store: &mut PrimitiveStore,
_reference_frame_relative_offset: LayoutVector2D,
) -> PrimitiveInstanceKind {
PrimitiveInstanceKind::ConicGradient {
data_handle,
visible_tiles_range: GradientTileRange::empty(),
}
}
}
impl IsVisible for ConicGradient {
fn is_visible(&self) -> bool {
true
}
}
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradientTask {
pub extend_mode: ExtendMode,
pub center: DevicePoint,
pub scale: DeviceVector2D,
pub params: ConicGradientParams,
pub stops: GpuCacheHandle,
}
impl ConicGradientTask {
pub fn to_instance(&self, target_rect: &DeviceIntRect, gpu_cache: &mut GpuCache) -> ConicGradientInstance {
ConicGradientInstance {
task_rect: target_rect.to_f32(),
center: self.center,
scale: self.scale,
start_offset: self.params.start_offset,
end_offset: self.params.end_offset,
angle: self.params.angle,
extend_mode: self.extend_mode as i32,
gradient_stops_address: self.stops.as_int(gpu_cache),
}
}
}
/// The per-instance shader input of a conic gradient render task.
///
/// Must match the RADIAL_GRADIENT instance description in renderer/vertex.rs.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[repr(C)]
#[derive(Clone, Debug)]
pub struct ConicGradientInstance {
pub task_rect: DeviceRect,
pub center: DevicePoint,
pub scale: DeviceVector2D,
pub start_offset: f32,
pub end_offset: f32,
pub angle: f32,
pub extend_mode: i32,
pub gradient_stops_address: i32,
}
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradientCacheKey {
pub size: DeviceIntSize,
pub center: PointKey,
pub scale: PointKey,
pub start_offset: FloatKey,
pub end_offset: FloatKey,
pub angle: FloatKey,
pub extend_mode: ExtendMode,
pub stops: Vec<GradientStopKey>,
}
|
self.angle.to_bits().hash(state);
self.start_offset.to_bits().hash(state);
self.end_offset.to_bits().hash(state);
|
random_line_split
|
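The middle of the row above is the `Hash` body for `ConicGradientParams`: each `f32` field is hashed via `to_bits()`, which is what lets a float-carrying params struct serve as part of a render-task cache key. Below is a minimal standalone sketch of the same technique, using a hypothetical `GradientParams` type rather than the WebRender one.

```rust
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

// Stand-in params struct mirroring the to_bits() hashing trick above.
#[derive(Debug, Clone, PartialEq)]
struct GradientParams {
    angle: f32, // in radians
    start_offset: f32,
    end_offset: f32,
}

// f32 provides no Eq/Hash; hashing the raw bit pattern sidesteps that, at the
// cost of treating 0.0 and -0.0 (and distinct NaN payloads) as different keys.
impl Eq for GradientParams {}

impl Hash for GradientParams {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.angle.to_bits().hash(state);
        self.start_offset.to_bits().hash(state);
        self.end_offset.to_bits().hash(state);
    }
}

fn main() {
    let mut cache: HashMap<GradientParams, &'static str> = HashMap::new();
    let key = GradientParams { angle: 1.0, start_offset: 0.0, end_offset: 1.0 };
    cache.insert(key.clone(), "cached render task");
    assert_eq!(cache.get(&key), Some(&"cached render task"));
}
```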
conic.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Conic gradients
//!
//! Specification: https://drafts.csswg.org/css-images-4/#conic-gradients
//!
//! Conic gradients are rendered via cached render tasks and composited with the image brush.
use euclid::vec2;
use api::{ExtendMode, GradientStop, PremultipliedColorF};
use api::units::*;
use crate::scene_building::IsVisible;
use crate::frame_builder::FrameBuildingState;
use crate::gpu_cache::{GpuCache, GpuCacheHandle};
use crate::intern::{Internable, InternDebug, Handle as InternHandle};
use crate::internal_types::LayoutPrimitiveInfo;
use crate::prim_store::{BrushSegment, GradientTileRange};
use crate::prim_store::{PrimitiveInstanceKind, PrimitiveOpacity, FloatKey};
use crate::prim_store::{PrimKeyCommonData, PrimTemplateCommonData, PrimitiveStore};
use crate::prim_store::{NinePatchDescriptor, PointKey, SizeKey, InternablePrimitive};
use crate::render_task::{RenderTask, RenderTaskKind};
use crate::render_task_graph::RenderTaskId;
use crate::render_task_cache::{RenderTaskCacheKeyKind, RenderTaskCacheKey, RenderTaskParent};
use crate::picture::{SurfaceIndex};
use std::{hash, ops::{Deref, DerefMut}};
use super::{stops_and_min_alpha, GradientStopKey, GradientGpuBlockBuilder};
/// Hashable conic gradient parameters, for use during prim interning.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Clone, MallocSizeOf, PartialEq)]
pub struct ConicGradientParams {
pub angle: f32, // in radians
pub start_offset: f32,
pub end_offset: f32,
}
impl Eq for ConicGradientParams {}
impl hash::Hash for ConicGradientParams {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.angle.to_bits().hash(state);
self.start_offset.to_bits().hash(state);
self.end_offset.to_bits().hash(state);
}
}
/// Identifying key for a conic gradient.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Clone, Eq, PartialEq, Hash, MallocSizeOf)]
pub struct ConicGradientKey {
pub common: PrimKeyCommonData,
pub extend_mode: ExtendMode,
pub center: PointKey,
pub params: ConicGradientParams,
pub stretch_size: SizeKey,
pub stops: Vec<GradientStopKey>,
pub tile_spacing: SizeKey,
pub nine_patch: Option<Box<NinePatchDescriptor>>,
}
impl ConicGradientKey {
pub fn new(
info: &LayoutPrimitiveInfo,
conic_grad: ConicGradient,
) -> Self {
ConicGradientKey {
common: info.into(),
extend_mode: conic_grad.extend_mode,
center: conic_grad.center,
params: conic_grad.params,
stretch_size: conic_grad.stretch_size,
stops: conic_grad.stops,
tile_spacing: conic_grad.tile_spacing,
nine_patch: conic_grad.nine_patch,
}
}
}
impl InternDebug for ConicGradientKey {}
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
pub struct ConicGradientTemplate {
pub common: PrimTemplateCommonData,
pub extend_mode: ExtendMode,
pub center: DevicePoint,
pub params: ConicGradientParams,
pub task_size: DeviceIntSize,
pub scale: DeviceVector2D,
pub stretch_size: LayoutSize,
pub tile_spacing: LayoutSize,
pub brush_segments: Vec<BrushSegment>,
pub stops_opacity: PrimitiveOpacity,
pub stops: Vec<GradientStop>,
pub stops_handle: GpuCacheHandle,
pub src_color: Option<RenderTaskId>,
}
impl Deref for ConicGradientTemplate {
type Target = PrimTemplateCommonData;
fn deref(&self) -> &Self::Target {
&self.common
}
}
impl DerefMut for ConicGradientTemplate {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.common
}
}
impl From<ConicGradientKey> for ConicGradientTemplate {
fn from(item: ConicGradientKey) -> Self {
let common = PrimTemplateCommonData::with_key_common(item.common);
let mut brush_segments = Vec::new();
if let Some(ref nine_patch) = item.nine_patch {
brush_segments = nine_patch.create_segments(common.prim_rect.size());
}
let (stops, min_alpha) = stops_and_min_alpha(&item.stops);
// Save opacity of the stops for use in
// selecting which pass this gradient
// should be drawn in.
let stops_opacity = PrimitiveOpacity::from_alpha(min_alpha);
let mut stretch_size: LayoutSize = item.stretch_size.into();
stretch_size.width = stretch_size.width.min(common.prim_rect.width());
stretch_size.height = stretch_size.height.min(common.prim_rect.height());
fn approx_eq(a: f32, b: f32) -> bool
|
// Attempt to detect some of the common configurations with hard gradient stops. Allow
// those a higher maximum resolution to avoid the worst cases of aliasing artifacts with
// large conic gradients. A better solution would be to go back to rendering very large
// conic gradients via a brush shader instead of caching all of them (unclear whether
// it is important enough to warrant the better solution).
let mut has_hard_stops = false;
let mut prev_stop = None;
let offset_range = item.params.end_offset - item.params.start_offset;
for stop in &stops {
if offset_range <= 0.0 {
break;
}
if let Some(prev_offset) = prev_stop {
// Check whether two consecutive stops are very close (hard stops).
if stop.offset < prev_offset + 0.005 / offset_range {
// a is the angle of the stop normalized into 0-1 space and repeating in the 0-0.25 range.
// If close to 0.0 or 0.25 it means the stop is vertical or horizontal. For those, the lower
// resolution isn't a big issue.
let a = item.params.angle / (2.0 * std::f32::consts::PI)
+ item.params.start_offset
+ stop.offset / offset_range;
let a = a.rem_euclid(0.25);
if !approx_eq(a, 0.0) && !approx_eq(a, 0.25) {
has_hard_stops = true;
break;
}
}
}
prev_stop = Some(stop.offset);
}
let max_size = if has_hard_stops {
2048.0
} else {
1024.0
};
// Avoid rendering enormous gradients. Radial gradients are mostly made of soft transitions,
// so it is unlikely that rendering at a higher resolution than 1024 would produce noticeable
// differences, especially with 8 bits per channel.
let mut task_size: DeviceSize = stretch_size.cast_unit();
let mut scale = vec2(1.0, 1.0);
if task_size.width > max_size {
scale.x = task_size.width / max_size;
task_size.width = max_size;
}
if task_size.height > max_size {
scale.y = task_size.height / max_size;
task_size.height = max_size;
}
ConicGradientTemplate {
common,
center: DevicePoint::new(item.center.x, item.center.y),
extend_mode: item.extend_mode,
params: item.params,
stretch_size,
task_size: task_size.ceil().to_i32(),
scale,
tile_spacing: item.tile_spacing.into(),
brush_segments,
stops_opacity,
stops,
stops_handle: GpuCacheHandle::new(),
src_color: None,
}
}
}
impl ConicGradientTemplate {
/// Update the GPU cache for a given primitive template. This may be called multiple
/// times per frame, by each primitive reference that refers to this interned
/// template. The initial request call to the GPU cache ensures that work is only
/// done if the cache entry is invalid (due to first use or eviction).
pub fn update(
&mut self,
frame_state: &mut FrameBuildingState,
parent_surface: SurfaceIndex,
) {
if let Some(mut request) =
frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
// write_prim_gpu_blocks
request.push(PremultipliedColorF::WHITE);
request.push(PremultipliedColorF::WHITE);
request.push([
self.stretch_size.width,
self.stretch_size.height,
0.0,
0.0,
]);
// write_segment_gpu_blocks
for segment in &self.brush_segments {
// has to match VECS_PER_SEGMENT
request.write_segment(
segment.local_rect,
segment.extra_data,
);
}
}
if let Some(mut request) = frame_state.gpu_cache.request(&mut self.stops_handle) {
GradientGpuBlockBuilder::build(
false,
&mut request,
&self.stops,
);
}
let cache_key = ConicGradientCacheKey {
size: self.task_size,
center: PointKey { x: self.center.x, y: self.center.y },
scale: PointKey { x: self.scale.x, y: self.scale.y },
start_offset: FloatKey(self.params.start_offset),
end_offset: FloatKey(self.params.end_offset),
angle: FloatKey(self.params.angle),
extend_mode: self.extend_mode,
stops: self.stops.iter().map(|stop| (*stop).into()).collect(),
};
let task_id = frame_state.resource_cache.request_render_task(
RenderTaskCacheKey {
size: self.task_size,
kind: RenderTaskCacheKeyKind::ConicGradient(cache_key),
},
frame_state.gpu_cache,
frame_state.rg_builder,
None,
false,
RenderTaskParent::Surface(parent_surface),
frame_state.surfaces,
|rg_builder| {
rg_builder.add().init(RenderTask::new_dynamic(
self.task_size,
RenderTaskKind::ConicGradient(ConicGradientTask {
extend_mode: self.extend_mode,
scale: self.scale,
center: self.center,
params: self.params.clone(),
stops: self.stops_handle,
}),
))
}
);
self.src_color = Some(task_id);
// Tile spacing is always handled by decomposing into separate draw calls so the
// primitive opacity is equivalent to stops opacity. This might change to being
// set to non-opaque in the presence of tile spacing if/when tile spacing is handled
// in the same way as with the image primitive.
self.opacity = self.stops_opacity;
}
}
pub type ConicGradientDataHandle = InternHandle<ConicGradient>;
#[derive(Debug, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradient {
pub extend_mode: ExtendMode,
pub center: PointKey,
pub params: ConicGradientParams,
pub stretch_size: SizeKey,
pub stops: Vec<GradientStopKey>,
pub tile_spacing: SizeKey,
pub nine_patch: Option<Box<NinePatchDescriptor>>,
}
impl Internable for ConicGradient {
type Key = ConicGradientKey;
type StoreData = ConicGradientTemplate;
type InternData = ();
const PROFILE_COUNTER: usize = crate::profiler::INTERNED_CONIC_GRADIENTS;
}
impl InternablePrimitive for ConicGradient {
fn into_key(
self,
info: &LayoutPrimitiveInfo,
) -> ConicGradientKey {
ConicGradientKey::new(info, self)
}
fn make_instance_kind(
_key: ConicGradientKey,
data_handle: ConicGradientDataHandle,
_prim_store: &mut PrimitiveStore,
_reference_frame_relative_offset: LayoutVector2D,
) -> PrimitiveInstanceKind {
PrimitiveInstanceKind::ConicGradient {
data_handle,
visible_tiles_range: GradientTileRange::empty(),
}
}
}
impl IsVisible for ConicGradient {
fn is_visible(&self) -> bool {
true
}
}
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradientTask {
pub extend_mode: ExtendMode,
pub center: DevicePoint,
pub scale: DeviceVector2D,
pub params: ConicGradientParams,
pub stops: GpuCacheHandle,
}
impl ConicGradientTask {
pub fn to_instance(&self, target_rect: &DeviceIntRect, gpu_cache: &mut GpuCache) -> ConicGradientInstance {
ConicGradientInstance {
task_rect: target_rect.to_f32(),
center: self.center,
scale: self.scale,
start_offset: self.params.start_offset,
end_offset: self.params.end_offset,
angle: self.params.angle,
extend_mode: self.extend_mode as i32,
gradient_stops_address: self.stops.as_int(gpu_cache),
}
}
}
/// The per-instance shader input of a conic gradient render task.
///
/// Must match the RADIAL_GRADIENT instance description in renderer/vertex.rs.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[repr(C)]
#[derive(Clone, Debug)]
pub struct ConicGradientInstance {
pub task_rect: DeviceRect,
pub center: DevicePoint,
pub scale: DeviceVector2D,
pub start_offset: f32,
pub end_offset: f32,
pub angle: f32,
pub extend_mode: i32,
pub gradient_stops_address: i32,
}
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradientCacheKey {
pub size: DeviceIntSize,
pub center: PointKey,
pub scale: PointKey,
pub start_offset: FloatKey,
pub end_offset: FloatKey,
pub angle: FloatKey,
pub extend_mode: ExtendMode,
pub stops: Vec<GradientStopKey>,
}
|
{ (a - b).abs() < 0.01 }
|
identifier_body
|
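The row above fills in `approx_eq`, the helper behind the hard-stop heuristic: each stop's angle is normalized into turns, folded into the 0-0.25 range with `rem_euclid`, and the larger cache size is only requested when the stop sits away from the horizontal and vertical axes. A small standalone sketch of just that angle test follows; the function name and signature are illustrative, not the WebRender API.

```rust
// Returns true when a stop at `stop_offset` (within [start_offset, end_offset])
// of a conic gradient rotated by `angle_rad` lands away from the horizontal or
// vertical axes, i.e. where aliasing of a hard stop is most visible.
fn hard_stop_needs_high_res(angle_rad: f32, start_offset: f32, end_offset: f32, stop_offset: f32) -> bool {
    fn approx_eq(a: f32, b: f32) -> bool { (a - b).abs() < 0.01 }

    let offset_range = end_offset - start_offset;
    if offset_range <= 0.0 {
        return false;
    }
    // Normalize into turns (0..1), then fold into a quarter turn (0..0.25):
    // a result near 0.0 or 0.25 means the stop sits on an axis.
    let a = angle_rad / (2.0 * std::f32::consts::PI) + start_offset + stop_offset / offset_range;
    let a = a.rem_euclid(0.25);
    !approx_eq(a, 0.0) && !approx_eq(a, 0.25)
}

fn main() {
    // A stop exactly on an axis does not need the larger cache size...
    assert!(!hard_stop_needs_high_res(0.0, 0.0, 1.0, 0.0));
    // ...but one at 1/8 of a turn (45 degrees) does.
    assert!(hard_stop_needs_high_res(0.0, 0.0, 1.0, 0.125));
}
```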
conic.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Conic gradients
//!
//! Specification: https://drafts.csswg.org/css-images-4/#conic-gradients
//!
//! Conic gradients are rendered via cached render tasks and composited with the image brush.
use euclid::vec2;
use api::{ExtendMode, GradientStop, PremultipliedColorF};
use api::units::*;
use crate::scene_building::IsVisible;
use crate::frame_builder::FrameBuildingState;
use crate::gpu_cache::{GpuCache, GpuCacheHandle};
use crate::intern::{Internable, InternDebug, Handle as InternHandle};
use crate::internal_types::LayoutPrimitiveInfo;
use crate::prim_store::{BrushSegment, GradientTileRange};
use crate::prim_store::{PrimitiveInstanceKind, PrimitiveOpacity, FloatKey};
use crate::prim_store::{PrimKeyCommonData, PrimTemplateCommonData, PrimitiveStore};
use crate::prim_store::{NinePatchDescriptor, PointKey, SizeKey, InternablePrimitive};
use crate::render_task::{RenderTask, RenderTaskKind};
use crate::render_task_graph::RenderTaskId;
use crate::render_task_cache::{RenderTaskCacheKeyKind, RenderTaskCacheKey, RenderTaskParent};
use crate::picture::{SurfaceIndex};
use std::{hash, ops::{Deref, DerefMut}};
use super::{stops_and_min_alpha, GradientStopKey, GradientGpuBlockBuilder};
/// Hashable conic gradient parameters, for use during prim interning.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Clone, MallocSizeOf, PartialEq)]
pub struct ConicGradientParams {
pub angle: f32, // in radians
pub start_offset: f32,
pub end_offset: f32,
}
impl Eq for ConicGradientParams {}
impl hash::Hash for ConicGradientParams {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.angle.to_bits().hash(state);
self.start_offset.to_bits().hash(state);
self.end_offset.to_bits().hash(state);
}
}
/// Identifying key for a conic gradient.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Clone, Eq, PartialEq, Hash, MallocSizeOf)]
pub struct ConicGradientKey {
pub common: PrimKeyCommonData,
pub extend_mode: ExtendMode,
pub center: PointKey,
pub params: ConicGradientParams,
pub stretch_size: SizeKey,
pub stops: Vec<GradientStopKey>,
pub tile_spacing: SizeKey,
pub nine_patch: Option<Box<NinePatchDescriptor>>,
}
impl ConicGradientKey {
pub fn new(
info: &LayoutPrimitiveInfo,
conic_grad: ConicGradient,
) -> Self {
ConicGradientKey {
common: info.into(),
extend_mode: conic_grad.extend_mode,
center: conic_grad.center,
params: conic_grad.params,
stretch_size: conic_grad.stretch_size,
stops: conic_grad.stops,
tile_spacing: conic_grad.tile_spacing,
nine_patch: conic_grad.nine_patch,
}
}
}
impl InternDebug for ConicGradientKey {}
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
pub struct ConicGradientTemplate {
pub common: PrimTemplateCommonData,
pub extend_mode: ExtendMode,
pub center: DevicePoint,
pub params: ConicGradientParams,
pub task_size: DeviceIntSize,
pub scale: DeviceVector2D,
pub stretch_size: LayoutSize,
pub tile_spacing: LayoutSize,
pub brush_segments: Vec<BrushSegment>,
pub stops_opacity: PrimitiveOpacity,
pub stops: Vec<GradientStop>,
pub stops_handle: GpuCacheHandle,
pub src_color: Option<RenderTaskId>,
}
impl Deref for ConicGradientTemplate {
type Target = PrimTemplateCommonData;
fn deref(&self) -> &Self::Target {
&self.common
}
}
impl DerefMut for ConicGradientTemplate {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.common
}
}
impl From<ConicGradientKey> for ConicGradientTemplate {
fn from(item: ConicGradientKey) -> Self {
let common = PrimTemplateCommonData::with_key_common(item.common);
let mut brush_segments = Vec::new();
if let Some(ref nine_patch) = item.nine_patch {
brush_segments = nine_patch.create_segments(common.prim_rect.size());
}
let (stops, min_alpha) = stops_and_min_alpha(&item.stops);
// Save opacity of the stops for use in
// selecting which pass this gradient
// should be drawn in.
let stops_opacity = PrimitiveOpacity::from_alpha(min_alpha);
let mut stretch_size: LayoutSize = item.stretch_size.into();
stretch_size.width = stretch_size.width.min(common.prim_rect.width());
stretch_size.height = stretch_size.height.min(common.prim_rect.height());
fn approx_eq(a: f32, b: f32) -> bool { (a - b).abs() < 0.01 }
// Attempt to detect some of the common configurations with hard gradient stops. Allow
// those a higher maximum resolution to avoid the worst cases of aliasing artifacts with
// large conic gradients. A better solution would be to go back to rendering very large
// conic gradients via a brush shader instead of caching all of them (unclear whether
// it is important enough to warrant the better solution).
let mut has_hard_stops = false;
let mut prev_stop = None;
let offset_range = item.params.end_offset - item.params.start_offset;
for stop in &stops {
if offset_range <= 0.0 {
break;
}
if let Some(prev_offset) = prev_stop {
// Check whether two consecutive stops are very close (hard stops).
if stop.offset < prev_offset + 0.005 / offset_range
|
}
prev_stop = Some(stop.offset);
}
let max_size = if has_hard_stops {
2048.0
} else {
1024.0
};
// Avoid rendering enormous gradients. Radial gradients are mostly made of soft transitions,
// so it is unlikely that rendering at a higher resolution than 1024 would produce noticeable
// differences, especially with 8 bits per channel.
let mut task_size: DeviceSize = stretch_size.cast_unit();
let mut scale = vec2(1.0, 1.0);
if task_size.width > max_size {
scale.x = task_size.width / max_size;
task_size.width = max_size;
}
if task_size.height > max_size {
scale.y = task_size.height / max_size;
task_size.height = max_size;
}
ConicGradientTemplate {
common,
center: DevicePoint::new(item.center.x, item.center.y),
extend_mode: item.extend_mode,
params: item.params,
stretch_size,
task_size: task_size.ceil().to_i32(),
scale,
tile_spacing: item.tile_spacing.into(),
brush_segments,
stops_opacity,
stops,
stops_handle: GpuCacheHandle::new(),
src_color: None,
}
}
}
impl ConicGradientTemplate {
/// Update the GPU cache for a given primitive template. This may be called multiple
/// times per frame, by each primitive reference that refers to this interned
/// template. The initial request call to the GPU cache ensures that work is only
/// done if the cache entry is invalid (due to first use or eviction).
pub fn update(
&mut self,
frame_state: &mut FrameBuildingState,
parent_surface: SurfaceIndex,
) {
if let Some(mut request) =
frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
// write_prim_gpu_blocks
request.push(PremultipliedColorF::WHITE);
request.push(PremultipliedColorF::WHITE);
request.push([
self.stretch_size.width,
self.stretch_size.height,
0.0,
0.0,
]);
// write_segment_gpu_blocks
for segment in &self.brush_segments {
// has to match VECS_PER_SEGMENT
request.write_segment(
segment.local_rect,
segment.extra_data,
);
}
}
if let Some(mut request) = frame_state.gpu_cache.request(&mut self.stops_handle) {
GradientGpuBlockBuilder::build(
false,
&mut request,
&self.stops,
);
}
let cache_key = ConicGradientCacheKey {
size: self.task_size,
center: PointKey { x: self.center.x, y: self.center.y },
scale: PointKey { x: self.scale.x, y: self.scale.y },
start_offset: FloatKey(self.params.start_offset),
end_offset: FloatKey(self.params.end_offset),
angle: FloatKey(self.params.angle),
extend_mode: self.extend_mode,
stops: self.stops.iter().map(|stop| (*stop).into()).collect(),
};
let task_id = frame_state.resource_cache.request_render_task(
RenderTaskCacheKey {
size: self.task_size,
kind: RenderTaskCacheKeyKind::ConicGradient(cache_key),
},
frame_state.gpu_cache,
frame_state.rg_builder,
None,
false,
RenderTaskParent::Surface(parent_surface),
frame_state.surfaces,
|rg_builder| {
rg_builder.add().init(RenderTask::new_dynamic(
self.task_size,
RenderTaskKind::ConicGradient(ConicGradientTask {
extend_mode: self.extend_mode,
scale: self.scale,
center: self.center,
params: self.params.clone(),
stops: self.stops_handle,
}),
))
}
);
self.src_color = Some(task_id);
// Tile spacing is always handled by decomposing into separate draw calls so the
// primitive opacity is equivalent to stops opacity. This might change to being
// set to non-opaque in the presence of tile spacing if/when tile spacing is handled
// in the same way as with the image primitive.
self.opacity = self.stops_opacity;
}
}
pub type ConicGradientDataHandle = InternHandle<ConicGradient>;
#[derive(Debug, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradient {
pub extend_mode: ExtendMode,
pub center: PointKey,
pub params: ConicGradientParams,
pub stretch_size: SizeKey,
pub stops: Vec<GradientStopKey>,
pub tile_spacing: SizeKey,
pub nine_patch: Option<Box<NinePatchDescriptor>>,
}
impl Internable for ConicGradient {
type Key = ConicGradientKey;
type StoreData = ConicGradientTemplate;
type InternData = ();
const PROFILE_COUNTER: usize = crate::profiler::INTERNED_CONIC_GRADIENTS;
}
impl InternablePrimitive for ConicGradient {
fn into_key(
self,
info: &LayoutPrimitiveInfo,
) -> ConicGradientKey {
ConicGradientKey::new(info, self)
}
fn make_instance_kind(
_key: ConicGradientKey,
data_handle: ConicGradientDataHandle,
_prim_store: &mut PrimitiveStore,
_reference_frame_relative_offset: LayoutVector2D,
) -> PrimitiveInstanceKind {
PrimitiveInstanceKind::ConicGradient {
data_handle,
visible_tiles_range: GradientTileRange::empty(),
}
}
}
impl IsVisible for ConicGradient {
fn is_visible(&self) -> bool {
true
}
}
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradientTask {
pub extend_mode: ExtendMode,
pub center: DevicePoint,
pub scale: DeviceVector2D,
pub params: ConicGradientParams,
pub stops: GpuCacheHandle,
}
impl ConicGradientTask {
pub fn to_instance(&self, target_rect: &DeviceIntRect, gpu_cache: &mut GpuCache) -> ConicGradientInstance {
ConicGradientInstance {
task_rect: target_rect.to_f32(),
center: self.center,
scale: self.scale,
start_offset: self.params.start_offset,
end_offset: self.params.end_offset,
angle: self.params.angle,
extend_mode: self.extend_mode as i32,
gradient_stops_address: self.stops.as_int(gpu_cache),
}
}
}
/// The per-instance shader input of a conic gradient render task.
///
/// Must match the RADIAL_GRADIENT instance description in renderer/vertex.rs.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[repr(C)]
#[derive(Clone, Debug)]
pub struct ConicGradientInstance {
pub task_rect: DeviceRect,
pub center: DevicePoint,
pub scale: DeviceVector2D,
pub start_offset: f32,
pub end_offset: f32,
pub angle: f32,
pub extend_mode: i32,
pub gradient_stops_address: i32,
}
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradientCacheKey {
pub size: DeviceIntSize,
pub center: PointKey,
pub scale: PointKey,
pub start_offset: FloatKey,
pub end_offset: FloatKey,
pub angle: FloatKey,
pub extend_mode: ExtendMode,
pub stops: Vec<GradientStopKey>,
}
|
{
// a is the angle of the stop normalized into 0-1 space and repeating in the 0-0.25 range.
// If close to 0.0 or 0.25 it means the stop is vertical or horizontal. For those, the lower
// resolution isn't a big issue.
let a = item.params.angle / (2.0 * std::f32::consts::PI)
+ item.params.start_offset
+ stop.offset / offset_range;
let a = a.rem_euclid(0.25);
if !approx_eq(a, 0.0) && !approx_eq(a, 0.25) {
has_hard_stops = true;
break;
}
}
|
conditional_block
|
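After the hard-stop scan, the template clamps the cached task to a maximum edge length (2048 device pixels when hard stops were detected, 1024 otherwise) and records the per-axis scale the shader must reapply. Below is a minimal sketch of that clamp-and-scale step, with plain tuples standing in for `DeviceSize`/`DeviceVector2D`.

```rust
// Clamp a requested (width, height) to `max_size` per axis and return the
// clamped size together with the per-axis scale the shader must reapply.
// Plain f32 tuples stand in for DeviceSize / DeviceVector2D here.
fn clamp_task_size(width: f32, height: f32, has_hard_stops: bool) -> ((f32, f32), (f32, f32)) {
    let max_size = if has_hard_stops { 2048.0 } else { 1024.0 };
    let (mut w, mut h) = (width, height);
    let (mut sx, mut sy) = (1.0, 1.0);
    if w > max_size {
        sx = w / max_size;
        w = max_size;
    }
    if h > max_size {
        sy = h / max_size;
        h = max_size;
    }
    ((w, h), (sx, sy))
}

fn main() {
    // A 4096x512 soft gradient renders into a 1024x512 task, scaled 4x horizontally.
    let ((w, h), (sx, sy)) = clamp_task_size(4096.0, 512.0, false);
    assert_eq!((w, h), (1024.0, 512.0));
    assert_eq!((sx, sy), (4.0, 1.0));
}
```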
conic.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Conic gradients
//!
//! Specification: https://drafts.csswg.org/css-images-4/#conic-gradients
//!
//! Conic gradients are rendered via cached render tasks and composited with the image brush.
use euclid::vec2;
use api::{ExtendMode, GradientStop, PremultipliedColorF};
use api::units::*;
use crate::scene_building::IsVisible;
use crate::frame_builder::FrameBuildingState;
use crate::gpu_cache::{GpuCache, GpuCacheHandle};
use crate::intern::{Internable, InternDebug, Handle as InternHandle};
use crate::internal_types::LayoutPrimitiveInfo;
use crate::prim_store::{BrushSegment, GradientTileRange};
use crate::prim_store::{PrimitiveInstanceKind, PrimitiveOpacity, FloatKey};
use crate::prim_store::{PrimKeyCommonData, PrimTemplateCommonData, PrimitiveStore};
use crate::prim_store::{NinePatchDescriptor, PointKey, SizeKey, InternablePrimitive};
use crate::render_task::{RenderTask, RenderTaskKind};
use crate::render_task_graph::RenderTaskId;
use crate::render_task_cache::{RenderTaskCacheKeyKind, RenderTaskCacheKey, RenderTaskParent};
use crate::picture::{SurfaceIndex};
use std::{hash, ops::{Deref, DerefMut}};
use super::{stops_and_min_alpha, GradientStopKey, GradientGpuBlockBuilder};
/// Hashable conic gradient parameters, for use during prim interning.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Clone, MallocSizeOf, PartialEq)]
pub struct ConicGradientParams {
pub angle: f32, // in radians
pub start_offset: f32,
pub end_offset: f32,
}
impl Eq for ConicGradientParams {}
impl hash::Hash for ConicGradientParams {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.angle.to_bits().hash(state);
self.start_offset.to_bits().hash(state);
self.end_offset.to_bits().hash(state);
}
}
/// Identifying key for a conic gradient.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(Debug, Clone, Eq, PartialEq, Hash, MallocSizeOf)]
pub struct ConicGradientKey {
pub common: PrimKeyCommonData,
pub extend_mode: ExtendMode,
pub center: PointKey,
pub params: ConicGradientParams,
pub stretch_size: SizeKey,
pub stops: Vec<GradientStopKey>,
pub tile_spacing: SizeKey,
pub nine_patch: Option<Box<NinePatchDescriptor>>,
}
impl ConicGradientKey {
pub fn new(
info: &LayoutPrimitiveInfo,
conic_grad: ConicGradient,
) -> Self {
ConicGradientKey {
common: info.into(),
extend_mode: conic_grad.extend_mode,
center: conic_grad.center,
params: conic_grad.params,
stretch_size: conic_grad.stretch_size,
stops: conic_grad.stops,
tile_spacing: conic_grad.tile_spacing,
nine_patch: conic_grad.nine_patch,
}
}
}
impl InternDebug for ConicGradientKey {}
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[derive(MallocSizeOf)]
pub struct ConicGradientTemplate {
pub common: PrimTemplateCommonData,
pub extend_mode: ExtendMode,
pub center: DevicePoint,
pub params: ConicGradientParams,
pub task_size: DeviceIntSize,
pub scale: DeviceVector2D,
pub stretch_size: LayoutSize,
pub tile_spacing: LayoutSize,
pub brush_segments: Vec<BrushSegment>,
pub stops_opacity: PrimitiveOpacity,
pub stops: Vec<GradientStop>,
pub stops_handle: GpuCacheHandle,
pub src_color: Option<RenderTaskId>,
}
impl Deref for ConicGradientTemplate {
type Target = PrimTemplateCommonData;
fn deref(&self) -> &Self::Target {
&self.common
}
}
impl DerefMut for ConicGradientTemplate {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.common
}
}
impl From<ConicGradientKey> for ConicGradientTemplate {
fn from(item: ConicGradientKey) -> Self {
let common = PrimTemplateCommonData::with_key_common(item.common);
let mut brush_segments = Vec::new();
if let Some(ref nine_patch) = item.nine_patch {
brush_segments = nine_patch.create_segments(common.prim_rect.size());
}
let (stops, min_alpha) = stops_and_min_alpha(&item.stops);
// Save opacity of the stops for use in
// selecting which pass this gradient
// should be drawn in.
let stops_opacity = PrimitiveOpacity::from_alpha(min_alpha);
let mut stretch_size: LayoutSize = item.stretch_size.into();
stretch_size.width = stretch_size.width.min(common.prim_rect.width());
stretch_size.height = stretch_size.height.min(common.prim_rect.height());
fn approx_eq(a: f32, b: f32) -> bool { (a - b).abs() < 0.01 }
// Attempt to detect some of the common configurations with hard gradient stops. Allow
// those a higher maximum resolution to avoid the worst cases of aliasing artifacts with
// large conic gradients. A better solution would be to go back to rendering very large
// conic gradients via a brush shader instead of caching all of them (unclear whether
// it is important enough to warrant the better solution).
let mut has_hard_stops = false;
let mut prev_stop = None;
let offset_range = item.params.end_offset - item.params.start_offset;
for stop in &stops {
if offset_range <= 0.0 {
break;
}
if let Some(prev_offset) = prev_stop {
// Check whether two consecutive stops are very close (hard stops).
if stop.offset < prev_offset + 0.005 / offset_range {
// a is the angle of the stop normalized into 0-1 space and repeating in the 0-0.25 range.
// If close to 0.0 or 0.25 it means the stop is vertical or horizontal. For those, the lower
// resolution isn't a big issue.
let a = item.params.angle / (2.0 * std::f32::consts::PI)
+ item.params.start_offset
+ stop.offset / offset_range;
let a = a.rem_euclid(0.25);
if !approx_eq(a, 0.0) && !approx_eq(a, 0.25) {
has_hard_stops = true;
break;
}
}
}
prev_stop = Some(stop.offset);
}
let max_size = if has_hard_stops {
2048.0
} else {
1024.0
};
// Avoid rendering enormous gradients. Radial gradients are mostly made of soft transitions,
// so it is unlikely that rendering at a higher resolution than 1024 would produce noticeable
// differences, especially with 8 bits per channel.
let mut task_size: DeviceSize = stretch_size.cast_unit();
let mut scale = vec2(1.0, 1.0);
if task_size.width > max_size {
scale.x = task_size.width / max_size;
task_size.width = max_size;
}
if task_size.height > max_size {
scale.y = task_size.height / max_size;
task_size.height = max_size;
}
ConicGradientTemplate {
common,
center: DevicePoint::new(item.center.x, item.center.y),
extend_mode: item.extend_mode,
params: item.params,
stretch_size,
task_size: task_size.ceil().to_i32(),
scale,
tile_spacing: item.tile_spacing.into(),
brush_segments,
stops_opacity,
stops,
stops_handle: GpuCacheHandle::new(),
src_color: None,
}
}
}
impl ConicGradientTemplate {
/// Update the GPU cache for a given primitive template. This may be called multiple
/// times per frame, by each primitive reference that refers to this interned
/// template. The initial request call to the GPU cache ensures that work is only
/// done if the cache entry is invalid (due to first use or eviction).
pub fn update(
&mut self,
frame_state: &mut FrameBuildingState,
parent_surface: SurfaceIndex,
) {
if let Some(mut request) =
frame_state.gpu_cache.request(&mut self.common.gpu_cache_handle) {
// write_prim_gpu_blocks
request.push(PremultipliedColorF::WHITE);
request.push(PremultipliedColorF::WHITE);
request.push([
self.stretch_size.width,
self.stretch_size.height,
0.0,
0.0,
]);
// write_segment_gpu_blocks
for segment in &self.brush_segments {
// has to match VECS_PER_SEGMENT
request.write_segment(
segment.local_rect,
segment.extra_data,
);
}
}
if let Some(mut request) = frame_state.gpu_cache.request(&mut self.stops_handle) {
GradientGpuBlockBuilder::build(
false,
&mut request,
&self.stops,
);
}
let cache_key = ConicGradientCacheKey {
size: self.task_size,
center: PointKey { x: self.center.x, y: self.center.y },
scale: PointKey { x: self.scale.x, y: self.scale.y },
start_offset: FloatKey(self.params.start_offset),
end_offset: FloatKey(self.params.end_offset),
angle: FloatKey(self.params.angle),
extend_mode: self.extend_mode,
stops: self.stops.iter().map(|stop| (*stop).into()).collect(),
};
let task_id = frame_state.resource_cache.request_render_task(
RenderTaskCacheKey {
size: self.task_size,
kind: RenderTaskCacheKeyKind::ConicGradient(cache_key),
},
frame_state.gpu_cache,
frame_state.rg_builder,
None,
false,
RenderTaskParent::Surface(parent_surface),
frame_state.surfaces,
|rg_builder| {
rg_builder.add().init(RenderTask::new_dynamic(
self.task_size,
RenderTaskKind::ConicGradient(ConicGradientTask {
extend_mode: self.extend_mode,
scale: self.scale,
center: self.center,
params: self.params.clone(),
stops: self.stops_handle,
}),
))
}
);
self.src_color = Some(task_id);
// Tile spacing is always handled by decomposing into separate draw calls so the
// primitive opacity is equivalent to stops opacity. This might change to being
// set to non-opaque in the presence of tile spacing if/when tile spacing is handled
// in the same way as with the image primitive.
self.opacity = self.stops_opacity;
}
}
pub type ConicGradientDataHandle = InternHandle<ConicGradient>;
#[derive(Debug, MallocSizeOf)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradient {
pub extend_mode: ExtendMode,
pub center: PointKey,
pub params: ConicGradientParams,
pub stretch_size: SizeKey,
pub stops: Vec<GradientStopKey>,
pub tile_spacing: SizeKey,
pub nine_patch: Option<Box<NinePatchDescriptor>>,
}
impl Internable for ConicGradient {
type Key = ConicGradientKey;
type StoreData = ConicGradientTemplate;
type InternData = ();
const PROFILE_COUNTER: usize = crate::profiler::INTERNED_CONIC_GRADIENTS;
}
impl InternablePrimitive for ConicGradient {
fn into_key(
self,
info: &LayoutPrimitiveInfo,
) -> ConicGradientKey {
ConicGradientKey::new(info, self)
}
fn make_instance_kind(
_key: ConicGradientKey,
data_handle: ConicGradientDataHandle,
_prim_store: &mut PrimitiveStore,
_reference_frame_relative_offset: LayoutVector2D,
) -> PrimitiveInstanceKind {
PrimitiveInstanceKind::ConicGradient {
data_handle,
visible_tiles_range: GradientTileRange::empty(),
}
}
}
impl IsVisible for ConicGradient {
fn is_visible(&self) -> bool {
true
}
}
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct
|
{
pub extend_mode: ExtendMode,
pub center: DevicePoint,
pub scale: DeviceVector2D,
pub params: ConicGradientParams,
pub stops: GpuCacheHandle,
}
impl ConicGradientTask {
pub fn to_instance(&self, target_rect: &DeviceIntRect, gpu_cache: &mut GpuCache) -> ConicGradientInstance {
ConicGradientInstance {
task_rect: target_rect.to_f32(),
center: self.center,
scale: self.scale,
start_offset: self.params.start_offset,
end_offset: self.params.end_offset,
angle: self.params.angle,
extend_mode: self.extend_mode as i32,
gradient_stops_address: self.stops.as_int(gpu_cache),
}
}
}
/// The per-instance shader input of a conic gradient render task.
///
/// Must match the CONIC_GRADIENT instance description in renderer/vertex.rs.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
#[repr(C)]
#[derive(Clone, Debug)]
pub struct ConicGradientInstance {
pub task_rect: DeviceRect,
pub center: DevicePoint,
pub scale: DeviceVector2D,
pub start_offset: f32,
pub end_offset: f32,
pub angle: f32,
pub extend_mode: i32,
pub gradient_stops_address: i32,
}
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ConicGradientCacheKey {
pub size: DeviceIntSize,
pub center: PointKey,
pub scale: PointKey,
pub start_offset: FloatKey,
pub end_offset: FloatKey,
pub angle: FloatKey,
pub extend_mode: ExtendMode,
pub stops: Vec<GradientStopKey>,
}
|
ConicGradientTask
|
identifier_name
|
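As an aside on the clamping logic in ConicGradientTemplate::from above: the render-task size is capped per axis (2048 device pixels when hard stops were detected, 1024 otherwise) and a compensating per-axis scale records how much the cached task was shrunk. A minimal standalone sketch of just that step, using illustrative names rather than WebRender's own types, could look like:
// Sketch of the size-clamping step described in the comments above.
// `max_size` would be 2048.0 when hard stops were detected, 1024.0 otherwise.
fn clamp_task_size(mut size: (f32, f32), max_size: f32) -> ((f32, f32), (f32, f32)) {
    let mut scale = (1.0_f32, 1.0_f32);
    if size.0 > max_size {
        scale.0 = size.0 / max_size;
        size.0 = max_size;
    }
    if size.1 > max_size {
        scale.1 = size.1 / max_size;
        size.1 = max_size;
    }
    // Returns the clamped task size plus the scale needed to stretch it back.
    (size, scale)
}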
mod.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Rust Prelude
//!
//! Because `std` is required by most serious Rust software, it is
//! imported at the topmost level of every crate by default, as if the
//! first line of each crate was
//!
//! ```ignore
//! extern crate std;
//! ```
//!
//! This means that the contents of std can be accessed from any context
//! with the `std::` path prefix, as in `use std::vec`, `use std::thread::spawn`,
//! etc.
//!
//! Additionally, `std` contains a `prelude` module that reexports many of the
//! most common traits, types and functions. The contents of the prelude are
//! imported into every *module* by default. Implicitly, all modules behave as if
//! they contained the following prologue:
|
//! The prelude is primarily concerned with exporting *traits* that are so
//! pervasive that it would be obnoxious to import for every use, particularly
//! those that define methods on primitive types.
#![stable(feature = "rust1", since = "1.0.0")]
pub mod v1;
|
//!
//! ```ignore
//! use std::prelude::v1::*;
//! ```
//!
|
random_line_split
|
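As a small illustration of the prelude documentation in the row above: traits and types re-exported by the prelude are usable in any module without an explicit `use`.
fn main() {
    // `ToString` and `Vec` come from the prelude, so no imports are needed.
    let s = 42.to_string();
    let v = vec![s.clone(), "items".to_string()];
    assert_eq!(s, "42");
    assert_eq!(v.len(), 2);
}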
clap_app.rs
|
/*
Precached - A Linux process monitor and pre-caching daemon
Copyright (C) 2017-2020 the precached developers
This file is part of precached.
Precached is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Precached is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Precached. If not, see <http://www.gnu.org/licenses/>.
*/
use clap::{App, AppSettings, Arg, SubCommand};
use crate::constants;
use crate::i18n;
pub fn get_app<'a, 'b>() -> App<'a, 'b>
where
'a: 'b,
|
.multiple(true)
.help(tr!("precachedtop-output-verbosity")),
)
.subcommand(
SubCommand::with_name("help").setting(AppSettings::DeriveDisplayOrder), //.about(tr!("precachedtop-help")),
)
.subcommand(
SubCommand::with_name("completions")
.setting(AppSettings::Hidden)
//.about(tr!("precachedtop-completions"))
.arg(
Arg::with_name("SHELL")
.required(true)
.possible_values(&["bash", "fish", "zsh", "powershell"])
.help(tr!("precachedtop-completions-shell")),
),
)
}
|
{
App::new("precachedtop")
.version(env!("CARGO_PKG_VERSION"))
.author("X3n0m0rph59 <[email protected]>")
// .versionless_subcommands(true)
// .subcommand_required_else_help(true)
.setting(AppSettings::GlobalVersion)
.setting(AppSettings::DeriveDisplayOrder)
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.value_name("file")
.help(tr!("precachedtop-config-file"))
.default_value(constants::CONFIG_FILE)
.takes_value(true),
)
.arg(
Arg::with_name("v")
.short("v")
|
identifier_body
|
clap_app.rs
|
/*
Precached - A Linux process monitor and pre-caching daemon
Copyright (C) 2017-2020 the precached developers
This file is part of precached.
|
Precached is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Precached is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Precached. If not, see <http://www.gnu.org/licenses/>.
*/
use clap::{App, AppSettings, Arg, SubCommand};
use crate::constants;
use crate::i18n;
pub fn get_app<'a, 'b>() -> App<'a, 'b>
where
'a: 'b,
{
App::new("precachedtop")
.version(env!("CARGO_PKG_VERSION"))
.author("X3n0m0rph59 <[email protected]>")
        // .versionless_subcommands(true)
        // .subcommand_required_else_help(true)
.setting(AppSettings::GlobalVersion)
.setting(AppSettings::DeriveDisplayOrder)
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.value_name("file")
.help(tr!("precachedtop-config-file"))
.default_value(constants::CONFIG_FILE)
.takes_value(true),
)
.arg(
Arg::with_name("v")
.short("v")
.multiple(true)
.help(tr!("precachedtop-output-verbosity")),
)
.subcommand(
SubCommand::with_name("help").setting(AppSettings::DeriveDisplayOrder), //.about(tr!("precachedtop-help")),
)
.subcommand(
SubCommand::with_name("completions")
.setting(AppSettings::Hidden)
//.about(tr!("precachedtop-completions"))
.arg(
Arg::with_name("SHELL")
.required(true)
.possible_values(&["bash", "fish", "zsh", "powershell"])
.help(tr!("precachedtop-completions-shell")),
),
)
}
|
random_line_split
|
|
clap_app.rs
|
/*
Precached - A Linux process monitor and pre-caching daemon
Copyright (C) 2017-2020 the precached developers
This file is part of precached.
Precached is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Precached is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Precached. If not, see <http://www.gnu.org/licenses/>.
*/
use clap::{App, AppSettings, Arg, SubCommand};
use crate::constants;
use crate::i18n;
pub fn
|
<'a, 'b>() -> App<'a, 'b>
where
'a: 'b,
{
App::new("precachedtop")
.version(env!("CARGO_PKG_VERSION"))
.author("X3n0m0rph59 <[email protected]>")
        // .versionless_subcommands(true)
        // .subcommand_required_else_help(true)
.setting(AppSettings::GlobalVersion)
.setting(AppSettings::DeriveDisplayOrder)
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.value_name("file")
.help(tr!("precachedtop-config-file"))
.default_value(constants::CONFIG_FILE)
.takes_value(true),
)
.arg(
Arg::with_name("v")
.short("v")
.multiple(true)
.help(tr!("precachedtop-output-verbosity")),
)
.subcommand(
SubCommand::with_name("help").setting(AppSettings::DeriveDisplayOrder), //.about(tr!("precachedtop-help")),
)
.subcommand(
SubCommand::with_name("completions")
.setting(AppSettings::Hidden)
//.about(tr!("precachedtop-completions"))
.arg(
Arg::with_name("SHELL")
.required(true)
.possible_values(&["bash", "fish", "zsh", "powershell"])
.help(tr!("precachedtop-completions-shell")),
),
)
}
|
get_app
|
identifier_name
|
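The `get_app` builder in the clap_app.rs rows above only constructs the argument parser; a typical, hypothetical call site in the binary would turn it into parsed matches, roughly:
fn main() {
    // Parse argv using the App returned by get_app().
    let matches = get_app().get_matches();
    // "config" has a default value, so value_of always returns Some here.
    if let Some(config_file) = matches.value_of("config") {
        println!("using config file: {}", config_file);
    }
    // Count how many times -v was passed to derive a verbosity level.
    let verbosity = matches.occurrences_of("v");
    println!("verbosity level: {}", verbosity);
}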
main.rs
|
extern crate iron;
extern crate rustc_serialize;
extern crate queryst;
use rustc_serialize::json::{self, Json};
use iron::prelude::*;
use iron::status;
use queryst::parse;
#[derive(RustcEncodable)]
pub struct PadJson {
str: String
}
fn left_pad(string: &str, len: u32, ch: &str) -> String {
    let mut i : i32 = 0;
let temp_len : i32 = len as i32 - (string.len() as i32);
let mut padded_string : String = string.to_string();
while i < temp_len {
padded_string = ch.to_string() + &padded_string;
|
fn main() {
fn handler(req: &mut Request) -> IronResult<Response> {
let query = req.url.query.clone().unwrap_or("".to_string());
let query_params: Json = parse(&query.to_string()).unwrap();
let obj = query_params.as_object().unwrap();
let string : &str = obj.get("str").unwrap().as_string().unwrap();
let len : u32 = obj.get("len").unwrap().as_string().unwrap().parse().unwrap();
let ch : &str = obj.get("ch").unwrap().as_string().unwrap();
let padded_resp = PadJson {
str: left_pad(string, len, ch),
};
Ok(Response::with((status::Ok, json::encode(&padded_resp).unwrap())))
}
Iron::new(handler).http("localhost:3000").unwrap();
}
|
i = i + 1;
}
return padded_string;
}
|
random_line_split
|
main.rs
|
extern crate iron;
extern crate rustc_serialize;
extern crate queryst;
use rustc_serialize::json::{self, Json};
use iron::prelude::*;
use iron::status;
use queryst::parse;
#[derive(RustcEncodable)]
pub struct PadJson {
str: String
}
fn left_pad(string: &str, len: u32, ch: &str) -> String {
    let mut i : i32 = 0;
let temp_len : i32 = len as i32 - (string.len() as i32);
let mut padded_string : String = string.to_string();
while i < temp_len {
padded_string = ch.to_string() + &padded_string;
i = i + 1;
}
return padded_string;
}
fn main() {
fn
|
(req: &mut Request) -> IronResult<Response> {
let query = req.url.query.clone().unwrap_or("".to_string());
let query_params: Json = parse(&query.to_string()).unwrap();
let obj = query_params.as_object().unwrap();
let string : &str = obj.get("str").unwrap().as_string().unwrap();
let len : u32 = obj.get("len").unwrap().as_string().unwrap().parse().unwrap();
let ch : &str = obj.get("ch").unwrap().as_string().unwrap();
let padded_resp = PadJson {
str: left_pad(string, len, ch),
};
Ok(Response::with((status::Ok, json::encode(&padded_resp).unwrap())))
}
Iron::new(handler).http("localhost:3000").unwrap();
}
|
handler
|
identifier_name
|
main.rs
|
extern crate iron;
extern crate rustc_serialize;
extern crate queryst;
use rustc_serialize::json::{self, Json};
use iron::prelude::*;
use iron::status;
use queryst::parse;
#[derive(RustcEncodable)]
pub struct PadJson {
str: String
}
fn left_pad(string: &str, len: u32, ch: &str) -> String {
    let mut i : i32 = 0;
let temp_len : i32 = len as i32 - (string.len() as i32);
let mut padded_string : String = string.to_string();
while i < temp_len {
padded_string = ch.to_string() + &padded_string;
i = i + 1;
}
return padded_string;
}
fn main() {
fn handler(req: &mut Request) -> IronResult<Response>
|
Iron::new(handler).http("localhost:3000").unwrap();
}
|
{
let query = req.url.query.clone().unwrap_or("".to_string());
let query_params: Json = parse(&query.to_string()).unwrap();
let obj = query_params.as_object().unwrap();
let string : &str = obj.get("str").unwrap().as_string().unwrap();
let len : u32 = obj.get("len").unwrap().as_string().unwrap().parse().unwrap();
let ch : &str = obj.get("ch").unwrap().as_string().unwrap();
let padded_resp = PadJson {
str: left_pad(string, len, ch),
};
Ok(Response::with((status::Ok, json::encode(&padded_resp).unwrap())))
}
|
identifier_body
|
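For reference, a quick standalone check of the `left_pad` helper defined in the main.rs rows above (outside the Iron handler): shorter strings are padded up to the requested length, and strings already at or beyond that length are returned unchanged.
fn main() {
    // Pads "7" with zeros up to a total length of 3.
    assert_eq!(left_pad("7", 3, "0"), "007");
    // No padding is added when the input is already long enough.
    assert_eq!(left_pad("hello", 3, "0"), "hello");
}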
impl-not-adjacent-to-type.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
mod foo {
pub struct
|
{
pub x: i32,
pub y: i32,
}
}
impl foo::Point {
fn x(&self) -> i32 { self.x }
}
fn main() {
assert_eq!((foo::Point { x: 1, y: 3}).x(), 1);
}
|
Point
|
identifier_name
|
impl-not-adjacent-to-type.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
mod foo {
pub struct Point {
pub x: i32,
|
fn x(&self) -> i32 { self.x }
}
fn main() {
assert_eq!((foo::Point { x: 1, y: 3}).x(), 1);
}
|
pub y: i32,
}
}
impl foo::Point {
|
random_line_split
|
impl-not-adjacent-to-type.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
mod foo {
pub struct Point {
pub x: i32,
pub y: i32,
}
}
impl foo::Point {
fn x(&self) -> i32 { self.x }
}
fn main()
|
{
assert_eq!((foo::Point { x: 1, y: 3}).x(), 1);
}
|
identifier_body
|
|
instruction_analyzer.rs
|
//! Implements Instruction-level analysis used for lifting when information from radare2 is
//! insufficient.
//!
//! Instruction analysis may be abbreviated as `IA`
use std::borrow::Cow;
use capstone_rust::capstone as cs;
// TODO: Register must be replaced by Register information from arch-rs.
// This will be a part of a bigger rewrite/refactor.
/// Describes operands for an instruction
#[derive(Debug, Clone)]
pub enum IOperand {
Register(String),
Constant(i64),
Immediate(i64),
Memory {
base: Option<String>,
index: Option<String>,
scale: i32,
disp: i64,
},
Other,
Invalid,
}
/// Holds analysis information for a single instruction
#[derive(Debug, Clone, Default)]
pub struct InstructionInfo {
mnemonic: Cow<'static, str>,
reads: Vec<IOperand>,
writes: Vec<IOperand>,
}
/// Errors from Instruction-level Analysis
pub type IAError = Cow<'static, str>;
/// Trait for a struct to be an Instruction-level analyzer
pub trait InstructionAnalyzer: Sized {
/// Construct the IA
fn new(bytes: Vec<u8>) -> Result<Self, IAError>;
/// Return analyzed information
fn info(&self) -> Result<&InstructionInfo, IAError>;
fn mnemonic(&self) -> &Cow<'static, str> {
&self.info().expect("Unable to get InstructionInfo").mnemonic
}
/// Return a `Vec` of registers that are read by this instruction
fn registers_read(&self) -> Vec<&IOperand>
|
/// Return a `Vec` of registers that are written to by this instruction
fn registers_written(&self) -> Vec<&IOperand> {
self.info().expect("Unable to get InstructionInfo").writes.iter().filter(|&x| match x {
&IOperand::Register(_) => true,
_ => false,
}).collect()
}
fn has_memory_operand(&self) -> bool {
self.has_memory_read() || self.has_memory_written()
}
/// Return if this instruction reads from memory
fn has_memory_read(&self) -> bool {
self.info().expect("Unable to get InstructionInfo").reads.iter().any(|x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
/// Return if this instruction writes to memory
fn has_memory_written(&self) -> bool {
self.info().expect("Unable to get InstructionInfo").writes.iter().any(|x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
/// Returns memory written to by the instruction, in standard sib form
fn memory_written(&self) -> Option<&IOperand> {
self.info().expect("Unable to get InstructionInfo").writes.iter().find(|&x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
/// Returns memory read by the instruction, in standard sib form
fn memory_read(&self) -> Option<&IOperand> {
self.info().expect("Unable to get InstructionInfo").reads.iter().find(|&x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
}
/// Capstone-based x86 instruction analyzer
#[allow(non_camel_case_types)]
pub struct X86_CS_IA {
bytes: Vec<u8>,
cs: cs::Capstone,
info: InstructionInfo,
}
//impl From<cs::CsErr> for IAError {
//fn from(err: cs::CsErr) -> IAError {
//Cow::from(err.to_string())
//}
//}
impl X86_CS_IA {
fn reg_name(&self, reg: u32) -> Option<String> {
self.cs.reg_name(reg).map(|x| x.to_owned())
}
fn analyze(&mut self) -> Result<(), IAError> {
        // Disassemble exactly one instruction
let buf = match self.cs.disasm(self.bytes.as_slice(), 0x0, 1) {
Ok(d) => d,
Err(e) => return Err(Cow::from(e.to_string())),
};
for instr in buf.iter() {
// TODO: Look into prefixes for opcode.
// See how this information is set in capstone.
self.info.mnemonic = Cow::from(instr.mnemonic);
let details = instr.detail.unwrap();
if let cs::DetailsArch::X86(arch) = details.arch {
for i in 0..arch.op_count {
// Get the current operand
let op: cs::cs_x86_op = arch.operands[i as usize];
let iop: IOperand = match op.type_ {
cs::x86_op_type::X86_OP_REG => {
let reg: cs::x86_reg = op.reg();
if let Some(reg_name) = self.reg_name(reg.as_int()) {
IOperand::Register(reg_name)
} else {
IOperand::Invalid
}
},
cs::x86_op_type::X86_OP_MEM => {
let mem: &cs::x86_op_mem = op.mem();
let base = self.reg_name(mem.base);
let index = self.reg_name(mem.index);
let scale = mem.scale;
let disp = mem.disp;
IOperand::Memory {
base: base,
index: index,
scale: scale,
disp: disp,
}
},
cs::x86_op_type::X86_OP_IMM => IOperand::Immediate(op.imm()),
// FP/XMM/etc. We can handle these later. Right now, we only
// bother about registers (gpr) and memory operands.
_ => IOperand::Other,
};
if i == 0 {
self.info.reads.push(iop);
} else {
self.info.writes.push(iop);
}
}
}
}
Ok(())
}
}
impl InstructionAnalyzer for X86_CS_IA {
fn new(bytes: Vec<u8>) -> Result<X86_CS_IA, IAError> {
let dis = cs::Capstone::new(cs::cs_arch::CS_ARCH_X86, cs::CS_MODE_32).unwrap();
dis.option(cs::cs_opt_type::CS_OPT_DETAIL, cs::cs_opt_value::CS_OPT_ON).unwrap();
let mut ia = X86_CS_IA { bytes: bytes, cs: dis, info: InstructionInfo::default() };
ia.analyze()?;
Ok(ia)
}
fn info(&self) -> Result<&InstructionInfo, IAError> {
Ok(&self.info)
}
}
|
{
self.info().expect("Unable to get InstructionInfo").reads.iter().filter(|&x| match x {
&IOperand::Register(_) => true,
_ => false,
}).collect()
}
|
identifier_body
|
instruction_analyzer.rs
|
//! Implements Instruction-level analysis used for lifting when information from radare2 is
//! insufficient.
//!
//! Instruction analysis may be abbreviated as `IA`
use std::borrow::Cow;
use capstone_rust::capstone as cs;
// TODO: Register must be replaced by Register information from arch-rs.
// This will be a part of a bigger rewrite/refactor.
/// Describes operands for an instruction
#[derive(Debug, Clone)]
pub enum IOperand {
Register(String),
Constant(i64),
Immediate(i64),
Memory {
base: Option<String>,
index: Option<String>,
scale: i32,
disp: i64,
},
Other,
Invalid,
}
/// Holds analysis information for a single instruction
#[derive(Debug, Clone, Default)]
pub struct InstructionInfo {
mnemonic: Cow<'static, str>,
reads: Vec<IOperand>,
writes: Vec<IOperand>,
}
/// Errors from Instruction-level Analysis
pub type IAError = Cow<'static, str>;
/// Trait for a struct to be an Instruction-level analyzer
pub trait InstructionAnalyzer: Sized {
/// Construct the IA
fn new(bytes: Vec<u8>) -> Result<Self, IAError>;
/// Return analyzed information
fn info(&self) -> Result<&InstructionInfo, IAError>;
fn mnemonic(&self) -> &Cow<'static, str> {
&self.info().expect("Unable to get InstructionInfo").mnemonic
}
/// Return a `Vec` of registers that are read by this instruction
fn registers_read(&self) -> Vec<&IOperand> {
self.info().expect("Unable to get InstructionInfo").reads.iter().filter(|&x| match x {
&IOperand::Register(_) => true,
_ => false,
}).collect()
}
/// Return a `Vec` of registers that are written to by this instruction
fn registers_written(&self) -> Vec<&IOperand> {
self.info().expect("Unable to get InstructionInfo").writes.iter().filter(|&x| match x {
&IOperand::Register(_) => true,
_ => false,
}).collect()
}
fn has_memory_operand(&self) -> bool {
self.has_memory_read() || self.has_memory_written()
}
/// Return if this instruction reads from memory
fn has_memory_read(&self) -> bool {
self.info().expect("Unable to get InstructionInfo").reads.iter().any(|x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
/// Return if this instruction writes to memory
fn
|
(&self) -> bool {
self.info().expect("Unable to get InstructionInfo").writes.iter().any(|x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
/// Returns memory written to by the instruction, in standard sib form
fn memory_written(&self) -> Option<&IOperand> {
self.info().expect("Unable to get InstructionInfo").writes.iter().find(|&x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
/// Returns memory read by the instruction, in standard sib form
fn memory_read(&self) -> Option<&IOperand> {
self.info().expect("Unable to get InstructionInfo").reads.iter().find(|&x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
}
/// Capstone-based x86 instruction analyzer
#[allow(non_camel_case_types)]
pub struct X86_CS_IA {
bytes: Vec<u8>,
cs: cs::Capstone,
info: InstructionInfo,
}
//impl From<cs::CsErr> for IAError {
//fn from(err: cs::CsErr) -> IAError {
//Cow::from(err.to_string())
//}
//}
impl X86_CS_IA {
fn reg_name(&self, reg: u32) -> Option<String> {
self.cs.reg_name(reg).map(|x| x.to_owned())
}
fn analyze(&mut self) -> Result<(), IAError> {
        // Disassemble exactly one instruction
let buf = match self.cs.disasm(self.bytes.as_slice(), 0x0, 1) {
Ok(d) => d,
Err(e) => return Err(Cow::from(e.to_string())),
};
for instr in buf.iter() {
// TODO: Look into prefixes for opcode.
// See how this information is set in capstone.
self.info.mnemonic = Cow::from(instr.mnemonic);
let details = instr.detail.unwrap();
if let cs::DetailsArch::X86(arch) = details.arch {
for i in 0..arch.op_count {
// Get the current operand
let op: cs::cs_x86_op = arch.operands[i as usize];
let iop: IOperand = match op.type_ {
cs::x86_op_type::X86_OP_REG => {
let reg: cs::x86_reg = op.reg();
if let Some(reg_name) = self.reg_name(reg.as_int()) {
IOperand::Register(reg_name)
} else {
IOperand::Invalid
}
},
cs::x86_op_type::X86_OP_MEM => {
let mem: &cs::x86_op_mem = op.mem();
let base = self.reg_name(mem.base);
let index = self.reg_name(mem.index);
let scale = mem.scale;
let disp = mem.disp;
IOperand::Memory {
base: base,
index: index,
scale: scale,
disp: disp,
}
},
cs::x86_op_type::X86_OP_IMM => IOperand::Immediate(op.imm()),
// FP/XMM/etc. We can handle these later. Right now, we only
// bother about registers (gpr) and memory operands.
_ => IOperand::Other,
};
if i == 0 {
self.info.reads.push(iop);
} else {
self.info.writes.push(iop);
}
}
}
}
Ok(())
}
}
impl InstructionAnalyzer for X86_CS_IA {
fn new(bytes: Vec<u8>) -> Result<X86_CS_IA, IAError> {
let dis = cs::Capstone::new(cs::cs_arch::CS_ARCH_X86, cs::CS_MODE_32).unwrap();
dis.option(cs::cs_opt_type::CS_OPT_DETAIL, cs::cs_opt_value::CS_OPT_ON).unwrap();
let mut ia = X86_CS_IA { bytes: bytes, cs: dis, info: InstructionInfo::default() };
ia.analyze()?;
Ok(ia)
}
fn info(&self) -> Result<&InstructionInfo, IAError> {
Ok(&self.info)
}
}
|
has_memory_written
|
identifier_name
|
instruction_analyzer.rs
|
//! Implements Instruction-level analysis used for lifting when information from radare2 is
//! insufficient.
//!
//! Instruction analysis may be abbreviated as `IA`
use std::borrow::Cow;
use capstone_rust::capstone as cs;
// TODO: Register must be replaced by Register information from arch-rs.
// This will be a part of a bigger rewrite/refactor.
/// Describes operands for an instruction
#[derive(Debug, Clone)]
pub enum IOperand {
Register(String),
Constant(i64),
Immediate(i64),
Memory {
base: Option<String>,
index: Option<String>,
scale: i32,
disp: i64,
},
Other,
Invalid,
}
/// Holds analysis information for a single instruction
#[derive(Debug, Clone, Default)]
pub struct InstructionInfo {
mnemonic: Cow<'static, str>,
reads: Vec<IOperand>,
writes: Vec<IOperand>,
}
/// Errors from Instruction-level Analysis
pub type IAError = Cow<'static, str>;
/// Trait for a struct to be an Instruction-level analyzer
pub trait InstructionAnalyzer: Sized {
/// Construct the IA
fn new(bytes: Vec<u8>) -> Result<Self, IAError>;
/// Return analyzed information
fn info(&self) -> Result<&InstructionInfo, IAError>;
fn mnemonic(&self) -> &Cow<'static, str> {
&self.info().expect("Unable to get InstructionInfo").mnemonic
}
/// Return a `Vec` of registers that are read by this instruction
fn registers_read(&self) -> Vec<&IOperand> {
self.info().expect("Unable to get InstructionInfo").reads.iter().filter(|&x| match x {
&IOperand::Register(_) => true,
_ => false,
}).collect()
}
/// Return a `Vec` of registers that are written to by this instruction
fn registers_written(&self) -> Vec<&IOperand> {
self.info().expect("Unable to get InstructionInfo").writes.iter().filter(|&x| match x {
&IOperand::Register(_) => true,
_ => false,
}).collect()
}
fn has_memory_operand(&self) -> bool {
self.has_memory_read() || self.has_memory_written()
}
/// Return if this instruction reads from memory
fn has_memory_read(&self) -> bool {
self.info().expect("Unable to get InstructionInfo").reads.iter().any(|x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
/// Return if this instruction writes to memory
fn has_memory_written(&self) -> bool {
self.info().expect("Unable to get InstructionInfo").writes.iter().any(|x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
/// Returns memory written to by the instruction, in standard sib form
fn memory_written(&self) -> Option<&IOperand> {
self.info().expect("Unable to get InstructionInfo").writes.iter().find(|&x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
/// Returns memory read by the instruction, in standard sib form
fn memory_read(&self) -> Option<&IOperand> {
self.info().expect("Unable to get InstructionInfo").reads.iter().find(|&x| match x {
&IOperand::Memory { base: _, index: _, scale: _, disp: _ } => true,
_ => false,
})
}
}
/// Capstone-based x86 instruction analyzer
#[allow(non_camel_case_types)]
pub struct X86_CS_IA {
bytes: Vec<u8>,
cs: cs::Capstone,
info: InstructionInfo,
}
//impl From<cs::CsErr> for IAError {
//fn from(err: cs::CsErr) -> IAError {
//Cow::from(err.to_string())
//}
//}
impl X86_CS_IA {
fn reg_name(&self, reg: u32) -> Option<String> {
self.cs.reg_name(reg).map(|x| x.to_owned())
}
|
Err(e) => return Err(Cow::from(e.to_string())),
};
for instr in buf.iter() {
// TODO: Look into prefixes for opcode.
// See how this information is set in capstone.
self.info.mnemonic = Cow::from(instr.mnemonic);
let details = instr.detail.unwrap();
if let cs::DetailsArch::X86(arch) = details.arch {
for i in 0..arch.op_count {
// Get the current operand
let op: cs::cs_x86_op = arch.operands[i as usize];
let iop: IOperand = match op.type_ {
cs::x86_op_type::X86_OP_REG => {
let reg: cs::x86_reg = op.reg();
if let Some(reg_name) = self.reg_name(reg.as_int()) {
IOperand::Register(reg_name)
} else {
IOperand::Invalid
}
},
cs::x86_op_type::X86_OP_MEM => {
let mem: &cs::x86_op_mem = op.mem();
let base = self.reg_name(mem.base);
let index = self.reg_name(mem.index);
let scale = mem.scale;
let disp = mem.disp;
IOperand::Memory {
base: base,
index: index,
scale: scale,
disp: disp,
}
},
cs::x86_op_type::X86_OP_IMM => IOperand::Immediate(op.imm()),
// FP/XMM/etc. We can handle these later. Right now, we only
// bother about registers (gpr) and memory operands.
_ => IOperand::Other,
};
if i == 0 {
self.info.reads.push(iop);
} else {
self.info.writes.push(iop);
}
}
}
}
Ok(())
}
}
impl InstructionAnalyzer for X86_CS_IA {
fn new(bytes: Vec<u8>) -> Result<X86_CS_IA, IAError> {
let dis = cs::Capstone::new(cs::cs_arch::CS_ARCH_X86, cs::CS_MODE_32).unwrap();
dis.option(cs::cs_opt_type::CS_OPT_DETAIL, cs::cs_opt_value::CS_OPT_ON).unwrap();
let mut ia = X86_CS_IA { bytes: bytes, cs: dis, info: InstructionInfo::default() };
ia.analyze()?;
Ok(ia)
}
fn info(&self) -> Result<&InstructionInfo, IAError> {
Ok(&self.info)
}
}
|
fn analyze(&mut self) -> Result<(), IAError> {
        // Disassemble exactly one instruction
let buf = match self.cs.disasm(self.bytes.as_slice(), 0x0, 1) {
Ok(d) => d,
|
random_line_split
|
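Putting the trait from the instruction_analyzer.rs rows above to use, a hypothetical caller would construct the capstone-backed analyzer from raw instruction bytes and query it only through the trait methods shown there:
fn inspect(bytes: Vec<u8>) -> Result<(), IAError> {
    // X86_CS_IA::new runs capstone once and caches the InstructionInfo.
    let ia = X86_CS_IA::new(bytes)?;
    println!("mnemonic: {}", ia.mnemonic());
    println!("reads memory: {}", ia.has_memory_read());
    for reg in ia.registers_read() {
        // IOperand derives Debug, so {:?} is available.
        println!("reads register operand: {:?}", reg);
    }
    Ok(())
}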
struct.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags:-Z extra-debug-info
// debugger:set print pretty off
// debugger:break 29
// debugger:run
// debugger:print pair
// check:$1 = {x = 1, y = 2}
// debugger:print pair.x
// check:$2 = 1
// debugger:print pair.y
// check:$3 = 2
struct Pair {
x: int,
y: int
}
fn main() {
let pair = Pair { x: 1, y: 2 };
let _z = ();
}
|
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
struct.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags:-Z extra-debug-info
// debugger:set print pretty off
// debugger:break 29
// debugger:run
// debugger:print pair
// check:$1 = {x = 1, y = 2}
// debugger:print pair.x
// check:$2 = 1
// debugger:print pair.y
// check:$3 = 2
struct Pair {
x: int,
y: int
}
fn main()
|
{
let pair = Pair { x: 1, y: 2 };
let _z = ();
}
|
identifier_body
|
|
struct.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags:-Z extra-debug-info
// debugger:set print pretty off
// debugger:break 29
// debugger:run
// debugger:print pair
// check:$1 = {x = 1, y = 2}
// debugger:print pair.x
// check:$2 = 1
// debugger:print pair.y
// check:$3 = 2
struct
|
{
x: int,
y: int
}
fn main() {
let pair = Pair { x: 1, y: 2 };
let _z = ();
}
|
Pair
|
identifier_name
|
gravity.rs
|
extern crate nalgebra as na;
extern crate ncollide;
extern crate nphysics;
extern crate nphysics_testbed2d;
use na::{Vec2, Pnt3, Translation};
use ncollide::shape::{Ball, Plane};
use nphysics::world::World;
use nphysics::object::RigidBody;
use nphysics_testbed2d::Testbed;
fn main()
|
*/
let mut rb = RigidBody::new_static(Plane::new(Vec2::new(0.0, -1.0)), 0.3, 0.6);
rb.append_translation(&Vec2::new(0.0, 10.0));
world.add_body(rb);
/*
* Create the balls
*/
let num = 1000usize;
let rad = 0.5;
let shift = 2.5 * rad;
let centerx = shift * (num as f32) / 2.0;
let centery = 2.0;
    for i in (0usize .. num) {
        for j in 0usize .. 2 {
let x = i as f32 * 2.5 * rad - centerx;
let y = j as f32 * 2.5 * rad - centery * 2.0;
let mut rb = RigidBody::new_dynamic(Ball::new(rad), 1.0, 0.3, 0.6);
rb.append_translation(&Vec2::new(x, y));
let color;
if j == 0 {
// invert the gravity for the blue balls.
rb.set_lin_acc_scale(Vec2::new(0.0, -1.0));
color = Pnt3::new(0.0, 0.0, 1.0);
}
else {
// double the gravity for the green balls.
rb.set_lin_acc_scale(Vec2::new(0.0, 2.0));
color = Pnt3::new(0.0, 1.0, 0.0);
}
let body = world.add_body(rb);
testbed.set_color(&body, color);
}
}
/*
* Run the simulation.
*/
testbed.set_world(world);
testbed.run();
}
|
{
let mut testbed = Testbed::new_empty();
/*
* World
*/
let mut world = World::new();
world.set_gravity(Vec2::new(0.0, 9.81));
/*
* First plane
*/
let mut rb = RigidBody::new_static(Plane::new(Vec2::new(0.0, 1.0)), 0.3, 0.6);
rb.append_translation(&Vec2::new(0.0, -10.0));
world.add_body(rb);
/*
* Second plane
|
identifier_body
|
gravity.rs
|
extern crate nalgebra as na;
extern crate ncollide;
extern crate nphysics;
extern crate nphysics_testbed2d;
use na::{Vec2, Pnt3, Translation};
use ncollide::shape::{Ball, Plane};
use nphysics::world::World;
use nphysics::object::RigidBody;
use nphysics_testbed2d::Testbed;
fn
|
() {
let mut testbed = Testbed::new_empty();
/*
* World
*/
let mut world = World::new();
world.set_gravity(Vec2::new(0.0, 9.81));
/*
* First plane
*/
let mut rb = RigidBody::new_static(Plane::new(Vec2::new(0.0, 1.0)), 0.3, 0.6);
rb.append_translation(&Vec2::new(0.0, -10.0));
world.add_body(rb);
/*
* Second plane
*/
let mut rb = RigidBody::new_static(Plane::new(Vec2::new(0.0, -1.0)), 0.3, 0.6);
rb.append_translation(&Vec2::new(0.0, 10.0));
world.add_body(rb);
/*
* Create the balls
*/
let num = 1000usize;
let rad = 0.5;
let shift = 2.5 * rad;
let centerx = shift * (num as f32) / 2.0;
let centery = 2.0;
    for i in (0usize .. num) {
        for j in 0usize .. 2 {
let x = i as f32 * 2.5 * rad - centerx;
let y = j as f32 * 2.5 * rad - centery * 2.0;
let mut rb = RigidBody::new_dynamic(Ball::new(rad), 1.0, 0.3, 0.6);
rb.append_translation(&Vec2::new(x, y));
let color;
if j == 0 {
// invert the gravity for the blue balls.
rb.set_lin_acc_scale(Vec2::new(0.0, -1.0));
color = Pnt3::new(0.0, 0.0, 1.0);
}
else {
// double the gravity for the green balls.
rb.set_lin_acc_scale(Vec2::new(0.0, 2.0));
color = Pnt3::new(0.0, 1.0, 0.0);
}
let body = world.add_body(rb);
testbed.set_color(&body, color);
}
}
/*
* Run the simulation.
*/
testbed.set_world(world);
testbed.run();
}
|
main
|
identifier_name
|
gravity.rs
|
extern crate nalgebra as na;
extern crate ncollide;
extern crate nphysics;
extern crate nphysics_testbed2d;
use na::{Vec2, Pnt3, Translation};
use ncollide::shape::{Ball, Plane};
use nphysics::world::World;
use nphysics::object::RigidBody;
use nphysics_testbed2d::Testbed;
fn main() {
let mut testbed = Testbed::new_empty();
/*
* World
*/
let mut world = World::new();
world.set_gravity(Vec2::new(0.0, 9.81));
/*
* First plane
*/
let mut rb = RigidBody::new_static(Plane::new(Vec2::new(0.0, 1.0)), 0.3, 0.6);
rb.append_translation(&Vec2::new(0.0, -10.0));
world.add_body(rb);
/*
* Second plane
*/
let mut rb = RigidBody::new_static(Plane::new(Vec2::new(0.0, -1.0)), 0.3, 0.6);
rb.append_translation(&Vec2::new(0.0, 10.0));
world.add_body(rb);
/*
* Create the balls
*/
let num = 1000usize;
let rad = 0.5;
|
        for j in 0usize .. 2 {
let x = i as f32 * 2.5 * rad - centerx;
let y = j as f32 * 2.5 * rad - centery * 2.0;
let mut rb = RigidBody::new_dynamic(Ball::new(rad), 1.0, 0.3, 0.6);
rb.append_translation(&Vec2::new(x, y));
let color;
if j == 0 {
// invert the gravity for the blue balls.
rb.set_lin_acc_scale(Vec2::new(0.0, -1.0));
color = Pnt3::new(0.0, 0.0, 1.0);
}
else {
// double the gravity for the green balls.
rb.set_lin_acc_scale(Vec2::new(0.0, 2.0));
color = Pnt3::new(0.0, 1.0, 0.0);
}
let body = world.add_body(rb);
testbed.set_color(&body, color);
}
}
/*
* Run the simulation.
*/
testbed.set_world(world);
testbed.run();
}
|
let shift = 2.5 * rad;
let centerx = shift * (num as f32) / 2.0;
let centery = 2.0;
for i in (0usize .. num) {
|
random_line_split
|
gravity.rs
|
extern crate nalgebra as na;
extern crate ncollide;
extern crate nphysics;
extern crate nphysics_testbed2d;
use na::{Vec2, Pnt3, Translation};
use ncollide::shape::{Ball, Plane};
use nphysics::world::World;
use nphysics::object::RigidBody;
use nphysics_testbed2d::Testbed;
fn main() {
let mut testbed = Testbed::new_empty();
/*
* World
*/
let mut world = World::new();
world.set_gravity(Vec2::new(0.0, 9.81));
/*
* First plane
*/
let mut rb = RigidBody::new_static(Plane::new(Vec2::new(0.0, 1.0)), 0.3, 0.6);
rb.append_translation(&Vec2::new(0.0, -10.0));
world.add_body(rb);
/*
* Second plane
*/
let mut rb = RigidBody::new_static(Plane::new(Vec2::new(0.0, -1.0)), 0.3, 0.6);
rb.append_translation(&Vec2::new(0.0, 10.0));
world.add_body(rb);
/*
* Create the balls
*/
let num = 1000usize;
let rad = 0.5;
let shift = 2.5 * rad;
let centerx = shift * (num as f32) / 2.0;
let centery = 2.0;
    for i in (0usize .. num) {
        for j in 0usize .. 2 {
let x = i as f32 * 2.5 * rad - centerx;
let y = j as f32 * 2.5 * rad - centery * 2.0;
let mut rb = RigidBody::new_dynamic(Ball::new(rad), 1.0, 0.3, 0.6);
rb.append_translation(&Vec2::new(x, y));
let color;
if j == 0 {
// invert the gravity for the blue balls.
rb.set_lin_acc_scale(Vec2::new(0.0, -1.0));
color = Pnt3::new(0.0, 0.0, 1.0);
}
else
|
let body = world.add_body(rb);
testbed.set_color(&body, color);
}
}
/*
* Run the simulation.
*/
testbed.set_world(world);
testbed.run();
}
|
{
// double the gravity for the green balls.
rb.set_lin_acc_scale(Vec2::new(0.0, 2.0));
color = Pnt3::new(0.0, 1.0, 0.0);
}
|
conditional_block
|
mod.rs
|
use std::io;
use cfg_if::cfg_if;
use socket2::Socket;
cfg_if! {
if #[cfg(unix)] {
mod unix;
pub use self::unix::*;
}
}
#[cfg(unix)]
pub fn
|
<S>(socket: &S, ipv6_only: bool) -> io::Result<()>
where
S: std::os::unix::io::AsRawFd,
{
use std::os::unix::io::{FromRawFd, IntoRawFd};
let fd = socket.as_raw_fd();
let sock = unsafe { Socket::from_raw_fd(fd) };
let result = sock.set_only_v6(ipv6_only);
sock.into_raw_fd();
result
}
#[cfg(windows)]
pub fn set_ipv6_only<S>(socket: &S, ipv6_only: bool) -> io::Result<()>
where
S: std::os::windows::io::AsRawSocket,
{
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
let handle = socket.as_raw_socket();
let sock = unsafe { Socket::from_raw_socket(handle) };
let result = sock.set_only_v6(ipv6_only);
sock.into_raw_socket();
result
}
|
set_ipv6_only
|
identifier_name
|
mod.rs
|
use std::io;
use cfg_if::cfg_if;
use socket2::Socket;
cfg_if! {
if #[cfg(unix)] {
mod unix;
pub use self::unix::*;
}
}
#[cfg(unix)]
pub fn set_ipv6_only<S>(socket: &S, ipv6_only: bool) -> io::Result<()>
where
S: std::os::unix::io::AsRawFd,
{
use std::os::unix::io::{FromRawFd, IntoRawFd};
let fd = socket.as_raw_fd();
let sock = unsafe { Socket::from_raw_fd(fd) };
let result = sock.set_only_v6(ipv6_only);
sock.into_raw_fd();
|
#[cfg(windows)]
pub fn set_ipv6_only<S>(socket: &S, ipv6_only: bool) -> io::Result<()>
where
S: std::os::windows::io::AsRawSocket,
{
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
let handle = socket.as_raw_socket();
let sock = unsafe { Socket::from_raw_socket(handle) };
let result = sock.set_only_v6(ipv6_only);
sock.into_raw_socket();
result
}
|
result
}
|
random_line_split
|
mod.rs
|
use std::io;
use cfg_if::cfg_if;
use socket2::Socket;
cfg_if! {
if #[cfg(unix)] {
mod unix;
pub use self::unix::*;
}
}
#[cfg(unix)]
pub fn set_ipv6_only<S>(socket: &S, ipv6_only: bool) -> io::Result<()>
where
S: std::os::unix::io::AsRawFd,
|
#[cfg(windows)]
pub fn set_ipv6_only<S>(socket: &S, ipv6_only: bool) -> io::Result<()>
where
S: std::os::windows::io::AsRawSocket,
{
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
let handle = socket.as_raw_socket();
let sock = unsafe { Socket::from_raw_socket(handle) };
let result = sock.set_only_v6(ipv6_only);
sock.into_raw_socket();
result
}
|
{
use std::os::unix::io::{FromRawFd, IntoRawFd};
let fd = socket.as_raw_fd();
let sock = unsafe { Socket::from_raw_fd(fd) };
let result = sock.set_only_v6(ipv6_only);
sock.into_raw_fd();
result
}
|
identifier_body
|
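Finally, a call-shape sketch for the `set_ipv6_only` helper in the mod.rs rows above: on Unix, `std::net::UdpSocket` implements `AsRawFd`, so it satisfies the generic bound. Whether IPV6_V6ONLY can still be changed on an already-bound socket is platform-dependent, so treat this purely as an illustration of how the helper is called.
use std::io;
use std::net::UdpSocket;
fn make_dual_stack(socket: &UdpSocket) -> io::Result<()> {
    // Clearing IPV6_V6ONLY asks the socket to accept IPv4-mapped traffic too.
    set_ipv6_only(socket, false)
}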