| file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
client.rs | //! Handle network connections for a varlink service
#![allow(dead_code)]
use std::net::TcpStream;
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, IntoRawFd};
#[cfg(unix)]
use std::os::unix::net::UnixStream;
use std::process::Child;
#[cfg(unix)]
use libc::{close, dup2, getpid};
use tempfile::TempDir;
#[cfg(windows)]
use uds_windows::UnixStream;
use crate::error::*;
use crate::stream::Stream;
#[allow(clippy::try_err)]
pub fn varlink_connect<S: ?Sized + AsRef<str>>(address: &S) -> Result<(Box<dyn Stream>, String)> {
let address = address.as_ref();
let new_address: String = address.into();
if let Some(addr) = new_address.strip_prefix("tcp:") {
Ok((
Box::new(TcpStream::connect(&addr).map_err(map_context!())?),
new_address,
))
} else if let Some(addr) = new_address.strip_prefix("unix:") | else {
Err(context!(ErrorKind::InvalidAddress))?
}
}
#[cfg(any(target_os = "linux", target_os = "android"))]
fn get_abstract_unixstream(addr: &str) -> Result<UnixStream> {
// FIXME: abstract unix domains sockets still not in std
// FIXME: https://github.com/rust-lang/rust/issues/14194
use std::os::unix::io::FromRawFd;
use unix_socket::UnixStream as AbstractStream;
unsafe {
Ok(UnixStream::from_raw_fd(
AbstractStream::connect(addr)
.map_err(map_context!())?
.into_raw_fd(),
))
}
}
#[cfg(not(any(target_os = "linux", target_os = "android")))]
fn get_abstract_unixstream(_addr: &str) -> Result<UnixStream> {
Err(context!(ErrorKind::InvalidAddress))
}
#[cfg(windows)]
pub fn varlink_exec<S: ?Sized + AsRef<str>>(
_address: &S,
) -> Result<(Child, String, Option<TempDir>)> {
Err(context!(ErrorKind::MethodNotImplemented(
"varlink_exec".into()
)))
}
#[cfg(unix)]
pub fn varlink_exec<S: ?Sized + AsRef<str>>(
address: &S,
) -> Result<(Child, String, Option<TempDir>)> {
use std::env;
use std::os::unix::process::CommandExt;
use std::process::Command;
use tempfile::tempdir;
let executable = String::from("exec ") + address.as_ref();
use unix_socket::UnixListener;
let dir = tempdir().map_err(map_context!())?;
let file_path = dir.path().join("varlink-socket");
let listener = UnixListener::bind(file_path.clone()).map_err(map_context!())?;
let fd = listener.as_raw_fd();
let child = unsafe {
Command::new("sh")
.arg("-c")
.arg(executable)
.pre_exec({
let file_path = file_path.clone();
move || {
dup2(2, 1);
if fd != 3 {
dup2(fd, 3);
close(fd);
}
env::set_var("VARLINK_ADDRESS", format!("unix:{}", file_path.display()));
env::set_var("LISTEN_FDS", "1");
env::set_var("LISTEN_FDNAMES", "varlink");
env::set_var("LISTEN_PID", format!("{}", getpid()));
Ok(())
}
})
.spawn()
.map_err(map_context!())?
};
Ok((child, format!("unix:{}", file_path.display()), Some(dir)))
}
#[cfg(windows)]
pub fn varlink_bridge<S: ?Sized + AsRef<str>>(address: &S) -> Result<(Child, Box<dyn Stream>)> {
use std::io::copy;
use std::process::{Command, Stdio};
use std::thread;
let (stream0, stream1) = UnixStream::pair().map_err(map_context!())?;
let executable = address.as_ref();
let mut child = Command::new("cmd")
.arg("/C")
.arg(executable)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
.map_err(map_context!())?;
let mut client_writer = child.stdin.take().unwrap();
let mut client_reader = child.stdout.take().unwrap();
let mut service_writer = stream1.try_clone().map_err(map_context!())?;
let mut service_reader = stream1;
thread::spawn(move || copy(&mut client_reader, &mut service_writer));
thread::spawn(move || copy(&mut service_reader, &mut client_writer));
Ok((child, Box::new(stream0)))
}
#[cfg(unix)]
pub fn varlink_bridge<S: ?Sized + AsRef<str>>(address: &S) -> Result<(Child, Box<dyn Stream>)> {
use std::os::unix::io::FromRawFd;
use std::process::Command;
let executable = address.as_ref();
// use unix_socket::UnixStream;
let (stream0, stream1) = UnixStream::pair().map_err(map_context!())?;
let fd = stream1.into_raw_fd();
let childin = unsafe { ::std::fs::File::from_raw_fd(fd) };
let childout = unsafe { ::std::fs::File::from_raw_fd(fd) };
let child = Command::new("sh")
.arg("-c")
.arg(executable)
.stdin(childin)
.stdout(childout)
.spawn()
.map_err(map_context!())?;
Ok((child, Box::new(stream0)))
}
| {
let mut addr = String::from(addr.split(';').next().unwrap());
if addr.starts_with('@') {
addr = addr.replacen('@', "\0", 1);
return get_abstract_unixstream(&addr)
.map(|v| (Box::new(v) as Box<dyn Stream>, new_address));
}
Ok((
Box::new(UnixStream::connect(addr).map_err(map_context!())?),
new_address,
))
} | conditional_block |
run_command.rs | extern crate std;
extern crate hostname;
extern crate glob;
use std::str;
use std::ffi::OsString; // Probably want OsStr in a few places
use std::path::Path;
use std::process::Command;
use std::fs;
use state::ShellState;
impl ShellState {
pub fn run_command(&self, command: &str, args: std::str::SplitWhitespace) | }
if command == "ls" || command == "grep" {
expanded_args.push(OsString::from("--color=auto"));
}
if Path::new(command).is_file() {
match Command::new(Path::new(command))
.args(expanded_args)
.current_dir(self.variables.get("PWD").unwrap().clone())
.spawn() {
Ok(mut child) => {
child.wait().unwrap();
()
} // This should be an unwrap_or_else
Err(_) => println!("command failed to launch: {}", command),
};
return;
}
let path = self.variables.get("PATH").unwrap().clone();
for entries in path.into_string()
.unwrap()
.split(':')
.map(|dir| fs::read_dir(Path::new(dir)))
.filter_map(|e| e.ok())
{
// loop over the iterator of every directory in PATH that's possible to read
for dir_entry in entries
.filter_map(|e| e.ok()) // Only entries that are ok
.filter(|e| &e.file_name() == command)
{
// Check if entry filename matches
match Command::new(dir_entry.path())
.args(expanded_args)
.current_dir(self.variables.get("PWD").unwrap().clone())
.spawn() {
Ok(mut child) => {
child.wait().unwrap();
()
} // This should be an unwrap_or_else
Err(_) => println!("command failed to launch: {}", command),
};
return;
}
}
println!("command not found: {}", command);
}
}
| {
// Very crude glob support
let mut expanded_args = Vec::new();
for arg in args {
if !arg.contains('*') {
expanded_args.push(OsString::from(arg));
continue;
}
let mut pattern = self.variables.get("PWD").unwrap().clone();
pattern.push(arg);
match glob::glob(pattern.to_str().unwrap()) {
Ok(result_iter) => {
for entry in result_iter.filter_map(|e| e.ok()) {
expanded_args.push(entry.as_os_str().to_owned());
}
}
Err(..) => expanded_args.push(OsString::from(arg)),
} | identifier_body |
run_command.rs | extern crate std;
extern crate hostname;
extern crate glob;
use std::str;
use std::ffi::OsString; // Probably want OsStr in a few places
use std::path::Path;
use std::process::Command;
use std::fs;
use state::ShellState;
impl ShellState {
pub fn | (&self, command: &str, args: std::str::SplitWhitespace) {
// Very crude glob support
let mut expanded_args = Vec::new();
for arg in args {
if !arg.contains('*') {
expanded_args.push(OsString::from(arg));
continue;
}
let mut pattern = self.variables.get("PWD").unwrap().clone();
pattern.push(arg);
match glob::glob(pattern.to_str().unwrap()) {
Ok(result_iter) => {
for entry in result_iter.filter_map(|e| e.ok()) {
expanded_args.push(entry.as_os_str().to_owned());
}
}
Err(..) => expanded_args.push(OsString::from(arg)),
}
}
if command == "ls" || command == "grep" {
expanded_args.push(OsString::from("--color=auto"));
}
if Path::new(command).is_file() {
match Command::new(Path::new(command))
.args(expanded_args)
.current_dir(self.variables.get("PWD").unwrap().clone())
.spawn() {
Ok(mut child) => {
child.wait().unwrap();
()
} // This should be an unwrap_or_else
Err(_) => println!("command failed to launch: {}", command),
};
return;
}
let path = self.variables.get("PATH").unwrap().clone();
for entries in path.into_string()
.unwrap()
.split(':')
.map(|dir| fs::read_dir(Path::new(dir)))
.filter_map(|e| e.ok())
{
// loop over the iterator of every directory in PATH that's possible to read
for dir_entry in entries
.filter_map(|e| e.ok()) // Only entries that are ok
.filter(|e| &e.file_name() == command)
{
// Check if entry filename matches
match Command::new(dir_entry.path())
.args(expanded_args)
.current_dir(self.variables.get("PWD").unwrap().clone())
.spawn() {
Ok(mut child) => {
child.wait().unwrap();
()
} // This should be an unwrap_or_else
Err(_) => println!("command failed to launch: {}", command),
};
return;
}
}
println!("command not found: {}", command);
}
}
| run_command | identifier_name |
run_command.rs | extern crate std;
extern crate hostname;
extern crate glob;
use std::str;
use std::ffi::OsString; // Probably want OsStr in a few places
use std::path::Path;
use std::process::Command;
use std::fs;
use state::ShellState;
impl ShellState {
pub fn run_command(&self, command: &str, args: std::str::SplitWhitespace) {
// Very crude glob support
let mut expanded_args = Vec::new();
for arg in args {
if !arg.contains('*') {
expanded_args.push(OsString::from(arg));
continue;
}
let mut pattern = self.variables.get("PWD").unwrap().clone();
pattern.push(arg);
match glob::glob(pattern.to_str().unwrap()) {
Ok(result_iter) => {
for entry in result_iter.filter_map(|e| e.ok()) {
expanded_args.push(entry.as_os_str().to_owned());
}
}
Err(..) => expanded_args.push(OsString::from(arg)),
}
}
if command == "ls" || command == "grep" {
expanded_args.push(OsString::from("--color=auto"));
}
| .current_dir(self.variables.get("PWD").unwrap().clone())
.spawn() {
Ok(mut child) => {
child.wait().unwrap();
()
} // This should be an unwrap_or_else
Err(_) => println!("command failed to launch: {}", command),
};
return;
}
let path = self.variables.get("PATH").unwrap().clone();
for entries in path.into_string()
.unwrap()
.split(':')
.map(|dir| fs::read_dir(Path::new(dir)))
.filter_map(|e| e.ok())
{
// loop over the iterator of every directory in PATH that's possible to read
for dir_entry in entries
.filter_map(|e| e.ok()) // Only entries that are ok
.filter(|e| &e.file_name() == command)
{
// Check if entry filename matches
match Command::new(dir_entry.path())
.args(expanded_args)
.current_dir(self.variables.get("PWD").unwrap().clone())
.spawn() {
Ok(mut child) => {
child.wait().unwrap();
()
} // This should be an unwrap_or_else
Err(_) => println!("command failed to launch: {}", command),
};
return;
}
}
println!("command not found: {}", command);
}
} | if Path::new(command).is_file() {
match Command::new(Path::new(command))
.args(expanded_args) | random_line_split |
lights.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Module that implements lights for `PhilipsHueAdapter`
//!
//! This module implements AdapterManager-facing functionality.
//! It registers a service for every light and adds setters and
//! getters according to the light type.
use foxbox_taxonomy::api::Error;
use foxbox_taxonomy::manager::*;
use foxbox_taxonomy::services::*;
use foxbox_taxonomy::values::{ Type };
use super::*;
use super::hub_api::HubApi;
use std::collections::HashSet;
use std::sync::Arc;
use traits::Controller;
const CUSTOM_PROPERTY_MANUFACTURER: &'static str = "manufacturer";
const CUSTOM_PROPERTY_MODEL: &'static str = "model";
const CUSTOM_PROPERTY_NAME: &'static str = "name";
const CUSTOM_PROPERTY_TYPE: &'static str = "type";
#[derive(Clone)]
pub struct Light {
api: Arc<HubApi>,
hub_id: String,
light_id: String,
service_id: Id<ServiceId>,
pub get_available_id: Id<Getter>,
pub get_power_id: Id<Getter>,
pub set_power_id: Id<Setter>,
}
impl Light {
pub fn new(api: Arc<HubApi>, hub_id: &str, light_id: &str)
-> Self
{
Light {
api: api,
hub_id: hub_id.to_owned(),
light_id: light_id.to_owned(),
service_id: create_light_id(&hub_id, &light_id),
get_available_id: create_getter_id("available", &hub_id, &light_id),
get_power_id: create_getter_id("power", &hub_id, &light_id),
set_power_id: create_setter_id("power", &hub_id, &light_id),
}
}
pub fn | (&self) {
// Nothing to do, yet
}
pub fn stop(&self) {
// Nothing to do, yet
}
pub fn init_service(&mut self, manager: Arc<AdapterManager>,
services: LightServiceMap) -> Result<(), Error>
{
let adapter_id = create_adapter_id();
let status = self.api.get_light_status(&self.light_id);
if status.lighttype == "Extended color light" {
info!("New Philips Hue Extended Color Light service for light {} on bridge {}",
self.light_id, self.hub_id);
let mut service = Service::empty(self.service_id.clone(), adapter_id.clone());
service.properties.insert(CUSTOM_PROPERTY_MANUFACTURER.to_owned(),
status.manufacturername.to_owned());
service.properties.insert(CUSTOM_PROPERTY_MODEL.to_owned(),
status.modelid.to_owned());
service.properties.insert(CUSTOM_PROPERTY_NAME.to_owned(),
status.name.to_owned());
service.properties.insert(CUSTOM_PROPERTY_TYPE.to_owned(),
"Light/ColorLight".to_owned());
service.tags.insert(tag_id!("type:Light/ColorLight"));
try!(manager.add_service(service));
// The `available` getter yields `On` when the light
// is plugged in and `Off` when it is not. Availability
// Has no effect on the API other than that you won't
// see the light change because it lacks external power.
try!(manager.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.get_available_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Getter {
kind: ChannelKind::Extension {
vendor: Id::new("[email protected]"),
adapter: Id::new("Philips Hue Adapter"),
kind: Id::new("available"),
typ: Type::OnOff,
},
updated: None,
},
}));
try!(manager.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.get_power_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Getter {
kind: ChannelKind::LightOn,
updated: None,
},
}));
try!(manager.add_setter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.set_power_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Setter {
kind: ChannelKind::LightOn,
updated: None,
},
}));
let mut services_lock = services.lock().unwrap();
services_lock.getters.insert(self.get_available_id.clone(), self.clone());
services_lock.getters.insert(self.get_power_id.clone(), self.clone());
services_lock.setters.insert(self.set_power_id.clone(), self.clone());
} else {
warn!("Ignoring unsupported Hue light type {}, ID {} on bridge {}",
status.lighttype, self.light_id, self.hub_id);
}
Ok(())
}
pub fn get_available(&self) -> bool {
let status = self.api.get_light_status(&self.light_id);
status.state.reachable
}
pub fn get_power(&self) -> bool {
let status = self.api.get_light_status(&self.light_id);
status.state.on
}
pub fn set_power(&self, on: bool) {
self.api.set_light_power(&self.light_id, on);
}
}
| start | identifier_name |
lights.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Module that implements lights for `PhilipsHueAdapter`
//!
//! This module implements AdapterManager-facing functionality.
//! It registers a service for every light and adds setters and
//! getters according to the light type.
use foxbox_taxonomy::api::Error;
use foxbox_taxonomy::manager::*;
use foxbox_taxonomy::services::*;
use foxbox_taxonomy::values::{ Type };
use super::*;
use super::hub_api::HubApi;
use std::collections::HashSet;
use std::sync::Arc;
use traits::Controller;
const CUSTOM_PROPERTY_MANUFACTURER: &'static str = "manufacturer";
const CUSTOM_PROPERTY_MODEL: &'static str = "model";
const CUSTOM_PROPERTY_NAME: &'static str = "name";
const CUSTOM_PROPERTY_TYPE: &'static str = "type";
#[derive(Clone)]
pub struct Light {
api: Arc<HubApi>,
hub_id: String,
light_id: String,
service_id: Id<ServiceId>,
pub get_available_id: Id<Getter>,
pub get_power_id: Id<Getter>,
pub set_power_id: Id<Setter>,
}
impl Light {
pub fn new(api: Arc<HubApi>, hub_id: &str, light_id: &str)
-> Self
{
Light {
api: api,
hub_id: hub_id.to_owned(),
light_id: light_id.to_owned(),
service_id: create_light_id(&hub_id, &light_id),
get_available_id: create_getter_id("available", &hub_id, &light_id),
get_power_id: create_getter_id("power", &hub_id, &light_id),
set_power_id: create_setter_id("power", &hub_id, &light_id),
}
}
pub fn start(&self) {
// Nothing to do, yet
}
pub fn stop(&self) {
// Nothing to do, yet
}
pub fn init_service(&mut self, manager: Arc<AdapterManager>,
services: LightServiceMap) -> Result<(), Error>
{
let adapter_id = create_adapter_id();
let status = self.api.get_light_status(&self.light_id);
if status.lighttype == "Extended color light" | try!(manager.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.get_available_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Getter {
kind: ChannelKind::Extension {
vendor: Id::new("[email protected]"),
adapter: Id::new("Philips Hue Adapter"),
kind: Id::new("available"),
typ: Type::OnOff,
},
updated: None,
},
}));
try!(manager.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.get_power_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Getter {
kind: ChannelKind::LightOn,
updated: None,
},
}));
try!(manager.add_setter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.set_power_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Setter {
kind: ChannelKind::LightOn,
updated: None,
},
}));
let mut services_lock = services.lock().unwrap();
services_lock.getters.insert(self.get_available_id.clone(), self.clone());
services_lock.getters.insert(self.get_power_id.clone(), self.clone());
services_lock.setters.insert(self.set_power_id.clone(), self.clone());
}
else {
warn!("Ignoring unsupported Hue light type {}, ID {} on bridge {}",
status.lighttype, self.light_id, self.hub_id);
}
Ok(())
}
pub fn get_available(&self) -> bool {
let status = self.api.get_light_status(&self.light_id);
status.state.reachable
}
pub fn get_power(&self) -> bool {
let status = self.api.get_light_status(&self.light_id);
status.state.on
}
pub fn set_power(&self, on: bool) {
self.api.set_light_power(&self.light_id, on);
}
}
| {
info!("New Philips Hue Extended Color Light service for light {} on bridge {}",
self.light_id, self.hub_id);
let mut service = Service::empty(self.service_id.clone(), adapter_id.clone());
service.properties.insert(CUSTOM_PROPERTY_MANUFACTURER.to_owned(),
status.manufacturername.to_owned());
service.properties.insert(CUSTOM_PROPERTY_MODEL.to_owned(),
status.modelid.to_owned());
service.properties.insert(CUSTOM_PROPERTY_NAME.to_owned(),
status.name.to_owned());
service.properties.insert(CUSTOM_PROPERTY_TYPE.to_owned(),
"Light/ColorLight".to_owned());
service.tags.insert(tag_id!("type:Light/ColorLight"));
try!(manager.add_service(service));
// The `available` getter yields `On` when the light
// is plugged in and `Off` when it is not. Availability
// Has no effect on the API other than that you won't
// see the light change because it lacks external power. | conditional_block |
lights.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Module that implements lights for `PhilipsHueAdapter`
//!
//! This module implements AdapterManager-facing functionality.
//! It registers a service for every light and adds setters and
//! getters according to the light type.
use foxbox_taxonomy::api::Error;
use foxbox_taxonomy::manager::*;
use foxbox_taxonomy::services::*;
use foxbox_taxonomy::values::{ Type };
use super::*;
use super::hub_api::HubApi;
use std::collections::HashSet;
use std::sync::Arc;
use traits::Controller;
const CUSTOM_PROPERTY_MANUFACTURER: &'static str = "manufacturer";
const CUSTOM_PROPERTY_MODEL: &'static str = "model";
const CUSTOM_PROPERTY_NAME: &'static str = "name";
const CUSTOM_PROPERTY_TYPE: &'static str = "type";
#[derive(Clone)]
pub struct Light {
api: Arc<HubApi>,
hub_id: String,
light_id: String,
service_id: Id<ServiceId>,
pub get_available_id: Id<Getter>,
pub get_power_id: Id<Getter>,
pub set_power_id: Id<Setter>,
}
impl Light {
pub fn new(api: Arc<HubApi>, hub_id: &str, light_id: &str)
-> Self
{
Light {
api: api,
hub_id: hub_id.to_owned(),
light_id: light_id.to_owned(),
service_id: create_light_id(&hub_id, &light_id),
get_available_id: create_getter_id("available", &hub_id, &light_id),
get_power_id: create_getter_id("power", &hub_id, &light_id),
set_power_id: create_setter_id("power", &hub_id, &light_id),
}
}
pub fn start(&self) {
// Nothing to do, yet
}
pub fn stop(&self) {
// Nothing to do, yet
}
pub fn init_service(&mut self, manager: Arc<AdapterManager>,
services: LightServiceMap) -> Result<(), Error>
{
let adapter_id = create_adapter_id();
let status = self.api.get_light_status(&self.light_id);
if status.lighttype == "Extended color light" {
info!("New Philips Hue Extended Color Light service for light {} on bridge {}",
self.light_id, self.hub_id);
let mut service = Service::empty(self.service_id.clone(), adapter_id.clone());
service.properties.insert(CUSTOM_PROPERTY_MANUFACTURER.to_owned(),
status.manufacturername.to_owned());
service.properties.insert(CUSTOM_PROPERTY_MODEL.to_owned(),
status.modelid.to_owned());
service.properties.insert(CUSTOM_PROPERTY_NAME.to_owned(),
status.name.to_owned());
service.properties.insert(CUSTOM_PROPERTY_TYPE.to_owned(),
"Light/ColorLight".to_owned());
service.tags.insert(tag_id!("type:Light/ColorLight"));
try!(manager.add_service(service));
// The `available` getter yields `On` when the light
// is plugged in and `Off` when it is not. Availability
// Has no effect on the API other than that you won't
// see the light change because it lacks external power.
try!(manager.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.get_available_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Getter {
kind: ChannelKind::Extension {
vendor: Id::new("[email protected]"),
adapter: Id::new("Philips Hue Adapter"),
kind: Id::new("available"),
typ: Type::OnOff,
},
updated: None,
},
}));
try!(manager.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.get_power_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Getter {
kind: ChannelKind::LightOn,
updated: None,
},
}));
try!(manager.add_setter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.set_power_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Setter {
kind: ChannelKind::LightOn,
updated: None,
},
}));
let mut services_lock = services.lock().unwrap();
services_lock.getters.insert(self.get_available_id.clone(), self.clone());
services_lock.getters.insert(self.get_power_id.clone(), self.clone());
services_lock.setters.insert(self.set_power_id.clone(), self.clone());
} else {
warn!("Ignoring unsupported Hue light type {}, ID {} on bridge {}",
status.lighttype, self.light_id, self.hub_id);
}
Ok(())
}
pub fn get_available(&self) -> bool {
let status = self.api.get_light_status(&self.light_id);
status.state.reachable
}
pub fn get_power(&self) -> bool {
let status = self.api.get_light_status(&self.light_id);
status.state.on
}
pub fn set_power(&self, on: bool) |
}
| {
self.api.set_light_power(&self.light_id, on);
} | identifier_body |
lights.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Module that implements lights for `PhilipsHueAdapter`
//!
//! This module implements AdapterManager-facing functionality.
//! It registers a service for every light and adds setters and
//! getters according to the light type.
use foxbox_taxonomy::api::Error;
use foxbox_taxonomy::manager::*;
use foxbox_taxonomy::services::*;
use foxbox_taxonomy::values::{ Type };
use super::*;
use super::hub_api::HubApi;
use std::collections::HashSet;
use std::sync::Arc;
use traits::Controller;
const CUSTOM_PROPERTY_MANUFACTURER: &'static str = "manufacturer";
const CUSTOM_PROPERTY_MODEL: &'static str = "model";
const CUSTOM_PROPERTY_NAME: &'static str = "name";
const CUSTOM_PROPERTY_TYPE: &'static str = "type";
#[derive(Clone)]
pub struct Light {
api: Arc<HubApi>,
hub_id: String,
light_id: String,
service_id: Id<ServiceId>,
pub get_available_id: Id<Getter>,
pub get_power_id: Id<Getter>,
pub set_power_id: Id<Setter>,
}
impl Light {
pub fn new(api: Arc<HubApi>, hub_id: &str, light_id: &str)
-> Self
{
Light {
api: api,
hub_id: hub_id.to_owned(),
light_id: light_id.to_owned(),
service_id: create_light_id(&hub_id, &light_id),
get_available_id: create_getter_id("available", &hub_id, &light_id),
get_power_id: create_getter_id("power", &hub_id, &light_id),
set_power_id: create_setter_id("power", &hub_id, &light_id),
}
}
pub fn start(&self) {
// Nothing to do, yet
}
pub fn stop(&self) {
// Nothing to do, yet
}
pub fn init_service(&mut self, manager: Arc<AdapterManager>,
services: LightServiceMap) -> Result<(), Error>
{
let adapter_id = create_adapter_id();
let status = self.api.get_light_status(&self.light_id);
if status.lighttype == "Extended color light" {
info!("New Philips Hue Extended Color Light service for light {} on bridge {}",
self.light_id, self.hub_id);
let mut service = Service::empty(self.service_id.clone(), adapter_id.clone());
service.properties.insert(CUSTOM_PROPERTY_MANUFACTURER.to_owned(),
status.manufacturername.to_owned());
service.properties.insert(CUSTOM_PROPERTY_MODEL.to_owned(),
status.modelid.to_owned());
service.properties.insert(CUSTOM_PROPERTY_NAME.to_owned(),
status.name.to_owned());
service.properties.insert(CUSTOM_PROPERTY_TYPE.to_owned(),
"Light/ColorLight".to_owned());
service.tags.insert(tag_id!("type:Light/ColorLight"));
try!(manager.add_service(service));
// The `available` getter yields `On` when the light
// is plugged in and `Off` when it is not. Availability
// Has no effect on the API other than that you won't
// see the light change because it lacks external power.
try!(manager.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.get_available_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Getter {
kind: ChannelKind::Extension {
vendor: Id::new("[email protected]"),
adapter: Id::new("Philips Hue Adapter"),
kind: Id::new("available"),
typ: Type::OnOff,
},
updated: None,
},
}));
try!(manager.add_getter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.get_power_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Getter {
kind: ChannelKind::LightOn,
updated: None,
},
}));
try!(manager.add_setter(Channel {
tags: HashSet::new(),
adapter: adapter_id.clone(),
id: self.set_power_id.clone(),
last_seen: None,
service: self.service_id.clone(),
mechanism: Setter {
kind: ChannelKind::LightOn,
updated: None,
},
}));
let mut services_lock = services.lock().unwrap();
services_lock.getters.insert(self.get_available_id.clone(), self.clone());
services_lock.getters.insert(self.get_power_id.clone(), self.clone());
services_lock.setters.insert(self.set_power_id.clone(), self.clone());
} else {
warn!("Ignoring unsupported Hue light type {}, ID {} on bridge {}",
status.lighttype, self.light_id, self.hub_id);
}
Ok(())
}
pub fn get_available(&self) -> bool {
let status = self.api.get_light_status(&self.light_id);
status.state.reachable
}
pub fn get_power(&self) -> bool {
let status = self.api.get_light_status(&self.light_id);
status.state.on
}
pub fn set_power(&self, on: bool) {
self.api.set_light_power(&self.light_id, on); | } | }
| random_line_split |
complex.rs | use std::f64;
use std::ops::Add;
struct Complex {
real:i32,
imag:i32,
}
// operator overloading for Complex type
impl Add for Complex {
type Output = Complex;
fn add(self,other:Complex) -> Complex { | }
}
impl Complex {
fn new(x:i32,y:i32) -> Self {
Complex {real:x,imag:y}
}
fn to_string(&self) -> String {
if self.imag < 0 {
format!("{}{}i",self.real,self.imag)
} else {
format!("{}+{}i",self.real,self.imag)
}
}
fn times_ten(&mut self) {
self.real *=10;
self.imag *=10;
}
fn abs(&self) -> f64 {
f64::floor(f64::sqrt((self.real*self.real) as f64 + (self.imag*self.imag) as f64))
}
}
fn main() {
let comp1 = Complex::new(5,3);
let comp2 = Complex::new(8,-8);
let mut summation = comp1 + comp2;
println!("{}",summation.to_string());
summation.times_ten();
//println!("10x -> {:?}",summation.to_string());
println!("Abs({}) => {}",summation.to_string(),summation.abs());
} | Complex { real:self.real + other.real,imag:self.imag+other.imag } | random_line_split |
complex.rs | use std::f64;
use std::ops::Add;
struct Complex {
real:i32,
imag:i32,
}
// operator overloading for Complex type
impl Add for Complex {
type Output = Complex;
fn add(self,other:Complex) -> Complex {
Complex { real:self.real + other.real,imag:self.imag+other.imag }
}
}
impl Complex {
fn new(x:i32,y:i32) -> Self {
Complex {real:x,imag:y}
}
fn to_string(&self) -> String {
if self.imag < 0 {
format!("{}{}i",self.real,self.imag)
} else |
}
fn times_ten(&mut self) {
self.real *=10;
self.imag *=10;
}
fn abs(&self) -> f64 {
f64::floor(f64::sqrt((self.real*self.real) as f64 + (self.imag*self.imag) as f64))
}
}
fn main() {
let comp1 = Complex::new(5,3);
let comp2 = Complex::new(8,-8);
let mut summation = comp1 + comp2;
println!("{}",summation.to_string());
summation.times_ten();
//println!("10x -> {:?}",summation.to_string());
println!("Abs({}) => {}",summation.to_string(),summation.abs());
} | {
format!("{}+{}i",self.real,self.imag)
} | conditional_block |
complex.rs | use std::f64;
use std::ops::Add;
struct | {
real:i32,
imag:i32,
}
// operator overloading for Complex type
impl Add for Complex {
type Output = Complex;
fn add(self,other:Complex) -> Complex {
Complex { real:self.real + other.real,imag:self.imag+other.imag }
}
}
impl Complex {
fn new(x:i32,y:i32) -> Self {
Complex {real:x,imag:y}
}
fn to_string(&self) -> String {
if self.imag < 0 {
format!("{}{}i",self.real,self.imag)
} else {
format!("{}+{}i",self.real,self.imag)
}
}
fn times_ten(&mut self) {
self.real *=10;
self.imag *=10;
}
fn abs(&self) -> f64 {
f64::floor(f64::sqrt((self.real*self.real) as f64 + (self.imag*self.imag) as f64))
}
}
fn main() {
let comp1 = Complex::new(5,3);
let comp2 = Complex::new(8,-8);
let mut summation = comp1 + comp2;
println!("{}",summation.to_string());
summation.times_ten();
//println!("10x -> {:?}",summation.to_string());
println!("Abs({}) => {}",summation.to_string(),summation.abs());
} | Complex | identifier_name |
complex.rs | use std::f64;
use std::ops::Add;
struct Complex {
real:i32,
imag:i32,
}
// operator overloading for Complex type
impl Add for Complex {
type Output = Complex;
fn add(self,other:Complex) -> Complex {
Complex { real:self.real + other.real,imag:self.imag+other.imag }
}
}
impl Complex {
fn new(x:i32,y:i32) -> Self {
Complex {real:x,imag:y}
}
fn to_string(&self) -> String {
if self.imag < 0 {
format!("{}{}i",self.real,self.imag)
} else {
format!("{}+{}i",self.real,self.imag)
}
}
fn times_ten(&mut self) {
self.real *=10;
self.imag *=10;
}
fn abs(&self) -> f64 |
}
fn main() {
let comp1 = Complex::new(5,3);
let comp2 = Complex::new(8,-8);
let mut summation = comp1 + comp2;
println!("{}",summation.to_string());
summation.times_ten();
//println!("10x -> {:?}",summation.to_string());
println!("Abs({}) => {}",summation.to_string(),summation.abs());
} | {
f64::floor(f64::sqrt((self.real*self.real) as f64 + (self.imag*self.imag) as f64))
} | identifier_body |
riscv64gc_unknown_none_elf.rs | use crate::spec::{CodeModel, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
use crate::spec::{Target, TargetOptions};
pub fn target() -> Target | ..Default::default()
},
}
}
| {
Target {
data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".to_string(),
llvm_target: "riscv64".to_string(),
pointer_width: 64,
arch: "riscv64".to_string(),
options: TargetOptions {
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
linker: Some("rust-lld".to_string()),
llvm_abiname: "lp64d".to_string(),
cpu: "generic-rv64".to_string(),
max_atomic_width: Some(64),
features: "+m,+a,+f,+d,+c".to_string(),
executables: true,
panic_strategy: PanicStrategy::Abort,
relocation_model: RelocModel::Static,
code_model: Some(CodeModel::Medium),
emit_debug_gdb_scripts: false,
eh_frame_header: false, | identifier_body |
riscv64gc_unknown_none_elf.rs | use crate::spec::{CodeModel, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
use crate::spec::{Target, TargetOptions};
pub fn | () -> Target {
Target {
data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".to_string(),
llvm_target: "riscv64".to_string(),
pointer_width: 64,
arch: "riscv64".to_string(),
options: TargetOptions {
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
linker: Some("rust-lld".to_string()),
llvm_abiname: "lp64d".to_string(),
cpu: "generic-rv64".to_string(),
max_atomic_width: Some(64),
features: "+m,+a,+f,+d,+c".to_string(),
executables: true,
panic_strategy: PanicStrategy::Abort,
relocation_model: RelocModel::Static,
code_model: Some(CodeModel::Medium),
emit_debug_gdb_scripts: false,
eh_frame_header: false,
..Default::default()
},
}
}
| target | identifier_name |
riscv64gc_unknown_none_elf.rs | use crate::spec::{CodeModel, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
use crate::spec::{Target, TargetOptions};
pub fn target() -> Target {
Target {
data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".to_string(),
llvm_target: "riscv64".to_string(), | arch: "riscv64".to_string(),
options: TargetOptions {
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
linker: Some("rust-lld".to_string()),
llvm_abiname: "lp64d".to_string(),
cpu: "generic-rv64".to_string(),
max_atomic_width: Some(64),
features: "+m,+a,+f,+d,+c".to_string(),
executables: true,
panic_strategy: PanicStrategy::Abort,
relocation_model: RelocModel::Static,
code_model: Some(CodeModel::Medium),
emit_debug_gdb_scripts: false,
eh_frame_header: false,
..Default::default()
},
}
} | pointer_width: 64, | random_line_split |
token.rs | // Lexical Tokens
//
// This file is part of AEx.
// Copyright (C) 2017 Jeffrey Sharp
//
// AEx is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// AEx is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
// the GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with AEx. If not, see <http://www.gnu.org/licenses/>.
use std::cell::RefCell;
use std::collections::HashMap;
use num::{self, BigInt, ToPrimitive};
use aex::operator::{OperatorEntry, OperatorTable};
use aex::mem::StringInterner;
use aex::message::Messages;
use aex::source::*;
// Placeholder
pub struct Compiler<'a> {
strings: StringInterner,
operators: OperatorTable,
log: RefCell<Messages<'a>>
}
impl<'a> Compiler<'a> {
pub fn new() -> Self {
Compiler {
strings: StringInterner::new(),
operators: OperatorTable::new(),
log: RefCell::new(Messages::new()),
}
}
}
// -----------------------------------------------------------------------------
// Token
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Token<'a> {
Id (&'a str), // Identifier
Flag (&'a str), // Condition flag
Int (BigInt), // Literal: integer
Char (char), // Literal: character
Str (&'a str), // Literal: string
KwType, // Keyword: type
KwStruct, // Keyword: struct
KwUnion, // Keyword: union
KwIf, // Keyword: if
KwElse, // Keyword: else
KwLoop, // Keyword: loop
KwWhile, // Keyword: while
KwBreak, // Keyword: break
KwContinue, // Keyword: continue
KwReturn, // Keyword: return
KwJump, // Keyword: jump
BraceL, // {
BraceR, // }
ParenL, // (
ParenR, // )
BracketL, // [
BracketR, // ]
//Dot, //.
//At, // @
//Equal, // =
//EqualArrow, // =>
//MinusArrow, // ->
Colon, // :
Comma, //,
Op(&'a OperatorEntry), // any of:.@!~*/%+-&^|<=>?
Eos, // End of statement
Eof, // End of file
Error // Lexical error
}
// -----------------------------------------------------------------------------
// TokenBuilder
pub struct TokenBuilder<'a> {
// Source
file: &'a File<'a>, // source file
start: Pos, // position of token start
current: Pos, // position of current character
// Accumulators
number: BigInt, // number builder
buffer: String, // string builder
// Language
keywords: HashMap<&'a str, Token<'a>>, // keyword table
operators: &'a OperatorTable, // operator table
// Session
strings: &'a StringInterner, // string interner
log: &'a RefCell<Messages<'a>> // message log
}
impl<'a> TokenBuilder<'a> {
pub fn new(compiler: &'a Compiler <'a>,
file: &'a File <'a>,
) -> Self {
TokenBuilder {
file: file,
start: Pos::bof(),
current: Pos::bof(),
buffer: String::with_capacity(128),
number: num::zero(),
keywords: keywords(&compiler.strings),
operators: &compiler.operators,
strings: &compiler.strings,
log: &compiler.log,
}
}
// Position actions
#[inline]
pub fn start(&mut self) {
self.start = self.current;
}
#[inline]
pub fn advance(&mut self, c: char) {
self.current.advance(c);
}
#[inline]
pub fn newline(&mut self) {
self.current.newline();
}
#[inline]
pub fn source(&self) -> Source<'a> {
let len = self.current.byte - self.start.byte;
Source::File { file: self.file, pos: self.start, len: len }
}
// Number actions
#[inline]
pub fn add_dec(&mut self, c: char) {
self.add_num(10, int_from_dg(c))
}
#[inline]
pub fn add_hex_dg(&mut self, c: char) {
self.add_num(16, int_from_dg(c))
}
#[inline]
pub fn add_hex_uc(&mut self, c: char) {
self.add_num(16, int_from_hex_uc(c))
}
#[inline]
pub fn add_hex_lc(&mut self, c: char) {
self.add_num(16, int_from_hex_lc(c))
}
#[inline]
pub fn add_oct(&mut self, c: char) |
#[inline]
pub fn add_bin(&mut self, c: char) {
self.add_num(2, int_from_dg(c))
}
#[inline]
fn add_num(&mut self, base: u8, digit: u8) {
self.number = &self.number
* BigInt::from(base)
+ BigInt::from(digit);
}
#[inline]
pub fn get_num(&mut self) -> Token<'a> {
let number = self.number.clone();
self.number = num::zero();
Token::Int(number)
}
// Character/String Actions
#[inline]
pub fn add_char(&mut self, c: char) {
self.buffer.push(c);
}
#[inline]
pub fn add_esc(&mut self) -> Option<Token<'a>> {
match self.number.to_u32() {
Some(n) if n <= UNICODE_MAX => {
let c = unsafe { ::std::mem::transmute(n) };
self.buffer.push(c);
self.number = num::zero();
None
},
_ => {
Some(self.err_overflow_esc())
}
}
}
#[inline]
fn intern_str(&mut self) -> &'a str {
let s = self.strings.intern(self.buffer.clone());
self.buffer.clear();
s
}
#[inline]
pub fn get_char(&mut self) -> Token<'a> {
let c = self.buffer.chars().next().unwrap();
self.buffer.clear();
Token::Char(c)
}
#[inline]
pub fn get_str(&mut self) -> Token<'a> {
Token::Str(self.intern_str())
}
#[inline]
pub fn get_id_or_keyword(&mut self) -> Token<'a> {
let s = self.intern_str();
match self.keywords.get(&s) {
Some(k) => k.clone(),
None => Token::Id(s)
}
}
// Error Actions
pub fn err_unrec(&mut self, c: char) -> Token<'a> {
self.log.borrow_mut().err_unrec(self.source(), c);
Token::Error
}
pub fn err_unrec_num(&mut self, c: char) -> Token<'a> {
self.log.borrow_mut().err_unrec_num(self.source(), c);
Token::Error
}
pub fn err_unrec_esc(&mut self, c: char) -> Token<'a> {
self.log.borrow_mut().err_unrec_esc(self.source(), c);
Token::Error
}
pub fn err_unterm_char(&mut self) -> Token<'a> {
self.log.borrow_mut().err_unterm_char(self.source());
Token::Error
}
pub fn err_unterm_str(&mut self) -> Token<'a> {
self.log.borrow_mut().err_unterm_str(self.source());
Token::Error
}
pub fn err_unterm_esc(&mut self) -> Token<'a> {
self.log.borrow_mut().err_unterm_esc(self.source());
Token::Error
}
pub fn err_length_char(&mut self) -> Token<'a> {
self.log.borrow_mut().err_length_char(self.source());
Token::Error
}
pub fn err_overflow_esc(&mut self) -> Token<'a> {
self.log.borrow_mut().err_overflow_esc(self.source());
Token::Error
}
}
const UNICODE_MAX: u32 = 0x10FFFF;
#[inline]
fn int_from_dg(c: char) -> u8 {
c as u8 - 0x30 // c - '0'
}
#[inline]
fn int_from_hex_uc(c: char) -> u8 {
c as u8 - 0x37 // 10 + c - 'A'
}
#[inline]
fn int_from_hex_lc(c: char) -> u8 {
c as u8 - 0x57 // 10 + c - 'a'
}
// -----------------------------------------------------------------------------
// Keywords
// TODO: Consider moving this to Compiler, so that targets can add custom keywords.
#[inline]
fn keywords<'a>(strings: &'a StringInterner) -> HashMap<&'a str, Token<'a>> {
let mut map = HashMap::new();
for &(key, ref tok) in KEYWORDS {
map.insert(strings.intern_ref(key), tok.clone());
}
map
}
const KEYWORDS: &'static [(&'static str, Token<'static>)] = &[
( "type" , Token::KwType ),
( "struct" , Token::KwStruct ),
( "union" , Token::KwUnion ),
( "if" , Token::KwIf ),
( "else" , Token::KwElse ),
( "loop" , Token::KwLoop ),
( "while" , Token::KwWhile ),
( "break" , Token::KwBreak ),
( "continue", Token::KwContinue ),
( "return" , Token::KwReturn ),
( "jump" , Token::KwJump ),
];
| {
self.add_num(8, int_from_dg(c))
} | identifier_body |
token.rs | // Lexical Tokens
//
// This file is part of AEx.
// Copyright (C) 2017 Jeffrey Sharp
//
// AEx is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// AEx is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
// the GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with AEx. If not, see <http://www.gnu.org/licenses/>.
use std::cell::RefCell;
use std::collections::HashMap;
use num::{self, BigInt, ToPrimitive};
use aex::operator::{OperatorEntry, OperatorTable};
use aex::mem::StringInterner;
use aex::message::Messages;
use aex::source::*;
// Placeholder
pub struct Compiler<'a> {
strings: StringInterner,
operators: OperatorTable,
log: RefCell<Messages<'a>>
}
impl<'a> Compiler<'a> {
pub fn new() -> Self {
Compiler {
strings: StringInterner::new(),
operators: OperatorTable::new(),
log: RefCell::new(Messages::new()),
}
}
}
// -----------------------------------------------------------------------------
// Token
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Token<'a> {
Id (&'a str), // Identifier
Flag (&'a str), // Condition flag
Int (BigInt), // Literal: integer
Char (char), // Literal: character
Str (&'a str), // Literal: string
KwType, // Keyword: type
KwStruct, // Keyword: struct
KwUnion, // Keyword: union
KwIf, // Keyword: if
KwElse, // Keyword: else
KwLoop, // Keyword: loop
KwWhile, // Keyword: while
KwBreak, // Keyword: break
KwContinue, // Keyword: continue
KwReturn, // Keyword: return
KwJump, // Keyword: jump
BraceL, // {
BraceR, // }
ParenL, // (
ParenR, // )
BracketL, // [
BracketR, // ]
//Dot, //.
//At, // @
//Equal, // =
//EqualArrow, // =>
//MinusArrow, // ->
Colon, // :
Comma, //,
Op(&'a OperatorEntry), // any of:.@!~*/%+-&^|<=>?
Eos, // End of statement
Eof, // End of file
Error // Lexical error
}
// -----------------------------------------------------------------------------
// TokenBuilder
pub struct TokenBuilder<'a> {
// Source
file: &'a File<'a>, // source file
start: Pos, // position of token start
current: Pos, // position of current character
// Accumulators
number: BigInt, // number builder
buffer: String, // string builder
// Language
keywords: HashMap<&'a str, Token<'a>>, // keyword table
operators: &'a OperatorTable, // operator table
// Session
strings: &'a StringInterner, // string interner
log: &'a RefCell<Messages<'a>> // message log
}
impl<'a> TokenBuilder<'a> {
pub fn new(compiler: &'a Compiler <'a>,
file: &'a File <'a>,
) -> Self {
TokenBuilder {
file: file,
start: Pos::bof(),
current: Pos::bof(),
buffer: String::with_capacity(128),
number: num::zero(),
keywords: keywords(&compiler.strings),
operators: &compiler.operators,
strings: &compiler.strings,
log: &compiler.log,
}
}
// Position actions
#[inline]
pub fn start(&mut self) {
self.start = self.current;
}
#[inline]
pub fn advance(&mut self, c: char) {
self.current.advance(c);
}
#[inline]
pub fn newline(&mut self) {
self.current.newline();
}
#[inline]
pub fn source(&self) -> Source<'a> {
let len = self.current.byte - self.start.byte;
Source::File { file: self.file, pos: self.start, len: len }
}
// Number actions
#[inline]
pub fn add_dec(&mut self, c: char) {
self.add_num(10, int_from_dg(c))
}
#[inline]
pub fn add_hex_dg(&mut self, c: char) {
self.add_num(16, int_from_dg(c))
}
#[inline]
pub fn add_hex_uc(&mut self, c: char) {
self.add_num(16, int_from_hex_uc(c))
}
#[inline]
pub fn | (&mut self, c: char) {
self.add_num(16, int_from_hex_lc(c))
}
#[inline]
pub fn add_oct(&mut self, c: char) {
self.add_num(8, int_from_dg(c))
}
#[inline]
pub fn add_bin(&mut self, c: char) {
self.add_num(2, int_from_dg(c))
}
#[inline]
fn add_num(&mut self, base: u8, digit: u8) {
self.number = &self.number
* BigInt::from(base)
+ BigInt::from(digit);
}
#[inline]
pub fn get_num(&mut self) -> Token<'a> {
let number = self.number.clone();
self.number = num::zero();
Token::Int(number)
}
// Character/String Actions
#[inline]
pub fn add_char(&mut self, c: char) {
self.buffer.push(c);
}
#[inline]
pub fn add_esc(&mut self) -> Option<Token<'a>> {
match self.number.to_u32() {
Some(n) if n <= UNICODE_MAX => {
let c = unsafe { ::std::mem::transmute(n) };
self.buffer.push(c);
self.number = num::zero();
None
},
_ => {
Some(self.err_overflow_esc())
}
}
}
#[inline]
fn intern_str(&mut self) -> &'a str {
let s = self.strings.intern(self.buffer.clone());
self.buffer.clear();
s
}
#[inline]
pub fn get_char(&mut self) -> Token<'a> {
let c = self.buffer.chars().next().unwrap();
self.buffer.clear();
Token::Char(c)
}
#[inline]
pub fn get_str(&mut self) -> Token<'a> {
Token::Str(self.intern_str())
}
#[inline]
pub fn get_id_or_keyword(&mut self) -> Token<'a> {
let s = self.intern_str();
match self.keywords.get(&s) {
Some(k) => k.clone(),
None => Token::Id(s)
}
}
// Error Actions
pub fn err_unrec(&mut self, c: char) -> Token<'a> {
self.log.borrow_mut().err_unrec(self.source(), c);
Token::Error
}
pub fn err_unrec_num(&mut self, c: char) -> Token<'a> {
self.log.borrow_mut().err_unrec_num(self.source(), c);
Token::Error
}
pub fn err_unrec_esc(&mut self, c: char) -> Token<'a> {
self.log.borrow_mut().err_unrec_esc(self.source(), c);
Token::Error
}
pub fn err_unterm_char(&mut self) -> Token<'a> {
self.log.borrow_mut().err_unterm_char(self.source());
Token::Error
}
pub fn err_unterm_str(&mut self) -> Token<'a> {
self.log.borrow_mut().err_unterm_str(self.source());
Token::Error
}
pub fn err_unterm_esc(&mut self) -> Token<'a> {
self.log.borrow_mut().err_unterm_esc(self.source());
Token::Error
}
pub fn err_length_char(&mut self) -> Token<'a> {
self.log.borrow_mut().err_length_char(self.source());
Token::Error
}
pub fn err_overflow_esc(&mut self) -> Token<'a> {
self.log.borrow_mut().err_overflow_esc(self.source());
Token::Error
}
}
const UNICODE_MAX: u32 = 0x10FFFF;
#[inline]
fn int_from_dg(c: char) -> u8 {
c as u8 - 0x30 // c - '0'
}
#[inline]
fn int_from_hex_uc(c: char) -> u8 {
c as u8 - 0x37 // 10 + c - 'A'
}
#[inline]
fn int_from_hex_lc(c: char) -> u8 {
c as u8 - 0x57 // 10 + c - 'a'
}
// -----------------------------------------------------------------------------
// Keywords
// TODO: Consider moving this to Compiler, so that targets can add custom keywords.
#[inline]
fn keywords<'a>(strings: &'a StringInterner) -> HashMap<&'a str, Token<'a>> {
let mut map = HashMap::new();
for &(key, ref tok) in KEYWORDS {
map.insert(strings.intern_ref(key), tok.clone());
}
map
}
const KEYWORDS: &'static [(&'static str, Token<'static>)] = &[
( "type" , Token::KwType ),
( "struct" , Token::KwStruct ),
( "union" , Token::KwUnion ),
( "if" , Token::KwIf ),
( "else" , Token::KwElse ),
( "loop" , Token::KwLoop ),
( "while" , Token::KwWhile ),
( "break" , Token::KwBreak ),
( "continue", Token::KwContinue ),
( "return" , Token::KwReturn ),
( "jump" , Token::KwJump ),
];
| add_hex_lc | identifier_name |
token.rs | // Lexical Tokens
//
// This file is part of AEx.
// Copyright (C) 2017 Jeffrey Sharp
//
// AEx is free software: you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published
// by the Free Software Foundation, either version 3 of the License,
// or (at your option) any later version.
//
// AEx is distributed in the hope that it will be useful, but | // You should have received a copy of the GNU General Public License
// along with AEx. If not, see <http://www.gnu.org/licenses/>.
use std::cell::RefCell;
use std::collections::HashMap;
use num::{self, BigInt, ToPrimitive};
use aex::operator::{OperatorEntry, OperatorTable};
use aex::mem::StringInterner;
use aex::message::Messages;
use aex::source::*;
// Placeholder
pub struct Compiler<'a> {
strings: StringInterner,
operators: OperatorTable,
log: RefCell<Messages<'a>>
}
impl<'a> Compiler<'a> {
pub fn new() -> Self {
Compiler {
strings: StringInterner::new(),
operators: OperatorTable::new(),
log: RefCell::new(Messages::new()),
}
}
}
// -----------------------------------------------------------------------------
// Token
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Token<'a> {
Id (&'a str), // Identifier
Flag (&'a str), // Condition flag
Int (BigInt), // Literal: integer
Char (char), // Literal: character
Str (&'a str), // Literal: string
KwType, // Keyword: type
KwStruct, // Keyword: struct
KwUnion, // Keyword: union
KwIf, // Keyword: if
KwElse, // Keyword: else
KwLoop, // Keyword: loop
KwWhile, // Keyword: while
KwBreak, // Keyword: break
KwContinue, // Keyword: continue
KwReturn, // Keyword: return
KwJump, // Keyword: jump
BraceL, // {
BraceR, // }
ParenL, // (
ParenR, // )
BracketL, // [
BracketR, // ]
//Dot, //.
//At, // @
//Equal, // =
//EqualArrow, // =>
//MinusArrow, // ->
Colon, // :
Comma, //,
Op(&'a OperatorEntry), // any of:.@!~*/%+-&^|<=>?
Eos, // End of statement
Eof, // End of file
Error // Lexical error
}
// -----------------------------------------------------------------------------
// TokenBuilder
pub struct TokenBuilder<'a> {
// Source
file: &'a File<'a>, // source file
start: Pos, // position of token start
current: Pos, // position of current character
// Accumulators
number: BigInt, // number builder
buffer: String, // string builder
// Language
keywords: HashMap<&'a str, Token<'a>>, // keyword table
operators: &'a OperatorTable, // operator table
// Session
strings: &'a StringInterner, // string interner
log: &'a RefCell<Messages<'a>> // message log
}
impl<'a> TokenBuilder<'a> {
pub fn new(compiler: &'a Compiler <'a>,
file: &'a File <'a>,
) -> Self {
TokenBuilder {
file: file,
start: Pos::bof(),
current: Pos::bof(),
buffer: String::with_capacity(128),
number: num::zero(),
keywords: keywords(&compiler.strings),
operators: &compiler.operators,
strings: &compiler.strings,
log: &compiler.log,
}
}
// Position actions
#[inline]
pub fn start(&mut self) {
self.start = self.current;
}
#[inline]
pub fn advance(&mut self, c: char) {
self.current.advance(c);
}
#[inline]
pub fn newline(&mut self) {
self.current.newline();
}
#[inline]
pub fn source(&self) -> Source<'a> {
let len = self.current.byte - self.start.byte;
Source::File { file: self.file, pos: self.start, len: len }
}
// Number actions
#[inline]
pub fn add_dec(&mut self, c: char) {
self.add_num(10, int_from_dg(c))
}
#[inline]
pub fn add_hex_dg(&mut self, c: char) {
self.add_num(16, int_from_dg(c))
}
#[inline]
pub fn add_hex_uc(&mut self, c: char) {
self.add_num(16, int_from_hex_uc(c))
}
#[inline]
pub fn add_hex_lc(&mut self, c: char) {
self.add_num(16, int_from_hex_lc(c))
}
#[inline]
pub fn add_oct(&mut self, c: char) {
self.add_num(8, int_from_dg(c))
}
#[inline]
pub fn add_bin(&mut self, c: char) {
self.add_num(2, int_from_dg(c))
}
#[inline]
fn add_num(&mut self, base: u8, digit: u8) {
self.number = &self.number
* BigInt::from(base)
+ BigInt::from(digit);
}
#[inline]
pub fn get_num(&mut self) -> Token<'a> {
let number = self.number.clone();
self.number = num::zero();
Token::Int(number)
}
// Character/String Actions
#[inline]
pub fn add_char(&mut self, c: char) {
self.buffer.push(c);
}
#[inline]
pub fn add_esc(&mut self) -> Option<Token<'a>> {
match self.number.to_u32() {
Some(n) if n <= UNICODE_MAX => {
let c = unsafe { ::std::mem::transmute(n) };
self.buffer.push(c);
self.number = num::zero();
None
},
_ => {
Some(self.err_overflow_esc())
}
}
}
#[inline]
fn intern_str(&mut self) -> &'a str {
let s = self.strings.intern(self.buffer.clone());
self.buffer.clear();
s
}
#[inline]
pub fn get_char(&mut self) -> Token<'a> {
let c = self.buffer.chars().next().unwrap();
self.buffer.clear();
Token::Char(c)
}
#[inline]
pub fn get_str(&mut self) -> Token<'a> {
Token::Str(self.intern_str())
}
#[inline]
pub fn get_id_or_keyword(&mut self) -> Token<'a> {
let s = self.intern_str();
match self.keywords.get(&s) {
Some(k) => k.clone(),
None => Token::Id(s)
}
}
// Error Actions
pub fn err_unrec(&mut self, c: char) -> Token<'a> {
self.log.borrow_mut().err_unrec(self.source(), c);
Token::Error
}
pub fn err_unrec_num(&mut self, c: char) -> Token<'a> {
self.log.borrow_mut().err_unrec_num(self.source(), c);
Token::Error
}
pub fn err_unrec_esc(&mut self, c: char) -> Token<'a> {
self.log.borrow_mut().err_unrec_esc(self.source(), c);
Token::Error
}
pub fn err_unterm_char(&mut self) -> Token<'a> {
self.log.borrow_mut().err_unterm_char(self.source());
Token::Error
}
pub fn err_unterm_str(&mut self) -> Token<'a> {
self.log.borrow_mut().err_unterm_str(self.source());
Token::Error
}
pub fn err_unterm_esc(&mut self) -> Token<'a> {
self.log.borrow_mut().err_unterm_esc(self.source());
Token::Error
}
pub fn err_length_char(&mut self) -> Token<'a> {
self.log.borrow_mut().err_length_char(self.source());
Token::Error
}
pub fn err_overflow_esc(&mut self) -> Token<'a> {
self.log.borrow_mut().err_overflow_esc(self.source());
Token::Error
}
}
const UNICODE_MAX: u32 = 0x10FFFF;
#[inline]
fn int_from_dg(c: char) -> u8 {
c as u8 - 0x30 // c - '0'
}
#[inline]
fn int_from_hex_uc(c: char) -> u8 {
c as u8 - 0x37 // 10 + c - 'A'
}
#[inline]
fn int_from_hex_lc(c: char) -> u8 {
c as u8 - 0x57 // 10 + c - 'a'
}
// -----------------------------------------------------------------------------
// Keywords
// TODO: Consider moving this to Compiler, so that targets can add custom keywords.
#[inline]
fn keywords<'a>(strings: &'a StringInterner) -> HashMap<&'a str, Token<'a>> {
let mut map = HashMap::new();
for &(key, ref tok) in KEYWORDS {
map.insert(strings.intern_ref(key), tok.clone());
}
map
}
const KEYWORDS: &'static [(&'static str, Token<'static>)] = &[
( "type" , Token::KwType ),
( "struct" , Token::KwStruct ),
( "union" , Token::KwUnion ),
( "if" , Token::KwIf ),
( "else" , Token::KwElse ),
( "loop" , Token::KwLoop ),
( "while" , Token::KwWhile ),
( "break" , Token::KwBreak ),
( "continue", Token::KwContinue ),
( "return" , Token::KwReturn ),
( "jump" , Token::KwJump ),
]; | // WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
// the GNU General Public License for more details.
// | random_line_split |
htmlstyleelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLStyleElementBinding; | use dom::htmlelement::HTMLElement;
use dom::node::{AbstractNode, Node, ScriptView};
pub struct HTMLStyleElement {
htmlelement: HTMLElement,
}
impl HTMLStyleElement {
pub fn new_inherited(localName: ~str, document: AbstractDocument) -> HTMLStyleElement {
HTMLStyleElement {
htmlelement: HTMLElement::new(HTMLStyleElementTypeId, localName, document)
}
}
pub fn new(localName: ~str, document: AbstractDocument) -> AbstractNode<ScriptView> {
let element = HTMLStyleElement::new_inherited(localName, document);
Node::reflect_node(@mut element, document, HTMLStyleElementBinding::Wrap)
}
}
impl HTMLStyleElement {
pub fn Disabled(&self) -> bool {
false
}
pub fn SetDisabled(&self, _disabled: bool) {
}
pub fn Media(&self) -> DOMString {
None
}
pub fn SetMedia(&mut self, _media: &DOMString) -> ErrorResult {
Ok(())
}
pub fn Type(&self) -> DOMString {
None
}
pub fn SetType(&mut self, _type: &DOMString) -> ErrorResult {
Ok(())
}
pub fn Scoped(&self) -> bool {
false
}
pub fn SetScoped(&self, _scoped: bool) -> ErrorResult {
Ok(())
}
} | use dom::bindings::utils::{DOMString, ErrorResult};
use dom::document::AbstractDocument;
use dom::element::HTMLStyleElementTypeId; | random_line_split |
htmlstyleelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLStyleElementBinding;
use dom::bindings::utils::{DOMString, ErrorResult};
use dom::document::AbstractDocument;
use dom::element::HTMLStyleElementTypeId;
use dom::htmlelement::HTMLElement;
use dom::node::{AbstractNode, Node, ScriptView};
pub struct HTMLStyleElement {
htmlelement: HTMLElement,
}
impl HTMLStyleElement {
pub fn new_inherited(localName: ~str, document: AbstractDocument) -> HTMLStyleElement {
HTMLStyleElement {
htmlelement: HTMLElement::new(HTMLStyleElementTypeId, localName, document)
}
}
pub fn new(localName: ~str, document: AbstractDocument) -> AbstractNode<ScriptView> {
let element = HTMLStyleElement::new_inherited(localName, document);
Node::reflect_node(@mut element, document, HTMLStyleElementBinding::Wrap)
}
}
impl HTMLStyleElement {
pub fn Disabled(&self) -> bool {
false
}
pub fn SetDisabled(&self, _disabled: bool) {
}
pub fn | (&self) -> DOMString {
None
}
pub fn SetMedia(&mut self, _media: &DOMString) -> ErrorResult {
Ok(())
}
pub fn Type(&self) -> DOMString {
None
}
pub fn SetType(&mut self, _type: &DOMString) -> ErrorResult {
Ok(())
}
pub fn Scoped(&self) -> bool {
false
}
pub fn SetScoped(&self, _scoped: bool) -> ErrorResult {
Ok(())
}
}
| Media | identifier_name |
htmlstyleelement.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::HTMLStyleElementBinding;
use dom::bindings::utils::{DOMString, ErrorResult};
use dom::document::AbstractDocument;
use dom::element::HTMLStyleElementTypeId;
use dom::htmlelement::HTMLElement;
use dom::node::{AbstractNode, Node, ScriptView};
pub struct HTMLStyleElement {
htmlelement: HTMLElement,
}
impl HTMLStyleElement {
pub fn new_inherited(localName: ~str, document: AbstractDocument) -> HTMLStyleElement {
HTMLStyleElement {
htmlelement: HTMLElement::new(HTMLStyleElementTypeId, localName, document)
}
}
pub fn new(localName: ~str, document: AbstractDocument) -> AbstractNode<ScriptView> {
let element = HTMLStyleElement::new_inherited(localName, document);
Node::reflect_node(@mut element, document, HTMLStyleElementBinding::Wrap)
}
}
impl HTMLStyleElement {
pub fn Disabled(&self) -> bool {
false
}
pub fn SetDisabled(&self, _disabled: bool) {
}
pub fn Media(&self) -> DOMString {
None
}
pub fn SetMedia(&mut self, _media: &DOMString) -> ErrorResult {
Ok(())
}
pub fn Type(&self) -> DOMString {
None
}
pub fn SetType(&mut self, _type: &DOMString) -> ErrorResult {
Ok(())
}
pub fn Scoped(&self) -> bool {
false
}
pub fn SetScoped(&self, _scoped: bool) -> ErrorResult |
}
| {
Ok(())
} | identifier_body |
ref_binding_to_reference.rs | // edition:2018
// FIXME: run-rustfix waiting on multi-span suggestions
#![warn(clippy::ref_binding_to_reference)]
#![allow(clippy::needless_borrowed_reference)]
fn f1(_: &str) {}
macro_rules! m2 {
($e:expr) => {
f1(*$e)
};
}
macro_rules! m3 {
($i:ident) => {
Some(ref $i)
};
}
#[allow(dead_code)]
fn main() {
let x = String::new();
// Ok, the pattern is from a macro
let _: &&String = match Some(&x) {
m3!(x) => x,
None => return,
};
// Err, reference to a &String
let _: &&String = match Some(&x) {
Some(ref x) => x,
None => return,
};
// Err, reference to a &String
let _: &&String = match Some(&x) {
Some(ref x) => {
f1(x);
f1(*x);
x
},
None => return,
}; | match Some(&x) {
Some(ref x) => m2!(x),
None => return,
}
// Err, reference to a &String
let _ = |&ref x: &&String| {
let _: &&String = x;
};
}
// Err, reference to a &String
fn f2<'a>(&ref x: &&'a String) -> &'a String {
let _: &&String = x;
*x
}
trait T1 {
// Err, reference to a &String
fn f(&ref x: &&String) {
let _: &&String = x;
}
}
struct S;
impl T1 for S {
// Err, reference to a &String
fn f(&ref x: &&String) {
let _: &&String = x;
}
} |
// Err, reference to a &String | random_line_split |
ref_binding_to_reference.rs | // edition:2018
// FIXME: run-rustfix waiting on multi-span suggestions
#![warn(clippy::ref_binding_to_reference)]
#![allow(clippy::needless_borrowed_reference)]
fn | (_: &str) {}
macro_rules! m2 {
($e:expr) => {
f1(*$e)
};
}
macro_rules! m3 {
($i:ident) => {
Some(ref $i)
};
}
#[allow(dead_code)]
fn main() {
let x = String::new();
// Ok, the pattern is from a macro
let _: &&String = match Some(&x) {
m3!(x) => x,
None => return,
};
// Err, reference to a &String
let _: &&String = match Some(&x) {
Some(ref x) => x,
None => return,
};
// Err, reference to a &String
let _: &&String = match Some(&x) {
Some(ref x) => {
f1(x);
f1(*x);
x
},
None => return,
};
// Err, reference to a &String
match Some(&x) {
Some(ref x) => m2!(x),
None => return,
}
// Err, reference to a &String
let _ = |&ref x: &&String| {
let _: &&String = x;
};
}
// Err, reference to a &String
fn f2<'a>(&ref x: &&'a String) -> &'a String {
let _: &&String = x;
*x
}
trait T1 {
// Err, reference to a &String
fn f(&ref x: &&String) {
let _: &&String = x;
}
}
struct S;
impl T1 for S {
// Err, reference to a &String
fn f(&ref x: &&String) {
let _: &&String = x;
}
}
| f1 | identifier_name |
match-phi.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[allow(dead_assignment)];
#[allow(unused_variable)];
enum thing { a, b, c, }
fn foo(it: |int|) { it(10); }
pub fn main() {
let mut x = true;
match a {
a => |
b => { x = false; }
c => { x = false; }
}
}
| { x = true; foo(|_i| { } ) } | conditional_block |
match-phi.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[allow(dead_assignment)];
#[allow(unused_variable)];
enum thing { a, b, c, }
fn foo(it: |int|) { it(10); }
pub fn | () {
let mut x = true;
match a {
a => { x = true; foo(|_i| { } ) }
b => { x = false; }
c => { x = false; }
}
}
| main | identifier_name |
scu_interrupt.rs | #[doc = r"Register block"]
#[repr(C)]
pub struct | {
#[doc = "0x00 - SCU Service Request Status"]
pub srstat: crate::Reg<srstat::SRSTAT_SPEC>,
#[doc = "0x04 - SCU Raw Service Request Status"]
pub srraw: crate::Reg<srraw::SRRAW_SPEC>,
#[doc = "0x08 - SCU Service Request Mask"]
pub srmsk: crate::Reg<srmsk::SRMSK_SPEC>,
#[doc = "0x0c - SCU Service Request Clear"]
pub srclr: crate::Reg<srclr::SRCLR_SPEC>,
#[doc = "0x10 - SCU Service Request Set"]
pub srset: crate::Reg<srset::SRSET_SPEC>,
#[doc = "0x14 - SCU Service Request Mask"]
pub nmireqen: crate::Reg<nmireqen::NMIREQEN_SPEC>,
}
#[doc = "SRSTAT register accessor: an alias for `Reg<SRSTAT_SPEC>`"]
pub type SRSTAT = crate::Reg<srstat::SRSTAT_SPEC>;
#[doc = "SCU Service Request Status"]
pub mod srstat;
#[doc = "SRRAW register accessor: an alias for `Reg<SRRAW_SPEC>`"]
pub type SRRAW = crate::Reg<srraw::SRRAW_SPEC>;
#[doc = "SCU Raw Service Request Status"]
pub mod srraw;
#[doc = "SRMSK register accessor: an alias for `Reg<SRMSK_SPEC>`"]
pub type SRMSK = crate::Reg<srmsk::SRMSK_SPEC>;
#[doc = "SCU Service Request Mask"]
pub mod srmsk;
#[doc = "SRCLR register accessor: an alias for `Reg<SRCLR_SPEC>`"]
pub type SRCLR = crate::Reg<srclr::SRCLR_SPEC>;
#[doc = "SCU Service Request Clear"]
pub mod srclr;
#[doc = "SRSET register accessor: an alias for `Reg<SRSET_SPEC>`"]
pub type SRSET = crate::Reg<srset::SRSET_SPEC>;
#[doc = "SCU Service Request Set"]
pub mod srset;
#[doc = "NMIREQEN register accessor: an alias for `Reg<NMIREQEN_SPEC>`"]
pub type NMIREQEN = crate::Reg<nmireqen::NMIREQEN_SPEC>;
#[doc = "SCU Service Request Mask"]
pub mod nmireqen;
| RegisterBlock | identifier_name |
scu_interrupt.rs | #[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
#[doc = "0x00 - SCU Service Request Status"]
pub srstat: crate::Reg<srstat::SRSTAT_SPEC>,
#[doc = "0x04 - SCU Raw Service Request Status"]
pub srraw: crate::Reg<srraw::SRRAW_SPEC>,
#[doc = "0x08 - SCU Service Request Mask"]
pub srmsk: crate::Reg<srmsk::SRMSK_SPEC>,
#[doc = "0x0c - SCU Service Request Clear"]
pub srclr: crate::Reg<srclr::SRCLR_SPEC>,
#[doc = "0x10 - SCU Service Request Set"]
pub srset: crate::Reg<srset::SRSET_SPEC>,
#[doc = "0x14 - SCU Service Request Mask"]
pub nmireqen: crate::Reg<nmireqen::NMIREQEN_SPEC>,
}
#[doc = "SRSTAT register accessor: an alias for `Reg<SRSTAT_SPEC>`"]
pub type SRSTAT = crate::Reg<srstat::SRSTAT_SPEC>;
#[doc = "SCU Service Request Status"]
pub mod srstat;
#[doc = "SRRAW register accessor: an alias for `Reg<SRRAW_SPEC>`"]
pub type SRRAW = crate::Reg<srraw::SRRAW_SPEC>; | #[doc = "SCU Service Request Mask"]
pub mod srmsk;
#[doc = "SRCLR register accessor: an alias for `Reg<SRCLR_SPEC>`"]
pub type SRCLR = crate::Reg<srclr::SRCLR_SPEC>;
#[doc = "SCU Service Request Clear"]
pub mod srclr;
#[doc = "SRSET register accessor: an alias for `Reg<SRSET_SPEC>`"]
pub type SRSET = crate::Reg<srset::SRSET_SPEC>;
#[doc = "SCU Service Request Set"]
pub mod srset;
#[doc = "NMIREQEN register accessor: an alias for `Reg<NMIREQEN_SPEC>`"]
pub type NMIREQEN = crate::Reg<nmireqen::NMIREQEN_SPEC>;
#[doc = "SCU Service Request Mask"]
pub mod nmireqen; | #[doc = "SCU Raw Service Request Status"]
pub mod srraw;
#[doc = "SRMSK register accessor: an alias for `Reg<SRMSK_SPEC>`"]
pub type SRMSK = crate::Reg<srmsk::SRMSK_SPEC>; | random_line_split |
lib.rs | //! aobench: Ambient Occlusion Renderer benchmark.
//!
//! Based on [aobench](https://code.google.com/archive/p/aobench/) by Syoyo
//! Fujita.
#![deny(warnings, rust_2018_idioms)]
#![allow(non_snake_case, non_camel_case_types)]
#![cfg_attr(
feature = "cargo-clippy",
allow(
clippy::many_single_char_names,
clippy::similar_names,
clippy::cast_precision_loss,
clippy::inline_always,
clippy::cast_possible_truncation,
clippy::cast_sign_loss,
clippy::identity_op,
clippy::erasing_op
)
)]
pub mod ambient_occlusion;
pub mod geometry;
pub mod image;
pub mod intersection;
pub mod random;
pub mod scene;
#[cfg(feature = "ispc")]
pub mod ispc_;
pub mod scalar;
pub mod scalar_parallel; | pub mod tiled;
pub mod tiled_parallel;
pub mod vector;
pub mod vector_parallel;
pub use self::image::Image;
pub use self::scene::Scene; | random_line_split |
|
main.rs | use std::io; // io::stdin().read_line(&mut <String>)
use std::f64; // f64::NAN
fn get_input() -> String {
let mut input = String::new();
match io::stdin().read_line(&mut input) {
Ok(string) => string,
Err(err) => panic!("Error: {}", err),
};
input
}
fn main() {
let pairs: isize = get_input()
.trim()
.parse::<isize>().expect("Error: can't parse input");
let mut data: Vec<isize> = Vec::new();
for _ in 0..pairs {
let pairs: Vec<f64> = get_input()
.trim()
.split_whitespace()
.map(|s| s.parse::<f64>().expect("Error: can't parse input"))
.collect();
let mut div = f64::NAN; | else {
div /= num;
}
}
let div = div.round() as isize;
data.push(div);
}
println!("");
for ans in data {
print!("{} ", ans);
}
println!("");
} |
for num in pairs {
if div.is_nan() {
div = num;
} | random_line_split |
main.rs | use std::io; // io::stdin().read_line(&mut <String>)
use std::f64; // f64::NAN
fn get_input() -> String {
let mut input = String::new();
match io::stdin().read_line(&mut input) {
Ok(string) => string,
Err(err) => panic!("Error: {}", err),
};
input
}
fn main() | else {
div /= num;
}
}
let div = div.round() as isize;
data.push(div);
}
println!("");
for ans in data {
print!("{} ", ans);
}
println!("");
}
| {
let pairs: isize = get_input()
.trim()
.parse::<isize>().expect("Error: can't parse input");
let mut data: Vec<isize> = Vec::new();
for _ in 0..pairs {
let pairs: Vec<f64> = get_input()
.trim()
.split_whitespace()
.map(|s| s.parse::<f64>().expect("Error: can't parse input"))
.collect();
let mut div = f64::NAN;
for num in pairs {
if div.is_nan() {
div = num;
} | identifier_body |
main.rs | use std::io; // io::stdin().read_line(&mut <String>)
use std::f64; // f64::NAN
fn | () -> String {
let mut input = String::new();
match io::stdin().read_line(&mut input) {
Ok(string) => string,
Err(err) => panic!("Error: {}", err),
};
input
}
fn main() {
let pairs: isize = get_input()
.trim()
.parse::<isize>().expect("Error: can't parse input");
let mut data: Vec<isize> = Vec::new();
for _ in 0..pairs {
let pairs: Vec<f64> = get_input()
.trim()
.split_whitespace()
.map(|s| s.parse::<f64>().expect("Error: can't parse input"))
.collect();
let mut div = f64::NAN;
for num in pairs {
if div.is_nan() {
div = num;
}
else {
div /= num;
}
}
let div = div.round() as isize;
data.push(div);
}
println!("");
for ans in data {
print!("{} ", ans);
}
println!("");
}
| get_input | identifier_name |
procedural_mbe_matching.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![crate_type="dylib"]
#![feature(plugin_registrar, quote, rustc_private)]
extern crate syntax;
extern crate syntax_pos;
extern crate rustc;
extern crate rustc_plugin;
use syntax::feature_gate::Features;
use syntax::parse::token::{NtExpr, NtPat};
use syntax::ast::{Ident, Pat, NodeId};
use syntax::tokenstream::{TokenTree};
use syntax::ext::base::{ExtCtxt, MacResult, MacEager};
use syntax::ext::build::AstBuilder;
use syntax::ext::tt::quoted;
use syntax::ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
use syntax::ext::tt::macro_parser::{Success, Failure, Error};
use syntax::ext::tt::macro_parser::parse_failure_msg;
use syntax::ptr::P;
use syntax_pos::{Span, edition::Edition};
use rustc_plugin::Registry;
fn expand_mbe_matches(cx: &mut ExtCtxt, _: Span, args: &[TokenTree])
-> Box<MacResult +'static> {
let mbe_matcher = quote_tokens!(cx, $$matched:expr, $$($$pat:pat)|+);
let mbe_matcher = quoted::parse(mbe_matcher.into_iter().collect(),
true,
cx.parse_sess,
&Features::new(),
&[],
Edition::Edition2015,
// not used...
NodeId::from_u32(0));
let map = match TokenTree::parse(cx, &mbe_matcher, args.iter().cloned().collect()) {
Success(map) => map,
Failure(_, tok, msg) => {
panic!("expected Success, but got Failure: {} - {}", parse_failure_msg(tok), msg);
}
Error(_, s) => {
panic!("expected Success, but got Error: {}", s);
}
};
let matched_nt = match *map[&Ident::from_str("matched")] {
MatchedNonterminal(ref nt) => nt.clone(),
_ => unreachable!(),
};
let mac_expr = match (&*matched_nt, &*map[&Ident::from_str("pat")]) {
(&NtExpr(ref matched_expr), &MatchedSeq(ref pats, seq_sp)) => {
let pats: Vec<P<Pat>> = pats.iter().map(|pat_nt| {
match *pat_nt {
MatchedNonterminal(ref nt) => match **nt {
NtPat(ref pat) => pat.clone(),
_ => unreachable!(),
},
_ => unreachable!(),
}
}).collect();
let span = seq_sp.entire();
let arm = cx.arm(span, pats, cx.expr_bool(span, true));
quote_expr!(cx,
match $matched_expr {
$arm
_ => false
}
)
}
_ => unreachable!()
};
MacEager::expr(mac_expr)
}
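// Illustrative expansion (added note, not part of the original test): with this
// plugin loaded, `matches!(x, 1 | 2 | 3)` is rewritten by `expand_mbe_matches`
// into `match x { 1 | 2 | 3 => true, _ => false }`; the matcher above binds the
// first expression to $matched and each `|`-separated alternative to a $pat.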
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) | {
reg.register_macro("matches", expand_mbe_matches);
} | identifier_body |
|
procedural_mbe_matching.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![crate_type="dylib"]
#![feature(plugin_registrar, quote, rustc_private)]
extern crate syntax;
extern crate syntax_pos;
extern crate rustc;
extern crate rustc_plugin;
use syntax::feature_gate::Features;
use syntax::parse::token::{NtExpr, NtPat};
use syntax::ast::{Ident, Pat, NodeId};
use syntax::tokenstream::{TokenTree};
use syntax::ext::base::{ExtCtxt, MacResult, MacEager};
use syntax::ext::build::AstBuilder;
use syntax::ext::tt::quoted;
use syntax::ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
use syntax::ext::tt::macro_parser::{Success, Failure, Error};
use syntax::ext::tt::macro_parser::parse_failure_msg;
use syntax::ptr::P;
use syntax_pos::{Span, edition::Edition};
use rustc_plugin::Registry;
fn expand_mbe_matches(cx: &mut ExtCtxt, _: Span, args: &[TokenTree])
-> Box<MacResult +'static> {
let mbe_matcher = quote_tokens!(cx, $$matched:expr, $$($$pat:pat)|+);
let mbe_matcher = quoted::parse(mbe_matcher.into_iter().collect(),
true,
cx.parse_sess,
&Features::new(),
&[],
Edition::Edition2015,
// not used...
NodeId::from_u32(0));
let map = match TokenTree::parse(cx, &mbe_matcher, args.iter().cloned().collect()) {
Success(map) => map,
Failure(_, tok, msg) => {
panic!("expected Success, but got Failure: {} - {}", parse_failure_msg(tok), msg);
}
Error(_, s) => {
panic!("expected Success, but got Error: {}", s);
}
};
let matched_nt = match *map[&Ident::from_str("matched")] {
MatchedNonterminal(ref nt) => nt.clone(),
_ => unreachable!(),
};
let mac_expr = match (&*matched_nt, &*map[&Ident::from_str("pat")]) {
(&NtExpr(ref matched_expr), &MatchedSeq(ref pats, seq_sp)) => {
let pats: Vec<P<Pat>> = pats.iter().map(|pat_nt| {
match *pat_nt {
MatchedNonterminal(ref nt) => match **nt {
NtPat(ref pat) => pat.clone(),
_ => unreachable!(),
},
_ => unreachable!(),
}
}).collect();
let span = seq_sp.entire();
let arm = cx.arm(span, pats, cx.expr_bool(span, true));
quote_expr!(cx,
match $matched_expr {
$arm
_ => false
}
)
}
_ => unreachable!()
};
MacEager::expr(mac_expr)
}
#[plugin_registrar]
pub fn | (reg: &mut Registry) {
reg.register_macro("matches", expand_mbe_matches);
}
| plugin_registrar | identifier_name |
procedural_mbe_matching.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// force-host
#![crate_type="dylib"]
#![feature(plugin_registrar, quote, rustc_private)]
extern crate syntax;
extern crate syntax_pos;
extern crate rustc;
extern crate rustc_plugin;
use syntax::feature_gate::Features;
use syntax::parse::token::{NtExpr, NtPat};
use syntax::ast::{Ident, Pat, NodeId};
use syntax::tokenstream::{TokenTree};
use syntax::ext::base::{ExtCtxt, MacResult, MacEager};
use syntax::ext::build::AstBuilder;
use syntax::ext::tt::quoted;
use syntax::ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal};
use syntax::ext::tt::macro_parser::{Success, Failure, Error};
use syntax::ext::tt::macro_parser::parse_failure_msg;
use syntax::ptr::P; |
fn expand_mbe_matches(cx: &mut ExtCtxt, _: Span, args: &[TokenTree])
-> Box<MacResult +'static> {
let mbe_matcher = quote_tokens!(cx, $$matched:expr, $$($$pat:pat)|+);
let mbe_matcher = quoted::parse(mbe_matcher.into_iter().collect(),
true,
cx.parse_sess,
&Features::new(),
&[],
Edition::Edition2015,
// not used...
NodeId::from_u32(0));
let map = match TokenTree::parse(cx, &mbe_matcher, args.iter().cloned().collect()) {
Success(map) => map,
Failure(_, tok, msg) => {
panic!("expected Success, but got Failure: {} - {}", parse_failure_msg(tok), msg);
}
Error(_, s) => {
panic!("expected Success, but got Error: {}", s);
}
};
let matched_nt = match *map[&Ident::from_str("matched")] {
MatchedNonterminal(ref nt) => nt.clone(),
_ => unreachable!(),
};
let mac_expr = match (&*matched_nt, &*map[&Ident::from_str("pat")]) {
(&NtExpr(ref matched_expr), &MatchedSeq(ref pats, seq_sp)) => {
let pats: Vec<P<Pat>> = pats.iter().map(|pat_nt| {
match *pat_nt {
MatchedNonterminal(ref nt) => match **nt {
NtPat(ref pat) => pat.clone(),
_ => unreachable!(),
},
_ => unreachable!(),
}
}).collect();
let span = seq_sp.entire();
let arm = cx.arm(span, pats, cx.expr_bool(span, true));
quote_expr!(cx,
match $matched_expr {
$arm
_ => false
}
)
}
_ => unreachable!()
};
MacEager::expr(mac_expr)
}
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_macro("matches", expand_mbe_matches);
} | use syntax_pos::{Span, edition::Edition};
use rustc_plugin::Registry; | random_line_split |
issue-11612.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// #11612
// We weren't updating the auto adjustments with all the resolved
// type information after type check.
// pretty-expanded FIXME #23616
trait A { fn | (&self) { } }
struct B<'a, T: 'a> {
f: &'a T
}
impl<'a, T> A for B<'a, T> {}
fn foo(_: &A) {}
fn bar<G>(b: &B<G>) {
foo(b); // Coercion should work
foo(b as &A); // Explicit cast should work as well
}
fn main() {}
| dummy | identifier_name |
issue-11612.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// #11612
// We weren't updating the auto adjustments with all the resolved
// type information after type check.
// pretty-expanded FIXME #23616
trait A { fn dummy(&self) { } }
struct B<'a, T: 'a> {
f: &'a T
}
impl<'a, T> A for B<'a, T> {}
fn foo(_: &A) {}
fn bar<G>(b: &B<G>) {
foo(b); // Coercion should work
foo(b as &A); // Explicit cast should work as well
}
fn main() | {} | identifier_body |
|
issue-11612.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// #11612
// We weren't updating the auto adjustments with all the resolved
// type information after type check.
|
trait A { fn dummy(&self) { } }
struct B<'a, T: 'a> {
f: &'a T
}
impl<'a, T> A for B<'a, T> {}
fn foo(_: &A) {}
fn bar<G>(b: &B<G>) {
foo(b); // Coercion should work
foo(b as &A); // Explicit cast should work as well
}
fn main() {} | // pretty-expanded FIXME #23616 | random_line_split |
arm64_old.rs | // Copyright 2015 Ted Mielczarek. See the COPYRIGHT
// file at the top-level directory of this distribution.
// NOTE: arm64_old.rs and arm64.rs should be identical except for the names of
// their context types.
use crate::process_state::{FrameTrust, StackFrame};
use crate::stackwalker::unwind::Unwind;
use crate::stackwalker::CfiStackWalker;
use crate::{SymbolProvider, SystemInfo};
use log::trace;
use minidump::{
CpuContext, MinidumpContext, MinidumpContextValidity, MinidumpMemory, MinidumpModuleList,
MinidumpRawContext, Module,
};
use std::collections::HashSet;
type ArmContext = minidump::format::CONTEXT_ARM64_OLD;
type Pointer = <ArmContext as CpuContext>::Register;
type Registers = minidump::format::Arm64RegisterNumbers;
const POINTER_WIDTH: Pointer = std::mem::size_of::<Pointer>() as Pointer;
const FRAME_POINTER: &str = Registers::FramePointer.name();
const STACK_POINTER: &str = Registers::StackPointer.name();
const LINK_REGISTER: &str = Registers::LinkRegister.name();
const PROGRAM_COUNTER: &str = Registers::ProgramCounter.name();
const CALLEE_SAVED_REGS: &[&str] = &[
"x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp",
];
async fn get_caller_by_cfi<P>(
ctx: &ArmContext,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying cfi");
let valid = &callee.context.valid;
let _last_sp = ctx.get_register(STACK_POINTER, valid)?;
let module = modules.module_at_address(callee.instruction)?;
let grand_callee_parameter_size = grand_callee.and_then(|f| f.parameter_size).unwrap_or(0);
let has_grand_callee = grand_callee.is_some();
let mut stack_walker = CfiStackWalker {
instruction: callee.instruction,
has_grand_callee,
grand_callee_parameter_size,
callee_ctx: ctx,
callee_validity: valid,
// Default to forwarding all callee-saved regs verbatim.
// The CFI evaluator may clear or overwrite these values.
// The stack pointer and instruction pointer are not included.
caller_ctx: *ctx,
caller_validity: callee_forwarded_regs(valid),
stack_memory,
};
symbol_provider
.walk_frame(module, &mut stack_walker)
.await?;
let caller_pc = stack_walker.caller_ctx.get_register_always(PROGRAM_COUNTER);
let caller_sp = stack_walker.caller_ctx.get_register_always(STACK_POINTER);
trace!(
"unwind: cfi evaluation was successful -- caller_pc: 0x{:016x}, caller_sp: 0x{:016x}",
caller_pc,
caller_sp,
);
// Do absolutely NO validation! Yep! As long as CFI evaluation succeeds
// (which does include pc and sp resolving), just blindly assume the
// values are correct. I Don't Like This, but it's what breakpad does and
// we should start with a baseline of parity.
// FIXME?: for whatever reason breakpad actually does block on the address
// being canonical *ONLY* for arm64, which actually rejects null pc early!
// Let's not do that to keep our code more uniform.
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(stack_walker.caller_ctx),
valid: MinidumpContextValidity::Some(stack_walker.caller_validity),
};
Some(StackFrame::from_context(context, FrameTrust::CallFrameInfo))
}
fn callee_forwarded_regs(valid: &MinidumpContextValidity) -> HashSet<&'static str> {
match valid {
MinidumpContextValidity::All => CALLEE_SAVED_REGS.iter().copied().collect(),
MinidumpContextValidity::Some(ref which) => CALLEE_SAVED_REGS
.iter()
            .filter(|&reg| which.contains(reg))
.copied()
.collect(),
}
}
fn get_caller_by_frame_pointer<P>(
ctx: &ArmContext,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
_symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying frame pointer");
// Assume that the standard %fp-using ARM64 calling convention is in use.
// The main quirk of this ABI is that the return address doesn't need to
// be restored from the stack -- it's already in the link register (lr).
// But that means we need to save/restore lr itself so that the *caller's*
// return address can be recovered.
//
// In the standard calling convention, the following happens:
//
// PUSH fp, lr (save fp and lr to the stack -- ARM64 pushes in pairs)
// fp := sp (update the frame pointer to the current stack pointer)
// lr := pc (save the return address in the link register)
//
// So to restore the caller's registers, we have:
//
// pc := lr
// sp := fp + ptr*2
// lr := *(fp + ptr)
// fp := *fp
let valid = &callee.context.valid;
let last_fp = ctx.get_register(FRAME_POINTER, valid)?;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
let last_lr = match ctx.get_register(LINK_REGISTER, valid) {
Some(lr) => ptr_auth_strip(modules, lr),
None => {
// FIXME: it would be good to write this back to the callee's ctx/validity
get_link_register_by_frame_pointer(ctx, valid, stack_memory, grand_callee, modules)?
}
};
if last_fp as u64 >= u64::MAX - POINTER_WIDTH as u64 * 2 {
// Although this code generally works fine if the pointer math overflows,
// debug builds will still panic, and this guard protects against it without
// drowning the rest of the code in checked_add.
return None;
}
let caller_fp = stack_memory.get_memory_at_address(last_fp as u64)?;
let caller_lr = stack_memory.get_memory_at_address(last_fp + POINTER_WIDTH as u64)?;
let caller_lr = ptr_auth_strip(modules, caller_lr);
let caller_pc = last_lr;
// TODO: why does breakpad do this? How could we get this far with a null fp?
let caller_sp = if last_fp == 0 {
last_sp
} else {
last_fp + POINTER_WIDTH * 2
};
// TODO: restore all the other callee-save registers that weren't touched.
// unclear: does this mean we need to be aware of ".undef" entries at this point?
    // Breakpad's tests don't like it when we validate the frame pointer's value,
// so we don't check that.
// Don't accept obviously wrong instruction pointers.
if is_non_canonical(caller_pc) {
trace!("unwind: rejecting frame pointer result for unreasonable instruction pointer");
return None;
}
// Don't actually validate that the stack makes sense (duplicating breakpad behaviour).
trace!(
"unwind: frame pointer seems valid -- caller_pc: 0x{:016x}, caller_sp: 0x{:016x}",
caller_pc,
caller_sp,
);
let mut caller_ctx = ArmContext::default();
caller_ctx.set_register(PROGRAM_COUNTER, caller_pc);
caller_ctx.set_register(LINK_REGISTER, caller_lr);
caller_ctx.set_register(FRAME_POINTER, caller_fp);
caller_ctx.set_register(STACK_POINTER, caller_sp);
let mut valid = HashSet::new();
valid.insert(PROGRAM_COUNTER);
valid.insert(LINK_REGISTER);
valid.insert(FRAME_POINTER);
valid.insert(STACK_POINTER);
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(caller_ctx),
valid: MinidumpContextValidity::Some(valid),
};
Some(StackFrame::from_context(context, FrameTrust::FramePointer))
}
/// Restores the callee's link register from the stack.
fn get_link_register_by_frame_pointer(
ctx: &ArmContext,
valid: &MinidumpContextValidity,
stack_memory: &MinidumpMemory<'_>,
grand_callee: Option<&StackFrame>,
modules: &MinidumpModuleList,
) -> Option<Pointer> {
// It may happen that whatever unwinding strategy we're using managed to
// restore %fp but didn't restore %lr. Frame-pointer-based unwinding requires
// %lr because it contains the return address (the caller's %pc).
//
// In the standard ARM64 calling convention %fp and %lr are pushed together,
// so if the grand-callee appears to have been called with that convention
// then we can recover %lr using its %fp.
// We need the grand_callee's frame pointer
let grand_callee = grand_callee?;
let last_last_fp = if let MinidumpRawContext::OldArm64(ref ctx) = grand_callee.context.raw {
ctx.get_register(FRAME_POINTER, &grand_callee.context.valid)?
} else {
return None;
};
let presumed_last_fp: Pointer = stack_memory.get_memory_at_address(last_last_fp as u64)?;
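    // Illustrative layout of the grand-callee's frame record at `last_last_fp`,
    // assuming the standard "stp fp, lr" prologue described earlier in this file:
    //   [last_last_fp + 0] -> callee's saved fp (compared against `last_fp` below)
    //   [last_last_fp + 8] -> saved lr (read back below as the callee's %lr)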
// Make sure fp and sp aren't obviously garbage (are well-ordered)
let last_fp = ctx.get_register(FRAME_POINTER, valid)?;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
if last_fp <= last_sp {
return None;
}
// Make sure the grand-callee and callee agree on the value of fp
    if presumed_last_fp != last_fp {
return None;
}
// Now that we're pretty confident that frame pointers are valid, restore
// the callee's %lr, which should be right next to where its %fp is saved.
let last_lr = stack_memory.get_memory_at_address(last_last_fp + POINTER_WIDTH)?;
Some(ptr_auth_strip(modules, last_lr))
}
fn ptr_auth_strip(modules: &MinidumpModuleList, ptr: Pointer) -> Pointer {
// ARMv8.3 introduced a code hardening system called "Pointer Authentication"
// which is used on Apple platforms. It adds some extra high bits to the
// several pointers when they get pushed to memory. Interestingly
// this doesn't seem to affect return addresses pushed by a function call,
// but it does affect lr/fp registers that get pushed to the stack.
//
// Rather than actually thinking about how to recover the key and properly
// decode this, let's apply a simple heuristic. We get the maximum address
// that's contained in a module we know about, which will have some highest
// bit that is set. We can then safely mask out any bit that's higher than
// that one, which will hopefully mask out all the weird security stuff
// in the high bits.
if let Some(last_module) = modules.by_addr().next_back() {
// Get the highest mappable address
let mut mask = last_module.base_address() + last_module.size();
// Repeatedly OR this value with its shifted self to "smear" its
// highest set bit down to all lower bits. This will get us a
// mask we can use to AND out any bits that are higher.
mask |= mask >> 1;
mask |= mask >> 1;
mask |= mask >> 2;
mask |= mask >> 4;
mask |= mask >> 8;
mask |= mask >> 16;
mask |= mask >> 32;
let stripped = ptr & mask;
// Only actually use this stripped value if it ended up pointing in
// a module so we don't start corrupting normal pointers that are just
// in modules we don't know about.
if modules.module_at_address(stripped).is_some() {
// trace!("unwind: stripped pointer {:016x} -> {:016x}", ptr, stripped);
return stripped;
}
}
ptr
}
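// Worked example of the smearing trick above (illustrative; the address is made up):
// starting from a top mappable address whose highest set bit is bit 46, the OR-shift
// cascade fills every bit below it, so `ptr & mask` clears only the pointer
// authentication bits above the mappable range.
#[cfg(test)]
mod ptr_auth_strip_smear_example {
    #[test]
    fn smears_highest_bit_down() {
        let mut mask: u64 = 0x0000_7fff_ffff_0000;
        mask |= mask >> 1;
        mask |= mask >> 1;
        mask |= mask >> 2;
        mask |= mask >> 4;
        mask |= mask >> 8;
        mask |= mask >> 16;
        mask |= mask >> 32;
        assert_eq!(mask, 0x0000_7fff_ffff_ffff);
    }
}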
async fn get_caller_by_scan<P>(
ctx: &ArmContext,
callee: &StackFrame,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying scan");
// Stack scanning is just walking from the end of the frame until we encounter
// a value on the stack that looks like a pointer into some code (it's an address
// in a range covered by one of our modules). If we find such an instruction,
// we assume it's an pc value that was pushed by the CALL instruction that created
// the current frame. The next frame is then assumed to end just before that
// pc value.
let valid = &callee.context.valid;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
// Number of pointer-sized values to scan through in our search.
let default_scan_range = 40;
let extended_scan_range = default_scan_range * 4;
// Breakpad devs found that the first frame of an unwind can be really messed up,
// and therefore benefits from a longer scan. Let's do it too.
let scan_range = if let FrameTrust::Context = callee.trust {
extended_scan_range
} else {
default_scan_range
};
for i in 0..scan_range {
let address_of_pc = last_sp.checked_add(i * POINTER_WIDTH)?;
let caller_pc = stack_memory.get_memory_at_address(address_of_pc as u64)?;
if instruction_seems_valid(caller_pc, modules, symbol_provider).await {
// pc is pushed by CALL, so sp is just address_of_pc + ptr
let caller_sp = address_of_pc.checked_add(POINTER_WIDTH)?;
// Don't do any more validation, and don't try to restore fp
// (that's what breakpad does!)
trace!(
"unwind: scan seems valid -- caller_pc: 0x{:08x}, caller_sp: 0x{:08x}",
caller_pc,
caller_sp,
);
let mut caller_ctx = ArmContext::default();
caller_ctx.set_register(PROGRAM_COUNTER, caller_pc);
caller_ctx.set_register(STACK_POINTER, caller_sp);
let mut valid = HashSet::new();
valid.insert(PROGRAM_COUNTER);
valid.insert(STACK_POINTER);
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(caller_ctx),
valid: MinidumpContextValidity::Some(valid),
};
return Some(StackFrame::from_context(context, FrameTrust::Scan));
}
}
None
}
/// The most strict validation we have for instruction pointers.
///
/// This is only used for stack-scanning, because it's explicitly
/// trying to distinguish between total garbage and correct values.
/// cfi and frame_pointer approaches do not use this validation
/// because by default they're working with plausible/trustworthy
/// data.
///
/// Specifically, not using this validation allows cfi/fp methods
/// to unwind through frames we don't have mapped modules for (such as
/// OS APIs). This may seem confusing since we obviously don't have cfi
/// for unmapped modules!
///
/// The way this works is that we will use cfi to unwind some frame we
/// know about and *end up* in a function we know nothing about, but with
/// all the right register values. At this point, frame pointers will
/// often do the correct thing even though we don't know what code we're
/// in -- until we get back into code we do know about and cfi kicks back in.
/// At worst, this sets scanning up in a better position for success!
///
/// If we applied this more rigorous validation to cfi/fp methods, we
/// would just discard the correct register values from the known frame
/// and immediately start doing unreliable scans.
async fn instruction_seems_valid<P>(
instruction: Pointer,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> bool
where
P: SymbolProvider + Sync,
{
if is_non_canonical(instruction) || instruction == 0 {
return false;
}
super::instruction_seems_valid_by_symbols(instruction as u64, modules, symbol_provider).await
}
fn | (instruction: Pointer) -> bool {
// Reject instructions in the first page or above the user-space threshold.
!(0x1000..=0x000fffffffffffff).contains(&instruction)
}
/*
// ARM64 is currently hyper-permissive, so we don't use this,
// but here it is in case we change our minds!
fn stack_seems_valid(
caller_sp: Pointer,
callee_sp: Pointer,
stack_memory: &MinidumpMemory<'_>,
) -> bool {
// The stack shouldn't *grow* when we unwind
if caller_sp < callee_sp {
return false;
}
// The stack pointer should be in the stack
stack_memory
.get_memory_at_address::<Pointer>(caller_sp as u64)
.is_some()
}
*/
#[async_trait::async_trait]
impl Unwind for ArmContext {
async fn get_caller_frame<P>(
&self,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: Option<&MinidumpMemory<'_>>,
modules: &MinidumpModuleList,
_system_info: &SystemInfo,
syms: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
let stack = stack_memory.as_ref()?;
        // .await doesn't like closures, so don't use Option chaining
let mut frame = None;
if frame.is_none() {
frame = get_caller_by_cfi(self, callee, grand_callee, stack, modules, syms).await;
}
if frame.is_none() {
frame = get_caller_by_frame_pointer(self, callee, grand_callee, stack, modules, syms);
}
if frame.is_none() {
frame = get_caller_by_scan(self, callee, stack, modules, syms).await;
}
let mut frame = frame?;
// We now check the frame to see if it looks like unwinding is complete,
// based on the frame we computed having a nonsense value. Returning
// None signals to the unwinder to stop unwinding.
// if the instruction is within the first ~page of memory, it's basically
// null, and we can assume unwinding is complete.
if frame.context.get_instruction_pointer() < 4096 {
trace!("unwind: instruction pointer was nullish, assuming unwind complete");
return None;
}
// If the new stack pointer is at a lower address than the old,
// then that's clearly incorrect. Treat this as end-of-stack to
// enforce progress and avoid infinite loops.
let sp = frame.context.get_stack_pointer();
let last_sp = self.get_register_always("sp") as u64;
if sp <= last_sp {
// Arm leaf functions may not actually touch the stack (thanks
// to the link register allowing you to "push" the return address
// to a register), so we need to permit the stack pointer to not
// change for the first frame of the unwind. After that we need
// more strict validation to avoid infinite loops.
let is_leaf = callee.trust == FrameTrust::Context && sp == last_sp;
            if !is_leaf {
trace!("unwind: stack pointer went backwards, assuming unwind complete");
return None;
}
}
// Ok, the frame now seems well and truly valid, do final cleanup.
// A caller's ip is the return address, which is the instruction
// *after* the CALL that caused us to arrive at the callee. Set
// the value to 4 less than that, so it points to the CALL instruction
// (arm64 instructions are all 4 bytes wide). This is important because
// we use this value to lookup the CFI we need to unwind the next frame.
let ip = frame.context.get_instruction_pointer() as u64;
frame.instruction = ip - 4;
Some(frame)
}
}
| is_non_canonical | identifier_name |
arm64_old.rs | // Copyright 2015 Ted Mielczarek. See the COPYRIGHT
// file at the top-level directory of this distribution.
// NOTE: arm64_old.rs and arm64.rs should be identical except for the names of
// their context types.
use crate::process_state::{FrameTrust, StackFrame};
use crate::stackwalker::unwind::Unwind;
use crate::stackwalker::CfiStackWalker;
use crate::{SymbolProvider, SystemInfo};
use log::trace;
use minidump::{
CpuContext, MinidumpContext, MinidumpContextValidity, MinidumpMemory, MinidumpModuleList,
MinidumpRawContext, Module,
};
use std::collections::HashSet;
type ArmContext = minidump::format::CONTEXT_ARM64_OLD;
type Pointer = <ArmContext as CpuContext>::Register;
type Registers = minidump::format::Arm64RegisterNumbers;
const POINTER_WIDTH: Pointer = std::mem::size_of::<Pointer>() as Pointer;
const FRAME_POINTER: &str = Registers::FramePointer.name();
const STACK_POINTER: &str = Registers::StackPointer.name();
const LINK_REGISTER: &str = Registers::LinkRegister.name();
const PROGRAM_COUNTER: &str = Registers::ProgramCounter.name();
const CALLEE_SAVED_REGS: &[&str] = &[
"x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp",
];
async fn get_caller_by_cfi<P>(
ctx: &ArmContext,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying cfi");
let valid = &callee.context.valid;
let _last_sp = ctx.get_register(STACK_POINTER, valid)?;
let module = modules.module_at_address(callee.instruction)?;
let grand_callee_parameter_size = grand_callee.and_then(|f| f.parameter_size).unwrap_or(0);
let has_grand_callee = grand_callee.is_some();
let mut stack_walker = CfiStackWalker {
instruction: callee.instruction,
has_grand_callee,
grand_callee_parameter_size,
callee_ctx: ctx,
callee_validity: valid,
// Default to forwarding all callee-saved regs verbatim.
// The CFI evaluator may clear or overwrite these values.
// The stack pointer and instruction pointer are not included.
caller_ctx: *ctx,
caller_validity: callee_forwarded_regs(valid),
stack_memory,
};
symbol_provider
.walk_frame(module, &mut stack_walker)
.await?;
let caller_pc = stack_walker.caller_ctx.get_register_always(PROGRAM_COUNTER);
let caller_sp = stack_walker.caller_ctx.get_register_always(STACK_POINTER);
trace!(
"unwind: cfi evaluation was successful -- caller_pc: 0x{:016x}, caller_sp: 0x{:016x}",
caller_pc,
caller_sp,
);
// Do absolutely NO validation! Yep! As long as CFI evaluation succeeds
// (which does include pc and sp resolving), just blindly assume the
// values are correct. I Don't Like This, but it's what breakpad does and
// we should start with a baseline of parity.
// FIXME?: for whatever reason breakpad actually does block on the address
// being canonical *ONLY* for arm64, which actually rejects null pc early!
// Let's not do that to keep our code more uniform.
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(stack_walker.caller_ctx),
valid: MinidumpContextValidity::Some(stack_walker.caller_validity),
};
Some(StackFrame::from_context(context, FrameTrust::CallFrameInfo))
}
fn callee_forwarded_regs(valid: &MinidumpContextValidity) -> HashSet<&'static str> {
match valid {
MinidumpContextValidity::All => CALLEE_SAVED_REGS.iter().copied().collect(),
MinidumpContextValidity::Some(ref which) => CALLEE_SAVED_REGS
.iter()
            .filter(|&reg| which.contains(reg))
.copied()
.collect(),
}
}
fn get_caller_by_frame_pointer<P>(
ctx: &ArmContext,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
_symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying frame pointer");
// Assume that the standard %fp-using ARM64 calling convention is in use.
// The main quirk of this ABI is that the return address doesn't need to
// be restored from the stack -- it's already in the link register (lr).
// But that means we need to save/restore lr itself so that the *caller's*
// return address can be recovered.
//
// In the standard calling convention, the following happens:
//
// PUSH fp, lr (save fp and lr to the stack -- ARM64 pushes in pairs)
// fp := sp (update the frame pointer to the current stack pointer)
// lr := pc (save the return address in the link register)
//
// So to restore the caller's registers, we have:
//
// pc := lr
// sp := fp + ptr*2
// lr := *(fp + ptr)
// fp := *fp
let valid = &callee.context.valid;
let last_fp = ctx.get_register(FRAME_POINTER, valid)?;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
let last_lr = match ctx.get_register(LINK_REGISTER, valid) {
Some(lr) => ptr_auth_strip(modules, lr),
None => {
// FIXME: it would be good to write this back to the callee's ctx/validity
get_link_register_by_frame_pointer(ctx, valid, stack_memory, grand_callee, modules)?
}
};
if last_fp as u64 >= u64::MAX - POINTER_WIDTH as u64 * 2 {
// Although this code generally works fine if the pointer math overflows,
// debug builds will still panic, and this guard protects against it without
// drowning the rest of the code in checked_add.
return None;
}
let caller_fp = stack_memory.get_memory_at_address(last_fp as u64)?;
let caller_lr = stack_memory.get_memory_at_address(last_fp + POINTER_WIDTH as u64)?;
let caller_lr = ptr_auth_strip(modules, caller_lr);
let caller_pc = last_lr;
// TODO: why does breakpad do this? How could we get this far with a null fp?
let caller_sp = if last_fp == 0 {
last_sp
} else {
last_fp + POINTER_WIDTH * 2
};
// TODO: restore all the other callee-save registers that weren't touched.
// unclear: does this mean we need to be aware of ".undef" entries at this point?
    // Breakpad's tests don't like it when we validate the frame pointer's value,
// so we don't check that.
// Don't accept obviously wrong instruction pointers.
if is_non_canonical(caller_pc) {
trace!("unwind: rejecting frame pointer result for unreasonable instruction pointer");
return None;
}
// Don't actually validate that the stack makes sense (duplicating breakpad behaviour).
trace!(
"unwind: frame pointer seems valid -- caller_pc: 0x{:016x}, caller_sp: 0x{:016x}",
caller_pc,
caller_sp,
);
let mut caller_ctx = ArmContext::default();
caller_ctx.set_register(PROGRAM_COUNTER, caller_pc);
caller_ctx.set_register(LINK_REGISTER, caller_lr);
caller_ctx.set_register(FRAME_POINTER, caller_fp);
caller_ctx.set_register(STACK_POINTER, caller_sp);
let mut valid = HashSet::new();
valid.insert(PROGRAM_COUNTER);
valid.insert(LINK_REGISTER);
valid.insert(FRAME_POINTER);
valid.insert(STACK_POINTER);
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(caller_ctx),
valid: MinidumpContextValidity::Some(valid),
};
Some(StackFrame::from_context(context, FrameTrust::FramePointer))
}
/// Restores the callee's link register from the stack.
fn get_link_register_by_frame_pointer(
ctx: &ArmContext,
valid: &MinidumpContextValidity,
stack_memory: &MinidumpMemory<'_>,
grand_callee: Option<&StackFrame>,
modules: &MinidumpModuleList,
) -> Option<Pointer> {
// It may happen that whatever unwinding strategy we're using managed to
// restore %fp but didn't restore %lr. Frame-pointer-based unwinding requires
// %lr because it contains the return address (the caller's %pc).
//
// In the standard ARM64 calling convention %fp and %lr are pushed together,
// so if the grand-callee appears to have been called with that convention
// then we can recover %lr using its %fp.
// We need the grand_callee's frame pointer
let grand_callee = grand_callee?;
let last_last_fp = if let MinidumpRawContext::OldArm64(ref ctx) = grand_callee.context.raw {
ctx.get_register(FRAME_POINTER, &grand_callee.context.valid)?
} else {
return None;
};
let presumed_last_fp: Pointer = stack_memory.get_memory_at_address(last_last_fp as u64)?;
// Make sure fp and sp aren't obviously garbage (are well-ordered)
let last_fp = ctx.get_register(FRAME_POINTER, valid)?;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
if last_fp <= last_sp {
return None;
}
// Make sure the grand-callee and callee agree on the value of fp
    if presumed_last_fp != last_fp {
return None;
}
// Now that we're pretty confident that frame pointers are valid, restore
// the callee's %lr, which should be right next to where its %fp is saved.
let last_lr = stack_memory.get_memory_at_address(last_last_fp + POINTER_WIDTH)?;
Some(ptr_auth_strip(modules, last_lr))
}
fn ptr_auth_strip(modules: &MinidumpModuleList, ptr: Pointer) -> Pointer {
// ARMv8.3 introduced a code hardening system called "Pointer Authentication"
// which is used on Apple platforms. It adds some extra high bits to the
// several pointers when they get pushed to memory. Interestingly
// this doesn't seem to affect return addresses pushed by a function call,
// but it does affect lr/fp registers that get pushed to the stack.
//
// Rather than actually thinking about how to recover the key and properly
// decode this, let's apply a simple heuristic. We get the maximum address
// that's contained in a module we know about, which will have some highest
// bit that is set. We can then safely mask out any bit that's higher than
// that one, which will hopefully mask out all the weird security stuff
// in the high bits.
if let Some(last_module) = modules.by_addr().next_back() {
// Get the highest mappable address
let mut mask = last_module.base_address() + last_module.size();
// Repeatedly OR this value with its shifted self to "smear" its
// highest set bit down to all lower bits. This will get us a
// mask we can use to AND out any bits that are higher.
mask |= mask >> 1;
mask |= mask >> 1;
mask |= mask >> 2;
mask |= mask >> 4;
mask |= mask >> 8;
mask |= mask >> 16;
mask |= mask >> 32;
let stripped = ptr & mask;
// Only actually use this stripped value if it ended up pointing in
// a module so we don't start corrupting normal pointers that are just
// in modules we don't know about.
if modules.module_at_address(stripped).is_some() {
// trace!("unwind: stripped pointer {:016x} -> {:016x}", ptr, stripped);
return stripped;
}
}
ptr
}
async fn get_caller_by_scan<P>(
ctx: &ArmContext,
callee: &StackFrame,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying scan");
// Stack scanning is just walking from the end of the frame until we encounter
// a value on the stack that looks like a pointer into some code (it's an address
// in a range covered by one of our modules). If we find such an instruction,
// we assume it's an pc value that was pushed by the CALL instruction that created
// the current frame. The next frame is then assumed to end just before that
// pc value.
let valid = &callee.context.valid;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
// Number of pointer-sized values to scan through in our search.
let default_scan_range = 40;
let extended_scan_range = default_scan_range * 4;
// Breakpad devs found that the first frame of an unwind can be really messed up,
// and therefore benefits from a longer scan. Let's do it too.
let scan_range = if let FrameTrust::Context = callee.trust {
extended_scan_range
} else {
default_scan_range
};
for i in 0..scan_range {
let address_of_pc = last_sp.checked_add(i * POINTER_WIDTH)?;
let caller_pc = stack_memory.get_memory_at_address(address_of_pc as u64)?;
if instruction_seems_valid(caller_pc, modules, symbol_provider).await {
// pc is pushed by CALL, so sp is just address_of_pc + ptr
let caller_sp = address_of_pc.checked_add(POINTER_WIDTH)?;
// Don't do any more validation, and don't try to restore fp
// (that's what breakpad does!)
trace!(
"unwind: scan seems valid -- caller_pc: 0x{:08x}, caller_sp: 0x{:08x}",
caller_pc,
caller_sp,
);
let mut caller_ctx = ArmContext::default();
caller_ctx.set_register(PROGRAM_COUNTER, caller_pc);
caller_ctx.set_register(STACK_POINTER, caller_sp);
let mut valid = HashSet::new();
valid.insert(PROGRAM_COUNTER);
valid.insert(STACK_POINTER);
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(caller_ctx),
valid: MinidumpContextValidity::Some(valid),
};
return Some(StackFrame::from_context(context, FrameTrust::Scan));
}
}
None
}
/// The most strict validation we have for instruction pointers.
///
/// This is only used for stack-scanning, because it's explicitly
/// trying to distinguish between total garbage and correct values.
/// cfi and frame_pointer approaches do not use this validation
/// because by default they're working with plausible/trustworthy
/// data.
///
/// Specifically, not using this validation allows cfi/fp methods
/// to unwind through frames we don't have mapped modules for (such as
/// OS APIs). This may seem confusing since we obviously don't have cfi
/// for unmapped modules!
///
/// The way this works is that we will use cfi to unwind some frame we
/// know about and *end up* in a function we know nothing about, but with
/// all the right register values. At this point, frame pointers will
/// often do the correct thing even though we don't know what code we're
/// in -- until we get back into code we do know about and cfi kicks back in.
/// At worst, this sets scanning up in a better position for success!
///
/// If we applied this more rigorous validation to cfi/fp methods, we
/// would just discard the correct register values from the known frame
/// and immediately start doing unreliable scans.
async fn instruction_seems_valid<P>(
instruction: Pointer,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> bool
where
P: SymbolProvider + Sync,
{
if is_non_canonical(instruction) || instruction == 0 {
return false;
}
super::instruction_seems_valid_by_symbols(instruction as u64, modules, symbol_provider).await
}
fn is_non_canonical(instruction: Pointer) -> bool {
// Reject instructions in the first page or above the user-space threshold.
!(0x1000..=0x000fffffffffffff).contains(&instruction)
}
/*
// ARM64 is currently hyper-permissive, so we don't use this,
// but here it is in case we change our minds!
fn stack_seems_valid(
caller_sp: Pointer,
callee_sp: Pointer,
stack_memory: &MinidumpMemory<'_>,
) -> bool {
// The stack shouldn't *grow* when we unwind
if caller_sp < callee_sp {
return false;
}
// The stack pointer should be in the stack
stack_memory
.get_memory_at_address::<Pointer>(caller_sp as u64)
.is_some()
}
*/
#[async_trait::async_trait]
impl Unwind for ArmContext {
async fn get_caller_frame<P>(
&self,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: Option<&MinidumpMemory<'_>>,
modules: &MinidumpModuleList,
_system_info: &SystemInfo,
syms: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
| // if the instruction is within the first ~page of memory, it's basically
// null, and we can assume unwinding is complete.
if frame.context.get_instruction_pointer() < 4096 {
trace!("unwind: instruction pointer was nullish, assuming unwind complete");
return None;
}
// If the new stack pointer is at a lower address than the old,
// then that's clearly incorrect. Treat this as end-of-stack to
// enforce progress and avoid infinite loops.
let sp = frame.context.get_stack_pointer();
let last_sp = self.get_register_always("sp") as u64;
if sp <= last_sp {
// Arm leaf functions may not actually touch the stack (thanks
// to the link register allowing you to "push" the return address
// to a register), so we need to permit the stack pointer to not
// change for the first frame of the unwind. After that we need
// more strict validation to avoid infinite loops.
let is_leaf = callee.trust == FrameTrust::Context && sp == last_sp;
if !is_leaf {
trace!("unwind: stack pointer went backwards, assuming unwind complete");
return None;
}
}
// Ok, the frame now seems well and truly valid, do final cleanup.
// A caller's ip is the return address, which is the instruction
// *after* the CALL that caused us to arrive at the callee. Set
// the value to 4 less than that, so it points to the CALL instruction
// (arm64 instructions are all 4 bytes wide). This is important because
// we use this value to lookup the CFI we need to unwind the next frame.
let ip = frame.context.get_instruction_pointer() as u64;
frame.instruction = ip - 4;
Some(frame)
}
}
| {
let stack = stack_memory.as_ref()?;
// .await doesn't like closures, so don't use Option chaining
let mut frame = None;
if frame.is_none() {
frame = get_caller_by_cfi(self, callee, grand_callee, stack, modules, syms).await;
}
if frame.is_none() {
frame = get_caller_by_frame_pointer(self, callee, grand_callee, stack, modules, syms);
}
if frame.is_none() {
frame = get_caller_by_scan(self, callee, stack, modules, syms).await;
}
let mut frame = frame?;
// We now check the frame to see if it looks like unwinding is complete,
// based on the frame we computed having a nonsense value. Returning
// None signals to the unwinder to stop unwinding.
| identifier_body |
arm64_old.rs | // Copyright 2015 Ted Mielczarek. See the COPYRIGHT
// file at the top-level directory of this distribution.
// NOTE: arm64_old.rs and arm64.rs should be identical except for the names of
// their context types.
use crate::process_state::{FrameTrust, StackFrame};
use crate::stackwalker::unwind::Unwind;
use crate::stackwalker::CfiStackWalker;
use crate::{SymbolProvider, SystemInfo};
use log::trace;
use minidump::{
CpuContext, MinidumpContext, MinidumpContextValidity, MinidumpMemory, MinidumpModuleList,
MinidumpRawContext, Module,
};
use std::collections::HashSet;
type ArmContext = minidump::format::CONTEXT_ARM64_OLD;
type Pointer = <ArmContext as CpuContext>::Register;
type Registers = minidump::format::Arm64RegisterNumbers;
const POINTER_WIDTH: Pointer = std::mem::size_of::<Pointer>() as Pointer;
const FRAME_POINTER: &str = Registers::FramePointer.name();
const STACK_POINTER: &str = Registers::StackPointer.name();
const LINK_REGISTER: &str = Registers::LinkRegister.name();
const PROGRAM_COUNTER: &str = Registers::ProgramCounter.name();
const CALLEE_SAVED_REGS: &[&str] = &[
"x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp",
];
async fn get_caller_by_cfi<P>(
ctx: &ArmContext,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying cfi");
let valid = &callee.context.valid;
let _last_sp = ctx.get_register(STACK_POINTER, valid)?;
let module = modules.module_at_address(callee.instruction)?;
let grand_callee_parameter_size = grand_callee.and_then(|f| f.parameter_size).unwrap_or(0);
let has_grand_callee = grand_callee.is_some();
let mut stack_walker = CfiStackWalker {
instruction: callee.instruction,
has_grand_callee,
grand_callee_parameter_size,
callee_ctx: ctx,
callee_validity: valid,
// Default to forwarding all callee-saved regs verbatim.
// The CFI evaluator may clear or overwrite these values.
// The stack pointer and instruction pointer are not included.
caller_ctx: *ctx,
caller_validity: callee_forwarded_regs(valid),
stack_memory,
};
symbol_provider
.walk_frame(module, &mut stack_walker)
.await?;
let caller_pc = stack_walker.caller_ctx.get_register_always(PROGRAM_COUNTER);
let caller_sp = stack_walker.caller_ctx.get_register_always(STACK_POINTER);
trace!(
"unwind: cfi evaluation was successful -- caller_pc: 0x{:016x}, caller_sp: 0x{:016x}",
caller_pc,
caller_sp,
);
// Do absolutely NO validation! Yep! As long as CFI evaluation succeeds
// (which does include pc and sp resolving), just blindly assume the
// values are correct. I Don't Like This, but it's what breakpad does and
// we should start with a baseline of parity.
// FIXME?: for whatever reason breakpad actually does block on the address
// being canonical *ONLY* for arm64, which actually rejects null pc early!
// Let's not do that to keep our code more uniform.
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(stack_walker.caller_ctx),
valid: MinidumpContextValidity::Some(stack_walker.caller_validity),
};
Some(StackFrame::from_context(context, FrameTrust::CallFrameInfo))
}
fn callee_forwarded_regs(valid: &MinidumpContextValidity) -> HashSet<&'static str> {
match valid {
MinidumpContextValidity::All => CALLEE_SAVED_REGS.iter().copied().collect(),
MinidumpContextValidity::Some(ref which) => CALLEE_SAVED_REGS
.iter()
.filter(|&reg| which.contains(reg))
.copied()
.collect(),
}
}
fn get_caller_by_frame_pointer<P>(
ctx: &ArmContext,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
_symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying frame pointer");
// Assume that the standard %fp-using ARM64 calling convention is in use.
// The main quirk of this ABI is that the return address doesn't need to
// be restored from the stack -- it's already in the link register (lr).
// But that means we need to save/restore lr itself so that the *caller's*
// return address can be recovered.
//
// In the standard calling convention, the following happens:
//
// PUSH fp, lr (save fp and lr to the stack -- ARM64 pushes in pairs)
// fp := sp (update the frame pointer to the current stack pointer)
// lr := pc (save the return address in the link register)
//
// So to restore the caller's registers, we have:
//
// pc := lr
// sp := fp + ptr*2
// lr := *(fp + ptr)
// fp := *fp
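// A purely hypothetical layout, assuming 8-byte pointers, to make the math concrete:
// with fp = 0x0000_7ffe_e010, *fp would hold the caller's fp (say 0x0000_7ffe_e050),
// *(fp + 8) the caller's return address, and the recovered sp = fp + 16 = 0x0000_7ffe_e020.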
let valid = &callee.context.valid;
let last_fp = ctx.get_register(FRAME_POINTER, valid)?;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
let last_lr = match ctx.get_register(LINK_REGISTER, valid) {
Some(lr) => ptr_auth_strip(modules, lr),
None => {
// FIXME: it would be good to write this back to the callee's ctx/validity
get_link_register_by_frame_pointer(ctx, valid, stack_memory, grand_callee, modules)?
}
};
if last_fp as u64 >= u64::MAX - POINTER_WIDTH as u64 * 2 {
// Although this code generally works fine if the pointer math overflows,
// debug builds will still panic, and this guard protects against it without
// drowning the rest of the code in checked_add.
return None;
}
let caller_fp = stack_memory.get_memory_at_address(last_fp as u64)?;
let caller_lr = stack_memory.get_memory_at_address(last_fp + POINTER_WIDTH as u64)?;
let caller_lr = ptr_auth_strip(modules, caller_lr);
let caller_pc = last_lr;
// TODO: why does breakpad do this? How could we get this far with a null fp?
let caller_sp = if last_fp == 0 {
last_sp
} else {
last_fp + POINTER_WIDTH * 2
};
// TODO: restore all the other callee-save registers that weren't touched.
// unclear: does this mean we need to be aware of ".undef" entries at this point?
// Breakpad's tests don't like it when we validate the frame pointer's value,
// so we don't check that.
// Don't accept obviously wrong instruction pointers.
if is_non_canonical(caller_pc) {
trace!("unwind: rejecting frame pointer result for unreasonable instruction pointer");
return None;
}
// Don't actually validate that the stack makes sense (duplicating breakpad behaviour).
trace!(
"unwind: frame pointer seems valid -- caller_pc: 0x{:016x}, caller_sp: 0x{:016x}",
caller_pc,
caller_sp,
);
let mut caller_ctx = ArmContext::default();
caller_ctx.set_register(PROGRAM_COUNTER, caller_pc);
caller_ctx.set_register(LINK_REGISTER, caller_lr);
caller_ctx.set_register(FRAME_POINTER, caller_fp);
caller_ctx.set_register(STACK_POINTER, caller_sp);
let mut valid = HashSet::new();
valid.insert(PROGRAM_COUNTER);
valid.insert(LINK_REGISTER);
valid.insert(FRAME_POINTER);
valid.insert(STACK_POINTER);
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(caller_ctx),
valid: MinidumpContextValidity::Some(valid),
};
Some(StackFrame::from_context(context, FrameTrust::FramePointer))
}
/// Restores the callee's link register from the stack.
fn get_link_register_by_frame_pointer(
ctx: &ArmContext,
valid: &MinidumpContextValidity,
stack_memory: &MinidumpMemory<'_>,
grand_callee: Option<&StackFrame>,
modules: &MinidumpModuleList,
) -> Option<Pointer> {
// It may happen that whatever unwinding strategy we're using managed to
// restore %fp but didn't restore %lr. Frame-pointer-based unwinding requires
// %lr because it contains the return address (the caller's %pc).
//
// In the standard ARM64 calling convention %fp and %lr are pushed together,
// so if the grand-callee appears to have been called with that convention
// then we can recover %lr using its %fp.
// We need the grand_callee's frame pointer
let grand_callee = grand_callee?;
let last_last_fp = if let MinidumpRawContext::OldArm64(ref ctx) = grand_callee.context.raw {
ctx.get_register(FRAME_POINTER, &grand_callee.context.valid)?
} else {
return None;
};
let presumed_last_fp: Pointer = stack_memory.get_memory_at_address(last_last_fp as u64)?;
// Make sure fp and sp aren't obviously garbage (are well-ordered)
let last_fp = ctx.get_register(FRAME_POINTER, valid)?;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
if last_fp <= last_sp {
return None;
}
// Make sure the grand-callee and callee agree on the value of fp
if presumed_last_fp != last_fp {
return None;
}
// Now that we're pretty confident that frame pointers are valid, restore
// the callee's %lr, which should be right next to where its %fp is saved.
let last_lr = stack_memory.get_memory_at_address(last_last_fp + POINTER_WIDTH)?;
Some(ptr_auth_strip(modules, last_lr))
}
fn ptr_auth_strip(modules: &MinidumpModuleList, ptr: Pointer) -> Pointer {
// ARMv8.3 introduced a code hardening system called "Pointer Authentication"
// which is used on Apple platforms. It adds some extra high bits to
// several pointers when they get pushed to memory. Interestingly
// this doesn't seem to affect return addresses pushed by a function call,
// but it does affect lr/fp registers that get pushed to the stack.
//
// Rather than actually thinking about how to recover the key and properly
// decode this, let's apply a simple heuristic. We get the maximum address
// that's contained in a module we know about, which will have some highest
// bit that is set. We can then safely mask out any bit that's higher than
// that one, which will hopefully mask out all the weird security stuff
// in the high bits.
if let Some(last_module) = modules.by_addr().next_back() {
// Get the highest mappable address
let mut mask = last_module.base_address() + last_module.size();
// Repeatedly OR this value with its shifted self to "smear" its
// highest set bit down to all lower bits. This will get us a
// mask we can use to AND out any bits that are higher.
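// Hypothetical example: if the highest mapped address were 0x0000_0001_2345_6789,
// the smeared mask becomes 0x0000_0001_ffff_ffff, so `ptr & mask` clears any
// authentication bits above bit 32 while leaving ordinary addresses untouched.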
mask |= mask >> 1;
mask |= mask >> 1;
mask |= mask >> 2;
mask |= mask >> 4;
mask |= mask >> 8;
mask |= mask >> 16;
mask |= mask >> 32;
let stripped = ptr & mask;
// Only actually use this stripped value if it ended up pointing in
// a module so we don't start corrupting normal pointers that are just
// in modules we don't know about.
if modules.module_at_address(stripped).is_some() {
// trace!("unwind: stripped pointer {:016x} -> {:016x}", ptr, stripped);
return stripped;
}
}
| ctx: &ArmContext,
callee: &StackFrame,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying scan");
// Stack scanning is just walking from the end of the frame until we encounter
// a value on the stack that looks like a pointer into some code (it's an address
// in a range covered by one of our modules). If we find such an instruction,
// we assume it's a pc value that was pushed by the CALL instruction that created
// the current frame. The next frame is then assumed to end just before that
// pc value.
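// With POINTER_WIDTH = 8 this scans sp, sp + 8, sp + 16, ... covering roughly
// 320 bytes of stack by default (40 slots) or 1280 bytes for the first frame.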
let valid = &callee.context.valid;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
// Number of pointer-sized values to scan through in our search.
let default_scan_range = 40;
let extended_scan_range = default_scan_range * 4;
// Breakpad devs found that the first frame of an unwind can be really messed up,
// and therefore benefits from a longer scan. Let's do it too.
let scan_range = if let FrameTrust::Context = callee.trust {
extended_scan_range
} else {
default_scan_range
};
for i in 0..scan_range {
let address_of_pc = last_sp.checked_add(i * POINTER_WIDTH)?;
let caller_pc = stack_memory.get_memory_at_address(address_of_pc as u64)?;
if instruction_seems_valid(caller_pc, modules, symbol_provider).await {
// pc is pushed by CALL, so sp is just address_of_pc + ptr
let caller_sp = address_of_pc.checked_add(POINTER_WIDTH)?;
// Don't do any more validation, and don't try to restore fp
// (that's what breakpad does!)
trace!(
"unwind: scan seems valid -- caller_pc: 0x{:08x}, caller_sp: 0x{:08x}",
caller_pc,
caller_sp,
);
let mut caller_ctx = ArmContext::default();
caller_ctx.set_register(PROGRAM_COUNTER, caller_pc);
caller_ctx.set_register(STACK_POINTER, caller_sp);
let mut valid = HashSet::new();
valid.insert(PROGRAM_COUNTER);
valid.insert(STACK_POINTER);
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(caller_ctx),
valid: MinidumpContextValidity::Some(valid),
};
return Some(StackFrame::from_context(context, FrameTrust::Scan));
}
}
None
}
/// The most strict validation we have for instruction pointers.
///
/// This is only used for stack-scanning, because it's explicitly
/// trying to distinguish between total garbage and correct values.
/// cfi and frame_pointer approaches do not use this validation
/// because by default they're working with plausible/trustworthy
/// data.
///
/// Specifically, not using this validation allows cfi/fp methods
/// to unwind through frames we don't have mapped modules for (such as
/// OS APIs). This may seem confusing since we obviously don't have cfi
/// for unmapped modules!
///
/// The way this works is that we will use cfi to unwind some frame we
/// know about and *end up* in a function we know nothing about, but with
/// all the right register values. At this point, frame pointers will
/// often do the correct thing even though we don't know what code we're
/// in -- until we get back into code we do know about and cfi kicks back in.
/// At worst, this sets scanning up in a better position for success!
///
/// If we applied this more rigorous validation to cfi/fp methods, we
/// would just discard the correct register values from the known frame
/// and immediately start doing unreliable scans.
async fn instruction_seems_valid<P>(
instruction: Pointer,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> bool
where
P: SymbolProvider + Sync,
{
if is_non_canonical(instruction) || instruction == 0 {
return false;
}
super::instruction_seems_valid_by_symbols(instruction as u64, modules, symbol_provider).await
}
fn is_non_canonical(instruction: Pointer) -> bool {
// Reject instructions in the first page or above the user-space threshold.
!(0x1000..=0x000fffffffffffff).contains(&instruction)
}
/*
// ARM64 is currently hyper-permissive, so we don't use this,
// but here it is in case we change our minds!
fn stack_seems_valid(
caller_sp: Pointer,
callee_sp: Pointer,
stack_memory: &MinidumpMemory<'_>,
) -> bool {
// The stack shouldn't *grow* when we unwind
if caller_sp < callee_sp {
return false;
}
// The stack pointer should be in the stack
stack_memory
.get_memory_at_address::<Pointer>(caller_sp as u64)
.is_some()
}
*/
#[async_trait::async_trait]
impl Unwind for ArmContext {
async fn get_caller_frame<P>(
&self,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: Option<&MinidumpMemory<'_>>,
modules: &MinidumpModuleList,
_system_info: &SystemInfo,
syms: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
let stack = stack_memory.as_ref()?;
// .await doesn't like closures, so don't use Option chaining
let mut frame = None;
if frame.is_none() {
frame = get_caller_by_cfi(self, callee, grand_callee, stack, modules, syms).await;
}
if frame.is_none() {
frame = get_caller_by_frame_pointer(self, callee, grand_callee, stack, modules, syms);
}
if frame.is_none() {
frame = get_caller_by_scan(self, callee, stack, modules, syms).await;
}
let mut frame = frame?;
// We now check the frame to see if it looks like unwinding is complete,
// based on the frame we computed having a nonsense value. Returning
// None signals to the unwinder to stop unwinding.
// if the instruction is within the first ~page of memory, it's basically
// null, and we can assume unwinding is complete.
if frame.context.get_instruction_pointer() < 4096 {
trace!("unwind: instruction pointer was nullish, assuming unwind complete");
return None;
}
// If the new stack pointer is at a lower address than the old,
// then that's clearly incorrect. Treat this as end-of-stack to
// enforce progress and avoid infinite loops.
let sp = frame.context.get_stack_pointer();
let last_sp = self.get_register_always("sp") as u64;
if sp <= last_sp {
// Arm leaf functions may not actually touch the stack (thanks
// to the link register allowing you to "push" the return address
// to a register), so we need to permit the stack pointer to not
// change for the first frame of the unwind. After that we need
// more strict validation to avoid infinite loops.
let is_leaf = callee.trust == FrameTrust::Context && sp == last_sp;
if !is_leaf {
trace!("unwind: stack pointer went backwards, assuming unwind complete");
return None;
}
}
// Ok, the frame now seems well and truly valid, do final cleanup.
// A caller's ip is the return address, which is the instruction
// *after* the CALL that caused us to arrive at the callee. Set
// the value to 4 less than that, so it points to the CALL instruction
// (arm64 instructions are all 4 bytes wide). This is important because
// we use this value to lookup the CFI we need to unwind the next frame.
let ip = frame.context.get_instruction_pointer() as u64;
frame.instruction = ip - 4;
Some(frame)
}
} | ptr
}
async fn get_caller_by_scan<P>( | random_line_split |
arm64_old.rs | // Copyright 2015 Ted Mielczarek. See the COPYRIGHT
// file at the top-level directory of this distribution.
// NOTE: arm64_old.rs and arm64.rs should be identical except for the names of
// their context types.
use crate::process_state::{FrameTrust, StackFrame};
use crate::stackwalker::unwind::Unwind;
use crate::stackwalker::CfiStackWalker;
use crate::{SymbolProvider, SystemInfo};
use log::trace;
use minidump::{
CpuContext, MinidumpContext, MinidumpContextValidity, MinidumpMemory, MinidumpModuleList,
MinidumpRawContext, Module,
};
use std::collections::HashSet;
type ArmContext = minidump::format::CONTEXT_ARM64_OLD;
type Pointer = <ArmContext as CpuContext>::Register;
type Registers = minidump::format::Arm64RegisterNumbers;
const POINTER_WIDTH: Pointer = std::mem::size_of::<Pointer>() as Pointer;
const FRAME_POINTER: &str = Registers::FramePointer.name();
const STACK_POINTER: &str = Registers::StackPointer.name();
const LINK_REGISTER: &str = Registers::LinkRegister.name();
const PROGRAM_COUNTER: &str = Registers::ProgramCounter.name();
const CALLEE_SAVED_REGS: &[&str] = &[
"x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "fp",
];
async fn get_caller_by_cfi<P>(
ctx: &ArmContext,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying cfi");
let valid = &callee.context.valid;
let _last_sp = ctx.get_register(STACK_POINTER, valid)?;
let module = modules.module_at_address(callee.instruction)?;
let grand_callee_parameter_size = grand_callee.and_then(|f| f.parameter_size).unwrap_or(0);
let has_grand_callee = grand_callee.is_some();
let mut stack_walker = CfiStackWalker {
instruction: callee.instruction,
has_grand_callee,
grand_callee_parameter_size,
callee_ctx: ctx,
callee_validity: valid,
// Default to forwarding all callee-saved regs verbatim.
// The CFI evaluator may clear or overwrite these values.
// The stack pointer and instruction pointer are not included.
caller_ctx: *ctx,
caller_validity: callee_forwarded_regs(valid),
stack_memory,
};
symbol_provider
.walk_frame(module, &mut stack_walker)
.await?;
let caller_pc = stack_walker.caller_ctx.get_register_always(PROGRAM_COUNTER);
let caller_sp = stack_walker.caller_ctx.get_register_always(STACK_POINTER);
trace!(
"unwind: cfi evaluation was successful -- caller_pc: 0x{:016x}, caller_sp: 0x{:016x}",
caller_pc,
caller_sp,
);
// Do absolutely NO validation! Yep! As long as CFI evaluation succeeds
// (which does include pc and sp resolving), just blindly assume the
// values are correct. I Don't Like This, but it's what breakpad does and
// we should start with a baseline of parity.
// FIXME?: for whatever reason breakpad actually does block on the address
// being canonical *ONLY* for arm64, which actually rejects null pc early!
// Let's not do that to keep our code more uniform.
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(stack_walker.caller_ctx),
valid: MinidumpContextValidity::Some(stack_walker.caller_validity),
};
Some(StackFrame::from_context(context, FrameTrust::CallFrameInfo))
}
fn callee_forwarded_regs(valid: &MinidumpContextValidity) -> HashSet<&'static str> {
match valid {
MinidumpContextValidity::All => CALLEE_SAVED_REGS.iter().copied().collect(),
MinidumpContextValidity::Some(ref which) => CALLEE_SAVED_REGS
.iter()
.filter(|&reg| which.contains(reg))
.copied()
.collect(),
}
}
fn get_caller_by_frame_pointer<P>(
ctx: &ArmContext,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
_symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying frame pointer");
// Assume that the standard %fp-using ARM64 calling convention is in use.
// The main quirk of this ABI is that the return address doesn't need to
// be restored from the stack -- it's already in the link register (lr).
// But that means we need to save/restore lr itself so that the *caller's*
// return address can be recovered.
//
// In the standard calling convention, the following happens:
//
// PUSH fp, lr (save fp and lr to the stack -- ARM64 pushes in pairs)
// fp := sp (update the frame pointer to the current stack pointer)
// lr := pc (save the return address in the link register)
//
// So to restore the caller's registers, we have:
//
// pc := lr
// sp := fp + ptr*2
// lr := *(fp + ptr)
// fp := *fp
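// Hypothetical example with 8-byte pointers: if fp = 0x0000_7ffc_1000, the caller's
// fp is loaded from 0x0000_7ffc_1000, its return address from 0x0000_7ffc_1008,
// and the caller's sp becomes 0x0000_7ffc_1010.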
let valid = &callee.context.valid;
let last_fp = ctx.get_register(FRAME_POINTER, valid)?;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
let last_lr = match ctx.get_register(LINK_REGISTER, valid) {
Some(lr) => ptr_auth_strip(modules, lr),
None => {
// FIXME: it would be good to write this back to the callee's ctx/validity
get_link_register_by_frame_pointer(ctx, valid, stack_memory, grand_callee, modules)?
}
};
if last_fp as u64 >= u64::MAX - POINTER_WIDTH as u64 * 2 {
// Although this code generally works fine if the pointer math overflows,
// debug builds will still panic, and this guard protects against it without
// drowning the rest of the code in checked_add.
return None;
}
let caller_fp = stack_memory.get_memory_at_address(last_fp as u64)?;
let caller_lr = stack_memory.get_memory_at_address(last_fp + POINTER_WIDTH as u64)?;
let caller_lr = ptr_auth_strip(modules, caller_lr);
let caller_pc = last_lr;
// TODO: why does breakpad do this? How could we get this far with a null fp?
let caller_sp = if last_fp == 0 {
last_sp
} else {
last_fp + POINTER_WIDTH * 2
};
// TODO: restore all the other callee-save registers that weren't touched.
// unclear: does this mean we need to be aware of ".undef" entries at this point?
// Breakpad's tests don't like it when we validate the frame pointer's value,
// so we don't check that.
// Don't accept obviously wrong instruction pointers.
if is_non_canonical(caller_pc) {
trace!("unwind: rejecting frame pointer result for unreasonable instruction pointer");
return None;
}
// Don't actually validate that the stack makes sense (duplicating breakpad behaviour).
trace!(
"unwind: frame pointer seems valid -- caller_pc: 0x{:016x}, caller_sp: 0x{:016x}",
caller_pc,
caller_sp,
);
let mut caller_ctx = ArmContext::default();
caller_ctx.set_register(PROGRAM_COUNTER, caller_pc);
caller_ctx.set_register(LINK_REGISTER, caller_lr);
caller_ctx.set_register(FRAME_POINTER, caller_fp);
caller_ctx.set_register(STACK_POINTER, caller_sp);
let mut valid = HashSet::new();
valid.insert(PROGRAM_COUNTER);
valid.insert(LINK_REGISTER);
valid.insert(FRAME_POINTER);
valid.insert(STACK_POINTER);
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(caller_ctx),
valid: MinidumpContextValidity::Some(valid),
};
Some(StackFrame::from_context(context, FrameTrust::FramePointer))
}
/// Restores the callee's link register from the stack.
fn get_link_register_by_frame_pointer(
ctx: &ArmContext,
valid: &MinidumpContextValidity,
stack_memory: &MinidumpMemory<'_>,
grand_callee: Option<&StackFrame>,
modules: &MinidumpModuleList,
) -> Option<Pointer> {
// It may happen that whatever unwinding strategy we're using managed to
// restore %fp but didn't restore %lr. Frame-pointer-based unwinding requires
// %lr because it contains the return address (the caller's %pc).
//
// In the standard ARM64 calling convention %fp and %lr are pushed together,
// so if the grand-callee appears to have been called with that convention
// then we can recover %lr using its %fp.
// We need the grand_callee's frame pointer
let grand_callee = grand_callee?;
let last_last_fp = if let MinidumpRawContext::OldArm64(ref ctx) = grand_callee.context.raw {
ctx.get_register(FRAME_POINTER, &grand_callee.context.valid)?
} else {
return None;
};
let presumed_last_fp: Pointer = stack_memory.get_memory_at_address(last_last_fp as u64)?;
// Make sure fp and sp aren't obviously garbage (are well-ordered)
let last_fp = ctx.get_register(FRAME_POINTER, valid)?;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
if last_fp <= last_sp {
return None;
}
// Make sure the grand-callee and callee agree on the value of fp
if presumed_last_fp != last_fp {
return None;
}
// Now that we're pretty confident that frame pointers are valid, restore
// the callee's %lr, which should be right next to where its %fp is saved.
let last_lr = stack_memory.get_memory_at_address(last_last_fp + POINTER_WIDTH)?;
Some(ptr_auth_strip(modules, last_lr))
}
fn ptr_auth_strip(modules: &MinidumpModuleList, ptr: Pointer) -> Pointer {
// ARMv8.3 introduced a code hardening system called "Pointer Authentication"
// which is used on Apple platforms. It adds some extra high bits to
// several pointers when they get pushed to memory. Interestingly
// this doesn't seem to affect return addresses pushed by a function call,
// but it does affect lr/fp registers that get pushed to the stack.
//
// Rather than actually thinking about how to recover the key and properly
// decode this, let's apply a simple heuristic. We get the maximum address
// that's contained in a module we know about, which will have some highest
// bit that is set. We can then safely mask out any bit that's higher than
// that one, which will hopefully mask out all the weird security stuff
// in the high bits.
if let Some(last_module) = modules.by_addr().next_back() {
// Get the highest mappable address
let mut mask = last_module.base_address() + last_module.size();
// Repeatedly OR this value with its shifted self to "smear" its
// highest set bit down to all lower bits. This will get us a
// mask we can use to AND out any bits that are higher.
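// e.g. (assumed layout) a highest mapped address of 0x0000_0000_f000_0000 smears
// into a mask of 0x0000_0000_ffff_ffff, stripping anything above bit 31.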
mask |= mask >> 1;
mask |= mask >> 1;
mask |= mask >> 2;
mask |= mask >> 4;
mask |= mask >> 8;
mask |= mask >> 16;
mask |= mask >> 32;
let stripped = ptr & mask;
// Only actually use this stripped value if it ended up pointing in
// a module so we don't start corrupting normal pointers that are just
// in modules we don't know about.
if modules.module_at_address(stripped).is_some() {
// trace!("unwind: stripped pointer {:016x} -> {:016x}", ptr, stripped);
return stripped;
}
}
ptr
}
async fn get_caller_by_scan<P>(
ctx: &ArmContext,
callee: &StackFrame,
stack_memory: &MinidumpMemory<'_>,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
trace!("unwind: trying scan");
// Stack scanning is just walking from the end of the frame until we encounter
// a value on the stack that looks like a pointer into some code (it's an address
// in a range covered by one of our modules). If we find such an instruction,
// we assume it's a pc value that was pushed by the CALL instruction that created
// the current frame. The next frame is then assumed to end just before that
// pc value.
let valid = &callee.context.valid;
let last_sp = ctx.get_register(STACK_POINTER, valid)?;
// Number of pointer-sized values to scan through in our search.
let default_scan_range = 40;
let extended_scan_range = default_scan_range * 4;
// Breakpad devs found that the first frame of an unwind can be really messed up,
// and therefore benefits from a longer scan. Let's do it too.
let scan_range = if let FrameTrust::Context = callee.trust {
extended_scan_range
} else {
default_scan_range
};
for i in 0..scan_range {
let address_of_pc = last_sp.checked_add(i * POINTER_WIDTH)?;
let caller_pc = stack_memory.get_memory_at_address(address_of_pc as u64)?;
if instruction_seems_valid(caller_pc, modules, symbol_provider).await {
// pc is pushed by CALL, so sp is just address_of_pc + ptr
let caller_sp = address_of_pc.checked_add(POINTER_WIDTH)?;
// Don't do any more validation, and don't try to restore fp
// (that's what breakpad does!)
trace!(
"unwind: scan seems valid -- caller_pc: 0x{:08x}, caller_sp: 0x{:08x}",
caller_pc,
caller_sp,
);
let mut caller_ctx = ArmContext::default();
caller_ctx.set_register(PROGRAM_COUNTER, caller_pc);
caller_ctx.set_register(STACK_POINTER, caller_sp);
let mut valid = HashSet::new();
valid.insert(PROGRAM_COUNTER);
valid.insert(STACK_POINTER);
let context = MinidumpContext {
raw: MinidumpRawContext::OldArm64(caller_ctx),
valid: MinidumpContextValidity::Some(valid),
};
return Some(StackFrame::from_context(context, FrameTrust::Scan));
}
}
None
}
/// The most strict validation we have for instruction pointers.
///
/// This is only used for stack-scanning, because it's explicitly
/// trying to distinguish between total garbage and correct values.
/// cfi and frame_pointer approaches do not use this validation
/// because by default they're working with plausible/trustworthy
/// data.
///
/// Specifically, not using this validation allows cfi/fp methods
/// to unwind through frames we don't have mapped modules for (such as
/// OS APIs). This may seem confusing since we obviously don't have cfi
/// for unmapped modules!
///
/// The way this works is that we will use cfi to unwind some frame we
/// know about and *end up* in a function we know nothing about, but with
/// all the right register values. At this point, frame pointers will
/// often do the correct thing even though we don't know what code we're
/// in -- until we get back into code we do know about and cfi kicks back in.
/// At worst, this sets scanning up in a better position for success!
///
/// If we applied this more rigorous validation to cfi/fp methods, we
/// would just discard the correct register values from the known frame
/// and immediately start doing unreliable scans.
async fn instruction_seems_valid<P>(
instruction: Pointer,
modules: &MinidumpModuleList,
symbol_provider: &P,
) -> bool
where
P: SymbolProvider + Sync,
{
if is_non_canonical(instruction) || instruction == 0 |
super::instruction_seems_valid_by_symbols(instruction as u64, modules, symbol_provider).await
}
fn is_non_canonical(instruction: Pointer) -> bool {
// Reject instructions in the first page or above the user-space threshold.
!(0x1000..=0x000fffffffffffff).contains(&instruction)
}
/*
// ARM64 is currently hyper-permissive, so we don't use this,
// but here it is in case we change our minds!
fn stack_seems_valid(
caller_sp: Pointer,
callee_sp: Pointer,
stack_memory: &MinidumpMemory<'_>,
) -> bool {
// The stack shouldn't *grow* when we unwind
if caller_sp < callee_sp {
return false;
}
// The stack pointer should be in the stack
stack_memory
.get_memory_at_address::<Pointer>(caller_sp as u64)
.is_some()
}
*/
#[async_trait::async_trait]
impl Unwind for ArmContext {
async fn get_caller_frame<P>(
&self,
callee: &StackFrame,
grand_callee: Option<&StackFrame>,
stack_memory: Option<&MinidumpMemory<'_>>,
modules: &MinidumpModuleList,
_system_info: &SystemInfo,
syms: &P,
) -> Option<StackFrame>
where
P: SymbolProvider + Sync,
{
let stack = stack_memory.as_ref()?;
// .await doesn't like closures, so don't use Option chaining
let mut frame = None;
if frame.is_none() {
frame = get_caller_by_cfi(self, callee, grand_callee, stack, modules, syms).await;
}
if frame.is_none() {
frame = get_caller_by_frame_pointer(self, callee, grand_callee, stack, modules, syms);
}
if frame.is_none() {
frame = get_caller_by_scan(self, callee, stack, modules, syms).await;
}
let mut frame = frame?;
// We now check the frame to see if it looks like unwinding is complete,
// based on the frame we computed having a nonsense value. Returning
// None signals to the unwinder to stop unwinding.
// if the instruction is within the first ~page of memory, it's basically
// null, and we can assume unwinding is complete.
if frame.context.get_instruction_pointer() < 4096 {
trace!("unwind: instruction pointer was nullish, assuming unwind complete");
return None;
}
// If the new stack pointer is at a lower address than the old,
// then that's clearly incorrect. Treat this as end-of-stack to
// enforce progress and avoid infinite loops.
let sp = frame.context.get_stack_pointer();
let last_sp = self.get_register_always("sp") as u64;
if sp <= last_sp {
// Arm leaf functions may not actually touch the stack (thanks
// to the link register allowing you to "push" the return address
// to a register), so we need to permit the stack pointer to not
// change for the first frame of the unwind. After that we need
// more strict validation to avoid infinite loops.
let is_leaf = callee.trust == FrameTrust::Context && sp == last_sp;
if !is_leaf {
trace!("unwind: stack pointer went backwards, assuming unwind complete");
return None;
}
}
// Ok, the frame now seems well and truly valid, do final cleanup.
// A caller's ip is the return address, which is the instruction
// *after* the CALL that caused us to arrive at the callee. Set
// the value to 4 less than that, so it points to the CALL instruction
// (arm64 instructions are all 4 bytes wide). This is important because
// we use this value to lookup the CFI we need to unwind the next frame.
let ip = frame.context.get_instruction_pointer() as u64;
frame.instruction = ip - 4;
Some(frame)
}
}
| {
return false;
} | conditional_block |
scanner.rs | // The MIT License (MIT)
//
// Copyright (c) 2017 Doublify Technologies
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
extern crate doublify_toolkit;
use doublify_toolkit::filtering::{Kind, Token, scan};
#[test]
fn | () {
let raw_query = "subject:{'fdhadzh' 'goodmind'}";
let expected = vec![Token::new(Kind::Identifier, "subject"),
Token::new(Kind::Colon, ":"),
Token::new(Kind::Curly, "{"),
Token::new(Kind::Identifier, "'fdhadzh'"),
Token::new(Kind::Identifier, "'goodmind'"),
Token::new(Kind::Curly, "}")];
let wanted = scan(raw_query);
assert_eq!(expected, wanted);
}
#[test]
fn scan_raw_query_test_2() {
let raw_query = "languages:(\"rust\" \"python\" \"typescript\") is:stable";
let expected = vec![Token::new(Kind::Identifier, "languages"),
Token::new(Kind::Colon, ":"),
Token::new(Kind::Parentheses, "("),
Token::new(Kind::Identifier, "\"rust\""),
Token::new(Kind::Identifier, "\"python\""),
Token::new(Kind::Identifier, "\"typescript\""),
Token::new(Kind::Parentheses, ")"),
Token::new(Kind::Identifier, "is"),
Token::new(Kind::Colon, ":"),
Token::new(Kind::Identifier, "stable")];
let wanted = scan(raw_query);
assert_eq!(expected, wanted);
}
| scan_raw_query_test_1 | identifier_name |
scanner.rs | // The MIT License (MIT)
//
// Copyright (c) 2017 Doublify Technologies
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
extern crate doublify_toolkit;
use doublify_toolkit::filtering::{Kind, Token, scan};
#[test]
fn scan_raw_query_test_1() {
let raw_query = "subject:{'fdhadzh' 'goodmind'}";
let expected = vec![Token::new(Kind::Identifier, "subject"),
Token::new(Kind::Colon, ":"),
Token::new(Kind::Curly, "{"),
Token::new(Kind::Identifier, "'fdhadzh'"),
Token::new(Kind::Identifier, "'goodmind'"),
Token::new(Kind::Curly, "}")];
let wanted = scan(raw_query);
assert_eq!(expected, wanted);
}
#[test]
fn scan_raw_query_test_2() {
let raw_query = "languages:(\"rust\" \"python\" \"typescript\") is:stable";
let expected = vec![Token::new(Kind::Identifier, "languages"),
Token::new(Kind::Colon, ":"),
Token::new(Kind::Parentheses, "("),
Token::new(Kind::Identifier, "\"rust\""),
Token::new(Kind::Identifier, "\"python\""),
Token::new(Kind::Identifier, "\"typescript\""),
Token::new(Kind::Parentheses, ")"),
Token::new(Kind::Identifier, "is"),
Token::new(Kind::Colon, ":"),
Token::new(Kind::Identifier, "stable")]; |
assert_eq!(expected, wanted);
} |
let wanted = scan(raw_query); | random_line_split |
scanner.rs | // The MIT License (MIT)
//
// Copyright (c) 2017 Doublify Technologies
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
extern crate doublify_toolkit;
use doublify_toolkit::filtering::{Kind, Token, scan};
#[test]
fn scan_raw_query_test_1() |
#[test]
fn scan_raw_query_test_2() {
let raw_query = "languages:(\"rust\" \"python\" \"typescript\") is:stable";
let expected = vec![Token::new(Kind::Identifier, "languages"),
Token::new(Kind::Colon, ":"),
Token::new(Kind::Parentheses, "("),
Token::new(Kind::Identifier, "\"rust\""),
Token::new(Kind::Identifier, "\"python\""),
Token::new(Kind::Identifier, "\"typescript\""),
Token::new(Kind::Parentheses, ")"),
Token::new(Kind::Identifier, "is"),
Token::new(Kind::Colon, ":"),
Token::new(Kind::Identifier, "stable")];
let wanted = scan(raw_query);
assert_eq!(expected, wanted);
}
| {
let raw_query = "subject:{'fdhadzh' 'goodmind'}";
let expected = vec![Token::new(Kind::Identifier, "subject"),
Token::new(Kind::Colon, ":"),
Token::new(Kind::Curly, "{"),
Token::new(Kind::Identifier, "'fdhadzh'"),
Token::new(Kind::Identifier, "'goodmind'"),
Token::new(Kind::Curly, "}")];
let wanted = scan(raw_query);
assert_eq!(expected, wanted);
} | identifier_body |
explorer_factory.rs | // The MIT License (MIT)
//
// Copyright (c) 2015 dinowernli
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use explorer::Explorer;
use explorer::monte_carlo_explorer::MonteCarloExplorer;
use explorer::random_explorer::RandomExplorer;
use predictor::Predictor;
use random::RandomImpl;
/// An object which knows how to produce explorers.
pub trait ExplorerFactory {
fn create_monte_carlo_explorer<'a>(
&self, predictor: &'a mut Predictor) -> Box<Explorer + 'a>;
fn create_random_explorer(
&self) -> Box<Explorer>;
}
pub struct ExplorerFactoryImpl;
impl ExplorerFactoryImpl {
pub fn new() -> ExplorerFactoryImpl { ExplorerFactoryImpl }
}
impl ExplorerFactory for ExplorerFactoryImpl { | fn create_monte_carlo_explorer<'a>(
&self, predictor: &'a mut Predictor) -> Box<Explorer + 'a> {
Box::new(MonteCarloExplorer::new(predictor))
}
fn create_random_explorer(&self) -> Box<Explorer> {
Box::new(RandomExplorer::new(Box::new(RandomImpl::create(235669))))
}
} | random_line_split |
|
explorer_factory.rs | // The MIT License (MIT)
//
// Copyright (c) 2015 dinowernli
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
use explorer::Explorer;
use explorer::monte_carlo_explorer::MonteCarloExplorer;
use explorer::random_explorer::RandomExplorer;
use predictor::Predictor;
use random::RandomImpl;
/// An object which knows how to produce explorers.
pub trait ExplorerFactory {
fn create_monte_carlo_explorer<'a>(
&self, predictor: &'a mut Predictor) -> Box<Explorer + 'a>;
fn create_random_explorer(
&self) -> Box<Explorer>;
}
pub struct ExplorerFactoryImpl;
impl ExplorerFactoryImpl {
pub fn | () -> ExplorerFactoryImpl { ExplorerFactoryImpl }
}
impl ExplorerFactory for ExplorerFactoryImpl {
fn create_monte_carlo_explorer<'a>(
&self, predictor: &'a mut Predictor) -> Box<Explorer + 'a> {
Box::new(MonteCarloExplorer::new(predictor))
}
fn create_random_explorer(&self) -> Box<Explorer> {
Box::new(RandomExplorer::new(Box::new(RandomImpl::create(235669))))
}
}
| new | identifier_name |
import_tests.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::unit_tests::testutils::{
compile_module_string_with_stdlib, compile_script_string_with_stdlib,
};
#[test]
fn compile_script_with_imports() {
let code = String::from(
"
import 0x1.DiemCoin;
main() {
let x: u64;
let y: u64;
x = 2;
y = copy(x) + copy(x);
return;
}
",
);
let compiled_script_res = compile_script_string_with_stdlib(&code);
let _compiled_script = compiled_script_res.unwrap();
}
#[test]
fn | () {
let code = String::from(
"
module Foobar {
import 0x1.DiemCoin;
struct FooCoin { value: u64 }
public value(this: &Self.FooCoin): u64 {
let value_ref: &u64;
value_ref = &move(this).value;
return *move(value_ref);
}
public deposit(this: &mut Self.FooCoin, check: Self.FooCoin) {
let value_ref: &mut u64;
let value: u64;
let check_ref: &Self.FooCoin;
let check_value: u64;
let new_value: u64;
let i: u64;
value_ref = &mut move(this).value;
value = *copy(value_ref);
check_ref = &check;
check_value = Self.value(move(check_ref));
new_value = copy(value) + copy(check_value);
*move(value_ref) = move(new_value);
FooCoin { value: i } = move(check);
return;
}
}
",
);
let compiled_module_res = compile_module_string_with_stdlib(&code);
let _compiled_module = compiled_module_res.unwrap();
}
| compile_module_with_imports | identifier_name |
import_tests.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::unit_tests::testutils::{
compile_module_string_with_stdlib, compile_script_string_with_stdlib,
};
#[test]
fn compile_script_with_imports() {
let code = String::from(
"
import 0x1.DiemCoin;
main() {
let x: u64;
let y: u64;
x = 2;
y = copy(x) + copy(x);
return;
}
",
);
let compiled_script_res = compile_script_string_with_stdlib(&code);
let _compiled_script = compiled_script_res.unwrap();
}
#[test]
fn compile_module_with_imports() {
let code = String::from( | "
module Foobar {
import 0x1.DiemCoin;
struct FooCoin { value: u64 }
public value(this: &Self.FooCoin): u64 {
let value_ref: &u64;
value_ref = &move(this).value;
return *move(value_ref);
}
public deposit(this: &mut Self.FooCoin, check: Self.FooCoin) {
let value_ref: &mut u64;
let value: u64;
let check_ref: &Self.FooCoin;
let check_value: u64;
let new_value: u64;
let i: u64;
value_ref = &mut move(this).value;
value = *copy(value_ref);
check_ref = &check;
check_value = Self.value(move(check_ref));
new_value = copy(value) + copy(check_value);
*move(value_ref) = move(new_value);
FooCoin { value: i } = move(check);
return;
}
}
",
);
let compiled_module_res = compile_module_string_with_stdlib(&code);
let _compiled_module = compiled_module_res.unwrap();
} | random_line_split |
|
import_tests.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::unit_tests::testutils::{
compile_module_string_with_stdlib, compile_script_string_with_stdlib,
};
#[test]
fn compile_script_with_imports() |
#[test]
fn compile_module_with_imports() {
let code = String::from(
"
module Foobar {
import 0x1.DiemCoin;
struct FooCoin { value: u64 }
public value(this: &Self.FooCoin): u64 {
let value_ref: &u64;
value_ref = &move(this).value;
return *move(value_ref);
}
public deposit(this: &mut Self.FooCoin, check: Self.FooCoin) {
let value_ref: &mut u64;
let value: u64;
let check_ref: &Self.FooCoin;
let check_value: u64;
let new_value: u64;
let i: u64;
value_ref = &mut move(this).value;
value = *copy(value_ref);
check_ref = &check;
check_value = Self.value(move(check_ref));
new_value = copy(value) + copy(check_value);
*move(value_ref) = move(new_value);
FooCoin { value: i } = move(check);
return;
}
}
",
);
let compiled_module_res = compile_module_string_with_stdlib(&code);
let _compiled_module = compiled_module_res.unwrap();
}
| {
let code = String::from(
"
import 0x1.DiemCoin;
main() {
let x: u64;
let y: u64;
x = 2;
y = copy(x) + copy(x);
return;
}
",
);
let compiled_script_res = compile_script_string_with_stdlib(&code);
let _compiled_script = compiled_script_res.unwrap();
} | identifier_body |
filters.rs | #[allow(unused_imports)]
use serde::json;
use serde::json::value;
use serde::json::Value;
use chrono::offset::utc::UTC;
#[allow(dead_code)]
fn int_to_level(level: u64) -> String {
match level {
10 => "trace".to_string(),
20 => "debug".to_string(),
30 => "info".to_string(),
40 => "warn".to_string(),
50 => "error".to_string(),
60 => "fatal".to_string(),
_ => format!("Unknown level {}", level)
}
}
#[allow(dead_code)]
fn transform(input_value: &mut Value) -> Value {
// {"name":"stakhanov","hostname":"Quark.local","pid":65470,"level":30
// "msg":"pushing http://fr.wikipedia.org/wiki/Giant_Sand",
// "time":"2015-05-21T10:11:02.132Z","v":0}
//
// entry['@timestamp'] = entry.time;
// entry.level = levels[entry.level];
// entry.message = entry.msg;
// delete entry.time;
// delete entry.msg;
let mut input = input_value.as_object_mut().unwrap();
if input.contains_key("time") {
let time = input.get("time").unwrap().clone();
input.insert("@timestamp".to_string(), time);
input.remove("time");
} else {
// Inject now timestamp.
let tm = UTC::now();
let format_prefix = "%Y-%m-%dT%H:%M:%S.%f";
let format_suffix = "%Z";
// truncate up to the third digit
// 2015-05-21T15:27:20.994
// 01234567890123456789012
let mut timestamp = tm.format(format_prefix.as_ref()).to_string();
timestamp.truncate(23);
let timestamp_suffix = tm.format(format_suffix.as_ref()).to_string();
timestamp.push_str(&timestamp_suffix);
input.insert("@timestamp".to_string(), value::to_value(&timestamp));
}
if input.contains_key("level") {
let level = input.get("level").unwrap().as_u64().unwrap();
input.insert("level".to_string(), value::to_value(&int_to_level(level)));
}
if input.contains_key("msg") {
let message = input.get("msg").unwrap().clone();
input.insert("message".to_string(), message);
input.remove("msg");
}
return value::to_value(input);
}
#[allow(dead_code)]
fn time_to_index_name(full_timestamp: &str) -> String {
// compatible with "2015-05-21T10:11:02.132Z"
let mut input = full_timestamp.to_string();
input.truncate(10);
input = input.replace("-", ".");
format!("logstash-{}", input)
}
#[test]
fn it_transform_ok() |
#[test]
fn it_prepares_index_name() {
// let src = r#"{"name":"stakhanov","hostname":"Quark.local","pid":65470,"level":30,"msg":"pushing http://fr.wikipedia.org/wiki/Giant_Sand","time":"2015-05-21T10:11:02.132Z","v":0}"#;
let src = r#"{"time": "2015-05-21T10:11:02.132Z"}"#;
let decode = json::from_str::<Value>(src).unwrap();
match decode.find("time") {
Some(time) => assert_eq!("logstash-2015.05.21", time_to_index_name(time.as_string().unwrap())),
None => assert!(false)
}
}
| {
// let src = r#"{"name":"stakhanov","hostname":"Quark.local","pid":65470,"level":30,"msg":"pushing http://fr.wikipedia.org/wiki/Giant_Sand","time":"2015-05-21T10:11:02.132Z","v":0}"#;
let src = r#"{"level":30, "msg":"this is a test.", "time": "12"}"#;
let mut decode = json::from_str::<Value>(src).unwrap();
let transformed = transform(&mut decode);
let out = json::to_string(&transformed).unwrap();
assert_eq!(out, r#"{"@timestamp":"12","level":"info","message":"this is a test."}"#);
} | identifier_body |
filters.rs | #[allow(unused_imports)]
use serde::json;
use serde::json::value;
use serde::json::Value;
use chrono::offset::utc::UTC;
#[allow(dead_code)]
fn int_to_level(level: u64) -> String {
match level {
10 => "trace".to_string(),
20 => "debug".to_string(),
30 => "info".to_string(),
40 => "warn".to_string(),
50 => "error".to_string(),
60 => "fatal".to_string(),
_ => format!("Unknown level {}", level)
}
}
#[allow(dead_code)]
fn transform(input_value: &mut Value) -> Value {
// {"name":"stakhanov","hostname":"Quark.local","pid":65470,"level":30
// "msg":"pushing http://fr.wikipedia.org/wiki/Giant_Sand",
// "time":"2015-05-21T10:11:02.132Z","v":0}
//
// entry['@timestamp'] = entry.time;
// entry.level = levels[entry.level];
// entry.message = entry.msg;
// delete entry.time;
// delete entry.msg;
let mut input = input_value.as_object_mut().unwrap();
if input.contains_key("time") {
let time = input.get("time").unwrap().clone();
input.insert("@timestamp".to_string(), time);
input.remove("time");
} else {
// Inject now timestamp.
let tm = UTC::now();
let format_prefix = "%Y-%m-%dT%H:%M:%S.%f";
let format_suffix = "%Z";
// truncate up to the third digit
// 2015-05-21T15:27:20.994 | // 01234567890123456789012
let mut timestamp = tm.format(format_prefix.as_ref()).to_string();
timestamp.truncate(23);
let timestamp_suffix = tm.format(format_suffix.as_ref()).to_string();
timestamp.push_str(&timestamp_suffix);
input.insert("@timestamp".to_string(), value::to_value(&timestamp));
}
if input.contains_key("level") {
let level = input.get("level").unwrap().as_u64().unwrap();
input.insert("level".to_string(), value::to_value(&int_to_level(level)));
}
if input.contains_key("msg") {
let message = input.get("msg").unwrap().clone();
input.insert("message".to_string(), message);
input.remove("msg");
}
return value::to_value(input);
}
#[allow(dead_code)]
fn time_to_index_name(full_timestamp: &str) -> String {
// compatible with "2015-05-21T10:11:02.132Z"
let mut input = full_timestamp.to_string();
input.truncate(10);
input = input.replace("-", ".");
format!("logstash-{}", input)
}
#[test]
fn it_transform_ok() {
// let src = r#"{"name":"stakhanov","hostname":"Quark.local","pid":65470,"level":30,"msg":"pushing http://fr.wikipedia.org/wiki/Giant_Sand","time":"2015-05-21T10:11:02.132Z","v":0}"#;
let src = r#"{"level":30, "msg":"this is a test.", "time": "12"}"#;
let mut decode = json::from_str::<Value>(src).unwrap();
let transformed = transform(&mut decode);
let out = json::to_string(&transformed).unwrap();
assert_eq!(out, r#"{"@timestamp":"12","level":"info","message":"this is a test."}"#);
}
#[test]
fn it_prepares_index_name() {
// let src = r#"{"name":"stakhanov","hostname":"Quark.local","pid":65470,"level":30,"msg":"pushing http://fr.wikipedia.org/wiki/Giant_Sand","time":"2015-05-21T10:11:02.132Z","v":0}"#;
let src = r#"{"time": "2015-05-21T10:11:02.132Z"}"#;
let decode = json::from_str::<Value>(src).unwrap();
match decode.find("time") {
Some(time) => assert_eq!("logstash-2015.05.21", time_to_index_name(time.as_string().unwrap())),
None => assert!(false)
}
} | random_line_split |
|
filters.rs | #[allow(unused_imports)]
use serde::json;
use serde::json::value;
use serde::json::Value;
use chrono::offset::utc::UTC;
#[allow(dead_code)]
fn int_to_level(level: u64) -> String {
match level {
10 => "trace".to_string(),
20 => "debug".to_string(),
30 => "info".to_string(),
40 => "warn".to_string(),
50 => "error".to_string(),
60 => "fatal".to_string(),
_ => format!("Unknown level {}", level)
}
}
#[allow(dead_code)]
fn transform(input_value: &mut Value) -> Value {
// {"name":"stakhanov","hostname":"Quark.local","pid":65470,"level":30
// "msg":"pushing http://fr.wikipedia.org/wiki/Giant_Sand",
// "time":"2015-05-21T10:11:02.132Z","v":0}
//
// entry['@timestamp'] = entry.time;
// entry.level = levels[entry.level];
// entry.message = entry.msg;
// delete entry.time;
// delete entry.msg;
let mut input = input_value.as_object_mut().unwrap();
if input.contains_key("time") {
let time = input.get("time").unwrap().clone();
input.insert("@timestamp".to_string(), time);
input.remove("time");
} else {
// Inject now timestamp.
let tm = UTC::now();
let format_prefix = "%Y-%m-%dT%H:%M:%S.%f";
let format_suffix = "%Z";
// truncate up to the third digit
// 2015-05-21T15:27:20.994
// 01234567890123456789012
let mut timestamp = tm.format(format_prefix.as_ref()).to_string();
timestamp.truncate(23);
let timestamp_suffix = tm.format(format_suffix.as_ref()).to_string();
        timestamp.push_str(&timestamp_suffix);
        input.insert("@timestamp".to_string(), value::to_value(&timestamp));
}
if input.contains_key("level") {
let level = input.get("level").unwrap().as_u64().unwrap();
input.insert("level".to_string(), value::to_value(&int_to_level(level)));
}
if input.contains_key("msg") {
let message = input.get("msg").unwrap().clone();
input.insert("message".to_string(), message);
input.remove("msg");
}
return value::to_value(input);
}
#[allow(dead_code)]
fn | (full_timestamp: &str) -> String {
// compatible with "2015-05-21T10:11:02.132Z"
let mut input = full_timestamp.to_string();
input.truncate(10);
input = input.replace("-", ".");
format!("logstash-{}", input)
}
#[test]
fn it_transform_ok() {
// let src = r#"{"name":"stakhanov","hostname":"Quark.local","pid":65470,"level":30,"msg":"pushing http://fr.wikipedia.org/wiki/Giant_Sand","time":"2015-05-21T10:11:02.132Z","v":0}"#;
let src = r#"{"level":30, "msg":"this is a test.", "time": "12"}"#;
let mut decode = json::from_str::<Value>(src).unwrap();
let transformed = transform(&mut decode);
let out = json::to_string(&transformed).unwrap();
assert_eq!(out, r#"{"@timestamp":"12","level":"info","message":"this is a test."}"#);
}
#[test]
fn it_prepares_index_name() {
// let src = r#"{"name":"stakhanov","hostname":"Quark.local","pid":65470,"level":30,"msg":"pushing http://fr.wikipedia.org/wiki/Giant_Sand","time":"2015-05-21T10:11:02.132Z","v":0}"#;
let src = r#"{"time": "2015-05-21T10:11:02.132Z"}"#;
let decode = json::from_str::<Value>(src).unwrap();
match decode.find("time") {
Some(time) => assert_eq!("logstash-2015.05.21", time_to_index_name(time.as_string().unwrap())),
None => assert!(false)
}
}
| time_to_index_name | identifier_name |
root.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation]
/// (http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/root.js).
/// Connection point for all new remote devtools interactions, providing lists of known actors
/// that perform more specific actions (targets, addons, browser chrome, etc.)
use crate::actor::{Actor, ActorMessageStatus, ActorRegistry};
use crate::actors::browsing_context::{BrowsingContextActor, BrowsingContextActorMsg};
use crate::actors::device::DeviceActor;
use crate::actors::performance::PerformanceActor;
use crate::protocol::{ActorDescription, JsonPacketStream};
use serde_json::{Map, Value};
use std::net::TcpStream;
#[derive(Serialize)]
struct ActorTraits {
sources: bool,
highlightable: bool,
customHighlighters: bool,
networkMonitor: bool,
}
#[derive(Serialize)]
struct ListAddonsReply {
from: String,
addons: Vec<AddonMsg>,
}
#[derive(Serialize)]
enum AddonMsg {}
#[derive(Serialize)]
struct GetRootReply {
from: String,
selected: u32,
performanceActor: String,
deviceActor: String,
}
#[derive(Serialize)]
struct | {
from: String,
selected: u32,
tabs: Vec<BrowsingContextActorMsg>,
}
#[derive(Serialize)]
pub struct RootActorMsg {
from: String,
applicationType: String,
traits: ActorTraits,
}
#[derive(Serialize)]
pub struct ProtocolDescriptionReply {
from: String,
types: Types,
}
#[derive(Serialize)]
pub struct Types {
performance: ActorDescription,
device: ActorDescription,
}
pub struct RootActor {
pub tabs: Vec<String>,
pub performance: String,
pub device: String,
}
impl Actor for RootActor {
fn name(&self) -> String {
"root".to_owned()
}
fn handle_message(
&self,
registry: &ActorRegistry,
msg_type: &str,
_msg: &Map<String, Value>,
stream: &mut TcpStream,
) -> Result<ActorMessageStatus, ()> {
Ok(match msg_type {
"listAddons" => {
let actor = ListAddonsReply {
from: "root".to_owned(),
addons: vec![],
};
stream.write_json_packet(&actor);
ActorMessageStatus::Processed
},
"getRoot" => {
let actor = GetRootReply {
from: "root".to_owned(),
selected: 0,
performanceActor: self.performance.clone(),
deviceActor: self.device.clone(),
};
stream.write_json_packet(&actor);
ActorMessageStatus::Processed
},
// https://docs.firefox-dev.tools/backend/protocol.html#listing-browser-tabs
"listTabs" => {
let actor = ListTabsReply {
from: "root".to_owned(),
selected: 0,
tabs: self
.tabs
.iter()
.map(|target| registry.find::<BrowsingContextActor>(target).encodable())
.collect(),
};
stream.write_json_packet(&actor);
ActorMessageStatus::Processed
},
"protocolDescription" => {
let msg = ProtocolDescriptionReply {
from: self.name(),
types: Types {
performance: PerformanceActor::description(),
device: DeviceActor::description(),
},
};
stream.write_json_packet(&msg);
ActorMessageStatus::Processed
},
_ => ActorMessageStatus::Ignored,
})
}
}
impl RootActor {
pub fn encodable(&self) -> RootActorMsg {
RootActorMsg {
from: "root".to_owned(),
applicationType: "browser".to_owned(),
traits: ActorTraits {
sources: true,
highlightable: true,
customHighlighters: true,
networkMonitor: true,
},
}
}
}
| ListTabsReply | identifier_name |
root.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation]
/// (http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/root.js).
/// Connection point for all new remote devtools interactions, providing lists of known actors
/// that perform more specific actions (targets, addons, browser chrome, etc.)
use crate::actor::{Actor, ActorMessageStatus, ActorRegistry};
use crate::actors::browsing_context::{BrowsingContextActor, BrowsingContextActorMsg};
use crate::actors::device::DeviceActor;
use crate::actors::performance::PerformanceActor;
use crate::protocol::{ActorDescription, JsonPacketStream};
use serde_json::{Map, Value};
use std::net::TcpStream;
#[derive(Serialize)]
struct ActorTraits {
sources: bool,
highlightable: bool,
customHighlighters: bool,
networkMonitor: bool,
}
#[derive(Serialize)]
struct ListAddonsReply {
from: String,
addons: Vec<AddonMsg>,
}
#[derive(Serialize)]
enum AddonMsg {}
#[derive(Serialize)]
struct GetRootReply {
from: String,
selected: u32,
performanceActor: String,
deviceActor: String,
}
#[derive(Serialize)]
struct ListTabsReply {
from: String,
selected: u32,
tabs: Vec<BrowsingContextActorMsg>,
}
#[derive(Serialize)]
pub struct RootActorMsg {
from: String,
applicationType: String,
traits: ActorTraits,
}
#[derive(Serialize)]
pub struct ProtocolDescriptionReply {
from: String,
types: Types,
}
#[derive(Serialize)]
pub struct Types {
performance: ActorDescription,
device: ActorDescription,
}
pub struct RootActor {
pub tabs: Vec<String>,
pub performance: String,
pub device: String,
}
impl Actor for RootActor {
fn name(&self) -> String {
"root".to_owned()
}
fn handle_message(
&self,
registry: &ActorRegistry,
msg_type: &str,
_msg: &Map<String, Value>,
stream: &mut TcpStream,
) -> Result<ActorMessageStatus, ()> {
Ok(match msg_type {
"listAddons" => {
let actor = ListAddonsReply {
from: "root".to_owned(),
addons: vec![],
};
stream.write_json_packet(&actor);
ActorMessageStatus::Processed
},
"getRoot" => {
let actor = GetRootReply {
from: "root".to_owned(),
selected: 0,
performanceActor: self.performance.clone(),
deviceActor: self.device.clone(),
};
stream.write_json_packet(&actor);
ActorMessageStatus::Processed
},
// https://docs.firefox-dev.tools/backend/protocol.html#listing-browser-tabs
"listTabs" => {
let actor = ListTabsReply {
from: "root".to_owned(),
selected: 0,
tabs: self
.tabs
.iter()
.map(|target| registry.find::<BrowsingContextActor>(target).encodable())
.collect(),
};
stream.write_json_packet(&actor);
ActorMessageStatus::Processed
},
"protocolDescription" => | ,
_ => ActorMessageStatus::Ignored,
})
}
}
impl RootActor {
pub fn encodable(&self) -> RootActorMsg {
RootActorMsg {
from: "root".to_owned(),
applicationType: "browser".to_owned(),
traits: ActorTraits {
sources: true,
highlightable: true,
customHighlighters: true,
networkMonitor: true,
},
}
}
}
| {
let msg = ProtocolDescriptionReply {
from: self.name(),
types: Types {
performance: PerformanceActor::description(),
device: DeviceActor::description(),
},
};
stream.write_json_packet(&msg);
ActorMessageStatus::Processed
} | conditional_block |
root.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Liberally derived from the [Firefox JS implementation]
/// (http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/root.js).
/// Connection point for all new remote devtools interactions, providing lists of known actors
/// that perform more specific actions (targets, addons, browser chrome, etc.)
use crate::actor::{Actor, ActorMessageStatus, ActorRegistry};
use crate::actors::browsing_context::{BrowsingContextActor, BrowsingContextActorMsg};
use crate::actors::device::DeviceActor;
use crate::actors::performance::PerformanceActor;
use crate::protocol::{ActorDescription, JsonPacketStream};
use serde_json::{Map, Value};
use std::net::TcpStream; | struct ActorTraits {
sources: bool,
highlightable: bool,
customHighlighters: bool,
networkMonitor: bool,
}
#[derive(Serialize)]
struct ListAddonsReply {
from: String,
addons: Vec<AddonMsg>,
}
#[derive(Serialize)]
enum AddonMsg {}
#[derive(Serialize)]
struct GetRootReply {
from: String,
selected: u32,
performanceActor: String,
deviceActor: String,
}
#[derive(Serialize)]
struct ListTabsReply {
from: String,
selected: u32,
tabs: Vec<BrowsingContextActorMsg>,
}
#[derive(Serialize)]
pub struct RootActorMsg {
from: String,
applicationType: String,
traits: ActorTraits,
}
#[derive(Serialize)]
pub struct ProtocolDescriptionReply {
from: String,
types: Types,
}
#[derive(Serialize)]
pub struct Types {
performance: ActorDescription,
device: ActorDescription,
}
pub struct RootActor {
pub tabs: Vec<String>,
pub performance: String,
pub device: String,
}
impl Actor for RootActor {
fn name(&self) -> String {
"root".to_owned()
}
fn handle_message(
&self,
registry: &ActorRegistry,
msg_type: &str,
_msg: &Map<String, Value>,
stream: &mut TcpStream,
) -> Result<ActorMessageStatus, ()> {
Ok(match msg_type {
"listAddons" => {
let actor = ListAddonsReply {
from: "root".to_owned(),
addons: vec![],
};
stream.write_json_packet(&actor);
ActorMessageStatus::Processed
},
"getRoot" => {
let actor = GetRootReply {
from: "root".to_owned(),
selected: 0,
performanceActor: self.performance.clone(),
deviceActor: self.device.clone(),
};
stream.write_json_packet(&actor);
ActorMessageStatus::Processed
},
// https://docs.firefox-dev.tools/backend/protocol.html#listing-browser-tabs
"listTabs" => {
let actor = ListTabsReply {
from: "root".to_owned(),
selected: 0,
tabs: self
.tabs
.iter()
.map(|target| registry.find::<BrowsingContextActor>(target).encodable())
.collect(),
};
stream.write_json_packet(&actor);
ActorMessageStatus::Processed
},
"protocolDescription" => {
let msg = ProtocolDescriptionReply {
from: self.name(),
types: Types {
performance: PerformanceActor::description(),
device: DeviceActor::description(),
},
};
stream.write_json_packet(&msg);
ActorMessageStatus::Processed
},
_ => ActorMessageStatus::Ignored,
})
}
}
impl RootActor {
pub fn encodable(&self) -> RootActorMsg {
RootActorMsg {
from: "root".to_owned(),
applicationType: "browser".to_owned(),
traits: ActorTraits {
sources: true,
highlightable: true,
customHighlighters: true,
networkMonitor: true,
},
}
}
} |
#[derive(Serialize)] | random_line_split |
regions-assoc-type-region-bound-in-trait-not-met.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that the compiler checks that arbitrary region bounds declared
// in the trait must be satisfied on the impl. Issue #20890.
trait Foo<'a> {
type Value: 'a;
fn dummy(&'a self) { }
}
impl<'a> Foo<'a> for &'a i16 {
// OK.
type Value = &'a i32;
}
impl<'a> Foo<'static> for &'a i32 {
//~^ ERROR cannot infer
type Value = &'a i32;
}
impl<'a,'b> Foo<'b> for &'a i64 {
//~^ ERROR cannot infer
type Value = &'a i32;
}
fn main() { } | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or | random_line_split |
regions-assoc-type-region-bound-in-trait-not-met.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that the compiler checks that arbitrary region bounds declared
// in the trait must be satisfied on the impl. Issue #20890.
trait Foo<'a> {
type Value: 'a;
fn dummy(&'a self) |
}
impl<'a> Foo<'a> for &'a i16 {
// OK.
type Value = &'a i32;
}
impl<'a> Foo<'static> for &'a i32 {
//~^ ERROR cannot infer
type Value = &'a i32;
}
impl<'a,'b> Foo<'b> for &'a i64 {
//~^ ERROR cannot infer
type Value = &'a i32;
}
fn main() { }
| { } | identifier_body |
regions-assoc-type-region-bound-in-trait-not-met.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that the compiler checks that arbitrary region bounds declared
// in the trait must be satisfied on the impl. Issue #20890.
trait Foo<'a> {
type Value: 'a;
fn dummy(&'a self) { }
}
impl<'a> Foo<'a> for &'a i16 {
// OK.
type Value = &'a i32;
}
impl<'a> Foo<'static> for &'a i32 {
//~^ ERROR cannot infer
type Value = &'a i32;
}
impl<'a,'b> Foo<'b> for &'a i64 {
//~^ ERROR cannot infer
type Value = &'a i32;
}
fn | () { }
| main | identifier_name |
out-pointer-aliasing.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub struct Foo {
f1: int,
_f2: int,
}
impl Copy for Foo {}
#[inline(never)]
pub fn foo(f: &mut Foo) -> Foo |
pub fn main() {
let mut f = Foo {
f1: 8,
_f2: 9,
};
f = foo(&mut f);
assert_eq!(f.f1, 8);
}
| {
let ret = *f;
f.f1 = 0;
ret
} | identifier_body |
out-pointer-aliasing.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub struct Foo {
f1: int,
_f2: int,
}
impl Copy for Foo {}
#[inline(never)]
pub fn | (f: &mut Foo) -> Foo {
let ret = *f;
f.f1 = 0;
ret
}
pub fn main() {
let mut f = Foo {
f1: 8,
_f2: 9,
};
f = foo(&mut f);
assert_eq!(f.f1, 8);
}
| foo | identifier_name |
out-pointer-aliasing.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| f1: int,
_f2: int,
}
impl Copy for Foo {}
#[inline(never)]
pub fn foo(f: &mut Foo) -> Foo {
let ret = *f;
f.f1 = 0;
ret
}
pub fn main() {
let mut f = Foo {
f1: 8,
_f2: 9,
};
f = foo(&mut f);
assert_eq!(f.f1, 8);
} | pub struct Foo { | random_line_split |
font.rs |
use app_units::Au;
use euclid::{Point2D, Rect, Size2D};
use font_context::{FontContext, FontSource};
use font_template::FontTemplateDescriptor;
use ordered_float::NotNan;
use platform::font::{FontHandle, FontTable};
use platform::font_context::FontContextHandle;
pub use platform::font_list::fallback_font_families;
use platform::font_template::FontTemplateData;
use servo_atoms::Atom;
use smallvec::SmallVec;
use std::borrow::ToOwned;
use std::cell::RefCell;
use std::collections::HashMap;
use std::iter;
use std::rc::Rc;
use std::str;
use std::sync::Arc;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
use style::computed_values::{font_stretch, font_style, font_variant_caps, font_weight};
use style::properties::style_structs::Font as FontStyleStruct;
use style::values::computed::font::SingleFontFamily;
use text::Shaper;
use text::glyph::{ByteIndex, GlyphData, GlyphId, GlyphStore};
use text::shaping::ShaperMethods;
use time;
use unicode_script::Script;
use webrender_api;
macro_rules! ot_tag {
($t1:expr, $t2:expr, $t3:expr, $t4:expr) => {
(($t1 as u32) << 24) | (($t2 as u32) << 16) | (($t3 as u32) << 8) | ($t4 as u32)
};
}
pub const GPOS: u32 = ot_tag!('G', 'P', 'O', 'S');
pub const GSUB: u32 = ot_tag!('G', 'S', 'U', 'B');
pub const KERN: u32 = ot_tag!('k', 'e', 'r', 'n');
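// The macro packs four ASCII bytes into a big-endian u32, so KERN above evaluates to
// 0x6B65726E, matching the four-byte table tags used by the OpenType table directory.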
static TEXT_SHAPING_PERFORMANCE_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
// FontHandle encapsulates access to the platform's font API,
// e.g. quartz, FreeType. It provides access to metrics and tables
// needed by the text shaper as well as access to the underlying font
// resources needed by the graphics layer to draw glyphs.
pub trait FontHandleMethods: Sized {
fn new_from_template(
fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>,
) -> Result<Self, ()>;
fn template(&self) -> Arc<FontTemplateData>;
fn family_name(&self) -> Option<String>;
fn face_name(&self) -> Option<String>;
fn style(&self) -> font_style::T;
fn boldness(&self) -> font_weight::T;
fn stretchiness(&self) -> font_stretch::T;
fn glyph_index(&self, codepoint: char) -> Option<GlyphId>;
fn glyph_h_advance(&self, GlyphId) -> Option<FractionalPixel>;
fn glyph_h_kerning(&self, glyph0: GlyphId, glyph1: GlyphId) -> FractionalPixel;
/// Can this font do basic horizontal LTR shaping without Harfbuzz?
fn can_do_fast_shaping(&self) -> bool;
fn metrics(&self) -> FontMetrics;
fn table_for_tag(&self, FontTableTag) -> Option<FontTable>;
/// A unique identifier for the font, allowing comparison.
fn identifier(&self) -> Atom;
}
// Used to abstract over the shaper's choice of fixed int representation.
pub type FractionalPixel = f64;
pub type FontTableTag = u32;
trait FontTableTagConversions {
fn tag_to_str(&self) -> String;
}
impl FontTableTagConversions for FontTableTag {
fn tag_to_str(&self) -> String {
let bytes = [
(self >> 24) as u8,
(self >> 16) as u8,
(self >> 8) as u8,
(self >> 0) as u8,
];
str::from_utf8(&bytes).unwrap().to_owned()
}
}
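// For example, KERN.tag_to_str() yields "kern", reversing the byte packing done by ot_tag! above.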
pub trait FontTableMethods {
fn buffer(&self) -> &[u8];
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct FontMetrics {
pub underline_size: Au,
pub underline_offset: Au,
pub strikeout_size: Au,
pub strikeout_offset: Au,
pub leading: Au,
pub x_height: Au,
pub em_size: Au,
pub ascent: Au,
pub descent: Au,
pub max_advance: Au,
pub average_advance: Au,
pub line_gap: Au,
}
/// `FontDescriptor` describes the parameters of a `Font`. It represents rendering a given font
/// template at a particular size, with a particular font-variant-caps applied, etc. This contrasts
/// with `FontTemplateDescriptor` in that the latter represents only the parameters inherent in the
/// font data (weight, stretch, etc.).
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct FontDescriptor {
pub template_descriptor: FontTemplateDescriptor,
pub variant: font_variant_caps::T,
pub pt_size: Au,
}
impl<'a> From<&'a FontStyleStruct> for FontDescriptor {
fn from(style: &'a FontStyleStruct) -> Self {
FontDescriptor {
template_descriptor: FontTemplateDescriptor::from(style),
variant: style.font_variant_caps,
pt_size: style.font_size.size(),
}
}
}
#[derive(Debug)]
pub struct Font {
pub handle: FontHandle,
pub metrics: FontMetrics,
pub descriptor: FontDescriptor,
pub actual_pt_size: Au,
shaper: Option<Shaper>,
shape_cache: RefCell<HashMap<ShapeCacheEntry, Arc<GlyphStore>>>,
glyph_advance_cache: RefCell<HashMap<u32, FractionalPixel>>,
pub font_key: webrender_api::FontInstanceKey,
}
impl Font {
pub fn new(
handle: FontHandle,
descriptor: FontDescriptor,
actual_pt_size: Au,
font_key: webrender_api::FontInstanceKey,
) -> Font {
let metrics = handle.metrics();
Font {
handle: handle,
shaper: None,
descriptor,
actual_pt_size,
metrics,
shape_cache: RefCell::new(HashMap::new()),
glyph_advance_cache: RefCell::new(HashMap::new()),
font_key,
}
}
/// A unique identifier for the font, allowing comparison.
pub fn identifier(&self) -> Atom {
self.handle.identifier()
}
}
bitflags! {
pub struct ShapingFlags: u8 {
#[doc = "Set if the text is entirely whitespace."]
const IS_WHITESPACE_SHAPING_FLAG = 0x01;
#[doc = "Set if we are to ignore ligatures."]
const IGNORE_LIGATURES_SHAPING_FLAG = 0x02;
#[doc = "Set if we are to disable kerning."]
const DISABLE_KERNING_SHAPING_FLAG = 0x04;
#[doc = "Text direction is right-to-left."]
const RTL_FLAG = 0x08;
#[doc = "Set if word-break is set to keep-all."]
const KEEP_ALL_FLAG = 0x10;
}
}
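// These are ordinary bitflags, so call sites may combine them with `|`, for instance
// `ShapingFlags::IS_WHITESPACE_SHAPING_FLAG | ShapingFlags::RTL_FLAG`.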
/// Various options that control text shaping.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ShapingOptions {
/// Spacing to add between each letter. Corresponds to the CSS 2.1 `letter-spacing` property.
/// NB: You will probably want to set the `IGNORE_LIGATURES_SHAPING_FLAG` if this is non-null.
pub letter_spacing: Option<Au>,
/// Spacing to add between each word. Corresponds to the CSS 2.1 `word-spacing` property.
pub word_spacing: (Au, NotNan<f32>),
/// The Unicode script property of the characters in this run.
pub script: Script,
/// Various flags.
pub flags: ShapingFlags,
}
/// An entry in the shape cache.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct ShapeCacheEntry {
text: String,
options: ShapingOptions,
}
impl Font {
pub fn shape_text(&mut self, text: &str, options: &ShapingOptions) -> Arc<GlyphStore> {
let this = self as *const Font;
let mut shaper = self.shaper.take();
let lookup_key = ShapeCacheEntry {
text: text.to_owned(),
options: *options,
};
let result = self
.shape_cache
.borrow_mut()
.entry(lookup_key)
.or_insert_with(|| {
let start_time = time::precise_time_ns();
let mut glyphs = GlyphStore::new(
text.len(),
options
.flags
.contains(ShapingFlags::IS_WHITESPACE_SHAPING_FLAG),
options.flags.contains(ShapingFlags::RTL_FLAG),
);
if self.can_do_fast_shaping(text, options) {
debug!("shape_text: Using ASCII fast path.");
self.shape_text_fast(text, options, &mut glyphs);
} else {
debug!("shape_text: Using Harfbuzz.");
if shaper.is_none() {
shaper = Some(Shaper::new(this));
}
shaper
.as_ref()
.unwrap()
.shape_text(text, options, &mut glyphs);
}
let end_time = time::precise_time_ns();
TEXT_SHAPING_PERFORMANCE_COUNTER
.fetch_add((end_time - start_time) as usize, Ordering::Relaxed);
Arc::new(glyphs)
}).clone();
self.shaper = shaper;
result
}
fn can_do_fast_shaping(&self, text: &str, options: &ShapingOptions) -> bool {
options.script == Script::Latin &&
!options.flags.contains(ShapingFlags::RTL_FLAG) &&
self.handle.can_do_fast_shaping() &&
text.is_ascii()
}
/// Fast path for ASCII text that only needs simple horizontal LTR kerning.
fn shape_text_fast(&self, text: &str, options: &ShapingOptions, glyphs: &mut GlyphStore) {
let mut prev_glyph_id = None;
for (i, byte) in text.bytes().enumerate() {
let character = byte as char;
let glyph_id = match self.glyph_index(character) {
Some(id) => id,
None => continue,
};
let mut advance = Au::from_f64_px(self.glyph_h_advance(glyph_id));
            if character == ' ' {
// https://drafts.csswg.org/css-text-3/#word-spacing-property
let (length, percent) = options.word_spacing;
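                // word-spacing is (fixed length, fraction of the base advance); both parts are added on.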
advance = (advance + length) + Au((advance.0 as f32 * percent.into_inner()) as i32);
}
if let Some(letter_spacing) = options.letter_spacing {
advance += letter_spacing;
}
let offset = prev_glyph_id.map(|prev| {
let h_kerning = Au::from_f64_px(self.glyph_h_kerning(prev, glyph_id));
advance += h_kerning;
Point2D::new(h_kerning, Au(0))
});
let glyph = GlyphData::new(glyph_id, advance, offset, true, true);
glyphs.add_glyph_for_byte_index(ByteIndex(i as isize), character, &glyph);
prev_glyph_id = Some(glyph_id);
}
glyphs.finalize_changes();
}
pub fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result = self.handle.table_for_tag(tag);
let status = if result.is_some() {
"Found"
} else {
"Didn't find"
};
debug!(
"{} font table[{}] with family={}, face={}",
status,
tag.tag_to_str(),
self.handle
.family_name()
.unwrap_or("unavailable".to_owned()),
self.handle.face_name().unwrap_or("unavailable".to_owned())
);
result
}
#[inline]
pub fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let codepoint = match self.descriptor.variant {
font_variant_caps::T::SmallCaps => codepoint.to_uppercase().next().unwrap(), //FIXME: #5938
font_variant_caps::T::Normal => codepoint,
};
self.handle.glyph_index(codepoint)
}
pub fn has_glyph_for(&self, codepoint: char) -> bool {
self.glyph_index(codepoint).is_some()
}
pub fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
self.handle.glyph_h_kerning(first_glyph, second_glyph)
}
pub fn glyph_h_advance(&self, glyph: GlyphId) -> FractionalPixel {
*self
.glyph_advance_cache
.borrow_mut()
.entry(glyph)
.or_insert_with(|| {
match self.handle.glyph_h_advance(glyph) {
Some(adv) => adv,
None => 10f64 as FractionalPixel, // FIXME: Need fallback strategy
}
})
}
}
pub type FontRef = Rc<RefCell<Font>>;
/// A `FontGroup` is a prioritised list of fonts for a given set of font styles. It is used by
/// `TextRun` to decide which font to render a character with. If none of the fonts listed in the
/// styles are suitable, a fallback font may be used.
#[derive(Debug)]
pub struct FontGroup {
descriptor: FontDescriptor,
families: SmallVec<[FontGroupFamily; 8]>,
last_matching_fallback: Option<FontRef>,
}
impl FontGroup {
pub fn new(style: &FontStyleStruct) -> FontGroup {
let descriptor = FontDescriptor::from(style);
let families = style
.font_family
.0
.iter()
.map(|family| FontGroupFamily::new(descriptor.clone(), &family))
.collect();
FontGroup {
descriptor,
families,
last_matching_fallback: None,
}
}
/// Finds the first font, or else the first fallback font, which contains a glyph for
/// `codepoint`. If no such font is found, returns the first available font or fallback font
/// (which will cause a "glyph not found" character to be rendered). If no font at all can be
/// found, returns None.
pub fn find_by_codepoint<S: FontSource>(
&mut self,
mut font_context: &mut FontContext<S>,
codepoint: char,
) -> Option<FontRef> {
let has_glyph = |font: &FontRef| font.borrow().has_glyph_for(codepoint);
let font = self.find(&mut font_context, |font| has_glyph(font));
if font.is_some() {
return font;
}
if let Some(ref fallback) = self.last_matching_fallback {
if has_glyph(&fallback) {
return self.last_matching_fallback.clone();
}
}
let font = self.find_fallback(&mut font_context, Some(codepoint), has_glyph);
if font.is_some() {
self.last_matching_fallback = font.clone();
return font;
}
self.first(&mut font_context)
}
/// Find the first available font in the group, or the first available fallback font.
pub fn first<S: FontSource>(
&mut self,
mut font_context: &mut FontContext<S>,
) -> Option<FontRef> {
self.find(&mut font_context, |_| true)
.or_else(|| self.find_fallback(&mut font_context, None, |_| true))
}
/// Find a font which returns true for `predicate`. This method mutates because we may need to
/// load new font data in the process of finding a suitable font.
fn find<S, P>(&mut self, mut font_context: &mut FontContext<S>, predicate: P) -> Option<FontRef>
where
S: FontSource,
P: FnMut(&FontRef) -> bool,
{
self.families
.iter_mut()
.filter_map(|family| family.font(&mut font_context))
.find(predicate)
}
/// Attempts to find a suitable fallback font which matches the `predicate`. The default
/// family (i.e. "serif") will be tried first, followed by platform-specific family names.
/// If a `codepoint` is provided, then its Unicode block may be used to refine the list of
/// family names which will be tried.
fn find_fallback<S, P>(
&mut self,
font_context: &mut FontContext<S>,
codepoint: Option<char>,
predicate: P,
) -> Option<FontRef>
where
S: FontSource,
P: FnMut(&FontRef) -> bool,
{
iter::once(FontFamilyDescriptor::default())
.chain(fallback_font_families(codepoint).into_iter().map(|family| {
FontFamilyDescriptor::new(FontFamilyName::from(family), FontSearchScope::Local)
})).filter_map(|family| font_context.font(&self.descriptor, &family))
.find(predicate)
}
}
/// A `FontGroupFamily` is a single font family in a `FontGroup`. It corresponds to one of the
/// families listed in the `font-family` CSS property. The corresponding font data is lazy-loaded,
/// only if actually needed.
#[derive(Debug)]
struct FontGroupFamily {
font_descriptor: FontDescriptor,
family_descriptor: FontFamilyDescriptor,
loaded: bool,
font: Option<FontRef>,
}
impl FontGroupFamily {
fn new(font_descriptor: FontDescriptor, family: &SingleFontFamily) -> FontGroupFamily {
let family_descriptor =
FontFamilyDescriptor::new(FontFamilyName::from(family), FontSearchScope::Any);
FontGroupFamily {
font_descriptor,
family_descriptor,
loaded: false,
font: None,
}
}
/// Returns the font within this family which matches the style. We'll fetch the data from the
/// `FontContext` the first time this method is called, and return a cached reference on
/// subsequent calls.
fn font<S: FontSource>(&mut self, font_context: &mut FontContext<S>) -> Option<FontRef> {
        if !self.loaded {
self.font = font_context.font(&self.font_descriptor, &self.family_descriptor);
self.loaded = true;
}
self.font.clone()
}
}
pub struct RunMetrics {
// may be negative due to negative width (i.e., kerning of '.' in 'P.T.')
pub advance_width: Au,
pub ascent: Au, // nonzero
pub descent: Au, // nonzero
// this bounding box is relative to the left origin baseline.
// so, bounding_box.position.y = -ascent
pub bounding_box: Rect<Au>,
}
impl RunMetrics {
pub fn new(advance: Au, ascent: Au, descent: Au) -> RunMetrics {
let bounds = Rect::new(
Point2D::new(Au(0), -ascent),
Size2D::new(advance, ascent + descent),
);
// TODO(Issue #125): support loose and tight bounding boxes; using the
// ascent+descent and advance is sometimes too generous and
// looking at actual glyph extents can yield a tighter box.
RunMetrics {
advance_width: advance,
bounding_box: bounds,
ascent: ascent,
descent: descent,
}
}
}
pub fn get_and_reset_text_shaping_performance_counter() -> usize {
let value = TEXT_SHAPING_PERFORMANCE_COUNTER.load(Ordering::SeqCst);
TEXT_SHAPING_PERFORMANCE_COUNTER.store(0, Ordering::SeqCst);
value
}
/// The scope within which we will look for a font.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum FontSearchScope {
/// All fonts will be searched, including those specified via `@font-face` rules.
Any,
/// Only local system fonts will be searched.
Local,
}
/// A font family name used in font selection.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum | {
/// A specific name such as `"Arial"`
Specific(Atom),
/// A generic name such as `sans-serif`
Generic(Atom),
}
impl FontFamilyName {
pub fn name(&self) -> &str {
match *self {
FontFamilyName::Specific(ref name) => name,
FontFamilyName::Generic(ref name) => name,
}
}
}
impl<'a> From<&'a SingleFontFamily> for FontFamilyName {
fn from(other: &'a SingleFontFamily) -> FontFamilyName {
match *other {
SingleFontFamily::FamilyName(ref family_name) => {
FontFamilyName::Specific(family_name.name.clone())
},
SingleFontFamily::Generic(ref generic_name) => {
FontFamilyName::Generic(generic_name.clone())
},
}
}
}
impl<'a> From<&'a str> for FontFamilyName {
fn from(other: &'a str) -> FontFamilyName {
FontFamilyName::Specific(Atom::from(other))
}
}
/// The font family parameters for font selection.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct FontFamilyDescriptor {
pub name: FontFamilyName,
pub scope: FontSearchScope,
}
impl FontFamilyDescriptor {
pub fn new(name: FontFamilyName, scope: FontSearchScope) -> FontFamilyDescriptor { | FontFamilyName | identifier_name |
font.rs |
use app_units::Au;
use euclid::{Point2D, Rect, Size2D};
use font_context::{FontContext, FontSource};
use font_template::FontTemplateDescriptor;
use ordered_float::NotNan;
use platform::font::{FontHandle, FontTable};
use platform::font_context::FontContextHandle;
pub use platform::font_list::fallback_font_families;
use platform::font_template::FontTemplateData;
use servo_atoms::Atom;
use smallvec::SmallVec;
use std::borrow::ToOwned;
use std::cell::RefCell;
use std::collections::HashMap;
use std::iter;
use std::rc::Rc;
use std::str;
use std::sync::Arc;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
use style::computed_values::{font_stretch, font_style, font_variant_caps, font_weight};
use style::properties::style_structs::Font as FontStyleStruct;
use style::values::computed::font::SingleFontFamily;
use text::Shaper;
use text::glyph::{ByteIndex, GlyphData, GlyphId, GlyphStore};
use text::shaping::ShaperMethods;
use time;
use unicode_script::Script;
use webrender_api;
macro_rules! ot_tag {
($t1:expr, $t2:expr, $t3:expr, $t4:expr) => {
(($t1 as u32) << 24) | (($t2 as u32) << 16) | (($t3 as u32) << 8) | ($t4 as u32)
};
}
pub const GPOS: u32 = ot_tag!('G', 'P', 'O', 'S');
pub const GSUB: u32 = ot_tag!('G', 'S', 'U', 'B');
pub const KERN: u32 = ot_tag!('k', 'e', 'r', 'n');
static TEXT_SHAPING_PERFORMANCE_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
// FontHandle encapsulates access to the platform's font API,
// e.g. quartz, FreeType. It provides access to metrics and tables
// needed by the text shaper as well as access to the underlying font
// resources needed by the graphics layer to draw glyphs.
pub trait FontHandleMethods: Sized {
fn new_from_template(
fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>,
) -> Result<Self, ()>;
fn template(&self) -> Arc<FontTemplateData>;
fn family_name(&self) -> Option<String>;
fn face_name(&self) -> Option<String>;
fn style(&self) -> font_style::T;
fn boldness(&self) -> font_weight::T;
fn stretchiness(&self) -> font_stretch::T;
fn glyph_index(&self, codepoint: char) -> Option<GlyphId>;
fn glyph_h_advance(&self, GlyphId) -> Option<FractionalPixel>;
fn glyph_h_kerning(&self, glyph0: GlyphId, glyph1: GlyphId) -> FractionalPixel;
/// Can this font do basic horizontal LTR shaping without Harfbuzz?
fn can_do_fast_shaping(&self) -> bool;
fn metrics(&self) -> FontMetrics;
fn table_for_tag(&self, FontTableTag) -> Option<FontTable>;
/// A unique identifier for the font, allowing comparison.
fn identifier(&self) -> Atom;
}
// Used to abstract over the shaper's choice of fixed int representation.
pub type FractionalPixel = f64;
pub type FontTableTag = u32;
trait FontTableTagConversions {
fn tag_to_str(&self) -> String;
}
impl FontTableTagConversions for FontTableTag {
fn tag_to_str(&self) -> String {
let bytes = [
(self >> 24) as u8,
(self >> 16) as u8,
(self >> 8) as u8,
(self >> 0) as u8,
];
str::from_utf8(&bytes).unwrap().to_owned()
}
}
pub trait FontTableMethods {
fn buffer(&self) -> &[u8];
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct FontMetrics {
pub underline_size: Au,
pub underline_offset: Au,
pub strikeout_size: Au,
pub strikeout_offset: Au,
pub leading: Au,
pub x_height: Au,
pub em_size: Au,
pub ascent: Au,
pub descent: Au,
pub max_advance: Au,
pub average_advance: Au,
pub line_gap: Au,
}
/// `FontDescriptor` describes the parameters of a `Font`. It represents rendering a given font
/// template at a particular size, with a particular font-variant-caps applied, etc. This contrasts
/// with `FontTemplateDescriptor` in that the latter represents only the parameters inherent in the
/// font data (weight, stretch, etc.).
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct FontDescriptor {
pub template_descriptor: FontTemplateDescriptor,
pub variant: font_variant_caps::T,
pub pt_size: Au,
}
impl<'a> From<&'a FontStyleStruct> for FontDescriptor {
fn from(style: &'a FontStyleStruct) -> Self {
FontDescriptor {
template_descriptor: FontTemplateDescriptor::from(style),
variant: style.font_variant_caps,
pt_size: style.font_size.size(),
}
}
}
#[derive(Debug)]
pub struct Font {
pub handle: FontHandle,
pub metrics: FontMetrics,
pub descriptor: FontDescriptor,
pub actual_pt_size: Au,
shaper: Option<Shaper>,
shape_cache: RefCell<HashMap<ShapeCacheEntry, Arc<GlyphStore>>>,
glyph_advance_cache: RefCell<HashMap<u32, FractionalPixel>>,
pub font_key: webrender_api::FontInstanceKey,
}
impl Font {
pub fn new(
handle: FontHandle,
descriptor: FontDescriptor,
actual_pt_size: Au,
font_key: webrender_api::FontInstanceKey,
) -> Font {
let metrics = handle.metrics();
Font {
handle: handle,
shaper: None,
descriptor,
actual_pt_size,
metrics,
shape_cache: RefCell::new(HashMap::new()),
glyph_advance_cache: RefCell::new(HashMap::new()),
font_key,
}
}
/// A unique identifier for the font, allowing comparison.
pub fn identifier(&self) -> Atom {
self.handle.identifier()
}
}
bitflags! {
pub struct ShapingFlags: u8 {
#[doc = "Set if the text is entirely whitespace."]
const IS_WHITESPACE_SHAPING_FLAG = 0x01;
#[doc = "Set if we are to ignore ligatures."]
const IGNORE_LIGATURES_SHAPING_FLAG = 0x02;
#[doc = "Set if we are to disable kerning."]
const DISABLE_KERNING_SHAPING_FLAG = 0x04;
#[doc = "Text direction is right-to-left."]
const RTL_FLAG = 0x08;
#[doc = "Set if word-break is set to keep-all."]
const KEEP_ALL_FLAG = 0x10;
}
}
/// Various options that control text shaping.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ShapingOptions {
/// Spacing to add between each letter. Corresponds to the CSS 2.1 `letter-spacing` property.
/// NB: You will probably want to set the `IGNORE_LIGATURES_SHAPING_FLAG` if this is non-null.
pub letter_spacing: Option<Au>,
/// Spacing to add between each word. Corresponds to the CSS 2.1 `word-spacing` property.
pub word_spacing: (Au, NotNan<f32>),
/// The Unicode script property of the characters in this run.
pub script: Script,
/// Various flags.
pub flags: ShapingFlags,
}
/// An entry in the shape cache.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct ShapeCacheEntry {
text: String,
options: ShapingOptions,
}
impl Font {
pub fn shape_text(&mut self, text: &str, options: &ShapingOptions) -> Arc<GlyphStore> {
let this = self as *const Font;
let mut shaper = self.shaper.take();
let lookup_key = ShapeCacheEntry {
text: text.to_owned(),
options: *options,
};
let result = self
.shape_cache
.borrow_mut()
.entry(lookup_key)
.or_insert_with(|| {
let start_time = time::precise_time_ns();
let mut glyphs = GlyphStore::new(
text.len(),
options
.flags
.contains(ShapingFlags::IS_WHITESPACE_SHAPING_FLAG),
options.flags.contains(ShapingFlags::RTL_FLAG),
);
if self.can_do_fast_shaping(text, options) {
debug!("shape_text: Using ASCII fast path.");
self.shape_text_fast(text, options, &mut glyphs);
} else {
debug!("shape_text: Using Harfbuzz.");
if shaper.is_none() {
shaper = Some(Shaper::new(this));
}
shaper
.as_ref()
.unwrap()
.shape_text(text, options, &mut glyphs);
}
let end_time = time::precise_time_ns();
TEXT_SHAPING_PERFORMANCE_COUNTER
.fetch_add((end_time - start_time) as usize, Ordering::Relaxed);
Arc::new(glyphs)
}).clone();
self.shaper = shaper;
result
}
fn can_do_fast_shaping(&self, text: &str, options: &ShapingOptions) -> bool {
options.script == Script::Latin &&
!options.flags.contains(ShapingFlags::RTL_FLAG) &&
self.handle.can_do_fast_shaping() &&
text.is_ascii()
}
/// Fast path for ASCII text that only needs simple horizontal LTR kerning.
fn shape_text_fast(&self, text: &str, options: &ShapingOptions, glyphs: &mut GlyphStore) {
let mut prev_glyph_id = None;
for (i, byte) in text.bytes().enumerate() {
let character = byte as char;
let glyph_id = match self.glyph_index(character) {
Some(id) => id,
None => continue,
};
let mut advance = Au::from_f64_px(self.glyph_h_advance(glyph_id));
            if character == ' ' {
// https://drafts.csswg.org/css-text-3/#word-spacing-property
let (length, percent) = options.word_spacing;
advance = (advance + length) + Au((advance.0 as f32 * percent.into_inner()) as i32);
}
if let Some(letter_spacing) = options.letter_spacing {
advance += letter_spacing;
}
let offset = prev_glyph_id.map(|prev| {
let h_kerning = Au::from_f64_px(self.glyph_h_kerning(prev, glyph_id));
advance += h_kerning;
Point2D::new(h_kerning, Au(0))
});
let glyph = GlyphData::new(glyph_id, advance, offset, true, true);
glyphs.add_glyph_for_byte_index(ByteIndex(i as isize), character, &glyph);
prev_glyph_id = Some(glyph_id);
}
glyphs.finalize_changes();
}
pub fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result = self.handle.table_for_tag(tag);
let status = if result.is_some() {
"Found"
} else {
"Didn't find"
};
debug!(
"{} font table[{}] with family={}, face={}",
status,
tag.tag_to_str(),
self.handle
.family_name()
.unwrap_or("unavailable".to_owned()),
self.handle.face_name().unwrap_or("unavailable".to_owned())
);
result
}
#[inline]
pub fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let codepoint = match self.descriptor.variant {
font_variant_caps::T::SmallCaps => codepoint.to_uppercase().next().unwrap(), //FIXME: #5938
font_variant_caps::T::Normal => codepoint,
};
self.handle.glyph_index(codepoint)
}
pub fn has_glyph_for(&self, codepoint: char) -> bool {
self.glyph_index(codepoint).is_some()
}
pub fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
self.handle.glyph_h_kerning(first_glyph, second_glyph)
}
pub fn glyph_h_advance(&self, glyph: GlyphId) -> FractionalPixel {
*self
.glyph_advance_cache
.borrow_mut()
.entry(glyph)
.or_insert_with(|| {
match self.handle.glyph_h_advance(glyph) {
Some(adv) => adv,
None => 10f64 as FractionalPixel, // FIXME: Need fallback strategy
}
})
}
}
pub type FontRef = Rc<RefCell<Font>>;
/// A `FontGroup` is a prioritised list of fonts for a given set of font styles. It is used by
/// `TextRun` to decide which font to render a character with. If none of the fonts listed in the
/// styles are suitable, a fallback font may be used.
#[derive(Debug)]
pub struct FontGroup {
descriptor: FontDescriptor,
families: SmallVec<[FontGroupFamily; 8]>,
last_matching_fallback: Option<FontRef>,
}
impl FontGroup {
pub fn new(style: &FontStyleStruct) -> FontGroup {
let descriptor = FontDescriptor::from(style);
let families = style
.font_family
.0
.iter()
.map(|family| FontGroupFamily::new(descriptor.clone(), &family))
.collect();
FontGroup {
descriptor,
families,
last_matching_fallback: None,
}
}
/// Finds the first font, or else the first fallback font, which contains a glyph for
/// `codepoint`. If no such font is found, returns the first available font or fallback font
/// (which will cause a "glyph not found" character to be rendered). If no font at all can be
/// found, returns None.
pub fn find_by_codepoint<S: FontSource>(
&mut self,
mut font_context: &mut FontContext<S>,
codepoint: char,
) -> Option<FontRef> {
let has_glyph = |font: &FontRef| font.borrow().has_glyph_for(codepoint);
let font = self.find(&mut font_context, |font| has_glyph(font));
if font.is_some() {
return font;
}
if let Some(ref fallback) = self.last_matching_fallback {
if has_glyph(&fallback) {
return self.last_matching_fallback.clone();
}
}
let font = self.find_fallback(&mut font_context, Some(codepoint), has_glyph);
if font.is_some() {
self.last_matching_fallback = font.clone();
return font;
}
self.first(&mut font_context)
}
/// Find the first available font in the group, or the first available fallback font.
pub fn first<S: FontSource>(
&mut self,
mut font_context: &mut FontContext<S>,
) -> Option<FontRef> {
self.find(&mut font_context, |_| true)
.or_else(|| self.find_fallback(&mut font_context, None, |_| true))
}
/// Find a font which returns true for `predicate`. This method mutates because we may need to
/// load new font data in the process of finding a suitable font.
fn find<S, P>(&mut self, mut font_context: &mut FontContext<S>, predicate: P) -> Option<FontRef>
where
S: FontSource,
P: FnMut(&FontRef) -> bool,
{
self.families
.iter_mut()
.filter_map(|family| family.font(&mut font_context))
.find(predicate)
}
/// Attempts to find a suitable fallback font which matches the `predicate`. The default
/// family (i.e. "serif") will be tried first, followed by platform-specific family names.
/// If a `codepoint` is provided, then its Unicode block may be used to refine the list of
/// family names which will be tried.
fn find_fallback<S, P>(
&mut self,
font_context: &mut FontContext<S>,
codepoint: Option<char>,
predicate: P,
) -> Option<FontRef>
where
S: FontSource,
P: FnMut(&FontRef) -> bool,
{
iter::once(FontFamilyDescriptor::default())
.chain(fallback_font_families(codepoint).into_iter().map(|family| {
FontFamilyDescriptor::new(FontFamilyName::from(family), FontSearchScope::Local)
})).filter_map(|family| font_context.font(&self.descriptor, &family))
.find(predicate)
}
}
/// A `FontGroupFamily` is a single font family in a `FontGroup`. It corresponds to one of the
/// families listed in the `font-family` CSS property. The corresponding font data is lazy-loaded,
/// only if actually needed.
#[derive(Debug)]
struct FontGroupFamily {
font_descriptor: FontDescriptor,
family_descriptor: FontFamilyDescriptor,
loaded: bool,
font: Option<FontRef>,
}
impl FontGroupFamily {
fn new(font_descriptor: FontDescriptor, family: &SingleFontFamily) -> FontGroupFamily {
let family_descriptor =
FontFamilyDescriptor::new(FontFamilyName::from(family), FontSearchScope::Any);
FontGroupFamily {
font_descriptor,
family_descriptor,
loaded: false,
font: None,
}
}
/// Returns the font within this family which matches the style. We'll fetch the data from the
/// `FontContext` the first time this method is called, and return a cached reference on
/// subsequent calls.
fn font<S: FontSource>(&mut self, font_context: &mut FontContext<S>) -> Option<FontRef> |
}
pub struct RunMetrics {
// may be negative due to negative width (i.e., kerning of '.' in 'P.T.')
pub advance_width: Au,
pub ascent: Au, // nonzero
pub descent: Au, // nonzero
// this bounding box is relative to the left origin baseline.
// so, bounding_box.position.y = -ascent
pub bounding_box: Rect<Au>,
}
impl RunMetrics {
pub fn new(advance: Au, ascent: Au, descent: Au) -> RunMetrics {
let bounds = Rect::new(
Point2D::new(Au(0), -ascent),
Size2D::new(advance, ascent + descent),
);
// TODO(Issue #125): support loose and tight bounding boxes; using the
// ascent+descent and advance is sometimes too generous and
// looking at actual glyph extents can yield a tighter box.
RunMetrics {
advance_width: advance,
bounding_box: bounds,
ascent: ascent,
descent: descent,
}
}
}
pub fn get_and_reset_text_shaping_performance_counter() -> usize {
let value = TEXT_SHAPING_PERFORMANCE_COUNTER.load(Ordering::SeqCst);
TEXT_SHAPING_PERFORMANCE_COUNTER.store(0, Ordering::SeqCst);
value
}
/// The scope within which we will look for a font.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum FontSearchScope {
/// All fonts will be searched, including those specified via `@font-face` rules.
Any,
/// Only local system fonts will be searched.
Local,
}
/// A font family name used in font selection.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum FontFamilyName {
/// A specific name such as `"Arial"`
Specific(Atom),
/// A generic name such as `sans-serif`
Generic(Atom),
}
impl FontFamilyName {
pub fn name(&self) -> &str {
match *self {
FontFamilyName::Specific(ref name) => name,
FontFamilyName::Generic(ref name) => name,
}
}
}
impl<'a> From<&'a SingleFontFamily> for FontFamilyName {
fn from(other: &'a SingleFontFamily) -> FontFamilyName {
match *other {
SingleFontFamily::FamilyName(ref family_name) => {
FontFamilyName::Specific(family_name.name.clone())
},
SingleFontFamily::Generic(ref generic_name) => {
FontFamilyName::Generic(generic_name.clone())
},
}
}
}
impl<'a> From<&'a str> for FontFamilyName {
fn from(other: &'a str) -> FontFamilyName {
FontFamilyName::Specific(Atom::from(other))
}
}
/// The font family parameters for font selection.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct FontFamilyDescriptor {
pub name: FontFamilyName,
pub scope: FontSearchScope,
}
impl FontFamilyDescriptor {
pub fn new(name: FontFamilyName, scope: FontSearchScope) -> FontFamilyDescriptor { | {
if !self.loaded {
self.font = font_context.font(&self.font_descriptor, &self.family_descriptor);
self.loaded = true;
}
self.font.clone()
} | identifier_body |
font.rs |
use app_units::Au;
use euclid::{Point2D, Rect, Size2D};
use font_context::{FontContext, FontSource};
use font_template::FontTemplateDescriptor;
use ordered_float::NotNan;
use platform::font::{FontHandle, FontTable};
use platform::font_context::FontContextHandle;
pub use platform::font_list::fallback_font_families;
use platform::font_template::FontTemplateData;
use servo_atoms::Atom;
use smallvec::SmallVec;
use std::borrow::ToOwned;
use std::cell::RefCell;
use std::collections::HashMap;
use std::iter;
use std::rc::Rc;
use std::str;
use std::sync::Arc;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
use style::computed_values::{font_stretch, font_style, font_variant_caps, font_weight};
use style::properties::style_structs::Font as FontStyleStruct;
use style::values::computed::font::SingleFontFamily;
use text::Shaper;
use text::glyph::{ByteIndex, GlyphData, GlyphId, GlyphStore};
use text::shaping::ShaperMethods;
use time;
use unicode_script::Script;
use webrender_api;
macro_rules! ot_tag {
($t1:expr, $t2:expr, $t3:expr, $t4:expr) => {
(($t1 as u32) << 24) | (($t2 as u32) << 16) | (($t3 as u32) << 8) | ($t4 as u32)
};
}
pub const GPOS: u32 = ot_tag!('G', 'P', 'O', 'S');
pub const GSUB: u32 = ot_tag!('G', 'S', 'U', 'B');
pub const KERN: u32 = ot_tag!('k', 'e', 'r', 'n');
static TEXT_SHAPING_PERFORMANCE_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
// FontHandle encapsulates access to the platform's font API,
// e.g. quartz, FreeType. It provides access to metrics and tables
// needed by the text shaper as well as access to the underlying font
// resources needed by the graphics layer to draw glyphs.
pub trait FontHandleMethods: Sized {
fn new_from_template(
fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>,
) -> Result<Self, ()>;
fn template(&self) -> Arc<FontTemplateData>;
fn family_name(&self) -> Option<String>;
fn face_name(&self) -> Option<String>;
fn style(&self) -> font_style::T;
fn boldness(&self) -> font_weight::T;
fn stretchiness(&self) -> font_stretch::T;
fn glyph_index(&self, codepoint: char) -> Option<GlyphId>;
fn glyph_h_advance(&self, GlyphId) -> Option<FractionalPixel>;
fn glyph_h_kerning(&self, glyph0: GlyphId, glyph1: GlyphId) -> FractionalPixel;
/// Can this font do basic horizontal LTR shaping without Harfbuzz?
fn can_do_fast_shaping(&self) -> bool;
fn metrics(&self) -> FontMetrics;
fn table_for_tag(&self, FontTableTag) -> Option<FontTable>;
/// A unique identifier for the font, allowing comparison.
fn identifier(&self) -> Atom;
}
// Used to abstract over the shaper's choice of fixed int representation.
pub type FractionalPixel = f64;
pub type FontTableTag = u32;
trait FontTableTagConversions {
fn tag_to_str(&self) -> String;
}
impl FontTableTagConversions for FontTableTag {
fn tag_to_str(&self) -> String {
let bytes = [
(self >> 24) as u8,
(self >> 16) as u8,
(self >> 8) as u8,
(self >> 0) as u8,
];
str::from_utf8(&bytes).unwrap().to_owned()
}
}
pub trait FontTableMethods {
fn buffer(&self) -> &[u8];
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct FontMetrics {
pub underline_size: Au,
pub underline_offset: Au,
pub strikeout_size: Au,
pub strikeout_offset: Au,
pub leading: Au,
pub x_height: Au,
pub em_size: Au,
pub ascent: Au,
pub descent: Au,
pub max_advance: Au,
pub average_advance: Au,
pub line_gap: Au,
}
/// `FontDescriptor` describes the parameters of a `Font`. It represents rendering a given font
/// template at a particular size, with a particular font-variant-caps applied, etc. This contrasts
/// with `FontTemplateDescriptor` in that the latter represents only the parameters inherent in the
/// font data (weight, stretch, etc.).
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct FontDescriptor {
pub template_descriptor: FontTemplateDescriptor,
pub variant: font_variant_caps::T,
pub pt_size: Au,
}
impl<'a> From<&'a FontStyleStruct> for FontDescriptor {
fn from(style: &'a FontStyleStruct) -> Self {
FontDescriptor {
template_descriptor: FontTemplateDescriptor::from(style),
variant: style.font_variant_caps,
pt_size: style.font_size.size(),
}
}
}
#[derive(Debug)]
pub struct Font {
pub handle: FontHandle,
pub metrics: FontMetrics,
pub descriptor: FontDescriptor,
pub actual_pt_size: Au,
shaper: Option<Shaper>,
shape_cache: RefCell<HashMap<ShapeCacheEntry, Arc<GlyphStore>>>,
glyph_advance_cache: RefCell<HashMap<u32, FractionalPixel>>,
pub font_key: webrender_api::FontInstanceKey,
}
impl Font {
pub fn new(
handle: FontHandle,
descriptor: FontDescriptor,
actual_pt_size: Au,
font_key: webrender_api::FontInstanceKey,
) -> Font {
let metrics = handle.metrics();
Font {
handle: handle,
shaper: None,
descriptor,
actual_pt_size,
metrics,
shape_cache: RefCell::new(HashMap::new()),
glyph_advance_cache: RefCell::new(HashMap::new()),
font_key,
}
}
/// A unique identifier for the font, allowing comparison.
pub fn identifier(&self) -> Atom {
self.handle.identifier()
}
}
bitflags! {
pub struct ShapingFlags: u8 {
#[doc = "Set if the text is entirely whitespace."]
const IS_WHITESPACE_SHAPING_FLAG = 0x01;
#[doc = "Set if we are to ignore ligatures."]
const IGNORE_LIGATURES_SHAPING_FLAG = 0x02;
#[doc = "Set if we are to disable kerning."]
const DISABLE_KERNING_SHAPING_FLAG = 0x04;
#[doc = "Text direction is right-to-left."]
const RTL_FLAG = 0x08;
#[doc = "Set if word-break is set to keep-all."]
const KEEP_ALL_FLAG = 0x10;
}
}
/// Various options that control text shaping.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ShapingOptions {
/// Spacing to add between each letter. Corresponds to the CSS 2.1 `letter-spacing` property.
/// NB: You will probably want to set the `IGNORE_LIGATURES_SHAPING_FLAG` if this is non-null.
pub letter_spacing: Option<Au>,
/// Spacing to add between each word. Corresponds to the CSS 2.1 `word-spacing` property.
pub word_spacing: (Au, NotNan<f32>),
/// The Unicode script property of the characters in this run.
pub script: Script,
/// Various flags.
pub flags: ShapingFlags,
}
/// An entry in the shape cache.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct ShapeCacheEntry {
text: String,
options: ShapingOptions,
}
impl Font {
pub fn shape_text(&mut self, text: &str, options: &ShapingOptions) -> Arc<GlyphStore> {
let this = self as *const Font;
let mut shaper = self.shaper.take();
let lookup_key = ShapeCacheEntry {
text: text.to_owned(),
options: *options,
};
let result = self
.shape_cache
.borrow_mut()
.entry(lookup_key)
.or_insert_with(|| {
let start_time = time::precise_time_ns();
let mut glyphs = GlyphStore::new(
text.len(),
options
.flags
.contains(ShapingFlags::IS_WHITESPACE_SHAPING_FLAG),
options.flags.contains(ShapingFlags::RTL_FLAG),
);
if self.can_do_fast_shaping(text, options) {
debug!("shape_text: Using ASCII fast path.");
self.shape_text_fast(text, options, &mut glyphs);
} else {
debug!("shape_text: Using Harfbuzz.");
if shaper.is_none() {
shaper = Some(Shaper::new(this));
}
shaper
.as_ref()
.unwrap()
.shape_text(text, options, &mut glyphs);
}
let end_time = time::precise_time_ns();
TEXT_SHAPING_PERFORMANCE_COUNTER
.fetch_add((end_time - start_time) as usize, Ordering::Relaxed);
Arc::new(glyphs)
}).clone();
self.shaper = shaper;
result
}
fn can_do_fast_shaping(&self, text: &str, options: &ShapingOptions) -> bool {
options.script == Script::Latin &&
!options.flags.contains(ShapingFlags::RTL_FLAG) &&
self.handle.can_do_fast_shaping() &&
text.is_ascii()
}
/// Fast path for ASCII text that only needs simple horizontal LTR kerning.
fn shape_text_fast(&self, text: &str, options: &ShapingOptions, glyphs: &mut GlyphStore) {
let mut prev_glyph_id = None;
for (i, byte) in text.bytes().enumerate() {
let character = byte as char;
let glyph_id = match self.glyph_index(character) {
Some(id) => id,
None => continue,
};
let mut advance = Au::from_f64_px(self.glyph_h_advance(glyph_id));
if character == ' ' {
// https://drafts.csswg.org/css-text-3/#word-spacing-property
let (length, percent) = options.word_spacing;
advance = (advance + length) + Au((advance.0 as f32 * percent.into_inner()) as i32);
}
if let Some(letter_spacing) = options.letter_spacing {
advance += letter_spacing;
}
let offset = prev_glyph_id.map(|prev| {
let h_kerning = Au::from_f64_px(self.glyph_h_kerning(prev, glyph_id));
advance += h_kerning;
Point2D::new(h_kerning, Au(0))
});
let glyph = GlyphData::new(glyph_id, advance, offset, true, true);
glyphs.add_glyph_for_byte_index(ByteIndex(i as isize), character, &glyph);
prev_glyph_id = Some(glyph_id);
}
glyphs.finalize_changes();
}
pub fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result = self.handle.table_for_tag(tag);
let status = if result.is_some() {
"Found"
} else {
"Didn't find"
};
debug!(
"{} font table[{}] with family={}, face={}",
status,
tag.tag_to_str(),
self.handle
.family_name()
.unwrap_or("unavailable".to_owned()),
self.handle.face_name().unwrap_or("unavailable".to_owned())
);
result
}
#[inline]
pub fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let codepoint = match self.descriptor.variant {
font_variant_caps::T::SmallCaps => codepoint.to_uppercase().next().unwrap(), //FIXME: #5938
font_variant_caps::T::Normal => codepoint,
};
self.handle.glyph_index(codepoint)
}
pub fn has_glyph_for(&self, codepoint: char) -> bool {
self.glyph_index(codepoint).is_some()
}
pub fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
self.handle.glyph_h_kerning(first_glyph, second_glyph)
}
pub fn glyph_h_advance(&self, glyph: GlyphId) -> FractionalPixel {
*self
.glyph_advance_cache
.borrow_mut()
.entry(glyph)
.or_insert_with(|| {
match self.handle.glyph_h_advance(glyph) {
Some(adv) => adv,
None => 10f64 as FractionalPixel, // FIXME: Need fallback strategy
}
})
}
}
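// Minimal sketch of the word-spacing arithmetic used in `shape_text_fast` above: a
// space's advance grows by the fixed length plus the given fraction of the advance
// itself. The helper name and parameters are illustrative, not part of the original API.
#[allow(dead_code)]
fn example_word_spacing(advance: Au, length: Au, percent: f32) -> Au {
(advance + length) + Au((advance.0 as f32 * percent) as i32)
}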
pub type FontRef = Rc<RefCell<Font>>;
/// A `FontGroup` is a prioritised list of fonts for a given set of font styles. It is used by
/// `TextRun` to decide which font to render a character with. If none of the fonts listed in the
/// styles are suitable, a fallback font may be used.
#[derive(Debug)]
pub struct FontGroup {
descriptor: FontDescriptor,
families: SmallVec<[FontGroupFamily; 8]>,
last_matching_fallback: Option<FontRef>,
}
impl FontGroup {
pub fn new(style: &FontStyleStruct) -> FontGroup {
let descriptor = FontDescriptor::from(style);
let families = style
.font_family
.0
.iter()
.map(|family| FontGroupFamily::new(descriptor.clone(), &family))
.collect();
FontGroup {
descriptor,
families,
last_matching_fallback: None,
}
}
/// Finds the first font, or else the first fallback font, which contains a glyph for
/// `codepoint`. If no such font is found, returns the first available font or fallback font
/// (which will cause a "glyph not found" character to be rendered). If no font at all can be
/// found, returns None.
pub fn find_by_codepoint<S: FontSource>(
&mut self,
mut font_context: &mut FontContext<S>,
codepoint: char,
) -> Option<FontRef> {
let has_glyph = |font: &FontRef| font.borrow().has_glyph_for(codepoint);
let font = self.find(&mut font_context, |font| has_glyph(font));
if font.is_some() {
return font;
}
if let Some(ref fallback) = self.last_matching_fallback {
if has_glyph(&fallback) {
return self.last_matching_fallback.clone();
}
}
let font = self.find_fallback(&mut font_context, Some(codepoint), has_glyph);
if font.is_some() {
self.last_matching_fallback = font.clone();
return font;
}
self.first(&mut font_context)
}
/// Find the first available font in the group, or the first available fallback font.
pub fn first<S: FontSource>(
&mut self,
mut font_context: &mut FontContext<S>,
) -> Option<FontRef> {
self.find(&mut font_context, |_| true)
.or_else(|| self.find_fallback(&mut font_context, None, |_| true))
}
/// Find a font which returns true for `predicate`. This method mutates because we may need to
/// load new font data in the process of finding a suitable font.
fn find<S, P>(&mut self, mut font_context: &mut FontContext<S>, predicate: P) -> Option<FontRef>
where
S: FontSource,
P: FnMut(&FontRef) -> bool,
{
self.families
.iter_mut()
.filter_map(|family| family.font(&mut font_context))
.find(predicate)
}
/// Attempts to find a suitable fallback font which matches the `predicate`. The default
/// family (i.e. "serif") will be tried first, followed by platform-specific family names.
/// If a `codepoint` is provided, then its Unicode block may be used to refine the list of
/// family names which will be tried.
fn find_fallback<S, P>(
&mut self,
font_context: &mut FontContext<S>,
codepoint: Option<char>,
predicate: P,
) -> Option<FontRef>
where
S: FontSource,
P: FnMut(&FontRef) -> bool,
{
iter::once(FontFamilyDescriptor::default())
.chain(fallback_font_families(codepoint).into_iter().map(|family| {
FontFamilyDescriptor::new(FontFamilyName::from(family), FontSearchScope::Local)
})).filter_map(|family| font_context.font(&self.descriptor, &family))
.find(predicate)
}
}
/// A `FontGroupFamily` is a single font family in a `FontGroup`. It corresponds to one of the
/// families listed in the `font-family` CSS property. The corresponding font data is lazy-loaded,
/// only if actually needed.
#[derive(Debug)]
struct FontGroupFamily {
font_descriptor: FontDescriptor,
family_descriptor: FontFamilyDescriptor,
loaded: bool,
font: Option<FontRef>,
}
impl FontGroupFamily {
fn new(font_descriptor: FontDescriptor, family: &SingleFontFamily) -> FontGroupFamily {
let family_descriptor =
FontFamilyDescriptor::new(FontFamilyName::from(family), FontSearchScope::Any);
FontGroupFamily {
font_descriptor,
family_descriptor,
loaded: false,
font: None,
}
}
/// Returns the font within this family which matches the style. We'll fetch the data from the
/// `FontContext` the first time this method is called, and return a cached reference on
/// subsequent calls.
fn font<S: FontSource>(&mut self, font_context: &mut FontContext<S>) -> Option<FontRef> {
if !self.loaded {
self.font = font_context.font(&self.font_descriptor, &self.family_descriptor);
self.loaded = true;
}
self.font.clone()
}
}
pub struct RunMetrics {
// may be negative due to negative width (i.e., kerning of '.' in 'P.T.')
pub advance_width: Au,
pub ascent: Au, // nonzero
pub descent: Au, // nonzero
// this bounding box is relative to the left origin baseline.
// so, bounding_box.position.y = -ascent
pub bounding_box: Rect<Au>,
}
impl RunMetrics {
pub fn new(advance: Au, ascent: Au, descent: Au) -> RunMetrics {
let bounds = Rect::new(
Point2D::new(Au(0), -ascent),
Size2D::new(advance, ascent + descent),
);
// TODO(Issue #125): support loose and tight bounding boxes; using the
// ascent+descent and advance is sometimes too generous and
// looking at actual glyph extents can yield a tighter box.
RunMetrics {
advance_width: advance,
bounding_box: bounds,
ascent: ascent,
descent: descent,
}
}
}
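// Worked example (assumed values): advance = 100px, ascent = 80px, descent = 20px
// yields a bounding box at (0, -80px) with size 100px x 100px, so the origin sits on
// the baseline as the comment above describes. The helper name is illustrative only.
#[allow(dead_code)]
fn example_run_metrics() -> RunMetrics {
RunMetrics::new(Au::from_px(100), Au::from_px(80), Au::from_px(20))
}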
pub fn get_and_reset_text_shaping_performance_counter() -> usize {
let value = TEXT_SHAPING_PERFORMANCE_COUNTER.load(Ordering::SeqCst);
TEXT_SHAPING_PERFORMANCE_COUNTER.store(0, Ordering::SeqCst);
value
}
/// The scope within which we will look for a font.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum FontSearchScope {
/// All fonts will be searched, including those specified via `@font-face` rules.
Any,
/// Only local system fonts will be searched.
Local,
}
/// A font family name used in font selection.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum FontFamilyName {
/// A specific name such as `"Arial"`
Specific(Atom),
/// A generic name such as `sans-serif`
Generic(Atom),
}
impl FontFamilyName {
pub fn name(&self) -> &str {
match *self {
FontFamilyName::Specific(ref name) => name, | }
}
}
impl<'a> From<&'a SingleFontFamily> for FontFamilyName {
fn from(other: &'a SingleFontFamily) -> FontFamilyName {
match *other {
SingleFontFamily::FamilyName(ref family_name) => {
FontFamilyName::Specific(family_name.name.clone())
},
SingleFontFamily::Generic(ref generic_name) => {
FontFamilyName::Generic(generic_name.clone())
},
}
}
}
impl<'a> From<&'a str> for FontFamilyName {
fn from(other: &'a str) -> FontFamilyName {
FontFamilyName::Specific(Atom::from(other))
}
}
/// The font family parameters for font selection.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct FontFamilyDescriptor {
pub name: FontFamilyName,
pub scope: FontSearchScope,
}
impl FontFamilyDescriptor {
pub fn new(name: FontFamilyName, scope: FontSearchScope) -> FontFamilyDescriptor {
| FontFamilyName::Generic(ref name) => name, | random_line_split |
font.rs |
use app_units::Au;
use euclid::{Point2D, Rect, Size2D};
use font_context::{FontContext, FontSource};
use font_template::FontTemplateDescriptor;
use ordered_float::NotNan;
use platform::font::{FontHandle, FontTable};
use platform::font_context::FontContextHandle;
pub use platform::font_list::fallback_font_families;
use platform::font_template::FontTemplateData;
use servo_atoms::Atom;
use smallvec::SmallVec;
use std::borrow::ToOwned;
use std::cell::RefCell;
use std::collections::HashMap;
use std::iter;
use std::rc::Rc;
use std::str;
use std::sync::Arc;
use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize, Ordering};
use style::computed_values::{font_stretch, font_style, font_variant_caps, font_weight};
use style::properties::style_structs::Font as FontStyleStruct;
use style::values::computed::font::SingleFontFamily;
use text::Shaper;
use text::glyph::{ByteIndex, GlyphData, GlyphId, GlyphStore};
use text::shaping::ShaperMethods;
use time;
use unicode_script::Script;
use webrender_api;
macro_rules! ot_tag {
($t1:expr, $t2:expr, $t3:expr, $t4:expr) => {
(($t1 as u32) << 24) | (($t2 as u32) << 16) | (($t3 as u32) << 8) | ($t4 as u32)
};
}
pub const GPOS: u32 = ot_tag!('G', 'P', 'O', 'S');
pub const GSUB: u32 = ot_tag!('G', 'S', 'U', 'B');
pub const KERN: u32 = ot_tag!('k', 'e', 'r', 'n');
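// Sketch: `ot_tag!` packs four ASCII bytes big-endian into a `u32`, and `tag_to_str`
// below unpacks them again, so `KERN.tag_to_str()` yields "kern".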
static TEXT_SHAPING_PERFORMANCE_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
// FontHandle encapsulates access to the platform's font API,
// e.g. quartz, FreeType. It provides access to metrics and tables
// needed by the text shaper as well as access to the underlying font
// resources needed by the graphics layer to draw glyphs.
pub trait FontHandleMethods: Sized {
fn new_from_template(
fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>,
) -> Result<Self, ()>;
fn template(&self) -> Arc<FontTemplateData>;
fn family_name(&self) -> Option<String>;
fn face_name(&self) -> Option<String>;
fn style(&self) -> font_style::T;
fn boldness(&self) -> font_weight::T;
fn stretchiness(&self) -> font_stretch::T;
fn glyph_index(&self, codepoint: char) -> Option<GlyphId>;
fn glyph_h_advance(&self, GlyphId) -> Option<FractionalPixel>;
fn glyph_h_kerning(&self, glyph0: GlyphId, glyph1: GlyphId) -> FractionalPixel;
/// Can this font do basic horizontal LTR shaping without Harfbuzz?
fn can_do_fast_shaping(&self) -> bool;
fn metrics(&self) -> FontMetrics;
fn table_for_tag(&self, FontTableTag) -> Option<FontTable>;
/// A unique identifier for the font, allowing comparison.
fn identifier(&self) -> Atom;
}
// Used to abstract over the shaper's choice of fixed int representation.
pub type FractionalPixel = f64;
pub type FontTableTag = u32;
trait FontTableTagConversions {
fn tag_to_str(&self) -> String;
}
impl FontTableTagConversions for FontTableTag {
fn tag_to_str(&self) -> String {
let bytes = [
(self >> 24) as u8,
(self >> 16) as u8,
(self >> 8) as u8,
(self >> 0) as u8,
];
str::from_utf8(&bytes).unwrap().to_owned()
}
}
pub trait FontTableMethods {
fn buffer(&self) -> &[u8];
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct FontMetrics {
pub underline_size: Au,
pub underline_offset: Au,
pub strikeout_size: Au,
pub strikeout_offset: Au,
pub leading: Au,
pub x_height: Au,
pub em_size: Au,
pub ascent: Au,
pub descent: Au,
pub max_advance: Au,
pub average_advance: Au,
pub line_gap: Au,
}
/// `FontDescriptor` describes the parameters of a `Font`. It represents rendering a given font
/// template at a particular size, with a particular font-variant-caps applied, etc. This contrasts
/// with `FontTemplateDescriptor` in that the latter represents only the parameters inherent in the
/// font data (weight, stretch, etc.).
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct FontDescriptor {
pub template_descriptor: FontTemplateDescriptor,
pub variant: font_variant_caps::T,
pub pt_size: Au,
}
impl<'a> From<&'a FontStyleStruct> for FontDescriptor {
fn from(style: &'a FontStyleStruct) -> Self {
FontDescriptor {
template_descriptor: FontTemplateDescriptor::from(style),
variant: style.font_variant_caps,
pt_size: style.font_size.size(),
}
}
}
#[derive(Debug)]
pub struct Font {
pub handle: FontHandle,
pub metrics: FontMetrics,
pub descriptor: FontDescriptor,
pub actual_pt_size: Au,
shaper: Option<Shaper>,
shape_cache: RefCell<HashMap<ShapeCacheEntry, Arc<GlyphStore>>>,
glyph_advance_cache: RefCell<HashMap<u32, FractionalPixel>>,
pub font_key: webrender_api::FontInstanceKey,
}
impl Font {
pub fn new(
handle: FontHandle,
descriptor: FontDescriptor,
actual_pt_size: Au,
font_key: webrender_api::FontInstanceKey,
) -> Font {
let metrics = handle.metrics();
Font {
handle: handle,
shaper: None,
descriptor,
actual_pt_size,
metrics,
shape_cache: RefCell::new(HashMap::new()),
glyph_advance_cache: RefCell::new(HashMap::new()),
font_key,
}
}
/// A unique identifier for the font, allowing comparison.
pub fn identifier(&self) -> Atom {
self.handle.identifier()
}
}
bitflags! {
pub struct ShapingFlags: u8 {
#[doc = "Set if the text is entirely whitespace."]
const IS_WHITESPACE_SHAPING_FLAG = 0x01;
#[doc = "Set if we are to ignore ligatures."]
const IGNORE_LIGATURES_SHAPING_FLAG = 0x02;
#[doc = "Set if we are to disable kerning."]
const DISABLE_KERNING_SHAPING_FLAG = 0x04;
#[doc = "Text direction is right-to-left."]
const RTL_FLAG = 0x08;
#[doc = "Set if word-break is set to keep-all."]
const KEEP_ALL_FLAG = 0x10;
}
}
/// Various options that control text shaping.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ShapingOptions {
/// Spacing to add between each letter. Corresponds to the CSS 2.1 `letter-spacing` property.
/// NB: You will probably want to set the `IGNORE_LIGATURES_SHAPING_FLAG` if this is `Some`.
pub letter_spacing: Option<Au>,
/// Spacing to add between each word. Corresponds to the CSS 2.1 `word-spacing` property.
pub word_spacing: (Au, NotNan<f32>),
/// The Unicode script property of the characters in this run.
pub script: Script,
/// Various flags.
pub flags: ShapingFlags,
}
/// An entry in the shape cache.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct ShapeCacheEntry {
text: String,
options: ShapingOptions,
}
impl Font {
pub fn shape_text(&mut self, text: &str, options: &ShapingOptions) -> Arc<GlyphStore> {
let this = self as *const Font;
let mut shaper = self.shaper.take();
let lookup_key = ShapeCacheEntry {
text: text.to_owned(),
options: *options,
};
let result = self
.shape_cache
.borrow_mut()
.entry(lookup_key)
.or_insert_with(|| {
let start_time = time::precise_time_ns();
let mut glyphs = GlyphStore::new(
text.len(),
options
.flags
.contains(ShapingFlags::IS_WHITESPACE_SHAPING_FLAG),
options.flags.contains(ShapingFlags::RTL_FLAG),
);
if self.can_do_fast_shaping(text, options) {
debug!("shape_text: Using ASCII fast path.");
self.shape_text_fast(text, options, &mut glyphs);
} else {
debug!("shape_text: Using Harfbuzz.");
if shaper.is_none() {
shaper = Some(Shaper::new(this));
}
shaper
.as_ref()
.unwrap()
.shape_text(text, options, &mut glyphs);
}
let end_time = time::precise_time_ns();
TEXT_SHAPING_PERFORMANCE_COUNTER
.fetch_add((end_time - start_time) as usize, Ordering::Relaxed);
Arc::new(glyphs)
}).clone();
self.shaper = shaper;
result
}
fn can_do_fast_shaping(&self, text: &str, options: &ShapingOptions) -> bool {
options.script == Script::Latin &&
!options.flags.contains(ShapingFlags::RTL_FLAG) &&
self.handle.can_do_fast_shaping() &&
text.is_ascii()
}
/// Fast path for ASCII text that only needs simple horizontal LTR kerning.
fn shape_text_fast(&self, text: &str, options: &ShapingOptions, glyphs: &mut GlyphStore) {
let mut prev_glyph_id = None;
for (i, byte) in text.bytes().enumerate() {
let character = byte as char;
let glyph_id = match self.glyph_index(character) {
Some(id) => id,
None => continue,
};
let mut advance = Au::from_f64_px(self.glyph_h_advance(glyph_id));
if character == ' ' {
// https://drafts.csswg.org/css-text-3/#word-spacing-property
let (length, percent) = options.word_spacing;
advance = (advance + length) + Au((advance.0 as f32 * percent.into_inner()) as i32);
}
if let Some(letter_spacing) = options.letter_spacing {
advance += letter_spacing;
}
let offset = prev_glyph_id.map(|prev| {
let h_kerning = Au::from_f64_px(self.glyph_h_kerning(prev, glyph_id));
advance += h_kerning;
Point2D::new(h_kerning, Au(0))
});
let glyph = GlyphData::new(glyph_id, advance, offset, true, true);
glyphs.add_glyph_for_byte_index(ByteIndex(i as isize), character, &glyph);
prev_glyph_id = Some(glyph_id);
}
glyphs.finalize_changes();
}
pub fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result = self.handle.table_for_tag(tag);
let status = if result.is_some() {
"Found"
} else {
"Didn't find"
};
debug!(
"{} font table[{}] with family={}, face={}",
status,
tag.tag_to_str(),
self.handle
.family_name()
.unwrap_or("unavailable".to_owned()),
self.handle.face_name().unwrap_or("unavailable".to_owned())
);
result
}
#[inline]
pub fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let codepoint = match self.descriptor.variant {
font_variant_caps::T::SmallCaps => codepoint.to_uppercase().next().unwrap(), //FIXME: #5938
font_variant_caps::T::Normal => codepoint,
};
self.handle.glyph_index(codepoint)
}
pub fn has_glyph_for(&self, codepoint: char) -> bool {
self.glyph_index(codepoint).is_some()
}
pub fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
self.handle.glyph_h_kerning(first_glyph, second_glyph)
}
pub fn glyph_h_advance(&self, glyph: GlyphId) -> FractionalPixel {
*self
.glyph_advance_cache
.borrow_mut()
.entry(glyph)
.or_insert_with(|| {
match self.handle.glyph_h_advance(glyph) {
Some(adv) => adv,
None => 10f64 as FractionalPixel, // FIXME: Need fallback strategy
}
})
}
}
pub type FontRef = Rc<RefCell<Font>>;
/// A `FontGroup` is a prioritised list of fonts for a given set of font styles. It is used by
/// `TextRun` to decide which font to render a character with. If none of the fonts listed in the
/// styles are suitable, a fallback font may be used.
#[derive(Debug)]
pub struct FontGroup {
descriptor: FontDescriptor,
families: SmallVec<[FontGroupFamily; 8]>,
last_matching_fallback: Option<FontRef>,
}
impl FontGroup {
pub fn new(style: &FontStyleStruct) -> FontGroup {
let descriptor = FontDescriptor::from(style);
let families = style
.font_family
.0
.iter()
.map(|family| FontGroupFamily::new(descriptor.clone(), &family))
.collect();
FontGroup {
descriptor,
families,
last_matching_fallback: None,
}
}
/// Finds the first font, or else the first fallback font, which contains a glyph for
/// `codepoint`. If no such font is found, returns the first available font or fallback font
/// (which will cause a "glyph not found" character to be rendered). If no font at all can be
/// found, returns None.
pub fn find_by_codepoint<S: FontSource>(
&mut self,
mut font_context: &mut FontContext<S>,
codepoint: char,
) -> Option<FontRef> {
let has_glyph = |font: &FontRef| font.borrow().has_glyph_for(codepoint);
let font = self.find(&mut font_context, |font| has_glyph(font));
if font.is_some() {
return font;
}
if let Some(ref fallback) = self.last_matching_fallback {
if has_glyph(&fallback) |
}
let font = self.find_fallback(&mut font_context, Some(codepoint), has_glyph);
if font.is_some() {
self.last_matching_fallback = font.clone();
return font;
}
self.first(&mut font_context)
}
/// Find the first available font in the group, or the first available fallback font.
pub fn first<S: FontSource>(
&mut self,
mut font_context: &mut FontContext<S>,
) -> Option<FontRef> {
self.find(&mut font_context, |_| true)
.or_else(|| self.find_fallback(&mut font_context, None, |_| true))
}
/// Find a font which returns true for `predicate`. This method mutates because we may need to
/// load new font data in the process of finding a suitable font.
fn find<S, P>(&mut self, mut font_context: &mut FontContext<S>, predicate: P) -> Option<FontRef>
where
S: FontSource,
P: FnMut(&FontRef) -> bool,
{
self.families
.iter_mut()
.filter_map(|family| family.font(&mut font_context))
.find(predicate)
}
/// Attempts to find a suitable fallback font which matches the `predicate`. The default
/// family (i.e. "serif") will be tried first, followed by platform-specific family names.
/// If a `codepoint` is provided, then its Unicode block may be used to refine the list of
/// family names which will be tried.
fn find_fallback<S, P>(
&mut self,
font_context: &mut FontContext<S>,
codepoint: Option<char>,
predicate: P,
) -> Option<FontRef>
where
S: FontSource,
P: FnMut(&FontRef) -> bool,
{
iter::once(FontFamilyDescriptor::default())
.chain(fallback_font_families(codepoint).into_iter().map(|family| {
FontFamilyDescriptor::new(FontFamilyName::from(family), FontSearchScope::Local)
})).filter_map(|family| font_context.font(&self.descriptor, &family))
.find(predicate)
}
}
/// A `FontGroupFamily` is a single font family in a `FontGroup`. It corresponds to one of the
/// families listed in the `font-family` CSS property. The corresponding font data is lazy-loaded,
/// only if actually needed.
#[derive(Debug)]
struct FontGroupFamily {
font_descriptor: FontDescriptor,
family_descriptor: FontFamilyDescriptor,
loaded: bool,
font: Option<FontRef>,
}
impl FontGroupFamily {
fn new(font_descriptor: FontDescriptor, family: &SingleFontFamily) -> FontGroupFamily {
let family_descriptor =
FontFamilyDescriptor::new(FontFamilyName::from(family), FontSearchScope::Any);
FontGroupFamily {
font_descriptor,
family_descriptor,
loaded: false,
font: None,
}
}
/// Returns the font within this family which matches the style. We'll fetch the data from the
/// `FontContext` the first time this method is called, and return a cached reference on
/// subsequent calls.
fn font<S: FontSource>(&mut self, font_context: &mut FontContext<S>) -> Option<FontRef> {
if !self.loaded {
self.font = font_context.font(&self.font_descriptor, &self.family_descriptor);
self.loaded = true;
}
self.font.clone()
}
}
pub struct RunMetrics {
// may be negative due to negative width (i.e., kerning of '.' in 'P.T.')
pub advance_width: Au,
pub ascent: Au, // nonzero
pub descent: Au, // nonzero
// this bounding box is relative to the left origin baseline.
// so, bounding_box.position.y = -ascent
pub bounding_box: Rect<Au>,
}
impl RunMetrics {
pub fn new(advance: Au, ascent: Au, descent: Au) -> RunMetrics {
let bounds = Rect::new(
Point2D::new(Au(0), -ascent),
Size2D::new(advance, ascent + descent),
);
// TODO(Issue #125): support loose and tight bounding boxes; using the
// ascent+descent and advance is sometimes too generous and
// looking at actual glyph extents can yield a tighter box.
RunMetrics {
advance_width: advance,
bounding_box: bounds,
ascent: ascent,
descent: descent,
}
}
}
pub fn get_and_reset_text_shaping_performance_counter() -> usize {
let value = TEXT_SHAPING_PERFORMANCE_COUNTER.load(Ordering::SeqCst);
TEXT_SHAPING_PERFORMANCE_COUNTER.store(0, Ordering::SeqCst);
value
}
/// The scope within which we will look for a font.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum FontSearchScope {
/// All fonts will be searched, including those specified via `@font-face` rules.
Any,
/// Only local system fonts will be searched.
Local,
}
/// A font family name used in font selection.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum FontFamilyName {
/// A specific name such as `"Arial"`
Specific(Atom),
/// A generic name such as `sans-serif`
Generic(Atom),
}
impl FontFamilyName {
pub fn name(&self) -> &str {
match *self {
FontFamilyName::Specific(ref name) => name,
FontFamilyName::Generic(ref name) => name,
}
}
}
impl<'a> From<&'a SingleFontFamily> for FontFamilyName {
fn from(other: &'a SingleFontFamily) -> FontFamilyName {
match *other {
SingleFontFamily::FamilyName(ref family_name) => {
FontFamilyName::Specific(family_name.name.clone())
},
SingleFontFamily::Generic(ref generic_name) => {
FontFamilyName::Generic(generic_name.clone())
},
}
}
}
impl<'a> From<&'a str> for FontFamilyName {
fn from(other: &'a str) -> FontFamilyName {
FontFamilyName::Specific(Atom::from(other))
}
}
/// The font family parameters for font selection.
#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct FontFamilyDescriptor {
pub name: FontFamilyName,
pub scope: FontSearchScope,
}
impl FontFamilyDescriptor {
pub fn new(name: FontFamilyName, scope: FontSearchScope) -> FontFamilyDescriptor { | {
return self.last_matching_fallback.clone();
} | conditional_block |
utils.rs | use num::bigint::{ToBigUint, BigUint};
use num::traits::{One, ToPrimitive};
use std::num::ParseIntError;
use std::mem;
pub trait Max {
fn max(width: u32) -> Self;
}
impl Max for BigUint {
fn max(width: u32) -> BigUint {
(BigUint::one() << width as usize) - BigUint::one()
}
}
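// Sketch: `<BigUint as Max>::max(8)` is (1 << 8) - 1 = 255, the largest 8-bit value.
// The helper name is illustrative only.
#[allow(dead_code)]
fn example_max_width() -> BigUint {
<BigUint as Max>::max(8)
}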
pub trait ParseNum {
fn parse_num(num: &str) -> Result<Self, ParseIntError> where Self: Sized;
}
impl ParseNum for u64 {
fn parse_num(num: &str) -> Result<Self, ParseIntError> {
if num.len() > 2 && &num[0..2] == "0x" {
u64::from_str_radix(&num[2..], 16)
} else {
u64::from_str_radix(num, 10)
}
}
}
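// Sketch: hex and decimal spellings of the same number parse to the same value, per the
// "0x" prefix check above. The helper name is illustrative only.
#[allow(dead_code)]
fn example_parse_num() -> (u64, u64) {
(u64::parse_num("0xff").unwrap(), u64::parse_num("255").unwrap())
}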
trait AsFixed<T> {
fn as_fixed(&self) -> T;
}
impl AsFixed<u64> for BigUint {
#[inline]
fn as_fixed(&self) -> u64 {
(self & 0xffFFffFF_ffFFffFFu64.to_biguint().unwrap())
.to_u64()
.unwrap()
}
}
impl AsFixed<f64> for BigUint {
#[inline]
fn as_fixed(&self) -> f64 {
let imm: u64 = self.as_fixed();
unsafe { mem::transmute(imm) }
}
}
impl AsFixed<f32> for BigUint {
#[inline]
fn as_fixed(&self) -> f32 {
let imm: u64 = self.as_fixed();
unsafe { mem::transmute(imm as u32) }
}
}
/// Implement this trait for Floats to get the raw representation
pub trait AsRaw<T> {
/// Returns the raw representation of the floating point number
fn as_raw(&self) -> T;
}
impl AsRaw<u32> for f32 {
fn as_raw(&self) -> u32 {
unsafe { mem::transmute(*self) }
}
}
impl AsRaw<u64> for f64 {
fn as_raw(&self) -> u64 {
unsafe { mem::transmute(*self) }
}
}
use std::collections::VecDeque;
use std::collections::vec_deque;
// Objects
pub struct LastCache<T> {
stack: VecDeque<T>,
max: usize
}
impl<T> LastCache<T> {
pub fn new(max: usize) -> LastCache<T> {
LastCache {
stack: VecDeque::with_capacity(max),
max: max
}
}
pub fn push(&mut self, item: T) {
if self.stack.len() >= self.max {
self.stack.pop_back();
}
self.stack.push_front(item);
}
pub fn iter<'a>(&'a mut self) -> vec_deque::Iter<'a, T> {
self.stack.iter()
}
pub fn last<'a>(&'a self) -> &T {
self.stack.front().unwrap()
}
}
impl<'a, T> IntoIterator for &'a LastCache<T> {
type Item = &'a T;
type IntoIter = vec_deque::Iter<'a, T>;
fn into_iter(self) -> vec_deque::Iter<'a, T> {
self.stack.iter()
}
}
// Arithmetic/binary utils
/// Hamming weight
pub trait Hamming {
fn hamming_weight(&self) -> u32;
}
impl Hamming for u64 {
fn hamming_weight(&self) -> u32 {
// Optimized by rust's intrinsics
self.count_ones()
}
}
impl Hamming for BigUint {
fn hamming_weight(&self) -> u32 {
let bits = self.bits();
if bits <= 64 {
let in64 = self.to_u64().unwrap();
in64.hamming_weight()
} else {
let mut res = 0;
for i in 0..bits {
res += ((self >> i) & BigUint::one()).to_u32().unwrap();
}
res
}
}
}
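/// Flips the low `width` bits of `num`; e.g. flipping 0b1010 with width 4 gives 0b0101.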
pub fn flip_bits(num: &BigUint, width: u32) -> BigUint {
((BigUint::one() << width as usize) - BigUint::one()) ^ num
}
// CloneWeightedChoice
use rand::Rng;
use rand::distributions::{Sample, Weighted, Range, IndependentSample};
pub struct CloneWeightedChoice<T> {
items: Vec<Weighted<T>>,
weight_range: Range<u32>
}
impl<T: Clone> CloneWeightedChoice<T> {
/// Create a new `WeightedChoice`.
///
/// Panics if:
/// - `v` is empty
/// - the total weight is 0
/// - the total weight is larger than a `u32` can contain.
pub fn new(items: &[Weighted<T>]) -> CloneWeightedChoice<T> {
// strictly speaking, this is subsumed by the total weight == 0 case
assert!(!items.is_empty(), "WeightedChoice::new called with no items");
let mut items = items.to_vec();
let mut running_total: u32 = 0;
// we convert the list from individual weights to cumulative
// weights so we can binary search. This *could* drop elements
// with weight == 0 as an optimisation.
for item in items.iter_mut() {
running_total = match running_total.checked_add(item.weight) {
Some(n) => n,
None => panic!("WeightedChoice::new called with a total weight \
larger than a u32 can contain")
};
item.weight = running_total;
}
assert!(running_total != 0, "WeightedChoice::new called with a total weight of 0");
CloneWeightedChoice {
items: items,
// we're likely to be generating numbers in this range
// relatively often, so might as well cache it
weight_range: Range::new(0, running_total)
}
}
}
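// Sketch of the cumulative-weight conversion done by `new` (assumed weights): individual
// weights [1, 2, 3] become cumulative [1, 3, 6], so `ind_sample` draws from [0, 6) and
// binary-searches for the first cumulative weight above the draw. The helper name and
// items are illustrative only.
#[allow(dead_code)]
fn example_weighted_choice() -> CloneWeightedChoice<char> {
CloneWeightedChoice::new(&[
Weighted { weight: 1, item: 'a' }, // cumulative 1
Weighted { weight: 2, item: 'b' }, // cumulative 3
Weighted { weight: 3, item: 'c' }, // cumulative 6
])
}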
impl<T: Clone> Sample<T> for CloneWeightedChoice<T> {
fn sample<R: Rng>(&mut self, rng: &mut R) -> T { self.ind_sample(rng) }
}
impl<T: Clone> IndependentSample<T> for CloneWeightedChoice<T> {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> T {
// we want to find the first element that has cumulative
// weight > sample_weight, which we do by binary search, since the
// cumulative weights of self.items are sorted.
// choose a weight in [0, total_weight)
let sample_weight = self.weight_range.ind_sample(rng);
// short circuit when it's the first item
if sample_weight < self.items[0].weight {
return self.items[0].item.clone();
}
let mut idx = 0;
let mut modifier = self.items.len();
// now we know that every possibility has an element to the
// left, so we can just search for the last element that has
// cumulative weight <= sample_weight, then the next one will
// be "it". (Note that this greatest element will never be the
// last element of the vector, since sample_weight is chosen
// in [0, total_weight) and the cumulative weight of the last
// one is exactly the total weight.)
while modifier > 1 {
let i = idx + modifier / 2;
if self.items[i].weight <= sample_weight {
// we're small, so look to the right, but allow this
// exact element still.
idx = i;
// we need the `/ 2` to round up otherwise we'll drop
// the trailing elements when `modifier` is odd.
modifier += 1;
} else {
// otherwise we're too big, so go left. (i.e. do
// nothing)
}
modifier /= 2;
}
return self.items[idx + 1].item.clone();
}
}
// Macro utils
macro_rules! for_one {
($x:ident) => (1)
}
macro_rules! enum_and_list{
($e_name:ident, $c_name:ident, $($m:ident),+) => {
#[derive(Copy,Clone,Debug,PartialEq,Eq,Hash,PartialOrd,Ord)]
pub enum $e_name {
$(
$m,
)+
}
pub const $c_name: [$e_name; 0 $( + for_one!($m) )+ ] =
[ $($e_name::$m,)+ ];
impl Rand for $e_name {
fn rand<R: Rng>(rng: &mut R) -> Self {
let val = rng.gen_range(0, $c_name.len());
$c_name[val]
}
}
}
}
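// Hypothetical invocation sketch: `enum_and_list!(Color, COLORS, Red, Green, Blue);`
// expands to `enum Color { Red, Green, Blue }`, a `COLORS: [Color; 3]` constant listing
// every variant, and a `Rand` impl that picks a variant uniformly (the call site needs
// `rand::Rand` in scope). The names `Color` and `COLORS` are illustrative only.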
macro_rules! debugln{
($($arg:tt)*) => (
{
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
}
)
}
macro_rules! debug{
($($arg:tt)*) => (
{
use std::io::Write;
match write!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
}
)
} | #[cfg(test)]
mod test_util {
use num::bigint::{BigUint, ToBigUint};
use num::traits::{One, Zero};
use utils::AsRaw;
use utils::Hamming;
use utils::LastCache;
#[test]
fn test_hamming() {
let full_128_bu = {
let mut res = BigUint::zero();
for i in 0..128 {
res = res | BigUint::one() << i;
}
res
};
assert_eq!(full_128_bu.hamming_weight(), 128);
let full_64_u64 = 0xffFF_ffFF_ffFF_ffFFu64;
assert_eq!(full_64_u64.hamming_weight(), 64);
let full_64_bu = 0xffFF_ffFF_ffFF_ffFFu64.to_biguint().unwrap();
assert_eq!(full_64_bu.hamming_weight(), 64);
}
#[test]
fn test_biguint() {
assert_eq!(0xaabbccddu32.to_biguint().unwrap().to_string(),
"2864434397");
}
#[test]
fn test_lastcache() {
let mut lc = LastCache::new(10);
for i in 0.. 10 {
lc.push(1);
}
assert_eq!(lc.iter().all(|i| *i == 1), true);
lc.push(2);
lc.push(2);
let mut i = 0;
for item in &lc {
if i == 0 || i == 1 {
assert_eq!(*item, 2);
} else {
assert_eq!(*item, 1);
}
i += 1;
}
}
#[test]
fn test_as_raw_f32() {
use std::f32::INFINITY;
assert_eq!(2.8411367E-29f32.as_raw(), 0x10101010);
assert_eq!((-4.99999998E11f32).as_raw(), 0xd2e8d4a5);
assert_eq!(100.0f32.as_raw(), 0x42c80000);
assert_eq!(0.0f32.as_raw(), 0);
assert_eq!((-0.0f32).as_raw(), 0x80000000);
assert_eq!(INFINITY.as_raw(), 0x7f800000);
}
#[test]
fn test_as_raw_f64() {
use std::f64::INFINITY;
assert_eq!(2.8411367E-29f64.as_raw(), 0x3A0202020351C16B);
assert_eq!((-4.99999998E11f64).as_raw(), 0xC25D1A94A00C0000);
assert_eq!(100.0f64.as_raw(), 0x4059000000000000);
assert_eq!(0.0f64.as_raw(), 0);
assert_eq!((-0.0f64).as_raw(), 0x8000000000000000);
assert_eq!(INFINITY.as_raw(), 0x7ff0000000000000);
}
} | random_line_split |
|
utils.rs | use num::bigint::{ToBigUint, BigUint};
use num::traits::{One, ToPrimitive};
use std::num::ParseIntError;
use std::mem;
pub trait Max {
fn max(width: u32) -> Self;
}
impl Max for BigUint {
fn max(width: u32) -> BigUint {
(BigUint::one() << width as usize) - BigUint::one()
}
}
pub trait ParseNum {
fn parse_num(num: &str) -> Result<Self, ParseIntError> where Self: Sized;
}
impl ParseNum for u64 {
fn parse_num(num: &str) -> Result<Self, ParseIntError> {
if num.len() > 2 && &num[0..2] == "0x" {
u64::from_str_radix(&num[2..], 16)
} else {
u64::from_str_radix(num, 10)
}
}
}
trait AsFixed<T> {
fn as_fixed(&self) -> T;
}
impl AsFixed<u64> for BigUint {
#[inline]
fn as_fixed(&self) -> u64 {
(self & 0xffFFffFF_ffFFffFFu64.to_biguint().unwrap())
.to_u64()
.unwrap()
}
}
impl AsFixed<f64> for BigUint {
#[inline]
fn as_fixed(&self) -> f64 {
let imm: u64 = self.as_fixed();
unsafe { mem::transmute(imm) }
}
}
impl AsFixed<f32> for BigUint {
#[inline]
fn as_fixed(&self) -> f32 {
let imm: u64 = self.as_fixed();
unsafe { mem::transmute(imm as u32) }
}
}
/// Implement this trait for Floats to get the raw representation
pub trait AsRaw<T> {
/// Returns the raw representation of the floating point number
fn as_raw(&self) -> T;
}
impl AsRaw<u32> for f32 {
fn as_raw(&self) -> u32 {
unsafe { mem::transmute(*self) }
}
}
impl AsRaw<u64> for f64 {
fn as_raw(&self) -> u64 {
unsafe { mem::transmute(*self) }
}
}
use std::collections::VecDeque;
use std::collections::vec_deque;
// Objects
pub struct LastCache<T> {
stack: VecDeque<T>,
max: usize
}
impl<T> LastCache<T> {
pub fn new(max: usize) -> LastCache<T> {
LastCache {
stack: VecDeque::with_capacity(max),
max: max
}
}
pub fn push(&mut self, item: T) {
if self.stack.len() >= self.max {
self.stack.pop_back();
}
self.stack.push_front(item);
}
pub fn iter<'a>(&'a mut self) -> vec_deque::Iter<'a, T> {
self.stack.iter()
}
pub fn last<'a>(&'a self) -> &T {
self.stack.front().unwrap()
}
}
impl<'a, T> IntoIterator for &'a LastCache<T> {
type Item = &'a T;
type IntoIter = vec_deque::Iter<'a, T>;
fn into_iter(self) -> vec_deque::Iter<'a, T> {
self.stack.iter()
}
}
// Arithmetic/binary utils
/// Hamming weight
pub trait Hamming {
fn hamming_weight(&self) -> u32;
}
impl Hamming for u64 {
fn hamming_weight(&self) -> u32 {
// Optimized by rust's intrinsics
self.count_ones()
}
}
impl Hamming for BigUint {
fn hamming_weight(&self) -> u32 {
let bits = self.bits();
if bits <= 64 {
let in64 = self.to_u64().unwrap();
in64.hamming_weight()
} else {
let mut res = 0;
for i in 0..bits {
res += ((self >> i) & BigUint::one()).to_u32().unwrap();
}
res
}
}
}
pub fn flip_bits(num: &BigUint, width: u32) -> BigUint {
((BigUint::one() << width as usize) - BigUint::one()) ^ num
}
// CloneWeightedChoice
use rand::Rng;
use rand::distributions::{Sample, Weighted, Range, IndependentSample};
pub struct CloneWeightedChoice<T> {
items: Vec<Weighted<T>>,
weight_range: Range<u32>
}
impl<T: Clone> CloneWeightedChoice<T> {
/// Create a new `WeightedChoice`.
///
/// Panics if:
/// - `v` is empty
/// - the total weight is 0
/// - the total weight is larger than a `u32` can contain.
pub fn new(items: &[Weighted<T>]) -> CloneWeightedChoice<T> {
// strictly speaking, this is subsumed by the total weight == 0 case
assert!(!items.is_empty(), "WeightedChoice::new called with no items");
let mut items = items.to_vec();
let mut running_total: u32 = 0;
// we convert the list from individual weights to cumulative
// weights so we can binary search. This *could* drop elements
// with weight == 0 as an optimisation.
for item in items.iter_mut() {
running_total = match running_total.checked_add(item.weight) {
Some(n) => n,
None => panic!("WeightedChoice::new called with a total weight \
larger than a u32 can contain")
};
item.weight = running_total;
}
assert!(running_total != 0, "WeightedChoice::new called with a total weight of 0");
CloneWeightedChoice {
items: items,
// we're likely to be generating numbers in this range
// relatively often, so might as well cache it
weight_range: Range::new(0, running_total)
}
}
}
impl<T: Clone> Sample<T> for CloneWeightedChoice<T> {
fn sample<R: Rng>(&mut self, rng: &mut R) -> T { self.ind_sample(rng) }
}
impl<T: Clone> IndependentSample<T> for CloneWeightedChoice<T> {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> T {
// we want to find the first element that has cumulative
// weight > sample_weight, which we do by binary search, since the
// cumulative weights of self.items are sorted.
// choose a weight in [0, total_weight)
let sample_weight = self.weight_range.ind_sample(rng);
// short circuit when it's the first item
if sample_weight < self.items[0].weight {
return self.items[0].item.clone();
}
let mut idx = 0;
let mut modifier = self.items.len();
// now we know that every possibility has an element to the
// left, so we can just search for the last element that has
// cumulative weight <= sample_weight, then the next one will
// be "it". (Note that this greatest element will never be the
// last element of the vector, since sample_weight is chosen
// in [0, total_weight) and the cumulative weight of the last
// one is exactly the total weight.)
while modifier > 1 {
let i = idx + modifier / 2;
if self.items[i].weight <= sample_weight {
// we're small, so look to the right, but allow this
// exact element still.
idx = i;
// we need the `/ 2` to round up otherwise we'll drop
// the trailing elements when `modifier` is odd.
modifier += 1;
} else {
// otherwise we're too big, so go left. (i.e. do
// nothing)
}
modifier /= 2;
}
return self.items[idx + 1].item.clone();
}
}
// Macro utils
macro_rules! for_one {
($x:ident) => (1)
}
macro_rules! enum_and_list{
($e_name:ident, $c_name:ident, $($m:ident),+) => {
#[derive(Copy,Clone,Debug,PartialEq,Eq,Hash,PartialOrd,Ord)]
pub enum $e_name {
$(
$m,
)+
}
pub const $c_name: [$e_name; 0 $( + for_one!($m) )+ ] =
[ $($e_name::$m,)+ ];
impl Rand for $e_name {
fn rand<R: Rng>(rng: &mut R) -> Self {
let val = rng.gen_range(0, $c_name.len());
$c_name[val]
}
}
}
}
macro_rules! debugln{
($($arg:tt)*) => (
{
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
}
)
}
macro_rules! debug{
($($arg:tt)*) => (
{
use std::io::Write;
match write!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
}
)
}
#[cfg(test)]
mod test_util {
use num::bigint::{BigUint, ToBigUint};
use num::traits::{One, Zero};
use utils::AsRaw;
use utils::Hamming;
use utils::LastCache;
#[test]
fn test_hamming() {
let full_128_bu = {
let mut res = BigUint::zero();
for i in 0..128 {
res = res | BigUint::one() << i;
}
res
};
assert_eq!(full_128_bu.hamming_weight(), 128);
let full_64_u64 = 0xffFF_ffFF_ffFF_ffFFu64;
assert_eq!(full_64_u64.hamming_weight(), 64);
let full_64_bu = 0xffFF_ffFF_ffFF_ffFFu64.to_biguint().unwrap();
assert_eq!(full_64_bu.hamming_weight(), 64);
}
#[test]
fn test_biguint() {
assert_eq!(0xaabbccddu32.to_biguint().unwrap().to_string(),
"2864434397");
}
#[test]
fn test_lastcache() {
let mut lc = LastCache::new(10);
for i in 0.. 10 {
lc.push(1);
}
assert_eq!(lc.iter().all(|i| *i == 1), true);
lc.push(2);
lc.push(2);
let mut i = 0;
for item in &lc {
if i == 0 || i == 1 | else {
assert_eq!(*item, 1);
}
i += 1;
}
}
#[test]
fn test_as_raw_f32() {
use std::f32::INFINITY;
assert_eq!(2.8411367E-29f32.as_raw(), 0x10101010);
assert_eq!((-4.99999998E11f32).as_raw(), 0xd2e8d4a5);
assert_eq!(100.0f32.as_raw(), 0x42c80000);
assert_eq!(0.0f32.as_raw(), 0);
assert_eq!((-0.0f32).as_raw(), 0x80000000);
assert_eq!(INFINITY.as_raw(), 0x7f800000);
}
#[test]
fn test_as_raw_f64() {
use std::f64::INFINITY;
assert_eq!(2.8411367E-29f64.as_raw(), 0x3A0202020351C16B);
assert_eq!((-4.99999998E11f64).as_raw(), 0xC25D1A94A00C0000);
assert_eq!(100.0f64.as_raw(), 0x4059000000000000);
assert_eq!(0.0f64.as_raw(), 0);
assert_eq!((-0.0f64).as_raw(), 0x8000000000000000);
assert_eq!(INFINITY.as_raw(), 0x7ff0000000000000);
}
}
| {
assert_eq!(*item, 2);
} | conditional_block |
utils.rs | use num::bigint::{ToBigUint, BigUint};
use num::traits::{One, ToPrimitive};
use std::num::ParseIntError;
use std::mem;
pub trait Max {
fn max(width: u32) -> Self;
}
impl Max for BigUint {
fn max(width: u32) -> BigUint {
(BigUint::one() << width as usize) - BigUint::one()
}
}
pub trait ParseNum {
fn parse_num(num: &str) -> Result<Self, ParseIntError> where Self: Sized;
}
impl ParseNum for u64 {
fn parse_num(num: &str) -> Result<Self, ParseIntError> {
if num.len() > 2 && &num[0..2] == "0x" {
u64::from_str_radix(&num[2..], 16)
} else {
u64::from_str_radix(num, 10)
}
}
}
trait AsFixed<T> {
fn as_fixed(&self) -> T;
}
impl AsFixed<u64> for BigUint {
#[inline]
fn as_fixed(&self) -> u64 {
(self & 0xffFFffFF_ffFFffFFu64.to_biguint().unwrap())
.to_u64()
.unwrap()
}
}
impl AsFixed<f64> for BigUint {
#[inline]
fn as_fixed(&self) -> f64 {
let imm: u64 = self.as_fixed();
unsafe { mem::transmute(imm) }
}
}
impl AsFixed<f32> for BigUint {
#[inline]
fn as_fixed(&self) -> f32 {
let imm: u64 = self.as_fixed();
unsafe { mem::transmute(imm as u32) }
}
}
/// Implement this trait for Floats to get the raw representation
pub trait AsRaw<T> {
/// Returns the raw representation of the floating point number
fn as_raw(&self) -> T;
}
impl AsRaw<u32> for f32 {
fn | (&self) -> u32 {
unsafe { mem::transmute(*self) }
}
}
impl AsRaw<u64> for f64 {
fn as_raw(&self) -> u64 {
unsafe { mem::transmute(*self) }
}
}
use std::collections::VecDeque;
use std::collections::vec_deque;
// Objects
pub struct LastCache<T> {
stack: VecDeque<T>,
max: usize
}
impl<T> LastCache<T> {
pub fn new(max: usize) -> LastCache<T> {
LastCache {
stack: VecDeque::with_capacity(max),
max: max
}
}
pub fn push(&mut self, item: T) {
if self.stack.len() >= self.max {
self.stack.pop_back();
}
self.stack.push_front(item);
}
pub fn iter<'a>(&'a mut self) -> vec_deque::Iter<'a, T> {
self.stack.iter()
}
pub fn last<'a>(&'a self) -> &T {
self.stack.front().unwrap()
}
}
impl<'a, T> IntoIterator for &'a LastCache<T> {
type Item = &'a T;
type IntoIter = vec_deque::Iter<'a, T>;
fn into_iter(self) -> vec_deque::Iter<'a, T> {
self.stack.iter()
}
}
// Arithmetic/binary utils
/// Hamming weight
pub trait Hamming {
fn hamming_weight(&self) -> u32;
}
impl Hamming for u64 {
fn hamming_weight(&self) -> u32 {
// Optimized by rust's intrinsics
self.count_ones()
}
}
impl Hamming for BigUint {
fn hamming_weight(&self) -> u32 {
let bits = self.bits();
if bits <= 64 {
let in64 = self.to_u64().unwrap();
in64.hamming_weight()
} else {
let mut res = 0;
for i in 0..bits {
res += ((self >> i) & BigUint::one()).to_u32().unwrap();
}
res
}
}
}
pub fn flip_bits(num: &BigUint, width: u32) -> BigUint {
((BigUint::one() << width as usize) - BigUint::one()) ^ num
}
// CloneWeightedChoice
use rand::Rng;
use rand::distributions::{Sample, Weighted, Range, IndependentSample};
pub struct CloneWeightedChoice<T> {
items: Vec<Weighted<T>>,
weight_range: Range<u32>
}
impl<T: Clone> CloneWeightedChoice<T> {
/// Create a new `WeightedChoice`.
///
/// Panics if:
/// - `v` is empty
/// - the total weight is 0
/// - the total weight is larger than a `u32` can contain.
pub fn new(items: &[Weighted<T>]) -> CloneWeightedChoice<T> {
// strictly speaking, this is subsumed by the total weight == 0 case
assert!(!items.is_empty(), "WeightedChoice::new called with no items");
let mut items = items.to_vec();
let mut running_total: u32 = 0;
// we convert the list from individual weights to cumulative
// weights so we can binary search. This *could* drop elements
// with weight == 0 as an optimisation.
for item in items.iter_mut() {
running_total = match running_total.checked_add(item.weight) {
Some(n) => n,
None => panic!("WeightedChoice::new called with a total weight \
larger than a u32 can contain")
};
item.weight = running_total;
}
assert!(running_total != 0, "WeightedChoice::new called with a total weight of 0");
CloneWeightedChoice {
items: items,
// we're likely to be generating numbers in this range
// relatively often, so might as well cache it
weight_range: Range::new(0, running_total)
}
}
}
impl<T: Clone> Sample<T> for CloneWeightedChoice<T> {
fn sample<R: Rng>(&mut self, rng: &mut R) -> T { self.ind_sample(rng) }
}
impl<T: Clone> IndependentSample<T> for CloneWeightedChoice<T> {
fn ind_sample<R: Rng>(&self, rng: &mut R) -> T {
// we want to find the first element that has cumulative
// weight > sample_weight, which we do by binary search, since the
// cumulative weights of self.items are sorted.
// choose a weight in [0, total_weight)
let sample_weight = self.weight_range.ind_sample(rng);
// short circuit when it's the first item
if sample_weight < self.items[0].weight {
return self.items[0].item.clone();
}
let mut idx = 0;
let mut modifier = self.items.len();
// now we know that every possibility has an element to the
// left, so we can just search for the last element that has
// cumulative weight <= sample_weight, then the next one will
// be "it". (Note that this greatest element will never be the
// last element of the vector, since sample_weight is chosen
// in [0, total_weight) and the cumulative weight of the last
// one is exactly the total weight.)
while modifier > 1 {
let i = idx + modifier / 2;
if self.items[i].weight <= sample_weight {
// we're small, so look to the right, but allow this
// exact element still.
idx = i;
// we need the `/ 2` to round up otherwise we'll drop
// the trailing elements when `modifier` is odd.
modifier += 1;
} else {
// otherwise we're too big, so go left. (i.e. do
// nothing)
}
modifier /= 2;
}
return self.items[idx + 1].item.clone();
}
}
// Macro utils
macro_rules! for_one {
($x:ident) => (1)
}
macro_rules! enum_and_list{
($e_name:ident, $c_name:ident, $($m:ident),+) => {
#[derive(Copy,Clone,Debug,PartialEq,Eq,Hash,PartialOrd,Ord)]
pub enum $e_name {
$(
$m,
)+
}
pub const $c_name: [$e_name; 0 $( + for_one!($m) )+ ] =
[ $($e_name::$m,)+ ];
impl Rand for $e_name {
fn rand<R: Rng>(rng: &mut R) -> Self {
let val = rng.gen_range(0, $c_name.len());
$c_name[val]
}
}
}
}
macro_rules! debugln{
($($arg:tt)*) => (
{
use std::io::Write;
match writeln!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
}
)
}
macro_rules! debug{
($($arg:tt)*) => (
{
use std::io::Write;
match write!(&mut ::std::io::stderr(), $($arg)* ) {
Ok(_) => {},
Err(x) => panic!("Unable to write to stderr: {}", x),
}
}
)
}
#[cfg(test)]
mod test_util {
use num::bigint::{BigUint, ToBigUint};
use num::traits::{One, Zero};
use utils::AsRaw;
use utils::Hamming;
use utils::LastCache;
#[test]
fn test_hamming() {
let full_128_bu = {
let mut res = BigUint::zero();
for i in 0..128 {
res = res | BigUint::one() << i;
}
res
};
assert_eq!(full_128_bu.hamming_weight(), 128);
let full_64_u64 = 0xffFF_ffFF_ffFF_ffFFu64;
assert_eq!(full_64_u64.hamming_weight(), 64);
let full_64_bu = 0xffFF_ffFF_ffFF_ffFFu64.to_biguint().unwrap();
assert_eq!(full_64_bu.hamming_weight(), 64);
}
#[test]
fn test_biguint() {
assert_eq!(0xaabbccddu32.to_biguint().unwrap().to_string(),
"2864434397");
}
#[test]
fn test_lastcache() {
let mut lc = LastCache::new(10);
for i in 0.. 10 {
lc.push(1);
}
assert_eq!(lc.iter().all(|i| *i == 1), true);
lc.push(2);
lc.push(2);
let mut i = 0;
for item in &lc {
if i == 0 || i == 1 {
assert_eq!(*item, 2);
} else {
assert_eq!(*item, 1);
}
i += 1;
}
}
#[test]
fn test_as_raw_f32() {
use std::f32::INFINITY;
assert_eq!(2.8411367E-29f32.as_raw(), 0x10101010);
assert_eq!((-4.99999998E11f32).as_raw(), 0xd2e8d4a5);
assert_eq!(100.0f32.as_raw(), 0x42c80000);
assert_eq!(0.0f32.as_raw(), 0);
assert_eq!((-0.0f32).as_raw(), 0x80000000);
assert_eq!(INFINITY.as_raw(), 0x7f800000);
}
#[test]
fn test_as_raw_f64() {
use std::f64::INFINITY;
assert_eq!(2.8411367E-29f64.as_raw(), 0x3A0202020351C16B);
assert_eq!((-4.99999998E11f64).as_raw(), 0xC25D1A94A00C0000);
assert_eq!(100.0f64.as_raw(), 0x4059000000000000);
assert_eq!(0.0f64.as_raw(), 0);
assert_eq!((-0.0f64).as_raw(), 0x8000000000000000);
assert_eq!(INFINITY.as_raw(), 0x7ff0000000000000);
}
}
| as_raw | identifier_name |
panicking.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::any::Any;
use std::boxed::FnBox;
use std::cell::RefCell;
use std::panic::{PanicInfo, take_hook, set_hook};
use std::sync::{Once, ONCE_INIT};
use std::thread;
// only set the panic hook once
static HOOK_SET: Once = ONCE_INIT;
/// TLS data pertaining to how failures should be reported
pub struct PanicHandlerLocal {
/// failure handler passed through spawn_named_with_send_on_failure
pub fail: Box<FnBox(&Any)>
}
thread_local!(pub static LOCAL_INFO: RefCell<Option<PanicHandlerLocal>> = RefCell::new(None));
/// Set the thread-local panic hook
pub fn set_thread_local_hook(local: Box<FnBox(&Any)>) {
LOCAL_INFO.with(|i| *i.borrow_mut() = Some(PanicHandlerLocal { fail: local }));
}
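// Illustrative sketch (assumed call site): a thread spawner could install a local
// handler so the panic payload is forwarded to its own error channel instead of only
// being printed by the original hook. The helper name is illustrative only.
#[allow(dead_code)]
fn example_install_local_handler() {
set_thread_local_hook(Box::new(|payload: &Any| {
// e.g. downcast the payload and send the message over a channel
let _ = payload.downcast_ref::<String>();
}));
}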
/// Initiates the custom panic hook
/// Should be called in main() after arguments have been parsed
pub fn initiate_panic_hook() {
// Set the panic handler only once. It is global.
HOOK_SET.call_once(|| {
// The original backtrace-printing hook. We still want to call this
let hook = take_hook();
let new_hook = move |info: &PanicInfo| {
let payload = info.payload();
let name = thread::current().name().unwrap_or("<unknown thread>").to_string();
// Notify error handlers stored in LOCAL_INFO if any
LOCAL_INFO.with(|i| {
if let Some(local_info) = i.borrow_mut().take() {
debug!("Thread `{}` failed, notifying error handlers", name);
(local_info.fail).call_box((payload, ));
} else |
});
};
set_hook(Box::new(new_hook));
});
}
| {
hook(&info);
} | conditional_block |
panicking.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::any::Any;
use std::boxed::FnBox;
use std::cell::RefCell;
use std::panic::{PanicInfo, take_hook, set_hook};
use std::sync::{Once, ONCE_INIT};
use std::thread;
// only set the panic hook once
static HOOK_SET: Once = ONCE_INIT;
/// TLS data pertaining to how failures should be reported
pub struct PanicHandlerLocal {
/// failure handler passed through spawn_named_with_send_on_failure
pub fail: Box<FnBox(&Any)>
}
thread_local!(pub static LOCAL_INFO: RefCell<Option<PanicHandlerLocal>> = RefCell::new(None));
/// Set the thread-local panic hook
pub fn | (local: Box<FnBox(&Any)>) {
LOCAL_INFO.with(|i| *i.borrow_mut() = Some(PanicHandlerLocal { fail: local }));
}
/// Initiates the custom panic hook
/// Should be called in main() after arguments have been parsed
pub fn initiate_panic_hook() {
// Set the panic handler only once. It is global.
HOOK_SET.call_once(|| {
// The original backtrace-printing hook. We still want to call this
let hook = take_hook();
let new_hook = move |info: &PanicInfo| {
let payload = info.payload();
let name = thread::current().name().unwrap_or("<unknown thread>").to_string();
// Notify error handlers stored in LOCAL_INFO if any
LOCAL_INFO.with(|i| {
if let Some(local_info) = i.borrow_mut().take() {
debug!("Thread `{}` failed, notifying error handlers", name);
(local_info.fail).call_box((payload, ));
} else {
hook(&info);
}
});
};
set_hook(Box::new(new_hook));
});
}
| set_thread_local_hook | identifier_name |
panicking.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::any::Any;
use std::boxed::FnBox;
use std::cell::RefCell;
use std::panic::{PanicInfo, take_hook, set_hook};
use std::sync::{Once, ONCE_INIT};
use std::thread;
// only set the panic hook once
static HOOK_SET: Once = ONCE_INIT;
/// TLS data pertaining to how failures should be reported
pub struct PanicHandlerLocal {
/// failure handler passed through spawn_named_with_send_on_failure
pub fail: Box<FnBox(&Any)>
}
thread_local!(pub static LOCAL_INFO: RefCell<Option<PanicHandlerLocal>> = RefCell::new(None));
/// Set the thread-local panic hook
pub fn set_thread_local_hook(local: Box<FnBox(&Any)>) {
LOCAL_INFO.with(|i| *i.borrow_mut() = Some(PanicHandlerLocal { fail: local }));
}
/// Initiates the custom panic hook
/// Should be called in main() after arguments have been parsed | // The original backtrace-printing hook. We still want to call this
let hook = take_hook();
let new_hook = move |info: &PanicInfo| {
let payload = info.payload();
let name = thread::current().name().unwrap_or("<unknown thread>").to_string();
// Notify error handlers stored in LOCAL_INFO if any
LOCAL_INFO.with(|i| {
if let Some(local_info) = i.borrow_mut().take() {
debug!("Thread `{}` failed, notifying error handlers", name);
(local_info.fail).call_box((payload, ));
} else {
hook(&info);
}
});
};
set_hook(Box::new(new_hook));
});
} | pub fn initiate_panic_hook() {
// Set the panic handler only once. It is global.
HOOK_SET.call_once(|| { | random_line_split |
day09.rs | extern crate permutohedron;
extern crate pcre;
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::{HashMap, HashSet};
use std::cmp::{min, max};
use permutohedron::Heap;
use pcre::Pcre;
fn | () {
let f = File::open("day9.in").unwrap();
let file = BufReader::new(&f);
let mut distances : HashMap<(String,String), i32> = HashMap::new();
let mut cities: HashSet<String> = HashSet::new();
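// Each input line is expected to look like "London to Dublin = 464": the two city
// names and the distance are captured by the pattern below.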
let mut re = Pcre::compile(r"(\w+) to (\w+) = (\d+)").unwrap();
for line in file.lines() {
let line = line.unwrap();
let m = re.exec(&line).unwrap();
let c1 = m.group(1).to_string();
let c2 = m.group(2).to_string();
let d = m.group(3).parse().unwrap();
distances.insert((c1.clone(), c2.clone()), d);
distances.insert((c2.clone(), c1.clone()), d);
cities.insert(c1);
cities.insert(c2);
}
let mut min_dist = 1000000;
let mut max_dist = 0;
let mut cities_vec: Vec<String> = cities.into_iter().collect();
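// Heap's algorithm enumerates every permutation of the cities, so this is an O(n!)
// brute force over all routes, which is fine for the handful of cities in the input.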
for p in Heap::new(&mut cities_vec) {
let mut dist = 0;
for (a,b) in p.iter().zip(p[1..p.len()].iter()) {
dist += distances[&(a.to_string(),b.to_string())];
}
min_dist = min(min_dist, dist);
max_dist = max(max_dist, dist);
}
println!("{}", min_dist);
println!("{}", max_dist);
}
| main | identifier_name |
day09.rs | extern crate permutohedron;
extern crate pcre;
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::{HashMap, HashSet};
use std::cmp::{min, max};
use permutohedron::Heap;
use pcre::Pcre;
fn main() {
let f = File::open("day9.in").unwrap();
let file = BufReader::new(&f);
let mut distances : HashMap<(String,String), i32> = HashMap::new();
let mut cities: HashSet<String> = HashSet::new();
let mut re = Pcre::compile(r"(\w+) to (\w+) = (\d+)").unwrap();
for line in file.lines() {
let line = line.unwrap();
let m = re.exec(&line).unwrap();
let c1 = m.group(1).to_string();
let c2 = m.group(2).to_string();
let d = m.group(3).parse().unwrap();
distances.insert((c1.clone(), c2.clone()), d);
distances.insert((c2.clone(), c1.clone()), d);
cities.insert(c1);
cities.insert(c2);
} | let mut cities_vec: Vec<String> = cities.into_iter().collect();
for p in Heap::new(&mut cities_vec) {
let mut dist = 0;
for (a,b) in p.iter().zip(p[1..p.len()].iter()) {
dist += distances[&(a.to_string(),b.to_string())];
}
min_dist = min(min_dist, dist);
max_dist = max(max_dist, dist);
}
println!("{}", min_dist);
println!("{}", max_dist);
} |
let mut min_dist = 1000000;
let mut max_dist = 0;
| random_line_split |
day09.rs | extern crate permutohedron;
extern crate pcre;
use std::fs::File;
use std::io::BufReader;
use std::io::BufRead;
use std::collections::{HashMap, HashSet};
use std::cmp::{min, max};
use permutohedron::Heap;
use pcre::Pcre;
fn main() | }
let mut min_dist = 1000000;
let mut max_dist = 0;
let mut cities_vec: Vec<String> = cities.into_iter().collect();
for p in Heap::new(&mut cities_vec) {
let mut dist = 0;
for (a,b) in p.iter().zip(p[1..p.len()].iter()) {
dist += distances[&(a.to_string(),b.to_string())];
}
min_dist = min(min_dist, dist);
max_dist = max(max_dist, dist);
}
println!("{}", min_dist);
println!("{}", max_dist);
}
| {
let f = File::open("day9.in").unwrap();
let file = BufReader::new(&f);
let mut distances : HashMap<(String,String), i32> = HashMap::new();
let mut cities: HashSet<String> = HashSet::new();
let mut re = Pcre::compile(r"(\w+) to (\w+) = (\d+)").unwrap();
for line in file.lines() {
let line = line.unwrap();
let m = re.exec(&line).unwrap();
let c1 = m.group(1).to_string();
let c2 = m.group(2).to_string();
let d = m.group(3).parse().unwrap();
distances.insert((c1.clone(), c2.clone()), d);
distances.insert((c2.clone(), c1.clone()), d);
cities.insert(c1);
cities.insert(c2); | identifier_body |
mod.rs | resolve_ivar) {
Ok(resulting_type) if !type_is_ty_var(resulting_type) => resulting_type,
_ => {
inference_context.tcx.sess.span_fatal(span,
"the type of this value must be known in order \
to determine the base type");
}
};
match get(resolved_type).sty {
ty_enum(..) | ty_struct(..) | ty_unboxed_closure(..) => {
debug!("(getting base type) found base type");
Some(resolved_type)
}
_ if ty::type_is_trait(resolved_type) => {
debug!("(getting base type) found base type (trait)");
Some(resolved_type)
}
ty_nil | ty_bot | ty_bool | ty_char | ty_int(..) | ty_uint(..) | ty_float(..) |
ty_str(..) | ty_vec(..) | ty_bare_fn(..) | ty_closure(..) | ty_tup(..) |
ty_infer(..) | ty_param(..) | ty_err | ty_open(..) | ty_uniq(_) |
ty_ptr(_) | ty_rptr(_, _) => {
debug!("(getting base type) no base type; found {}",
get(original_type).sty);
None
}
ty_trait(..) => fail!("should have been caught")
}
}
// Returns the def ID of the base type, if there is one.
fn get_base_type_def_id(inference_context: &InferCtxt,
span: Span,
original_type: t)
-> Option<DefId> {
match get_base_type(inference_context, span, original_type) {
None => None,
Some(base_type) => {
match get(base_type).sty {
ty_enum(def_id, _) |
ty_struct(def_id, _) |
ty_unboxed_closure(def_id, _) => {
Some(def_id)
}
ty_ptr(ty::mt {ty, ..}) |
ty_rptr(_, ty::mt {ty, ..}) |
ty_uniq(ty) => {
match ty::get(ty).sty {
ty_trait(box ty::TyTrait { def_id, .. }) => {
Some(def_id)
}
_ => {
fail!("get_base_type() returned a type that wasn't an \
enum, struct, or trait");
}
}
}
ty_trait(box ty::TyTrait { def_id, .. }) => {
Some(def_id)
}
_ => {
fail!("get_base_type() returned a type that wasn't an \
enum, struct, or trait");
}
}
}
}
}
struct CoherenceChecker<'a, 'tcx: 'a> {
crate_context: &'a CrateCtxt<'a, 'tcx>,
inference_context: InferCtxt<'a, 'tcx>,
inherent_impls: RefCell<DefIdMap<Rc<RefCell<Vec<ast::DefId>>>>>,
}
struct CoherenceCheckVisitor<'a, 'tcx: 'a> {
cc: &'a CoherenceChecker<'a, 'tcx>
}
impl<'a, 'tcx, 'v> visit::Visitor<'v> for CoherenceCheckVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &Item) {
//debug!("(checking coherence) item '{}'", token::get_ident(item.ident));
match item.node {
ItemImpl(_, ref opt_trait, _, _) => {
match opt_trait.clone() {
Some(opt_trait) => {
self.cc.check_implementation(item, [opt_trait]);
}
None => self.cc.check_implementation(item, [])
}
}
_ => {
// Nothing to do.
}
};
visit::walk_item(self, item);
}
}
impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
fn check(&self, krate: &Crate) {
// Check implementations and traits. This populates the tables
// containing the inherent methods and extension methods. It also
// builds up the trait inheritance table.
let mut visitor = CoherenceCheckVisitor { cc: self };
visit::walk_crate(&mut visitor, krate);
// Copy over the inherent impls we gathered up during the walk into
// the tcx.
let mut tcx_inherent_impls =
self.crate_context.tcx.inherent_impls.borrow_mut();
for (k, v) in self.inherent_impls.borrow().iter() {
tcx_inherent_impls.insert((*k).clone(),
Rc::new((*v.borrow()).clone()));
}
// Bring in external crates. It's fine for this to happen after the
// coherence checks, because we ensure by construction that no errors
// can happen at link time.
self.add_external_crates();
// Populate the table of destructors. It might seem a bit strange to
// do this here, but it's actually the most convenient place, since
// the coherence tables contain the trait -> type mappings.
self.populate_destructor_table();
}
fn check_implementation(&self,
item: &Item,
associated_traits: &[TraitRef]) {
let tcx = self.crate_context.tcx;
let impl_did = local_def(item.id);
let self_type = ty::lookup_item_type(tcx, impl_did);
// If there are no traits, then this implementation must have a
// base type.
let impl_items = self.create_impl_from_item(item);
for associated_trait in associated_traits.iter() {
let trait_ref = ty::node_id_to_trait_ref(
self.crate_context.tcx, associated_trait.ref_id);
debug!("(checking implementation) adding impl for trait '{}', item '{}'",
trait_ref.repr(self.crate_context.tcx),
token::get_ident(item.ident));
self.add_trait_impl(trait_ref.def_id, impl_did);
}
// Add the implementation to the mapping from implementation to base
// type def ID, if there is a base type for this implementation and
// the implementation does not have any associated traits.
match get_base_type_def_id(&self.inference_context,
item.span,
self_type.ty) {
None => {
// Nothing to do.
}
Some(base_type_def_id) => {
// FIXME: Gather up default methods?
if associated_traits.len() == 0 {
self.add_inherent_impl(base_type_def_id, impl_did);
}
}
}
tcx.impl_items.borrow_mut().insert(impl_did, impl_items);
}
// Creates default method IDs and performs type substitutions for an impl
// and trait pair. Then, for each provided method in the trait, inserts a
// `ProvidedMethodInfo` instance into the `provided_method_sources` map.
fn instantiate_default_methods(
&self,
impl_id: DefId,
trait_ref: &ty::TraitRef,
all_impl_items: &mut Vec<ImplOrTraitItemId>) {
let tcx = self.crate_context.tcx;
debug!("instantiate_default_methods(impl_id={}, trait_ref={})",
impl_id, trait_ref.repr(tcx));
let impl_poly_type = ty::lookup_item_type(tcx, impl_id);
let prov = ty::provided_trait_methods(tcx, trait_ref.def_id);
for trait_method in prov.iter() {
// Synthesize an ID.
let new_id = tcx.sess.next_node_id();
let new_did = local_def(new_id);
debug!("new_did={} trait_method={}", new_did, trait_method.repr(tcx));
// Create substitutions for the various trait parameters.
let new_method_ty =
Rc::new(subst_receiver_types_in_method_ty(
tcx,
impl_id,
&impl_poly_type,
trait_ref,
new_did,
&**trait_method,
Some(trait_method.def_id)));
debug!("new_method_ty={}", new_method_ty.repr(tcx));
all_impl_items.push(MethodTraitItemId(new_did));
// construct the polytype for the method based on the
// method_ty. it will have all the generics from the
// impl, plus its own.
let new_polytype = ty::Polytype {
generics: new_method_ty.generics.clone(),
ty: ty::mk_bare_fn(tcx, new_method_ty.fty.clone())
};
debug!("new_polytype={}", new_polytype.repr(tcx));
tcx.tcache.borrow_mut().insert(new_did, new_polytype);
tcx.impl_or_trait_items
.borrow_mut()
.insert(new_did, ty::MethodTraitItem(new_method_ty));
// Pair the new synthesized ID up with the
// ID of the method.
self.crate_context.tcx.provided_method_sources.borrow_mut()
.insert(new_did, trait_method.def_id);
}
}
fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
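// Append to the existing list if this type already has inherent impls; the
// early return keeps the map borrow from overlapping with the insert below.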
match self.inherent_impls.borrow().find(&base_def_id) {
Some(implementation_list) => {
implementation_list.borrow_mut().push(impl_def_id);
return;
}
None => {}
}
self.inherent_impls.borrow_mut().insert(
base_def_id,
Rc::new(RefCell::new(vec!(impl_def_id))));
}
fn add_trait_impl(&self, base_def_id: DefId, impl_def_id: DefId) |
fn get_self_type_for_implementation(&self, impl_did: DefId)
-> Polytype {
self.crate_context.tcx.tcache.borrow().get_copy(&impl_did)
}
// Converts an implementation in the AST to a vector of items.
fn create_impl_from_item(&self, item: &Item) -> Vec<ImplOrTraitItemId> {
match item.node {
ItemImpl(_, ref trait_refs, _, ref ast_items) => {
let mut items: Vec<ImplOrTraitItemId> =
ast_items.iter()
.map(|ast_item| {
match *ast_item {
ast::MethodImplItem(ref ast_method) => {
MethodTraitItemId(
local_def(ast_method.id))
}
ast::TypeImplItem(ref typedef) => {
TypeTraitItemId(local_def(typedef.id))
}
}
}).collect();
for trait_ref in trait_refs.iter() {
let ty_trait_ref = ty::node_id_to_trait_ref(
self.crate_context.tcx,
trait_ref.ref_id);
self.instantiate_default_methods(local_def(item.id),
&*ty_trait_ref,
&mut items);
}
items
}
_ => {
self.crate_context.tcx.sess.span_bug(item.span,
"can't convert a non-impl to an impl");
}
}
}
// External crate handling
fn add_external_impl(&self,
impls_seen: &mut HashSet<DefId>,
impl_def_id: DefId) {
let tcx = self.crate_context.tcx;
let impl_items = csearch::get_impl_items(&tcx.sess.cstore,
impl_def_id);
// Make sure we don't visit the same implementation multiple times.
if !impls_seen.insert(impl_def_id) {
// Skip this one.
return
}
// Good. Continue.
let _ = lookup_item_type(tcx, impl_def_id);
let associated_traits = get_impl_trait(tcx, impl_def_id);
// Do a sanity check.
assert!(associated_traits.is_some());
// Record all the trait items.
for trait_ref in associated_traits.iter() {
self.add_trait_impl(trait_ref.def_id, impl_def_id);
}
// For any methods that use a default implementation, add them to
// the map. This is a bit unfortunate.
for item_def_id in impl_items.iter() {
let impl_item = ty::impl_or_trait_item(tcx, item_def_id.def_id());
match impl_item {
ty::MethodTraitItem(ref method) => {
for &source in method.provided_source.iter() {
tcx.provided_method_sources
.borrow_mut()
.insert(item_def_id.def_id(), source);
}
}
ty::TypeTraitItem(_) => {}
}
}
tcx.impl_items.borrow_mut().insert(impl_def_id, impl_items);
}
// Adds implementations and traits from external crates to the coherence
// info.
fn add_external_crates(&self) {
let mut impls_seen = HashSet::new();
let crate_store = &self.crate_context.tcx.sess.cstore;
crate_store.iter_crate_data(|crate_number, _crate_metadata| {
each_impl(crate_store, crate_number, |def_id| {
assert_eq!(crate_number, def_id.krate);
self.add_external_impl(&mut impls_seen, def_id)
})
})
}
//
// Destructors
//
fn populate_destructor_table(&self) {
let tcx = self.crate_context.tcx;
let drop_trait = match tcx.lang_items.drop_trait() {
Some(id) => id, None => { return }
};
let impl_items = tcx.impl_items.borrow();
let trait_impls = match tcx.trait_impls.borrow().find_copy(&drop_trait) {
None => return, // No types with (new-style) dtors present.
Some(found_impls) => found_impls
};
for &impl_did in trait_impls.borrow().iter() {
let items = impl_items.get(&impl_did);
if items.len() < 1 {
// We'll error out later. For now, just don't ICE.
continue;
}
let method_def_id = *items.get(0);
let self_type = self.get_self_type_for_implementation(impl_did);
match ty::get(self_type.ty).sty {
ty::ty_enum(type_def_id, _) |
ty::ty_struct(type_def_id, _) |
ty::ty_unboxed_closure(type_def_id, _) => {
tcx.destructor_for_type
.borrow_mut()
.insert(type_def_id, method_def_id.def_id());
tcx.destructors
.borrow_mut()
.insert(method_def_id.def_id());
}
_ => {
// Destructors only work on nominal types.
if impl_did.krate == ast::LOCAL_CRATE {
{
match tcx.map.find(impl_did.node) {
Some(ast_map::NodeItem(item)) => {
| {
debug!("add_trait_impl: base_def_id={} impl_def_id={}",
base_def_id, impl_def_id);
ty::record_trait_implementation(self.crate_context.tcx,
base_def_id,
impl_def_id);
} | identifier_body |
mod.rs | resolve_ivar) {
Ok(resulting_type) if !type_is_ty_var(resulting_type) => resulting_type,
_ => {
inference_context.tcx.sess.span_fatal(span,
"the type of this value must be known in order \
to determine the base type");
}
};
match get(resolved_type).sty {
ty_enum(..) | ty_struct(..) | ty_unboxed_closure(..) => {
debug!("(getting base type) found base type");
Some(resolved_type)
}
_ if ty::type_is_trait(resolved_type) => {
debug!("(getting base type) found base type (trait)");
Some(resolved_type)
}
ty_nil | ty_bot | ty_bool | ty_char | ty_int(..) | ty_uint(..) | ty_float(..) |
ty_str(..) | ty_vec(..) | ty_bare_fn(..) | ty_closure(..) | ty_tup(..) |
ty_infer(..) | ty_param(..) | ty_err | ty_open(..) | ty_uniq(_) |
ty_ptr(_) | ty_rptr(_, _) => {
debug!("(getting base type) no base type; found {}",
get(original_type).sty);
None
}
ty_trait(..) => fail!("should have been caught")
}
}
// Returns the def ID of the base type, if there is one.
fn get_base_type_def_id(inference_context: &InferCtxt,
span: Span,
original_type: t)
-> Option<DefId> {
match get_base_type(inference_context, span, original_type) {
None => None,
Some(base_type) => {
match get(base_type).sty {
ty_enum(def_id, _) |
ty_struct(def_id, _) |
ty_unboxed_closure(def_id, _) => {
Some(def_id)
}
ty_ptr(ty::mt {ty, ..}) |
ty_rptr(_, ty::mt {ty, ..}) |
ty_uniq(ty) => {
match ty::get(ty).sty {
ty_trait(box ty::TyTrait { def_id, .. }) => {
Some(def_id)
}
_ => {
fail!("get_base_type() returned a type that wasn't an \
enum, struct, or trait");
}
}
}
ty_trait(box ty::TyTrait { def_id, .. }) => {
Some(def_id)
}
_ => {
fail!("get_base_type() returned a type that wasn't an \
enum, struct, or trait");
}
}
}
}
}
struct CoherenceChecker<'a, 'tcx: 'a> {
crate_context: &'a CrateCtxt<'a, 'tcx>,
inference_context: InferCtxt<'a, 'tcx>,
inherent_impls: RefCell<DefIdMap<Rc<RefCell<Vec<ast::DefId>>>>>,
}
struct CoherenceCheckVisitor<'a, 'tcx: 'a> {
cc: &'a CoherenceChecker<'a, 'tcx>
}
impl<'a, 'tcx, 'v> visit::Visitor<'v> for CoherenceCheckVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &Item) {
//debug!("(checking coherence) item '{}'", token::get_ident(item.ident));
match item.node {
ItemImpl(_, ref opt_trait, _, _) => {
match opt_trait.clone() {
Some(opt_trait) => {
self.cc.check_implementation(item, [opt_trait]);
}
None => self.cc.check_implementation(item, [])
}
}
_ => {
// Nothing to do.
}
};
visit::walk_item(self, item);
}
}
impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
fn check(&self, krate: &Crate) {
// Check implementations and traits. This populates the tables
// containing the inherent methods and extension methods. It also
// builds up the trait inheritance table.
let mut visitor = CoherenceCheckVisitor { cc: self };
visit::walk_crate(&mut visitor, krate);
// Copy over the inherent impls we gathered up during the walk into
// the tcx.
let mut tcx_inherent_impls =
self.crate_context.tcx.inherent_impls.borrow_mut();
for (k, v) in self.inherent_impls.borrow().iter() {
tcx_inherent_impls.insert((*k).clone(),
Rc::new((*v.borrow()).clone()));
}
// Bring in external crates. It's fine for this to happen after the
// coherence checks, because we ensure by construction that no errors
// can happen at link time.
self.add_external_crates();
// Populate the table of destructors. It might seem a bit strange to
// do this here, but it's actually the most convenient place, since
// the coherence tables contain the trait -> type mappings.
self.populate_destructor_table();
}
fn check_implementation(&self,
item: &Item,
associated_traits: &[TraitRef]) {
let tcx = self.crate_context.tcx;
let impl_did = local_def(item.id);
let self_type = ty::lookup_item_type(tcx, impl_did);
// If there are no traits, then this implementation must have a
// base type.
let impl_items = self.create_impl_from_item(item);
for associated_trait in associated_traits.iter() {
let trait_ref = ty::node_id_to_trait_ref(
self.crate_context.tcx, associated_trait.ref_id);
debug!("(checking implementation) adding impl for trait '{}', item '{}'",
trait_ref.repr(self.crate_context.tcx),
token::get_ident(item.ident));
self.add_trait_impl(trait_ref.def_id, impl_did);
}
// Add the implementation to the mapping from implementation to base
// type def ID, if there is a base type for this implementation and
// the implementation does not have any associated traits.
match get_base_type_def_id(&self.inference_context,
item.span,
self_type.ty) {
None => {
// Nothing to do.
}
Some(base_type_def_id) => {
// FIXME: Gather up default methods?
if associated_traits.len() == 0 {
self.add_inherent_impl(base_type_def_id, impl_did);
}
}
}
tcx.impl_items.borrow_mut().insert(impl_did, impl_items);
}
// Creates default method IDs and performs type substitutions for an impl
// and trait pair. Then, for each provided method in the trait, inserts a
// `ProvidedMethodInfo` instance into the `provided_method_sources` map.
fn instantiate_default_methods(
&self,
impl_id: DefId,
trait_ref: &ty::TraitRef,
all_impl_items: &mut Vec<ImplOrTraitItemId>) {
let tcx = self.crate_context.tcx;
debug!("instantiate_default_methods(impl_id={}, trait_ref={})",
impl_id, trait_ref.repr(tcx));
let impl_poly_type = ty::lookup_item_type(tcx, impl_id);
let prov = ty::provided_trait_methods(tcx, trait_ref.def_id);
for trait_method in prov.iter() {
// Synthesize an ID.
let new_id = tcx.sess.next_node_id();
let new_did = local_def(new_id);
debug!("new_did={} trait_method={}", new_did, trait_method.repr(tcx));
// Create substitutions for the various trait parameters.
let new_method_ty =
Rc::new(subst_receiver_types_in_method_ty(
tcx,
impl_id,
&impl_poly_type,
trait_ref,
new_did,
&**trait_method,
Some(trait_method.def_id)));
debug!("new_method_ty={}", new_method_ty.repr(tcx));
all_impl_items.push(MethodTraitItemId(new_did));
// construct the polytype for the method based on the
// method_ty. it will have all the generics from the
// impl, plus its own.
let new_polytype = ty::Polytype {
generics: new_method_ty.generics.clone(),
ty: ty::mk_bare_fn(tcx, new_method_ty.fty.clone())
};
debug!("new_polytype={}", new_polytype.repr(tcx));
tcx.tcache.borrow_mut().insert(new_did, new_polytype);
tcx.impl_or_trait_items
.borrow_mut()
.insert(new_did, ty::MethodTraitItem(new_method_ty));
// Pair the new synthesized ID up with the
// ID of the method.
self.crate_context.tcx.provided_method_sources.borrow_mut()
.insert(new_did, trait_method.def_id);
}
}
fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
match self.inherent_impls.borrow().find(&base_def_id) {
Some(implementation_list) => {
implementation_list.borrow_mut().push(impl_def_id);
return;
}
None => {}
}
self.inherent_impls.borrow_mut().insert(
base_def_id,
Rc::new(RefCell::new(vec!(impl_def_id))));
}
fn add_trait_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
debug!("add_trait_impl: base_def_id={} impl_def_id={}",
base_def_id, impl_def_id);
ty::record_trait_implementation(self.crate_context.tcx,
base_def_id,
impl_def_id);
}
fn get_self_type_for_implementation(&self, impl_did: DefId)
-> Polytype {
self.crate_context.tcx.tcache.borrow().get_copy(&impl_did)
}
// Converts an implementation in the AST to a vector of items.
fn create_impl_from_item(&self, item: &Item) -> Vec<ImplOrTraitItemId> {
match item.node {
ItemImpl(_, ref trait_refs, _, ref ast_items) => | self.instantiate_default_methods(local_def(item.id),
&*ty_trait_ref,
&mut items);
}
items
}
_ => {
self.crate_context.tcx.sess.span_bug(item.span,
"can't convert a non-impl to an impl");
}
}
}
// External crate handling
fn add_external_impl(&self,
impls_seen: &mut HashSet<DefId>,
impl_def_id: DefId) {
let tcx = self.crate_context.tcx;
let impl_items = csearch::get_impl_items(&tcx.sess.cstore,
impl_def_id);
// Make sure we don't visit the same implementation multiple times.
if !impls_seen.insert(impl_def_id) {
// Skip this one.
return
}
// Good. Continue.
let _ = lookup_item_type(tcx, impl_def_id);
let associated_traits = get_impl_trait(tcx, impl_def_id);
// Do a sanity check.
assert!(associated_traits.is_some());
// Record all the trait items.
for trait_ref in associated_traits.iter() {
self.add_trait_impl(trait_ref.def_id, impl_def_id);
}
// For any methods that use a default implementation, add them to
// the map. This is a bit unfortunate.
for item_def_id in impl_items.iter() {
let impl_item = ty::impl_or_trait_item(tcx, item_def_id.def_id());
match impl_item {
ty::MethodTraitItem(ref method) => {
for &source in method.provided_source.iter() {
tcx.provided_method_sources
.borrow_mut()
.insert(item_def_id.def_id(), source);
}
}
ty::TypeTraitItem(_) => {}
}
}
tcx.impl_items.borrow_mut().insert(impl_def_id, impl_items);
}
// Adds implementations and traits from external crates to the coherence
// info.
fn add_external_crates(&self) {
let mut impls_seen = HashSet::new();
let crate_store = &self.crate_context.tcx.sess.cstore;
crate_store.iter_crate_data(|crate_number, _crate_metadata| {
each_impl(crate_store, crate_number, |def_id| {
assert_eq!(crate_number, def_id.krate);
self.add_external_impl(&mut impls_seen, def_id)
})
})
}
//
// Destructors
//
fn populate_destructor_table(&self) {
let tcx = self.crate_context.tcx;
let drop_trait = match tcx.lang_items.drop_trait() {
Some(id) => id, None => { return }
};
let impl_items = tcx.impl_items.borrow();
let trait_impls = match tcx.trait_impls.borrow().find_copy(&drop_trait) {
None => return, // No types with (new-style) dtors present.
Some(found_impls) => found_impls
};
for &impl_did in trait_impls.borrow().iter() {
let items = impl_items.get(&impl_did);
if items.len() < 1 {
// We'll error out later. For now, just don't ICE.
continue;
}
let method_def_id = *items.get(0);
let self_type = self.get_self_type_for_implementation(impl_did);
match ty::get(self_type.ty).sty {
ty::ty_enum(type_def_id, _) |
ty::ty_struct(type_def_id, _) |
ty::ty_unboxed_closure(type_def_id, _) => {
tcx.destructor_for_type
.borrow_mut()
.insert(type_def_id, method_def_id.def_id());
tcx.destructors
.borrow_mut()
.insert(method_def_id.def_id());
}
_ => {
// Destructors only work on nominal types.
if impl_did.krate == ast::LOCAL_CRATE {
{
match tcx.map.find(impl_did.node) {
Some(ast_map::NodeItem(item)) => {
| {
let mut items: Vec<ImplOrTraitItemId> =
ast_items.iter()
.map(|ast_item| {
match *ast_item {
ast::MethodImplItem(ref ast_method) => {
MethodTraitItemId(
local_def(ast_method.id))
}
ast::TypeImplItem(ref typedef) => {
TypeTraitItemId(local_def(typedef.id))
}
}
}).collect();
for trait_ref in trait_refs.iter() {
let ty_trait_ref = ty::node_id_to_trait_ref(
self.crate_context.tcx,
trait_ref.ref_id);
| conditional_block |
mod.rs | debug!("(getting base type) no base type; found {}",
get(original_type).sty);
None
}
ty_trait(..) => fail!("should have been caught")
}
}
// Returns the def ID of the base type, if there is one.
fn get_base_type_def_id(inference_context: &InferCtxt,
span: Span,
original_type: t)
-> Option<DefId> {
match get_base_type(inference_context, span, original_type) {
None => None,
Some(base_type) => {
match get(base_type).sty {
ty_enum(def_id, _) |
ty_struct(def_id, _) |
ty_unboxed_closure(def_id, _) => {
Some(def_id)
}
ty_ptr(ty::mt {ty, ..}) |
ty_rptr(_, ty::mt {ty, ..}) |
ty_uniq(ty) => {
match ty::get(ty).sty {
ty_trait(box ty::TyTrait { def_id, .. }) => {
Some(def_id)
}
_ => {
fail!("get_base_type() returned a type that wasn't an \
enum, struct, or trait");
}
}
}
ty_trait(box ty::TyTrait { def_id, .. }) => {
Some(def_id)
}
_ => {
fail!("get_base_type() returned a type that wasn't an \
enum, struct, or trait");
}
}
}
}
}
struct CoherenceChecker<'a, 'tcx: 'a> {
crate_context: &'a CrateCtxt<'a, 'tcx>,
inference_context: InferCtxt<'a, 'tcx>,
inherent_impls: RefCell<DefIdMap<Rc<RefCell<Vec<ast::DefId>>>>>,
}
struct CoherenceCheckVisitor<'a, 'tcx: 'a> {
cc: &'a CoherenceChecker<'a, 'tcx>
}
impl<'a, 'tcx, 'v> visit::Visitor<'v> for CoherenceCheckVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &Item) {
//debug!("(checking coherence) item '{}'", token::get_ident(item.ident));
match item.node {
ItemImpl(_, ref opt_trait, _, _) => {
match opt_trait.clone() {
Some(opt_trait) => {
self.cc.check_implementation(item, [opt_trait]);
}
None => self.cc.check_implementation(item, [])
}
}
_ => {
// Nothing to do.
}
};
visit::walk_item(self, item);
}
}
impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
fn check(&self, krate: &Crate) {
// Check implementations and traits. This populates the tables
// containing the inherent methods and extension methods. It also
// builds up the trait inheritance table.
let mut visitor = CoherenceCheckVisitor { cc: self };
visit::walk_crate(&mut visitor, krate);
// Copy over the inherent impls we gathered up during the walk into
// the tcx.
let mut tcx_inherent_impls =
self.crate_context.tcx.inherent_impls.borrow_mut();
for (k, v) in self.inherent_impls.borrow().iter() {
tcx_inherent_impls.insert((*k).clone(),
Rc::new((*v.borrow()).clone()));
}
// Bring in external crates. It's fine for this to happen after the
// coherence checks, because we ensure by construction that no errors
// can happen at link time.
self.add_external_crates();
// Populate the table of destructors. It might seem a bit strange to
// do this here, but it's actually the most convenient place, since
// the coherence tables contain the trait -> type mappings.
self.populate_destructor_table();
}
fn check_implementation(&self,
item: &Item,
associated_traits: &[TraitRef]) {
let tcx = self.crate_context.tcx;
let impl_did = local_def(item.id);
let self_type = ty::lookup_item_type(tcx, impl_did);
// If there are no traits, then this implementation must have a
// base type.
let impl_items = self.create_impl_from_item(item);
for associated_trait in associated_traits.iter() {
let trait_ref = ty::node_id_to_trait_ref(
self.crate_context.tcx, associated_trait.ref_id);
debug!("(checking implementation) adding impl for trait '{}', item '{}'",
trait_ref.repr(self.crate_context.tcx),
token::get_ident(item.ident));
self.add_trait_impl(trait_ref.def_id, impl_did);
}
// Add the implementation to the mapping from implementation to base
// type def ID, if there is a base type for this implementation and
// the implementation does not have any associated traits.
match get_base_type_def_id(&self.inference_context,
item.span,
self_type.ty) {
None => {
// Nothing to do.
}
Some(base_type_def_id) => {
// FIXME: Gather up default methods?
if associated_traits.len() == 0 {
self.add_inherent_impl(base_type_def_id, impl_did);
}
}
}
tcx.impl_items.borrow_mut().insert(impl_did, impl_items);
}
// Creates default method IDs and performs type substitutions for an impl
// and trait pair. Then, for each provided method in the trait, inserts a
// `ProvidedMethodInfo` instance into the `provided_method_sources` map.
fn instantiate_default_methods(
&self,
impl_id: DefId,
trait_ref: &ty::TraitRef,
all_impl_items: &mut Vec<ImplOrTraitItemId>) {
let tcx = self.crate_context.tcx;
debug!("instantiate_default_methods(impl_id={}, trait_ref={})",
impl_id, trait_ref.repr(tcx));
let impl_poly_type = ty::lookup_item_type(tcx, impl_id);
let prov = ty::provided_trait_methods(tcx, trait_ref.def_id);
for trait_method in prov.iter() {
// Synthesize an ID.
let new_id = tcx.sess.next_node_id();
let new_did = local_def(new_id);
debug!("new_did={} trait_method={}", new_did, trait_method.repr(tcx));
// Create substitutions for the various trait parameters.
let new_method_ty =
Rc::new(subst_receiver_types_in_method_ty(
tcx,
impl_id,
&impl_poly_type,
trait_ref,
new_did,
&**trait_method,
Some(trait_method.def_id)));
debug!("new_method_ty={}", new_method_ty.repr(tcx));
all_impl_items.push(MethodTraitItemId(new_did));
// construct the polytype for the method based on the
// method_ty. it will have all the generics from the
// impl, plus its own.
let new_polytype = ty::Polytype {
generics: new_method_ty.generics.clone(),
ty: ty::mk_bare_fn(tcx, new_method_ty.fty.clone())
};
debug!("new_polytype={}", new_polytype.repr(tcx));
tcx.tcache.borrow_mut().insert(new_did, new_polytype);
tcx.impl_or_trait_items
.borrow_mut()
.insert(new_did, ty::MethodTraitItem(new_method_ty));
// Pair the new synthesized ID up with the
// ID of the method.
self.crate_context.tcx.provided_method_sources.borrow_mut()
.insert(new_did, trait_method.def_id);
}
}
fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
match self.inherent_impls.borrow().find(&base_def_id) {
Some(implementation_list) => {
implementation_list.borrow_mut().push(impl_def_id);
return;
}
None => {}
}
self.inherent_impls.borrow_mut().insert(
base_def_id,
Rc::new(RefCell::new(vec!(impl_def_id))));
}
fn add_trait_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
debug!("add_trait_impl: base_def_id={} impl_def_id={}",
base_def_id, impl_def_id);
ty::record_trait_implementation(self.crate_context.tcx,
base_def_id,
impl_def_id);
}
fn get_self_type_for_implementation(&self, impl_did: DefId)
-> Polytype {
self.crate_context.tcx.tcache.borrow().get_copy(&impl_did)
}
// Converts an implementation in the AST to a vector of items.
fn create_impl_from_item(&self, item: &Item) -> Vec<ImplOrTraitItemId> {
match item.node {
ItemImpl(_, ref trait_refs, _, ref ast_items) => {
let mut items: Vec<ImplOrTraitItemId> =
ast_items.iter()
.map(|ast_item| {
match *ast_item {
ast::MethodImplItem(ref ast_method) => {
MethodTraitItemId(
local_def(ast_method.id))
}
ast::TypeImplItem(ref typedef) => {
TypeTraitItemId(local_def(typedef.id))
}
}
}).collect();
for trait_ref in trait_refs.iter() {
let ty_trait_ref = ty::node_id_to_trait_ref(
self.crate_context.tcx,
trait_ref.ref_id);
self.instantiate_default_methods(local_def(item.id),
&*ty_trait_ref,
&mut items);
}
items
}
_ => {
self.crate_context.tcx.sess.span_bug(item.span,
"can't convert a non-impl to an impl");
}
}
}
// External crate handling
fn add_external_impl(&self,
impls_seen: &mut HashSet<DefId>,
impl_def_id: DefId) {
let tcx = self.crate_context.tcx;
let impl_items = csearch::get_impl_items(&tcx.sess.cstore,
impl_def_id);
// Make sure we don't visit the same implementation multiple times.
if !impls_seen.insert(impl_def_id) {
// Skip this one.
return
}
// Good. Continue.
let _ = lookup_item_type(tcx, impl_def_id);
let associated_traits = get_impl_trait(tcx, impl_def_id);
// Do a sanity check.
assert!(associated_traits.is_some());
// Record all the trait items.
for trait_ref in associated_traits.iter() {
self.add_trait_impl(trait_ref.def_id, impl_def_id);
}
// For any methods that use a default implementation, add them to
// the map. This is a bit unfortunate.
for item_def_id in impl_items.iter() {
let impl_item = ty::impl_or_trait_item(tcx, item_def_id.def_id());
match impl_item {
ty::MethodTraitItem(ref method) => {
for &source in method.provided_source.iter() {
tcx.provided_method_sources
.borrow_mut()
.insert(item_def_id.def_id(), source);
}
}
ty::TypeTraitItem(_) => {}
}
}
tcx.impl_items.borrow_mut().insert(impl_def_id, impl_items);
}
// Adds implementations and traits from external crates to the coherence
// info.
fn add_external_crates(&self) {
let mut impls_seen = HashSet::new();
let crate_store = &self.crate_context.tcx.sess.cstore;
crate_store.iter_crate_data(|crate_number, _crate_metadata| {
each_impl(crate_store, crate_number, |def_id| {
assert_eq!(crate_number, def_id.krate);
self.add_external_impl(&mut impls_seen, def_id)
})
})
}
//
// Destructors
//
fn populate_destructor_table(&self) {
let tcx = self.crate_context.tcx;
let drop_trait = match tcx.lang_items.drop_trait() {
Some(id) => id, None => { return }
};
let impl_items = tcx.impl_items.borrow();
let trait_impls = match tcx.trait_impls.borrow().find_copy(&drop_trait) {
None => return, // No types with (new-style) dtors present.
Some(found_impls) => found_impls
};
for &impl_did in trait_impls.borrow().iter() {
let items = impl_items.get(&impl_did);
if items.len() < 1 {
// We'll error out later. For now, just don't ICE.
continue;
}
let method_def_id = *items.get(0);
let self_type = self.get_self_type_for_implementation(impl_did);
match ty::get(self_type.ty).sty {
ty::ty_enum(type_def_id, _) |
ty::ty_struct(type_def_id, _) |
ty::ty_unboxed_closure(type_def_id, _) => {
tcx.destructor_for_type
.borrow_mut()
.insert(type_def_id, method_def_id.def_id());
tcx.destructors
.borrow_mut()
.insert(method_def_id.def_id());
}
_ => {
// Destructors only work on nominal types.
if impl_did.krate == ast::LOCAL_CRATE {
{
match tcx.map.find(impl_did.node) {
Some(ast_map::NodeItem(item)) => {
span_err!(tcx.sess, item.span, E0120,
"the Drop trait may only be implemented on structures");
}
_ => {
tcx.sess.bug("didn't find impl in ast \
map");
}
}
}
} else {
tcx.sess.bug("found external impl of Drop trait on \
something other than a struct");
}
}
}
}
}
}
pub fn | make_substs_for_receiver_types | identifier_name |
|
mod.rs | resolve_ivar) {
Ok(resulting_type) if !type_is_ty_var(resulting_type) => resulting_type,
_ => {
inference_context.tcx.sess.span_fatal(span,
"the type of this value must be known in order \
to determine the base type");
}
};
match get(resolved_type).sty {
ty_enum(..) | ty_struct(..) | ty_unboxed_closure(..) => {
debug!("(getting base type) found base type");
Some(resolved_type)
}
_ if ty::type_is_trait(resolved_type) => {
debug!("(getting base type) found base type (trait)");
Some(resolved_type)
}
ty_nil | ty_bot | ty_bool | ty_char | ty_int(..) | ty_uint(..) | ty_float(..) |
ty_str(..) | ty_vec(..) | ty_bare_fn(..) | ty_closure(..) | ty_tup(..) |
ty_infer(..) | ty_param(..) | ty_err | ty_open(..) | ty_uniq(_) |
ty_ptr(_) | ty_rptr(_, _) => {
debug!("(getting base type) no base type; found {}",
get(original_type).sty);
None
}
ty_trait(..) => fail!("should have been caught")
}
}
// Returns the def ID of the base type, if there is one.
fn get_base_type_def_id(inference_context: &InferCtxt,
span: Span,
original_type: t)
-> Option<DefId> {
match get_base_type(inference_context, span, original_type) {
None => None,
Some(base_type) => {
match get(base_type).sty {
ty_enum(def_id, _) |
ty_struct(def_id, _) |
ty_unboxed_closure(def_id, _) => {
Some(def_id)
}
ty_ptr(ty::mt {ty, ..}) |
ty_rptr(_, ty::mt {ty, ..}) |
ty_uniq(ty) => {
match ty::get(ty).sty {
ty_trait(box ty::TyTrait { def_id, .. }) => {
Some(def_id)
}
_ => {
fail!("get_base_type() returned a type that wasn't an \
enum, struct, or trait");
}
}
}
ty_trait(box ty::TyTrait { def_id, .. }) => {
Some(def_id)
}
_ => {
fail!("get_base_type() returned a type that wasn't an \
enum, struct, or trait");
}
}
}
}
}
struct CoherenceChecker<'a, 'tcx: 'a> {
crate_context: &'a CrateCtxt<'a, 'tcx>,
inference_context: InferCtxt<'a, 'tcx>,
inherent_impls: RefCell<DefIdMap<Rc<RefCell<Vec<ast::DefId>>>>>,
}
struct CoherenceCheckVisitor<'a, 'tcx: 'a> {
cc: &'a CoherenceChecker<'a, 'tcx>
}
impl<'a, 'tcx, 'v> visit::Visitor<'v> for CoherenceCheckVisitor<'a, 'tcx> {
fn visit_item(&mut self, item: &Item) {
//debug!("(checking coherence) item '{}'", token::get_ident(item.ident));
match item.node {
ItemImpl(_, ref opt_trait, _, _) => {
match opt_trait.clone() {
Some(opt_trait) => {
self.cc.check_implementation(item, [opt_trait]);
}
None => self.cc.check_implementation(item, [])
}
}
_ => {
// Nothing to do.
}
};
visit::walk_item(self, item);
}
}
impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> {
fn check(&self, krate: &Crate) {
// Check implementations and traits. This populates the tables
// containing the inherent methods and extension methods. It also
// builds up the trait inheritance table.
let mut visitor = CoherenceCheckVisitor { cc: self };
visit::walk_crate(&mut visitor, krate);
// Copy over the inherent impls we gathered up during the walk into
// the tcx.
let mut tcx_inherent_impls =
self.crate_context.tcx.inherent_impls.borrow_mut();
for (k, v) in self.inherent_impls.borrow().iter() {
tcx_inherent_impls.insert((*k).clone(),
Rc::new((*v.borrow()).clone()));
}
// Bring in external crates. It's fine for this to happen after the
// coherence checks, because we ensure by construction that no errors
// can happen at link time.
self.add_external_crates();
// Populate the table of destructors. It might seem a bit strange to
// do this here, but it's actually the most convenient place, since
// the coherence tables contain the trait -> type mappings.
self.populate_destructor_table();
}
fn check_implementation(&self,
item: &Item,
associated_traits: &[TraitRef]) {
let tcx = self.crate_context.tcx;
let impl_did = local_def(item.id);
let self_type = ty::lookup_item_type(tcx, impl_did);
// If there are no traits, then this implementation must have a
// base type.
let impl_items = self.create_impl_from_item(item);
for associated_trait in associated_traits.iter() {
let trait_ref = ty::node_id_to_trait_ref(
self.crate_context.tcx, associated_trait.ref_id);
debug!("(checking implementation) adding impl for trait '{}', item '{}'",
trait_ref.repr(self.crate_context.tcx),
token::get_ident(item.ident));
self.add_trait_impl(trait_ref.def_id, impl_did);
}
// Add the implementation to the mapping from implementation to base
// type def ID, if there is a base type for this implementation and
// the implementation does not have any associated traits.
match get_base_type_def_id(&self.inference_context,
item.span,
self_type.ty) {
None => {
// Nothing to do.
}
Some(base_type_def_id) => {
// FIXME: Gather up default methods?
if associated_traits.len() == 0 {
self.add_inherent_impl(base_type_def_id, impl_did);
}
}
}
tcx.impl_items.borrow_mut().insert(impl_did, impl_items);
}
// Creates default method IDs and performs type substitutions for an impl
// and trait pair. Then, for each provided method in the trait, inserts a
// `ProvidedMethodInfo` instance into the `provided_method_sources` map.
fn instantiate_default_methods(
&self,
impl_id: DefId,
trait_ref: &ty::TraitRef,
all_impl_items: &mut Vec<ImplOrTraitItemId>) {
let tcx = self.crate_context.tcx;
debug!("instantiate_default_methods(impl_id={}, trait_ref={})",
impl_id, trait_ref.repr(tcx));
let impl_poly_type = ty::lookup_item_type(tcx, impl_id);
let prov = ty::provided_trait_methods(tcx, trait_ref.def_id);
for trait_method in prov.iter() {
// Synthesize an ID.
let new_id = tcx.sess.next_node_id();
let new_did = local_def(new_id);
debug!("new_did={} trait_method={}", new_did, trait_method.repr(tcx));
// Create substitutions for the various trait parameters.
let new_method_ty =
Rc::new(subst_receiver_types_in_method_ty(
tcx,
impl_id,
&impl_poly_type,
trait_ref,
new_did,
&**trait_method,
Some(trait_method.def_id)));
debug!("new_method_ty={}", new_method_ty.repr(tcx));
all_impl_items.push(MethodTraitItemId(new_did));
// construct the polytype for the method based on the
// method_ty. it will have all the generics from the
// impl, plus its own.
let new_polytype = ty::Polytype {
generics: new_method_ty.generics.clone(),
ty: ty::mk_bare_fn(tcx, new_method_ty.fty.clone())
};
debug!("new_polytype={}", new_polytype.repr(tcx));
tcx.tcache.borrow_mut().insert(new_did, new_polytype);
tcx.impl_or_trait_items
.borrow_mut()
.insert(new_did, ty::MethodTraitItem(new_method_ty));
// Pair the new synthesized ID up with the
// ID of the method.
self.crate_context.tcx.provided_method_sources.borrow_mut()
.insert(new_did, trait_method.def_id);
}
}
fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
match self.inherent_impls.borrow().find(&base_def_id) {
Some(implementation_list) => {
implementation_list.borrow_mut().push(impl_def_id);
return;
}
None => {}
}
self.inherent_impls.borrow_mut().insert(
base_def_id,
Rc::new(RefCell::new(vec!(impl_def_id))));
}
fn add_trait_impl(&self, base_def_id: DefId, impl_def_id: DefId) {
debug!("add_trait_impl: base_def_id={} impl_def_id={}",
base_def_id, impl_def_id);
ty::record_trait_implementation(self.crate_context.tcx,
base_def_id,
impl_def_id);
}
fn get_self_type_for_implementation(&self, impl_did: DefId)
-> Polytype {
self.crate_context.tcx.tcache.borrow().get_copy(&impl_did)
}
// Converts an implementation in the AST to a vector of items.
fn create_impl_from_item(&self, item: &Item) -> Vec<ImplOrTraitItemId> {
match item.node {
ItemImpl(_, ref trait_refs, _, ref ast_items) => {
let mut items: Vec<ImplOrTraitItemId> =
ast_items.iter()
.map(|ast_item| {
match *ast_item {
ast::MethodImplItem(ref ast_method) => {
MethodTraitItemId(
local_def(ast_method.id))
}
ast::TypeImplItem(ref typedef) => {
TypeTraitItemId(local_def(typedef.id))
}
}
}).collect();
for trait_ref in trait_refs.iter() {
let ty_trait_ref = ty::node_id_to_trait_ref(
self.crate_context.tcx,
trait_ref.ref_id);
self.instantiate_default_methods(local_def(item.id),
&*ty_trait_ref,
&mut items);
}
items
}
_ => {
self.crate_context.tcx.sess.span_bug(item.span,
"can't convert a non-impl to an impl");
}
}
}
// External crate handling |
fn add_external_impl(&self,
impls_seen: &mut HashSet<DefId>,
impl_def_id: DefId) {
let tcx = self.crate_context.tcx;
let impl_items = csearch::get_impl_items(&tcx.sess.cstore,
impl_def_id);
// Make sure we don't visit the same implementation multiple times.
if !impls_seen.insert(impl_def_id) {
// Skip this one.
return
}
// Good. Continue.
let _ = lookup_item_type(tcx, impl_def_id);
let associated_traits = get_impl_trait(tcx, impl_def_id);
// Do a sanity check.
assert!(associated_traits.is_some());
// Record all the trait items.
for trait_ref in associated_traits.iter() {
self.add_trait_impl(trait_ref.def_id, impl_def_id);
}
// For any methods that use a default implementation, add them to
// the map. This is a bit unfortunate.
for item_def_id in impl_items.iter() {
let impl_item = ty::impl_or_trait_item(tcx, item_def_id.def_id());
match impl_item {
ty::MethodTraitItem(ref method) => {
for &source in method.provided_source.iter() {
tcx.provided_method_sources
.borrow_mut()
.insert(item_def_id.def_id(), source);
}
}
ty::TypeTraitItem(_) => {}
}
}
tcx.impl_items.borrow_mut().insert(impl_def_id, impl_items);
}
// Adds implementations and traits from external crates to the coherence
// info.
fn add_external_crates(&self) {
let mut impls_seen = HashSet::new();
let crate_store = &self.crate_context.tcx.sess.cstore;
crate_store.iter_crate_data(|crate_number, _crate_metadata| {
each_impl(crate_store, crate_number, |def_id| {
assert_eq!(crate_number, def_id.krate);
self.add_external_impl(&mut impls_seen, def_id)
})
})
}
//
// Destructors
//
fn populate_destructor_table(&self) {
let tcx = self.crate_context.tcx;
let drop_trait = match tcx.lang_items.drop_trait() {
Some(id) => id, None => { return }
};
let impl_items = tcx.impl_items.borrow();
let trait_impls = match tcx.trait_impls.borrow().find_copy(&drop_trait) {
None => return, // No types with (new-style) dtors present.
Some(found_impls) => found_impls
};
for &impl_did in trait_impls.borrow().iter() {
let items = impl_items.get(&impl_did);
if items.len() < 1 {
// We'll error out later. For now, just don't ICE.
continue;
}
let method_def_id = *items.get(0);
let self_type = self.get_self_type_for_implementation(impl_did);
match ty::get(self_type.ty).sty {
ty::ty_enum(type_def_id, _) |
ty::ty_struct(type_def_id, _) |
ty::ty_unboxed_closure(type_def_id, _) => {
tcx.destructor_for_type
.borrow_mut()
.insert(type_def_id, method_def_id.def_id());
tcx.destructors
.borrow_mut()
.insert(method_def_id.def_id());
}
_ => {
// Destructors only work on nominal types.
if impl_did.krate == ast::LOCAL_CRATE {
{
match tcx.map.find(impl_did.node) {
Some(ast_map::NodeItem(item)) => {
| random_line_split |
|
tee.rs | #![crate_name = "uu_tee"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Aleksander Bielawski <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use std::fs::OpenOptions;
use std::io::{copy, Error, ErrorKind, Read, Result, sink, stdin, stdout, Write};
use std::path::{Path, PathBuf};
static NAME: &'static str = "tee";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
match options(&args).and_then(exec) {
Ok(_) => 0,
Err(_) => 1
}
}
#[allow(dead_code)]
struct Options {
program: String,
append: bool,
ignore_interrupts: bool,
print_and_exit: Option<String>,
files: Vec<String>
}
fn options(args: &[String]) -> Result<Options> {
let mut opts = getopts::Options::new();
opts.optflag("a", "append", "append to the given FILEs, do not overwrite");
opts.optflag("i", "ignore-interrupts", "ignore interrupt signals");
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
opts.parse(&args[1..]).map_err(|e| Error::new(ErrorKind::Other, format!("{}", e))).and_then(|m| {
let version = format!("{} {}", NAME, VERSION);
let arguments = "[OPTION]... [FILE]...";
let brief = "Copy standard input to each FILE, and also to standard output.";
let comment = "If a FILE is -, copy again to standard output.";
let help = format!("{}\n\nUsage:\n {} {}\n\n{}\n{}",
version, NAME, arguments, opts.usage(brief),
comment);
let mut names: Vec<String> = m.free.clone().into_iter().collect();
names.push("-".to_owned());
let to_print = if m.opt_present("help") { Some(help) }
else if m.opt_present("version") { Some(version) }
else { None };
Ok(Options {
program: NAME.to_owned(),
append: m.opt_present("append"),
ignore_interrupts: m.opt_present("ignore-interrupts"),
print_and_exit: to_print,
files: names
})
}).map_err(|message| warn(format!("{}", message).as_ref()))
}
fn exec(options: Options) -> Result<()> {
match options.print_and_exit {
Some(text) => Ok(println!("{}", text)),
None => tee(options)
}
}
fn tee(options: Options) -> Result<()> {
let writers: Vec<Box<Write>> = options.files.clone().into_iter().map(|file| open(file, options.append)).collect();
let output = &mut MultiWriter { writers: writers };
let input = &mut NamedReader { inner: Box::new(stdin()) as Box<Read> };
if copy(input, output).is_err() || output.flush().is_err() {
Err(Error::new(ErrorKind::Other, ""))
} else {
Ok(())
}
}
fn open(name: String, append: bool) -> Box<Write> {
let is_stdout = name == "-";
let path = PathBuf::from(name);
let inner: Box<Write> = if is_stdout {
Box::new(stdout())
} else {
let mut options = OpenOptions::new();
let mode = if append { options.append(true) } else { options.truncate(true) };
match mode.write(true).create(true).open(path.as_path()) {
Ok(file) => Box::new(file),
Err(_) => Box::new(sink())
}
};
Box::new(NamedWriter { inner: inner, path: path }) as Box<Write>
}
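// Fans each write out to every destination writer (the FILE arguments plus stdout).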
struct MultiWriter {
writers: Vec<Box<Write>>
}
impl Write for MultiWriter {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
for writer in &mut self.writers {
try!(writer.write_all(buf));
}
Ok(buf.len())
}
fn flush(&mut self) -> Result<()> {
for writer in &mut self.writers {
try!(writer.flush());
}
Ok(())
}
}
struct NamedWriter {
inner: Box<Write>,
path: PathBuf
}
impl Write for NamedWriter {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
match self.inner.write(buf) {
Err(f) => {
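// On a write error, swap in a null sink so later writes to this file are
// silently dropped while the other outputs keep receiving data.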
self.inner = Box::new(sink()) as Box<Write>;
warn(format!("{}: {}", self.path.display(), f.to_string()).as_ref());
Err(f)
}
okay => okay
}
}
fn flush(&mut self) -> Result<()> {
match self.inner.flush() {
Err(f) => {
self.inner = Box::new(sink()) as Box<Write>;
warn(format!("{}: {}", self.path.display(), f.to_string()).as_ref());
Err(f)
}
okay => okay
}
}
}
struct NamedReader {
inner: Box<Read>
}
impl Read for NamedReader {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
match self.inner.read(buf) {
Err(f) => {
warn(format!("{}: {}", Path::new("stdin").display(), f.to_string()).as_ref());
Err(f)
}
okay => okay
}
}
}
fn | (message: &str) -> Error {
eprintln!("{}: {}", NAME, message);
Error::new(ErrorKind::Other, format!("{}: {}", NAME, message))
}
| warn | identifier_name |
tee.rs | #![crate_name = "uu_tee"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Aleksander Bielawski <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use std::fs::OpenOptions;
use std::io::{copy, Error, ErrorKind, Read, Result, sink, stdin, stdout, Write};
use std::path::{Path, PathBuf};
static NAME: &'static str = "tee";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
match options(&args).and_then(exec) {
Ok(_) => 0,
Err(_) => 1
}
}
#[allow(dead_code)]
struct Options {
program: String,
append: bool,
ignore_interrupts: bool,
print_and_exit: Option<String>,
files: Vec<String>
}
fn options(args: &[String]) -> Result<Options> {
let mut opts = getopts::Options::new();
opts.optflag("a", "append", "append to the given FILEs, do not overwrite");
opts.optflag("i", "ignore-interrupts", "ignore interrupt signals");
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
opts.parse(&args[1..]).map_err(|e| Error::new(ErrorKind::Other, format!("{}", e))).and_then(|m| {
let version = format!("{} {}", NAME, VERSION);
let arguments = "[OPTION]... [FILE]...";
let brief = "Copy standard input to each FILE, and also to standard output.";
let comment = "If a FILE is -, copy again to standard output.";
let help = format!("{}\n\nUsage:\n {} {}\n\n{}\n{}",
version, NAME, arguments, opts.usage(brief),
comment);
let mut names: Vec<String> = m.free.clone().into_iter().collect();
names.push("-".to_owned());
let to_print = if m.opt_present("help") { Some(help) }
else if m.opt_present("version") { Some(version) }
else { None };
Ok(Options {
program: NAME.to_owned(),
append: m.opt_present("append"),
ignore_interrupts: m.opt_present("ignore-interrupts"),
print_and_exit: to_print,
files: names
})
}).map_err(|message| warn(format!("{}", message).as_ref()))
}
fn exec(options: Options) -> Result<()> {
match options.print_and_exit {
Some(text) => Ok(println!("{}", text)),
None => tee(options)
}
}
fn tee(options: Options) -> Result<()> {
let writers: Vec<Box<Write>> = options.files.clone().into_iter().map(|file| open(file, options.append)).collect();
let output = &mut MultiWriter { writers: writers };
let input = &mut NamedReader { inner: Box::new(stdin()) as Box<Read> };
if copy(input, output).is_err() || output.flush().is_err() {
Err(Error::new(ErrorKind::Other, ""))
} else {
Ok(())
}
}
fn open(name: String, append: bool) -> Box<Write> {
let is_stdout = name == "-";
let path = PathBuf::from(name);
let inner: Box<Write> = if is_stdout {
Box::new(stdout())
} else {
let mut options = OpenOptions::new();
let mode = if append { options.append(true) } else { options.truncate(true) };
match mode.write(true).create(true).open(path.as_path()) {
Ok(file) => Box::new(file),
Err(_) => Box::new(sink())
}
};
Box::new(NamedWriter { inner: inner, path: path }) as Box<Write>
}
struct MultiWriter {
writers: Vec<Box<Write>>
}
impl Write for MultiWriter {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
for writer in &mut self.writers {
try!(writer.write_all(buf));
}
Ok(buf.len())
}
fn flush(&mut self) -> Result<()> |
}
struct NamedWriter {
inner: Box<Write>,
path: PathBuf
}
impl Write for NamedWriter {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
match self.inner.write(buf) {
Err(f) => {
self.inner = Box::new(sink()) as Box<Write>;
warn(format!("{}: {}", self.path.display(), f.to_string()).as_ref());
Err(f)
}
okay => okay
}
}
fn flush(&mut self) -> Result<()> {
match self.inner.flush() {
Err(f) => {
self.inner = Box::new(sink()) as Box<Write>;
warn(format!("{}: {}", self.path.display(), f.to_string()).as_ref());
Err(f)
}
okay => okay
}
}
}
struct NamedReader {
inner: Box<Read>
}
impl Read for NamedReader {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
match self.inner.read(buf) {
Err(f) => {
warn(format!("{}: {}", Path::new("stdin").display(), f.to_string()).as_ref());
Err(f)
}
okay => okay
}
}
}
fn warn(message: &str) -> Error {
eprintln!("{}: {}", NAME, message);
Error::new(ErrorKind::Other, format!("{}: {}", NAME, message))
}
| {
for writer in &mut self.writers {
try!(writer.flush());
}
Ok(())
} | identifier_body |
tee.rs | #![crate_name = "uu_tee"]
/*
* This file is part of the uutils coreutils package.
*
* (c) Aleksander Bielawski <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
extern crate getopts;
#[macro_use]
extern crate uucore;
use std::fs::OpenOptions;
use std::io::{copy, Error, ErrorKind, Read, Result, sink, stdin, stdout, Write};
use std::path::{Path, PathBuf};
static NAME: &'static str = "tee";
static VERSION: &'static str = env!("CARGO_PKG_VERSION");
pub fn uumain(args: Vec<String>) -> i32 {
match options(&args).and_then(exec) {
Ok(_) => 0,
Err(_) => 1
}
}
#[allow(dead_code)]
struct Options {
program: String,
append: bool,
ignore_interrupts: bool,
print_and_exit: Option<String>,
files: Vec<String>
}
fn options(args: &[String]) -> Result<Options> {
let mut opts = getopts::Options::new();
opts.optflag("a", "append", "append to the given FILEs, do not overwrite");
opts.optflag("i", "ignore-interrupts", "ignore interrupt signals");
opts.optflag("h", "help", "display this help and exit");
opts.optflag("V", "version", "output version information and exit");
opts.parse(&args[1..]).map_err(|e| Error::new(ErrorKind::Other, format!("{}", e))).and_then(|m| {
let version = format!("{} {}", NAME, VERSION);
let arguments = "[OPTION]... [FILE]...";
let brief = "Copy standard input to each FILE, and also to standard output.";
let comment = "If a FILE is -, copy again to standard output.";
let help = format!("{}\n\nUsage:\n {} {}\n\n{}\n{}",
version, NAME, arguments, opts.usage(brief),
comment);
let mut names: Vec<String> = m.free.clone().into_iter().collect();
names.push("-".to_owned());
let to_print = if m.opt_present("help") { Some(help) }
else if m.opt_present("version") { Some(version) }
else { None };
Ok(Options {
program: NAME.to_owned(),
append: m.opt_present("append"),
ignore_interrupts: m.opt_present("ignore-interrupts"),
print_and_exit: to_print,
files: names
})
}).map_err(|message| warn(format!("{}", message).as_ref()))
}
fn exec(options: Options) -> Result<()> {
match options.print_and_exit {
Some(text) => Ok(println!("{}", text)),
None => tee(options)
}
}
fn tee(options: Options) -> Result<()> {
let writers: Vec<Box<Write>> = options.files.clone().into_iter().map(|file| open(file, options.append)).collect();
let output = &mut MultiWriter { writers: writers };
let input = &mut NamedReader { inner: Box::new(stdin()) as Box<Read> };
if copy(input, output).is_err() || output.flush().is_err() {
Err(Error::new(ErrorKind::Other, ""))
} else {
Ok(())
}
}
fn open(name: String, append: bool) -> Box<Write> {
let is_stdout = name == "-";
let path = PathBuf::from(name);
let inner: Box<Write> = if is_stdout {
Box::new(stdout())
} else {
let mut options = OpenOptions::new();
let mode = if append { options.append(true) } else { options.truncate(true) };
match mode.write(true).create(true).open(path.as_path()) {
Ok(file) => Box::new(file),
Err(_) => Box::new(sink())
}
};
Box::new(NamedWriter { inner: inner, path: path }) as Box<Write>
}
struct MultiWriter {
writers: Vec<Box<Write>>
}
impl Write for MultiWriter {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
for writer in &mut self.writers {
try!(writer.write_all(buf));
}
Ok(buf.len())
}
fn flush(&mut self) -> Result<()> {
for writer in &mut self.writers {
try!(writer.flush());
}
Ok(())
}
}
struct NamedWriter {
inner: Box<Write>,
path: PathBuf
}
impl Write for NamedWriter {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
match self.inner.write(buf) {
Err(f) => {
self.inner = Box::new(sink()) as Box<Write>;
warn(format!("{}: {}", self.path.display(), f.to_string()).as_ref());
Err(f)
}
okay => okay
}
}
fn flush(&mut self) -> Result<()> {
match self.inner.flush() {
Err(f) => {
self.inner = Box::new(sink()) as Box<Write>;
warn(format!("{}: {}", self.path.display(), f.to_string()).as_ref());
Err(f)
}
okay => okay
}
}
}
struct NamedReader {
inner: Box<Read>
}
impl Read for NamedReader {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
match self.inner.read(buf) {
Err(f) => {
warn(format!("{}: {}", Path::new("stdin").display(), f.to_string()).as_ref());
Err(f)
} | }
}
fn warn(message: &str) -> Error {
eprintln!("{}: {}", NAME, message);
Error::new(ErrorKind::Other, format!("{}: {}", NAME, message))
} | okay => okay
} | random_line_split |
ptr.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
//! Raw, unsafe pointers, `*const T`, and `*mut T`
//!
//! *[See also the pointer primitive types](../primitive.pointer.html).*
#![stable(feature = "rust1", since = "1.0.0")]
use clone::Clone;
use intrinsics;
use ops::Deref;
use fmt;
use option::Option::{self, Some, None};
use marker::{PhantomData, Send, Sized, Sync};
use mem;
use nonzero::NonZero;
use cmp::{PartialEq, Eq, Ord, PartialOrd};
use cmp::Ordering::{self, Less, Equal, Greater};
// FIXME #19649: intrinsic docs don't render, so these have no docs :(
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy_nonoverlapping;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::write_bytes;
/// Creates a null raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn null<T>() -> *const T { 0 as *const T } | ///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn null_mut<T>() -> *mut T { 0 as *mut T }
/// Swaps the values at two mutable locations of the same type, without
/// deinitialising either. They may overlap, unlike `mem::swap` which is
/// otherwise equivalent.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
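///
/// # Examples
///
/// A minimal illustrative sketch: swapping two distinct values through raw
/// pointers.
///
/// ```
/// use std::ptr;
///
/// let mut x = 1;
/// let mut y = 2;
///
/// unsafe {
///     ptr::swap(&mut x, &mut y);
/// }
///
/// assert_eq!((x, y), (2, 1));
/// ```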
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with
let mut tmp: T = mem::uninitialized();
// Perform the swap
copy_nonoverlapping(x, &mut tmp, 1);
copy(y, x, 1); // `x` and `y` may overlap
copy_nonoverlapping(&tmp, y, 1);
    // `y` and `tmp` now point to the same thing, but we need to completely forget `tmp`
// because it's no longer relevant.
mem::forget(tmp);
}
/// Replaces the value at `dest` with `src`, returning the old
/// value, without dropping either.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
/// Otherwise, this operation is identical to `mem::replace`.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
mem::swap(&mut *dest, &mut src); // cannot overlap
src
}
/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `src` without preventing further usage of `src`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `src` is not used before the data is overwritten again (e.g. with `write`,
/// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
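///
/// # Examples
///
/// A minimal illustrative sketch: reading a `Copy` value through a raw
/// pointer leaves the original untouched and usable.
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
///     assert_eq!(std::ptr::read(y), 12);
/// }
/// ```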
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn read<T>(src: *const T) -> T {
let mut tmp: T = mem::uninitialized();
copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
/// Variant of read_and_zero that writes the specific drop-flag byte
/// (which may be more appropriate than zero).
#[inline(always)]
#[unstable(feature = "filling_drop",
reason = "may play a larger role in std::ptr future extensions",
issue = "5016")]
pub unsafe fn read_and_drop<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);
// Now mark `dest` as dropped:
write_bytes(dest, mem::POST_DROP_U8, 1);
tmp
}
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this operation is unsafe because it does
/// not drop the contents of `dst`. This could leak allocations or resources,
/// so care must be taken not to overwrite an object that should be dropped.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
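///
/// # Examples
///
/// A minimal illustrative sketch: overwriting a location and reading the new
/// value back (an `i32` has no destructor, so nothing is leaked here).
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
///
/// unsafe {
///     std::ptr::write(y, 5);
///     assert_eq!(std::ptr::read(y), 5);
/// }
/// ```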
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
intrinsics::move_val_init(&mut *dst, src)
}
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "const_ptr"]
impl<T:?Sized> *const T {
/// Returns true if the pointer is null.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is_null(self) -> bool where T: Sized {
self == 0 as *const T
}
/// Returns `None` if the pointer is null, or else returns a reference to
/// the value wrapped in `Some`.
///
/// # Safety
///
/// While this method and its mutable counterpart are useful for
/// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
#[unstable(feature = "ptr_as_ref",
reason = "Option is not clearly the right return type, and we \
may want to tie the return lifetime to a borrow of \
the raw pointer",
issue = "27780")]
#[inline]
pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized {
if self.is_null() {
None
} else {
Some(&**self)
}
}
/// Calculates the offset from a pointer. `count` is in units of T; e.g. a
/// `count` of 3 represents a pointer offset of `3 * sizeof::<T>()` bytes.
///
/// # Safety
///
/// Both the starting and resulting pointer must be either in bounds or one
/// byte past the end of an allocated object. If either pointer is out of
/// bounds or arithmetic overflow occurs then
/// any further use of the returned value will result in undefined behavior.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
intrinsics::offset(self, count)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "mut_ptr"]
impl<T:?Sized> *mut T {
/// Returns true if the pointer is null.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is_null(self) -> bool where T: Sized {
self == 0 as *mut T
}
/// Returns `None` if the pointer is null, or else returns a reference to
/// the value wrapped in `Some`.
///
/// # Safety
///
/// While this method and its mutable counterpart are useful for
/// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
#[unstable(feature = "ptr_as_ref",
reason = "Option is not clearly the right return type, and we \
may want to tie the return lifetime to a borrow of \
the raw pointer",
issue = "27780")]
#[inline]
pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized {
if self.is_null() {
None
} else {
Some(&**self)
}
}
/// Calculates the offset from a pointer. `count` is in units of T; e.g. a
/// `count` of 3 represents a pointer offset of `3 * sizeof::<T>()` bytes.
///
/// # Safety
///
/// The offset must be in-bounds of the object, or one-byte-past-the-end.
/// Otherwise `offset` invokes Undefined Behaviour, regardless of whether
/// the pointer is used.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
intrinsics::offset(self, count) as *mut T
}
/// Returns `None` if the pointer is null, or else returns a mutable
/// reference to the value wrapped in `Some`.
///
/// # Safety
///
/// As with `as_ref`, this is unsafe because it cannot verify the validity
/// of the returned pointer.
#[unstable(feature = "ptr_as_ref",
reason = "return value does not necessarily convey all possible \
information",
issue = "27780")]
#[inline]
pub unsafe fn as_mut<'a>(&self) -> Option<&'a mut T> where T: Sized {
if self.is_null() {
None
} else {
Some(&mut **self)
}
}
}
// Equality for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialEq for *const T {
#[inline]
fn eq(&self, other: &*const T) -> bool { *self == *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Eq for *const T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialEq for *mut T {
#[inline]
fn eq(&self, other: &*mut T) -> bool { *self == *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Eq for *mut T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Clone for *const T {
#[inline]
fn clone(&self) -> *const T {
*self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Clone for *mut T {
#[inline]
fn clone(&self) -> *mut T {
*self
}
}
// Equality for extern "C" fn pointers
mod externfnpointers {
use cmp::PartialEq;
#[stable(feature = "rust1", since = "1.0.0")]
impl<_R> PartialEq for extern "C" fn() -> _R {
#[inline]
fn eq(&self, other: &extern "C" fn() -> _R) -> bool {
let self_ = *self as usize;
let other_ = *other as usize;
self_ == other_
}
}
macro_rules! fnptreq {
($($p:ident),*) => {
#[stable(feature = "rust1", since = "1.0.0")]
impl<_R,$($p),*> PartialEq for extern "C" fn($($p),*) -> _R {
#[inline]
fn eq(&self, other: &extern "C" fn($($p),*) -> _R) -> bool {
let self_ = *self as usize;
let other_ = *other as usize;
self_ == other_
}
}
}
}
fnptreq! { A }
fnptreq! { A,B }
fnptreq! { A,B,C }
fnptreq! { A,B,C,D }
fnptreq! { A,B,C,D,E }
}
// Comparison for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Ord for *const T {
#[inline]
fn cmp(&self, other: &*const T) -> Ordering {
if self < other {
Less
} else if self == other {
Equal
} else {
Greater
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialOrd for *const T {
#[inline]
fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
Some(self.cmp(other))
}
#[inline]
fn lt(&self, other: &*const T) -> bool { *self < *other }
#[inline]
fn le(&self, other: &*const T) -> bool { *self <= *other }
#[inline]
fn gt(&self, other: &*const T) -> bool { *self > *other }
#[inline]
fn ge(&self, other: &*const T) -> bool { *self >= *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Ord for *mut T {
#[inline]
fn cmp(&self, other: &*mut T) -> Ordering {
if self < other {
Less
} else if self == other {
Equal
} else {
Greater
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialOrd for *mut T {
#[inline]
fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
Some(self.cmp(other))
}
#[inline]
fn lt(&self, other: &*mut T) -> bool { *self < *other }
#[inline]
fn le(&self, other: &*mut T) -> bool { *self <= *other }
#[inline]
fn gt(&self, other: &*mut T) -> bool { *self > *other }
#[inline]
fn ge(&self, other: &*mut T) -> bool { *self >= *other }
}
/// A wrapper around a raw `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. This in turn implies that the
/// `Unique<T>` is `Send`/`Sync` if `T` is `Send`/`Sync`, unlike a raw
/// `*mut T` (which conveys no particular ownership semantics). It
/// also implies that the referent of the pointer should not be
/// modified without a unique path to the `Unique` reference. Useful
/// for building abstractions like `Vec<T>` or `Box<T>`, which
/// internally use raw pointers to manage the memory that they own.
#[unstable(feature = "unique", reason = "needs an RFC to flesh out design",
issue = "27730")]
pub struct Unique<T:?Sized> {
pointer: NonZero<*const T>,
// NOTE: this marker has no consequences for variance, but is necessary
// for dropck to understand that we logically own a `T`.
//
// For details, see:
// https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
_marker: PhantomData<T>,
}
/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Send +?Sized> Send for Unique<T> { }
/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Sync +?Sized> Sync for Unique<T> { }
#[unstable(feature = "unique", issue = "27730")]
impl<T:?Sized> Unique<T> {
/// Creates a new `Unique`.
pub unsafe fn new(ptr: *mut T) -> Unique<T> {
Unique { pointer: NonZero::new(ptr), _marker: PhantomData }
}
/// Dereferences the content.
pub unsafe fn get(&self) -> &T {
&**self.pointer
}
/// Mutably dereferences the content.
pub unsafe fn get_mut(&mut self) -> &mut T {
&mut ***self
}
}
#[unstable(feature = "unique", issue= "27730")]
impl<T:?Sized> Deref for Unique<T> {
type Target = *mut T;
#[inline]
fn deref<'a>(&'a self) -> &'a *mut T {
unsafe { mem::transmute(&*self.pointer) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Pointer for Unique<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&*self.pointer, f)
}
} |
/// Creates a null mutable raw pointer. | random_line_split |
ptr.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
//! Raw, unsafe pointers, `*const T`, and `*mut T`
//!
//! *[See also the pointer primitive types](../primitive.pointer.html).*
#![stable(feature = "rust1", since = "1.0.0")]
use clone::Clone;
use intrinsics;
use ops::Deref;
use fmt;
use option::Option::{self, Some, None};
use marker::{PhantomData, Send, Sized, Sync};
use mem;
use nonzero::NonZero;
use cmp::{PartialEq, Eq, Ord, PartialOrd};
use cmp::Ordering::{self, Less, Equal, Greater};
// FIXME #19649: intrinsic docs don't render, so these have no docs :(
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy_nonoverlapping;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::write_bytes;
/// Creates a null raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn null<T>() -> *const T { 0 as *const T }
/// Creates a null mutable raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn null_mut<T>() -> *mut T { 0 as *mut T }
/// Swaps the values at two mutable locations of the same type, without
/// deinitialising either. They may overlap, unlike `mem::swap` which is
/// otherwise equivalent.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with
let mut tmp: T = mem::uninitialized();
// Perform the swap
copy_nonoverlapping(x, &mut tmp, 1);
copy(y, x, 1); // `x` and `y` may overlap
copy_nonoverlapping(&tmp, y, 1);
    // `y` and `tmp` now point to the same thing, but we need to completely forget `tmp`
// because it's no longer relevant.
mem::forget(tmp);
}
/// Replaces the value at `dest` with `src`, returning the old
/// value, without dropping either.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
/// Otherwise, this operation is identical to `mem::replace`.
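///
/// # Examples
///
/// A minimal illustrative sketch: swapping in a new value and getting the old
/// one back.
///
/// ```
/// let mut v = 1;
///
/// let old = unsafe { std::ptr::replace(&mut v, 2) };
///
/// assert_eq!(old, 1);
/// assert_eq!(v, 2);
/// ```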
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
mem::swap(&mut *dest, &mut src); // cannot overlap
src
}
/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `src` without preventing further usage of `src`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `src` is not used before the data is overwritten again (e.g. with `write`,
/// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn read<T>(src: *const T) -> T {
let mut tmp: T = mem::uninitialized();
copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
/// Variant of read_and_zero that writes the specific drop-flag byte
/// (which may be more appropriate than zero).
#[inline(always)]
#[unstable(feature = "filling_drop",
reason = "may play a larger role in std::ptr future extensions",
issue = "5016")]
pub unsafe fn read_and_drop<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);
// Now mark `dest` as dropped:
write_bytes(dest, mem::POST_DROP_U8, 1);
tmp
}
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this operation is unsafe because it does
/// not drop the contents of `dst`. This could leak allocations or resources,
/// so care must be taken not to overwrite an object that should be dropped.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
intrinsics::move_val_init(&mut *dst, src)
}
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "const_ptr"]
impl<T:?Sized> *const T {
/// Returns true if the pointer is null.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is_null(self) -> bool where T: Sized {
self == 0 as *const T
}
/// Returns `None` if the pointer is null, or else returns a reference to
/// the value wrapped in `Some`.
///
/// # Safety
///
/// While this method and its mutable counterpart are useful for
/// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
#[unstable(feature = "ptr_as_ref",
reason = "Option is not clearly the right return type, and we \
may want to tie the return lifetime to a borrow of \
the raw pointer",
issue = "27780")]
#[inline]
pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized {
if self.is_null() {
None
} else {
Some(&**self)
}
}
/// Calculates the offset from a pointer. `count` is in units of T; e.g. a
/// `count` of 3 represents a pointer offset of `3 * sizeof::<T>()` bytes.
///
/// # Safety
///
/// Both the starting and resulting pointer must be either in bounds or one
/// byte past the end of an allocated object. If either pointer is out of
/// bounds or arithmetic overflow occurs then
/// any further use of the returned value will result in undefined behavior.
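    ///
    /// # Examples
    ///
    /// A minimal illustrative sketch: stepping through the bytes of a slice
    /// via its raw pointer, with every offset staying in bounds.
    ///
    /// ```
    /// let s: &[u8] = b"123";
    /// let ptr: *const u8 = s.as_ptr();
    ///
    /// unsafe {
    ///     assert_eq!(*ptr.offset(1), b'2');
    ///     assert_eq!(*ptr.offset(2), b'3');
    /// }
    /// ```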
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
intrinsics::offset(self, count)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "mut_ptr"]
impl<T:?Sized> *mut T {
/// Returns true if the pointer is null.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is_null(self) -> bool where T: Sized |
/// Returns `None` if the pointer is null, or else returns a reference to
/// the value wrapped in `Some`.
///
/// # Safety
///
/// While this method and its mutable counterpart are useful for
/// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
#[unstable(feature = "ptr_as_ref",
reason = "Option is not clearly the right return type, and we \
may want to tie the return lifetime to a borrow of \
the raw pointer",
issue = "27780")]
#[inline]
pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized {
if self.is_null() {
None
} else {
Some(&**self)
}
}
/// Calculates the offset from a pointer. `count` is in units of T; e.g. a
/// `count` of 3 represents a pointer offset of `3 * sizeof::<T>()` bytes.
///
/// # Safety
///
/// The offset must be in-bounds of the object, or one-byte-past-the-end.
/// Otherwise `offset` invokes Undefined Behaviour, regardless of whether
/// the pointer is used.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
intrinsics::offset(self, count) as *mut T
}
/// Returns `None` if the pointer is null, or else returns a mutable
/// reference to the value wrapped in `Some`.
///
/// # Safety
///
/// As with `as_ref`, this is unsafe because it cannot verify the validity
/// of the returned pointer.
#[unstable(feature = "ptr_as_ref",
reason = "return value does not necessarily convey all possible \
information",
issue = "27780")]
#[inline]
pub unsafe fn as_mut<'a>(&self) -> Option<&'a mut T> where T: Sized {
if self.is_null() {
None
} else {
Some(&mut **self)
}
}
}
// Equality for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialEq for *const T {
#[inline]
fn eq(&self, other: &*const T) -> bool { *self == *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Eq for *const T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialEq for *mut T {
#[inline]
fn eq(&self, other: &*mut T) -> bool { *self == *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Eq for *mut T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Clone for *const T {
#[inline]
fn clone(&self) -> *const T {
*self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Clone for *mut T {
#[inline]
fn clone(&self) -> *mut T {
*self
}
}
// Equality for extern "C" fn pointers
mod externfnpointers {
use cmp::PartialEq;
#[stable(feature = "rust1", since = "1.0.0")]
impl<_R> PartialEq for extern "C" fn() -> _R {
#[inline]
fn eq(&self, other: &extern "C" fn() -> _R) -> bool {
let self_ = *self as usize;
let other_ = *other as usize;
self_ == other_
}
}
macro_rules! fnptreq {
($($p:ident),*) => {
#[stable(feature = "rust1", since = "1.0.0")]
impl<_R,$($p),*> PartialEq for extern "C" fn($($p),*) -> _R {
#[inline]
fn eq(&self, other: &extern "C" fn($($p),*) -> _R) -> bool {
let self_ = *self as usize;
let other_ = *other as usize;
self_ == other_
}
}
}
}
fnptreq! { A }
fnptreq! { A,B }
fnptreq! { A,B,C }
fnptreq! { A,B,C,D }
fnptreq! { A,B,C,D,E }
}
// Comparison for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Ord for *const T {
#[inline]
fn cmp(&self, other: &*const T) -> Ordering {
if self < other {
Less
} else if self == other {
Equal
} else {
Greater
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialOrd for *const T {
#[inline]
fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
Some(self.cmp(other))
}
#[inline]
fn lt(&self, other: &*const T) -> bool { *self < *other }
#[inline]
fn le(&self, other: &*const T) -> bool { *self <= *other }
#[inline]
fn gt(&self, other: &*const T) -> bool { *self > *other }
#[inline]
fn ge(&self, other: &*const T) -> bool { *self >= *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Ord for *mut T {
#[inline]
fn cmp(&self, other: &*mut T) -> Ordering {
if self < other {
Less
} else if self == other {
Equal
} else {
Greater
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialOrd for *mut T {
#[inline]
fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
Some(self.cmp(other))
}
#[inline]
fn lt(&self, other: &*mut T) -> bool { *self < *other }
#[inline]
fn le(&self, other: &*mut T) -> bool { *self <= *other }
#[inline]
fn gt(&self, other: &*mut T) -> bool { *self > *other }
#[inline]
fn ge(&self, other: &*mut T) -> bool { *self >= *other }
}
/// A wrapper around a raw `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. This in turn implies that the
/// `Unique<T>` is `Send`/`Sync` if `T` is `Send`/`Sync`, unlike a raw
/// `*mut T` (which conveys no particular ownership semantics). It
/// also implies that the referent of the pointer should not be
/// modified without a unique path to the `Unique` reference. Useful
/// for building abstractions like `Vec<T>` or `Box<T>`, which
/// internally use raw pointers to manage the memory that they own.
#[unstable(feature = "unique", reason = "needs an RFC to flesh out design",
issue = "27730")]
pub struct Unique<T:?Sized> {
pointer: NonZero<*const T>,
// NOTE: this marker has no consequences for variance, but is necessary
// for dropck to understand that we logically own a `T`.
//
// For details, see:
// https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
_marker: PhantomData<T>,
}
/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Send +?Sized> Send for Unique<T> { }
/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Sync +?Sized> Sync for Unique<T> { }
#[unstable(feature = "unique", issue = "27730")]
impl<T:?Sized> Unique<T> {
/// Creates a new `Unique`.
pub unsafe fn new(ptr: *mut T) -> Unique<T> {
Unique { pointer: NonZero::new(ptr), _marker: PhantomData }
}
/// Dereferences the content.
pub unsafe fn get(&self) -> &T {
&**self.pointer
}
/// Mutably dereferences the content.
pub unsafe fn get_mut(&mut self) -> &mut T {
&mut ***self
}
}
#[unstable(feature = "unique", issue= "27730")]
impl<T:?Sized> Deref for Unique<T> {
type Target = *mut T;
#[inline]
fn deref<'a>(&'a self) -> &'a *mut T {
unsafe { mem::transmute(&*self.pointer) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Pointer for Unique<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&*self.pointer, f)
}
}
| {
self == 0 as *mut T
} | identifier_body |
ptr.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
//! Raw, unsafe pointers, `*const T`, and `*mut T`
//!
//! *[See also the pointer primitive types](../primitive.pointer.html).*
#![stable(feature = "rust1", since = "1.0.0")]
use clone::Clone;
use intrinsics;
use ops::Deref;
use fmt;
use option::Option::{self, Some, None};
use marker::{PhantomData, Send, Sized, Sync};
use mem;
use nonzero::NonZero;
use cmp::{PartialEq, Eq, Ord, PartialOrd};
use cmp::Ordering::{self, Less, Equal, Greater};
// FIXME #19649: intrinsic docs don't render, so these have no docs :(
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy_nonoverlapping;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::write_bytes;
/// Creates a null raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn null<T>() -> *const T { 0 as *const T }
/// Creates a null mutable raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn null_mut<T>() -> *mut T { 0 as *mut T }
/// Swaps the values at two mutable locations of the same type, without
/// deinitialising either. They may overlap, unlike `mem::swap` which is
/// otherwise equivalent.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with
let mut tmp: T = mem::uninitialized();
// Perform the swap
copy_nonoverlapping(x, &mut tmp, 1);
copy(y, x, 1); // `x` and `y` may overlap
copy_nonoverlapping(&tmp, y, 1);
    // `y` and `tmp` now point to the same thing, but we need to completely forget `tmp`
// because it's no longer relevant.
mem::forget(tmp);
}
/// Replaces the value at `dest` with `src`, returning the old
/// value, without dropping either.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
/// Otherwise, this operation is identical to `mem::replace`.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
mem::swap(&mut *dest, &mut src); // cannot overlap
src
}
/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `src` without preventing further usage of `src`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `src` is not used before the data is overwritten again (e.g. with `write`,
/// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn read<T>(src: *const T) -> T {
let mut tmp: T = mem::uninitialized();
copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
/// Variant of read_and_zero that writes the specific drop-flag byte
/// (which may be more appropriate than zero).
#[inline(always)]
#[unstable(feature = "filling_drop",
reason = "may play a larger role in std::ptr future extensions",
issue = "5016")]
pub unsafe fn read_and_drop<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);
// Now mark `dest` as dropped:
write_bytes(dest, mem::POST_DROP_U8, 1);
tmp
}
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this operation is unsafe because it does
/// not drop the contents of `dst`. This could leak allocations or resources,
/// so care must be taken not to overwrite an object that should be dropped.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
intrinsics::move_val_init(&mut *dst, src)
}
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "const_ptr"]
impl<T:?Sized> *const T {
/// Returns true if the pointer is null.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is_null(self) -> bool where T: Sized {
self == 0 as *const T
}
/// Returns `None` if the pointer is null, or else returns a reference to
/// the value wrapped in `Some`.
///
/// # Safety
///
/// While this method and its mutable counterpart are useful for
/// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
#[unstable(feature = "ptr_as_ref",
reason = "Option is not clearly the right return type, and we \
may want to tie the return lifetime to a borrow of \
the raw pointer",
issue = "27780")]
#[inline]
pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized {
if self.is_null() {
None
} else {
Some(&**self)
}
}
/// Calculates the offset from a pointer. `count` is in units of T; e.g. a
/// `count` of 3 represents a pointer offset of `3 * sizeof::<T>()` bytes.
///
/// # Safety
///
/// Both the starting and resulting pointer must be either in bounds or one
/// byte past the end of an allocated object. If either pointer is out of
/// bounds or arithmetic overflow occurs then
/// any further use of the returned value will result in undefined behavior.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
intrinsics::offset(self, count)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "mut_ptr"]
impl<T:?Sized> *mut T {
/// Returns true if the pointer is null.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is_null(self) -> bool where T: Sized {
self == 0 as *mut T
}
/// Returns `None` if the pointer is null, or else returns a reference to
/// the value wrapped in `Some`.
///
/// # Safety
///
/// While this method and its mutable counterpart are useful for
/// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
#[unstable(feature = "ptr_as_ref",
reason = "Option is not clearly the right return type, and we \
may want to tie the return lifetime to a borrow of \
the raw pointer",
issue = "27780")]
#[inline]
pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized {
if self.is_null() {
None
} else {
Some(&**self)
}
}
/// Calculates the offset from a pointer. `count` is in units of T; e.g. a
/// `count` of 3 represents a pointer offset of `3 * sizeof::<T>()` bytes.
///
/// # Safety
///
/// The offset must be in-bounds of the object, or one-byte-past-the-end.
/// Otherwise `offset` invokes Undefined Behaviour, regardless of whether
/// the pointer is used.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
intrinsics::offset(self, count) as *mut T
}
/// Returns `None` if the pointer is null, or else returns a mutable
/// reference to the value wrapped in `Some`.
///
/// # Safety
///
/// As with `as_ref`, this is unsafe because it cannot verify the validity
/// of the returned pointer.
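    ///
    /// # Examples
    ///
    /// A minimal illustrative sketch: mutating through the checked mutable
    /// reference returned by `as_mut` (the feature gate reflects that this
    /// method is still unstable here).
    ///
    /// ```
    /// #![feature(ptr_as_ref)]
    ///
    /// let mut value = 1;
    /// let ptr: *mut i32 = &mut value;
    ///
    /// unsafe {
    ///     if let Some(v) = ptr.as_mut() {
    ///         *v = 2;
    ///     }
    /// }
    ///
    /// assert_eq!(value, 2);
    /// ```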
#[unstable(feature = "ptr_as_ref",
reason = "return value does not necessarily convey all possible \
information",
issue = "27780")]
#[inline]
pub unsafe fn as_mut<'a>(&self) -> Option<&'a mut T> where T: Sized {
if self.is_null() {
None
} else {
Some(&mut **self)
}
}
}
// Equality for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialEq for *const T {
#[inline]
fn eq(&self, other: &*const T) -> bool { *self == *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Eq for *const T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialEq for *mut T {
#[inline]
fn eq(&self, other: &*mut T) -> bool { *self == *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Eq for *mut T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Clone for *const T {
#[inline]
fn clone(&self) -> *const T {
*self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Clone for *mut T {
#[inline]
fn clone(&self) -> *mut T {
*self
}
}
// Equality for extern "C" fn pointers
mod externfnpointers {
use cmp::PartialEq;
#[stable(feature = "rust1", since = "1.0.0")]
impl<_R> PartialEq for extern "C" fn() -> _R {
#[inline]
fn eq(&self, other: &extern "C" fn() -> _R) -> bool {
let self_ = *self as usize;
let other_ = *other as usize;
self_ == other_
}
}
macro_rules! fnptreq {
($($p:ident),*) => {
#[stable(feature = "rust1", since = "1.0.0")]
impl<_R,$($p),*> PartialEq for extern "C" fn($($p),*) -> _R {
#[inline]
fn eq(&self, other: &extern "C" fn($($p),*) -> _R) -> bool {
let self_ = *self as usize;
let other_ = *other as usize;
self_ == other_
}
}
}
}
fnptreq! { A }
fnptreq! { A,B }
fnptreq! { A,B,C }
fnptreq! { A,B,C,D }
fnptreq! { A,B,C,D,E }
}
// Comparison for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Ord for *const T {
#[inline]
fn cmp(&self, other: &*const T) -> Ordering {
if self < other | else if self == other {
Equal
} else {
Greater
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialOrd for *const T {
#[inline]
fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
Some(self.cmp(other))
}
#[inline]
fn lt(&self, other: &*const T) -> bool { *self < *other }
#[inline]
fn le(&self, other: &*const T) -> bool { *self <= *other }
#[inline]
fn gt(&self, other: &*const T) -> bool { *self > *other }
#[inline]
fn ge(&self, other: &*const T) -> bool { *self >= *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Ord for *mut T {
#[inline]
fn cmp(&self, other: &*mut T) -> Ordering {
if self < other {
Less
} else if self == other {
Equal
} else {
Greater
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialOrd for *mut T {
#[inline]
fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
Some(self.cmp(other))
}
#[inline]
fn lt(&self, other: &*mut T) -> bool { *self < *other }
#[inline]
fn le(&self, other: &*mut T) -> bool { *self <= *other }
#[inline]
fn gt(&self, other: &*mut T) -> bool { *self > *other }
#[inline]
fn ge(&self, other: &*mut T) -> bool { *self >= *other }
}
/// A wrapper around a raw `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. This in turn implies that the
/// `Unique<T>` is `Send`/`Sync` if `T` is `Send`/`Sync`, unlike a raw
/// `*mut T` (which conveys no particular ownership semantics). It
/// also implies that the referent of the pointer should not be
/// modified without a unique path to the `Unique` reference. Useful
/// for building abstractions like `Vec<T>` or `Box<T>`, which
/// internally use raw pointers to manage the memory that they own.
#[unstable(feature = "unique", reason = "needs an RFC to flesh out design",
issue = "27730")]
pub struct Unique<T:?Sized> {
pointer: NonZero<*const T>,
// NOTE: this marker has no consequences for variance, but is necessary
// for dropck to understand that we logically own a `T`.
//
// For details, see:
// https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
_marker: PhantomData<T>,
}
/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Send +?Sized> Send for Unique<T> { }
/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Sync +?Sized> Sync for Unique<T> { }
#[unstable(feature = "unique", issue = "27730")]
impl<T:?Sized> Unique<T> {
/// Creates a new `Unique`.
pub unsafe fn new(ptr: *mut T) -> Unique<T> {
Unique { pointer: NonZero::new(ptr), _marker: PhantomData }
}
/// Dereferences the content.
pub unsafe fn get(&self) -> &T {
&**self.pointer
}
/// Mutably dereferences the content.
pub unsafe fn get_mut(&mut self) -> &mut T {
&mut ***self
}
}
#[unstable(feature = "unique", issue= "27730")]
impl<T:?Sized> Deref for Unique<T> {
type Target = *mut T;
#[inline]
fn deref<'a>(&'a self) -> &'a *mut T {
unsafe { mem::transmute(&*self.pointer) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Pointer for Unique<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&*self.pointer, f)
}
}
| {
Less
} | conditional_block |
ptr.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory
//! Raw, unsafe pointers, `*const T`, and `*mut T`
//!
//! *[See also the pointer primitive types](../primitive.pointer.html).*
#![stable(feature = "rust1", since = "1.0.0")]
use clone::Clone;
use intrinsics;
use ops::Deref;
use fmt;
use option::Option::{self, Some, None};
use marker::{PhantomData, Send, Sized, Sync};
use mem;
use nonzero::NonZero;
use cmp::{PartialEq, Eq, Ord, PartialOrd};
use cmp::Ordering::{self, Less, Equal, Greater};
// FIXME #19649: intrinsic docs don't render, so these have no docs :(
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy_nonoverlapping;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::copy;
#[stable(feature = "rust1", since = "1.0.0")]
pub use intrinsics::write_bytes;
/// Creates a null raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *const i32 = ptr::null();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn null<T>() -> *const T { 0 as *const T }
/// Creates a null mutable raw pointer.
///
/// # Examples
///
/// ```
/// use std::ptr;
///
/// let p: *mut i32 = ptr::null_mut();
/// assert!(p.is_null());
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn null_mut<T>() -> *mut T { 0 as *mut T }
/// Swaps the values at two mutable locations of the same type, without
/// deinitialising either. They may overlap, unlike `mem::swap` which is
/// otherwise equivalent.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with
let mut tmp: T = mem::uninitialized();
// Perform the swap
copy_nonoverlapping(x, &mut tmp, 1);
copy(y, x, 1); // `x` and `y` may overlap
copy_nonoverlapping(&tmp, y, 1);
    // `y` and `tmp` now point to the same thing, but we need to completely forget `tmp`
// because it's no longer relevant.
mem::forget(tmp);
}
/// Replaces the value at `dest` with `src`, returning the old
/// value, without dropping either.
///
/// # Safety
///
/// This is only unsafe because it accepts a raw pointer.
/// Otherwise, this operation is identical to `mem::replace`.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
mem::swap(&mut *dest, &mut src); // cannot overlap
src
}
/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this is unsafe because it semantically
/// moves the value out of `src` without preventing further usage of `src`.
/// If `T` is not `Copy`, then care must be taken to ensure that the value at
/// `src` is not used before the data is overwritten again (e.g. with `write`,
/// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use
/// because it will attempt to drop the value previously at `*src`.
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn read<T>(src: *const T) -> T {
let mut tmp: T = mem::uninitialized();
copy_nonoverlapping(src, &mut tmp, 1);
tmp
}
/// Variant of read_and_zero that writes the specific drop-flag byte
/// (which may be more appropriate than zero).
#[inline(always)]
#[unstable(feature = "filling_drop",
reason = "may play a larger role in std::ptr future extensions",
issue = "5016")]
pub unsafe fn read_and_drop<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);
// Now mark `dest` as dropped:
write_bytes(dest, mem::POST_DROP_U8, 1);
tmp
}
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
/// # Safety
///
/// Beyond accepting a raw pointer, this operation is unsafe because it does
/// not drop the contents of `dst`. This could leak allocations or resources,
/// so care must be taken not to overwrite an object that should be dropped.
///
/// This is appropriate for initializing uninitialized memory, or overwriting
/// memory that has previously been `read` from.
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn write<T>(dst: *mut T, src: T) {
intrinsics::move_val_init(&mut *dst, src)
}
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "const_ptr"]
impl<T:?Sized> *const T {
/// Returns true if the pointer is null.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is_null(self) -> bool where T: Sized {
self == 0 as *const T
}
/// Returns `None` if the pointer is null, or else returns a reference to
/// the value wrapped in `Some`.
///
/// # Safety
///
/// While this method and its mutable counterpart are useful for
/// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
#[unstable(feature = "ptr_as_ref",
reason = "Option is not clearly the right return type, and we \
may want to tie the return lifetime to a borrow of \
the raw pointer",
issue = "27780")]
#[inline]
pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized {
if self.is_null() {
None
} else {
Some(&**self)
}
}
/// Calculates the offset from a pointer. `count` is in units of T; e.g. a
/// `count` of 3 represents a pointer offset of `3 * sizeof::<T>()` bytes.
///
/// # Safety
///
/// Both the starting and resulting pointer must be either in bounds or one
/// byte past the end of an allocated object. If either pointer is out of
/// bounds or arithmetic overflow occurs then
/// any further use of the returned value will result in undefined behavior.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn offset(self, count: isize) -> *const T where T: Sized {
intrinsics::offset(self, count)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
#[lang = "mut_ptr"]
impl<T:?Sized> *mut T {
/// Returns true if the pointer is null.
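    ///
    /// # Examples
    ///
    /// A minimal illustrative sketch: a pointer derived from a live value is
    /// not null, while `ptr::null_mut` is.
    ///
    /// ```
    /// let mut value = 10u32;
    /// let p: *mut u32 = &mut value;
    ///
    /// assert!(!p.is_null());
    /// assert!(std::ptr::null_mut::<u32>().is_null());
    /// ```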
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn is_null(self) -> bool where T: Sized {
self == 0 as *mut T
}
/// Returns `None` if the pointer is null, or else returns a reference to
/// the value wrapped in `Some`.
///
/// # Safety
///
/// While this method and its mutable counterpart are useful for
/// null-safety, it is important to note that this is still an unsafe
/// operation because the returned value could be pointing to invalid
/// memory.
#[unstable(feature = "ptr_as_ref",
reason = "Option is not clearly the right return type, and we \
may want to tie the return lifetime to a borrow of \
the raw pointer",
issue = "27780")]
#[inline]
pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized {
if self.is_null() {
None
} else {
Some(&**self)
}
}
/// Calculates the offset from a pointer. `count` is in units of T; e.g. a
/// `count` of 3 represents a pointer offset of `3 * sizeof::<T>()` bytes.
///
/// # Safety
///
/// The offset must be in-bounds of the object, or one-byte-past-the-end.
/// Otherwise `offset` invokes Undefined Behaviour, regardless of whether
/// the pointer is used.
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized {
intrinsics::offset(self, count) as *mut T
}
/// Returns `None` if the pointer is null, or else returns a mutable
/// reference to the value wrapped in `Some`.
///
/// # Safety
///
/// As with `as_ref`, this is unsafe because it cannot verify the validity
/// of the returned pointer.
#[unstable(feature = "ptr_as_ref",
reason = "return value does not necessarily convey all possible \
information",
issue = "27780")]
#[inline]
pub unsafe fn as_mut<'a>(&self) -> Option<&'a mut T> where T: Sized {
if self.is_null() {
None
} else {
Some(&mut **self)
}
}
}
// Equality for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialEq for *const T {
#[inline]
fn eq(&self, other: &*const T) -> bool { *self == *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Eq for *const T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialEq for *mut T {
#[inline]
fn eq(&self, other: &*mut T) -> bool { *self == *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Eq for *mut T {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Clone for *const T {
#[inline]
fn clone(&self) -> *const T {
*self
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Clone for *mut T {
#[inline]
fn clone(&self) -> *mut T {
*self
}
}
// Equality for extern "C" fn pointers
mod externfnpointers {
use cmp::PartialEq;
#[stable(feature = "rust1", since = "1.0.0")]
impl<_R> PartialEq for extern "C" fn() -> _R {
#[inline]
fn eq(&self, other: &extern "C" fn() -> _R) -> bool {
let self_ = *self as usize;
let other_ = *other as usize;
self_ == other_
}
}
macro_rules! fnptreq {
($($p:ident),*) => {
#[stable(feature = "rust1", since = "1.0.0")]
impl<_R,$($p),*> PartialEq for extern "C" fn($($p),*) -> _R {
#[inline]
fn eq(&self, other: &extern "C" fn($($p),*) -> _R) -> bool {
let self_ = *self as usize;
let other_ = *other as usize;
self_ == other_
}
}
}
}
fnptreq! { A }
fnptreq! { A,B }
fnptreq! { A,B,C }
fnptreq! { A,B,C,D }
fnptreq! { A,B,C,D,E }
}
// Comparison for pointers
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Ord for *const T {
#[inline]
fn cmp(&self, other: &*const T) -> Ordering {
if self < other {
Less
} else if self == other {
Equal
} else {
Greater
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialOrd for *const T {
#[inline]
fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
Some(self.cmp(other))
}
#[inline]
fn lt(&self, other: &*const T) -> bool { *self < *other }
#[inline]
fn le(&self, other: &*const T) -> bool { *self <= *other }
#[inline]
fn gt(&self, other: &*const T) -> bool { *self > *other }
#[inline]
fn | (&self, other: &*const T) -> bool { *self >= *other }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> Ord for *mut T {
#[inline]
fn cmp(&self, other: &*mut T) -> Ordering {
if self < other {
Less
} else if self == other {
Equal
} else {
Greater
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T:?Sized> PartialOrd for *mut T {
#[inline]
fn partial_cmp(&self, other: &*mut T) -> Option<Ordering> {
Some(self.cmp(other))
}
#[inline]
fn lt(&self, other: &*mut T) -> bool { *self < *other }
#[inline]
fn le(&self, other: &*mut T) -> bool { *self <= *other }
#[inline]
fn gt(&self, other: &*mut T) -> bool { *self > *other }
#[inline]
fn ge(&self, other: &*mut T) -> bool { *self >= *other }
}
/// A wrapper around a raw `*mut T` that indicates that the possessor
/// of this wrapper owns the referent. This in turn implies that the
/// `Unique<T>` is `Send`/`Sync` if `T` is `Send`/`Sync`, unlike a raw
/// `*mut T` (which conveys no particular ownership semantics). It
/// also implies that the referent of the pointer should not be
/// modified without a unique path to the `Unique` reference. Useful
/// for building abstractions like `Vec<T>` or `Box<T>`, which
/// internally use raw pointers to manage the memory that they own.
#[unstable(feature = "unique", reason = "needs an RFC to flesh out design",
issue = "27730")]
pub struct Unique<T:?Sized> {
pointer: NonZero<*const T>,
// NOTE: this marker has no consequences for variance, but is necessary
// for dropck to understand that we logically own a `T`.
//
// For details, see:
// https://github.com/rust-lang/rfcs/blob/master/text/0769-sound-generic-drop.md#phantom-data
_marker: PhantomData<T>,
}
/// `Unique` pointers are `Send` if `T` is `Send` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Send +?Sized> Send for Unique<T> { }
/// `Unique` pointers are `Sync` if `T` is `Sync` because the data they
/// reference is unaliased. Note that this aliasing invariant is
/// unenforced by the type system; the abstraction using the
/// `Unique` must enforce it.
#[unstable(feature = "unique", issue = "27730")]
unsafe impl<T: Sync +?Sized> Sync for Unique<T> { }
#[unstable(feature = "unique", issue = "27730")]
impl<T:?Sized> Unique<T> {
/// Creates a new `Unique`.
pub unsafe fn new(ptr: *mut T) -> Unique<T> {
Unique { pointer: NonZero::new(ptr), _marker: PhantomData }
}
/// Dereferences the content.
pub unsafe fn get(&self) -> &T {
&**self.pointer
}
/// Mutably dereferences the content.
pub unsafe fn get_mut(&mut self) -> &mut T {
&mut ***self
}
}
#[unstable(feature = "unique", issue= "27730")]
impl<T:?Sized> Deref for Unique<T> {
type Target = *mut T;
#[inline]
fn deref<'a>(&'a self) -> &'a *mut T {
unsafe { mem::transmute(&*self.pointer) }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> fmt::Pointer for Unique<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Pointer::fmt(&*self.pointer, f)
}
}
| ge | identifier_name |
lib.rs | #![crate_name = "nickel"]
#![comment = "An expressjs-inspired web framework for Rust"]
#![license = "MIT"]
#![crate_type = "rlib"]
#![feature(macro_rules, phase, slicing_syntax)]
//!Nickel is supposed to be a simple and lightweight foundation for web applications written in Rust. Its API is inspired by the popular express framework for JavaScript.
//! | //!* Easy handlers: A handler is just a function that takes a `Request` and `ResponseWriter`
//!* Variables in routes. Just write `my/route/:someid`
//!* Easy parameter access: `request.params.get(&"someid")`
//!* simple wildcard routes: `/some/*/route`
//!* double wildcard routes: `/a/**/route`
//!* middleware
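//!
//!A minimal illustrative sketch (hypothetical usage pieced together from the
//!feature list above; the handler and exact method signatures are assumptions,
//!so the snippet is not compiled):
//!
//!```ignore
//!let mut server = Nickel::new();
//!
//!// the ":someid" segment is later available via request.params.get(&"someid")
//!server.get("/some/:someid", my_handler);
//!```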
extern crate time;
extern crate http;
extern crate serialize;
extern crate regex;
extern crate anymap;
extern crate url;
extern crate mustache;
extern crate groupable;
#[phase(plugin)]
extern crate regex_macros;
#[phase(plugin, link)]
extern crate log;
pub use nickel::Nickel;
pub use request::Request;
pub use response::Response;
pub use middleware::{Action, Continue, Halt, Middleware, ErrorHandler, MiddlewareResult};
pub use static_files_handler::StaticFilesHandler;
pub use favicon_handler::FaviconHandler;
pub use default_error_handler::DefaultErrorHandler;
pub use json_body_parser::{JsonBodyParser, JsonBody};
pub use query_string::{QueryStringParser, QueryString};
pub use router::{Router, Route, RouteResult, RequestHandler, HttpRouter};
pub use nickel_error::{ NickelError, NickelErrorKind, ErrorWithStatusCode, UserDefinedError, Other };
pub use mimes::get_media_type;
pub mod router;
mod server;
mod nickel;
mod request;
mod response;
mod middleware;
mod favicon_handler;
mod static_files_handler;
mod json_body_parser;
mod mimes;
mod query_string;
mod urlencoded;
mod nickel_error;
mod default_error_handler; | //!Some of the features are:
//! | random_line_split |
env.rs | // Copyright 2017 Jeremy Wall <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains code for converting a UCG Val into the environment variable output target.
use std::io::Write as IOWrite;
use std::rc::Rc;
use crate::build::Val;
use crate::convert::traits::{ConvertResult, Converter};
/// EnvConverter implements the conversion logic for converting a Val into a
/// set of environment variables.
pub struct EnvConverter {}
impl EnvConverter {
pub fn new() -> Self {
EnvConverter {}
}
fn convert_tuple(&self, flds: &Vec<(String, Rc<Val>)>, w: &mut dyn IOWrite) -> ConvertResult {
for &(ref name, ref val) in flds.iter() {
if val.is_tuple() {
eprintln!("Skipping embedded tuple...");
return Ok(());
}
if let &Val::Empty = val.as_ref() {
eprintln!("Skipping empty variable: {}", name);
return Ok(());
}
write!(w, "{}=", name)?;
self.write(&val, w)?;
}
Ok(())
}
fn convert_list(&self, _items: &Vec<Rc<Val>>, _w: &mut dyn IOWrite) -> ConvertResult {
eprintln!("Skipping List...");
Ok(())
}
fn write(&self, v: &Val, w: &mut dyn IOWrite) -> ConvertResult {
match v {
&Val::Empty => {
// Empty is a noop.
return Ok(());
}
&Val::Boolean(b) => {
write!(w, "{}\n", if b { "true" } else { "false" })?;
}
&Val::Float(ref f) => {
write!(w, "{}\n", f)?;
}
&Val::Int(ref i) => {
write!(w, "{}\n", i)?;
}
&Val::Str(ref s) => {
write!(w, "'{}'\n", s)?;
}
&Val::List(ref items) => {
self.convert_list(items, w)?;
}
&Val::Tuple(ref flds) => {
self.convert_tuple(flds, w)?;
}
&Val::Env(ref _fs) => {
// This is ignored
eprintln!("Skipping env...");
}
}
Ok(())
}
}
impl Converter for EnvConverter {
fn convert(&self, v: Rc<Val>, mut w: &mut dyn IOWrite) -> ConvertResult {
self.write(&v, &mut w)
}
fn | (&self) -> String {
String::from("env")
}
fn description(&self) -> String {
"Convert ucg Vals into environment variables.".to_string()
}
#[allow(unused_must_use)]
fn help(&self) -> String {
include_str!("env_help.txt").to_string()
}
}
| file_ext | identifier_name |
env.rs | // Copyright 2017 Jeremy Wall <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains code for converting a UCG Val into the environment variable output target.
use std::io::Write as IOWrite;
use std::rc::Rc;
use crate::build::Val;
use crate::convert::traits::{ConvertResult, Converter};
/// EnvConverter implements the conversion logic for converting a Val into a
/// set of environment variables.
pub struct EnvConverter {}
impl EnvConverter {
pub fn new() -> Self {
EnvConverter {}
}
fn convert_tuple(&self, flds: &Vec<(String, Rc<Val>)>, w: &mut dyn IOWrite) -> ConvertResult {
for &(ref name, ref val) in flds.iter() {
if val.is_tuple() |
if let &Val::Empty = val.as_ref() {
eprintln!("Skipping empty variable: {}", name);
return Ok(());
}
write!(w, "{}=", name)?;
self.write(&val, w)?;
}
Ok(())
}
fn convert_list(&self, _items: &Vec<Rc<Val>>, _w: &mut dyn IOWrite) -> ConvertResult {
eprintln!("Skipping List...");
Ok(())
}
fn write(&self, v: &Val, w: &mut dyn IOWrite) -> ConvertResult {
match v {
&Val::Empty => {
// Empty is a noop.
return Ok(());
}
&Val::Boolean(b) => {
write!(w, "{}\n", if b { "true" } else { "false" })?;
}
&Val::Float(ref f) => {
write!(w, "{}\n", f)?;
}
&Val::Int(ref i) => {
write!(w, "{}\n", i)?;
}
&Val::Str(ref s) => {
write!(w, "'{}'\n", s)?;
}
&Val::List(ref items) => {
self.convert_list(items, w)?;
}
&Val::Tuple(ref flds) => {
self.convert_tuple(flds, w)?;
}
&Val::Env(ref _fs) => {
// This is ignored
eprintln!("Skipping env...");
}
}
Ok(())
}
}
impl Converter for EnvConverter {
fn convert(&self, v: Rc<Val>, mut w: &mut dyn IOWrite) -> ConvertResult {
self.write(&v, &mut w)
}
fn file_ext(&self) -> String {
String::from("env")
}
fn description(&self) -> String {
"Convert ucg Vals into environment variables.".to_string()
}
#[allow(unused_must_use)]
fn help(&self) -> String {
include_str!("env_help.txt").to_string()
}
}
| {
eprintln!("Skipping embedded tuple...");
return Ok(());
} | conditional_block |
env.rs | // Copyright 2017 Jeremy Wall <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at | // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains code for converting a UCG Val into the environment variable output target.
use std::io::Write as IOWrite;
use std::rc::Rc;
use crate::build::Val;
use crate::convert::traits::{ConvertResult, Converter};
/// EnvConverter implements the conversion logic for converting a Val into a
/// set of environment variables.
pub struct EnvConverter {}
impl EnvConverter {
pub fn new() -> Self {
EnvConverter {}
}
fn convert_tuple(&self, flds: &Vec<(String, Rc<Val>)>, w: &mut dyn IOWrite) -> ConvertResult {
for &(ref name, ref val) in flds.iter() {
if val.is_tuple() {
eprintln!("Skipping embedded tuple...");
return Ok(());
}
if let &Val::Empty = val.as_ref() {
eprintln!("Skipping empty variable: {}", name);
return Ok(());
}
write!(w, "{}=", name)?;
self.write(&val, w)?;
}
Ok(())
}
fn convert_list(&self, _items: &Vec<Rc<Val>>, _w: &mut dyn IOWrite) -> ConvertResult {
eprintln!("Skipping List...");
Ok(())
}
fn write(&self, v: &Val, w: &mut dyn IOWrite) -> ConvertResult {
match v {
&Val::Empty => {
// Empty is a noop.
return Ok(());
}
&Val::Boolean(b) => {
write!(w, "{}\n", if b { "true" } else { "false" })?;
}
&Val::Float(ref f) => {
write!(w, "{}\n", f)?;
}
&Val::Int(ref i) => {
write!(w, "{}\n", i)?;
}
&Val::Str(ref s) => {
write!(w, "'{}'\n", s)?;
}
&Val::List(ref items) => {
self.convert_list(items, w)?;
}
&Val::Tuple(ref flds) => {
self.convert_tuple(flds, w)?;
}
&Val::Env(ref _fs) => {
// This is ignored
eprintln!("Skipping env...");
}
}
Ok(())
}
}
impl Converter for EnvConverter {
fn convert(&self, v: Rc<Val>, mut w: &mut dyn IOWrite) -> ConvertResult {
self.write(&v, &mut w)
}
fn file_ext(&self) -> String {
String::from("env")
}
fn description(&self) -> String {
"Convert ucg Vals into environment variables.".to_string()
}
#[allow(unused_must_use)]
fn help(&self) -> String {
include_str!("env_help.txt").to_string()
}
} | //
// http://www.apache.org/licenses/LICENSE-2.0
// | random_line_split |
env.rs | // Copyright 2017 Jeremy Wall <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Contains code for converting a UCG Val into the environment variable output target.
use std::io::Write as IOWrite;
use std::rc::Rc;
use crate::build::Val;
use crate::convert::traits::{ConvertResult, Converter};
/// EnvConverter implements the conversion logic for converting a Val into a
/// set of environment variables.
pub struct EnvConverter {}
impl EnvConverter {
pub fn new() -> Self {
EnvConverter {}
}
fn convert_tuple(&self, flds: &Vec<(String, Rc<Val>)>, w: &mut dyn IOWrite) -> ConvertResult |
fn convert_list(&self, _items: &Vec<Rc<Val>>, _w: &mut dyn IOWrite) -> ConvertResult {
eprintln!("Skipping List...");
Ok(())
}
fn write(&self, v: &Val, w: &mut dyn IOWrite) -> ConvertResult {
match v {
&Val::Empty => {
// Empty is a noop.
return Ok(());
}
&Val::Boolean(b) => {
write!(w, "{}\n", if b { "true" } else { "false" })?;
}
&Val::Float(ref f) => {
write!(w, "{}\n", f)?;
}
&Val::Int(ref i) => {
write!(w, "{}\n", i)?;
}
&Val::Str(ref s) => {
write!(w, "'{}'\n", s)?;
}
&Val::List(ref items) => {
self.convert_list(items, w)?;
}
&Val::Tuple(ref flds) => {
self.convert_tuple(flds, w)?;
}
&Val::Env(ref _fs) => {
// This is ignored
eprintln!("Skipping env...");
}
}
Ok(())
}
}
impl Converter for EnvConverter {
fn convert(&self, v: Rc<Val>, mut w: &mut dyn IOWrite) -> ConvertResult {
self.write(&v, &mut w)
}
fn file_ext(&self) -> String {
String::from("env")
}
fn description(&self) -> String {
"Convert ucg Vals into environment variables.".to_string()
}
#[allow(unused_must_use)]
fn help(&self) -> String {
include_str!("env_help.txt").to_string()
}
}
| {
for &(ref name, ref val) in flds.iter() {
if val.is_tuple() {
eprintln!("Skipping embedded tuple...");
return Ok(());
}
if let &Val::Empty = val.as_ref() {
eprintln!("Skipping empty variable: {}", name);
return Ok(());
}
write!(w, "{}=", name)?;
self.write(&val, w)?;
}
Ok(())
} | identifier_body |
progpoint.rs | //! Program points.
use entity::EntityRef;
use ir::{Ebb, Inst, ValueDef};
use std::fmt;
use std::u32;
use std::cmp;
/// A `ProgramPoint` represents a position in a function where the live range of an SSA value can
/// begin or end. It can be either:
///
/// 1. An instruction or
/// 2. An EBB header.
///
/// This corresponds more or less to the lines in the textual representation of Cretonne IL.
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct ProgramPoint(u32);
impl From<Inst> for ProgramPoint {
fn from(inst: Inst) -> ProgramPoint {
let idx = inst.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
ProgramPoint((idx * 2) as u32)
}
}
impl From<Ebb> for ProgramPoint {
fn from(ebb: Ebb) -> ProgramPoint {
let idx = ebb.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
ProgramPoint((idx * 2 + 1) as u32)
}
}
impl From<ValueDef> for ProgramPoint {
fn from(def: ValueDef) -> ProgramPoint {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
}
}
}
/// An expanded program point directly exposes the variants, but takes twice the space to
/// represent.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum ExpandedProgramPoint {
/// An instruction in the function.
Inst(Inst),
/// An EBB header.
Ebb(Ebb),
}
impl ExpandedProgramPoint {
/// Get the instruction we know is inside.
pub fn unwrap_inst(self) -> Inst {
match self {
ExpandedProgramPoint::Inst(x) => x,
ExpandedProgramPoint::Ebb(x) => panic!("expected inst: {}", x),
}
}
}
impl From<Inst> for ExpandedProgramPoint {
fn from(inst: Inst) -> ExpandedProgramPoint {
ExpandedProgramPoint::Inst(inst)
}
}
impl From<Ebb> for ExpandedProgramPoint {
fn from(ebb: Ebb) -> ExpandedProgramPoint {
ExpandedProgramPoint::Ebb(ebb)
}
}
impl From<ValueDef> for ExpandedProgramPoint {
fn from(def: ValueDef) -> ExpandedProgramPoint {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
}
}
}
impl From<ProgramPoint> for ExpandedProgramPoint {
fn | (pp: ProgramPoint) -> ExpandedProgramPoint {
if pp.0 & 1 == 0 {
ExpandedProgramPoint::Inst(Inst::new((pp.0 / 2) as usize))
} else {
ExpandedProgramPoint::Ebb(Ebb::new((pp.0 / 2) as usize))
}
}
}
impl fmt::Display for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ExpandedProgramPoint::Inst(x) => write!(f, "{}", x),
ExpandedProgramPoint::Ebb(x) => write!(f, "{}", x),
}
}
}
impl fmt::Display for ProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let epp: ExpandedProgramPoint = (*self).into();
epp.fmt(f)
}
}
impl fmt::Debug for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ExpandedProgramPoint({})", self)
}
}
impl fmt::Debug for ProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ProgramPoint({})", self)
}
}
/// Context for ordering program points.
///
/// `ProgramPoint` objects don't carry enough information to be ordered independently, they need a
/// context providing the program order.
pub trait ProgramOrder {
/// Compare the program points `a` and `b` relative to this program order.
///
/// Return `Less` if `a` appears in the program before `b`.
///
/// This is declared as a generic such that it can be called with `Inst` and `Ebb` arguments
/// directly. Depending on the implementation, there is a good chance performance will be
/// improved for those cases where the type of either argument is known statically.
fn cmp<A, B>(&self, a: A, b: B) -> cmp::Ordering
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>;
/// Is the range from `inst` to `ebb` just the gap between consecutive EBBs?
///
/// This returns true if `inst` is the terminator in the EBB immediately before `ebb`.
fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool;
}
#[cfg(test)]
mod tests {
use super::*;
use entity::EntityRef;
use ir::{Inst, Ebb};
use std::string::ToString;
#[test]
fn convert() {
let i5 = Inst::new(5);
let b3 = Ebb::new(3);
let pp1: ProgramPoint = i5.into();
let pp2: ProgramPoint = b3.into();
assert_eq!(pp1.to_string(), "inst5");
assert_eq!(pp2.to_string(), "ebb3");
}
}
| from | identifier_name |
progpoint.rs | //! Program points.
use entity::EntityRef;
use ir::{Ebb, Inst, ValueDef};
use std::fmt;
use std::u32;
use std::cmp;
/// A `ProgramPoint` represents a position in a function where the live range of an SSA value can
/// begin or end. It can be either:
///
/// 1. An instruction or
/// 2. An EBB header.
///
/// This corresponds more or less to the lines in the textual representation of Cretonne IL.
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct ProgramPoint(u32);
impl From<Inst> for ProgramPoint {
fn from(inst: Inst) -> ProgramPoint {
let idx = inst.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
ProgramPoint((idx * 2) as u32)
}
}
impl From<Ebb> for ProgramPoint {
fn from(ebb: Ebb) -> ProgramPoint {
let idx = ebb.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
ProgramPoint((idx * 2 + 1) as u32)
}
}
impl From<ValueDef> for ProgramPoint {
fn from(def: ValueDef) -> ProgramPoint {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
}
}
}
/// An expanded program point directly exposes the variants, but takes twice the space to
/// represent.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum ExpandedProgramPoint {
/// An instruction in the function.
Inst(Inst),
/// An EBB header.
Ebb(Ebb),
}
impl ExpandedProgramPoint {
/// Get the instruction we know is inside.
pub fn unwrap_inst(self) -> Inst {
match self {
ExpandedProgramPoint::Inst(x) => x,
ExpandedProgramPoint::Ebb(x) => panic!("expected inst: {}", x),
}
}
}
impl From<Inst> for ExpandedProgramPoint {
fn from(inst: Inst) -> ExpandedProgramPoint {
ExpandedProgramPoint::Inst(inst)
}
}
impl From<Ebb> for ExpandedProgramPoint {
fn from(ebb: Ebb) -> ExpandedProgramPoint {
ExpandedProgramPoint::Ebb(ebb)
}
}
impl From<ValueDef> for ExpandedProgramPoint {
fn from(def: ValueDef) -> ExpandedProgramPoint {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
}
}
}
impl From<ProgramPoint> for ExpandedProgramPoint {
fn from(pp: ProgramPoint) -> ExpandedProgramPoint {
if pp.0 & 1 == 0 {
ExpandedProgramPoint::Inst(Inst::new((pp.0 / 2) as usize))
} else {
ExpandedProgramPoint::Ebb(Ebb::new((pp.0 / 2) as usize))
}
}
}
impl fmt::Display for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ExpandedProgramPoint::Inst(x) => write!(f, "{}", x),
ExpandedProgramPoint::Ebb(x) => write!(f, "{}", x),
}
}
}
impl fmt::Display for ProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let epp: ExpandedProgramPoint = (*self).into();
epp.fmt(f)
}
}
impl fmt::Debug for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ExpandedProgramPoint({})", self)
}
}
impl fmt::Debug for ProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ProgramPoint({})", self)
}
}
/// Context for ordering program points.
///
/// `ProgramPoint` objects don't carry enough information to be ordered independently, they need a
/// context providing the program order.
pub trait ProgramOrder {
/// Compare the program points `a` and `b` relative to this program order. | /// This is declared as a generic such that it can be called with `Inst` and `Ebb` arguments
/// directly. Depending on the implementation, there is a good chance performance will be
/// improved for those cases where the type of either argument is known statically.
fn cmp<A, B>(&self, a: A, b: B) -> cmp::Ordering
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>;
/// Is the range from `inst` to `ebb` just the gap between consecutive EBBs?
///
/// This returns true if `inst` is the terminator in the EBB immediately before `ebb`.
fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool;
}
#[cfg(test)]
mod tests {
use super::*;
use entity::EntityRef;
use ir::{Inst, Ebb};
use std::string::ToString;
#[test]
fn convert() {
let i5 = Inst::new(5);
let b3 = Ebb::new(3);
let pp1: ProgramPoint = i5.into();
let pp2: ProgramPoint = b3.into();
assert_eq!(pp1.to_string(), "inst5");
assert_eq!(pp2.to_string(), "ebb3");
}
} | ///
/// Return `Less` if `a` appears in the program before `b`.
/// | random_line_split |
progpoint.rs | //! Program points.
use entity::EntityRef;
use ir::{Ebb, Inst, ValueDef};
use std::fmt;
use std::u32;
use std::cmp;
/// A `ProgramPoint` represents a position in a function where the live range of an SSA value can
/// begin or end. It can be either:
///
/// 1. An instruction or
/// 2. An EBB header.
///
/// This corresponds more or less to the lines in the textual representation of Cretonne IL.
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct ProgramPoint(u32);
impl From<Inst> for ProgramPoint {
fn from(inst: Inst) -> ProgramPoint {
let idx = inst.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
ProgramPoint((idx * 2) as u32)
}
}
impl From<Ebb> for ProgramPoint {
fn from(ebb: Ebb) -> ProgramPoint {
let idx = ebb.index();
debug_assert!(idx < (u32::MAX / 2) as usize);
ProgramPoint((idx * 2 + 1) as u32)
}
}
impl From<ValueDef> for ProgramPoint {
fn from(def: ValueDef) -> ProgramPoint {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
}
}
}
/// An expanded program point directly exposes the variants, but takes twice the space to
/// represent.
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum ExpandedProgramPoint {
/// An instruction in the function.
Inst(Inst),
/// An EBB header.
Ebb(Ebb),
}
impl ExpandedProgramPoint {
/// Get the instruction we know is inside.
pub fn unwrap_inst(self) -> Inst {
match self {
ExpandedProgramPoint::Inst(x) => x,
ExpandedProgramPoint::Ebb(x) => panic!("expected inst: {}", x),
}
}
}
impl From<Inst> for ExpandedProgramPoint {
fn from(inst: Inst) -> ExpandedProgramPoint {
ExpandedProgramPoint::Inst(inst)
}
}
impl From<Ebb> for ExpandedProgramPoint {
fn from(ebb: Ebb) -> ExpandedProgramPoint {
ExpandedProgramPoint::Ebb(ebb)
}
}
impl From<ValueDef> for ExpandedProgramPoint {
fn from(def: ValueDef) -> ExpandedProgramPoint {
match def {
ValueDef::Result(inst, _) => inst.into(),
ValueDef::Param(ebb, _) => ebb.into(),
}
}
}
impl From<ProgramPoint> for ExpandedProgramPoint {
fn from(pp: ProgramPoint) -> ExpandedProgramPoint {
if pp.0 & 1 == 0 {
ExpandedProgramPoint::Inst(Inst::new((pp.0 / 2) as usize))
} else {
ExpandedProgramPoint::Ebb(Ebb::new((pp.0 / 2) as usize))
}
}
}
impl fmt::Display for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ExpandedProgramPoint::Inst(x) => write!(f, "{}", x),
ExpandedProgramPoint::Ebb(x) => write!(f, "{}", x),
}
}
}
impl fmt::Display for ProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let epp: ExpandedProgramPoint = (*self).into();
epp.fmt(f)
}
}
impl fmt::Debug for ExpandedProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl fmt::Debug for ProgramPoint {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ProgramPoint({})", self)
}
}
/// Context for ordering program points.
///
/// `ProgramPoint` objects don't carry enough information to be ordered independently, they need a
/// context providing the program order.
pub trait ProgramOrder {
/// Compare the program points `a` and `b` relative to this program order.
///
/// Return `Less` if `a` appears in the program before `b`.
///
/// This is declared as a generic such that it can be called with `Inst` and `Ebb` arguments
/// directly. Depending on the implementation, there is a good chance performance will be
/// improved for those cases where the type of either argument is known statically.
fn cmp<A, B>(&self, a: A, b: B) -> cmp::Ordering
where
A: Into<ExpandedProgramPoint>,
B: Into<ExpandedProgramPoint>;
/// Is the range from `inst` to `ebb` just the gap between consecutive EBBs?
///
/// This returns true if `inst` is the terminator in the EBB immediately before `ebb`.
fn is_ebb_gap(&self, inst: Inst, ebb: Ebb) -> bool;
}
#[cfg(test)]
mod tests {
use super::*;
use entity::EntityRef;
use ir::{Inst, Ebb};
use std::string::ToString;
#[test]
fn convert() {
let i5 = Inst::new(5);
let b3 = Ebb::new(3);
let pp1: ProgramPoint = i5.into();
let pp2: ProgramPoint = b3.into();
assert_eq!(pp1.to_string(), "inst5");
assert_eq!(pp2.to_string(), "ebb3");
}
}
| {
write!(f, "ExpandedProgramPoint({})", self)
} | identifier_body |
panic.rs | use pi::uart::MiniUart;
use core::fmt::{Write, Arguments};
#[lang = "eh_personality"]
pub extern "C" fn eh_personality() {}
#[lang = "panic_fmt"]
#[no_mangle]
pub extern fn panic_fmt(msg: Arguments, file: &'static str, line: u32, col: u32) ->! {
let mut uart = MiniUart::new();
let _ = uart.write_str("\n\n---------- KERNEL PANIC ----------\n");
let _ = uart.write_fmt(format_args!("FILE: {}\nLINE: {}\n COL: {}\n", file, line, col));
let _ = uart.write_fmt(format_args!("{}", msg));
loop { unsafe { asm!("wfe") } }
}
#[no_mangle]
#[lang = "oom"]
pub unsafe extern "C" fn rust_oom() ->! {
panic!("OOM")
}
#[no_mangle]
pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
return dest;
}
#[no_mangle]
pub unsafe extern fn | (dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
if src < dest as *const u8 { // copy from end
let mut i = n;
while i!= 0 {
i -= 1;
*dest.offset(i as isize) = *src.offset(i as isize);
}
} else { // copy from beginning
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
return dest;
}
#[no_mangle]
pub unsafe extern fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*s.offset(i as isize) = c as u8;
i += 1;
}
return s;
}
#[no_mangle]
pub unsafe extern fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
let mut i = 0;
while i < n {
let a = *s1.offset(i as isize);
let b = *s2.offset(i as isize);
if a!= b {
return a as i32 - b as i32
}
i += 1;
}
return 0;
}
| memmove | identifier_name |
panic.rs | use pi::uart::MiniUart;
use core::fmt::{Write, Arguments};
#[lang = "eh_personality"]
pub extern "C" fn eh_personality() {}
#[lang = "panic_fmt"]
#[no_mangle]
pub extern fn panic_fmt(msg: Arguments, file: &'static str, line: u32, col: u32) ->! |
#[no_mangle]
#[lang = "oom"]
pub unsafe extern "C" fn rust_oom() ->! {
panic!("OOM")
}
#[no_mangle]
pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
return dest;
}
#[no_mangle]
pub unsafe extern fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
if src < dest as *const u8 { // copy from end
let mut i = n;
while i!= 0 {
i -= 1;
*dest.offset(i as isize) = *src.offset(i as isize);
}
} else { // copy from beginning
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
return dest;
}
#[no_mangle]
pub unsafe extern fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*s.offset(i as isize) = c as u8;
i += 1;
}
return s;
}
#[no_mangle]
pub unsafe extern fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
let mut i = 0;
while i < n {
let a = *s1.offset(i as isize);
let b = *s2.offset(i as isize);
if a!= b {
return a as i32 - b as i32
}
i += 1;
}
return 0;
}
| {
let mut uart = MiniUart::new();
let _ = uart.write_str("\n\n---------- KERNEL PANIC ----------\n");
let _ = uart.write_fmt(format_args!("FILE: {}\nLINE: {}\n COL: {}\n", file, line, col));
let _ = uart.write_fmt(format_args!("{}", msg));
loop { unsafe { asm!("wfe") } }
} | identifier_body |
panic.rs | use pi::uart::MiniUart;
use core::fmt::{Write, Arguments};
#[lang = "eh_personality"]
pub extern "C" fn eh_personality() {}
#[lang = "panic_fmt"]
#[no_mangle]
pub extern fn panic_fmt(msg: Arguments, file: &'static str, line: u32, col: u32) ->! {
let mut uart = MiniUart::new();
let _ = uart.write_str("\n\n---------- KERNEL PANIC ----------\n");
let _ = uart.write_fmt(format_args!("FILE: {}\nLINE: {}\n COL: {}\n", file, line, col));
let _ = uart.write_fmt(format_args!("{}", msg));
loop { unsafe { asm!("wfe") } }
}
#[no_mangle]
#[lang = "oom"]
pub unsafe extern "C" fn rust_oom() ->! {
panic!("OOM")
}
#[no_mangle]
pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
return dest;
}
#[no_mangle]
pub unsafe extern fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
if src < dest as *const u8 { // copy from end
let mut i = n;
while i!= 0 {
i -= 1; | *dest.offset(i as isize) = *src.offset(i as isize);
}
} else { // copy from beginning
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
return dest;
}
#[no_mangle]
pub unsafe extern fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*s.offset(i as isize) = c as u8;
i += 1;
}
return s;
}
#[no_mangle]
pub unsafe extern fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
let mut i = 0;
while i < n {
let a = *s1.offset(i as isize);
let b = *s2.offset(i as isize);
if a!= b {
return a as i32 - b as i32
}
i += 1;
}
return 0;
} | random_line_split |
|
panic.rs | use pi::uart::MiniUart;
use core::fmt::{Write, Arguments};
#[lang = "eh_personality"]
pub extern "C" fn eh_personality() {}
#[lang = "panic_fmt"]
#[no_mangle]
pub extern fn panic_fmt(msg: Arguments, file: &'static str, line: u32, col: u32) ->! {
let mut uart = MiniUart::new();
let _ = uart.write_str("\n\n---------- KERNEL PANIC ----------\n");
let _ = uart.write_fmt(format_args!("FILE: {}\nLINE: {}\n COL: {}\n", file, line, col));
let _ = uart.write_fmt(format_args!("{}", msg));
loop { unsafe { asm!("wfe") } }
}
#[no_mangle]
#[lang = "oom"]
pub unsafe extern "C" fn rust_oom() ->! {
panic!("OOM")
}
#[no_mangle]
pub unsafe extern fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
return dest;
}
#[no_mangle]
pub unsafe extern fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
if src < dest as *const u8 | else { // copy from beginning
let mut i = 0;
while i < n {
*dest.offset(i as isize) = *src.offset(i as isize);
i += 1;
}
}
return dest;
}
#[no_mangle]
pub unsafe extern fn memset(s: *mut u8, c: i32, n: usize) -> *mut u8 {
let mut i = 0;
while i < n {
*s.offset(i as isize) = c as u8;
i += 1;
}
return s;
}
#[no_mangle]
pub unsafe extern fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
let mut i = 0;
while i < n {
let a = *s1.offset(i as isize);
let b = *s2.offset(i as isize);
if a!= b {
return a as i32 - b as i32
}
i += 1;
}
return 0;
}
| { // copy from end
let mut i = n;
while i != 0 {
i -= 1;
*dest.offset(i as isize) = *src.offset(i as isize);
}
} | conditional_block |
fedora.rs | // Copyright 2015-2017 Intecture Developers.
//
// Licensed under the Mozilla Public License 2.0 <LICENSE or
// https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied,
// modified, or distributed except according to those terms.
use errors::*;
use futures::{future, Future};
use pnet::datalink::interfaces;
use std::env;
use super::TelemetryProvider;
use target::{default, linux, redhat}; |
impl TelemetryProvider for Fedora {
fn available() -> bool {
cfg!(target_os="linux") && linux::fingerprint_os() == Some(LinuxFlavour::Fedora)
}
fn load(&self) -> Box<Future<Item = Telemetry, Error = Error>> {
Box::new(future::lazy(|| {
let t = match do_load() {
Ok(t) => t,
Err(e) => return future::err(e),
};
future::ok(t.into())
}))
}
}
fn do_load() -> Result<Telemetry> {
let (version_str, version_maj, version_min, version_patch) = redhat::version()?;
Ok(Telemetry {
cpu: Cpu {
vendor: linux::cpu_vendor()?,
brand_string: linux::cpu_brand_string()?,
cores: linux::cpu_cores()?,
},
fs: default::fs().chain_err(|| "could not resolve telemetry data")?,
hostname: default::hostname()?,
memory: linux::memory().chain_err(|| "could not resolve telemetry data")?,
net: interfaces(),
os: Os {
arch: env::consts::ARCH.into(),
family: OsFamily::Linux(LinuxDistro::RHEL),
platform: OsPlatform::Fedora,
version_str: version_str,
version_maj: version_maj,
version_min: version_min,
version_patch: version_patch,
},
user: default::user()?,
})
} | use target::linux::LinuxFlavour;
use telemetry::{Cpu, LinuxDistro, Os, OsFamily, OsPlatform, Telemetry};
pub struct Fedora; | random_line_split |
fedora.rs | // Copyright 2015-2017 Intecture Developers.
//
// Licensed under the Mozilla Public License 2.0 <LICENSE or
// https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied,
// modified, or distributed except according to those terms.
use errors::*;
use futures::{future, Future};
use pnet::datalink::interfaces;
use std::env;
use super::TelemetryProvider;
use target::{default, linux, redhat};
use target::linux::LinuxFlavour;
use telemetry::{Cpu, LinuxDistro, Os, OsFamily, OsPlatform, Telemetry};
pub struct Fedora;
impl TelemetryProvider for Fedora {
fn available() -> bool {
cfg!(target_os="linux") && linux::fingerprint_os() == Some(LinuxFlavour::Fedora)
}
fn load(&self) -> Box<Future<Item = Telemetry, Error = Error>> {
Box::new(future::lazy(|| {
let t = match do_load() {
Ok(t) => t,
Err(e) => return future::err(e),
};
future::ok(t.into())
}))
}
}
fn | () -> Result<Telemetry> {
let (version_str, version_maj, version_min, version_patch) = redhat::version()?;
Ok(Telemetry {
cpu: Cpu {
vendor: linux::cpu_vendor()?,
brand_string: linux::cpu_brand_string()?,
cores: linux::cpu_cores()?,
},
fs: default::fs().chain_err(|| "could not resolve telemetry data")?,
hostname: default::hostname()?,
memory: linux::memory().chain_err(|| "could not resolve telemetry data")?,
net: interfaces(),
os: Os {
arch: env::consts::ARCH.into(),
family: OsFamily::Linux(LinuxDistro::RHEL),
platform: OsPlatform::Fedora,
version_str: version_str,
version_maj: version_maj,
version_min: version_min,
version_patch: version_patch,
},
user: default::user()?,
})
}
| do_load | identifier_name |
fedora.rs | // Copyright 2015-2017 Intecture Developers.
//
// Licensed under the Mozilla Public License 2.0 <LICENSE or
// https://www.tldrlegal.com/l/mpl-2.0>. This file may not be copied,
// modified, or distributed except according to those terms.
use errors::*;
use futures::{future, Future};
use pnet::datalink::interfaces;
use std::env;
use super::TelemetryProvider;
use target::{default, linux, redhat};
use target::linux::LinuxFlavour;
use telemetry::{Cpu, LinuxDistro, Os, OsFamily, OsPlatform, Telemetry};
pub struct Fedora;
impl TelemetryProvider for Fedora {
fn available() -> bool {
cfg!(target_os="linux") && linux::fingerprint_os() == Some(LinuxFlavour::Fedora)
}
fn load(&self) -> Box<Future<Item = Telemetry, Error = Error>> |
}
fn do_load() -> Result<Telemetry> {
let (version_str, version_maj, version_min, version_patch) = redhat::version()?;
Ok(Telemetry {
cpu: Cpu {
vendor: linux::cpu_vendor()?,
brand_string: linux::cpu_brand_string()?,
cores: linux::cpu_cores()?,
},
fs: default::fs().chain_err(|| "could not resolve telemetry data")?,
hostname: default::hostname()?,
memory: linux::memory().chain_err(|| "could not resolve telemetry data")?,
net: interfaces(),
os: Os {
arch: env::consts::ARCH.into(),
family: OsFamily::Linux(LinuxDistro::RHEL),
platform: OsPlatform::Fedora,
version_str: version_str,
version_maj: version_maj,
version_min: version_min,
version_patch: version_patch,
},
user: default::user()?,
})
}
| {
Box::new(future::lazy(|| {
let t = match do_load() {
Ok(t) => t,
Err(e) => return future::err(e),
};
future::ok(t.into())
}))
} | identifier_body |