| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes) |
---|---|---|---|---|
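Each row below is one fill-in-the-middle (FIM) sample: a source file split into a prefix, a masked middle, and a suffix, with fim_type naming the masking strategy (the four classes appearing in this dump are identifier_body, identifier_name, conditional_block, and random_line_split). A minimal sketch of the row shape and the usual FIM reconstruction invariant; the field names come from the header above, while the concatenation property is a standard FIM assumption rather than something the dump itself states:

```python
from dataclasses import dataclass

@dataclass
class FimRow:
    """One row of this dump: a FIM split of a single source file."""
    file_name: str  # e.g. "test_region_info_accessor.rs"
    prefix: str     # text before the masked span
    suffix: str     # text after the masked span
    middle: str     # the masked span itself (may be empty)
    fim_type: str   # one of the 4 masking classes named in the header

def reconstruct(row: FimRow) -> str:
    # Under the usual FIM convention, gluing the pieces back together
    # in prefix + middle + suffix order yields the original file.
    return row.prefix + row.middle + row.suffix
```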
test_region_info_accessor.rs
|
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use keys::data_end_key;
use kvproto::metapb::Region;
use raft::StateRole;
use raftstore::coprocessor::{RegionInfo, RegionInfoAccessor};
use raftstore::store::util::{find_peer, new_peer};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use test_raftstore::{configure_for_merge, new_node_cluster, Cluster, NodeCluster};
use tikv_util::worker::Worker;
use tikv_util::HandyRwLock;
fn dump(c: &RegionInfoAccessor) -> Vec<(Region, StateRole)> {
let (regions, region_ranges) = c.debug_dump();
assert_eq!(regions.len(), region_ranges.len());
let mut res = Vec::new();
for (end_key, id) in region_ranges {
let RegionInfo { ref region, role } = regions[&id];
assert_eq!(end_key, data_end_key(region.get_end_key()));
assert_eq!(id, region.get_id());
res.push((region.clone(), role));
}
res
}
fn check_region_ranges(regions: &[(Region, StateRole)], ranges: &[(&[u8], &[u8])]) {
assert_eq!(regions.len(), ranges.len());
regions
.iter()
.zip(ranges.iter())
.for_each(|((r, _), (start_key, end_key))| {
assert_eq!(r.get_start_key(), *start_key);
assert_eq!(r.get_end_key(), *end_key);
})
}
fn test_region_info_accessor_impl(cluster: &mut Cluster<NodeCluster>, c: &RegionInfoAccessor)
|
#[test]
fn test_node_cluster_region_info_accessor() {
let mut cluster = new_node_cluster(1, 3);
configure_for_merge(&mut cluster);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
// Create a RegionInfoAccessor on node 1
let (tx, rx) = channel();
let worker = Worker::new("test");
cluster
.sim
.wl()
.post_create_coprocessor_host(Box::new(move |id, host| {
if id == 1 {
let c = RegionInfoAccessor::new(host, &worker);
tx.send(c).unwrap();
}
}));
cluster.run_conf_change();
let c = rx.recv().unwrap();
// We only created it on the node whose id == 1 so we shouldn't receive more than one item.
assert!(rx.try_recv().is_err());
test_region_info_accessor_impl(&mut cluster, &c);
drop(cluster);
c.stop();
}
|
{
for i in 0..9 {
let k = format!("k{}", i).into_bytes();
let v = format!("v{}", i).into_bytes();
cluster.must_put(&k, &v);
}
let pd_client = Arc::clone(&cluster.pd_client);
let init_regions = dump(c);
check_region_ranges(&init_regions, &[(&b""[..], &b""[..])]);
assert_eq!(init_regions[0].0, cluster.get_region(b"k1"));
// Split
{
let r1 = cluster.get_region(b"k1");
cluster.must_split(&r1, b"k1");
let r2 = cluster.get_region(b"k4");
cluster.must_split(&r2, b"k4");
let r3 = cluster.get_region(b"k2");
cluster.must_split(&r3, b"k2");
let r4 = cluster.get_region(b"k3");
cluster.must_split(&r4, b"k3");
}
let split_regions = dump(c);
check_region_ranges(
&split_regions,
&[
(&b""[..], &b"k1"[..]),
(b"k1", b"k2"),
(b"k2", b"k3"),
(b"k3", b"k4"),
(b"k4", b""),
],
);
for (ref region, _) in &split_regions {
if region.get_id() == init_regions[0].0.get_id() {
assert_ne!(
region.get_region_epoch(),
init_regions[0].0.get_region_epoch()
);
}
}
// Merge from left to right
pd_client.must_merge(split_regions[1].0.get_id(), split_regions[2].0.get_id());
let merge_regions = dump(&c);
check_region_ranges(
&merge_regions,
&[
(&b""[..], &b"k1"[..]),
(b"k1", b"k3"),
(b"k3", b"k4"),
(b"k4", b""),
],
);
// Merge from right to left
pd_client.must_merge(merge_regions[2].0.get_id(), merge_regions[1].0.get_id());
let mut merge_regions_2 = dump(&c);
check_region_ranges(
&merge_regions_2,
&[(&b""[..], &b"k1"[..]), (b"k1", b"k4"), (b"k4", b"")],
);
// Add peer
let (region1, role1) = merge_regions_2.remove(1);
assert_eq!(role1, StateRole::Leader);
assert_eq!(region1.get_peers().len(), 1);
assert_eq!(region1.get_peers()[0].get_store_id(), 1);
pd_client.must_add_peer(region1.get_id(), new_peer(2, 100));
let (region2, role2) = dump(c).remove(1);
assert_eq!(role2, StateRole::Leader);
assert_eq!(region2.get_peers().len(), 2);
assert!(find_peer(&region2, 1).is_some());
assert!(find_peer(&region2, 2).is_some());
// Change leader
pd_client.transfer_leader(region2.get_id(), find_peer(&region2, 2).unwrap().clone());
let mut region3 = Region::default();
let mut role3 = StateRole::default();
// Wait for transfer leader finish
for _ in 0..100 {
let r = dump(c).remove(1);
region3 = r.0;
role3 = r.1;
if role3 == StateRole::Follower {
break;
}
thread::sleep(Duration::from_millis(20));
}
assert_eq!(role3, StateRole::Follower);
// Remove peer
check_region_ranges(
&dump(c),
&[(&b""[..], &b"k1"[..]), (b"k1", b"k4"), (b"k4", b"")],
);
pd_client.must_remove_peer(region3.get_id(), find_peer(&region3, 1).unwrap().clone());
let mut regions_after_removing = Vec::new();
// It seems region_info_accessor lags a little behind raftstore...
for _ in 0..100 {
regions_after_removing = dump(c);
if regions_after_removing.len() == 2 {
break;
}
thread::sleep(Duration::from_millis(20));
}
check_region_ranges(
&regions_after_removing,
&[(&b""[..], &b"k1"[..]), (b"k4", b"")],
);
}
|
identifier_body
|
test_region_info_accessor.rs
|
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use keys::data_end_key;
use kvproto::metapb::Region;
use raft::StateRole;
use raftstore::coprocessor::{RegionInfo, RegionInfoAccessor};
use raftstore::store::util::{find_peer, new_peer};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use test_raftstore::{configure_for_merge, new_node_cluster, Cluster, NodeCluster};
use tikv_util::worker::Worker;
use tikv_util::HandyRwLock;
fn dump(c: &RegionInfoAccessor) -> Vec<(Region, StateRole)> {
let (regions, region_ranges) = c.debug_dump();
assert_eq!(regions.len(), region_ranges.len());
let mut res = Vec::new();
for (end_key, id) in region_ranges {
let RegionInfo { ref region, role } = regions[&id];
assert_eq!(end_key, data_end_key(region.get_end_key()));
assert_eq!(id, region.get_id());
res.push((region.clone(), role));
}
res
}
fn
|
(regions: &[(Region, StateRole)], ranges: &[(&[u8], &[u8])]) {
assert_eq!(regions.len(), ranges.len());
regions
.iter()
.zip(ranges.iter())
.for_each(|((r, _), (start_key, end_key))| {
assert_eq!(r.get_start_key(), *start_key);
assert_eq!(r.get_end_key(), *end_key);
})
}
fn test_region_info_accessor_impl(cluster: &mut Cluster<NodeCluster>, c: &RegionInfoAccessor) {
for i in 0..9 {
let k = format!("k{}", i).into_bytes();
let v = format!("v{}", i).into_bytes();
cluster.must_put(&k, &v);
}
let pd_client = Arc::clone(&cluster.pd_client);
let init_regions = dump(c);
check_region_ranges(&init_regions, &[(&b""[..], &b""[..])]);
assert_eq!(init_regions[0].0, cluster.get_region(b"k1"));
// Split
{
let r1 = cluster.get_region(b"k1");
cluster.must_split(&r1, b"k1");
let r2 = cluster.get_region(b"k4");
cluster.must_split(&r2, b"k4");
let r3 = cluster.get_region(b"k2");
cluster.must_split(&r3, b"k2");
let r4 = cluster.get_region(b"k3");
cluster.must_split(&r4, b"k3");
}
let split_regions = dump(c);
check_region_ranges(
&split_regions,
&[
(&b""[..], &b"k1"[..]),
(b"k1", b"k2"),
(b"k2", b"k3"),
(b"k3", b"k4"),
(b"k4", b""),
],
);
for (ref region, _) in &split_regions {
if region.get_id() == init_regions[0].0.get_id() {
assert_ne!(
region.get_region_epoch(),
init_regions[0].0.get_region_epoch()
);
}
}
// Merge from left to right
pd_client.must_merge(split_regions[1].0.get_id(), split_regions[2].0.get_id());
let merge_regions = dump(&c);
check_region_ranges(
&merge_regions,
&[
(&b""[..], &b"k1"[..]),
(b"k1", b"k3"),
(b"k3", b"k4"),
(b"k4", b""),
],
);
// Merge from right to left
pd_client.must_merge(merge_regions[2].0.get_id(), merge_regions[1].0.get_id());
let mut merge_regions_2 = dump(&c);
check_region_ranges(
&merge_regions_2,
&[(&b""[..], &b"k1"[..]), (b"k1", b"k4"), (b"k4", b"")],
);
// Add peer
let (region1, role1) = merge_regions_2.remove(1);
assert_eq!(role1, StateRole::Leader);
assert_eq!(region1.get_peers().len(), 1);
assert_eq!(region1.get_peers()[0].get_store_id(), 1);
pd_client.must_add_peer(region1.get_id(), new_peer(2, 100));
let (region2, role2) = dump(c).remove(1);
assert_eq!(role2, StateRole::Leader);
assert_eq!(region2.get_peers().len(), 2);
assert!(find_peer(&region2, 1).is_some());
assert!(find_peer(&region2, 2).is_some());
// Change leader
pd_client.transfer_leader(region2.get_id(), find_peer(&region2, 2).unwrap().clone());
let mut region3 = Region::default();
let mut role3 = StateRole::default();
// Wait for transfer leader finish
for _ in 0..100 {
let r = dump(c).remove(1);
region3 = r.0;
role3 = r.1;
if role3 == StateRole::Follower {
break;
}
thread::sleep(Duration::from_millis(20));
}
assert_eq!(role3, StateRole::Follower);
// Remove peer
check_region_ranges(
&dump(c),
&[(&b""[..], &b"k1"[..]), (b"k1", b"k4"), (b"k4", b"")],
);
pd_client.must_remove_peer(region3.get_id(), find_peer(&region3, 1).unwrap().clone());
let mut regions_after_removing = Vec::new();
// It seems region_info_accessor lags a little behind raftstore...
for _ in 0..100 {
regions_after_removing = dump(c);
if regions_after_removing.len() == 2 {
break;
}
thread::sleep(Duration::from_millis(20));
}
check_region_ranges(
&regions_after_removing,
&[(&b""[..], &b"k1"[..]), (b"k4", b"")],
);
}
#[test]
fn test_node_cluster_region_info_accessor() {
let mut cluster = new_node_cluster(1, 3);
configure_for_merge(&mut cluster);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
// Create a RegionInfoAccessor on node 1
let (tx, rx) = channel();
let worker = Worker::new("test");
cluster
.sim
.wl()
.post_create_coprocessor_host(Box::new(move |id, host| {
if id == 1 {
let c = RegionInfoAccessor::new(host, &worker);
tx.send(c).unwrap();
}
}));
cluster.run_conf_change();
let c = rx.recv().unwrap();
// We only created it on the node whose id == 1 so we shouldn't receive more than one item.
assert!(rx.try_recv().is_err());
test_region_info_accessor_impl(&mut cluster, &c);
drop(cluster);
c.stop();
}
|
check_region_ranges
|
identifier_name
|
test_region_info_accessor.rs
|
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use keys::data_end_key;
use kvproto::metapb::Region;
use raft::StateRole;
use raftstore::coprocessor::{RegionInfo, RegionInfoAccessor};
use raftstore::store::util::{find_peer, new_peer};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use test_raftstore::{configure_for_merge, new_node_cluster, Cluster, NodeCluster};
use tikv_util::worker::Worker;
use tikv_util::HandyRwLock;
fn dump(c: &RegionInfoAccessor) -> Vec<(Region, StateRole)> {
let (regions, region_ranges) = c.debug_dump();
assert_eq!(regions.len(), region_ranges.len());
let mut res = Vec::new();
for (end_key, id) in region_ranges {
let RegionInfo { ref region, role } = regions[&id];
assert_eq!(end_key, data_end_key(region.get_end_key()));
assert_eq!(id, region.get_id());
res.push((region.clone(), role));
}
res
}
fn check_region_ranges(regions: &[(Region, StateRole)], ranges: &[(&[u8], &[u8])]) {
assert_eq!(regions.len(), ranges.len());
regions
.iter()
.zip(ranges.iter())
.for_each(|((r, _), (start_key, end_key))| {
assert_eq!(r.get_start_key(), *start_key);
assert_eq!(r.get_end_key(), *end_key);
})
}
fn test_region_info_accessor_impl(cluster: &mut Cluster<NodeCluster>, c: &RegionInfoAccessor) {
for i in 0..9 {
let k = format!("k{}", i).into_bytes();
let v = format!("v{}", i).into_bytes();
cluster.must_put(&k, &v);
}
let pd_client = Arc::clone(&cluster.pd_client);
let init_regions = dump(c);
check_region_ranges(&init_regions, &[(&b""[..], &b""[..])]);
assert_eq!(init_regions[0].0, cluster.get_region(b"k1"));
// Split
{
let r1 = cluster.get_region(b"k1");
cluster.must_split(&r1, b"k1");
let r2 = cluster.get_region(b"k4");
cluster.must_split(&r2, b"k4");
let r3 = cluster.get_region(b"k2");
cluster.must_split(&r3, b"k2");
let r4 = cluster.get_region(b"k3");
cluster.must_split(&r4, b"k3");
}
let split_regions = dump(c);
check_region_ranges(
&split_regions,
&[
(&b""[..], &b"k1"[..]),
(b"k1", b"k2"),
(b"k2", b"k3"),
(b"k3", b"k4"),
(b"k4", b""),
],
);
for (ref region, _) in &split_regions {
if region.get_id() == init_regions[0].0.get_id() {
assert_ne!(
region.get_region_epoch(),
init_regions[0].0.get_region_epoch()
);
}
}
// Merge from left to right
pd_client.must_merge(split_regions[1].0.get_id(), split_regions[2].0.get_id());
let merge_regions = dump(&c);
check_region_ranges(
&merge_regions,
&[
(&b""[..], &b"k1"[..]),
(b"k1", b"k3"),
(b"k3", b"k4"),
(b"k4", b""),
],
);
// Merge from right to left
pd_client.must_merge(merge_regions[2].0.get_id(), merge_regions[1].0.get_id());
let mut merge_regions_2 = dump(&c);
check_region_ranges(
&merge_regions_2,
&[(&b""[..], &b"k1"[..]), (b"k1", b"k4"), (b"k4", b"")],
);
// Add peer
let (region1, role1) = merge_regions_2.remove(1);
assert_eq!(role1, StateRole::Leader);
assert_eq!(region1.get_peers().len(), 1);
assert_eq!(region1.get_peers()[0].get_store_id(), 1);
pd_client.must_add_peer(region1.get_id(), new_peer(2, 100));
let (region2, role2) = dump(c).remove(1);
assert_eq!(role2, StateRole::Leader);
assert_eq!(region2.get_peers().len(), 2);
assert!(find_peer(&region2, 1).is_some());
assert!(find_peer(&region2, 2).is_some());
// Change leader
pd_client.transfer_leader(region2.get_id(), find_peer(&region2, 2).unwrap().clone());
let mut region3 = Region::default();
let mut role3 = StateRole::default();
// Wait for transfer leader finish
for _ in 0..100 {
let r = dump(c).remove(1);
region3 = r.0;
role3 = r.1;
if role3 == StateRole::Follower {
break;
}
thread::sleep(Duration::from_millis(20));
}
assert_eq!(role3, StateRole::Follower);
// Remove peer
check_region_ranges(
&dump(c),
&[(&b""[..], &b"k1"[..]), (b"k1", b"k4"), (b"k4", b"")],
);
pd_client.must_remove_peer(region3.get_id(), find_peer(&region3, 1).unwrap().clone());
let mut regions_after_removing = Vec::new();
// It seems region_info_accessor lags a little behind raftstore...
for _ in 0..100 {
regions_after_removing = dump(c);
if regions_after_removing.len() == 2
|
thread::sleep(Duration::from_millis(20));
}
check_region_ranges(
&regions_after_removing,
&[(&b""[..], &b"k1"[..]), (b"k4", b"")],
);
}
#[test]
fn test_node_cluster_region_info_accessor() {
let mut cluster = new_node_cluster(1, 3);
configure_for_merge(&mut cluster);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
// Create a RegionInfoAccessor on node 1
let (tx, rx) = channel();
let worker = Worker::new("test");
cluster
.sim
.wl()
.post_create_coprocessor_host(Box::new(move |id, host| {
if id == 1 {
let c = RegionInfoAccessor::new(host, &worker);
tx.send(c).unwrap();
}
}));
cluster.run_conf_change();
let c = rx.recv().unwrap();
// We only created it on the node whose id == 1 so we shouldn't receive more than one item.
assert!(rx.try_recv().is_err());
test_region_info_accessor_impl(&mut cluster, &c);
drop(cluster);
c.stop();
}
|
{
break;
}
|
conditional_block
|
test_region_info_accessor.rs
|
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.
use keys::data_end_key;
use kvproto::metapb::Region;
use raft::StateRole;
use raftstore::coprocessor::{RegionInfo, RegionInfoAccessor};
use raftstore::store::util::{find_peer, new_peer};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use test_raftstore::{configure_for_merge, new_node_cluster, Cluster, NodeCluster};
use tikv_util::worker::Worker;
use tikv_util::HandyRwLock;
fn dump(c: &RegionInfoAccessor) -> Vec<(Region, StateRole)> {
let (regions, region_ranges) = c.debug_dump();
assert_eq!(regions.len(), region_ranges.len());
let mut res = Vec::new();
for (end_key, id) in region_ranges {
let RegionInfo { ref region, role } = regions[&id];
assert_eq!(end_key, data_end_key(region.get_end_key()));
assert_eq!(id, region.get_id());
res.push((region.clone(), role));
}
res
}
fn check_region_ranges(regions: &[(Region, StateRole)], ranges: &[(&[u8], &[u8])]) {
assert_eq!(regions.len(), ranges.len());
regions
.iter()
.zip(ranges.iter())
.for_each(|((r, _), (start_key, end_key))| {
assert_eq!(r.get_start_key(), *start_key);
assert_eq!(r.get_end_key(), *end_key);
})
}
fn test_region_info_accessor_impl(cluster: &mut Cluster<NodeCluster>, c: &RegionInfoAccessor) {
for i in 0..9 {
let k = format!("k{}", i).into_bytes();
let v = format!("v{}", i).into_bytes();
cluster.must_put(&k, &v);
}
let pd_client = Arc::clone(&cluster.pd_client);
let init_regions = dump(c);
check_region_ranges(&init_regions, &[(&b""[..], &b""[..])]);
assert_eq!(init_regions[0].0, cluster.get_region(b"k1"));
// Split
{
let r1 = cluster.get_region(b"k1");
cluster.must_split(&r1, b"k1");
let r2 = cluster.get_region(b"k4");
cluster.must_split(&r2, b"k4");
let r3 = cluster.get_region(b"k2");
cluster.must_split(&r3, b"k2");
let r4 = cluster.get_region(b"k3");
cluster.must_split(&r4, b"k3");
}
let split_regions = dump(c);
check_region_ranges(
&split_regions,
&[
(&b""[..], &b"k1"[..]),
(b"k1", b"k2"),
(b"k2", b"k3"),
(b"k3", b"k4"),
(b"k4", b""),
],
);
for (ref region, _) in &split_regions {
if region.get_id() == init_regions[0].0.get_id() {
assert_ne!(
region.get_region_epoch(),
init_regions[0].0.get_region_epoch()
);
}
}
// Merge from left to right
pd_client.must_merge(split_regions[1].0.get_id(), split_regions[2].0.get_id());
let merge_regions = dump(&c);
check_region_ranges(
&merge_regions,
&[
(&b""[..], &b"k1"[..]),
(b"k1", b"k3"),
(b"k3", b"k4"),
(b"k4", b""),
],
);
// Merge from right to left
pd_client.must_merge(merge_regions[2].0.get_id(), merge_regions[1].0.get_id());
let mut merge_regions_2 = dump(&c);
check_region_ranges(
&merge_regions_2,
&[(&b""[..], &b"k1"[..]), (b"k1", b"k4"), (b"k4", b"")],
);
// Add peer
let (region1, role1) = merge_regions_2.remove(1);
assert_eq!(role1, StateRole::Leader);
assert_eq!(region1.get_peers().len(), 1);
assert_eq!(region1.get_peers()[0].get_store_id(), 1);
pd_client.must_add_peer(region1.get_id(), new_peer(2, 100));
let (region2, role2) = dump(c).remove(1);
assert_eq!(role2, StateRole::Leader);
assert_eq!(region2.get_peers().len(), 2);
assert!(find_peer(&region2, 1).is_some());
assert!(find_peer(&region2, 2).is_some());
// Change leader
pd_client.transfer_leader(region2.get_id(), find_peer(&region2, 2).unwrap().clone());
let mut region3 = Region::default();
let mut role3 = StateRole::default();
// Wait for transfer leader finish
for _ in 0..100 {
let r = dump(c).remove(1);
region3 = r.0;
role3 = r.1;
if role3 == StateRole::Follower {
break;
}
thread::sleep(Duration::from_millis(20));
}
assert_eq!(role3, StateRole::Follower);
// Remove peer
check_region_ranges(
&dump(c),
&[(&b""[..], &b"k1"[..]), (b"k1", b"k4"), (b"k4", b"")],
);
pd_client.must_remove_peer(region3.get_id(), find_peer(&region3, 1).unwrap().clone());
let mut regions_after_removing = Vec::new();
// It seems region_info_accessor lags a little behind raftstore...
for _ in 0..100 {
regions_after_removing = dump(c);
if regions_after_removing.len() == 2 {
break;
}
thread::sleep(Duration::from_millis(20));
}
check_region_ranges(
&regions_after_removing,
&[(&b""[..], &b"k1"[..]), (b"k4", b"")],
);
}
#[test]
fn test_node_cluster_region_info_accessor() {
let mut cluster = new_node_cluster(1, 3);
configure_for_merge(&mut cluster);
let pd_client = Arc::clone(&cluster.pd_client);
pd_client.disable_default_operator();
// Create a RegionInfoAccessor on node 1
let (tx, rx) = channel();
let worker = Worker::new("test");
cluster
.sim
.wl()
.post_create_coprocessor_host(Box::new(move |id, host| {
if id == 1 {
let c = RegionInfoAccessor::new(host, &worker);
tx.send(c).unwrap();
}
}));
cluster.run_conf_change();
let c = rx.recv().unwrap();
// We only created it on the node whose id == 1 so we shouldn't receive more than one item.
assert!(rx.try_recv().is_err());
test_region_info_accessor_impl(&mut cluster, &c);
|
drop(cluster);
c.stop();
}
|
random_line_split
|
|
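The four Rust rows above exercise each fim_type once: identifier_body masks a whole function body, identifier_name masks a single identifier (check_region_ranges), conditional_block masks a branch body ({ break; }), and random_line_split cuts at arbitrary line boundaries. As a rough sketch of how a random_line_split sample might be produced (a hypothetical generator for illustration; the dump only shows the resulting cells, not the tooling that made them):

```python
import random

def random_line_split(source: str, rng: random.Random):
    """Split `source` at two random line boundaries into (prefix, middle, suffix)."""
    lines = source.splitlines(keepends=True)
    i = rng.randrange(len(lines) + 1)     # start of the masked span
    j = rng.randrange(i, len(lines) + 1)  # end of the masked span (may equal i)
    prefix = "".join(lines[:i])
    middle = "".join(lines[i:j])
    suffix = "".join(lines[j:])
    assert prefix + middle + suffix == source  # FIM invariant holds by construction
    return prefix, middle, suffix
```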
admin_list.py
|
from __future__ import unicode_literals
import datetime
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.util import (lookup_field, display_for_field,
display_for_value, label_for_field)
from django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,
ORDER_VAR, PAGE_VAR, SEARCH_VAR)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.template import Library
from django.template.loader import get_template
from django.template.context import Context
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl,i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{0}</span> ', i+1)
else:
return format_html('<a href="{0}"{1}>{2}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages-1 else ''),
i+1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
|
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(field_name, cl.model,
model_admin = cl.model_admin,
return_attr = True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{0}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{0}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{0}"', ' '.join(th_classes))
if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.gif' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{0}" alt="{1}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
if field_name == 'action_checkbox':
row_class = mark_safe(' class="action-checkbox"')
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_class = mark_safe(' class="nowrap"')
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_class = mark_safe(' class="nowrap"')
if force_text(result_repr) == '':
result_repr = mark_safe('&nbsp;')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = escapejs(value)
yield format_html('<{0}{1}><a href="{2}"{3}>{4}</a></{5}>',
table_tag,
row_class,
url,
format_html(' onclick="opener.dismissRelatedLookupPopup(window, &#39;{0}&#39;); return false;"', result_id)
if cl.is_popup else '',
result_repr,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{0}>{1}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{0}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field_by_name(field_name)[0]
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda d: cl.get_query_string(d, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render(Context({
'title': spec.title,
'choices' : list(spec.choices(cl)),
'spec': spec,
}))
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
|
page_range = []
|
conditional_block
|
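The admin_list.py row above masks the page_range = [] initializer inside pagination(); the surrounding "smart" page-range windowing sits in the suffix cell. For readability, here is that windowing logic distilled into a standalone function (constants and branch structure copied from the code above; the function name and signature are mine):

```python
def smart_page_range(num_pages: int, page_num: int):
    """Window the page links around page_num, with '.' marking elided runs.

    Mirrors pagination() shown above: ON_EACH_SIDE links around the
    current page, ON_ENDS links at each end of the full page list.
    """
    DOT = '.'
    ON_EACH_SIDE = 3
    ON_ENDS = 2
    if num_pages <= 10:
        return list(range(num_pages))
    page_range = []
    if page_num > (ON_EACH_SIDE + ON_ENDS):
        page_range.extend(range(0, ON_ENDS))
        page_range.append(DOT)
        page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
    else:
        page_range.extend(range(0, page_num + 1))
    if page_num < (num_pages - ON_EACH_SIDE - ON_ENDS - 1):
        page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
        page_range.append(DOT)
        page_range.extend(range(num_pages - ON_ENDS, num_pages))
    else:
        page_range.extend(range(page_num + 1, num_pages))
    return page_range
```

For example, smart_page_range(20, 10) yields [0, 1, '.', 7, 8, 9, 10, 11, 12, 13, '.', 18, 19].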
admin_list.py
|
from __future__ import unicode_literals
import datetime
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.util import (lookup_field, display_for_field,
display_for_value, label_for_field)
from django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,
ORDER_VAR, PAGE_VAR, SEARCH_VAR)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.template import Library
from django.template.loader import get_template
from django.template.context import Context
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl,i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{0}</span> ', i+1)
else:
return format_html('<a href="{0}"{1}>{2}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages-1 else ''),
i+1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(field_name, cl.model,
model_admin = cl.model_admin,
return_attr = True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{0}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{0}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{0}"', ' '.join(th_classes))
if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.gif' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{0}" alt="{1}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
if field_name == 'action_checkbox':
row_class = mark_safe(' class="action-checkbox"')
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_class = mark_safe(' class="nowrap"')
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_class = mark_safe(' class="nowrap"')
if force_text(result_repr) == '':
result_repr = mark_safe('&nbsp;')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = escapejs(value)
yield format_html('<{0}{1}><a href="{2}"{3}>{4}</a></{5}>',
table_tag,
row_class,
url,
format_html(' onclick="opener.dismissRelatedLookupPopup(window, &#39;{0}&#39;); return false;"', result_id)
if cl.is_popup else '',
result_repr,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{0}>{1}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{0}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field_by_name(field_name)[0]
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
|
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda d: cl.get_query_string(d, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render(Context({
'title': spec.title,
'choices' : list(spec.choices(cl)),
'spec': spec,
}))
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
|
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
|
random_line_split
|
admin_list.py
|
from __future__ import unicode_literals
import datetime
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.util import (lookup_field, display_for_field,
display_for_value, label_for_field)
from django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,
ORDER_VAR, PAGE_VAR, SEARCH_VAR)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.template import Library
from django.template.loader import get_template
from django.template.context import Context
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl,i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{0}</span> ', i+1)
else:
return format_html('<a href="{0}"{1}>{2}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages-1 else ''),
i+1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
|
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.gif' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{0}" alt="{1}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
if field_name == 'action_checkbox':
row_class = mark_safe(' class="action-checkbox"')
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_class = mark_safe(' class="nowrap"')
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_class = mark_safe(' class="nowrap"')
if force_text(result_repr) == '':
result_repr = mark_safe('&nbsp;')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = escapejs(value)
yield format_html('<{0}{1}><a href="{2}"{3}>{4}</a></{5}>',
table_tag,
row_class,
url,
format_html(' onclick="opener.dismissRelatedLookupPopup(window, &#39;{0}&#39;); return false;"', result_id)
if cl.is_popup else '',
result_repr,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{0}>{1}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{0}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field_by_name(field_name)[0]
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda d: cl.get_query_string(d, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render(Context({
'title': spec.title,
'choices' : list(spec.choices(cl)),
'spec': spec,
}))
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
|
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(field_name, cl.model,
model_admin = cl.model_admin,
return_attr = True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{0}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{0}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{0}"', ' '.join(th_classes))
if th_classes else '',
}
|
identifier_body
|
admin_list.py
|
from __future__ import unicode_literals
import datetime
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.util import (lookup_field, display_for_field,
display_for_value, label_for_field)
from django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,
ORDER_VAR, PAGE_VAR, SEARCH_VAR)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import escapejs, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import force_text
from django.template import Library
from django.template.loader import get_template
from django.template.context import Context
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl,i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{0}</span> ', i+1)
else:
return format_html('<a href="{0}"{1}>{2}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages-1 else ''),
i+1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination, eliding runs of pages with dots.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_ENDS))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
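# A minimal, standalone sketch of the elision logic above (constants and the
# 0-based page_num mirror pagination(); the function name is illustrative and
# not part of this module's public API):
def _sketch_page_range(num_pages, page_num, on_each_side=3, on_ends=2):
    if num_pages <= 10:
        return list(range(num_pages))
    pages = []
    if page_num > (on_each_side + on_ends):
        pages.extend(range(0, on_ends))
        pages.append(DOT)
        pages.extend(range(page_num - on_each_side, page_num + 1))
    else:
        pages.extend(range(0, page_num + 1))
    if page_num < (num_pages - on_each_side - on_ends - 1):
        pages.extend(range(page_num + 1, page_num + on_each_side + 1))
        pages.append(DOT)
        pages.extend(range(num_pages - on_ends, num_pages))
    else:
        pages.extend(range(page_num + 1, num_pages))
    return pages
# e.g. _sketch_page_range(20, 10) == [0, 1, '.', 7, 8, 9, 10, 11, 12, 13, '.', 18, 19]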
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(field_name, cl.model,
model_admin = cl.model_admin,
return_attr = True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"class_attrib": format_html(' class="column-{0}"', field_name),
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable', 'column-{0}'.format(field_name)]
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{0}"', ' '.join(th_classes))
if th_classes else '',
}
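# Illustration of the ordering parameter built above, assuming ORDER_VAR is the
# query-string key (e.g. "o"): make_qs_param emits the 0-based column index,
# "-"-prefixed for descending, and the lists are joined with ".", so
# "?o=2.-1" means: primary sort on column 2 ascending, then column 1 descending.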
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.gif' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{0}" alt="{1}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
if field_name == 'action_checkbox':
row_class = mark_safe(' class="action-checkbox"')
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_class = mark_safe(' class="nowrap"')
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_class = mark_safe(' class="nowrap"')
if force_text(result_repr) == '':
result_repr = mark_safe(' ')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = escapejs(value)
yield format_html('<{0}{1}><a href="{2}"{3}>{4}</a></{5}>',
table_tag,
row_class,
url,
                              format_html(' onclick="opener.dismissRelatedLookupPopup(window, &#39;{0}&#39;); return false;"', result_id)
if cl.is_popup else '',
result_repr,
table_tag)
else:
            # By default the fields come from ModelAdmin.list_editable, but if we pull
            # the fields out of the form instead of list_editable, custom admins
            # can provide fields on a per-request basis.
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{0}>{1}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{0}</td>', force_text(form[cl.model._meta.pk.name]))
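# Illustrative yields (hypothetical values): a linked first column comes out as
#   <th class="nowrap"><a href="/admin/app/obj/1/">May 1, 2013</a></th>
# while a list_editable column yields a plain <td> wrapping the bound form
# field together with any validation errors.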
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def
|
(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
field = cl.opts.get_field_by_name(field_name)[0]
dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda d: cl.get_query_string(d, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.queryset.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
days = getattr(days, dates_or_datetimes)(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.queryset.filter(**{year_field: year_lookup})
months = getattr(months, dates_or_datetimes)(field_name, 'month')
return {
'show': True,
'back': {
'link': link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
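# Drill-down summary of the branches above: no lookups -> one choice per year;
# year -> months of that year; year + month -> days; year + month + day -> a
# single leaf title, with 'back' always linking one level up the hierarchy.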
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render(Context({
'title': spec.title,
        'choices': list(spec.choices(cl)),
'spec': spec,
}))
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
|
__init__
|
identifier_name
|
codemap.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// ignore-lexer-test FIXME #15679
//! The CodeMap tracks all the source code used within a single crate, mapping
//! from integer byte positions to the original source code location. Each bit
//! of source parsed during crate parsing (typically files, in-memory strings,
//! or various bits of macro expansion) covers a continuous range of bytes in the
//! CodeMap and is represented by a FileMap. Byte positions are stored in
//! `spans` and used pervasively in the compiler. They are absolute positions
//! within the CodeMap, which upon request can be converted to line and column
//! information, source code snippets, etc.
pub use self::MacroFormat::*;
use std::cell::RefCell;
use std::num::ToPrimitive;
use std::ops::{Add, Sub};
use std::rc::Rc;
use libc::c_uint;
use serialize::{Encodable, Decodable, Encoder, Decoder};
pub trait Pos {
fn from_usize(n: usize) -> Self;
fn to_usize(&self) -> usize;
}
/// A byte offset. Keep this small (currently 32-bits), as AST contains
/// a lot of them.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Show)]
pub struct BytePos(pub u32);
/// A character offset. Because of multibyte utf8 characters, a byte offset
/// is not equivalent to a character offset. The CodeMap will convert BytePos
/// values to CharPos values as necessary.
#[derive(Copy, PartialEq, Hash, PartialOrd, Show)]
pub struct CharPos(pub usize);
// FIXME: Lots of boilerplate in these impls, but so far my attempts to fix
// have been unsuccessful
impl Pos for BytePos {
fn from_usize(n: usize) -> BytePos { BytePos(n as u32) }
fn to_usize(&self) -> usize { let BytePos(n) = *self; n as usize }
}
impl Add for BytePos {
type Output = BytePos;
fn add(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() + rhs.to_usize()) as u32)
}
}
impl Sub for BytePos {
type Output = BytePos;
fn sub(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() - rhs.to_usize()) as u32)
}
}
impl Pos for CharPos {
fn from_usize(n: usize) -> CharPos { CharPos(n) }
fn to_usize(&self) -> usize { let CharPos(n) = *self; n }
}
impl Add for CharPos {
type Output = CharPos;
fn add(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() + rhs.to_usize())
}
}
impl Sub for CharPos {
type Output = CharPos;
fn sub(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() - rhs.to_usize())
}
}
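// A minimal sketch of the arithmetic these impls enable (the function is
// illustrative and not part of the original module):
#[allow(dead_code)]
fn pos_arith_demo() {
    let a = BytePos(10);
    let b = BytePos(4);
    assert_eq!(a + b, BytePos(14));
    assert_eq!(a - b, BytePos(6));
    assert_eq!(CharPos(3) + CharPos(2), CharPos(5));
}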
/// Spans represent a region of code, used for error reporting. Positions in spans
/// are *absolute* positions from the beginning of the codemap, not positions
/// relative to FileMaps. Methods on the CodeMap can be used to relate spans back
/// to the original source.
#[derive(Clone, Copy, Show, Hash)]
pub struct Span {
pub lo: BytePos,
pub hi: BytePos,
/// Information about where the macro came from, if this piece of
/// code was created by a macro expansion.
pub expn_id: ExpnId
}
pub const DUMMY_SP: Span = Span { lo: BytePos(0), hi: BytePos(0), expn_id: NO_EXPANSION };
// Generic span to be used for code originating from the command line
pub const COMMAND_LINE_SP: Span = Span { lo: BytePos(0),
hi: BytePos(0),
expn_id: COMMAND_LINE_EXPN };
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show, Copy)]
pub struct Spanned<T> {
pub node: T,
pub span: Span,
}
impl PartialEq for Span {
fn eq(&self, other: &Span) -> bool {
return (*self).lo == (*other).lo && (*self).hi == (*other).hi;
}
fn ne(&self, other: &Span) -> bool { !(*self).eq(other) }
}
impl Eq for Span {}
impl Encodable for Span {
/* Note #1972 -- spans are encoded but not decoded */
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_nil()
}
}
impl Decodable for Span {
fn decode<D: Decoder>(_d: &mut D) -> Result<Span, D::Error> {
Ok(DUMMY_SP)
}
}
pub fn spanned<T>(lo: BytePos, hi: BytePos, t: T) -> Spanned<T> {
respan(mk_sp(lo, hi), t)
}
pub fn respan<T>(sp: Span, t: T) -> Spanned<T> {
Spanned {node: t, span: sp}
}
pub fn dummy_spanned<T>(t: T) -> Spanned<T> {
respan(DUMMY_SP, t)
}
/* assuming that we're not in macro expansion */
pub fn mk_sp(lo: BytePos, hi: BytePos) -> Span {
Span {lo: lo, hi: hi, expn_id: NO_EXPANSION}
}
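// Sketch: composing the helpers above (values are illustrative).
#[allow(dead_code)]
fn spanned_demo() {
    let s = spanned(BytePos(0), BytePos(5), "hello");
    assert_eq!(s.span.lo, BytePos(0));
    assert_eq!(s.node, "hello");
    assert!(dummy_spanned(42i32).span == DUMMY_SP);
}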
/// Return the span itself if it doesn't come from a macro expansion,
/// otherwise return the call site span up to the `enclosing_sp` by
/// following the `expn_info` chain.
pub fn original_sp(cm: &CodeMap, sp: Span, enclosing_sp: Span) -> Span {
let call_site1 = cm.with_expn_info(sp.expn_id, |ei| ei.map(|ei| ei.call_site));
let call_site2 = cm.with_expn_info(enclosing_sp.expn_id, |ei| ei.map(|ei| ei.call_site));
match (call_site1, call_site2) {
(None, _) => sp,
(Some(call_site1), Some(call_site2)) if call_site1 == call_site2 => sp,
(Some(call_site1), _) => original_sp(cm, call_site1, enclosing_sp),
}
}
/// A source code location used for error reporting
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
/// The syntax with which a macro was invoked.
#[derive(Clone, Copy, Hash, Show)]
pub enum MacroFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute,
/// e.g. `format!()`
MacroBang
}
#[derive(Clone, Hash, Show)]
pub struct NameAndSpan {
/// The name of the macro that was invoked to create the thing
/// with this Span.
pub name: String,
/// The format with which the macro was invoked.
pub format: MacroFormat,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
/// Extra information for tracking macro expansion of spans
#[derive(Hash, Show)]
pub struct ExpnInfo {
/// The location of the actual macro invocation, e.g. `let x =
/// foo!();`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the macro and its definition.
///
/// The `callee` of the inner expression in the `call_site`
/// example would point to the `macro_rules! bar { ... }` and that
/// of the `bar!()` invocation would point to the `macro_rules!
/// foo { ... }`.
pub callee: NameAndSpan
}
#[derive(PartialEq, Eq, Clone, Show, Hash, RustcEncodable, RustcDecodable, Copy)]
pub struct ExpnId(u32);
pub const NO_EXPANSION: ExpnId = ExpnId(-1);
// For code appearing from the command line
pub const COMMAND_LINE_EXPN: ExpnId = ExpnId(-2);
impl ExpnId {
pub fn from_llvm_cookie(cookie: c_uint) -> ExpnId {
ExpnId(cookie as u32)
}
pub fn to_llvm_cookie(self) -> i32 {
let ExpnId(cookie) = self;
cookie as i32
}
}
pub type FileName = String;
pub struct FileLines {
pub file: Rc<FileMap>,
pub lines: Vec<usize>
}
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName,
/// The complete source code
pub src: String,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// Locations of lines beginnings in the source code
pub lines: RefCell<Vec<BytePos> >,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar> >,
}
impl FileMap {
/// EFFECT: register a start-of-line offset in the
/// table of line-beginnings.
/// UNCHECKED INVARIANT: these offsets must be added in the right
/// order and must be in the right places; there is shared knowledge
/// about what ends a line between this file and parse.rs
/// WARNING: pos param here is the offset relative to start of CodeMap,
/// and CodeMap will append a newline when adding a filemap without a newline at the end,
/// so the safe way to call this is with value calculated as
/// filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
pub fn next_line(&self, pos: BytePos) {
// the new charpos must be > the last one (or it's the first one).
let mut lines = self.lines.borrow_mut();
let line_len = lines.len();
assert!(line_len == 0 || ((*lines)[line_len - 1] < pos));
lines.push(pos);
}
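    // Usage sketch per the WARNING above (`newline_offset_in_file` is an
    // illustrative name): callers pass CodeMap-absolute positions, e.g.
    //     fm.next_line(fm.start_pos + BytePos(newline_offset_in_file));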
/// get a line from the list of pre-computed line-beginnings
///
pub fn get_line(&self, line_number: usize) -> Option<String> {
let lines = self.lines.borrow();
lines.get(line_number).map(|&line| {
let begin: BytePos = line - self.start_pos;
let begin = begin.to_usize();
let slice = &self.src[begin..];
match slice.find('\n') {
Some(e) => &slice[..e],
None => slice
}.to_string()
})
}
pub fn record_multibyte_char(&self, pos: BytePos, bytes: usize) {
        assert!(bytes >= 2 && bytes <= 4);
let mbc = MultiByteChar {
pos: pos,
bytes: bytes,
};
self.multibyte_chars.borrow_mut().push(mbc);
}
pub fn is_real_file(&self) -> bool {
!(self.name.starts_with("<") &&
self.name.ends_with(">"))
}
}
pub struct CodeMap {
pub files: RefCell<Vec<Rc<FileMap>>>,
expansions: RefCell<Vec<ExpnInfo>>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
}
}
pub fn new_filemap(&self, filename: FileName, src: String) -> Rc<FileMap> {
let mut files = self.files.borrow_mut();
let start_pos = match files.last() {
None => 0,
Some(last) => last.start_pos.to_usize() + last.src.len(),
};
// Remove utf-8 BOM if any.
// FIXME #12884: no efficient/safe way to remove from the start of a string
// and reuse the allocation.
let mut src = if src.starts_with("\u{feff}") {
String::from_str(&src[3..])
} else {
String::from_str(&src[])
};
// Append '\n' in case it's not already there.
// This is a workaround to prevent CodeMap.lookup_filemap_idx from accidentally
// overflowing into the next filemap in case the last byte of span is also the last
// byte of filemap, which leads to incorrect results from CodeMap.span_to_*.
if src.len() > 0 && !src.ends_with("\n") {
src.push('\n');
}
let filemap = Rc::new(FileMap {
name: filename,
src: src.to_string(),
start_pos: Pos::from_usize(start_pos),
lines: RefCell::new(Vec::new()),
multibyte_chars: RefCell::new(Vec::new()),
});
files.push(filemap.clone());
filemap
}
pub fn mk_substr_filename(&self, sp: Span) -> String {
let pos = self.lookup_char_pos(sp.lo);
(format!("<{}:{}:{}>",
pos.file.name,
pos.line,
pos.col.to_usize() + 1)).to_string()
}
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
self.lookup_pos(pos)
}
pub fn lookup_char_pos_adj(&self, pos: BytePos) -> LocWithOpt {
let loc = self.lookup_char_pos(pos);
LocWithOpt {
filename: loc.file.name.to_string(),
line: loc.line,
col: loc.col,
file: Some(loc.file)
}
}
pub fn span_to_string(&self, sp: Span) -> String {
if self.files.borrow().len() == 0 && sp == DUMMY_SP {
return "no-location".to_string();
}
let lo = self.lookup_char_pos_adj(sp.lo);
let hi = self.lookup_char_pos_adj(sp.hi);
return (format!("{}:{}:{}: {}:{}",
lo.filename,
lo.line,
lo.col.to_usize() + 1,
hi.line,
hi.col.to_usize() + 1)).to_string()
}
pub fn span_to_filename(&self, sp: Span) -> FileName {
self.lookup_char_pos(sp.lo).file.name.to_string()
}
pub fn span_to_lines(&self, sp: Span) -> FileLines {
let lo = self.lookup_char_pos(sp.lo);
let hi = self.lookup_char_pos(sp.hi);
let mut lines = Vec::new();
for i in range(lo.line - 1us, hi.line as usize) {
lines.push(i);
};
FileLines {file: lo.file, lines: lines}
}
pub fn span_to_snippet(&self, sp: Span) -> Option<String> {
let begin = self.lookup_byte_offset(sp.lo);
let end = self.lookup_byte_offset(sp.hi);
// FIXME #8256: this used to be an assert but whatever precondition
// it's testing isn't true for all spans in the AST, so to allow the
// caller to not have to panic (and it can't catch it since the CodeMap
// isn't sendable), return None
if begin.fm.start_pos != end.fm.start_pos {
None
} else {
Some((&begin.fm.src[begin.pos.to_usize()..end.pos.to_usize()]).to_string())
}
}
pub fn get_filemap(&self, filename: &str) -> Rc<FileMap>
|
pub fn lookup_byte_offset(&self, bpos: BytePos) -> FileMapAndBytePos {
let idx = self.lookup_filemap_idx(bpos);
let fm = (*self.files.borrow())[idx].clone();
let offset = bpos - fm.start_pos;
FileMapAndBytePos {fm: fm, pos: offset}
}
    /// Converts an absolute BytePos to a CharPos relative to the containing filemap.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_filemap_idx(bpos);
let files = self.files.borrow();
let map = &(*files)[idx];
// The number of extra bytes due to multibyte chars in the FileMap
let mut total_extra_bytes = 0;
for mbc in map.multibyte_chars.borrow().iter() {
debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
if mbc.pos < bpos {
// every character is at least one byte, so we only
// count the actual extra bytes.
total_extra_bytes += mbc.bytes - 1;
// We should never see a byte position in the middle of a
// character
assert!(bpos.to_usize() >= mbc.pos.to_usize() + mbc.bytes);
} else {
break;
}
}
assert!(map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize());
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes)
}
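    // Worked example (mirrors test t6 below): with a 3-byte '€' occupying
    // bytes 3..6 of a filemap starting at BytePos(0), BytePos(6) accumulates
    // total_extra_bytes = 2 and maps to CharPos(6 - 0 - 2) = CharPos(4).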
fn lookup_filemap_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
let files = &*files;
let len = files.len();
let mut a = 0us;
let mut b = len;
while b - a > 1us {
let m = (a + b) / 2us;
if files[m].start_pos > pos {
b = m;
} else {
a = m;
}
}
// There can be filemaps with length 0. These have the same start_pos as
// the previous filemap, but are not the filemaps we want (because they
// are length 0, they cannot contain what we are looking for). So,
// rewind until we find a useful filemap.
loop {
let lines = files[a].lines.borrow();
let lines = lines;
if lines.len() > 0 {
break;
}
if a == 0 {
panic!("position {} does not resolve to a source location",
pos.to_usize());
}
a -= 1;
}
if a >= len {
panic!("position {} does not resolve to a source location",
pos.to_usize())
}
return a;
}
fn lookup_line(&self, pos: BytePos) -> FileMapAndLine {
let idx = self.lookup_filemap_idx(pos);
let files = self.files.borrow();
let f = (*files)[idx].clone();
let mut a = 0us;
{
let lines = f.lines.borrow();
let mut b = lines.len();
while b - a > 1us {
let m = (a + b) / 2us;
if (*lines)[m] > pos { b = m; } else { a = m; }
}
}
FileMapAndLine {fm: f, line: a}
}
fn lookup_pos(&self, pos: BytePos) -> Loc {
let FileMapAndLine {fm: f, line: a} = self.lookup_line(pos);
let line = a + 1us; // Line numbers start at 1
let chpos = self.bytepos_to_file_charpos(pos);
let linebpos = (*f.lines.borrow())[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
debug!("byte pos {:?} is on the line at byte pos {:?}",
pos, linebpos);
debug!("char pos {:?} is on the line at char pos {:?}",
chpos, linechpos);
debug!("byte is on line: {}", line);
assert!(chpos >= linechpos);
Loc {
file: f,
line: line,
col: chpos - linechpos
}
}
pub fn record_expansion(&self, expn_info: ExpnInfo) -> ExpnId {
let mut expansions = self.expansions.borrow_mut();
expansions.push(expn_info);
ExpnId(expansions.len().to_u32().expect("too many ExpnInfo's!") - 1)
}
pub fn with_expn_info<T, F>(&self, id: ExpnId, f: F) -> T where
F: FnOnce(Option<&ExpnInfo>) -> T,
{
match id {
NO_EXPANSION => f(None),
ExpnId(i) => f(Some(&(*self.expansions.borrow())[i as usize]))
}
}
/// Check if a span is "internal" to a macro. This means that it is entirely generated by a
/// macro expansion and contains no code that was passed in as an argument.
pub fn span_is_internal(&self, span: Span) -> bool {
// first, check if the given expression was generated by a macro or not
// we need to go back the expn_info tree to check only the arguments
// of the initial macro call, not the nested ones.
let mut is_internal = false;
let mut expnid = span.expn_id;
while self.with_expn_info(expnid, |expninfo| {
match expninfo {
Some(ref info) => {
// save the parent expn_id for next loop iteration
expnid = info.call_site.expn_id;
if info.callee.name == "format_args" {
// This is a hack because the format_args builtin calls unstable APIs.
// I spent like 6 hours trying to solve this more generally but am stupid.
is_internal = true;
false
} else if info.callee.span.is_none() {
// it's a compiler built-in, we *really* don't want to mess with it
// so we skip it, unless it was called by a regular macro, in which case
// we will handle the caller macro next turn
is_internal = true;
true // continue looping
} else {
// was this expression from the current macro arguments ?
is_internal = !( span.lo > info.call_site.lo &&
span.hi < info.call_site.hi );
true // continue looping
}
},
_ => false // stop looping
}
}) { /* empty while loop body */ }
return is_internal;
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn t1 () {
let cm = CodeMap::new();
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
fm.next_line(BytePos(0));
assert_eq!(fm.get_line(0), Some("first line.".to_string()));
// TESTING BROKEN BEHAVIOR:
fm.next_line(BytePos(10));
assert_eq!(fm.get_line(1), Some(".".to_string()));
}
#[test]
#[should_fail]
fn t2 () {
let cm = CodeMap::new();
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
// TESTING *REALLY* BROKEN BEHAVIOR:
fm.next_line(BytePos(0));
fm.next_line(BytePos(10));
fm.next_line(BytePos(2));
}
fn init_code_map() -> CodeMap {
let cm = CodeMap::new();
let fm1 = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
let fm2 = cm.new_filemap("empty.rs".to_string(),
"".to_string());
let fm3 = cm.new_filemap("blork2.rs".to_string(),
"first line.\nsecond line".to_string());
fm1.next_line(BytePos(0));
fm1.next_line(BytePos(12));
fm2.next_line(BytePos(24));
fm3.next_line(BytePos(24));
fm3.next_line(BytePos(34));
cm
}
#[test]
fn t3() {
// Test lookup_byte_offset
let cm = init_code_map();
let fmabp1 = cm.lookup_byte_offset(BytePos(22));
assert_eq!(fmabp1.fm.name, "blork.rs");
assert_eq!(fmabp1.pos, BytePos(22));
let fmabp2 = cm.lookup_byte_offset(BytePos(24));
assert_eq!(fmabp2.fm.name, "blork2.rs");
assert_eq!(fmabp2.pos, BytePos(0));
}
#[test]
fn t4() {
// Test bytepos_to_file_charpos
let cm = init_code_map();
let cp1 = cm.bytepos_to_file_charpos(BytePos(22));
assert_eq!(cp1, CharPos(22));
let cp2 = cm.bytepos_to_file_charpos(BytePos(24));
assert_eq!(cp2, CharPos(0));
}
#[test]
fn t5() {
// Test zero-length filemaps.
let cm = init_code_map();
let loc1 = cm.lookup_char_pos(BytePos(22));
assert_eq!(loc1.file.name, "blork.rs");
assert_eq!(loc1.line, 2);
assert_eq!(loc1.col, CharPos(10));
let loc2 = cm.lookup_char_pos(BytePos(24));
assert_eq!(loc2.file.name, "blork2.rs");
assert_eq!(loc2.line, 1);
assert_eq!(loc2.col, CharPos(0));
}
fn init_code_map_mbc() -> CodeMap {
let cm = CodeMap::new();
// € is a three byte utf8 char.
let fm1 =
cm.new_filemap("blork.rs".to_string(),
"fir€st €€€€ line.\nsecond line".to_string());
let fm2 = cm.new_filemap("blork2.rs".to_string(),
"first line€€.\n€ second line".to_string());
fm1.next_line(BytePos(0));
fm1.next_line(BytePos(22));
fm2.next_line(BytePos(40));
fm2.next_line(BytePos(58));
fm1.record_multibyte_char(BytePos(3), 3);
fm1.record_multibyte_char(BytePos(9), 3);
fm1.record_multibyte_char(BytePos(12), 3);
fm1.record_multibyte_char(BytePos(15), 3);
fm1.record_multibyte_char(BytePos(18), 3);
fm2.record_multibyte_char(BytePos(50), 3);
fm2.record_multibyte_char(BytePos(53), 3);
fm2.record_multibyte_char(BytePos(58), 3);
cm
}
#[test]
fn t6() {
// Test bytepos_to_file_charpos in the presence of multi-byte chars
let cm = init_code_map_mbc();
let cp1 = cm.bytepos_to_file_charpos(BytePos(3));
assert_eq!(cp1, CharPos(3));
let cp2 = cm.bytepos_to_file_charpos(BytePos(6));
assert_eq!(cp2, CharPos(4));
let cp3 = cm.bytepos_to_file_charpos(BytePos(56));
assert_eq!(cp3, CharPos(12));
let cp4 = cm.bytepos_to_file_charpos(BytePos(61));
assert_eq!(cp4, CharPos(15));
}
#[test]
fn t7() {
// Test span_to_lines for a span ending at the end of filemap
let cm = init_code_map();
let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let file_lines = cm.span_to_lines(span);
assert_eq!(file_lines.file.name, "blork.rs");
assert_eq!(file_lines.lines.len(), 1);
assert_eq!(file_lines.lines[0], 1us);
}
#[test]
fn t8() {
// Test span_to_snippet for a span ending at the end of filemap
let cm = init_code_map();
let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let snippet = cm.span_to_snippet(span);
assert_eq!(snippet, Some("second line".to_string()));
}
#[test]
fn t9() {
// Test span_to_str for a span ending at the end of filemap
let cm = init_code_map();
let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let sstr = cm.span_to_string(span);
assert_eq!(sstr, "blork.rs:2:1: 2:12");
}
}
|
{
for fm in self.files.borrow().iter() {
if filename == fm.name {
return fm.clone();
}
}
panic!("asking for {} which we don't know about", filename);
}
|
identifier_body
|
codemap.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// ignore-lexer-test FIXME #15679
//! The CodeMap tracks all the source code used within a single crate, mapping
//! from integer byte positions to the original source code location. Each bit
//! of source parsed during crate parsing (typically files, in-memory strings,
//! or various bits of macro expansion) covers a continuous range of bytes in the
//! CodeMap and is represented by a FileMap. Byte positions are stored in
//! `spans` and used pervasively in the compiler. They are absolute positions
//! within the CodeMap, which upon request can be converted to line and column
//! information, source code snippets, etc.
pub use self::MacroFormat::*;
use std::cell::RefCell;
use std::num::ToPrimitive;
use std::ops::{Add, Sub};
use std::rc::Rc;
use libc::c_uint;
use serialize::{Encodable, Decodable, Encoder, Decoder};
pub trait Pos {
fn from_usize(n: usize) -> Self;
fn to_usize(&self) -> usize;
}
/// A byte offset. Keep this small (currently 32-bits), as AST contains
/// a lot of them.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Show)]
pub struct BytePos(pub u32);
/// A character offset. Because of multibyte utf8 characters, a byte offset
/// is not equivalent to a character offset. The CodeMap will convert BytePos
/// values to CharPos values as necessary.
#[derive(Copy, PartialEq, Hash, PartialOrd, Show)]
pub struct CharPos(pub usize);
// FIXME: Lots of boilerplate in these impls, but so far my attempts to fix
// have been unsuccessful
impl Pos for BytePos {
fn from_usize(n: usize) -> BytePos { BytePos(n as u32) }
fn to_usize(&self) -> usize { let BytePos(n) = *self; n as usize }
}
impl Add for BytePos {
type Output = BytePos;
fn add(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() + rhs.to_usize()) as u32)
}
}
impl Sub for BytePos {
type Output = BytePos;
fn sub(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() - rhs.to_usize()) as u32)
}
}
impl Pos for CharPos {
fn from_usize(n: usize) -> CharPos { CharPos(n) }
fn to_usize(&self) -> usize { let CharPos(n) = *self; n }
}
impl Add for CharPos {
type Output = CharPos;
fn add(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() + rhs.to_usize())
}
}
impl Sub for CharPos {
type Output = CharPos;
fn sub(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() - rhs.to_usize())
}
}
/// Spans represent a region of code, used for error reporting. Positions in spans
/// are *absolute* positions from the beginning of the codemap, not positions
/// relative to FileMaps. Methods on the CodeMap can be used to relate spans back
/// to the original source.
#[derive(Clone, Copy, Show, Hash)]
pub struct Span {
pub lo: BytePos,
pub hi: BytePos,
/// Information about where the macro came from, if this piece of
/// code was created by a macro expansion.
pub expn_id: ExpnId
}
pub const DUMMY_SP: Span = Span { lo: BytePos(0), hi: BytePos(0), expn_id: NO_EXPANSION };
// Generic span to be used for code originating from the command line
pub const COMMAND_LINE_SP: Span = Span { lo: BytePos(0),
hi: BytePos(0),
expn_id: COMMAND_LINE_EXPN };
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show, Copy)]
pub struct Spanned<T> {
pub node: T,
pub span: Span,
}
impl PartialEq for Span {
fn eq(&self, other: &Span) -> bool {
return (*self).lo == (*other).lo && (*self).hi == (*other).hi;
}
fn ne(&self, other: &Span) -> bool { !(*self).eq(other) }
}
impl Eq for Span {}
impl Encodable for Span {
/* Note #1972 -- spans are encoded but not decoded */
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_nil()
}
}
impl Decodable for Span {
fn
|
<D: Decoder>(_d: &mut D) -> Result<Span, D::Error> {
Ok(DUMMY_SP)
}
}
pub fn spanned<T>(lo: BytePos, hi: BytePos, t: T) -> Spanned<T> {
respan(mk_sp(lo, hi), t)
}
pub fn respan<T>(sp: Span, t: T) -> Spanned<T> {
Spanned {node: t, span: sp}
}
pub fn dummy_spanned<T>(t: T) -> Spanned<T> {
respan(DUMMY_SP, t)
}
/* assuming that we're not in macro expansion */
pub fn mk_sp(lo: BytePos, hi: BytePos) -> Span {
Span {lo: lo, hi: hi, expn_id: NO_EXPANSION}
}
/// Return the span itself if it doesn't come from a macro expansion,
/// otherwise return the call site span up to the `enclosing_sp` by
/// following the `expn_info` chain.
pub fn original_sp(cm: &CodeMap, sp: Span, enclosing_sp: Span) -> Span {
let call_site1 = cm.with_expn_info(sp.expn_id, |ei| ei.map(|ei| ei.call_site));
let call_site2 = cm.with_expn_info(enclosing_sp.expn_id, |ei| ei.map(|ei| ei.call_site));
match (call_site1, call_site2) {
(None, _) => sp,
(Some(call_site1), Some(call_site2)) if call_site1 == call_site2 => sp,
(Some(call_site1), _) => original_sp(cm, call_site1, enclosing_sp),
}
}
/// A source code location used for error reporting
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
/// The syntax with which a macro was invoked.
#[derive(Clone, Copy, Hash, Show)]
pub enum MacroFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute,
/// e.g. `format!()`
MacroBang
}
#[derive(Clone, Hash, Show)]
pub struct NameAndSpan {
/// The name of the macro that was invoked to create the thing
/// with this Span.
pub name: String,
/// The format with which the macro was invoked.
pub format: MacroFormat,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
/// Extra information for tracking macro expansion of spans
#[derive(Hash, Show)]
pub struct ExpnInfo {
/// The location of the actual macro invocation, e.g. `let x =
/// foo!();`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the macro and its definition.
///
/// The `callee` of the inner expression in the `call_site`
/// example would point to the `macro_rules! bar { ... }` and that
/// of the `bar!()` invocation would point to the `macro_rules!
/// foo { ... }`.
pub callee: NameAndSpan
}
#[derive(PartialEq, Eq, Clone, Show, Hash, RustcEncodable, RustcDecodable, Copy)]
pub struct ExpnId(u32);
pub const NO_EXPANSION: ExpnId = ExpnId(-1);
// For code appearing from the command line
pub const COMMAND_LINE_EXPN: ExpnId = ExpnId(-2);
impl ExpnId {
pub fn from_llvm_cookie(cookie: c_uint) -> ExpnId {
ExpnId(cookie as u32)
}
pub fn to_llvm_cookie(self) -> i32 {
let ExpnId(cookie) = self;
cookie as i32
}
}
pub type FileName = String;
pub struct FileLines {
pub file: Rc<FileMap>,
pub lines: Vec<usize>
}
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName,
/// The complete source code
pub src: String,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// Locations of lines beginnings in the source code
pub lines: RefCell<Vec<BytePos> >,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar> >,
}
impl FileMap {
/// EFFECT: register a start-of-line offset in the
/// table of line-beginnings.
/// UNCHECKED INVARIANT: these offsets must be added in the right
/// order and must be in the right places; there is shared knowledge
/// about what ends a line between this file and parse.rs
/// WARNING: pos param here is the offset relative to start of CodeMap,
/// and CodeMap will append a newline when adding a filemap without a newline at the end,
/// so the safe way to call this is with value calculated as
/// filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
pub fn next_line(&self, pos: BytePos) {
// the new charpos must be > the last one (or it's the first one).
let mut lines = self.lines.borrow_mut();
let line_len = lines.len();
assert!(line_len == 0 || ((*lines)[line_len - 1] < pos));
lines.push(pos);
}
/// get a line from the list of pre-computed line-beginnings
///
pub fn get_line(&self, line_number: usize) -> Option<String> {
let lines = self.lines.borrow();
lines.get(line_number).map(|&line| {
let begin: BytePos = line - self.start_pos;
let begin = begin.to_usize();
let slice = &self.src[begin..];
match slice.find('\n') {
Some(e) => &slice[..e],
None => slice
}.to_string()
})
}
pub fn record_multibyte_char(&self, pos: BytePos, bytes: usize) {
        assert!(bytes >= 2 && bytes <= 4);
let mbc = MultiByteChar {
pos: pos,
bytes: bytes,
};
self.multibyte_chars.borrow_mut().push(mbc);
}
pub fn is_real_file(&self) -> bool {
!(self.name.starts_with("<") &&
self.name.ends_with(">"))
}
}
pub struct CodeMap {
pub files: RefCell<Vec<Rc<FileMap>>>,
expansions: RefCell<Vec<ExpnInfo>>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
}
}
pub fn new_filemap(&self, filename: FileName, src: String) -> Rc<FileMap> {
let mut files = self.files.borrow_mut();
let start_pos = match files.last() {
None => 0,
Some(last) => last.start_pos.to_usize() + last.src.len(),
};
// Remove utf-8 BOM if any.
// FIXME #12884: no efficient/safe way to remove from the start of a string
// and reuse the allocation.
let mut src = if src.starts_with("\u{feff}") {
String::from_str(&src[3..])
} else {
String::from_str(&src[])
};
// Append '\n' in case it's not already there.
// This is a workaround to prevent CodeMap.lookup_filemap_idx from accidentally
// overflowing into the next filemap in case the last byte of span is also the last
// byte of filemap, which leads to incorrect results from CodeMap.span_to_*.
if src.len() > 0 && !src.ends_with("\n") {
src.push('\n');
}
let filemap = Rc::new(FileMap {
name: filename,
src: src.to_string(),
start_pos: Pos::from_usize(start_pos),
lines: RefCell::new(Vec::new()),
multibyte_chars: RefCell::new(Vec::new()),
});
files.push(filemap.clone());
filemap
}
pub fn mk_substr_filename(&self, sp: Span) -> String {
let pos = self.lookup_char_pos(sp.lo);
(format!("<{}:{}:{}>",
pos.file.name,
pos.line,
pos.col.to_usize() + 1)).to_string()
}
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
self.lookup_pos(pos)
}
pub fn lookup_char_pos_adj(&self, pos: BytePos) -> LocWithOpt {
let loc = self.lookup_char_pos(pos);
LocWithOpt {
filename: loc.file.name.to_string(),
line: loc.line,
col: loc.col,
file: Some(loc.file)
}
}
pub fn span_to_string(&self, sp: Span) -> String {
if self.files.borrow().len() == 0 && sp == DUMMY_SP {
return "no-location".to_string();
}
let lo = self.lookup_char_pos_adj(sp.lo);
let hi = self.lookup_char_pos_adj(sp.hi);
return (format!("{}:{}:{}: {}:{}",
lo.filename,
lo.line,
lo.col.to_usize() + 1,
hi.line,
hi.col.to_usize() + 1)).to_string()
}
pub fn span_to_filename(&self, sp: Span) -> FileName {
self.lookup_char_pos(sp.lo).file.name.to_string()
}
pub fn span_to_lines(&self, sp: Span) -> FileLines {
let lo = self.lookup_char_pos(sp.lo);
let hi = self.lookup_char_pos(sp.hi);
let mut lines = Vec::new();
for i in range(lo.line - 1us, hi.line as usize) {
lines.push(i);
};
FileLines {file: lo.file, lines: lines}
}
pub fn span_to_snippet(&self, sp: Span) -> Option<String> {
let begin = self.lookup_byte_offset(sp.lo);
let end = self.lookup_byte_offset(sp.hi);
// FIXME #8256: this used to be an assert but whatever precondition
// it's testing isn't true for all spans in the AST, so to allow the
// caller to not have to panic (and it can't catch it since the CodeMap
// isn't sendable), return None
if begin.fm.start_pos != end.fm.start_pos {
None
} else {
Some((&begin.fm.src[begin.pos.to_usize()..end.pos.to_usize()]).to_string())
}
}
pub fn get_filemap(&self, filename: &str) -> Rc<FileMap> {
for fm in self.files.borrow().iter() {
if filename == fm.name {
return fm.clone();
}
}
panic!("asking for {} which we don't know about", filename);
}
pub fn lookup_byte_offset(&self, bpos: BytePos) -> FileMapAndBytePos {
let idx = self.lookup_filemap_idx(bpos);
let fm = (*self.files.borrow())[idx].clone();
let offset = bpos - fm.start_pos;
FileMapAndBytePos {fm: fm, pos: offset}
}
    /// Converts an absolute BytePos to a CharPos relative to the containing filemap.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_filemap_idx(bpos);
let files = self.files.borrow();
let map = &(*files)[idx];
// The number of extra bytes due to multibyte chars in the FileMap
let mut total_extra_bytes = 0;
for mbc in map.multibyte_chars.borrow().iter() {
debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
if mbc.pos < bpos {
// every character is at least one byte, so we only
// count the actual extra bytes.
total_extra_bytes += mbc.bytes - 1;
// We should never see a byte position in the middle of a
// character
assert!(bpos.to_usize() >= mbc.pos.to_usize() + mbc.bytes);
} else {
break;
}
}
assert!(map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize());
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes)
}
fn lookup_filemap_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
let files = &*files;
let len = files.len();
let mut a = 0us;
let mut b = len;
while b - a > 1us {
let m = (a + b) / 2us;
if files[m].start_pos > pos {
b = m;
} else {
a = m;
}
}
// There can be filemaps with length 0. These have the same start_pos as
// the previous filemap, but are not the filemaps we want (because they
// are length 0, they cannot contain what we are looking for). So,
// rewind until we find a useful filemap.
loop {
let lines = files[a].lines.borrow();
let lines = lines;
if lines.len() > 0 {
break;
}
if a == 0 {
panic!("position {} does not resolve to a source location",
pos.to_usize());
}
a -= 1;
}
if a >= len {
panic!("position {} does not resolve to a source location",
pos.to_usize())
}
return a;
}
fn lookup_line(&self, pos: BytePos) -> FileMapAndLine {
let idx = self.lookup_filemap_idx(pos);
let files = self.files.borrow();
let f = (*files)[idx].clone();
let mut a = 0us;
{
let lines = f.lines.borrow();
let mut b = lines.len();
while b - a > 1us {
let m = (a + b) / 2us;
if (*lines)[m] > pos { b = m; } else { a = m; }
}
}
FileMapAndLine {fm: f, line: a}
}
fn lookup_pos(&self, pos: BytePos) -> Loc {
let FileMapAndLine {fm: f, line: a} = self.lookup_line(pos);
let line = a + 1us; // Line numbers start at 1
let chpos = self.bytepos_to_file_charpos(pos);
let linebpos = (*f.lines.borrow())[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
debug!("byte pos {:?} is on the line at byte pos {:?}",
pos, linebpos);
debug!("char pos {:?} is on the line at char pos {:?}",
chpos, linechpos);
debug!("byte is on line: {}", line);
assert!(chpos >= linechpos);
Loc {
file: f,
line: line,
col: chpos - linechpos
}
}
pub fn record_expansion(&self, expn_info: ExpnInfo) -> ExpnId {
let mut expansions = self.expansions.borrow_mut();
expansions.push(expn_info);
ExpnId(expansions.len().to_u32().expect("too many ExpnInfo's!") - 1)
}
pub fn with_expn_info<T, F>(&self, id: ExpnId, f: F) -> T where
F: FnOnce(Option<&ExpnInfo>) -> T,
{
match id {
NO_EXPANSION => f(None),
ExpnId(i) => f(Some(&(*self.expansions.borrow())[i as usize]))
}
}
/// Check if a span is "internal" to a macro. This means that it is entirely generated by a
/// macro expansion and contains no code that was passed in as an argument.
pub fn span_is_internal(&self, span: Span) -> bool {
// first, check if the given expression was generated by a macro or not
// we need to go back the expn_info tree to check only the arguments
// of the initial macro call, not the nested ones.
let mut is_internal = false;
let mut expnid = span.expn_id;
while self.with_expn_info(expnid, |expninfo| {
match expninfo {
Some(ref info) => {
// save the parent expn_id for next loop iteration
expnid = info.call_site.expn_id;
if info.callee.name == "format_args" {
// This is a hack because the format_args builtin calls unstable APIs.
// I spent like 6 hours trying to solve this more generally but am stupid.
is_internal = true;
false
} else if info.callee.span.is_none() {
// it's a compiler built-in, we *really* don't want to mess with it
// so we skip it, unless it was called by a regular macro, in which case
// we will handle the caller macro next turn
is_internal = true;
true // continue looping
} else {
// was this expression from the current macro arguments ?
is_internal = !( span.lo > info.call_site.lo &&
span.hi < info.call_site.hi );
true // continue looping
}
},
_ => false // stop looping
}
}) { /* empty while loop body */ }
return is_internal;
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn t1 () {
let cm = CodeMap::new();
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
fm.next_line(BytePos(0));
assert_eq!(fm.get_line(0), Some("first line.".to_string()));
// TESTING BROKEN BEHAVIOR:
fm.next_line(BytePos(10));
assert_eq!(fm.get_line(1), Some(".".to_string()));
}
#[test]
#[should_fail]
fn t2 () {
let cm = CodeMap::new();
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
// TESTING *REALLY* BROKEN BEHAVIOR:
fm.next_line(BytePos(0));
fm.next_line(BytePos(10));
fm.next_line(BytePos(2));
}
fn init_code_map() -> CodeMap {
let cm = CodeMap::new();
let fm1 = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
let fm2 = cm.new_filemap("empty.rs".to_string(),
"".to_string());
let fm3 = cm.new_filemap("blork2.rs".to_string(),
"first line.\nsecond line".to_string());
fm1.next_line(BytePos(0));
fm1.next_line(BytePos(12));
fm2.next_line(BytePos(24));
fm3.next_line(BytePos(24));
fm3.next_line(BytePos(34));
cm
}
#[test]
fn t3() {
// Test lookup_byte_offset
let cm = init_code_map();
let fmabp1 = cm.lookup_byte_offset(BytePos(22));
assert_eq!(fmabp1.fm.name, "blork.rs");
assert_eq!(fmabp1.pos, BytePos(22));
let fmabp2 = cm.lookup_byte_offset(BytePos(24));
assert_eq!(fmabp2.fm.name, "blork2.rs");
assert_eq!(fmabp2.pos, BytePos(0));
}
#[test]
fn t4() {
// Test bytepos_to_file_charpos
let cm = init_code_map();
let cp1 = cm.bytepos_to_file_charpos(BytePos(22));
assert_eq!(cp1, CharPos(22));
let cp2 = cm.bytepos_to_file_charpos(BytePos(24));
assert_eq!(cp2, CharPos(0));
}
#[test]
fn t5() {
// Test zero-length filemaps.
let cm = init_code_map();
let loc1 = cm.lookup_char_pos(BytePos(22));
assert_eq!(loc1.file.name, "blork.rs");
assert_eq!(loc1.line, 2);
assert_eq!(loc1.col, CharPos(10));
let loc2 = cm.lookup_char_pos(BytePos(24));
assert_eq!(loc2.file.name, "blork2.rs");
assert_eq!(loc2.line, 1);
assert_eq!(loc2.col, CharPos(0));
}
fn init_code_map_mbc() -> CodeMap {
let cm = CodeMap::new();
// € is a three byte utf8 char.
let fm1 =
cm.new_filemap("blork.rs".to_string(),
"fir€st €€€€ line.\nsecond line".to_string());
let fm2 = cm.new_filemap("blork2.rs".to_string(),
"first line€€.\n€ second line".to_string());
fm1.next_line(BytePos(0));
fm1.next_line(BytePos(22));
fm2.next_line(BytePos(40));
fm2.next_line(BytePos(58));
fm1.record_multibyte_char(BytePos(3), 3);
fm1.record_multibyte_char(BytePos(9), 3);
fm1.record_multibyte_char(BytePos(12), 3);
fm1.record_multibyte_char(BytePos(15), 3);
fm1.record_multibyte_char(BytePos(18), 3);
fm2.record_multibyte_char(BytePos(50), 3);
fm2.record_multibyte_char(BytePos(53), 3);
fm2.record_multibyte_char(BytePos(58), 3);
cm
}
#[test]
fn t6() {
// Test bytepos_to_file_charpos in the presence of multi-byte chars
let cm = init_code_map_mbc();
let cp1 = cm.bytepos_to_file_charpos(BytePos(3));
assert_eq!(cp1, CharPos(3));
let cp2 = cm.bytepos_to_file_charpos(BytePos(6));
assert_eq!(cp2, CharPos(4));
let cp3 = cm.bytepos_to_file_charpos(BytePos(56));
assert_eq!(cp3, CharPos(12));
let cp4 = cm.bytepos_to_file_charpos(BytePos(61));
assert_eq!(cp4, CharPos(15));
}
#[test]
fn t7() {
// Test span_to_lines for a span ending at the end of filemap
let cm = init_code_map();
let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let file_lines = cm.span_to_lines(span);
assert_eq!(file_lines.file.name, "blork.rs");
assert_eq!(file_lines.lines.len(), 1);
assert_eq!(file_lines.lines[0], 1us);
}
#[test]
fn t8() {
// Test span_to_snippet for a span ending at the end of filemap
let cm = init_code_map();
let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let snippet = cm.span_to_snippet(span);
assert_eq!(snippet, Some("second line".to_string()));
}
#[test]
fn t9() {
// Test span_to_str for a span ending at the end of filemap
let cm = init_code_map();
let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let sstr = cm.span_to_string(span);
assert_eq!(sstr, "blork.rs:2:1: 2:12");
}
}
|
decode
|
identifier_name
|
codemap.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// ignore-lexer-test FIXME #15679
//! The CodeMap tracks all the source code used within a single crate, mapping
//! from integer byte positions to the original source code location. Each bit
//! of source parsed during crate parsing (typically files, in-memory strings,
//! or various bits of macro expansion) covers a contiguous range of bytes in the
//! CodeMap and is represented by a FileMap. Byte positions are stored in
//! `spans` and used pervasively in the compiler. They are absolute positions
//! within the CodeMap, which upon request can be converted to line and column
//! information, source code snippets, etc.
pub use self::MacroFormat::*;
use std::cell::RefCell;
use std::num::ToPrimitive;
use std::ops::{Add, Sub};
use std::rc::Rc;
use libc::c_uint;
use serialize::{Encodable, Decodable, Encoder, Decoder};
pub trait Pos {
fn from_usize(n: usize) -> Self;
fn to_usize(&self) -> usize;
}
/// A byte offset. Keep this small (currently 32-bits), as AST contains
/// a lot of them.
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Show)]
pub struct BytePos(pub u32);
/// A character offset. Because of multibyte utf8 characters, a byte offset
/// is not equivalent to a character offset. The CodeMap will convert BytePos
/// values to CharPos values as necessary.
#[derive(Copy, PartialEq, Hash, PartialOrd, Show)]
pub struct CharPos(pub usize);
// FIXME: Lots of boilerplate in these impls, but so far my attempts to fix
// have been unsuccessful
impl Pos for BytePos {
fn from_usize(n: usize) -> BytePos { BytePos(n as u32) }
fn to_usize(&self) -> usize { let BytePos(n) = *self; n as usize }
}
impl Add for BytePos {
type Output = BytePos;
fn add(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() + rhs.to_usize()) as u32)
}
}
impl Sub for BytePos {
type Output = BytePos;
fn sub(self, rhs: BytePos) -> BytePos {
BytePos((self.to_usize() - rhs.to_usize()) as u32)
}
}
impl Pos for CharPos {
fn from_usize(n: usize) -> CharPos { CharPos(n) }
fn to_usize(&self) -> usize { let CharPos(n) = *self; n }
}
impl Add for CharPos {
type Output = CharPos;
fn add(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() + rhs.to_usize())
}
}
impl Sub for CharPos {
type Output = CharPos;
fn sub(self, rhs: CharPos) -> CharPos {
CharPos(self.to_usize() - rhs.to_usize())
}
}
/// Spans represent a region of code, used for error reporting. Positions in spans
/// are *absolute* positions from the beginning of the codemap, not positions
/// relative to FileMaps. Methods on the CodeMap can be used to relate spans back
/// to the original source.
#[derive(Clone, Copy, Show, Hash)]
pub struct Span {
pub lo: BytePos,
pub hi: BytePos,
/// Information about where the macro came from, if this piece of
/// code was created by a macro expansion.
pub expn_id: ExpnId
}
pub const DUMMY_SP: Span = Span { lo: BytePos(0), hi: BytePos(0), expn_id: NO_EXPANSION };
// Generic span to be used for code originating from the command line
pub const COMMAND_LINE_SP: Span = Span { lo: BytePos(0),
hi: BytePos(0),
expn_id: COMMAND_LINE_EXPN };
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Show, Copy)]
pub struct Spanned<T> {
pub node: T,
pub span: Span,
}
impl PartialEq for Span {
fn eq(&self, other: &Span) -> bool {
return (*self).lo == (*other).lo && (*self).hi == (*other).hi;
}
fn ne(&self, other: &Span) -> bool { !(*self).eq(other) }
}
impl Eq for Span {}
impl Encodable for Span {
/* Note #1972 -- spans are encoded but not decoded */
fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
s.emit_nil()
}
}
impl Decodable for Span {
fn decode<D: Decoder>(_d: &mut D) -> Result<Span, D::Error> {
Ok(DUMMY_SP)
}
}
pub fn spanned<T>(lo: BytePos, hi: BytePos, t: T) -> Spanned<T> {
respan(mk_sp(lo, hi), t)
}
pub fn respan<T>(sp: Span, t: T) -> Spanned<T> {
Spanned {node: t, span: sp}
}
pub fn dummy_spanned<T>(t: T) -> Spanned<T> {
respan(DUMMY_SP, t)
}
/* assuming that we're not in macro expansion */
pub fn mk_sp(lo: BytePos, hi: BytePos) -> Span {
Span {lo: lo, hi: hi, expn_id: NO_EXPANSION}
}
/// Return the span itself if it doesn't come from a macro expansion,
/// otherwise return the call site span up to the `enclosing_sp` by
/// following the `expn_info` chain.
pub fn original_sp(cm: &CodeMap, sp: Span, enclosing_sp: Span) -> Span {
let call_site1 = cm.with_expn_info(sp.expn_id, |ei| ei.map(|ei| ei.call_site));
let call_site2 = cm.with_expn_info(enclosing_sp.expn_id, |ei| ei.map(|ei| ei.call_site));
match (call_site1, call_site2) {
(None, _) => sp,
(Some(call_site1), Some(call_site2)) if call_site1 == call_site2 => sp,
(Some(call_site1), _) => original_sp(cm, call_site1, enclosing_sp),
}
}
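// A minimal sketch of how `original_sp` unwinds expansions (hypothetical
// spans, not taken from a real compilation): if `sp` came from `foo!()`
// expanding to `bar!()`, each recursive step replaces `sp` with its call
// site until that call site matches the one recorded for `enclosing_sp`,
// or until a span with no expansion info is reached:
//
//     let user_sp = original_sp(&cm, inner_sp, enclosing_sp);
//     // `user_sp` now points at source the user actually wrote.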
/// A source code location used for error reporting
pub struct Loc {
/// Information about the original source
pub file: Rc<FileMap>,
/// The (1-based) line number
pub line: usize,
/// The (0-based) column offset
pub col: CharPos
}
/// A source code location used as the result of lookup_char_pos_adj
// Actually, *none* of the clients use the filename *or* file field;
// perhaps they should just be removed.
pub struct LocWithOpt {
pub filename: FileName,
pub line: usize,
pub col: CharPos,
pub file: Option<Rc<FileMap>>,
}
// used to be structural records. Better names, anyone?
pub struct FileMapAndLine { pub fm: Rc<FileMap>, pub line: usize }
pub struct FileMapAndBytePos { pub fm: Rc<FileMap>, pub pos: BytePos }
/// The syntax with which a macro was invoked.
#[derive(Clone, Copy, Hash, Show)]
pub enum MacroFormat {
/// e.g. #[derive(...)] <item>
MacroAttribute,
/// e.g. `format!()`
MacroBang
}
#[derive(Clone, Hash, Show)]
pub struct NameAndSpan {
/// The name of the macro that was invoked to create the thing
/// with this Span.
pub name: String,
/// The format with which the macro was invoked.
pub format: MacroFormat,
/// The span of the macro definition itself. The macro may not
/// have a sensible definition span (e.g. something defined
/// completely inside libsyntax) in which case this is None.
pub span: Option<Span>
}
/// Extra information for tracking macro expansion of spans
#[derive(Hash, Show)]
pub struct ExpnInfo {
/// The location of the actual macro invocation, e.g. `let x =
/// foo!();`
///
/// This may recursively refer to other macro invocations, e.g. if
/// `foo!()` invoked `bar!()` internally, and there was an
/// expression inside `bar!`; the call_site of the expression in
/// the expansion would point to the `bar!` invocation; that
/// call_site span would have its own ExpnInfo, with the call_site
/// pointing to the `foo!` invocation.
pub call_site: Span,
/// Information about the macro and its definition.
///
/// The `callee` of the inner expression in the `call_site`
/// example would point to the `macro_rules! bar { ... }` and that
/// of the `bar!()` invocation would point to the `macro_rules!
/// foo { ... }`.
pub callee: NameAndSpan
}
#[derive(PartialEq, Eq, Clone, Show, Hash, RustcEncodable, RustcDecodable, Copy)]
pub struct ExpnId(u32);
pub const NO_EXPANSION: ExpnId = ExpnId(-1);
// For code appearing from the command line
pub const COMMAND_LINE_EXPN: ExpnId = ExpnId(-2);
impl ExpnId {
pub fn from_llvm_cookie(cookie: c_uint) -> ExpnId {
ExpnId(cookie as u32)
}
pub fn to_llvm_cookie(self) -> i32 {
let ExpnId(cookie) = self;
cookie as i32
}
}
pub type FileName = String;
pub struct FileLines {
pub file: Rc<FileMap>,
pub lines: Vec<usize>
}
/// Identifies an offset of a multi-byte character in a FileMap
#[derive(Copy)]
pub struct MultiByteChar {
/// The absolute offset of the character in the CodeMap
pub pos: BytePos,
/// The number of bytes, >=2
pub bytes: usize,
}
/// A single source in the CodeMap
pub struct FileMap {
/// The name of the file that the source came from, source that doesn't
/// originate from files has names between angle brackets by convention,
/// e.g. `<anon>`
pub name: FileName,
/// The complete source code
pub src: String,
/// The start position of this source in the CodeMap
pub start_pos: BytePos,
/// Locations of lines beginnings in the source code
pub lines: RefCell<Vec<BytePos> >,
/// Locations of multi-byte characters in the source code
pub multibyte_chars: RefCell<Vec<MultiByteChar> >,
}
impl FileMap {
/// EFFECT: register a start-of-line offset in the
/// table of line-beginnings.
/// UNCHECKED INVARIANT: these offsets must be added in the right
/// order and must be in the right places; there is shared knowledge
/// about what ends a line between this file and parse.rs
/// WARNING: pos param here is the offset relative to start of CodeMap,
/// and CodeMap will append a newline when adding a filemap without a newline at the end,
/// so the safe way to call this is with value calculated as
/// filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
pub fn next_line(&self, pos: BytePos) {
// the new charpos must be > the last one (or it's the first one).
let mut lines = self.lines.borrow_mut();
let line_len = lines.len();
assert!(line_len == 0 || ((*lines)[line_len - 1] < pos));
lines.push(pos);
}
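// Sketch of the safe calling pattern the warning above describes (names
// are illustrative): a caller that finds a newline at byte offset `off`
// *within* this filemap must translate it to a CodeMap-absolute position
// before registering it:
//
//     fm.next_line(fm.start_pos + BytePos(off));
//
// Passing the raw local offset works only for the first filemap, whose
// start_pos is 0; for any later filemap it silently corrupts the table.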
/// Get a line from the list of pre-computed line-beginnings.
pub fn get_line(&self, line_number: usize) -> Option<String> {
let lines = self.lines.borrow();
lines.get(line_number).map(|&line| {
let begin: BytePos = line - self.start_pos;
let begin = begin.to_usize();
let slice = &self.src[begin..];
match slice.find('\n') {
Some(e) => &slice[..e],
None => slice
}.to_string()
})
}
pub fn record_multibyte_char(&self, pos: BytePos, bytes: usize) {
assert!(bytes >= 2 && bytes <= 4);
let mbc = MultiByteChar {
pos: pos,
bytes: bytes,
};
self.multibyte_chars.borrow_mut().push(mbc);
}
pub fn is_real_file(&self) -> bool {
!(self.name.starts_with("<") &&
self.name.ends_with(">"))
}
}
pub struct CodeMap {
pub files: RefCell<Vec<Rc<FileMap>>>,
expansions: RefCell<Vec<ExpnInfo>>
}
impl CodeMap {
pub fn new() -> CodeMap {
CodeMap {
files: RefCell::new(Vec::new()),
expansions: RefCell::new(Vec::new()),
}
}
pub fn new_filemap(&self, filename: FileName, src: String) -> Rc<FileMap> {
let mut files = self.files.borrow_mut();
let start_pos = match files.last() {
None => 0,
Some(last) => last.start_pos.to_usize() + last.src.len(),
};
// Remove utf-8 BOM if any.
// FIXME #12884: no efficient/safe way to remove from the start of a string
// and reuse the allocation.
let mut src = if src.starts_with("\u{feff}") {
String::from_str(&src[3..])
} else {
String::from_str(&src[])
};
// Append '\n' in case it's not already there.
// This is a workaround to prevent CodeMap.lookup_filemap_idx from accidentally
// overflowing into the next filemap in case the last byte of span is also the last
// byte of filemap, which leads to incorrect results from CodeMap.span_to_*.
if src.len() > 0 && !src.ends_with("\n") {
src.push('\n');
}
let filemap = Rc::new(FileMap {
name: filename,
src: src.to_string(),
start_pos: Pos::from_usize(start_pos),
lines: RefCell::new(Vec::new()),
multibyte_chars: RefCell::new(Vec::new()),
});
files.push(filemap.clone());
filemap
}
pub fn mk_substr_filename(&self, sp: Span) -> String {
let pos = self.lookup_char_pos(sp.lo);
(format!("<{}:{}:{}>",
pos.file.name,
pos.line,
pos.col.to_usize() + 1)).to_string()
}
/// Lookup source information about a BytePos
pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
self.lookup_pos(pos)
}
pub fn lookup_char_pos_adj(&self, pos: BytePos) -> LocWithOpt {
let loc = self.lookup_char_pos(pos);
LocWithOpt {
filename: loc.file.name.to_string(),
line: loc.line,
col: loc.col,
file: Some(loc.file)
}
}
pub fn span_to_string(&self, sp: Span) -> String {
if self.files.borrow().len() == 0 && sp == DUMMY_SP {
return "no-location".to_string();
}
let lo = self.lookup_char_pos_adj(sp.lo);
let hi = self.lookup_char_pos_adj(sp.hi);
return (format!("{}:{}:{}: {}:{}",
lo.filename,
lo.line,
lo.col.to_usize() + 1,
hi.line,
hi.col.to_usize() + 1)).to_string()
}
pub fn span_to_filename(&self, sp: Span) -> FileName {
self.lookup_char_pos(sp.lo).file.name.to_string()
}
pub fn span_to_lines(&self, sp: Span) -> FileLines {
let lo = self.lookup_char_pos(sp.lo);
let hi = self.lookup_char_pos(sp.hi);
let mut lines = Vec::new();
for i in range(lo.line - 1us, hi.line as usize) {
lines.push(i);
};
FileLines {file: lo.file, lines: lines}
}
pub fn span_to_snippet(&self, sp: Span) -> Option<String> {
let begin = self.lookup_byte_offset(sp.lo);
let end = self.lookup_byte_offset(sp.hi);
// FIXME #8256: this used to be an assert but whatever precondition
// it's testing isn't true for all spans in the AST, so to allow the
// caller to not have to panic (and it can't catch it since the CodeMap
// isn't sendable), return None
if begin.fm.start_pos != end.fm.start_pos {
None
} else {
Some((&begin.fm.src[begin.pos.to_usize()..end.pos.to_usize()]).to_string())
}
}
pub fn get_filemap(&self, filename: &str) -> Rc<FileMap> {
for fm in self.files.borrow().iter() {
if filename == fm.name {
return fm.clone();
}
}
panic!("asking for {} which we don't know about", filename);
}
pub fn lookup_byte_offset(&self, bpos: BytePos) -> FileMapAndBytePos {
let idx = self.lookup_filemap_idx(bpos);
let fm = (*self.files.borrow())[idx].clone();
let offset = bpos - fm.start_pos;
FileMapAndBytePos {fm: fm, pos: offset}
}
/// Converts an absolute BytePos to a CharPos relative to the filemap it belongs to.
pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
let idx = self.lookup_filemap_idx(bpos);
let files = self.files.borrow();
let map = &(*files)[idx];
// The number of extra bytes due to multibyte chars in the FileMap
let mut total_extra_bytes = 0;
for mbc in map.multibyte_chars.borrow().iter() {
debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
if mbc.pos < bpos {
// every character is at least one byte, so we only
// count the actual extra bytes.
total_extra_bytes += mbc.bytes - 1;
// We should never see a byte position in the middle of a
// character
assert!(bpos.to_usize() >= mbc.pos.to_usize() + mbc.bytes);
} else {
break;
}
}
assert!(map.start_pos.to_usize() + total_extra_bytes <= bpos.to_usize());
CharPos(bpos.to_usize() - map.start_pos.to_usize() - total_extra_bytes)
}
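// Worked example for the loop above (hypothetical filemap): for source
// "a€b" with start_pos 0, '€' spans bytes 1..4 and is recorded as a
// 3-byte char at BytePos(1). Looking up 'b' at BytePos(4): one multibyte
// char precedes it, so total_extra_bytes = 3 - 1 = 2, giving
// CharPos(4 - 0 - 2) = CharPos(2), i.e. 'b' is the third character.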
fn lookup_filemap_idx(&self, pos: BytePos) -> usize {
let files = self.files.borrow();
let files = &*files;
let len = files.len();
let mut a = 0us;
let mut b = len;
while b - a > 1us {
let m = (a + b) / 2us;
if files[m].start_pos > pos {
b = m;
} else {
a = m;
}
}
// There can be filemaps with length 0. These have the same start_pos as
// the previous filemap, but are not the filemaps we want (because they
// are length 0, they cannot contain what we are looking for). So,
// rewind until we find a useful filemap.
loop {
let lines = files[a].lines.borrow();
let lines = lines;
if lines.len() > 0 {
break;
}
if a == 0 {
panic!("position {} does not resolve to a source location",
pos.to_usize());
}
a -= 1;
}
if a >= len {
panic!("position {} does not resolve to a source location",
pos.to_usize())
}
return a;
}
fn lookup_line(&self, pos: BytePos) -> FileMapAndLine {
let idx = self.lookup_filemap_idx(pos);
let files = self.files.borrow();
let f = (*files)[idx].clone();
let mut a = 0us;
{
let lines = f.lines.borrow();
let mut b = lines.len();
while b - a > 1us {
let m = (a + b) / 2us;
if (*lines)[m] > pos { b = m; } else { a = m; }
}
}
FileMapAndLine {fm: f, line: a}
}
fn lookup_pos(&self, pos: BytePos) -> Loc {
let FileMapAndLine {fm: f, line: a} = self.lookup_line(pos);
let line = a + 1us; // Line numbers start at 1
let chpos = self.bytepos_to_file_charpos(pos);
let linebpos = (*f.lines.borrow())[a];
let linechpos = self.bytepos_to_file_charpos(linebpos);
debug!("byte pos {:?} is on the line at byte pos {:?}",
pos, linebpos);
debug!("char pos {:?} is on the line at char pos {:?}",
chpos, linechpos);
debug!("byte is on line: {}", line);
assert!(chpos >= linechpos);
Loc {
file: f,
line: line,
col: chpos - linechpos
}
}
pub fn record_expansion(&self, expn_info: ExpnInfo) -> ExpnId {
let mut expansions = self.expansions.borrow_mut();
expansions.push(expn_info);
ExpnId(expansions.len().to_u32().expect("too many ExpnInfo's!") - 1)
}
pub fn with_expn_info<T, F>(&self, id: ExpnId, f: F) -> T where
F: FnOnce(Option<&ExpnInfo>) -> T,
{
match id {
NO_EXPANSION => f(None),
ExpnId(i) => f(Some(&(*self.expansions.borrow())[i as usize]))
}
}
/// Check if a span is "internal" to a macro. This means that it is entirely generated by a
/// macro expansion and contains no code that was passed in as an argument.
pub fn span_is_internal(&self, span: Span) -> bool {
// first, check if the given expression was generated by a macro or not
// we need to go back the expn_info tree to check only the arguments
// of the initial macro call, not the nested ones.
let mut is_internal = false;
let mut expnid = span.expn_id;
while self.with_expn_info(expnid, |expninfo| {
match expninfo {
Some(ref info) => {
// save the parent expn_id for next loop iteration
expnid = info.call_site.expn_id;
if info.callee.name == "format_args" {
// This is a hack because the format_args builtin calls unstable APIs.
// I spent like 6 hours trying to solve this more generally but am stupid.
is_internal = true;
false
} else if info.callee.span.is_none() {
// it's a compiler built-in, we *really* don't want to mess with it
// so we skip it, unless it was called by a regular macro, in which case
// we will handle the caller macro next turn
is_internal = true;
true // continue looping
} else {
// was this expression from the current macro arguments ?
is_internal = !(span.lo > info.call_site.lo &&
span.hi < info.call_site.hi);
true // continue looping
}
},
_ => false // stop looping
}
}) { /* empty while loop body */ }
return is_internal;
}
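// Rough decision table for the loop above (illustrative summary, not an
// exhaustive spec):
//   callee.name == "format_args"       -> internal, stop walking
//   callee.span.is_none()              -> internal so far, keep walking the chain
//   span strictly inside the call site -> user-written argument, keep walking
//   no expansion info                  -> stop and return the last decision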
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn t1 () {
let cm = CodeMap::new();
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
fm.next_line(BytePos(0));
assert_eq!(fm.get_line(0), Some("first line.".to_string()));
// TESTING BROKEN BEHAVIOR:
fm.next_line(BytePos(10));
assert_eq!(fm.get_line(1), Some(".".to_string()));
}
#[test]
#[should_fail]
fn t2 () {
let cm = CodeMap::new();
let fm = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
// TESTING *REALLY* BROKEN BEHAVIOR:
fm.next_line(BytePos(0));
fm.next_line(BytePos(10));
fm.next_line(BytePos(2));
}
fn init_code_map() -> CodeMap {
let cm = CodeMap::new();
let fm1 = cm.new_filemap("blork.rs".to_string(),
"first line.\nsecond line".to_string());
let fm2 = cm.new_filemap("empty.rs".to_string(),
"".to_string());
let fm3 = cm.new_filemap("blork2.rs".to_string(),
"first line.\nsecond line".to_string());
fm1.next_line(BytePos(0));
fm1.next_line(BytePos(12));
fm2.next_line(BytePos(24));
fm3.next_line(BytePos(24));
fm3.next_line(BytePos(34));
cm
}
#[test]
fn t3() {
// Test lookup_byte_offset
let cm = init_code_map();
let fmabp1 = cm.lookup_byte_offset(BytePos(22));
assert_eq!(fmabp1.fm.name, "blork.rs");
assert_eq!(fmabp1.pos, BytePos(22));
let fmabp2 = cm.lookup_byte_offset(BytePos(24));
assert_eq!(fmabp2.fm.name, "blork2.rs");
assert_eq!(fmabp2.pos, BytePos(0));
}
#[test]
fn t4() {
// Test bytepos_to_file_charpos
let cm = init_code_map();
let cp1 = cm.bytepos_to_file_charpos(BytePos(22));
assert_eq!(cp1, CharPos(22));
let cp2 = cm.bytepos_to_file_charpos(BytePos(24));
assert_eq!(cp2, CharPos(0));
}
#[test]
fn t5() {
// Test zero-length filemaps.
let cm = init_code_map();
let loc1 = cm.lookup_char_pos(BytePos(22));
assert_eq!(loc1.file.name, "blork.rs");
assert_eq!(loc1.line, 2);
assert_eq!(loc1.col, CharPos(10));
|
assert_eq!(loc2.line, 1);
assert_eq!(loc2.col, CharPos(0));
}
fn init_code_map_mbc() -> CodeMap {
let cm = CodeMap::new();
// € is a three byte utf8 char.
let fm1 =
cm.new_filemap("blork.rs".to_string(),
"fir€st €€€€ line.\nsecond line".to_string());
let fm2 = cm.new_filemap("blork2.rs".to_string(),
"first line€€.\n€ second line".to_string());
fm1.next_line(BytePos(0));
fm1.next_line(BytePos(22));
fm2.next_line(BytePos(40));
fm2.next_line(BytePos(58));
fm1.record_multibyte_char(BytePos(3), 3);
fm1.record_multibyte_char(BytePos(9), 3);
fm1.record_multibyte_char(BytePos(12), 3);
fm1.record_multibyte_char(BytePos(15), 3);
fm1.record_multibyte_char(BytePos(18), 3);
fm2.record_multibyte_char(BytePos(50), 3);
fm2.record_multibyte_char(BytePos(53), 3);
fm2.record_multibyte_char(BytePos(58), 3);
cm
}
#[test]
fn t6() {
// Test bytepos_to_file_charpos in the presence of multi-byte chars
let cm = init_code_map_mbc();
let cp1 = cm.bytepos_to_file_charpos(BytePos(3));
assert_eq!(cp1, CharPos(3));
let cp2 = cm.bytepos_to_file_charpos(BytePos(6));
assert_eq!(cp2, CharPos(4));
let cp3 = cm.bytepos_to_file_charpos(BytePos(56));
assert_eq!(cp3, CharPos(12));
let cp4 = cm.bytepos_to_file_charpos(BytePos(61));
assert_eq!(cp4, CharPos(15));
}
#[test]
fn t7() {
// Test span_to_lines for a span ending at the end of filemap
let cm = init_code_map();
let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let file_lines = cm.span_to_lines(span);
assert_eq!(file_lines.file.name, "blork.rs");
assert_eq!(file_lines.lines.len(), 1);
assert_eq!(file_lines.lines[0], 1us);
}
#[test]
fn t8() {
// Test span_to_snippet for a span ending at the end of filemap
let cm = init_code_map();
let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let snippet = cm.span_to_snippet(span);
assert_eq!(snippet, Some("second line".to_string()));
}
#[test]
fn t9() {
// Test span_to_str for a span ending at the end of filemap
let cm = init_code_map();
let span = Span {lo: BytePos(12), hi: BytePos(23), expn_id: NO_EXPANSION};
let sstr = cm.span_to_string(span);
assert_eq!(sstr, "blork.rs:2:1: 2:12");
}
}
|
let loc2 = cm.lookup_char_pos(BytePos(24));
assert_eq!(loc2.file.name, "blork2.rs");
|
random_line_split
|
restaurant_oper.py
|
#!/usr/bin/env python
#-*- encoding:utf-8 -*-
import json
from datetime import datetime
from bottle import route, mako_template as template, redirect, request, response, get, post
from bottle import static_file, view # imported so static files such as html/css can be served directly without going through a controller
from model.documents import *
from setting import *
DATE_FORMAT = '%Y-%m-%d %H:%M:%S' # timestamp format used when writing to the database
@route('/to_add_item')
def to_add_item():
return template('views/system/item/add', site_opt = site_opt)
@route('/add_item', method = 'POST')
def add_item():
DATE_FORMAT = '%Y%m%d%H%M%S'
innerName = 'attr_%s' % datetime.now().strftime(DATE_FORMAT)
# request.params retrieves parameters passed via either GET or POST
name = request.params.get('name')
address = request.params.get('address')
telno = request.params.get('telno')
lat = request.params.get('lat')
lon = request.params.get('lon')
item = Restaurant(name=unicode(name, 'utf8'), address=unicode(address, 'utf8'), telno=telno, lat = lat, lon = lon)
item.save()
redirect('list_item')
@route('/list_item')
def list_item():
start = request.params.get('start') or '0'
size = request.params.get('size') or '1000'
items = Restaurant.objects[int(start):(int(start) + int(size))]
data = {
'items': items
}
return template('views/system/item/list', data = data, site_opt = site_opt)
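# Example request for the handler above (values are illustrative):
# GET /list_item?start=20&size=10 slices Restaurant.objects[20:30]; omitted
# parameters fall back to the string defaults '0' and '1000' before int().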
@route('/del_item')
def del_item():
id = request.params.get('id')
Restaurant.objects(id=id).delete()
# cascade delete menus of the restaurant
Menu.objects(restaurant=id).delete()
redirect('/list_item')
@route('/modify_item', method = 'POST')
def modify_item():
id = request.params.get('id')
name = request.params.get('name')
address = request.
|
em = Restaurant.objects(id = id)[0]
data = {
'item': item
}
return template('views/system/item/edit', data = data, site_opt = site_opt)
|
params.get('address')
telno = request.params.get('telno')
lat = request.params.get('lat')
lon = request.params.get('lon')
print 'modify item=====%s, %s, %s, %s' % (id, name, address, telno)
Restaurant.objects(id=id).update(set__name = unicode(name, 'utf8'), set__address = address, set__telno = unicode(telno, 'utf-8'), set__lat = lat, set__lon = lon)
redirect('/list_item')
@route('/to_modify_item')
def to_modify_item():
id = request.params.get('id')
it
|
identifier_body
|
restaurant_oper.py
|
#!/usr/bin/env python
#-*- encoding:utf-8 -*-
import json
from datetime import datetime
from bottle import route, mako_template as template, redirect, request, response, get, post
from bottle import static_file, view # imported so static files such as html/css can be served directly without going through a controller
from model.documents import *
from setting import *
DATE_FORMAT = '%Y-%m-%d %H:%M:%S' # timestamp format used when writing to the database
@route('/to_add_item')
def to_add_item():
return template('views/system/item/add', site_opt = site_opt)
|
# request.params retrieves parameters passed via either GET or POST
name = request.params.get('name')
address = request.params.get('address')
telno = request.params.get('telno')
lat = request.params.get('lat')
lon = request.params.get('lon')
item = Restaurant(name=unicode(name, 'utf8'), address=unicode(address, 'utf8'), telno=telno, lat = lat, lon = lon)
item.save()
redirect('list_item')
@route('/list_item')
def list_item():
start = request.params.get('start') or '0'
size = request.params.get('size') or '1000'
items = Restaurant.objects[int(start):(int(start) + int(size))]
data = {
'items': items
}
return template('views/system/item/list', data = data, site_opt = site_opt)
@route('/del_item')
def del_item():
id = request.params.get('id')
Restaurant.objects(id=id).delete()
# cascade delete menus of the restaurant
Menu.objects(restaurant=id).delete()
redirect('/list_item')
@route('/modify_item', method = 'POST')
def modify_item():
id = request.params.get('id')
name = request.params.get('name')
address = request.params.get('address')
telno = request.params.get('telno')
lat = request.params.get('lat')
lon = request.params.get('lon')
print 'modify item=====%s, %s, %s, %s' % (id, name, address, telno)
Restaurant.objects(id=id).update(set__name = unicode(name, 'utf8'), set__address = address, set__telno = unicode(telno, 'utf-8'), set__lat = lat, set__lon = lon)
redirect('/list_item')
@route('/to_modify_item')
def to_modify_item():
id = request.params.get('id')
item = Restaurant.objects(id = id)[0]
data = {
'item': item
}
return template('views/system/item/edit', data = data, site_opt = site_opt)
|
@route('/add_item', method = 'POST')
def add_item():
DATE_FORMAT = '%Y%m%d%H%M%S'
innerName = 'attr_%s' % datetime.now().strftime(DATE_FORMAT)
|
random_line_split
|
restaurant_oper.py
|
#!/usr/bin/env python
#-*- encoding:utf-8 -*-
import json
from datetime import datetime
from bottle import route, mako_template as template, redirect, request, response, get, post
from bottle import static_file, view # imported so static files such as html/css can be served directly without going through a controller
from model.documents import *
from setting import *
DATE_FORMAT = '%Y-%m-%d %H:%M:%S' # timestamp format used when writing to the database
@route('/to_add_item')
def to_add_item():
return template('views/system/item/add', site_opt = site_opt)
@route('/add_item', method = 'POST')
def add_item():
DATE_FORMAT = '%Y%m%d%H%M%S'
innerName = 'attr_%s' % datetime.now().strftime(DATE_FORMAT)
# request.params retrieves parameters passed via either GET or POST
name = request.params.get('name')
address = request.params.get('address')
telno = request.params.get('telno')
lat = request.params.get('lat')
lon = request.params.get('lon')
item = Restaurant(name=unicode(name, 'utf8'), address=unicode(address, 'utf8'), telno=telno, lat = lat, lon = lon)
item.save()
redirect('list_item')
@route('/list_item')
def list_item():
start = request.params.get('start') or '0'
size = request.params.get('size') or '1000'
items = Restaurant.objects[int(start):(int(start) + int(size))]
data = {
'items': items
}
return template('views/system/item/list', data = data, site_opt = site_opt)
@route('/del_item')
def del_item():
id = request.params.get('id')
Restaurant.objects(id=id).delete()
# cascade delete menus of the restaurant
Menu.objects(restaurant=id).delete()
redirect('/list_item')
@route('/modify_item', method = 'POST')
def modify_item():
id = request.params.get('id')
name = request.params.get('name')
address = request.params.get('address')
telno = request.params.get('telno')
lat = request.params.get('lat')
lon = request.params.get('lon')
print 'modify item=====%s, %s, %s, %s' % (id, name, address, telno)
Restaurant.objects(id=id).update(set__name = unicode(name, 'utf8'), set__address = address, set__telno = unicode(telno, 'utf-8'), set__lat = lat, set__lon = lon)
redirect('/list_item')
@route('/to_modify_item')
def to_modify_item():
id = request.params.get('id')
item = Restaurant.objects(id = id)
|
'item': item
}
return template('views/system/item/edit', data = data, site_opt = site_opt)
|
[0]
data = {
|
identifier_name
|
RegulatingControl.py
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.PowerSystemResource import PowerSystemResource
class RegulatingControl(PowerSystemResource):
"""Specifies a set of equipment that works together to control a power system quantity such as voltage or flow.Specifies a set of equipment that works together to control a power system quantity such as voltage or flow.
"""
def __init__(self, mode="fixed", targetRange=0.0, discrete=False, targetValue=0.0, monitoredPhase="s12N", RegulatingCondEq=None, Terminal=None, RegulationSchedule=None, *args, **kw_args):
"""Initialises a new 'RegulatingControl' instance.
@param mode: The regulating control mode presently available. This specification allows determining the kind of regulation without needing to obtain the units from a schedule. Values are: "fixed", "timeScheduled", "voltage", "admittance", "reactivePower", "powerFactor", "currentFlow", "activePower", "temperature"
@param targetRange: This is the case input target range. This performs the same function as the value2 attribute on the regulation schedule in the case that schedules are not used. The units are those appropriate for the mode.
@param discrete: The regulation is performed in a discrete mode.
@param targetValue: The target value specified for case input. This value can be used for the target value without the use of schedules. The value has the units appropriate to the mode attribute.
@param monitoredPhase: Phase voltage controlling this regulator, measured at regulator location. Values are: "s12N", "BN", "BC", "ABN", "s2N", "N", "ACN", "BCN", "ABCN", "AC", "s1N", "AN", "B", "AB", "C", "A", "CN", "ABC"
@param RegulatingCondEq: The equipment that participates in this regulating control scheme.
@param Terminal: The terminal associated with this regulating control.
@param RegulationSchedule: Schedule for this regulating control.
"""
#: The regulating control mode presently available. This specification allows determining the kind of regulation without needing to obtain the units from a schedule. Values are: "fixed", "timeScheduled", "voltage", "admittance", "reactivePower", "powerFactor", "currentFlow", "activePower", "temperature"
self.mode = mode
#: This is the case input target range. This performs the same function as the value2 attribute on the regulation schedule in the case that schedules are not used. The units are those appropriate for the mode.
self.targetRange = targetRange
#: The regulation is performed in a discrete mode.
self.discrete = discrete
#: The target value specified for case input. This value can be used for the target value without the use of schedules. The value has the units appropriate to the mode attribute.
self.targetValue = targetValue
#: Phase voltage controlling this regulator, measured at regulator location. Values are: "s12N", "BN", "BC", "ABN", "s2N", "N", "ACN", "BCN", "ABCN", "AC", "s1N", "AN", "B", "AB", "C", "A", "CN", "ABC"
self.monitoredPhase = monitoredPhase
self._RegulatingCondEq = []
self.RegulatingCondEq = [] if RegulatingCondEq is None else RegulatingCondEq
self._Terminal = None
self.Terminal = Terminal
self._RegulationSchedule = []
self.RegulationSchedule = [] if RegulationSchedule is None else RegulationSchedule
super(RegulatingControl, self).__init__(*args, **kw_args)
_attrs = ["mode", "targetRange", "discrete", "targetValue", "monitoredPhase"]
_attr_types = {"mode": str, "targetRange": float, "discrete": bool, "targetValue": float, "monitoredPhase": str}
_defaults = {"mode": "fixed", "targetRange": 0.0, "discrete": False, "targetValue": 0.0, "monitoredPhase": "s12N"}
_enums = {"mode": "RegulatingControlModeKind", "monitoredPhase": "PhaseCode"}
_refs = ["RegulatingCondEq", "Terminal", "RegulationSchedule"]
_many_refs = ["RegulatingCondEq", "RegulationSchedule"]
def getRegulatingCondEq(self):
"""The equipment that participates in this regulating control scheme.
"""
return self._RegulatingCondEq
def setRegulatingCondEq(self, value):
for x in self._RegulatingCondEq:
x.RegulatingControl = None
for y in value:
y._RegulatingControl = self
self._RegulatingCondEq = value
RegulatingCondEq = property(getRegulatingCondEq, setRegulatingCondEq)
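# Sketch of the back-reference bookkeeping the setter above performs
# (hypothetical objects): assigning the list first detaches the old members,
# then points every new member's RegulatingControl back at this instance,
# keeping both sides of the association consistent:
#
# rc.RegulatingCondEq = [eq1, eq2] # eq1.RegulatingControl is now rc
# rc.RegulatingCondEq = [] # eq1 and eq2 are detached again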
|
def removeRegulatingCondEq(self, *RegulatingCondEq):
for obj in RegulatingCondEq:
obj.RegulatingControl = None
def getTerminal(self):
"""The terminal associated with this regulating control.
"""
return self._Terminal
def setTerminal(self, value):
if self._Terminal is not None:
filtered = [x for x in self.Terminal.RegulatingControl if x != self]
self._Terminal._RegulatingControl = filtered
self._Terminal = value
if self._Terminal is not None:
if self not in self._Terminal._RegulatingControl:
self._Terminal._RegulatingControl.append(self)
Terminal = property(getTerminal, setTerminal)
def getRegulationSchedule(self):
"""Schedule for this Regulating regulating control.
"""
return self._RegulationSchedule
def setRegulationSchedule(self, value):
for x in self._RegulationSchedule:
x.RegulatingControl = None
for y in value:
y._RegulatingControl = self
self._RegulationSchedule = value
RegulationSchedule = property(getRegulationSchedule, setRegulationSchedule)
def addRegulationSchedule(self, *RegulationSchedule):
for obj in RegulationSchedule:
obj.RegulatingControl = self
def removeRegulationSchedule(self, *RegulationSchedule):
for obj in RegulationSchedule:
obj.RegulatingControl = None
|
def addRegulatingCondEq(self, *RegulatingCondEq):
for obj in RegulatingCondEq:
obj.RegulatingControl = self
|
random_line_split
|
RegulatingControl.py
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.PowerSystemResource import PowerSystemResource
class RegulatingControl(PowerSystemResource):
"""Specifies a set of equipment that works together to control a power system quantity such as voltage or flow.Specifies a set of equipment that works together to control a power system quantity such as voltage or flow.
"""
def __init__(self, mode="fixed", targetRange=0.0, discrete=False, targetValue=0.0, monitoredPhase="s12N", RegulatingCondEq=None, Terminal=None, RegulationSchedule=None, *args, **kw_args):
"""Initialises a new 'RegulatingControl' instance.
@param mode: The regulating control mode presently available. This specification allows determining the kind of regulation without needing to obtain the units from a schedule. Values are: "fixed", "timeScheduled", "voltage", "admittance", "reactivePower", "powerFactor", "currentFlow", "activePower", "temperature"
@param targetRange: This is the case input target range. This performs the same function as the value2 attribute on the regulation schedule in the case that schedules are not used. The units are those appropriate for the mode.
@param discrete: The regulation is performed in a discrete mode.
@param targetValue: The target value specified for case input. This value can be used for the target value without the use of schedules. The value has the units appropriate to the mode attribute.
@param monitoredPhase: Phase voltage controlling this regulator, measured at regulator location. Values are: "s12N", "BN", "BC", "ABN", "s2N", "N", "ACN", "BCN", "ABCN", "AC", "s1N", "AN", "B", "AB", "C", "A", "CN", "ABC"
@param RegulatingCondEq: The equipment that participates in this regulating control scheme.
@param Terminal: The terminal associated with this regulating control.
@param RegulationSchedule: Schedule for this regulating control.
"""
#: The regulating control mode presently available. This specification allows determining the kind of regulation without needing to obtain the units from a schedule. Values are: "fixed", "timeScheduled", "voltage", "admittance", "reactivePower", "powerFactor", "currentFlow", "activePower", "temperature"
self.mode = mode
#: This is the case input target range. This performs the same function as the value2 attribute on the regulation schedule in the case that schedules are not used. The units are those appropriate for the mode.
self.targetRange = targetRange
#: The regulation is performed in a discrete mode.
self.discrete = discrete
#: The target value specified for case input. This value can be used for the target value without the use of schedules. The value has the units appropriate to the mode attribute.
self.targetValue = targetValue
#: Phase voltage controlling this regulator, measured at regulator location. Values are: "s12N", "BN", "BC", "ABN", "s2N", "N", "ACN", "BCN", "ABCN", "AC", "s1N", "AN", "B", "AB", "C", "A", "CN", "ABC"
self.monitoredPhase = monitoredPhase
self._RegulatingCondEq = []
self.RegulatingCondEq = [] if RegulatingCondEq is None else RegulatingCondEq
self._Terminal = None
self.Terminal = Terminal
self._RegulationSchedule = []
self.RegulationSchedule = [] if RegulationSchedule is None else RegulationSchedule
super(RegulatingControl, self).__init__(*args, **kw_args)
_attrs = ["mode", "targetRange", "discrete", "targetValue", "monitoredPhase"]
_attr_types = {"mode": str, "targetRange": float, "discrete": bool, "targetValue": float, "monitoredPhase": str}
_defaults = {"mode": "fixed", "targetRange": 0.0, "discrete": False, "targetValue": 0.0, "monitoredPhase": "s12N"}
_enums = {"mode": "RegulatingControlModeKind", "monitoredPhase": "PhaseCode"}
_refs = ["RegulatingCondEq", "Terminal", "RegulationSchedule"]
_many_refs = ["RegulatingCondEq", "RegulationSchedule"]
def getRegulatingCondEq(self):
"""The equipment that participates in this regulating control scheme.
"""
return self._RegulatingCondEq
def setRegulatingCondEq(self, value):
for x in self._RegulatingCondEq:
x.RegulatingControl = None
for y in value:
y._RegulatingControl = self
self._RegulatingCondEq = value
RegulatingCondEq = property(getRegulatingCondEq, setRegulatingCondEq)
def addRegulatingCondEq(self, *RegulatingCondEq):
for obj in RegulatingCondEq:
obj.RegulatingControl = self
def removeRegulatingCondEq(self, *RegulatingCondEq):
for obj in RegulatingCondEq:
obj.RegulatingControl = None
def getTerminal(self):
"""The terminal associated with this regulating control.
"""
return self._Terminal
def setTerminal(self, value):
if self._Terminal is not None:
filtered = [x for x in self.Terminal.RegulatingControl if x != self]
self._Terminal._RegulatingControl = filtered
self._Terminal = value
if self._Terminal is not None:
if self not in self._Terminal._RegulatingControl:
self._Terminal._RegulatingControl.append(self)
Terminal = property(getTerminal, setTerminal)
def getRegulationSchedule(self):
"""Schedule for this Regulating regulating control.
"""
return self._RegulationSchedule
def setRegulationSchedule(self, value):
for x in self._RegulationSchedule:
x.RegulatingControl = None
for y in value:
y._RegulatingControl = self
self._RegulationSchedule = value
RegulationSchedule = property(getRegulationSchedule, setRegulationSchedule)
def addRegulationSchedule(self, *RegulationSchedule):
for obj in RegulationSchedule:
obj.RegulatingControl = self
def removeRegulationSchedule(self, *RegulationSchedule):
for obj in RegulationSchedule:
|
obj.RegulatingControl = None
|
conditional_block
|
RegulatingControl.py
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.PowerSystemResource import PowerSystemResource
class RegulatingControl(PowerSystemResource):
"""Specifies a set of equipment that works together to control a power system quantity such as voltage or flow.Specifies a set of equipment that works together to control a power system quantity such as voltage or flow.
"""
def __init__(self, mode="fixed", targetRange=0.0, discrete=False, targetValue=0.0, monitoredPhase="s12N", RegulatingCondEq=None, Terminal=None, RegulationSchedule=None, *args, **kw_args):
"""Initialises a new 'RegulatingControl' instance.
@param mode: The regulating control mode presently available. This specification allows determining the kind of regulation without needing to obtain the units from a schedule. Values are: "fixed", "timeScheduled", "voltage", "admittance", "reactivePower", "powerFactor", "currentFlow", "activePower", "temperature"
@param targetRange: This is the case input target range. This performs the same function as the value2 attribute on the regulation schedule in the case that schedules are not used. The units are those appropriate for the mode.
@param discrete: The regulation is performed in a discrete mode.
@param targetValue: The target value specified for case input. This value can be used for the target value without the use of schedules. The value has the units appropriate to the mode attribute.
@param monitoredPhase: Phase voltage controlling this regulator, measured at regulator location. Values are: "s12N", "BN", "BC", "ABN", "s2N", "N", "ACN", "BCN", "ABCN", "AC", "s1N", "AN", "B", "AB", "C", "A", "CN", "ABC"
@param RegulatingCondEq: The equipment that participates in this regulating control scheme.
@param Terminal: The terminal associated with this regulating control.
@param RegulationSchedule: Schedule for this regulating control.
"""
#: The regulating control mode presently available. This specification allows determining the kind of regulation without needing to obtain the units from a schedule. Values are: "fixed", "timeScheduled", "voltage", "admittance", "reactivePower", "powerFactor", "currentFlow", "activePower", "temperature"
self.mode = mode
#: This is the case input target range. This performs the same function as the value2 attribute on the regulation schedule in the case that schedules are not used. The units are those appropriate for the mode.
self.targetRange = targetRange
#: The regulation is performed in a discrete mode.
self.discrete = discrete
#: The target value specified for case input. This value can be used for the target value without the use of schedules. The value has the units appropriate to the mode attribute.
self.targetValue = targetValue
#: Phase voltage controlling this regulator, measured at regulator location. Values are: "s12N", "BN", "BC", "ABN", "s2N", "N", "ACN", "BCN", "ABCN", "AC", "s1N", "AN", "B", "AB", "C", "A", "CN", "ABC"
self.monitoredPhase = monitoredPhase
self._RegulatingCondEq = []
self.RegulatingCondEq = [] if RegulatingCondEq is None else RegulatingCondEq
self._Terminal = None
self.Terminal = Terminal
self._RegulationSchedule = []
self.RegulationSchedule = [] if RegulationSchedule is None else RegulationSchedule
super(RegulatingControl, self).__init__(*args, **kw_args)
_attrs = ["mode", "targetRange", "discrete", "targetValue", "monitoredPhase"]
_attr_types = {"mode": str, "targetRange": float, "discrete": bool, "targetValue": float, "monitoredPhase": str}
_defaults = {"mode": "fixed", "targetRange": 0.0, "discrete": False, "targetValue": 0.0, "monitoredPhase": "s12N"}
_enums = {"mode": "RegulatingControlModeKind", "monitoredPhase": "PhaseCode"}
_refs = ["RegulatingCondEq", "Terminal", "RegulationSchedule"]
_many_refs = ["RegulatingCondEq", "RegulationSchedule"]
def getRegulatingCondEq(self):
|
def setRegulatingCondEq(self, value):
for x in self._RegulatingCondEq:
x.RegulatingControl = None
for y in value:
y._RegulatingControl = self
self._RegulatingCondEq = value
RegulatingCondEq = property(getRegulatingCondEq, setRegulatingCondEq)
def addRegulatingCondEq(self, *RegulatingCondEq):
for obj in RegulatingCondEq:
obj.RegulatingControl = self
def removeRegulatingCondEq(self, *RegulatingCondEq):
for obj in RegulatingCondEq:
obj.RegulatingControl = None
def getTerminal(self):
"""The terminal associated with this regulating control.
"""
return self._Terminal
def setTerminal(self, value):
if self._Terminal is not None:
filtered = [x for x in self.Terminal.RegulatingControl if x != self]
self._Terminal._RegulatingControl = filtered
self._Terminal = value
if self._Terminal is not None:
if self not in self._Terminal._RegulatingControl:
self._Terminal._RegulatingControl.append(self)
Terminal = property(getTerminal, setTerminal)
def getRegulationSchedule(self):
"""Schedule for this Regulating regulating control.
"""
return self._RegulationSchedule
def setRegulationSchedule(self, value):
for x in self._RegulationSchedule:
x.RegulatingControl = None
for y in value:
y._RegulatingControl = self
self._RegulationSchedule = value
RegulationSchedule = property(getRegulationSchedule, setRegulationSchedule)
def addRegulationSchedule(self, *RegulationSchedule):
for obj in RegulationSchedule:
obj.RegulatingControl = self
def removeRegulationSchedule(self, *RegulationSchedule):
for obj in RegulationSchedule:
obj.RegulatingControl = None
|
"""The equipment that participates in this regulating control scheme.
"""
return self._RegulatingCondEq
|
identifier_body
|
RegulatingControl.py
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.PowerSystemResource import PowerSystemResource
class RegulatingControl(PowerSystemResource):
"""Specifies a set of equipment that works together to control a power system quantity such as voltage or flow.Specifies a set of equipment that works together to control a power system quantity such as voltage or flow.
"""
def __init__(self, mode="fixed", targetRange=0.0, discrete=False, targetValue=0.0, monitoredPhase="s12N", RegulatingCondEq=None, Terminal=None, RegulationSchedule=None, *args, **kw_args):
"""Initialises a new 'RegulatingControl' instance.
@param mode: The regulating control mode presently available. This specification allows determining the kind of regulation without needing to obtain the units from a schedule. Values are: "fixed", "timeScheduled", "voltage", "admittance", "reactivePower", "powerFactor", "currentFlow", "activePower", "temperature"
@param targetRange: This is the case input target range. This performs the same function as the value2 attribute on the regulation schedule in the case that schedules are not used. The units are those appropriate for the mode.
@param discrete: The regulation is performed in a discrete mode.
@param targetValue: The target value specified for case input. This value can be used for the target value without the use of schedules. The value has the units appropriate to the mode attribute.
@param monitoredPhase: Phase voltage controlling this regulator, measured at regulator location. Values are: "s12N", "BN", "BC", "ABN", "s2N", "N", "ACN", "BCN", "ABCN", "AC", "s1N", "AN", "B", "AB", "C", "A", "CN", "ABC"
@param RegulatingCondEq: The equipment that participates in this regulating control scheme.
@param Terminal: The terminal associated with this regulating control.
@param RegulationSchedule: Schedule for this regulating control.
"""
#: The regulating control mode presently available. This specification allows determining the kind of regulation without needing to obtain the units from a schedule. Values are: "fixed", "timeScheduled", "voltage", "admittance", "reactivePower", "powerFactor", "currentFlow", "activePower", "temperature"
self.mode = mode
#: This is the case input target range. This performs the same function as the value2 attribute on the regulation schedule in the case that schedules are not used. The units are those appropriate for the mode.
self.targetRange = targetRange
#: The regulation is performed in a discrete mode.
self.discrete = discrete
#: The target value specified for case input. This value can be used for the target value without the use of schedules. The value has the units appropriate to the mode attribute.
self.targetValue = targetValue
#: Phase voltage controlling this regulator, measured at regulator location. Values are: "s12N", "BN", "BC", "ABN", "s2N", "N", "ACN", "BCN", "ABCN", "AC", "s1N", "AN", "B", "AB", "C", "A", "CN", "ABC"
self.monitoredPhase = monitoredPhase
self._RegulatingCondEq = []
self.RegulatingCondEq = [] if RegulatingCondEq is None else RegulatingCondEq
self._Terminal = None
self.Terminal = Terminal
self._RegulationSchedule = []
self.RegulationSchedule = [] if RegulationSchedule is None else RegulationSchedule
super(RegulatingControl, self).__init__(*args, **kw_args)
_attrs = ["mode", "targetRange", "discrete", "targetValue", "monitoredPhase"]
_attr_types = {"mode": str, "targetRange": float, "discrete": bool, "targetValue": float, "monitoredPhase": str}
_defaults = {"mode": "fixed", "targetRange": 0.0, "discrete": False, "targetValue": 0.0, "monitoredPhase": "s12N"}
_enums = {"mode": "RegulatingControlModeKind", "monitoredPhase": "PhaseCode"}
_refs = ["RegulatingCondEq", "Terminal", "RegulationSchedule"]
_many_refs = ["RegulatingCondEq", "RegulationSchedule"]
def getRegulatingCondEq(self):
"""The equipment that participates in this regulating control scheme.
"""
return self._RegulatingCondEq
def setRegulatingCondEq(self, value):
for x in self._RegulatingCondEq:
x.RegulatingControl = None
for y in value:
y._RegulatingControl = self
self._RegulatingCondEq = value
RegulatingCondEq = property(getRegulatingCondEq, setRegulatingCondEq)
def addRegulatingCondEq(self, *RegulatingCondEq):
for obj in RegulatingCondEq:
obj.RegulatingControl = self
def
|
(self, *RegulatingCondEq):
for obj in RegulatingCondEq:
obj.RegulatingControl = None
def getTerminal(self):
"""The terminal associated with this regulating control.
"""
return self._Terminal
def setTerminal(self, value):
if self._Terminal is not None:
filtered = [x for x in self.Terminal.RegulatingControl if x != self]
self._Terminal._RegulatingControl = filtered
self._Terminal = value
if self._Terminal is not None:
if self not in self._Terminal._RegulatingControl:
self._Terminal._RegulatingControl.append(self)
Terminal = property(getTerminal, setTerminal)
def getRegulationSchedule(self):
"""Schedule for this Regulating regulating control.
"""
return self._RegulationSchedule
def setRegulationSchedule(self, value):
for x in self._RegulationSchedule:
x.RegulatingControl = None
for y in value:
y._RegulatingControl = self
self._RegulationSchedule = value
RegulationSchedule = property(getRegulationSchedule, setRegulationSchedule)
def addRegulationSchedule(self, *RegulationSchedule):
for obj in RegulationSchedule:
obj.RegulatingControl = self
def removeRegulationSchedule(self, *RegulationSchedule):
for obj in RegulationSchedule:
obj.RegulatingControl = None
|
removeRegulatingCondEq
|
identifier_name
|
spamcan1.py
|
#!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2013, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
# $Id: spamcan1.py,v 1.3 2012/11/13 03:12:04 earl-lang Exp $
# Created by NJG on Wed, Apr 18, 2007
#
# Queueing model of an email-spam analyzer system comprising a
# battery of SMP servers essentially running in batch mode.
# Each node was a 4-way SMP server.
# The performance metric of interest was the mean queue length.
#
# This simple M/M/4 model gave results that were in surprisingly
|
# good agreement with monitored queue lengths.
import pdq
# Measured performance parameters
cpusPerServer = 4
emailThruput = 2376 # emails per hour
scannerTime = 6.0 # seconds per email
pdq.Init("Spam Farm Model")
# Timebase is SECONDS ...
nstreams = pdq.CreateOpen("Email", float(emailThruput)/3600)
nnodes = pdq.CreateNode("spamCan", int(cpusPerServer), pdq.MSQ)
pdq.SetDemand("spamCan", "Email", scannerTime)
pdq.Solve(pdq.CANON)
pdq.Report()
|
random_line_split
|
|
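The spamcan1.py record above models the spam farm as an open M/M/4 queue. Its headline metric, the mean queue length, can be cross-checked against the closed-form Erlang-C result; the sketch below is an independent textbook calculation using the script's parameters, not part of the PDQ API.

from math import factorial

# Cross-check of the M/M/4 spam-farm model using closed-form Erlang C.
lam = 2376 / 3600.0          # arrival rate: emails per second
svc = 6.0                    # service demand: seconds per email
m = 4                        # servers (CPUs per node)

a = lam * svc                # offered load in erlangs (3.96)
rho = a / m                  # per-server utilization (0.99)

# Erlang B first, then convert to Erlang C (probability an arrival must wait).
erlang_b = (a ** m / factorial(m)) / sum(a ** k / factorial(k) for k in range(m + 1))
p_wait = erlang_b / (1 - rho * (1 - erlang_b))
lq = p_wait * rho / (1 - rho)        # mean number waiting in queue
print(f"utilization={rho:.3f}  P(wait)={p_wait:.3f}  mean queue length={lq:.1f}")

At rho = 0.99 the node is near saturation, so the mean queue length is dominated by the rho/(1 - rho) factor and comes out at roughly 97, which is why this metric was the sensitive one to compare against monitoring data.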
navigation-instruction.ts
|
import core from 'core-js';
export class
|
{
public fragment;
public queryString;
public params;
public queryParams;
public config;
public lifecycleArgs;
public viewPortInstructions;
constructor(fragment, queryString, params, queryParams, config, parentInstruction) {
const allParams = Object.assign({}, queryParams, params);
this.fragment = fragment;
this.queryString = queryString;
this.params = params || {};
this.queryParams = queryParams;
this.config = config;
this.lifecycleArgs = [allParams, config, this];
this.viewPortInstructions = {};
if (parentInstruction) {
this.params.$parent = parentInstruction.params;
}
}
addViewPortInstruction(viewPortName, strategy, moduleId, component) {
return this.viewPortInstructions[viewPortName] = {
name: viewPortName,
strategy: strategy,
moduleId: moduleId,
component: component,
childRouter: component.childRouter,
lifecycleArgs: this.lifecycleArgs.slice()
};
}
getWildCardName() {
var wildcardIndex = this.config.route.lastIndexOf('*');
return this.config.route.substr(wildcardIndex + 1);
}
getWildcardPath() {
var wildcardName = this.getWildCardName(),
path = this.params[wildcardName];
if (this.queryString) {
path += "?" + this.queryString;
}
return path;
}
getBaseUrl() {
if (!this.params) {
return this.fragment;
}
var wildcardName = this.getWildCardName(),
path = this.params[wildcardName];
if (!path) {
return this.fragment;
}
return this.fragment.substr(0, this.fragment.lastIndexOf(path));
}
}
|
NavigationInstruction
|
identifier_name
|
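The NavigationInstruction records center on the wildcard helpers: getWildCardName takes everything after the * in the route pattern as the catch-all parameter name, and getWildcardPath resolves it against params and re-appends the query string. The same lookup sketched in Python for brevity (names mirror the TypeScript, but this is an illustration, not the Aurelia API):

# Sketch of Aurelia-style wildcard route resolution (illustrative only).
def get_wildcard_name(route: str) -> str:
    # Everything after the last '*' names the catch-all parameter,
    # e.g. "files/*path" -> "path".
    return route[route.rindex("*") + 1:]

def get_wildcard_path(route: str, params: dict, query_string: str = "") -> str:
    path = params[get_wildcard_name(route)]
    if query_string:
        path += "?" + query_string
    return path

assert get_wildcard_name("files/*path") == "path"
assert get_wildcard_path("files/*path", {"path": "a/b.txt"}, "v=2") == "a/b.txt?v=2"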
navigation-instruction.ts
|
import core from 'core-js';
export class NavigationInstruction {
public fragment;
public queryString;
public params;
public queryParams;
public config;
public lifecycleArgs;
public viewPortInstructions;
constructor(fragment, queryString, params, queryParams, config, parentInstruction) {
const allParams = Object.assign({}, queryParams, params);
this.fragment = fragment;
this.queryString = queryString;
this.params = params || {};
this.queryParams = queryParams;
this.config = config;
this.lifecycleArgs = [allParams, config, this];
this.viewPortInstructions = {};
if (parentInstruction) {
this.params.$parent = parentInstruction.params;
}
}
addViewPortInstruction(viewPortName, strategy, moduleId, component) {
return this.viewPortInstructions[viewPortName] = {
name: viewPortName,
strategy: strategy,
moduleId: moduleId,
component: component,
childRouter: component.childRouter,
lifecycleArgs: this.lifecycleArgs.slice()
};
}
getWildCardName() {
var wildcardIndex = this.config.route.lastIndexOf('*');
return this.config.route.substr(wildcardIndex + 1);
}
getWildcardPath() {
var wildcardName = this.getWildCardName(),
path = this.params[wildcardName];
if (this.queryString) {
path += "?" + this.queryString;
}
return path;
}
getBaseUrl() {
if (!this.params) {
return this.fragment;
}
var wildcardName = this.getWildCardName(),
path = this.params[wildcardName];
if (!path)
|
return this.fragment.substr(0, this.fragment.lastIndexOf(path));
}
}
|
{
return this.fragment;
}
|
conditional_block
|
navigation-instruction.ts
|
import core from 'core-js';
export class NavigationInstruction {
public fragment;
public queryString;
public params;
public queryParams;
public config;
public lifecycleArgs;
public viewPortInstructions;
constructor(fragment, queryString, params, queryParams, config, parentInstruction) {
const allParams = Object.assign({}, queryParams, params);
this.fragment = fragment;
this.queryString = queryString;
this.params = params || {};
this.queryParams = queryParams;
this.config = config;
this.lifecycleArgs = [allParams, config, this];
this.viewPortInstructions = {};
if (parentInstruction) {
this.params.$parent = parentInstruction.params;
}
}
addViewPortInstruction(viewPortName, strategy, moduleId, component) {
return this.viewPortInstructions[viewPortName] = {
name: viewPortName,
strategy: strategy,
moduleId: moduleId,
component: component,
childRouter: component.childRouter,
lifecycleArgs: this.lifecycleArgs.slice()
};
|
}
getWildCardName() {
var wildcardIndex = this.config.route.lastIndexOf('*');
return this.config.route.substr(wildcardIndex + 1);
}
getWildcardPath() {
var wildcardName = this.getWildCardName(),
path = this.params[wildcardName];
if (this.queryString) {
path += "?" + this.queryString;
}
return path;
}
getBaseUrl() {
if (!this.params) {
return this.fragment;
}
var wildcardName = this.getWildCardName(),
path = this.params[wildcardName];
if (!path) {
return this.fragment;
}
return this.fragment.substr(0, this.fragment.lastIndexOf(path));
}
}
|
random_line_split
|
|
navigation-instruction.ts
|
import core from 'core-js';
export class NavigationInstruction {
public fragment;
public queryString;
public params;
public queryParams;
public config;
public lifecycleArgs;
public viewPortInstructions;
constructor(fragment, queryString, params, queryParams, config, parentInstruction) {
const allParams = Object.assign({}, queryParams, params);
this.fragment = fragment;
this.queryString = queryString;
this.params = params || {};
this.queryParams = queryParams;
this.config = config;
this.lifecycleArgs = [allParams, config, this];
this.viewPortInstructions = {};
if (parentInstruction) {
this.params.$parent = parentInstruction.params;
}
}
addViewPortInstruction(viewPortName, strategy, moduleId, component) {
return this.viewPortInstructions[viewPortName] = {
name: viewPortName,
strategy: strategy,
moduleId: moduleId,
component: component,
childRouter: component.childRouter,
lifecycleArgs: this.lifecycleArgs.slice()
};
}
getWildCardName() {
var wildcardIndex = this.config.route.lastIndexOf('*');
return this.config.route.substr(wildcardIndex + 1);
}
getWildcardPath() {
var wildcardName = this.getWildCardName(),
path = this.params[wildcardName];
if (this.queryString) {
path += "?" + this.queryString;
}
return path;
}
getBaseUrl()
|
}
|
{
if (!this.params) {
return this.fragment;
}
var wildcardName = this.getWildCardName(),
path = this.params[wildcardName];
if (!path) {
return this.fragment;
}
return this.fragment.substr(0, this.fragment.lastIndexOf(path));
}
|
identifier_body
|
server.ts
|
import { Services } from './services/services'
var app = require('./app');
var debug = require('debug');
var http = require('http');
/**
* Set the port (hardcoded to 3000 here rather than read from the environment) and store it in Express.
*/
var port = 3000;
app.set('port', port);
/**
* Create HTTP server.
*/
var server = http.createServer(app);
/**
* Listen on provided port, on all network interfaces.
*/
server.listen(port);
server.on('error', onError);
server.on('listening', onListening);
Services.setHttpServerAndInitializeStream(server)
/**
* Event listener for HTTP server "error" event.
*/
function onError(error:any)
|
/**
* Event listener for HTTP server "listening" event.
*/
function onListening() {
var addr = server.address();
var bind = typeof addr === 'string'
? 'pipe ' + addr
: 'port ' + addr.port;
debug('Listening on ' + bind);
}
|
{
if (error.syscall !== 'listen') {
throw error;
}
var bind = typeof port === 'string'
? 'Pipe ' + port
: 'Port ' + port;
// handle specific listen errors with friendly messages
switch (error.code) {
case 'EACCES':
console.error(bind + ' requires elevated privileges');
process.exit(1);
break;
case 'EADDRINUSE':
console.error(bind + ' is already in use');
process.exit(1);
break;
default:
throw error;
}
}
|
identifier_body
|
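The onError handler in the server.ts records singles out the two listen failures worth a friendly message: EACCES (privileged port) and EADDRINUSE (port already taken). The same errno convention holds outside Node; a stdlib-socket sketch, assuming a Unix host:

import errno
import socket

# Sketch: the EACCES / EADDRINUSE pattern from server.ts, via stdlib sockets.
def try_listen(port: int) -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind(("0.0.0.0", port))
        sock.listen()
        print(f"listening on port {port}")
    except OSError as e:
        if e.errno == errno.EACCES:
            print(f"port {port} requires elevated privileges")
        elif e.errno == errno.EADDRINUSE:
            print(f"port {port} is already in use")
        else:
            raise
    finally:
        sock.close()

try_listen(80)    # typically EACCES for an unprivileged user on Unix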
server.ts
|
import { Services } from './services/services'
var app = require('./app');
var debug = require('debug');
var http = require('http');
/**
* Set the port (hardcoded to 3000 here rather than read from the environment) and store it in Express.
*/
var port = 3000;
app.set('port', port);
/**
* Create HTTP server.
*/
var server = http.createServer(app);
/**
* Listen on provided port, on all network interfaces.
*/
server.listen(port);
server.on('error', onError);
server.on('listening', onListening);
Services.setHttpServerAndInitializeStream(server)
/**
* Event listener for HTTP server "error" event.
*/
function
|
(error:any) {
if (error.syscall !== 'listen') {
throw error;
}
var bind = typeof port === 'string'
? 'Pipe ' + port
: 'Port ' + port;
// handle specific listen errors with friendly messages
switch (error.code) {
case 'EACCES':
console.error(bind + ' requires elevated privileges');
process.exit(1);
break;
case 'EADDRINUSE':
console.error(bind + ' is already in use');
process.exit(1);
break;
default:
throw error;
}
}
/**
* Event listener for HTTP server "listening" event.
*/
function onListening() {
var addr = server.address();
var bind = typeof addr === 'string'
? 'pipe ' + addr
: 'port ' + addr.port;
debug('Listening on ' + bind);
}
|
onError
|
identifier_name
|
server.ts
|
import { Services } from './services/services'
var app = require('./app');
var debug = require('debug');
var http = require('http');
/**
* Set the port (hardcoded to 3000 here rather than read from the environment) and store it in Express.
*/
var port = 3000;
app.set('port', port);
/**
* Create HTTP server.
*/
var server = http.createServer(app);
/**
* Listen on provided port, on all network interfaces.
*/
server.listen(port);
server.on('error', onError);
server.on('listening', onListening);
Services.setHttpServerAndInitializeStream(server)
/**
* Event listener for HTTP server "error" event.
*/
function onError(error:any) {
if (error.syscall !== 'listen') {
throw error;
}
var bind = typeof port === 'string'
? 'Pipe ' + port
: 'Port ' + port;
|
case 'EACCES':
console.error(bind + ' requires elevated privileges');
process.exit(1);
break;
case 'EADDRINUSE':
console.error(bind + ' is already in use');
process.exit(1);
break;
default:
throw error;
}
}
/**
* Event listener for HTTP server "listening" event.
*/
function onListening() {
var addr = server.address();
var bind = typeof addr === 'string'
? 'pipe ' + addr
: 'port ' + addr.port;
debug('Listening on ' + bind);
}
|
// handle specific listen errors with friendly messages
switch (error.code) {
|
random_line_split
|
language_tools.js
|
/* ***** BEGIN LICENSE BLOCK *****
* Distributed under the BSD license:
*
* Copyright (c) 2012, Ajax.org B.V.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Ajax.org B.V. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL AJAX.ORG B.V. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
define(function(require, exports, module) {
"use strict";
var snippetManager = require("../snippets").snippetManager;
var Autocomplete = require("../autocomplete").Autocomplete;
var config = require("../config");
var util = require("../autocomplete/util");
var textCompleter = require("../autocomplete/text_completer");
var keyWordCompleter = {
getCompletions: function(editor, session, pos, prefix, callback) {
var state = editor.session.getState(pos.row);
var completions = session.$mode.getCompletions(state, session, pos, prefix);
callback(null, completions);
}
};
var snippetCompleter = {
getCompletions: function(editor, session, pos, prefix, callback) {
var snippetMap = snippetManager.snippetMap;
var completions = [];
snippetManager.getActiveScopes(editor).forEach(function(scope) {
var snippets = snippetMap[scope] || [];
for (var i = snippets.length; i--;) {
var s = snippets[i];
var caption = s.name || s.tabTrigger;
if (!caption)
continue;
completions.push({
caption: caption,
snippet: s.content,
meta: s.tabTrigger && !s.name ? s.tabTrigger + "\u21E5 " : "snippet"
});
}
}, this);
callback(null, completions);
}
};
var completers = [snippetCompleter, textCompleter, keyWordCompleter];
exports.addCompleter = function(completer) {
completers.push(completer);
};
// Export the existing completers so that users can construct their own set of completers.
exports.textCompleter = textCompleter;
exports.keyWordCompleter = keyWordCompleter;
exports.snippetCompleter = snippetCompleter;
var expandSnippet = {
name: "expandSnippet",
exec: function(editor) {
var success = snippetManager.expandWithTab(editor);
if (!success)
editor.execCommand("indent");
},
bindKey: "Tab"
};
|
var onChangeMode = function(e, editor) {
loadSnippetsForMode(editor.session.$mode);
};
var loadSnippetsForMode = function(mode) {
var id = mode.$id;
if (!snippetManager.files)
snippetManager.files = {};
loadSnippetFile(id);
if (mode.modes)
mode.modes.forEach(loadSnippetsForMode);
};
var loadSnippetFile = function(id) {
if (!id || snippetManager.files[id])
return;
var snippetFilePath = id.replace("mode", "snippets");
snippetManager.files[id] = {};
config.loadModule(snippetFilePath, function(m) {
if (m) {
snippetManager.files[id] = m;
if (!m.snippets && m.snippetText)
m.snippets = snippetManager.parseSnippetFile(m.snippetText);
snippetManager.register(m.snippets || [], m.scope);
if (m.includeScopes) {
snippetManager.snippetMap[m.scope].includeScopes = m.includeScopes;
m.includeScopes.forEach(function(x) {
loadSnippetFile("ace/mode/" + x);
});
}
}
});
};
function getCompletionPrefix(editor) {
var pos = editor.getCursorPosition();
var line = editor.session.getLine(pos.row);
var prefix = util.retrievePrecedingIdentifier(line, pos.column);
// Try to find custom prefixes on the completers
editor.completers.forEach(function(completer) {
if (completer.identifierRegexps) {
completer.identifierRegexps.forEach(function(identifierRegex) {
if (!prefix && identifierRegex)
prefix = util.retrievePrecedingIdentifier(line, pos.column, identifierRegex);
});
}
});
return prefix;
}
var doLiveAutocomplete = function(e) {
var editor = e.editor;
var text = e.args || "";
var hasCompleter = editor.completer && editor.completer.activated;
// We don't want to autocomplete with no prefix
if (e.command.name === "backspace") {
if (hasCompleter && !getCompletionPrefix(editor))
editor.completer.detach();
}
else if (e.command.name === "insertstring") {
var prefix = getCompletionPrefix(editor);
// Only autocomplete if there's a prefix that can be matched
if (prefix && !hasCompleter) {
if (!editor.completer) {
// Create new autocompleter
editor.completer = new Autocomplete();
}
// Disable autoInsert
editor.completer.autoSelect = false;
editor.completer.autoInsert = false;
editor.completer.showPopup(editor);
} else if (!prefix && hasCompleter) {
// When the prefix is empty
// close the autocomplete dialog
editor.completer.detach();
}
}
};
var Editor = require("../editor").Editor;
require("../config").defineOptions(Editor.prototype, "editor", {
enableBasicAutocompletion: {
set: function(val) {
if (val) {
if (!this.completers)
this.completers = Array.isArray(val)? val: completers;
this.commands.addCommand(Autocomplete.startCommand);
} else {
this.commands.removeCommand(Autocomplete.startCommand);
}
},
value: false
},
/**
* Enable live autocomplete. If the value is an array, it is assumed to be an array of completers,
* which will be used instead of the default completers.
*/
enableLiveAutocompletion: {
set: function(val) {
if (val) {
if (!this.completers)
this.completers = Array.isArray(val)? val: completers;
// On each change automatically trigger the autocomplete
this.commands.on('afterExec', doLiveAutocomplete);
} else {
this.commands.removeListener('afterExec', doLiveAutocomplete);
}
},
value: false
},
enableSnippets: {
set: function(val) {
if (val) {
this.commands.addCommand(expandSnippet);
this.on("changeMode", onChangeMode);
onChangeMode(null, this);
} else {
this.commands.removeCommand(expandSnippet);
this.off("changeMode", onChangeMode);
}
},
value: false
}
});
});
|
random_line_split
|
|
language_tools.js
|
/* ***** BEGIN LICENSE BLOCK *****
* Distributed under the BSD license:
*
* Copyright (c) 2012, Ajax.org B.V.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Ajax.org B.V. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL AJAX.ORG B.V. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
define(function(require, exports, module) {
"use strict";
var snippetManager = require("../snippets").snippetManager;
var Autocomplete = require("../autocomplete").Autocomplete;
var config = require("../config");
var util = require("../autocomplete/util");
var textCompleter = require("../autocomplete/text_completer");
var keyWordCompleter = {
getCompletions: function(editor, session, pos, prefix, callback) {
var state = editor.session.getState(pos.row);
var completions = session.$mode.getCompletions(state, session, pos, prefix);
callback(null, completions);
}
};
var snippetCompleter = {
getCompletions: function(editor, session, pos, prefix, callback) {
var snippetMap = snippetManager.snippetMap;
var completions = [];
snippetManager.getActiveScopes(editor).forEach(function(scope) {
var snippets = snippetMap[scope] || [];
for (var i = snippets.length; i--;) {
var s = snippets[i];
var caption = s.name || s.tabTrigger;
if (!caption)
continue;
completions.push({
caption: caption,
snippet: s.content,
meta: s.tabTrigger && !s.name ? s.tabTrigger + "\u21E5 " : "snippet"
});
}
}, this);
callback(null, completions);
}
};
var completers = [snippetCompleter, textCompleter, keyWordCompleter];
exports.addCompleter = function(completer) {
completers.push(completer);
};
// Export the existing completers so that users can construct their own set of completers.
exports.textCompleter = textCompleter;
exports.keyWordCompleter = keyWordCompleter;
exports.snippetCompleter = snippetCompleter;
var expandSnippet = {
name: "expandSnippet",
exec: function(editor) {
var success = snippetManager.expandWithTab(editor);
if (!success)
editor.execCommand("indent");
},
bindKey: "Tab"
};
var onChangeMode = function(e, editor) {
loadSnippetsForMode(editor.session.$mode);
};
var loadSnippetsForMode = function(mode) {
var id = mode.$id;
if (!snippetManager.files)
snippetManager.files = {};
loadSnippetFile(id);
if (mode.modes)
mode.modes.forEach(loadSnippetsForMode);
};
var loadSnippetFile = function(id) {
if (!id || snippetManager.files[id])
return;
var snippetFilePath = id.replace("mode", "snippets");
snippetManager.files[id] = {};
config.loadModule(snippetFilePath, function(m) {
if (m) {
snippetManager.files[id] = m;
if (!m.snippets && m.snippetText)
m.snippets = snippetManager.parseSnippetFile(m.snippetText);
snippetManager.register(m.snippets || [], m.scope);
if (m.includeScopes) {
snippetManager.snippetMap[m.scope].includeScopes = m.includeScopes;
m.includeScopes.forEach(function(x) {
loadSnippetFile("ace/mode/" + x);
});
}
}
});
};
function getCompletionPrefix(editor)
|
var doLiveAutocomplete = function(e) {
var editor = e.editor;
var text = e.args || "";
var hasCompleter = editor.completer && editor.completer.activated;
// We don't want to autocomplete with no prefix
if (e.command.name === "backspace") {
if (hasCompleter && !getCompletionPrefix(editor))
editor.completer.detach();
}
else if (e.command.name === "insertstring") {
var prefix = getCompletionPrefix(editor);
// Only autocomplete if there's a prefix that can be matched
if (prefix && !hasCompleter) {
if (!editor.completer) {
// Create new autocompleter
editor.completer = new Autocomplete();
}
// Disable autoInsert
editor.completer.autoSelect = false;
editor.completer.autoInsert = false;
editor.completer.showPopup(editor);
} else if (!prefix && hasCompleter) {
// When the prefix is empty
// close the autocomplete dialog
editor.completer.detach();
}
}
};
var Editor = require("../editor").Editor;
require("../config").defineOptions(Editor.prototype, "editor", {
enableBasicAutocompletion: {
set: function(val) {
if (val) {
if (!this.completers)
this.completers = Array.isArray(val)? val: completers;
this.commands.addCommand(Autocomplete.startCommand);
} else {
this.commands.removeCommand(Autocomplete.startCommand);
}
},
value: false
},
/**
* Enable live autocomplete. If the value is an array, it is assumed to be an array of completers,
* which will be used instead of the default completers.
*/
enableLiveAutocompletion: {
set: function(val) {
if (val) {
if (!this.completers)
this.completers = Array.isArray(val)? val: completers;
// On each change automatically trigger the autocomplete
this.commands.on('afterExec', doLiveAutocomplete);
} else {
this.commands.removeListener('afterExec', doLiveAutocomplete);
}
},
value: false
},
enableSnippets: {
set: function(val) {
if (val) {
this.commands.addCommand(expandSnippet);
this.on("changeMode", onChangeMode);
onChangeMode(null, this);
} else {
this.commands.removeCommand(expandSnippet);
this.off("changeMode", onChangeMode);
}
},
value: false
}
});
});
|
{
var pos = editor.getCursorPosition();
var line = editor.session.getLine(pos.row);
var prefix = util.retrievePrecedingIdentifier(line, pos.column);
// Try to find custom prefixes on the completers
editor.completers.forEach(function(completer) {
if (completer.identifierRegexps) {
completer.identifierRegexps.forEach(function(identifierRegex) {
if (!prefix && identifierRegex)
prefix = util.retrievePrecedingIdentifier(line, pos.column, identifierRegex);
});
}
});
return prefix;
}
|
identifier_body
|
language_tools.js
|
/* ***** BEGIN LICENSE BLOCK *****
* Distributed under the BSD license:
*
* Copyright (c) 2012, Ajax.org B.V.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Ajax.org B.V. nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL AJAX.ORG B.V. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
define(function(require, exports, module) {
"use strict";
var snippetManager = require("../snippets").snippetManager;
var Autocomplete = require("../autocomplete").Autocomplete;
var config = require("../config");
var util = require("../autocomplete/util");
var textCompleter = require("../autocomplete/text_completer");
var keyWordCompleter = {
getCompletions: function(editor, session, pos, prefix, callback) {
var state = editor.session.getState(pos.row);
var completions = session.$mode.getCompletions(state, session, pos, prefix);
callback(null, completions);
}
};
var snippetCompleter = {
getCompletions: function(editor, session, pos, prefix, callback) {
var snippetMap = snippetManager.snippetMap;
var completions = [];
snippetManager.getActiveScopes(editor).forEach(function(scope) {
var snippets = snippetMap[scope] || [];
for (var i = snippets.length; i--;) {
var s = snippets[i];
var caption = s.name || s.tabTrigger;
if (!caption)
continue;
completions.push({
caption: caption,
snippet: s.content,
meta: s.tabTrigger && !s.name ? s.tabTrigger + "\u21E5 " : "snippet"
});
}
}, this);
callback(null, completions);
}
};
var completers = [snippetCompleter, textCompleter, keyWordCompleter];
exports.addCompleter = function(completer) {
completers.push(completer);
};
// Export the existing completers so that users can construct their own set of completers.
exports.textCompleter = textCompleter;
exports.keyWordCompleter = keyWordCompleter;
exports.snippetCompleter = snippetCompleter;
var expandSnippet = {
name: "expandSnippet",
exec: function(editor) {
var success = snippetManager.expandWithTab(editor);
if (!success)
editor.execCommand("indent");
},
bindKey: "Tab"
};
var onChangeMode = function(e, editor) {
loadSnippetsForMode(editor.session.$mode);
};
var loadSnippetsForMode = function(mode) {
var id = mode.$id;
if (!snippetManager.files)
snippetManager.files = {};
loadSnippetFile(id);
if (mode.modes)
mode.modes.forEach(loadSnippetsForMode);
};
var loadSnippetFile = function(id) {
if (!id || snippetManager.files[id])
return;
var snippetFilePath = id.replace("mode", "snippets");
snippetManager.files[id] = {};
config.loadModule(snippetFilePath, function(m) {
if (m) {
snippetManager.files[id] = m;
if (!m.snippets && m.snippetText)
m.snippets = snippetManager.parseSnippetFile(m.snippetText);
snippetManager.register(m.snippets || [], m.scope);
if (m.includeScopes) {
snippetManager.snippetMap[m.scope].includeScopes = m.includeScopes;
m.includeScopes.forEach(function(x) {
loadSnippetFile("ace/mode/" + x);
});
}
}
});
};
function
|
(editor) {
var pos = editor.getCursorPosition();
var line = editor.session.getLine(pos.row);
var prefix = util.retrievePrecedingIdentifier(line, pos.column);
// Try to find custom prefixes on the completers
editor.completers.forEach(function(completer) {
if (completer.identifierRegexps) {
completer.identifierRegexps.forEach(function(identifierRegex) {
if (!prefix && identifierRegex)
prefix = util.retrievePrecedingIdentifier(line, pos.column, identifierRegex);
});
}
});
return prefix;
}
var doLiveAutocomplete = function(e) {
var editor = e.editor;
var text = e.args || "";
var hasCompleter = editor.completer && editor.completer.activated;
// We don't want to autocomplete with no prefix
if (e.command.name === "backspace") {
if (hasCompleter && !getCompletionPrefix(editor))
editor.completer.detach();
}
else if (e.command.name === "insertstring") {
var prefix = getCompletionPrefix(editor);
// Only autocomplete if there's a prefix that can be matched
if (prefix && !hasCompleter) {
if (!editor.completer) {
// Create new autocompleter
editor.completer = new Autocomplete();
}
// Disable autoInsert
editor.completer.autoSelect = false;
editor.completer.autoInsert = false;
editor.completer.showPopup(editor);
} else if (!prefix && hasCompleter) {
// When the prefix is empty
// close the autocomplete dialog
editor.completer.detach();
}
}
};
var Editor = require("../editor").Editor;
require("../config").defineOptions(Editor.prototype, "editor", {
enableBasicAutocompletion: {
set: function(val) {
if (val) {
if (!this.completers)
this.completers = Array.isArray(val)? val: completers;
this.commands.addCommand(Autocomplete.startCommand);
} else {
this.commands.removeCommand(Autocomplete.startCommand);
}
},
value: false
},
/**
* Enable live autocomplete. If the value is an array, it is assumed to be an array of completers,
* which will be used instead of the default completers.
*/
enableLiveAutocompletion: {
set: function(val) {
if (val) {
if (!this.completers)
this.completers = Array.isArray(val)? val: completers;
// On each change automatically trigger the autocomplete
this.commands.on('afterExec', doLiveAutocomplete);
} else {
this.commands.removeListener('afterExec', doLiveAutocomplete);
}
},
value: false
},
enableSnippets: {
set: function(val) {
if (val) {
this.commands.addCommand(expandSnippet);
this.on("changeMode", onChangeMode);
onChangeMode(null, this);
} else {
this.commands.removeCommand(expandSnippet);
this.off("changeMode", onChangeMode);
}
},
value: false
}
});
});
|
getCompletionPrefix
|
identifier_name
|
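getCompletionPrefix in the language_tools.js records walks left from the cursor to collect the identifier being typed, retrying with each completer's custom identifierRegexps when the default scan finds nothing. A minimal sketch of that backward scan (the default pattern here is an assumption for illustration; ace's real helper is util.retrievePrecedingIdentifier):

import re

# Sketch of ace's retrievePrecedingIdentifier: scan left from the cursor and
# keep characters while they still match the identifier pattern.
def retrieve_preceding_identifier(line: str, column: int,
                                  ident: re.Pattern = re.compile(r"[a-zA-Z_0-9$]")) -> str:
    chars = []
    for ch in reversed(line[:column]):
        if not ident.match(ch):
            break
        chars.append(ch)
    return "".join(reversed(chars))

assert retrieve_preceding_identifier("editor.comp", 11) == "comp"
assert retrieve_preceding_identifier("foo + bar", 3) == "foo"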
dynamic.to.top.min.js
|
/*
* Dynamic To Top Plugin
* http://www.mattvarone.com
*
* By Matt Varone
* @sksmatt
*
*/
var mv_dynamic_to_top;(function($,mv_dynamic_to_top){jQuery.fn.DynamicToTop=function(options){var defaults={text:mv_dynamic_to_top.text,min:parseInt(mv_dynamic_to_top.min,10),fade_in:600,fade_out:400,speed:parseInt(mv_dynamic_to_top.speed,10),easing:mv_dynamic_to_top.easing,version:mv_dynamic_to_top.version,id:'dynamic-to-top'},settings=$.extend(defaults,options);if(settings.version===""||settings.version==='0'){settings.text='<span> </span>';}
if(!$.isFunction(settings.easing)){settings.easing='linear';}
var $toTop=$('<a href=\"#\" id=\"'+settings.id+'\"></a>').html(settings.text);$toTop.hide().appendTo('body').click(function(){$('html, body').stop().animate({scrollTop:0},settings.speed,settings.easing);return false;});$(window).scroll(function(){var sd=jQuery(window).scrollTop();if(typeof document.body.style.maxHeight==="undefined"){$toTop.css({'position':'absolute','top':sd+$(window).height()-mv_dynamic_to_top.margin});}
if(sd>settings.min){$toTop.fadeIn(settings.fade_in);}else
|
});};$('body').DynamicToTop();})(jQuery,mv_dynamic_to_top);
|
{$toTop.fadeOut(settings.fade_out);}
|
conditional_block
|
dynamic.to.top.min.js
|
/*
|
*
* By Matt Varone
* @sksmatt
*
*/
var mv_dynamic_to_top;(function($,mv_dynamic_to_top){jQuery.fn.DynamicToTop=function(options){var defaults={text:mv_dynamic_to_top.text,min:parseInt(mv_dynamic_to_top.min,10),fade_in:600,fade_out:400,speed:parseInt(mv_dynamic_to_top.speed,10),easing:mv_dynamic_to_top.easing,version:mv_dynamic_to_top.version,id:'dynamic-to-top'},settings=$.extend(defaults,options);if(settings.version===""||settings.version==='0'){settings.text='<span> </span>';}
if(!$.isFunction(settings.easing)){settings.easing='linear';}
var $toTop=$('<a href=\"#\" id=\"'+settings.id+'\"></a>').html(settings.text);$toTop.hide().appendTo('body').click(function(){$('html, body').stop().animate({scrollTop:0},settings.speed,settings.easing);return false;});$(window).scroll(function(){var sd=jQuery(window).scrollTop();if(typeof document.body.style.maxHeight==="undefined"){$toTop.css({'position':'absolute','top':sd+$(window).height()-mv_dynamic_to_top.margin});}
if(sd>settings.min){$toTop.fadeIn(settings.fade_in);}else{$toTop.fadeOut(settings.fade_out);}});};$('body').DynamicToTop();})(jQuery,mv_dynamic_to_top);
|
* Dynamic To Top Plugin
* http://www.mattvarone.com
|
random_line_split
|
index.d.ts
|
// Type definitions for pty.js 0.2
// Project: https://github.com/chjj/pty.js
// Definitions by: Vadim Macagon <https://github.com/enlight>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.2
/// <reference types="node" />
/** Options that can be used when creating a new pseudo-terminal. */
interface TerminalOptions {
name?: string | undefined;
cols?: number | undefined;
rows?: number | undefined;
cwd?: string | undefined;
env?: any;
uid?: number | undefined;
gid?: number | undefined;
}
import net = require('net');
export declare class Terminal {
/** Read-only name of the terminal. */
name: string;
/** Read-only number of columns in the terminal. */
cols: number;
/** Read-only number of rows in the terminal. */
rows: number;
/**
* Read-only identifier of the spawned process associated with the slave end of the
* pseudo-terminal. This will be null if the terminal was created via [[Terminal.open]].
*/
pid: number;
/** Read-only file descriptor of the master end of the pseudo-terminal. */
fd: number;
/** Read-only name of the slave end of the pseudo-terminal. */
pty: string;
/** Read-only filename of the executable associated with the slave end of the pseudo-terminal. */
file: string;
/** Read-only name of the process associated with the slave end of the pseudo-terminal. */
process: string;
stdout: Terminal;
/** Note that an exception will be thrown if an attempt is made to access this property. */
stderr: Terminal;
stdin: Terminal;
socket: net.Socket;
/**
* Creates a new pseudo-terminal, spawns a child process, and associates it with the slave
* end of the pseudo-terminal.
*/
constructor(file?: string, args?: string[], opt?: TerminalOptions);
resize(cols?: number, rows?: number): void;
/**
* Sends a signal to the spawned process associated with the slave end of the
* pseudo-terminal (this only works if [[pid]] is not null).
*/
kill(signal?: string): void;
redraw(): void;
// NodeJS Socket-like interface (wrappers for this.socket)
write(data: any): boolean;
end(data: any): void;
pause(): void;
resume(): void;
setEncoding(encoding: string): void;
/**
* Closes the master end of the pseudo-terminal, and attempts to kill the spawned process
* associated with the slave end of the pseudo-terminal (but only if [[pid]] is not null).
*/
destroy(): void;
// NodeJS Stream interface
pipe<T extends NodeJS.WritableStream>(destination: T, options?: { end?: boolean | undefined; }): T;
// NodeJS EventEmitter interface
addListener(event: string, listener: Function): this;
on(event: string, listener: Function): this;
once(event: string, listener: Function): this;
removeListener(event: string, listener: Function): this;
removeAllListeners(event?: string): this;
listeners(event: string): Function[];
emit(event: string, ...args: any[]): boolean;
eventNames(): string[];
}
/**
* Creates a new pseudo-terminal, spawns a child process, and associates it with the slave
* end of the pseudo-terminal.
*/
export declare function createTerminal(file?: string, args?: string[], opt?: TerminalOptions): Terminal;
/** Alias for [[createTerminal]]. */
export declare function fork(file?: string, args?: string[], opt?: TerminalOptions): Terminal;
/** Alias for [[createTerminal]]. */
export declare function spawn(file?: string, args?: string[], opt?: TerminalOptions): Terminal;
/**
* Creates a new pseudo-terminal.
* This function is not available on Windows; use [[fork]] there instead.
*/
export declare function open(opt?: { cols?: number | undefined; rows?: number | undefined }): Terminal;
// Internal stuff that probably isn't very useful but is exported by pty.js
export declare module native {
/** Unix-only. */
export function fork(
file: string, args: string[], env: any, cwd: string, cols: number, rows: number,
uid?: number, gid?: number
): { fd: number; pid: number; pty: string };
/** Unix-only. */
export function open(
cols: number, rows: number
): { master: number; slave: number; pty: string };
/** Unix-only. */
export function process(fd: number, tty: string): string;
/** Windows-only. */
export function open(
dataPipe: string, cols: number, rows: number, debug: boolean
): { pid: number; pty: number; fd: number };
/** Windows-only. */
|
/** Windows-only. */
export function kill(pid: number): void;
export function resize(fd: number, cols: number, rows: number): void;
}
|
export function startProcess(
pid: number, file: string, cmdline: string, env: string[], cwd: string
): void;
|
random_line_split
|
index.d.ts
|
// Type definitions for pty.js 0.2
// Project: https://github.com/chjj/pty.js
// Definitions by: Vadim Macagon <https://github.com/enlight>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.2
/// <reference types="node" />
/** Options that can be used when creating a new pseudo-terminal. */
interface TerminalOptions {
name?: string | undefined;
cols?: number | undefined;
rows?: number | undefined;
cwd?: string | undefined;
env?: any;
uid?: number | undefined;
gid?: number | undefined;
}
import net = require('net');
export declare class
|
{
/** Read-only name of the terminal. */
name: string;
/** Read-only number of columns in the terminal. */
cols: number;
/** Read-only number of rows in the terminal. */
rows: number;
/**
* Read-only identifier of the spawned process associated with the slave end of the
* pseudo-terminal. This will be null if the terminal was created via [[Terminal.open]].
*/
pid: number;
/** Read-only file descriptor of the master end of the pseudo-terminal. */
fd: number;
/** Read-only name of the slave end of the pseudo-terminal. */
pty: string;
/** Read-only filename of the executable associated with the slave end of the pseudo-terminal. */
file: string;
/** Read-only name of the process associated with the slave end of the pseudo-terminal. */
process: string;
stdout: Terminal;
/** Note that an exception will be thrown if an attempt is made to access this property. */
stderr: Terminal;
stdin: Terminal;
socket: net.Socket;
/**
* Creates a new pseudo-terminal, spawns a child process, and associates it with the slave
* end of the pseudo-terminal.
*/
constructor(file?: string, args?: string[], opt?: TerminalOptions);
resize(cols?: number, rows?: number): void;
/**
* Sends a signal to the spawned process associated with the slave end of the
* pseudo-terminal (this only works if [[pid]] is not null).
*/
kill(signal?: string): void;
redraw(): void;
// NodeJS Socket-like interface (wrappers for this.socket)
write(data: any): boolean;
end(data: any): void;
pause(): void;
resume(): void;
setEncoding(encoding: string): void;
/**
* Closes the master end of the pseudo-terminal, and attempts to kill the spawned process
* associated with the slave end of the pseudo-terminal (but only if [[pid]] is not null).
*/
destroy(): void;
// NodeJS Stream interface
pipe<T extends NodeJS.WritableStream>(destination: T, options?: { end?: boolean | undefined; }): T;
// NodeJS EventEmitter interface
addListener(event: string, listener: Function): this;
on(event: string, listener: Function): this;
once(event: string, listener: Function): this;
removeListener(event: string, listener: Function): this;
removeAllListeners(event?: string): this;
listeners(event: string): Function[];
emit(event: string, ...args: any[]): boolean;
eventNames(): string[];
}
/**
* Creates a new pseudo-terminal, spawns a child process, and associates it with the slave
* end of the pseudo-terminal.
*/
export declare function createTerminal(file?: string, args?: string[], opt?: TerminalOptions): Terminal;
/** Alias for [[createTerminal]]. */
export declare function fork(file?: string, args?: string[], opt?: TerminalOptions): Terminal;
/** Alias for [[createTerminal]]. */
export declare function spawn(file?: string, args?: string[], opt?: TerminalOptions): Terminal;
/**
* Creates a new pseudo-terminal.
* This function is not available on Windows; use [[fork]] there instead.
*/
export declare function open(opt?: { cols?: number | undefined; rows?: number | undefined }): Terminal;
// Internal stuff that probably isn't very useful but is exported by pty.js
export declare module native {
/** Unix-only. */
export function fork(
file: string, args: string[], env: any, cwd: string, cols: number, rows: number,
uid?: number, gid?: number
): { fd: number; pid: number; pty: string };
/** Unix-only. */
export function open(
cols: number, rows: number
): { master: number; slave: number; pty: string };
/** Unix-only. */
export function process(fd: number, tty: string): string;
/** Windows-only. */
export function open(
dataPipe: string, cols: number, rows: number, debug: boolean
): { pid: number; pty: number; fd: number };
/** Windows-only. */
export function startProcess(
pid: number, file: string, cmdline: string, env: string[], cwd: string
): void;
/** Windows-only. */
export function kill(pid: number): void;
export function resize(fd: number, cols: number, rows: number): void;
}
|
Terminal
|
identifier_name
|
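The pty.js typings above wrap the POSIX pseudo-terminal primitives: fork a child attached to the slave end, then read and write the master file descriptor. For comparison, Python's stdlib pty module (Unix-only) gives the minimal equivalent of fork/createTerminal:

import os
import pty

# Sketch: the Unix primitive behind pty.js's fork/createTerminal (Unix-only).
# pty.fork() returns (pid, master_fd); in the child, stdio is the slave end.
pid, master_fd = pty.fork()
if pid == 0:
    # Child: exec a program whose stdin/stdout/stderr are the slave pty.
    os.execvp("echo", ["echo", "hello from the slave end"])
else:
    # Parent: read the child's output from the master end.
    output = os.read(master_fd, 1024)
    os.waitpid(pid, 0)
    print(output.decode(), end="")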
dictio.py
|
#!/usr/bin/python
#Covered by GPL V2.0
from encoders import *
from payloads import *
# generate_dictio evolution
class dictionary:
def __init__(self,dicc=None):
if dicc:
self.__payload=dicc.getpayload()
|
self.__payload=payload()
self.__encoder = [lambda x: encoder().encode(x)]
self.restart()
def count (self):
return self.__payload.count() * len(self.__encoder)
def setpayload(self,payl):
self.__payload = payl
self.restart()
def setencoder(self,encd):
self.__encoder=encd
self.generator = self.gen()
def getpayload (self):
return self.__payload
def getencoder (self):
return self.__encoder
def generate_all(self):
dicc=[]
for i in self.__payload:
dicc.append(self.__encoder.encode(i))
return dicc
def __iter__(self):
self.restart()
return self
def gen(self):
while 1:
pl=self.iter.next()
for encode in self.__encoder:
yield encode(pl)
def next(self):
return self.generator.next()
def restart(self):
self.iter=self.__payload.__iter__()
self.generator = self.gen()
|
self.__encoder=dicc.getencoder()
else:
|
random_line_split
|
dictio.py
|
#!/usr/bin/python
#Covered by GPL V2.0
from encoders import *
from payloads import *
# generate_dictio evolution
class dictionary:
def __init__(self,dicc=None):
if dicc:
self.__payload=dicc.getpayload()
self.__encoder=dicc.getencoder()
else:
self.__payload=payload()
self.__encoder = [lambda x: encoder().encode(x)]
self.restart()
def count (self):
return self.__payload.count() * len(self.__encoder)
def setpayload(self,payl):
self.__payload = payl
self.restart()
def setencoder(self,encd):
self.__encoder=encd
self.generator = self.gen()
def getpayload (self):
return self.__payload
def
|
(self):
return self.__encoder
def generate_all(self):
dicc=[]
for i in self.__payload:
dicc.append(self.__encoder.encode(i))
return dicc
def __iter__(self):
self.restart()
return self
def gen(self):
while 1:
pl=self.iter.next()
for encode in self.__encoder:
yield encode(pl)
def next(self):
return self.generator.next()
def restart(self):
self.iter=self.__payload.__iter__()
self.generator = self.gen()
|
getencoder
|
identifier_name
|
dictio.py
|
#!/usr/bin/python
#Covered by GPL V2.0
from encoders import *
from payloads import *
# generate_dictio evolution
class dictionary:
def __init__(self,dicc=None):
if dicc:
self.__payload=dicc.getpayload()
self.__encoder=dicc.getencoder()
else:
self.__payload=payload()
self.__encoder = [lambda x: encoder().encode(x)]
self.restart()
def count (self):
return self.__payload.count() * len(self.__encoder)
def setpayload(self,payl):
self.__payload = payl
self.restart()
def setencoder(self,encd):
self.__encoder=encd
self.generator = self.gen()
def getpayload (self):
|
def getencoder (self):
return self.__encoder
def generate_all(self):
dicc=[]
for i in self.__payload:
dicc.append(self.__encoder.encode(i))
return dicc
def __iter__(self):
self.restart()
return self
def gen(self):
while 1:
pl=self.iter.next()
for encode in self.__encoder:
yield encode(pl)
def next(self):
return self.generator.next()
def restart(self):
self.iter=self.__payload.__iter__()
self.generator = self.gen()
|
return self.__payload
|
identifier_body
|
dictio.py
|
#!/usr/bin/python
#Covered by GPL V2.0
from encoders import *
from payloads import *
# generate_dictio evolution
class dictionary:
def __init__(self,dicc=None):
if dicc:
self.__payload=dicc.getpayload()
self.__encoder=dicc.getencoder()
else:
self.__payload=payload()
self.__encoder = [lambda x: encoder().encode(x)]
self.restart()
def count (self):
return self.__payload.count() * len(self.__encoder)
def setpayload(self,payl):
self.__payload = payl
self.restart()
def setencoder(self,encd):
self.__encoder=encd
self.generator = self.gen()
def getpayload (self):
return self.__payload
def getencoder (self):
return self.__encoder
def generate_all(self):
dicc=[]
for i in self.__payload:
|
return dicc
def __iter__(self):
self.restart()
return self
def gen(self):
while 1:
pl=self.iter.next()
for encode in self.__encoder:
yield encode(pl)
def next(self):
return self.generator.next()
def restart(self):
self.iter=self.__payload.__iter__()
self.generator = self.gen()
|
dicc.append(self.__encoder.encode(i))
|
conditional_block
|
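The dictionary class in the dictio.py records is a lazy cross product: every payload pushed through every encoder, restartable via fresh generators. The code is Python 2 (.next(), bare while 1); a compact Python 3 rendering of the same idea, with stand-in encoders (itertools.product gives the payload-major order of the original gen()):

from itertools import product

# Python 3 sketch of dictio.py's payload x encoder cross product.
def candidates(payloads, encoders):
    # Lazily yield every encoding of every payload, like dictionary.gen().
    for payload, encode in product(payloads, encoders):
        yield encode(payload)

payloads = ["admin", "p@ss"]
encoders = [str.upper, lambda s: s[::-1]]   # stand-ins for the encoder objects
print(list(candidates(payloads, encoders)))
# ['ADMIN', 'nimda', 'P@SS', 'ss@p']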
carousels.js
|
$(document).ready(function(){
$('#bx1').bxSlider();
$('#bx2').bxSlider({
hideControlOnEnd: true,
captions: true,
pager: false
})
$('#bx3').bxSlider({
hideControlOnEnd: true,
minSlides: 3,
maxSlides: 3,
slideWidth: 360,
slideMargin: 10,
pager: false,
nextSelector: '#bx-next',
prevSelector: '#bx-prev',
nextText: '>',
prevText: '<'
|
$('#bx4').bxSlider({
hideControlOnEnd: true,
minSlides: 4,
maxSlides: 4,
slideWidth: 360,
slideMargin: 10,
pager: false,
nextSelector: '#bx-next4',
prevSelector: '#bx-prev4',
nextText: '>',
prevText: '<',
})
$('#bx5').bxSlider({
minSlides: 2,
maxSlides: 3,
slideWidth: 360,
slideMargin: 10,
pager: false,
ticker: true,
speed: 12000,
tickerHover: true,
useCSS: false
})
});
|
})
|
random_line_split
|
test_nmount.rs
|
use crate::*;
use nix::{
errno::Errno,
mount::{MntFlags, Nmount, unmount}
};
use std::{
ffi::CString,
|
path::Path
};
use tempfile::tempdir;
#[test]
fn ok() {
require_mount!("nullfs");
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let fstype = CString::new("fstype").unwrap();
let nullfs = CString::new("nullfs").unwrap();
Nmount::new()
.str_opt(&fstype, &nullfs)
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap();
// Now check that the sentry is visible through the mountpoint
let exists = Path::exists(&mountpoint.path().join("sentry"));
// Cleanup the mountpoint before asserting
unmount(mountpoint.path(), MntFlags::empty()).unwrap();
assert!(exists);
}
#[test]
fn bad_fstype() {
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let e = Nmount::new()
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap_err();
assert_eq!(e.error(), Errno::EINVAL);
assert_eq!(e.errmsg(), Some("Invalid fstype"));
}
|
fs::File,
|
random_line_split
|
test_nmount.rs
|
use crate::*;
use nix::{
errno::Errno,
mount::{MntFlags, Nmount, unmount}
};
use std::{
ffi::CString,
fs::File,
path::Path
};
use tempfile::tempdir;
#[test]
fn
|
() {
require_mount!("nullfs");
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let fstype = CString::new("fstype").unwrap();
let nullfs = CString::new("nullfs").unwrap();
Nmount::new()
.str_opt(&fstype, &nullfs)
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap();
// Now check that the sentry is visible through the mountpoint
let exists = Path::exists(&mountpoint.path().join("sentry"));
// Cleanup the mountpoint before asserting
unmount(mountpoint.path(), MntFlags::empty()).unwrap();
assert!(exists);
}
#[test]
fn bad_fstype() {
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let e = Nmount::new()
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap_err();
assert_eq!(e.error(), Errno::EINVAL);
assert_eq!(e.errmsg(), Some("Invalid fstype"));
}
|
ok
|
identifier_name
|
test_nmount.rs
|
use crate::*;
use nix::{
errno::Errno,
mount::{MntFlags, Nmount, unmount}
};
use std::{
ffi::CString,
fs::File,
path::Path
};
use tempfile::tempdir;
#[test]
fn ok() {
require_mount!("nullfs");
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let fstype = CString::new("fstype").unwrap();
let nullfs = CString::new("nullfs").unwrap();
Nmount::new()
.str_opt(&fstype, &nullfs)
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap();
// Now check that the sentry is visible through the mountpoint
let exists = Path::exists(&mountpoint.path().join("sentry"));
// Cleanup the mountpoint before asserting
unmount(mountpoint.path(), MntFlags::empty()).unwrap();
assert!(exists);
}
#[test]
fn bad_fstype()
|
{
let mountpoint = tempdir().unwrap();
let target = tempdir().unwrap();
let _sentry = File::create(target.path().join("sentry")).unwrap();
let e = Nmount::new()
.str_opt_owned("fspath", mountpoint.path().to_str().unwrap())
.str_opt_owned("target", target.path().to_str().unwrap())
.nmount(MntFlags::empty()).unwrap_err();
assert_eq!(e.error(), Errno::EINVAL);
assert_eq!(e.errmsg(), Some("Invalid fstype"));
}
|
identifier_body
|
|
prepareProject.ts
|
import { addTypenameIfAbsent } from "./addTypenameToSelectionSet";
import fs from "fs"
import {parse, visit, print, OperationDefinitionNode, FragmentDefinitionNode, FragmentSpreadNode, DocumentNode} from "graphql"
import { removeClientFields } from "./removeClientFields";
/**
* Take a whole bunch of GraphQL in one big string
* and validate it, especially:
*
* - operation names are unique
* - fragment names are unique
*
* Then, split each operation into a free-standing document,
* so it has all the fragments it needs.
*/
function prepareProject(filenames: string[], addTypename: boolean) {
if(!filenames.length) { return []; }
var allGraphQL = ""
filenames.forEach(function(filename) {
allGraphQL += fs.readFileSync(filename)
})
var ast = parse(allGraphQL)
// This will contain { name: [name, name] } pairs
var definitionDependencyNames: {[key: string] : string[] } = {}
var allOperationNames: string[] = []
var currentDependencyNames = null
// When entering a fragment or operation,
// start recording its dependencies
var enterDefinition = function(node: FragmentDefinitionNode | OperationDefinitionNode) {
// Technically, it could be an anonymous definition
if (node.name) {
var definitionName = node.name.value
if (definitionDependencyNames[definitionName]) {
throw new Error("Found duplicate definition name: " + definitionName + ", fragment & operation names must be unique to sync")
} else {
currentDependencyNames = definitionDependencyNames[definitionName] = []
}
}
}
var visitor = {
OperationDefinition: {
enter: function(node: OperationDefinitionNode) {
enterDefinition(node)
node.name && allOperationNames.push(node.name.value)
},
},
FragmentDefinition: {
enter: enterDefinition,
},
// When entering a fragment spread, register it as a
// dependency of its context
FragmentSpread: {
enter: function(node: FragmentSpreadNode) {
currentDependencyNames.push(node.name.value)
}
},
Field: {
leave: addTypename ? addTypenameIfAbsent : () => {}
},
InlineFragment: {
leave: addTypename ? addTypenameIfAbsent : () => {}
}
}
// Find the dependencies, build the accumulator
ast = visit(ast, visitor)
ast = removeClientFields(ast)
// For each operation, build a separate document of that operation and its deps
// then print the new document to a string
var operations = allOperationNames.map(function(operationName) {
var visitedDepNames: string[] = []
var depNamesToVisit = [operationName]
var depName
while (depNamesToVisit.length > 0) {
depName = depNamesToVisit.shift()
if (depName) {
visitedDepNames.push(depName)
definitionDependencyNames[depName].forEach(function(nextDepName) {
if (visitedDepNames.indexOf(nextDepName) === -1) {
depNamesToVisit.push(nextDepName)
}
})
}
}
var newAST = extractDefinitions(ast, visitedDepNames)
return {
name: operationName,
body: print(newAST),
alias: "", // will be filled in later, when hashFunc is available
}
})
return operations
}
// Return a new AST which contains only `definitionNames`
function extractDefinitions(ast: DocumentNode, definitionNames: string[])
|
export default prepareProject
|
{
var removeDefinitionNode = function(node: FragmentDefinitionNode | OperationDefinitionNode) {
if (node.name && definitionNames.indexOf(node.name.value) === -1) {
return null
} else {
return undefined
}
}
var visitor = {
OperationDefinition: removeDefinitionNode,
FragmentDefinition: removeDefinitionNode,
}
var newAST = visit(ast, visitor)
return newAST
}
|
identifier_body
|
prepareProject.ts
|
import { addTypenameIfAbsent } from "./addTypenameToSelectionSet";
import fs from "fs"
import {parse, visit, print, OperationDefinitionNode, FragmentDefinitionNode, FragmentSpreadNode, DocumentNode} from "graphql"
import { removeClientFields } from "./removeClientFields";
/**
* Take a whole bunch of GraphQL in one big string
* and validate it, especially:
*
* - operation names are unique
* - fragment names are unique
*
* Then, split each operation into a free-standing document,
* so it has all the fragments it needs.
*/
function prepareProject(filenames: string[], addTypename: boolean) {
if(!filenames.length) { return []; }
var allGraphQL = ""
filenames.forEach(function(filename) {
allGraphQL += fs.readFileSync(filename)
})
var ast = parse(allGraphQL)
// This will contain { name: [name, name] } pairs
var definitionDependencyNames: {[key: string] : string[] } = {}
var allOperationNames: string[] = []
var currentDependencyNames = null
// When entering a fragment or operation,
// start recording its dependencies
var enterDefinition = function(node: FragmentDefinitionNode | OperationDefinitionNode) {
// Technically, it could be an anonymous definition
if (node.name)
|
}
var visitor = {
OperationDefinition: {
enter: function(node: OperationDefinitionNode) {
enterDefinition(node)
node.name && allOperationNames.push(node.name.value)
},
},
FragmentDefinition: {
enter: enterDefinition,
},
// When entering a fragment spread, register it as a
// dependency of its context
FragmentSpread: {
enter: function(node: FragmentSpreadNode) {
currentDependencyNames.push(node.name.value)
}
},
Field: {
leave: addTypename ? addTypenameIfAbsent : () => {}
},
InlineFragment: {
leave: addTypename ? addTypenameIfAbsent : () => {}
}
}
// Find the dependencies, build the accumulator
ast = visit(ast, visitor)
ast = removeClientFields(ast)
// For each operation, build a separate document of that operation and its deps
// then print the new document to a string
var operations = allOperationNames.map(function(operationName) {
var visitedDepNames: string[] = []
var depNamesToVisit = [operationName]
var depName
while (depNamesToVisit.length > 0) {
depName = depNamesToVisit.shift()
if (depName) {
visitedDepNames.push(depName)
definitionDependencyNames[depName].forEach(function(nextDepName) {
if (visitedDepNames.indexOf(nextDepName) === -1) {
depNamesToVisit.push(nextDepName)
}
})
}
}
var newAST = extractDefinitions(ast, visitedDepNames)
return {
name: operationName,
body: print(newAST),
alias: "", // will be filled in later, when hashFunc is available
}
})
return operations
}
// Return a new AST which contains only `definitionNames`
function extractDefinitions(ast: DocumentNode, definitionNames: string[]) {
var removeDefinitionNode = function(node: FragmentDefinitionNode | OperationDefinitionNode) {
if (node.name && definitionNames.indexOf(node.name.value) === -1) {
return null
} else {
return undefined
}
}
var visitor = {
OperationDefinition: removeDefinitionNode,
FragmentDefinition: removeDefinitionNode,
}
var newAST = visit(ast, visitor)
return newAST
}
export default prepareProject
|
{
var definitionName = node.name.value
if (definitionDependencyNames[definitionName]) {
throw new Error("Found duplicate definition name: " + definitionName + ", fragment & operation names must be unique to sync")
} else {
currentDependencyNames = definitionDependencyNames[definitionName] = []
}
}
|
conditional_block
|
prepareProject.ts
|
import { addTypenameIfAbsent } from "./addTypenameToSelectionSet";
import fs from "fs"
import {parse, visit, print, OperationDefinitionNode, FragmentDefinitionNode, FragmentSpreadNode, DocumentNode} from "graphql"
import { removeClientFields } from "./removeClientFields";
/**
* Take a whole bunch of GraphQL in one big string
* and validate it, especially:
*
* - operation names are unique
* - fragment names are unique
*
* Then, split each operation into a free-standing document,
* so it has all the fragments it needs.
*/
function
|
(filenames: string[], addTypename: boolean) {
if(!filenames.length) { return []; }
var allGraphQL = ""
filenames.forEach(function(filename) {
allGraphQL += fs.readFileSync(filename)
})
var ast = parse(allGraphQL)
// This will contain { name: [name, name] } pairs
var definitionDependencyNames: {[key: string] : string[] } = {}
var allOperationNames: string[] = []
var currentDependencyNames = null
// When entering a fragment or operation,
// start recording its dependencies
var enterDefinition = function(node: FragmentDefinitionNode | OperationDefinitionNode) {
// Technically, it could be an anonymous definition
if (node.name) {
var definitionName = node.name.value
if (definitionDependencyNames[definitionName]) {
throw new Error("Found duplicate definition name: " + definitionName + ", fragment & operation names must be unique to sync")
} else {
currentDependencyNames = definitionDependencyNames[definitionName] = []
}
}
}
var visitor = {
OperationDefinition: {
enter: function(node: OperationDefinitionNode) {
enterDefinition(node)
node.name && allOperationNames.push(node.name.value)
},
},
FragmentDefinition: {
enter: enterDefinition,
},
// When entering a fragment spread, register it as a
// dependency of its context
FragmentSpread: {
enter: function(node: FragmentSpreadNode) {
currentDependencyNames.push(node.name.value)
}
},
Field: {
leave: addTypename ? addTypenameIfAbsent : () => {}
},
InlineFragment: {
leave: addTypename ? addTypenameIfAbsent : () => {}
}
}
// Find the dependencies, build the accumulator
ast = visit(ast, visitor)
ast = removeClientFields(ast)
// For each operation, build a separate document of that operation and its deps
// then print the new document to a string
var operations = allOperationNames.map(function(operationName) {
var visitedDepNames: string[] = []
var depNamesToVisit = [operationName]
var depName
while (depNamesToVisit.length > 0) {
depName = depNamesToVisit.shift()
if (depName) {
visitedDepNames.push(depName)
definitionDependencyNames[depName].forEach(function(nextDepName) {
if (visitedDepNames.indexOf(nextDepName) === -1) {
depNamesToVisit.push(nextDepName)
}
})
}
}
var newAST = extractDefinitions(ast, visitedDepNames)
return {
name: operationName,
body: print(newAST),
alias: "", // will be filled in later, when hashFunc is available
}
})
return operations
}
// Return a new AST which contains only `definitionNames`
function extractDefinitions(ast: DocumentNode, definitionNames: string[]) {
var removeDefinitionNode = function(node: FragmentDefinitionNode | OperationDefinitionNode) {
if (node.name && definitionNames.indexOf(node.name.value) === -1) {
return null
} else {
return undefined
}
}
var visitor = {
OperationDefinition: removeDefinitionNode,
FragmentDefinition: removeDefinitionNode,
}
var newAST = visit(ast, visitor)
return newAST
}
export default prepareProject
|
prepareProject
|
identifier_name
|
prepareProject.ts
|
import { addTypenameIfAbsent } from "./addTypenameToSelectionSet";
import fs from "fs"
import {parse, visit, print, OperationDefinitionNode, FragmentDefinitionNode, FragmentSpreadNode, DocumentNode} from "graphql"
import { removeClientFields } from "./removeClientFields";
/**
* Take a whole bunch of GraphQL in one big string
* and validate it, especially:
*
* - operation names are unique
* - fragment names are unique
*
* Then, split each operation into a free-standing document,
* so it has all the fragments it needs.
*/
function prepareProject(filenames: string[], addTypename: boolean) {
if(!filenames.length) { return []; }
var allGraphQL = ""
filenames.forEach(function(filename) {
allGraphQL += fs.readFileSync(filename)
})
var ast = parse(allGraphQL)
// This will contain { name: [name, name] } pairs
var definitionDependencyNames: {[key: string] : string[] } = {}
var allOperationNames: string[] = []
var currentDependencyNames = null
// When entering a fragment or operation,
// start recording its dependencies
var enterDefinition = function(node: FragmentDefinitionNode | OperationDefinitionNode) {
// Technically, it could be an anonymous definition
if (node.name) {
var definitionName = node.name.value
if (definitionDependencyNames[definitionName]) {
throw new Error("Found duplicate definition name: " + definitionName + ", fragment & operation names must be unique to sync")
} else {
currentDependencyNames = definitionDependencyNames[definitionName] = []
}
}
}
var visitor = {
OperationDefinition: {
enter: function(node: OperationDefinitionNode) {
enterDefinition(node)
node.name && allOperationNames.push(node.name.value)
},
},
FragmentDefinition: {
enter: enterDefinition,
},
// When entering a fragment spread, register it as a
// dependency of its context
FragmentSpread: {
enter: function(node: FragmentSpreadNode) {
currentDependencyNames.push(node.name.value)
}
},
Field: {
leave: addTypename ? addTypenameIfAbsent : () => {}
},
InlineFragment: {
leave: addTypename ? addTypenameIfAbsent : () => {}
}
}
// Find the dependencies, build the accumulator
ast = visit(ast, visitor)
ast = removeClientFields(ast)
// For each operation, build a separate document of that operation and its deps
// then print the new document to a string
var operations = allOperationNames.map(function(operationName) {
var visitedDepNames: string[] = []
var depNamesToVisit = [operationName]
var depName
while (depNamesToVisit.length > 0) {
depName = depNamesToVisit.shift()
if (depName) {
visitedDepNames.push(depName)
definitionDependencyNames[depName].forEach(function(nextDepName) {
if (visitedDepNames.indexOf(nextDepName) === -1) {
depNamesToVisit.push(nextDepName)
}
})
}
}
var newAST = extractDefinitions(ast, visitedDepNames)
return {
name: operationName,
body: print(newAST),
alias: "", // will be filled in later, when hashFunc is available
}
})
return operations
}
// Return a new AST which contains only `definitionNames`
function extractDefinitions(ast: DocumentNode, definitionNames: string[]) {
var removeDefinitionNode = function(node: FragmentDefinitionNode | OperationDefinitionNode) {
if (node.name && definitionNames.indexOf(node.name.value) === -1) {
return null
} else {
return undefined
}
}
var visitor = {
OperationDefinition: removeDefinitionNode,
FragmentDefinition: removeDefinitionNode,
}
var newAST = visit(ast, visitor)
return newAST
}
|
export default prepareProject
|
random_line_split
|
|
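A hedged usage sketch for prepareProject as defined in the records above — the file paths are hypothetical, and `alias` stays empty until the caller supplies a hash function:

import prepareProject from "./prepareProject"

// Split every named operation in these files into a self-contained document.
var operations = prepareProject(["src/queries.graphql", "src/fragments.graphql"], true)
operations.forEach(function(op) {
  // Each entry is { name, body, alias }; alias is filled in later.
  console.log(op.name + " (" + op.body.length + " chars)")
})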
x86_64.rs
|
#![allow(unused_imports)]
use core::intrinsics;
// NOTE These functions are implemented using assembly because they use a custom
// calling convention which can't be implemented using a normal Rust function
|
// NOTE These functions are never mangled as they are not tested against compiler-rt
// and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk_ms() {
asm!("
push %rcx
push %rax
cmp $$0x1000,%rax
lea 24(%rsp),%rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
pop %rax
pop %rcx
ret" ::: "memory" : "volatile");
intrinsics::unreachable();
}
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn __alloca() {
asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
::: "memory" : "volatile");
intrinsics::unreachable();
}
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk() {
asm!(
"
push %rcx
cmp $$0x1000,%rax
lea 16(%rsp),%rcx // rsp before calling this routine -> rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
lea 8(%rsp),%rax // load pointer to the return address into rax
mov %rcx,%rsp // install the new top of stack pointer into rsp
mov -8(%rax),%rcx // restore rcx
push (%rax) // push return address onto the stack
sub %rsp,%rax // restore the original value in rax
ret"
::: "memory" : "volatile"
);
intrinsics::unreachable();
}
// HACK(https://github.com/rust-lang/rust/issues/62785): x86_64-unknown-uefi needs special LLVM
// support unless we emit the _fltused
#[no_mangle]
#[used]
#[cfg(target_os = "uefi")]
static _fltused: i32 = 0;
|
random_line_split
|
|
x86_64.rs
|
#![allow(unused_imports)]
use core::intrinsics;
// NOTE These functions are implemented using assembly because they use a custom
// calling convention which can't be implemented using a normal Rust function
// NOTE These functions are never mangled as they are not tested against compiler-rt
// and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk_ms()
|
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn __alloca() {
asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
::: "memory" : "volatile");
intrinsics::unreachable();
}
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk() {
asm!(
"
push %rcx
cmp $$0x1000,%rax
lea 16(%rsp),%rcx // rsp before calling this routine -> rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
lea 8(%rsp),%rax // load pointer to the return address into rax
mov %rcx,%rsp // install the new top of stack pointer into rsp
mov -8(%rax),%rcx // restore rcx
push (%rax) // push return address onto the stack
sub %rsp,%rax // restore the original value in rax
ret"
::: "memory" : "volatile"
);
intrinsics::unreachable();
}
// HACK(https://github.com/rust-lang/rust/issues/62785): x86_64-unknown-uefi needs special LLVM
// support unless we emit the _fltused
#[no_mangle]
#[used]
#[cfg(target_os = "uefi")]
static _fltused: i32 = 0;
|
{
asm!("
push %rcx
push %rax
cmp $$0x1000,%rax
lea 24(%rsp),%rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
pop %rax
pop %rcx
ret" ::: "memory" : "volatile");
intrinsics::unreachable();
}
|
identifier_body
|
x86_64.rs
|
#![allow(unused_imports)]
use core::intrinsics;
// NOTE These functions are implemented using assembly because they use a custom
// calling convention which can't be implemented using a normal Rust function
// NOTE These functions are never mangled as they are not tested against compiler-rt
// and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk_ms() {
asm!("
push %rcx
push %rax
cmp $$0x1000,%rax
lea 24(%rsp),%rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
pop %rax
pop %rcx
ret" ::: "memory" : "volatile");
intrinsics::unreachable();
}
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn
|
() {
asm!("mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
jmp ___chkstk // Jump to ___chkstk since fallthrough may be unreliable"
::: "memory" : "volatile");
intrinsics::unreachable();
}
#[cfg(all(windows, target_env = "gnu", not(feature = "mangled-names")))]
#[naked]
#[no_mangle]
pub unsafe fn ___chkstk() {
asm!(
"
push %rcx
cmp $$0x1000,%rax
lea 16(%rsp),%rcx // rsp before calling this routine -> rcx
jb 1f
2:
sub $$0x1000,%rcx
test %rcx,(%rcx)
sub $$0x1000,%rax
cmp $$0x1000,%rax
ja 2b
1:
sub %rax,%rcx
test %rcx,(%rcx)
lea 8(%rsp),%rax // load pointer to the return address into rax
mov %rcx,%rsp // install the new top of stack pointer into rsp
mov -8(%rax),%rcx // restore rcx
push (%rax) // push return address onto the stack
sub %rsp,%rax // restore the original value in rax
ret"
::: "memory" : "volatile"
);
intrinsics::unreachable();
}
// HACK(https://github.com/rust-lang/rust/issues/62785): x86_64-unknown-uefi needs special LLVM
// support unless we emit the _fltused
#[no_mangle]
#[used]
#[cfg(target_os = "uefi")]
static _fltused: i32 = 0;
|
__alloca
|
identifier_name
|
tile.js
|
var Tile = function (type, x, y) {
this.type = type;
this.tint = 0;
this.hover = false;
this.isAllowed = undefined;
this.isAllowedForBeat = undefined;
this.x = x;
this.y = y;
this.graphic = new fabric.Rect({
left: Tile.size * x,
top: Tile.size * y,
fill: type === Tile.TileType.NONPLAYABLE ? Tile.ColorNonplayable : Tile.ColorPlayable,
width: Tile.size,
height: Tile.size,
selectable: false,
obj: this
});
canvas.add(this.graphic);
};
Tile.size = undefined;
Tile.ColorPlayable = "#717070";
Tile.ColorNonplayable = "#d9d9d9";
Tile.ColorAllowed = "#38b321";
Tile.ColorMovingToNow = "#b8c153";
Tile.ColorAllowedForBeat = "#cf3a3a";
Tile.TileType = {
PLAYABLE: 0,
NONPLAYABLE: 1
};
Tile.prototype.setMan = function(man) {
this.man = man;
};
Tile.prototype.clearMan = function() {
this.man = undefined; //TODO null? +RETHINK
};
Tile.prototype.setAsAllowed = function() {
var graphic = this.graphic;
fabric.util.animateColor(this.graphic.fill, Tile.ColorAllowed, colorAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
this.isAllowed = true;
};
Tile.prototype.setAsAllowedForBeat = function() {
var graphic = this.graphic;
|
}
});
this.isAllowedForBeat = true;
};
Tile.prototype.clearHighlights = function() {
var graphic = this.graphic;
fabric.util.animateColor(this.graphic.fill, Tile.ColorPlayable, colorAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
this.isAllowed = false;
this.isAllowedForBeat = false;
};
Tile.prototype.setAsMovingToNow = function() {
var graphic = this.graphic;
fabric.util.animateColor(this.graphic.fill, Tile.ColorMovingToNow, colorAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
};
Tile.prototype.isAllowedForMove = function() {
return this.isAllowed || this.isAllowedForBeat;
};
Tile.prototype.onMouseOver = function() {
if (this.isAllowedForMove()) {
var graphic = this.graphic;
fabric.util.animateColor(graphic.fill, //TODO: colorAllowed/this.fill +REFACTOR
Color(graphic.fill).lightenByRatio(0.2).toString(), hoverAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
}
};
Tile.prototype.onMouseOut = function() {
if (this.isAllowedForMove()) {
var graphic = this.graphic;
fabric.util.animateColor(graphic.fill, Color(graphic.fill).darkenByRatio(0.1666).toString(),
hoverAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
}
};
Tile.prototype.onMouseDown = function() {
if (this.isAllowedForMove()) {
//move men to selected (this) tile
board.sendMove(board.selectedMan.tile, this);
board.moveSelectedManTo(this);
} else {
//or unselect man, if clicked on empty tile
board.unselect();
}
};
|
fabric.util.animateColor(this.graphic.fill, Tile.ColorAllowedForBeat, colorAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
|
random_line_split
|
tile.js
|
var Tile = function (type, x, y) {
this.type = type;
this.tint = 0;
this.hover = false;
this.isAllowed = undefined;
this.isAllowedForBeat = undefined;
this.x = x;
this.y = y;
this.graphic = new fabric.Rect({
left: Tile.size * x,
top: Tile.size * y,
fill: type === Tile.TileType.NONPLAYABLE ? Tile.ColorNonplayable : Tile.ColorPlayable,
width: Tile.size,
height: Tile.size,
selectable: false,
obj: this
});
canvas.add(this.graphic);
};
Tile.size = undefined;
Tile.ColorPlayable = "#717070";
Tile.ColorNonplayable = "#d9d9d9";
Tile.ColorAllowed = "#38b321";
Tile.ColorMovingToNow = "#b8c153";
Tile.ColorAllowedForBeat = "#cf3a3a";
Tile.TileType = {
PLAYABLE: 0,
NONPLAYABLE: 1
};
Tile.prototype.setMan = function(man) {
this.man = man;
};
Tile.prototype.clearMan = function() {
this.man = undefined; //TODO null? +RETHINK
};
Tile.prototype.setAsAllowed = function() {
var graphic = this.graphic;
fabric.util.animateColor(this.graphic.fill, Tile.ColorAllowed, colorAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
this.isAllowed = true;
};
Tile.prototype.setAsAllowedForBeat = function() {
var graphic = this.graphic;
fabric.util.animateColor(this.graphic.fill, Tile.ColorAllowedForBeat, colorAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
this.isAllowedForBeat = true;
};
Tile.prototype.clearHighlights = function() {
var graphic = this.graphic;
fabric.util.animateColor(this.graphic.fill, Tile.ColorPlayable, colorAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
this.isAllowed = false;
this.isAllowedForBeat = false;
};
Tile.prototype.setAsMovingToNow = function() {
var graphic = this.graphic;
fabric.util.animateColor(this.graphic.fill, Tile.ColorMovingToNow, colorAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
};
Tile.prototype.isAllowedForMove = function() {
return this.isAllowed || this.isAllowedForBeat;
};
Tile.prototype.onMouseOver = function() {
if (this.isAllowedForMove()) {
var graphic = this.graphic;
fabric.util.animateColor(graphic.fill, //TODO: colorAllowed/this.fill +REFACTOR
Color(graphic.fill).lightenByRatio(0.2).toString(), hoverAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
}
};
Tile.prototype.onMouseOut = function() {
if (this.isAllowedForMove()) {
var graphic = this.graphic;
fabric.util.animateColor(graphic.fill, Color(graphic.fill).darkenByRatio(0.1666).toString(),
hoverAnimationTime, {
onChange: function(val) {
graphic.setFill(val);
canvas.renderAll();
}
});
}
};
Tile.prototype.onMouseDown = function() {
if (this.isAllowedForMove()) {
//move men to selected (this) tile
board.sendMove(board.selectedMan.tile, this);
board.moveSelectedManTo(this);
} else
|
};
|
{
//or unselect man, if clicked on empty tile
board.unselect();
}
|
conditional_block
|
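The hover and highlight methods in tile.js repeat the same animateColor callback; a small helper capturing that pattern could look like this. This is a sketch against the fabric/Color calls already used above — `fabric`, `canvas`, `Color`, and `hoverAnimationTime` come from the surrounding file, not from this snippet:

// Animate a tile's fill toward a target color, re-rendering each frame.
function animateFill(graphic: any, targetColor: string, duration: number) {
  fabric.util.animateColor(graphic.fill, targetColor, duration, {
    onChange: function(val: string) {
      graphic.setFill(val)
      canvas.renderAll()
    }
  })
}

// onMouseOver then reduces to a single call, e.g.:
// animateFill(this.graphic, Color(this.graphic.fill).lightenByRatio(0.2).toString(), hoverAnimationTime)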
test_api.py
|
from __future__ import absolute_import
import unittest
import deviantart
from .helpers import mock_response, optional
from .api_credentials import CLIENT_ID, CLIENT_SECRET
class ApiTest(unittest.TestCase):
@optional(CLIENT_ID == "", mock_response('token'))
def
|
(self):
self.da = deviantart.Api(CLIENT_ID, CLIENT_SECRET)
@optional(CLIENT_ID == "", mock_response('user_profile_devart'))
def test_get_user(self):
user = self.da.get_user("devart")
self.assertEqual("devart", user.username)
self.assertEqual("devart", repr(user))
@optional(CLIENT_ID == "", mock_response('deviation'))
def test_get_deviation(self):
deviation = self.da.get_deviation("234546F5-C9D1-A9B1-D823-47C4E3D2DB95")
self.assertEqual("234546F5-C9D1-A9B1-D823-47C4E3D2DB95", deviation.deviationid)
self.assertEqual("234546F5-C9D1-A9B1-D823-47C4E3D2DB95", repr(deviation))
@optional(CLIENT_ID == "", mock_response('comments_siblings'))
def test_get_comment(self):
comments = self.da.get_comments("siblings", commentid="E99B1CEB-933F-B54D-ABC2-88FD0F66D421")
comment = comments['thread'][0]
self.assertEqual("E99B1CEB-933F-B54D-ABC2-88FD0F66D421", comment.commentid)
self.assertEqual("E99B1CEB-933F-B54D-ABC2-88FD0F66D421", repr(comment))
|
setUp
|
identifier_name
|
test_api.py
|
from __future__ import absolute_import
import unittest
import deviantart
from .helpers import mock_response, optional
from .api_credentials import CLIENT_ID, CLIENT_SECRET
class ApiTest(unittest.TestCase):
@optional(CLIENT_ID == "", mock_response('token'))
def setUp(self):
|
@optional(CLIENT_ID == "", mock_response('user_profile_devart'))
def test_get_user(self):
user = self.da.get_user("devart")
self.assertEqual("devart", user.username)
self.assertEqual("devart", repr(user))
@optional(CLIENT_ID == "", mock_response('deviation'))
def test_get_deviation(self):
deviation = self.da.get_deviation("234546F5-C9D1-A9B1-D823-47C4E3D2DB95")
self.assertEqual("234546F5-C9D1-A9B1-D823-47C4E3D2DB95", deviation.deviationid)
self.assertEqual("234546F5-C9D1-A9B1-D823-47C4E3D2DB95", repr(deviation))
@optional(CLIENT_ID == "", mock_response('comments_siblings'))
def test_get_comment(self):
comments = self.da.get_comments("siblings", commentid="E99B1CEB-933F-B54D-ABC2-88FD0F66D421")
comment = comments['thread'][0]
self.assertEqual("E99B1CEB-933F-B54D-ABC2-88FD0F66D421", comment.commentid)
self.assertEqual("E99B1CEB-933F-B54D-ABC2-88FD0F66D421", repr(comment))
|
self.da = deviantart.Api(CLIENT_ID, CLIENT_SECRET)
|
identifier_body
|
test_api.py
|
from __future__ import absolute_import
import unittest
import deviantart
from .helpers import mock_response, optional
from .api_credentials import CLIENT_ID, CLIENT_SECRET
class ApiTest(unittest.TestCase):
@optional(CLIENT_ID == "", mock_response('token'))
def setUp(self):
self.da = deviantart.Api(CLIENT_ID, CLIENT_SECRET)
@optional(CLIENT_ID == "", mock_response('user_profile_devart'))
def test_get_user(self):
user = self.da.get_user("devart")
self.assertEqual("devart", user.username)
|
self.assertEqual("234546F5-C9D1-A9B1-D823-47C4E3D2DB95", deviation.deviationid)
self.assertEqual("234546F5-C9D1-A9B1-D823-47C4E3D2DB95", repr(deviation))
@optional(CLIENT_ID == "", mock_response('comments_siblings'))
def test_get_comment(self):
comments = self.da.get_comments("siblings", commentid="E99B1CEB-933F-B54D-ABC2-88FD0F66D421")
comment = comments['thread'][0]
self.assertEqual("E99B1CEB-933F-B54D-ABC2-88FD0F66D421", comment.commentid)
self.assertEqual("E99B1CEB-933F-B54D-ABC2-88FD0F66D421", repr(comment))
|
self.assertEqual("devart", repr(user))
@optional(CLIENT_ID == "", mock_response('deviation'))
def test_get_deviation(self):
deviation = self.da.get_deviation("234546F5-C9D1-A9B1-D823-47C4E3D2DB95")
|
random_line_split
|
commands.ts
|
// ***********************************************
// This example commands.js shows you how to
// create various custom commands and overwrite
// existing commands.
//
// For more comprehensive examples of custom
// commands please read more here:
// https://on.cypress.io/custom-commands
// ***********************************************
//
//
// -- This is a parent command --
// Cypress.Commands.add("login", (email, password) => { ... })
//
//
// -- This is a child command --
// Cypress.Commands.add("drag", { prevSubject: 'element'}, (subject, options) => { ... })
//
//
// -- This is a dual command --
// Cypress.Commands.add("dismiss", { prevSubject: 'optional'}, (subject, options) => { ... })
//
//
// -- This will overwrite an existing command --
// Cypress.Commands.overwrite("visit", (originalFn, url, options) => { ... })
Cypress.Commands.add('getByClass', (selector, ...args) => {
return cy.get(`.w-e-${selector}`, ...args)
})
Cypress.Commands.add('getEditor', () => {
return cy.window().its('editor')
})
Cypress.Commands.add('saveRange', (el?: HTMLElement) => {
cy.getByClass('text-container').children().first().as('Editable')
return cy.window().then(win => {
const range = win.document.createRange()
if (el != null)
|
else {
return cy
.get('@Editable')
.children()
.then($el => {
const el = $el.get(0)
range.setStart(el, 0)
range.setEnd(el, 0)
return cy.getEditor().then(editor => {
editor.selection.saveRange(range)
return editor
})
})
}
})
})
|
{
range.setStart(el, 0)
range.setEnd(el, 0)
return cy.getEditor().then(editor => {
editor.selection.saveRange(range)
return editor
})
}
|
conditional_block
|
commands.ts
|
// ***********************************************
// This example commands.js shows you how to
// create various custom commands and overwrite
// existing commands.
//
// For more comprehensive examples of custom
// commands please read more here:
// https://on.cypress.io/custom-commands
// ***********************************************
//
//
// -- This is a parent command --
// Cypress.Commands.add("login", (email, password) => { ... })
//
//
// -- This is a child command --
// Cypress.Commands.add("drag", { prevSubject: 'element'}, (subject, options) => { ... })
//
//
|
// Cypress.Commands.overwrite("visit", (originalFn, url, options) => { ... })
Cypress.Commands.add('getByClass', (selector, ...args) => {
return cy.get(`.w-e-${selector}`, ...args)
})
Cypress.Commands.add('getEditor', () => {
return cy.window().its('editor')
})
Cypress.Commands.add('saveRange', (el?: HTMLElement) => {
cy.getByClass('text-container').children().first().as('Editable')
return cy.window().then(win => {
const range = win.document.createRange()
if (el != null) {
range.setStart(el, 0)
range.setEnd(el, 0)
return cy.getEditor().then(editor => {
editor.selection.saveRange(range)
return editor
})
} else {
return cy
.get('@Editable')
.children()
.then($el => {
const el = $el.get(0)
range.setStart(el, 0)
range.setEnd(el, 0)
return cy.getEditor().then(editor => {
editor.selection.saveRange(range)
return editor
})
})
}
})
})
|
// -- This is a dual command --
// Cypress.Commands.add("dismiss", { prevSubject: 'optional'}, (subject, options) => { ... })
//
//
// -- This will overwrite an existing command --
|
random_line_split
|
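A hedged example of exercising the custom commands registered above in a spec — the visited page is hypothetical; it just needs to expose `window.editor` and a `.w-e-text-container` element:

describe('saveRange', () => {
  it('stores a range and yields the editor', () => {
    cy.visit('/') // hypothetical editor page
    cy.saveRange().then(editor => {
      // `saveRange` yields the editor after storing the range on its selection.
      expect(editor).to.exist
    })
  })
})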
feed_parse_extractSecretchateauWordpressCom.py
|
def extractSecretchateauWordpressCom(item):
'''
Parser for 'secretchateau.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
titlemap = [
('GRMHCD ', 'Grim Reaper Makes His C-Debut', 'translated'),
('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
tagmap = [
('PRC', 'PRC', 'translated'),
|
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
|
random_line_split
|
feed_parse_extractSecretchateauWordpressCom.py
|
def extractSecretchateauWordpressCom(item):
|
'''
Parser for 'secretchateau.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
titlemap = [
('GRMHCD ', 'Grim Reaper Makes His C-Debut', 'translated'),
('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
identifier_body
|
|
feed_parse_extractSecretchateauWordpressCom.py
|
def
|
(item):
'''
Parser for 'secretchateau.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
titlemap = [
('GRMHCD ', 'Grim Reaper Makes His C-Debut', 'translated'),
('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
extractSecretchateauWordpressCom
|
identifier_name
|
feed_parse_extractSecretchateauWordpressCom.py
|
def extractSecretchateauWordpressCom(item):
'''
Parser for 'secretchateau.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
titlemap = [
('GRMHCD ', 'Grim Reaper Makes His C-Debut', 'translated'),
('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
|
return False
|
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
|
conditional_block
|
Slice.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::raw::Slice;
use core::raw::Repr;
// pub unsafe trait Repr<T> {
// /// This function "unwraps" a rust value (without consuming it) into its raw
// /// struct representation. This can be used to read/write different values
// /// for the struct. This is a safe method because by default it does not
// /// enable write-access to the fields of the return value in safe code.
// #[inline]
// fn repr(&self) -> T { unsafe { mem::transmute_copy(&self) } }
// }
// #[repr(C)]
// pub struct Slice<T> {
// pub data: *const T,
// pub len: usize,
// }
type T = i32;
#[test]
fn
|
() {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
assert_eq!(repr.len, 4);
}
#[test]
fn slice_test2 () {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
let copy: &[T] = slice;
let copy_repr: Slice<T> = copy.repr();
assert_eq!(copy_repr.data, repr.data);
assert_eq!(copy_repr.len, repr.len);
}
}
|
slice_test1
|
identifier_name
|
Slice.rs
|
#[cfg(test)]
mod tests {
use core::raw::Slice;
use core::raw::Repr;
// pub unsafe trait Repr<T> {
// /// This function "unwraps" a rust value (without consuming it) into its raw
// /// struct representation. This can be used to read/write different values
// /// for the struct. This is a safe method because by default it does not
// /// enable write-access to the fields of the return value in safe code.
// #[inline]
// fn repr(&self) -> T { unsafe { mem::transmute_copy(&self) } }
// }
// #[repr(C)]
// pub struct Slice<T> {
// pub data: *const T,
// pub len: usize,
// }
type T = i32;
#[test]
fn slice_test1 () {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
assert_eq!(repr.len, 4);
}
#[test]
fn slice_test2 () {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
let copy: &[T] = slice;
let copy_repr: Slice<T> = copy.repr();
assert_eq!(copy_repr.data, repr.data);
assert_eq!(copy_repr.len, repr.len);
}
}
|
#![feature(core)]
extern crate core;
|
random_line_split
|
|
Slice.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::raw::Slice;
use core::raw::Repr;
// pub unsafe trait Repr<T> {
// /// This function "unwraps" a rust value (without consuming it) into its raw
// /// struct representation. This can be used to read/write different values
// /// for the struct. This is a safe method because by default it does not
// /// enable write-access to the fields of the return value in safe code.
// #[inline]
// fn repr(&self) -> T { unsafe { mem::transmute_copy(&self) } }
// }
// #[repr(C)]
// pub struct Slice<T> {
// pub data: *const T,
// pub len: usize,
// }
type T = i32;
#[test]
fn slice_test1 () {
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
assert_eq!(repr.len, 4);
}
#[test]
fn slice_test2 ()
|
}
|
{
let slice: &[T] = &[1, 2, 3, 4];
let repr: Slice<T> = slice.repr();
let copy: &[T] = slice;
let copy_repr: Slice<T> = copy.repr();
assert_eq!(copy_repr.data, repr.data);
assert_eq!(copy_repr.len, repr.len);
}
|
identifier_body
|
yelp_polarity_test.py
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yelp dataset module."""
from tensorflow_datasets import testing
from tensorflow_datasets.text import yelp_polarity
class YelpPolarityReviewsTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = yelp_polarity.YelpPolarityReviews
SPLITS = {
"train": 2,
"test": 2,
}
if __name__ == "__main__":
testing.test_main()
|
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
|
random_line_split
|
yelp_polarity_test.py
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yelp dataset module."""
from tensorflow_datasets import testing
from tensorflow_datasets.text import yelp_polarity
class YelpPolarityReviewsTest(testing.DatasetBuilderTestCase):
DATASET_CLASS = yelp_polarity.YelpPolarityReviews
SPLITS = {
"train": 2,
"test": 2,
}
if __name__ == "__main__":
|
testing.test_main()
|
conditional_block
|
|
yelp_polarity_test.py
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yelp dataset module."""
from tensorflow_datasets import testing
from tensorflow_datasets.text import yelp_polarity
class
|
(testing.DatasetBuilderTestCase):
DATASET_CLASS = yelp_polarity.YelpPolarityReviews
SPLITS = {
"train": 2,
"test": 2,
}
if __name__ == "__main__":
testing.test_main()
|
YelpPolarityReviewsTest
|
identifier_name
|
yelp_polarity_test.py
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yelp dataset module."""
from tensorflow_datasets import testing
from tensorflow_datasets.text import yelp_polarity
class YelpPolarityReviewsTest(testing.DatasetBuilderTestCase):
|
if __name__ == "__main__":
testing.test_main()
|
DATASET_CLASS = yelp_polarity.YelpPolarityReviews
SPLITS = {
"train": 2,
"test": 2,
}
|
identifier_body
|
test_socket.rs
|
use nix::sys::socket::{InetAddr, UnixAddr, getsockname};
use std::{mem, net};
use std::path::Path;
use std::str::FromStr;
use std::os::unix::io::AsRawFd;
use ports::localhost;
#[test]
pub fn test_inetv4_addr_to_sock_addr() {
let actual: net::SocketAddr = FromStr::from_str("127.0.0.1:3000").unwrap();
let addr = InetAddr::from_std(&actual);
match addr {
InetAddr::V4(addr) => {
let ip: u32 = 0x7f000001;
let port: u16 = 3000;
|
}
_ => panic!("nope"),
}
assert_eq!(addr.to_str(), "127.0.0.1:3000");
let inet = addr.to_std();
assert_eq!(actual, inet);
}
#[test]
pub fn test_path_to_sock_addr() {
let actual = Path::new("/foo/bar");
let addr = UnixAddr::new(actual).unwrap();
let expect: &'static [i8] = unsafe { mem::transmute(&b"/foo/bar"[..]) };
assert_eq!(&addr.0.sun_path[..8], expect);
assert_eq!(addr.path(), actual);
}
#[test]
pub fn test_getsockname() {
use std::net::TcpListener;
let addr = localhost();
let sock = TcpListener::bind(&*addr).unwrap();
let res = getsockname(sock.as_raw_fd()).unwrap();
assert_eq!(addr, res.to_str());
}
|
assert_eq!(addr.sin_addr.s_addr, ip.to_be());
assert_eq!(addr.sin_port, port.to_be());
|
random_line_split
|
test_socket.rs
|
use nix::sys::socket::{InetAddr, UnixAddr, getsockname};
use std::{mem, net};
use std::path::Path;
use std::str::FromStr;
use std::os::unix::io::AsRawFd;
use ports::localhost;
#[test]
pub fn test_inetv4_addr_to_sock_addr() {
let actual: net::SocketAddr = FromStr::from_str("127.0.0.1:3000").unwrap();
let addr = InetAddr::from_std(&actual);
match addr {
InetAddr::V4(addr) => {
let ip: u32 = 0x7f000001;
let port: u16 = 3000;
assert_eq!(addr.sin_addr.s_addr, ip.to_be());
assert_eq!(addr.sin_port, port.to_be());
}
_ => panic!("nope"),
}
assert_eq!(addr.to_str(), "127.0.0.1:3000");
let inet = addr.to_std();
assert_eq!(actual, inet);
}
#[test]
pub fn test_path_to_sock_addr() {
let actual = Path::new("/foo/bar");
let addr = UnixAddr::new(actual).unwrap();
let expect: &'static [i8] = unsafe { mem::transmute(&b"/foo/bar"[..]) };
assert_eq!(&addr.0.sun_path[..8], expect);
assert_eq!(addr.path(), actual);
}
#[test]
pub fn
|
() {
use std::net::TcpListener;
let addr = localhost();
let sock = TcpListener::bind(&*addr).unwrap();
let res = getsockname(sock.as_raw_fd()).unwrap();
assert_eq!(addr, res.to_str());
}
|
test_getsockname
|
identifier_name
|
test_socket.rs
|
use nix::sys::socket::{InetAddr, UnixAddr, getsockname};
use std::{mem, net};
use std::path::Path;
use std::str::FromStr;
use std::os::unix::io::AsRawFd;
use ports::localhost;
#[test]
pub fn test_inetv4_addr_to_sock_addr() {
let actual: net::SocketAddr = FromStr::from_str("127.0.0.1:3000").unwrap();
let addr = InetAddr::from_std(&actual);
match addr {
InetAddr::V4(addr) => {
let ip: u32 = 0x7f000001;
let port: u16 = 3000;
assert_eq!(addr.sin_addr.s_addr, ip.to_be());
assert_eq!(addr.sin_port, port.to_be());
}
_ => panic!("nope"),
}
assert_eq!(addr.to_str(), "127.0.0.1:3000");
let inet = addr.to_std();
assert_eq!(actual, inet);
}
#[test]
pub fn test_path_to_sock_addr() {
let actual = Path::new("/foo/bar");
let addr = UnixAddr::new(actual).unwrap();
let expect: &'static [i8] = unsafe { mem::transmute(&b"/foo/bar"[..]) };
assert_eq!(&addr.0.sun_path[..8], expect);
assert_eq!(addr.path(), actual);
}
#[test]
pub fn test_getsockname()
|
{
use std::net::TcpListener;
let addr = localhost();
let sock = TcpListener::bind(&*addr).unwrap();
let res = getsockname(sock.as_raw_fd()).unwrap();
assert_eq!(addr, res.to_str());
}
|
identifier_body
|
|
async.ts
|
import { assert } from 'utils/assert';
type AsyncIterable<T> = Iterable<T | Promise<T>> | Promise<Iterable<T | Promise<T>>>;
type AsyncForEachIteratee<T> = (
value: T,
index: number,
arrayLength: number,
) => unknown | Promise<unknown>;
/**
* Native port of Bluebird's Promise.prototype.each. Accepts an iterable (or
 * Promise-wrapped iterable) of any value, and a callback function to be
* executed for each value that the iterable yields.
*
* If a value is a promise, `asyncForEach` will wait for it before iterating
* further.
*/
export async function asyncForEach<T>(
iterable: AsyncIterable<T>,
iteratee: AsyncForEachIteratee<T>,
): Promise<T[]> {
const results: T[] = [];
const resolvedList = Array.from(await iterable);
const resolvedLength = resolvedList.length;
for (let i = 0; i < resolvedList.length; i++) {
// eslint-disable-next-line no-await-in-loop
const value = await resolvedList[i];
results.push(value);
// eslint-disable-next-line no-await-in-loop
await iteratee(value, i, resolvedLength);
}
return results;
}
type AsyncMapIteratee<T, R> = (value: T, index: number, arrayLength: number) => R | Promise<R>;
type AsyncMapOptions = { concurrency?: number };
export async function asyncMap<T, R>(
iterable: AsyncIterable<T>,
iteratee: AsyncMapIteratee<T, R>,
options?: AsyncMapOptions,
): Promise<R[]> {
const concurrency = options?.concurrency ?? Infinity;
const resolvedList = Array.from(await iterable);
const resolvedLength = resolvedList.length;
assert(concurrency > 0);
return new Promise((resolve, reject) => {
const results: R[] = [];
let cursor = 0;
let pending = 0;
function enqueueNextPromises() {
// If we have called .then() for all values, and no promises are pending,
// resolve with the final array of results.
if (cursor === resolvedLength && pending === 0) {
resolve(results);
} else {
// Call .then() in batches for promises moving left->right, only
// executing at maximum the value of the configured concurrency.
while (pending < Math.min(concurrency, resolvedLength - cursor + 1))
|
}
}
enqueueNextPromises();
});
}
export async function asyncMapSeries<T, R>(
iterable: AsyncIterable<T>,
iteratee: AsyncMapIteratee<T, R>,
): Promise<R[]> {
return asyncMap(iterable, iteratee, { concurrency: 1 });
}
|
{
const index = cursor;
const next = resolvedList[index];
cursor++;
pending++;
Promise.resolve(next)
.then((value) => {
return iteratee(value, index, resolvedLength);
})
.then(
// eslint-disable-next-line no-loop-func
(value) => {
pending--;
results[index] = value;
enqueueNextPromises();
},
)
.catch(
// eslint-disable-next-line no-loop-func
(err) => {
pending--;
reject(err);
},
);
}
|
conditional_block
|
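A usage sketch for asyncMap as defined above, inside an async context — the URLs are placeholders and `fetch` is assumed available:

// Fetch several resources with at most two requests in flight at a time.
const urls = ['https://example.com/a', 'https://example.com/b', 'https://example.com/c']
const bodies = await asyncMap(
  urls,
  async (url) => {
    const res = await fetch(url)
    return res.text()
  },
  { concurrency: 2 },
)
// `bodies` preserves input order even when requests finish out of order.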
async.ts
|
import { assert } from 'utils/assert';
type AsyncIterable<T> = Iterable<T | Promise<T>> | Promise<Iterable<T | Promise<T>>>;
type AsyncForEachIteratee<T> = (
value: T,
index: number,
arrayLength: number,
) => unknown | Promise<unknown>;
/**
* Native port of Bluebird's Promise.prototype.each. Accepts an iterable (or
 * Promise-wrapped iterable) of any value, and a callback function to be
* executed for each value that the iterable yields.
*
* If a value is a promise, `asyncForEach` will wait for it before iterating
* further.
*/
export async function asyncForEach<T>(
iterable: AsyncIterable<T>,
iteratee: AsyncForEachIteratee<T>,
|
): Promise<T[]> {
const results: T[] = [];
const resolvedList = Array.from(await iterable);
const resolvedLength = resolvedList.length;
for (let i = 0; i < resolvedList.length; i++) {
// eslint-disable-next-line no-await-in-loop
const value = await resolvedList[i];
results.push(value);
// eslint-disable-next-line no-await-in-loop
await iteratee(value, i, resolvedLength);
}
return results;
}
type AsyncMapIteratee<T, R> = (value: T, index: number, arrayLength: number) => R | Promise<R>;
type AsyncMapOptions = { concurrency?: number };
export async function asyncMap<T, R>(
iterable: AsyncIterable<T>,
iteratee: AsyncMapIteratee<T, R>,
options?: AsyncMapOptions,
): Promise<R[]> {
const concurrency = options?.concurrency ?? Infinity;
const resolvedList = Array.from(await iterable);
const resolvedLength = resolvedList.length;
assert(concurrency > 0);
return new Promise((resolve, reject) => {
const results: R[] = [];
let cursor = 0;
let pending = 0;
function enqueueNextPromises() {
// If we have called .then() for all values, and no promises are pending,
// resolve with the final array of results.
if (cursor === resolvedLength && pending === 0) {
resolve(results);
} else {
// Call .then() in batches for promises moving left->right, only
// executing at maximum the value of the configured concurrency.
while (pending < Math.min(concurrency, resolvedLength - cursor + 1)) {
const index = cursor;
const next = resolvedList[index];
cursor++;
pending++;
Promise.resolve(next)
.then((value) => {
return iteratee(value, index, resolvedLength);
})
.then(
// eslint-disable-next-line no-loop-func
(value) => {
pending--;
results[index] = value;
enqueueNextPromises();
},
)
.catch(
// eslint-disable-next-line no-loop-func
(err) => {
pending--;
reject(err);
},
);
}
}
}
enqueueNextPromises();
});
}
export async function asyncMapSeries<T, R>(
iterable: AsyncIterable<T>,
iteratee: AsyncMapIteratee<T, R>,
): Promise<R[]> {
return asyncMap(iterable, iteratee, { concurrency: 1 });
}
|
random_line_split
|
|
async.ts
|
import { assert } from 'utils/assert';
type AsyncIterable<T> = Iterable<T | Promise<T>> | Promise<Iterable<T | Promise<T>>>;
type AsyncForEachIteratee<T> = (
value: T,
index: number,
arrayLength: number,
) => unknown | Promise<unknown>;
/**
* Native port of Bluebird's Promise.prototype.each. Accepts an iterable (or
 * Promise-wrapped iterable) of any value, and a callback function to be
* executed for each value that the iterable yields.
*
* If a value is a promise, `asyncForEach` will wait for it before iterating
* further.
*/
export async function asyncForEach<T>(
iterable: AsyncIterable<T>,
iteratee: AsyncForEachIteratee<T>,
): Promise<T[]> {
const results: T[] = [];
const resolvedList = Array.from(await iterable);
const resolvedLength = resolvedList.length;
for (let i = 0; i < resolvedList.length; i++) {
// eslint-disable-next-line no-await-in-loop
const value = await resolvedList[i];
results.push(value);
// eslint-disable-next-line no-await-in-loop
await iteratee(value, i, resolvedLength);
}
return results;
}
type AsyncMapIteratee<T, R> = (value: T, index: number, arrayLength: number) => R | Promise<R>;
type AsyncMapOptions = { concurrency?: number };
export async function
|
<T, R>(
iterable: AsyncIterable<T>,
iteratee: AsyncMapIteratee<T, R>,
options?: AsyncMapOptions,
): Promise<R[]> {
const concurrency = options?.concurrency ?? Infinity;
const resolvedList = Array.from(await iterable);
const resolvedLength = resolvedList.length;
assert(concurrency > 0);
return new Promise((resolve, reject) => {
const results: R[] = [];
let cursor = 0;
let pending = 0;
function enqueueNextPromises() {
// If we have called .then() for all values, and no promises are pending,
// resolve with the final array of results.
if (cursor === resolvedLength && pending === 0) {
resolve(results);
} else {
// Call .then() in batches for promises moving left->right, only
// executing at maximum the value of the configured concurrency.
while (pending < Math.min(concurrency, resolvedLength - cursor + 1)) {
const index = cursor;
const next = resolvedList[index];
cursor++;
pending++;
Promise.resolve(next)
.then((value) => {
return iteratee(value, index, resolvedLength);
})
.then(
// eslint-disable-next-line no-loop-func
(value) => {
pending--;
results[index] = value;
enqueueNextPromises();
},
)
.catch(
// eslint-disable-next-line no-loop-func
(err) => {
pending--;
reject(err);
},
);
}
}
}
enqueueNextPromises();
});
}
export async function asyncMapSeries<T, R>(
iterable: AsyncIterable<T>,
iteratee: AsyncMapIteratee<T, R>,
): Promise<R[]> {
return asyncMap(iterable, iteratee, { concurrency: 1 });
}
|
asyncMap
|
identifier_name
|
async.ts
|
import { assert } from 'utils/assert';
type AsyncIterable<T> = Iterable<T | Promise<T>> | Promise<Iterable<T | Promise<T>>>;
type AsyncForEachIteratee<T> = (
value: T,
index: number,
arrayLength: number,
) => unknown | Promise<unknown>;
/**
* Native port of Bluebird's Promise.prototype.each. Accepts an iterable (or
 * Promise-wrapped iterable) of any value, and a callback function to be
* executed for each value that the iterable yields.
*
* If a value is a promise, `asyncForEach` will wait for it before iterating
* further.
*/
export async function asyncForEach<T>(
iterable: AsyncIterable<T>,
iteratee: AsyncForEachIteratee<T>,
): Promise<T[]> {
const results: T[] = [];
const resolvedList = Array.from(await iterable);
const resolvedLength = resolvedList.length;
for (let i = 0; i < resolvedList.length; i++) {
// eslint-disable-next-line no-await-in-loop
const value = await resolvedList[i];
results.push(value);
// eslint-disable-next-line no-await-in-loop
await iteratee(value, i, resolvedLength);
}
return results;
}
type AsyncMapIteratee<T, R> = (value: T, index: number, arrayLength: number) => R | Promise<R>;
type AsyncMapOptions = { concurrency?: number };
export async function asyncMap<T, R>(
iterable: AsyncIterable<T>,
iteratee: AsyncMapIteratee<T, R>,
options?: AsyncMapOptions,
): Promise<R[]>
|
export async function asyncMapSeries<T, R>(
iterable: AsyncIterable<T>,
iteratee: AsyncMapIteratee<T, R>,
): Promise<R[]> {
return asyncMap(iterable, iteratee, { concurrency: 1 });
}
|
{
const concurrency = options?.concurrency ?? Infinity;
const resolvedList = Array.from(await iterable);
const resolvedLength = resolvedList.length;
assert(concurrency > 0);
return new Promise((resolve, reject) => {
const results: R[] = [];
let cursor = 0;
let pending = 0;
function enqueueNextPromises() {
// If we have called .then() for all values, and no promises are pending,
// resolve with the final array of results.
if (cursor === resolvedLength && pending === 0) {
resolve(results);
} else {
// Call .then() in batches for promises moving left->right, only
// executing at maximum the value of the configured concurrency.
while (pending < Math.min(concurrency, resolvedLength - cursor + 1)) {
const index = cursor;
const next = resolvedList[index];
cursor++;
pending++;
Promise.resolve(next)
.then((value) => {
return iteratee(value, index, resolvedLength);
})
.then(
// eslint-disable-next-line no-loop-func
(value) => {
pending--;
results[index] = value;
enqueueNextPromises();
},
)
.catch(
// eslint-disable-next-line no-loop-func
(err) => {
pending--;
reject(err);
},
);
}
}
}
enqueueNextPromises();
});
}
|
identifier_body
|
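And a sketch of the sequential helpers, again inside an async context: asyncForEach awaits promise-valued items before invoking the iteratee, and asyncMapSeries is just asyncMap pinned to concurrency 1:

// Strictly sequential side effects.
await asyncForEach([Promise.resolve(1), 2, 3], async (value, index, length) => {
  console.log(`${index + 1}/${length}: ${value}`)
})

// Same ordering guarantee for mapping.
const doubled = await asyncMapSeries([1, 2, 3], async (n) => n * 2) // [2, 4, 6]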
feature_format.py
|
#!/usr/bin/python
"""
A general tool for converting data from the
dictionary format to an (n x k) python list that's
ready for training an sklearn algorithm
n--no. of key-value pairs in dictionary
k--no. of features being extracted
dictionary keys are names of persons in dataset
dictionary values are dictionaries, where each
key-value pair in the dict is the name
of a feature, and its value for that person
In addition to converting a dictionary to a numpy
array, you may want to separate the labels from the
features--this is what targetFeatureSplit is for
so, if you want to have the poi label as the target,
and the features you want to use are the person's
salary and bonus, here's what you would do:
feature_list = ["poi", "salary", "bonus"]
data_array = featureFormat( data_dictionary, feature_list )
label, features = targetFeatureSplit(data_array)
the line above (targetFeatureSplit) assumes that the
label is the _first_ item in feature_list--very important
that poi is listed first!
"""
import numpy as np
def featureFormat( dictionary, features, remove_NaN=True, remove_all_zeroes=True, remove_any_zeroes=False, sort_keys = False):
""" convert dictionary to numpy array of features
remove_NaN = True will convert "NaN" string to 0.0
remove_all_zeroes = True will omit any data points for which
all the features you seek are 0.0
remove_any_zeroes = True will omit any data points for which
any of the features you seek are 0.0
sort_keys = True sorts keys by alphabetical order. Setting the value as
a string opens the corresponding pickle file with a preset key
order (this is used for Python 3 compatibility, and sort_keys
should be left as False for the course mini-projects).
NOTE: first feature is assumed to be 'poi' and is not checked for
removal for zero or missing values.
"""
return_list = []
# Key order - first branch is for Python 3 compatibility on mini-projects,
# second branch is for compatibility on final project.
if isinstance(sort_keys, str):
|
elif sort_keys:
keys = sorted(dictionary.keys())
else:
keys = dictionary.keys()
for key in keys:
tmp_list = []
for feature in features:
try:
dictionary[key][feature]
except KeyError:
print "error: key ", feature, " not present"
return
value = dictionary[key][feature]
if value=="NaN" and remove_NaN:
value = 0
tmp_list.append( float(value) )
# Logic for deciding whether or not to add the data point.
append = True
# exclude 'poi' class as criteria.
if features[0] == 'poi':
test_list = tmp_list[1:]
else:
test_list = tmp_list
### if all features are zero and you want to remove
### data points that are all zero, do that here
if remove_all_zeroes:
append = False
for item in test_list:
if item != 0 and item != "NaN":
append = True
break
### if any features for a given data point are zero
### and you want to remove data points with any zeroes,
### handle that here
if remove_any_zeroes:
if 0 in test_list or "NaN" in test_list:
append = False
### Append the data point if flagged for addition.
if append:
return_list.append( np.array(tmp_list) )
return np.array(return_list)
def targetFeatureSplit( data ):
"""
given a numpy array like the one returned from
featureFormat, separate out the first feature
and put it into its own list (this should be the
quantity you want to predict)
return targets and features as separate lists
(sklearn can generally handle both lists and numpy arrays as
input formats when training/predicting)
"""
target = []
features = []
for item in data:
target.append( item[0] )
features.append( item[1:] )
return target, features
|
import pickle
keys = pickle.load(open(sort_keys, "rb"))
|
conditional_block
|
feature_format.py
|
#!/usr/bin/python
"""
A general tool for converting data from the
dictionary format to an (n x k) python list that's
ready for training an sklearn algorithm
n--no. of key-value pairs in dictionary
k--no. of features being extracted
dictionary keys are names of persons in dataset
dictionary values are dictionaries, where each
key-value pair in the dict is the name
of a feature, and its value for that person
In addition to converting a dictionary to a numpy
array, you may want to separate the labels from the
features--this is what targetFeatureSplit is for
so, if you want to have the poi label as the target,
and the features you want to use are the person's
salary and bonus, here's what you would do:
feature_list = ["poi", "salary", "bonus"]
data_array = featureFormat( data_dictionary, feature_list )
label, features = targetFeatureSplit(data_array)
the line above (targetFeatureSplit) assumes that the
label is the _first_ item in feature_list--very important
that poi is listed first!
"""
import numpy as np
def
|
( dictionary, features, remove_NaN=True, remove_all_zeroes=True, remove_any_zeroes=False, sort_keys = False):
""" convert dictionary to numpy array of features
remove_NaN = True will convert "NaN" string to 0.0
remove_all_zeroes = True will omit any data points for which
all the features you seek are 0.0
remove_any_zeroes = True will omit any data points for which
any of the features you seek are 0.0
sort_keys = True sorts keys by alphabetical order. Setting the value as
a string opens the corresponding pickle file with a preset key
order (this is used for Python 3 compatibility, and sort_keys
should be left as False for the course mini-projects).
NOTE: first feature is assumed to be 'poi' and is not checked for
removal for zero or missing values.
"""
return_list = []
# Key order - first branch is for Python 3 compatibility on mini-projects,
# second branch is for compatibility on final project.
if isinstance(sort_keys, str):
import pickle
keys = pickle.load(open(sort_keys, "rb"))
elif sort_keys:
keys = sorted(dictionary.keys())
else:
keys = dictionary.keys()
for key in keys:
tmp_list = []
for feature in features:
try:
dictionary[key][feature]
except KeyError:
print "error: key ", feature, " not present"
return
value = dictionary[key][feature]
if value=="NaN" and remove_NaN:
value = 0
tmp_list.append( float(value) )
# Logic for deciding whether or not to add the data point.
append = True
# exclude 'poi' class as criteria.
if features[0] == 'poi':
test_list = tmp_list[1:]
else:
test_list = tmp_list
### if all features are zero and you want to remove
### data points that are all zero, do that here
if remove_all_zeroes:
append = False
for item in test_list:
if item != 0 and item != "NaN":
append = True
break
### if any features for a given data point are zero
### and you want to remove data points with any zeroes,
### handle that here
if remove_any_zeroes:
if 0 in test_list or "NaN" in test_list:
append = False
### Append the data point if flagged for addition.
if append:
return_list.append( np.array(tmp_list) )
return np.array(return_list)
def targetFeatureSplit( data ):
"""
given a numpy array like the one returned from
featureFormat, separate out the first feature
and put it into its own list (this should be the
quantity you want to predict)
return targets and features as separate lists
(sklearn can generally handle both lists and numpy arrays as
input formats when training/predicting)
"""
target = []
features = []
for item in data:
target.append( item[0] )
features.append( item[1:] )
return target, features
|
featureFormat
|
identifier_name
|
feature_format.py
|
#!/usr/bin/python
"""
A general tool for converting data from the
dictionary format to an (n x k) python list that's
ready for training an sklearn algorithm
n--no. of key-value pairs in dictionary
k--no. of features being extracted
dictionary keys are names of persons in dataset
dictionary values are dictionaries, where each
key-value pair in the dict is the name
of a feature, and its value for that person
In addition to converting a dictionary to a numpy
array, you may want to separate the labels from the
features--this is what targetFeatureSplit is for
so, if you want to have the poi label as the target,
and the features you want to use are the person's
salary and bonus, here's what you would do:
feature_list = ["poi", "salary", "bonus"]
data_array = featureFormat( data_dictionary, feature_list )
label, features = targetFeatureSplit(data_array)
the line above (targetFeatureSplit) assumes that the
label is the _first_ item in feature_list--very important
that poi is listed first!
"""
import numpy as np
def featureFormat( dictionary, features, remove_NaN=True, remove_all_zeroes=True, remove_any_zeroes=False, sort_keys = False):
""" convert dictionary to numpy array of features
remove_NaN = True will convert "NaN" string to 0.0
remove_all_zeroes = True will omit any data points for which
all the features you seek are 0.0
remove_any_zeroes = True will omit any data points for which
any of the features you seek are 0.0
sort_keys = True sorts keys by alphabetical order. Setting the value as
a string opens the corresponding pickle file with a preset key
order (this is used for Python 3 compatibility, and sort_keys
should be left as False for the course mini-projects).
NOTE: first feature is assumed to be 'poi' and is not checked for
removal for zero or missing values.
"""
return_list = []
# Key order - first branch is for Python 3 compatibility on mini-projects,
# second branch is for compatibility on final project.
if isinstance(sort_keys, str):
import pickle
keys = pickle.load(open(sort_keys, "rb"))
elif sort_keys:
keys = sorted(dictionary.keys())
else:
keys = dictionary.keys()
for key in keys:
tmp_list = []
for feature in features:
try:
dictionary[key][feature]
except KeyError:
print "error: key ", feature, " not present"
return
value = dictionary[key][feature]
if value=="NaN" and remove_NaN:
value = 0
tmp_list.append( float(value) )
# Logic for deciding whether or not to add the data point.
append = True
# exclude 'poi' class as criteria.
if features[0] == 'poi':
test_list = tmp_list[1:]
else:
test_list = tmp_list
### if all features are zero and you want to remove
### data points that are all zero, do that here
if remove_all_zeroes:
append = False
for item in test_list:
if item != 0 and item != "NaN":
append = True
break
### if any features for a given data point are zero
### and you want to remove data points with any zeroes,
### handle that here
if remove_any_zeroes:
if 0 in test_list or "NaN" in test_list:
append = False
### Append the data point if flagged for addition.
if append:
return_list.append( np.array(tmp_list) )
return np.array(return_list)
def targetFeatureSplit( data ):
|
"""
given a numpy array like the one returned from
featureFormat, separate out the first feature
and put it into its own list (this should be the
quantity you want to predict)
return targets and features as separate lists
(sklearn can generally handle both lists and numpy arrays as
input formats when training/predicting)
"""
target = []
features = []
for item in data:
target.append( item[0] )
features.append( item[1:] )
return target, features
|
identifier_body
|
|
feature_format.py
|
#!/usr/bin/python
"""
A general tool for converting data from the
dictionary format to an (n x k) python list that's
ready for training an sklearn algorithm
n--no. of key-value pairs in dictionary
k--no. of features being extracted
dictionary keys are names of persons in dataset
dictionary values are dictionaries, where each
key-value pair in the dict is the name
of a feature, and its value for that person
In addition to converting a dictionary to a numpy
array, you may want to separate the labels from the
features--this is what targetFeatureSplit is for
so, if you want to have the poi label as the target,
and the features you want to use are the person's
salary and bonus, here's what you would do:
feature_list = ["poi", "salary", "bonus"]
data_array = featureFormat( data_dictionary, feature_list )
label, features = targetFeatureSplit(data_array)
the line above (targetFeatureSplit) assumes that the
label is the _first_ item in feature_list--very important
that poi is listed first!
"""
import numpy as np
def featureFormat( dictionary, features, remove_NaN=True, remove_all_zeroes=True, remove_any_zeroes=False, sort_keys = False):
""" convert dictionary to numpy array of features
remove_NaN = True will convert "NaN" string to 0.0
remove_all_zeroes = True will omit any data points for which
all the features you seek are 0.0
remove_any_zeroes = True will omit any data points for which
any of the features you seek are 0.0
sort_keys = True sorts keys by alphabetical order. Setting the value as
a string opens the corresponding pickle file with a preset key
order (this is used for Python 3 compatibility, and sort_keys
should be left as False for the course mini-projects).
NOTE: first feature is assumed to be 'poi' and is not checked for
removal for zero or missing values.
"""
return_list = []
# Key order - first branch is for Python 3 compatibility on mini-projects,
# second branch is for compatibility on final project.
if isinstance(sort_keys, str):
import pickle
keys = pickle.load(open(sort_keys, "rb"))
elif sort_keys:
keys = sorted(dictionary.keys())
else:
keys = dictionary.keys()
for key in keys:
tmp_list = []
for feature in features:
try:
dictionary[key][feature]
except KeyError:
print "error: key ", feature, " not present"
return
value = dictionary[key][feature]
if value=="NaN" and remove_NaN:
value = 0
tmp_list.append( float(value) )
# Logic for deciding whether or not to add the data point.
append = True
# exclude 'poi' class as criteria.
if features[0] == 'poi':
test_list = tmp_list[1:]
else:
test_list = tmp_list
### if all features are zero and you want to remove
### data points that are all zero, do that here
if remove_all_zeroes:
append = False
for item in test_list:
if item != 0 and item != "NaN":
append = True
break
### if any features for a given data point are zero
### and you want to remove data points with any zeroes,
### handle that here
if remove_any_zeroes:
if 0 in test_list or "NaN" in test_list:
append = False
### Append the data point if flagged for addition.
if append:
return_list.append( np.array(tmp_list) )
return np.array(return_list)
def targetFeatureSplit( data ):
"""
given a numpy array like the one returned from
featureFormat, separate out the first feature
|
quantity you want to predict)
return targets and features as separate lists
(sklearn can generally handle both lists and numpy arrays as
input formats when training/predicting)
"""
target = []
features = []
for item in data:
target.append( item[0] )
features.append( item[1:] )
return target, features
|
and put it into its own list (this should be the
|
random_line_split
|
weight.rs
|
//! Provides configuration of weights and their initialization.
use capnp_util::*;
use co::{ITensorDesc, SharedTensor};
use juice_capnp::weight_config as capnp_config;
use rand;
use rand::distributions::{IndependentSample, Range};
use util::native_backend;
#[derive(Debug, Clone)]
/// Specifies training configuration for a weight blob.
pub struct WeightConfig {
/// The name of the weight blob -- useful for sharing weights among
/// layers, but never required otherwise. To share a weight between two
/// layers, give it a (non-empty) name.
///
/// Default: ""
pub name: String,
/// Whether to require shared weights to have the same shape, or just the same
/// count
///
/// Default: DimCheckMode::Strict
pub share_mode: DimCheckMode,
/// The multiplier on the global learning rate for this parameter.
///
/// Default: 1.0f32
pub lr_mult: Option<f32>,
/// The multiplier on the global weight decay for this parameter.
///
/// Default: 1.0f32
pub decay_mult: Option<f32>,
/// The filler that initializes the weights in the weight blob.
///
/// Default: None
pub filler: Option<FillerType>,
}
impl Default for WeightConfig {
fn default() -> WeightConfig {
WeightConfig {
name: "".to_owned(),
share_mode: DimCheckMode::Strict,
lr_mult: None,
decay_mult: None,
filler: None,
}
}
}
impl WeightConfig {
/// Checks dimensions of two blobs according to the `share_mode`.
/// Returns an error if there is a count/shape mismatch.
pub fn check_dimensions<T>(&self,
tensor_one: &SharedTensor<T>,
tensor_two: &SharedTensor<T>,
param_name: String,
owner_name: String,
layer_name: String)
-> Result<(), String> {
match self.share_mode {
// Permissive dimension checking -- only check counts are the same.
DimCheckMode::Permissive => {
if tensor_one.desc().size() != tensor_two.desc().size() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
count mismatch.
Owner layer weight shape is {:?};
Sharing layer weight shape is {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
// Strict dimension checking -- all dims must be the same.
DimCheckMode::Strict => {
if tensor_one.desc() != tensor_two.desc() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
shape mismatch.
Owner layer weight shape is {:?};
Sharing layer expects weight shape {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
}
Ok(())
}
/// The multiplier on the global learning rate for this weight blob.
pub fn
|
(&self) -> f32 {
match self.lr_mult {
Some(val) => val,
None => 1.0f32,
}
}
/// The multiplier on the global weight decay for this weight blob.
pub fn decay_mult(&self) -> f32 {
match self.decay_mult {
Some(val) => val,
None => 1.0f32,
}
}
}
impl<'a> CapnpWrite<'a> for WeightConfig {
type Builder = capnp_config::Builder<'a>;
/// Write the WeightConfig into a capnp message.
fn write_capnp(&self, builder: &mut Self::Builder) {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
builder.borrow().set_name(&self.name);
}
}
impl<'a> CapnpRead<'a> for WeightConfig {
type Reader = capnp_config::Reader<'a>;
fn read_capnp(reader: Self::Reader) -> Self {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
let name = reader.get_name().unwrap().to_owned();
WeightConfig { name: name, ..Self::default() }
}
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the shared weights behaviour
pub enum DimCheckMode {
/// Strict requires that shapes match.
Strict,
/// Permissive requires only the count of weights to match.
Permissive,
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the type of Filler.
pub enum FillerType {
/// Fills the weight blob with a constant `value` (all values are the same).
Constant {
/// The value that will be used to fill the blob.
value: f32,
},
/// Fills the weight blobs based on the paper:
///
/// `[Bengio and Glorot 2010]: Understanding the difficulty of training deep feedforward neural networks.`
///
/// Also known as Xavier filler.
Glorot {
/// Number of input nodes for each output.
input_size: usize,
/// Number of output nodes for each input.
output_size: usize,
},
}
impl FillerType {
/// Uses a filler as specified by this FillerType to fill the values in a SharedTensor
///
/// This filling of weights is usually done directly after creation of the weight blob.
pub fn fill(&self, weight: &mut SharedTensor<f32>) {
let native = native_backend();
let native_device = native.device();
match *self {
FillerType::Constant { value } => Self::fill_constant(weight, value),
FillerType::Glorot { input_size, output_size } => Self::fill_glorot(weight, input_size, output_size),
}
}
/// Directly use the [Constant Filler](#variant.Constant).
pub fn fill_constant(weight: &mut SharedTensor<f32>, value: f32) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
for e in native_weight.as_mut_slice::<f32>() {
*e = value;
}
}
/// Directly use the [Glorot Filler](#variant.Glorot).
pub fn fill_glorot(weight: &mut SharedTensor<f32>, num_inputs: usize, num_outputs: usize) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
let init_range = (6.0f32 / (num_inputs as f32 + num_outputs as f32)).sqrt();
let between = Range::new(-init_range, init_range);
let mut rng = rand::thread_rng();
for e in native_weight.as_mut_slice::<f32>() {
*e = between.ind_sample(&mut rng);
}
}
}
|
lr_mult
|
identifier_name
|
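A small sketch of the Glorot/Xavier math that fill_glorot performs, written against the same pre-0.5 rand API (Range + IndependentSample) this module imports; the helper name and the idea of sampling into a plain Vec are assumptions for illustration.

extern crate rand;

use rand::distributions::{IndependentSample, Range};

// Hypothetical helper mirroring fill_glorot's range computation on a Vec<f32>.
fn glorot_samples(num_inputs: usize, num_outputs: usize, n: usize) -> Vec<f32> {
    // Uniform range from Glorot & Bengio (2010): sqrt(6 / (fan_in + fan_out)).
    let init_range = (6.0f32 / (num_inputs as f32 + num_outputs as f32)).sqrt();
    let between = Range::new(-init_range, init_range);
    let mut rng = rand::thread_rng();
    (0..n).map(|_| between.ind_sample(&mut rng)).collect()
}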
weight.rs
|
//! Provides configuration of weights and their initialization.
use capnp_util::*;
use co::{ITensorDesc, SharedTensor};
use juice_capnp::weight_config as capnp_config;
use rand;
use rand::distributions::{IndependentSample, Range};
|
/// The name of the weight blob -- useful for sharing weights among
/// layers, but never required otherwise. To share a weight between two
/// layers, give it a (non-empty) name.
///
/// Default: ""
pub name: String,
/// Whether to require shared weights to have the same shape, or just the same
/// count
///
/// Default: DimCheckMode::Strict
pub share_mode: DimCheckMode,
/// The multiplier on the global learning rate for this parameter.
///
/// Default: 1.0f32
pub lr_mult: Option<f32>,
/// The multiplier on the global weight decay for this parameter.
///
/// Default: 1.0f32
pub decay_mult: Option<f32>,
/// The filler that initializes the weights in the weight blob.
///
/// Default: None
pub filler: Option<FillerType>,
}
impl Default for WeightConfig {
fn default() -> WeightConfig {
WeightConfig {
name: "".to_owned(),
share_mode: DimCheckMode::Strict,
lr_mult: None,
decay_mult: None,
filler: None,
}
}
}
impl WeightConfig {
/// Checks dimensions of two blobs according to the `share_mode`.
/// Returns an error if there is a count/shape mismatch.
pub fn check_dimensions<T>(&self,
tensor_one: &SharedTensor<T>,
tensor_two: &SharedTensor<T>,
param_name: String,
owner_name: String,
layer_name: String)
-> Result<(), String> {
match self.share_mode {
// Permissive dimension checking -- only check counts are the same.
DimCheckMode::Permissive => {
if tensor_one.desc().size() != tensor_two.desc().size() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
count mismatch.
Owner layer weight shape is {:?};
Sharing layer weight shape is {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
// Strict dimension checking -- all dims must be the same.
DimCheckMode::Strict => {
if tensor_one.desc() != tensor_two.desc() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
shape mismatch.
Owner layer weight shape is {:?};
Sharing layer expects weight shape {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
}
Ok(())
}
/// The multiplier on the global learning rate for this weight blob.
pub fn lr_mult(&self) -> f32 {
match self.lr_mult {
Some(val) => val,
None => 1.0f32,
}
}
/// The multiplier on the global weight decay for this weight blob.
pub fn decay_mult(&self) -> f32 {
match self.decay_mult {
Some(val) => val,
None => 1.0f32,
}
}
}
impl<'a> CapnpWrite<'a> for WeightConfig {
type Builder = capnp_config::Builder<'a>;
/// Write the WeightConfig into a capnp message.
fn write_capnp(&self, builder: &mut Self::Builder) {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
builder.borrow().set_name(&self.name);
}
}
impl<'a> CapnpRead<'a> for WeightConfig {
type Reader = capnp_config::Reader<'a>;
fn read_capnp(reader: Self::Reader) -> Self {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
let name = reader.get_name().unwrap().to_owned();
WeightConfig { name: name, ..Self::default() }
}
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the shared weights behaviour
pub enum DimCheckMode {
/// Strict requires that shapes match.
Strict,
/// Permissive requires only the count of weights to match.
Permissive,
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the type of Filler.
pub enum FillerType {
/// Fills the weight blob with a constant `value` (all values are the same).
Constant {
/// The value that will be used to fill the blob.
value: f32,
},
/// Fills the weight blobs based on the paper:
///
/// `[Bengio and Glorot 2010]: Understanding the difficulty of training deep feedforward neural networks.`
///
/// Also known as Xavier filler.
Glorot {
/// Number of input nodes for each output.
input_size: usize,
/// Number of output nodes for each input.
output_size: usize,
},
}
impl FillerType {
/// Uses a filler as specified by this FillerType to fill the values in a SharedTensor
///
/// This filling of weights is usually done directly after creation of the weight blob.
pub fn fill(&self, weight: &mut SharedTensor<f32>) {
let native = native_backend();
let native_device = native.device();
match *self {
FillerType::Constant { value } => Self::fill_constant(weight, value),
FillerType::Glorot { input_size, output_size } => Self::fill_glorot(weight, input_size, output_size),
}
}
/// Directly use the [Constant Filler](#variant.Constant).
pub fn fill_constant(weight: &mut SharedTensor<f32>, value: f32) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
for e in native_weight.as_mut_slice::<f32>() {
*e = value;
}
}
/// Directly use the [Glorot Filler](#variant.Glorot).
pub fn fill_glorot(weight: &mut SharedTensor<f32>, num_inputs: usize, num_outputs: usize) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
let init_range = (6.0f32 / (num_inputs as f32 + num_outputs as f32)).sqrt();
let between = Range::new(-init_range, init_range);
let mut rng = rand::thread_rng();
for e in native_weight.as_mut_slice::<f32>() {
*e = between.ind_sample(&mut rng);
}
}
}
|
use util::native_backend;
#[derive(Debug, Clone)]
/// Specifies training configuration for a weight blob.
pub struct WeightConfig {
|
random_line_split
|
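The accessors above fall back to 1.0 when the optional multipliers are unset; a quick sketch of that behaviour, assuming WeightConfig is in scope:

let cfg = WeightConfig::default();
assert_eq!(cfg.lr_mult(), 1.0f32);    // lr_mult is None, so the default applies
assert_eq!(cfg.decay_mult(), 1.0f32); // same fallback for weight decay
let custom = WeightConfig { lr_mult: Some(0.1), ..WeightConfig::default() };
assert_eq!(custom.lr_mult(), 0.1f32);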
weight.rs
|
//! Provides configuration of weights and their initialization.
use capnp_util::*;
use co::{ITensorDesc, SharedTensor};
use juice_capnp::weight_config as capnp_config;
use rand;
use rand::distributions::{IndependentSample, Range};
use util::native_backend;
#[derive(Debug, Clone)]
/// Specifies training configuration for a weight blob.
pub struct WeightConfig {
/// The name of the weight blob -- useful for sharing weights among
/// layers, but never required otherwise. To share a weight between two
/// layers, give it a (non-empty) name.
///
/// Default: ""
pub name: String,
/// Whether to require shared weights to have the same shape, or just the same
/// count
///
/// Default: DimCheckMode::Strict
pub share_mode: DimCheckMode,
/// The multiplier on the global learning rate for this parameter.
///
/// Default: 1.0f32
pub lr_mult: Option<f32>,
/// The multiplier on the global weight decay for this parameter.
///
/// Default: 1.0f32
pub decay_mult: Option<f32>,
/// The filler that initializes the weights in the weight blob.
///
/// Default: None
pub filler: Option<FillerType>,
}
impl Default for WeightConfig {
fn default() -> WeightConfig {
WeightConfig {
name: "".to_owned(),
share_mode: DimCheckMode::Strict,
lr_mult: None,
decay_mult: None,
filler: None,
}
}
}
impl WeightConfig {
/// Checks dimensions of two blobs according to the `share_mode`.
/// Returns an error if there is a count/shape mismatch.
pub fn check_dimensions<T>(&self,
tensor_one: &SharedTensor<T>,
tensor_two: &SharedTensor<T>,
param_name: String,
owner_name: String,
layer_name: String)
-> Result<(), String> {
match self.share_mode {
// Permissive dimension checking -- only check counts are the same.
DimCheckMode::Permissive => {
if tensor_one.desc().size() != tensor_two.desc().size() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
count mismatch.
Owner layer weight shape is {:?};
Sharing layer weight shape is {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
// Strict dimension checking -- all dims must be the same.
DimCheckMode::Strict => {
if tensor_one.desc() != tensor_two.desc() {
return Err(format!("Cannot share weight '{}' owned by layer '{}' with layer '{}';
shape mismatch.
Owner layer weight shape is {:?};
Sharing layer expects weight shape {:?}",
param_name,
owner_name,
layer_name,
tensor_two.desc(),
tensor_one.desc()));
}
}
}
Ok(())
}
/// The multiplier on the global learning rate for this weight blob.
pub fn lr_mult(&self) -> f32 {
match self.lr_mult {
Some(val) => val,
None => 1.0f32,
}
}
/// The multiplier on the global weight decay for this weight blob.
pub fn decay_mult(&self) -> f32
|
}
impl<'a> CapnpWrite<'a> for WeightConfig {
type Builder = capnp_config::Builder<'a>;
/// Write the WeightConfig into a capnp message.
fn write_capnp(&self, builder: &mut Self::Builder) {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
builder.borrow().set_name(&self.name);
}
}
impl<'a> CapnpRead<'a> for WeightConfig {
type Reader = capnp_config::Reader<'a>;
fn read_capnp(reader: Self::Reader) -> Self {
// TODO: incomplete since WeightConfig isn't really used internally in Juice at the moment.
let name = reader.get_name().unwrap().to_owned();
WeightConfig { name: name, ..Self::default() }
}
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the shared weights behaviour
pub enum DimCheckMode {
/// Strict requires that shapes match.
Strict,
/// Permissive requires only the count of weights to match.
Permissive,
}
#[derive(Debug, Copy, Clone)]
/// Enum for specifying the type of Filler.
pub enum FillerType {
/// Fills the weight blob with a constant `value` (all values are the same).
Constant {
/// The value that will be used to fill the blob.
value: f32,
},
/// Fills the weight blobs based on the paper:
///
/// `[Bengio and Glorot 2010]: Understanding the difficulty of training deep feedforward neural networks.`
///
/// Also known as Xavier filler.
Glorot {
/// Number of input nodes for each output.
input_size: usize,
/// Number of output nodes for each input.
output_size: usize,
},
}
impl FillerType {
/// Uses a filler as specified by this FillerType to fill the values in a SharedTensor
///
/// This filling of weights is usually done directly after creation of the weight blob.
pub fn fill(&self, weight: &mut SharedTensor<f32>) {
let native = native_backend();
let native_device = native.device();
match *self {
FillerType::Constant { value } => Self::fill_constant(weight, value),
FillerType::Glorot { input_size, output_size } => Self::fill_glorot(weight, input_size, output_size),
}
}
/// Directly use the [Constant Filler](#variant.Constant).
pub fn fill_constant(weight: &mut SharedTensor<f32>, value: f32) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
for e in native_weight.as_mut_slice::<f32>() {
*e = value;
}
}
/// Directly use the [Glorot Filler](#variant.Glorot).
pub fn fill_glorot(weight: &mut SharedTensor<f32>, num_inputs: usize, num_outputs: usize) {
let native = native_backend();
let native_weight = weight.write_only(native.device()).unwrap();
let init_range = (6.0f32 / (num_inputs as f32 + num_outputs as f32)).sqrt();
let between = Range::new(-init_range, init_range);
let mut rng = rand::thread_rng();
for e in native_weight.as_mut_slice::<f32>() {
*e = between.ind_sample(&mut rng);
}
}
}
|
{
match self.decay_mult {
Some(val) => val,
None => 1.0f32,
}
}
|
identifier_body
|
StringBuilder.js
|
const assert = require('assert');
const is = require('is_js');
function StringBuilder() {
const chars = [];
//appends a value to the string
function append(value) {
assert.equal(is.not.object(value), true);
value = value + ''; // coerce to a string; the bare expression had no effect
for (let i = 0; i < value.length; i++) {
chars.push(value[i]);
}
}
// removes any instance of the value from within the string
function remove(value) {
assert.equal(is.not.object(value), true);
let j = 0;
let strs = [];
for (let i = 0; i < chars.length; i++) {
if (chars[i] == value[j])
|
}
}
// returns the string
let toString = function toString() {
return chars.reduce((x, char) => {
return x + char;
}, ''); // seed with '' so toString() also works on an empty builder
}
return {
append: append,
remove: remove,
toString: toString,
};
}
module.exports = StringBuilder;
|
{
let str = chars.slice(i, i + value.length);
let matches = true;
for (let k = 0; k < str.length; k++) {
if (str[k] != value[j]) {
matches = false;
break;
}
j++;
}
if (matches) {
chars.splice(i, value.length);
}
i = j;
j = 0;
}
|
conditional_block
|
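A minimal usage sketch, assuming the factory above is saved as StringBuilder.js next to the caller and the is_js dependency is installed. Note that remove() resets its outer index with i = j after a match attempt, which can skip characters on partial matches, so it is only reliable for simple cases like the one below.

const StringBuilder = require('./StringBuilder');

const sb = StringBuilder();
sb.append('hello ');
sb.append('world');
console.log(sb.toString()); // "hello world"
sb.remove('world');
console.log(sb.toString()); // "hello "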
StringBuilder.js
|
const assert = require('assert');
const is = require('is_js');
function StringBuilder() {
const chars = [];
//appends a value to the string
function append(value) {
assert.equal(is.not.object(value), true);
value = value + ''; // coerce to a string; the bare expression had no effect
for (let i = 0; i < value.length; i++) {
chars.push(value[i]);
}
}
// removes any instance of the value from within the string
function remove(value) {
assert.equal(is.not.object(value), true);
let j = 0;
let strs = [];
for (let i = 0; i < chars.length; i++) {
if (chars[i] == value[j]) {
let str = chars.slice(i, i + value.length);
let matches = true;
for (let k = 0; k < str.length; k++) {
if (str[k] != value[j]) {
matches = false;
break;
}
j++;
}
|
if (matches) {
chars.splice(i, value.length);
}
i = j;
j = 0;
}
}
}
// returns the string
let toString = function toString() {
return chars.reduce((x, char) => {
return x + char;
}, ''); // seed with '' so toString() also works on an empty builder
}
return {
append: append,
remove: remove,
toString: toString,
};
}
module.exports = StringBuilder;
|
random_line_split
|
|
StringBuilder.js
|
const assert = require('assert');
const is = require('is_js');
function
|
() {
const chars = [];
//appends a value to the string
function append(value) {
assert.equal(is.not.object(value), true);
value = value + ''; // coerce to a string; the bare expression had no effect
for (let i = 0; i < value.length; i++) {
chars.push(value[i]);
}
}
// removes any instance of the value from within the string
function remove(value) {
assert.equal(is.not.object(value), true);
let j = 0;
let strs = [];
for (let i = 0; i < chars.length; i++) {
if (chars[i] == value[j]) {
let str = chars.slice(i, i + value.length);
let matches = true;
for (let k = 0; k < str.length; k++) {
if (str[k] != value[j]) {
matches = false;
break;
}
j++;
}
if (matches) {
chars.splice(i, value.length);
}
i = j;
j = 0;
}
}
}
// returns the string
let toString = function toString() {
return chars.reduce((x, char) => {
return x + char;
}, ''); // seed with '' so toString() also works on an empty builder
}
return {
append: append,
remove: remove,
toString: toString,
};
}
module.exports = StringBuilder;
|
StringBuilder
|
identifier_name
|
StringBuilder.js
|
const assert = require('assert');
const is = require('is_js');
function StringBuilder() {
const chars = [];
//appends a value to the string
function append(value)
|
// removes any instance of the value from within the string
function remove(value) {
assert.equal(is.not.object(value), true);
let j = 0;
let strs = [];
for (let i = 0; i < chars.length; i++) {
if (chars[i] == value[j]) {
let str = chars.slice(i, i + value.length);
let matches = true;
for (let k = 0; k < str.length; k++) {
if (str[k] != value[j]) {
matches = false;
break;
}
j++;
}
if (matches) {
chars.splice(i, value.length);
}
i = j;
j = 0;
}
}
}
// returns the string
let toString = function toString() {
return chars.reduce((x, char) => {
return x + char;
}, ''); // seed with '' so toString() also works on an empty builder
}
return {
append: append,
remove: remove,
toString: toString,
};
}
module.exports = StringBuilder;
|
{
assert.equal(is.not.object(value), true);
value = value + ''; // coerce to a string; the bare expression had no effect
for (let i = 0; i < value.length; i++) {
chars.push(value[i]);
}
}
|
identifier_body
|
columnWidget.js
|
import React from 'react';
import PropTypes from 'prop-types';
import ColumnChart from './columnChart';
import Tooltip from './../tooltip/tooltip';
import {dateFormats} from './../../utils/displayFormats';
import './columnWidget.css';
const ColumnWidget = ({
chartTitle,
chartDescription,
chartUpdatedDate,
|
xAxis,
yAxis,
viewport,
displayHighContrast,
}) => {
return (
<article role="article" className="D_widget">
<header>
{chartDescription && <div className="D_CW_infoContainer"><Tooltip text={chartDescription} viewport={viewport} /></div>}
<h1 className="highcharts-title">{chartTitle}</h1>
<span className="highcharts-subtitle">Last updated at <time dateTime={dateFormats.dateTime(chartUpdatedDate)}>{dateFormats.dayMonthYear(chartUpdatedDate)}</time></span>
</header>
<section>
<ColumnChart series={series}
xAxis={xAxis}
yAxis={yAxis}
chartDescription={chartDescription}
displayHighContrast={displayHighContrast} />
</section>
</article>
)
};
if (__DEV__) {
ColumnWidget.propTypes = {
chartTitle: PropTypes.string,
chartDescription: PropTypes.string,
chartUpdatedDate: PropTypes.string,
series: PropTypes.arrayOf(PropTypes.shape({
name: PropTypes.string.isRequired,
units: PropTypes.string,
color: PropTypes.string,
data: PropTypes.array.isRequired,
})).isRequired,
xAxis: PropTypes.arrayOf(PropTypes.shape({
categories: PropTypes.array,
})),
yAxis: PropTypes.arrayOf(PropTypes.shape({
title: PropTypes.object,
})),
viewport: PropTypes.oneOf(['sm', 'md', 'lg', 'xl']),
displayHighContrast: PropTypes.bool,
};
}
export default ColumnWidget;
|
series,
|
random_line_split
|
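A hedged rendering sketch; the prop shapes are inferred from the propTypes block above, and every concrete value is made up for illustration.

import React from 'react';
import ColumnWidget from './columnWidget';

// Hypothetical props matching the declared shapes.
const widget = (
  <ColumnWidget
    chartTitle="Monthly visits"
    chartDescription="Visits per month"
    chartUpdatedDate="2017-06-01T00:00:00"
    series={[{ name: 'Visits', data: [10, 20, 30] }]}
    xAxis={[{ categories: ['Apr', 'May', 'Jun'] }]}
    yAxis={[{ title: { text: 'Count' } }]}
    viewport="md"
    displayHighContrast={false}
  />
);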
columnWidget.js
|
import React from 'react';
import PropTypes from 'prop-types';
import ColumnChart from './columnChart';
import Tooltip from './../tooltip/tooltip';
import {dateFormats} from './../../utils/displayFormats';
import './columnWidget.css';
const ColumnWidget = ({
chartTitle,
chartDescription,
chartUpdatedDate,
series,
xAxis,
yAxis,
viewport,
displayHighContrast,
}) => {
return (
<article role="article" className="D_widget">
<header>
{chartDescription && <div className="D_CW_infoContainer"><Tooltip text={chartDescription} viewport={viewport} /></div>}
<h1 className="highcharts-title">{chartTitle}</h1>
<span className="highcharts-subtitle">Last updated at <time dateTime={dateFormats.dateTime(chartUpdatedDate)}>{dateFormats.dayMonthYear(chartUpdatedDate)}</time></span>
</header>
<section>
<ColumnChart series={series}
xAxis={xAxis}
yAxis={yAxis}
chartDescription={chartDescription}
displayHighContrast={displayHighContrast} />
</section>
</article>
)
};
if (__DEV__)
|
export default ColumnWidget;
|
{
ColumnWidget.propTypes = {
chartTitle: PropTypes.string,
chartDescription: PropTypes.string,
chartUpdatedDate: PropTypes.string,
series: PropTypes.arrayOf(PropTypes.shape({
name: PropTypes.string.isRequired,
units: PropTypes.string,
color: PropTypes.string,
data: PropTypes.array.isRequired,
})).isRequired,
xAxis: PropTypes.arrayOf(PropTypes.shape({
categories: PropTypes.array,
})),
yAxis: PropTypes.arrayOf(PropTypes.shape({
title: PropTypes.object,
})),
viewport: PropTypes.oneOf(['sm', 'md', 'lg', 'xl']),
displayHighContrast: PropTypes.bool,
};
}
|
conditional_block
|
keyboard.rs
|
use libc::{c_int, c_char, uint8_t, uint16_t,
|
use keycode::{SDL_Keycode, SDL_Keymod};
pub type SDL_bool = c_int;
// SDL_keyboard.h
#[derive(Copy, Clone)]
pub struct SDL_Keysym {
pub scancode: SDL_Scancode,
pub sym: SDL_Keycode,
pub _mod: uint16_t,
pub unused: uint32_t,
}
extern "C" {
pub fn SDL_GetKeyboardFocus() -> *mut SDL_Window;
pub fn SDL_GetKeyboardState(numkeys: *mut c_int) -> *const uint8_t;
pub fn SDL_GetModState() -> SDL_Keymod;
pub fn SDL_SetModState(modstate: SDL_Keymod);
pub fn SDL_GetKeyFromScancode(scancode: SDL_Scancode) -> SDL_Keycode;
pub fn SDL_GetScancodeFromKey(key: SDL_Keycode) -> SDL_Scancode;
pub fn SDL_GetScancodeName(scancode: SDL_Scancode) -> *const c_char;
pub fn SDL_GetScancodeFromName(name: *const c_char) -> SDL_Scancode;
pub fn SDL_GetKeyName(key: SDL_Keycode) -> *const c_char;
pub fn SDL_GetKeyFromName(name: *const c_char) -> SDL_Keycode;
pub fn SDL_StartTextInput();
pub fn SDL_IsTextInputActive() -> SDL_bool;
pub fn SDL_StopTextInput();
pub fn SDL_SetTextInputRect(rect: *const SDL_Rect);
pub fn SDL_HasScreenKeyboardSupport() -> SDL_bool;
pub fn SDL_IsScreenKeyboardShown(window: *mut SDL_Window) -> SDL_bool;
}
|
uint32_t};
use rect::SDL_Rect;
use video::SDL_Window;
use scancode::SDL_Scancode;
|
random_line_split
|
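A hedged sketch of reading keyboard state through the raw bindings above: SDL_GetKeyboardState returns a pointer into SDL's internal array, so dereferencing it is unsafe. This assumes SDL has already been initialized elsewhere; the wrapper name is hypothetical.

use libc::c_int;

// Hypothetical wrapper: true if the given scancode is currently held down.
fn scancode_pressed(scancode: usize) -> bool {
    unsafe {
        let mut numkeys: c_int = 0;
        let state = SDL_GetKeyboardState(&mut numkeys as *mut c_int);
        // The state array holds one byte per scancode; nonzero means pressed.
        scancode < numkeys as usize && *state.offset(scancode as isize) != 0
    }
}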
keyboard.rs
|
use libc::{c_int, c_char, uint8_t, uint16_t,
uint32_t};
use rect::SDL_Rect;
use video::SDL_Window;
use scancode::SDL_Scancode;
use keycode::{SDL_Keycode, SDL_Keymod};
pub type SDL_bool = c_int;
// SDL_keyboard.h
#[derive(Copy, Clone)]
pub struct
|
{
pub scancode: SDL_Scancode,
pub sym: SDL_Keycode,
pub _mod: uint16_t,
pub unused: uint32_t,
}
extern "C" {
pub fn SDL_GetKeyboardFocus() -> *mut SDL_Window;
pub fn SDL_GetKeyboardState(numkeys: *mut c_int) -> *const uint8_t;
pub fn SDL_GetModState() -> SDL_Keymod;
pub fn SDL_SetModState(modstate: SDL_Keymod);
pub fn SDL_GetKeyFromScancode(scancode: SDL_Scancode) -> SDL_Keycode;
pub fn SDL_GetScancodeFromKey(key: SDL_Keycode) -> SDL_Scancode;
pub fn SDL_GetScancodeName(scancode: SDL_Scancode) -> *const c_char;
pub fn SDL_GetScancodeFromName(name: *const c_char) -> SDL_Scancode;
pub fn SDL_GetKeyName(key: SDL_Keycode) -> *const c_char;
pub fn SDL_GetKeyFromName(name: *const c_char) -> SDL_Keycode;
pub fn SDL_StartTextInput();
pub fn SDL_IsTextInputActive() -> SDL_bool;
pub fn SDL_StopTextInput();
pub fn SDL_SetTextInputRect(rect: *const SDL_Rect);
pub fn SDL_HasScreenKeyboardSupport() -> SDL_bool;
pub fn SDL_IsScreenKeyboardShown(window: *mut SDL_Window) -> SDL_bool;
}
|
SDL_Keysym
|
identifier_name
|
tag-align-shape.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum a_tag {
a_tag(u64)
}
struct t_rec {
c8: u8,
t: a_tag
}
pub fn main()
|
{
let x = t_rec {c8: 22u8, t: a_tag(44u64)};
let y = fmt!("%?", x);
debug!("y = %s", y);
assert_eq!(y, ~"{c8: 22, t: a_tag(44)}");
}
|
identifier_body
|
|
tag-align-shape.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
a_tag(u64)
}
struct t_rec {
c8: u8,
t: a_tag
}
pub fn main() {
let x = t_rec {c8: 22u8, t: a_tag(44u64)};
let y = fmt!("%?", x);
debug!("y = %s", y);
assert_eq!(y, ~"{c8: 22, t: a_tag(44)}");
}
|
enum a_tag {
|
random_line_split
|
tag-align-shape.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
enum
|
{
a_tag(u64)
}
struct t_rec {
c8: u8,
t: a_tag
}
pub fn main() {
let x = t_rec {c8: 22u8, t: a_tag(44u64)};
let y = fmt!("%?", x);
debug!("y = %s", y);
assert_eq!(y, ~"{c8: 22, t: a_tag(44)}");
}
|
a_tag
|
identifier_name
|
main.rs
|
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
}
// ANCHOR: here
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn can_hold(&self, other: &Rectangle) -> bool
|
}
// ANCHOR_END: here
fn main() {
let rect1 = Rectangle {
width: 30,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
}
|
{
self.width > other.width && self.height > other.height
}
|
identifier_body
|
main.rs
|
#[derive(Debug)]
struct Rectangle {
width: u32,
height: u32,
|
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
}
// ANCHOR_END: here
fn main() {
let rect1 = Rectangle {
width: 30,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
}
|
}
// ANCHOR: here
|
random_line_split
|
main.rs
|
#[derive(Debug)]
struct
|
{
width: u32,
height: u32,
}
// ANCHOR: here
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
fn can_hold(&self, other: &Rectangle) -> bool {
self.width > other.width && self.height > other.height
}
}
// ANCHOR_END: here
fn main() {
let rect1 = Rectangle {
width: 30,
height: 50,
};
let rect2 = Rectangle {
width: 10,
height: 40,
};
let rect3 = Rectangle {
width: 60,
height: 45,
};
println!("Can rect1 hold rect2? {}", rect1.can_hold(&rect2));
println!("Can rect1 hold rect3? {}", rect1.can_hold(&rect3));
}
|
Rectangle
|
identifier_name
|
identifier.component.ts
|
import {Component, OnInit,Input} from 'angular2/core';
import {Identifier} from 'gedcomx';
import {CollapsibleFieldsetComponent} from './collapsibleFieldset.component';
|
<button (click)="createIdentifier()" *ngIf="!data">Set Identifier</button>
<div *ngIf="data">
<label>
Value: <input [(ngModel)]="data.value" placeholder="Value"/>
</label>
<label *ngIf="data">Type:
<select [(ngModel)]="data.type">
<option *ngFor="#type of getAllIdentifierTypes()">{{type}}</option>
</select>
</label>
</div>
</collapsibleFieldset>
`,
inputs:['data'],
directives:[CollapsibleFieldsetComponent]
})
export class IdentifierComponent implements OnInit {
data: Identifier;
constructor() { }
ngOnInit() { }
getAllIdentifierTypes() {
return [];
}
}
|
@Component({
selector: 'identifier',
template: `
<collapsibleFieldset legendLabel="Identifier">
|
random_line_split
|
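A usage sketch for embedding the component in a parent (Angular 2 beta style, matching the imports above). Note the template references createIdentifier(), which the class shown never defines, and the object literal below merely stands in for a gedcomx Identifier; both are assumptions.

import {Component} from 'angular2/core';
import {IdentifierComponent} from './identifier.component';

@Component({
  selector: 'person-editor',
  template: `<identifier [data]="identifier"></identifier>`,
  directives: [IdentifierComponent],
})
export class PersonEditorComponent {
  // Hypothetical stand-in for a gedcomx Identifier instance.
  identifier = {value: 'urn:example:123', type: 'http://gedcomx.org/Primary'};
}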
identifier.component.ts
|
import {Component, OnInit,Input} from 'angular2/core';
import {Identifier} from 'gedcomx';
import {CollapsibleFieldsetComponent} from './collapsibleFieldset.component';
@Component({
selector: 'identifier',
template: `
<collapsibleFieldset legendLabel="Identifier">
<button (click)="createIdentifier()" *ngIf="!data">Set Identifier</button>
<div *ngIf="data">
<label>
Value: <input [(ngModel)]="data.value" placeholder="Value"/>
</label>
<label *ngIf="data">Type:
<select [(ngModel)]="data.type">
<option *ngFor="#type of getAllIdentifierTypes()">{{type}}</option>
</select>
</label>
</div>
</collapsibleFieldset>
`,
inputs:['data'],
directives:[CollapsibleFieldsetComponent]
})
export class IdentifierComponent implements OnInit {
data: Identifier;
constructor() { }
ngOnInit()
|
getAllIdentifierTypes() {
return [];
}
}
|
{ }
|
identifier_body
|
identifier.component.ts
|
import {Component, OnInit,Input} from 'angular2/core';
import {Identifier} from 'gedcomx';
import {CollapsibleFieldsetComponent} from './collapsibleFieldset.component';
@Component({
selector: 'identifier',
template: `
<collapsibleFieldset legendLabel="Identifier">
<button (click)="createIdentifier()" *ngIf="!data">Set Identifier</button>
<div *ngIf="data">
<label>
Value: <input [(ngModel)]="data.value" placeholder="Value"/>
</label>
<label *ngIf="data">Type:
<select [(ngModel)]="data.type">
<option *ngFor="#type of getAllIdentifierTypes()">{{type}}</option>
</select>
</label>
</div>
</collapsibleFieldset>
`,
inputs:['data'],
directives:[CollapsibleFieldsetComponent]
})
export class
|
implements OnInit {
data: Identifier;
constructor() { }
ngOnInit() { }
getAllIdentifierTypes() {
return [];
}
}
|
IdentifierComponent
|
identifier_name
|
builtin-superkinds-capabilities.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
// except according to those terms.
// Tests "capabilities" granted by traits that inherit from super-
// builtin-kinds, e.g., if a trait requires Send to implement, then
// at usage site of that trait, we know we have the Send capability.
trait Foo : Send { }
impl <T: Send> Foo for T { }
fn foo<T: Foo>(val: T, chan: Sender<T>) {
chan.send(val);
}
pub fn main() {
let (tx, rx): (Sender<int>, Receiver<int>) = channel();
foo(31337i, tx);
assert!(rx.recv() == 31337i);
}
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
random_line_split
|
builtin-superkinds-capabilities.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests "capabilities" granted by traits that inherit from super-
// builtin-kinds, e.g., if a trait requires Send to implement, then
// at usage site of that trait, we know we have the Send capability.
trait Foo : Send { }
impl <T: Send> Foo for T { }
fn foo<T: Foo>(val: T, chan: Sender<T>) {
chan.send(val);
}
pub fn
|
() {
let (tx, rx): (Sender<int>, Receiver<int>) = channel();
foo(31337i, tx);
assert!(rx.recv() == 31337i);
}
|
main
|
identifier_name
|
builtin-superkinds-capabilities.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests "capabilities" granted by traits that inherit from super-
// builtin-kinds, e.g., if a trait requires Send to implement, then
// at usage site of that trait, we know we have the Send capability.
trait Foo : Send { }
impl <T: Send> Foo for T { }
fn foo<T: Foo>(val: T, chan: Sender<T>) {
chan.send(val);
}
pub fn main()
|
{
let (tx, rx): (Sender<int>, Receiver<int>) = channel();
foo(31337i, tx);
assert!(rx.recv() == 31337i);
}
|
identifier_body
|