| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
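The rows below are fill-in-the-middle (FIM) examples: each row holds a source file name, the code before a masked span (`prefix`), the code after it (`suffix`), the masked span itself (`middle`), and the kind of span that was masked (`fim_type`; the rows shown here use identifier_name, conditional_block, identifier_body and random_line_split). A minimal sketch of how a row maps back to source text, assuming a row is simply a dict keyed by the column names above:

```python
# Sketch only: reassembling one FIM row; the training target is row["middle"],
# conditioned on row["prefix"] and row["suffix"].
def reassemble(row: dict) -> str:
    return row["prefix"] + row["middle"] + row["suffix"]

# Abridged example taken from the first row below (main.rs, fim_type = identifier_name),
# keeping only the characters around the split point:
row = {
    "prefix": "async fn upload(mut body: web",
    "middle": "::Payl",
    "suffix": "oad, req: HttpRequest) -> HttpResponse {",
}
assert reassemble(row) == "async fn upload(mut body: web::Payload, req: HttpRequest) -> HttpResponse {"
```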
main.rs | use std::convert::TryInto;
use std::env;
use std::fs;
use std::fs::File;
use std::io::prelude::*;
use std::str;
use uuid::Uuid;
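// In-memory session state kept in mutable globals: (username, token) pairs for
// sessions that passed 2FA, and (username, challenge) pairs for pending logins.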
static mut USER_TOKEN: Vec<(String, String)> = Vec::new();
static mut USER_CHALLENGE: Vec<(String, u64)> = Vec::new();
#[derive(Debug)]
struct User {
username: String,
salt: Salt,
password_kdf: [u8; 32],
secret: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct UserChallenge {
username: String,
challenge: u64,
salt: Salt,
}
#[derive(Serialize, Deserialize, Debug)]
struct Metadata {
file_name: String,
username: Vec<String>,
nonce: [u8; 12],
key: Vec<u8>,
}
#[derive(Deserialize, Debug)]
struct ComputedChallenge {
challenge: [u8; 32],
}
lazy_static! {
static ref USER_DB: HashMap<&'static str, User> = {
let mut map = HashMap::new();
// Google Authenticator configuration
let auth = GoogleAuthenticator::new();
// This part would normally be done on the client, but it is deliberately
// done on the server to simplify the architecture
let salt = argon2id13::gen_salt();
let mut key = [0u8; 32];
argon2id13::derive_key(
&mut key,
"P@ssw0rd".as_bytes(),
&salt,
argon2id13::OPSLIMIT_SENSITIVE,
argon2id13::MEMLIMIT_SENSITIVE,
)
.unwrap();
map.insert(
"jerome",
User {
username: "jerome".to_string(),
salt: salt,
password_kdf: key,
secret: auth.create_secret(32),
},
);
map
};
}
#[get("/server/{user_id}")]
async fn username(web::Path(user_id): web::Path<String>) -> HttpResponse {
// check whether the user is in the DB; if so, send them a challenge to solve
match USER_DB.get::<str>(&user_id.to_string()) {
Some(username) => {
let user_challenge = UserChallenge {
username: user_id.to_string(),
salt: username.salt,
challenge: OsRng.next_u64(),
};
unsafe {
USER_CHALLENGE.push((user_id, user_challenge.challenge));
}
HttpResponse::Ok().body(serde_json::to_string(&user_challenge).unwrap())
}
None => HttpResponse::NotFound().finish(),
}
}
#[post("/server/{user_id}")] // <- define path parameters
async fn username_post(
web::Path(user_id): web::Path<String>,
mut body: web::Payload,
) -> HttpResponse {
// check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// read the body to get the submitted challenge
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
// deserialize the submitted challenge
let computed_challenge: ComputedChallenge =
serde_json::from_str(str::from_utf8(&bytes).unwrap()).unwrap();
// retrieve the challenge that was sent to the client
let challenge_to_compute: u64;
unsafe {
let index = USER_CHALLENGE.iter().position(|x| x.0 == user_id).unwrap();
challenge_to_compute = USER_CHALLENGE.get(index).unwrap().1;
USER_CHALLENGE.remove(index);
}
// compute the MAC from the KDF output stored in the DB
type HmacSha256 = Hmac<Sha256>;
let mut mac = HmacSha256::new_varkey(&user.password_kdf).expect("HMAC Error");
mac.update(&challenge_to_compute.to_be_bytes());
let challenge: [u8; 32] = mac
.finalize()
.into_bytes()
.as_slice()
.try_into()
.expect("Wrong length");
// check whether the two values match
if challenge == computed_challenge.challenge {
return HttpResponse::Ok().finish();
}
HttpResponse::NonAuthoritativeInformation().finish()
}
#[get("/2fa/{user_id}")]
async fn get_code(web::Path(user_id): web::Path<String>) -> HttpResponse {
// Google Authenticator configuration
let auth = GoogleAuthenticator::new();
// check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// create the QR code
let url = auth.qr_code_url(
&user.secret,
"qr_code",
"name",
200,
200,
ErrorCorrectionLevel::High,
);
HttpResponse::Ok().body(url)
}
#[post("/2fa/{user_id}")]
async fn validate_code(web::Path(user_id): web::Path<String>, req: HttpRequest) -> HttpResponse {
// Google Authenticator configuration
let auth = GoogleAuthenticator::new();
// check whether the user exists in the DB
let user = match USER_DB.get::<str>(&user_id.to_string()) {
Some(user) => user,
None => {
return HttpResponse::NotFound().finish();
}
};
// get the code from the request header
let input_code: &str = req.headers().get("Code").unwrap().to_str().unwrap();
if !auth.verify_code(&user.secret, &input_code, 0, 0) {
println!("Mauvais code.");
return HttpResponse::Unauthorized().finish();
}
// if OK, send the user a token for subsequent exchanges
let user_token: String = Uuid::new_v4().hyphenated().to_string();
unsafe {
USER_TOKEN.push((user_id, user_token.clone()));
}
HttpResponse::Ok().header("Token", user_token).finish()
}
#[post("/upload")]
async fn upload(mut body: web::Payload, req: HttpRequest) -> HttpResponse {
// read and verify the token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
// read the body
let mut bytes = web::BytesMut::new();
while let Some(item) = body.next().await {
let item = item.unwrap();
bytes.extend_from_slice(&item);
}
let res: Vec<u8> = bytes.to_vec();
// write the data to a file
let mut file = File::create(req.headers().get("filename").unwrap().to_str().unwrap()).unwrap();
file.write_all(&res).unwrap();
HttpResponse::Ok().finish()
}
#[get("/download")]
async fn download(req: HttpRequest) -> HttpResponse {
// read and verify the token
let filename: &str = req.headers().get("FileName").unwrap().to_str().unwrap();
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let work_file = env::current_dir().unwrap().join(&filename);
// open and read the file
let mut file = match File::open(work_file) {
Ok(result) => result,
Err(_) => {
return HttpResponse::NoContent().finish();
}
};
let mut ciphertext: Vec<u8> = Vec::new();
file.read_to_end(&mut ciphertext).unwrap();
HttpResponse::Ok().body(ciphertext)
}
#[get("/list")]
async fn get_list(req: HttpRequest) -> HttpResponse {
// read and verify the token
if !check_token(&req) {
return HttpResponse::NonAuthoritativeInformation().finish();
}
let user_name: &str = req.headers().get("Username").unwrap().to_str().unwrap();
// prepare the AES-GCM key and the nonce
let key_aes = Key::from_slice(b"an example very very secret key.");
let aead = Aes256Gcm::new(key_aes);
let nonce = Nonce::from_slice(b"unique nonce");
let mut file_list = String::new();
// read the directory contents
let paths = fs::read_dir("./").unwrap();
for path in paths {
let file = path.unwrap().path().into_os_string().into_string().unwrap();
// for every metadata file
if file.contains(".metadata") {
let mut current_file = File::open(&file).expect("Unable to open the file");
let mut contents = String::new();
current_file
.read_to_string(&mut contents)
.expect("Unable to read the file");
let meta: Metadata = serde_json::from_str(&contents).unwrap();
if meta.username.contains(&user_name.to_string()) {
file_list.push_str(&file.split(".metadata").collect::<String>());
| middle: ::Payl | fim_type: identifier_name
4_orient_grasping.py | print "============ Generating plan 1"
pose_target = Pose()
pose_target.position.x = x
pose_target.position.y = y
pose_target.position.z = z
pose_target.orientation.x = Ox
pose_target.orientation.y = Oy
pose_target.orientation.z = Oz
pose_target.orientation.w = Ow
move_group.set_pose_target(pose_target)
move_group.go(True)
print "============ plan 1 complete!"
trans_1,rot_1 = get_TF('odom','/ee_link')
print "============ ee pose : "
print move_group.get_current_pose()
print move_group.get_planning_frame()
print 'odom_TF',trans_1,rot_1
print "============"
def move_base(a,b):
sub = rospy.Subscriber("/odometry/filtered", Odometry, newOdom)
pub = rospy.Publisher("/cmd_vel", Twist, queue_size = 1)
speed = Twist()
r = rospy.Rate(4)
goal = Point()
goal.x = a
goal.y = b
arrival_radius = 0.1
while (goal.x-x)**2 + (goal.y-y)**2 >= arrival_radius**2 :
#while abs(goal.x-x) >0.1 or abs(goal.y-y) >0.1 or abs(angle_to_goal-theta) >0.1 : # break once within ~0.3 of the goal.
inc_x = goal.x -x
inc_y = goal.y -y
angle_to_goal = atan2(inc_y,inc_x)
| def cartesian_path_planner(a,b,c):
waypoints = []
wpose = move_group.get_current_pose().pose
wpose.position.z += a # First move up (z)
waypoints.append(copy.deepcopy(wpose))
wpose = move_group.get_current_pose().pose
wpose.position.x += b # First move up (z)
waypoints.append(copy.deepcopy(wpose))
wpose = move_group.get_current_pose().pose
wpose.position.y += c # First move up (z)
waypoints.append(copy.deepcopy(wpose))
# We want the Cartesian path to be interpolated at a resolution of 1 cm
# which is why we will specify 0.01 as the eef_step in Cartesian
# translation. We will disable the jump threshold by setting it to 0.0 disabling:
(plan, fraction) = move_group.compute_cartesian_path(
waypoints, # waypoints to follow
0.1, # eef_step
0.0) # jump_threshold
def x_path_planner(a):
pose_target = move_group.get_current_pose().pose
pose_target.position.x += a # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def y_path_planner(c):
pose_target = move_group.get_current_pose().pose
pose_target.position.y += c # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def z_path_planner(b):
pose_target = move_group.get_current_pose().pose
pose_target.position.z += b # First move up (z)
move_group.set_pose_target(pose_target)
move_group.go(True)
def down_demo():
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #down pose
print "Down demo is ready to start!, press enter..!"
raw_input()
print "go up..!"
z_path_planner(0.1)
print "go down..!"
z_path_planner(-0.1)
rospy.sleep(2)
print "Down demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def up_demo():
move_Joint(1.57,-2.27,1.93,-1.19,1.57,0) #up pose
print "Up demo is ready to start!, press enter..!"
raw_input()
print "go up..!"
rospy.sleep(1)
z_path_planner(-0.05)
print "go down..!"
z_path_planner(0.1)
rospy.sleep(1)
print "up demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def left_demo():
#move_Joint(1.57,-2.27,1.93,-1.19,1.57,0) #up pose
move_Joint(1.57,-2.27,1.93,-1.19,3.14,0) #left pose
print "Left demo is ready to start!, press enter..!"
raw_input()
print "go left..!"
#y_path_planner(0.1)
print "go more left..!"
y_path_planner(-0.2)
rospy.sleep(2)
print "left demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
def right_demo():
move_Joint(1.57,-2.27,1.93,-1.19,0,0) #left pose
print "right demo is ready to start!, press enter..!"
raw_input()
print "go right..!"
y_path_planner(-0.1)
print "go more right..!"
y_path_planner(0.2)
rospy.sleep(2)
print "right demo complete!, Go to home pose..!"
move_Joint(1.57,-2.27,1.93,-1.19,-1.57,0) #home pose
if __name__=='__main__':
down_demo()
up_demo()
left_demo()
right_demo()
#move_Joint(1.57079632679490 ,-1.57079632679490, 0, 0, 1.57079632679490, 0)
#move_Joint(1.38187901932325 ,0.594965748224829 ,-1.84587120888068 ,-0.259201159280024 ,1.87922844334536 ,-2.94403460825812)
#move_Joint(1.49234992746732 ,0.505575183819339 ,-1.77749928330972 ,-0.242572378864612 ,2.19692733555951 ,-3.04571339173395)
#move_Joint(1.57882340366397 ,0.392747674943758 ,-1.68144316751832 ,-0.294244456380595 ,2.51322054731526 ,3.12658213006687)
| if abs(angle_to_goal - theta) > 2*pi/180:
speed.linear.x = 0.0
speed.angular.z = 0.3
if abs(angle_to_goal - theta) < 5*pi/180: # once within 0.5, cut the speed way down so we do not overshoot the goal.
speed.angular.z = 0.03
speed.linear.x = 0.0
else:
speed.linear.x = 0.2
speed.angular.z = 0.0
if abs(goal.x-x) <0.3 and abs(goal.y-y)<0.3: # if the x,y error is within 0.3, slow down a lot.
speed.angular.x = 0.05
speed.angular.z = 0.0
print goal.x-x, goal.y-y, angle_to_goal-theta
pub.publish(speed)
r.sleep()
| conditional_block |
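In the row above (fim_type conditional_block) the masked `middle` is the if/else block printed after the second `|`, and it belongs immediately after the `angle_to_goal = atan2(inc_y,inc_x)` line inside `move_base`'s while loop, where the first `|` (just before `def cartesian_path_planner`) marks the split. A sketch of that loop with the block spliced back in — the indentation is inferred, since the dump flattened it, and the comments are translated:

```python
# Sketch: move_base()'s control loop from the row above with the masked block restored.
while (goal.x - x) ** 2 + (goal.y - y) ** 2 >= arrival_radius ** 2:
    inc_x = goal.x - x
    inc_y = goal.y - y
    angle_to_goal = atan2(inc_y, inc_x)
    if abs(angle_to_goal - theta) > 2 * pi / 180:            # not facing the goal yet: rotate
        speed.linear.x = 0.0
        speed.angular.z = 0.3
        if abs(angle_to_goal - theta) < 5 * pi / 180:        # almost aligned: rotate very slowly
            speed.angular.z = 0.03
            speed.linear.x = 0.0
    else:                                                     # heading is fine: drive forward
        speed.linear.x = 0.2
        speed.angular.z = 0.0
        if abs(goal.x - x) < 0.3 and abs(goal.y - y) < 0.3:   # close to the goal: slow down
            speed.angular.x = 0.05
            speed.angular.z = 0.0
    print goal.x - x, goal.y - y, angle_to_goal - theta
    pub.publish(speed)
    r.sleep()
```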
lib.register_lints.rs | comparison_chain::COMPARISON_CHAIN,
copies::BRANCHES_SHARING_CODE,
copies::IFS_SAME_COND,
copies::IF_SAME_THEN_ELSE,
copies::SAME_FUNCTIONS_IN_IF_CONDITION,
copy_iterator::COPY_ITERATOR,
create_dir::CREATE_DIR,
dbg_macro::DBG_MACRO,
default::DEFAULT_TRAIT_ACCESS,
default::FIELD_REASSIGN_WITH_DEFAULT,
default_numeric_fallback::DEFAULT_NUMERIC_FALLBACK,
default_union_representation::DEFAULT_UNION_REPRESENTATION,
dereference::EXPLICIT_DEREF_METHODS,
dereference::NEEDLESS_BORROW,
dereference::REF_BINDING_TO_REFERENCE,
derivable_impls::DERIVABLE_IMPLS,
derive::DERIVE_HASH_XOR_EQ,
derive::DERIVE_ORD_XOR_PARTIAL_ORD,
derive::EXPL_IMPL_CLONE_ON_COPY,
derive::UNSAFE_DERIVE_DESERIALIZE,
disallowed_methods::DISALLOWED_METHODS,
disallowed_script_idents::DISALLOWED_SCRIPT_IDENTS,
disallowed_types::DISALLOWED_TYPES,
doc::DOC_MARKDOWN,
doc::MISSING_ERRORS_DOC,
doc::MISSING_PANICS_DOC,
doc::MISSING_SAFETY_DOC,
doc::NEEDLESS_DOCTEST_MAIN,
double_comparison::DOUBLE_COMPARISONS,
double_parens::DOUBLE_PARENS,
drop_forget_ref::DROP_COPY,
drop_forget_ref::DROP_REF,
drop_forget_ref::FORGET_COPY,
drop_forget_ref::FORGET_REF,
duration_subsec::DURATION_SUBSEC,
else_if_without_else::ELSE_IF_WITHOUT_ELSE,
empty_enum::EMPTY_ENUM,
entry::MAP_ENTRY,
enum_clike::ENUM_CLIKE_UNPORTABLE_VARIANT,
enum_variants::ENUM_VARIANT_NAMES,
enum_variants::MODULE_INCEPTION,
enum_variants::MODULE_NAME_REPETITIONS,
eq_op::EQ_OP,
eq_op::OP_REF,
equatable_if_let::EQUATABLE_IF_LET,
erasing_op::ERASING_OP,
escape::BOXED_LOCAL,
eta_reduction::REDUNDANT_CLOSURE,
eta_reduction::REDUNDANT_CLOSURE_FOR_METHOD_CALLS,
eval_order_dependence::DIVERGING_SUB_EXPRESSION,
eval_order_dependence::EVAL_ORDER_DEPENDENCE,
excessive_bools::FN_PARAMS_EXCESSIVE_BOOLS,
excessive_bools::STRUCT_EXCESSIVE_BOOLS,
exhaustive_items::EXHAUSTIVE_ENUMS,
exhaustive_items::EXHAUSTIVE_STRUCTS,
exit::EXIT,
explicit_write::EXPLICIT_WRITE,
fallible_impl_from::FALLIBLE_IMPL_FROM,
float_equality_without_abs::FLOAT_EQUALITY_WITHOUT_ABS,
float_literal::EXCESSIVE_PRECISION,
float_literal::LOSSY_FLOAT_LITERAL,
floating_point_arithmetic::IMPRECISE_FLOPS,
floating_point_arithmetic::SUBOPTIMAL_FLOPS,
format::USELESS_FORMAT,
format_args::FORMAT_IN_FORMAT_ARGS,
format_args::TO_STRING_IN_FORMAT_ARGS,
format_impl::PRINT_IN_FORMAT_IMPL,
format_impl::RECURSIVE_FORMAT_IMPL,
formatting::POSSIBLE_MISSING_COMMA,
formatting::SUSPICIOUS_ASSIGNMENT_FORMATTING,
formatting::SUSPICIOUS_ELSE_FORMATTING,
formatting::SUSPICIOUS_UNARY_OP_FORMATTING,
from_over_into::FROM_OVER_INTO,
from_str_radix_10::FROM_STR_RADIX_10,
functions::DOUBLE_MUST_USE,
functions::MUST_USE_CANDIDATE,
functions::MUST_USE_UNIT,
functions::NOT_UNSAFE_PTR_ARG_DEREF,
functions::RESULT_UNIT_ERR,
functions::TOO_MANY_ARGUMENTS,
functions::TOO_MANY_LINES,
future_not_send::FUTURE_NOT_SEND,
get_last_with_len::GET_LAST_WITH_LEN,
identity_op::IDENTITY_OP,
if_let_mutex::IF_LET_MUTEX,
if_not_else::IF_NOT_ELSE,
if_then_some_else_none::IF_THEN_SOME_ELSE_NONE,
implicit_hasher::IMPLICIT_HASHER,
implicit_return::IMPLICIT_RETURN,
implicit_saturating_sub::IMPLICIT_SATURATING_SUB,
inconsistent_struct_constructor::INCONSISTENT_STRUCT_CONSTRUCTOR,
index_refutable_slice::INDEX_REFUTABLE_SLICE,
indexing_slicing::INDEXING_SLICING,
indexing_slicing::OUT_OF_BOUNDS_INDEXING,
infinite_iter::INFINITE_ITER,
infinite_iter::MAYBE_INFINITE_ITER,
inherent_impl::MULTIPLE_INHERENT_IMPL,
inherent_to_string::INHERENT_TO_STRING,
inherent_to_string::INHERENT_TO_STRING_SHADOW_DISPLAY,
init_numbered_fields::INIT_NUMBERED_FIELDS,
inline_fn_without_body::INLINE_FN_WITHOUT_BODY,
int_plus_one::INT_PLUS_ONE,
integer_division::INTEGER_DIVISION,
invalid_upcast_comparisons::INVALID_UPCAST_COMPARISONS,
items_after_statements::ITEMS_AFTER_STATEMENTS,
iter_not_returning_iterator::ITER_NOT_RETURNING_ITERATOR,
large_const_arrays::LARGE_CONST_ARRAYS,
large_enum_variant::LARGE_ENUM_VARIANT,
large_stack_arrays::LARGE_STACK_ARRAYS,
len_zero::COMPARISON_TO_EMPTY,
len_zero::LEN_WITHOUT_IS_EMPTY,
len_zero::LEN_ZERO,
let_if_seq::USELESS_LET_IF_SEQ,
let_underscore::LET_UNDERSCORE_DROP,
let_underscore::LET_UNDERSCORE_LOCK,
let_underscore::LET_UNDERSCORE_MUST_USE,
lifetimes::EXTRA_UNUSED_LIFETIMES,
lifetimes::NEEDLESS_LIFETIMES,
literal_representation::DECIMAL_LITERAL_REPRESENTATION,
literal_representation::INCONSISTENT_DIGIT_GROUPING,
literal_representation::LARGE_DIGIT_GROUPS,
literal_representation::MISTYPED_LITERAL_SUFFIXES,
literal_representation::UNREADABLE_LITERAL,
literal_representation::UNUSUAL_BYTE_GROUPINGS,
loops::EMPTY_LOOP,
loops::EXPLICIT_COUNTER_LOOP,
loops::EXPLICIT_INTO_ITER_LOOP,
loops::EXPLICIT_ITER_LOOP,
loops::FOR_KV_MAP,
loops::FOR_LOOPS_OVER_FALLIBLES,
loops::ITER_NEXT_LOOP,
loops::MANUAL_FLATTEN,
loops::MANUAL_MEMCPY,
loops::MISSING_SPIN_LOOP,
loops::MUT_RANGE_BOUND,
loops::NEEDLESS_COLLECT,
loops::NEEDLESS_RANGE_LOOP,
loops::NEVER_LOOP,
loops::SAME_ITEM_PUSH,
loops::SINGLE_ELEMENT_LOOP,
loops::WHILE_IMMUTABLE_CONDITION,
loops::WHILE_LET_LOOP,
loops::WHILE_LET_ON_ITERATOR,
macro_use::MACRO_USE_IMPORTS,
main_recursion::MAIN_RECURSION,
manual_assert::MANUAL_ASSERT,
manual_async_fn::MANUAL_ASYNC_FN,
manual_bits::MANUAL_BITS,
manual_map::MANUAL_MAP,
manual_non_exhaustive::MANUAL_NON_EXHAUSTIVE,
manual_ok_or::MANUAL_OK_OR,
manual_strip::MANUAL_STRIP,
manual_unwrap_or::MANUAL_UNWRAP_OR,
map_clone::MAP_CLONE,
map_err_ignore::MAP_ERR_IGNORE,
map_unit_fn::OPTION_MAP_UNIT_FN,
map_unit_fn::RESULT_MAP_UNIT_FN,
match_on_vec_items::MATCH_ON_VEC_ITEMS,
match_result_ok::MATCH_RESULT_OK,
match_str_case_mismatch::MATCH_STR_CASE_MISMATCH,
matches::INFALLIBLE_DESTRUCTURING_MATCH,
matches::MATCH_AS_REF,
matches::MATCH_BOOL,
matches::MATCH_LIKE_MATCHES_MACRO,
matches::MATCH_OVERLAPPING_ARM,
matches::MATCH_REF_PATS,
matches::MATCH_SAME_ARMS,
matches::MATCH_SINGLE_BINDING,
matches::MATCH_WILDCARD_FOR_SINGLE_VARIANTS,
matches::MATCH_WILD_ERR_ARM,
matches::NEEDLESS_MATCH,
matches::REDUNDANT_PATTERN_MATCHING,
matches::REST_PAT_IN_FULLY_BOUND_STRUCTS,
matches::SINGLE_MATCH,
matches::SINGLE_MATCH_ELSE,
matches::WILDCARD_ENUM_MATCH_ARM,
matches::WILDCARD_IN_OR_PATTERNS,
mem_forget::MEM_FORGET,
mem_replace::MEM_REPLACE_OPTION_WITH_NONE,
mem_replace::MEM_REPLACE_WITH_DEFAULT,
mem_replace::MEM_REPLACE_WITH_UNINIT,
methods::BIND_INSTEAD_OF_MAP,
methods::BYTES_NTH,
methods::CHARS_LAST_CMP,
methods::CHARS_NEXT_CMP,
methods::CLONED_INSTEAD_OF_COPIED,
methods::CLONE_DOUBLE_REF,
methods::CLONE_ON_COPY,
methods::CLONE_ON_REF_PTR,
methods::EXPECT_FUN_CALL,
methods::EXPECT_USED,
methods::EXTEND_WITH_DRAIN,
methods::FILETYPE_IS_FILE,
methods::FILTER_MAP_IDENTITY,
methods::FILTER_MAP_NEXT,
methods::FILTER_NEXT,
methods::FLAT_MAP_IDENTITY,
methods::FLAT_MAP_OPTION,
methods::FROM_ITER_INSTEAD_OF_COLLECT,
methods::GET_UNWRAP,
methods::IMPLICIT_CLONE,
methods::INEFFICIENT | collapsible_if::COLLAPSIBLE_ELSE_IF,
collapsible_if::COLLAPSIBLE_IF,
collapsible_match::COLLAPSIBLE_MATCH, | fim_type: random_line_split
|
tracesegment.go | // CauseTypeObject indicates that the type of the `cause`
// field is an object
CauseTypeObject
)
// Segment schema is documented in xray-segmentdocument-schema-v1.0.0 listed
// on https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html
type Segment struct {
// Required fields for both segment and subsegments
Name *string `json:"name"`
ID *string `json:"id"`
StartTime *float64 `json:"start_time"`
// Segment-only optional fields
Service *ServiceData `json:"service,omitempty"`
Origin *string `json:"origin,omitempty"`
User *string `json:"user,omitempty"`
ResourceARN *string `json:"resource_arn,omitempty"`
Links []SpanLinkData `json:"links,omitempty"`
// Optional fields for both Segment and subsegments
TraceID *string `json:"trace_id,omitempty"`
EndTime *float64 `json:"end_time,omitempty"`
InProgress *bool `json:"in_progress,omitempty"`
HTTP *HTTPData `json:"http,omitempty"`
Fault *bool `json:"fault,omitempty"`
Error *bool `json:"error,omitempty"`
Throttle *bool `json:"throttle,omitempty"`
Cause *CauseData `json:"cause,omitempty"`
AWS *AWSData `json:"aws,omitempty"`
Annotations map[string]interface{} `json:"annotations,omitempty"`
Metadata map[string]map[string]interface{} `json:"metadata,omitempty"`
Subsegments []Segment `json:"subsegments,omitempty"`
// (for both embedded and independent) subsegment-only (optional) fields.
// Please refer to https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-subsegments
// for more information on subsegment.
Namespace *string `json:"namespace,omitempty"`
ParentID *string `json:"parent_id,omitempty"`
Type *string `json:"type,omitempty"`
PrecursorIDs []string `json:"precursor_ids,omitempty"`
Traced *bool `json:"traced,omitempty"`
SQL *SQLData `json:"sql,omitempty"`
}
// Validate checks whether the segment is valid or not
func (s *Segment) Validate() error {
if s.Name == nil {
return errors.New(`segment "name" can not be nil`)
}
if s.ID == nil {
return errors.New(`segment "id" can not be nil`)
}
if s.StartTime == nil {
return errors.New(`segment "start_time" can not be nil`)
}
// it's ok for embedded subsegments to not have trace_id
// but the root segment and independent subsegments must all
// have trace_id.
if s.TraceID == nil {
return errors.New(`segment "trace_id" can not be nil`)
}
return nil
}
// AWSData represents the aws resource that this segment
// originates from
type AWSData struct {
// Segment-only
Beanstalk *BeanstalkMetadata `json:"elastic_beanstalk,omitempty"`
CWLogs []LogGroupMetadata `json:"cloudwatch_logs,omitempty"`
ECS *ECSMetadata `json:"ecs,omitempty"`
EC2 *EC2Metadata `json:"ec2,omitempty"`
EKS *EKSMetadata `json:"eks,omitempty"`
XRay *XRayMetaData `json:"xray,omitempty"`
// For both segment and subsegments
AccountID *string `json:"account_id,omitempty"`
Operation *string `json:"operation,omitempty"`
RemoteRegion *string `json:"region,omitempty"`
RequestID *string `json:"request_id,omitempty"`
QueueURL *string `json:"queue_url,omitempty"`
TableName *string `json:"table_name,omitempty"`
TableNames []string `json:"table_names,omitempty"`
Retries *int64 `json:"retries,omitempty"`
}
// EC2Metadata represents the EC2 metadata field
type EC2Metadata struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
InstanceSize *string `json:"instance_size"`
AmiID *string `json:"ami_id"`
}
// ECSMetadata represents the ECS metadata field. All must be omitempty b/c they come from two different detectors:
// Docker and ECS, so it's possible one is present and not the other
type ECSMetadata struct {
ContainerName *string `json:"container,omitempty"`
ContainerID *string `json:"container_id,omitempty"`
TaskArn *string `json:"task_arn,omitempty"`
TaskFamily *string `json:"task_family,omitempty"`
ClusterArn *string `json:"cluster_arn,omitempty"`
ContainerArn *string `json:"container_arn,omitempty"`
AvailabilityZone *string `json:"availability_zone,omitempty"`
LaunchType *string `json:"launch_type,omitempty"`
}
// BeanstalkMetadata represents the Elastic Beanstalk environment metadata field
type BeanstalkMetadata struct {
Environment *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int64 `json:"deployment_id"`
}
// EKSMetadata represents the EKS metadata field
type EKSMetadata struct {
ClusterName *string `json:"cluster_name"`
Pod *string `json:"pod"`
ContainerID *string `json:"container_id"`
}
// LogGroupMetadata represents a single CloudWatch Log Group
type LogGroupMetadata struct {
LogGroup *string `json:"log_group"`
Arn *string `json:"arn,omitempty"`
}
// CauseData is the container that contains the `cause` field
type CauseData struct {
Type CauseType `json:"-"`
// it will contain one of ExceptionID or (WorkingDirectory, Paths, Exceptions)
ExceptionID *string `json:"-"`
CauseObject
}
type CauseObject struct {
WorkingDirectory *string `json:"working_directory,omitempty"`
Paths []string `json:"paths,omitempty"`
Exceptions []Exception `json:"exceptions,omitempty"`
}
// UnmarshalJSON is the custom unmarshaller for the cause field
func (c *CauseData) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, &c.CauseObject)
if err == nil {
c.Type = CauseTypeObject
return nil
}
rawStr := string(data)
if len(rawStr) > 0 && (rawStr[0] != '"' || rawStr[len(rawStr)-1] != '"') {
return fmt.Errorf("the value assigned to the `cause` field does not appear to be a string: %v", data)
}
exceptionID := rawStr[1 : len(rawStr)-1]
c.Type = CauseTypeExceptionID
c.ExceptionID = &exceptionID
return nil
}
// Exception represents an exception occurred
type Exception struct {
ID *string `json:"id,omitempty"`
Message *string `json:"message,omitempty"`
Type *string `json:"type,omitempty"`
Remote *bool `json:"remote,omitempty"`
Truncated *int64 `json:"truncated,omitempty"`
Skipped *int64 `json:"skipped,omitempty"`
Cause *string `json:"cause,omitempty"`
Stack []StackFrame `json:"stack,omitempty"`
}
// StackFrame represents a frame in the stack when an exception occurred
type StackFrame struct {
Path *string `json:"path,omitempty"`
Line *int `json:"line,omitempty"`
Label *string `json:"label,omitempty"`
}
// HTTPData provides the shape for unmarshalling request and response fields.
type HTTPData struct {
Request *RequestData `json:"request,omitempty"`
Response *ResponseData `json:"response,omitempty"`
}
// RequestData provides the shape for unmarshalling the request field.
type RequestData struct {
// Available in segment
XForwardedFor *bool `json:"x_forwarded_for,omitempty"`
// Available in both segment and subsegments
Method *string `json:"method,omitempty"`
URL *string `json:"url,omitempty"`
UserAgent *string `json:"user_agent,omitempty"`
ClientIP *string `json:"client_ip,omitempty"`
}
// ResponseData provides the shape for unmarshalling the response field.
type ResponseData struct {
Status *int64 `json:"status,omitempty"`
ContentLength interface{} `json:"content_length,omitempty"`
}
// ECSData provides the shape for unmarshalling the ecs field.
type ECSData struct {
Container *string `json:"container"`
}
// EC2Data provides the shape for unmarshalling the ec2 field.
type EC2Data struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
}
// ElasticBeanstalkData provides the shape for unmarshalling the elastic_beanstalk field.
type ElasticBeanstalkData struct {
EnvironmentName *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int `json:"deployment_id"`
}
// XRayMetaData provides the shape for unmarshalling the xray field
type XRayMetaData struct {
SDK *string `json:"sdk,omitempty"`
SDKVersion *string `json:"sdk_version,omitempty"`
AutoInstrumentation *bool `json:"auto_instrumentation"`
}
// SQLData provides the shape for unmarshalling the sql field | UnmarshalJSON | identifier_name |
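The Go structs in this last row model AWS X-Ray segment documents. For orientation, here is a made-up minimal document that would satisfy `Segment.Validate()` above (which nil-checks only `name`, `id`, `start_time` and `trace_id`); every value is invented for illustration:

```python
# Illustration only: an invented X-Ray segment document matching the json tags
# of the Segment and CauseData structs above; no value comes from the source.
import json

segment = {
    "name": "example-service",                          # nil-checked by Validate()
    "id": "70de5b6f19ff9a0a",                           # nil-checked
    "trace_id": "1-581cf771-a006649127e371903a2de979",  # nil-checked
    "start_time": 1478293361.271,                       # nil-checked
    "end_time": 1478293361.449,
    "error": True,
    # "cause" may be either a bare exception-id string or an object:
    # CauseData.UnmarshalJSON above tries the object form first and falls back
    # to treating the raw value as an exception-id string.
    "cause": {
        "working_directory": "/srv/app",
        "exceptions": [{"message": "boom", "type": "RuntimeError"}],
    },
}
print(json.dumps(segment, indent=2))
```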
tracesegment.go | indicates that the type of the `cause`
// field is an object
CauseTypeObject
)
// Segment schema is documented in xray-segmentdocument-schema-v1.0.0 listed
// on https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html
type Segment struct {
// Required fields for both segment and subsegments
Name *string `json:"name"`
ID *string `json:"id"`
StartTime *float64 `json:"start_time"`
// Segment-only optional fields
Service *ServiceData `json:"service,omitempty"`
Origin *string `json:"origin,omitempty"`
User *string `json:"user,omitempty"`
ResourceARN *string `json:"resource_arn,omitempty"`
Links []SpanLinkData `json:"links,omitempty"`
// Optional fields for both Segment and subsegments
TraceID *string `json:"trace_id,omitempty"`
EndTime *float64 `json:"end_time,omitempty"`
InProgress *bool `json:"in_progress,omitempty"`
HTTP *HTTPData `json:"http,omitempty"`
Fault *bool `json:"fault,omitempty"`
Error *bool `json:"error,omitempty"`
Throttle *bool `json:"throttle,omitempty"`
Cause *CauseData `json:"cause,omitempty"`
AWS *AWSData `json:"aws,omitempty"`
Annotations map[string]interface{} `json:"annotations,omitempty"`
Metadata map[string]map[string]interface{} `json:"metadata,omitempty"`
Subsegments []Segment `json:"subsegments,omitempty"`
// (for both embedded and independent) subsegment-only (optional) fields.
// Please refer to https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-subsegments
// for more information on subsegment.
Namespace *string `json:"namespace,omitempty"`
ParentID *string `json:"parent_id,omitempty"`
Type *string `json:"type,omitempty"`
PrecursorIDs []string `json:"precursor_ids,omitempty"`
Traced *bool `json:"traced,omitempty"`
SQL *SQLData `json:"sql,omitempty"`
}
// Validate checks whether the segment is valid or not
func (s *Segment) Validate() error {
if s.Name == nil {
return errors.New(`segment "name" can not be nil`)
}
if s.ID == nil {
return errors.New(`segment "id" can not be nil`)
}
if s.StartTime == nil |
// it's ok for embedded subsegments to not have trace_id
// but the root segment and independent subsegments must all
// have trace_id.
if s.TraceID == nil {
return errors.New(`segment "trace_id" can not be nil`)
}
return nil
}
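// For illustration, a minimal segment document that passes Validate only needs the four
// required fields checked above, e.g. (example values are made up):
// {"name": "api", "id": "70de5b6f19ff9a0a", "start_time": 1.5e9, "trace_id": "1-581cf771-a006649127e371903a2de979"}
// Validate only checks for nil, so any non-nil values are accepted here.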
// AWSData represents the aws resource that this segment
// originates from
type AWSData struct {
// Segment-only
Beanstalk *BeanstalkMetadata `json:"elastic_beanstalk,omitempty"`
CWLogs []LogGroupMetadata `json:"cloudwatch_logs,omitempty"`
ECS *ECSMetadata `json:"ecs,omitempty"`
EC2 *EC2Metadata `json:"ec2,omitempty"`
EKS *EKSMetadata `json:"eks,omitempty"`
XRay *XRayMetaData `json:"xray,omitempty"`
// For both segment and subsegments
AccountID *string `json:"account_id,omitempty"`
Operation *string `json:"operation,omitempty"`
RemoteRegion *string `json:"region,omitempty"`
RequestID *string `json:"request_id,omitempty"`
QueueURL *string `json:"queue_url,omitempty"`
TableName *string `json:"table_name,omitempty"`
TableNames []string `json:"table_names,omitempty"`
Retries *int64 `json:"retries,omitempty"`
}
// EC2Metadata represents the EC2 metadata field
type EC2Metadata struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
InstanceSize *string `json:"instance_size"`
AmiID *string `json:"ami_id"`
}
// ECSMetadata represents the ECS metadata field. All must be omitempty b/c they come from two different detectors:
// Docker and ECS, so it's possible one is present and not the other
type ECSMetadata struct {
ContainerName *string `json:"container,omitempty"`
ContainerID *string `json:"container_id,omitempty"`
TaskArn *string `json:"task_arn,omitempty"`
TaskFamily *string `json:"task_family,omitempty"`
ClusterArn *string `json:"cluster_arn,omitempty"`
ContainerArn *string `json:"container_arn,omitempty"`
AvailabilityZone *string `json:"availability_zone,omitempty"`
LaunchType *string `json:"launch_type,omitempty"`
}
// BeanstalkMetadata represents the Elastic Beanstalk environment metadata field
type BeanstalkMetadata struct {
Environment *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int64 `json:"deployment_id"`
}
// EKSMetadata represents the EKS metadata field
type EKSMetadata struct {
ClusterName *string `json:"cluster_name"`
Pod *string `json:"pod"`
ContainerID *string `json:"container_id"`
}
// LogGroupMetadata represents a single CloudWatch Log Group
type LogGroupMetadata struct {
LogGroup *string `json:"log_group"`
Arn *string `json:"arn,omitempty"`
}
// CauseData is the container that contains the `cause` field
type CauseData struct {
Type CauseType `json:"-"`
// it will contain one of ExceptionID or (WorkingDirectory, Paths, Exceptions)
ExceptionID *string `json:"-"`
CauseObject
}
type CauseObject struct {
WorkingDirectory *string `json:"working_directory,omitempty"`
Paths []string `json:"paths,omitempty"`
Exceptions []Exception `json:"exceptions,omitempty"`
}
// UnmarshalJSON is the custom unmarshaller for the cause field
func (c *CauseData) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, &c.CauseObject)
if err == nil {
c.Type = CauseTypeObject
return nil
}
rawStr := string(data)
if len(rawStr) > 0 && (rawStr[0] != '"' || rawStr[len(rawStr)-1] != '"') {
return fmt.Errorf("the value assigned to the `cause` field does not appear to be a string: %v", data)
}
exceptionID := rawStr[1 : len(rawStr)-1]
c.Type = CauseTypeExceptionID
c.ExceptionID = &exceptionID
return nil
}
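// For illustration, the `cause` field is accepted in either of two JSON shapes (example
// values are made up): an object such as
// {"working_directory": "/srv/app", "exceptions": [{"id": "abc123", "message": "boom"}]}
// sets Type to CauseTypeObject, while a bare string such as "abc123" is treated as an
// exception ID and sets Type to CauseTypeExceptionID.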
// Exception represents an exception that occurred
type Exception struct {
ID *string `json:"id,omitempty"`
Message *string `json:"message,omitempty"`
Type *string `json:"type,omitempty"`
Remote *bool `json:"remote,omitempty"`
Truncated *int64 `json:"truncated,omitempty"`
Skipped *int64 `json:"skipped,omitempty"`
Cause *string `json:"cause,omitempty"`
Stack []StackFrame `json:"stack,omitempty"`
}
// StackFrame represents a frame in the stack when an exception occurred
type StackFrame struct {
Path *string `json:"path,omitempty"`
Line *int `json:"line,omitempty"`
Label *string `json:"label,omitempty"`
}
// HTTPData provides the shape for unmarshalling request and response fields.
type HTTPData struct {
Request *RequestData `json:"request,omitempty"`
Response *ResponseData `json:"response,omitempty"`
}
// RequestData provides the shape for unmarshalling the request field.
type RequestData struct {
// Available in segment
XForwardedFor *bool `json:"x_forwarded_for,omitempty"`
// Available in both segment and subsegments
Method *string `json:"method,omitempty"`
URL *string `json:"url,omitempty"`
UserAgent *string `json:"user_agent,omitempty"`
ClientIP *string `json:"client_ip,omitempty"`
}
// ResponseData provides the shape for unmarshalling the response field.
type ResponseData struct {
Status *int64 `json:"status,omitempty"`
ContentLength interface{} `json:"content_length,omitempty"`
}
// ECSData provides the shape for unmarshalling the ecs field.
type ECSData struct {
Container *string `json:"container"`
}
// EC2Data provides the shape for unmarshalling the ec2 field.
type EC2Data struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
}
// ElasticBeanstalkData provides the shape for unmarshalling the elastic_beanstalk field.
type ElasticBeanstalkData struct {
EnvironmentName *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int `json:"deployment_id"`
}
// XRayMetaData provides the shape for unmarshalling the xray field
type XRayMetaData struct {
SDK *string `json:"sdk,omitempty"`
SDKVersion *string `json:"sdk_version,omitempty"`
AutoInstrumentation *bool `json:"auto_instrumentation"`
}
// SQLData provides the shape for unmarshalling the sql | {
return errors.New(`segment "start_time" can not be nil`)
} | conditional_block |
tracesegment.go | indicates that the type of the `cause`
// field is an object
CauseTypeObject
)
// Segment schema is documented in xray-segmentdocument-schema-v1.0.0 listed
// on https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html
type Segment struct {
// Required fields for both segment and subsegments
Name *string `json:"name"`
ID *string `json:"id"`
StartTime *float64 `json:"start_time"`
// Segment-only optional fields
Service *ServiceData `json:"service,omitempty"`
Origin *string `json:"origin,omitempty"`
User *string `json:"user,omitempty"`
ResourceARN *string `json:"resource_arn,omitempty"`
Links []SpanLinkData `json:"links,omitempty"`
// Optional fields for both Segment and subsegments
TraceID *string `json:"trace_id,omitempty"`
EndTime *float64 `json:"end_time,omitempty"`
InProgress *bool `json:"in_progress,omitempty"`
HTTP *HTTPData `json:"http,omitempty"`
Fault *bool `json:"fault,omitempty"`
Error *bool `json:"error,omitempty"`
Throttle *bool `json:"throttle,omitempty"`
Cause *CauseData `json:"cause,omitempty"`
AWS *AWSData `json:"aws,omitempty"`
Annotations map[string]interface{} `json:"annotations,omitempty"`
Metadata map[string]map[string]interface{} `json:"metadata,omitempty"`
Subsegments []Segment `json:"subsegments,omitempty"`
// (for both embedded and independent) subsegment-only (optional) fields.
// Please refer to https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-subsegments
// for more information on subsegment.
Namespace *string `json:"namespace,omitempty"`
ParentID *string `json:"parent_id,omitempty"`
Type *string `json:"type,omitempty"`
PrecursorIDs []string `json:"precursor_ids,omitempty"`
Traced *bool `json:"traced,omitempty"`
SQL *SQLData `json:"sql,omitempty"`
}
// Validate checks whether the segment is valid or not
func (s *Segment) Validate() error {
if s.Name == nil {
return errors.New(`segment "name" can not be nil`)
}
if s.ID == nil {
return errors.New(`segment "id" can not be nil`)
}
if s.StartTime == nil {
return errors.New(`segment "start_time" can not be nil`)
}
// it's ok for embedded subsegments to not have trace_id
// but the root segment and independent subsegments must all
// have trace_id.
if s.TraceID == nil {
return errors.New(`segment "trace_id" can not be nil`)
}
return nil
}
// AWSData represents the aws resource that this segment
// originates from
type AWSData struct {
// Segment-only
Beanstalk *BeanstalkMetadata `json:"elastic_beanstalk,omitempty"`
CWLogs []LogGroupMetadata `json:"cloudwatch_logs,omitempty"`
ECS *ECSMetadata `json:"ecs,omitempty"`
EC2 *EC2Metadata `json:"ec2,omitempty"`
EKS *EKSMetadata `json:"eks,omitempty"`
XRay *XRayMetaData `json:"xray,omitempty"`
// For both segment and subsegments
AccountID *string `json:"account_id,omitempty"`
Operation *string `json:"operation,omitempty"`
RemoteRegion *string `json:"region,omitempty"`
RequestID *string `json:"request_id,omitempty"`
QueueURL *string `json:"queue_url,omitempty"`
TableName *string `json:"table_name,omitempty"`
TableNames []string `json:"table_names,omitempty"`
Retries *int64 `json:"retries,omitempty"`
}
// EC2Metadata represents the EC2 metadata field
type EC2Metadata struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
InstanceSize *string `json:"instance_size"`
AmiID *string `json:"ami_id"`
}
// ECSMetadata represents the ECS metadata field. All must be omitempty b/c they come from two different detectors:
// Docker and ECS, so it's possible one is present and not the other
type ECSMetadata struct {
ContainerName *string `json:"container,omitempty"`
ContainerID *string `json:"container_id,omitempty"`
TaskArn *string `json:"task_arn,omitempty"`
TaskFamily *string `json:"task_family,omitempty"`
ClusterArn *string `json:"cluster_arn,omitempty"`
ContainerArn *string `json:"container_arn,omitempty"`
AvailabilityZone *string `json:"availability_zone,omitempty"`
LaunchType *string `json:"launch_type,omitempty"` | }
// BeanstalkMetadata represents the Elastic Beanstalk environment metadata field
type BeanstalkMetadata struct {
Environment *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int64 `json:"deployment_id"`
}
// EKSMetadata represents the EKS metadata field
type EKSMetadata struct {
ClusterName *string `json:"cluster_name"`
Pod *string `json:"pod"`
ContainerID *string `json:"container_id"`
}
// LogGroupMetadata represents a single CloudWatch Log Group
type LogGroupMetadata struct {
LogGroup *string `json:"log_group"`
Arn *string `json:"arn,omitempty"`
}
// CauseData is the container that contains the `cause` field
type CauseData struct {
Type CauseType `json:"-"`
// it will contain one of ExceptionID or (WorkingDirectory, Paths, Exceptions)
ExceptionID *string `json:"-"`
CauseObject
}
type CauseObject struct {
WorkingDirectory *string `json:"working_directory,omitempty"`
Paths []string `json:"paths,omitempty"`
Exceptions []Exception `json:"exceptions,omitempty"`
}
// UnmarshalJSON is the custom unmarshaller for the cause field
func (c *CauseData) UnmarshalJSON(data []byte) error {
err := json.Unmarshal(data, &c.CauseObject)
if err == nil {
c.Type = CauseTypeObject
return nil
}
rawStr := string(data)
if len(rawStr) > 0 && (rawStr[0] != '"' || rawStr[len(rawStr)-1] != '"') {
return fmt.Errorf("the value assigned to the `cause` field does not appear to be a string: %v", data)
}
exceptionID := rawStr[1 : len(rawStr)-1]
c.Type = CauseTypeExceptionID
c.ExceptionID = &exceptionID
return nil
}
// Exception represents an exception that occurred
type Exception struct {
ID *string `json:"id,omitempty"`
Message *string `json:"message,omitempty"`
Type *string `json:"type,omitempty"`
Remote *bool `json:"remote,omitempty"`
Truncated *int64 `json:"truncated,omitempty"`
Skipped *int64 `json:"skipped,omitempty"`
Cause *string `json:"cause,omitempty"`
Stack []StackFrame `json:"stack,omitempty"`
}
// StackFrame represents a frame in the stack when an exception occurred
type StackFrame struct {
Path *string `json:"path,omitempty"`
Line *int `json:"line,omitempty"`
Label *string `json:"label,omitempty"`
}
// HTTPData provides the shape for unmarshalling request and response fields.
type HTTPData struct {
Request *RequestData `json:"request,omitempty"`
Response *ResponseData `json:"response,omitempty"`
}
// RequestData provides the shape for unmarshalling the request field.
type RequestData struct {
// Available in segment
XForwardedFor *bool `json:"x_forwarded_for,omitempty"`
// Available in both segment and subsegments
Method *string `json:"method,omitempty"`
URL *string `json:"url,omitempty"`
UserAgent *string `json:"user_agent,omitempty"`
ClientIP *string `json:"client_ip,omitempty"`
}
// ResponseData provides the shape for unmarshalling the response field.
type ResponseData struct {
Status *int64 `json:"status,omitempty"`
ContentLength interface{} `json:"content_length,omitempty"`
}
// ECSData provides the shape for unmarshalling the ecs field.
type ECSData struct {
Container *string `json:"container"`
}
// EC2Data provides the shape for unmarshalling the ec2 field.
type EC2Data struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
}
// ElasticBeanstalkData provides the shape for unmarshalling the elastic_beanstalk field.
type ElasticBeanstalkData struct {
EnvironmentName *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int `json:"deployment_id"`
}
// XRayMetaData provides the shape for unmarshalling the xray field
type XRayMetaData struct {
SDK *string `json:"sdk,omitempty"`
SDKVersion *string `json:"sdk_version,omitempty"`
AutoInstrumentation *bool `json:"auto_instrumentation"`
}
// SQLData provides the shape for unmarshalling the sql field.
| random_line_split |
|
tracesegment.go | indicates that the type of the `cause`
// field is an object
CauseTypeObject
)
// Segment schema is documented in xray-segmentdocument-schema-v1.0.0 listed
// on https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html
type Segment struct {
// Required fields for both segment and subsegments
Name *string `json:"name"`
ID *string `json:"id"`
StartTime *float64 `json:"start_time"`
// Segment-only optional fields
Service *ServiceData `json:"service,omitempty"`
Origin *string `json:"origin,omitempty"`
User *string `json:"user,omitempty"`
ResourceARN *string `json:"resource_arn,omitempty"`
Links []SpanLinkData `json:"links,omitempty"`
// Optional fields for both Segment and subsegments
TraceID *string `json:"trace_id,omitempty"`
EndTime *float64 `json:"end_time,omitempty"`
InProgress *bool `json:"in_progress,omitempty"`
HTTP *HTTPData `json:"http,omitempty"`
Fault *bool `json:"fault,omitempty"`
Error *bool `json:"error,omitempty"`
Throttle *bool `json:"throttle,omitempty"`
Cause *CauseData `json:"cause,omitempty"`
AWS *AWSData `json:"aws,omitempty"`
Annotations map[string]interface{} `json:"annotations,omitempty"`
Metadata map[string]map[string]interface{} `json:"metadata,omitempty"`
Subsegments []Segment `json:"subsegments,omitempty"`
// (for both embedded and independent) subsegment-only (optional) fields.
// Please refer to https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-subsegments
// for more information on subsegment.
Namespace *string `json:"namespace,omitempty"`
ParentID *string `json:"parent_id,omitempty"`
Type *string `json:"type,omitempty"`
PrecursorIDs []string `json:"precursor_ids,omitempty"`
Traced *bool `json:"traced,omitempty"`
SQL *SQLData `json:"sql,omitempty"`
}
// Validate checks whether the segment is valid or not
func (s *Segment) Validate() error {
if s.Name == nil {
return errors.New(`segment "name" can not be nil`)
}
if s.ID == nil {
return errors.New(`segment "id" can not be nil`)
}
if s.StartTime == nil {
return errors.New(`segment "start_time" can not be nil`)
}
// it's ok for embedded subsegments to not have trace_id
// but the root segment and independent subsegments must all
// have trace_id.
if s.TraceID == nil {
return errors.New(`segment "trace_id" can not be nil`)
}
return nil
}
// AWSData represents the aws resource that this segment
// originates from
type AWSData struct {
// Segment-only
Beanstalk *BeanstalkMetadata `json:"elastic_beanstalk,omitempty"`
CWLogs []LogGroupMetadata `json:"cloudwatch_logs,omitempty"`
ECS *ECSMetadata `json:"ecs,omitempty"`
EC2 *EC2Metadata `json:"ec2,omitempty"`
EKS *EKSMetadata `json:"eks,omitempty"`
XRay *XRayMetaData `json:"xray,omitempty"`
// For both segment and subsegments
AccountID *string `json:"account_id,omitempty"`
Operation *string `json:"operation,omitempty"`
RemoteRegion *string `json:"region,omitempty"`
RequestID *string `json:"request_id,omitempty"`
QueueURL *string `json:"queue_url,omitempty"`
TableName *string `json:"table_name,omitempty"`
TableNames []string `json:"table_names,omitempty"`
Retries *int64 `json:"retries,omitempty"`
}
// EC2Metadata represents the EC2 metadata field
type EC2Metadata struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
InstanceSize *string `json:"instance_size"`
AmiID *string `json:"ami_id"`
}
// ECSMetadata represents the ECS metadata field. All must be omitempty b/c they come from two different detectors:
// Docker and ECS, so it's possible one is present and not the other
type ECSMetadata struct {
ContainerName *string `json:"container,omitempty"`
ContainerID *string `json:"container_id,omitempty"`
TaskArn *string `json:"task_arn,omitempty"`
TaskFamily *string `json:"task_family,omitempty"`
ClusterArn *string `json:"cluster_arn,omitempty"`
ContainerArn *string `json:"container_arn,omitempty"`
AvailabilityZone *string `json:"availability_zone,omitempty"`
LaunchType *string `json:"launch_type,omitempty"`
}
// BeanstalkMetadata represents the Elastic Beanstalk environment metadata field
type BeanstalkMetadata struct {
Environment *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int64 `json:"deployment_id"`
}
// EKSMetadata represents the EKS metadata field
type EKSMetadata struct {
ClusterName *string `json:"cluster_name"`
Pod *string `json:"pod"`
ContainerID *string `json:"container_id"`
}
// LogGroupMetadata represents a single CloudWatch Log Group
type LogGroupMetadata struct {
LogGroup *string `json:"log_group"`
Arn *string `json:"arn,omitempty"`
}
// CauseData is the container that contains the `cause` field
type CauseData struct {
Type CauseType `json:"-"`
// it will contain one of ExceptionID or (WorkingDirectory, Paths, Exceptions)
ExceptionID *string `json:"-"`
CauseObject
}
type CauseObject struct {
WorkingDirectory *string `json:"working_directory,omitempty"`
Paths []string `json:"paths,omitempty"`
Exceptions []Exception `json:"exceptions,omitempty"`
}
// UnmarshalJSON is the custom unmarshaller for the cause field
func (c *CauseData) UnmarshalJSON(data []byte) error |
// Exception represents an exception that occurred
type Exception struct {
ID *string `json:"id,omitempty"`
Message *string `json:"message,omitempty"`
Type *string `json:"type,omitempty"`
Remote *bool `json:"remote,omitempty"`
Truncated *int64 `json:"truncated,omitempty"`
Skipped *int64 `json:"skipped,omitempty"`
Cause *string `json:"cause,omitempty"`
Stack []StackFrame `json:"stack,omitempty"`
}
// StackFrame represents a frame in the stack when an exception occurred
type StackFrame struct {
Path *string `json:"path,omitempty"`
Line *int `json:"line,omitempty"`
Label *string `json:"label,omitempty"`
}
// HTTPData provides the shape for unmarshalling request and response fields.
type HTTPData struct {
Request *RequestData `json:"request,omitempty"`
Response *ResponseData `json:"response,omitempty"`
}
// RequestData provides the shape for unmarshalling the request field.
type RequestData struct {
// Available in segment
XForwardedFor *bool `json:"x_forwarded_for,omitempty"`
// Available in both segment and subsegments
Method *string `json:"method,omitempty"`
URL *string `json:"url,omitempty"`
UserAgent *string `json:"user_agent,omitempty"`
ClientIP *string `json:"client_ip,omitempty"`
}
// ResponseData provides the shape for unmarshalling the response field.
type ResponseData struct {
Status *int64 `json:"status,omitempty"`
ContentLength interface{} `json:"content_length,omitempty"`
}
// ECSData provides the shape for unmarshalling the ecs field.
type ECSData struct {
Container *string `json:"container"`
}
// EC2Data provides the shape for unmarshalling the ec2 field.
type EC2Data struct {
InstanceID *string `json:"instance_id"`
AvailabilityZone *string `json:"availability_zone"`
}
// ElasticBeanstalkData provides the shape for unmarshalling the elastic_beanstalk field.
type ElasticBeanstalkData struct {
EnvironmentName *string `json:"environment_name"`
VersionLabel *string `json:"version_label"`
DeploymentID *int `json:"deployment_id"`
}
// XRayMetaData provides the shape for unmarshalling the xray field
type XRayMetaData struct {
SDK *string `json:"sdk,omitempty"`
SDKVersion *string `json:"sdk_version,omitempty"`
AutoInstrumentation *bool `json:"auto_instrumentation"`
}
// SQLData provides the shape for unmarshalling the sql | {
err := json.Unmarshal(data, &c.CauseObject)
if err == nil {
c.Type = CauseTypeObject
return nil
}
rawStr := string(data)
if len(rawStr) > 0 && (rawStr[0] != '"' || rawStr[len(rawStr)-1] != '"') {
return fmt.Errorf("the value assigned to the `cause` field does not appear to be a string: %v", data)
}
exceptionID := rawStr[1 : len(rawStr)-1]
c.Type = CauseTypeExceptionID
c.ExceptionID = &exceptionID
return nil
} | identifier_body |
gateway.go | statusUpdater k8s.StatusUpdater
log logrus.FieldLogger
// gatewayClassControllerName is the configured controller of managed gatewayclasses.
gatewayClassControllerName gatewayapi_v1beta1.GatewayController
eventSource chan event.GenericEvent
}
// RegisterGatewayController creates the gateway controller from mgr. The controller will be pre-configured
// to watch for Gateway objects across all namespaces and reconcile those that match class.
func RegisterGatewayController(
log logrus.FieldLogger,
mgr manager.Manager,
eventHandler cache.ResourceEventHandler,
statusUpdater k8s.StatusUpdater,
gatewayClassControllerName string,
) (leadership.NeedLeaderElectionNotification, error) {
r := &gatewayReconciler{
log: log,
client: mgr.GetClient(),
eventHandler: eventHandler,
statusUpdater: statusUpdater,
gatewayClassControllerName: gatewayapi_v1beta1.GatewayController(gatewayClassControllerName),
// Set up a source.Channel that will trigger reconciles
// for all GatewayClasses when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
eventSource: make(chan event.GenericEvent),
}
c, err := controller.NewUnmanaged("gateway-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return nil, err
}
if err := mgr.Add(&noLeaderElectionController{c}); err != nil {
return nil, err
}
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.Gateway{}),
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
// Watch GatewayClasses and reconcile their associated Gateways
// to handle changes in the GatewayClasses' "Accepted" conditions.
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.GatewayClass{}),
handler.EnqueueRequestsFromMapFunc(r.mapGatewayClassToGateways),
predicate.NewPredicateFuncs(r.gatewayClassHasMatchingController),
); err != nil {
return nil, err
}
// Set up a source.Channel that will trigger reconciles
// for all Gateways when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
if err := c.Watch(
&source.Channel{Source: r.eventSource},
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
return r, nil
}
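// OnElectedLeader lists all Gateways and pushes them onto the event source channel so that
// each one is reconciled once this Contour process becomes the leader.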
func (r *gatewayReconciler) OnElectedLeader() {
r.log.Info("elected leader, triggering reconciles for all gateways")
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return
}
for i := range gateways.Items {
r.eventSource <- event.GenericEvent{Object: &gateways.Items[i]}
}
}
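// mapGatewayClassToGateways returns a reconcile request for every Gateway whose
// gatewayClassName references the given GatewayClass, so that changes to a class's
// status propagate to the Gateways using it.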
func (r *gatewayReconciler) mapGatewayClassToGateways(ctx context.Context, gatewayClass client.Object) []reconcile.Request {
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(ctx, &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return nil
}
var reconciles []reconcile.Request
for _, gw := range gateways.Items {
if string(gw.Spec.GatewayClassName) == gatewayClass.GetName() {
reconciles = append(reconciles, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: gw.Namespace,
Name: gw.Name,
},
})
}
}
return reconciles
}
// hasMatchingController returns true if the provided object is a Gateway
// using a GatewayClass with a Spec.Controller string matching this Contour's
// controller string, or false otherwise.
func (r *gatewayReconciler) hasMatchingController(obj client.Object) bool {
log := r.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
})
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
log.Debugf("unexpected object type %T, bypassing reconciliation.", obj)
return false
}
gc := &gatewayapi_v1beta1.GatewayClass{}
if err := r.client.Get(context.Background(), types.NamespacedName{Name: string(gw.Spec.GatewayClassName)}, gc); err != nil {
log.WithError(err).Errorf("failed to get gatewayclass %s", gw.Spec.GatewayClassName)
return false
}
if gc.Spec.ControllerName != r.gatewayClassControllerName {
log.Debugf("gateway's class controller is not %s; bypassing reconciliation", r.gatewayClassControllerName)
return false
}
return true
}
func (r *gatewayReconciler) gatewayClassHasMatchingController(obj client.Object) bool {
gc, ok := obj.(*gatewayapi_v1beta1.GatewayClass)
if !ok {
r.log.Infof("expected GatewayClass, got %T", obj)
return false
}
return gc.Spec.ControllerName == r.gatewayClassControllerName
}
// Reconcile finds all the Gateways for the GatewayClass with an "Accepted: true" condition.
// It passes the oldest such Gateway to the DAG for processing, and sets an "Accepted: false"
// condition on all other Gateways for the accepted GatewayClass.
func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
r.log.WithField("namespace", request.Namespace).WithField("name", request.Name).Info("reconciling gateway")
var gatewayClasses gatewayapi_v1beta1.GatewayClassList
if err := r.client.List(context.Background(), &gatewayClasses); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateway classes")
}
// Find the GatewayClass for this controller with Accepted=true.
var acceptedGatewayClass *gatewayapi_v1beta1.GatewayClass
for i := range gatewayClasses.Items {
gatewayClass := &gatewayClasses.Items[i]
if gatewayClass.Spec.ControllerName != r.gatewayClassControllerName {
continue
}
if !isAccepted(gatewayClass) {
continue
}
acceptedGatewayClass = gatewayClass
break
}
if acceptedGatewayClass == nil {
r.log.Info("No accepted gateway class found")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
var allGateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &allGateways); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateways")
}
// Get all the Gateways for the Accepted=true GatewayClass.
var gatewaysForClass []*gatewayapi_v1beta1.Gateway
for i := range allGateways.Items {
if string(allGateways.Items[i].Spec.GatewayClassName) == acceptedGatewayClass.Name {
gatewaysForClass = append(gatewaysForClass, &allGateways.Items[i])
}
}
if len(gatewaysForClass) == 0 {
r.log.Info("No gateways found for accepted gateway class")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
// Find the oldest Gateway, using alphabetical order
// as a tiebreaker.
var oldest *gatewayapi_v1beta1.Gateway
for _, gw := range gatewaysForClass {
switch {
case oldest == nil:
oldest = gw
case gw.CreationTimestamp.Before(&oldest.CreationTimestamp):
oldest = gw
case gw.CreationTimestamp.Equal(&oldest.CreationTimestamp):
if fmt.Sprintf("%s/%s", gw.Namespace, gw.Name) < fmt.Sprintf("%s/%s", oldest.Namespace, oldest.Name) {
oldest = gw
}
}
}
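// For example, if two gateways share a creation timestamp, "default/alpha" sorts before
// "default/beta" and would be chosen as the oldest.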
// Set the "Accepted" condition to false for all gateways
// except the oldest. The oldest will have its status set
// by the DAG processor, so don't set it here.
for _, gw := range gatewaysForClass | {
if gw == oldest {
continue
}
if r.statusUpdater != nil {
r.statusUpdater.Send(k8s.StatusUpdate{
NamespacedName: k8s.NamespacedNameOf(gw),
Resource: &gatewayapi_v1beta1.Gateway{},
Mutator: k8s.StatusMutatorFunc(func(obj client.Object) client.Object {
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
panic(fmt.Sprintf("unsupported object type %T", obj))
}
return setGatewayNotAccepted(gw.DeepCopy())
}),
})
} else {
// this branch makes testing easier by not going through the StatusUpdater. | conditional_block |
|
gateway.go | "
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
gatewayapi_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
)
type gatewayReconciler struct {
client client.Client
eventHandler cache.ResourceEventHandler
statusUpdater k8s.StatusUpdater
log logrus.FieldLogger
// gatewayClassControllerName is the configured controller of managed gatewayclasses.
gatewayClassControllerName gatewayapi_v1beta1.GatewayController
eventSource chan event.GenericEvent
}
// RegisterGatewayController creates the gateway controller from mgr. The controller will be pre-configured
// to watch for Gateway objects across all namespaces and reconcile those that match class.
func RegisterGatewayController(
log logrus.FieldLogger,
mgr manager.Manager,
eventHandler cache.ResourceEventHandler,
statusUpdater k8s.StatusUpdater,
gatewayClassControllerName string,
) (leadership.NeedLeaderElectionNotification, error) {
r := &gatewayReconciler{
log: log,
client: mgr.GetClient(),
eventHandler: eventHandler,
statusUpdater: statusUpdater,
gatewayClassControllerName: gatewayapi_v1beta1.GatewayController(gatewayClassControllerName),
// Set up a source.Channel that will trigger reconciles
// for all GatewayClasses when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
eventSource: make(chan event.GenericEvent),
}
c, err := controller.NewUnmanaged("gateway-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return nil, err
}
if err := mgr.Add(&noLeaderElectionController{c}); err != nil {
return nil, err
}
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.Gateway{}),
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
// Watch GatewayClasses and reconcile their associated Gateways
// to handle changes in the GatewayClasses' "Accepted" conditions.
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.GatewayClass{}),
handler.EnqueueRequestsFromMapFunc(r.mapGatewayClassToGateways),
predicate.NewPredicateFuncs(r.gatewayClassHasMatchingController),
); err != nil {
return nil, err
}
// Set up a source.Channel that will trigger reconciles
// for all Gateways when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
if err := c.Watch(
&source.Channel{Source: r.eventSource},
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
return r, nil
}
func (r *gatewayReconciler) OnElectedLeader() {
r.log.Info("elected leader, triggering reconciles for all gateways")
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return
}
for i := range gateways.Items {
r.eventSource <- event.GenericEvent{Object: &gateways.Items[i]}
}
}
func (r *gatewayReconciler) mapGatewayClassToGateways(ctx context.Context, gatewayClass client.Object) []reconcile.Request {
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(ctx, &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return nil
}
var reconciles []reconcile.Request
for _, gw := range gateways.Items {
if string(gw.Spec.GatewayClassName) == gatewayClass.GetName() {
reconciles = append(reconciles, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: gw.Namespace,
Name: gw.Name,
},
})
}
}
return reconciles
}
// hasMatchingController returns true if the provided object is a Gateway
// using a GatewayClass with a Spec.Controller string matching this Contour's
// controller string, or false otherwise.
func (r *gatewayReconciler) | (obj client.Object) bool {
log := r.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
})
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
log.Debugf("unexpected object type %T, bypassing reconciliation.", obj)
return false
}
gc := &gatewayapi_v1beta1.GatewayClass{}
if err := r.client.Get(context.Background(), types.NamespacedName{Name: string(gw.Spec.GatewayClassName)}, gc); err != nil {
log.WithError(err).Errorf("failed to get gatewayclass %s", gw.Spec.GatewayClassName)
return false
}
if gc.Spec.ControllerName != r.gatewayClassControllerName {
log.Debugf("gateway's class controller is not %s; bypassing reconciliation", r.gatewayClassControllerName)
return false
}
return true
}
func (r *gatewayReconciler) gatewayClassHasMatchingController(obj client.Object) bool {
gc, ok := obj.(*gatewayapi_v1beta1.GatewayClass)
if !ok {
r.log.Infof("expected GatewayClass, got %T", obj)
return false
}
return gc.Spec.ControllerName == r.gatewayClassControllerName
}
// Reconcile finds all the Gateways for the GatewayClass with an "Accepted: true" condition.
// It passes the oldest such Gateway to the DAG for processing, and sets an "Accepted: false"
// condition on all other Gateways for the accepted GatewayClass.
func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
r.log.WithField("namespace", request.Namespace).WithField("name", request.Name).Info("reconciling gateway")
var gatewayClasses gatewayapi_v1beta1.GatewayClassList
if err := r.client.List(context.Background(), &gatewayClasses); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateway classes")
}
// Find the GatewayClass for this controller with Accepted=true.
var acceptedGatewayClass *gatewayapi_v1beta1.GatewayClass
for i := range gatewayClasses.Items {
gatewayClass := &gatewayClasses.Items[i]
if gatewayClass.Spec.ControllerName != r.gatewayClassControllerName {
continue
}
if !isAccepted(gatewayClass) {
continue
}
acceptedGatewayClass = gatewayClass
break
}
if acceptedGatewayClass == nil {
r.log.Info("No accepted gateway class found")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
var allGateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &allGateways); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateways")
}
// Get all the Gateways for the Accepted=true GatewayClass.
var gatewaysForClass []*gatewayapi_v1beta1.Gateway
for i := range allGateways.Items {
if string(allGateways.Items[i].Spec.GatewayClassName) == acceptedGatewayClass.Name {
gatewaysForClass = append(gatewaysForClass, &allGateways.Items[i])
}
}
if len(gatewaysForClass) == 0 {
r.log.Info("No gateways found for accepted gateway class")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
// Find the oldest Gateway, using alphabetical order
// as a tiebreaker.
var oldest *gatewayapi_v1beta1.Gateway
for _, gw := range gatewaysForClass {
switch {
case oldest == nil:
oldest = gw
case gw.CreationTimestamp.Before(&oldest.CreationTimestamp):
oldest = gw
case gw.CreationTimestamp.Equal(&oldest.CreationTimestamp):
if fmt.Sprintf("%s/%s", gw.Namespace, gw.Name) < fmt.Sprintf("%s/%s", oldest.Namespace, oldest.Name) {
oldest = gw
}
}
}
// Set the "Accepted" condition to false for all gateways
// except the oldest. The oldest will have its status set
// by the DAG processor, so don't set it here.
for _, gw := range gatewaysForClass {
if gw == oldest {
continue
}
if r.statusUpdater != nil {
r.statusUpdater.Send(k8s.StatusUpdate{
NamespacedName: k8s.NamespacedNameOf(gw),
Resource: &gatewayapi_v1beta1.Gateway{},
| hasMatchingController | identifier_name |
gateway.go | ClassControllerName),
// Set up a source.Channel that will trigger reconciles
// for all GatewayClasses when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
eventSource: make(chan event.GenericEvent),
}
c, err := controller.NewUnmanaged("gateway-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return nil, err
}
if err := mgr.Add(&noLeaderElectionController{c}); err != nil {
return nil, err
}
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.Gateway{}),
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
// Watch GatewayClasses and reconcile their associated Gateways
// to handle changes in the GatewayClasses' "Accepted" conditions.
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.GatewayClass{}),
handler.EnqueueRequestsFromMapFunc(r.mapGatewayClassToGateways),
predicate.NewPredicateFuncs(r.gatewayClassHasMatchingController),
); err != nil {
return nil, err
}
// Set up a source.Channel that will trigger reconciles
// for all Gateways when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
if err := c.Watch(
&source.Channel{Source: r.eventSource},
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
return r, nil
}
func (r *gatewayReconciler) OnElectedLeader() {
r.log.Info("elected leader, triggering reconciles for all gateways")
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return
}
for i := range gateways.Items {
r.eventSource <- event.GenericEvent{Object: &gateways.Items[i]}
}
}
func (r *gatewayReconciler) mapGatewayClassToGateways(ctx context.Context, gatewayClass client.Object) []reconcile.Request {
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(ctx, &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return nil
}
var reconciles []reconcile.Request
for _, gw := range gateways.Items {
if string(gw.Spec.GatewayClassName) == gatewayClass.GetName() {
reconciles = append(reconciles, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: gw.Namespace,
Name: gw.Name,
},
})
}
}
return reconciles
}
// hasMatchingController returns true if the provided object is a Gateway
// using a GatewayClass with a Spec.Controller string matching this Contour's
// controller string, or false otherwise.
func (r *gatewayReconciler) hasMatchingController(obj client.Object) bool {
log := r.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
})
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
log.Debugf("unexpected object type %T, bypassing reconciliation.", obj)
return false
}
gc := &gatewayapi_v1beta1.GatewayClass{}
if err := r.client.Get(context.Background(), types.NamespacedName{Name: string(gw.Spec.GatewayClassName)}, gc); err != nil {
log.WithError(err).Errorf("failed to get gatewayclass %s", gw.Spec.GatewayClassName)
return false
}
if gc.Spec.ControllerName != r.gatewayClassControllerName {
log.Debugf("gateway's class controller is not %s; bypassing reconciliation", r.gatewayClassControllerName)
return false
}
return true
}
func (r *gatewayReconciler) gatewayClassHasMatchingController(obj client.Object) bool {
gc, ok := obj.(*gatewayapi_v1beta1.GatewayClass)
if !ok {
r.log.Infof("expected GatewayClass, got %T", obj)
return false
}
return gc.Spec.ControllerName == r.gatewayClassControllerName
}
// Reconcile finds all the Gateways for the GatewayClass with an "Accepted: true" condition.
// It passes the oldest such Gateway to the DAG for processing, and sets an "Accepted: false"
// condition on all other Gateways for the accepted GatewayClass.
func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
r.log.WithField("namespace", request.Namespace).WithField("name", request.Name).Info("reconciling gateway")
var gatewayClasses gatewayapi_v1beta1.GatewayClassList
if err := r.client.List(context.Background(), &gatewayClasses); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateway classes")
}
// Find the GatewayClass for this controller with Accepted=true.
var acceptedGatewayClass *gatewayapi_v1beta1.GatewayClass
for i := range gatewayClasses.Items {
gatewayClass := &gatewayClasses.Items[i]
if gatewayClass.Spec.ControllerName != r.gatewayClassControllerName {
continue
}
if !isAccepted(gatewayClass) {
continue
}
acceptedGatewayClass = gatewayClass
break
}
if acceptedGatewayClass == nil {
r.log.Info("No accepted gateway class found")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
var allGateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &allGateways); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateways")
}
// Get all the Gateways for the Accepted=true GatewayClass.
var gatewaysForClass []*gatewayapi_v1beta1.Gateway
for i := range allGateways.Items {
if string(allGateways.Items[i].Spec.GatewayClassName) == acceptedGatewayClass.Name {
gatewaysForClass = append(gatewaysForClass, &allGateways.Items[i])
}
}
if len(gatewaysForClass) == 0 {
r.log.Info("No gateways found for accepted gateway class")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
// Find the oldest Gateway, using alphabetical order
// as a tiebreaker.
var oldest *gatewayapi_v1beta1.Gateway
for _, gw := range gatewaysForClass {
switch {
case oldest == nil:
oldest = gw
case gw.CreationTimestamp.Before(&oldest.CreationTimestamp):
oldest = gw
case gw.CreationTimestamp.Equal(&oldest.CreationTimestamp):
if fmt.Sprintf("%s/%s", gw.Namespace, gw.Name) < fmt.Sprintf("%s/%s", oldest.Namespace, oldest.Name) {
oldest = gw
}
}
}
// Set the "Accepted" condition to false for all gateways
// except the oldest. The oldest will have its status set
// by the DAG processor, so don't set it here.
for _, gw := range gatewaysForClass {
if gw == oldest {
continue
}
if r.statusUpdater != nil {
r.statusUpdater.Send(k8s.StatusUpdate{
NamespacedName: k8s.NamespacedNameOf(gw),
Resource: &gatewayapi_v1beta1.Gateway{},
Mutator: k8s.StatusMutatorFunc(func(obj client.Object) client.Object {
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
panic(fmt.Sprintf("unsupported object type %T", obj))
}
return setGatewayNotAccepted(gw.DeepCopy())
}),
})
} else {
// this branch makes testing easier by not going through the StatusUpdater.
copy := setGatewayNotAccepted(gw.DeepCopy())
if err := r.client.Status().Update(context.Background(), copy); err != nil {
r.log.WithError(err).Error("error updating gateway status")
return reconcile.Result{}, fmt.Errorf("error updating status of gateway %s/%s: %v", gw.Namespace, gw.Name, err)
}
}
}
// TODO: Ensure the gateway by creating managed infrastructure, i.e. the Envoy service.
// xref: https://github.com/projectcontour/contour/issues/3545
r.log.WithField("namespace", oldest.Namespace).WithField("name", oldest.Name).Info("assigning gateway to DAG")
r.eventHandler.OnAdd(oldest, false)
return reconcile.Result{}, nil
}
| func isAccepted(gatewayClass *gatewayapi_v1beta1.GatewayClass) bool {
for _, cond := range gatewayClass.Status.Conditions { | random_line_split |
|
gateway.go | "
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
gatewayapi_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
)
type gatewayReconciler struct {
client client.Client
eventHandler cache.ResourceEventHandler
statusUpdater k8s.StatusUpdater
log logrus.FieldLogger
// gatewayClassControllerName is the configured controller of managed gatewayclasses.
gatewayClassControllerName gatewayapi_v1beta1.GatewayController
eventSource chan event.GenericEvent
}
// RegisterGatewayController creates the gateway controller from mgr. The controller will be pre-configured
// to watch for Gateway objects across all namespaces and reconcile those that match class.
func RegisterGatewayController(
log logrus.FieldLogger,
mgr manager.Manager,
eventHandler cache.ResourceEventHandler,
statusUpdater k8s.StatusUpdater,
gatewayClassControllerName string,
) (leadership.NeedLeaderElectionNotification, error) |
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.Gateway{}),
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
// Watch GatewayClasses and reconcile their associated Gateways
// to handle changes in the GatewayClasses' "Accepted" conditions.
if err := c.Watch(
source.Kind(mgr.GetCache(), &gatewayapi_v1beta1.GatewayClass{}),
handler.EnqueueRequestsFromMapFunc(r.mapGatewayClassToGateways),
predicate.NewPredicateFuncs(r.gatewayClassHasMatchingController),
); err != nil {
return nil, err
}
// Set up a source.Channel that will trigger reconciles
// for all Gateways when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
if err := c.Watch(
&source.Channel{Source: r.eventSource},
&handler.EnqueueRequestForObject{},
predicate.NewPredicateFuncs(r.hasMatchingController),
); err != nil {
return nil, err
}
return r, nil
}
func (r *gatewayReconciler) OnElectedLeader() {
r.log.Info("elected leader, triggering reconciles for all gateways")
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return
}
for i := range gateways.Items {
r.eventSource <- event.GenericEvent{Object: &gateways.Items[i]}
}
}
func (r *gatewayReconciler) mapGatewayClassToGateways(ctx context.Context, gatewayClass client.Object) []reconcile.Request {
var gateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(ctx, &gateways); err != nil {
r.log.WithError(err).Error("error listing gateways")
return nil
}
var reconciles []reconcile.Request
for _, gw := range gateways.Items {
if string(gw.Spec.GatewayClassName) == gatewayClass.GetName() {
reconciles = append(reconciles, reconcile.Request{
NamespacedName: types.NamespacedName{
Namespace: gw.Namespace,
Name: gw.Name,
},
})
}
}
return reconciles
}
// hasMatchingController returns true if the provided object is a Gateway
// using a GatewayClass with a Spec.Controller string matching this Contour's
// controller string, or false otherwise.
func (r *gatewayReconciler) hasMatchingController(obj client.Object) bool {
log := r.log.WithFields(logrus.Fields{
"namespace": obj.GetNamespace(),
"name": obj.GetName(),
})
gw, ok := obj.(*gatewayapi_v1beta1.Gateway)
if !ok {
log.Debugf("unexpected object type %T, bypassing reconciliation.", obj)
return false
}
gc := &gatewayapi_v1beta1.GatewayClass{}
if err := r.client.Get(context.Background(), types.NamespacedName{Name: string(gw.Spec.GatewayClassName)}, gc); err != nil {
log.WithError(err).Errorf("failed to get gatewayclass %s", gw.Spec.GatewayClassName)
return false
}
if gc.Spec.ControllerName != r.gatewayClassControllerName {
log.Debugf("gateway's class controller is not %s; bypassing reconciliation", r.gatewayClassControllerName)
return false
}
return true
}
func (r *gatewayReconciler) gatewayClassHasMatchingController(obj client.Object) bool {
gc, ok := obj.(*gatewayapi_v1beta1.GatewayClass)
if !ok {
r.log.Infof("expected GatewayClass, got %T", obj)
return false
}
return gc.Spec.ControllerName == r.gatewayClassControllerName
}
// Reconcile finds all the Gateways for the GatewayClass with an "Accepted: true" condition.
// It passes the oldest such Gateway to the DAG for processing, and sets an "Accepted: false"
// condition on all other Gateways for the accepted GatewayClass.
func (r *gatewayReconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
r.log.WithField("namespace", request.Namespace).WithField("name", request.Name).Info("reconciling gateway")
var gatewayClasses gatewayapi_v1beta1.GatewayClassList
if err := r.client.List(context.Background(), &gatewayClasses); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateway classes")
}
// Find the GatewayClass for this controller with Accepted=true.
var acceptedGatewayClass *gatewayapi_v1beta1.GatewayClass
for i := range gatewayClasses.Items {
gatewayClass := &gatewayClasses.Items[i]
if gatewayClass.Spec.ControllerName != r.gatewayClassControllerName {
continue
}
if !isAccepted(gatewayClass) {
continue
}
acceptedGatewayClass = gatewayClass
break
}
if acceptedGatewayClass == nil {
r.log.Info("No accepted gateway class found")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
var allGateways gatewayapi_v1beta1.GatewayList
if err := r.client.List(context.Background(), &allGateways); err != nil {
return reconcile.Result{}, fmt.Errorf("error listing gateways")
}
// Get all the Gateways for the Accepted=true GatewayClass.
var gatewaysForClass []*gatewayapi_v1beta1.Gateway
for i := range allGateways.Items {
if string(allGateways.Items[i].Spec.GatewayClassName) == acceptedGatewayClass.Name {
gatewaysForClass = append(gatewaysForClass, &allGateways.Items[i])
}
}
if len(gatewaysForClass) == 0 {
r.log.Info("No gateways found for accepted gateway class")
r.eventHandler.OnDelete(&gatewayapi_v1beta1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Namespace: request.Namespace,
Name: request.Name,
}})
return reconcile.Result{}, nil
}
// Find the oldest Gateway, using alphabetical order
// as a tiebreaker.
var oldest *gatewayapi_v1beta1.Gateway
for _, gw := range gatewaysForClass {
switch {
case oldest == nil:
oldest = gw
case gw.CreationTimestamp.Before(&oldest.CreationTimestamp):
oldest = gw
case gw.CreationTimestamp.Equal(&oldest.CreationTimestamp):
if fmt.Sprintf("%s/%s", gw.Namespace, gw.Name) < fmt.Sprintf("%s/%s", oldest.Namespace, oldest.Name) {
oldest = gw
}
}
}
// Set the "Accepted" condition to false for all gateways
// except the oldest. The oldest will have its status set
// by the DAG processor, so don't set it here.
for _, gw := range gatewaysForClass {
if gw == oldest {
continue
}
if r.statusUpdater != nil {
r.statusUpdater.Send(k8s.StatusUpdate{
NamespacedName: k8s.NamespacedNameOf(gw),
Resource: &gatewayapi_v1beta1.Gateway | {
r := &gatewayReconciler{
log: log,
client: mgr.GetClient(),
eventHandler: eventHandler,
statusUpdater: statusUpdater,
gatewayClassControllerName: gatewayapi_v1beta1.GatewayController(gatewayClassControllerName),
// Set up a source.Channel that will trigger reconciles
// for all GatewayClasses when this Contour process is
// elected leader, to ensure that their statuses are up
// to date.
eventSource: make(chan event.GenericEvent),
}
c, err := controller.NewUnmanaged("gateway-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return nil, err
}
if err := mgr.Add(&noLeaderElectionController{c}); err != nil {
return nil, err
} | identifier_body |
FieldClassifierAndKeywords.py | ', "input.txt"]
pos_file = open("output.txt", 'w')
p = subprocess.Popen(command, stdout=pos_file, shell=False)
p.wait()
## s is the question string after POS tagging
pos_file.close()
f = codecs.open("output.txt","r")
s = f.readline().strip()
Keywords = self.extract(s)
#KeywordsWithWeight = keywordWeight(s)
kw = wordWithWeight2()
return [questionTag,Keywords,kw]
pattern_person = re.compile(ur"谁|哪位", re.UNICODE)
pattern_time = re.compile(ur"什么时候|(哪|几.*(年|月|日|天|朝代))", re.UNICODE)
pattern_loc = re.compile(ur"哪.*(地|国|省|市|城|岛|山|湖|洋|河|海)", re.UNICODE)
pattern_integer = re.compile(ur"几任", re.UNICODE)
pattern_decimal = re.compile(ur"率|比例", re.UNICODE)
# question types: Name, Location, Time, Other
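# typeClassify tries the hand-written regex rules first and only falls back to the
# trained naive-Bayes classifier when no rule fires, so the cheap pattern rules take
# precedence over the statistical model.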
def typeClassify(self,question):
# Use regex to classify
result = self.regexClassify(question)
if result is not None:
return result
words = jieba.cut(question, cut_all = False)
ques=[]
for i in words:
ques.append(i)
t1 = time.time()
result = self.nbClassifier(ques)
t2 = time.time() - t1
print t2
return result
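# tagQues sums each word's per-class log-probabilities over the segmented question and
# returns the label with the largest total: a naive-Bayes argmax over the four classes
# 人 (person), 时间 (time), 地点 (location) and 名词 (noun/other).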
def tagQues(self,que,wordSet):
tag =[0,0,0,0]
for i in que:
i = i.encode("utf-8")
if wordSet.has_key(i):
tag[0] = tag[0] + wordSet[i][0]
tag[1] = tag[1] + wordSet[i][1]
tag[2] = tag[2] + wordSet[i][2]
tag[3] = tag[3] + wordSet[i][3]
inx = tag.index(max(tag))
if inx == 0:
tg = "人"
return tg
elif inx ==1:
tg = "时间"
return tg
elif inx == 2:
tg = "地点"
return tg
else:
tg = "名词"
return tg
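# nbClassifier builds an add-one (Laplace) smoothed naive-Bayes model from the training
# files: for a word w and class c, log P(w|c) = log((count(w, c) + 1) / (tokens(c) + |V|)),
# where |V| is the number of distinct words seen in training. A rough standalone sketch of
# that smoothing step (illustrative only):
# def laplace_log_prob(word_count, class_tokens, vocab_size):
#     return math.log((word_count + 1) / float(class_tokens + vocab_size))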
def nbClassifier(self,question):
f1 = open("out-put.txt", "r")
f2 = open("ques_classifier_training.txt","r")
wordSet = {}
c1 = 0
c2 = 0
c3 = 0
c4 = 0
while True:
s1 = f1.readline()
s2 = f2.readline()
if len(s1) == 0:
break
else:
l1 = s1.split()
l2 = s2.split(':')
type = l2[1]
type = type.strip('\n')
if type == "人":
for w in l1:
c1 = c1 + 1
if wordSet.has_key(w):
wordSet[w][0] = wordSet[w][0]+1
else:
wordSet[w] = [1,0,0,0]
elif type == "时间":
for w in l1:
c2 = c2 + 1
if wordSet.has_key(w):
wordSet[w][1] = wordSet[w][1] + 1
else:
wordSet[w] = [0, 1, 0, 0]
elif type == "地点":
for w in l1:
c3 = c3 + 1
if wordSet.has_key(w):
wordSet[w][2] = wordSet[w][2] + 1
else:
wordSet[w] = [0, 0, 1, 0]
elif type == "名词":
for w in l1:
c4 = c4 +1
if wordSet.has_key(w):
wordSet[w][3] = wordSet[w][3] + 1
else:
wordSet[w] = [0, 0, 0, 1]
for i in wordSet:
wordSet[i] = [wordSet[i][0]+1,wordSet[i][1]+1,wordSet[i][2]+1,wordSet[i][3]+1]
for i in wordSet:
wordSet[i] = [math.log(wordSet[i][0] / float(c1 + len(wordSet))),
math.log(wordSet[i][1] / float(c2 + len(wordSet))),
math.log(wordSet[i][2] / float(c3 + len(wordSet))),
math.log(wordSet[i][3] / float(c4 + len(wordSet)))]
tag=self.tagQues(question,wordSet)
return tag
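# regexClassify is a rule-based first pass: a question containing 谁 or 哪位 is labelled
# "person", one containing 什么时候 is labelled "time", one matching 率 or 比例 is labelled
# "decimal", and so on; anything unmatched returns None so the caller can fall back to the
# naive-Bayes classifier.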
def regexClassify(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None:
return "person"
elif self.pattern_loc.search(question.decode('utf8')) is not None:
return "loc"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
elif self.pattern_integer.search(question.decode('utf8')) is not None:
return "integer"
elif self.pattern_decimal.search(question.decode('utf8')) is not None:
return "decimal"
else:
return None
def target(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None\
or self.pattern_loc.search(question.decode('utf8')) is not None:
return "name"
elif self.pattern_integer.search(question.decode('utf8')) is not None \
or self.pattern_decimal.search(question.decode('utf8')) is not None:
return "quantity"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
else:
return None
def nbClassify(self,question, model_dict):
from operator import add
classifyArray = [0,0,0,0]
for word in question.split(' '):
if model_dict.has_key(word):
classifyArray = map(add, classifyArray, model_dict[word])
summation = sum(classifyArray)
classifyArray = [x - summation/4 for x in classifyArray]
if classifyArray[0] == max(classifyArray):
return "person"
elif classifyArray[1] == max(classifyArray):
return "loc"
elif classifyArray[2] == max(classifyArray):
return "time"
elif classifyArray[3] == max(classifyArray):
return "other"
def extract(self,question):
keywords = set()
for word in question.split():
sep = word.split('#')
word = sep[0]
tag = sep[1]
| while True:
s = f.readline()
if len(s) ==0:
break
else:
s= s.strip("\r\n")
stopWord[s] = 1
for word in question.split():
sep = word.split('#')
word = sep[0].decode("utf-8")
tag = sep[1]
if stopWord.has_key(word):
continue
else:
if tag[0] =='N':
keyword.append(word)
else:
keyword.append(word)
keyword.append(word)
return keyword
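# keyweight looks up the question's POS-tag sequence in tagwithweight.txt (a JSON list of
# [tag_sequence, weights] pairs); when an exact same-length match is found, the stored
# non-zero weights are assigned to the words at the corresponding positions.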
def keyweight(self,question):
words = []
tag = []
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(sep[1])
f = open("tagwithweight.txt","r")
pairs = json.loads(f.read())
finaltagWeights = []
for i in pairs:
f =False
if len(i[0]) != len(tag):
continue
for n in range(0,len(i[0])):
if i[0][n] == tag[n]:
f = True
else:
f = False
break
if f == True:
finaltagWeights = i[1]
break
key = {}
for i in range(0,len(finaltagWeights)):
if finaltagWeights[i] == 0:
continue
else:
key[words[i]] = finaltagWeights[i]
return key
def wordWithWeight2():
words = []
tag = []
f = codecs.open("output.txt", "r")
question = f.readline().strip()
f.close()
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(unicode(sep[1],'unicode-escape'))
f = open("tagwithweight.txt", "r")
pairs = json.loads(f.read())
maxSimilarity = 0
maxtag = []
maxweight = []
f.close()
for p in pairs:
s = SimilarityComparison(tag, p[0])
if s >maxSimilarity:
maxSimilarity = s
maxtag = p[0]
maxweight = p[1 | if tag[0] == 'N':
keywords.add(word)
return keywords
def keywordWeight(self,question):
keyword = []
f = codecs.open("chinese_stopwords.txt","r","utf-8")
stopWord ={}
| identifier_body |
FieldClassifierAndKeywords.py | agger', "input.txt"]
pos_file = open("output.txt", 'w')
p = subprocess.Popen(command, stdout=pos_file, shell=False)
p.wait()
##s就是pos后的question
pos_file.close()
f = codecs.open("output.txt","r")
s = f.readline().strip()
Keywords = self.extract(s)
#KeywordsWithWeight = keywordWeight(s)
kw = wordWithWeight2()
return [questionTag,Keywords,kw]
pattern_person = re.compile(ur"谁|哪位", re.UNICODE)
pattern_time = re.compile(ur"什么时候|(哪|几.*(年|月|日|天|朝代))", re.UNICODE)
pattern_loc = re.compile(ur"哪.*(地|国|省|市|城|岛|山|湖|洋|河|海)", re.UNICODE)
pattern_integer = re.compile(ur"几任", re.UNICODE)
pattern_decimal = re.compile(ur"率|比例", re.UNICODE)
# question types: Name, Location, Time, Other
def typeClassify(self,question):
# Use regex to classify
result = self.regexClassify(question)
if result is not None:
return result
words = jieba.cut(question, cut_all = False)
ques=[]
for i in words:
ques.append(i)
t1 = time.time()
result = self.nbClassifier(ques)
t2 = time.time() - t1
print t2
return result
def tagQues(self,que,wordSet):
tag =[0,0,0,0]
for i in que:
i = i.encode("utf-8")
if wordSet.has_key(i):
tag[0] = tag[0] + wordSet[i][0]
tag[1] = tag[1] + wordSet[i][1]
tag[2] = tag[2] + wordSet[i][2]
tag[3] = tag[3] + wordSet[i][3]
inx = tag.index(max(tag))
if inx == 0:
tg = "人"
return tg
elif inx ==1:
tg = "时间"
return tg
elif inx == 2:
tg = "地点"
return tg
else:
tg = "名词"
return tg
def nbClassifier(self,question):
f1 = open("out-put.txt", "r")
f2 = open("ques_classifier_training.txt","r")
wordSet = {}
c1 = 0
c2 = 0
c3 = 0
c4 = 0
while True:
s1 = f1.readline()
s2 = f2.readline()
if len(s1) == 0:
break
else:
l1 = s1.split()
l2 = s2.split(':')
type = l2[1]
type = type.strip('\n')
if type == "人":
for w in l1:
c1 = c1 + 1
if wordSet.has_key(w):
wordSet[w][0] = wordSet[w][0]+1
else:
wordSet[w] = [1,0,0,0]
elif type == "时间":
for w in l1:
c2 = c2 + 1
if wordSet.has_key(w):
wordSet[w][1] = wordSet[w][1] + 1
else:
wordSet[w] = [0, 1, 0, 0]
elif type == "地点":
for w in l1:
c3 = c3 + 1
if wordSet.has_key(w):
wordSet[w][2] = wordSet[w][2] + 1 | else:
wordSet[w] = [0, 0, 1, 0]
elif type == "名词":
for w in l1:
c4 = c4 +1
if wordSet.has_key(w):
wordSet[w][3] = wordSet[w][3] + 1
else:
wordSet[w] = [0, 0, 0, 1]
for i in wordSet:
wordSet[i] = [wordSet[i][0]+1,wordSet[i][1]+1,wordSet[i][2]+1,wordSet[i][3]+1]
for i in wordSet:
wordSet[i] = [math.log(wordSet[i][0]/float(c1+len(wordSet))),math.log(wordSet[i][1]/float(c2+len(wordSet))),math.log(wordSet[i][2]/float(c3+len(wordSet))),math.log(wordSet[i][3]/float(c4+len(wordSet)))]
tag=self.tagQues(question,wordSet)
return tag
def regexClassify(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None:
return "person"
elif self.pattern_loc.search(question.decode('utf8')) is not None:
return "loc"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
elif self.pattern_integer.search(question.decode('utf8')) is not None:
return "integer"
elif self.pattern_decimal.search(question.decode('utf8')) is not None:
return "decimal"
else:
return None
def target(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None\
or self.pattern_loc.search(question.decode('utf8')) is not None:
return "name"
elif self.pattern_integer.search(question.decode('utf8')) is not None \
or self.pattern_decimal.search(question.decode('utf8')) is not None:
return "quantity"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
else:
return None
def nbClassify(self,question, model_dict):
from operator import add
classifyArray = [0,0,0,0]
        for word in question.split(' '):
if model_dict.has_key(word):
classifyArray = map(add, classifyArray, model_dict[word])
summation = sum(classifyArray)
classifyArray = [x - summation/4 for x in classifyArray]
if classifyArray[0] == max(classifyArray):
return "person"
elif classifyArray[1] == max(classifyArray):
return "loc"
elif classifyArray[2] == max(classifyArray):
return "time"
elif classifyArray[3] == max(classifyArray):
return "other"
def extract(self,question):
keywords = set()
for word in question.split():
sep = word.split('#')
word = sep[0]
tag = sep[1]
if tag[0] == 'N':
keywords.add(word)
return keywords
def keywordWeight(self,question):
keyword = []
f = codecs.open("chinese_stopwords.txt","r","utf-8")
stopWord ={}
while True:
s = f.readline()
if len(s) ==0:
break
else:
s= s.strip("\r\n")
stopWord[s] = 1
for word in question.split():
sep = word.split('#')
word = sep[0].decode("utf-8")
tag = sep[1]
if stopWord.has_key(word):
continue
else:
if tag[0] =='N':
keyword.append(word)
else:
keyword.append(word)
keyword.append(word)
return keyword
def keyweight(self,question):
words = []
tag = []
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(sep[1])
f = open("tagwithweight.txt","r")
pairs = json.loads(f.read())
finaltagWeights = []
for i in pairs:
f =False
if len(i[0]) != len(tag):
continue
for n in range(0,len(i[0])):
if i[0][n] == tag[n]:
f = True
else:
f = False
break
if f == True:
finaltagWeights = i[1]
break
key = {}
for i in range(0,len(finaltagWeights)):
if finaltagWeights[i] == 0:
continue
else:
key[words[i]] = finaltagWeights[i]
return key
def wordWithWeight2():
words = []
tag = []
f = codecs.open("output.txt", "r")
question = f.readline().strip()
f.close()
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(unicode(sep[1],'unicode-escape'))
f = open("tagwithweight.txt", "r")
pairs = json.loads(f.read())
maxSimilarity = 0
maxtag = []
maxweight = []
f.close()
for p in pairs:
s = SimilarityComparison(tag, p[0])
if s >maxSimilarity:
maxSimilarity = s
maxtag = p[0]
maxweight = p[1]
| random_line_split |
|
FieldClassifierAndKeywords.py | ', "input.txt"]
pos_file = open("output.txt", 'w')
p = subprocess.Popen(command, stdout=pos_file, shell=False)
p.wait()
##s就是pos后的question
pos_file.close()
f = codecs.open("output.txt","r")
s = f.readline().strip()
Keywords = self.extract(s)
#KeywordsWithWeight = keywordWeight(s)
kw = wordWithWeight2()
return [questionTag,Keywords,kw]
pattern_person = re.compile(ur"谁|哪位", re.UNICODE)
pattern_time = re.compile(ur"什么时候|(哪|几.*(年|月|日|天|朝代))", re.UNICODE)
pattern_loc = re.compile(ur"哪.*(地|国|省|市|城|岛|山|湖|洋|河|海)", re.UNICODE)
pattern_integer = re.compile(ur"几任", re.UNICODE)
pattern_decimal = re.compile(ur"率|比例", re.UNICODE)
# question types: Name, Location, Time, Other
def typeClassify(self,question):
# Use regex to classify
result = self.regexClassify(question)
if result is not None:
return result
words = jieba.cut(question, cut_all = False)
ques=[]
for i in words:
ques.append(i)
t1 = time.time()
result = self.nbClassifier(ques)
t2 = time.time() - t1
print t2
return result
def tagQues(self,que,wordSet):
tag =[0,0,0,0]
for i in que:
i = i.encode("utf-8")
if wordSet.has_key(i):
tag[0] = tag[0] + wordSet[i][0]
tag[1] = tag[1] + wordSet[i][1]
tag[2] = tag[2] + wordSet[i][2]
tag[3] = tag[3] + wordSet[i][3]
inx = tag.index(max(tag))
if inx == 0:
tg = "人"
return tg
elif inx ==1:
tg = "时间"
return tg
elif inx == 2:
tg = "地点"
return tg
else:
tg = "名词"
return tg
def nbClassifier(self,question):
f1 = open("out-put.txt", "r")
f2 = open("ques_classifier_training.txt","r")
wordSet = {}
c1 = 0
c2 = 0
c3 = 0
c4 = 0
while True:
s1 = f1.readline()
s2 = f2.readline()
if len(s1) == 0:
break
else:
l1 = s1.split()
l2 = s2.split(':')
type = l2[1]
type = type.strip('\n')
if type == "人":
for w in l1:
c1 = c1 + 1
if wordSet.has_key(w):
wordSet[w][0] = wordSet[w][0]+1
else:
wordSet[w] = [1,0,0,0]
elif type == "时间":
for w in l1:
c2 = c2 + 1
if wordSet.has_key(w):
wordSet[w][1] = wordSet[w][1] + 1
else:
wordSet[w] = [0, 1, 0, 0]
elif type == "地点":
for w in l1:
c3 = c3 + 1
if wordSet.has_key(w):
wordSet[w][2] = wordSet[w][2] + 1
else:
wordSet[w] = [0, 0, 1, 0]
elif type == "名词":
for w in l1:
c4 = c4 +1
if wordSet.has_key(w):
wordSet[w][3] = wordSet[w][3] + 1
else:
wordSet[w] = [0, 0, 0, 1]
for i in wordSet:
wordSet[i] = [wordSet[i][0]+1,wordSet[i][1]+1,wordSet[i][2]+1,wordSet[i][3]+1]
for i in wordSet:
wordSet[i] = [math.log(wordSet[i][0]/float(c1+len(wordSet))),math.log(wordSet[i][1]/float(c2+len(wordSet))),math.log(wordSet[i][2]/float(c3+len(wordSet))),math.log(wordSet[i][3]/float(c4+len(wordSet)))]
tag=self.tagQues(question,wordSet)
return tag
def regexClassify(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None:
return "person"
elif self.pattern_loc.search(question.decode('utf8')) is not None:
return "loc"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
elif self.pattern_integer.search(question.decode('utf8')) is not None:
return "integer"
elif self.pattern_decimal.search(question.decode('utf8')) is not None:
return "decimal"
else:
return None
def target(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None\
or self.pattern_loc.search(question.decode('utf8')) is not None:
return "name"
elif self.pattern_integer.search(question.decode('utf8')) is not None \
or self.pattern_decimal.search(question.decode('utf8')) is not None:
return "quantity"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
else:
return None
def nbClassify(self,question, model_dict):
from operator import add
classifyArray = [0,0,0,0]
        for word in question.split(' '):
if model_dict.has_key(word):
classifyArray = map(add, classifyArray, model_dict[word])
summation = sum(classifyArray)
classifyArray = [x - summat | urn "loc"
elif classifyArray[2] == max(classifyArray):
return "time"
elif classifyArray[3] == max(classifyArray):
return "other"
def extract(self,question):
keywords = set()
for word in question.split():
sep = word.split('#')
word = sep[0]
tag = sep[1]
if tag[0] == 'N':
keywords.add(word)
return keywords
def keywordWeight(self,question):
keyword = []
f = codecs.open("chinese_stopwords.txt","r","utf-8")
stopWord ={}
while True:
s = f.readline()
if len(s) ==0:
break
else:
s= s.strip("\r\n")
stopWord[s] = 1
for word in question.split():
sep = word.split('#')
word = sep[0].decode("utf-8")
tag = sep[1]
if stopWord.has_key(word):
continue
else:
if tag[0] =='N':
keyword.append(word)
else:
keyword.append(word)
keyword.append(word)
return keyword
def keyweight(self,question):
words = []
tag = []
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(sep[1])
f = open("tagwithweight.txt","r")
pairs = json.loads(f.read())
finaltagWeights = []
for i in pairs:
f =False
if len(i[0]) != len(tag):
continue
for n in range(0,len(i[0])):
if i[0][n] == tag[n]:
f = True
else:
f = False
break
if f == True:
finaltagWeights = i[1]
break
key = {}
for i in range(0,len(finaltagWeights)):
if finaltagWeights[i] == 0:
continue
else:
key[words[i]] = finaltagWeights[i]
return key
def wordWithWeight2():
words = []
tag = []
f = codecs.open("output.txt", "r")
question = f.readline().strip()
f.close()
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(unicode(sep[1],'unicode-escape'))
f = open("tagwithweight.txt", "r")
pairs = json.loads(f.read())
maxSimilarity = 0
maxtag = []
maxweight = []
f.close()
for p in pairs:
s = SimilarityComparison(tag, p[0])
if s >maxSimilarity:
maxSimilarity = s
maxtag = p[0]
maxweight = p[1 | ion/4 for x in classifyArray]
if classifyArray[0] == max(classifyArray):
return "person"
elif classifyArray[1] == max(classifyArray):
ret | conditional_block |
FieldClassifierAndKeywords.py | :
def __init__(self):
words = jieba.cut("我是谁", cut_all=False)
def FieldClassifierAndKeywords(self,question):
##读入问题,调用分词工具分词,同时去除标点符号
delset = string.punctuation
question = question.translate(None, delset)
questionTag = self.typeClassify(question)
f = open("input.txt","w")
words = jieba.cut(question, cut_all = False)
s = ""
for i in words:
s = s+i.encode('utf-8')+" "
f.write(s)
f.close()
command = ["stanford-postagger-full-2015-12-09/stanford-postagger.sh",
'stanford-postagger-full-2015-12-09/models/chinese-distsim.tagger', "input.txt"]
pos_file = open("output.txt", 'w')
p = subprocess.Popen(command, stdout=pos_file, shell=False)
p.wait()
##s就是pos后的question
pos_file.close()
f = codecs.open("output.txt","r")
s = f.readline().strip()
Keywords = self.extract(s)
#KeywordsWithWeight = keywordWeight(s)
kw = wordWithWeight2()
return [questionTag,Keywords,kw]
pattern_person = re.compile(ur"谁|哪位", re.UNICODE)
pattern_time = re.compile(ur"什么时候|(哪|几.*(年|月|日|天|朝代))", re.UNICODE)
pattern_loc = re.compile(ur"哪.*(地|国|省|市|城|岛|山|湖|洋|河|海)", re.UNICODE)
pattern_integer = re.compile(ur"几任", re.UNICODE)
pattern_decimal = re.compile(ur"率|比例", re.UNICODE)
# question types: Name, Location, Time, Other
def typeClassify(self,question):
# Use regex to classify
result = self.regexClassify(question)
if result is not None:
return result
words = jieba.cut(question, cut_all = False)
ques=[]
for i in words:
ques.append(i)
t1 = time.time()
result = self.nbClassifier(ques)
t2 = time.time() - t1
print t2
return result
def tagQues(self,que,wordSet):
tag =[0,0,0,0]
for i in que:
i = i.encode("utf-8")
if wordSet.has_key(i):
tag[0] = tag[0] + wordSet[i][0]
tag[1] = tag[1] + wordSet[i][1]
tag[2] = tag[2] + wordSet[i][2]
tag[3] = tag[3] + wordSet[i][3]
inx = tag.index(max(tag))
if inx == 0:
tg = "人"
return tg
elif inx ==1:
tg = "时间"
return tg
elif inx == 2:
tg = "地点"
return tg
else:
tg = "名词"
return tg
def nbClassifier(self,question):
f1 = open("out-put.txt", "r")
f2 = open("ques_classifier_training.txt","r")
wordSet = {}
c1 = 0
c2 = 0
c3 = 0
c4 = 0
while True:
s1 = f1.readline()
s2 = f2.readline()
if len(s1) == 0:
break
else:
l1 = s1.split()
l2 = s2.split(':')
type = l2[1]
type = type.strip('\n')
if type == "人":
for w in l1:
c1 = c1 + 1
if wordSet.has_key(w):
wordSet[w][0] = wordSet[w][0]+1
else:
wordSet[w] = [1,0,0,0]
elif type == "时间":
for w in l1:
c2 = c2 + 1
if wordSet.has_key(w):
wordSet[w][1] = wordSet[w][1] + 1
else:
wordSet[w] = [0, 1, 0, 0]
elif type == "地点":
for w in l1:
c3 = c3 + 1
if wordSet.has_key(w):
wordSet[w][2] = wordSet[w][2] + 1
else:
wordSet[w] = [0, 0, 1, 0]
elif type == "名词":
for w in l1:
c4 = c4 +1
if wordSet.has_key(w):
wordSet[w][3] = wordSet[w][3] + 1
else:
wordSet[w] = [0, 0, 0, 1]
for i in wordSet:
wordSet[i] = [wordSet[i][0]+1,wordSet[i][1]+1,wordSet[i][2]+1,wordSet[i][3]+1]
for i in wordSet:
wordSet[i] = [math.log(wordSet[i][0]/float(c1+len(wordSet))),math.log(wordSet[i][1]/float(c2+len(wordSet))),math.log(wordSet[i][2]/float(c3+len(wordSet))),math.log(wordSet[i][3]/float(c4+len(wordSet)))]
tag=self.tagQues(question,wordSet)
return tag
def regexClassify(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None:
return "person"
elif self.pattern_loc.search(question.decode('utf8')) is not None:
return "loc"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
elif self.pattern_integer.search(question.decode('utf8')) is not None:
return "integer"
elif self.pattern_decimal.search(question.decode('utf8')) is not None:
return "decimal"
else:
return None
def target(self,question):
if self.pattern_person.search(question.decode('utf8')) is not None\
or self.pattern_loc.search(question.decode('utf8')) is not None:
return "name"
elif self.pattern_integer.search(question.decode('utf8')) is not None \
or self.pattern_decimal.search(question.decode('utf8')) is not None:
return "quantity"
elif self.pattern_time.search(question.decode('utf8')) is not None:
return "time"
else:
return None
def nbClassify(self,question, model_dict):
from operator import add
classifyArray = [0,0,0,0]
        for word in question.split(' '):
if model_dict.has_key(word):
classifyArray = map(add, classifyArray, model_dict[word])
summation = sum(classifyArray)
classifyArray = [x - summation/4 for x in classifyArray]
if classifyArray[0] == max(classifyArray):
return "person"
elif classifyArray[1] == max(classifyArray):
return "loc"
elif classifyArray[2] == max(classifyArray):
return "time"
elif classifyArray[3] == max(classifyArray):
return "other"
def extract(self,question):
keywords = set()
for word in question.split():
sep = word.split('#')
word = sep[0]
tag = sep[1]
if tag[0] == 'N':
keywords.add(word)
return keywords
def keywordWeight(self,question):
keyword = []
f = codecs.open("chinese_stopwords.txt","r","utf-8")
stopWord ={}
while True:
s = f.readline()
if len(s) ==0:
break
else:
s= s.strip("\r\n")
stopWord[s] = 1
for word in question.split():
sep = word.split('#')
word = sep[0].decode("utf-8")
tag = sep[1]
if stopWord.has_key(word):
continue
else:
if tag[0] =='N':
keyword.append(word)
else:
keyword.append(word)
keyword.append(word)
return keyword
def keyweight(self,question):
words = []
tag = []
for word in question.split():
sep = word.split('#')
words.append(sep[0])
tag.append(sep[1])
f = open("tagwithweight.txt","r")
pairs = json.loads(f.read())
finaltagWeights = []
for i in pairs:
f =False
if len(i[0]) != len(tag):
continue
for n in range(0,len(i[0])):
if i[0][n] == tag[n]:
f = True
else:
f = False
break
if f == True:
finaltagWeights = i[1]
break
key = {}
for i in range(0,len(finaltagWeights)):
if finaltagWeights[i] | FieldClassifierAndKeywords | identifier_name |
|
index.js | +"\" ,\"nick_name\":\""+nickname+"\" ,\"im_sign\":\""+resapp.data.im_sig+"\" ,\"user_icon_url\":\""+bbs_icon+"\" ,\"txy_sign\":\""+resapp.data.file_sig+"\" ,\"im_identifier\":\""+resapp.data.im_id+"\"}");
});
this.getlist();
window.scrollTo(0,0);
}
componentWillUnmou | entStatus = false;
document.removeEventListener('scroll',this.scroll);
}
appzhibocallback=()=>{
this.page = 1;
this.getlist();
}
getlist=()=>{
this.page==1 ? this.setState({ "liststatus" : "pending" ,"list" : [] }) : this.setState({ "liststatus" : "pending" });
setTimeout(()=>{
if(this.datatype==1){ /*创作*/
api.FetchPost('/hyb-stu/stu_user_hot_point/find_main_article',{
UserKey : this.props.userstate.userKey,
token : this.token,
body : JSON.stringify({ page_size : this.page ,create_time : this.create_time})
}).then(({res})=>{
if(this.page>1 && api.isEmptyObject(res.data.article_map)){
return false;
}
this.remain = res.data.remain;
if(!api.isEmptyObject(res.data.article_map)){
if(api.isEmptyObject(this.state.chuangzuolist)){
this.setState({ "chuangzuolist" : res.data.article_map ,"liststatus" : 'success' },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}else{
let list = Object.assign({}, this.state.chuangzuolist);
let newlist = {};
Object.keys(list).map((key)=>{
Object.keys(res.data.article_map).map((key2)=>{
if(!list[key2]){
if(newlist[key2]){
newlist[key2] = [];
}
newlist[key2] = res.data.article_map[key2];
}else{
if(!newlist[key]){
newlist[key] = [];
}
if(res.data.article_map[key]!=void 0){
newlist[key] = list[key].concat(res.data.article_map[key]);
}else{
newlist[key] = list[key];
}
}
});
});
this.setState({ "chuangzuolist" : Object.assign(newlist, list) ,"liststatus" : 'success' },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}
let last = Object.keys(res.data.article_map)[Object.keys(res.data.article_map).length-1];
last = res.data.article_map[last];
last = last[last.length-1];
this.create_time = last.create_time;
}else{
this.setState({ "liststatus" : 'nodata' });
}
});
}else if(this.datatype==2){ /*直播*/
api.FetchPost('/hyb-stu/stu_talk/list',{
UserKey : this.props.userstate.userKey,
token : this.token,
body : JSON.stringify({ page : this.page ,size : 10 })
}).then(({res})=>{
this.total = res.data.total;
if(this.page==1){
if(res.data.list.length){
this.componentStatus && this.setState({ "list" : res.data.list ,"liststatus" : "success" },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}else{
this.componentStatus && this.setState({ "list" : [] ,"liststatus" : "nodata" });
}
}else{
this.componentStatus && this.setState({ "list" : this.state.list.concat(res.data.list) ,"liststatus" : "success" },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}
});
}
},400);
}
scroll=(event)=>{
let scrolltop = document.documentElement.scrollTop || document.body.scrollTop;
let el = '';
if(this.datatype==2){
el = document.querySelectorAll('ul.livelist li:last-child')[0];
}
if(this.datatype==1){
el = document.querySelectorAll('.box2:last-child')[0];
}
if(!el){
return;
}
if(this.datatype==2 && this.page>=this.total){ /*直播*/
return;
}
if(this.datatype==1 && !this.remain){ /*创作*/
return;
}
if(this.state.liststatus!='pending'){
if(scrolltop + window.innerHeight + 10 >= Math.ceil(document.body.scrollHeight)){
++this.page;
this.getlist();
}
}
}
openNavOnnLive=(event)=>{
const id = api.closest(event.target ,'li').getAttribute('data-id');
const actiontype = "looklivejump?param={\"token\":\""+this.token+"\" ,\"meeting_id\":\""+id+"\" }";
api.webview(actiontype);
}
changetype=(event)=>{
if(!event.target.classList.contains('on')){
this.datatype = event.target.getAttribute('data-type');
let li = api.closest(event.target ,'ul').querySelectorAll('li');
for(let item of li){
item.classList.remove('on');
}
event.target.classList.add('on');
this.page = 1;
this.create_time = '';
this.getlist();
}
}
actionzan=(event)=>{
const el = api.closest(event.target ,'span') ,el_i = el.querySelectorAll('i')[0] ,div = api.closest(event.target ,'div.box');
let praise_type = 0;
if(el_i.classList.contains('on')){
praise_type = 1;
}
api.FetchPost('/hyb-stu/stu_user_hot_point/praise_count_inc',{
UserKey : this.props.userstate.userKey,
token : this.props.userstate.token,
body : JSON.stringify({
praise_type : praise_type,
hot_point_id : el.getAttribute('data-id'),
hot_point_user_id : el.getAttribute('data-userid')
})
}).then(({res})=>{
if(praise_type==1){
el.querySelectorAll('em')[0].textContent = --el.querySelectorAll('em')[0].textContent;
el_i.classList.remove('on');
}else{
el.querySelectorAll('em')[0].textContent = ++el.querySelectorAll('em')[0].textContent;
el_i.classList.add('on');
}
})
}
chuangzuo=()=>{
return <div className="indexdynamic">
{ this.page==1 && this.state.liststatus=='pending' ? <Tip text="" type="loading" />
: this.page==1 && this.state.liststatus=='nodata' ? <Tip text="抱歉,暂时没有相关内容" type="nodata" />
: this.page==1 && this.state.liststatus=='error' ? <Tip text="出错误了" type="tiperro" />
: Object.keys(this.state.chuangzuolist).sort().reverse().map((key ,index)=>{
return <div key={key} className="dynamic">
<h1>{key}</h1>
{
this.state.chuangzuolist[key].map((item ,index)=>{
return <div className="box2" key={index} data-key={key} data-id={item.hot_point_id}>
<div className="boxhd2">
<img src={item.icon ? item.icon : usericonimg} />
<p>
<Link to={{ "pathname" : "/creationsocial" ,state : { "pointid" : item.hot_point_id } }} >
<span>{item.name}</span>
<span>{item.collection_time}</span>
</Link>
</p>
</div>
<div className="boxbd2">
<ul className="imglist">
<li className="long">
{
item.cover_picture ? <Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}><img src={item.cover_picture} /></Link> : ''
}
<Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}>
<p>
<em>{item.summary.length > 30 ? api.substring(item.summary ,30 ,'...') : item.summary}</em>
{item.summary.length > 30 ? <span>【长文】</span> : ''}
</p>
</Link>
</li>
</ul>
</div>
<div className="ft2">
<span><i className="icon b"></i>{item.comment_count}</span>
<span onClick={this.actionzan} data-status={item.praise_status} data-id={item.hot_point_id} data-userid={item.user_id}><i className={item.praise_status==1 ? "icon a on" : "icon a"}></i><em>{item.praise_count}</em></span>
</div>
</div>
})
| nt(){
this.compon | identifier_name |
index.js | mobile+"\" ,\"nick_name\":\""+nickname+"\" ,\"im_sign\":\""+resapp.data.im_sig+"\" ,\"user_icon_url\":\""+bbs_icon+"\" ,\"txy_sign\":\""+resapp.data.file_sig+"\" ,\"im_identifier\":\""+resapp.data.im_id+"\"}");
});
this.getlist();
window.scrollTo(0,0);
}
componentWillUnmount(){
this.componentStatus = false;
document.removeEventListener('scroll',this.scroll);
}
appzhibocallback=()=>{
this.page = 1;
this.getlist();
}
getlist=()=>{
this.page==1 ? this.setState({ "liststatus" : "pending" ,"list" : [] }) : this.setState({ "liststatus" : "pending" });
setTimeout(()=>{
if(this.datatype==1){ /*创作*/
api.FetchPost('/hyb-stu/stu_user_hot_point/find_main_article',{
UserKey : this.props.userstate.userKey,
token : this.token,
body : JSON.stringify({ page_size : this.page ,create_time : this.create_time})
}).then(({res})=>{
if(this.page>1 && api.isEmptyObject(res.data.article_map)){
return false;
}
this.remain = res.data.remain;
if(!api.isEmptyObject(res.data.article_map)){
if(api.isEmptyObject(this.state.chuangzuolist)){
this.setState({ "chuangzuolist" : res.data.article_map ,"liststatus" : 'success' },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}else{
let list = Object.assign({}, this.state.chuangzuolist);
let newlist = {};
Object.keys(list).map((key)=>{
Object.keys(res.data.article_map).map((key2)=>{
if(!list[key2]){
if(newlist[key2]){
newlist[key2] = [];
}
newlist[key2] = res.data.article_map[key2];
}else{
if(!newlist[key]){
newlist[key] = [];
}
if(res.data.article_map[key]!=void 0){
newlist[key] = list[key].concat(res.data.article_map[key]);
}else{
newlist[key] = list[key];
}
}
});
});
this.setState({ "chuangzuolist" : Object.assign(newlist, list) ,"liststatus" : 'success' },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}
let last = Object.keys(res.data.article_map)[Object.keys(res.data.article_map).length-1];
last = res.data.article_map[last];
last = last[last.length-1];
this.create_time = last.create_time;
}else{
this.setState({ "liststatus" : 'nodata' });
}
});
}else if(this.datatype==2){ /*直播*/
api.Fetc | }
},400);
}
scroll=(
event)=>{
let scrolltop = document.documentElement.scrollTop || document.body.scrollTop;
let el = '';
if(this.datatype==2){
el = document.querySelectorAll('ul.livelist li:last-child')[0];
}
if(this.datatype==1){
el = document.querySelectorAll('.box2:last-child')[0];
}
if(!el){
return;
}
if(this.datatype==2 && this.page>=this.total){ /*直播*/
return;
}
if(this.datatype==1 && !this.remain){ /*创作*/
return;
}
if(this.state.liststatus!='pending'){
if(scrolltop + window.innerHeight + 10 >= Math.ceil(document.body.scrollHeight)){
++this.page;
this.getlist();
}
}
}
openNavOnnLive=(event)=>{
const id = api.closest(event.target ,'li').getAttribute('data-id');
const actiontype = "looklivejump?param={\"token\":\""+this.token+"\" ,\"meeting_id\":\""+id+"\" }";
api.webview(actiontype);
}
changetype=(event)=>{
if(!event.target.classList.contains('on')){
this.datatype = event.target.getAttribute('data-type');
let li = api.closest(event.target ,'ul').querySelectorAll('li');
for(let item of li){
item.classList.remove('on');
}
event.target.classList.add('on');
this.page = 1;
this.create_time = '';
this.getlist();
}
}
actionzan=(event)=>{
const el = api.closest(event.target ,'span') ,el_i = el.querySelectorAll('i')[0] ,div = api.closest(event.target ,'div.box');
let praise_type = 0;
if(el_i.classList.contains('on')){
praise_type = 1;
}
api.FetchPost('/hyb-stu/stu_user_hot_point/praise_count_inc',{
UserKey : this.props.userstate.userKey,
token : this.props.userstate.token,
body : JSON.stringify({
praise_type : praise_type,
hot_point_id : el.getAttribute('data-id'),
hot_point_user_id : el.getAttribute('data-userid')
})
}).then(({res})=>{
if(praise_type==1){
el.querySelectorAll('em')[0].textContent = --el.querySelectorAll('em')[0].textContent;
el_i.classList.remove('on');
}else{
el.querySelectorAll('em')[0].textContent = ++el.querySelectorAll('em')[0].textContent;
el_i.classList.add('on');
}
})
}
chuangzuo=()=>{
return <div className="indexdynamic">
{ this.page==1 && this.state.liststatus=='pending' ? <Tip text="" type="loading" />
: this.page==1 && this.state.liststatus=='nodata' ? <Tip text="抱歉,暂时没有相关内容" type="nodata" />
: this.page==1 && this.state.liststatus=='error' ? <Tip text="出错误了" type="tiperro" />
: Object.keys(this.state.chuangzuolist).sort().reverse().map((key ,index)=>{
return <div key={key} className="dynamic">
<h1>{key}</h1>
{
this.state.chuangzuolist[key].map((item ,index)=>{
return <div className="box2" key={index} data-key={key} data-id={item.hot_point_id}>
<div className="boxhd2">
<img src={item.icon ? item.icon : usericonimg} />
<p>
<Link to={{ "pathname" : "/creationsocial" ,state : { "pointid" : item.hot_point_id } }} >
<span>{item.name}</span>
<span>{item.collection_time}</span>
</Link>
</p>
</div>
<div className="boxbd2">
<ul className="imglist">
<li className="long">
{
item.cover_picture ? <Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}><img src={item.cover_picture} /></Link> : ''
}
<Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}>
<p>
<em>{item.summary.length > 30 ? api.substring(item.summary ,30 ,'...') : item.summary}</em>
{item.summary.length > 30 ? <span>【长文】</span> : ''}
</p>
</Link>
</li>
</ul>
</div>
<div className="ft2">
<span><i className="icon b"></i>{item.comment_count}</span>
<span onClick={this.actionzan} data-status={item.praise_status} data-id={item.hot_point_id} data-userid={item.user_id}><i className={item.praise_status==1 ? "icon a on" : "icon a"}></i><em>{item.praise_count}</em></span>
</div>
</div>
})
| hPost('/hyb-stu/stu_talk/list',{
UserKey : this.props.userstate.userKey,
token : this.token,
body : JSON.stringify({ page : this.page ,size : 10 })
}).then(({res})=>{
this.total = res.data.total;
if(this.page==1){
if(res.data.list.length){
this.componentStatus && this.setState({ "list" : res.data.list ,"liststatus" : "success" },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}else{
this.componentStatus && this.setState({ "list" : [] ,"liststatus" : "nodata" });
}
}else{
this.componentStatus && this.setState({ "list" : this.state.list.concat(res.data.list) ,"liststatus" : "success" },()=>{
document.addEventListener('scroll',this.scroll,false);
});
}
});
| conditional_block |
index.js | this.page = 1;
this.create_time = '';
this.getlist();
}
}
actionzan=(event)=>{
const el = api.closest(event.target ,'span') ,el_i = el.querySelectorAll('i')[0] ,div = api.closest(event.target ,'div.box');
let praise_type = 0;
if(el_i.classList.contains('on')){
praise_type = 1;
}
api.FetchPost('/hyb-stu/stu_user_hot_point/praise_count_inc',{
UserKey : this.props.userstate.userKey,
token : this.props.userstate.token,
body : JSON.stringify({
praise_type : praise_type,
hot_point_id : el.getAttribute('data-id'),
hot_point_user_id : el.getAttribute('data-userid')
})
}).then(({res})=>{
if(praise_type==1){
el.querySelectorAll('em')[0].textContent = --el.querySelectorAll('em')[0].textContent;
el_i.classList.remove('on');
}else{
el.querySelectorAll('em')[0].textContent = ++el.querySelectorAll('em')[0].textContent;
el_i.classList.add('on');
}
})
}
chuangzuo=()=>{
return <div className="indexdynamic">
{ this.page==1 && this.state.liststatus=='pending' ? <Tip text="" type="loading" />
: this.page==1 && this.state.liststatus=='nodata' ? <Tip text="抱歉,暂时没有相关内容" type="nodata" />
: this.page==1 && this.state.liststatus=='error' ? <Tip text="出错误了" type="tiperro" />
: Object.keys(this.state.chuangzuolist).sort().reverse().map((key ,index)=>{
return <div key={key} className="dynamic">
<h1>{key}</h1>
{
this.state.chuangzuolist[key].map((item ,index)=>{
return <div className="box2" key={index} data-key={key} data-id={item.hot_point_id}>
<div className="boxhd2">
<img src={item.icon ? item.icon : usericonimg} />
<p>
<Link to={{ "pathname" : "/creationsocial" ,state : { "pointid" : item.hot_point_id } }} >
<span>{item.name}</span>
<span>{item.collection_time}</span>
</Link>
</p>
</div>
<div className="boxbd2">
<ul className="imglist">
<li className="long">
{
item.cover_picture ? <Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}><img src={item.cover_picture} /></Link> : ''
}
<Link to={{ "pathname" : "/creationdetail" ,state : { "hotpointid" : item.hot_point_id ,"name" : item.name ,"icon" : item.icon ,"userid" : item.user_id } }}>
<p>
<em>{item.summary.length > 30 ? api.substring(item.summary ,30 ,'...') : item.summary}</em>
{item.summary.length > 30 ? <span>【长文】</span> : ''}
</p>
</Link>
</li>
</ul>
</div>
<div className="ft2">
<span><i className="icon b"></i>{item.comment_count}</span>
<span onClick={this.actionzan} data-status={item.praise_status} data-id={item.hot_point_id} data-userid={item.user_id}><i className={item.praise_status==1 ? "icon a on" : "icon a"}></i><em>{item.praise_count}</em></span>
</div>
</div>
})
}
</div>
})
}
{ this.page > 1 && this.state.liststatus=='pending' ? <Tipshowend text="加载中请稍等"/> : '' }
</div>
}
handleBind = (event)=>{
if(event.type=='touchstart'){
this.touchStart(event);
}else if(event.type=='touchmove'){
this.touchMove(event);
}
}
touchStart = (event)=>{
this.touchY = event.targetTouches[0].pageY;
}
touchMove = (event)=>{
let dir = event.targetTouches[0].pageY - this.touchY ,translateY = 0 ,direction = dir > 0 ? 1 : -1;
const scrollY = document.documentElement.scrollTop || document.body.scrollTop;
const end = ()=>{
if(this.state.translateY>20){
this.appzhibocallback();
setTimeout(()=>{
this.refs.updatamsg.innerHTML = '下拉即可刷新';
},320);
}
this.setState({ "translateY" : 0 });
this.istouchmove = false;
this.updatamsgshow = false;
window.removeEventListener('touchend' ,end);
}
if(direction>0 && scrollY<=0){
translateY = Math.min(dir, 35) / 2 + Math.max(0, dir - 35);
if(translateY>10){
this.updatamsgshow = true;
}
if(translateY>23){
this.refs.updatamsg.innerHTML = '释放即可刷新';
}
if(!this.istouchmove){
window.addEventListener('touchend' ,end ,false);
}
this.setState({ "translateY" : api.damping(translateY) });
this.istouchmove = true;
}
}
zhibo=()=>{
let style = { transform : `translateY(${this.state.translateY}px)` },
style1 = this.updatamsgshow ? { visibility : "visible" ,transform : `translateY(${this.state.translateY/6}px)` } : { transform : `translateY(${this.state.translateY/6}px)` };
return <div className="box" onTouchStart={this.handleBind} onTouchMove={this.handleBind}>
<div className="updatamsg" style={style1}><img src={loadingimg2} /><b ref="updatamsg" >下拉即可刷新</b></div>
<ul className="livelist" style={style}>
{ this.page==1 && this.state.liststatus=='pending' ? <Tip text="" type="loading" /> : '' }
{ this.page==1 && this.state.liststatus=='error' ? <Tip text="出错误了" type="tiperro" /> : '' }
{ this.page==1 && this.state.liststatus=='nodata' ? <Tip text="抱歉,暂时没有相关内容" type="nodata" /> : '' }
{
this.state.list.map((item ,index)=>{
return item.type==0 ? '' : <li data-channel={item.channel_id} data-id={item.id} key={index} onClick={this.openNavOnnLive}>
<p className="title">
<label><img src={item.logo ? item.logo : companylogo} /></label>
<span><em>{item.full_name}</em><em><i className="icon3"></i>开播时间:{item.start_time}</em></span>
</p>
<p className="bo">
{
item.live_status==0
? <label><img src={jijiangkaibo} /><em>即将开播</em></label>
: item.live_status==1 ? <label><img src={item.preview_url ? item.preview_url : zhibo} /><em><i className="onlive"></i>直播中</em></label>
: item.live_status==2 ? <label><img src={item.preview_url ? item.preview_url : chongbo} /><em>观看重播</em></label>
: ''
}
<b className="detail"><i className="icon3"></i>{item.theme}</b>
</p>
</li>
})
}
</ul>
{ this.page > 1 && this.state.liststatus=='pending' ? <Tipshowend text="加载中请稍等"/> : '' }
</div>
}
/*<li onClick={this.changetype} data-type="0">热点</li> <i className="icon jia"></i>*/
/* <li onClick={this.changetype} data-type="1" className="on">创作</li> */
render(){
return(
<div className="indexPage">
<div className="hd">
<ul>
<li onClick={this.changetype} data-type="2">直播</li>
</ul>
</div>
{
this.datatype==1 ? this.chuangzuo() : this.zhibo()
}
</div>
)
}
}
const mapStateToProps = (state ,ownProps) =>{
return {
| userstate : state.UserState
| random_line_split |
|
manager.go | Len() int {
return len(*q)
}
func (q *PriorityQueue) Less(i, j int) bool {
return (*q)[i].NextExecutionTime.Before((*q)[j].NextExecutionTime)
}
func (q *PriorityQueue) Pop() interface{} {
old := *q
n := len(*q)
item := (*q)[n-1]
*q = old[0 : n-1]
return item
}
func (q *PriorityQueue) Push(x interface{}) {
*q = append(*q, x.(*Task))
}
func (taskManager *TaskManager) RegisterTaskSourceHandler(source TaskSource, handler TaskHandler) {
taskManager.handlers[source] = handler
}
func (taskManager *TaskManager) internalRemoveTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if !ok {
return fmt.Errorf("task not exists: %v", key)
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if !ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks not exists: %v", key)
}
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) internalAddTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if ok {
return fmt.Errorf("task exists")
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks exists")
}
taskManager.TaskMap.inner[key] = task
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) getNextWakeupTime() time.Time {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
if taskManager.PendingQueue.inner.Len() == 0 {
return time.Now().Add(TICK)
} else {
return taskManager.PendingQueue.inner[0].NextExecutionTime
}
}
func (taskManager *TaskManager) popAvaliableTasks(deadline time.Time) []*Task {
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
ret := make([]*Task, 0)
for len(taskManager.PendingQueue.inner) > 0 {
next := taskManager.PendingQueue.inner[0].NextExecutionTime
if next.Before(deadline) || next.Equal(deadline) {
p := heap.Pop(&taskManager.PendingQueue.inner)
ret = append(ret, p.(*Task))
} else {
break
}
}
return ret
}
func (*TaskManager) GetTaskLog(id int) (*TaskLog, error) {
return nil, nil
}
func (taskManager *TaskManager) NewOneshotTask(at time.Time,
identifier string,
source TaskSource,
retry, retryInterval int,
context interface{}) *Task {
if _, ok := taskManager.handlers[source]; !ok {
panic("please register your type first")
}
return &Task{
UserIdentifier: identifier,
Type: TASK_TYPE_ONESHOT,
Source: source,
NextExecutionTime: at,
Context: context,
Retry: retry,
RetryInterval: retryInterval,
LastExecutionTime: time.Time{},
Handler: taskManager.handlers[source],
}
}
func (taskManager *TaskManager) addTaskToPendingQueue(task *Task) {
taskManager.updateTaskStatus(task, STATUS_PENDING)
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
heap.Push(&taskManager.PendingQueue.inner, task)
select {
case taskManager.wake <- true:
default:
}
}
func (taskManager *TaskManager) CancelTask(uid string, source TaskSource) error {
task := &Task{}
if err := taskManager.rdb.Where("uid = ? and source = ?", uid, source).First(task).Error; err != nil {
return err
}
if err := taskManager.saveCancelTask(task); err != nil {
return err
}
taskManager.PendingQueue.Lock()
for idx, iter := range taskManager.PendingQueue.inner {
if task.Equal(iter) {
//remove element
taskManager.PendingQueue.inner = append(taskManager.PendingQueue.inner[:idx], taskManager.PendingQueue.inner[idx+1:]...)
break
}
}
taskManager.PendingQueue.Unlock()
if err := taskManager.internalRemoveTask(task); err != nil {
return err
}
select {
case taskManager.wake <- true:
default:
}
return nil
}
func (taskManager *TaskManager) GetTasks(pn, ps int) ([]*Task, int) {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
var tmp []*Task
offset := pn * ps
if offset < len(taskManager.PendingQueue.inner) {
if offset+pn >= len(taskManager.PendingQueue.inner) {
tmp = taskManager.PendingQueue.inner[offset:]
} else {
tmp = taskManager.PendingQueue.inner[offset : offset+pn]
}
}
ret := make([]*Task, len(tmp))
for idx, t := range tmp {
task := *t
ret[idx] = &task
}
return ret, len(taskManager.PendingQueue.inner)/pn + 1
}
func (taskManager *TaskManager) AddAndScheduleTask(task *Task) error {
now := time.Now()
if task.NextExecutionTime.Before(now) {
return fmt.Errorf("can't add task than now: %v < %v", task.NextExecutionTime, now)
}
task.Status = STATUS_INIT
if err := taskManager.saveTaskToDB(task); err != nil {
return fmt.Errorf("save task to db error : %v", err)
}
if err := taskManager.internalAddTask(task); err != nil {
return fmt.Errorf("add internal task error: %v", err)
}
	log4go.Info("new task %v added type:%v next execution time %s", task.UserIdentifier, task.Type, task.NextExecutionTime)
taskManager.addTaskToPendingQueue(task)
return nil
}
func (taskManager *TaskManager) doneTask(task *Task, status TaskStatus) {
key := TaskKey{
Uid: task.UserIdentifier,
Source: task.Source,
}
switch task.Type {
case TASK_TYPE_ONESHOT:
switch status {
case STATUS_SUCC:
taskManager.saveSuccessTask(task)
fallthrough
case STATUS_FAIL:
taskManager.updateTaskStatus(task, STATUS_FAIL)
taskManager.TaskMap.Lock()
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
}
default:
panic("not support task type yet")
}
}
func (taskManager *TaskManager) runTasks(tasks []*Task) {
var wg sync.WaitGroup
for _, task := range tasks {
wg.Add(1)
		go func(task *Task) { // pass the loop variable explicitly so each goroutine works on its own task
			defer wg.Done()
			b := task.Retry
			for {
				taskManager.updateTaskStatus(task, STATUS_EXEC)
				err := task.Handler.DoTask(task.UserIdentifier, task.Context)
				if err != nil {
					if task.Retry > 0 {
						log4go.Global.Info("task %v-%v fails, retry (%v/%v)", task.Type, task.UserIdentifier, task.Retry, b)
						task.Retry--
						time.Sleep(time.Second * time.Duration(task.RetryInterval))
					} else {
						break
					}
				} else {
					taskManager.saveSuccessTask(task)
					return
				}
			}
			taskManager.doneTask(task, STATUS_FAIL)
		}(task)
}
wg.Wait()
}
func (taskManager *TaskManager) Run() {
for {
now := time.Now()
next := taskManager.getNextWakeupTime()
var duration time.Duration
if now.After(next) {
duration = time.Duration(0)
} else {
duration = next.Sub(now)
}
log4go.Global.Debug("wait for duration %v next:%v now:%v", duration, next, now)
select {
case <-taskManager.stop:
log4go.Global.Info("taskmanager closed")
return
case <-time.After(duration):
tasks := taskManager.popAvaliableTasks(now)
if len(tasks) > 0 {
log4go.Global.Debug("run tasks [%d]", len(tasks))
go taskManager.runTasks(tasks)
}
case <-taskManager.wake:
log4go.Global.Debug("taskmanager waked")
continue
}
}
}
func (taskManager *TaskManager) Stop() {
taskManager.stop <- true
}
func (taskManager *TaskManager) SyncTask() error {
tasks := []*Task{}
if err := taskManager.rdb.Where("status in (?)", []TaskStatus{STATUS_PENDING, STATUS_EXEC, STATUS_INIT}).Find(&tasks).Error; err != nil {
return err
}
for _, task := range tasks {
var context interface{}
var err error
if _, ok := taskManager.handlers[task.Source]; !ok | {
log4go.Warn("unknown task source :%v", task.Source)
continue
} | conditional_block |
|
manager.go | click"`
Reach int `gorm:"column:reach"`
ClickRate float32 `gorm:"column:click_rate"`
Retry int `gorm:"-"`
RetryInterval int `gorm:"-"`
Timeout int `gorm:"-"`
Handler TaskHandler `gorm:"-" json:"-"`
Context interface{} `gorm:"-"`
}
type TaskLog struct {
TaskId int
Status int
Start time.Time
End time.Time
}
type TaskManager struct {
TaskMap struct {
sync.RWMutex
inner map[TaskKey]*Task
}
PendingQueue struct {
sync.RWMutex
inner PriorityQueue
}
stop chan bool
wake chan bool
wdb *gorm.DB
rdb *gorm.DB
handlers map[TaskSource]TaskHandler
}
type PriorityQueue []*Task
var (
GlobalTaskManager *TaskManager
)
func (Task) TableName() string {
return "tb_task"
}
func (t *Task) Equal(other *Task) bool {
return t.UserIdentifier == other.UserIdentifier && t.Source == other.Source
}
func NewTaskManager(rdb, wdb *gorm.DB) (*TaskManager, error) {
m := &TaskManager{
TaskMap: struct {
sync.RWMutex
inner map[TaskKey]*Task
}{
inner: make(map[TaskKey]*Task),
},
PendingQueue: struct {
sync.RWMutex
inner PriorityQueue
}{
inner: make(PriorityQueue, 0),
},
stop: make(chan bool),
wake: make(chan bool),
wdb: wdb,
rdb: rdb,
handlers: make(map[TaskSource]TaskHandler),
}
heap.Init(&m.PendingQueue.inner)
return m, nil
}
func (q *PriorityQueue) Swap(i, j int) {
(*q)[i], (*q)[j] = (*q)[j], (*q)[i]
}
func (q *PriorityQueue) Len() int {
return len(*q)
}
func (q *PriorityQueue) Less(i, j int) bool {
return (*q)[i].NextExecutionTime.Before((*q)[j].NextExecutionTime)
}
func (q *PriorityQueue) Pop() interface{} {
old := *q
n := len(*q)
item := (*q)[n-1]
*q = old[0 : n-1]
return item
}
func (q *PriorityQueue) | (x interface{}) {
*q = append(*q, x.(*Task))
}
func (taskManager *TaskManager) RegisterTaskSourceHandler(source TaskSource, handler TaskHandler) {
taskManager.handlers[source] = handler
}
func (taskManager *TaskManager) internalRemoveTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if !ok {
return fmt.Errorf("task not exists: %v", key)
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if !ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks not exists: %v", key)
}
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) internalAddTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if ok {
return fmt.Errorf("task exists")
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks exists")
}
taskManager.TaskMap.inner[key] = task
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) getNextWakeupTime() time.Time {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
if taskManager.PendingQueue.inner.Len() == 0 {
return time.Now().Add(TICK)
} else {
return taskManager.PendingQueue.inner[0].NextExecutionTime
}
}
func (taskManager *TaskManager) popAvaliableTasks(deadline time.Time) []*Task {
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
ret := make([]*Task, 0)
for len(taskManager.PendingQueue.inner) > 0 {
next := taskManager.PendingQueue.inner[0].NextExecutionTime
if next.Before(deadline) || next.Equal(deadline) {
p := heap.Pop(&taskManager.PendingQueue.inner)
ret = append(ret, p.(*Task))
} else {
break
}
}
return ret
}
func (*TaskManager) GetTaskLog(id int) (*TaskLog, error) {
return nil, nil
}
func (taskManager *TaskManager) NewOneshotTask(at time.Time,
identifier string,
source TaskSource,
retry, retryInterval int,
context interface{}) *Task {
if _, ok := taskManager.handlers[source]; !ok {
panic("please register your type first")
}
return &Task{
UserIdentifier: identifier,
Type: TASK_TYPE_ONESHOT,
Source: source,
NextExecutionTime: at,
Context: context,
Retry: retry,
RetryInterval: retryInterval,
LastExecutionTime: time.Time{},
Handler: taskManager.handlers[source],
}
}
func (taskManager *TaskManager) addTaskToPendingQueue(task *Task) {
taskManager.updateTaskStatus(task, STATUS_PENDING)
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
heap.Push(&taskManager.PendingQueue.inner, task)
select {
case taskManager.wake <- true:
default:
}
}
func (taskManager *TaskManager) CancelTask(uid string, source TaskSource) error {
task := &Task{}
if err := taskManager.rdb.Where("uid = ? and source = ?", uid, source).First(task).Error; err != nil {
return err
}
if err := taskManager.saveCancelTask(task); err != nil {
return err
}
taskManager.PendingQueue.Lock()
for idx, iter := range taskManager.PendingQueue.inner {
if task.Equal(iter) {
//remove element
taskManager.PendingQueue.inner = append(taskManager.PendingQueue.inner[:idx], taskManager.PendingQueue.inner[idx+1:]...)
break
}
}
taskManager.PendingQueue.Unlock()
if err := taskManager.internalRemoveTask(task); err != nil {
return err
}
select {
case taskManager.wake <- true:
default:
}
return nil
}
func (taskManager *TaskManager) GetTasks(pn, ps int) ([]*Task, int) {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
var tmp []*Task
offset := pn * ps
if offset < len(taskManager.PendingQueue.inner) {
if offset+pn >= len(taskManager.PendingQueue.inner) {
tmp = taskManager.PendingQueue.inner[offset:]
} else {
tmp = taskManager.PendingQueue.inner[offset : offset+pn]
}
}
ret := make([]*Task, len(tmp))
for idx, t := range tmp {
task := *t
ret[idx] = &task
}
return ret, len(taskManager.PendingQueue.inner)/pn + 1
}
func (taskManager *TaskManager) AddAndScheduleTask(task *Task) error {
now := time.Now()
if task.NextExecutionTime.Before(now) {
return fmt.Errorf("can't add task than now: %v < %v", task.NextExecutionTime, now)
}
task.Status = STATUS_INIT
if err := taskManager.saveTaskToDB(task); err != nil {
return fmt.Errorf("save task to db error : %v", err)
}
if err := taskManager.internalAddTask(task); err != nil {
return fmt.Errorf("add internal task error: %v", err)
}
	log4go.Info("new task %v added type:%v next execution time %s", task.UserIdentifier, task.Type, task.NextExecutionTime)
taskManager.addTaskToPendingQueue(task)
return nil
}
func (taskManager *TaskManager) doneTask(task *Task, status TaskStatus) {
key := TaskKey{
Uid: task.UserIdentifier,
Source: task.Source,
}
switch task.Type {
case TASK_TYPE_ONESHOT:
switch status {
case STATUS_SUCC:
taskManager.saveSuccessTask(task)
fallthrough
case STATUS_FAIL:
taskManager.updateTaskStatus(task, STATUS_FAIL)
taskManager.TaskMap.Lock()
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
}
default:
panic("not support task type yet")
}
}
func (taskManager *TaskManager) runTasks(tasks []*Task) {
var wg sync.WaitGroup
for _, task := range tasks {
wg.Add(1)
go func() {
defer wg.Done()
b := task.Retry
for {
taskManager.updateTaskStatus(task, STATUS_EXEC)
err := task.Handler.DoTask(task.UserIdentifier, task.Context)
if err != nil {
if task.R | Push | identifier_name |
manager.go | ([]*Task, 0)
for len(taskManager.PendingQueue.inner) > 0 {
next := taskManager.PendingQueue.inner[0].NextExecutionTime
if next.Before(deadline) || next.Equal(deadline) {
p := heap.Pop(&taskManager.PendingQueue.inner)
ret = append(ret, p.(*Task))
} else {
break
}
}
return ret
}
func (*TaskManager) GetTaskLog(id int) (*TaskLog, error) {
return nil, nil
}
func (taskManager *TaskManager) NewOneshotTask(at time.Time,
identifier string,
source TaskSource,
retry, retryInterval int,
context interface{}) *Task {
if _, ok := taskManager.handlers[source]; !ok {
panic("please register your type first")
}
return &Task{
UserIdentifier: identifier,
Type: TASK_TYPE_ONESHOT,
Source: source,
NextExecutionTime: at,
Context: context,
Retry: retry,
RetryInterval: retryInterval,
LastExecutionTime: time.Time{},
Handler: taskManager.handlers[source],
}
}
func (taskManager *TaskManager) addTaskToPendingQueue(task *Task) {
taskManager.updateTaskStatus(task, STATUS_PENDING)
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
heap.Push(&taskManager.PendingQueue.inner, task)
select {
case taskManager.wake <- true:
default:
}
}
func (taskManager *TaskManager) CancelTask(uid string, source TaskSource) error {
task := &Task{}
if err := taskManager.rdb.Where("uid = ? and source = ?", uid, source).First(task).Error; err != nil {
return err
}
if err := taskManager.saveCancelTask(task); err != nil {
return err
}
taskManager.PendingQueue.Lock()
for idx, iter := range taskManager.PendingQueue.inner {
if task.Equal(iter) {
//remove element
taskManager.PendingQueue.inner = append(taskManager.PendingQueue.inner[:idx], taskManager.PendingQueue.inner[idx+1:]...)
break
}
}
taskManager.PendingQueue.Unlock()
if err := taskManager.internalRemoveTask(task); err != nil {
return err
}
select {
case taskManager.wake <- true:
default:
}
return nil
}
func (taskManager *TaskManager) GetTasks(pn, ps int) ([]*Task, int) {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
var tmp []*Task
offset := pn * ps
if offset < len(taskManager.PendingQueue.inner) {
if offset+pn >= len(taskManager.PendingQueue.inner) {
tmp = taskManager.PendingQueue.inner[offset:]
} else {
tmp = taskManager.PendingQueue.inner[offset : offset+pn]
}
}
ret := make([]*Task, len(tmp))
for idx, t := range tmp {
task := *t
ret[idx] = &task
}
return ret, len(taskManager.PendingQueue.inner)/pn + 1
}
func (taskManager *TaskManager) AddAndScheduleTask(task *Task) error {
now := time.Now()
if task.NextExecutionTime.Before(now) {
return fmt.Errorf("can't add task than now: %v < %v", task.NextExecutionTime, now)
}
task.Status = STATUS_INIT
if err := taskManager.saveTaskToDB(task); err != nil {
return fmt.Errorf("save task to db error : %v", err)
}
if err := taskManager.internalAddTask(task); err != nil {
return fmt.Errorf("add internal task error: %v", err)
}
	log4go.Info("new task %v added type:%v next execution time %s", task.UserIdentifier, task.Type, task.NextExecutionTime)
taskManager.addTaskToPendingQueue(task)
return nil
}
func (taskManager *TaskManager) doneTask(task *Task, status TaskStatus) {
key := TaskKey{
Uid: task.UserIdentifier,
Source: task.Source,
}
switch task.Type {
case TASK_TYPE_ONESHOT:
switch status {
case STATUS_SUCC:
taskManager.saveSuccessTask(task)
fallthrough
case STATUS_FAIL:
taskManager.updateTaskStatus(task, STATUS_FAIL)
taskManager.TaskMap.Lock()
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
}
default:
panic("not support task type yet")
}
}
func (taskManager *TaskManager) runTasks(tasks []*Task) {
var wg sync.WaitGroup
for _, task := range tasks {
wg.Add(1)
		go func(task *Task) { // pass the loop variable explicitly so each goroutine works on its own task
			defer wg.Done()
			b := task.Retry
			for {
				taskManager.updateTaskStatus(task, STATUS_EXEC)
				err := task.Handler.DoTask(task.UserIdentifier, task.Context)
				if err != nil {
					if task.Retry > 0 {
						log4go.Global.Info("task %v-%v fails, retry (%v/%v)", task.Type, task.UserIdentifier, task.Retry, b)
						task.Retry--
						time.Sleep(time.Second * time.Duration(task.RetryInterval))
					} else {
						break
					}
				} else {
					taskManager.saveSuccessTask(task)
					return
				}
			}
			taskManager.doneTask(task, STATUS_FAIL)
		}(task)
}
wg.Wait()
}
func (taskManager *TaskManager) Run() {
for {
now := time.Now()
next := taskManager.getNextWakeupTime()
var duration time.Duration
if now.After(next) {
duration = time.Duration(0)
} else {
duration = next.Sub(now)
}
log4go.Global.Debug("wait for duration %v next:%v now:%v", duration, next, now)
select {
case <-taskManager.stop:
log4go.Global.Info("taskmanager closed")
return
case <-time.After(duration):
tasks := taskManager.popAvaliableTasks(now)
if len(tasks) > 0 {
log4go.Global.Debug("run tasks [%d]", len(tasks))
go taskManager.runTasks(tasks)
}
case <-taskManager.wake:
log4go.Global.Debug("taskmanager waked")
continue
}
}
}
func (taskManager *TaskManager) Stop() {
taskManager.stop <- true
}
func (taskManager *TaskManager) SyncTask() error {
tasks := []*Task{}
if err := taskManager.rdb.Where("status in (?)", []TaskStatus{STATUS_PENDING, STATUS_EXEC, STATUS_INIT}).Find(&tasks).Error; err != nil {
return err
}
for _, task := range tasks {
var context interface{}
var err error
if _, ok := taskManager.handlers[task.Source]; !ok {
log4go.Warn("unknown task source :%v", task.Source)
continue
} else {
task.Handler = taskManager.handlers[task.Source]
context, err = task.Handler.Sync(task.UserIdentifier)
if err != nil {
log4go.Warn("task context sync error: %v", err)
continue
} else {
task.Context = context
}
}
now := time.Now()
if task.NextExecutionTime.Before(now) {
log4go.Warn("next execution time is to early, just set it to failure")
taskManager.updateTaskStatus(task, STATUS_FAIL)
} else {
taskManager.addTaskToPendingQueue(task)
log4go.Warn("schedule task : [%v]", task.UserIdentifier)
}
}
return nil
}
func (taskManager *TaskManager) updateTaskStatus(task *Task, status TaskStatus) error {
if err := taskManager.wdb.Model(task).Update("status", status).Error; err != nil {
return fmt.Errorf("update taks error : %v", status)
}
log4go.Info("update task [%v] status [%v] ", task.UserIdentifier, status)
return nil
}
func (taskManager *TaskManager) saveSuccessTask(task *Task) error {
log4go.Info("update task [%v] status SUCCESS", task.UserIdentifier)
task.LastExecutionTime = time.Now()
if err := taskManager.wdb.Model(task).Update(
map[string]interface{}{
"status": STATUS_SUCC,
"last_execution_time": task.LastExecutionTime}).Error; err != nil {
return fmt.Errorf("update delivery time and status error")
}
task.Status = STATUS_SUCC
return nil
}
func (taskManager *TaskManager) saveCancelTask(task *Task) error {
log4go.Info("update task [%v] status canceld", task.UserIdentifier)
task.CanceledAt = time.Now()
if err := taskManager.wdb.Model(task).Update(
map[string]interface{}{
"status": STATUS_CANCEL,
"canceled_at": task.CanceledAt}).Error; err != nil {
return fmt.Errorf("update canceld time and status error")
}
task.Status = STATUS_CANCEL
return nil
}
func (taskManager *TaskManager) saveTaskLog(tasklog *TaskLog) {
panic("error")
}
func (taskManager *TaskManager) saveTaskToDB(task *Task) error | {
var err error
if err = taskManager.wdb.Create(task).Error; err != nil {
return err
}
log4go.Info("saved task %d to db", task.ID)
return nil
} | identifier_body |
|
manager.go |
type TaskKey struct {
Source TaskSource
Uid string
}
type Task struct {
ID uint `gorm:"column:id;primary_key"`
CreatedAt time.Time `gorm:"column:created_at"`
UpdatedAt time.Time `gorm:"column:updated_at"`
CanceledAt time.Time `gorm:"column:canceled_at"`
UserIdentifier string `gorm:"column:uid;type:varchar(32);not null;index"`
Type TaskType `gorm:"column:type;type:tinyint(4)"`
Source TaskSource `gorm:"column:source;type:tinyint(4)"`
Period int `gorm:"column:period;type:int(11)"`
LastExecutionTime time.Time `gorm:"column:last_execution_time"`
NextExecutionTime time.Time `gorm:"column:next_execution_time"`
Status TaskStatus `gorm:"column:status;type:tinyint(4);index"`
Click int `gorm:"column:click"`
Reach int `gorm:"column:reach"`
ClickRate float32 `gorm:"column:click_rate"`
Retry int `gorm:"-"`
RetryInterval int `gorm:"-"`
Timeout int `gorm:"-"`
Handler TaskHandler `gorm:"-" json:"-"`
Context interface{} `gorm:"-"`
}
type TaskLog struct {
TaskId int
Status int
Start time.Time
End time.Time
}
type TaskManager struct {
TaskMap struct {
sync.RWMutex
inner map[TaskKey]*Task
}
PendingQueue struct {
sync.RWMutex
inner PriorityQueue
}
stop chan bool
wake chan bool
wdb *gorm.DB
rdb *gorm.DB
handlers map[TaskSource]TaskHandler
}
type PriorityQueue []*Task
var (
GlobalTaskManager *TaskManager
)
func (Task) TableName() string {
return "tb_task"
}
func (t *Task) Equal(other *Task) bool {
return t.UserIdentifier == other.UserIdentifier && t.Source == other.Source
}
func NewTaskManager(rdb, wdb *gorm.DB) (*TaskManager, error) {
m := &TaskManager{
TaskMap: struct {
sync.RWMutex
inner map[TaskKey]*Task
}{
inner: make(map[TaskKey]*Task),
},
PendingQueue: struct {
sync.RWMutex
inner PriorityQueue
}{
inner: make(PriorityQueue, 0),
},
stop: make(chan bool),
wake: make(chan bool),
wdb: wdb,
rdb: rdb,
handlers: make(map[TaskSource]TaskHandler),
}
heap.Init(&m.PendingQueue.inner)
return m, nil
}
func (q *PriorityQueue) Swap(i, j int) {
(*q)[i], (*q)[j] = (*q)[j], (*q)[i]
}
func (q *PriorityQueue) Len() int {
return len(*q)
}
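// Less orders tasks by NextExecutionTime, so together with container/heap the
// pending queue behaves as a min-heap: the earliest-due task is always at index 0.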
func (q *PriorityQueue) Less(i, j int) bool {
return (*q)[i].NextExecutionTime.Before((*q)[j].NextExecutionTime)
}
func (q *PriorityQueue) Pop() interface{} {
old := *q
n := len(*q)
item := (*q)[n-1]
*q = old[0 : n-1]
return item
}
func (q *PriorityQueue) Push(x interface{}) {
*q = append(*q, x.(*Task))
}
func (taskManager *TaskManager) RegisterTaskSourceHandler(source TaskSource, handler TaskHandler) {
taskManager.handlers[source] = handler
}
func (taskManager *TaskManager) internalRemoveTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if !ok {
return fmt.Errorf("task not exists: %v", key)
}
taskManager.TaskMap.Lock()
_, ok = taskManager.TaskMap.inner[key]
if !ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks not exists: %v", key)
}
delete(taskManager.TaskMap.inner, key)
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) internalAddTask(task *Task) error {
var ok bool
key := TaskKey{
Source: task.Source,
Uid: task.UserIdentifier,
}
taskManager.TaskMap.RLock()
_, ok = taskManager.TaskMap.inner[key]
taskManager.TaskMap.RUnlock()
if ok {
return fmt.Errorf("task exists")
}
taskManager.TaskMap.Lock()
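// Re-check under the write lock: another goroutine may have inserted the same key
// between releasing the read lock above and acquiring this lock.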
_, ok = taskManager.TaskMap.inner[key]
if ok {
taskManager.TaskMap.Unlock()
return fmt.Errorf("tasks exists")
}
taskManager.TaskMap.inner[key] = task
taskManager.TaskMap.Unlock()
return nil
}
func (taskManager *TaskManager) getNextWakeupTime() time.Time {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
if taskManager.PendingQueue.inner.Len() == 0 {
return time.Now().Add(TICK)
} else {
return taskManager.PendingQueue.inner[0].NextExecutionTime
}
}
func (taskManager *TaskManager) popAvaliableTasks(deadline time.Time) []*Task {
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
ret := make([]*Task, 0)
for len(taskManager.PendingQueue.inner) > 0 {
next := taskManager.PendingQueue.inner[0].NextExecutionTime
if next.Before(deadline) || next.Equal(deadline) {
p := heap.Pop(&taskManager.PendingQueue.inner)
ret = append(ret, p.(*Task))
} else {
break
}
}
return ret
}
func (*TaskManager) GetTaskLog(id int) (*TaskLog, error) {
return nil, nil
}
func (taskManager *TaskManager) NewOneshotTask(at time.Time,
identifier string,
source TaskSource,
retry, retryInterval int,
context interface{}) *Task {
if _, ok := taskManager.handlers[source]; !ok {
panic("please register your type first")
}
return &Task{
UserIdentifier: identifier,
Type: TASK_TYPE_ONESHOT,
Source: source,
NextExecutionTime: at,
Context: context,
Retry: retry,
RetryInterval: retryInterval,
LastExecutionTime: time.Time{},
Handler: taskManager.handlers[source],
}
}
func (taskManager *TaskManager) addTaskToPendingQueue(task *Task) {
taskManager.updateTaskStatus(task, STATUS_PENDING)
taskManager.PendingQueue.Lock()
defer taskManager.PendingQueue.Unlock()
heap.Push(&taskManager.PendingQueue.inner, task)
select {
case taskManager.wake <- true:
default:
}
}
func (taskManager *TaskManager) CancelTask(uid string, source TaskSource) error {
task := &Task{}
if err := taskManager.rdb.Where("uid = ? and source = ?", uid, source).First(task).Error; err != nil {
return err
}
if err := taskManager.saveCancelTask(task); err != nil {
return err
}
taskManager.PendingQueue.Lock()
for idx, iter := range taskManager.PendingQueue.inner {
if task.Equal(iter) {
//remove element
taskManager.PendingQueue.inner = append(taskManager.PendingQueue.inner[:idx], taskManager.PendingQueue.inner[idx+1:]...)
break
}
}
taskManager.PendingQueue.Unlock()
if err := taskManager.internalRemoveTask(task); err != nil {
return err
}
select {
case taskManager.wake <- true:
default:
}
return nil
}
func (taskManager *TaskManager) GetTasks(pn, ps int) ([]*Task, int) {
taskManager.PendingQueue.RLock()
defer taskManager.PendingQueue.RUnlock()
var tmp []*Task
offset := pn * ps
if offset < len(taskManager.PendingQueue.inner) {
if offset+ps >= len(taskManager.PendingQueue.inner) {
tmp = taskManager.PendingQueue.inner[offset:]
} else {
tmp = taskManager.PendingQueue.inner[offset : offset+ps]
}
}
ret := make([]*Task, len(tmp))
for idx, t := range tmp {
task := *t
ret[idx] = &task
}
return ret, len(taskManager.PendingQueue.inner)/ps + 1
}
func (taskManager *TaskManager) AddAndScheduleTask(task *Task) error {
now := time.Now()
if task.NextExecutionTime.Before(now) {
return fmt.Errorf("can't add task than now: %v < %v", task.NextExecutionTime, now)
}
task.Status = STATUS_INIT
if err := taskManager.saveTaskToDB(task); err != nil {
return fmt.Errorf("save task to db error : %v", err)
}
if err := taskManager.internalAddTask(task); err != nil {
return fmt.Errorf("add internal task error: %v", err)
}
log4go.Info("new task %v added type:%v next execaution time %s", task.UserIdentifier, task.Type, task.NextExecution | DoTask(identifier string, context interface{}) error
Sync(uid string) (interface{}, error)
} | random_line_split |
|
retrieval_eval_bleu.py | "
)
parser.add_argument("--name", type=str, help="Part of name of response output file")
parser.add_argument("--no-cuda", action="store_true", help="Use CPU only")
parser.add_argument(
"--normalize-cands", action="store_true", help="Normalize encoded candidates"
)
parser.add_argument(
"--output-folder", type=str, default=None, help="Path to output folder"
)
parser.add_argument(
"--reactonly",
action="store_true",
help="EmpatheticDialogues: only consider Listener responses",
)
parser.add_argument(
"--reddit-cands", action="store_true", help="Include Reddit candidates"
)
parser.add_argument("--reddit-folder", type=str, help="Path to Reddit data folder")
parser.add_argument(
"--save-candidates", action="store_true", help="If true, save candidate files"
)
parser.add_argument(
"--task",
type=str,
choices=["dailydialog", "empchat", "reddit"],
default="empchat",
help="Dataset for context/target-response pairs",
)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
if args.fasttext is not None:
args.max_cand_length += args.fasttext
net, net_dictionary = load_model(args.model, get_opt(existing_opt=args))
if "bert_tokenizer" in net_dictionary:
if args.task == "dailydialog":
raise NotImplementedError("BERT model currently incompatible with DailyDialog!")
if args.bleu_dict is not None:
_, bleu_dictionary = load_model(args.bleu_dict, get_opt(existing_opt=args))
else:
bleu_dictionary = net_dictionary
paramnum = 0
trainable = 0
for parameter in net.parameters():
if parameter.requires_grad:
trainable += parameter.numel()
paramnum += parameter.numel()
print(paramnum, trainable)
print(type(net_dictionary))
NET_PAD_IDX = net_dictionary["words"][PAD_TOKEN]
NET_UNK_IDX = net_dictionary["words"][UNK_TOKEN]
print(type(bleu_dictionary))
BLEU_PAD_IDX = bleu_dictionary["words"][PAD_TOKEN]
BLEU_UNK_IDX = bleu_dictionary["words"][UNK_TOKEN]
BLEU_EOS_IDX = bleu_dictionary["words"][START_OF_COMMENT]
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
actual_ct = [0, 0, 0]
if args.cuda:
net = torch.nn.DataParallel(net)
net.cuda()
net.eval()
def pad(items):
max_len = max(len(i) for i in items)
tensor = torch.LongTensor(len(items), max_len).fill_(NET_PAD_IDX)
for i, sentence in enumerate(items):
tensor[i, : sentence.size(0)] = sentence
return tensor
def build_candidates(
max_cand_length, n_cands=int(1e7), rm_duplicates=True, rm_starting_gt=True
):
global actual_ct
global args
tensor = torch.LongTensor(n_cands, max_cand_length).fill_(NET_PAD_IDX)
i = 0
chunk = 422
if "bert_tokenizer" in net_dictionary:
gt_tokens = torch.LongTensor(
net_dictionary["bert_tokenizer"].convert_tokens_to_ids(["&", "g", "##t"])
)
else:
gt_index = net_dictionary["words"][">"]
lt_index = net_dictionary["words"]["<"]
unk_index = net_dictionary["words"]["<UNK>"]
n_duplicates = n_start_gt = 0
if rm_duplicates:
all_sent = set()
def _has_lts(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
tokens = net_dictionary["bert_tokenizer"].convert_ids_to_tokens(
sentence_.tolist()
)
return "& l ##t" in " ".join(tokens)
else:
return torch.sum(sentence_ == lt_index).gt(0)
def _starts_with_gt(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
if sentence_.size(0) < 3:
return False
else:
return torch.eq(sentence_[:3], gt_tokens).all()
else:
return sentence_[0].item() == gt_index
parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
if args.empchat_cands:
dataset = EmpDataset(
"train",
parlai_dict,
data_folder=args.empchat_folder,
reactonly=False,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence, _ = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
breakpoint_ = i
actual_ct[1] = i
if args.dailydialog_cands:
dataset = DDDataset("train", parlai_dict, data_folder=args.dailydialog_folder)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
bp2 = i
actual_ct[2] = i - breakpoint_
if args.reddit_cands:
while i < n_cands:
chunk += 1
logging.info(f"Loaded {i} / {n_cands} candidates")
dataset = RedditDataset(args.reddit_folder, chunk, net_dictionary)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if sent_length == 0:
print(f"Reddit sentence {data_idx} is of length 0.")
continue
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
actual_ct[0] = i - bp2
logging.info(
f"Loaded {i} candidates, {n_start_gt} start with >, {n_duplicates} duplicates"
)
args.n_candidates = i
return tensor[:i, :], breakpoint_, bp2
def embed_candidates(candidates):
out_tensor = None
i = 0
# ch = candidates.split(2048, dim=0)
ch = candidates.split(1024, dim=0)
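# Encode candidates in fixed-size chunks (1024 rows at a time) so the full candidate
# matrix never has to pass through the encoder at once; presumably this keeps GPU memory bounded.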
for chunk in tqdm(range(len(ch))):
_, encoded_cand = net(None, ch[chunk])
if out_tensor is None:
out_tensor = torch.FloatTensor(candidates.size(0), encoded_cand.size(1))
if args.cuda:
out_tensor = out_tensor.cuda()
if args.normalize_cands:
encoded_cand /= encoded_cand.norm(2, dim=1, keepdim=True)
batch_size = encoded_cand.size(0)
out_tensor[i : i + batch_size] = encoded_cand
i += batch_size
return out_tensor
def get_token_tensor(sentence):
words = net_dictionary["words"]
tokenized = tokenize(sentence, split_sep=None)
return torch.LongTensor([words.get(w, NET_UNK_IDX) for w in tokenized])
def stringify(tensor):
| iwords = net_dictionary["iwords"]
assert tensor.squeeze().dim() == 1, "Wrong tensor size!"
return " ".join(
iwords[i] for i in tensor.squeeze().cpu().numpy() if i != NET_PAD_IDX
).replace("@@ ", "")
# Remove any BPE tokenization | identifier_body |
|
retrieval_eval_bleu.py | parser.add_argument(
"--model", "--pretrained", type=str, default=None, help="Path to model to use"
)
parser.add_argument(
"--n-candidates", type=int, default=int(1e6), help="Max number of candidates"
)
parser.add_argument("--name", type=str, help="Part of name of response output file")
parser.add_argument("--no-cuda", action="store_true", help="Use CPU only")
parser.add_argument(
"--normalize-cands", action="store_true", help="Normalize encoded candidates"
)
parser.add_argument(
"--output-folder", type=str, default=None, help="Path to output folder"
)
parser.add_argument(
"--reactonly",
action="store_true",
help="EmpatheticDialogues: only consider Listener responses",
)
parser.add_argument(
"--reddit-cands", action="store_true", help="Include Reddit candidates"
)
parser.add_argument("--reddit-folder", type=str, help="Path to Reddit data folder")
parser.add_argument(
"--save-candidates", action="store_true", help="If true, save candidate files"
)
parser.add_argument(
"--task",
type=str,
choices=["dailydialog", "empchat", "reddit"],
default="empchat",
help="Dataset for context/target-response pairs",
)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
if args.fasttext is not None:
args.max_cand_length += args.fasttext
net, net_dictionary = load_model(args.model, get_opt(existing_opt=args))
if "bert_tokenizer" in net_dictionary:
if args.task == "dailydialog":
raise NotImplementedError("BERT model currently incompatible with DailyDialog!")
if args.bleu_dict is not None:
_, bleu_dictionary = load_model(args.bleu_dict, get_opt(existing_opt=args))
else:
bleu_dictionary = net_dictionary
paramnum = 0
trainable = 0
for parameter in net.parameters():
if parameter.requires_grad:
trainable += parameter.numel()
paramnum += parameter.numel()
print(paramnum, trainable)
print(type(net_dictionary))
NET_PAD_IDX = net_dictionary["words"][PAD_TOKEN]
NET_UNK_IDX = net_dictionary["words"][UNK_TOKEN]
print(type(bleu_dictionary))
BLEU_PAD_IDX = bleu_dictionary["words"][PAD_TOKEN]
BLEU_UNK_IDX = bleu_dictionary["words"][UNK_TOKEN]
BLEU_EOS_IDX = bleu_dictionary["words"][START_OF_COMMENT]
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
actual_ct = [0, 0, 0]
if args.cuda:
net = torch.nn.DataParallel(net)
net.cuda()
net.eval()
def pad(items):
max_len = max(len(i) for i in items)
tensor = torch.LongTensor(len(items), max_len).fill_(NET_PAD_IDX)
for i, sentence in enumerate(items):
tensor[i, : sentence.size(0)] = sentence
return tensor
def build_candidates(
max_cand_length, n_cands=int(1e7), rm_duplicates=True, rm_starting_gt=True
):
global actual_ct
global args
tensor = torch.LongTensor(n_cands, max_cand_length).fill_(NET_PAD_IDX)
i = 0
chunk = 422
if "bert_tokenizer" in net_dictionary:
gt_tokens = torch.LongTensor(
net_dictionary["bert_tokenizer"].convert_tokens_to_ids(["&", "g", "##t"])
)
else:
gt_index = net_dictionary["words"][">"]
lt_index = net_dictionary["words"]["<"]
unk_index = net_dictionary["words"]["<UNK>"]
n_duplicates = n_start_gt = 0
if rm_duplicates:
all_sent = set()
def _has_lts(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
tokens = net_dictionary["bert_tokenizer"].convert_ids_to_tokens(
sentence_.tolist()
)
return "& l ##t" in " ".join(tokens)
else:
return torch.sum(sentence_ == lt_index).gt(0)
def _starts_with_gt(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
if sentence_.size(0) < 3:
return False
else:
return torch.eq(sentence_[:3], gt_tokens).all()
else:
return sentence_[0].item() == gt_index
parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
if args.empchat_cands:
dataset = EmpDataset(
"train",
parlai_dict,
data_folder=args.empchat_folder,
reactonly=False,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence, _ = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
breakpoint_ = i
actual_ct[1] = i
if args.dailydialog_cands:
dataset = DDDataset("train", parlai_dict, data_folder=args.dailydialog_folder)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
bp2 = i
actual_ct[2] = i - breakpoint_
if args.reddit_cands:
while i < n_cands:
chunk += 1
logging.info(f"Loaded {i} / {n_cands} candidates")
dataset = RedditDataset(args.reddit_folder, chunk, net_dictionary)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if sent_length == 0:
print(f"Reddit sentence {data_idx} is of length 0.")
continue
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
actual_ct[0] = i - bp2
logging.info(
f"Loaded {i} candidates, {n_start_gt} start with >, {n_duplicates} duplicates"
)
args.n_candidates = i
return tensor[:i, :], breakpoint_, bp2
def embed_candidates(candidates):
out_tensor = None
i = 0
# ch = candidates.split(2048, dim=0)
ch = candidates.split(1024, dim=0)
for chunk in tqdm(range(len(ch))):
_, encoded_cand = net(None, ch[chunk])
if out_tensor is None:
out_tensor = torch.FloatTensor(candidates.size(0), encoded_cand.size(1))
if args.cuda:
out_tensor = out_tensor.cuda()
if args.normalize_cands:
encoded_cand /= encoded_cand.norm(2, dim=1, keepdim=True)
batch_size = encoded_cand.size(0)
out_tensor[i : i + batch_size] = encoded_cand
i += batch_size
return out_tensor
def get_token_tensor(sentence):
words = net_dictionary["words"]
tokenized = tokenize(sentence, split_sep=None)
return torch.LongTensor([words.get(w, NET_UNK_IDX) for w in tokenized])
def stringify(tensor):
iwords = net_dictionary["iwords"]
assert tensor.squeeze(). | ) | random_line_split |
|
retrieval_eval_bleu.py | default=int(1e6), help="Max number of candidates"
)
parser.add_argument("--name", type=str, help="Part of name of response output file")
parser.add_argument("--no-cuda", action="store_true", help="Use CPU only")
parser.add_argument(
"--normalize-cands", action="store_true", help="Normalize encoded candidates"
)
parser.add_argument(
"--output-folder", type=str, default=None, help="Path to output folder"
)
parser.add_argument(
"--reactonly",
action="store_true",
help="EmpatheticDialogues: only consider Listener responses",
)
parser.add_argument(
"--reddit-cands", action="store_true", help="Include Reddit candidates"
)
parser.add_argument("--reddit-folder", type=str, help="Path to Reddit data folder")
parser.add_argument(
"--save-candidates", action="store_true", help="If true, save candidate files"
)
parser.add_argument(
"--task",
type=str,
choices=["dailydialog", "empchat", "reddit"],
default="empchat",
help="Dataset for context/target-response pairs",
)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
if args.fasttext is not None:
args.max_cand_length += args.fasttext
net, net_dictionary = load_model(args.model, get_opt(existing_opt=args))
if "bert_tokenizer" in net_dictionary:
if args.task == "dailydialog":
raise NotImplementedError("BERT model currently incompatible with DailyDialog!")
if args.bleu_dict is not None:
_, bleu_dictionary = load_model(args.bleu_dict, get_opt(existing_opt=args))
else:
bleu_dictionary = net_dictionary
paramnum = 0
trainable = 0
for parameter in net.parameters():
if parameter.requires_grad:
trainable += parameter.numel()
paramnum += parameter.numel()
print(paramnum, trainable)
print(type(net_dictionary))
NET_PAD_IDX = net_dictionary["words"][PAD_TOKEN]
NET_UNK_IDX = net_dictionary["words"][UNK_TOKEN]
print(type(bleu_dictionary))
BLEU_PAD_IDX = bleu_dictionary["words"][PAD_TOKEN]
BLEU_UNK_IDX = bleu_dictionary["words"][UNK_TOKEN]
BLEU_EOS_IDX = bleu_dictionary["words"][START_OF_COMMENT]
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
actual_ct = [0, 0, 0]
if args.cuda:
net = torch.nn.DataParallel(net)
net.cuda()
net.eval()
def pad(items):
max_len = max(len(i) for i in items)
tensor = torch.LongTensor(len(items), max_len).fill_(NET_PAD_IDX)
for i, sentence in enumerate(items):
tensor[i, : sentence.size(0)] = sentence
return tensor
def build_candidates(
max_cand_length, n_cands=int(1e7), rm_duplicates=True, rm_starting_gt=True
):
global actual_ct
global args
tensor = torch.LongTensor(n_cands, max_cand_length).fill_(NET_PAD_IDX)
i = 0
chunk = 422
if "bert_tokenizer" in net_dictionary:
gt_tokens = torch.LongTensor(
net_dictionary["bert_tokenizer"].convert_tokens_to_ids(["&", "g", "##t"])
)
else:
gt_index = net_dictionary["words"][">"]
lt_index = net_dictionary["words"]["<"]
unk_index = net_dictionary["words"]["<UNK>"]
n_duplicates = n_start_gt = 0
if rm_duplicates:
|
def _has_lts(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
tokens = net_dictionary["bert_tokenizer"].convert_ids_to_tokens(
sentence_.tolist()
)
return "& l ##t" in " ".join(tokens)
else:
return torch.sum(sentence_ == lt_index).gt(0)
def _starts_with_gt(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
if sentence_.size(0) < 3:
return False
else:
return torch.eq(sentence_[:3], gt_tokens).all()
else:
return sentence_[0].item() == gt_index
parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
if args.empchat_cands:
dataset = EmpDataset(
"train",
parlai_dict,
data_folder=args.empchat_folder,
reactonly=False,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence, _ = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
breakpoint_ = i
actual_ct[1] = i
if args.dailydialog_cands:
dataset = DDDataset("train", parlai_dict, data_folder=args.dailydialog_folder)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
bp2 = i
actual_ct[2] = i - breakpoint_
if args.reddit_cands:
while i < n_cands:
chunk += 1
logging.info(f"Loaded {i} / {n_cands} candidates")
dataset = RedditDataset(args.reddit_folder, chunk, net_dictionary)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if sent_length == 0:
print(f"Reddit sentence {data_idx} is of length 0.")
continue
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
actual_ct[0] = i - bp2
logging.info(
f"Loaded {i} candidates, {n_start_gt} start with >, {n_duplicates} duplicates"
)
args.n_candidates = i
return tensor[:i, :], breakpoint_, bp2
def embed_candidates(candidates):
out_tensor = None
i = 0
# ch = candidates.split(2048, dim=0)
ch = candidates.split(1024, dim=0)
for chunk in tqdm(range(len(ch))):
_, encoded_cand = net(None, ch[chunk])
if out_tensor is None:
out_tensor = torch.FloatTensor(candidates.size(0), encoded_cand.size(1))
if args.cuda:
out_tensor = out_tensor.cuda()
if args.normalize_cands:
encoded_cand /= encoded_cand.norm(2, dim=1, keepdim=True)
batch_size = encoded_cand.size(0)
out_tensor[i : i + batch_size] = encoded_cand
i += batch_size
return out_tensor
def get_token_tensor(sentence):
words = net_dictionary["words"]
tokenized = tokenize(sentence, split_sep=None)
return torch.LongTensor([words.get(w, NET_UNK_IDX) for w in tokenized])
def stringify(tensor):
iwords = net_dictionary["iwords"]
assert tensor.squeeze().dim() == 1, "Wrong tensor size!"
return " ".join(
iwords[i] for i in tensor.squeeze().cpu().numpy() if i != NET_PAD_IDX
| all_sent = set() | conditional_block |
retrieval_eval_bleu.py | default=int(1e6), help="Max number of candidates"
)
parser.add_argument("--name", type=str, help="Part of name of response output file")
parser.add_argument("--no-cuda", action="store_true", help="Use CPU only")
parser.add_argument(
"--normalize-cands", action="store_true", help="Normalize encoded candidates"
)
parser.add_argument(
"--output-folder", type=str, default=None, help="Path to output folder"
)
parser.add_argument(
"--reactonly",
action="store_true",
help="EmpatheticDialogues: only consider Listener responses",
)
parser.add_argument(
"--reddit-cands", action="store_true", help="Include Reddit candidates"
)
parser.add_argument("--reddit-folder", type=str, help="Path to Reddit data folder")
parser.add_argument(
"--save-candidates", action="store_true", help="If true, save candidate files"
)
parser.add_argument(
"--task",
type=str,
choices=["dailydialog", "empchat", "reddit"],
default="empchat",
help="Dataset for context/target-response pairs",
)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
if args.fasttext is not None:
args.max_cand_length += args.fasttext
net, net_dictionary = load_model(args.model, get_opt(existing_opt=args))
if "bert_tokenizer" in net_dictionary:
if args.task == "dailydialog":
raise NotImplementedError("BERT model currently incompatible with DailyDialog!")
if args.bleu_dict is not None:
_, bleu_dictionary = load_model(args.bleu_dict, get_opt(existing_opt=args))
else:
bleu_dictionary = net_dictionary
paramnum = 0
trainable = 0
for parameter in net.parameters():
if parameter.requires_grad:
trainable += parameter.numel()
paramnum += parameter.numel()
print(paramnum, trainable)
print(type(net_dictionary))
NET_PAD_IDX = net_dictionary["words"][PAD_TOKEN]
NET_UNK_IDX = net_dictionary["words"][UNK_TOKEN]
print(type(bleu_dictionary))
BLEU_PAD_IDX = bleu_dictionary["words"][PAD_TOKEN]
BLEU_UNK_IDX = bleu_dictionary["words"][UNK_TOKEN]
BLEU_EOS_IDX = bleu_dictionary["words"][START_OF_COMMENT]
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.set_device(args.gpu)
logger.info(f"CUDA enabled (GPU {args.gpu:d})")
else:
logger.info("Running on CPU only.")
actual_ct = [0, 0, 0]
if args.cuda:
net = torch.nn.DataParallel(net)
net.cuda()
net.eval()
def pad(items):
max_len = max(len(i) for i in items)
tensor = torch.LongTensor(len(items), max_len).fill_(NET_PAD_IDX)
for i, sentence in enumerate(items):
tensor[i, : sentence.size(0)] = sentence
return tensor
def build_candidates(
max_cand_length, n_cands=int(1e7), rm_duplicates=True, rm_starting_gt=True
):
global actual_ct
global args
tensor = torch.LongTensor(n_cands, max_cand_length).fill_(NET_PAD_IDX)
i = 0
chunk = 422
if "bert_tokenizer" in net_dictionary:
gt_tokens = torch.LongTensor(
net_dictionary["bert_tokenizer"].convert_tokens_to_ids(["&", "g", "##t"])
)
else:
gt_index = net_dictionary["words"][">"]
lt_index = net_dictionary["words"]["<"]
unk_index = net_dictionary["words"]["<UNK>"]
n_duplicates = n_start_gt = 0
if rm_duplicates:
all_sent = set()
def _has_lts(sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
tokens = net_dictionary["bert_tokenizer"].convert_ids_to_tokens(
sentence_.tolist()
)
return "& l ##t" in " ".join(tokens)
else:
return torch.sum(sentence_ == lt_index).gt(0)
def | (sentence_) -> bool:
if "bert_tokenizer" in net_dictionary:
if sentence_.size(0) < 3:
return False
else:
return torch.eq(sentence_[:3], gt_tokens).all()
else:
return sentence_[0].item() == gt_index
parlai_dict = ParlAIDictionary.create_from_reddit_style(net_dictionary)
if args.empchat_cands:
dataset = EmpDataset(
"train",
parlai_dict,
data_folder=args.empchat_folder,
reactonly=False,
fasttext=args.fasttext,
fasttext_type=args.fasttext_type,
fasttext_path=args.fasttext_path,
)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence, _ = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
breakpoint_ = i
actual_ct[1] = i
if args.dailydialog_cands:
dataset = DDDataset("train", parlai_dict, data_folder=args.dailydialog_folder)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
bp2 = i
actual_ct[2] = i - breakpoint_
if args.reddit_cands:
while i < n_cands:
chunk += 1
logging.info(f"Loaded {i} / {n_cands} candidates")
dataset = RedditDataset(args.reddit_folder, chunk, net_dictionary)
sample_index = range(len(dataset))
for data_idx in sample_index:
_context, sentence = dataset[data_idx]
sent_length = sentence.size(0)
if sent_length == 0:
print(f"Reddit sentence {data_idx} is of length 0.")
continue
if torch.sum(sentence == unk_index).gt(0):
continue
if _has_lts(sentence):
continue
if sent_length <= max_cand_length:
if _starts_with_gt(sentence) and rm_starting_gt:
n_start_gt += 1
continue
if rm_duplicates:
tuple_sent = tuple(sentence.numpy())
if tuple_sent in all_sent:
n_duplicates += 1
continue
all_sent.add(tuple_sent)
tensor[i, : sentence.size(0)] = sentence
i += 1
if i >= n_cands:
break
actual_ct[0] = i - bp2
logging.info(
f"Loaded {i} candidates, {n_start_gt} start with >, {n_duplicates} duplicates"
)
args.n_candidates = i
return tensor[:i, :], breakpoint_, bp2
def embed_candidates(candidates):
out_tensor = None
i = 0
# ch = candidates.split(2048, dim=0)
ch = candidates.split(1024, dim=0)
for chunk in tqdm(range(len(ch))):
_, encoded_cand = net(None, ch[chunk])
if out_tensor is None:
out_tensor = torch.FloatTensor(candidates.size(0), encoded_cand.size(1))
if args.cuda:
out_tensor = out_tensor.cuda()
if args.normalize_cands:
encoded_cand /= encoded_cand.norm(2, dim=1, keepdim=True)
batch_size = encoded_cand.size(0)
out_tensor[i : i + batch_size] = encoded_cand
i += batch_size
return out_tensor
def get_token_tensor(sentence):
words = net_dictionary["words"]
tokenized = tokenize(sentence, split_sep=None)
return torch.LongTensor([words.get(w, NET_UNK_IDX) for w in tokenized])
def stringify(tensor):
iwords = net_dictionary["iwords"]
assert tensor.squeeze().dim() == 1, "Wrong tensor size!"
return " ".join(
iwords[i] for i in tensor.squeeze().cpu().numpy() if i != NET_PAD_IDX
). | _starts_with_gt | identifier_name |
debugger-script.js | return null;
var location = generatorMirror.sourceLocation() || funcMirror.sourceLocation();
var script = funcMirror.script();
if (script && location) {
return {
scriptId: "" + script.id(),
lineNumber: location.line,
columnNumber: location.column
};
}
return null;
}
/**
* @param {Object} object
* @return {!Array<!{value: *}>|undefined}
*/
DebuggerScript.getCollectionEntries = function(object)
{
var mirror = MakeMirror(object, true /* transient */);
if (mirror.isMap())
return /** @type {!MapMirror} */(mirror).entries();
if (mirror.isSet() || mirror.isIterator()) {
var result = [];
var values = mirror.isSet() ? /** @type {!SetMirror} */(mirror).values() : /** @type {!IteratorMirror} */(mirror).preview();
for (var i = 0; i < values.length; ++i)
result.push({ value: values[i] });
return result;
}
}
/**
* @param {string|undefined} contextData
* @return {number}
*/
DebuggerScript._executionContextId = function(contextData)
{
if (!contextData)
return 0;
var match = contextData.match(/^[^,]*,([^,]*),.*$/);
if (!match)
return 0;
return parseInt(match[1], 10) || 0;
}
/**
* @param {string|undefined} contextData
* @return {string}
*/
DebuggerScript._executionContextAuxData = function(contextData)
{
if (!contextData)
return "";
var match = contextData.match(/^[^,]*,[^,]*,(.*)$/);
return match ? match[1] : "";
}
/**
* @param {string} contextGroupId
* @return {!Array<!FormattedScript>}
*/
DebuggerScript.getScripts = function(contextGroupId)
{
var result = [];
var scripts = Debug.scripts();
var contextDataPrefix = null;
if (contextGroupId)
contextDataPrefix = contextGroupId + ",";
for (var i = 0; i < scripts.length; ++i) {
var script = scripts[i];
if (contextDataPrefix) {
if (!script.context_data)
continue;
// Context data is a string in the following format:
// <contextGroupId>,<contextId>,<auxData>
if (script.context_data.indexOf(contextDataPrefix) !== 0)
continue;
}
if (script.is_debugger_script)
continue;
result.push(DebuggerScript._formatScript(script));
}
return result;
}
/**
* @param {!Script} script
* @return {!FormattedScript}
*/
DebuggerScript._formatScript = function(script)
{
var lineEnds = script.line_ends;
var lineCount = lineEnds.length;
var endLine = script.line_offset + lineCount - 1;
var endColumn; | endLine += 1;
endColumn = 0;
} else {
if (lineCount === 1)
endColumn = script.source.length + script.column_offset;
else
endColumn = script.source.length - (lineEnds[lineCount - 2] + 1);
}
return {
id: script.id,
name: script.nameOrSourceURL(),
sourceURL: script.source_url,
sourceMappingURL: script.source_mapping_url,
source: script.source,
startLine: script.line_offset,
startColumn: script.column_offset,
endLine: endLine,
endColumn: endColumn,
executionContextId: DebuggerScript._executionContextId(script.context_data),
// Note that we cannot derive aux data from context id because of compilation cache.
executionContextAuxData: DebuggerScript._executionContextAuxData(script.context_data)
};
}
/**
* @param {!ExecutionState} execState
* @param {!BreakpointInfo} info
* @return {string|undefined}
*/
DebuggerScript.setBreakpoint = function(execState, info)
{
var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.Statement);
var locations = Debug.findBreakPointActualLocations(breakId);
if (!locations.length)
return undefined;
info.lineNumber = locations[0].line;
info.columnNumber = locations[0].column;
return breakId.toString();
}
/**
* @param {!ExecutionState} execState
* @param {!{breakpointId: number}} info
*/
DebuggerScript.removeBreakpoint = function(execState, info)
{
Debug.findBreakPoint(info.breakpointId, true);
}
/**
* @return {number}
*/
DebuggerScript.pauseOnExceptionsState = function()
{
return DebuggerScript._pauseOnExceptionsState;
}
/**
* @param {number} newState
*/
DebuggerScript.setPauseOnExceptionsState = function(newState)
{
DebuggerScript._pauseOnExceptionsState = newState;
if (DebuggerScript.PauseOnExceptionsState.PauseOnAllExceptions === newState)
Debug.setBreakOnException();
else
Debug.clearBreakOnException();
if (DebuggerScript.PauseOnExceptionsState.PauseOnUncaughtExceptions === newState)
Debug.setBreakOnUncaughtException();
else
Debug.clearBreakOnUncaughtException();
}
/**
* @param {!ExecutionState} execState
* @param {number} limit
* @return {!Array<!JavaScriptCallFrame>}
*/
DebuggerScript.currentCallFrames = function(execState, limit)
{
var frames = [];
for (var i = 0; i < execState.frameCount() && (!limit || i < limit); ++i)
frames.push(DebuggerScript._frameMirrorToJSCallFrame(execState.frame(i)));
return frames;
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepIntoStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepIn);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepFrameStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepFrame);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOverStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepNext);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOutOfFunction = function(execState)
{
execState.prepareStep(Debug.StepAction.StepOut);
}
DebuggerScript.clearStepping = function()
{
Debug.clearStepping();
}
// Returns array in form:
// [ 0, <v8_result_report> ] in case of success
// or [ 1, <general_error_message>, <compiler_message>, <line_number>, <column_number> ] in case of compile error, numbers are 1-based.
// or throws exception with message.
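// For example (illustrative values only, not actual V8 output): a successful edit
// might return [ 0, false ], while a syntax error at line 3, column 7 of the new
// source might return [ 1, "LiveEdit Failure", "Unexpected token", 3, 7 ].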
/**
* @param {number} scriptId
* @param {string} newSource
* @param {boolean} preview
* @return {!Array<*>}
*/
DebuggerScript.liveEditScriptSource = function(scriptId, newSource, preview)
{
var scripts = Debug.scripts();
var scriptToEdit = null;
for (var i = 0; i < scripts.length; i++) {
if (scripts[i].id == scriptId) {
scriptToEdit = scripts[i];
break;
}
}
if (!scriptToEdit)
throw("Script not found");
var changeLog = [];
try {
var result = Debug.LiveEdit.SetScriptSource(scriptToEdit, newSource, preview, changeLog);
return [0, result.stack_modified];
} catch (e) {
if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
var details = /** @type {!LiveEditErrorDetails} */(e.details);
if (details.type === "liveedit_compile_error") {
var startPosition = details.position.start;
return [1, String(e), String(details.syntaxErrorMessage), Number(startPosition.line), Number(startPosition.column)];
}
}
throw e;
}
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.clearBreakpoints = function(execState)
{
Debug.clearAllBreakPoints();
}
/**
* @param {!ExecutionState} execState
* @param {!{enabled: boolean}} info
*/
DebuggerScript.setBreakpointsActivated = function(execState, info)
{
Debug.debuggerFlags().breakPointsActive.setValue(info.enabled);
}
/**
* @param {!BreakEvent} eventData
*/
DebuggerScript.getBreakpointNumbers = function(eventData)
{
var breakpoints = eventData.breakPointsHit();
var numbers = [];
if (!breakpoints)
return numbers;
for (var i = 0; i < breakpoints.length; i++) {
var breakpoint = breakpoints[i];
var scriptBreakPoint = breakpoint.script_break_point();
numbers.push(scriptBreakPoint ? scriptBreakPoint.number() : breakpoint.number());
}
return numbers;
}
// NOTE: This function is performance critical, as it can be run on every
// statement that generates an async event (like addEventListener) to support
// asynchronous call stacks. Thus, when possible, initialize the data lazily.
/**
* @param {!FrameMirror} frameMirror
* @return {!JavaScriptCallFrame}
*/
DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
{
// Stuff that can not be initialized laz | // V8 will not count last line if script source ends with \n.
if (script.source[script.source.length - 1] === '\n') { | random_line_split |
debugger-script.js | Script.stepIntoStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepIn);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepFrameStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepFrame);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOverStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepNext);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOutOfFunction = function(execState)
{
execState.prepareStep(Debug.StepAction.StepOut);
}
DebuggerScript.clearStepping = function()
{
Debug.clearStepping();
}
// Returns array in form:
// [ 0, <v8_result_report> ] in case of success
// or [ 1, <general_error_message>, <compiler_message>, <line_number>, <column_number> ] in case of compile error, numbers are 1-based.
// or throws exception with message.
/**
* @param {number} scriptId
* @param {string} newSource
* @param {boolean} preview
* @return {!Array<*>}
*/
DebuggerScript.liveEditScriptSource = function(scriptId, newSource, preview)
{
var scripts = Debug.scripts();
var scriptToEdit = null;
for (var i = 0; i < scripts.length; i++) {
if (scripts[i].id == scriptId) {
scriptToEdit = scripts[i];
break;
}
}
if (!scriptToEdit)
throw("Script not found");
var changeLog = [];
try {
var result = Debug.LiveEdit.SetScriptSource(scriptToEdit, newSource, preview, changeLog);
return [0, result.stack_modified];
} catch (e) {
if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
var details = /** @type {!LiveEditErrorDetails} */(e.details);
if (details.type === "liveedit_compile_error") {
var startPosition = details.position.start;
return [1, String(e), String(details.syntaxErrorMessage), Number(startPosition.line), Number(startPosition.column)];
}
}
throw e;
}
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.clearBreakpoints = function(execState)
{
Debug.clearAllBreakPoints();
}
/**
* @param {!ExecutionState} execState
* @param {!{enabled: boolean}} info
*/
DebuggerScript.setBreakpointsActivated = function(execState, info)
{
Debug.debuggerFlags().breakPointsActive.setValue(info.enabled);
}
/**
* @param {!BreakEvent} eventData
*/
DebuggerScript.getBreakpointNumbers = function(eventData)
{
var breakpoints = eventData.breakPointsHit();
var numbers = [];
if (!breakpoints)
return numbers;
for (var i = 0; i < breakpoints.length; i++) {
var breakpoint = breakpoints[i];
var scriptBreakPoint = breakpoint.script_break_point();
numbers.push(scriptBreakPoint ? scriptBreakPoint.number() : breakpoint.number());
}
return numbers;
}
// NOTE: This function is performance critical, as it can be run on every
// statement that generates an async event (like addEventListener) to support
// asynchronous call stacks. Thus, when possible, initialize the data lazily.
/**
* @param {!FrameMirror} frameMirror
* @return {!JavaScriptCallFrame}
*/
DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
{
// Stuff that can not be initialized lazily (i.e. valid while paused with a valid break_id).
// The frameMirror and scopeMirror can be accessed only while paused on the debugger.
var frameDetails = frameMirror.details();
var funcObject = frameDetails.func();
var sourcePosition = frameDetails.sourcePosition();
var thisObject = frameDetails.receiver();
var isAtReturn = !!frameDetails.isAtReturn();
var returnValue = isAtReturn ? frameDetails.returnValue() : undefined;
var scopeMirrors = frameMirror.allScopes(false);
/** @type {!Array<number>} */
var scopeTypes = new Array(scopeMirrors.length);
/** @type {?Array<!Object>} */
var scopeObjects = new Array(scopeMirrors.length);
/** @type {!Array<string|undefined>} */
var scopeNames = new Array(scopeMirrors.length);
/** @type {?Array<number>} */
var scopeStartPositions = new Array(scopeMirrors.length);
/** @type {?Array<number>} */
var scopeEndPositions = new Array(scopeMirrors.length);
/** @type {?Array<function()|null>} */
var scopeFunctions = new Array(scopeMirrors.length);
for (var i = 0; i < scopeMirrors.length; ++i) {
var scopeDetails = scopeMirrors[i].details();
scopeTypes[i] = scopeDetails.type();
scopeObjects[i] = scopeDetails.object();
scopeNames[i] = scopeDetails.name();
scopeStartPositions[i] = scopeDetails.startPosition ? scopeDetails.startPosition() : 0;
scopeEndPositions[i] = scopeDetails.endPosition ? scopeDetails.endPosition() : 0;
scopeFunctions[i] = scopeDetails.func ? scopeDetails.func() : null;
}
// Calculated lazily.
var scopeChain;
var funcMirror;
var location;
/** @type {!Array<?RawLocation>} */
var scopeStartLocations;
/** @type {!Array<?RawLocation>} */
var scopeEndLocations;
var details;
/**
* @param {!ScriptMirror|undefined} script
* @param {number} pos
* @return {?RawLocation}
*/
function createLocation(script, pos)
{
if (!script)
return null;
var location = script.locationFromPosition(pos, true);
return {
"lineNumber": location.line,
"columnNumber": location.column,
"scriptId": String(script.id())
}
}
/**
* @return {!Array<!Object>}
*/
function ensureScopeChain()
{
if (!scopeChain) {
scopeChain = [];
scopeStartLocations = [];
scopeEndLocations = [];
for (var i = 0, j = 0; i < scopeObjects.length; ++i) {
var scopeObject = DebuggerScript._buildScopeObject(scopeTypes[i], scopeObjects[i]);
if (scopeObject) {
scopeTypes[j] = scopeTypes[i];
scopeNames[j] = scopeNames[i];
scopeChain[j] = scopeObject;
var funcMirror = scopeFunctions ? MakeMirror(scopeFunctions[i]) : null;
if (!funcMirror || !funcMirror.isFunction())
funcMirror = new UnresolvedFunctionMirror(funcObject);
var script = /** @type {!FunctionMirror} */(funcMirror).script();
scopeStartLocations[j] = createLocation(script, scopeStartPositions[i]);
scopeEndLocations[j] = createLocation(script, scopeEndPositions[i]);
++j;
}
}
scopeTypes.length = scopeChain.length;
scopeNames.length = scopeChain.length;
scopeObjects = null; // Free for GC.
scopeFunctions = null;
scopeStartPositions = null;
scopeEndPositions = null;
}
return scopeChain;
}
/**
* @return {!JavaScriptCallFrameDetails}
*/
function lazyDetails()
{
if (!details) {
var scopeObjects = ensureScopeChain();
var script = ensureFuncMirror().script();
/** @type {!Array<Scope>} */
var scopes = [];
for (var i = 0; i < scopeObjects.length; ++i) {
var scope = {
"type": /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeTypes[i])),
"object": scopeObjects[i],
};
if (scopeNames[i])
scope.name = scopeNames[i];
if (scopeStartLocations[i])
scope.startLocation = /** @type {!RawLocation} */(scopeStartLocations[i]);
if (scopeEndLocations[i])
scope.endLocation = /** @type {!RawLocation} */(scopeEndLocations[i]);
scopes.push(scope);
}
details = {
"functionName": ensureFuncMirror().debugName(),
"location": {
"lineNumber": line(),
"columnNumber": column(),
"scriptId": String(script.id())
},
"this": thisObject,
"scopeChain": scopes
};
var functionLocation = ensureFuncMirror().sourceLocation();
if (functionLocation) {
details.functionLocation = {
"lineNumber": functionLocation.line,
"columnNumber": functionLocation.column,
"scriptId": String(script.id())
};
}
if (isAtReturn)
details.returnValue = returnValue;
}
return details;
}
/**
* @return {!FunctionMirror}
*/
function ensureFuncMirror()
{
if (!funcMirror) {
funcMirror = MakeMirror(funcObject);
if (!funcMirror.isFunction())
funcMirror = new UnresolvedFunctionMirror(funcObject);
}
return /** @type {!FunctionMirror} */(funcMirror);
}
/**
* @return {!{line: number, column: number}}
*/
function ensureLocation()
| {
if (!location) {
var script = ensureFuncMirror().script();
if (script)
location = script.locationFromPosition(sourcePosition, true);
if (!location)
location = { line: 0, column: 0 };
}
return location;
} | identifier_body |
|
debugger-script.js | (execState)
{
execState.prepareStep(Debug.StepAction.StepFrame);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOverStatement = function(execState)
{
execState.prepareStep(Debug.StepAction.StepNext);
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.stepOutOfFunction = function(execState)
{
execState.prepareStep(Debug.StepAction.StepOut);
}
DebuggerScript.clearStepping = function()
{
Debug.clearStepping();
}
// Returns array in form:
// [ 0, <v8_result_report> ] in case of success
// or [ 1, <general_error_message>, <compiler_message>, <line_number>, <column_number> ] in case of compile error, numbers are 1-based.
// or throws exception with message.
/**
* @param {number} scriptId
* @param {string} newSource
* @param {boolean} preview
* @return {!Array<*>}
*/
DebuggerScript.liveEditScriptSource = function(scriptId, newSource, preview)
{
var scripts = Debug.scripts();
var scriptToEdit = null;
for (var i = 0; i < scripts.length; i++) {
if (scripts[i].id == scriptId) {
scriptToEdit = scripts[i];
break;
}
}
if (!scriptToEdit)
throw("Script not found");
var changeLog = [];
try {
var result = Debug.LiveEdit.SetScriptSource(scriptToEdit, newSource, preview, changeLog);
return [0, result.stack_modified];
} catch (e) {
if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
var details = /** @type {!LiveEditErrorDetails} */(e.details);
if (details.type === "liveedit_compile_error") {
var startPosition = details.position.start;
return [1, String(e), String(details.syntaxErrorMessage), Number(startPosition.line), Number(startPosition.column)];
}
}
throw e;
}
}
/**
* @param {!ExecutionState} execState
*/
DebuggerScript.clearBreakpoints = function(execState)
{
Debug.clearAllBreakPoints();
}
/**
* @param {!ExecutionState} execState
* @param {!{enabled: boolean}} info
*/
DebuggerScript.setBreakpointsActivated = function(execState, info)
{
Debug.debuggerFlags().breakPointsActive.setValue(info.enabled);
}
/**
* @param {!BreakEvent} eventData
*/
DebuggerScript.getBreakpointNumbers = function(eventData)
{
var breakpoints = eventData.breakPointsHit();
var numbers = [];
if (!breakpoints)
return numbers;
for (var i = 0; i < breakpoints.length; i++) {
var breakpoint = breakpoints[i];
var scriptBreakPoint = breakpoint.script_break_point();
numbers.push(scriptBreakPoint ? scriptBreakPoint.number() : breakpoint.number());
}
return numbers;
}
// NOTE: This function is performance critical, as it can be run on every
// statement that generates an async event (like addEventListener) to support
// asynchronous call stacks. Thus, when possible, initialize the data lazily.
/**
* @param {!FrameMirror} frameMirror
* @return {!JavaScriptCallFrame}
*/
DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
{
// Stuff that can not be initialized lazily (i.e. valid while paused with a valid break_id).
// The frameMirror and scopeMirror can be accessed only while paused on the debugger.
var frameDetails = frameMirror.details();
var funcObject = frameDetails.func();
var sourcePosition = frameDetails.sourcePosition();
var thisObject = frameDetails.receiver();
var isAtReturn = !!frameDetails.isAtReturn();
var returnValue = isAtReturn ? frameDetails.returnValue() : undefined;
var scopeMirrors = frameMirror.allScopes(false);
/** @type {!Array<number>} */
var scopeTypes = new Array(scopeMirrors.length);
/** @type {?Array<!Object>} */
var scopeObjects = new Array(scopeMirrors.length);
/** @type {!Array<string|undefined>} */
var scopeNames = new Array(scopeMirrors.length);
/** @type {?Array<number>} */
var scopeStartPositions = new Array(scopeMirrors.length);
/** @type {?Array<number>} */
var scopeEndPositions = new Array(scopeMirrors.length);
/** @type {?Array<function()|null>} */
var scopeFunctions = new Array(scopeMirrors.length);
for (var i = 0; i < scopeMirrors.length; ++i) {
var scopeDetails = scopeMirrors[i].details();
scopeTypes[i] = scopeDetails.type();
scopeObjects[i] = scopeDetails.object();
scopeNames[i] = scopeDetails.name();
scopeStartPositions[i] = scopeDetails.startPosition ? scopeDetails.startPosition() : 0;
scopeEndPositions[i] = scopeDetails.endPosition ? scopeDetails.endPosition() : 0;
scopeFunctions[i] = scopeDetails.func ? scopeDetails.func() : null;
}
// Calculated lazily.
var scopeChain;
var funcMirror;
var location;
/** @type {!Array<?RawLocation>} */
var scopeStartLocations;
/** @type {!Array<?RawLocation>} */
var scopeEndLocations;
var details;
/**
* @param {!ScriptMirror|undefined} script
* @param {number} pos
* @return {?RawLocation}
*/
function createLocation(script, pos)
{
if (!script)
return null;
var location = script.locationFromPosition(pos, true);
return {
"lineNumber": location.line,
"columnNumber": location.column,
"scriptId": String(script.id())
}
}
/**
* @return {!Array<!Object>}
*/
function ensureScopeChain()
{
if (!scopeChain) {
scopeChain = [];
scopeStartLocations = [];
scopeEndLocations = [];
for (var i = 0, j = 0; i < scopeObjects.length; ++i) {
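// i walks every scope mirror; j is the compacted write index and only advances for scopes whose objects could be built.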
var scopeObject = DebuggerScript._buildScopeObject(scopeTypes[i], scopeObjects[i]);
if (scopeObject) {
scopeTypes[j] = scopeTypes[i];
scopeNames[j] = scopeNames[i];
scopeChain[j] = scopeObject;
var funcMirror = scopeFunctions ? MakeMirror(scopeFunctions[i]) : null;
if (!funcMirror || !funcMirror.isFunction())
funcMirror = new UnresolvedFunctionMirror(funcObject);
var script = /** @type {!FunctionMirror} */(funcMirror).script();
scopeStartLocations[j] = createLocation(script, scopeStartPositions[i]);
scopeEndLocations[j] = createLocation(script, scopeEndPositions[i]);
++j;
}
}
scopeTypes.length = scopeChain.length;
scopeNames.length = scopeChain.length;
scopeObjects = null; // Free for GC.
scopeFunctions = null;
scopeStartPositions = null;
scopeEndPositions = null;
}
return scopeChain;
}
/**
* @return {!JavaScriptCallFrameDetails}
*/
function lazyDetails()
{
if (!details) {
var scopeObjects = ensureScopeChain();
var script = ensureFuncMirror().script();
/** @type {!Array<Scope>} */
var scopes = [];
for (var i = 0; i < scopeObjects.length; ++i) {
var scope = {
"type": /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeTypes[i])),
"object": scopeObjects[i],
};
if (scopeNames[i])
scope.name = scopeNames[i];
if (scopeStartLocations[i])
scope.startLocation = /** @type {!RawLocation} */(scopeStartLocations[i]);
if (scopeEndLocations[i])
scope.endLocation = /** @type {!RawLocation} */(scopeEndLocations[i]);
scopes.push(scope);
}
details = {
"functionName": ensureFuncMirror().debugName(),
"location": {
"lineNumber": line(),
"columnNumber": column(),
"scriptId": String(script.id())
},
"this": thisObject,
"scopeChain": scopes
};
var functionLocation = ensureFuncMirror().sourceLocation();
if (functionLocation) {
details.functionLocation = {
"lineNumber": functionLocation.line,
"columnNumber": functionLocation.column,
"scriptId": String(script.id())
};
}
if (isAtReturn)
details.returnValue = returnValue;
}
return details;
}
/**
* @return {!FunctionMirror}
*/
function ensureFuncMirror()
{
if (!funcMirror) {
funcMirror = MakeMirror(funcObject);
if (!funcMirror.isFunction())
funcMirror = new UnresolvedFunctionMirror(funcObject);
}
return /** @type {!FunctionMirror} */(funcMirror);
}
/**
* @return {!{line: number, column: number}}
*/
function ensureLocation()
{
if (!location) {
var script = ensureFuncMirror().script();
if (script)
location = script.locationFromPosition(sourcePosition, true);
if (!location)
location = { line: 0, column: 0 };
}
return location;
}
/**
* @return {number}
*/
function line()
{
return ensureLocation().line;
}
/**
* @return {number}
*/
function | column | identifier_name |
|
main.rs | TreeViewColumn, Type, Window, WindowType};
use gtk::{BuilderExt, ButtonExt, CellLayoutExt, DialogExt, EntryExt, FileChooserExt, GtkWindowExt,
ListStoreExt, ListStoreExtManual, TreeModelExt, TreeSelectionExt, TreeSortableExtManual,
TreeViewColumnExt, TreeViewExt, WidgetExt};
#[derive(Debug, PartialEq, Eq)]
enum Column {
ID,
Type,
Size,
Offset,
}
impl Into<u32> for Column {
fn into(self) -> u32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
impl Into<i32> for Column {
fn into(self) -> i32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
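// Both conversions are needed: parts of the gtk-rs API take u32 column indices (e.g. when inserting values),
// while others, such as sort column ids, expect i32.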
macro_rules! add_column {
($tree:ident, $title:expr, $id:expr) => {{
let column = TreeViewColumn::new();
let renderer = gtk::CellRendererText::new();
column.set_title($title);
column.set_resizable(true);
column.pack_start(&renderer, true);
column.add_attribute(&renderer, "text", $id);
$tree.append_column(&column);
}}
}
macro_rules! add_sort_func {
($tree:ident, $store:ident, $convert:ident, $col:expr) => {{
let store_clone = $store.clone();
$store.set_sort_func(gtk::SortColumn::Index($col.into()), move |_this, a, b| {
let string_at_iter = |iter| store_clone.get_value(iter, $col.into())
.get::<String>()
.unwrap();
let a = $convert(string_at_iter(a));
let b = $convert(string_at_iter(b));
a.cmp(&b)
});
$tree.get_column($col.into()).unwrap().set_sort_column_id($col.into());
}}
}
fn setup_tree(tree: TreeView, extract_button: Button) {
let sel = tree.get_selection();
let model = match tree.get_model() {
Some(m) => m,
_ => return,
};
sel.connect_changed(move |this| {
// TODO: Do all of this when an archive is opened, too.
let selected_count = this.count_selected_rows();
let store_len = model.iter_n_children(None);
let count_str = if selected_count == 0 || selected_count == store_len {
"all".into()
} else {
format!("({})", selected_count)
};
extract_button.set_label(&format!("Extract {}", count_str))
});
}
fn select_dir_dialog(
title: &str,
window_type: gtk::WindowType,
action: gtk::FileChooserAction,
) -> Option<PathBuf> {
let dialog = FileChooserDialog::new(Some(title), Some(&Window::new(window_type)), action);
dialog.add_button("_Cancel", gtk::ResponseType::Cancel.into());
match action {
gtk::FileChooserAction::Open => {
dialog.add_button("_Open", gtk::ResponseType::Ok.into());
}
gtk::FileChooserAction::SelectFolder => {
dialog.add_button("_Select", gtk::ResponseType::Ok.into());
}
_ => (),
};
let path = if dialog.run() == gtk::ResponseType::Ok.into() {
dialog.get_filename()
} else {
None
};
dialog.destroy();
path
}
fn enable_archive_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
archive_button: Button,
archive_entrybox: EntryBox,
ei_store: ListStore,
) {
archive_button.connect_clicked(move |_this| {
let archive_path = match select_dir_dialog(
"Select a DRS archive",
WindowType::Popup,
gtk::FileChooserAction::Open,
) {
Some(p) => p,
_ => return,
};
let archive_path = match archive_path.to_str() {
Some(p) => p,
_ => return,
};
let arch = match Archive::read_from_file(archive_path) {
Ok(a) => a,
_ => return,
};
ei_store.clear();
extract_button.set_sensitive(true);
archive_entrybox.set_text(archive_path);
for table in arch.tables.iter() {
for entry in table.entries.iter() {
let float_len = entry.file_size as f32;
let formatted_size = match binary_prefix(float_len) {
Standalone(bytes) => format!("{} B", bytes),
Prefixed(prefix, n) => format!("{:.2} {}B", n, prefix),
};
ei_store.insert_with_values(
None,
&[
Column::ID.into(),
Column::Type.into(),
Column::Size.into(),
Column::Offset.into(),
],
&[
&entry.file_id.to_string(),
&table.header.file_extension(),
&formatted_size,
&format!("{:#X}", entry.file_offset),
],
);
}
}
archive.replace(Some(arch));
});
}
fn enable_extract_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
entryinfo_tree: TreeView,
) {
extract_button.connect_clicked(move |_this| {
if let Some(dest_dir_path) = select_dir_dialog(
"Select a directory to extract to",
WindowType::Toplevel,
gtk::FileChooserAction::SelectFolder,
) {
let arch = match archive.take() {
Some(a) => a,
_ => return,
};
let sel = entryinfo_tree.get_selection();
let (mut sel_paths, model) = sel.get_selected_rows();
if sel_paths.len() == 0 {
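// Nothing is selected: momentarily select every row so the loop below extracts the whole archive, then restore the empty selection.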
sel.select_all();
let (s, _) = sel.get_selected_rows();
sel_paths = s;
sel.unselect_all();
}
for sel_path in sel_paths {
let iter = match model.get_iter(&sel_path) {
Some(i) => i,
_ => continue,
};
let val = model.get_value(&iter, 0);
let name = val.get::<String>().expect(&format!(
"Unable to convert gtk::Type::String {:?} to a Rust String",
val
));
for table in arch.tables.iter() {
let data = match table.find_file_contents(name.parse::<u32>().unwrap()) {
Some(d) => d,
_ => continue,
};
let mut output_filepath = dest_dir_path.clone();
output_filepath.push(name.replace("\\", "/"));
output_filepath.set_extension(table.header.file_extension());
let parent = output_filepath.parent().expect(&format!(
"Unable to determine parent path of {:?}",
&output_filepath
));
fs::create_dir_all(&parent)
.expect("Failed to create necessary parent directories");
let mut f = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(true)
.open(&output_filepath)
.expect(&format!(
"Failed to open file {:?} for writing",
output_filepath
));
f.write_all(data).expect("Failed to write data");
}
}
archive.replace(Some(arch));
}
});
}
fn enable_sortable_cols(ei_store: &ListStore, entryinfo_tree: &TreeView) {
// Values in the table are strings. They should be converted back
// to their original type to make the sort function work properly
fn convert_name(s: String) -> u32 {
s.parse::<u32>().unwrap()
}
fn convert_type(s: String) -> String {
s
}
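// e.g. "2.00 MiB" parses to (1024^2 * 2.00) = 2_097_152 bytes, while "512 B" stays 512.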
fn convert_size(s: String) -> u32 {
let v = s.split(' ').collect::<Vec<&str>>();
let exp = match v.get(1) {
Some(&"B") => 0,
Some(&"KiB") => 1,
Some(&"MiB") => 2,
Some(&"GiB") => 3,
_ => panic!("Unable to convert size: `{}`", s),
};
(1024u32.pow(exp) as f32 * v[0].parse::<f32>().unwrap()) as u32
}
fn convert_offset(s: String) -> u32 {
u32::from_str_radix(&s[2..], 16).unwrap()
}
add_sort_func!(entryinfo_tree, ei_store, convert_name, Column::ID);
add_sort_func!(entryinfo_tree, ei_store, convert_type, Column::Type);
add_sort_func!(entryinfo_tree, ei_store, convert_size, Column::Size);
add_sort_func!(entryinfo_tree, ei_store, convert_offset, Column::Offset);
}
fn main() {
gtk::init().unwrap();
let builder = Builder::new();
builder
.add_from_string(include_str!("../ui.glade"))
.unwrap();
let window: Window = builder.get_object("main_window").unwrap();
let archive_entrybox: EntryBox = builder.get_object("archive_file_entry").unwrap();
let archive_button: Button = builder.get_object("archive_file_button").unwrap();
let extract_button: Button = builder.get_object("extract_button").unwrap();
extract_button.set_sensitive(false);
let entryinfo_tree = {
| select_dir_dialog | identifier_name |
main.rs | TreeViewColumn, Type, Window, WindowType};
use gtk::{BuilderExt, ButtonExt, CellLayoutExt, DialogExt, EntryExt, FileChooserExt, GtkWindowExt,
ListStoreExt, ListStoreExtManual, TreeModelExt, TreeSelectionExt, TreeSortableExtManual,
TreeViewColumnExt, TreeViewExt, WidgetExt};
#[derive(Debug, PartialEq, Eq)]
enum Column {
ID,
Type,
Size,
Offset,
}
impl Into<u32> for Column {
fn into(self) -> u32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
impl Into<i32> for Column {
fn into(self) -> i32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
macro_rules! add_column {
($tree:ident, $title:expr, $id:expr) => {{
let column = TreeViewColumn::new();
let renderer = gtk::CellRendererText::new();
column.set_title($title);
column.set_resizable(true);
column.pack_start(&renderer, true);
column.add_attribute(&renderer, "text", $id);
$tree.append_column(&column);
}}
}
macro_rules! add_sort_func {
($tree:ident, $store:ident, $convert:ident, $col:expr) => {{
let store_clone = $store.clone();
$store.set_sort_func(gtk::SortColumn::Index($col.into()), move |_this, a, b| {
let string_at_iter = |iter| store_clone.get_value(iter, $col.into())
.get::<String>()
.unwrap();
let a = $convert(string_at_iter(a));
let b = $convert(string_at_iter(b));
a.cmp(&b)
});
$tree.get_column($col.into()).unwrap().set_sort_column_id($col.into());
}}
}
fn setup_tree(tree: TreeView, extract_button: Button) {
let sel = tree.get_selection();
let model = match tree.get_model() {
Some(m) => m,
_ => return,
};
sel.connect_changed(move |this| {
// TODO: Do all of this when an archive is opened, too.
let selected_count = this.count_selected_rows();
let store_len = model.iter_n_children(None); | let count_str = if selected_count == 0 || selected_count == store_len {
"all".into()
} else {
format!("({})", selected_count)
};
extract_button.set_label(&format!("Extract {}", count_str))
});
}
fn select_dir_dialog(
title: &str,
window_type: gtk::WindowType,
action: gtk::FileChooserAction,
) -> Option<PathBuf> {
let dialog = FileChooserDialog::new(Some(title), Some(&Window::new(window_type)), action);
dialog.add_button("_Cancel", gtk::ResponseType::Cancel.into());
match action {
gtk::FileChooserAction::Open => {
dialog.add_button("_Open", gtk::ResponseType::Ok.into());
}
gtk::FileChooserAction::SelectFolder => {
dialog.add_button("_Select", gtk::ResponseType::Ok.into());
}
_ => (),
};
let path = if dialog.run() == gtk::ResponseType::Ok.into() {
dialog.get_filename()
} else {
None
};
dialog.destroy();
path
}
fn enable_archive_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
archive_button: Button,
archive_entrybox: EntryBox,
ei_store: ListStore,
) {
archive_button.connect_clicked(move |_this| {
let archive_path = match select_dir_dialog(
"Select a DRS archive",
WindowType::Popup,
gtk::FileChooserAction::Open,
) {
Some(p) => p,
_ => return,
};
let archive_path = match archive_path.to_str() {
Some(p) => p,
_ => return,
};
let arch = match Archive::read_from_file(archive_path) {
Ok(a) => a,
_ => return,
};
ei_store.clear();
extract_button.set_sensitive(true);
archive_entrybox.set_text(archive_path);
for table in arch.tables.iter() {
for entry in table.entries.iter() {
let float_len = entry.file_size as f32;
let formatted_size = match binary_prefix(float_len) {
Standalone(bytes) => format!("{} B", bytes),
Prefixed(prefix, n) => format!("{:.2} {}B", n, prefix),
};
ei_store.insert_with_values(
None,
&[
Column::ID.into(),
Column::Type.into(),
Column::Size.into(),
Column::Offset.into(),
],
&[
&entry.file_id.to_string(),
&table.header.file_extension(),
&formatted_size,
&format!("{:#X}", entry.file_offset),
],
);
}
}
archive.replace(Some(arch));
});
}
fn enable_extract_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
entryinfo_tree: TreeView,
) {
extract_button.connect_clicked(move |_this| {
if let Some(dest_dir_path) = select_dir_dialog(
"Select a directory to extract to",
WindowType::Toplevel,
gtk::FileChooserAction::SelectFolder,
) {
let arch = match archive.take() {
Some(a) => a,
_ => return,
};
let sel = entryinfo_tree.get_selection();
let (mut sel_paths, model) = sel.get_selected_rows();
if sel_paths.len() == 0 {
sel.select_all();
let (s, _) = sel.get_selected_rows();
sel_paths = s;
sel.unselect_all();
}
for sel_path in sel_paths {
let iter = match model.get_iter(&sel_path) {
Some(i) => i,
_ => continue,
};
let val = model.get_value(&iter, 0);
let name = val.get::<String>().expect(&format!(
"Unable to convert gtk::Type::String {:?} to a Rust String",
val
));
for table in arch.tables.iter() {
let data = match table.find_file_contents(name.parse::<u32>().unwrap()) {
Some(d) => d,
_ => continue,
};
let mut output_filepath = dest_dir_path.clone();
output_filepath.push(name.replace("\\", "/"));
output_filepath.set_extension(table.header.file_extension());
let parent = output_filepath.parent().expect(&format!(
"Unable to determine parent path of {:?}",
&output_filepath
));
fs::create_dir_all(&parent)
.expect("Failed to create necessary parent directories");
let mut f = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(true)
.open(&output_filepath)
.expect(&format!(
"Failed to open file {:?} for writing",
output_filepath
));
f.write(data).expect("Failed to write data");
}
}
archive.replace(Some(arch));
}
});
}
fn enable_sortable_cols(ei_store: &ListStore, entryinfo_tree: &TreeView) {
// Values in the table are strings. They should be converted back
// to their original type to make the sort function work properly
fn convert_name(s: String) -> u32 {
s.parse::<u32>().unwrap()
}
fn convert_type(s: String) -> String {
s
}
fn convert_size(s: String) -> u32 {
let v = s.split(' ').collect::<Vec<&str>>();
let exp = match v.get(1) {
Some(&"B") => 0,
Some(&"KiB") => 1,
Some(&"MiB") => 2,
Some(&"GiB") => 3,
_ => panic!("Unable to convert size: `{}`", s),
};
(1024u32.pow(exp) as f32 * v[0].parse::<f32>().unwrap()) as u32
}
fn convert_offset(s: String) -> u32 {
u32::from_str_radix(&s[2..], 16).unwrap()
}
add_sort_func!(entryinfo_tree, ei_store, convert_name, Column::ID);
add_sort_func!(entryinfo_tree, ei_store, convert_type, Column::Type);
add_sort_func!(entryinfo_tree, ei_store, convert_size, Column::Size);
add_sort_func!(entryinfo_tree, ei_store, convert_offset, Column::Offset);
}
fn main() {
gtk::init().unwrap();
let builder = Builder::new();
builder
.add_from_string(include_str!("../ui.glade"))
.unwrap();
let window: Window = builder.get_object("main_window").unwrap();
let archive_entrybox: EntryBox = builder.get_object("archive_file_entry").unwrap();
let archive_button: Button = builder.get_object("archive_file_button").unwrap();
let extract_button: Button = builder.get_object("extract_button").unwrap();
extract_button.set_sensitive(false);
let entryinfo_tree = {
| random_line_split |
|
main.rs | TreeViewColumn, Type, Window, WindowType};
use gtk::{BuilderExt, ButtonExt, CellLayoutExt, DialogExt, EntryExt, FileChooserExt, GtkWindowExt,
ListStoreExt, ListStoreExtManual, TreeModelExt, TreeSelectionExt, TreeSortableExtManual,
TreeViewColumnExt, TreeViewExt, WidgetExt};
#[derive(Debug, PartialEq, Eq)]
enum Column {
ID,
Type,
Size,
Offset,
}
impl Into<u32> for Column {
fn into(self) -> u32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
impl Into<i32> for Column {
fn into(self) -> i32 {
match self {
Column::ID => 0,
Column::Type => 1,
Column::Size => 2,
Column::Offset => 3,
}
}
}
macro_rules! add_column {
($tree:ident, $title:expr, $id:expr) => {{
let column = TreeViewColumn::new();
let renderer = gtk::CellRendererText::new();
column.set_title($title);
column.set_resizable(true);
column.pack_start(&renderer, true);
column.add_attribute(&renderer, "text", $id);
$tree.append_column(&column);
}}
}
macro_rules! add_sort_func {
($tree:ident, $store:ident, $convert:ident, $col:expr) => {{
let store_clone = $store.clone();
$store.set_sort_func(gtk::SortColumn::Index($col.into()), move |_this, a, b| {
let string_at_iter = |iter| store_clone.get_value(iter, $col.into())
.get::<String>()
.unwrap();
let a = $convert(string_at_iter(a));
let b = $convert(string_at_iter(b));
a.cmp(&b)
});
$tree.get_column($col.into()).unwrap().set_sort_column_id($col.into());
}}
}
fn setup_tree(tree: TreeView, extract_button: Button) {
let sel = tree.get_selection();
let model = match tree.get_model() {
Some(m) => m,
_ => return,
};
sel.connect_changed(move |this| {
// TODO: Do all of this when an archive is opened, too.
let selected_count = this.count_selected_rows();
let store_len = model.iter_n_children(None);
let count_str = if selected_count == 0 || selected_count == store_len {
"all".into()
} else {
format!("({})", selected_count)
};
extract_button.set_label(&format!("Extract {}", count_str))
});
}
fn select_dir_dialog(
title: &str,
window_type: gtk::WindowType,
action: gtk::FileChooserAction,
) -> Option<PathBuf> {
let dialog = FileChooserDialog::new(Some(title), Some(&Window::new(window_type)), action);
dialog.add_button("_Cancel", gtk::ResponseType::Cancel.into());
match action {
gtk::FileChooserAction::Open => {
dialog.add_button("_Open", gtk::ResponseType::Ok.into());
}
gtk::FileChooserAction::SelectFolder => {
dialog.add_button("_Select", gtk::ResponseType::Ok.into());
}
_ => (),
};
let path = if dialog.run() == gtk::ResponseType::Ok.into() {
dialog.get_filename()
} else {
None
};
dialog.destroy();
path
}
fn enable_archive_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
archive_button: Button,
archive_entrybox: EntryBox,
ei_store: ListStore,
) {
archive_button.connect_clicked(move |_this| {
let archive_path = match select_dir_dialog(
"Select a DRS archive",
WindowType::Popup,
gtk::FileChooserAction::Open,
) {
Some(p) => p,
_ => return,
};
let archive_path = match archive_path.to_str() {
Some(p) => p,
_ => return,
};
let arch = match Archive::read_from_file(archive_path) {
Ok(a) => a,
_ => return,
};
ei_store.clear();
extract_button.set_sensitive(true);
archive_entrybox.set_text(archive_path);
for table in arch.tables.iter() {
for entry in table.entries.iter() {
let float_len = entry.file_size as f32;
let formatted_size = match binary_prefix(float_len) {
Standalone(bytes) => format!("{} B", bytes),
Prefixed(prefix, n) => format!("{:.2} {}B", n, prefix),
};
ei_store.insert_with_values(
None,
&[
Column::ID.into(),
Column::Type.into(),
Column::Size.into(),
Column::Offset.into(),
],
&[
&entry.file_id.to_string(),
&table.header.file_extension(),
&formatted_size,
&format!("{:#X}", entry.file_offset),
],
);
}
}
archive.replace(Some(arch));
});
}
fn enable_extract_button(
archive: Rc<Cell<Option<Archive>>>,
extract_button: Button,
entryinfo_tree: TreeView,
) {
extract_button.connect_clicked(move |_this| {
if let Some(dest_dir_path) = select_dir_dialog(
"Select a directory to extract to",
WindowType::Toplevel,
gtk::FileChooserAction::SelectFolder,
) {
let arch = match archive.take() {
Some(a) => a,
_ => return,
};
let sel = entryinfo_tree.get_selection();
let (mut sel_paths, model) = sel.get_selected_rows();
if sel_paths.len() == 0 {
sel.select_all();
let (s, _) = sel.get_selected_rows();
sel_paths = s;
sel.unselect_all();
}
for sel_path in sel_paths {
let iter = match model.get_iter(&sel_path) {
Some(i) => i,
_ => continue,
};
let val = model.get_value(&iter, 0);
let name = val.get::<String>().expect(&format!(
"Unable to convert gtk::Type::String {:?} to a Rust String",
val
));
for table in arch.tables.iter() {
let data = match table.find_file_contents(name.parse::<u32>().unwrap()) {
Some(d) => d,
_ => continue,
};
let mut output_filepath = dest_dir_path.clone();
output_filepath.push(name.replace("\\", "/"));
output_filepath.set_extension(table.header.file_extension());
let parent = output_filepath.parent().expect(&format!(
"Unable to determine parent path of {:?}",
&output_filepath
));
fs::create_dir_all(&parent)
.expect("Failed to create necessary parent directories");
let mut f = OpenOptions::new()
.create(true)
.read(true)
.write(true)
.truncate(true)
.open(&output_filepath)
.expect(&format!(
"Failed to open file {:?} for writing",
output_filepath
));
f.write(data).expect("Failed to write data");
}
}
archive.replace(Some(arch));
}
});
}
fn enable_sortable_cols(ei_store: &ListStore, entryinfo_tree: &TreeView) {
// Values in the table are strings. They should be converted back
// to their original type to make the sort function work properly
fn convert_name(s: String) -> u32 {
s.parse::<u32>().unwrap()
}
fn convert_type(s: String) -> String |
fn convert_size(s: String) -> u32 {
let v = s.split(' ').collect::<Vec<&str>>();
let exp = match v.get(1) {
Some(&"B") => 0,
Some(&"KiB") => 1,
Some(&"MiB") => 2,
Some(&"GiB") => 3,
_ => panic!("Unable to convert size: `{}`", s),
};
(1024u32.pow(exp) as f32 * v[0].parse::<f32>().unwrap()) as u32
}
fn convert_offset(s: String) -> u32 {
u32::from_str_radix(&s[2..], 16).unwrap()
}
add_sort_func!(entryinfo_tree, ei_store, convert_name, Column::ID);
add_sort_func!(entryinfo_tree, ei_store, convert_type, Column::Type);
add_sort_func!(entryinfo_tree, ei_store, convert_size, Column::Size);
add_sort_func!(entryinfo_tree, ei_store, convert_offset, Column::Offset);
}
fn main() {
gtk::init().unwrap();
let builder = Builder::new();
builder
.add_from_string(include_str!("../ui.glade"))
.unwrap();
let window: Window = builder.get_object("main_window").unwrap();
let archive_entrybox: EntryBox = builder.get_object("archive_file_entry").unwrap();
let archive_button: Button = builder.get_object("archive_file_button").unwrap();
let extract_button: Button = builder.get_object("extract_button").unwrap();
extract_button.set_sensitive(false);
let entryinfo_tree = | {
s
} | identifier_body |
block.rs | (&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = block_size - 1 - (suffix.len() % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = padding.clone();
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = padding.len() + suffix.len() + 1 - block_size;
let output = ecb_suffix_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
for byte in 0..256 {
let mut test_block = partial_block.to_vec();
test_block.push(byte as u8);
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[..block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Find the length of an unknown prefix which is appended to ECB-encrypted messages.
fn find_ecb_prefix_len(ecb_affixes_box: &EcbWithAffixes, block_size: usize) -> usize {
// Find the block in which the prefix ends, by finding the first block which is different upon
// inserting a null byte.
let empty = ecb_affixes_box.encrypt(&Data::new());
let noisy = ecb_affixes_box.encrypt(&Data::from_bytes(vec![0]));
let mut prefix_block = 0;
for (ix, (byte1, byte2)) in empty.bytes().iter().zip(noisy.bytes().iter()).enumerate() {
if byte1 != byte2 {
prefix_block = ix / block_size;
break;
}
}
// Now find the length of the prefix modulo the block size, by finding the smallest number of
// null bytes we need to provide as input in order to produce repeated blocks.
let mut prefix_len = block_size * prefix_block;
for ix in 0..block_size {
let repeats = Data::from_bytes(vec![0; 2 * block_size + ix]);
let output = ecb_affixes_box.encrypt(&repeats);
if output.bytes()[block_size * (prefix_block + 1)..block_size * (prefix_block + 2)] ==
output.bytes()[block_size * (prefix_block + 2)..block_size * (prefix_block + 3)] {
prefix_len += block_size - ix;
break;
}
}
prefix_len
}
/// Decrypt an unknown suffix encrypted under ECB mode, when a prefix is also added.
///
/// Given a black box which adds an unknown prefix and suffix to input data before encrypting under
/// ECB mode with the given block size, determine the suffix.
pub fn find_ecb_suffix_with_prefix(ecb_affixes_box: &EcbWithAffixes) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_affixes_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_affixes_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// First, find the length of the prefix, which is currently unknown.
let prefix_len = find_ecb_prefix_len(ecb_affixes_box, block_size);
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = 2 * block_size - 1 - ((prefix_len + suffix.len()) % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = vec![0; prefix_len];
padded_known.extend_from_slice(&padding);
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = prefix_len + padding.len() + suffix.len() + 1 - block_size;
let output = ecb_affixes_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
let extra_padding = block_size - (prefix_len % block_size);
let output_start = prefix_len + extra_padding;
for byte in 0..256 {
let mut test_block = vec![0; block_size - (prefix_len % block_size)];
test_block.extend_from_slice(partial_block);
test_block.push(byte as u8);
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[output_start..output_start + block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Create a token which the `EcbUserProfile` decodes into a user profile with admin privileges.
///
/// Given - a black box which, given an email address, creates a user profile encoded in the form
/// `email=<user-email>&uid=10&role=user`, then encrypts that under ECB mode and provides the
/// output as a token to the user.
///
/// This utilises an ECB cut-and-paste attack to create an admin token.
pub fn craft_ecb_admin_token(ecb_profile_box: &EcbUserProfile) -> Data {
// Paste together non-admin tokens in order to create an admin token. This works by first
// asking for the following three tokens:
//
// 0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF
// [email protected] --> email=email@foo. com&uid=10&role= user
// noone@fakeadmin --> email=noone@fake admin&uid=10&rol e=user
// [email protected] --> email=useless@ma deup.com&uid=10& role=user
//
// If we then take the first two blocks of the first token, the second block of the second
// token and the final block of the third token, and paste them together, we will end up with
// the following token:
//
// [email protected]&uid=10&role=admin&uid=10&rolrole=user
let token1 = ecb_profile_box.make_token("[email protected]");
let token2 = ecb_profile_box.make_token("noone@fakeadmin");
let token3 = ecb_profile_box.make_token("[email protected]");
let mut new_token_bytes = Vec::with_capacity(4 * 16);
new_token_bytes.extend_from_slice(&token1.bytes()[..32]);
new_token_bytes.extend_from_slice(&token2.bytes()[16..32]);
new_token_bytes.extend_from_slice(&token3.bytes()[32..]);
Data::from_bytes(new_token_bytes)
}
/// Create a token which the `CbcCookie` decodes into a cookie with admin privileges.
///
/// Given - a black box which, given an arbitrary string, escapes the metacharacters ';' and '='
/// from the input, then produces a cookie in the form
/// `comment1=cooking%20MCs;userdata=<user-data>;comment2=%20like%20a%20pound%20of%20bacon` and
/// encrypts the result under CBC mode.
///
/// This utilises a CBC bitflipping attack to create an admin token.
pub fn | craft_cbc_admin_token | identifier_name |
|
block.rs | (&input);
metrics::has_repeated_blocks(&encrypted, block_size)
}
/// Decrypt an unknown suffix encrypted under ECB mode.
///
/// Given a black box which adds an unknown suffix to input data before encrypting under ECB mode
/// with the given block size, determine the suffix.
pub fn find_ecb_suffix(ecb_suffix_box: &EcbWithSuffix) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_suffix_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_suffix_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = block_size - 1 - (suffix.len() % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = padding.clone();
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = padding.len() + suffix.len() + 1 - block_size;
let output = ecb_suffix_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
for byte in 0..256 {
let mut test_block = partial_block.to_vec();
test_block.push(byte as u8);
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[..block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
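// Usage sketch (the constructor shown here is an assumption about the surrounding test code):
//     let oracle = EcbWithSuffix::new(secret_suffix);
//     let recovered = find_ecb_suffix(&oracle);
//     assert_eq!(recovered, secret_suffix);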
/// Find the length of an unknown prefix which is appended to ECB-encrypted messages.
fn find_ecb_prefix_len(ecb_affixes_box: &EcbWithAffixes, block_size: usize) -> usize {
// Find the block in which the prefix ends, by finding the first block which is different upon
// inserting a null byte.
let empty = ecb_affixes_box.encrypt(&Data::new());
let noisy = ecb_affixes_box.encrypt(&Data::from_bytes(vec![0]));
let mut prefix_block = 0;
for (ix, (byte1, byte2)) in empty.bytes().iter().zip(noisy.bytes().iter()).enumerate() {
if byte1 != byte2 {
prefix_block = ix / block_size;
break;
}
}
// Now find the length of the prefix modulo the block size, by finding the smallest number of
// null bytes we need to provide as input in order to produce repeated blocks.
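// If ix extra null bytes are needed before two identical blocks appear, the prefix fills all but ix bytes
// of its final block, hence the block_size - ix adjustment below.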
let mut prefix_len = block_size * prefix_block;
for ix in 0..block_size {
let repeats = Data::from_bytes(vec![0; 2 * block_size + ix]);
let output = ecb_affixes_box.encrypt(&repeats);
if output.bytes()[block_size * (prefix_block + 1)..block_size * (prefix_block + 2)] ==
output.bytes()[block_size * (prefix_block + 2)..block_size * (prefix_block + 3)] {
prefix_len += block_size - ix;
break;
}
}
prefix_len
}
/// Decrypt an unknown suffix encrypted under ECB mode, when a prefix is also added.
///
/// Given a black box which adds an unknown prefix and suffix to input data before encrypting under
/// ECB mode with the given block size, determine the suffix.
pub fn find_ecb_suffix_with_prefix(ecb_affixes_box: &EcbWithAffixes) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_affixes_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_affixes_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// First, find the length of the prefix, which is currently unknown.
let prefix_len = find_ecb_prefix_len(ecb_affixes_box, block_size);
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = 2 * block_size - 1 - ((prefix_len + suffix.len()) % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = vec![0; prefix_len];
padded_known.extend_from_slice(&padding);
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = prefix_len + padding.len() + suffix.len() + 1 - block_size;
let output = ecb_affixes_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
let extra_padding = block_size - (prefix_len % block_size); | test_block.push(byte as u8);
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[output_start..output_start + block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Create a token which the `EcbUserProfile` decodes into a user profile with admin privileges.
///
/// Given - a black box which, given an email address, creates a user profile encoded in the form
/// `email=<user-email>&uid=10&role=user`, then encrypts that under ECB mode and provides the
/// output as a token to the user.
///
/// This utilises an ECB cut-and-paste attack to create an admin token.
pub fn craft_ecb_admin_token(ecb_profile_box: &EcbUserProfile) -> Data {
// Paste together non-admin tokens in order to create an admin token. This works by first
// asking for the following three tokens:
//
// 0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF
// [email protected] --> email=email@foo. com&uid=10&role= user
// noone@fakeadmin --> email=noone@fake admin&uid=10&rol e=user
// [email protected] --> email=useless@ma deup.com&uid=10& role=user
//
// If we then take the first two blocks of the first token, the second block of the second
// token and the final block of the third token, and paste them together, we will end up with
// the following token:
//
// [email protected]&uid=10&role=admin&uid=10&rolrole=user
let token1 = ecb_profile_box.make_token("[email protected]");
let token2 = ecb_profile_box.make_token("noone | let output_start = prefix_len + extra_padding;
for byte in 0..256 {
let mut test_block = vec![0; block_size - (prefix_len % block_size)];
test_block.extend_from_slice(partial_block); | random_line_split |
block.rs | input);
metrics::has_repeated_blocks(&encrypted, block_size)
}
/// Decrypt an unknown suffix encrypted under ECB mode.
///
/// Given a black box which adds an unknown suffix to input data before encrypting under ECB mode
/// with the given block size, determine the suffix.
pub fn find_ecb_suffix(ecb_suffix_box: &EcbWithSuffix) -> Data | let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = block_size - 1 - (suffix.len() % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = padding.clone();
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = padding.len() + suffix.len() + 1 - block_size;
let output = ecb_suffix_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
for byte in 0..256 {
let mut test_block = partial_block.to_vec();
test_block.push(byte as u8);
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[..block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Find the length of an unknown prefix which is appended to ECB-encrypted messages.
fn find_ecb_prefix_len(ecb_affixes_box: &EcbWithAffixes, block_size: usize) -> usize {
// Find the block in which the prefix ends, by finding the first block which is different upon
// inserting a null byte.
let empty = ecb_affixes_box.encrypt(&Data::new());
let noisy = ecb_affixes_box.encrypt(&Data::from_bytes(vec![0]));
let mut prefix_block = 0;
for (ix, (byte1, byte2)) in empty.bytes().iter().zip(noisy.bytes().iter()).enumerate() {
if byte1 != byte2 {
prefix_block = ix / block_size;
break;
}
}
// Now find the length of the prefix modulo the block size, by finding the smallest number of
// null bytes we need to provide as input in order to produce repeated blocks.
let mut prefix_len = block_size * prefix_block;
for ix in 0..block_size {
let repeats = Data::from_bytes(vec![0; 2 * block_size + ix]);
let output = ecb_affixes_box.encrypt(&repeats);
if output.bytes()[block_size * (prefix_block + 1)..block_size * (prefix_block + 2)] ==
output.bytes()[block_size * (prefix_block + 2)..block_size * (prefix_block + 3)] {
prefix_len += block_size - ix;
break;
}
}
prefix_len
}
/// Decrypt an unknown suffix encrypted under ECB mode, when a prefix is also added.
///
/// Given a black box which adds an unknown prefix and suffix to input data before encrypting under
/// ECB mode with the given block size, determine the suffix.
pub fn find_ecb_suffix_with_prefix(ecb_affixes_box: &EcbWithAffixes) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_affixes_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_affixes_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// First, find the length of the prefix, which is currently unknown.
let prefix_len = find_ecb_prefix_len(ecb_affixes_box, block_size);
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = 2 * block_size - 1 - ((prefix_len + suffix.len()) % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = vec![0; prefix_len];
padded_known.extend_from_slice(&padding);
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = prefix_len + padding.len() + suffix.len() + 1 - block_size;
let output = ecb_affixes_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
let extra_padding = block_size - (prefix_len % block_size);
let output_start = prefix_len + extra_padding;
for byte in 0..256 {
let mut test_block = vec![0; block_size - (prefix_len % block_size)];
test_block.extend_from_slice(partial_block);
test_block.push(byte as u8);
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[output_start..output_start + block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Create a token which the `EcbUserProfile` decodes into a user profile with admin privileges.
///
/// Given - a black box which, given an email address, creates a user profile encoded in the form
/// `email=<user-email>&uid=10&role=user`, then encrypts that under ECB mode and provides the
/// output as a token to the user.
///
/// This utilises an ECB cut-and-paste attack to create an admin token.
pub fn craft_ecb_admin_token(ecb_profile_box: &EcbUserProfile) -> Data {
// Paste together non-admin tokens in order to create an admin token. This works by first
// asking for the following three tokens:
//
// 0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF
// [email protected] --> email=email@foo. com&uid=10&role= user
// noone@fakeadmin --> email=noone@fake admin&uid=10&rol e=user
// [email protected] --> email=useless@ma deup.com&uid=10& role=user
//
// If we then take the first two blocks of the first token, the second block of the second
// token and the final block of the third token, and paste them together, we will end up with
// the following token:
//
// [email protected]&uid=10&role=admin&uid=10&rolrole=user
let token1 = ecb_profile_box.make_token("[email protected]");
let token2 = ecb_profile_box.make_token("noone | {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_suffix_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_suffix_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10]; | identifier_body |
block.rs | input);
metrics::has_repeated_blocks(&encrypted, block_size)
}
/// Decrypt an unknown suffix encrypted under ECB mode.
///
/// Given a black box which adds an unknown suffix to input data before encrypting under ECB mode
/// with the given block size, determine the suffix.
pub fn find_ecb_suffix(ecb_suffix_box: &EcbWithSuffix) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_suffix_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_suffix_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = block_size - 1 - (suffix.len() % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = padding.clone();
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = padding.len() + suffix.len() + 1 - block_size;
let output = ecb_suffix_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
for byte in 0..256 {
let mut test_block = partial_block.to_vec();
test_block.push(byte as u8);
let output = ecb_suffix_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[..block_size] == block |
}
}
Data::from_bytes(suffix)
}
/// Find the length of an unknown prefix which is appended to ECB-encrypted messages.
fn find_ecb_prefix_len(ecb_affixes_box: &EcbWithAffixes, block_size: usize) -> usize {
// Find the block in which the prefix ends, by finding the first block which is different upon
// inserting a null byte.
let empty = ecb_affixes_box.encrypt(&Data::new());
let noisy = ecb_affixes_box.encrypt(&Data::from_bytes(vec![0]));
let mut prefix_block = 0;
for (ix, (byte1, byte2)) in empty.bytes().iter().zip(noisy.bytes().iter()).enumerate() {
if byte1 != byte2 {
prefix_block = ix / block_size;
break;
}
}
// Now find the length of the prefix modulo the block size, by finding the smallest number of
// null bytes we need to provide as input in order to produce repeated blocks.
let mut prefix_len = block_size * prefix_block;
for ix in 0..block_size {
let repeats = Data::from_bytes(vec![0; 2 * block_size + ix]);
let output = ecb_affixes_box.encrypt(&repeats);
if output.bytes()[block_size * (prefix_block + 1)..block_size * (prefix_block + 2)] ==
output.bytes()[block_size * (prefix_block + 2)..block_size * (prefix_block + 3)] {
prefix_len += block_size - ix;
break;
}
}
prefix_len
}
/// Decrypt an unknown suffix encrypted under ECB mode, when a prefix is also added.
///
/// Given a black box which adds an unknown prefix and suffix to input data before encrypting under
/// ECB mode with the given block size, determine the suffix.
pub fn find_ecb_suffix_with_prefix(ecb_affixes_box: &EcbWithAffixes) -> Data {
// Determine the block size by repeatedly encrypting larger chunks of data until the output
// jumps in length.
let block_size;
let base_len = ecb_affixes_box.encrypt(&Data::new()).len();
let mut cnt = 1;
loop {
let bytes = vec![0; cnt];
let input = Data::from_bytes(bytes);
let new_len = ecb_affixes_box.encrypt(&input).len();
if new_len > base_len {
block_size = new_len - base_len;
break;
}
cnt += 1;
}
// Confirm that ECB is being used.
let test_bytes = vec![0; block_size * 10];
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_bytes));
assert!(metrics::has_repeated_blocks(&output, block_size));
// First, find the length of the prefix, which is currently unknown.
let prefix_len = find_ecb_prefix_len(ecb_affixes_box, block_size);
// Keep track of the suffix bytes that we have decrypted so far.
let mut suffix = Vec::new();
// Decrypt the suffix one byte at a time.
'outer: loop {
// Pad the known suffix with null bytes until it finishes one byte before a block boundary.
let num_bytes = 2 * block_size - 1 - ((prefix_len + suffix.len()) % block_size);
let padding = vec![0; num_bytes];
let mut padded_known = vec![0; prefix_len];
padded_known.extend_from_slice(&padding);
padded_known.extend_from_slice(&suffix);
// Pass the padding into the box, and grab the encrypted block which corresponds to our
// input block whose last byte we are trying to determine.
let block_pos = prefix_len + padding.len() + suffix.len() + 1 - block_size;
let output = ecb_affixes_box.encrypt(&Data::from_bytes(padding));
if output.len() <= block_pos + block_size {
// We've retrieved the whole suffix, so break.
break;
}
let block = &output.bytes()[block_pos..block_pos + block_size];
// Compare the encrypted block against all the possible outputs that the block could
// encrypt to, depending on its final byte.
let partial_block = &padded_known[block_pos..];
let extra_padding = block_size - (prefix_len % block_size);
let output_start = prefix_len + extra_padding;
for byte in 0..256 {
let mut test_block = vec![0; block_size - (prefix_len % block_size)];
test_block.extend_from_slice(partial_block);
test_block.push(byte as u8);
let output = ecb_affixes_box.encrypt(&Data::from_bytes(test_block));
if &output.bytes()[output_start..output_start + block_size] == block {
suffix.push(byte as u8);
continue 'outer;
}
}
}
Data::from_bytes(suffix)
}
/// Create a token which the `EcbUserProfile` decodes into a user profile with admin privileges.
///
/// Given - a black box which, given an email address, creates a user profile encoded in the form
/// `email=<user-email>&uid=10&role=user`, then encrypts that under ECB mode and provides the
/// output as a token to the user.
///
/// This utilises an ECB cut-and-paste attack to create an admin token.
pub fn craft_ecb_admin_token(ecb_profile_box: &EcbUserProfile) -> Data {
// Paste together non-admin tokens in order to create an admin token. This works by first
// asking for the following three tokens:
//
// 0123456789ABCDEF 0123456789ABCDEF 0123456789ABCDEF
// [email protected] --> email=email@foo. com&uid=10&role= user
// noone@fakeadmin --> email=noone@fake admin&uid=10&rol e=user
// [email protected] --> email=useless@ma deup.com&uid=10& role=user
//
// If we then take the first two blocks of the first token, the second block of the second
// token and the final block of the third token, and paste them together, we will end up with
// the following token:
//
// [email protected]&uid=10&role=admin&uid=10&rolrole=user
let token1 = ecb_profile_box.make_token("[email protected]");
let token2 = ecb_profile_box.make_token("no | {
suffix.push(byte as u8);
continue 'outer;
} | conditional_block |
emoji-picker-qt.py | font.setPointSize(emojiFontSize)
# quits without a lag
def quitNicely():
mainWindow.hide()
quit()
# gets mouse position from Xlib
def mousePosition():
pointerData = display.Display().screen().root.query_pointer()._data
return pointerData["root_x"], pointerData["root_y"]
# copies and pastes selected emoji
def execute_emoji(char):
add_char_to_history(char)
global willExitOnItsOwn
willExitOnItsOwn = True
mainWindow.hide()
QApplication.clipboard().setText(char)
pyautogui.hotkey("ctrl","v")
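# give the target window a moment to process the paste before this process exits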
QtTest.QTest.qWait(250)
quit()
# fills grid with given char list and takes care of layout and counting
def fill_grid_with_char_list(charList):
# for wraparound
global emojiToShowCount
global fullRowsCount
global lastRowEmojiCount
emojiToShowCount = min(len(charList),(emojiGridColumnCount*emojiGridRowCount))
fullRowsCount = emojiToShowCount//emojiGridColumnCount
lastRowEmojiCount = emojiToShowCount%emojiGridColumnCount
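# fullRowsCount: completely filled rows; lastRowEmojiCount: emoji in the final partial row (0 when the grid divides evenly)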
global foundAnyEmoji
if emojiToShowCount>0:
foundAnyEmoji = True
layoutStack.setCurrentIndex(0)
else:
foundAnyEmoji = False
layoutStack.setCurrentIndex(1)
# clear grid
global emojiGridLayout
for i in reversed(range(emojiGridLayout.count())):
emojiGridLayout.itemAt(i).widget().setParent(None)
# fill with new chars
rowIdx = 0
colIdx = 0
for emoji in charList:
if rowIdx>emojiGridRowCount-1:
break
label = QClickableLabel(emoji)
label.clicked.connect(execute_emoji)
label.setFont(font)
label.setAlignment(Qt.AlignCenter)
label.setMinimumHeight(49)
emojiGridLayout.addWidget(label,rowIdx,colIdx)
emojiGridLayout.setAlignment(label,Qt.AlignTop)
if colIdx < emojiGridColumnCount-1:
colIdx+=1
else:
colIdx=0
rowIdx+=1
emojiGridLayout.setContentsMargins(0,0,0,0)
emojiGridLayout.setHorizontalSpacing(0)
emojiGridLayout.setVerticalSpacing(0)
if emojiToShowCount>0:
highlight_emoji([0,0])
# searches for emoji, and passes them to fill_grid_with_char_list
def execute_search(text):
selectedEmoji = (0,0)
if not text or text.isspace():
fill_grid_with_history()
return
foundEmoji = edp.find_by_name(text)
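# edp is presumably the emoji_data_python module: find_by_name returns EmojiChar objects whose official name matches the query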
charList = [emoji.char for emoji in foundEmoji]
fill_grid_with_char_list(charList)
# handles what to do after hovering over a given label
def emoji_hovered(hoveredLabel):
parentGrid = hoveredLabel.parentWidget().layout()
hoveredIndex = parentGrid.indexOf(hoveredLabel)
hoveredRow, hoveredColumn, _, _ = parentGrid.getItemPosition(hoveredIndex)
highlight_emoji([hoveredRow,hoveredColumn])
# selects, sets style and handles wraparound
def highlight_emoji(newPosition):
global selectedEmojiPosition
# grid is filled to a full rectangle (last row fills the window horizontally)
if lastRowEmojiCount==0:
if newPosition[0]<0:
newPosition[0]=fullRowsCount-1
elif newPosition[1]<0:
newPosition[1]=emojiGridColumnCount-1
elif newPosition[0]>fullRowsCount-1:
newPosition[0]=0
elif newPosition[1]>emojiGridColumnCount-1:
newPosition[1]=0
# last row is not full
else:
#horizontal wraparound through RIGHT edge for full rows
if (newPosition[0]<fullRowsCount) and (newPosition[1]>emojiGridColumnCount-1):
newPosition[1]=0
#horizontal wraparound through LEFT edge for full rows
elif (newPosition[0]<fullRowsCount) and (newPosition[1]<0):
newPosition[1]=emojiGridColumnCount-1
#horizontal wraparound through right edge for NON FULL rows
elif (newPosition[0]==fullRowsCount) and (newPosition[1]>lastRowEmojiCount-1) and ((selectedEmojiPosition[0]-newPosition[0])==0):
newPosition[1]=0
#horizontal wraparound through LEFT edge for NON FULL rows
elif (newPosition[0]>=fullRowsCount) and (newPosition[1]<0):
newPosition[1]=lastRowEmojiCount-1
#vertical wraparound through BOTTOM edge for full cols
elif (newPosition[0]>fullRowsCount) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=0
#vertical wraparound through TOP edge for full cols
elif (newPosition[0]<0) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=fullRowsCount
#vertical wraparound through BOTTOM edge for NON FULL cols
elif (newPosition[0]>fullRowsCount-1) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=0
#vertical wraparound through TOP edge for NON FULL cols
elif (newPosition[0]<0) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=fullRowsCount-1
oldPosition = selectedEmojiPosition
selectedEmojiPosition = newPosition
widgetToDeselect = emojiGridLayout.itemAtPosition(oldPosition[0],oldPosition[1])
if widgetToDeselect:
widgetToDeselect = widgetToDeselect.widget()
widgetToDeselect.setStyleSheet("")
global selectedEmojiChar
widgetToSelect = emojiGridLayout.itemAtPosition(selectedEmojiPosition[0],selectedEmojiPosition[1])
if widgetToSelect:
widgetToSelect = widgetToSelect.widget()
selectedEmojiChar = widgetToSelect.text()
widgetToSelect.setStyleSheet("QLabel{background-color: palette(highlight);}")
# handles direction where to move emoji selection
def move_selection(direction):
if direction=="right":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,1])])
elif direction=="left":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,-1])])
elif direction=="up":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [-1,0])])
elif direction=="down":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [1,0])])
# handles Esc
def on_key(key):
# test for a specific key
if key == Qt.Key_Escape:
quitNicely()
# adds given emoji to history and saves it to config file
def add_char_to_history(char):
global settingsFile
global historyList
if not historyList:
historyList = [char]
else:
if char in historyList:
historyList.remove(char)
tempList = [char]
tempList.extend(historyList)
historyList = tempList[:(emojiGridColumnCount*emojiGridRowCount)]
settingsFile.setValue('history/history',historyList)
# wrapper around filling the grid
def fill_grid_with_history():
fill_grid_with_char_list(historyList)
# main app window class with inits
class EmojiPickerWindow(QWidget):
def __init__(self):
super().__init__()
# focus handling
self.installEventFilter(self)
self.title = 'Emoji picker \(^o^)/'
self.width = 281
self.height = 251
# start with text box centered at mouse pointer position
self.left, self.top = mousePosition()
self.left -= self.width//2
self.top += (24-self.height)
self.initSettings()
self.initUI()
def initUI(self):
# topmost window layout
layout = QVBoxLayout()
global layoutStack
layoutStack = QStackedLayout()
layoutStackWidget = QWidget()
layoutStackWidget.setLayout(layoutStack)
# scroll area setup shenanigans
scrollArea = QScrollArea()
gridWidget = QWidget()
global emojiGridLayout
emojiGridLayout = QGridLayout(gridWidget)
emojiGridLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
# stretch grid to widget
for col in range(emojiGridColumnCount):
emojiGridLayout.setColumnStretch(col,1)
for row in range(emojiGridRowCount):
emojiGridLayout.setRowStretch(row,1)
scrollArea.setWidget(gridWidget)
scrollArea.setWidgetResizable(True)
layoutStack.addWidget(scrollArea)
# info to show when no emoji has been found
noEmojiFoundLabel = QLabel("No emoji found 🙁")
noEmojiFoundLabel.setAlignment(Qt.AlignCenter | Qt.AlignHCenter | Qt.AlignVCenter)
layoutStack.addWidget(noEmojiFoundLabel)
layout.addWidget(layoutStackWidget)
# fill with a placeholder for now (smiling or smile)
# execute_search('smil')
fill_grid_with_history()
# bottom text entry
lineEdit = QLineEditWithArrows()
lineEdit.textChanged.connect(execute_search)
layout.addWidget(lineEdit)
# align it to the bottom, so that it won't stay centered vertically
layout.setAlignment(lineEdit, Qt.AlignBottom)
self.setLayout(layout)
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, | quitNicely | identifier_name |
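The layout and wraparound logic in fill_grid_with_char_list and highlight_emoji above is driven by three counters. A small standalone example (grid dimensions are assumed here purely for illustration) shows how they are computed and what they mean for arrow-key navigation:
# Assumed grid size for the example; the real values come from the picker's settings.
emojiGridColumnCount = 5
emojiGridRowCount = 4
charList = ["x"] * 13  # pretend the search returned 13 matches
emojiToShowCount = min(len(charList), emojiGridColumnCount * emojiGridRowCount)  # 13
fullRowsCount = emojiToShowCount // emojiGridColumnCount                         # 2 full rows
lastRowEmojiCount = emojiToShowCount % emojiGridColumnCount                      # 3 in the partial last row
# highlight_emoji() uses these to wrap the selection: on a full row, moving right from
# column 4 wraps to column 0; on the partial last row the wrap happens after column 2;
# moving down from the partial row wraps back to row 0.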
emoji-picker-qt.py | font.setPointSize(emojiFontSize)
# quits without a lag
def quitNicely():
mainWindow.hide()
quit()
# gets mouse position from Xlib
def mousePosition():
pointerData = display.Display().screen().root.query_pointer()._data
return pointerData["root_x"], pointerData["root_y"]
# copies and pastes selected emoji
def execute_emoji(char):
add_char_to_history(char)
global willExitOnItsOwn
willExitOnItsOwn = True
mainWindow.hide()
QApplication.clipboard().setText(char)
pyautogui.hotkey("ctrl","v")
QtTest.QTest.qWait(250)
quit()
# fills grid with given char list and takes care of layout and counting
def fill_grid_with_char_list(charList):
# for wraparound
global emojiToShowCount
global fullRowsCount
global lastRowEmojiCount
emojiToShowCount = min(len(charList),(emojiGridColumnCount*emojiGridRowCount))
fullRowsCount = emojiToShowCount//emojiGridColumnCount
lastRowEmojiCount = emojiToShowCount%emojiGridColumnCount
global foundAnyEmoji
if emojiToShowCount>0:
foundAnyEmoji = True
layoutStack.setCurrentIndex(0)
else:
|
# clear grid
global emojiGridLayout
for i in reversed(range(emojiGridLayout.count())):
emojiGridLayout.itemAt(i).widget().setParent(None)
# fill with new chars
rowIdx = 0
colIdx = 0
for emoji in charList:
if rowIdx>emojiGridRowCount-1:
break
label = QClickableLabel(emoji)
label.clicked.connect(execute_emoji)
label.setFont(font)
label.setAlignment(Qt.AlignCenter)
label.setMinimumHeight(49)
emojiGridLayout.addWidget(label,rowIdx,colIdx)
emojiGridLayout.setAlignment(label,Qt.AlignTop)
if colIdx < emojiGridColumnCount-1:
colIdx+=1
else:
colIdx=0
rowIdx+=1
emojiGridLayout.setContentsMargins(0,0,0,0)
emojiGridLayout.setHorizontalSpacing(0)
emojiGridLayout.setVerticalSpacing(0)
if emojiToShowCount>0:
highlight_emoji([0,0])
# searches for emoji, and passes them to fill_grid_with_char_list
def execute_search(text):
selectedEmoji = (0,0)
if not text or text.isspace():
fill_grid_with_history()
return
foundEmoji = edp.find_by_name(text)
charList = [emoji.char for emoji in foundEmoji]
fill_grid_with_char_list(charList)
# handles what to do after hovering over a given label
def emoji_hovered(hoveredLabel):
parentGrid = hoveredLabel.parentWidget().layout()
hoveredIndex = parentGrid.indexOf(hoveredLabel)
hoveredRow, hoveredColumn, _, _ = parentGrid.getItemPosition(hoveredIndex)
highlight_emoji([hoveredRow,hoveredColumn])
# selects, sets style and handles wraparound
def highlight_emoji(newPosition):
global selectedEmojiPosition
# grid is filled to a full rectangle (last row fills the window horizontally)
if lastRowEmojiCount==0:
if newPosition[0]<0:
newPosition[0]=fullRowsCount-1
elif newPosition[1]<0:
newPosition[1]=emojiGridColumnCount-1
elif newPosition[0]>fullRowsCount-1:
newPosition[0]=0
elif newPosition[1]>emojiGridColumnCount-1:
newPosition[1]=0
# last row is not full
else:
#horizontal wraparound through RIGHT edge for full rows
if (newPosition[0]<fullRowsCount) and (newPosition[1]>emojiGridColumnCount-1):
newPosition[1]=0
#horizontal wraparound through LEFT edge for full rows
elif (newPosition[0]<fullRowsCount) and (newPosition[1]<0):
newPosition[1]=emojiGridColumnCount-1
#horizontal wraparound through right edge for NON FULL rows
elif (newPosition[0]==fullRowsCount) and (newPosition[1]>lastRowEmojiCount-1) and ((selectedEmojiPosition[0]-newPosition[0])==0):
newPosition[1]=0
#horizontal wraparound through LEFT edge for NON FULL rows
elif (newPosition[0]>=fullRowsCount) and (newPosition[1]<0):
newPosition[1]=lastRowEmojiCount-1
#vertical wraparound through BOTTOM edge for full cols
elif (newPosition[0]>fullRowsCount) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=0
#vertical wraparound through TOP edge for full cols
elif (newPosition[0]<0) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=fullRowsCount
#vertical wraparound through BOTTOM edge for NON FULL cols
elif (newPosition[0]>fullRowsCount-1) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=0
#vertical wraparound through TOP edge for NON FULL cols
elif (newPosition[0]<0) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=fullRowsCount-1
oldPosition = selectedEmojiPosition
selectedEmojiPosition = newPosition
widgetToDeselect = emojiGridLayout.itemAtPosition(oldPosition[0],oldPosition[1])
if widgetToDeselect:
widgetToDeselect = widgetToDeselect.widget()
widgetToDeselect.setStyleSheet("")
global selectedEmojiChar
widgetToSelect = emojiGridLayout.itemAtPosition(selectedEmojiPosition[0],selectedEmojiPosition[1])
if widgetToSelect:
widgetToSelect = widgetToSelect.widget()
selectedEmojiChar = widgetToSelect.text()
widgetToSelect.setStyleSheet("QLabel{background-color: palette(highlight);}")
# handles direction where to move emoji selection
def move_selection(direction):
if direction=="right":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,1])])
elif direction=="left":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,-1])])
elif direction=="up":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [-1,0])])
elif direction=="down":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [1,0])])
# handles Esc
def on_key(key):
# test for a specific key
if key == Qt.Key_Escape:
quitNicely()
# adds given emoji to history and saves it to config file
def add_char_to_history(char):
global settingsFile
global historyList
if not historyList:
historyList = [char]
else:
if char in historyList:
historyList.remove(char)
tempList = [char]
tempList.extend(historyList)
historyList = tempList[:(emojiGridColumnCount*emojiGridRowCount)]
settingsFile.setValue('history/history',historyList)
# wrapper around filling the grid
def fill_grid_with_history():
fill_grid_with_char_list(historyList)
# main app window class with inits
class EmojiPickerWindow(QWidget):
def __init__(self):
super().__init__()
# focus handling
self.installEventFilter(self)
self.title = 'Emoji picker \(^o^)/'
self.width = 281
self.height = 251
# start with text box centered at mouse pointer position
self.left, self.top = mousePosition()
self.left -= self.width//2
self.top += (24-self.height)
self.initSettings()
self.initUI()
def initUI(self):
# topmost window layout
layout = QVBoxLayout()
global layoutStack
layoutStack = QStackedLayout()
layoutStackWidget = QWidget()
layoutStackWidget.setLayout(layoutStack)
# scroll area setup shenanigans
scrollArea = QScrollArea()
gridWidget = QWidget()
global emojiGridLayout
emojiGridLayout = QGridLayout(gridWidget)
emojiGridLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
# stretch grid to widget
for col in range(emojiGridColumnCount):
emojiGridLayout.setColumnStretch(col,1)
for row in range(emojiGridRowCount):
emojiGridLayout.setRowStretch(row,1)
scrollArea.setWidget(gridWidget)
scrollArea.setWidgetResizable(True)
layoutStack.addWidget(scrollArea)
# info to show when no emoji has been found
noEmojiFoundLabel = QLabel("No emoji found 🙁")
noEmojiFoundLabel.setAlignment(Qt.AlignCenter | Qt.AlignHCenter | Qt.AlignVCenter)
layoutStack.addWidget(noEmojiFoundLabel)
layout.addWidget(layoutStackWidget)
# fill with a placeholder for now (smiling or smile)
# execute_search('smil')
fill_grid_with_history()
# bottom text entry
lineEdit = QLineEditWithArrows()
lineEdit.textChanged.connect(execute_search)
layout.addWidget(lineEdit)
# align it to the bottom, so that it won't stay centered vertically
layout.setAlignment(lineEdit, Qt.AlignBottom)
self.setLayout(layout)
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, | foundAnyEmoji = False
layoutStack.setCurrentIndex(1) | conditional_block |
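add_char_to_history above keeps a small most-recently-used list and persists it through QSettings. Stripped of the persistence, the behaviour is just the following standalone sketch (the function name is chosen for the demo only):
def mru_insert(history, char, limit):
    # Move an existing entry to the front, or prepend a new one, capped at limit entries.
    if char in history:
        history.remove(char)
    return ([char] + history)[:limit]
h = []
for c in ("A", "B", "A", "C"):
    h = mru_insert(h, c, limit=3)
print(h)  # ['C', 'A', 'B'] - repeats move to the front, the oldest entries drop off
In the picker itself the limit is emojiGridColumnCount * emojiGridRowCount, so the history always fits one grid page.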
emoji-picker-qt.py | font.setPointSize(emojiFontSize)
# quits without a lag
def quitNicely():
mainWindow.hide()
quit()
# gets mouse position from Xlib
def mousePosition():
pointerData = display.Display().screen().root.query_pointer()._data
return pointerData["root_x"], pointerData["root_y"]
# copies and pastes selected emoji
def execute_emoji(char):
add_char_to_history(char)
global willExitOnItsOwn
willExitOnItsOwn = True
mainWindow.hide()
QApplication.clipboard().setText(char)
pyautogui.hotkey("ctrl","v")
QtTest.QTest.qWait(250)
quit()
# fills grid with given char list and takes care of layout and counting
def fill_grid_with_char_list(charList):
# for wraparound
global emojiToShowCount
global fullRowsCount
global lastRowEmojiCount
emojiToShowCount = min(len(charList),(emojiGridColumnCount*emojiGridRowCount))
fullRowsCount = emojiToShowCount//emojiGridColumnCount
lastRowEmojiCount = emojiToShowCount%emojiGridColumnCount
global foundAnyEmoji
if emojiToShowCount>0:
foundAnyEmoji = True
layoutStack.setCurrentIndex(0)
else:
foundAnyEmoji = False
layoutStack.setCurrentIndex(1)
# clear grid
global emojiGridLayout
for i in reversed(range(emojiGridLayout.count())):
emojiGridLayout.itemAt(i).widget().setParent(None)
# fill with new chars
rowIdx = 0
colIdx = 0
for emoji in charList:
if rowIdx>emojiGridRowCount-1:
break
label = QClickableLabel(emoji)
label.clicked.connect(execute_emoji)
label.setFont(font)
label.setAlignment(Qt.AlignCenter)
label.setMinimumHeight(49)
emojiGridLayout.addWidget(label,rowIdx,colIdx)
emojiGridLayout.setAlignment(label,Qt.AlignTop)
if colIdx < emojiGridColumnCount-1:
colIdx+=1
else:
colIdx=0
rowIdx+=1
emojiGridLayout.setContentsMargins(0,0,0,0)
emojiGridLayout.setHorizontalSpacing(0)
emojiGridLayout.setVerticalSpacing(0)
if emojiToShowCount>0:
highlight_emoji([0,0])
# searches for emoji, and passes them to fill_grid_with_char_list
def execute_search(text):
|
# handles what to do after hovering over a given label
def emoji_hovered(hoveredLabel):
parentGrid = hoveredLabel.parentWidget().layout()
hoveredIndex = parentGrid.indexOf(hoveredLabel)
hoveredRow, hoveredColumn, _, _ = parentGrid.getItemPosition(hoveredIndex)
highlight_emoji([hoveredRow,hoveredColumn])
# selects, sets style and handles wraparound
def highlight_emoji(newPosition):
global selectedEmojiPosition
# grid is filled to a full rectangle (last row fills the window horizontally)
if lastRowEmojiCount==0:
if newPosition[0]<0:
newPosition[0]=fullRowsCount-1
elif newPosition[1]<0:
newPosition[1]=emojiGridColumnCount-1
elif newPosition[0]>fullRowsCount-1:
newPosition[0]=0
elif newPosition[1]>emojiGridColumnCount-1:
newPosition[1]=0
# last row is not full
else:
#horizontal wraparound through RIGHT edge for full rows
if (newPosition[0]<fullRowsCount) and (newPosition[1]>emojiGridColumnCount-1):
newPosition[1]=0
#horizontal wraparound through LEFT edge for full rows
elif (newPosition[0]<fullRowsCount) and (newPosition[1]<0):
newPosition[1]=emojiGridColumnCount-1
#horizontal wraparound through right edge for NON FULL rows
elif (newPosition[0]==fullRowsCount) and (newPosition[1]>lastRowEmojiCount-1) and ((selectedEmojiPosition[0]-newPosition[0])==0):
newPosition[1]=0
#horizontal wraparound through LEFT edge for NON FULL rows
elif (newPosition[0]>=fullRowsCount) and (newPosition[1]<0):
newPosition[1]=lastRowEmojiCount-1
#vertical wraparound through BOTTOM edge for full cols
elif (newPosition[0]>fullRowsCount) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=0
#vertical wraparound through TOP edge for full cols
elif (newPosition[0]<0) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=fullRowsCount
#vertical wraparound through BOTTOM edge for NON FULL cols
elif (newPosition[0]>fullRowsCount-1) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=0
#vertical wraparound through TOP edge for NON FULL cols
elif (newPosition[0]<0) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=fullRowsCount-1
oldPosition = selectedEmojiPosition
selectedEmojiPosition = newPosition
widgetToDeselect = emojiGridLayout.itemAtPosition(oldPosition[0],oldPosition[1])
if widgetToDeselect:
widgetToDeselect = widgetToDeselect.widget()
widgetToDeselect.setStyleSheet("")
global selectedEmojiChar
widgetToSelect = emojiGridLayout.itemAtPosition(selectedEmojiPosition[0],selectedEmojiPosition[1])
if widgetToSelect:
widgetToSelect = widgetToSelect.widget()
selectedEmojiChar = widgetToSelect.text()
widgetToSelect.setStyleSheet("QLabel{background-color: palette(highlight);}")
# handles direction where to move emoji selection
def move_selection(direction):
if direction=="right":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,1])])
elif direction=="left":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,-1])])
elif direction=="up":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [-1,0])])
elif direction=="down":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [1,0])])
# handles Esc
def on_key(key):
# test for a specific key
if key == Qt.Key_Escape:
quitNicely()
# adds given emoji to history and saves it to config file
def add_char_to_history(char):
global settingsFile
global historyList
if not historyList:
historyList = [char]
else:
if char in historyList:
historyList.remove(char)
tempList = [char]
tempList.extend(historyList)
historyList = tempList[:(emojiGridColumnCount*emojiGridRowCount)]
settingsFile.setValue('history/history',historyList)
# wrapper around filling the grid
def fill_grid_with_history():
fill_grid_with_char_list(historyList)
# main app window class with inits
class EmojiPickerWindow(QWidget):
def __init__(self):
super().__init__()
# focus handling
self.installEventFilter(self)
self.title = 'Emoji picker \(^o^)/'
self.width = 281
self.height = 251
# start with text box centered at mouse pointer position
self.left, self.top = mousePosition()
self.left -= self.width//2
self.top += (24-self.height)
self.initSettings()
self.initUI()
def initUI(self):
# topmost window layout
layout = QVBoxLayout()
global layoutStack
layoutStack = QStackedLayout()
layoutStackWidget = QWidget()
layoutStackWidget.setLayout(layoutStack)
# scroll area setup shenanigans
scrollArea = QScrollArea()
gridWidget = QWidget()
global emojiGridLayout
emojiGridLayout = QGridLayout(gridWidget)
emojiGridLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
# stretch grid to widget
for col in range(emojiGridColumnCount):
emojiGridLayout.setColumnStretch(col,1)
for row in range(emojiGridRowCount):
emojiGridLayout.setRowStretch(row,1)
scrollArea.setWidget(gridWidget)
scrollArea.setWidgetResizable(True)
layoutStack.addWidget(scrollArea)
# info to show when no emoji has been found
noEmojiFoundLabel = QLabel("No emoji found 🙁")
noEmojiFoundLabel.setAlignment(Qt.AlignCenter | Qt.AlignHCenter | Qt.AlignVCenter)
layoutStack.addWidget(noEmojiFoundLabel)
layout.addWidget(layoutStackWidget)
# fill with a placeholder for now (smiling or smile)
# execute_search('smil')
fill_grid_with_history()
# bottom text entry
lineEdit = QLineEditWithArrows()
lineEdit.textChanged.connect(execute_search)
layout.addWidget(lineEdit)
# align it to the bottom, so that it won't stay centered vertically
layout.setAlignment(lineEdit, Qt.AlignBottom)
self.setLayout(layout)
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, | selectedEmoji = (0,0)
if not text or text.isspace():
fill_grid_with_history()
return
foundEmoji = edp.find_by_name(text)
charList = [emoji.char for emoji in foundEmoji]
fill_grid_with_char_list(charList) | identifier_body |
emoji-picker-qt.py | font.setPointSize(emojiFontSize)
# quits without a lag
def quitNicely():
mainWindow.hide()
quit() | return pointerData["root_x"], pointerData["root_y"]
# copies and pastes selected emoji
def execute_emoji(char):
add_char_to_history(char)
global willExitOnItsOwn
willExitOnItsOwn = True
mainWindow.hide()
QApplication.clipboard().setText(char)
pyautogui.hotkey("ctrl","v")
QtTest.QTest.qWait(250)
quit()
# fills grid with given char list and takes care of layout and counting
def fill_grid_with_char_list(charList):
# for wraparound
global emojiToShowCount
global fullRowsCount
global lastRowEmojiCount
emojiToShowCount = min(len(charList),(emojiGridColumnCount*emojiGridRowCount))
fullRowsCount = emojiToShowCount//emojiGridColumnCount
lastRowEmojiCount = emojiToShowCount%emojiGridColumnCount
global foundAnyEmoji
if emojiToShowCount>0:
foundAnyEmoji = True
layoutStack.setCurrentIndex(0)
else:
foundAnyEmoji = False
layoutStack.setCurrentIndex(1)
# clear grid
global emojiGridLayout
for i in reversed(range(emojiGridLayout.count())):
emojiGridLayout.itemAt(i).widget().setParent(None)
# fill with new chars
rowIdx = 0
colIdx = 0
for emoji in charList:
if rowIdx>emojiGridRowCount-1:
break
label = QClickableLabel(emoji)
label.clicked.connect(execute_emoji)
label.setFont(font)
label.setAlignment(Qt.AlignCenter)
label.setMinimumHeight(49)
emojiGridLayout.addWidget(label,rowIdx,colIdx)
emojiGridLayout.setAlignment(label,Qt.AlignTop)
if colIdx < emojiGridColumnCount-1:
colIdx+=1
else:
colIdx=0
rowIdx+=1
emojiGridLayout.setContentsMargins(0,0,0,0)
emojiGridLayout.setHorizontalSpacing(0)
emojiGridLayout.setVerticalSpacing(0)
if emojiToShowCount>0:
highlight_emoji([0,0])
# searches for emoji, and passes them to fill_grid_with_char_list
def execute_search(text):
selectedEmoji = (0,0)
if not text or text.isspace():
fill_grid_with_history()
return
foundEmoji = edp.find_by_name(text)
charList = [emoji.char for emoji in foundEmoji]
fill_grid_with_char_list(charList)
# handles what to do after hovering over a given label
def emoji_hovered(hoveredLabel):
parentGrid = hoveredLabel.parentWidget().layout()
hoveredIndex = parentGrid.indexOf(hoveredLabel)
hoveredRow, hoveredColumn, _, _ = parentGrid.getItemPosition(hoveredIndex)
highlight_emoji([hoveredRow,hoveredColumn])
# selects, sets style and handles wraparound
def highlight_emoji(newPosition):
global selectedEmojiPosition
# grid is filled to a full rectangle (last row fills the window horizontally)
if lastRowEmojiCount==0:
if newPosition[0]<0:
newPosition[0]=fullRowsCount-1
elif newPosition[1]<0:
newPosition[1]=emojiGridColumnCount-1
elif newPosition[0]>fullRowsCount-1:
newPosition[0]=0
elif newPosition[1]>emojiGridColumnCount-1:
newPosition[1]=0
# last row is not full
else:
#horizontal wraparound through RIGHT edge for full rows
if (newPosition[0]<fullRowsCount) and (newPosition[1]>emojiGridColumnCount-1):
newPosition[1]=0
#horizontal wraparound through LEFT edge for full rows
elif (newPosition[0]<fullRowsCount) and (newPosition[1]<0):
newPosition[1]=emojiGridColumnCount-1
#horizontal wraparound through right edge for NON FULL rows
elif (newPosition[0]==fullRowsCount) and (newPosition[1]>lastRowEmojiCount-1) and ((selectedEmojiPosition[0]-newPosition[0])==0):
newPosition[1]=0
#horizontal wraparound through LEFT edge for NON FULL rows
elif (newPosition[0]>=fullRowsCount) and (newPosition[1]<0):
newPosition[1]=lastRowEmojiCount-1
#vertical wraparound through BOTTOM edge for full cols
elif (newPosition[0]>fullRowsCount) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=0
#vertical wraparound through TOP edge for full cols
elif (newPosition[0]<0) and (newPosition[1]<lastRowEmojiCount):
newPosition[0]=fullRowsCount
#vertical wraparound through BOTTOM edge for NON FULL cols
elif (newPosition[0]>fullRowsCount-1) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=0
#vertical wraparound through TOP edge for NON FULL cols
elif (newPosition[0]<0) and (newPosition[1]>lastRowEmojiCount-1):
newPosition[0]=fullRowsCount-1
oldPosition = selectedEmojiPosition
selectedEmojiPosition = newPosition
widgetToDeselect = emojiGridLayout.itemAtPosition(oldPosition[0],oldPosition[1])
if widgetToDeselect:
widgetToDeselect = widgetToDeselect.widget()
widgetToDeselect.setStyleSheet("")
global selectedEmojiChar
widgetToSelect = emojiGridLayout.itemAtPosition(selectedEmojiPosition[0],selectedEmojiPosition[1])
if widgetToSelect:
widgetToSelect = widgetToSelect.widget()
selectedEmojiChar = widgetToSelect.text()
widgetToSelect.setStyleSheet("QLabel{background-color: palette(highlight);}")
# handles direction where to move emoji selection
def move_selection(direction):
if direction=="right":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,1])])
elif direction=="left":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [0,-1])])
elif direction=="up":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [-1,0])])
elif direction=="down":
highlight_emoji([sum(x) for x in zip(selectedEmojiPosition, [1,0])])
# handles Esc
def on_key(key):
# test for a specific key
if key == Qt.Key_Escape:
quitNicely()
# adds given emoji to history and saves it to config file
def add_char_to_history(char):
global settingsFile
global historyList
if not historyList:
historyList = [char]
else:
if char in historyList:
historyList.remove(char)
tempList = [char]
tempList.extend(historyList)
historyList = tempList[:(emojiGridColumnCount*emojiGridRowCount)]
settingsFile.setValue('history/history',historyList)
# wrapper around filling the grid
def fill_grid_with_history():
fill_grid_with_char_list(historyList)
# main app window class with inits
class EmojiPickerWindow(QWidget):
def __init__(self):
super().__init__()
# focus handling
self.installEventFilter(self)
self.title = 'Emoji picker \(^o^)/'
self.width = 281
self.height = 251
# start with text box centered at mouse pointer position
self.left, self.top = mousePosition()
self.left -= self.width//2
self.top += (24-self.height)
self.initSettings()
self.initUI()
def initUI(self):
# topmost window layout
layout = QVBoxLayout()
global layoutStack
layoutStack = QStackedLayout()
layoutStackWidget = QWidget()
layoutStackWidget.setLayout(layoutStack)
# scroll area setup shenanigans
scrollArea = QScrollArea()
gridWidget = QWidget()
global emojiGridLayout
emojiGridLayout = QGridLayout(gridWidget)
emojiGridLayout.setAlignment(Qt.AlignTop | Qt.AlignLeft)
# stretch grid to widget
for col in range(emojiGridColumnCount):
emojiGridLayout.setColumnStretch(col,1)
for row in range(emojiGridRowCount):
emojiGridLayout.setRowStretch(row,1)
scrollArea.setWidget(gridWidget)
scrollArea.setWidgetResizable(True)
layoutStack.addWidget(scrollArea)
# info to show when no emoji has been found
noEmojiFoundLabel = QLabel("No emoji found 🙁")
noEmojiFoundLabel.setAlignment(Qt.AlignCenter | Qt.AlignHCenter | Qt.AlignVCenter)
layoutStack.addWidget(noEmojiFoundLabel)
layout.addWidget(layoutStackWidget)
# fill with a placeholder for now (smiling or smile)
# execute_search('smil')
fill_grid_with_history()
# bottom text entry
lineEdit = QLineEditWithArrows()
lineEdit.textChanged.connect(execute_search)
layout.addWidget(lineEdit)
# align it to the bottom, so that it won't stay centered vertically
layout.setAlignment(lineEdit, Qt.AlignBottom)
self.setLayout(layout)
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self |
# gets mouse position from Xlib
def mousePosition():
pointerData = display.Display().screen().root.query_pointer()._data | random_line_split |
qasync.py | by
default `eos_marker` is passed through to output queue, but you can
disable that.
Calls `task_done()` method on input queue after result was copied to output queue.
Assumption is that mapping function doesn't raise exceptions, instead it
should return some sort of error object. If calling `func` does result
in an exception it will be caught and logged but otherwise ignored.
It is safe to have multiple consumers/producers reading/writing from the
queues, although you might want to disable eos pass-through in those
cases.
func : Callable
q_in: Input asyncio.Queue
q_out: Output asyncio.Queue
eos_marker: Value that indicates end of stream
eos_passthrough: If True copy eos_marker to output queue before
terminating, if False then don't
"""
while True:
x = await q_in.get()
if x is eos_marker:
if eos_passthrough:
await q_out.put(x)
q_in.task_done()
return
err, result = (None, None)
try:
result = await func(x, **kwargs)
except Exception as e:
err = str(e)
log.error("Uncaught exception: %s", err)
if err is None:
await q_out.put(result)
q_in.task_done()
async def gen2q_async(func,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
loop=None):
""" Run up to `nconcurrent` generator functions, pump values from generator function into `q_out`
To indicate that no more data is available func should return special value `eos_marker`
[func(0)] \
[func(1)] >--> q_out
[func(2)] /
- func is expected not to raise exceptions
"""
async def worker(idx):
n = 0
while True:
try:
x = await func(idx)
except Exception as e:
log.error("Uncaught exception: %s", str(e))
return n
if x is eos_marker:
return n
n += 1
await q_out.put(x)
return n
ff = [asyncio.ensure_future(worker(i), loop=loop)
for i in range(nconcurrent)]
n_total = 0
for f in ff:
n_total += (await f)
if eos_passthrough:
await q_out.put(eos_marker)
return n_total
async def aq2sq_pump(src, dst,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01):
""" Pump from async Queue to synchronous queue.
dt -- how much to sleep when dst is full
"""
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
while True:
x = await src.get()
if x is eos_marker:
if eos_passthrough:
await push_to_dst(x, dst, dt)
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
async def q2q_nmap(func,
q_in,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01,
loop=None):
"""Pump data from synchronous queue to another synchronous queue via a worker
pool of async `func`s. Allow up to `nconcurrent` concurrent `func` tasks
at a time.
/ [func] \
q_in -> [func] >--> q_out
\ [func] /
- Order is not preserved.
- func is expected not to raise exceptions
"""
def safe_get(src):
try:
x = src.get_nowait()
return (x, True)
except queue.Empty:
return (None, False)
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
async def intake_loop(src, dst, dt):
while True:
x, ok = safe_get(src)
if not ok:
await asyncio.sleep(dt)
elif x is eos_marker:
src.task_done()
break
else:
await dst.put(x)
src.task_done()
for _ in range(nconcurrent):
await dst.put(eos_marker)
await dst.join()
async def output_loop(src, dst, dt):
while True:
x = await src.get()
if x is eos_marker:
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
aq_in = asyncio.Queue(nconcurrent*2)
aq_out = asyncio.Queue(aq_in.maxsize)
# / [func] \
# q_in -> aq_in -> [func] >--> aq_out -> q_out
# \ [func] /
# Launch async worker pool: aq_in ->[func]-> aq_out
for _ in range(nconcurrent):
asyncio.ensure_future(async_q2q_map(func, aq_in, aq_out,
eos_marker=eos_marker,
eos_passthrough=False),
loop=loop)
# Pump from aq_out -> q_out (async to sync interface)
asyncio.ensure_future(output_loop(aq_out, q_out, dt), loop=loop)
# Pump from q_in -> aq_in (sync to async interface)
await intake_loop(q_in, aq_in, dt)
# by this time all input items have been mapped through func and are in aq_out
# terminate output pump
await aq_out.put(eos_marker) # tell output_loop to stop
await aq_out.join() # wait for ack, all valid data is in `q_out` now
# finally push through eos_marker unless asked not too
if eos_passthrough:
await push_to_dst(eos_marker, q_out, dt)
################################################################################
# tests below
################################################################################
def test_q2q_map():
async def proc(x):
await asyncio.sleep(0.01)
return (x, x)
loop = asyncio.new_event_loop()
def run(**kwargs):
q1 = asyncio.Queue(10)
q2 = asyncio.Queue(10)
for i in range(4):
q1.put_nowait(i)
q1.put_nowait(EOS_MARKER)
async def run_test(**kwargs):
await async_q2q_map(proc, q1, q2, **kwargs)
await q1.join()
xx = []
while not q2.empty():
xx.append(q2.get_nowait())
return xx
return loop.run_until_complete(run_test(**kwargs))
expect = [(i, i) for i in range(4)]
assert run() == expect + [EOS_MARKER]
assert run(eos_passthrough=False) == expect
loop.close()
def test_q2qnmap():
import random
async def proc(x, state, delay=0.1):
state.active += 1
delay = random.uniform(0, delay)
await asyncio.sleep(delay)
state.max_active = max(state.active, state.max_active)
state.active -= 1
return (x, x)
def run_producer(n, q, eos_marker):
for i in range(n):
q.put(i)
q.put(eos_marker)
q.join()
def run_consumer(q, eos_marker):
xx = []
while True:
|
return xx
wk_pool = ThreadPoolExecutor(max_workers=2)
src = queue.Queue(3)
dst = queue.Queue(3)
# first do self test of consumer/producer
N = 100
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, src, EOS_MARKER)
xx = xx.result()
assert len(xx) == N + 1
assert len(set(xx) - set(range(N)) - set([EOS_MARKER])) == 0
assert src.qsize() == 0
loop = asyncio.new_event_loop()
def run(N, nconcurrent, delay, eos_passthrough=True):
async def run_test(func, N, nconcurrent):
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, dst, EOS_MARKER)
await q2q_nmap(func, src, dst, nconcurrent, eos_passthrough=eos_passthrough)
if eos_passthrough is False:
dst.put(EOS_MARKER)
return xx.result()
state = SimpleNamespace(active=0, max_active=0)
func = lambda x: proc(x, delay=delay, state=state)
return state, loop.run_until_complete(run_test(func, N, nconcurrent))
expect = set([(x, x) for x in range(N)] + [EOS_MARKER])
st, xx = | x = q.get()
q.task_done()
xx.append(x)
if x is eos_marker:
break | conditional_block |
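The q2q_nmap docstring above describes the q_in -> [func] * n -> q_out topology. A minimal usage sketch follows; the fetch worker is a stand-in for real async I/O, while q2q_nmap and EOS_MARKER are the module's own names:
import asyncio
import queue
async def fetch(item):
    await asyncio.sleep(0.01)          # placeholder for real async work
    return (item, "ok")
def demo():
    q_in, q_out = queue.Queue(8), queue.Queue(8)
    for item in ("a", "b", "c"):
        q_in.put(item)
    q_in.put(EOS_MARKER)               # tells the intake loop to stop
    loop = asyncio.new_event_loop()
    loop.run_until_complete(q2q_nmap(fetch, q_in, q_out, nconcurrent=2))
    results = []
    while True:
        x = q_out.get()
        if x is EOS_MARKER:            # passed through because eos_passthrough defaults to True
            break
        results.append(x)
    return results
Pre-filling q_in before running keeps the sketch single-threaded; with a live producer the blocking put/get calls belong on worker threads, as the module's tests do with a ThreadPoolExecutor.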
qasync.py | by
default `eos_marker` is passed through to output queue, but you can
disable that.
Calls `task_done()` method on input queue after result was copied to output queue.
Assumption is that mapping function doesn't raise exceptions, instead it
should return some sort of error object. If calling `func` does result
in an exception it will be caught and logged but otherwise ignored.
It is safe to have multiple consumers/producers reading/writing from the
queues, although you might want to disable eos pass-through in those
cases.
func : Callable
q_in: Input asyncio.Queue
q_out: Output asyncio.Queue
eos_marker: Value that indicates end of stream
eos_passthrough: If True copy eos_marker to output queue before
terminating, if False then don't
"""
while True:
x = await q_in.get()
if x is eos_marker:
if eos_passthrough:
await q_out.put(x)
q_in.task_done()
return
err, result = (None, None)
try:
result = await func(x, **kwargs)
except Exception as e:
err = str(e)
log.error("Uncaught exception: %s", err)
if err is None:
await q_out.put(result)
q_in.task_done()
async def gen2q_async(func,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
loop=None):
""" Run up to `nconcurrent` generator functions, pump values from generator function into `q_out`
To indicate that no more data is available func should return special value `eos_marker`
[func(0)] \
[func(1)] >--> q_out
[func(2)] /
- func is expected not to raise exceptions
"""
async def worker(idx):
n = 0
while True:
try:
x = await func(idx)
except Exception as e:
log.error("Uncaught exception: %s", str(e))
return n
if x is eos_marker:
return n
n += 1
await q_out.put(x)
return n
ff = [asyncio.ensure_future(worker(i), loop=loop)
for i in range(nconcurrent)]
n_total = 0
for f in ff:
n_total += (await f)
if eos_passthrough:
await q_out.put(eos_marker)
return n_total
async def aq2sq_pump(src, dst,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01):
""" Pump from async Queue to synchronous queue.
dt -- how much to sleep when dst is full
"""
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
while True:
x = await src.get()
if x is eos_marker:
if eos_passthrough:
await push_to_dst(x, dst, dt)
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
async def q2q_nmap(func,
q_in,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01,
loop=None):
"""Pump data from synchronous queue to another synchronous queue via a worker
pool of async `func`s. Allow up to `nconcurrent` concurrent `func` tasks
at a time.
/ [func] \
q_in -> [func] >--> q_out
\ [func] /
- Order is not preserved.
- func is expected not to raise exceptions
"""
def safe_get(src):
try:
x = src.get_nowait()
return (x, True)
except queue.Empty:
return (None, False)
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
async def intake_loop(src, dst, dt):
while True:
x, ok = safe_get(src)
if not ok:
await asyncio.sleep(dt)
elif x is eos_marker:
src.task_done()
break
else:
await dst.put(x)
src.task_done()
for _ in range(nconcurrent):
await dst.put(eos_marker)
await dst.join()
async def output_loop(src, dst, dt):
while True:
x = await src.get()
if x is eos_marker:
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
aq_in = asyncio.Queue(nconcurrent*2)
aq_out = asyncio.Queue(aq_in.maxsize)
# / [func] \
# q_in -> aq_in -> [func] >--> aq_out -> q_out
# \ [func] /
# Launch async worker pool: aq_in ->[func]-> aq_out
for _ in range(nconcurrent):
asyncio.ensure_future(async_q2q_map(func, aq_in, aq_out,
eos_marker=eos_marker,
eos_passthrough=False),
loop=loop)
# Pump from aq_out -> q_out (async to sync interface)
asyncio.ensure_future(output_loop(aq_out, q_out, dt), loop=loop)
# Pump from q_in -> aq_in (sync to async interface)
await intake_loop(q_in, aq_in, dt)
# by this time all input items have been mapped through func and are in aq_out
# terminate output pump
await aq_out.put(eos_marker) # tell output_loop to stop
await aq_out.join() # wait for ack, all valid data is in `q_out` now
# finally push through eos_marker unless asked not too
if eos_passthrough:
await push_to_dst(eos_marker, q_out, dt)
################################################################################
# tests below
################################################################################
def test_q2q_map():
async def proc(x):
await asyncio.sleep(0.01)
return (x, x)
loop = asyncio.new_event_loop()
def run(**kwargs):
q1 = asyncio.Queue(10)
q2 = asyncio.Queue(10)
for i in range(4):
q1.put_nowait(i)
q1.put_nowait(EOS_MARKER)
async def run_test(**kwargs):
await async_q2q_map(proc, q1, q2, **kwargs)
await q1.join()
xx = []
while not q2.empty():
xx.append(q2.get_nowait())
return xx
return loop.run_until_complete(run_test(**kwargs))
expect = [(i, i) for i in range(4)]
assert run() == expect + [EOS_MARKER]
assert run(eos_passthrough=False) == expect
loop.close()
def test_q2qnmap():
import random
async def proc(x, state, delay=0.1):
state.active += 1
delay = random.uniform(0, delay)
await asyncio.sleep(delay)
state.max_active = max(state.active, state.max_active)
state.active -= 1
return (x, x)
def run_producer(n, q, eos_marker):
for i in range(n):
q.put(i)
q.put(eos_marker)
q.join()
def run_consumer(q, eos_marker):
xx = []
while True:
x = q.get()
q.task_done()
xx.append(x)
if x is eos_marker:
break
return xx
wk_pool = ThreadPoolExecutor(max_workers=2)
src = queue.Queue(3)
dst = queue.Queue(3)
# first do self test of consumer/producer
N = 100
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, src, EOS_MARKER)
xx = xx.result()
assert len(xx) == N + 1
assert len(set(xx) - set(range(N)) - set([EOS_MARKER])) == 0
assert src.qsize() == 0
loop = asyncio.new_event_loop()
def run(N, nconcurrent, delay, eos_passthrough=True):
async def | (func, N, nconcurrent):
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, dst, EOS_MARKER)
await q2q_nmap(func, src, dst, nconcurrent, eos_passthrough=eos_passthrough)
if eos_passthrough is False:
dst.put(EOS_MARKER)
return xx.result()
state = SimpleNamespace(active=0, max_active=0)
func = lambda x: proc(x, delay=delay, state=state)
return state, loop.run_until_complete(run_test(func, N, nconcurrent))
expect = set([(x, x) for x in range(N)] + [EOS_MARKER])
st, xx | run_test | identifier_name |
qasync.py | by
default `eos_marker` is passed through to output queue, but you can
disable that.
Calls `task_done()` method on input queue after result was copied to output queue.
Assumption is that mapping function doesn't raise exceptions, instead it
should return some sort of error object. If calling `func` does result
in an exception it will be caught and logged but otherwise ignored.
It is safe to have multiple consumers/producers reading/writing from the
queues, although you might want to disable eos pass-through in those
cases.
func : Callable
q_in: Input asyncio.Queue
q_out: Output asyncio.Queue
eos_marker: Value that indicates end of stream
eos_passthrough: If True copy eos_marker to output queue before
terminating, if False then don't
"""
while True:
x = await q_in.get()
if x is eos_marker:
if eos_passthrough:
await q_out.put(x)
q_in.task_done()
return
err, result = (None, None)
try:
result = await func(x, **kwargs)
except Exception as e:
err = str(e)
log.error("Uncaught exception: %s", err)
if err is None:
await q_out.put(result)
q_in.task_done()
async def gen2q_async(func,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
loop=None):
""" Run up to `nconcurrent` generator functions, pump values from generator function into `q_out`
To indicate that no more data is available func should return special value `eos_marker`
[func(0)] \
[func(1)] >--> q_out
[func(2)] /
- func is expected not to raise exceptions
"""
async def worker(idx):
n = 0
while True:
try:
x = await func(idx)
except Exception as e:
log.error("Uncaught exception: %s", str(e))
return n
if x is eos_marker:
return n
n += 1
await q_out.put(x)
return n
ff = [asyncio.ensure_future(worker(i), loop=loop)
for i in range(nconcurrent)]
n_total = 0
for f in ff:
n_total += (await f)
if eos_passthrough:
await q_out.put(eos_marker)
return n_total
async def aq2sq_pump(src, dst,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01):
""" Pump from async Queue to synchronous queue.
dt -- how much to sleep when dst is full
"""
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
while True:
x = await src.get()
if x is eos_marker:
if eos_passthrough:
await push_to_dst(x, dst, dt)
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
async def q2q_nmap(func,
q_in,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01,
loop=None):
"""Pump data from synchronous queue to another synchronous queue via a worker
pool of async `func`s. Allow up to `nconcurrent` concurrent `func` tasks
at a time.
/ [func] \
q_in -> [func] >--> q_out
\ [func] /
- Order is not preserved.
- func is expected not to raise exceptions
"""
def safe_get(src):
try:
x = src.get_nowait()
return (x, True)
except queue.Empty:
return (None, False)
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
async def intake_loop(src, dst, dt):
while True:
x, ok = safe_get(src)
if not ok:
await asyncio.sleep(dt)
elif x is eos_marker:
src.task_done()
break
else:
await dst.put(x)
src.task_done()
for _ in range(nconcurrent):
await dst.put(eos_marker)
await dst.join()
async def output_loop(src, dst, dt):
while True:
x = await src.get()
if x is eos_marker:
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
aq_in = asyncio.Queue(nconcurrent*2)
aq_out = asyncio.Queue(aq_in.maxsize)
# / [func] \
# q_in -> aq_in -> [func] >--> aq_out -> q_out
# \ [func] /
# Launch async worker pool: aq_in ->[func]-> aq_out
for _ in range(nconcurrent):
asyncio.ensure_future(async_q2q_map(func, aq_in, aq_out,
eos_marker=eos_marker,
eos_passthrough=False),
loop=loop)
# Pump from aq_out -> q_out (async to sync interface)
asyncio.ensure_future(output_loop(aq_out, q_out, dt), loop=loop)
# Pump from q_in -> aq_in (sync to async interface)
await intake_loop(q_in, aq_in, dt)
# by this time all input items have been mapped through func and are in aq_out
# terminate output pump
await aq_out.put(eos_marker) # tell output_loop to stop
await aq_out.join() # wait for ack, all valid data is in `q_out` now
# finally push through eos_marker unless asked not too
if eos_passthrough:
await push_to_dst(eos_marker, q_out, dt)
################################################################################
# tests below
################################################################################
def test_q2q_map():
async def proc(x):
await asyncio.sleep(0.01)
return (x, x)
loop = asyncio.new_event_loop()
def run(**kwargs):
q1 = asyncio.Queue(10)
q2 = asyncio.Queue(10)
for i in range(4):
q1.put_nowait(i)
q1.put_nowait(EOS_MARKER)
async def run_test(**kwargs):
await async_q2q_map(proc, q1, q2, **kwargs)
await q1.join()
xx = []
while not q2.empty():
xx.append(q2.get_nowait())
return xx
return loop.run_until_complete(run_test(**kwargs))
expect = [(i, i) for i in range(4)]
assert run() == expect + [EOS_MARKER]
assert run(eos_passthrough=False) == expect
loop.close()
def test_q2qnmap():
import random
async def proc(x, state, delay=0.1):
state.active += 1
delay = random.uniform(0, delay)
await asyncio.sleep(delay)
state.max_active = max(state.active, state.max_active)
state.active -= 1
return (x, x)
def run_producer(n, q, eos_marker):
for i in range(n):
q.put(i)
q.put(eos_marker)
q.join()
def run_consumer(q, eos_marker):
xx = []
while True:
x = q.get()
q.task_done()
xx.append(x)
if x is eos_marker:
break
return xx
wk_pool = ThreadPoolExecutor(max_workers=2)
src = queue.Queue(3)
dst = queue.Queue(3)
# first do self test of consumer/producer
N = 100
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, src, EOS_MARKER)
xx = xx.result()
assert len(xx) == N + 1
assert len(set(xx) - set(range(N)) - set([EOS_MARKER])) == 0
assert src.qsize() == 0 | loop = asyncio.new_event_loop()
def run(N, nconcurrent, delay, eos_passthrough=True):
async def run_test(func, N, nconcurrent):
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, dst, EOS_MARKER)
await q2q_nmap(func, src, dst, nconcurrent, eos_passthrough=eos_passthrough)
if eos_passthrough is False:
dst.put(EOS_MARKER)
return xx.result()
state = SimpleNamespace(active=0, max_active=0)
func = lambda x: proc(x, delay=delay, state=state)
return state, loop.run_until_complete(run_test(func, N, nconcurrent))
expect = set([(x, x) for x in range(N)] + [EOS_MARKER])
st, xx = run | random_line_split |
|
qasync.py | by
default `eos_marker` is passed through to output queue, but you can
disable that.
Calls `task_done()` method on input queue after result was copied to output queue.
Assumption is that mapping function doesn't raise exceptions, instead it
should return some sort of error object. If calling `func` does result
in an exception it will be caught and logged but otherwise ignored.
It is safe to have multiple consumers/producers reading/writing from the
queues, although you might want to disable eos pass-through in those
cases.
func : Callable
q_in: Input asyncio.Queue
q_out: Output asyncio.Queue
eos_marker: Value that indicates end of stream
eos_passthrough: If True copy eos_marker to output queue before
terminating, if False then don't
"""
while True:
x = await q_in.get()
if x is eos_marker:
if eos_passthrough:
await q_out.put(x)
q_in.task_done()
return
err, result = (None, None)
try:
result = await func(x, **kwargs)
except Exception as e:
err = str(e)
log.error("Uncaught exception: %s", err)
if err is None:
await q_out.put(result)
q_in.task_done()
async def gen2q_async(func,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
loop=None):
""" Run up to `nconcurrent` generator functions, pump values from generator function into `q_out`
To indicate that no more data is available func should return special value `eos_marker`
[func(0)] \
[func(1)] >--> q_out
[func(2)] /
- func is expected not to raise exceptions
"""
async def worker(idx):
n = 0
while True:
try:
x = await func(idx)
except Exception as e:
log.error("Uncaught exception: %s", str(e))
return n
if x is eos_marker:
return n
n += 1
await q_out.put(x)
return n
ff = [asyncio.ensure_future(worker(i), loop=loop)
for i in range(nconcurrent)]
n_total = 0
for f in ff:
n_total += (await f)
if eos_passthrough:
await q_out.put(eos_marker)
return n_total
async def aq2sq_pump(src, dst,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01):
""" Pump from async Queue to synchronous queue.
dt -- how much to sleep when dst is full
"""
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
|
while True:
x = await src.get()
if x is eos_marker:
if eos_passthrough:
await push_to_dst(x, dst, dt)
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
async def q2q_nmap(func,
q_in,
q_out,
nconcurrent,
eos_marker=EOS_MARKER,
eos_passthrough=True,
dt=0.01,
loop=None):
"""Pump data from synchronous queue to another synchronous queue via a worker
pool of async `func`s. Allow upto `nconcurrent` concurrent `func` tasks
at a time.
/ [func] \
q_in -> [func] >--> q_out
\ [func] /
- Order is not preserved.
- func is expected not to raise exceptions
"""
def safe_get(src):
try:
x = src.get_nowait()
return (x, True)
except queue.Empty:
return (None, False)
def safe_put(x, dst):
try:
dst.put_nowait(x)
except queue.Full:
return False
return True
async def push_to_dst(x, dst, dt):
while not safe_put(x, dst):
await asyncio.sleep(dt)
async def intake_loop(src, dst, dt):
while True:
x, ok = safe_get(src)
if not ok:
await asyncio.sleep(dt)
elif x is eos_marker:
src.task_done()
break
else:
await dst.put(x)
src.task_done()
for _ in range(nconcurrent):
await dst.put(eos_marker)
await dst.join()
async def output_loop(src, dst, dt):
while True:
x = await src.get()
if x is eos_marker:
src.task_done()
break
await push_to_dst(x, dst, dt)
src.task_done()
aq_in = asyncio.Queue(nconcurrent*2)
aq_out = asyncio.Queue(aq_in.maxsize)
# / [func] \
# q_in -> aq_in -> [func] >--> aq_out -> q_out
# \ [func] /
# Launch async worker pool: aq_in ->[func]-> aq_out
for _ in range(nconcurrent):
asyncio.ensure_future(async_q2q_map(func, aq_in, aq_out,
eos_marker=eos_marker,
eos_passthrough=False),
loop=loop)
# Pump from aq_out -> q_out (async to sync interface)
asyncio.ensure_future(output_loop(aq_out, q_out, dt), loop=loop)
# Pump from q_in -> aq_in (sync to async interface)
await intake_loop(q_in, aq_in, dt)
# by this time all input items have been mapped through func and are in aq_out
# terminate output pump
await aq_out.put(eos_marker) # tell output_loop to stop
await aq_out.join() # wait for ack, all valid data is in `q_out` now
# finally push through eos_marker unless asked not too
if eos_passthrough:
await push_to_dst(eos_marker, q_out, dt)
################################################################################
# tests below
################################################################################
def test_q2q_map():
async def proc(x):
await asyncio.sleep(0.01)
return (x, x)
loop = asyncio.new_event_loop()
def run(**kwargs):
q1 = asyncio.Queue(10)
q2 = asyncio.Queue(10)
for i in range(4):
q1.put_nowait(i)
q1.put_nowait(EOS_MARKER)
async def run_test(**kwargs):
await async_q2q_map(proc, q1, q2, **kwargs)
await q1.join()
xx = []
while not q2.empty():
xx.append(q2.get_nowait())
return xx
return loop.run_until_complete(run_test(**kwargs))
expect = [(i, i) for i in range(4)]
assert run() == expect + [EOS_MARKER]
assert run(eos_passthrough=False) == expect
loop.close()
def test_q2qnmap():
import random
async def proc(x, state, delay=0.1):
state.active += 1
delay = random.uniform(0, delay)
await asyncio.sleep(delay)
state.max_active = max(state.active, state.max_active)
state.active -= 1
return (x, x)
def run_producer(n, q, eos_marker):
for i in range(n):
q.put(i)
q.put(eos_marker)
q.join()
def run_consumer(q, eos_marker):
xx = []
while True:
x = q.get()
q.task_done()
xx.append(x)
if x is eos_marker:
break
return xx
wk_pool = ThreadPoolExecutor(max_workers=2)
src = queue.Queue(3)
dst = queue.Queue(3)
# first do self test of consumer/producer
N = 100
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, src, EOS_MARKER)
xx = xx.result()
assert len(xx) == N + 1
assert len(set(xx) - set(range(N)) - set([EOS_MARKER])) == 0
assert src.qsize() == 0
loop = asyncio.new_event_loop()
def run(N, nconcurrent, delay, eos_passthrough=True):
async def run_test(func, N, nconcurrent):
wk_pool.submit(run_producer, N, src, EOS_MARKER)
xx = wk_pool.submit(run_consumer, dst, EOS_MARKER)
await q2q_nmap(func, src, dst, nconcurrent, eos_passthrough=eos_passthrough)
if eos_passthrough is False:
dst.put(EOS_MARKER)
return xx.result()
state = SimpleNamespace(active=0, max_active=0)
func = lambda x: proc(x, delay=delay, state=state)
return state, loop.run_until_complete(run_test(func, N, nconcurrent))
expect = set([(x, x) for x in range(N)] + [EOS_MARKER])
st, xx | while not safe_put(x, dst):
await asyncio.sleep(dt) | identifier_body |
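gen2q_async above fans `nconcurrent` generator-style workers into a single queue and returns the total item count. A small driving sketch follows; the counter-based source is an assumption for the demo, while gen2q_async and EOS_MARKER are the module's names:
import asyncio
def make_source(items_per_worker):
    counters = {}
    async def source(idx):
        i = counters.get(idx, 0)
        if i >= items_per_worker:
            return EOS_MARKER          # this worker is done
        counters[idx] = i + 1
        await asyncio.sleep(0)         # pretend to do async work
        return (idx, i)
    return source
async def demo():
    q_out = asyncio.Queue(4)
    producer = asyncio.ensure_future(gen2q_async(make_source(3), q_out, nconcurrent=2))
    seen = []
    while True:
        x = await q_out.get()
        if x is EOS_MARKER:            # a single marker, since eos_passthrough defaults to True
            break
        seen.append(x)
    return seen, await producer        # the producer future resolves to the item count (6 here)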
prxlistcache.go | aleTime = 5 * time.Minute
}
listCache = newListObjectsCache(p)
hk.Reg(hkListObjectName, func() time.Duration { return housekeepListCache(p) }, bucketPrefixStaleTime)
}
// TODO: Remove old entries, or those which take a lot of memory
// until MemPressure/PctMemUsed falls below some level.
func housekeepListCache(p *proxyrunner) time.Duration {
if p.gmm.MemPressure() <= memsys.MemPressureModerate {
return bucketPrefixStaleTime
}
now := mono.NanoTime()
listCache.mtx.Lock()
defer listCache.mtx.Unlock()
for k, v := range listCache.reqs {
if v.lastUsage+int64(bucketPrefixStaleTime) < now {
delete(listCache.reqs, k)
}
}
return bucketPrefixStaleTime
}
func newRequestCacheEntry(parent *listObjCache, bck *cluster.Bck, msg *cmn.SelectMsg) *locReq {
return &locReq{
parent: parent,
bck: bck,
targets: make(map[string]*locTarget),
msg: msg,
}
}
func newTargetCacheEntry(parent *locReq, t *cluster.Snode) *locTarget {
return &locTarget{parent: parent, t: t}
}
//////////////////////////
// listObjCache //
//////////////////////////
func (c *listObjCache) next(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, pageSize uint) (result fetchResult) {
cmn.Assert(smsg.UUID != "")
if smap.CountTargets() == 0 {
return fetchResult{err: fmt.Errorf("no targets registered")}
}
entries := c.allTargetsEntries(smsg, smap, bck)
cmn.Assert(len(entries) > 0)
entries[0].parent.mtx.Lock()
result = c.initResultsFromEntries(entries, smsg, pageSize, smsg.UUID)
if result.allOK && result.err == nil {
result = c.fetchAll(entries, smsg, pageSize)
}
entries[0].parent.mtx.Unlock()
c.mtx.Lock()
delete(c.reqs, smsg.ListObjectsCacheID(bck.Bck))
c.mtx.Unlock()
return result
}
func (c *listObjCache) targetEntry(t *cluster.Snode, smsg cmn.SelectMsg, bck *cluster.Bck) *locTarget {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
requestEntry, ok := c.reqs[id]
if !ok {
requestEntry = newRequestCacheEntry(c, bck, &smsg)
c.reqs[id] = requestEntry
}
c.mtx.Unlock()
defer func() {
requestEntry.lastUsage = mono.NanoTime()
}()
requestEntry.mtx.Lock()
targetEntry, ok := requestEntry.targets[t.ID()]
if !ok {
targetEntry = newTargetCacheEntry(requestEntry, t)
requestEntry.targets[t.ID()] = targetEntry
}
requestEntry.mtx.Unlock()
return targetEntry
}
func (c *listObjCache) leftovers(smsg cmn.SelectMsg, bck *cluster.Bck) map[string]*locTarget {
if smsg.Passthrough {
return nil
}
id := smsg.ListObjectsCacheID(bck.Bck)
requestEntry, ok := c.getRequestEntry(id)
if !ok {
return nil
}
// find pages that are unused or partially used
requestEntry.mtx.Lock()
defer requestEntry.mtx.Unlock()
tce := make(map[string]*locTarget)
for _, targetEntry := range requestEntry.targets {
targetEntry.mtx.Lock()
cnt := len(targetEntry.buff)
if cnt == 0 || cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[cnt-1].Name) {
targetEntry.mtx.Unlock()
continue
}
entry, ok := tce[targetEntry.t.ID()]
if !ok {
entry = &locTarget{parent: targetEntry.parent, t: targetEntry.t, buff: make([]*cmn.BucketEntry, 0)}
tce[targetEntry.t.ID()] = entry
}
// First case: the entire page was unused
if !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[0].Name) {
entry.buff = append(entry.buff, targetEntry.buff...)
targetEntry.mtx.Unlock()
continue
}
// Second case: partially used page
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[i].Name) }
idx := sort.Search(len(targetEntry.buff), cond)
entry.buff = append(entry.buff, targetEntry.buff[idx:]...)
targetEntry.mtx.Unlock()
}
return tce
}
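// Illustrative example (added commentary, object names are made up; it assumes
// PageMarkerIncludesObject reports names already covered by the marker): with
// smsg.PageMarker = "c", a cached page ["a","b"] is skipped entirely, a page
// ["d","e","f"] is carried over whole, and a page ["a","b","d","e"] is trimmed
// to its unused tail ["d","e"] by the sort.Search above.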
func (c *listObjCache) allTargetsEntries(smsg cmn.SelectMsg, smap *cluster.Smap, bck *cluster.Bck) []*locTarget {
result := make([]*locTarget, 0, len(smap.Tmap))
// First, get the data from the cache that was not sent yet
partial := c.leftovers(smsg, bck)
for _, t := range smap.Tmap {
var (
targetLeftovers *locTarget
ok bool
)
if smsg.Passthrough {
// In passthrough mode we have to create a "normal" but fake cache page.
reqEntry := newRequestCacheEntry(c, bck, &smsg)
entry := newTargetCacheEntry(reqEntry, t)
result = append(result, entry)
continue
}
if len(partial) != 0 {
targetLeftovers, ok = partial[t.ID()]
}
// If nothing is found for a target in the cache, initialize a new
// cache page. Without it, the new page leftovers can be lost.
if !ok || len(targetLeftovers.buff) == 0 {
targetEntry := c.targetEntry(t, smsg, bck)
result = append(result, targetEntry)
continue
}
// Order of pages in cache may be random. Sort them right away
less := func(i, j int) bool { return targetLeftovers.buff[i].Name < targetLeftovers.buff[j].Name }
sort.Slice(targetLeftovers.buff, less)
result = append(result, targetLeftovers)
}
return result
}
func (c *listObjCache) initResults(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, size uint, newUUID string) fetchResult {
entries := c.allTargetsEntries(smsg, smap, bck)
return c.initResultsFromEntries(entries, smsg, size, newUUID)
}
// initResultsFromEntries notifies targets to prepare the next objects page.
// It returns information on whether all calls succeeded and whether there were any errors.
func (c *listObjCache) initResultsFromEntries(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) fetchResult {
ch := c.initAllTargets(entries, smsg, size, newUUID)
return gatherTargetListObjsResults(smsg.UUID, ch, 0, &smsg)
}
// fetchAll returns the next `size` object names from each target. It includes additional information
// on whether all calls to targets succeeded and whether there were any errors. If the cache has buffered object names,
// it might return results without making any API calls.
func (c *listObjCache) fetchAll(entries []*locTarget, smsg cmn.SelectMsg, size uint) fetchResult {
wg := &sync.WaitGroup{}
wg.Add(len(entries))
resCh := make(chan *locTargetResp, len(entries))
for _, entry := range entries {
entry.fetch(smsg, size, wg, resCh)
}
wg.Wait()
close(resCh)
return gatherTargetListObjsResults(smsg.UUID, resCh, len(entries), &smsg)
}
// Discard all entries of given task which were included in marker `until`.
func (c *listObjCache) discard(smsg *cmn.SelectMsg, bck *cluster.Bck) {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
delete(c.reqs, id)
c.mtx.Unlock()
}
func (c *listObjCache) | (cacheID string) (*locReq, bool) {
c.mtx.Lock()
req, ok := c.reqs[cacheID]
c.mtx.Unlock()
return req, ok
}
// Gathers init results for each target on `resultCh`
func (c *listObjCache) initAllTargets(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) (resultCh chan *locTargetResp) {
resultCh = make(chan *locTargetResp, len(entries))
wg := &sync.WaitGroup{}
wg.Add(len(entries))
for _, targetEntry := range entries {
targetEntry.init(smsg, size, wg, resultCh, newUUID)
}
wg.Wait()
close(resultCh)
return
}
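// Added note (not part of the original file): page retrieval is two-phase.
// initAllTargets fans out one goroutine per target to post the "prepare page"
// task (see locTarget.init below), while fetchAll later collects the prepared
// results; both join on a sync.WaitGroup before closing their result channel.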
//////////////////////////
// locTarget //
/////////////////////////
func (c *locTarget) init(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp, newUUID string) {
cacheSufficient := (uint(len(c.buff)) >= size && size != | getRequestEntry | identifier_name |
prxlistcache.go | smsg)
}
// fetchAll returns the next `size` object names from each target. It includes additional information
// on whether all calls to targets succeeded and whether there were any errors. If the cache has buffered object names,
// it might return results without making any API calls.
func (c *listObjCache) fetchAll(entries []*locTarget, smsg cmn.SelectMsg, size uint) fetchResult {
wg := &sync.WaitGroup{}
wg.Add(len(entries))
resCh := make(chan *locTargetResp, len(entries))
for _, entry := range entries {
entry.fetch(smsg, size, wg, resCh)
}
wg.Wait()
close(resCh)
return gatherTargetListObjsResults(smsg.UUID, resCh, len(entries), &smsg)
}
// Discard all entries of given task which were included in marker `until`.
func (c *listObjCache) discard(smsg *cmn.SelectMsg, bck *cluster.Bck) {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
delete(c.reqs, id)
c.mtx.Unlock()
}
func (c *listObjCache) getRequestEntry(cacheID string) (*locReq, bool) {
c.mtx.Lock()
req, ok := c.reqs[cacheID]
c.mtx.Unlock()
return req, ok
}
// Gathers init results for each target on `resultCh`
func (c *listObjCache) initAllTargets(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) (resultCh chan *locTargetResp) {
resultCh = make(chan *locTargetResp, len(entries))
wg := &sync.WaitGroup{}
wg.Add(len(entries))
for _, targetEntry := range entries {
targetEntry.init(smsg, size, wg, resultCh, newUUID)
}
wg.Wait()
close(resultCh)
return
}
//////////////////////////
// locTarget //
/////////////////////////
func (c *locTarget) init(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp, newUUID string) {
cacheSufficient := (uint(len(c.buff)) >= size && size != 0) || c.done
if !smsg.Passthrough && cacheSufficient {
// Everything that is requested is already in the cache, we don't have to do any API calls.
// Returning StatusOK as if we did a request.
resCh <- &locTargetResp{status: http.StatusOK, err: nil}
wg.Done()
return
}
// Make an actual call to the target.
go func() {
resCh <- c.initOnRemote(smsg, newUUID)
wg.Done()
}()
}
func (c *locTarget) initOnRemote(smsg cmn.SelectMsg, newUUID string) (result *locTargetResp) {
p := c.parent.parent.p
bck := c.parent.bck
_, q := p.initAsyncQuery(bck, &smsg, newUUID)
args := c.newListObjectsTaskMsg(smsg, bck, q) // Changes PageMarker to point to last element in buff.
status, err := c.renewTaskOnRemote(args)
return &locTargetResp{status: status, err: err}
}
// Returns the next `size` objects, or fewer if no more exist.
// If everything that is requested is already present in the cache, no API calls are made.
func (c *locTarget) fetch(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp) {
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
// discard entries which somehow don't fit the request. Their names are smaller than pageMarker,
// which means that the user already has them from previous requests.
bf := c.buff[j:]
// We have everything in cache or target has nothing more.
// We didn't do init request to the target.
if (uint(len(bf)) >= size && size != 0) || c.done {
if size == 0 {
size = uint(len(bf))
} else {
size = uint(cmn.Min(len(bf), int(size)))
}
resCh <- &locTargetResp{list: &cmn.BucketList{Entries: bf[:size]}, status: http.StatusOK}
wg.Done()
return
}
go func() {
resCh <- c.fetchFromRemote(smsg, size)
wg.Done()
}()
}
// TODO: gaps, overlaps
func (c *locTarget) mergePage(page []*cmn.BucketEntry) {
if len(page) == 0 {
return
}
l := len(c.buff)
if l == 0 {
c.buff = page
return
}
// the page precedes items in the cache
if !cmn.PageMarkerIncludesObject(c.buff[0].Name, page[len(page)-1].Name) {
c.buff = append(page, c.buff...)
return
}
// the page follows the cache
if !cmn.PageMarkerIncludesObject(c.buff[l-1].Name, page[0].Name) {
c.buff = append(c.buff, page...)
return
}
if glog.FastV(4, glog.SmoduleAIS) {
glog.Infof("Page %q : %q discarded", page[0].Name, page[len(page)-1].Name)
}
}
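// Added summary (not part of the original file): mergePage has three outcomes.
// A page lying entirely before the cached buffer is prepended, a page lying
// entirely after it is appended, and a page that overlaps the cached range is
// dropped and only logged, which is what the TODO about gaps and overlaps
// refers to.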
// Has to be called with Lock!
// Fetches objects from target, appends them to buffer and returns required number of objects.
func (c *locTarget) fetchFromRemote(smsg cmn.SelectMsg, size uint) *locTargetResp {
p := c.parent.parent.p
bck := c.parent.bck
args := c.newListObjectsTaskMsg(smsg, bck, newTaskResultQuery(bck.Bck))
args.req.Method = http.MethodPost
// The target prepares the final result.
res := p.call(*args)
preallocSize := cmn.DefaultListPageSize
if smsg.PageSize != 0 {
preallocSize = smsg.PageSize
}
if res.err != nil {
return &locTargetResp{list: nil, status: res.status, err: res.err}
}
if len(res.outjson) == 0 {
s := cmn.Min(int(size), len(c.buff))
if s == 0 {
s = len(c.buff)
}
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[:s]}, status: res.status, err: res.err}
}
bucketList := &cmn.BucketList{Entries: make([]*cmn.BucketEntry, 0, preallocSize)}
if err := jsoniter.Unmarshal(res.outjson, &bucketList); err != nil {
return &locTargetResp{list: nil, status: http.StatusInternalServerError, err: err}
}
res.outjson = nil
if len(bucketList.Entries) < int(size) || size == 0 {
c.done = true
}
if smsg.Passthrough {
return &locTargetResp{list: bucketList, status: http.StatusOK}
}
c.mtx.Lock()
c.mergePage(bucketList.Entries)
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
c.mtx.Unlock()
j = cmn.Max(j, 0)
if size != 0 {
last := cmn.Min(len(c.buff), int(size)+j)
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[j:last]}, status: http.StatusOK}
}
return &locTargetResp{list: &cmn.BucketList{Entries: c.buff[j:]}, status: http.StatusOK}
}
// Prepares callArgs for list object init or list objects result call.
// Should be called with Lock or RLock acquired.
func (c *locTarget) newListObjectsTaskMsg(smsg cmn.SelectMsg, bck *cluster.Bck, q url.Values) *callArgs {
p := c.parent.parent.p
if len(c.buff) > 0 {
// Request only new objects.
smsg.PageMarker = c.buff[len(c.buff)-1].Name
}
// Cache all props, filter only requested props later.
smsg.Props = strings.Join(cmn.GetPropsAll, ",")
var (
config = cmn.GCO.Get()
smap = p.owner.smap.get()
aisMsg = p.newAisMsg(&cmn.ActionMsg{Action: cmn.ActListObjects, Value: smsg}, smap, nil)
body = cmn.MustMarshal(aisMsg)
)
return &callArgs{
si: c.t,
req: cmn.ReqArgs{
Method: http.MethodPost,
Path: cmn.URLPath(cmn.Version, cmn.Buckets, bck.Name),
Query: q,
Body: body,
},
timeout: config.Timeout.MaxHostBusy + config.Timeout.CplaneOperation,
}
}
func (c *locTarget) renewTaskOnRemote(args *callArgs) (int, error) | {
res := c.parent.parent.p.call(*args)
return res.status, res.err
} | identifier_body |
|
prxlistcache.go | }
entry, ok := tce[targetEntry.t.ID()]
if !ok {
entry = &locTarget{parent: targetEntry.parent, t: targetEntry.t, buff: make([]*cmn.BucketEntry, 0)}
tce[targetEntry.t.ID()] = entry
}
// First case: the entire page was unused
if !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[0].Name) {
entry.buff = append(entry.buff, targetEntry.buff...)
targetEntry.mtx.Unlock()
continue
}
// Second case: partially used page
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[i].Name) }
idx := sort.Search(len(targetEntry.buff), cond)
entry.buff = append(entry.buff, targetEntry.buff[idx:]...)
targetEntry.mtx.Unlock()
}
return tce
}
func (c *listObjCache) allTargetsEntries(smsg cmn.SelectMsg, smap *cluster.Smap, bck *cluster.Bck) []*locTarget {
result := make([]*locTarget, 0, len(smap.Tmap))
// First, get the data from the cache that was not sent yet
partial := c.leftovers(smsg, bck)
for _, t := range smap.Tmap {
var (
targetLeftovers *locTarget
ok bool
)
if smsg.Passthrough {
// In passthrough mode we have to create a "normal" but fake cache page.
reqEntry := newRequestCacheEntry(c, bck, &smsg)
entry := newTargetCacheEntry(reqEntry, t)
result = append(result, entry)
continue
}
if len(partial) != 0 {
targetLeftovers, ok = partial[t.ID()]
}
// If nothing is found for a target in the cache, initialize a new
// cache page. Without it, the new page leftovers can be lost.
if !ok || len(targetLeftovers.buff) == 0 {
targetEntry := c.targetEntry(t, smsg, bck)
result = append(result, targetEntry)
continue
}
// Order of pages in cache may be random. Sort them right away
less := func(i, j int) bool { return targetLeftovers.buff[i].Name < targetLeftovers.buff[j].Name }
sort.Slice(targetLeftovers.buff, less)
result = append(result, targetLeftovers)
}
return result
}
func (c *listObjCache) initResults(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, size uint, newUUID string) fetchResult {
entries := c.allTargetsEntries(smsg, smap, bck)
return c.initResultsFromEntries(entries, smsg, size, newUUID)
}
// initResultsFromEntries notifies targets to prepare the next objects page.
// It returns information on whether all calls succeeded and whether there were any errors.
func (c *listObjCache) initResultsFromEntries(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) fetchResult {
ch := c.initAllTargets(entries, smsg, size, newUUID)
return gatherTargetListObjsResults(smsg.UUID, ch, 0, &smsg)
}
// fetchAll returns the next `size` object names from each target. It includes additional information
// on whether all calls to targets succeeded and whether there were any errors. If the cache has buffered object names,
// it might return results without making any API calls.
func (c *listObjCache) fetchAll(entries []*locTarget, smsg cmn.SelectMsg, size uint) fetchResult {
wg := &sync.WaitGroup{}
wg.Add(len(entries))
resCh := make(chan *locTargetResp, len(entries))
for _, entry := range entries {
entry.fetch(smsg, size, wg, resCh)
}
wg.Wait()
close(resCh)
return gatherTargetListObjsResults(smsg.UUID, resCh, len(entries), &smsg)
}
// Discard all entries of given task which were included in marker `until`.
func (c *listObjCache) discard(smsg *cmn.SelectMsg, bck *cluster.Bck) {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
delete(c.reqs, id)
c.mtx.Unlock()
}
func (c *listObjCache) getRequestEntry(cacheID string) (*locReq, bool) {
c.mtx.Lock()
req, ok := c.reqs[cacheID]
c.mtx.Unlock()
return req, ok
}
// Gathers init results for each target on `resultCh`
func (c *listObjCache) initAllTargets(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) (resultCh chan *locTargetResp) {
resultCh = make(chan *locTargetResp, len(entries))
wg := &sync.WaitGroup{}
wg.Add(len(entries))
for _, targetEntry := range entries {
targetEntry.init(smsg, size, wg, resultCh, newUUID)
}
wg.Wait()
close(resultCh)
return
}
//////////////////////////
// locTarget //
/////////////////////////
func (c *locTarget) init(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp, newUUID string) {
cacheSufficient := (uint(len(c.buff)) >= size && size != 0) || c.done
if !smsg.Passthrough && cacheSufficient {
// Everything that is requested is already in the cache, we don't have to do any API calls.
// Returning StatusOK as if we did a request.
resCh <- &locTargetResp{status: http.StatusOK, err: nil}
wg.Done()
return
}
// Make an actual call to the target.
go func() {
resCh <- c.initOnRemote(smsg, newUUID)
wg.Done()
}()
}
func (c *locTarget) initOnRemote(smsg cmn.SelectMsg, newUUID string) (result *locTargetResp) {
p := c.parent.parent.p
bck := c.parent.bck
_, q := p.initAsyncQuery(bck, &smsg, newUUID)
args := c.newListObjectsTaskMsg(smsg, bck, q) // Changes PageMarker to point to last element in buff.
status, err := c.renewTaskOnRemote(args)
return &locTargetResp{status: status, err: err}
}
// Returns the next `size` objects, or fewer if no more exist.
// If everything that is requested is already present in the cache, no API calls are made.
func (c *locTarget) fetch(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp) {
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
// discard entries which somehow don't fit the request. Their names are smaller than pageMarker,
// which means that the user already has them from previous requests.
bf := c.buff[j:]
// We have everything in cache or target has nothing more.
// We didn't do init request to the target.
if (uint(len(bf)) >= size && size != 0) || c.done {
if size == 0 {
size = uint(len(bf))
} else {
size = uint(cmn.Min(len(bf), int(size)))
}
resCh <- &locTargetResp{list: &cmn.BucketList{Entries: bf[:size]}, status: http.StatusOK}
wg.Done()
return
}
go func() {
resCh <- c.fetchFromRemote(smsg, size)
wg.Done()
}()
}
// TODO: gaps, overlaps
func (c *locTarget) mergePage(page []*cmn.BucketEntry) {
if len(page) == 0 {
return
}
l := len(c.buff)
if l == 0 {
c.buff = page
return
}
// the page precedes items in the cache
if !cmn.PageMarkerIncludesObject(c.buff[0].Name, page[len(page)-1].Name) {
c.buff = append(page, c.buff...)
return
}
// the page follows the cache
if !cmn.PageMarkerIncludesObject(c.buff[l-1].Name, page[0].Name) {
c.buff = append(c.buff, page...)
return
}
if glog.FastV(4, glog.SmoduleAIS) {
glog.Infof("Page %q : %q discarded", page[0].Name, page[len(page)-1].Name)
}
}
// Has to be called with Lock!
// Fetches objects from target, appends them to buffer and returns required number of objects.
func (c *locTarget) fetchFromRemote(smsg cmn.SelectMsg, size uint) *locTargetResp {
p := c.parent.parent.p
bck := c.parent.bck
args := c.newListObjectsTaskMsg(smsg, bck, newTaskResultQuery(bck.Bck))
args.req.Method = http.MethodPost |
// Target prepare the final result. | random_line_split |
|
prxlistcache.go | .buff, targetEntry.buff...)
targetEntry.mtx.Unlock()
continue
}
// Second case: partially used page
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, targetEntry.buff[i].Name) }
idx := sort.Search(len(targetEntry.buff), cond)
entry.buff = append(entry.buff, targetEntry.buff[idx:]...)
targetEntry.mtx.Unlock()
}
return tce
}
func (c *listObjCache) allTargetsEntries(smsg cmn.SelectMsg, smap *cluster.Smap, bck *cluster.Bck) []*locTarget {
result := make([]*locTarget, 0, len(smap.Tmap))
// First, get the data from the cache that was not sent yet
partial := c.leftovers(smsg, bck)
for _, t := range smap.Tmap {
var (
targetLeftovers *locTarget
ok bool
)
if smsg.Passthrough {
// In passthrough mode we have to create a "normal" but fake cache page.
reqEntry := newRequestCacheEntry(c, bck, &smsg)
entry := newTargetCacheEntry(reqEntry, t)
result = append(result, entry)
continue
}
if len(partial) != 0 {
targetLeftovers, ok = partial[t.ID()]
}
// If nothing is found for a target in the cache, initialize a new
// cache page. Without it, the new page leftovers can be lost.
if !ok || len(targetLeftovers.buff) == 0 {
targetEntry := c.targetEntry(t, smsg, bck)
result = append(result, targetEntry)
continue
}
// Order of pages in cache may be random. Sort them right away
less := func(i, j int) bool { return targetLeftovers.buff[i].Name < targetLeftovers.buff[j].Name }
sort.Slice(targetLeftovers.buff, less)
result = append(result, targetLeftovers)
}
return result
}
func (c *listObjCache) initResults(smap *cluster.Smap, smsg cmn.SelectMsg, bck *cluster.Bck, size uint, newUUID string) fetchResult {
entries := c.allTargetsEntries(smsg, smap, bck)
return c.initResultsFromEntries(entries, smsg, size, newUUID)
}
// initResultsFromEntries notifies targets to prepare the next objects page.
// It returns information on whether all calls succeeded and whether there were any errors.
func (c *listObjCache) initResultsFromEntries(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) fetchResult {
ch := c.initAllTargets(entries, smsg, size, newUUID)
return gatherTargetListObjsResults(smsg.UUID, ch, 0, &smsg)
}
// fetchAll returns the next `size` object names from each target. It includes additional information
// on whether all calls to targets succeeded and whether there were any errors. If the cache has buffered object names,
// it might return results without making any API calls.
func (c *listObjCache) fetchAll(entries []*locTarget, smsg cmn.SelectMsg, size uint) fetchResult {
wg := &sync.WaitGroup{}
wg.Add(len(entries))
resCh := make(chan *locTargetResp, len(entries))
for _, entry := range entries {
entry.fetch(smsg, size, wg, resCh)
}
wg.Wait()
close(resCh)
return gatherTargetListObjsResults(smsg.UUID, resCh, len(entries), &smsg)
}
// Discard all entries of given task which were included in marker `until`.
func (c *listObjCache) discard(smsg *cmn.SelectMsg, bck *cluster.Bck) {
id := smsg.ListObjectsCacheID(bck.Bck)
c.mtx.Lock()
delete(c.reqs, id)
c.mtx.Unlock()
}
func (c *listObjCache) getRequestEntry(cacheID string) (*locReq, bool) {
c.mtx.Lock()
req, ok := c.reqs[cacheID]
c.mtx.Unlock()
return req, ok
}
// Gathers init results for each target on `resultCh`
func (c *listObjCache) initAllTargets(entries []*locTarget, smsg cmn.SelectMsg, size uint, newUUID string) (resultCh chan *locTargetResp) {
resultCh = make(chan *locTargetResp, len(entries))
wg := &sync.WaitGroup{}
wg.Add(len(entries))
for _, targetEntry := range entries {
targetEntry.init(smsg, size, wg, resultCh, newUUID)
}
wg.Wait()
close(resultCh)
return
}
//////////////////////////
// locTarget //
/////////////////////////
func (c *locTarget) init(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp, newUUID string) {
cacheSufficient := (uint(len(c.buff)) >= size && size != 0) || c.done
if !smsg.Passthrough && cacheSufficient {
// Everything that is requested is already in the cache, we don't have to do any API calls.
// Returning StatusOK as if we did a request.
resCh <- &locTargetResp{status: http.StatusOK, err: nil}
wg.Done()
return
}
// Make an actual call to the target.
go func() {
resCh <- c.initOnRemote(smsg, newUUID)
wg.Done()
}()
}
func (c *locTarget) initOnRemote(smsg cmn.SelectMsg, newUUID string) (result *locTargetResp) {
p := c.parent.parent.p
bck := c.parent.bck
_, q := p.initAsyncQuery(bck, &smsg, newUUID)
args := c.newListObjectsTaskMsg(smsg, bck, q) // Changes PageMarker to point to last element in buff.
status, err := c.renewTaskOnRemote(args)
return &locTargetResp{status: status, err: err}
}
// Returns the next `size` objects, or fewer if no more exist.
// If everything that is requested is already present in the cache, no API calls are made.
func (c *locTarget) fetch(smsg cmn.SelectMsg, size uint, wg *sync.WaitGroup, resCh chan *locTargetResp) {
cond := func(i int) bool { return !cmn.PageMarkerIncludesObject(smsg.PageMarker, c.buff[i].Name) }
j := sort.Search(len(c.buff), cond)
// discard entries which somehow don't fit the request. Their names are smaller than pageMarker,
// which means that the user already has them from previous requests.
bf := c.buff[j:]
// We have everything in cache or target has nothing more.
// We didn't do init request to the target.
if (uint(len(bf)) >= size && size != 0) || c.done {
if size == 0 {
size = uint(len(bf))
} else {
size = uint(cmn.Min(len(bf), int(size)))
}
resCh <- &locTargetResp{list: &cmn.BucketList{Entries: bf[:size]}, status: http.StatusOK}
wg.Done()
return
}
go func() {
resCh <- c.fetchFromRemote(smsg, size)
wg.Done()
}()
}
// TODO: gaps, overlaps
func (c *locTarget) mergePage(page []*cmn.BucketEntry) {
if len(page) == 0 {
return
}
l := len(c.buff)
if l == 0 {
c.buff = page
return
}
// the page precedes items in the cache
if !cmn.PageMarkerIncludesObject(c.buff[0].Name, page[len(page)-1].Name) {
c.buff = append(page, c.buff...)
return
}
// the page follows the cache
if !cmn.PageMarkerIncludesObject(c.buff[l-1].Name, page[0].Name) {
c.buff = append(c.buff, page...)
return
}
if glog.FastV(4, glog.SmoduleAIS) {
glog.Infof("Page %q : %q discarded", page[0].Name, page[len(page)-1].Name)
}
}
// Has to be called with Lock!
// Fetches objects from target, appends them to buffer and returns required number of objects.
func (c *locTarget) fetchFromRemote(smsg cmn.SelectMsg, size uint) *locTargetResp {
p := c.parent.parent.p
bck := c.parent.bck
args := c.newListObjectsTaskMsg(smsg, bck, newTaskResultQuery(bck.Bck))
args.req.Method = http.MethodPost
// The target prepares the final result.
res := p.call(*args)
preallocSize := cmn.DefaultListPageSize
if smsg.PageSize != 0 {
preallocSize = smsg.PageSize
}
if res.err != nil {
return &locTargetResp{list: nil, status: res.status, err: res.err}
}
if len(res.outjson) == 0 {
s := cmn.Min(int(size), len(c.buff))
if s == 0 | {
s = len(c.buff)
} | conditional_block |
|
lib.rs | {
/// The note column.
///
/// - 0: Nothing.
/// - 1 to 127 inclusive: A normal note.
/// - 128+: See the `NOTECMD` constants.
pub note: c_uchar,
/// The velocity column (note velocity).
///
/// - 0: Empty (default).
/// - 1 to 129 inclusive: The specified velocity + 1.
pub vel: c_uchar,
/// The module column (module to affect).
///
/// - 0: Empty (none).
/// - 1 to 255 inclusive: The specified module + 1.
pub module: c_uchar,
/// Padding.
pub nothing: c_uchar,
/// The value of the controller/effect column.
///
/// Interpreted as a hexadecimal number, the first two digits are the
/// controller of the selected module to affect, and the last two digits
/// are the number of an effect. Set a pair of digits to zero to
/// ignore that part.
pub ctl: c_ushort,
/// The value of the controller/effect parameter column.
pub ctl_val: c_ushort,
}
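// Added illustration (not part of the original bindings): following the field
// conventions documented above, a cell playing note 60 at velocity 100 on
// module 3 with no controller/effect data could be built as
// `sunvox_note { note: 60, vel: 100 + 1, module: 3 + 1, nothing: 0, ctl: 0, ctl_val: 0 }`.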
/// Suppresses debug output from the SunVox library.
pub const SV_INIT_FLAG_NO_DEBUG_OUTPUT: c_uint = 1 << 0;
/// Interaction with sound card is on the user side.
///
/// See `sv_audio_callback()`.
pub const SV_INIT_FLAG_USER_AUDIO_CALLBACK: c_uint = 1 << 1;
/// Audio is signed 16-bit (`c_short`).
pub const SV_INIT_FLAG_AUDIO_INT16: c_uint = 1 << 2;
/// Audio is float (`c_float`).
pub const SV_INIT_FLAG_AUDIO_FLOAT32: c_uint = 1 << 3;
/// Audio callback and song modification functions are in a single thread.
pub const SV_INIT_FLAG_ONE_THREAD: c_uint = 1 << 4;
pub const SV_MODULE_FLAG_EXISTS: c_int = 1;
pub const SV_MODULE_FLAG_EFFECT: c_int = 2;
pub const SV_MODULE_INPUTS_OFF: c_int = 16;
pub const SV_MODULE_INPUTS_MASK: c_int = 255 << SV_MODULE_INPUTS_OFF;
pub const SV_MODULE_OUTPUTS_OFF: c_int = 16 + 8;
pub const SV_MODULE_OUTPUTS_MASK: c_int = 255 << SV_MODULE_OUTPUTS_OFF;
pub const SV_STYPE_INT16: c_int = 0;
pub const SV_STYPE_INT32: c_int = 1;
pub const SV_STYPE_FLOAT32: c_int = 2;
pub const SV_STYPE_FLOAT64: c_int = 3;
#[link(name = "sunvox")]
extern "C" {
/// Gets the next piece of SunVox audio.
///
/// With `sv_audio_callback()` you can ignore the built-in SunVox sound
/// output mechanism and use some other sound system. Set the
/// `SV_INIT_FLAG_USER_AUDIO_CALLBACK` flag when calling `sv_init()` if
/// you want to use this function.
///
/// # Parameters
///
/// - buf: Destination buffer. If `SV_INIT_FLAG_AUDIO_INT16` was passed to
/// `sv_init()`, this is a buffer of `c_short`s. If `SV_INIT_FLAG_AUDIO_FLOAT32`
/// was passed, this is a buffer of `c_float`s. Stereo data will be interleaved
/// in this buffer: LRLR... ; where the LR is one frame (Left+Right channels).
/// - frames: Number of frames in destination buffer.
/// - latency: Audio latency (in frames).
/// - out_time: Output time (in ticks).
///
/// The `out_time` parameter is elaborated on a little bit in this thread:
/// http://www.warmplace.ru/forum/viewtopic.php?f=12&t=4152
///
/// For normal use, pass the value of `sv_get_ticks()`, as detailed in that
/// thread.
pub fn sv_audio_callback(buf: *mut c_void,
frames: c_int,
latency: c_int,
out_time: c_uint)
-> c_int;
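    // Hedged usage sketch (added commentary, not part of the original header):
    // with SV_INIT_FLAG_USER_AUDIO_CALLBACK and SV_INIT_FLAG_AUDIO_INT16 passed
    // to sv_init(), pulling `frames` frames of interleaved stereo could look like
    // `let mut buf = vec![0i16; frames * 2];` followed by
    // `unsafe { sv_audio_callback(buf.as_mut_ptr() as *mut c_void, frames as c_int, latency, sv_get_ticks()); }`,
    // where sv_get_ticks() supplies the tick value mentioned above.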
/// Opens a slot.
///
/// A slot is like an instance of the SunVox engine. Each slot can have a
/// single project loaded at a time. The library supports up to four slots,
/// 0 to 3 inclusive. This call appears to hang if called with a number
/// outside this range.
///
/// Returns 0 on success, -1 on failure. Failure conditions include the
/// slot already being open.
///
/// I say "like" an instance of the engine because I think all slots share
/// the same tick counter, which you can get by calling `sv_get_ticks()`.
pub fn sv_open_slot(slot: c_int) -> c_int;
/// Closes a slot. See `sv_open_slot()` for more details.
pub fn sv_close_slot(slot: c_int) -> c_int;
/// Locks a slot.
///
/// There are a few functions that need to be called between a
/// `sv_lock_slot()`/`sv_unlock_slot()` pair. These are marked with
/// "USE LOCK/UNLOCK!".
pub fn sv_lock_slot(slot: c_int) -> c_int;
/// Unlocks a slot. See `sv_lock_slot()` for more details.
pub fn sv_unlock_slot(slot: c_int) -> c_int;
/// Initializes the library.
///
/// The `flags` parameter takes either zero (for default options), or a
/// number of `SV_INIT_FLAG_xxx` constants ORed together.
pub fn sv_init(dev: *const c_char, freq: c_int, channels: c_int, flags: c_uint) -> c_int;
/// Deinitializes the library.
pub fn sv_deinit() -> c_int;
/// Gets the internal sample type of the SunVox engine.
///
/// Returns one of the `SV_STYPE_xxx` constants.
///
/// Use it to get the scope buffer type from `get_module_scope()` function.
pub fn sv_get_sample_type() -> c_int;
/// Loads a SunVox project file into the specified slot.
pub fn sv_load(slot: c_int, name: *const c_char) -> c_int;
/// Loads a SunVox project from file data in memory.
pub fn sv_load_from_memory(slot: c_int, data: *mut c_void, data_size: c_uint) -> c_int;
/// Starts playing the project from the current play cursor position.
pub fn sv_play(slot: c_int) -> c_int;
/// Starts playing the project from the beginning.
pub fn sv_play_from_beginning(slot: c_int) -> c_int;
/// Stops playing the project. The play cursor stays where it is.
pub fn sv_stop(slot: c_int) -> c_int;
/// Enables or disables autostop.
///
/// - 0: Disable autostop.
/// - 1: Enable autostop.
///
/// When disabled, the project plays in a loop.
pub fn sv_set_autostop(slot: c_int, autostop: c_int) -> c_int;
/// Gets whether the project is stopped (ie. not playing).
///
/// Returns 0 if it is playing, 1 if it is stopped.
pub fn sv_end_of_song(slot: c_int) -> c_int;
/// Rewinds the project to the beginning.
pub fn sv_rewind(slot: c_int, line_num: c_int) -> c_int;
/// Sets the volume of the project.
pub fn sv_volume(slot: c_int, vol: c_int) -> c_int;
/// Causes an event to occur as though it had been played in a pattern.
///
/// `track_num` is in the range 0 to 15 inclusive, and refers to the track
/// number in a special hidden pattern.
pub fn sv_send_event(slot: c_int,
track_num: c_int,
note: c_int,
vel: c_int,
module: c_int,
ctl: c_int,
ctl_val: c_int)
-> c_int;
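    // Hedged example (added commentary, assuming the same column conventions as
    // `sunvox_note` apply here): sending middle C (note 60) at velocity 100 to
    // module 2 on slot 0, track 0, with no controller/effect data would be
    // `unsafe { sv_send_event(0, 0, 60, 100 + 1, 2 + 1, 0, 0); }`.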
/// Gets the line number of the play cursor.
pub fn sv_get_current_line(slot: c_int) -> c_int;
/// Gets the line number of the play cursor in fixed point format: 27.5
///
/// TODO: Figure out exactly what this means.
/// I'm guessing it means 27 bits for the integer part and 5 bits for the
/// fractional part.
pub fn sv_get_current_line2(slot: c_int) -> c_int;
/// Gets the current signal level/amplitude for a given audio channel
/// in the range 0 to 255 inclusive.
pub fn sv_get_current_signal_level(slot: c_int, channel: c_int) -> c_int;
/// Gets the name of the currently loaded project.
///
/// Returns NULL if no project is loaded.
pub fn sv_get_song_name(slot: c_int) -> *const c_char;
/// Gets the Beats Per Minute of the currently loaded project.
///
/// Returns zero if no project is loaded.
pub fn sv_get_song_bpm(slot: c_int) | sunvox_note | identifier_name |
|
lib.rs |
/// A single note cell in a pattern.
#[repr(C)]
#[derive(Clone, Debug)]
pub struct sunvox_note {
/// The note column.
///
/// - 0: Nothing.
/// - 1 to 127 inclusive: A normal note.
/// - 128+: See the `NOTECMD` constants.
pub note: c_uchar,
/// The velocity column (note velocity).
///
/// - 0: Empty (default).
/// - 1 to 129 inclusive: The specified velocity + 1.
pub vel: c_uchar,
/// The module column (module to affect).
///
/// - 0: Empty (none).
/// - 1 to 255 inclusive: The specified module + 1.
pub module: c_uchar,
/// Padding.
pub nothing: c_uchar,
/// The value of the controller/effect column.
///
/// Interpreted as a hexadecimal number, the first two digits are the
/// controller of the selected module to affect, and the last two digits
/// are the number of an effect. Set a pair of digits to zero to
/// ignore that part.
pub ctl: c_ushort,
/// The value of the controller/effect parameter column.
pub ctl_val: c_ushort,
}
/// Suppresses debug output from the SunVox library.
pub const SV_INIT_FLAG_NO_DEBUG_OUTPUT: c_uint = 1 << 0;
/// Interaction with sound card is on the user side.
///
/// See `sv_audio_callback()`.
pub const SV_INIT_FLAG_USER_AUDIO_CALLBACK: c_uint = 1 << 1;
/// Audio is signed 16-bit (`c_short`).
pub const SV_INIT_FLAG_AUDIO_INT16: c_uint = 1 << 2;
/// Audio is float (`c_float`).
pub const SV_INIT_FLAG_AUDIO_FLOAT32: c_uint = 1 << 3;
/// Audio callback and song modification functions are in a single thread.
pub const SV_INIT_FLAG_ONE_THREAD: c_uint = 1 << 4;
pub const SV_MODULE_FLAG_EXISTS: c_int = 1;
pub const SV_MODULE_FLAG_EFFECT: c_int = 2;
pub const SV_MODULE_INPUTS_OFF: c_int = 16;
pub const SV_MODULE_INPUTS_MASK: c_int = 255 << SV_MODULE_INPUTS_OFF;
pub const SV_MODULE_OUTPUTS_OFF: c_int = 16 + 8;
pub const SV_MODULE_OUTPUTS_MASK: c_int = 255 << SV_MODULE_OUTPUTS_OFF;
pub const SV_STYPE_INT16: c_int = 0;
pub const SV_STYPE_INT32: c_int = 1;
pub const SV_STYPE_FLOAT32: c_int = 2;
pub const SV_STYPE_FLOAT64: c_int = 3;
#[link(name = "sunvox")]
extern "C" {
/// Gets the next piece of SunVox audio.
///
/// With `sv_audio_callback()` you can ignore the built-in SunVox sound
/// output mechanism and use some other sound system. Set the
/// `SV_INIT_FLAG_USER_AUDIO_CALLBACK` flag when calling `sv_init()` if
/// you want to use this function.
///
/// # Parameters
///
/// - buf: Destination buffer. If `SV_INIT_FLAG_AUDIO_INT16` was passed to
/// `sv_init()`, this is a buffer of `c_short`s. If `SV_INIT_FLAG_AUDIO_FLOAT32`
/// was passed, this is a buffer of `c_float`s. Stereo data will be interleaved
/// in this buffer: LRLR... ; where the LR is one frame (Left+Right channels).
/// - frames: Number of frames in destination buffer.
/// - latency: Audio latency (in frames).
/// - out_time: Output time (in ticks).
///
/// The `out_time` parameter is elaborated on a little bit in this thread:
/// http://www.warmplace.ru/forum/viewtopic.php?f=12&t=4152
///
/// For normal use, pass the value of `sv_get_ticks()`, as detailed in that
/// thread.
pub fn sv_audio_callback(buf: *mut c_void,
frames: c_int,
latency: c_int,
out_time: c_uint)
-> c_int;
/// Opens a slot.
///
/// A slot is like an instance of the SunVox engine. Each slot can have a
/// single project loaded at a time. The library supports up to four slots,
/// 0 to 3 inclusive. This call appears to hang if called with a number
/// outside this range.
///
/// Returns 0 on success, -1 on failure. Failure conditions include the
/// slot already being open.
///
/// I say "like" an instance of the engine because I think all slots share
/// the same tick counter, which you can get by calling `sv_get_ticks()`.
pub fn sv_open_slot(slot: c_int) -> c_int;
/// Closes a slot. See `sv_open_slot()` for more details.
pub fn sv_close_slot(slot: c_int) -> c_int;
/// Locks a slot.
///
/// There are a few functions that need to be called between a
/// `sv_lock_slot()`/`sv_unlock_slot()` pair. These are marked with
/// "USE LOCK/UNLOCK!".
pub fn sv_lock_slot(slot: c_int) -> c_int;
/// Unlocks a slot. See `sv_lock_slot()` for more details.
pub fn sv_unlock_slot(slot: c_int) -> c_int;
/// Initializes the library.
///
/// The `flags` parameter takes either zero (for default options), or a
/// number of `SV_INIT_FLAG_xxx` constants ORed together.
pub fn sv_init(dev: *const c_char, freq: c_int, channels: c_int, flags: c_uint) -> c_int;
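    // Hedged start-up sketch (added commentary, not part of the original
    // bindings), using only functions declared in this file:
    // sv_init(std::ptr::null(), 44100, 2, 0) with a null (default) device string,
    // then sv_open_slot(0), sv_load(0, path) with a C string path to a .sunvox
    // file, sv_play_from_beginning(0) to play, and finally sv_close_slot(0)
    // followed by sv_deinit() to shut down.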
/// Deinitializes the library.
pub fn sv_deinit() -> c_int;
/// Gets the internal sample type of the SunVox engine.
///
/// Returns one of the `SV_STYPE_xxx` constants.
///
/// Use it to get the scope buffer type from `get_module_scope()` function.
pub fn sv_get_sample_type() -> c_int;
/// Loads a SunVox project file into the specified slot.
pub fn sv_load(slot: c_int, name: *const c_char) -> c_int;
/// Loads a SunVox project from file data in memory.
pub fn sv_load_from_memory(slot: c_int, data: *mut c_void, data_size: c_uint) -> c_int;
/// Starts playing the project from the current play cursor position.
pub fn sv_play(slot: c_int) -> c_int;
/// Starts playing the project from the beginning.
pub fn sv_play_from_beginning(slot: c_int) -> c_int;
/// Stops playing the project. The play cursor stays where it is.
pub fn sv_stop(slot: c_int) -> c_int;
/// Enables or disables autostop.
///
/// - 0: Disable autostop.
/// - 1: Enable autostop.
///
/// When disabled, the project plays in a loop.
pub fn sv_set_autostop(slot: c_int, autostop: c_int) -> c_int;
/// Gets whether the project is stopped (ie. not playing).
///
/// Returns 0 if it is playing, 1 if it is stopped.
pub fn sv_end_of_song(slot: c_int) -> c_int;
/// Rewinds the project to the beginning.
pub fn sv_rewind(slot: c_int, line_num: c_int) -> c_int;
/// Sets the volume of the project.
pub fn sv_volume(slot: c_int, vol: c_int) -> c_int;
/// Causes an event to occur as though it had been played in a pattern.
///
/// `track_num` is in the range 0 to 15 inclusive, and refers to the track
/// number in a special hidden pattern.
pub fn sv_send_event(slot: c_int,
track_num: c_int,
note: c_int,
vel: c_int,
module: c_int,
ctl: c_int,
ctl_val: c_int)
-> c_int;
/// Gets the line number of the play cursor.
pub fn sv_get_current_line(slot: c_int) -> c_int;
/// Gets the line number of the play cursor in fixed point format: 27.5
///
/// TODO: Figure out exactly what this means.
/// I'm guessing it means 27 bits for the integer part and 5 bits for the
/// fractional part.
pub fn sv_get_current_line2(slot: c_int) -> c_int;
/// Gets the current signal level/amplitude for a given audio channel
/// in the range 0 to 255 inclusive.
pub fn sv_get_current_signal_level(slot: c_int, channel: c_int) -> c_int;
/// Gets the name of the currently loaded project.
///
/// Returns NULL if no project is loaded.
pub fn sv_get_song_name(slot: c_int) -> *const c_char;
/// Gets the Beats Per Minute of the currently loaded project.
///
| random_line_split |
||
beacon.py | (self, server, type_, name):
# method is required, but can be ignored if you don't care about updates. We don't.
if self.logger is not None:
# ex. WARNING:pyTivo.beacon:ZCListener.update_service name='Movies._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
# WARNING:pyTivo.beacon:ZCListener.update_service name='LivingRoomVox._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
self.logger.debug(f'ZCListener.update_service {name=} {type_=}')
class ZCBroadcast:
def __init__(self, logger):
""" Announce our shares via Zeroconf. """
self.share_names = []
self.share_info = []
self.logger = logger
self.rz = zeroconf.Zeroconf()
self.renamed = {}
old_titles = self.scan()
address = socket.inet_aton(config.get_ip())
port = int(config.getPort())
logger.info('Announcing pytivo shares ({}:{})...'.format(config.get_ip(), port))
for section, settings in config.getShares():
try:
plugin = GetPlugin(settings['type'])
ct = plugin.CONTENT_TYPE
# if the plugin provides a test for validity use it otherwise assume valid
if hasattr(plugin, 'is_valid') and not plugin.is_valid(section, settings):
logger.warning('share "%s" is invalid. It will be ignored (maybe check that path exists)', section)
continue
except Exception as e:
logger.error('ZCBroadcast.__init__: raised %s: %s', e.__class__.__name__, e)
continue
if ct.startswith('x-container/'):
if 'video' in ct:
platform = PLATFORM_VIDEO
else:
platform = PLATFORM_MAIN
logger.info('Registering: %s' % section)
self.share_names.append(section)
desc = {b'path': bytes(SHARE_TEMPLATE % quote(section), 'utf-8'),
b'platform': bytes(platform, 'utf-8'),
b'protocol': b'http',
b'tsn': bytes('{%s}' % uuid.uuid4(), 'utf-8')}
tt = ct.split('/')[1]
title = section
count = 1
while title in old_titles:
# debugging info while I try to figure out what this loop is for
logger.info(" title b4: {}".format(title))
count += 1
title = '%s [%d]' % (section, count)
self.renamed[section] = title
# more debugging info
logger.info(" title after: {}\n section: {}".format(title, section))
info = zeroconf.ServiceInfo('_%s._tcp.local.' % tt,
'%s._%s._tcp.local.' % (title, tt),
port=port, addresses=[address], properties=desc)
log_serviceinfo(self.logger, info)
self.rz.register_service(info)
self.share_info.append(info)
def scan(self):
""" Look for TiVos using Zeroconf. """
VIDS = '_tivo-videos._tcp.local.'
names = []
self.logger.info('Scanning for TiVos...\n')
# Get the names of servers offering TiVo videos
browser = zeroconf.ServiceBrowser(self.rz, VIDS, None, ZCListener(names, logger=self.logger))
# Give them a second (or more if no one has responded in the 1st second) to respond
time.sleep(1)
max_sec_to_wait = 10
sec_waited = 0
while not names and sec_waited < max_sec_to_wait:
sec_waited += 1
time.sleep(1)
# Any results?
if names:
config.tivos_found = True
# Now get the addresses -- this is the slow part
for name in names:
info = self.rz.get_service_info(VIDS, name + '.' + VIDS)
log_serviceinfo(self.logger, info)
if info:
# zeroconf v2.7 removed ServiceInfo address member says use addresses instead.
# Some debug logging to see if there is always at least the currently assumed 1 address (and maybe more?)
self.logger.debug(f'Found zeroconf.ServiceInfo with {len(info.addresses)} IP addresses\n')
tsn = info.properties.get(b'TSN')
if config.get_togo('all'):
tsn = info.properties.get(b'tsn', tsn)
if tsn:
if isinstance(tsn, bytes):
tsn = tsn.decode('utf-8')
address = socket.inet_ntoa(info.addresses[0])
port = info.port
config.tivos[tsn] = {'name': name, 'address': address,
'port': port}
# info.properties has bytes keys and values, but we'd rather
# deal with str keys and values, so convert them before adding
# them to our tivos dict.
config.tivos[tsn].update(bytes2str(info.properties))
# Debugging information on what services have been found:
# try:
# all_services = zeroconf.ZeroconfServiceTypes.find(self.rz)
# self.logger.info("All services found")
# for s in all_services:
# self.logger.info(" {}".format(s))
# except Exception as e:
# self.logger.error(e)
return names
def shutdown(self):
self.logger.info('Unregistering: %s' % ', '.join(self.share_names))
for info in self.share_info:
self.rz.unregister_service(info)
self.rz.close()
class Beacon:
def __init__(self):
self.UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.services = []
self.timer = None
self.platform = PLATFORM_VIDEO
for section, settings in config.getShares():
try:
ct = GetPlugin(settings['type']).CONTENT_TYPE
except:
continue
if ct in ('x-container/tivo-music', 'x-container/tivo-photos'):
self.platform = PLATFORM_MAIN
break
if config.get_zc():
logger = logging.getLogger('pyTivo.beacon')
try:
self.bd = ZCBroadcast(logger)
except Exception as e:
logger.debug('Beacon.__init__: raised %s: %s', e.__class__.__name__, e)
logger.error('Zeroconf failure')
self.bd = None
else:
self.bd = None
def add_service(self, service):
self.services.append(service)
self.send_beacon()
def format_services(self):
return ';'.join(self.services)
def format_beacon(self, conntype, services=True):
beacon = ['tivoconnect=1',
'method=%s' % conntype,
'identity={%s}' % config.getGUID(),
'machine=%s' % socket.gethostname(),
'platform=%s' % self.platform]
if services:
beacon.append('services=' + self.format_services())
else:
beacon.append('services=TiVoMediaServer:0/http')
return '\n'.join(beacon) + '\n'
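# Illustrative sketch (added commentary; concrete values are examples): with
# services disabled, format_beacon('broadcast', False) produces newline-joined
# lines like tivoconnect=1, method=broadcast, identity={<GUID>},
# machine=<hostname>, platform=<PLATFORM_MAIN or PLATFORM_VIDEO> and
# services=TiVoMediaServer:0/http, with a trailing newline.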
def send_beacon(self):
beacon_ips = config.getBeaconAddresses()
beacon = self.format_beacon('broadcast')
for beacon_ip in beacon_ips.split():
if beacon_ip != 'listen':
try:
packet = bytes(beacon, "utf-8")
while packet:
result = self.UDPSock.sendto(packet, (beacon_ip, 2190))
if result < 0:
break
packet = packet[result:]
except Exception as e:
print(e)
def start(self):
self.send_beacon()
self.timer = Timer(60, self.start)
self.timer.start()
def stop(self):
self.timer.cancel()
if self.bd:
self.bd.shutdown()
@staticmethod
def recv_bytes(sock, length):
block = b''  # bytes accumulator: socket.recv() returns bytes in Python 3
while len(block) < length:
add = sock.recv(length - len(block))
if not add:
break
block += add
return block
@staticmethod
def recv_packet(sock):
length = struct.unpack('!I', Beacon.recv_bytes(sock, 4))[0]
return Beacon.recv_bytes(sock, length)
@staticmethod
def send_packet(sock, packet):
sock.sendall(struct.pack('!I', len(packet)) + packet)
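# Added note: the TCP beacon framing used by recv_packet/send_packet is a
# 4-byte big-endian length prefix ('!I') followed by the payload, so a 5-byte
# payload goes on the wire as b'\x00\x00\x00\x05' plus the payload itself.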
def listen(self):
""" For the direct-connect, TCP-style beacon """
import _thread
def server():
TCPSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCPSock.bind(('', 2190))
TCPSock.listen(5)
while True:
# Wait for a connection
client, address = TCPSock.accept()
# Accept (and discard) the client's beacon
self.recv_packet(client)
# Send ours
self.send_packet(client, bytes(self.format_beacon('connected'), 'utf-8'))  # encode: send_packet prepends a binary length prefix
client.close()
_thread.start_new_thread(server, ())
def get_name(self, address): | """ Exchange beacons, and extract the machine name. """
our_beacon = self.format_beacon('connected', False) | random_line_split |
|
beacon.py | n {address}:{port} {name}\n"
log_fmt = log_hdr
if debugging:
log_level = logging.DEBUG
if info.server != info.name:
log_info['server'] = info.server
log_fmt += " server: {server}\n"
for (k, v) in info.properties.items():
li_k = "prop_" + bytes2str(k)
log_info[li_k] = v
log_fmt += " {k}: {{{li_k}}}\n".format(k=k, li_k=li_k)
logger.log(log_level, log_fmt.format(**log_info))
except:
logger.exception("exception in log_tivo_serviceinfo")
class ZCListener:
# pylint: disable=redefined-builtin
def __init__(self, names, logger=None):
self.names = names
self.logger = logger
def remove_service(self, server, type_, name):
self.names.remove(name.replace('.' + type_, ''))
def add_service(self, server, type_, name):
self.names.append(name.replace('.' + type_, ''))
def update_service(self, server, type_, name):
# method is required, but can be ignored if you don't care about updates. We don't.
if self.logger is not None:
# ex. WARNING:pyTivo.beacon:ZCListener.update_service name='Movies._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
# WARNING:pyTivo.beacon:ZCListener.update_service name='LivingRoomVox._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
self.logger.debug(f'ZCListener.update_service {name=} {type_=}')
class ZCBroadcast:
def __init__(self, logger):
""" Announce our shares via Zeroconf. """
self.share_names = []
self.share_info = []
self.logger = logger
self.rz = zeroconf.Zeroconf()
self.renamed = {}
old_titles = self.scan()
address = socket.inet_aton(config.get_ip())
port = int(config.getPort())
logger.info('Announcing pytivo shares ({}:{})...'.format(config.get_ip(), port))
for section, settings in config.getShares():
try:
plugin = GetPlugin(settings['type'])
ct = plugin.CONTENT_TYPE
# if the plugin provides a test for validity use it otherwise assume valid
if hasattr(plugin, 'is_valid') and not plugin.is_valid(section, settings):
logger.warning('share "%s" is invalid. It will be ignored (maybe check that path exists)', section)
continue
except Exception as e:
logger.error('ZCBroadcast.__init__: raised %s: %s', e.__class__.__name__, e)
continue
if ct.startswith('x-container/'):
if 'video' in ct:
platform = PLATFORM_VIDEO
else:
platform = PLATFORM_MAIN
logger.info('Registering: %s' % section)
self.share_names.append(section)
desc = {b'path': bytes(SHARE_TEMPLATE % quote(section), 'utf-8'),
b'platform': bytes(platform, 'utf-8'),
b'protocol': b'http',
b'tsn': bytes('{%s}' % uuid.uuid4(), 'utf-8')}
tt = ct.split('/')[1]
title = section
count = 1
while title in old_titles:
# debugging info while I try to figure out what this loop is for
logger.info(" title b4: {}".format(title))
count += 1
title = '%s [%d]' % (section, count)
self.renamed[section] = title
# more debugging info
logger.info(" title after: {}\n section: {}".format(title, section))
info = zeroconf.ServiceInfo('_%s._tcp.local.' % tt,
'%s._%s._tcp.local.' % (title, tt),
port=port, addresses=[address], properties=desc)
log_serviceinfo(self.logger, info)
self.rz.register_service(info)
self.share_info.append(info)
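# Added note (values are examples): the service type announced above is derived
# from the plugin's CONTENT_TYPE, so a share of type 'x-container/tivo-videos'
# is registered as '<title>._tivo-videos._tcp.local.' on the configured
# IP address and port.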
def scan(self):
""" Look for TiVos using Zeroconf. """
VIDS = '_tivo-videos._tcp.local.'
names = []
self.logger.info('Scanning for TiVos...\n')
# Get the names of servers offering TiVo videos
browser = zeroconf.ServiceBrowser(self.rz, VIDS, None, ZCListener(names, logger=self.logger))
# Give them a second (or more if no one has responded in the 1st second) to respond
time.sleep(1)
max_sec_to_wait = 10
sec_waited = 0
while not names and sec_waited < max_sec_to_wait:
sec_waited += 1
time.sleep(1)
# Any results?
if names:
config.tivos_found = True
# Now get the addresses -- this is the slow part
for name in names:
info = self.rz.get_service_info(VIDS, name + '.' + VIDS)
log_serviceinfo(self.logger, info)
if info:
# zeroconf v2.7 removed ServiceInfo address member says use addresses instead.
# Some debug logging to see if there is always at least the currently assumed 1 address (and maybe more?)
self.logger.debug(f'Found zeroconf.ServiceInfo with {len(info.addresses)} IP addresses\n')
tsn = info.properties.get(b'TSN')
if config.get_togo('all'):
tsn = info.properties.get(b'tsn', tsn)
if tsn:
if isinstance(tsn, bytes):
tsn = tsn.decode('utf-8')
address = socket.inet_ntoa(info.addresses[0])
port = info.port
config.tivos[tsn] = {'name': name, 'address': address,
'port': port}
# info.properties has bytes keys and values, but we'd rather
# deal with str keys and values, so convert them before adding
# them to our tivos dict.
config.tivos[tsn].update(bytes2str(info.properties))
# Debugging information on what services have been found:
# try:
# all_services = zeroconf.ZeroconfServiceTypes.find(self.rz)
# self.logger.info("All services found")
# for s in all_services:
# self.logger.info(" {}".format(s))
# except Exception as e:
# self.logger.error(e)
return names
def shutdown(self):
self.logger.info('Unregistering: %s' % ', '.join(self.share_names))
for info in self.share_info:
self.rz.unregister_service(info)
self.rz.close()
class Beacon:
def __init__(self):
self.UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.services = []
self.timer = None
self.platform = PLATFORM_VIDEO
for section, settings in config.getShares():
try:
ct = GetPlugin(settings['type']).CONTENT_TYPE
except:
continue
if ct in ('x-container/tivo-music', 'x-container/tivo-photos'):
self.platform = PLATFORM_MAIN
break
if config.get_zc():
logger = logging.getLogger('pyTivo.beacon')
try:
self.bd = ZCBroadcast(logger)
except Exception as e:
logger.debug('Beacon.__init__: raised %s: %s', e.__class__.__name__, e)
logger.error('Zeroconf failure')
self.bd = None
else:
self.bd = None
def add_service(self, service):
self.services.append(service)
self.send_beacon()
def format_services(self):
return ';'.join(self.services)
def format_beacon(self, conntype, services=True):
beacon = ['tivoconnect=1',
'method=%s' % conntype,
'identity={%s}' % config.getGUID(),
'machine=%s' % socket.gethostname(),
'platform=%s' % self.platform]
if services:
beacon.append('services=' + self.format_services())
else:
beacon.append('services=TiVoMediaServer:0/http')
return '\n'.join(beacon) + '\n'
def send_beacon(self):
beacon_ips = config.getBeaconAddresses()
beacon = self.format_beacon('broadcast')
for beacon_ip in beacon_ips.split():
if beacon_ip != 'listen':
try:
packet = bytes(beacon, "utf-8")
while packet:
result = self.UDPSock.sendto(packet, (beacon_ip, 2190))
if result < 0:
break
packet = packet[result:]
except Exception as e:
print(e)
def start(self):
self.send_beacon()
self.timer = Timer(60, self.start)
self.timer.start()
def stop(self):
self.timer.cancel()
if self.bd:
self.bd.shutdown()
@staticmethod
def recv_bytes(sock, length):
block = ''
while len(block) < length:
| add = sock.recv(length - len(block))
if not add:
break
block += add | conditional_block |
|
beacon.py | / log level INFO, if the log level is DEBUG
the basic info plus more (all properties) is written
w/ log level DEBUG.
"""
try:
debugging = logger.isEnabledFor(logging.DEBUG)
log_level = logging.INFO
log_info = {'name': info.name,
'address': socket.inet_ntoa(info.addresses[0]),
'port': info.port}
log_hdr = "\n {address}:{port} {name}\n"
log_fmt = log_hdr
if debugging:
log_level = logging.DEBUG
if info.server != info.name:
log_info['server'] = info.server
log_fmt += " server: {server}\n"
for (k, v) in info.properties.items():
li_k = "prop_" + bytes2str(k)
log_info[li_k] = v
log_fmt += " {k}: {{{li_k}}}\n".format(k=k, li_k=li_k)
logger.log(log_level, log_fmt.format(**log_info))
except:
logger.exception("exception in log_tivo_serviceinfo")
class ZCListener:
# pylint: disable=redefined-builtin
def __init__(self, names, logger=None):
self.names = names
self.logger = logger
def remove_service(self, server, type_, name):
self.names.remove(name.replace('.' + type_, ''))
def add_service(self, server, type_, name):
self.names.append(name.replace('.' + type_, ''))
def update_service(self, server, type_, name):
# method is required, but can be ignored if you don't care about updates. We don't.
if self.logger is not None:
# ex. WARNING:pyTivo.beacon:ZCListener.update_service name='Movies._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
# WARNING:pyTivo.beacon:ZCListener.update_service name='LivingRoomVox._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
self.logger.debug(f'ZCListener.update_service {name=} {type_=}')
class ZCBroadcast:
def __init__(self, logger):
""" Announce our shares via Zeroconf. """
self.share_names = []
self.share_info = []
self.logger = logger
self.rz = zeroconf.Zeroconf()
self.renamed = {}
old_titles = self.scan()
address = socket.inet_aton(config.get_ip())
port = int(config.getPort())
logger.info('Announcing pytivo shares ({}:{})...'.format(config.get_ip(), port))
for section, settings in config.getShares():
try:
plugin = GetPlugin(settings['type'])
ct = plugin.CONTENT_TYPE
# if the plugin provides a test for validity use it otherwise assume valid
if hasattr(plugin, 'is_valid') and not plugin.is_valid(section, settings):
logger.warning('share "%s" is invalid. It will be ignored (maybe check that path exists)', section)
continue
except Exception as e:
logger.error('ZCBroadcast.__init__: raised %s: %s', e.__class__.__name__, e)
continue
if ct.startswith('x-container/'):
if 'video' in ct:
platform = PLATFORM_VIDEO
else:
platform = PLATFORM_MAIN
logger.info('Registering: %s' % section)
self.share_names.append(section)
desc = {b'path': bytes(SHARE_TEMPLATE % quote(section), 'utf-8'),
b'platform': bytes(platform, 'utf-8'),
b'protocol': b'http',
b'tsn': bytes('{%s}' % uuid.uuid4(), 'utf-8')}
tt = ct.split('/')[1]
title = section
count = 1
while title in old_titles:
# debugging info while I try to figure out what this loop is for
logger.info(" title b4: {}".format(title))
count += 1
title = '%s [%d]' % (section, count)
self.renamed[section] = title
# more debugging info
logger.info(" title after: {}\n section: {}".format(title, section))
info = zeroconf.ServiceInfo('_%s._tcp.local.' % tt,
'%s._%s._tcp.local.' % (title, tt),
port=port, addresses=[address], properties=desc)
log_serviceinfo(self.logger, info)
self.rz.register_service(info)
self.share_info.append(info)
def scan(self):
""" Look for TiVos using Zeroconf. """
VIDS = '_tivo-videos._tcp.local.'
names = []
self.logger.info('Scanning for TiVos...\n')
# Get the names of servers offering TiVo videos
browser = zeroconf.ServiceBrowser(self.rz, VIDS, None, ZCListener(names, logger=self.logger))
# Give them a second (or more if no one has responded in the 1st second) to respond
time.sleep(1)
max_sec_to_wait = 10
sec_waited = 0
while not names and sec_waited < max_sec_to_wait:
sec_waited += 1
time.sleep(1)
# Any results?
if names:
config.tivos_found = True
# Now get the addresses -- this is the slow part
for name in names:
info = self.rz.get_service_info(VIDS, name + '.' + VIDS)
log_serviceinfo(self.logger, info)
if info:
# zeroconf v2.7 removed ServiceInfo address member says use addresses instead.
# Some debug logging to see if there is always at least the currently assumed 1 address (and maybe more?)
self.logger.debug(f'Found zeroconf.ServiceInfo with {len(info.addresses)} IP addresses\n')
tsn = info.properties.get(b'TSN')
if config.get_togo('all'):
tsn = info.properties.get(b'tsn', tsn)
if tsn:
if isinstance(tsn, bytes):
tsn = tsn.decode('utf-8')
address = socket.inet_ntoa(info.addresses[0])
port = info.port
config.tivos[tsn] = {'name': name, 'address': address,
'port': port}
# info.properties has bytes keys and values, but we'd rather
# deal with str keys and values, so convert them before adding
# them to our tivos dict.
config.tivos[tsn].update(bytes2str(info.properties))
# Debugging information on what services have been found:
# try:
# all_services = zeroconf.ZeroconfServiceTypes.find(self.rz)
# self.logger.info("All services found")
# for s in all_services:
# self.logger.info(" {}".format(s))
# except Exception as e:
# self.logger.error(e)
return names
def shutdown(self):
self.logger.info('Unregistering: %s' % ', '.join(self.share_names))
for info in self.share_info:
self.rz.unregister_service(info)
self.rz.close()
class Beacon:
def __init__(self):
self.UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.services = []
self.timer = None
self.platform = PLATFORM_VIDEO
for section, settings in config.getShares():
try:
ct = GetPlugin(settings['type']).CONTENT_TYPE
except:
continue
if ct in ('x-container/tivo-music', 'x-container/tivo-photos'):
self.platform = PLATFORM_MAIN
break
if config.get_zc():
logger = logging.getLogger('pyTivo.beacon')
try:
self.bd = ZCBroadcast(logger)
except Exception as e:
logger.debug('Beacon.__init__: raised %s: %s', e.__class__.__name__, e)
logger.error('Zeroconf failure')
self.bd = None
else:
self.bd = None
def add_service(self, service):
self.services.append(service)
self.send_beacon()
def format_services(self):
return ';'.join(self.services)
def format_beacon(self, conntype, services=True):
beacon = ['tivoconnect=1',
'method=%s' % conntype,
'identity={%s}' % config.getGUID(),
'machine=%s' % socket.gethostname(),
'platform=%s' % self.platform]
if services:
beacon.append('services=' + self.format_services())
else:
beacon.append('services=TiVoMediaServer:0/http')
return '\n'.join(beacon) + '\n'
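    # Illustrative example (not from the original source): for a machine named 'mypc',
    # format_beacon('broadcast') would return text roughly like:
    #
    #   tivoconnect=1
    #   method=broadcast
    #   identity={<GUID from config.getGUID()>}
    #   machine=mypc
    #   platform=<PLATFORM_VIDEO or PLATFORM_MAIN>
    #   services=<';'-joined strings previously passed to add_service>
    #
    # When called with services=False the last line is instead the fixed string
    # 'services=TiVoMediaServer:0/http'. The placeholder values are assumptions for
    # illustration only.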
def send_beacon(self):
beacon_ips = config.getBeaconAddresses()
beacon = self.format_beacon('broadcast')
for beacon_ip in beacon_ips.split():
if beacon_ip != 'listen':
try:
packet = bytes(beacon, "utf-8")
while packet:
result = self.UDPSock.sendto(packet, (beacon_ip, 2190))
if result < 0:
break
packet = packet[result:]
except Exception as e:
print(e)
def | start | identifier_name |
|
beacon.py | data)
return data
def log_serviceinfo(logger, info):
"""
Write interesting attributes from a ServiceInfo to the log.
    Information written depends on the log level: basic info
    is written at log level INFO; if the log level is DEBUG,
    the basic info plus all properties is written at log level DEBUG.
"""
try:
debugging = logger.isEnabledFor(logging.DEBUG)
log_level = logging.INFO
log_info = {'name': info.name,
'address': socket.inet_ntoa(info.addresses[0]),
'port': info.port}
log_hdr = "\n {address}:{port} {name}\n"
log_fmt = log_hdr
if debugging:
log_level = logging.DEBUG
if info.server != info.name:
log_info['server'] = info.server
log_fmt += " server: {server}\n"
for (k, v) in info.properties.items():
li_k = "prop_" + bytes2str(k)
log_info[li_k] = v
log_fmt += " {k}: {{{li_k}}}\n".format(k=k, li_k=li_k)
logger.log(log_level, log_fmt.format(**log_info))
except:
logger.exception("exception in log_tivo_serviceinfo")
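# Hedged illustration (not part of the original pyTivo code): a minimal sketch showing
# how log_serviceinfo could be exercised with a hand-built zeroconf.ServiceInfo, using
# the same constructor arguments as ZCBroadcast.__init__ below. The service name, port,
# address and properties here are invented for demonstration purposes.
def _example_log_serviceinfo(logger):
    example_info = zeroconf.ServiceInfo(
        '_tivo-videos._tcp.local.',
        'Example._tivo-videos._tcp.local.',
        port=9032,
        addresses=[socket.inet_aton('192.168.1.10')],
        properties={b'platform': b'pc/pyTivo', b'protocol': b'http'})
    log_serviceinfo(logger, example_info)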
class ZCListener:
# pylint: disable=redefined-builtin
def __init__(self, names, logger=None):
self.names = names
self.logger = logger
def remove_service(self, server, type_, name):
self.names.remove(name.replace('.' + type_, ''))
def add_service(self, server, type_, name):
self.names.append(name.replace('.' + type_, ''))
def update_service(self, server, type_, name):
# method is required, but can be ignored if you don't care about updates. We don't.
if self.logger is not None:
# ex. WARNING:pyTivo.beacon:ZCListener.update_service name='Movies._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
# WARNING:pyTivo.beacon:ZCListener.update_service name='LivingRoomVox._tivo-videos._tcp.local.' type_='_tivo-videos._tcp.local.'
self.logger.debug(f'ZCListener.update_service {name=} {type_=}')
class ZCBroadcast:
def __init__(self, logger):
""" Announce our shares via Zeroconf. """
self.share_names = []
self.share_info = []
self.logger = logger
self.rz = zeroconf.Zeroconf()
self.renamed = {}
old_titles = self.scan()
address = socket.inet_aton(config.get_ip())
port = int(config.getPort())
logger.info('Announcing pytivo shares ({}:{})...'.format(config.get_ip(), port))
for section, settings in config.getShares():
try:
plugin = GetPlugin(settings['type'])
ct = plugin.CONTENT_TYPE
# if the plugin provides a validity test, use it; otherwise assume the share is valid
if hasattr(plugin, 'is_valid') and not plugin.is_valid(section, settings):
logger.warning('share "%s" is invalid. It will be ignored (maybe check that path exists)', section)
continue
except Exception as e:
logger.error('ZCBroadcast.__init__: raised %s: %s', e.__class__.__name__, e)
continue
if ct.startswith('x-container/'):
if 'video' in ct:
platform = PLATFORM_VIDEO
else:
platform = PLATFORM_MAIN
logger.info('Registering: %s' % section)
self.share_names.append(section)
desc = {b'path': bytes(SHARE_TEMPLATE % quote(section), 'utf-8'),
b'platform': bytes(platform, 'utf-8'),
b'protocol': b'http',
b'tsn': bytes('{%s}' % uuid.uuid4(), 'utf-8')}
tt = ct.split('/')[1]
title = section
count = 1
while title in old_titles:
# debugging info while I try to figure out what this loop is for
logger.info(" title b4: {}".format(title))
count += 1
title = '%s [%d]' % (section, count)
self.renamed[section] = title
# more debugging info
logger.info(" title after: {}\n section: {}".format(title, section))
info = zeroconf.ServiceInfo('_%s._tcp.local.' % tt,
'%s._%s._tcp.local.' % (title, tt),
port=port, addresses=[address], properties=desc)
log_serviceinfo(self.logger, info)
self.rz.register_service(info)
self.share_info.append(info)
def scan(self):
""" Look for TiVos using Zeroconf. """
VIDS = '_tivo-videos._tcp.local.'
names = []
self.logger.info('Scanning for TiVos...\n')
# Get the names of servers offering TiVo videos
browser = zeroconf.ServiceBrowser(self.rz, VIDS, None, ZCListener(names, logger=self.logger))
# Give them a second (or more if no one has responded in the 1st second) to respond
time.sleep(1)
max_sec_to_wait = 10
sec_waited = 0
while not names and sec_waited < max_sec_to_wait:
sec_waited += 1
time.sleep(1)
# Any results?
if names:
config.tivos_found = True
# Now get the addresses -- this is the slow part
for name in names:
info = self.rz.get_service_info(VIDS, name + '.' + VIDS)
log_serviceinfo(self.logger, info)
if info:
# zeroconf v2.7 removed the ServiceInfo 'address' member; use 'addresses' instead.
# Some debug logging to see if there is always at least the currently assumed 1 address (and maybe more?)
self.logger.debug(f'Found zeroconf.ServiceInfo with {len(info.addresses)} IP addresses\n')
tsn = info.properties.get(b'TSN')
if config.get_togo('all'):
tsn = info.properties.get(b'tsn', tsn)
if tsn:
if isinstance(tsn, bytes):
tsn = tsn.decode('utf-8')
address = socket.inet_ntoa(info.addresses[0])
port = info.port
config.tivos[tsn] = {'name': name, 'address': address,
'port': port}
# info.properties has bytes keys and values, but we'd rather
# deal with str keys and values, so convert them before adding
# them to our tivos dict.
config.tivos[tsn].update(bytes2str(info.properties))
# Debugging information on what services have been found:
# try:
# all_services = zeroconf.ZeroconfServiceTypes.find(self.rz)
# self.logger.info("All services found")
# for s in all_services:
# self.logger.info(" {}".format(s))
# except Exception as e:
# self.logger.error(e)
return names
def shutdown(self):
self.logger.info('Unregistering: %s' % ', '.join(self.share_names))
for info in self.share_info:
self.rz.unregister_service(info)
self.rz.close()
class Beacon:
def __init__(self):
self.UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.UDPSock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.services = []
self.timer = None
self.platform = PLATFORM_VIDEO
for section, settings in config.getShares():
try:
ct = GetPlugin(settings['type']).CONTENT_TYPE
except:
continue
if ct in ('x-container/tivo-music', 'x-container/tivo-photos'):
self.platform = PLATFORM_MAIN
break
if config.get_zc():
logger = logging.getLogger('pyTivo.beacon')
try:
self.bd = ZCBroadcast(logger)
except Exception as e:
logger.debug('Beacon.__init__: raised %s: %s', e.__class__.__name__, e)
logger.error('Zeroconf failure')
self.bd = None
else:
self.bd = None
def add_service(self, service):
self.services.append(service)
self.send_beacon()
def format_services(self):
|
def format_beacon(self, conntype, services=True):
beacon = ['tivoconnect=1',
'method=%s' % conntype,
'identity={%s}' % config.getGUID(),
'machine=%s' % socket.gethostname(),
'platform=%s' % self.platform]
if services:
beacon.append('services=' + self.format_services())
else:
beacon.append('services=TiVoMediaServer:0/http')
return '\n'.join(beacon) + '\n'
def send_beacon(self):
beacon_ips = config.getBeaconAddresses()
beacon = self.format_beacon('broadcast')
for beacon_ip in beacon_ips.split():
if beacon_ip != 'listen':
try:
packet = bytes(beacon, "utf-8")
while packet:
result = self.UDPSock | return ';'.join(self.services) | identifier_body |
ctap.rs | romiumos/platform2/+/master/u2fd/u2fhid.cc
static REPORT_DESCRIPTOR: &'static [u8] = &[
0x06, 0xD0, 0xF1, // HID_UsagePage ( FIDO_USAGE_PAGE ),
0x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well, if we have a buffer then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subclass
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before, let's pass it.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(())
}
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
} | fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Setup buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl | }
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> { | random_line_split |
ctap.rs |
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before, let's pass it.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(())
}
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> {
fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Setup buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl_in(endpoint)
}
/// Handle a Control Out transaction
fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult {
self.client_ctrl.ctrl_out(endpoint, packet_bytes)
}
fn ctrl_status(&'a self, endpoint: usize) {
self.client_ctrl.ctrl_status(endpoint)
}
/// Handle the completion of a Control transfer
fn ctrl_status_complete(&'a self, endpoint: usize) {
if self.send_buffer.is_some() {
self.controller().endpoint_resume_in(ENDPOINT_NUM);
}
self.client_ctrl.ctrl_status_complete(endpoint)
}
/// Handle a Bulk/Interrupt IN transaction.
///
/// This is called when we can send data to the host. It should get called
/// when we tell the controller we want to resume the IN endpoint (meaning
/// we know we have data to send) and afterwards until we return
/// `hil::usb::InResult::Delay` from this function. In other words, the call that
/// arrives once we have nothing left to send can be treated as the signal that
/// the transmission has finished.
fn packet_in(&'a self, transfer_type: TransferType, _endpoint: usize) -> hil::usb::InResult {
match transfer_type {
TransferType::Interrupt => {
self.send_buffer
.take()
.map_or(hil::usb::InResult::Delay, |buf| {
// Get packet that we have shared with the underlying
// USB stack to copy the tx into.
let packet = &self.buffers[IN_BUFFER].buf;
// Copy from the TX buffer to the outgoing USB packet.
for i in 0..64 {
packet[i].set(buf[i]);
}
// Put the TX buffer back so we can keep sending from it.
self.send_buffer.replace(buf);
// Return that we have data to send.
hil::usb::InResult::Packet(64)
})
}
TransferType::Bulk | TransferType::Control | TransferType::Isochronous => {
panic!("Transfer protocol not supported by CTAP v2");
}
}
}
/// Handle a Bulk/Interrupt OUT transaction
///
/// This is data going from the host to the device (us)
fn packet_out(
&'a self,
transfer_type: TransferType,
endpoint: usize,
packet_bytes: u32,
) -> hil::usb::OutResult {
match transfer_type {
TransferType::Interrupt => {
self.recv_buffer
.take()
.map_or(hil::usb::OutResult::Error, |buf| {
let recv_offset = self.recv_offset.get();
// How many more bytes can we store in our RX buffer?
let available_bytes = buf.len() - recv_offset;
let copy_length = cmp::min(packet_bytes as usize, available_bytes);
// Do the copy into the RX buffer.
let packet = &self.buffers[OUT_BUFFER].buf;
for i in 0..copy_length {
buf[recv_offset + i] = packet[i].get();
}
// Keep track of how many bytes we have received so far.
let total_received_bytes = recv_offset + copy_length;
// Update how many bytes we have gotten.
self.recv_offset.set(total_received_bytes);
// Check if we have received at least as many bytes as the
// client asked for.
if total_received_bytes >= self.recv_len.get() {
if self.can_receive() {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, endpoint);
});
// Reset the offset
self.recv_offset.set(0);
// Delay the next packet until we have finished
// processing this packet
hil::usb::OutResult::Delay
} else | {
// We can't receive data. Record that we have data to send later
// and apply back pressure to USB
self.saved_endpoint.set(endpoint);
self.recv_buffer.replace(buf);
hil::usb::OutResult::Delay
} | conditional_block |
|
ctap.rs | os/platform2/+/master/u2fd/u2fhid.cc
static REPORT_DESCRIPTOR: &'static [u8] = &[
0x06, 0xD0, 0xF1, // HID_UsagePage ( FIDO_USAGE_PAGE ),
0x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well, if we have a buffer then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subclass
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> | }
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> {
fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Setup buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl | {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before, let's pass it.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(()) | identifier_body |
ctap.rs | x09, 0x01, // HID_Usage ( FIDO_USAGE_CTAPHID ),
0xA1, 0x01, // HID_Collection ( HID_Application ),
0x09, 0x20, // HID_Usage ( FIDO_USAGE_DATA_IN ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_INPUT_REPORT_BYTES ),
0x81, 0x02, // HID_Input ( HID_Data | HID_Absolute | HID_Variable ),
0x09, 0x21, // HID_Usage ( FIDO_USAGE_DATA_OUT ),
0x15, 0x00, // HID_LogicalMin ( 0 ),
0x26, 0xFF, 0x00, // HID_LogicalMaxS ( 0xff ),
0x75, 0x08, // HID_ReportSize ( 8 ),
0x95, 0x40, // HID_ReportCount ( HID_OUTPUT_REPORT_BYTES ),
0x91, 0x02, // HID_Output ( HID_Data | HID_Absolute | HID_Variable ),
0xC0, // HID_EndCollection
];
static REPORT: ReportDescriptor<'static> = ReportDescriptor {
desc: REPORT_DESCRIPTOR,
};
static SUB_HID_DESCRIPTOR: &'static [HIDSubordinateDescriptor] = &[HIDSubordinateDescriptor {
typ: DescriptorType::Report,
len: REPORT_DESCRIPTOR.len() as u16,
}];
static HID_DESCRIPTOR: HIDDescriptor<'static> = HIDDescriptor {
hid_class: 0x0110,
country_code: HIDCountryCode::NotSupported,
sub_descriptors: SUB_HID_DESCRIPTOR,
};
/// Implementation of the CTAP HID (Human Interface Device)
pub struct CtapHid<'a, U: 'a> {
/// Helper USB client library for handling many USB operations.
client_ctrl: ClientCtrl<'a, 'static, U>,
/// 64 byte buffers for each endpoint.
buffers: [Buffer64; N_ENDPOINTS],
client: OptionalCell<&'a dyn hil::usb_hid::Client<'a, [u8; 64]>>,
/// A buffer to hold the data we want to send
send_buffer: TakeCell<'static, [u8; 64]>,
/// A holder for the buffer to receive bytes into. We use this as a flag as
/// well, if we have a buffer then we are actively doing a receive.
recv_buffer: TakeCell<'static, [u8; 64]>,
/// How many bytes the client wants us to receive.
recv_len: Cell<usize>,
/// How many bytes we have received so far.
recv_offset: Cell<usize>,
saved_endpoint: OptionalCell<usize>,
}
impl<'a, U: hil::usb::UsbController<'a>> CtapHid<'a, U> {
pub fn new(
controller: &'a U,
vendor_id: u16,
product_id: u16,
strings: &'static [&'static str; 3],
) -> Self {
let interfaces: &mut [InterfaceDescriptor] = &mut [InterfaceDescriptor {
interface_number: 0,
interface_class: 0x03, // HID
interface_subclass: 0x00, // No subclass
interface_protocol: 0x00, // No protocol
..InterfaceDescriptor::default()
}];
let endpoints: &[&[EndpointDescriptor]] = &[&[
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::DeviceToHost,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
EndpointDescriptor {
endpoint_address: EndpointAddress::new_const(
ENDPOINT_NUM,
TransferDirection::HostToDevice,
),
transfer_type: TransferType::Interrupt,
max_packet_size: 64,
interval: 5,
},
]];
let (device_descriptor_buffer, other_descriptor_buffer) =
descriptors::create_descriptor_buffers(
descriptors::DeviceDescriptor {
vendor_id: vendor_id,
product_id: product_id,
manufacturer_string: 1,
product_string: 2,
serial_number_string: 3,
class: 0x03, // Class: HID
max_packet_size_ep0: MAX_CTRL_PACKET_SIZE,
..descriptors::DeviceDescriptor::default()
},
descriptors::ConfigurationDescriptor {
..descriptors::ConfigurationDescriptor::default()
},
interfaces,
endpoints,
Some(&HID_DESCRIPTOR),
None,
);
CtapHid {
client_ctrl: ClientCtrl::new(
controller,
device_descriptor_buffer,
other_descriptor_buffer,
Some(&HID_DESCRIPTOR),
Some(&REPORT),
LANGUAGES,
strings,
),
buffers: [Buffer64::default(), Buffer64::default()],
client: OptionalCell::empty(),
send_buffer: TakeCell::empty(),
recv_buffer: TakeCell::empty(),
recv_len: Cell::new(0),
recv_offset: Cell::new(0),
saved_endpoint: OptionalCell::empty(),
}
}
#[inline]
fn controller(&self) -> &'a U {
self.client_ctrl.controller()
}
pub fn set_client(&'a self, client: &'a dyn hil::usb_hid::Client<'a, [u8; 64]>) {
self.client.set(client);
}
fn can_receive(&'a self) -> bool {
self.client
.map(move |client| client.can_receive())
.unwrap_or(false)
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb_hid::UsbHid<'a, [u8; 64]> for CtapHid<'a, U> {
fn send_buffer(
&'a self,
send: &'static mut [u8; 64],
) -> Result<usize, (ErrorCode, &'static mut [u8; 64])> {
let len = send.len();
self.send_buffer.replace(send);
self.controller().endpoint_resume_in(ENDPOINT_NUM);
Ok(len)
}
fn send_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
match self.send_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
fn receive_buffer(
&'a self,
recv: &'static mut [u8; 64],
) -> Result<(), (ErrorCode, &'static mut [u8; 64])> {
self.recv_buffer.replace(recv);
if self.saved_endpoint.is_some() {
// We have saved data from before, let's pass it.
if self.can_receive() {
self.recv_buffer.take().map(|buf| {
self.client.map(move |client| {
client.packet_received(Ok(()), buf, self.saved_endpoint.take().unwrap());
});
});
// Reset the offset
self.recv_offset.set(0);
}
} else {
// If we have nothing to process, accept more data
self.controller().endpoint_resume_out(ENDPOINT_NUM);
}
Ok(())
}
fn receive_cancel(&'a self) -> Result<&'static mut [u8; 64], ErrorCode> {
self.saved_endpoint.take();
match self.recv_buffer.take() {
Some(buf) => Ok(buf),
None => Err(ErrorCode::BUSY),
}
}
}
impl<'a, U: hil::usb::UsbController<'a>> hil::usb::Client<'a> for CtapHid<'a, U> {
fn enable(&'a self) {
// Set up the default control endpoint
self.client_ctrl.enable();
// Setup buffers for IN and OUT data transfer.
self.controller()
.endpoint_set_out_buffer(ENDPOINT_NUM, &self.buffers[OUT_BUFFER].buf);
self.controller()
.endpoint_set_in_buffer(ENDPOINT_NUM, &self.buffers[IN_BUFFER].buf);
self.controller()
.endpoint_in_out_enable(TransferType::Interrupt, ENDPOINT_NUM);
}
fn attach(&'a self) {
self.client_ctrl.attach();
}
fn bus_reset(&'a self) {}
/// Handle a Control Setup transaction.
fn ctrl_setup(&'a self, endpoint: usize) -> hil::usb::CtrlSetupResult {
self.client_ctrl.ctrl_setup(endpoint)
}
/// Handle a Control In transaction
fn ctrl_in(&'a self, endpoint: usize) -> hil::usb::CtrlInResult {
self.client_ctrl.ctrl_in(endpoint)
}
/// Handle a Control Out transaction
fn ctrl_out(&'a self, endpoint: usize, packet_bytes: u32) -> hil::usb::CtrlOutResult {
self.client_ctrl.ctrl_out(endpoint, packet_bytes)
}
fn | ctrl_status | identifier_name |
|
oracle.py | _transformation_config["prediction_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
data_transformation_config["features_start_market_minute"] = self.scheduling.training_frequency.minutes_offset
data_transformation_config["target_delta_ndays"] = int(self.scheduling.prediction_horizon.days)
data_transformation_config["target_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
self._target_feature = self._extract_target_feature(self._feature_list)
self._data_transformation = GymDataTransformation(data_transformation_config)
def train(self, data, execution_time):
"""
Trains the model
:param dict data: OHLCV data as dictionary of pandas DataFrame.
:param datetime.datetime execution_time: time of execution of training
:return:
"""
logger.info('Training model on {}.'.format(
execution_time,
))
train_x_dict, train_y_dict = self._data_transformation.create_train_data(data)
logger.info("Preprocessing training data")
train_x = self._preprocess_inputs(train_x_dict)
train_y = self._preprocess_outputs(train_y_dict)
logger.info("Processed train_x shape {}".format(train_x.shape))
train_x, train_y = self.filter_nan_samples(train_x, train_y)
logger.info("Filtered train_x shape {}".format(train_x.shape))
n_valid_samples = train_x.shape[0]
if n_valid_samples == 0:
raise ValueError("Aborting training: No valid samples")
elif n_valid_samples < 2e4:
logger.warning("Low number of training samples: {}".format(n_valid_samples))
# Topology can either be directly constructed from layers, or build from sequence of parameters
if self._topology is None:
n_timesteps = train_x.shape[2]
self.initialise_topology(n_timesteps)
logger.info('Initialised network topology: {}.'.format(self._topology.layers))
logger.info('Training features of shape: {}.'.format(
train_x.shape,
))
logger.info('Training labels of shape: {}.'.format(
train_y.shape,
))
resume_train_path = None
if self._tensorflow_flags.resume_training:
try:
resume_train_path = self._train_file_manager.latest_train_filename(execution_time)
except ValueError:
pass
train_path = self._train_file_manager.new_filename(execution_time)
tensorflow_path = TensorflowPath(train_path, resume_train_path)
tensorboard_options = TensorboardOptions(self._tensorflow_flags.tensorboard_log_path,
self._tensorflow_flags.learning_rate,
self._tensorflow_flags.batch_size,
execution_time
)
first_sample = train_x[0, :].flatten()
logger.info("Sample from first example in train_x: {}".format(first_sample[0:8]))
data_provider = TrainDataProvider(train_x, train_y, self._tensorflow_flags.batch_size)
self._do_train(tensorflow_path, tensorboard_options, data_provider)
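    # Hedged usage sketch (not part of the original module): training is typically
    # driven by something like
    #
    #   oracle.train(ohlcv_data, execution_time=datetime.datetime(2018, 1, 15, 14, 30))
    #
    # where ohlcv_data is the dict of pandas DataFrames described in the docstring and
    # the timestamp is an assumed example value.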
@logtime(message="Training the model.")
def _do_train(self, tensorflow_path, tensorboard_options, data_provider):
cromulon.train(self._topology, data_provider, tensorflow_path, tensorboard_options, self._tensorflow_flags)
def _get_train_template(self):
|
def predict_classification(self, data, current_timestamp):
""" Returns the raw pdf from the network. """
latest_train_file = self._train_file_manager.latest_train_filename(current_timestamp)
predict_x, symbols, prediction_timestamp, target_timestamp = self._data_transformation.create_predict_data(
data)
predict_x = self._preprocess_inputs(predict_x)
if self._topology is None:
n_timesteps = predict_x.shape[2]
self.initialise_topology(n_timesteps)
# Verify data is the correct shape
network_input_shape = self._topology.get_network_input_shape()
data_input_shape = predict_x.shape[-3:]
if data_input_shape != network_input_shape:
err_msg = 'Data shape ' + str(data_input_shape) + " doesn't match network input " + str(
network_input_shape)
raise ValueError(err_msg)
predict_y = cromulon_eval.eval_neural_net(
predict_x, self._topology,
self._tensorflow_flags,
latest_train_file
)
if self._tensorflow_flags.predict_single_shares: # Return batch axis to series position
predict_y = np.swapaxes(predict_y, axis1=1, axis2=2)
predict_y = np.squeeze(predict_y, axis=1)
target_timestamps = []
for i in range(self._topology.n_forecasts):
temp_timestamp = deepcopy(target_timestamp)
target_timestamps.append(temp_timestamp)
target_timestamp += timedelta(days=self._data_transformation.target_delta_ndays)
return predict_y, symbols, target_timestamps
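    # Worked example (values assumed): with self._data_transformation.target_delta_ndays
    # equal to 1 and self._topology.n_forecasts equal to 3, the loop above yields
    # target_timestamps of [T, T + 1 day, T + 2 days], where T is the target_timestamp
    # returned by create_predict_data.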
def predict(self, data, current_timestamp, number_of_iterations=1):
"""
Main method that gives us a prediction after the training phase is done
:param data: The dict of dataframes to be used for prediction
:type data: dict
:param current_timestamp: The timestamp of the time when the prediction is executed
:type current_timestamp: datetime.datetime
:param number_of_iterations: The number of iterations which we use to sample the uncertain features.
:type number_of_iterations: Integer
:return: Mean forecast, lower and upper confidence limits, and the timestamp of the prediction
:rtype: OraclePrediction
"""
if self._topology is None:
logger.warning('Not ready for prediction - safer to run train first')
logger.info('Cromulon Oracle prediction on {}.'.format(current_timestamp))
predict_y_list = []
for i in range(number_of_iterations):
predict_y, symbols, target_timestamps = self.predict_classification(data, current_timestamp)
predict_y_list.append(predict_y)
predict_y_stack = np.stack(predict_y_list)
average_predict_y = np.mean(predict_y_stack, axis=0)
means, conf_low, conf_high = self._data_transformation.inverse_transform_multi_predict_y(average_predict_y, symbols)
self.log_validity_of_predictions(means, conf_low, conf_high)
means_pd = pd.DataFrame(data=means, columns=symbols, index=target_timestamps)
conf_low_pd = pd.DataFrame(data=conf_low, columns=symbols, index=target_timestamps)
conf_high_pd = pd.DataFrame(data=conf_high, columns=symbols, index=target_timestamps)
means_pd, conf_low_pd, conf_high_pd = self.filter_predictions(means_pd, conf_low_pd, conf_high_pd)
return OraclePrediction(means_pd, conf_low_pd, conf_high_pd, current_timestamp)
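    # Hedged usage sketch (not part of the original module): a caller might do
    #
    #   prediction = oracle.predict(ohlcv_data, current_timestamp, number_of_iterations=10)
    #
    # and read the mean forecast and confidence bounds off the returned OraclePrediction.
    # Only the constructor argument order (means, lower bound, upper bound, timestamp) is
    # visible in this file; the attribute names on OraclePrediction are not shown here.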
def log_validity_of_predictions(self, means, conf_low, conf_high):
""" Checks that the network outputs are sensible. """
if not (np.isfinite(conf_low).all() and np.isfinite(conf_high).all()):
logger.warning('Confidence interval contains non-finite values.')
if not np.isfinite(means).all():
logger.warning('Means found to contain non-finite values.')
logger.info('Samples from predicted means: {}'.format(means[0:10]))
def filter_predictions(self, means, conf_low, conf_high):
""" Drops any predictions that are NaN, and remove those symbols from the corresponding confidence dataframe.
:param pdDF means: The predictions from which we'll extract the valid ones
:param pdDF conf_low: Lower bound of the confidence range of the prediction
:param pdDF conf_high: Upper bound of the confidence range of the prediction
:return: pdDF, pdDF, pdDF
"""
means = means.dropna()
valid_symbols = means.index.tolist()
conf_low = conf_low.loc[valid_symbols]
conf_high = conf_high.loc[valid_symbols]
return means, conf_low, conf_high
def filter_nan_samples(self, train_x, train_y):
""" Remove any sample in zeroth dimension which holds a nan """
n_samples = train_x.shape[0]
if n_samples != train_y.shape[0]:
raise ValueError("x and y sample lengths don't match")
validity_array = np.zeros(n_samples)
for i in range(n_samples):
x_sample = train_x[i, :]
y_sample = train_y[i, :]
validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()
mask = np.where(validity_array)[0]
return train_x[mask, :], train_y[mask, :]
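    # Illustration (assumed toy data, not in the original): given
    #   train_x = np.array([[1.0], [np.nan], [3.0]])
    #   train_y = np.array([[0.0], [1.0], [2.0]])
    # the second sample is dropped because its x contains a NaN, so the call returns
    # arrays equivalent to np.array([[1.0], [3.0]]) and np.array([[0.0], [2.0]]).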
def print_verification_report(self, data, data_name):
data = data.flatten()
nans = np.isnan(data).sum()
infs = np.isinf(data).sum()
finite_data = data[np.isfinite(data)]
max_data = np.max(finite_data)
min_data = np.min(finite_data)
mean = np.mean(finite_data)
sigma = np.std(finite_data)
logger.info("{} Infs, Nans: {}, {}".format(data_name, infs, nans))
logger.info("{} Min, Max: {}, {}".format(data_name, min_data, max_data))
logger.info("{} Mean, Sigma: {}, {}".format(data_name, mean, sigma))
if data_name == 'X_data' and np.abs(mean) > 1e-2:
logger.warning('Mean of input data is too large')
if data_name == 'Y_data' and max_data < 1e-2:
raise ValueError("Y Data not classified")
return min_data, max_data
def verify_y_data(self, y_data):
| return TRAIN_FILE_NAME_TEMPLATE | identifier_body |
oracle.py | _transformation_config["prediction_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
data_transformation_config["features_start_market_minute"] = self.scheduling.training_frequency.minutes_offset
data_transformation_config["target_delta_ndays"] = int(self.scheduling.prediction_horizon.days)
data_transformation_config["target_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
self._target_feature = self._extract_target_feature(self._feature_list)
self._data_transformation = GymDataTransformation(data_transformation_config)
def train(self, data, execution_time):
"""
Trains the model
:param dict data: OHLCV data as dictionary of pandas DataFrame.
:param datetime.datetime execution_time: time of execution of training
:return:
"""
logger.info('Training model on {}.'.format(
execution_time,
))
train_x_dict, train_y_dict = self._data_transformation.create_train_data(data)
logger.info("Preprocessing training data")
train_x = self._preprocess_inputs(train_x_dict)
train_y = self._preprocess_outputs(train_y_dict)
logger.info("Processed train_x shape {}".format(train_x.shape))
train_x, train_y = self.filter_nan_samples(train_x, train_y)
logger.info("Filtered train_x shape {}".format(train_x.shape))
n_valid_samples = train_x.shape[0]
if n_valid_samples == 0:
raise ValueError("Aborting training: No valid samples")
elif n_valid_samples < 2e4:
logger.warning("Low number of training samples: {}".format(n_valid_samples))
# Topology can either be directly constructed from layers, or build from sequence of parameters
if self._topology is None:
|
logger.info('Initialised network topology: {}.'.format(self._topology.layers))
logger.info('Training features of shape: {}.'.format(
train_x.shape,
))
logger.info('Training labels of shape: {}.'.format(
train_y.shape,
))
resume_train_path = None
if self._tensorflow_flags.resume_training:
try:
resume_train_path = self._train_file_manager.latest_train_filename(execution_time)
except ValueError:
pass
train_path = self._train_file_manager.new_filename(execution_time)
tensorflow_path = TensorflowPath(train_path, resume_train_path)
tensorboard_options = TensorboardOptions(self._tensorflow_flags.tensorboard_log_path,
self._tensorflow_flags.learning_rate,
self._tensorflow_flags.batch_size,
execution_time
)
first_sample = train_x[0, :].flatten()
logger.info("Sample from first example in train_x: {}".format(first_sample[0:8]))
data_provider = TrainDataProvider(train_x, train_y, self._tensorflow_flags.batch_size)
self._do_train(tensorflow_path, tensorboard_options, data_provider)
@logtime(message="Training the model.")
def _do_train(self, tensorflow_path, tensorboard_options, data_provider):
cromulon.train(self._topology, data_provider, tensorflow_path, tensorboard_options, self._tensorflow_flags)
def _get_train_template(self):
return TRAIN_FILE_NAME_TEMPLATE
def predict_classification(self, data, current_timestamp):
""" Returns the raw pdf from the network. """
latest_train_file = self._train_file_manager.latest_train_filename(current_timestamp)
predict_x, symbols, prediction_timestamp, target_timestamp = self._data_transformation.create_predict_data(
data)
predict_x = self._preprocess_inputs(predict_x)
if self._topology is None:
n_timesteps = predict_x.shape[2]
self.initialise_topology(n_timesteps)
# Verify data is the correct shape
network_input_shape = self._topology.get_network_input_shape()
data_input_shape = predict_x.shape[-3:]
if data_input_shape != network_input_shape:
err_msg = 'Data shape ' + str(data_input_shape) + " doesn't match network input " + str(
network_input_shape)
raise ValueError(err_msg)
predict_y = cromulon_eval.eval_neural_net(
predict_x, self._topology,
self._tensorflow_flags,
latest_train_file
)
if self._tensorflow_flags.predict_single_shares: # Return batch axis to series position
predict_y = np.swapaxes(predict_y, axis1=1, axis2=2)
predict_y = np.squeeze(predict_y, axis=1)
target_timestamps = []
for i in range(self._topology.n_forecasts):
temp_timestamp = deepcopy(target_timestamp)
target_timestamps.append(temp_timestamp)
target_timestamp += timedelta(days=self._data_transformation.target_delta_ndays)
return predict_y, symbols, target_timestamps
def predict(self, data, current_timestamp, number_of_iterations=1):
"""
Main method that gives us a prediction after the training phase is done
:param data: The dict of dataframes to be used for prediction
:type data: dict
:param current_timestamp: The timestamp of the time when the prediction is executed
:type current_timestamp: datetime.datetime
:param number_of_iterations: The number of iterations which we use to sample the uncertain features.
:type number_of_iterations: Integer
:return: Mean forecast, lower and upper confidence limits, and the timestamp of the prediction
:rtype: OraclePrediction
"""
if self._topology is None:
logger.warning('Not ready for prediction - safer to run train first')
logger.info('Cromulon Oracle prediction on {}.'.format(current_timestamp))
predict_y_list = []
for i in range(number_of_iterations):
predict_y, symbols, target_timestamps = self.predict_classification(data, current_timestamp)
predict_y_list.append(predict_y)
predict_y_stack = np.stack(predict_y_list)
average_predict_y = np.mean(predict_y_stack, axis=0)
means, conf_low, conf_high = self._data_transformation.inverse_transform_multi_predict_y(average_predict_y, symbols)
self.log_validity_of_predictions(means, conf_low, conf_high)
means_pd = pd.DataFrame(data=means, columns=symbols, index=target_timestamps)
conf_low_pd = pd.DataFrame(data=conf_low, columns=symbols, index=target_timestamps)
conf_high_pd = pd.DataFrame(data=conf_high, columns=symbols, index=target_timestamps)
means_pd, conf_low_pd, conf_high_pd = self.filter_predictions(means_pd, conf_low_pd, conf_high_pd)
return OraclePrediction(means_pd, conf_low_pd, conf_high_pd, current_timestamp)
def log_validity_of_predictions(self, means, conf_low, conf_high):
""" Checks that the network outputs are sensible. """
if not (np.isfinite(conf_low).all() and np.isfinite(conf_high).all()):
logger.warning('Confidence interval contains non-finite values.')
if not np.isfinite(means).all():
logger.warning('Means found to contain non-finite values.')
logger.info('Samples from predicted means: {}'.format(means[0:10]))
def filter_predictions(self, means, conf_low, conf_high):
""" Drops any predictions that are NaN, and remove those symbols from the corresponding confidence dataframe.
:param pdDF means: The predictions from which we'll extract the valid ones
:param pdDF conf_low: Lower bound of the confidence range of the prediction
:param pdDF conf_high: Upper bound of the confidence range of the prediction
:return: pdDF, pdDF, pdDF
"""
means = means.dropna()
valid_symbols = means.index.tolist()
conf_low = conf_low.loc[valid_symbols]
conf_high = conf_high.loc[valid_symbols]
return means, conf_low, conf_high
def filter_nan_samples(self, train_x, train_y):
""" Remove any sample in zeroth dimension which holds a nan """
n_samples = train_x.shape[0]
if n_samples != train_y.shape[0]:
raise ValueError("x and y sample lengths don't match")
validity_array = np.zeros(n_samples)
for i in range(n_samples):
x_sample = train_x[i, :]
y_sample = train_y[i, :]
validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()
mask = np.where(validity_array)[0]
return train_x[mask, :], train_y[mask, :]
def print_verification_report(self, data, data_name):
data = data.flatten()
nans = np.isnan(data).sum()
infs = np.isinf(data).sum()
finite_data = data[np.isfinite(data)]
max_data = np.max(finite_data)
min_data = np.min(finite_data)
mean = np.mean(finite_data)
sigma = np.std(finite_data)
logger.info("{} Infs, Nans: {}, {}".format(data_name, infs, nans))
logger.info("{} Min, Max: {}, {}".format(data_name, min_data, max_data))
logger.info("{} Mean, Sigma: {}, {}".format(data_name, mean, sigma))
if data_name == 'X_data' and np.abs(mean) > 1e-2:
logger.warning('Mean of input data is too large')
if data_name == 'Y_data' and max_data < 1e-2:
raise ValueError("Y Data not classified")
return min_data, max_data
def verify_y_data(self, y_data | n_timesteps = train_x.shape[2]
self.initialise_topology(n_timesteps) | conditional_block |
oracle.py | def predict_classification(self, data, current_timestamp):
""" Returns the raw pdf from the network. """
latest_train_file = self._train_file_manager.latest_train_filename(current_timestamp)
predict_x, symbols, prediction_timestamp, target_timestamp = self._data_transformation.create_predict_data(
data)
predict_x = self._preprocess_inputs(predict_x)
if self._topology is None:
n_timesteps = predict_x.shape[2]
self.initialise_topology(n_timesteps)
# Verify data is the correct shape
network_input_shape = self._topology.get_network_input_shape()
data_input_shape = predict_x.shape[-3:]
if data_input_shape != network_input_shape:
err_msg = 'Data shape ' + str(data_input_shape) + " doesn't match network input " + str(
network_input_shape)
raise ValueError(err_msg)
predict_y = cromulon_eval.eval_neural_net(
predict_x, self._topology,
self._tensorflow_flags,
latest_train_file
)
if self._tensorflow_flags.predict_single_shares: # Return batch axis to series position
predict_y = np.swapaxes(predict_y, axis1=1, axis2=2)
predict_y = np.squeeze(predict_y, axis=1)
target_timestamps = []
for i in range(self._topology.n_forecasts):
temp_timestamp = deepcopy(target_timestamp)
target_timestamps.append(temp_timestamp)
target_timestamp += timedelta(days=self._data_transformation.target_delta_ndays)
return predict_y, symbols, target_timestamps
def predict(self, data, current_timestamp, number_of_iterations=1):
"""
Main method that gives us a prediction after the training phase is done
:param data: The dict of dataframes to be used for prediction
:type data: dict
:param current_timestamp: The timestamp of the time when the prediction is executed
:type current_timestamp: datetime.datetime
:param number_of_iterations: The number of iterations which we use to sample the uncertain features.
:type number_of_iterations: Integer
:return: Mean forecast, lower and upper confidence limits, and the timestamp of the prediction
:rtype: OraclePrediction
"""
if self._topology is None:
logger.warning('Not ready for prediction - safer to run train first')
logger.info('Cromulon Oracle prediction on {}.'.format(current_timestamp))
predict_y_list = []
for i in range(number_of_iterations):
predict_y, symbols, target_timestamps = self.predict_classification(data, current_timestamp)
predict_y_list.append(predict_y)
predict_y_stack = np.stack(predict_y_list)
average_predict_y = np.mean(predict_y_stack, axis=0)
means, conf_low, conf_high = self._data_transformation.inverse_transform_multi_predict_y(average_predict_y, symbols)
self.log_validity_of_predictions(means, conf_low, conf_high)
means_pd = pd.DataFrame(data=means, columns=symbols, index=target_timestamps)
conf_low_pd = pd.DataFrame(data=conf_low, columns=symbols, index=target_timestamps)
conf_high_pd = pd.DataFrame(data=conf_high, columns=symbols, index=target_timestamps)
means_pd, conf_low_pd, conf_high_pd = self.filter_predictions(means_pd, conf_low_pd, conf_high_pd)
return OraclePrediction(means_pd, conf_low_pd, conf_high_pd, current_timestamp)
def log_validity_of_predictions(self, means, conf_low, conf_high):
""" Checks that the network outputs are sensible. """
if not (np.isfinite(conf_low).all() and np.isfinite(conf_high).all()):
logger.warning('Confidence interval contains non-finite values.')
if not np.isfinite(means).all():
logger.warning('Means found to contain non-finite values.')
logger.info('Samples from predicted means: {}'.format(means[0:10]))
def filter_predictions(self, means, conf_low, conf_high):
""" Drops any predictions that are NaN, and remove those symbols from the corresponding confidence dataframe.
:param pdDF means: The predictions from which we'll extract the valid ones
:param pdDF conf_low: Lower bound of the confidence range of the prediction
:param pdDF conf_high: Upper bound of the confidence range of the prediction
:return: pdDF, pdDF, pdDF
"""
means = means.dropna()
valid_symbols = means.index.tolist()
conf_low = conf_low.loc[valid_symbols]
conf_high = conf_high.loc[valid_symbols]
return means, conf_low, conf_high
def filter_nan_samples(self, train_x, train_y):
""" Remove any sample in zeroth dimension which holds a nan """
n_samples = train_x.shape[0]
if n_samples != train_y.shape[0]:
raise ValueError("x and y sample lengths don't match")
validity_array = np.zeros(n_samples)
for i in range(n_samples):
x_sample = train_x[i, :]
y_sample = train_y[i, :]
validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()
mask = np.where(validity_array)[0]
return train_x[mask, :], train_y[mask, :]
def print_verification_report(self, data, data_name):
data = data.flatten()
nans = np.isnan(data).sum()
infs = np.isinf(data).sum()
finite_data = data[np.isfinite(data)]
max_data = np.max(finite_data)
min_data = np.min(finite_data)
mean = np.mean(finite_data)
sigma = np.std(finite_data)
logger.info("{} Infs, Nans: {}, {}".format(data_name, infs, nans))
logger.info("{} Min, Max: {}, {}".format(data_name, min_data, max_data))
logger.info("{} Mean, Sigma: {}, {}".format(data_name, mean, sigma))
if data_name == 'X_data' and np.abs(mean) > 1e-2:
logger.warning('Mean of input data is too large')
if data_name == 'Y_data' and max_data < 1e-2:
raise ValueError("Y Data not classified")
return min_data, max_data
def verify_y_data(self, y_data):
testy = deepcopy(y_data)
self.print_verification_report(testy, 'Y_data')
def verify_x_data(self, x_data):
"""Check for nans or crazy numbers.
"""
testx = deepcopy(x_data).flatten()
xmin, xmax = self.print_verification_report(testx, 'X_data')
if xmax > CLIP_VALUE or xmin < -CLIP_VALUE:
n_clipped_elements = np.sum(CLIP_VALUE < np.abs(testx))
n_elements = len(testx)
x_data = np.clip(x_data, a_min=-CLIP_VALUE, a_max=CLIP_VALUE)
logger.warning("Large inputs detected: clip values exceeding {}".format(CLIP_VALUE))
logger.info("{} of {} elements were clipped.".format(n_clipped_elements, n_elements))
return x_data
def update_configuration(self, config):
""" Pass on some config entries to data_transformation"""
config["data_transformation"]["n_classification_bins"] = config["n_classification_bins"]
config["data_transformation"]["nassets"] = config["nassets"]
config["data_transformation"]["classify_per_series"] = config["classify_per_series"]
config["data_transformation"]["normalise_per_series"] = config["normalise_per_series"]
return config
def _preprocess_inputs(self, train_x_dict):
""" Prepare training data to be fed into Cromulon. """
numpy_arrays = []
for key, value in train_x_dict.items():
numpy_arrays.append(value)
logger.info("Appending feature of shape {}".format(value.shape))
# Currently train_x will have dimensions [features; samples; timesteps; symbols]
train_x = np.stack(numpy_arrays, axis=0)
train_x = self.reorder_input_dimensions(train_x)
# Expand dataset if requested
if self._tensorflow_flags.predict_single_shares:
train_x = self.expand_input_data(train_x)
train_x = self.verify_x_data(train_x)
return train_x.astype(np.float32) # FIXME: set float32 in data transform, conditional on config file
def _preprocess_outputs(self, train_y_dict):
train_y = list(train_y_dict.values())[0]
train_y = np.swapaxes(train_y, axis1=1, axis2=2)
if self._tensorflow_flags.predict_single_shares:
n_feat_y = train_y.shape[2]
train_y = np.reshape(train_y, [-1, 1, 1, n_feat_y])
self.verify_y_data(train_y)
return train_y.astype(np.float32) # FIXME:set float32 in data transform, conditional on config file
def gaussianise_series(self, train_x):
""" Gaussianise each series within each batch - but don't normalise means
:param nparray train_x: Series in format [batches, features, series]. NB ensure all features
are of the same kind
:return: nparray The same data but now each series is gaussianised
"""
n_batches = train_x.shape[0]
for batch in range(n_batches):
train_x[batch, :, :] = gaussianise(train_x[batch, :, :], target_sigma=1.0)
return train_x
def | reorder_input_dimensions | identifier_name |
|
oracle.py |
self._topology = None
def _init_train_file_manager(self):
self._train_file_manager = TrainFileManager(
self._train_path,
TRAIN_FILE_NAME_TEMPLATE,
DATETIME_FORMAT_COMPACT
)
self._train_file_manager.ensure_path_exists()
def _init_data_transformation(self):
data_transformation_config = self.config['data_transformation']
self._feature_list = data_transformation_config['feature_config_list']
self._n_features = len(self._feature_list)
data_transformation_config["prediction_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
data_transformation_config["features_start_market_minute"] = self.scheduling.training_frequency.minutes_offset
data_transformation_config["target_delta_ndays"] = int(self.scheduling.prediction_horizon.days)
data_transformation_config["target_market_minute"] = self.scheduling.prediction_frequency.minutes_offset
self._target_feature = self._extract_target_feature(self._feature_list)
self._data_transformation = GymDataTransformation(data_transformation_config)
def train(self, data, execution_time):
"""
Trains the model
:param dict data: OHLCV data as dictionary of pandas DataFrame.
:param datetime.datetime execution_time: time of execution of training
:return:
"""
logger.info('Training model on {}.'.format(
execution_time,
))
train_x_dict, train_y_dict = self._data_transformation.create_train_data(data)
logger.info("Preprocessing training data")
train_x = self._preprocess_inputs(train_x_dict)
train_y = self._preprocess_outputs(train_y_dict)
logger.info("Processed train_x shape {}".format(train_x.shape))
train_x, train_y = self.filter_nan_samples(train_x, train_y)
logger.info("Filtered train_x shape {}".format(train_x.shape))
n_valid_samples = train_x.shape[0]
if n_valid_samples == 0:
raise ValueError("Aborting training: No valid samples")
elif n_valid_samples < 2e4:
logger.warning("Low number of training samples: {}".format(n_valid_samples))
# Topology can either be directly constructed from layers, or built from a sequence of parameters
if self._topology is None:
n_timesteps = train_x.shape[2]
self.initialise_topology(n_timesteps)
logger.info('Initialised network topology: {}.'.format(self._topology.layers))
logger.info('Training features of shape: {}.'.format(
train_x.shape,
))
logger.info('Training labels of shape: {}.'.format(
train_y.shape,
))
resume_train_path = None
if self._tensorflow_flags.resume_training:
try:
resume_train_path = self._train_file_manager.latest_train_filename(execution_time)
except ValueError:
pass
train_path = self._train_file_manager.new_filename(execution_time)
tensorflow_path = TensorflowPath(train_path, resume_train_path)
tensorboard_options = TensorboardOptions(self._tensorflow_flags.tensorboard_log_path,
self._tensorflow_flags.learning_rate,
self._tensorflow_flags.batch_size,
execution_time
)
first_sample = train_x[0, :].flatten()
logger.info("Sample from first example in train_x: {}".format(first_sample[0:8]))
data_provider = TrainDataProvider(train_x, train_y, self._tensorflow_flags.batch_size)
self._do_train(tensorflow_path, tensorboard_options, data_provider)
@logtime(message="Training the model.")
def _do_train(self, tensorflow_path, tensorboard_options, data_provider):
cromulon.train(self._topology, data_provider, tensorflow_path, tensorboard_options, self._tensorflow_flags)
def _get_train_template(self):
return TRAIN_FILE_NAME_TEMPLATE
def predict_classification(self, data, current_timestamp):
""" Returns the raw pdf from the network. """
latest_train_file = self._train_file_manager.latest_train_filename(current_timestamp)
predict_x, symbols, prediction_timestamp, target_timestamp = self._data_transformation.create_predict_data(
data)
predict_x = self._preprocess_inputs(predict_x)
if self._topology is None:
n_timesteps = predict_x.shape[2]
self.initialise_topology(n_timesteps)
# Verify data is the correct shape
network_input_shape = self._topology.get_network_input_shape()
data_input_shape = predict_x.shape[-3:]
if data_input_shape != network_input_shape:
err_msg = 'Data shape ' + str(data_input_shape) + " doesn't match network input " + str(
network_input_shape)
raise ValueError(err_msg)
predict_y = cromulon_eval.eval_neural_net(
predict_x, self._topology,
self._tensorflow_flags,
latest_train_file
)
if self._tensorflow_flags.predict_single_shares: # Return batch axis to series position
predict_y = np.swapaxes(predict_y, axis1=1, axis2=2)
predict_y = np.squeeze(predict_y, axis=1)
target_timestamps = []
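# Build one timestamp per forecast: each successive forecast is offset from the
# previous one by target_delta_ndays.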
for i in range(self._topology.n_forecasts):
temp_timestamp = deepcopy(target_timestamp)
target_timestamps.append(temp_timestamp)
target_timestamp += timedelta(days=self._data_transformation.target_delta_ndays)
return predict_y, symbols, target_timestamps
def predict(self, data, current_timestamp, number_of_iterations=1):
"""
Main method that gives us a prediction after the training phase is done
:param data: The dict of dataframes to be used for prediction
:type data: dict
:param current_timestamp: The timestamp of the time when the prediction is executed
:type current_timestamp: datetime.datetime
:param number_of_iterations: The number of iterations which we use to sample the uncertain features.
:type number_of_iterations: Integer
:return: Mean forecast, lower and upper confidence limits, and the timestamp of the prediction
:rtype: OraclePrediction
"""
if self._topology is None:
logger.warning('Not ready for prediction - safer to run train first')
logger.info('Cromulon Oracle prediction on {}.'.format(current_timestamp))
predict_y_list = []
for i in range(number_of_iterations):
predict_y, symbols, target_timestamps = self.predict_classification(data, current_timestamp)
predict_y_list.append(predict_y)
predict_y_stack = np.stack(predict_y_list)
average_predict_y = np.mean(predict_y_stack, axis=0)
means, conf_low, conf_high = self._data_transformation.inverse_transform_multi_predict_y(average_predict_y, symbols)
self.log_validity_of_predictions(means, conf_low, conf_high)
means_pd = pd.DataFrame(data=means, columns=symbols, index=target_timestamps)
conf_low_pd = pd.DataFrame(data=conf_low, columns=symbols, index=target_timestamps)
conf_high_pd = pd.DataFrame(data=conf_high, columns=symbols, index=target_timestamps)
means_pd, conf_low_pd, conf_high_pd = self.filter_predictions(means_pd, conf_low_pd, conf_high_pd)
return OraclePrediction(means_pd, conf_low_pd, conf_high_pd, current_timestamp)
def log_validity_of_predictions(self, means, conf_low, conf_high):
""" Checks that the network outputs are sensible. """
if not (np.isfinite(conf_low).all() and np.isfinite(conf_high).all()):
logger.warning('Confidence interval contains non-finite values.')
if not np.isfinite(means).all():
logger.warning('Means found to contain non-finite values.')
logger.info('Samples from predicted means: {}'.format(means[0:10]))
def filter_predictions(self, means, conf_low, conf_high):
""" Drops any predictions that are NaN, and remove those symbols from the corresponding confidence dataframe.
:param pdDF means: The predictions from which we'll extract the valid ones
:param pdDF conf_low: Lower bound of the confidence range of the prediction
:param pdDF conf_high: Upper bound of the confidence range of the prediction
:return: pdDF, pdDF, pdDF
"""
means = means.dropna()
valid_symbols = means.index.tolist()
conf_low = conf_low.loc[valid_symbols]
conf_high = conf_high.loc[valid_symbols]
return means, conf_low, conf_high
def filter_nan_samples(self, train_x, train_y):
""" Remove any sample in zeroth dimension which holds a nan """
n_samples = train_x.shape[0]
if n_samples != train_y.shape[0]:
raise ValueError("x and y sample lengths don't match")
validity_array = np.zeros(n_samples)
for i in range(n_samples):
x_sample = train_x[i, :]
y_sample = train_y[i, :]
validity_array[i] = np.isfinite(x_sample).all() and np.isfinite(y_sample).all()
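# np.where() on the 0/1 validity array below yields the indices of the samples
# whose x and y entries are all finite; only those rows are kept.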
mask = np.where(validity_array)[0]
return train_x[mask, :], train_y[mask, :]
def print_verification_report(self, data, data_name):
data = data.flatten()
nans = np.isnan(data).sum()
infs = np.isinf(data).sum()
finite_data = data[np.isfinite(data)]
max_data = np.max(finite_data)
min_data = np.min(finite_data)
mean = np.mean(finite | self._n_forecasts = 1
else:
self._n_input_series = self.config['n_series']
self._n_forecasts = self.config['n_forecasts'] | random_line_split |
|
context.ts | governing permissions and
* limitations under the License.
*/
/// <reference path="../../../typings/globals/node/index.d.ts" />
import {action_set_state_data, action_set_state_user} from "./actions";
const GLOBAL_CONSTANTS = require('../../global/constants').GLOBAL_CONSTANTS;
const LOGGING_ENABLED = require('../../global/constants').LOGGING_ENABLED;
import {customMiddleware, add_todo_item, toggle_todo_item} from "./mymiddlewares";
import {UserIF, DataIF, ReduxStateIF} from "./interfaces";
import {createStore, applyMiddleware, compose} from 'redux';
import * as reducers from './reducers';
import * as actions from './actions';
import * as persistence from './firebase';
import * as presence from './presence';
const lodash = require('lodash');
const events = require('events');
const uuid = require('node-uuid');
/**
* this holds the app's state which is comprised of:
* 1) user object: UserIF
* 2) data for the user: DataIF
*
* any time this user or data is modified, it emits events to notify any listeners
* that are interested in listening to these changes via:
* 1) LE_SET_USER
* 2) LE_SET_DATA
*/
class ApplicationContext {
public sessionId;
public socket;
public firebase;
public eventEmitter;
public reduxStore;
constructor() {
// init redux reduxStore
this.initReduxStore();
// init firebase
this.initFirebase();
// setup websocket (used for group chat)
this.initSocket();
// unique session id
this.sessionId = uuid.v4();
// create event emitter
this.initEventEmitter();
// setup firebase auth
persistence.initAuth(this);
// setup firebase presence
presence.initPresence(this);
}
isProduction() {
const hostname = window.location.hostname;
if (!lodash.isEqual(hostname, "localhost")) {
// prod app
return true;
} else {
// dev app
return false;
}
}
isDevelopment() {
return !this.isProduction();
}
/**
* this generates a different URL depending on whether the code is running on
* localhost or not.
* DEV - If it's running in localhost, then it understands this to be
* the dev environment and it tries to connect to "localhost:8080".
* PROD - If it's NOT running in localhost, then it understands this to be the
* production environment and tries to connect to "/".
* @returns {string}
*/
getSocketURL() {
let socketURL = "http://localhost:8080";
if (this.isProduction()) |
return socketURL;
}
/**
* this sets up the socket object for use by this context
*/
initSocket() {
let io = require("socket.io-client");
this.socket = new io.connect(this.getSocketURL());
}
/**
* to access the socket for this context use this method ... you can emit()
* using it, and you can attach on() listeners to this as well ... if you attach
* listeners, it's up to you to remove them from the socket when they're no longer
* needed. This class will NOT do the cleanup for you.
* @returns {io.connect|*}
*/
getSocket() {
return this.socket;
}
/**
* this returns an ephemeral session id for this session ... will change every
* time this session is restarted (ApplicationContext is created).
* @returns {string|*}
*/
getSessionId() {
return this.sessionId;
}
/**
* is true if the user object is set, and it contains a uid field.
* you can get the user object from getUser()
* you can get the uid from getUserId()
* @returns {boolean}
*/
isUserSet() {
try {
if (!lodash.isNil(this.getUser())) {
if (!lodash.isNil(this.getUserId())) {
return true;
}
}
return false;
} catch (err) {
return false;
}
}
/**
* get a reference to the saved user object
* @returns {UserIF}
*/
getUser() {
try {
return this.getReduxState().user;
} catch (err) {
return null;
}
}
/** gets the uid field of the userObject */
getUserId() {
try {
return this.getUser().uid;
} catch (err) {
return null;
}
}
/**
* get a reference to the saved data object
* @returns {DataIF}
*/
getData(): DataIF {
return this.getReduxState().data;
}
/** this tells firebase to start sign-in using Google (vs anon auth) */
forceSignIn() {
persistence.forceSignIn(this);
}
/** this tells firebase to initiate sign-out (of users who came in thru any
* auth providers - Google and anon) */
forceSignOut() {
persistence.forceSignOut(this);
}
/** setup the internal firebase object */
initFirebase() {
this.firebase = require("firebase");
const config = require('../../global/constants').FIREBASE_CONFIG;
this.firebase.initializeApp(config);
}
/**
* get a ref to the firebase instance
* @returns {firebase|*}
*/
getFirebase() {
return this.firebase;
}
/** this is a convenience method that allows you to get the firebase server
* timestamp object
*/
getFirebaseServerTimestampObject() {
return this.firebase.database.ServerValue.TIMESTAMP
}
/**
* get a ref to the firebase.database() instance
* @returns {*|firebase.database.Database|!firebase.database.Database}
*/
getDatabase() {
return this.firebase.database();
}
/** creates the event emitter */
initEventEmitter() {
this.eventEmitter = new events.EventEmitter();
}
/** disconnect the socket connection */
disconnectSocket() {
this.socket.disconnect();
}
/** convenience method to emit an event to the server */
emitToServer(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emitToServer: eventName ${eventName} fired`);
console.dir(payload);
}
this.socket.emit(eventName, payload);
}
/** convenience method to emit an event */
emit(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emit: eventName ${eventName} fired`);
console.dir(payload);
}
this.eventEmitter.emit(eventName, payload);
}
/** convenience method to listen to event
* @returns the listener that is passed as param
*/
addListener(eventName, listener) {
function logging_listener() {
if (LOGGING_ENABLED) {
console.log(`listener: for eventName ${eventName} responding`);
}
listener.apply(this, arguments);
}
this.eventEmitter.addListener(
eventName, logging_listener
);
return logging_listener;
}
/** convenience method to remove listener for event */
removeListener(eventName, listener) {
this.eventEmitter.removeListener(eventName, listener);
}
/**
* initialize the redux store and get the actions and reducers wired up to it
* this also tests to see if the app is running in development and, if so, it will try to
* use the Redux Chrome Dev Tools Extension.
*/
initReduxStore() {
try {
const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
// The devtools enhancer is already applied through composeEnhancers above, so
// the middleware list should only contain actual middlewares.
const middlewares = [add_todo_item, toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
composeEnhancers(middlewareEnhancer)
);
} catch (e) {
const middlewares = [add_todo_item, toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
middlewareEnhancer
);
}
// explicitly INIT Redux!
this.reduxStore.dispatch(actions.action_init());
/**
* this enables the use of redux dev tools in Chrome if you have the
* Chrome extension installed - https://goo.gl/xU4D6P
*/
// let USE_REDUX_DEVTOOLS = this.isDevelopment();
// create redux reduxStore
// if (USE_REDUX_DEVTOOLS) {
// // the following line uses chrome devtools redux plugin
// this.reduxStore = createStore(
// reducers.reducer_main,
// null,
// window.devToolsExtension && window.devToolsExtension()
// );
// }
// else {
// this.reduxStore = createStore(
// reducers.reducer_main,
// null
// );
// }
}
/**
* get a reference to the redux store
* @returns {any}
*/
getReduxStore() {
return this.reduxStore;
}
/**
* get a reference to the redux state
* @returns {S}
*/
getReduxState(): ReduxStateIF {
return this.reduxStore.getState();
| {
socketURL = "/";
} | conditional_block |
context.ts | language governing permissions and
* limitations under the License.
*/
/// <reference path="../../../typings/globals/node/index.d.ts" />
import {action_set_state_data, action_set_state_user} from "./actions";
const GLOBAL_CONSTANTS = require('../../global/constants').GLOBAL_CONSTANTS;
const LOGGING_ENABLED = require('../../global/constants').LOGGING_ENABLED;
import {customMiddleware, add_todo_item, toggle_todo_item} from "./mymiddlewares";
import {UserIF, DataIF, ReduxStateIF} from "./interfaces";
import {createStore, applyMiddleware, compose} from 'redux';
import * as reducers from './reducers';
import * as actions from './actions';
import * as persistence from './firebase';
import * as presence from './presence';
const lodash = require('lodash');
const events = require('events');
const uuid = require('node-uuid');
/**
* this holds the app's state which is comprised of:
* 1) user object: UserIF
* 2) data for the user: DataIF
*
* any time this user or data is modified, it emits events to notify any listeners
* that are interested in listening to these changes via:
* 1) LE_SET_USER
* 2) LE_SET_DATA
*/
class ApplicationContext {
public sessionId;
public socket;
public firebase;
public eventEmitter;
public reduxStore;
constructor() {
// init redux reduxStore
this.initReduxStore();
// init firebase
this.initFirebase();
// setup websocket (used for group chat)
this.initSocket();
// unique session id
this.sessionId = uuid.v4();
// create event emitter
this.initEventEmitter();
// setup firebase auth
persistence.initAuth(this);
// setup firebase presence
presence.initPresence(this);
}
isProduction() {
const hostname = window.location.hostname;
if (!lodash.isEqual(hostname, "localhost")) {
// prod app
return true;
} else {
// dev app
return false;
}
}
isDevelopment() {
return !this.isProduction();
}
/**
* this generates a different URL depending on whether the code is running on
* localhost or not.
* DEV - If it's running in localhost, then it understands this to be
* the dev environment and it tries to connect to "localhost:8080".
* PROD - If it's NOT running in localhost, then it understands this to be the
* production environment and tries to connect to "/".
* @returns {string}
*/
getSocketURL() {
let socketURL = "http://localhost:8080";
if (this.isProduction()) {
socketURL = "/";
}
return socketURL;
}
/**
* this sets up the socket object for use by this context
*/
initSocket() {
let io = require("socket.io-client");
this.socket = new io.connect(this.getSocketURL());
}
/**
* to access the socket for this context use this method ... you can emit()
* using it, and you can attach on() listeners to this as well ... if you attach
* listeners, it's up to you to remove them from the socket when they're no longer
* needed. This class will NOT do the cleanup for you.
* @returns {io.connect|*}
*/
getSocket() {
return this.socket;
}
/**
* this returns an ephemeral session id for this session ... will change every
* time this session is restarted (ApplicationContext is created).
* @returns {string|*}
*/
getSessionId() {
return this.sessionId;
}
/**
* is true if the user object is set, and it contains a uid field.
* you can get the user object from getUser()
* you can get the uid from getUserId()
* @returns {boolean}
*/
isUserSet() {
try {
if (!lodash.isNil(this.getUser())) {
if (!lodash.isNil(this.getUserId())) {
return true;
}
}
return false;
} catch (err) {
return false;
}
}
/**
* get a reference to the saved user object
* @returns {UserIF}
*/
| () {
try {
return this.getReduxState().user;
} catch (err) {
return null;
}
}
/** gets the uid field of the userObject */
getUserId() {
try {
return this.getUser().uid;
} catch (err) {
return null;
}
}
/**
* get a reference to the saved data object
* @returns {DataIF}
*/
getData(): DataIF {
return this.getReduxState().data;
}
/** this tells firebase to start sign-in using Google (vs anon auth) */
forceSignIn() {
persistence.forceSignIn(this);
}
/** this tells firebase to initiate sign-out (of users who came in thru any
* auth providers - Google and anon) */
forceSignOut() {
persistence.forceSignOut(this);
}
/** setup the internal firebase object */
initFirebase() {
this.firebase = require("firebase");
const config = require('../../global/constants').FIREBASE_CONFIG;
this.firebase.initializeApp(config);
}
/**
* get a ref to the firebase instance
* @returns {firebase|*}
*/
getFirebase() {
return this.firebase;
}
/** this is a convenience method that allows you to get the firebase server
* timestamp object
*/
getFirebaseServerTimestampObject() {
return this.firebase.database.ServerValue.TIMESTAMP
}
/**
* get a ref to the firebase.database() instance
* @returns {*|firebase.database.Database|!firebase.database.Database}
*/
getDatabase() {
return this.firebase.database();
}
/** creates the event emitter */
initEventEmitter() {
this.eventEmitter = new events.EventEmitter();
}
/** disconnect the socket connection */
disconnectSocket() {
this.socket.disconnect();
}
/** convenience method to emit an event to the server */
emitToServer(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emitToServer: eventName ${eventName} fired`);
console.dir(payload);
}
this.socket.emit(eventName, payload);
}
/** convenience method to emit an event */
emit(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emit: eventName ${eventName} fired`);
console.dir(payload);
}
this.eventEmitter.emit(eventName, payload);
}
/** convenience method to listen to event
* @returns the listener that is passed as param
*/
addListener(eventName, listener) {
function logging_listener() {
if (LOGGING_ENABLED) {
console.log(`listener: for eventName ${eventName} responding`);
}
listener.apply(this, arguments);
}
this.eventEmitter.addListener(
eventName, logging_listener
);
return logging_listener;
}
/** convenience method to remove listener for event */
removeListener(eventName, listener) {
this.eventEmitter.removeListener(eventName, listener);
}
/**
* initialize the redux store and get the actions and reducers wired up to it
* this also tests to see if the app is running in development and, if so, it will try to
* use the Redux Chrome Dev Tools Extension.
*/
initReduxStore() {
try {
const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
// The devtools enhancer is already applied through composeEnhancers above, so
// the middleware list should only contain actual middlewares.
const middlewares = [add_todo_item, toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
composeEnhancers(middlewareEnhancer)
);
} catch (e) {
const middlewares = [add_todo_item, toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
middlewareEnhancer
);
}
// explicitly INIT Redux!
this.reduxStore.dispatch(actions.action_init());
/**
* this enables the use of redux dev tools in Chrome if you have the
* Chrome extension installed - https://goo.gl/xU4D6P
*/
// let USE_REDUX_DEVTOOLS = this.isDevelopment();
// create redux reduxStore
// if (USE_REDUX_DEVTOOLS) {
// // the following line uses chrome devtools redux plugin
// this.reduxStore = createStore(
// reducers.reducer_main,
// null,
// window.devToolsExtension && window.devToolsExtension()
// );
// }
// else {
// this.reduxStore = createStore(
// reducers.reducer_main,
// null
// );
// }
}
/**
* get a reference to the redux store
* @returns {any}
*/
getReduxStore() {
return this.reduxStore;
}
/**
* get a reference to the redux state
* @returns {S}
*/
getReduxState(): ReduxStateIF {
return this.reduxStore.getState();
}
| getUser | identifier_name |
context.ts | language governing permissions and
* limitations under the License.
*/
/// <reference path="../../../typings/globals/node/index.d.ts" />
import {action_set_state_data, action_set_state_user} from "./actions";
const GLOBAL_CONSTANTS = require('../../global/constants').GLOBAL_CONSTANTS;
const LOGGING_ENABLED = require('../../global/constants').LOGGING_ENABLED;
import {customMiddleware, add_todo_item, toggle_todo_item} from "./mymiddlewares";
import {UserIF, DataIF, ReduxStateIF} from "./interfaces";
import {createStore, applyMiddleware, compose} from 'redux';
import * as reducers from './reducers';
import * as actions from './actions';
import * as persistence from './firebase';
import * as presence from './presence';
const lodash = require('lodash');
const events = require('events');
const uuid = require('node-uuid');
/**
* this holds the app's state which is comprised of:
* 1) user object: UserIF
* 2) data for the user: DataIF
*
* any time this user or data is modified, it emits events to notify any listeners
* that are interested in listening to these changes via:
* 1) LE_SET_USER
* 2) LE_SET_DATA
*/
class ApplicationContext {
public sessionId;
public socket;
public firebase;
public eventEmitter;
public reduxStore;
constructor() {
// init redux reduxStore
this.initReduxStore();
// init firebase
this.initFirebase();
// setup websocket (used for group chat)
this.initSocket();
// unique session id
this.sessionId = uuid.v4();
// create event emitter
this.initEventEmitter();
// setup firebase auth
persistence.initAuth(this);
// setup firebase presence
presence.initPresence(this);
}
isProduction() {
const hostname = window.location.hostname;
if (!lodash.isEqual(hostname, "localhost")) {
// prod app
return true;
} else {
// dev app
return false;
}
}
isDevelopment() {
return !this.isProduction();
}
/**
* this generates a different URL depending on whether the code is running on
* localhost or not. | * @returns {string}
*/
getSocketURL() {
let socketURL = "http://localhost:8080";
if (this.isProduction()) {
socketURL = "/";
}
return socketURL;
}
/**
* this sets up the socket object for use by this context
*/
initSocket() {
let io = require("socket.io-client");
this.socket = new io.connect(this.getSocketURL());
}
/**
* to access the socket for this context use this method ... you can emit()
* using it, and you can attach on() listeners to this as well ... if you attach
* listeners, it's up to you to remove them from the socket when they're no longer
* needed. This class will NOT do the cleanup for you.
* @returns {io.connect|*}
*/
getSocket() {
return this.socket;
}
/**
* this returns an ephemeral session id for this session ... will change every
* time this session is restarted (ApplicationContext is created).
* @returns {string|*}
*/
getSessionId() {
return this.sessionId;
}
/**
* is true if the user object is set, and it contains a uid field.
* you can get the user object from getUser()
* you can get the uid from getUserId()
* @returns {boolean}
*/
isUserSet() {
try {
if (!lodash.isNil(this.getUser())) {
if (!lodash.isNil(this.getUserId())) {
return true;
}
}
return false;
} catch (err) {
return false;
}
}
/**
* get a reference to the saved user object
* @returns {UserIF}
*/
getUser() {
try {
return this.getReduxState().user;
} catch (err) {
return null;
}
}
/** gets the uid field of the userObject */
getUserId() {
try {
return this.getUser().uid;
} catch (err) {
return null;
}
}
/**
* get a reference to the saved data object
* @returns {DataIF}
*/
getData(): DataIF {
return this.getReduxState().data;
}
/** this tells firebase to start sign-in using Google (vs anon auth) */
forceSignIn() {
persistence.forceSignIn(this);
}
/** this tells firebase to initiate sign-out (of users who came in thru any
* auth providers - Google and anon) */
forceSignOut() {
persistence.forceSignOut(this);
}
/** setup the internal firebase object */
initFirebase() {
this.firebase = require("firebase");
const config = require('../../global/constants').FIREBASE_CONFIG;
this.firebase.initializeApp(config);
}
/**
* get a ref to the firebase instance
* @returns {firebase|*}
*/
getFirebase() {
return this.firebase;
}
/** this is a convenience method that allows you to get the firebase server
* timestamp object
*/
getFirebaseServerTimestampObject() {
return this.firebase.database.ServerValue.TIMESTAMP
}
/**
* get a ref to the firebase.database() instance
* @returns {*|firebase.database.Database|!firebase.database.Database}
*/
getDatabase() {
return this.firebase.database();
}
/** creates the event emitter */
initEventEmitter() {
this.eventEmitter = new events.EventEmitter();
}
/** disconnect the socket connection */
disconnectSocket() {
this.socket.disconnect();
}
/** convenience method to emit an event to the server */
emitToServer(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emitToServer: eventName ${eventName} fired`);
console.dir(payload);
}
this.socket.emit(eventName, payload);
}
/** convenience method to emit an event */
emit(eventName, payload) {
if (LOGGING_ENABLED) {
console.log(`emit: eventName ${eventName} fired`);
console.dir(payload);
}
this.eventEmitter.emit(eventName, payload);
}
/** convenience method to listen to event
* @returns the listener that is passed as param
*/
addListener(eventName, listener) {
function logging_listener() {
if (LOGGING_ENABLED) {
console.log(`listener: for eventName ${eventName} responding`);
}
listener.apply(this, arguments);
}
this.eventEmitter.addListener(
eventName, logging_listener
);
return logging_listener;
}
/** convenience method to remove listener for event */
removeListener(eventName, listener) {
this.eventEmitter.removeListener(eventName, listener);
}
/**
* initialize the redux store and get the actions and reducers wired up to it
* this also tests to see if the app is running in development and, if so, it will try to
* use the Redux Chrome Dev Tools Extension.
*/
initReduxStore() {
try {
const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ || compose;
// The devtools enhancer is already applied through composeEnhancers above, so
// the middleware list should only contain actual middlewares.
const middlewares = [add_todo_item, toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
composeEnhancers(middlewareEnhancer)
);
} catch (e) {
const middlewares = [add_todo_item, toggle_todo_item];
const middlewareEnhancer = applyMiddleware(...middlewares);
this.reduxStore = createStore(
reducers.reducer_main,
null,
middlewareEnhancer
);
}
// explicitly INIT Redux!
this.reduxStore.dispatch(actions.action_init());
/**
* this enables the use of redux dev tools in Chrome if you have the
* Chrome extension installed - https://goo.gl/xU4D6P
*/
// let USE_REDUX_DEVTOOLS = this.isDevelopment();
// create redux reduxStore
// if (USE_REDUX_DEVTOOLS) {
// // the following line uses chrome devtools redux plugin
// this.reduxStore = createStore(
// reducers.reducer_main,
// null,
// window.devToolsExtension && window.devToolsExtension()
// );
// }
// else {
// this.reduxStore = createStore(
// reducers.reducer_main,
// null
// );
// }
}
/**
* get a reference to the redux store
* @returns {any}
*/
getReduxStore() {
return this.reduxStore;
}
/**
* get a reference to the redux state
* @returns {S}
*/
getReduxState(): ReduxStateIF {
return this.reduxStore.getState();
}
| * DEV - If it's running in localhost, then it understands this to be
* the dev environment and it tries to connect to "localhost:8080".
* PROD - If it's NOT running in localhost, then it understands this to be the
* production environment and tries to connect to "/". | random_line_split |
block.rs | size_of::<u32>();
coding::decode_fixed32(&data[offset..]) as usize
}
fn iter_slice<'a, T: SliceComparator>(&'a self, comparator: T, slice: Slice<'a>) -> BlockIterator<'a, T>
{
if self.get_size() < mem::size_of::<u32>() {
BlockIterator::new(comparator, &[], 0, 0)
.with_status(Status::Corruption("bad block contents".into()))
} else {
let num_restarts = Self::num_restarts(slice);
if num_restarts == 0 {
BlockIterator::new(comparator, &[], 0, 0)
} else {
let restart_offset = self.restart_offset();
BlockIterator::new(comparator, slice, restart_offset, num_restarts)
}
}
}
}
impl Block for OwnedBlock {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { &self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>
{
self.iter_slice(comparator, self.data.as_slice())
}
}
impl<'a> Block for SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'i, T: SliceComparator>(&'i self, comparator: T) -> BlockIterator<'i, T>
{
self.iter_slice(comparator, self.data)
}
}
impl OwnedBlock {
pub fn new(contents: Slice) -> RubbleResult<OwnedBlock>
{
let sizeof_u32 = mem::size_of::<u32>();
let max_restarts_allowed = (contents.len() - sizeof_u32) / sizeof_u32;
let num_restarts = Self::num_restarts(contents);
if num_restarts > max_restarts_allowed {
return Err("The size is too small for num_restarts()".into())
}
Ok(OwnedBlock {
data: contents.to_vec(),
restart_offset: contents.len() - (1 + num_restarts) * sizeof_u32,
})
}
} | impl<'a> SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
}
struct DecodedEntry<'a> {
new_slice: Slice<'a>,
shared: u32,
non_shared: u32,
value_length: u32,
}
/// Helper routine: decode the next block entry starting at "p",
/// storing the number of shared key bytes, non_shared key bytes,
/// and the length of the value in "*shared", "*non_shared", and
/// "*value_length", respectively. Will not dereference past "limit".
///
/// If any errors are detected, returns NULL. Otherwise, returns a
/// pointer to the key delta (just past the three decoded values).
fn decode_entry(mut p: &[u8]) -> RubbleResult<DecodedEntry>
{
if p.len() < 3 {
return Err("Entry missing header!".into())
};
let mut cur = 0;
let mut shared = p[0] as u32;
let mut non_shared = p[1] as u32;
let mut value_length = p[2] as u32;
if (shared | non_shared | value_length) < 128 {
// Fast path: all three values are encoded in one byte each
cur += 3;
} else {
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
non_shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
value_length = fallback.value;
}
let new_slice = &p[cur..];
if new_slice.len() < (non_shared + value_length) as usize {
return Err("bad block?".into());
}
return Ok(DecodedEntry {
new_slice: new_slice,
shared: shared,
non_shared: non_shared,
value_length: value_length,
});
}
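// Worked example of the fast path above (a sketch, not taken from real data):
// a header of [0x02, 0x03, 0x05] decodes to shared = 2, non_shared = 3 and
// value_length = 5, and the returned slice starts at the key delta that
// immediately follows those three header bytes.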
pub struct BlockIterator<'a, T: SliceComparator> {
comparator: T,
data: Slice<'a>,
value_offset: usize,
value_len: usize,
restarts: usize,
num_restarts: usize,
current: usize,
restart_index: usize,
key: String,
status: Status,
}
impl<'a, T: SliceComparator> BlockIterator<'a, T> {
pub fn new(comparator: T, data: Slice<'a>, restarts: usize, num_restarts: usize)
-> BlockIterator<'a, T>
{
// NOTE: `iter_slice` also constructs empty/error iterators with
// num_restarts == 0, so we intentionally do not assert on it here.
BlockIterator::<'a, T> {
key: String::new(),
status: Status::Ok,
value_offset: 0,
value_len: 0,
comparator: comparator,
data: data,
restarts: restarts,
num_restarts: num_restarts,
current: restarts,
restart_index: num_restarts,
}
}
fn with_status(mut self, status: Status) -> BlockIterator<'a, T>
{
self.status = status;
self
}
fn compare(&self, a: Slice, b: Slice) -> i32
{
self.comparator.compare(a, b)
}
/// Return the offset in data_ just past the end of the current entry.
fn next_entry_offset(&self) -> usize
{
self.value_offset + self.value_len
}
fn get_restart_point(&self, index: usize) -> usize
{
assert!(index < self.num_restarts);
let offset = self.restarts + index * mem::size_of::<u32>();
coding::decode_fixed32(&self.data[offset..]) as usize
}
pub fn seek_to_restart_point(&mut self, index: usize)
{
self.key = String::new();
self.restart_index = index;
// current_ will be fixed by ParseNextKey();
// ParseNextKey() starts at the end of value_, so set value_ accordingly
self.value_offset = self.get_restart_point(index);
}
pub fn is_valid(&self) -> bool
{
self.current < self.restarts
}
pub fn status(&self) -> &Status {
&self.status
}
pub fn key(&self) -> String {
assert!(self.is_valid());
self.key.clone()
}
pub fn value(&self) -> Slice {
assert!(self.is_valid());
&self.data[self.value_offset..self.value_offset+self.value_len]
}
pub fn step(&mut self) {
assert!(self.is_valid());
self.parse_next_key();
}
pub fn prev(&mut self) {
assert!(self.is_valid());
// Scan backwards to a restart point before current_
let original = self.current;
while self.get_restart_point(self.restart_index) >= original {
if self.restart_index == 0 {
// No more entries
self.current = self.restarts;
self.restart_index = self.num_restarts;
return;
}
self.restart_index -= 1;
}
// Reposition at the chosen restart point, then walk forward until we stand on
// the entry immediately before the original position.
let restart_index = self.restart_index;
self.seek_to_restart_point(restart_index);
while self.parse_next_key() && self.next_entry_offset() < original {}
}
pub fn seek(&mut self, target: Slice)
{
// Binary search in restart array to find the last restart point
// with a key < target
let mut left = 0;
let mut right = self.num_restarts - 1;
while left < right {
let mid = (left + right + 1) / 2;
let region_offset = self.get_restart_point(mid);
// let shared, non_shared, value_length;
let entry = match decode_entry(&self.data[region_offset as usize..]) {
Err(_) => return self.corruption_error(),
Ok(key) => key,
};
if entry.shared != 0 {
return self.corruption_error()
}
let mid_key = entry.new_slice;
if self.compare(mid_key, target) < 0 {
// Key at "mid" is smaller than "target". Therefore all
// blocks before "mid" are uninteresting.
left = mid;
} else {
// Key at "mid" is >= "target". Therefore all blocks at or
// after "mid" are uninteresting.
right = mid - 1;
}
}
// Linear search (within restart block) for first key >= target
self.seek_to_restart_point(left);
loop {
if !self.parse_next_key() {
return;
}
if self.compare(self.key.as_bytes(), target) >= 0 {
return;
}
}
}
pub fn seek_to_first(&mut self) {
self.seek_to_restart_point(0);
self.parse_next_key();
}
pub fn seek_to_last(&mut self) {
let n_restarts = self.num_restarts - 1;
self.seek_to_restart_point(n_restarts);
while self | random_line_split |
|
block.rs | _of::<u32>();
coding::decode_fixed32(&data[offset..]) as usize
}
fn iter_slice<'a, T: SliceComparator>(&'a self, comparator: T, slice: Slice<'a>) -> BlockIterator<'a, T>
{
if self.get_size() < mem::size_of::<u32>() {
BlockIterator::new(comparator, &[], 0, 0)
.with_status(Status::Corruption("bad block contents".into()))
} else {
let num_restarts = Self::num_restarts(slice);
if num_restarts == 0 {
BlockIterator::new(comparator, &[], 0, 0)
} else {
let restart_offset = self.restart_offset();
BlockIterator::new(comparator, slice, restart_offset, num_restarts)
}
}
}
}
impl Block for OwnedBlock {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { &self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>
{
self.iter_slice(comparator, self.data.as_slice())
}
}
impl<'a> Block for SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'i, T: SliceComparator>(&'i self, comparator: T) -> BlockIterator<'i, T>
{
self.iter_slice(comparator, self.data)
}
}
impl OwnedBlock {
pub fn new(contents: Slice) -> RubbleResult<OwnedBlock>
{
let sizeof_u32 = mem::size_of::<u32>();
let max_restarts_allowed = (contents.len() - sizeof_u32) / sizeof_u32;
let num_restarts = Self::num_restarts(contents);
if num_restarts > max_restarts_allowed {
return Err("The size is too small for num_restarts()".into())
}
Ok(OwnedBlock {
data: contents.to_vec(),
restart_offset: contents.len() - (1 + num_restarts) * sizeof_u32,
})
}
}
impl<'a> SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
}
struct DecodedEntry<'a> {
new_slice: Slice<'a>,
shared: u32,
non_shared: u32,
value_length: u32,
}
/// Helper routine: decode the next block entry starting at "p",
/// storing the number of shared key bytes, non_shared key bytes,
/// and the length of the value in "*shared", "*non_shared", and
/// "*value_length", respectively. Will not dereference past "limit".
///
/// If any errors are detected, returns NULL. Otherwise, returns a
/// pointer to the key delta (just past the three decoded values).
fn decode_entry(mut p: &[u8]) -> RubbleResult<DecodedEntry>
{
if p.len() < 3 {
return Err("Entry missing header!".into())
};
let mut cur = 0;
let mut shared = p[0] as u32;
let mut non_shared = p[1] as u32;
let mut value_length = p[2] as u32;
if (shared | non_shared | value_length) < 128 {
// Fast path: all three values are encoded in one byte each
cur += 3;
} else {
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
non_shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
value_length = fallback.value;
}
let new_slice = &p[cur..];
if new_slice.len() < (non_shared + value_length) as usize {
return Err("bad block?".into());
}
return Ok(DecodedEntry {
new_slice: new_slice,
shared: shared,
non_shared: non_shared,
value_length: value_length,
});
}
pub struct BlockIterator<'a, T: SliceComparator> {
comparator: T,
data: Slice<'a>,
value_offset: usize,
value_len: usize,
restarts: usize,
num_restarts: usize,
current: usize,
restart_index: usize,
key: String,
status: Status,
}
impl<'a, T: SliceComparator> BlockIterator<'a, T> {
pub fn new(comparator: T, data: Slice<'a>, restarts: usize, num_restarts: usize)
-> BlockIterator<'a, T>
{
// NOTE: `iter_slice` also constructs empty/error iterators with
// num_restarts == 0, so we intentionally do not assert on it here.
BlockIterator::<'a, T> {
key: String::new(),
status: Status::Ok,
value_offset: 0,
value_len: 0,
comparator: comparator,
data: data,
restarts: restarts,
num_restarts: num_restarts,
current: restarts,
restart_index: num_restarts,
}
}
fn with_status(mut self, status: Status) -> BlockIterator<'a, T>
{
self.status = status;
self
}
fn compare(&self, a: Slice, b: Slice) -> i32
{
self.comparator.compare(a, b)
}
/// Return the offset in data_ just past the end of the current entry.
fn next_entry_offset(&self) -> usize
|
fn get_restart_point(&self, index: usize) -> usize
{
assert!(index < self.num_restarts);
let offset = self.restarts + index * mem::size_of::<u32>();
coding::decode_fixed32(&self.data[offset..]) as usize
}
pub fn seek_to_restart_point(&mut self, index: usize)
{
self.key = String::new();
self.restart_index = index;
// current_ will be fixed by ParseNextKey();
// ParseNextKey() starts at the end of value_, so set value_ accordingly
self.value_offset = self.get_restart_point(index);
}
pub fn is_valid(&self) -> bool
{
self.current < self.restarts
}
pub fn status(&self) -> &Status {
&self.status
}
pub fn key(&self) -> String {
assert!(self.is_valid());
self.key.clone()
}
pub fn value(&self) -> Slice {
assert!(self.is_valid());
&self.data[self.value_offset..self.value_offset+self.value_len]
}
pub fn step(&mut self) {
assert!(self.is_valid());
self.parse_next_key();
}
pub fn prev(&mut self) {
assert!(self.is_valid());
// Scan backwards to a restart point before current_
let original = self.current;
while self.get_restart_point(self.restart_index) >= original {
if self.restart_index == 0 {
// No more entries
self.current = self.restarts;
self.restart_index = self.num_restarts;
return;
}
self.restart_index -= 1;
}
// Reposition at the chosen restart point, then walk forward until we stand on
// the entry immediately before the original position.
let restart_index = self.restart_index;
self.seek_to_restart_point(restart_index);
while self.parse_next_key() && self.next_entry_offset() < original {}
}
pub fn seek(&mut self, target: Slice)
{
// Binary search in restart array to find the last restart point
// with a key < target
let mut left = 0;
let mut right = self.num_restarts - 1;
while left < right {
let mid = (left + right + 1) / 2;
let region_offset = self.get_restart_point(mid);
// let shared, non_shared, value_length;
let entry = match decode_entry(&self.data[region_offset as usize..]) {
Err(_) => return self.corruption_error(),
Ok(key) => key,
};
if entry.shared != 0 {
return self.corruption_error()
}
let mid_key = entry.new_slice;
if self.compare(mid_key, target) < 0 {
// Key at "mid" is smaller than "target". Therefore all
// blocks before "mid" are uninteresting.
left = mid;
} else {
// Key at "mid" is >= "target". Therefore all blocks at or
// after "mid" are uninteresting.
right = mid - 1;
}
}
// Linear search (within restart block) for first key >= target
self.seek_to_restart_point(left);
loop {
if !self.parse_next_key() {
return;
}
if self.compare(self.key.as_bytes(), target) >= 0 {
return;
}
}
}
pub fn seek_to_first(&mut self) {
self.seek_to_restart_point(0);
self.parse_next_key();
}
pub fn seek_to_last(&mut self) {
let n_restarts = self.num_restarts - 1;
self.seek_to_restart_point(n_restarts);
while self | {
self.value_offset + self.value_len
} | identifier_body |
block.rs | _of::<u32>();
coding::decode_fixed32(&data[offset..]) as usize
}
fn iter_slice<'a, T: SliceComparator>(&'a self, comparator: T, slice: Slice<'a>) -> BlockIterator<'a, T>
{
if self.get_size() < mem::size_of::<u32>() {
BlockIterator::new(comparator, &[], 0, 0)
.with_status(Status::Corruption("bad block contents".into()))
} else {
let num_restarts = Self::num_restarts(slice);
if num_restarts == 0 {
BlockIterator::new(comparator, &[], 0, 0)
} else {
let restart_offset = self.restart_offset();
BlockIterator::new(comparator, slice, restart_offset, num_restarts)
}
}
}
}
impl Block for OwnedBlock {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { &self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'a, T: SliceComparator>(&'a self, comparator: T) -> BlockIterator<'a, T>
{
self.iter_slice(comparator, self.data.as_slice())
}
}
impl<'a> Block for SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
fn data(&self) -> Slice { self.data }
fn restart_offset(&self) -> usize { self.restart_offset }
fn iter<'i, T: SliceComparator>(&'i self, comparator: T) -> BlockIterator<'i, T>
{
self.iter_slice(comparator, self.data)
}
}
impl OwnedBlock {
pub fn new(contents: Slice) -> RubbleResult<OwnedBlock>
{
let sizeof_u32 = mem::size_of::<u32>();
let max_restarts_allowed = (contents.len() - sizeof_u32) / sizeof_u32;
let num_restarts = Self::num_restarts(contents);
if num_restarts > max_restarts_allowed {
return Err("The size is too small for num_restarts()".into())
}
Ok(OwnedBlock {
data: contents.to_vec(),
restart_offset: contents.len() - (1 + num_restarts) * sizeof_u32,
})
}
}
impl<'a> SliceBlock<'a> {
fn get_size(&self) -> usize { self.data.len() }
}
struct DecodedEntry<'a> {
new_slice: Slice<'a>,
shared: u32,
non_shared: u32,
value_length: u32,
}
/// Helper routine: decode the next block entry starting at "p",
/// storing the number of shared key bytes, non_shared key bytes,
/// and the length of the value in "*shared", "*non_shared", and
/// "*value_length", respectively. Will not dereference past "limit".
///
/// If any errors are detected, returns NULL. Otherwise, returns a
/// pointer to the key delta (just past the three decoded values).
fn decode_entry(mut p: &[u8]) -> RubbleResult<DecodedEntry>
{
if p.len() < 3 {
return Err("Entry missing header!".into())
};
let mut cur = 0;
let mut shared = p[0] as u32;
let mut non_shared = p[1] as u32;
let mut value_length = p[2] as u32;
if (shared | non_shared | value_length) < 128 {
// Fast path: all three values are encoded in one byte each
cur += 3;
} else {
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
non_shared = fallback.value;
let fallback = try!(coding::get_varint32_ptr_fallback(p));
p = fallback.slice;
value_length = fallback.value;
}
let new_slice = &p[cur..];
if new_slice.len() < (non_shared + value_length) as usize {
return Err("bad block?".into());
}
return Ok(DecodedEntry {
new_slice: new_slice,
shared: shared,
non_shared: non_shared,
value_length: value_length,
});
}
pub struct BlockIterator<'a, T: SliceComparator> {
comparator: T,
data: Slice<'a>,
value_offset: usize,
value_len: usize,
restarts: usize,
num_restarts: usize,
current: usize,
restart_index: usize,
key: String,
status: Status,
}
impl<'a, T: SliceComparator> BlockIterator<'a, T> {
pub fn new(comparator: T, data: Slice<'a>, restarts: usize, num_restarts: usize)
-> BlockIterator<'a, T>
{
// NOTE: `iter_slice` also constructs empty/error iterators with
// num_restarts == 0, so we intentionally do not assert on it here.
BlockIterator::<'a, T> {
key: String::new(),
status: Status::Ok,
value_offset: 0,
value_len: 0,
comparator: comparator,
data: data,
restarts: restarts,
num_restarts: num_restarts,
current: restarts,
restart_index: num_restarts,
}
}
fn with_status(mut self, status: Status) -> BlockIterator<'a, T>
{
self.status = status;
self
}
fn | (&self, a: Slice, b: Slice) -> i32
{
self.comparator.compare(a, b)
}
/// Return the offset in data_ just past the end of the current entry.
fn next_entry_offset(&self) -> usize
{
self.value_offset + self.value_len
}
fn get_restart_point(&self, index: usize) -> usize
{
assert!(index < self.num_restarts);
let offset = self.restarts + index * mem::size_of::<u32>();
coding::decode_fixed32(&self.data[offset..]) as usize
}
pub fn seek_to_restart_point(&mut self, index: usize)
{
self.key = String::new();
self.restart_index = index;
// current_ will be fixed by ParseNextKey();
// ParseNextKey() starts at the end of value_, so set value_ accordingly
self.value_offset = self.get_restart_point(index);
}
pub fn is_valid(&self) -> bool
{
self.current < self.restarts
}
pub fn status(&self) -> &Status {
&self.status
}
pub fn key(&self) -> String {
assert!(self.is_valid());
self.key.clone()
}
pub fn value(&self) -> Slice {
assert!(self.is_valid());
&self.data[self.value_offset..self.value_offset+self.value_len]
}
pub fn step(&mut self) {
assert!(self.is_valid());
self.parse_next_key();
}
pub fn prev(&mut self) {
assert!(self.is_valid());
// Scan backwards to a restart point before current_
let original = self.current;
while self.get_restart_point(self.restart_index) >= original {
if self.restart_index == 0 {
// No more entries
self.current = self.restarts;
self.restart_index = self.num_restarts;
return;
}
self.restart_index -= 1;
}
// Reposition at the chosen restart point, then walk forward until we stand on
// the entry immediately before the original position.
let restart_index = self.restart_index;
self.seek_to_restart_point(restart_index);
while self.parse_next_key() && self.next_entry_offset() < original {}
}
pub fn seek(&mut self, target: Slice)
{
// Binary search in restart array to find the last restart point
// with a key < target
let mut left = 0;
let mut right = self.num_restarts - 1;
while left < right {
let mid = (left + right + 1) / 2;
let region_offset = self.get_restart_point(mid);
// let shared, non_shared, value_length;
let entry = match decode_entry(&self.data[region_offset as usize..]) {
Err(_) => return self.corruption_error(),
Ok(key) => key,
};
if entry.shared != 0 {
return self.corruption_error()
}
let mid_key = entry.new_slice;
if self.compare(mid_key, target) < 0 {
// Key at "mid" is smaller than "target". Therefore all
// blocks before "mid" are uninteresting.
left = mid;
} else {
// Key at "mid" is >= "target". Therefore all blocks at or
// after "mid" are uninteresting.
right = mid - 1;
}
}
// Linear search (within restart block) for first key >= target
self.seek_to_restart_point(left);
loop {
if !self.parse_next_key() {
return;
}
if self.compare(self.key.as_bytes(), target) >= 0 {
return;
}
}
}
pub fn seek_to_first(&mut self) {
self.seek_to_restart_point(0);
self.parse_next_key();
}
pub fn seek_to_last(&mut self) {
let n_restarts = self.num_restarts - 1;
self.seek_to_restart_point(n_restarts);
while self | compare | identifier_name |
generator.rs | and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target accesses (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
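// A rough construction sketch (the values and the target/operation variants are
// illustrative only; the real `AvailableTargets` and `TargetOps` definitions
// live in their own modules):
//
//   let args = GeneratorArgs::new(magic, process_id, 0, 4096, 0, true, seed,
//       "some-target".to_string(), 0..target_size, target_type, operations,
//       32, 10_000, true);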
/// Based on the input args this returns the set of operations that the
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write {
operations.push(OperationType::Write);
} else {
// Only write workloads are supported right now; anything else is a caller bug.
panic!("no supported operation type was requested");
}
return operations;
}
/// Based on the input args this returns a generator that can generate the
/// requested IO load. For now we only allow sequential IO.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
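/// Generates `max_io_count` IO packets, hands them to the issuer over the bounded
/// channel (which provides the throttling), and finally sends an Exit packet.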
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. Order of IOs issued need not be same as order they arrive at
// verifier and get logged. While replaying, this number helps us determine order
// to issue IOs irrespective of the order they are read from replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer_mut(), io_sequence_number, io_offset_range);
{
let mut map = io_map.lock().unwrap();
map.insert(io_sequence_number, io_packet.clone());
}
io_packet.timestamp_stage_end(stage);
to_issuer.send(io_packet).expect("error sending command");
active_commands.increment();
}
let io_packet =
target.create_io_packet(OperationType::Exit, io_sequence_number, 4, 0..1, target.clone());
to_issuer.send(io_packet).expect("error sending exit command");
active_commands.increment();
Ok(())
}
/// Function that creates verifier and issuer thread. It build channels for them to communicate.
/// This thread assumes the role of generator.
pub fn run_load(
args: GeneratorArgs,
start_instant: Instant,
stats: Arc<Mutex<Stats>>,
) -> Result<(), Error> {
// Channel used to send commands from generator to issuer
// This is the only bounded channel. The throttle control happens over this channel.
// TODO(auradkar): Considering ActiveCommands and this channel are so tightly related, should
// this channel be part of the ActiveCommand implementation?
let (gi_to_issuer, gi_from_generator) = sync_channel(args.issuer_queue_depth);
// Channel used to send commands from issuer to verifier
let (iv_to_verifier, iv_from_issuer) = channel();
// Channel used to send commands from verifier to generator
let (vi_to_issuer, vi_from_verifier) = channel();
// A hashmap of all outstanding IOs. Shared between generator and verifier.
// Generator inserts entries and verifier removes it.
let io_map = Arc::new(Mutex::new(HashMap::new()));
// Mechanism to notify issuer of IOs.
let mut active_commands = ActiveCommands::new();
// Thread handle to wait on for joining.
let mut thread_handles = vec![];
// Create Issuer
let issuer_args = IssuerArgs::new(
format!("issues-{}", args.generator_unique_id),
0,
gi_from_generator,
iv_to_verifier,
vi_from_verifier,
active_commands.clone(),
);
thread_handles.push(spawn(move || run_issuer(issuer_args)));
// Create verifier
let verifier_args = VerifierArgs::new(
format!("verifier-{}", args.generator_unique_id),
0,
iv_from_issuer,
vi_to_issuer,
false,
io_map.clone(),
stats.clone(),
active_commands.clone(),
);
thread_handles.push(spawn(move || run_verifier(verifier_args)));
run_generator(&args, &gi_to_issuer, &mut active_commands, start_instant, io_map)?;
for handle in thread_handles {
handle.join().unwrap()?;
}
stats.lock().unwrap().stop_clock();
Ok(())
}
#[cfg(test)]
mod tests {
use {
crate::generator::ActiveCommands,
std::thread::sleep,
std::{thread, time},
};
#[test]
fn active_command_test() {
let mut command_count = ActiveCommands::new();
assert_eq!(command_count.count(), 0);
command_count.increment();
assert_eq!(command_count.count(), 1);
command_count.increment();
assert_eq!(command_count.count(), 2);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 1);
assert_eq!(command_count.decrement(), false);
assert_eq!(command_count.count(), 0);
}
#[test]
fn | active_command_block_test | identifier_name |
|
generator.rs | command_count: Arc<(Mutex<u64>, Condvar)>,
}
impl ActiveCommands {
pub fn new() -> ActiveCommands {
ActiveCommands { command_count: Arc::new((Mutex::new(0), Condvar::new())) }
}
/// Decrements number of active commands. Waits on the condition variable if
/// command_count is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut count = ActiveCommands::new();
///
/// Thread 1
/// command_count.remove();
/// cmd = receiver.try_recv();
/// assert_eq!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.insert();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns value of command_count. This returns a snap-shot in time value.
/// By the time another action is performed based on previous value returned
/// by count, the count may have changed. Currently, sender increments the
/// count and reciever decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and it's checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and return it's value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write {
operations.push(OperationType::Write);
} else {
assert!(false);
}
return operations;
}
/// Based on the input args this returns a generator that can generate requested
/// IO load.For now we only allow sequential io.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. Order of IOs issued need not be same as order they arrive at
// verifier and get logged. While replaying, this number helps us determine order
// to issue IOs irrespective of the order they are read from replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen | random_line_split |
||
generator.rs | is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut count = ActiveCommands::new();
///
/// Thread 1
/// command_count.remove();
/// cmd = receiver.try_recv();
/// assert_eq!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.insert();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns value of command_count. This returns a snap-shot in time value.
/// By the time another action is performed based on previous value returned
/// by count, the count may have changed. Currently, sender increments the
/// count and reciever decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and it's checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and return it's value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> |
/// Based on the input args this returns a generator that can generate requested
/// IO load.For now we only allow sequential io.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. Order of IOs issued need not be same as order they arrive at
// verifier and get logged. While replaying, this number helps us determine order
// to issue IOs irrespective of the order they are read from replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer | {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write {
operations.push(OperationType::Write);
} else {
assert!(false);
}
return operations;
} | identifier_body |
generator.rs | is zero. Returns true if command_count was zero and call
/// was blocked.
/// ```
/// let mut count = ActiveCommands::new();
///
/// Thread 1
/// command_count.remove();
/// cmd = receiver.try_recv();
/// assert_eq!(cmd.is_ok());
///
/// Thread 2
/// sender.send(cmd);
/// command_count.insert();
/// ```
pub fn decrement(&mut self) -> bool {
let (lock, cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
let mut slept = false;
while (*count) == 0 {
slept = true;
debug!("waiting to on command");
count = cvar.wait(count).unwrap();
}
(*count) -= 1;
slept
}
/// Increments command_count and notifies one waiter.
pub fn increment(&mut self) {
let &(ref lock, ref cvar) = &*self.command_count;
let mut count = lock.lock().unwrap();
(*count) += 1;
cvar.notify_one();
}
/// Returns value of command_count. This returns a snap-shot in time value.
/// By the time another action is performed based on previous value returned
/// by count, the count may have changed. Currently, sender increments the
/// count and reciever decrements it.
pub fn count(&self) -> u64 {
let &(ref lock, ref _cvar) = &*self.command_count;
let count = lock.lock().unwrap();
*count
}
}
/// Generating an IoPacket involves several variants like
/// - data for the IO and it's checksum
/// - data size
/// - offset of the IO
/// - several other (future) things like file name, directory path.
/// When we want randomly generated IO to be repeatable, we need to generate
/// a random number from a seed and based on that random number, we derive
/// variants of the IO. A typical use of Generator would look something like
/// ```
/// let generator: Generator = create_my_awesome_generator();
/// while (disks_death) {
/// random_number = generator.generate_number();
/// io_range = generator.get_io_range();
/// io_type = generator.get_io_operation();
/// io_packet = create_io_packet(io_type, io_range);
/// generator.fill_buffer(io_packet);
/// }
/// ```
pub trait Generator {
/// Generates a new [random] number and return it's value.
/// TODO(auradkar): "It is a bit confusing that the generator is both providing random numbers,
/// operations, and buffers. Seems like it is operating at 3 different levels
/// of abstraction... maybe split it into several different traits. "
fn generate_number(&mut self) -> u64;
/// Returns type of operation corresponding to the last generated [random]
/// number
fn get_io_operation(&self, allowed_ops: &Vec<OperationType>) -> OperationType;
/// Returns Range (start and end] of IO operation. end - start gives the size
/// of the IO
fn get_io_range(&self) -> Range<u64>;
/// Generates and fills the buf with data.
fn fill_buffer(&self, buf: &mut Vec<u8>, sequence_number: u64, offset_range: Range<u64>);
}
/// GeneratorArgs contains only the fields that help generator make decisions
/// needed for re-playability. This structure can be serialized and saved
/// for possible later use.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GeneratorArgs {
/// magic_number helps to identify that the block was written
/// by the app.
magic_number: u64,
/// process_id helps to differentiate this run from other runs
process_id: u64,
/// Human friendly name for this thread.
name: String,
/// Unique identifier for each generator.
generator_unique_id: u64,
/// Target block size. For some Targets,
/// IO might fail if size of IO is not a multiple of
/// block_size. This size is also used to watermark the
/// block with block header
block_size: u64,
/// MTU per IO that Target can handle.
/// 0 represents N/A for this Target
max_io_size: u64,
/// Hard alignment requirements without which IOs might fail
align: bool,
/// Seed that will be used to generate IOs in this thread
seed: u64,
/// Name of the target on which generator will perform IOs.
target_name: String,
/// target_range describes the portion of the Target
/// the generator is allowed to work on. Other instances
/// of Target may work on different ranges within the same
/// Target.
/// All generated IoPacket's offset and length should
/// fall in this range
target_range: Range<u64>,
/// Target type. When there are multiple target types in the apps, this
/// will help us search and load the right target operations.
target_type: AvailableTargets,
/// Types of the operations to perform on the target.
operations: TargetOps,
/// The maximum allowed number of outstanding IOs that are generated and
/// are in Issuer queue. This number does not limit IOs that belong to verify
/// operation.
issuer_queue_depth: usize,
/// The number of IOs that need to be issued before we gracefully tear-down
/// generator thread.
/// TODO(auradkar): Introduce time bound exit criteria.
max_io_count: u64,
/// When true, the target access (read/write) are sequential with respect to
/// offsets within the target and within a thread.
sequential: bool,
}
impl GeneratorArgs {
pub fn new(
magic_number: u64,
process_id: u64,
id: u64,
block_size: u64,
max_io_size: u64,
align: bool,
seed: u64,
target_name: String,
target_range: Range<u64>,
target_type: AvailableTargets,
operations: TargetOps,
issuer_queue_depth: usize,
max_io_count: u64,
sequential: bool,
) -> GeneratorArgs {
GeneratorArgs {
name: format!("generator-{}", id),
generator_unique_id: id,
block_size,
max_io_size,
align,
seed,
target_name,
target_range,
target_type,
operations,
issuer_queue_depth,
magic_number,
process_id,
max_io_count,
sequential,
}
}
}
/// Based on the input args this returns a set of allowed operations that
/// generator is allowed to issue. For now we only allow writes.
fn pick_operation_type(args: &GeneratorArgs) -> Vec<OperationType> {
let mut operations: Vec<OperationType> = vec![];
if args.operations.write | else {
assert!(false);
}
return operations;
}
/// Based on the input args this returns a generator that can generate requested
/// IO load.For now we only allow sequential io.
fn pick_generator_type(args: &GeneratorArgs, target_id: u64) -> Box<dyn Generator> {
if !args.sequential {
panic!("Only sequential io generator is implemented at the moment");
}
Box::new(SequentialIoGenerator::new(
args.magic_number,
args.process_id,
target_id,
args.generator_unique_id,
args.target_range.clone(),
args.block_size,
args.max_io_size,
args.align,
))
}
fn run_generator(
args: &GeneratorArgs,
to_issuer: &SyncSender<IoPacketType>,
active_commands: &mut ActiveCommands,
start_instant: Instant,
io_map: Arc<Mutex<HashMap<u64, IoPacketType>>>,
) -> Result<(), Error> {
// Generator specific target unique id.
let target_id = 0;
// IO sequence number. Order of IOs issued need not be same as order they arrive at
// verifier and get logged. While replaying, this number helps us determine order
// to issue IOs irrespective of the order they are read from replay log.
let io_sequence_number = 0;
// The generator's stage in lifetime of an IO
let stage = PipelineStages::Generate;
let mut gen = pick_generator_type(&args, target_id);
let target = create_target(
args.target_type,
target_id,
args.target_name.clone(),
args.target_range.clone(),
start_instant,
);
// An array of allowed operations that helps generator to pick an operation
// based on generated random number.
let allowed_operations = pick_operation_type(&args);
for io_sequence_number in 1..(args.max_io_count + 1) {
if active_commands.count() == 0 {
debug!("{} running slow.", args.name);
}
let io_seed = gen.generate_number();
let io_range = gen.get_io_range();
let op_type = gen.get_io_operation(&allowed_operations);
let mut io_packet =
target.create_io_packet(op_type, io_sequence_number, io_seed, io_range, target.clone());
io_packet.timestamp_stage_start(stage);
let io_offset_range = io_packet.io_offset_range().clone();
gen.fill_buffer(io_packet.buffer_mut | {
operations.push(OperationType::Write);
} | conditional_block |
main.rs | From<&'a mut ES> for Option<&'a mut Ant> {
fn from(entity_state: &'a mut ES) -> Self {
match entity_state {
&mut ES::Ant(ref mut ant) => Some(ant),
}
}
}
#[derive(Clone, Debug)]
enum ES {
Ant(Ant),
}
impl EntityState<CS> for ES {}
impl From<Ant> for ES {
fn from(ant: Ant) -> Self {
ES::Ant(ant)
}
}
#[derive(Clone)]
struct MES(ketos::Value);
impl Default for MES {
fn default() -> Self {
MES(ketos::Value::Unit)
}
}
impl MutEntityState for MES {}
enum CA {
}
impl CellAction<CS> for CA {}
#[derive(Debug)]
enum EA {
}
type U = Universe2D<CS, ES, MES>;
fn map_value_to_self_action(val: &Value) -> Result<SelfAction<CS, ES, EA>, String> {
match val {
&Value::List(ref list) => {
if list.is_empty() {
return Err("The provided action list was empty!".into());
}
match &list[0] {
&Value::String(ref action_type) => match action_type.as_ref() {
"translate" => {
if list.len() != 3 {
return Err(format!("Invalid amount of arguments provided to translate action: {}", list.len() - 1));
}
let arg1: isize = match &list[1] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 1 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 1 of translate action!",
list[1].type_name()
));
},
};
let arg2: isize = match &list[2] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 2 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 2 of translate action!",
list[2].type_name()
));
},
};
let action = SelfAction::Translate(arg1, arg2);
Ok(action)
},
_ => Err(format!("Invalid action type of `{}` supplied!", action_type)),
},
_ => Err(format!("Invalid argument type of {} provided for action identifier!", list[0].type_name()))
}
},
_ => Err(format!("Invalid value type of {} jammed into action buffer.", val.type_name()))
}
}
fn map_value_to_cell_action(_val: &Value) -> Result<(CA, usize), String> {
unimplemented!();
}
fn map_value_to_entity_action(_val: &Value) -> Result<(EA, usize, Uuid), String> {
unimplemented!();
}
impl EntityAction<CS, ES> for EA {}
struct WorldGenerator;
impl Generator<CS, ES, MES> for WorldGenerator {
fn gen(&mut self, _conf: &UniverseConf) -> (Vec<Cell<CS>>, Vec<Vec<Entity<CS, ES, MES>>>) {
let mut rng = PcgRng::from_seed(PRNG_SEED);
let cells = vec![Cell { state: CS::default() }; UNIVERSE_LENGTH];
let mut entities = vec![Vec::new(); UNIVERSE_LENGTH];
let ant_src = include_str!("./ant.lisp");
let ant_entity: Entity<CS, ES, MES> = Entity::new(ES::from(Ant::from_source(ant_src).unwrap()), MES::default());
for _ in 0..ANT_COUNT {
loop {
let universe_index: usize = rng.gen_range(0, UNIVERSE_LENGTH);
if entities[universe_index].is_empty() {
entities[universe_index].push(ant_entity.clone());
break;
}
}
}
(cells, entities)
}
}
fn reset_action_buffers(context: &Context, universe_index: usize) {
let scope: &GlobalScope = context.scope();
scope.add_named_value("__CELL_ACTIONS", Value::Unit);
scope.add_named_value("__SELF_ACTIONS", Value::Unit);
scope.add_named_value("__ENTITY_ACTIONS", Value::Unit);
scope.add_named_value("UNIVERSE_INDEX", Value::Integer(ketos::integer::Integer::from_usize(universe_index)))
}
fn get_list_by_name(scope: &Scope, name: &str) -> Result<RcVec<Value>, String> {
match scope.get_named_value(name) {
Some(buf) => match buf {
Value::List(list) => Ok(list),
Value::Unit => Ok(RcVec::new(vec![])),
_ => {
return Err(format!("{} has been changed to an invalid type of {}!", name, buf.type_name()));
},
}
None => {
return Err(format!("The variable named {} was deleted!", name));
},
}
}
fn process_action_buffers(
context: &Context,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) -> Result<(), String> {
let scope = context.scope();
let cell_action_list = get_list_by_name(scope, "__CELL_ACTIONS")?;
for val in &cell_action_list {
let (action, universe_index): (CA, usize) = map_value_to_cell_action(val)?;
cell_action_executor(action, universe_index);
}
let self_action_list = get_list_by_name(scope, "__SELF_ACTIONS")?;
for val in &self_action_list {
let action: SelfAction<CS, ES, EA> = map_value_to_self_action(val)?;
self_action_executor(action);
}
let entity_action_list = get_list_by_name(scope, "__ENTITY_ACTIONS")?;
for val in &entity_action_list {
let (action, entity_index, uuid): (EA, usize, Uuid) = map_value_to_entity_action(val)?;
entity_action_executor(action, entity_index, uuid);
}
Ok(())
}
struct AntEngine;
fn exec_cell_action(
owned_action: &OwnedAction<CS, ES, CA, EA>,
_cells: &mut [Cell<CS>],
entities: &mut EntityContainer<CS, ES, MES>
) {
let (_entity, _entity_universe_index) = match entities.get_verify_mut(owned_action.source_entity_index, owned_action.source_uuid) {
Some((entity, universe_index)) => (entity, universe_index),
None => { return; }, // The entity been deleted, so abort.
};
match &owned_action.action {
&Action::CellAction {ref action, ..} => match action {
_ => unimplemented!(),
},
_ => unreachable!(),
}
}
fn exec_self_action(
universe: &mut U,
action: &OwnedAction<CS, ES, CA, EA>
) {
match action.action {
Action::SelfAction(SelfAction::Translate(x_offset, y_offset)) => translate_entity(
x_offset,
y_offset,
&mut universe.entities,
action.source_entity_index,
action.source_uuid,
UNIVERSE_SIZE
),
Action::EntityAction{ .. } | Action::CellAction{ .. } => unreachable!(),
_ => unimplemented!(),
}
}
fn exec_entity_action(_action: &OwnedAction<CS, ES, CA, EA>) {
unimplemented!(); // TODO
}
impl SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U> for AntEngine {
fn iter_entities(&self, _universe: &U) -> SerialEntityIterator<CS, ES> {
SerialEntityIterator::new(UNIVERSE_SIZE)
}
fn exec_actions(
&self,
universe: &mut U,
cell_actions: &[OwnedAction<CS, ES, CA, EA>],
self_actions: &[OwnedAction<CS, ES, CA, EA>],
entity_actions: &[OwnedAction<CS, ES, CA, EA>]
) {
for cell_action in cell_actions { exec_cell_action(cell_action, &mut universe.cells, &mut universe.entities); }
for self_action in self_actions { exec_self_action(universe, self_action); }
for entity_action in entity_actions { exec_entity_action(entity_action); }
}
fn drive_entity(
&mut self,
universe_index: usize,
entity: &Entity<CS, ES, MES>,
_: &U,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) {
match entity.state {
ES::Ant(Ant { ref code, ref context, .. }) => {
reset_action_buffers(context, universe_index);
for c in code {
match ketos::exec::execute(context, Rc::clone(&c)) {
Ok(_) => (),
Err(err) => {
println!("Entity script errored: {:?}", err);
return;
},
}; | }
match process_action_buffers( | random_line_split |
|
main.rs | .add_named_value("UNIVERSE_SIZE", UNIVERSE_SIZE.into());
return Rc::new(global_scope)
}
fn get_ant_default_context() -> ketos::Context {
let scope = get_ant_global_scope();
let restrictions = get_ant_restrictions();
let context = ketos::Context::new(scope, restrictions);
// Fill the context with default items from our "standard library"
let std_src = include_str!("./ant_std.lisp");
let codes: Vec<Rc<Code>> = get_codes_from_source(&context, std_src)
.expect("You've got syntax errors in your standard library!");
for code in &codes {
ketos::exec::execute(&context, Rc::clone(code))
.expect("Error while executing standard library code!");
}
context
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum CellContents {
Empty,
Filled(u8),
Food(u16),
Anthill,
}
#[derive(Clone, Debug)]
struct CS {
contents: CellContents,
}
impl CellState for CS {}
impl Default for CS {
fn default() -> Self {
CS { contents: CellContents::Empty }
}
}
#[derive(Clone)]
struct Ant {
code: Vec<Rc<Code>>,
context: Context,
holding: CellContents,
}
impl Ant {
pub fn from_source(src: &str) -> Result<Self, String> {
let context = get_ant_default_context();
let codes = get_codes_from_source(&context, src)?;
Ok(Ant {
code: codes,
context: context,
holding: CellContents::Empty,
})
}
}
impl Debug for Ant {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
write!(formatter, "Ant {{ code: {:?}, context: {{..}}, holding: {:?} }}", self.code, self.holding)
}
}
impl<'a> From<&'a ES> for Option<&'a Ant> {
fn from(entity_state: &'a ES) -> Self {
match entity_state {
&ES::Ant(ref ant) => Some(ant),
}
}
}
impl<'a> From<&'a mut ES> for Option<&'a mut Ant> {
fn from(entity_state: &'a mut ES) -> Self {
match entity_state {
&mut ES::Ant(ref mut ant) => Some(ant),
}
}
}
#[derive(Clone, Debug)]
enum ES {
Ant(Ant),
}
impl EntityState<CS> for ES {}
impl From<Ant> for ES {
fn from(ant: Ant) -> Self {
ES::Ant(ant)
}
}
#[derive(Clone)]
struct MES(ketos::Value);
impl Default for MES {
fn default() -> Self {
MES(ketos::Value::Unit)
}
}
impl MutEntityState for MES {}
enum CA {
}
impl CellAction<CS> for CA {}
#[derive(Debug)]
enum EA {
}
type U = Universe2D<CS, ES, MES>;
fn map_value_to_self_action(val: &Value) -> Result<SelfAction<CS, ES, EA>, String> {
match val {
&Value::List(ref list) => {
if list.is_empty() {
return Err("The provided action list was empty!".into());
}
match &list[0] {
&Value::String(ref action_type) => match action_type.as_ref() {
"translate" => {
if list.len() != 3 {
return Err(format!("Invalid amount of arguments provided to translate action: {}", list.len() - 1));
}
let arg1: isize = match &list[1] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 1 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 1 of translate action!",
list[1].type_name()
));
},
};
let arg2: isize = match &list[2] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 2 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 2 of translate action!",
list[2].type_name()
));
},
};
let action = SelfAction::Translate(arg1, arg2);
Ok(action)
},
_ => Err(format!("Invalid action type of `{}` supplied!", action_type)),
},
_ => Err(format!("Invalid argument type of {} provided for action identifier!", list[0].type_name()))
}
},
_ => Err(format!("Invalid value type of {} jammed into action buffer.", val.type_name()))
}
}
fn map_value_to_cell_action(_val: &Value) -> Result<(CA, usize), String> {
unimplemented!();
}
fn map_value_to_entity_action(_val: &Value) -> Result<(EA, usize, Uuid), String> {
unimplemented!();
}
impl EntityAction<CS, ES> for EA {}
struct WorldGenerator;
impl Generator<CS, ES, MES> for WorldGenerator {
fn gen(&mut self, _conf: &UniverseConf) -> (Vec<Cell<CS>>, Vec<Vec<Entity<CS, ES, MES>>>) {
let mut rng = PcgRng::from_seed(PRNG_SEED);
let cells = vec![Cell { state: CS::default() }; UNIVERSE_LENGTH];
let mut entities = vec![Vec::new(); UNIVERSE_LENGTH];
let ant_src = include_str!("./ant.lisp");
let ant_entity: Entity<CS, ES, MES> = Entity::new(ES::from(Ant::from_source(ant_src).unwrap()), MES::default());
for _ in 0..ANT_COUNT {
loop {
let universe_index: usize = rng.gen_range(0, UNIVERSE_LENGTH);
if entities[universe_index].is_empty() {
entities[universe_index].push(ant_entity.clone());
break;
}
}
}
(cells, entities)
}
}
fn | (context: &Context, universe_index: usize) {
let scope: &GlobalScope = context.scope();
scope.add_named_value("__CELL_ACTIONS", Value::Unit);
scope.add_named_value("__SELF_ACTIONS", Value::Unit);
scope.add_named_value("__ENTITY_ACTIONS", Value::Unit);
scope.add_named_value("UNIVERSE_INDEX", Value::Integer(ketos::integer::Integer::from_usize(universe_index)))
}
fn get_list_by_name(scope: &Scope, name: &str) -> Result<RcVec<Value>, String> {
match scope.get_named_value(name) {
Some(buf) => match buf {
Value::List(list) => Ok(list),
Value::Unit => Ok(RcVec::new(vec![])),
_ => {
return Err(format!("{} has been changed to an invalid type of {}!", name, buf.type_name()));
},
}
None => {
return Err(format!("The variable named {} was deleted!", name));
},
}
}
fn process_action_buffers(
context: &Context,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) -> Result<(), String> {
let scope = context.scope();
let cell_action_list = get_list_by_name(scope, "__CELL_ACTIONS")?;
for val in &cell_action_list {
let (action, universe_index): (CA, usize) = map_value_to_cell_action(val)?;
cell_action_executor(action, universe_index);
}
let self_action_list = get_list_by_name(scope, "__SELF_ACTIONS")?;
for val in &self_action_list {
let action: SelfAction<CS, ES, EA> = map_value_to_self_action(val)?;
self_action_executor(action);
}
let entity_action_list = get_list_by_name(scope, "__ENTITY_ACTIONS")?;
for val in &entity_action_list {
let (action, entity_index, uuid): (EA, usize, Uuid) = map_value_to_entity_action(val)?;
entity_action_executor(action, entity_index, uuid);
}
Ok(())
}
struct AntEngine;
fn exec_cell_action(
owned_action: &OwnedAction<CS, ES, CA, EA>,
_cells: &mut [Cell<CS>],
entities: &mut EntityContainer<CS, ES, MES>
) {
let (_entity, _entity_universe_index) = match entities.get_verify_mut(owned_action.source_entity_index, owned_action.source_uuid) {
Some((entity, universe_index)) => (entity, universe_index),
None => { return; }, // The entity been deleted, so abort.
};
match &owned_action.action {
&Action::CellAction {ref action, ..} => match action {
_ => unimplemented!(),
},
_ => unreachable!(),
}
}
fn exec_self_action(
universe: &mut U,
action: &OwnedAction<CS, ES, CA, EA>
) {
match action.action {
Action::SelfAction(SelfAction::Translate(x_offset, y_offset)) => | reset_action_buffers | identifier_name |
main.rs | type of {} provided to argument 1 of translate action!",
list[1].type_name()
));
},
};
let arg2: isize = match &list[2] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 2 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 2 of translate action!",
list[2].type_name()
));
},
};
let action = SelfAction::Translate(arg1, arg2);
Ok(action)
},
_ => Err(format!("Invalid action type of `{}` supplied!", action_type)),
},
_ => Err(format!("Invalid argument type of {} provided for action identifier!", list[0].type_name()))
}
},
_ => Err(format!("Invalid value type of {} jammed into action buffer.", val.type_name()))
}
}
fn map_value_to_cell_action(_val: &Value) -> Result<(CA, usize), String> {
unimplemented!();
}
fn map_value_to_entity_action(_val: &Value) -> Result<(EA, usize, Uuid), String> {
unimplemented!();
}
impl EntityAction<CS, ES> for EA {}
struct WorldGenerator;
impl Generator<CS, ES, MES> for WorldGenerator {
fn gen(&mut self, _conf: &UniverseConf) -> (Vec<Cell<CS>>, Vec<Vec<Entity<CS, ES, MES>>>) {
let mut rng = PcgRng::from_seed(PRNG_SEED);
let cells = vec![Cell { state: CS::default() }; UNIVERSE_LENGTH];
let mut entities = vec![Vec::new(); UNIVERSE_LENGTH];
let ant_src = include_str!("./ant.lisp");
let ant_entity: Entity<CS, ES, MES> = Entity::new(ES::from(Ant::from_source(ant_src).unwrap()), MES::default());
for _ in 0..ANT_COUNT {
loop {
let universe_index: usize = rng.gen_range(0, UNIVERSE_LENGTH);
if entities[universe_index].is_empty() {
entities[universe_index].push(ant_entity.clone());
break;
}
}
}
(cells, entities)
}
}
fn reset_action_buffers(context: &Context, universe_index: usize) {
let scope: &GlobalScope = context.scope();
scope.add_named_value("__CELL_ACTIONS", Value::Unit);
scope.add_named_value("__SELF_ACTIONS", Value::Unit);
scope.add_named_value("__ENTITY_ACTIONS", Value::Unit);
scope.add_named_value("UNIVERSE_INDEX", Value::Integer(ketos::integer::Integer::from_usize(universe_index)))
}
fn get_list_by_name(scope: &Scope, name: &str) -> Result<RcVec<Value>, String> {
match scope.get_named_value(name) {
Some(buf) => match buf {
Value::List(list) => Ok(list),
Value::Unit => Ok(RcVec::new(vec![])),
_ => {
return Err(format!("{} has been changed to an invalid type of {}!", name, buf.type_name()));
},
}
None => {
return Err(format!("The variable named {} was deleted!", name));
},
}
}
fn process_action_buffers(
context: &Context,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) -> Result<(), String> {
let scope = context.scope();
let cell_action_list = get_list_by_name(scope, "__CELL_ACTIONS")?;
for val in &cell_action_list {
let (action, universe_index): (CA, usize) = map_value_to_cell_action(val)?;
cell_action_executor(action, universe_index);
}
let self_action_list = get_list_by_name(scope, "__SELF_ACTIONS")?;
for val in &self_action_list {
let action: SelfAction<CS, ES, EA> = map_value_to_self_action(val)?;
self_action_executor(action);
}
let entity_action_list = get_list_by_name(scope, "__ENTITY_ACTIONS")?;
for val in &entity_action_list {
let (action, entity_index, uuid): (EA, usize, Uuid) = map_value_to_entity_action(val)?;
entity_action_executor(action, entity_index, uuid);
}
Ok(())
}
struct AntEngine;
fn exec_cell_action(
owned_action: &OwnedAction<CS, ES, CA, EA>,
_cells: &mut [Cell<CS>],
entities: &mut EntityContainer<CS, ES, MES>
) {
let (_entity, _entity_universe_index) = match entities.get_verify_mut(owned_action.source_entity_index, owned_action.source_uuid) {
Some((entity, universe_index)) => (entity, universe_index),
None => { return; }, // The entity been deleted, so abort.
};
match &owned_action.action {
&Action::CellAction {ref action, ..} => match action {
_ => unimplemented!(),
},
_ => unreachable!(),
}
}
fn exec_self_action(
universe: &mut U,
action: &OwnedAction<CS, ES, CA, EA>
) {
match action.action {
Action::SelfAction(SelfAction::Translate(x_offset, y_offset)) => translate_entity(
x_offset,
y_offset,
&mut universe.entities,
action.source_entity_index,
action.source_uuid,
UNIVERSE_SIZE
),
Action::EntityAction{ .. } | Action::CellAction{ .. } => unreachable!(),
_ => unimplemented!(),
}
}
fn exec_entity_action(_action: &OwnedAction<CS, ES, CA, EA>) {
unimplemented!(); // TODO
}
impl SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U> for AntEngine {
fn iter_entities(&self, _universe: &U) -> SerialEntityIterator<CS, ES> {
SerialEntityIterator::new(UNIVERSE_SIZE)
}
fn exec_actions(
&self,
universe: &mut U,
cell_actions: &[OwnedAction<CS, ES, CA, EA>],
self_actions: &[OwnedAction<CS, ES, CA, EA>],
entity_actions: &[OwnedAction<CS, ES, CA, EA>]
) {
for cell_action in cell_actions { exec_cell_action(cell_action, &mut universe.cells, &mut universe.entities); }
for self_action in self_actions { exec_self_action(universe, self_action); }
for entity_action in entity_actions { exec_entity_action(entity_action); }
}
fn drive_entity(
&mut self,
universe_index: usize,
entity: &Entity<CS, ES, MES>,
_: &U,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) {
match entity.state {
ES::Ant(Ant { ref code, ref context, .. }) => {
reset_action_buffers(context, universe_index);
for c in code {
match ketos::exec::execute(context, Rc::clone(&c)) {
Ok(_) => (),
Err(err) => {
println!("Entity script errored: {:?}", err);
return;
},
};
}
match process_action_buffers(
context,
cell_action_executor,
self_action_executor,
entity_action_executor
) {
Ok(()) => (),
Err(err) => println!("Error while retrieving action buffers from context: {}", err),
}
}
}
}
}
type OurSerialEngine = Box<SerialEngine<CS, ES, MES, CA, EA, SerialEntityIterator<CS, ES>, U>>;
/// Given a coordinate of the universe, uses state of its cell and the entities that reside in it to determine a color
/// to display on the canvas. This is called each tick. The returned value is the color in RGBA.
fn calc_color(
cell: &Cell<CS>,
entity_indexes: &[usize],
entity_container: &EntityContainer<CS, ES, MES>
) -> [u8; 4] {
if !entity_indexes.is_empty() {
for i in entity_indexes {
match unsafe { &entity_container.get(*i).state } {
&ES::Ant { .. } => { return [91, 75, 11, 255] },
}
}
[12, 24, 222, 255]
} else | {
match cell.state.contents {
CellContents::Anthill => [222, 233, 244, 255],
CellContents::Empty => [12, 12, 12, 255],
CellContents::Food(_) => [200, 30, 40, 255], // TODO: Different colors for different food amounts
CellContents::Filled(_) => [230, 230, 230, 255],
}
} | conditional_block |
|
main.rs | .add_named_value("UNIVERSE_SIZE", UNIVERSE_SIZE.into());
return Rc::new(global_scope)
}
fn get_ant_default_context() -> ketos::Context {
let scope = get_ant_global_scope();
let restrictions = get_ant_restrictions();
let context = ketos::Context::new(scope, restrictions);
// Fill the context with default items from our "standard library"
let std_src = include_str!("./ant_std.lisp");
let codes: Vec<Rc<Code>> = get_codes_from_source(&context, std_src)
.expect("You've got syntax errors in your standard library!");
for code in &codes {
ketos::exec::execute(&context, Rc::clone(code))
.expect("Error while executing standard library code!");
}
context
}
#[derive(Clone, Copy, Debug, PartialEq)]
enum CellContents {
Empty,
Filled(u8),
Food(u16),
Anthill,
}
#[derive(Clone, Debug)]
struct CS {
contents: CellContents,
}
impl CellState for CS {}
impl Default for CS {
fn default() -> Self {
CS { contents: CellContents::Empty }
}
}
#[derive(Clone)]
struct Ant {
code: Vec<Rc<Code>>,
context: Context,
holding: CellContents,
}
impl Ant {
pub fn from_source(src: &str) -> Result<Self, String> {
let context = get_ant_default_context();
let codes = get_codes_from_source(&context, src)?;
Ok(Ant {
code: codes,
context: context,
holding: CellContents::Empty,
})
}
}
impl Debug for Ant {
fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
write!(formatter, "Ant {{ code: {:?}, context: {{..}}, holding: {:?} }}", self.code, self.holding)
}
}
impl<'a> From<&'a ES> for Option<&'a Ant> {
fn from(entity_state: &'a ES) -> Self {
match entity_state {
&ES::Ant(ref ant) => Some(ant),
}
}
}
impl<'a> From<&'a mut ES> for Option<&'a mut Ant> {
fn from(entity_state: &'a mut ES) -> Self {
match entity_state {
&mut ES::Ant(ref mut ant) => Some(ant),
}
}
}
#[derive(Clone, Debug)]
enum ES {
Ant(Ant),
}
impl EntityState<CS> for ES {}
impl From<Ant> for ES {
fn from(ant: Ant) -> Self |
}
#[derive(Clone)]
struct MES(ketos::Value);
impl Default for MES {
fn default() -> Self {
MES(ketos::Value::Unit)
}
}
impl MutEntityState for MES {}
enum CA {
}
impl CellAction<CS> for CA {}
#[derive(Debug)]
enum EA {
}
type U = Universe2D<CS, ES, MES>;
fn map_value_to_self_action(val: &Value) -> Result<SelfAction<CS, ES, EA>, String> {
match val {
&Value::List(ref list) => {
if list.is_empty() {
return Err("The provided action list was empty!".into());
}
match &list[0] {
&Value::String(ref action_type) => match action_type.as_ref() {
"translate" => {
if list.len() != 3 {
return Err(format!("Invalid amount of arguments provided to translate action: {}", list.len() - 1));
}
let arg1: isize = match &list[1] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 1 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 1 of translate action!",
list[1].type_name()
));
},
};
let arg2: isize = match &list[2] {
&Value::Integer(ref int) => match int.to_isize() {
Some(i) => i,
None => {
return Err(format!("Integer provided to argument 2 converted into `None`!"))
}
},
_ => {
return Err(format!(
"Invalid arg type of {} provided to argument 2 of translate action!",
list[2].type_name()
));
},
};
let action = SelfAction::Translate(arg1, arg2);
Ok(action)
},
_ => Err(format!("Invalid action type of `{}` supplied!", action_type)),
},
_ => Err(format!("Invalid argument type of {} provided for action identifier!", list[0].type_name()))
}
},
_ => Err(format!("Invalid value type of {} jammed into action buffer.", val.type_name()))
}
}
fn map_value_to_cell_action(_val: &Value) -> Result<(CA, usize), String> {
unimplemented!();
}
fn map_value_to_entity_action(_val: &Value) -> Result<(EA, usize, Uuid), String> {
unimplemented!();
}
impl EntityAction<CS, ES> for EA {}
struct WorldGenerator;
impl Generator<CS, ES, MES> for WorldGenerator {
fn gen(&mut self, _conf: &UniverseConf) -> (Vec<Cell<CS>>, Vec<Vec<Entity<CS, ES, MES>>>) {
let mut rng = PcgRng::from_seed(PRNG_SEED);
let cells = vec![Cell { state: CS::default() }; UNIVERSE_LENGTH];
let mut entities = vec![Vec::new(); UNIVERSE_LENGTH];
let ant_src = include_str!("./ant.lisp");
let ant_entity: Entity<CS, ES, MES> = Entity::new(ES::from(Ant::from_source(ant_src).unwrap()), MES::default());
for _ in 0..ANT_COUNT {
loop {
let universe_index: usize = rng.gen_range(0, UNIVERSE_LENGTH);
if entities[universe_index].is_empty() {
entities[universe_index].push(ant_entity.clone());
break;
}
}
}
(cells, entities)
}
}
fn reset_action_buffers(context: &Context, universe_index: usize) {
let scope: &GlobalScope = context.scope();
scope.add_named_value("__CELL_ACTIONS", Value::Unit);
scope.add_named_value("__SELF_ACTIONS", Value::Unit);
scope.add_named_value("__ENTITY_ACTIONS", Value::Unit);
scope.add_named_value("UNIVERSE_INDEX", Value::Integer(ketos::integer::Integer::from_usize(universe_index)))
}
fn get_list_by_name(scope: &Scope, name: &str) -> Result<RcVec<Value>, String> {
match scope.get_named_value(name) {
Some(buf) => match buf {
Value::List(list) => Ok(list),
Value::Unit => Ok(RcVec::new(vec![])),
_ => {
return Err(format!("{} has been changed to an invalid type of {}!", name, buf.type_name()));
},
}
None => {
return Err(format!("The variable named {} was deleted!", name));
},
}
}
fn process_action_buffers(
context: &Context,
cell_action_executor: &mut FnMut(CA, usize),
self_action_executor: &mut FnMut(SelfAction<CS, ES, EA>),
entity_action_executor: &mut FnMut(EA, usize, Uuid)
) -> Result<(), String> {
let scope = context.scope();
let cell_action_list = get_list_by_name(scope, "__CELL_ACTIONS")?;
for val in &cell_action_list {
let (action, universe_index): (CA, usize) = map_value_to_cell_action(val)?;
cell_action_executor(action, universe_index);
}
let self_action_list = get_list_by_name(scope, "__SELF_ACTIONS")?;
for val in &self_action_list {
let action: SelfAction<CS, ES, EA> = map_value_to_self_action(val)?;
self_action_executor(action);
}
let entity_action_list = get_list_by_name(scope, "__ENTITY_ACTIONS")?;
for val in &entity_action_list {
let (action, entity_index, uuid): (EA, usize, Uuid) = map_value_to_entity_action(val)?;
entity_action_executor(action, entity_index, uuid);
}
Ok(())
}
struct AntEngine;
fn exec_cell_action(
owned_action: &OwnedAction<CS, ES, CA, EA>,
_cells: &mut [Cell<CS>],
entities: &mut EntityContainer<CS, ES, MES>
) {
let (_entity, _entity_universe_index) = match entities.get_verify_mut(owned_action.source_entity_index, owned_action.source_uuid) {
Some((entity, universe_index)) => (entity, universe_index),
None => { return; }, // The entity been deleted, so abort.
};
match &owned_action.action {
&Action::CellAction {ref action, ..} => match action {
_ => unimplemented!(),
},
_ => unreachable!(),
}
}
fn exec_self_action(
universe: &mut U,
action: &OwnedAction<CS, ES, CA, EA>
) {
match action.action {
Action::SelfAction(SelfAction::Translate(x_offset, y_offset)) | {
ES::Ant(ant)
} | identifier_body |
test_utils.go | TestLeaderIP = "10.10.10.10"
)
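// KubeConfigData is an empty kubeconfig skeleton; the tests fill in its
// clusters, contexts and users sections for the two envtest clusters at runtime.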
const KubeConfigData = `
apiVersion: v1
clusters: []
contexts: []
kind: Config
preferences: {}
users: []
`
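// ClustersKubeConfig mirrors the kubeconfig YAML layout so that the cluster,
// context and user entries built for the test clusters can be marshalled back
// into a kubeconfig document.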
type ClustersKubeConfig struct {
APIVersion string `yaml:"apiVersion"`
Clusters []ClusterData `yaml:"clusters"`
Contexts []KubeContextData `yaml:"contexts"`
Kind string `yaml:"kind"`
Users []UserData `yaml:"users"`
}
type ClusterData struct {
Cluster ClusterServerData `yaml:"cluster"`
Name string `yaml:"name"`
}
type ClusterServerData struct {
CAData string `yaml:"certificate-authority-data"`
Server string `yaml:"server"`
}
type KubeContextData struct {
Context ContextData `yaml:"context"`
Name string `yaml:"name"`
}
type ContextData struct {
Cluster string `yaml:"cluster"`
User string `yaml:"user"`
}
type UserData struct {
Name string `yaml:"name"`
User UserID `yaml:"user"`
}
type UserID struct {
ClientCert string `yaml:"client-certificate-data"`
ClientKey string `yaml:"client-key-data"`
}
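// BuildAndCreateTestKubeConfig assembles a kubeconfig describing both envtest
// clusters, creates the avi-system namespace on each of them, and publishes the
// generated kubeconfig through the GSLB_CONFIG environment variable.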
func BuildAndCreateTestKubeConfig(k8sClient1, k8sClient2 client.Client) {
user1 := Cluster1 + "-user"
user2 := Cluster2 + "-user"
	// start from the empty kubeconfig skeleton and fill in the cluster,
	// context and user entries for both test clusters
kData := ClustersKubeConfig{}
Expect(yaml.Unmarshal([]byte(KubeConfigData), &kData)).Should(Succeed())
kData.Clusters = []ClusterData{
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CAData)),
Server: testEnv1.Config.Host,
},
Name: Cluster1,
},
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CAData)),
Server: testEnv2.Config.Host,
},
Name: Cluster2,
},
}
kData.Contexts = []KubeContextData{
{
Context: ContextData{
Cluster: Cluster1,
User: user1,
},
Name: Cluster1,
},
{
Context: ContextData{
Cluster: Cluster2,
User: user2,
},
Name: Cluster2,
},
}
kData.Users = []UserData{
{
Name: user1,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.KeyData)),
},
},
{
Name: user2,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.KeyData)),
},
},
}
// generate a string out of kubeCfg
kubeCfgData, err := yaml.Marshal(kData)
Expect(err).NotTo(HaveOccurred())
// create the "avi-system" namespace
nsObj := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: AviSystemNS,
},
}
Expect(k8sClient1.Create(context.TODO(), &nsObj)).Should(Succeed())
Expect(os.Setenv("GSLB_CONFIG", string(kubeCfgData))).Should(Succeed())
// create "avi-system" namespace on the other cluster as well
nsObj.ObjectMeta.ResourceVersion = ""
Expect(k8sClient2.Create(context.TODO(), &nsObj)).Should(Succeed())
}
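// A sketch of how this helper might be wired into a Ginkgo suite setup, assuming
// both envtest environments and clients have already been started (the names and
// the BeforeSuite placement are assumptions, not part of the original file):
//
//	var _ = BeforeSuite(func() {
//		BuildAndCreateTestKubeConfig(k8sClient1, k8sClient2)
//	})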
func getTestAMKOClusterObj(currentContext string, isLeader bool) amkovmwarecomv1alpha1.AMKOCluster {
return amkovmwarecomv1alpha1.AMKOCluster{
ObjectMeta: metav1.ObjectMeta{
Name: TestAMKOClusterName,
Namespace: AviSystemNS,
},
Spec: amkovmwarecomv1alpha1.AMKOClusterSpec{
ClusterContext: currentContext,
IsLeader: isLeader,
Clusters: []string{Cluster1, Cluster2},
Version: TestAMKOVersion,
},
}
}
func getTestAMKOClusterStatusReason(status amkovmwarecomv1alpha1.AMKOClusterStatus,
statusType string) map[string]string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return map[string]string{
"reason": condition.Reason,
"status": condition.Status,
}
}
}
return map[string]string{}
}
func getTestAMKOClusterStatusMsg(status amkovmwarecomv1alpha1.AMKOClusterStatus, statusType string) string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return condition.Status
}
}
return ""
}
func getTestGCObj() gslbalphav1.GSLBConfig {
return gslbalphav1.GSLBConfig{
ObjectMeta: metav1.ObjectMeta{
Name: TestGCName,
Namespace: AviSystemNS,
},
Spec: gslbalphav1.GSLBConfigSpec{
GSLBLeader: gslbalphav1.GSLBLeader{
Credentials: "test-creds",
ControllerVersion: "20.1.4",
ControllerIP: TestLeaderIP,
},
MemberClusters: []gslbalphav1.MemberCluster{
{
ClusterContext: Cluster1,
},
{
ClusterContext: Cluster2,
},
},
RefreshInterval: 3600,
LogLevel: "INFO",
},
}
}
func getTestGDPObject() gdpalphav2.GlobalDeploymentPolicy {
label := make(map[string]string)
label["key"] = "value"
return gdpalphav2.GlobalDeploymentPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: TestGDPName,
Namespace: AviSystemNS,
},
Spec: gdpalphav2.GDPSpec{
MatchRules: gdpalphav2.MatchRules{
AppSelector: gdpalphav2.AppSelector{
Label: label,
},
},
MatchClusters: []gdpalphav2.ClusterProperty{
{
Cluster: Cluster1,
},
{
Cluster: Cluster2,
},
},
TTL: getGDPTTLPtr(300),
},
}
}
func getGDPTTLPtr(val int) *int {
ttl := val
return &ttl
}
func createTestGCAndGDPObjs(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
Expect(k8sClient.Create(ctx, gc)).Should(Succeed())
Expect(k8sClient.Create(ctx, gdp)).Should(Succeed())
}
func deleteTestGCAndGDPObj(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
err := k8sClient.Delete(ctx, gc)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, gdp)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
}
func TestGCGDPNotFederated(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(BeZero())
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(BeZero())
}
func TestGCGDPExist(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(Equal(1))
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(Equal(1))
}
// func VerifyTestAMKOClusterObjectSuccess(k8sClient client.Client, statusType string) {
// Eventually(func() string {
// var obj amkovmwarecomv1alpha1.AMKOCluster
// Expect(k8sClient.Get(context.TODO(),
// types.NamespacedName{
// Name: TestAMKOClusterName,
// Namespace: AviSystemNS},
// &obj)).Should(Succeed())
// return getTestAMKOClusterStatusReason(obj.Status, statusType)
// }, 5*time.Second, 1*time.Second).Should(Equal("Federation successful"))
// }
func | VerifyTestAMKOClusterStatus | identifier_name |
|
test_utils.go | /pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
var testEnv1 *envtest.Environment
var testEnv2 *envtest.Environment
const (
Cluster1 = "cluster1"
Cluster2 = "cluster2"
TestAMKOVersion = "1.4.2"
TestAMKODifferentVersion = "1.5.1"
TestAMKOClusterName = "test-amko-cluster"
TestGSLBSecret = "gslb-config-secret"
AMKOCRDs = "../../helm/amko/crds"
TestGCName = "test-gc"
TestGDPName = "test-gdp"
TestLeaderIP = "10.10.10.10"
)
const KubeConfigData = `
apiVersion: v1
clusters: []
contexts: []
kind: Config
preferences: {}
users: []
`
type ClustersKubeConfig struct {
APIVersion string `yaml:"apiVersion"`
Clusters []ClusterData `yaml:"clusters"`
Contexts []KubeContextData `yaml:"contexts"`
Kind string `yaml:"kind"`
Users []UserData `yaml:"users"`
}
type ClusterData struct {
Cluster ClusterServerData `yaml:"cluster"`
Name string `yaml:"name"`
}
type ClusterServerData struct {
CAData string `yaml:"certificate-authority-data"`
Server string `yaml:"server"`
}
type KubeContextData struct {
Context ContextData `yaml:"context"`
Name string `yaml:"name"`
}
type ContextData struct {
Cluster string `yaml:"cluster"`
User string `yaml:"user"`
}
type UserData struct {
Name string `yaml:"name"`
User UserID `yaml:"user"`
}
type UserID struct {
ClientCert string `yaml:"client-certificate-data"`
ClientKey string `yaml:"client-key-data"`
}
func BuildAndCreateTestKubeConfig(k8sClient1, k8sClient2 client.Client) {
user1 := Cluster1 + "-user"
user2 := Cluster2 + "-user"
// kData := make(map[string]interface{})
kData := ClustersKubeConfig{}
Expect(yaml.Unmarshal([]byte(KubeConfigData), &kData)).Should(Succeed())
kData.Clusters = []ClusterData{
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CAData)),
Server: testEnv1.Config.Host,
},
Name: Cluster1,
},
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CAData)),
Server: testEnv2.Config.Host,
},
Name: Cluster2,
},
}
kData.Contexts = []KubeContextData{
{
Context: ContextData{
Cluster: Cluster1,
User: user1,
},
Name: Cluster1,
},
{
Context: ContextData{
Cluster: Cluster2,
User: user2,
},
Name: Cluster2,
},
}
kData.Users = []UserData{
{
Name: user1,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.KeyData)),
},
},
{
Name: user2,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.KeyData)),
},
},
}
// generate a string out of kubeCfg
kubeCfgData, err := yaml.Marshal(kData)
Expect(err).NotTo(HaveOccurred())
// create the "avi-system" namespace
nsObj := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: AviSystemNS,
},
}
Expect(k8sClient1.Create(context.TODO(), &nsObj)).Should(Succeed())
Expect(os.Setenv("GSLB_CONFIG", string(kubeCfgData))).Should(Succeed())
// create "avi-system" namespace on the other cluster as well
nsObj.ObjectMeta.ResourceVersion = ""
Expect(k8sClient2.Create(context.TODO(), &nsObj)).Should(Succeed())
}
func getTestAMKOClusterObj(currentContext string, isLeader bool) amkovmwarecomv1alpha1.AMKOCluster |
func getTestAMKOClusterStatusReason(status amkovmwarecomv1alpha1.AMKOClusterStatus,
statusType string) map[string]string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return map[string]string{
"reason": condition.Reason,
"status": condition.Status,
}
}
}
return map[string]string{}
}
func getTestAMKOClusterStatusMsg(status amkovmwarecomv1alpha1.AMKOClusterStatus, statusType string) string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return condition.Status
}
}
return ""
}
func getTestGCObj() gslbalphav1.GSLBConfig {
return gslbalphav1.GSLBConfig{
ObjectMeta: metav1.ObjectMeta{
Name: TestGCName,
Namespace: AviSystemNS,
},
Spec: gslbalphav1.GSLBConfigSpec{
GSLBLeader: gslbalphav1.GSLBLeader{
Credentials: "test-creds",
ControllerVersion: "20.1.4",
ControllerIP: TestLeaderIP,
},
MemberClusters: []gslbalphav1.MemberCluster{
{
ClusterContext: Cluster1,
},
{
ClusterContext: Cluster2,
},
},
RefreshInterval: 3600,
LogLevel: "INFO",
},
}
}
func getTestGDPObject() gdpalphav2.GlobalDeploymentPolicy {
label := make(map[string]string)
label["key"] = "value"
return gdpalphav2.GlobalDeploymentPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: TestGDPName,
Namespace: AviSystemNS,
},
Spec: gdpalphav2.GDPSpec{
MatchRules: gdpalphav2.MatchRules{
AppSelector: gdpalphav2.AppSelector{
Label: label,
},
},
MatchClusters: []gdpalphav2.ClusterProperty{
{
Cluster: Cluster1,
},
{
Cluster: Cluster2,
},
},
TTL: getGDPTTLPtr(300),
},
}
}
func getGDPTTLPtr(val int) *int {
ttl := val
return &ttl
}
func createTestGCAndGDPObjs(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
Expect(k8sClient.Create(ctx, gc)).Should(Succeed())
Expect(k8sClient.Create(ctx, gdp)).Should(Succeed())
}
func deleteTestGCAndGDPObj(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
err := k8sClient.Delete(ctx, gc)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, gdp)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
}
func TestGCGDPNotFederated(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(BeZero())
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(BeZero())
}
func TestGCGDPExist(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(Equal(1))
var | {
return amkovmwarecomv1alpha1.AMKOCluster{
ObjectMeta: metav1.ObjectMeta{
Name: TestAMKOClusterName,
Namespace: AviSystemNS,
},
Spec: amkovmwarecomv1alpha1.AMKOClusterSpec{
ClusterContext: currentContext,
IsLeader: isLeader,
Clusters: []string{Cluster1, Cluster2},
Version: TestAMKOVersion,
},
}
} | identifier_body |
test_utils.go | /pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
var testEnv1 *envtest.Environment
var testEnv2 *envtest.Environment
const (
Cluster1 = "cluster1"
Cluster2 = "cluster2"
TestAMKOVersion = "1.4.2"
TestAMKODifferentVersion = "1.5.1"
TestAMKOClusterName = "test-amko-cluster"
TestGSLBSecret = "gslb-config-secret"
AMKOCRDs = "../../helm/amko/crds"
TestGCName = "test-gc"
TestGDPName = "test-gdp"
TestLeaderIP = "10.10.10.10"
)
const KubeConfigData = `
apiVersion: v1
clusters: []
contexts: []
kind: Config
preferences: {}
users: []
`
type ClustersKubeConfig struct {
APIVersion string `yaml:"apiVersion"`
Clusters []ClusterData `yaml:"clusters"`
Contexts []KubeContextData `yaml:"contexts"`
Kind string `yaml:"kind"`
Users []UserData `yaml:"users"`
}
type ClusterData struct {
Cluster ClusterServerData `yaml:"cluster"`
Name string `yaml:"name"`
}
type ClusterServerData struct {
CAData string `yaml:"certificate-authority-data"`
Server string `yaml:"server"`
}
type KubeContextData struct {
Context ContextData `yaml:"context"`
Name string `yaml:"name"`
}
type ContextData struct {
Cluster string `yaml:"cluster"`
User string `yaml:"user"`
}
type UserData struct {
Name string `yaml:"name"`
User UserID `yaml:"user"`
}
type UserID struct {
ClientCert string `yaml:"client-certificate-data"`
ClientKey string `yaml:"client-key-data"`
}
func BuildAndCreateTestKubeConfig(k8sClient1, k8sClient2 client.Client) {
user1 := Cluster1 + "-user"
user2 := Cluster2 + "-user"
// kData := make(map[string]interface{})
kData := ClustersKubeConfig{}
Expect(yaml.Unmarshal([]byte(KubeConfigData), &kData)).Should(Succeed())
kData.Clusters = []ClusterData{
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CAData)),
Server: testEnv1.Config.Host,
},
Name: Cluster1,
},
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CAData)),
Server: testEnv2.Config.Host,
},
Name: Cluster2,
},
}
kData.Contexts = []KubeContextData{
{
Context: ContextData{
Cluster: Cluster1,
User: user1,
},
Name: Cluster1,
},
{
Context: ContextData{
Cluster: Cluster2,
User: user2,
},
Name: Cluster2,
},
}
kData.Users = []UserData{
{
Name: user1,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.KeyData)),
},
},
{
Name: user2,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.KeyData)),
},
},
}
// generate a string out of kubeCfg
kubeCfgData, err := yaml.Marshal(kData)
Expect(err).NotTo(HaveOccurred())
// create the "avi-system" namespace
nsObj := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: AviSystemNS,
},
}
Expect(k8sClient1.Create(context.TODO(), &nsObj)).Should(Succeed())
Expect(os.Setenv("GSLB_CONFIG", string(kubeCfgData))).Should(Succeed())
// create "avi-system" namespace on the other cluster as well
nsObj.ObjectMeta.ResourceVersion = ""
Expect(k8sClient2.Create(context.TODO(), &nsObj)).Should(Succeed())
}
func getTestAMKOClusterObj(currentContext string, isLeader bool) amkovmwarecomv1alpha1.AMKOCluster {
return amkovmwarecomv1alpha1.AMKOCluster{
ObjectMeta: metav1.ObjectMeta{
Name: TestAMKOClusterName,
Namespace: AviSystemNS,
},
Spec: amkovmwarecomv1alpha1.AMKOClusterSpec{
ClusterContext: currentContext,
IsLeader: isLeader,
Clusters: []string{Cluster1, Cluster2},
Version: TestAMKOVersion,
},
}
}
func getTestAMKOClusterStatusReason(status amkovmwarecomv1alpha1.AMKOClusterStatus,
statusType string) map[string]string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return map[string]string{
"reason": condition.Reason,
"status": condition.Status,
}
}
}
return map[string]string{}
}
func getTestAMKOClusterStatusMsg(status amkovmwarecomv1alpha1.AMKOClusterStatus, statusType string) string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return condition.Status
}
}
return ""
}
func getTestGCObj() gslbalphav1.GSLBConfig {
return gslbalphav1.GSLBConfig{
ObjectMeta: metav1.ObjectMeta{
Name: TestGCName,
Namespace: AviSystemNS,
},
Spec: gslbalphav1.GSLBConfigSpec{
GSLBLeader: gslbalphav1.GSLBLeader{
Credentials: "test-creds",
ControllerVersion: "20.1.4",
ControllerIP: TestLeaderIP,
},
MemberClusters: []gslbalphav1.MemberCluster{
{
ClusterContext: Cluster1,
},
{
ClusterContext: Cluster2,
},
},
RefreshInterval: 3600,
LogLevel: "INFO",
},
}
}
func getTestGDPObject() gdpalphav2.GlobalDeploymentPolicy {
label := make(map[string]string)
label["key"] = "value"
return gdpalphav2.GlobalDeploymentPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: TestGDPName,
Namespace: AviSystemNS,
},
Spec: gdpalphav2.GDPSpec{
MatchRules: gdpalphav2.MatchRules{
AppSelector: gdpalphav2.AppSelector{
Label: label,
},
},
MatchClusters: []gdpalphav2.ClusterProperty{
{
Cluster: Cluster1,
},
{
Cluster: Cluster2,
},
},
TTL: getGDPTTLPtr(300),
},
}
}
func getGDPTTLPtr(val int) *int {
ttl := val
return &ttl
}
func createTestGCAndGDPObjs(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
Expect(k8sClient.Create(ctx, gc)).Should(Succeed())
Expect(k8sClient.Create(ctx, gdp)).Should(Succeed())
}
func deleteTestGCAndGDPObj(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
err := k8sClient.Delete(ctx, gc)
if err != nil && k8serrors.IsNotFound(err) |
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, gdp)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
}
func TestGCGDPNotFederated(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(BeZero())
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(BeZero())
}
func TestGCGDPExist(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(Equal(1))
var | {
return
} | conditional_block |
test_utils.go | /apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
var testEnv1 *envtest.Environment
var testEnv2 *envtest.Environment
const (
Cluster1 = "cluster1"
Cluster2 = "cluster2"
TestAMKOVersion = "1.4.2"
TestAMKODifferentVersion = "1.5.1"
TestAMKOClusterName = "test-amko-cluster"
TestGSLBSecret = "gslb-config-secret"
AMKOCRDs = "../../helm/amko/crds"
TestGCName = "test-gc"
TestGDPName = "test-gdp"
TestLeaderIP = "10.10.10.10"
)
const KubeConfigData = `
apiVersion: v1
clusters: []
contexts: []
kind: Config
preferences: {}
users: []
`
type ClustersKubeConfig struct {
APIVersion string `yaml:"apiVersion"`
Clusters []ClusterData `yaml:"clusters"`
Contexts []KubeContextData `yaml:"contexts"`
Kind string `yaml:"kind"`
Users []UserData `yaml:"users"`
}
type ClusterData struct {
Cluster ClusterServerData `yaml:"cluster"`
Name string `yaml:"name"`
}
type ClusterServerData struct {
CAData string `yaml:"certificate-authority-data"`
Server string `yaml:"server"`
}
type KubeContextData struct {
Context ContextData `yaml:"context"`
Name string `yaml:"name"`
}
type ContextData struct {
Cluster string `yaml:"cluster"`
User string `yaml:"user"`
}
type UserData struct {
Name string `yaml:"name"`
User UserID `yaml:"user"`
}
type UserID struct {
ClientCert string `yaml:"client-certificate-data"`
ClientKey string `yaml:"client-key-data"`
}
func BuildAndCreateTestKubeConfig(k8sClient1, k8sClient2 client.Client) {
user1 := Cluster1 + "-user"
user2 := Cluster2 + "-user"
// kData := make(map[string]interface{})
kData := ClustersKubeConfig{}
Expect(yaml.Unmarshal([]byte(KubeConfigData), &kData)).Should(Succeed())
kData.Clusters = []ClusterData{
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CAData)),
Server: testEnv1.Config.Host,
},
Name: Cluster1,
},
{
Cluster: ClusterServerData{
CAData: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CAData)),
Server: testEnv2.Config.Host,
},
Name: Cluster2,
},
}
kData.Contexts = []KubeContextData{
{
Context: ContextData{
Cluster: Cluster1,
User: user1,
},
Name: Cluster1,
},
{
Context: ContextData{
Cluster: Cluster2,
User: user2,
},
Name: Cluster2,
},
}
kData.Users = []UserData{
{
Name: user1,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv1.Config.KeyData)),
},
},
{
Name: user2,
User: UserID{
ClientCert: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.CertData)),
ClientKey: b64.StdEncoding.EncodeToString([]byte(testEnv2.Config.KeyData)),
},
},
}
// generate a string out of kubeCfg
kubeCfgData, err := yaml.Marshal(kData)
Expect(err).NotTo(HaveOccurred())
// create the "avi-system" namespace
nsObj := corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: AviSystemNS,
}, | Expect(k8sClient1.Create(context.TODO(), &nsObj)).Should(Succeed())
Expect(os.Setenv("GSLB_CONFIG", string(kubeCfgData))).Should(Succeed())
// create "avi-system" namespace on the other cluster as well
nsObj.ObjectMeta.ResourceVersion = ""
Expect(k8sClient2.Create(context.TODO(), &nsObj)).Should(Succeed())
}
func getTestAMKOClusterObj(currentContext string, isLeader bool) amkovmwarecomv1alpha1.AMKOCluster {
return amkovmwarecomv1alpha1.AMKOCluster{
ObjectMeta: metav1.ObjectMeta{
Name: TestAMKOClusterName,
Namespace: AviSystemNS,
},
Spec: amkovmwarecomv1alpha1.AMKOClusterSpec{
ClusterContext: currentContext,
IsLeader: isLeader,
Clusters: []string{Cluster1, Cluster2},
Version: TestAMKOVersion,
},
}
}
func getTestAMKOClusterStatusReason(status amkovmwarecomv1alpha1.AMKOClusterStatus,
statusType string) map[string]string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return map[string]string{
"reason": condition.Reason,
"status": condition.Status,
}
}
}
return map[string]string{}
}
func getTestAMKOClusterStatusMsg(status amkovmwarecomv1alpha1.AMKOClusterStatus, statusType string) string {
for _, condition := range status.Conditions {
if condition.Type == statusType {
return condition.Status
}
}
return ""
}
func getTestGCObj() gslbalphav1.GSLBConfig {
return gslbalphav1.GSLBConfig{
ObjectMeta: metav1.ObjectMeta{
Name: TestGCName,
Namespace: AviSystemNS,
},
Spec: gslbalphav1.GSLBConfigSpec{
GSLBLeader: gslbalphav1.GSLBLeader{
Credentials: "test-creds",
ControllerVersion: "20.1.4",
ControllerIP: TestLeaderIP,
},
MemberClusters: []gslbalphav1.MemberCluster{
{
ClusterContext: Cluster1,
},
{
ClusterContext: Cluster2,
},
},
RefreshInterval: 3600,
LogLevel: "INFO",
},
}
}
func getTestGDPObject() gdpalphav2.GlobalDeploymentPolicy {
label := make(map[string]string)
label["key"] = "value"
return gdpalphav2.GlobalDeploymentPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: TestGDPName,
Namespace: AviSystemNS,
},
Spec: gdpalphav2.GDPSpec{
MatchRules: gdpalphav2.MatchRules{
AppSelector: gdpalphav2.AppSelector{
Label: label,
},
},
MatchClusters: []gdpalphav2.ClusterProperty{
{
Cluster: Cluster1,
},
{
Cluster: Cluster2,
},
},
TTL: getGDPTTLPtr(300),
},
}
}
func getGDPTTLPtr(val int) *int {
ttl := val
return &ttl
}
func createTestGCAndGDPObjs(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
Expect(k8sClient.Create(ctx, gc)).Should(Succeed())
Expect(k8sClient.Create(ctx, gdp)).Should(Succeed())
}
func deleteTestGCAndGDPObj(ctx context.Context, k8sClient client.Client, gc *gslbalphav1.GSLBConfig, gdp *gdpalphav2.GlobalDeploymentPolicy) {
err := k8sClient.Delete(ctx, gc)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
err = k8sClient.Delete(ctx, gdp)
if err != nil && k8serrors.IsNotFound(err) {
return
}
Expect(err).ToNot(HaveOccurred())
}
func TestGCGDPNotFederated(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(BeZero())
var gdpList gdpalphav2.GlobalDeploymentPolicyList
Expect(k8sClient.List(ctx, &gdpList)).Should(Succeed())
Expect(len(gdpList.Items)).Should(BeZero())
}
func TestGCGDPExist(k8sClient client.Client) {
var gcList gslbalphav1.GSLBConfigList
ctx := context.Background()
Expect(k8sClient.List(ctx, &gcList)).Should(Succeed())
Expect(len(gcList.Items)).Should(Equal(1))
var g | } | random_line_split |
main-v2.js | let displayRow = renderElement('div', 'row');
let display = renderElement('div', 'col bg-light text-right display-4');
display.id = 'displayWindow';
display.setAttribute('style', 'height: 80px;');
display.innerHTML = 0;
let bottom = renderElement('div', 'p-5');
// Append Elements
centerCol.appendChild(title);
centerCol.appendChild(displayRow);
displayRow.appendChild(display)
// Create the btns and append them to calcRow
let calcRow = document.createElement('div');
calcRow.className = 'row';
for (let i = 0; i < 20; i++) {
let btn = document.createElement('button');
btn.className = 'col-3 border bg-light display-4 button';
btn.setAttribute('type', 'button');
btn.id = `${calcBtns[i]}`;
btn.setAttribute('style', 'height: 80px;');
let text = document.createTextNode(`${calcBtns[i]}`);
btn.appendChild(text);
if (calcBtns[i] !== '') {
btn.addEventListener('click', clickedOn);
// Disables the blank buttons
} else {
btn.disabled = true;
}
calcRow.appendChild(btn);
centerCol.appendChild(calcRow);
}
centerCol.appendChild(bottom);
row.appendChild(rightCol);
row.appendChild(centerCol);
row.appendChild(leftCol);
container.appendChild(row);
let app = document.getElementById('app');
app.appendChild(container);
}
// Keyboard btns
document.addEventListener('keydown', function(e) { | }
// Key "=/+" without Shift --> "="
if (e.keyCode === 187 && !e.shiftKey) {
symPress('=');
}
// Can use * for multiply
if (e.keyCode === 56 && e.shiftKey) {
symPress('X');
}
if (e.keyCode === 56 && !e.shiftKey) {
numPress('8');
}
switch (e.keyCode) {
case 67:
symPress('C');
break;
// Delete key also --> Clear
case 8:
symPress('C');
break;
case 191:
symPress('/');
break;
case 88:
symPress('X');
break;
case 189:
symPress('-');
break;
// Allows "enter" to be used as "=", since that seems pretty intuitive
case 13:
symPress('=');
break;
case 190:
symPress('.');
break;
case 48:
numPress('0');
break;
case 49:
numPress('1');
break;
case 50:
numPress('2');
break;
case 51:
numPress('3');
break;
case 52:
numPress('4');
break;
case 53:
numPress('5');
break;
case 54:
numPress('6');
break;
case 55:
numPress('7');
break;
case 57:
numPress('9');
break;
}
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
});
// CALC LOGIC
// Differentiates between numbers and symbols
function clickedOn() {
if (this.id === 'C' || this.id === '/' || this.id === 'X' || this.id === '-' || this.id === '+' || this.id === '=' || this.id === '.') {
symPress(this.id);
} else {
numPress(this.id);
}
// If NaN (for example, from 0/0), clears the calc and displays a message
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
// Debugging Logs:
console.log(`Equation: ${num1} ${operand} ${num2}`);
console.log(`Equal temp num: ${equalTemp}; eqPress: ${eqPress}`);
console.log('---------------');
}
// If a number is pressed
function numPress(inputNum) {
// Resets the equal temp number on any number press
equalTemp = undefined;
// If equal was just pressed, followed by a number, clears the calc
if (eqPress) {
clear();
}
// Sets num1
if (operand === '') {
// Makes it so you can't enter 00000
if (inputNum === '0' && num1 === '0') {
num1 = '';
// Caps the input length at 10 digits
} else if (num1.length < 10) {
if (num1 === '0') {
num1 = '';
}
num1 += inputNum;
displayWindow.innerHTML = num1;
}
// Sets num2
} else {
if (inputNum === '0' && num2 === '0') {
num2 = '';
} else if (num2.length < 10) {
if (num2 === '0') {
num2 = '';
}
num2 += inputNum;
displayWindow.innerHTML = num2;
}
}
}
// If a symbol is pressed
function symPress(inputSym) {
// If the sym is not =, then reset the equal values
if (inputSym !== '=') {
equalTemp = undefined;
eqPress = false;
}
// Switch cases for various symbols
switch (inputSym) {
case '+':
// Only allows you to input operands if num1 has already been defined
// Otherwise, you can press an operand, and then a num, which can cause weird results
if (num1 !== '') {
// If num2 isn't defined yet, set the operand and do nothing else
if (num2 === '') {
displayWindow.innerHTML = '+';
operand = '+';
break;
// If it has been defined, calculate the last 2 numbers, display that result,
// place the result in num1, and clear num2
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '+';
break;
}
}
break;
case '-':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '-';
operand = '-';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '-';
break;
}
}
break;
case '/':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '/';
operand = '/';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '/';
break;
}
}
break;
case 'X':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = 'X';
operand = '*';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '*';
break;
}
}
break;
case '=':
// If either input is just '.' --> display "Invalid Use of Decimal"
if (num1 === '.' || num2 === '.') {
clear();
displayWindow.innerHTML = '-Invalid Use of Decimal-';
}
// Records a boolean for if = was the last sym pressed
eqPress = true;
// If neither num1 nor num2 have been defined yet, do nothing
if (num1 === '' && num2 === '') {
break;
// If num2 is undefined, calculate using num1 [operand] num1
} else if (num2 === '') {
displayWindow.innerHTML = equalCalc(operand);
break;
// If num2 has been defined, record num2 in the equal sign's temp num holder, then calculate
} else {
equalTemp = num2;
displayWindow.innerHTML = mathCalc(operand);
break;
}
case '.':
// If operand is undefined, then apply decimal to num1
if (operand === '') {
// Check to make sure num1 doesn't already have a decimal
if (!num1.includes('.')) {
num1 += '.';
displayWindow.innerHTML = num1;
}
} else {
if (!num2.includes('.')) {
num2 += '.';
displayWindow.innerHTML = num2;
}
}
break;
// Clears the calc and all its variables if btn C is pressed
case 'C':
clear();
}
}
// Normal calculations --> [] + [] =
function mathCalc(sym) {
switch (sym) {
case '+':
// Calculates num1 [operand] num2, stores that value
// in num1 and displays it, clears num2 for use in future calculations
num1 = Number(num1) + Number(num2);
num2 = '';
return num1;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
return num1;
case '/':
num1 = Number(num1) | // Keys: Shift and "=/+" --> "+"
if (e.keyCode === 187 && e.shiftKey) {
symPress('+'); | random_line_split |
main-v2.js | 67:
symPress('C');
break;
// Delete key also --> Clear
case 8:
symPress('C');
break;
case 191:
symPress('/');
break;
case 88:
symPress('X');
break;
case 189:
symPress('-');
break;
// Allows "enter" to be used as "=", since that seems pretty intuitive
case 13:
symPress('=');
break;
case 190:
symPress('.');
break;
case 48:
numPress('0');
break;
case 49:
numPress('1');
break;
case 50:
numPress('2');
break;
case 51:
numPress('3');
break;
case 52:
numPress('4');
break;
case 53:
numPress('5');
break;
case 54:
numPress('6');
break;
case 55:
numPress('7');
break;
case 57:
numPress('9');
break;
}
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
});
// CALC LOGIC
// Differentiates between numbers and symbols
function clickedOn() {
if (this.id === 'C' || this.id === '/' || this.id === 'X' || this.id === '-' || this.id === '+' || this.id === '=' || this.id === '.') {
symPress(this.id);
} else {
numPress(this.id);
}
// If NaN (for example, from 0/0), clears the calc and displays a message
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
// Debugging Logs:
console.log(`Equation: ${num1} ${operand} ${num2}`);
console.log(`Equal temp num: ${equalTemp}; eqPress: ${eqPress}`);
console.log('---------------');
}
// If a number is pressed
function numPress(inputNum) {
// Resets the equal temp number on any number press
equalTemp = undefined;
// If equal was just pressed, followed by a number, clears the calc
if (eqPress) {
clear();
}
// Sets num1
if (operand === '') {
// Makes it so you can't enter 00000
if (inputNum === '0' && num1 === '0') {
num1 = '';
// Caps the input length at 10 digits
} else if (num1.length < 10) {
if (num1 === '0') {
num1 = '';
}
num1 += inputNum;
displayWindow.innerHTML = num1;
}
// Sets num2
} else {
if (inputNum === '0' && num2 === '0') {
num2 = '';
} else if (num2.length < 10) {
if (num2 === '0') {
num2 = '';
}
num2 += inputNum;
displayWindow.innerHTML = num2;
}
}
}
// If a symbol is pressed
function symPress(inputSym) {
// If the sym is not =, then reset the equal values
if (inputSym !== '=') {
equalTemp = undefined;
eqPress = false;
}
// Switch cases for various symbols
switch (inputSym) {
case '+':
// Only allows you to input operands if num1 has already been defined
// Otherwise, you can press an operand, and then a num, which can cause weird results
if (num1 !== '') {
// If num2 isn't defined yet, set the operand and do nothing else
if (num2 === '') {
displayWindow.innerHTML = '+';
operand = '+';
break;
// If it has been defined, calculate the last 2 numbers, display that result,
// place the result in num1, and clear num2
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '+';
break;
}
}
break;
case '-':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '-';
operand = '-';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '-';
break;
}
}
break;
case '/':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '/';
operand = '/';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '/';
break;
}
}
break;
case 'X':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = 'X';
operand = '*';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '*';
break;
}
}
break;
case '=':
// If either input is just '.' --> display "Invalid Use of Decimal"
if (num1 === '.' || num2 === '.') {
clear();
displayWindow.innerHTML = '-Invalid Use of Decimal-';
}
// Records a boolean for if = was the last sym pressed
eqPress = true;
// If neither num1 nor num2 have been defined yet, do nothing
if (num1 === '' && num2 === '') {
break;
// If num2 is undefined, calculate using num1 [operand] num1
} else if (num2 === '') {
displayWindow.innerHTML = equalCalc(operand);
break;
// If num2 has been defined, record num2 in the equal sign's temp num holder, then calculate
} else {
equalTemp = num2;
displayWindow.innerHTML = mathCalc(operand);
break;
}
case '.':
// If operand is undefined, then apply decimal to num1
if (operand === '') {
// Check to make sure num1 doesn't already have a decimal
if (!num1.includes('.')) {
num1 += '.';
displayWindow.innerHTML = num1;
}
} else {
if (!num2.includes('.')) {
num2 += '.';
displayWindow.innerHTML = num2;
}
}
break;
// Clears the calc and all its variables if btn C is pressed
case 'C':
clear();
}
}
// Normal calculations --> [] + [] =
function mathCalc(sym) {
switch (sym) {
case '+':
// Calculates num1 [operand] num2, stores that value
// in num1 and displays it, clears num2 for use in future calculations
num1 = Number(num1) + Number(num2);
num2 = '';
return num1;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
return num1;
case '/':
num1 = Number(num1) / Number(num2);
num2 = '';
return num1;
case '*':
num1 = Number(num1) * Number(num2);
num2 = '';
return num1;
}
}
// [] + [] + []... =
function multiCalc(sym) {
switch (sym) {
case '+':
num1 = Number(num1) + Number(num2);
num2 = '';
break;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
break;
case '/':
num1 = Number(num1) / Number(num2);
num2 = '';
break;
case '*':
num1 = Number(num1) * Number(num2);
num2 = '';
}
}
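// Illustrative trace of the chained case multiCalc() above handles (derived from the
// code, not part of the original source): pressing 3, +, 4, + folds 3 + 4 into num1
// (7) and leaves the new '+' pending, so pressing 2 and then '=' shows 9.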
// For when equal sign is pressed multiple times --> [] + = = = OR [] + [] = = =
function equalCalc(sym) {
switch (sym) {
case '+':
// If equal's temp num has not been defined yet, define it
// Otherwise, keep performing calculations using the old value
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) + Number(equalTemp);
num2 = '';
return num1;
case '-':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) - Number(equalTemp);
num2 = '';
return num1;
case '/':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) / Number(equalTemp);
num2 = '';
return num1;
case '*':
if (equalTemp === undefined) {
equalTemp = num1;
}
num1 = Number(num1) * Number(equalTemp);
num2 = '';
return num1;
case '':
return num1;
}
}
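// Illustrative trace of repeated '=' as handled by equalCalc() above (derived from the
// code, not part of the original source): 5, +, '=' stores equalTemp = 5 and shows 10;
// each further '=' keeps adding the remembered 5, giving 15, 20, ...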
// Resets all of the calculator's values to their default state
function clear() | {
num1 = '';
num2 = '';
operand = '';
displayWindow.innerHTML = 0;
equalTemp = undefined;
eqPress = false;
} | identifier_body |
|
main-v2.js | let displayRow = renderElement('div', 'row');
let display = renderElement('div', 'col bg-light text-right display-4');
display.id = 'displayWindow';
display.setAttribute('style', 'height: 80px;');
display.innerHTML = 0;
let bottom = renderElement('div', 'p-5');
// Append Elements
centerCol.appendChild(title);
centerCol.appendChild(displayRow);
displayRow.appendChild(display)
// Create the btns and append them to calcRow
let calcRow = document.createElement('div');
calcRow.className = 'row';
for (let i = 0; i < 20; i++) {
let btn = document.createElement('button');
btn.className = 'col-3 border bg-light display-4 button';
btn.setAttribute('type', 'button');
btn.id = `${calcBtns[i]}`;
btn.setAttribute('style', 'height: 80px;');
let text = document.createTextNode(`${calcBtns[i]}`);
btn.appendChild(text);
if (calcBtns[i] !== '') {
btn.addEventListener('click', clickedOn);
// Disables the blank buttons
} else {
btn.disabled = true;
}
calcRow.appendChild(btn);
centerCol.appendChild(calcRow);
}
centerCol.appendChild(bottom);
row.appendChild(rightCol);
row.appendChild(centerCol);
row.appendChild(leftCol);
container.appendChild(row);
let app = document.getElementById('app');
app.appendChild(container);
}
// Keyboard btns
document.addEventListener('keydown', function(e) {
// Keys: Shift and "=/+" --> "+"
if (e.keyCode === 187 && e.shiftKey) {
symPress('+');
}
// Key "=/+" without Shift --> "="
if (e.keyCode === 187 && !e.shiftKey) {
symPress('=');
}
// Can use * for multiply
if (e.keyCode === 56 && e.shiftKey) {
symPress('X');
}
if (e.keyCode === 56 && !e.shiftKey) {
numPress('8');
}
switch (e.keyCode) {
case 67:
symPress('C');
break;
// Delete key also --> Clear
case 8:
symPress('C');
break;
case 191:
symPress('/');
break;
case 88:
symPress('X');
break;
case 189:
symPress('-');
break;
// Allows "enter" to be used as "=", since that seems pretty intuitive
case 13:
symPress('=');
break;
case 190:
symPress('.');
break;
case 48:
numPress('0');
break;
case 49:
numPress('1');
break;
case 50:
numPress('2');
break;
case 51:
numPress('3');
break;
case 52:
numPress('4');
break;
case 53:
numPress('5');
break;
case 54:
numPress('6');
break;
case 55:
numPress('7');
break;
case 57:
numPress('9');
break;
}
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
});
// CALC LOGIC
// Differentiates between numbers and symbols
function clickedOn() {
if (this.id === 'C' || this.id === '/' || this.id === 'X' || this.id === '-' || this.id === '+' || this.id === '=' || this.id === '.') | else {
numPress(this.id);
}
// If NaN (for example, from 0/0), clears the calc and displays a message
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
// Debugging Logs:
console.log(`Equation: ${num1} ${operand} ${num2}`);
console.log(`Equal temp num: ${equalTemp}; eqPress: ${eqPress}`);
console.log('---------------');
}
// If a number is pressed
function numPress(inputNum) {
// Resets the equal temp number on any number press
equalTemp = undefined;
// If equal was just pressed, followed by a number, clears the calc
if (eqPress) {
clear();
}
// Sets num1
if (operand === '') {
// Makes it so you can't enter 00000
if (inputNum === '0' && num1 === '0') {
num1 = '';
// Caps the input length at 10 digits
} else if (num1.length < 10) {
if (num1 === '0') {
num1 = '';
}
num1 += inputNum;
displayWindow.innerHTML = num1;
}
// Sets num2
} else {
if (inputNum === '0' && num2 === '0') {
num2 = '';
} else if (num2.length < 10) {
if (num2 === '0') {
num2 = '';
}
num2 += inputNum;
displayWindow.innerHTML = num2;
}
}
}
// If a symbol is pressed
function symPress(inputSym) {
// If the sym is not =, then reset the equal values
if (inputSym !== '=') {
equalTemp = undefined;
eqPress = false;
}
// Switch cases for various symbols
switch (inputSym) {
case '+':
// Only allows you to input operands if num1 has already been defined
// Otherwise, you can press an operand, and then a num, which can cause weird results
if (num1 !== '') {
// If num2 isn't defined yet, set the operand and do nothing else
if (num2 === '') {
displayWindow.innerHTML = '+';
operand = '+';
break;
// If it has been defined, calculate the last 2 numbers, display that result,
// place the result in num1, and clear num2
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '+';
break;
}
}
break;
case '-':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '-';
operand = '-';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '-';
break;
}
}
break;
case '/':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '/';
operand = '/';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '/';
break;
}
}
break;
case 'X':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = 'X';
operand = '*';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '*';
break;
}
}
break;
case '=':
// If either input is just '.' --> display "Invalid Use of Decimal"
if (num1 === '.' || num2 === '.') {
clear();
displayWindow.innerHTML = '-Invalid Use of Decimal-';
}
// Records a boolean for if = was the last sym pressed
eqPress = true;
// If neither num1 nor num2 have been defined yet, do nothing
if (num1 === '' && num2 === '') {
break;
// If num2 is undefined, calculate using num1 [operand] num1
} else if (num2 === '') {
displayWindow.innerHTML = equalCalc(operand);
break;
// If num2 has been defined, record num2 in the equal sign's temp num holder, then calculate
} else {
equalTemp = num2;
displayWindow.innerHTML = mathCalc(operand);
break;
}
case '.':
// If operand is undefined, then apply decimal to num1
if (operand === '') {
// Check to make sure num1 doesn't already have a decimal
if (!num1.includes('.')) {
num1 += '.';
displayWindow.innerHTML = num1;
}
} else {
if (!num2.includes('.')) {
num2 += '.';
displayWindow.innerHTML = num2;
}
}
break;
// Clears the calc and all its variables if btn C is pressed
case 'C':
clear();
}
}
// Normal calculations --> [] + [] =
function mathCalc(sym) {
switch (sym) {
case '+':
// Calculates num1 [operand] num2, stores that value
// in num1 and displays it, clears num2 for use in future calculations
num1 = Number(num1) + Number(num2);
num2 = '';
return num1;
case '-':
num1 = Number(num1) - Number(num2);
num2 = '';
return num1;
case '/':
num1 = Number(num1 | {
symPress(this.id);
} | conditional_block |
main-v2.js | () {
// Create Elements
let container = renderElement('div', 'container-fluid');
let row = renderElement('div', 'row');
let leftCol = renderElement('div', 'col-0 col-sm-0 col-md-1 col-lg-2');
let centerCol = renderElement('div', 'col-12 col-sm-12 col-md-10 col-lg-8 text-center');
let rightCol = renderElement('div', 'col-0 col-sm-0 col-md-1 col-lg-2');
let title = renderElement('h1', 'my-5 display-4 text-white');
// title.innerHTML = 'Calculator';
// A colorful title
title.innerHTML = '<span class="text-danger">C</span><span class="text-primary">a</span><span class="text-warning">l</span><span class="text-dark">c</span><span class="text-danger">u</span><span class="text-primary">l</span><span class="text-warning">a</span><span class="text-dark">t</span><span class="text-danger">o</span><span class="text-primary">r</span>';
let displayRow = renderElement('div', 'row');
let display = renderElement('div', 'col bg-light text-right display-4');
display.id = 'displayWindow';
display.setAttribute('style', 'height: 80px;');
display.innerHTML = 0;
let bottom = renderElement('div', 'p-5');
// Append Elements
centerCol.appendChild(title);
centerCol.appendChild(displayRow);
displayRow.appendChild(display)
// Create the btns and append them to calcRow
let calcRow = document.createElement('div');
calcRow.className = 'row';
for (let i = 0; i < 20; i++) {
let btn = document.createElement('button');
btn.className = 'col-3 border bg-light display-4 button';
btn.setAttribute('type', 'button');
btn.id = `${calcBtns[i]}`;
btn.setAttribute('style', 'height: 80px;');
let text = document.createTextNode(`${calcBtns[i]}`);
btn.appendChild(text);
if (calcBtns[i] !== '') {
btn.addEventListener('click', clickedOn);
// Disables the blank buttons
} else {
btn.disabled = true;
}
calcRow.appendChild(btn);
centerCol.appendChild(calcRow);
}
centerCol.appendChild(bottom);
row.appendChild(rightCol);
row.appendChild(centerCol);
row.appendChild(leftCol);
container.appendChild(row);
let app = document.getElementById('app');
app.appendChild(container);
}
// Keyboard btns
document.addEventListener('keydown', function(e) {
// Keys: Shift and "=/+" --> "+"
if (e.keyCode === 187 && e.shiftKey) {
symPress('+');
}
// Key "=/+" without Shift --> "="
if (e.keyCode === 187 && !e.shiftKey) {
symPress('=');
}
// Can use * for multiply
if (e.keyCode === 56 && e.shiftKey) {
symPress('X');
}
if (e.keyCode === 56 && !e.shiftKey) {
numPress('8');
}
switch (e.keyCode) {
case 67:
symPress('C');
break;
// Delete key also --> Clear
case 8:
symPress('C');
break;
case 191:
symPress('/');
break;
case 88:
symPress('X');
break;
case 189:
symPress('-');
break;
// Allows "enter" to be used as "=", since that seems pretty intuitive
case 13:
symPress('=');
break;
case 190:
symPress('.');
break;
case 48:
numPress('0');
break;
case 49:
numPress('1');
break;
case 50:
numPress('2');
break;
case 51:
numPress('3');
break;
case 52:
numPress('4');
break;
case 53:
numPress('5');
break;
case 54:
numPress('6');
break;
case 55:
numPress('7');
break;
case 57:
numPress('9');
break;
}
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
});
// CALC LOGIC
// Differentiates between numbers and symbols
function clickedOn() {
if (this.id === 'C' || this.id === '/' || this.id === 'X' || this.id === '-' || this.id === '+' || this.id === '=' || this.id === '.') {
symPress(this.id);
} else {
numPress(this.id);
}
// If NaN (for example, from 0/0), clears the calc and displays a message
if (displayWindow.innerHTML === 'NaN') {
clear();
displayWindow.innerHTML = '-Undefined-';
}
// Debugging Logs:
console.log(`Equation: ${num1} ${operand} ${num2}`);
console.log(`Equal temp num: ${equalTemp}; eqPress: ${eqPress}`);
console.log('---------------');
}
// If a number is pressed
function numPress(inputNum) {
// Resets the equal temp number on any number press
equalTemp = undefined;
// If equal was just pressed, followed by a number, clears the calc
if (eqPress) {
clear();
}
// Sets num1
if (operand === '') {
// Makes it so you can't enter 00000
if (inputNum === '0' && num1 === '0') {
num1 = '';
// Caps the input length at 10 digits
} else if (num1.length < 10) {
if (num1 === '0') {
num1 = '';
}
num1 += inputNum;
displayWindow.innerHTML = num1;
}
// Sets num2
} else {
if (inputNum === '0' && num2 === '0') {
num2 = '';
} else if (num2.length < 10) {
if (num2 === '0') {
num2 = '';
}
num2 += inputNum;
displayWindow.innerHTML = num2;
}
}
}
// If a symbol is pressed
function symPress(inputSym) {
// If the sym is not =, then reset the equal values
if (inputSym !== '=') {
equalTemp = undefined;
eqPress = false;
}
// Switch cases for various symbols
switch (inputSym) {
case '+':
// Only allows you to input operands if num1 has already been defined
// Otherwise, you can press an operand, and then a num, which can cause weird results
if (num1 !== '') {
// If num2 isn't defined yet, set the operand and do nothing else
if (num2 === '') {
displayWindow.innerHTML = '+';
operand = '+';
break;
// If it has been defined, calculate the last 2 numbers, display that result,
// place the result in num1, and clear num2
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '+';
break;
}
}
break;
case '-':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '-';
operand = '-';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '-';
break;
}
}
break;
case '/':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = '/';
operand = '/';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '/';
break;
}
}
break;
case 'X':
if (num1 !== '') {
if (num2 === '') {
displayWindow.innerHTML = 'X';
operand = '*';
break;
} else {
multiCalc(operand);
displayWindow.innerHTML = num1;
operand = '*';
break;
}
}
break;
case '=':
// If either input is just '.' --> display "Invalid Use of Decimal"
if (num1 === '.' || num2 === '.') {
clear();
displayWindow.innerHTML = '-Invalid Use of Decimal-';
}
// Records a boolean for if = was the last sym pressed
eqPress = true;
// If neither num1 nor num2 have been defined yet, do nothing
if (num1 === '' && num2 === '') {
break;
// If num2 is undefined, calculate using num1 [operand] num1
} else if (num2 === '') {
displayWindow.innerHTML = equalCalc(operand);
break;
// If num2 has been defined, record num2 in the equal sign's temp num holder, then calculate
} else {
equalTemp = | loadCalc | identifier_name |
|
proc.py | self.props['suffix'] = utils.uid(signature)
return self.suffix
def _tidyBeforeRun (self):
"""
Do some preparation before running jobs
"""
self._buildProps ()
self._buildInput ()
self._buildProcVars ()
self._buildJobs ()
def _tidyAfterRun (self):
"""
Do some cleaning after running jobs
"""
failedjobs = []
for i in self.ncjobids:
job = self.jobs[i]
if not job.succeed():
failedjobs.append (job)
if not failedjobs:
self.log ('Successful jobs: ALL', 'debug')
if callable (self.callback):
self.log('Calling callback ...', 'debug')
self.callback (self)
else:
failedjobs[0].showError (len(failedjobs))
if self.errorhow != 'ignore':
sys.exit (1) # don't go further
def _name (self, incAggr = True):
"""
Get my name, including `aggr`, `id` and `tag`
@returns:
the name
"""
aggrName = "@%s" % self.aggr if self.aggr and incAggr else ""
tag = ".%s" % self.tag if self.tag != "notag" else ""
return "%s%s%s" % (self.id, tag, aggrName)
def run (self, config = None):
"""
Run the jobs with a configuration
@params:
`config`: The configuration
"""
timer = time()
if config is None:
config = {}
self.logger.info ('[ START] ' + utils.padBoth(' ' + self._name() + ' ', 80, '-'))
# log the dependencies
self.log ("%s => %s => %s" % ([p._name() for p in self.depends] if self.depends else "START", self._name(), [p._name() for p in self.nexts] if self.nexts else "END"), "info", "depends")
self._readConfig (config)
self._tidyBeforeRun ()
if self._runCmd('beforeCmd') != 0:
raise Exception ('Failed to run beforeCmd: %s' % self.beforeCmd)
if not self._isCached():
# I am not cached, touch the input of my nexts?
# but my nexts are not initialized yet, how?
# set cached to False, then my nexts will access it
self.props['cached'] = False
self.log (self.workdir, 'info', 'RUNNING')
self._runJobs()
if self._runCmd('afterCmd') != 0:
raise Exception ('Failed to run afterCmd: %s' % self.afterCmd)
self._tidyAfterRun ()
self.log ('Done (time: %s).' % utils.formatTime(time() - timer), 'info')
def _buildProps (self):
"""
Compute some properties
"""
if isinstance (self.retcodes, int):
self.props['retcodes'] = [self.retcodes]
if isinstance (self.retcodes, str):
self.props['retcodes'] = [int(i) for i in self.retcodes.split(',')]
key = self._name(False)
if key in proc.PROCS and proc.PROCS[key] != self:
raise Exception ('A proc with id "%s" and tag "%s" already exists.' % (self.id, self.tag))
proc.PROCS[key] = self
if not 'workdir' in self.sets and not self.workdir:
self.props['workdir'] = os.path.join(self.ppldir, "PyPPL.%s.%s.%s" % (self.id, self.tag, self._suffix()))
if not os.path.exists (self.workdir):
os.makedirs (self.workdir)
def _buildInput (self):
"""
Build the input data
Input could be:
1. list: ['input', 'infile:file'] <=> ['input:var', 'infile:path']
2. str : "input, infile:file" <=> input:var, infile:path
3. dict: {"input": channel1, "infile:file": channel2}
or {"input:var, input:file" : channel3}
For 1 and 2, the channel will be the combined channel from the dependents; if there are no dependents, it will be sys.argv[1:]
"""
indata = self.config['input']
if not isinstance (indata, dict):
indata = ','.join(utils.alwaysList (indata))
depdchan = channel.fromChannels (*[d.channel for d in self.depends])
indata = {indata: depdchan if self.depends else channel.fromArgv()}
# expand into individual key-channel pairs
for inkeys, invals in indata.iteritems():
keys = utils.split(inkeys, ',')
if callable (invals):
vals = invals (*[d.channel.copy() for d in self.depends] if self.depends else channel.fromArgv())
vals = vals.split()
elif isinstance (invals, basestring): # only for files: "/a/b/*.txt, /a/c/*.txt"
vals = utils.split(invals, ',')
elif isinstance (invals, channel):
vals = invals.split()
elif isinstance (invals, list):
vals = channel.create(invals).split()
else:
raise ValueError ("%s: Unexpected values for input. Expect dict, list, str, channel, callable." % self._name())
width = len (vals)
if len (keys) > width:
raise ValueError ('%s: Not enough data for input variables.\nVariables: %s\nData: %s' % (self._name(), keys, vals))
for i, key in enumerate(keys):
intype = key.split(':')[-1]
thekey = key.split(':')[0]
val = vals[i].toList() #if isinstance(vals[i], channel) else vals[i]
if intype not in proc.IN_VARTYPE + proc.IN_FILESTYPE + proc.IN_FILETYPE:
intype = proc.IN_VARTYPE[0]
if intype in proc.IN_FILESTYPE:
for x, v in enumerate(val):
if isinstance (v, basestring):
val[x] = channel.fromPath (v).toList()
if self.length == 0:
self.props['length'] = len (val)
if self.length != len (val):
raise ValueError ('%s: Expect same lengths for input channels, but got %s and %s (keys: %s).' % (self._name(), self.length, len (val), key))
self.props['indata'][thekey] = {
'type': intype,
'data': val
}
self.props['jobs'] = [None] * self.length
def _buildProcVars (self):
"""
Also add the proc properties (mostly scalar values) to procvars
"""
alias = {val:key for key, val in proc.ALIAS.iteritems()}
for prop in sorted(self.props.keys()):
val = self.props[prop]
if not prop in ['id', 'tag', 'tmpdir', 'forks', 'cache', 'workdir', 'echo', 'runner',
'errorhow', 'errorntry', 'defaultSh', 'exportdir', 'exporthow', 'exportow',
'indir', 'outdir', 'length', 'args']:
continue
if prop == 'args':
self.props['procvars']['proc.args'] = val
for k, v in val.iteritems():
self.props['procvars']['proc.args.' + k] = v
self.log('%s => %s' % (k, v), 'info', 'p.args')
else:
self.props['procvars']['proc.' + prop] = val
if alias.has_key (prop):
self.props['procvars']['proc.' + alias[prop]] = val
self.log ('%s (%s) => %s' % (prop, alias[prop], val), 'info', 'p.props')
else:
self.log ('%s => %s' % (prop, val), 'info', 'p.props')
def _buildJobs (self):
rptjob = randint(0, self.length-1)
for i in range(self.length):
job = pjob (i, self)
self.jobs[i] = job
job.init ()
row = [x['data'] for x in job.output.values()]
self.channel.rbind (row)
self.jobs[rptjob].report()
def _readConfig (self, config):
| """
Read the configuration
@params:
`config`: The configuration
"""
conf = { key:val for key, val in config.iteritems() if key not in self.sets }
self.config.update (conf)
for key, val in conf.iteritems():
self.props[key] = val | identifier_body |
|
proc.py | raise Exception ('Failed to run afterCmd: %s' % self.afterCmd)
self._tidyAfterRun ()
self.log ('Done (time: %s).' % utils.formatTime(time() - timer), 'info')
def _buildProps (self):
"""
Compute some properties
"""
if isinstance (self.retcodes, int):
self.props['retcodes'] = [self.retcodes]
if isinstance (self.retcodes, str):
self.props['retcodes'] = [int(i) for i in self.retcodes.split(',')]
key = self._name(False)
if key in proc.PROCS and proc.PROCS[key] != self:
raise Exception ('A proc with id "%s" and tag "%s" already exists.' % (self.id, self.tag))
proc.PROCS[key] = self
if not 'workdir' in self.sets and not self.workdir:
self.props['workdir'] = os.path.join(self.ppldir, "PyPPL.%s.%s.%s" % (self.id, self.tag, self._suffix()))
if not os.path.exists (self.workdir):
os.makedirs (self.workdir)
def _buildInput (self):
"""
Build the input data
Input could be:
1. list: ['input', 'infile:file'] <=> ['input:var', 'infile:path']
2. str : "input, infile:file" <=> input:var, infile:path
3. dict: {"input": channel1, "infile:file": channel2}
or {"input:var, input:file" : channel3}
For 1 and 2, the channel will be the combined channel from the dependents; if there are no dependents, it will be sys.argv[1:]
"""
indata = self.config['input']
if not isinstance (indata, dict):
indata = ','.join(utils.alwaysList (indata))
depdchan = channel.fromChannels (*[d.channel for d in self.depends])
indata = {indata: depdchan if self.depends else channel.fromArgv()}
# expand to one key-channel pairs
for inkeys, invals in indata.iteritems():
keys = utils.split(inkeys, ',')
if callable (invals):
vals = invals (*[d.channel.copy() for d in self.depends] if self.depends else channel.fromArgv())
vals = vals.split()
elif isinstance (invals, basestring): # only for files: "/a/b/*.txt, /a/c/*.txt"
vals = utils.split(invals, ',')
elif isinstance (invals, channel):
vals = invals.split()
elif isinstance (invals, list):
vals = channel.create(invals).split()
else:
raise ValueError ("%s: Unexpected values for input. Expect dict, list, str, channel, callable." % self._name())
width = len (vals)
if len (keys) > width:
				raise ValueError ('%s: Not enough data for input variables.\nVariables: %s\nData: %s' % (self._name(), keys, vals))
for i, key in enumerate(keys):
intype = key.split(':')[-1]
thekey = key.split(':')[0]
val = vals[i].toList() #if isinstance(vals[i], channel) else vals[i]
if intype not in proc.IN_VARTYPE + proc.IN_FILESTYPE + proc.IN_FILETYPE:
intype = proc.IN_VARTYPE[0]
if intype in proc.IN_FILESTYPE:
for x, v in enumerate(val):
if isinstance (v, basestring):
val[x] = channel.fromPath (v).toList()
if self.length == 0:
self.props['length'] = len (val)
if self.length != len (val):
raise ValueError ('%s: Expect same lengths for input channels, but got %s and %s (keys: %s).' % (self._name(), self.length, len (val), key))
self.props['indata'][thekey] = {
'type': intype,
'data': val
}
self.props['jobs'] = [None] * self.length
def _buildProcVars (self):
"""
		Build the proc.* variables (procvars) from the proc properties, which are mostly scalar values
"""
alias = {val:key for key, val in proc.ALIAS.iteritems()}
for prop in sorted(self.props.keys()):
val = self.props[prop]
if not prop in ['id', 'tag', 'tmpdir', 'forks', 'cache', 'workdir', 'echo', 'runner',
'errorhow', 'errorntry', 'defaultSh', 'exportdir', 'exporthow', 'exportow',
'indir', 'outdir', 'length', 'args']:
continue
if prop == 'args':
self.props['procvars']['proc.args'] = val
for k, v in val.iteritems():
self.props['procvars']['proc.args.' + k] = v
self.log('%s => %s' % (k, v), 'info', 'p.args')
else:
self.props['procvars']['proc.' + prop] = val
if alias.has_key (prop):
self.props['procvars']['proc.' + alias[prop]] = val
self.log ('%s (%s) => %s' % (prop, alias[prop], val), 'info', 'p.props')
else:
self.log ('%s => %s' % (prop, val), 'info', 'p.props')
def _buildJobs (self):
rptjob = randint(0, self.length-1)
for i in range(self.length):
job = pjob (i, self)
self.jobs[i] = job
job.init ()
row = [x['data'] for x in job.output.values()]
self.channel.rbind (row)
self.jobs[rptjob].report()
def _readConfig (self, config):
"""
Read the configuration
@params:
`config`: The configuration
"""
conf = { key:val for key, val in config.iteritems() if key not in self.sets }
self.config.update (conf)
for key, val in conf.iteritems():
self.props[key] = val
def _isCached (self):
"""
Tell whether the jobs are cached
@returns:
True if all jobs are cached, otherwise False
"""
self.props['ncjobids'] = range(self.length)
if self.cache == False:
self.log ('Not cached, because proc.cache is False', 'debug')
return False
if self.cache == True:
for depend in self.depends:
if depend.cached: continue
self.log ('Not cached, my dependent "%s" is not cached.' % depend._name(), 'debug')
return False
trulyCachedJids = []
exptCachedJids = []
self.props['ncjobids'] = []
for i, job in enumerate(self.jobs):
job = self.jobs[i]
if job.isTrulyCached ():
trulyCachedJids.append(i)
elif job.isExptCached ():
exptCachedJids.append (i)
else:
self.props['ncjobids'].append (i)
		self.log ('Truly cached jobs: %s' % (trulyCachedJids if len(trulyCachedJids) < self.length else 'ALL'), 'debug')
self.log ('Export cached jobs: %s' % (exptCachedJids if len(exptCachedJids) < self.length else 'ALL'), 'debug')
if self.ncjobids:
if len(self.ncjobids) < self.length:
self.log ('Partly cached, only run non-cached %s job(s).' % len(self.ncjobids), 'info')
self.log ('Jobs to be running: %s' % self.ncjobids, 'debug')
else:
self.log ('Not cached, none of the jobs are cached.', 'info')
return False
else:
self.log (self.workdir, 'info', 'CACHED')
return True
def _runCmd (self, key):
"""
Run the `beforeCmd` or `afterCmd`
@params:
`key`: "beforeCmd" or "afterCmd"
@returns:
The return code of the command
"""
if not self.props[key]:
return 0
cmd = utils.format(self.props[key], self.procvars)
self.log ('Running <%s>: %s' % (key, cmd), 'info')
p = Popen (cmd, shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE)
if self.echo:
for line in iter(p.stdout.readline, ''):
self.logger.info ('[ STDOUT] ' + line.rstrip("\n"))
for line in iter(p.stderr.readline, ''):
self.logger.error ('[ STDERR] ' + line.rstrip("\n"))
return p.wait()
def | _runJobs | identifier_name |
|
proc.py | ValueError('Property "%s" of proc is not found' % name)
if proc.ALIAS.has_key(name):
name = proc.ALIAS[name]
return self.props[name]
def __setattr__ (self, name, value):
if not self.config.has_key(name) and not proc.ALIAS.has_key(name) and not name.endswith ('Runner'):
raise ValueError('Cannot set property "%s" for proc instance' % name)
if proc.ALIAS.has_key(name):
name = proc.ALIAS[name]
self.sets.append(name)
self.config[name] = value
if name == 'depends':
# remove me from nexts of my previous depends
for depend in self.depends:
if not self in depend.nexts:
continue
del depend.props['nexts'][depend.nexts.index(self)]
self.props['depends'] = []
depends = value
if not isinstance (value, list):
depends = [value]
for depend in depends:
if isinstance (depend, proc):
self.props['depends'].append (depend)
if self not in depend.nexts:
depend.nexts.append (self)
elif isinstance (depend, aggr):
for p in depend.ends:
self.props['depends'].append (p)
if self not in p.nexts:
p.nexts.append (self)
else:
self.props[name] = value
def log (self, msg, level="info", flag=None, key = ''):
"""
The log function with aggregation name, process id and tag integrated.
@params:
`msg`: The message to log
`level`: The log level
`flag`: The flag
`key`: The type of messages
"""
if flag is None:
flag = level
flag = flag.upper().rjust(7)
flag = "[%s]" % flag
title = self._name()
func = getattr(self.logger, level)
maxline = proc.LOG_NLINE[key]
prevlog = self.lognline['prevlog']
if key == prevlog:
if self.lognline[key] < abs(maxline):
func ("%s %s: %s" % (flag, title, msg))
else:
n_omit = self.lognline[prevlog] - abs(proc.LOG_NLINE[prevlog])
if n_omit > 0 and proc.LOG_NLINE[prevlog] < 0:
logname = 'logs' if n_omit > 1 else 'log'
maxinfo = ' (%s, max=%s)' % (prevlog, abs(proc.LOG_NLINE[prevlog])) if prevlog else ''
self.logger.debug ("[ DEBUG] %s: ... and %s %s omitted%s." % (title, n_omit, logname, maxinfo))
self.lognline[prevlog] = 0
if self.lognline[key] < abs(maxline):
func ("%s %s: %s" % (flag, title, msg))
self.lognline['prevlog'] = key
self.lognline[key] += 1
def copy (self, tag=None, newid=None):
"""
Copy a process
@params:
`newid`: The new id of the process, default: `None` (use the varname)
`tag`: The tag of the new process, default: `None` (used the old one)
@returns:
The new process
"""
newproc = proc (tag if tag is not None else self.tag)
config = {key:val for key, val in self.config.iteritems() if key not in ['tag', 'workdir', 'aggr']}
config['tag'] = newproc.tag
config['aggr'] = ''
config['workdir'] = ''
props = {key:val for key, val in self.props.iteritems() if key not in ['cached', 'procvars', 'ncjobids', 'sets', 'channel', 'jobs', 'depends', 'nexts', 'tag', 'workdir', 'id', 'args']}
props['cached'] = True
props['procvars'] = {}
props['channel'] = channel.create()
props['depends'] = []
props['nexts'] = []
props['jobs'] = []
props['ncjobids'] = []
props['sets'] = []
props['workdir'] = ''
props['args'] = pycopy.copy(self.props['args'])
props['id'] = utils.varname(r'\w+\.' + self.copy.__name__, 3) if newid is None else newid
newproc.__dict__['config'].update(config)
newproc.__dict__['props'].update(props)
return newproc
def _suffix (self):
"""
		Calculate a uid for the process according to the configuration
@returns:
The uid
"""
if self.suffix:
return self.suffix
config = { key:val for key, val in self.config.iteritems() if key not in ['workdir', 'forks', 'cache', 'retcodes', 'echo', 'runner', 'exportdir', 'exporthow', 'exportow', 'errorhow', 'errorntry'] or key.endswith ('Runner') }
config['id'] = self.id
config['tag'] = self.tag
if config.has_key ('callback'):
config['callback'] = utils.funcsig(config['callback'])
# proc is not picklable
if config.has_key('depends'):
depends = config['depends']
pickable_depends = []
if isinstance(depends, proc):
depends = [depends]
elif isinstance(depends, aggr):
depends = depends.procs
for depend in depends:
pickable_depends.append(depend.id + '.' + depend.tag)
config['depends'] = pickable_depends
# lambda not pickable
if config.has_key ('input') and isinstance(config['input'], dict):
config['input'] = pycopy.copy(config['input'])
for key, val in config['input'].iteritems():
config['input'][key] = utils.funcsig(val) if callable(val) else val
signature = pickle.dumps(str(config))
self.props['suffix'] = utils.uid(signature)
return self.suffix
def _tidyBeforeRun (self):
"""
Do some preparation before running jobs
"""
self._buildProps ()
self._buildInput ()
self._buildProcVars ()
self._buildJobs ()
def _tidyAfterRun (self):
"""
Do some cleaning after running jobs
"""
failedjobs = []
for i in self.ncjobids:
job = self.jobs[i]
if not job.succeed():
failedjobs.append (job)
if not failedjobs:
self.log ('Successful jobs: ALL', 'debug')
if callable (self.callback):
self.log('Calling callback ...', 'debug')
self.callback (self)
else:
failedjobs[0].showError (len(failedjobs))
if self.errorhow != 'ignore':
sys.exit (1) # don't go further
def _name (self, incAggr = True):
"""
		Get my name, including `aggr`, `id`, `tag`
@returns:
the name
"""
aggrName = "@%s" % self.aggr if self.aggr and incAggr else ""
tag = ".%s" % self.tag if self.tag != "notag" else ""
return "%s%s%s" % (self.id, tag, aggrName)
def run (self, config = None):
"""
Run the jobs with a configuration
@params:
`config`: The configuration
"""
timer = time()
if config is None:
config = {}
self.logger.info ('[ START] ' + utils.padBoth(' ' + self._name() + ' ', 80, '-'))
# log the dependencies
self.log ("%s => %s => %s" % ([p._name() for p in self.depends] if self.depends else "START", self._name(), [p._name() for p in self.nexts] if self.nexts else "END"), "info", "depends")
self._readConfig (config)
self._tidyBeforeRun ()
if self._runCmd('beforeCmd') != 0:
raise Exception ('Failed to run beforeCmd: %s' % self.beforeCmd)
if not self._isCached():
# I am not cached, touch the input of my nexts?
			# but my nexts are not initialized, how? | # set cached to False, then my nexts will access it
self.props['cached'] = False
self.log (self.workdir, 'info', 'RUNNING')
self._runJobs()
if self._runCmd('afterCmd') != 0: | random_line_split |
|
proc.py | '):
depends = config['depends']
pickable_depends = []
if isinstance(depends, proc):
depends = [depends]
elif isinstance(depends, aggr):
depends = depends.procs
for depend in depends:
pickable_depends.append(depend.id + '.' + depend.tag)
config['depends'] = pickable_depends
# lambda not pickable
if config.has_key ('input') and isinstance(config['input'], dict):
config['input'] = pycopy.copy(config['input'])
for key, val in config['input'].iteritems():
config['input'][key] = utils.funcsig(val) if callable(val) else val
signature = pickle.dumps(str(config))
self.props['suffix'] = utils.uid(signature)
return self.suffix
def _tidyBeforeRun (self):
"""
Do some preparation before running jobs
"""
self._buildProps ()
self._buildInput ()
self._buildProcVars ()
self._buildJobs ()
def _tidyAfterRun (self):
"""
Do some cleaning after running jobs
"""
failedjobs = []
for i in self.ncjobids:
job = self.jobs[i]
if not job.succeed():
failedjobs.append (job)
if not failedjobs:
self.log ('Successful jobs: ALL', 'debug')
if callable (self.callback):
self.log('Calling callback ...', 'debug')
self.callback (self)
else:
failedjobs[0].showError (len(failedjobs))
if self.errorhow != 'ignore':
sys.exit (1) # don't go further
def _name (self, incAggr = True):
"""
		Get my name, including `aggr`, `id`, `tag`
@returns:
the name
"""
aggrName = "@%s" % self.aggr if self.aggr and incAggr else ""
tag = ".%s" % self.tag if self.tag != "notag" else ""
return "%s%s%s" % (self.id, tag, aggrName)
def run (self, config = None):
"""
Run the jobs with a configuration
@params:
`config`: The configuration
"""
timer = time()
if config is None:
config = {}
self.logger.info ('[ START] ' + utils.padBoth(' ' + self._name() + ' ', 80, '-'))
# log the dependencies
self.log ("%s => %s => %s" % ([p._name() for p in self.depends] if self.depends else "START", self._name(), [p._name() for p in self.nexts] if self.nexts else "END"), "info", "depends")
self._readConfig (config)
self._tidyBeforeRun ()
if self._runCmd('beforeCmd') != 0:
raise Exception ('Failed to run beforeCmd: %s' % self.beforeCmd)
if not self._isCached():
# I am not cached, touch the input of my nexts?
			# but my nexts are not initialized, how?
# set cached to False, then my nexts will access it
self.props['cached'] = False
self.log (self.workdir, 'info', 'RUNNING')
self._runJobs()
if self._runCmd('afterCmd') != 0:
raise Exception ('Failed to run afterCmd: %s' % self.afterCmd)
self._tidyAfterRun ()
self.log ('Done (time: %s).' % utils.formatTime(time() - timer), 'info')
def _buildProps (self):
"""
Compute some properties
"""
if isinstance (self.retcodes, int):
self.props['retcodes'] = [self.retcodes]
if isinstance (self.retcodes, str):
self.props['retcodes'] = [int(i) for i in self.retcodes.split(',')]
key = self._name(False)
if key in proc.PROCS and proc.PROCS[key] != self:
raise Exception ('A proc with id "%s" and tag "%s" already exists.' % (self.id, self.tag))
proc.PROCS[key] = self
if not 'workdir' in self.sets and not self.workdir:
self.props['workdir'] = os.path.join(self.ppldir, "PyPPL.%s.%s.%s" % (self.id, self.tag, self._suffix()))
if not os.path.exists (self.workdir):
os.makedirs (self.workdir)
def _buildInput (self):
"""
Build the input data
Input could be:
1. list: ['input', 'infile:file'] <=> ['input:var', 'infile:path']
2. str : "input, infile:file" <=> input:var, infile:path
3. dict: {"input": channel1, "infile:file": channel2}
or {"input:var, input:file" : channel3}
		for 1 and 2, the channel will be the combined channel from the dependents; if there are no dependents, it will be sys.argv[1:]
"""
indata = self.config['input']
if not isinstance (indata, dict):
indata = ','.join(utils.alwaysList (indata))
depdchan = channel.fromChannels (*[d.channel for d in self.depends])
indata = {indata: depdchan if self.depends else channel.fromArgv()}
# expand to one key-channel pairs
for inkeys, invals in indata.iteritems():
keys = utils.split(inkeys, ',')
if callable (invals):
vals = invals (*[d.channel.copy() for d in self.depends] if self.depends else channel.fromArgv())
vals = vals.split()
elif isinstance (invals, basestring): # only for files: "/a/b/*.txt, /a/c/*.txt"
vals = utils.split(invals, ',')
elif isinstance (invals, channel):
vals = invals.split()
elif isinstance (invals, list):
vals = channel.create(invals).split()
else:
raise ValueError ("%s: Unexpected values for input. Expect dict, list, str, channel, callable." % self._name())
width = len (vals)
if len (keys) > width:
				raise ValueError ('%s: Not enough data for input variables.\nVariables: %s\nData: %s' % (self._name(), keys, vals))
for i, key in enumerate(keys):
intype = key.split(':')[-1]
thekey = key.split(':')[0]
val = vals[i].toList() #if isinstance(vals[i], channel) else vals[i]
if intype not in proc.IN_VARTYPE + proc.IN_FILESTYPE + proc.IN_FILETYPE:
intype = proc.IN_VARTYPE[0]
if intype in proc.IN_FILESTYPE:
for x, v in enumerate(val):
if isinstance (v, basestring):
val[x] = channel.fromPath (v).toList()
if self.length == 0:
self.props['length'] = len (val)
if self.length != len (val):
raise ValueError ('%s: Expect same lengths for input channels, but got %s and %s (keys: %s).' % (self._name(), self.length, len (val), key))
self.props['indata'][thekey] = {
'type': intype,
'data': val
}
self.props['jobs'] = [None] * self.length
def _buildProcVars (self):
"""
		Build the proc.* variables (procvars) from the proc properties, which are mostly scalar values
"""
alias = {val:key for key, val in proc.ALIAS.iteritems()}
for prop in sorted(self.props.keys()):
| val = self.props[prop]
if not prop in ['id', 'tag', 'tmpdir', 'forks', 'cache', 'workdir', 'echo', 'runner',
'errorhow', 'errorntry', 'defaultSh', 'exportdir', 'exporthow', 'exportow',
'indir', 'outdir', 'length', 'args']:
continue
if prop == 'args':
self.props['procvars']['proc.args'] = val
for k, v in val.iteritems():
self.props['procvars']['proc.args.' + k] = v
self.log('%s => %s' % (k, v), 'info', 'p.args')
else:
self.props['procvars']['proc.' + prop] = val
if alias.has_key (prop):
self.props['procvars']['proc.' + alias[prop]] = val
self.log ('%s (%s) => %s' % (prop, alias[prop], val), 'info', 'p.props')
else:
self.log ('%s => %s' % (prop, val), 'info', 'p.props') | conditional_block |
|
madlibs.py | 1) * 10)
msg.reply("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
log.info("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
if state['options']['hidesentence']:
poslist = []
for idx in state['textshape']:
poslist.append(state['doc'][idx].pos_)
text = "Hidden sentence! Give me: "
text += ", ".join(poslist)
else:
text = state['text']
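    # In hidesentence mode the channel sees only the POS slots, e.g. a prompt like
    # "Hidden sentence! Give me: NOUN, VERB, ADJ" (made-up example for illustration).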
msg.reply(text)
log.info(text)
msg.reply("Entries should be of the form " + underline +
"word word ..." + underline)
msg.reply("--> Send your entries to me VIA MESSAGE, you have " +\
"{} seconds".format(entrytime)
)
t = threading.Timer(
entrytime,
voteround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
entrytime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
if not state['options']['botplays']:
return
t3 = threading.Thread(
target=botentry,
args=(msg, state)
)
t3.start()
state['threads'][t3.ident] = t3
def processentry(msg, state):
"Process a submitted Mad Lib word list entry."
try:
if msg.text.strip().lower() == "!skip":
state['skippers'].add(msg.nick)
if len(state['skippers']) == 3:
msg.reply("OK, you don't like that one! " +\
bold + "Restarting round.")
killgame(state, reset=False)
                state['round'] -= 0.5
startround(msg, state)
if msg.sender[0] == '#':
# ignore public statements other than !skip
return
entry = msg.text.strip()
words = [x.strip() for x in entry.split()]
# search for stopwords
stopwords = [x for x in words \
if x.lower() in state['options']['stopwords']]
if stopwords:
msg.reply("Entry " + bold + "rejected" + bold +\
", stopword(s) found: " + ", ".join(stopwords)
)
return
if len(words) == len(state['textshape']):
resp = "Entry accepted."
# remove any previous entry
for ent in state['entries']:
if ent[0] == msg.nick:
state['entries'].remove(ent)
resp = "Entry changed."
break
state['entries'].append((msg.nick, words, 0))
log.info("{0} entry: {1}".format(msg.nick, ", ".join(words)))
state['votes'][msg.nick] = -1
msg.reply(resp)
else:
msg.reply("Entry " + bold + "rejected" + bold +\
", expected {1} words and got {0}".format(
len(words), len(state['textshape'])
))
except Exception as e:
msg.reply("Entry " + bold + "rejected" + bold + \
", unexpected error")
log.error(str(e))
@gamethread
def botentry(msg, state):
"""Generate a response based on the original text.
Warning, may take 30-60s to complete. Do not set entrytime
very low!"""
if 'words' not in state:
# expensive initialization, do ALAP
log.info("Loading word corpus...")
state['words'] = [w for w in nlp.nlp.vocab if w.has_vector]
#cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
entry = []
for t in state['textshape']:
log.debug("Searching for replacement for {0} ({1})".format(
state['doc'][t], state['doc'][t].pos_
))
try:
state['words'].sort(key=lambda w:
w.similarity(state['doc'][t]),
reverse=True
)
#cosine(w.vector, state['doc'][t].vector)
            #state['words'].reverse  # no-op: attribute access without a call; the sort above already puts the most-similar words first
except TypeError:
# perhaps our word lacks a vector?
pass
if state['options']['matchpos']:
sent = [x.string for x in list(state['doc'])]
pos = state['doc'][t].pos_
for ctr in range(10):
# TODO: Parametrize the bounds on random here
newword = state['words'][random.randint(50, 500)]
log.debug("Trying " + newword.orth_.lower())
sent[t] = newword.orth_.lower() + " "
newsent = nlp.nlp("".join(sent))
if newsent[t].pos_ == pos:
break
entry.append(newword.orth_.lower())
log.debug("Word found: {0} ({1})".format(
entry[-1], newsent[t].pos_
))
else:
entry.append(
state['words'][random.randint(50, 500)].orth_.lower()
)
log.debug("Word found: " + entry[-1])
log.info("Bot enters: " + ", ".join(entry))
state['entries'].append((config.nick, entry, 0))
# no entry in state['votes']
@gamethread
def voteround(msg, state):
"Start the voting portion of a Mad Libs round."
state['round'] += 0.5
if len(state['entries']) == 0 \
or (state['options']['botplays'] and \
len(state['entries']) == 1):
msg.reply(bold + "ACHTUNG! No entries received! Ending game.")
killgame(state)
return
# give 10s more vote time for >3 entries
votetime = int(state['options']['votetime'] + \
(len(state['entries']) - 3) * 10)
random.shuffle(state['entries'])
msg.reply("======= Entries Received =======")
for num, ent in enumerate(state['entries'], start=1):
doc = [x.string for x in list(state['doc'])]
# substitute words keeping original trailing whitespace
for idx, word in enumerate(ent[1]):
wordidx = state['textshape'][idx]
doc[wordidx] = bold + word + bold + \
state['doc'][wordidx].whitespace_
text = "".join(doc)
msg.reply("Entry {0}: {1}".format(num, text))
msg.reply("======= Voting Time!!!!! =======")
msg.reply("Send your vote (number) to me VIA MESSAGE, you have " +
"{} seconds".format(votetime)
)
t = threading.Timer(
votetime,
endround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
votetime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
def processvote(msg, state):
"Process a vote for a Mad Libs entry."
try:
if msg.sender[0] == '#':
# ignore public statements
return
# Entries are numbered from 1, list is numbered from 0
voted = int(msg.text) - 1
if voted >= len(state['entries']) or voted < 0:
raise ValueError
if msg.sender == state['entries'][voted][0]:
msg.reply("You cannot vote for yourself!")
return
if state['votes'][msg.sender] == -1:
msg.reply("Vote accepted.")
else:
msg.reply("Vote changed.")
state['votes'][msg.sender] = voted
log.info("{0} voting for {1}".format(msg.sender,
state['entries'][voted][0]))
except Exception as e:
msg.reply("Vote " + bold + "rejected" + bold + \
", unexpected error"
)
log.error(str(e))
@gamethread
def endround(msg, state):
| "End a round of Mad Libs."
state['round'] += 0.25
state['doc'] = None
state['text'] = ""
state['textshape'] = []
shame = []
for nick, vote in state['votes'].items():
if vote == -1:
shame.append(nick)
else:
ent = state['entries'][vote]
state['entries'][vote] = ( ent[0], ent[1], ent[2]+1 )
msg.reply("======= Voting Results =======")
log.info("======= Voting Results =======")
for num, ent in enumerate(state['entries']):
msg.reply("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2] | identifier_body |
|
madlibs.py | t2
if not state['options']['botplays']:
return
t3 = threading.Thread(
target=botentry,
args=(msg, state)
)
t3.start()
state['threads'][t3.ident] = t3
def processentry(msg, state):
"Process a submitted Mad Lib word list entry."
try:
if msg.text.strip().lower() == "!skip":
state['skippers'].add(msg.nick)
if len(state['skippers']) == 3:
msg.reply("OK, you don't like that one! " +\
bold + "Restarting round.")
killgame(state, reset=False)
                state['round'] -= 0.5
startround(msg, state)
if msg.sender[0] == '#':
# ignore public statements other than !skip
return
entry = msg.text.strip()
words = [x.strip() for x in entry.split()]
# search for stopwords
stopwords = [x for x in words \
if x.lower() in state['options']['stopwords']]
if stopwords:
msg.reply("Entry " + bold + "rejected" + bold +\
", stopword(s) found: " + ", ".join(stopwords)
)
return
if len(words) == len(state['textshape']):
resp = "Entry accepted."
# remove any previous entry
for ent in state['entries']:
if ent[0] == msg.nick:
state['entries'].remove(ent)
resp = "Entry changed."
break
state['entries'].append((msg.nick, words, 0))
log.info("{0} entry: {1}".format(msg.nick, ", ".join(words)))
state['votes'][msg.nick] = -1
msg.reply(resp)
else:
msg.reply("Entry " + bold + "rejected" + bold +\
", expected {1} words and got {0}".format(
len(words), len(state['textshape'])
))
except Exception as e:
msg.reply("Entry " + bold + "rejected" + bold + \
", unexpected error")
log.error(str(e))
@gamethread
def botentry(msg, state):
"""Generate a response based on the original text.
Warning, may take 30-60s to complete. Do not set entrytime
very low!"""
if 'words' not in state:
# expensive initialization, do ALAP
log.info("Loading word corpus...")
state['words'] = [w for w in nlp.nlp.vocab if w.has_vector]
#cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
entry = []
for t in state['textshape']:
log.debug("Searching for replacement for {0} ({1})".format(
state['doc'][t], state['doc'][t].pos_
))
try:
state['words'].sort(key=lambda w:
w.similarity(state['doc'][t]),
reverse=True
)
#cosine(w.vector, state['doc'][t].vector)
            #state['words'].reverse  # no-op: attribute access without a call; the sort above already puts the most-similar words first
except TypeError:
# perhaps our word lacks a vector?
pass
if state['options']['matchpos']:
sent = [x.string for x in list(state['doc'])]
pos = state['doc'][t].pos_
for ctr in range(10):
# TODO: Parametrize the bounds on random here
newword = state['words'][random.randint(50, 500)]
log.debug("Trying " + newword.orth_.lower())
sent[t] = newword.orth_.lower() + " "
newsent = nlp.nlp("".join(sent))
if newsent[t].pos_ == pos:
break
entry.append(newword.orth_.lower())
log.debug("Word found: {0} ({1})".format(
entry[-1], newsent[t].pos_
))
else:
entry.append(
state['words'][random.randint(50, 500)].orth_.lower()
)
log.debug("Word found: " + entry[-1])
log.info("Bot enters: " + ", ".join(entry))
state['entries'].append((config.nick, entry, 0))
# no entry in state['votes']
@gamethread
def voteround(msg, state):
"Start the voting portion of a Mad Libs round."
state['round'] += 0.5
if len(state['entries']) == 0 \
or (state['options']['botplays'] and \
len(state['entries']) == 1):
msg.reply(bold + "ACHTUNG! No entries received! Ending game.")
killgame(state)
return
# give 10s more vote time for >3 entries
votetime = int(state['options']['votetime'] + \
(len(state['entries']) - 3) * 10)
random.shuffle(state['entries'])
msg.reply("======= Entries Received =======")
for num, ent in enumerate(state['entries'], start=1):
doc = [x.string for x in list(state['doc'])]
# substitute words keeping original trailing whitespace
for idx, word in enumerate(ent[1]):
wordidx = state['textshape'][idx]
doc[wordidx] = bold + word + bold + \
state['doc'][wordidx].whitespace_
text = "".join(doc)
msg.reply("Entry {0}: {1}".format(num, text))
msg.reply("======= Voting Time!!!!! =======")
msg.reply("Send your vote (number) to me VIA MESSAGE, you have " +
"{} seconds".format(votetime)
)
t = threading.Timer(
votetime,
endround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
votetime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
def processvote(msg, state):
"Process a vote for a Mad Libs entry."
try:
if msg.sender[0] == '#':
# ignore public statements
return
# Entries are numbered from 1, list is numbered from 0
voted = int(msg.text) - 1
if voted >= len(state['entries']) or voted < 0:
raise ValueError
if msg.sender == state['entries'][voted][0]:
msg.reply("You cannot vote for yourself!")
return
if state['votes'][msg.sender] == -1:
msg.reply("Vote accepted.")
else:
msg.reply("Vote changed.")
state['votes'][msg.sender] = voted
log.info("{0} voting for {1}".format(msg.sender,
state['entries'][voted][0]))
except Exception as e:
msg.reply("Vote " + bold + "rejected" + bold + \
", unexpected error"
)
log.error(str(e))
@gamethread
def endround(msg, state):
"End a round of Mad Libs."
state['round'] += 0.25
state['doc'] = None
state['text'] = ""
state['textshape'] = []
shame = []
for nick, vote in state['votes'].items():
if vote == -1:
shame.append(nick)
else:
ent = state['entries'][vote]
state['entries'][vote] = ( ent[0], ent[1], ent[2]+1 )
msg.reply("======= Voting Results =======")
log.info("======= Voting Results =======")
for num, ent in enumerate(state['entries']):
msg.reply("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2]
))
log.info("Entry {0}: {1}: {2} => {3}".format(
num+1, ent[0], ", ".join(ent[1]), ent[2]
))
state['scores'][ent[0]] += ent[2]
if state['options']['shame'] and shame:
msg.reply("These users did not vote: " +
", ".join(shame)
)
log.debug("Scores so far: " + str(state['scores']))
if state['round'] > state['options']['numrounds']:
endgame(msg, state)
else:
msg.reply("Round {0}/{1} starts in {2} seconds.".format(
int(ceil(state['round'])),
state['options']['numrounds'],
state['options']['intertime']
))
t = threading.Timer(
state['options']['intertime'],
startround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
def endgame(msg, state):
"End a game of Mad Libs."
slist = sorted(iter(state['scores'].items()),
key=lambda k: k[1],
reverse=True
)
| winners = [slist[0]]
for player in slist[1:]: | random_line_split |
|
madlibs.py | pus open failed: " + str(e))
killgame(state)
# give 10s more time for each add'l 80-char line
entrytime = int(state['options']['entrytime'] + \
(floor(len(state['text']) / 80) - 1) * 10)
msg.reply("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
log.info("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
if state['options']['hidesentence']:
poslist = []
for idx in state['textshape']:
poslist.append(state['doc'][idx].pos_)
text = "Hidden sentence! Give me: "
text += ", ".join(poslist)
else:
text = state['text']
msg.reply(text)
log.info(text)
msg.reply("Entries should be of the form " + underline +
"word word ..." + underline)
msg.reply("--> Send your entries to me VIA MESSAGE, you have " +\
"{} seconds".format(entrytime)
)
t = threading.Timer(
entrytime,
voteround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
entrytime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
if not state['options']['botplays']:
return
t3 = threading.Thread(
target=botentry,
args=(msg, state)
)
t3.start()
state['threads'][t3.ident] = t3
def processentry(msg, state):
"Process a submitted Mad Lib word list entry."
try:
if msg.text.strip().lower() == "!skip":
state['skippers'].add(msg.nick)
if len(state['skippers']) == 3:
msg.reply("OK, you don't like that one! " +\
bold + "Restarting round.")
killgame(state, reset=False)
                state['round'] -= 0.5
startround(msg, state)
if msg.sender[0] == '#':
# ignore public statements other than !skip
return
entry = msg.text.strip()
words = [x.strip() for x in entry.split()]
# search for stopwords
stopwords = [x for x in words \
if x.lower() in state['options']['stopwords']]
if stopwords:
msg.reply("Entry " + bold + "rejected" + bold +\
", stopword(s) found: " + ", ".join(stopwords)
)
return
if len(words) == len(state['textshape']):
resp = "Entry accepted."
# remove any previous entry
for ent in state['entries']:
if ent[0] == msg.nick:
state['entries'].remove(ent)
resp = "Entry changed."
break
state['entries'].append((msg.nick, words, 0))
log.info("{0} entry: {1}".format(msg.nick, ", ".join(words)))
state['votes'][msg.nick] = -1
msg.reply(resp)
else:
msg.reply("Entry " + bold + "rejected" + bold +\
", expected {1} words and got {0}".format(
len(words), len(state['textshape'])
))
except Exception as e:
msg.reply("Entry " + bold + "rejected" + bold + \
", unexpected error")
log.error(str(e))
@gamethread
def | (msg, state):
"""Generate a response based on the original text.
Warning, may take 30-60s to complete. Do not set entrytime
very low!"""
if 'words' not in state:
# expensive initialization, do ALAP
log.info("Loading word corpus...")
state['words'] = [w for w in nlp.nlp.vocab if w.has_vector]
#cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
entry = []
for t in state['textshape']:
log.debug("Searching for replacement for {0} ({1})".format(
state['doc'][t], state['doc'][t].pos_
))
try:
state['words'].sort(key=lambda w:
w.similarity(state['doc'][t]),
reverse=True
)
#cosine(w.vector, state['doc'][t].vector)
            #state['words'].reverse  # no-op: attribute access without a call; the sort above already puts the most-similar words first
except TypeError:
# perhaps our word lacks a vector?
pass
if state['options']['matchpos']:
sent = [x.string for x in list(state['doc'])]
pos = state['doc'][t].pos_
for ctr in range(10):
# TODO: Parametrize the bounds on random here
newword = state['words'][random.randint(50, 500)]
log.debug("Trying " + newword.orth_.lower())
sent[t] = newword.orth_.lower() + " "
newsent = nlp.nlp("".join(sent))
if newsent[t].pos_ == pos:
break
entry.append(newword.orth_.lower())
log.debug("Word found: {0} ({1})".format(
entry[-1], newsent[t].pos_
))
else:
entry.append(
state['words'][random.randint(50, 500)].orth_.lower()
)
log.debug("Word found: " + entry[-1])
log.info("Bot enters: " + ", ".join(entry))
state['entries'].append((config.nick, entry, 0))
# no entry in state['votes']
@gamethread
def voteround(msg, state):
"Start the voting portion of a Mad Libs round."
state['round'] += 0.5
if len(state['entries']) == 0 \
or (state['options']['botplays'] and \
len(state['entries']) == 1):
msg.reply(bold + "ACHTUNG! No entries received! Ending game.")
killgame(state)
return
# give 10s more vote time for >3 entries
votetime = int(state['options']['votetime'] + \
(len(state['entries']) - 3) * 10)
random.shuffle(state['entries'])
msg.reply("======= Entries Received =======")
for num, ent in enumerate(state['entries'], start=1):
doc = [x.string for x in list(state['doc'])]
# substitute words keeping original trailing whitespace
for idx, word in enumerate(ent[1]):
wordidx = state['textshape'][idx]
doc[wordidx] = bold + word + bold + \
state['doc'][wordidx].whitespace_
text = "".join(doc)
msg.reply("Entry {0}: {1}".format(num, text))
msg.reply("======= Voting Time!!!!! =======")
msg.reply("Send your vote (number) to me VIA MESSAGE, you have " +
"{} seconds".format(votetime)
)
t = threading.Timer(
votetime,
endround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
votetime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
def processvote(msg, state):
"Process a vote for a Mad Libs entry."
try:
if msg.sender[0] == '#':
# ignore public statements
return
# Entries are numbered from 1, list is numbered from 0
voted = int(msg.text) - 1
if voted >= len(state['entries']) or voted < 0:
raise ValueError
if msg.sender == state['entries'][voted][0]:
msg.reply("You cannot vote for yourself!")
return
if state['votes'][msg.sender] == -1:
msg.reply("Vote accepted.")
else:
msg.reply("Vote changed.")
state['votes'][msg.sender] = voted
log.info("{0} voting for {1}".format(msg.sender,
state['entries'][voted][0]))
except Exception as e:
msg.reply("Vote " + bold + "rejected" + bold + \
", unexpected error"
)
log.error(str(e))
@gamethread
def endround(msg, state):
"End a round of Mad Libs."
state['round'] += 0.25
state['doc'] = None
state['text'] = ""
state['textshape'] = []
shame = []
for nick, vote in state['votes'].items():
if vote == -1:
shame.append(nick)
else:
ent = state['entries'][vote]
state['entries'][vote] = ( ent[0], ent[1], ent[2]+1 )
msg.reply("======= Voting Results =======")
| botentry | identifier_name |
madlibs.py |
else:
name = state['options']['corpus']
if state['options']['corporaset'] == "None":
set = None
else:
set = state['options']['corporaset']
# will raise IOError if corpus invalid
if name:
state['corpus'] = nlp.corpus(set=set, name=name)
else:
state['corpus'] = nlp.random_corpus(set=set)
try:
line = nlp.random_line(state['corpus'])
except UnicodeDecodeError:
        state['corpus'] = None
doc = nlp.nlp(line)
# truncate line if too long
maxlen = state['options']['linemaxlen']
if len(line) > maxlen:
line = ""
for span in doc.sents:
sent = ''.join(doc[i].string for i in range(
span.start, span.end
)).strip()
if len(line) + len(sent) > maxlen:
break
line += sent + " "
doc = nlp.nlp(line)
ddict = defaultdict(list)
for (index, token) in enumerate(doc):
if token.pos_ in ['ADJ', 'ADV', 'NOUN', 'VERB']:
ddict[token].append(index)
slist = sorted(ddict, key=lambda t: t.prob)
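    # token.prob is spaCy's log-probability, so sorting ascending puts the rarest
    # ADJ/ADV/NOUN/VERB tokens first; those are the slots blanked out for players below.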
# build list of tokens+whitespace from parsed output
words = map(lambda x: x.string, list(doc))
# 2 subs + 1 more per word wrap line
limit = min(len(line) / 80 + 2, 6)
slots = []
for t in slist[:limit]:
for ctr in ddict[t]:
words[ctr] = underline + u" " + t.pos_ + " " +\
underline + t.whitespace_
slots.append(ctr)
slots.sort()
state['doc'] = doc
state['text'] = "".join(words)
state['textshape'] = slots
@gamethread
def warntime(msg, state):
msg.reply(bold + "*** {} second warning! ***".format(
state['options']['warntime']) + bold
)
@gamethread
def startround(msg, state):
"Start a round of Mad Libs. "
state['round'] += 0.25
state['votes'] = { k: -1 for k, v in state['votes'].items() }
state['entries'] = []
state['skippers'] = set()
try:
generate_madlib(state)
except IOError as e:
msg.reply("Unable to locate corpus. Aborting game.")
log.error("Corpus open failed: " + str(e))
killgame(state)
# give 10s more time for each add'l 80-char line
entrytime = int(state['options']['entrytime'] + \
(floor(len(state['text']) / 80) - 1) * 10)
msg.reply("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
log.info("======= Starting Round {0}/{1} =======".format(
int(state['round']), state['options']['numrounds']
))
if state['options']['hidesentence']:
poslist = []
for idx in state['textshape']:
poslist.append(state['doc'][idx].pos_)
text = "Hidden sentence! Give me: "
text += ", ".join(poslist)
else:
text = state['text']
msg.reply(text)
log.info(text)
msg.reply("Entries should be of the form " + underline +
"word word ..." + underline)
msg.reply("--> Send your entries to me VIA MESSAGE, you have " +\
"{} seconds".format(entrytime)
)
t = threading.Timer(
entrytime,
voteround,
args=(msg, state)
)
t.start()
state['threads'][t.ident] = t
t2 = threading.Timer(
entrytime - state['options']['warntime'],
warntime,
args=(msg, state)
)
t2.start()
state['threads'][t2.ident] = t2
if not state['options']['botplays']:
return
t3 = threading.Thread(
target=botentry,
args=(msg, state)
)
t3.start()
state['threads'][t3.ident] = t3
def processentry(msg, state):
"Process a submitted Mad Lib word list entry."
try:
if msg.text.strip().lower() == "!skip":
state['skippers'].add(msg.nick)
if len(state['skippers']) == 3:
msg.reply("OK, you don't like that one! " +\
bold + "Restarting round.")
killgame(state, reset=False)
                state['round'] -= 0.5
startround(msg, state)
if msg.sender[0] == '#':
# ignore public statements other than !skip
return
entry = msg.text.strip()
words = [x.strip() for x in entry.split()]
# search for stopwords
stopwords = [x for x in words \
if x.lower() in state['options']['stopwords']]
if stopwords:
msg.reply("Entry " + bold + "rejected" + bold +\
", stopword(s) found: " + ", ".join(stopwords)
)
return
if len(words) == len(state['textshape']):
resp = "Entry accepted."
# remove any previous entry
for ent in state['entries']:
if ent[0] == msg.nick:
state['entries'].remove(ent)
resp = "Entry changed."
break
state['entries'].append((msg.nick, words, 0))
log.info("{0} entry: {1}".format(msg.nick, ", ".join(words)))
state['votes'][msg.nick] = -1
msg.reply(resp)
else:
msg.reply("Entry " + bold + "rejected" + bold +\
", expected {1} words and got {0}".format(
len(words), len(state['textshape'])
))
except Exception as e:
msg.reply("Entry " + bold + "rejected" + bold + \
", unexpected error")
log.error(str(e))
@gamethread
def botentry(msg, state):
"""Generate a response based on the original text.
Warning, may take 30-60s to complete. Do not set entrytime
very low!"""
if 'words' not in state:
# expensive initialization, do ALAP
log.info("Loading word corpus...")
state['words'] = [w for w in nlp.nlp.vocab if w.has_vector]
#cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
entry = []
for t in state['textshape']:
log.debug("Searching for replacement for {0} ({1})".format(
state['doc'][t], state['doc'][t].pos_
))
try:
state['words'].sort(key=lambda w:
w.similarity(state['doc'][t]),
reverse=True
)
#cosine(w.vector, state['doc'][t].vector)
            #state['words'].reverse  # no-op: attribute access without a call; the sort above already puts the most-similar words first
except TypeError:
# perhaps our word lacks a vector?
pass
if state['options']['matchpos']:
sent = [x.string for x in list(state['doc'])]
pos = state['doc'][t].pos_
for ctr in range(10):
# TODO: Parametrize the bounds on random here
newword = state['words'][random.randint(50, 500)]
log.debug("Trying " + newword.orth_.lower())
sent[t] = newword.orth_.lower() + " "
newsent = nlp.nlp("".join(sent))
if newsent[t].pos_ == pos:
break
entry.append(newword.orth_.lower())
log.debug("Word found: {0} ({1})".format(
entry[-1], newsent[t].pos_
))
else:
entry.append(
state['words'][random.randint(50, 500)].orth_.lower()
)
log.debug("Word found: " + entry[-1])
log.info("Bot enters: " + ", ".join(entry))
state['entries'].append((config.nick, entry, 0))
# no entry in state['votes']
@gamethread
def voteround(msg, state):
"Start the voting portion of a Mad Libs round."
state['round'] += 0.5
if len(state['entries']) == 0 \
or (state['options']['botplays'] and \
len(state['entries']) == 1):
msg.reply(bold + "ACHTUNG! No entries received! Ending game.")
killgame(state)
return
# give 10s more vote time for >3 entries
votetime = int(state['options']['votetime'] + \
(len(state['entries']) - 3) * 10)
random.shuffle(state['entries'])
msg.reply("======= | name = None | conditional_block |
|
gru_model.py | "]
unique_intent = list(set(intent))
sentences = list(df["Sentence"])
return (intent, unique_intent, sentences)
intent, unique_intent, sentences = load_dataset("Dataset.csv")
intent
sentences
print(sentences[:10])
nltk.download("stopwords")
nltk.download("punkt")
#define stemmer
stemmer = LancasterStemmer()
"""# 3. Data Cleaning"""
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", s)
w = word_tokenize(clean)
        #lowercase tokens (the Lancaster stemmer defined above is not actually applied here)
words.append([i.lower() for i in w])
return words
cleaned_words = cleaning(sentences)
print(len(cleaned_words))
print(cleaned_words[:2])
"""### 3.1 Keras Tokenizer"""
def create_tokenizer(words, filters = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'):
token = Tokenizer(filters = filters)
token.fit_on_texts(words)
return token
def | (words):
return(len(max(words, key = len)))
word_tokenizer = create_tokenizer(cleaned_words)
vocab_size = len(word_tokenizer.word_index) + 1
max_length = max_length(cleaned_words)
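# Note: this rebinds the name max_length from the helper function above to its integer
# result; the function is not called again afterwards, so the shadowing is harmless here.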
print("Vocab Size = %d and Maximum length = %d" % (vocab_size, max_length))
"""### 3.2 One Hot Encoding for Model Fed"""
def encoding_doc(token, words):
return(token.texts_to_sequences(words))
encoded_doc = encoding_doc(word_tokenizer, cleaned_words)
def padding_doc(encoded_doc, max_length):
return(pad_sequences(encoded_doc, maxlen = max_length, padding = "post"))
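# Illustration (made-up numbers): with max_length = 5, an encoded sentence [4, 9] becomes
# [4, 9, 0, 0, 0], because padding = "post" appends zeros after the real tokens.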
padded_doc = padding_doc(encoded_doc, max_length)
padded_doc[:5]
print("Shape of padded docs = ",padded_doc.shape)
#tokenizer with filter changed
output_tokenizer = create_tokenizer(unique_intent, filters = '!"#$%&()*+,-/:;<=>?@[\]^`{|}~')
output_tokenizer.word_index
encoded_output = encoding_doc(output_tokenizer, intent)
encoded_output = np.array(encoded_output).reshape(len(encoded_output), 1)
encoded_output.shape
def one_hot(encode):
o = OneHotEncoder(sparse = False)
return(o.fit_transform(encode))
output_one_hot = one_hot(encoded_output)
output_one_hot.shape
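# Each encoded intent id becomes a one-hot row with a single 1 in the column for that
# intent (e.g. something like [0, 0, 1, 0, ...]); the row width equals the number of
# distinct intent labels seen by the encoder.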
"""# 4. Train and Validation Split"""
from sklearn.model_selection import train_test_split
train_X, val_X, train_Y, val_Y = train_test_split(padded_doc, output_one_hot, shuffle = True, test_size = 0.2)
print("Shape of train_X = %s and train_Y = %s" % (train_X.shape, train_Y.shape))
print("Shape of val_X = %s and val_Y = %s" % (val_X.shape, val_Y.shape))
"""# 5. GRU Modeling"""
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128, return_sequences=True)))
model.add(Bidirectional(LSTM(64)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
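# Note: create_model is defined twice, so only this second version (two stacked
# bidirectional LSTMs) is used below. Despite the "GRU Modeling" heading, both
# definitions use LSTM layers rather than GRU layers, and the Embedding layer is frozen
# (trainable=False) without loading any pretrained weights.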
model = create_model(vocab_size, max_length)
model.compile(loss = "categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
model.summary()
"""# 6. Training"""
filename = 'model.h5'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
hist = model.fit(train_X, train_Y, epochs = 100, batch_size = 32, validation_data = (val_X, val_Y), callbacks = [checkpoint])
loss = pd.DataFrame({'accuracy': model.history.history['accuracy'], 'val_accuracy': model.history.history['val_accuracy']})
loss.plot()
model = load_model("model.h5")
def predictions(text):
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", text)
test_word = word_tokenize(clean)
test_word = [w.lower() for w in test_word]
test_ls = word_tokenizer.texts_to_sequences(test_word)
print(test_word)
#Check for unknown words
if [] in test_ls:
test_ls = list(filter(None, test_ls))
test_ls = np.array(test_ls).reshape(1, len(test_ls))
x = padding_doc(test_ls, max_length)
pred = model.predict_proba(x)
return pred
def get_final_output(pred, classes):
predictions = pred[0]
classes = np.array(classes)
ids = np.argsort(-predictions)
classes = classes[ids]
predictions = -np.sort(-predictions)
for i in range(pred.shape[1]):
print("%s has confidence = %s" % (classes[i], (predictions[i])))
"""# 7. Testing"""
text = "Can you help me?"
pred = predictions(text)
get_final_output(pred, unique_intent)
"""# 8. Save/Load Pickle"""
# from sklearn.externals import joblib
# joblib.dump(model, 'modelnlp.pkl')
# nlp_model = open('modelnlp.pkl','rb')
# nlp = joblib.load(nlp_model)
# !pip install git+https://github.com/TinkerMob/keras_albert_model.git
# from keras_albert_model import build_albert
"""# 9. Experiment with Monkeyzlearn API"""
from monkeylearn import MonkeyLearn
ml = MonkeyLearn('e7e230d51a8668a72eea86c29559bef04bd6c8fb')
data = ["Hi Feco, looks promising, I would like to schedule a call tomorrow and see the demo. What times do you have available? Thanks, Ryan."]
model_id = 'cl_v9GTn7zi'
result = ml.classifiers.classify(model_id, data)
print(result.body)
# !pip install monkeylearn
"""# 10. BERT Model"""
!pip install bert-for-tf2
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import os
from sklearn.metrics import roc_curve
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Embedding, Activation, LSTM, SimpleRNN, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import bert
from tqdm import tqdm
from tensorflow.keras import backend as K
import tensorflow as tf
import tensorflow_hub as hub
print("TensorFlow Version:",tf.__version__)
print("Hub version: ",hub.__version__)
# Params for bert model
class BertModel(object):
def __init__(self):
self.max_len = 128
bert_path = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1"
FullTokenizer=bert.bert_tokenization.FullTokenizer
self.bert_module = hub.KerasLayer(bert_path,trainable=True)
self.vocab_file = self.bert_module.resolved_object.vocab_file.asset_path.numpy()
self.do_lower_case = self.bert_module.resolved_object.do_lower_case.numpy()
self.tokenizer = FullTokenizer(self.vocab_file,self.do_lower_case)
def get_masks(self,tokens, max_seq_length):
return [1]*len(tokens) + [0] * (max_seq_length - len(tokens))
def get_segments(self,tokens, max_seq_length):
"""Segments: 0 for the first sequence, 1 for the second"""
segments = []
current_segment_id = 0
for token in tokens:
segments.append(current_segment_id)
if token == "[SEP]":
current_segment_id = 1
return segments + [0] * (max_seq_length - len(tokens))
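    # Worked example (hypothetical tokens): for ["[CLS]", "hello", "[SEP]"] with
    # max_seq_length = 5, get_masks gives [1, 1, 1, 0, 0] and get_segments gives
    # [0, 0, 0, 0, 0]; tokens of a second sentence after "[SEP]" would be marked with 1s.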
def get_ids(self,tokens, tokenizer, max_seq_length):
"""Token ids from Tokenizer vocab"""
token_ids = tokenizer.convert_tokens_to_ids(tokens,)
input_ids = token_ids + [0] * (max_seq_length-len(token_ids))
return input_ids
def create_single_input(self,sentence,maxlen):
stokens = self.tokenizer.tokenize(sentence)
stokens = stokens[:maxlen]
stokens = ["[CLS]"] + stokens + ["[SEP]"]
ids = self.get_ids(stokens, self.tokenizer, self.max_len)
masks = self.get | max_length | identifier_name |
gru_model.py | "]
unique_intent = list(set(intent))
sentences = list(df["Sentence"])
return (intent, unique_intent, sentences)
intent, unique_intent, sentences = load_dataset("Dataset.csv")
intent
sentences
print(sentences[:10])
nltk.download("stopwords")
nltk.download("punkt")
#define stemmer
stemmer = LancasterStemmer()
"""# 3. Data Cleaning"""
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", s)
w = word_tokenize(clean)
        #lowercase tokens (the Lancaster stemmer defined above is not actually applied here)
words.append([i.lower() for i in w])
return words
cleaned_words = cleaning(sentences)
print(len(cleaned_words))
print(cleaned_words[:2])
"""### 3.1 Keras Tokenizer"""
def create_tokenizer(words, filters = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'):
token = Tokenizer(filters = filters)
token.fit_on_texts(words)
return token
def max_length(words):
return(len(max(words, key = len)))
word_tokenizer = create_tokenizer(cleaned_words)
vocab_size = len(word_tokenizer.word_index) + 1
max_length = max_length(cleaned_words)
print("Vocab Size = %d and Maximum length = %d" % (vocab_size, max_length))
"""### 3.2 One Hot Encoding for Model Fed"""
def encoding_doc(token, words):
return(token.texts_to_sequences(words))
encoded_doc = encoding_doc(word_tokenizer, cleaned_words)
def padding_doc(encoded_doc, max_length):
return(pad_sequences(encoded_doc, maxlen = max_length, padding = "post"))
padded_doc = padding_doc(encoded_doc, max_length)
padded_doc[:5]
print("Shape of padded docs = ",padded_doc.shape)
#tokenizer with filter changed
output_tokenizer = create_tokenizer(unique_intent, filters = '!"#$%&()*+,-/:;<=>?@[\]^`{|}~')
output_tokenizer.word_index
encoded_output = encoding_doc(output_tokenizer, intent)
encoded_output = np.array(encoded_output).reshape(len(encoded_output), 1)
encoded_output.shape
def one_hot(encode):
o = OneHotEncoder(sparse = False)
return(o.fit_transform(encode))
output_one_hot = one_hot(encoded_output)
output_one_hot.shape
"""# 4. Train and Validation Split"""
from sklearn.model_selection import train_test_split
train_X, val_X, train_Y, val_Y = train_test_split(padded_doc, output_one_hot, shuffle = True, test_size = 0.2)
print("Shape of train_X = %s and train_Y = %s" % (train_X.shape, train_Y.shape))
print("Shape of val_X = %s and val_Y = %s" % (val_X.shape, val_Y.shape))
"""# 5. GRU Modeling"""
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128, return_sequences=True)))
model.add(Bidirectional(LSTM(64)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
model = create_model(vocab_size, max_length)
model.compile(loss = "categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
model.summary()
"""# 6. Training"""
filename = 'model.h5'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
hist = model.fit(train_X, train_Y, epochs = 100, batch_size = 32, validation_data = (val_X, val_Y), callbacks = [checkpoint])
loss = pd.DataFrame({'accuracy': model.history.history['accuracy'], 'val_accuracy': model.history.history['val_accuracy']})
loss.plot()
model = load_model("model.h5")
def predictions(text):
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", text)
test_word = word_tokenize(clean)
test_word = [w.lower() for w in test_word]
test_ls = word_tokenizer.texts_to_sequences(test_word)
print(test_word)
#Check for unknown words
if [] in test_ls:
test_ls = list(filter(None, test_ls))
test_ls = np.array(test_ls).reshape(1, len(test_ls))
x = padding_doc(test_ls, max_length)
pred = model.predict_proba(x)
return pred
def get_final_output(pred, classes):
predictions = pred[0]
classes = np.array(classes)
ids = np.argsort(-predictions)
classes = classes[ids]
predictions = -np.sort(-predictions)
for i in range(pred.shape[1]):
|
"""# 7. Testing"""
text = "Can you help me?"
pred = predictions(text)
get_final_output(pred, unique_intent)
"""# 8. Save/Load Pickle"""
# from sklearn.externals import joblib
# joblib.dump(model, 'modelnlp.pkl')
# nlp_model = open('modelnlp.pkl','rb')
# nlp = joblib.load(nlp_model)
# !pip install git+https://github.com/TinkerMob/keras_albert_model.git
# from keras_albert_model import build_albert
"""# 9. Experiment with Monkeyzlearn API"""
from monkeylearn import MonkeyLearn
ml = MonkeyLearn('e7e230d51a8668a72eea86c29559bef04bd6c8fb')
data = ["Hi Feco, looks promising, I would like to schedule a call tomorrow and see the demo. What times do you have available? Thanks, Ryan."]
model_id = 'cl_v9GTn7zi'
result = ml.classifiers.classify(model_id, data)
print(result.body)
# !pip install monkeylearn
"""# 10. BERT Model"""
!pip install bert-for-tf2
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import os
from sklearn.metrics import roc_curve
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Embedding, Activation, LSTM, SimpleRNN, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import bert
from tqdm import tqdm
from tensorflow.keras import backend as K
import tensorflow as tf
import tensorflow_hub as hub
print("TensorFlow Version:",tf.__version__)
print("Hub version: ",hub.__version__)
# Params for bert model
class BertModel(object):
def __init__(self):
self.max_len = 128
bert_path = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1"
FullTokenizer=bert.bert_tokenization.FullTokenizer
self.bert_module = hub.KerasLayer(bert_path,trainable=True)
self.vocab_file = self.bert_module.resolved_object.vocab_file.asset_path.numpy()
self.do_lower_case = self.bert_module.resolved_object.do_lower_case.numpy()
self.tokenizer = FullTokenizer(self.vocab_file,self.do_lower_case)
def get_masks(self,tokens, max_seq_length):
return [1]*len(tokens) + [0] * (max_seq_length - len(tokens))
def get_segments(self,tokens, max_seq_length):
"""Segments: 0 for the first sequence, 1 for the second"""
segments = []
current_segment_id = 0
for token in tokens:
segments.append(current_segment_id)
if token == "[SEP]":
current_segment_id = 1
return segments + [0] * (max_seq_length - len(tokens))
def get_ids(self,tokens, tokenizer, max_seq_length):
"""Token ids from Tokenizer vocab"""
token_ids = tokenizer.convert_tokens_to_ids(tokens,)
input_ids = token_ids + [0] * (max_seq_length-len(token_ids))
return input_ids
def create_single_input(self,sentence,maxlen):
stokens = self.tokenizer.tokenize(sentence)
stokens = stokens[:maxlen]
stokens = ["[CLS]"] + stokens + ["[SEP]"]
ids = self.get_ids(stokens, self.tokenizer, self.max_len)
masks = self.get_masks | print("%s has confidence = %s" % (classes[i], (predictions[i]))) | conditional_block |
gru_model.py | "]
unique_intent = list(set(intent))
sentences = list(df["Sentence"])
return (intent, unique_intent, sentences)
intent, unique_intent, sentences = load_dataset("Dataset.csv")
intent
sentences
print(sentences[:10])
nltk.download("stopwords")
nltk.download("punkt")
#define stemmer
stemmer = LancasterStemmer()
"""# 3. Data Cleaning"""
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", s)
w = word_tokenize(clean)
#lowercasing only (the LancasterStemmer defined above is not actually applied here)
words.append([i.lower() for i in w])
return words
cleaned_words = cleaning(sentences)
print(len(cleaned_words))
print(cleaned_words[:2])
"""### 3.1 Keras Tokenizer"""
def create_tokenizer(words, filters = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'):
token = Tokenizer(filters = filters)
token.fit_on_texts(words)
return token
def max_length(words):
return(len(max(words, key = len)))
word_tokenizer = create_tokenizer(cleaned_words)
vocab_size = len(word_tokenizer.word_index) + 1
max_length = max_length(cleaned_words)  # rebinds max_length from the helper function above to its integer result
print("Vocab Size = %d and Maximum length = %d" % (vocab_size, max_length))
"""### 3.2 One Hot Encoding for Model Fed"""
def encoding_doc(token, words):
return(token.texts_to_sequences(words))
encoded_doc = encoding_doc(word_tokenizer, cleaned_words)
def padding_doc(encoded_doc, max_length):
return(pad_sequences(encoded_doc, maxlen = max_length, padding = "post"))
padded_doc = padding_doc(encoded_doc, max_length)
padded_doc[:5]
print("Shape of padded docs = ",padded_doc.shape)
#tokenizer with filter changed
output_tokenizer = create_tokenizer(unique_intent, filters = '!"#$%&()*+,-/:;<=>?@[\]^`{|}~')
output_tokenizer.word_index
encoded_output = encoding_doc(output_tokenizer, intent)
encoded_output = np.array(encoded_output).reshape(len(encoded_output), 1)
encoded_output.shape
def one_hot(encode):
o = OneHotEncoder(sparse = False)
return(o.fit_transform(encode))
output_one_hot = one_hot(encoded_output)
output_one_hot.shape
"""# 4. Train and Validation Split"""
from sklearn.model_selection import train_test_split
train_X, val_X, train_Y, val_Y = train_test_split(padded_doc, output_one_hot, shuffle = True, test_size = 0.2)
print("Shape of train_X = %s and train_Y = %s" % (train_X.shape, train_Y.shape))
print("Shape of val_X = %s and val_Y = %s" % (val_X.shape, val_Y.shape))
"""# 5. GRU Modeling"""
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
# this second definition overrides the single-BiLSTM model above with a stacked BiLSTM
def create_model(vocab_size, max_length):
model = Sequential()
model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
model.add(Bidirectional(LSTM(128, return_sequences=True)))
model.add(Bidirectional(LSTM(64)))
# model.add(LSTM(128))
model.add(Dense(32, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(21, activation = "softmax"))
return model
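# The section heading says "GRU Modeling", but both definitions above use Bidirectional LSTMs.
# A minimal GRU-based alternative (sketch only, not wired into the training below; assumes the
# same vocab_size / max_length and the 21 intent classes used in this notebook):
from tensorflow.keras.layers import GRU

def create_model_gru(vocab_size, max_length):
    model = Sequential()
    model.add(Embedding(vocab_size, 128, input_length = max_length, trainable = False))
    model.add(Bidirectional(GRU(128)))  # GRU cell in place of the LSTM cell
    model.add(Dense(32, activation = "relu"))
    model.add(Dropout(0.5))
    model.add(Dense(21, activation = "softmax"))
    return model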
model = create_model(vocab_size, max_length)
model.compile(loss = "categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
model.summary()
"""# 6. Training"""
filename = 'model.h5'
checkpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
hist = model.fit(train_X, train_Y, epochs = 100, batch_size = 32, validation_data = (val_X, val_Y), callbacks = [checkpoint])
# these curves are training/validation accuracy, so label them as such (the original mislabeled them as loss/auc)
acc_curves = pd.DataFrame({'accuracy': hist.history['accuracy'], 'val_accuracy': hist.history['val_accuracy']})
acc_curves.plot()
model = load_model("model.h5")
def predictions(text):
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", text)
test_word = word_tokenize(clean)
test_word = [w.lower() for w in test_word]
test_ls = word_tokenizer.texts_to_sequences(test_word)
print(test_word)
#Check for unknown words
if [] in test_ls:
test_ls = list(filter(None, test_ls))
test_ls = np.array(test_ls).reshape(1, len(test_ls))
x = padding_doc(test_ls, max_length)
pred = model.predict(x)  # predict_proba was removed from tf.keras; predict already returns the softmax probabilities
return pred
def get_final_output(pred, classes):
predictions = pred[0]
classes = np.array(classes)
ids = np.argsort(-predictions)
classes = classes[ids]
predictions = -np.sort(-predictions)
for i in range(pred.shape[1]):
print("%s has confidence = %s" % (classes[i], (predictions[i])))
"""# 7. Testing"""
text = "Can you help me?"
pred = predictions(text)
get_final_output(pred, unique_intent)
"""# 8. Save/Load Pickle"""
# from sklearn.externals import joblib
# joblib.dump(model, 'modelnlp.pkl')
# nlp_model = open('modelnlp.pkl','rb')
# nlp = joblib.load(nlp_model)
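# The commented-out lines above import joblib via the removed sklearn.externals path.
# A sketch of the equivalent with current libraries (assumes the trained `model`,
# `word_tokenizer` and `max_length` from the cells above; file names are arbitrary):
import pickle
model.save("modelnlp.h5")                   # Keras models persist best via save / load_model
with open("tokenizer.pkl", "wb") as f:      # the fitted tokenizer and max_length can be pickled
    pickle.dump((word_tokenizer, max_length), f)
# reload:
# nlp = load_model("modelnlp.h5")
# with open("tokenizer.pkl", "rb") as f:
#     word_tokenizer, max_length = pickle.load(f)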
# !pip install git+https://github.com/TinkerMob/keras_albert_model.git
# from keras_albert_model import build_albert
"""# 9. Experiment with Monkeyzlearn API"""
from monkeylearn import MonkeyLearn
ml = MonkeyLearn('e7e230d51a8668a72eea86c29559bef04bd6c8fb')
data = ["Hi Feco, looks promising, I would like to schedule a call tomorrow and see the demo. What times do you have available? Thanks, Ryan."]
model_id = 'cl_v9GTn7zi'
result = ml.classifiers.classify(model_id, data)
print(result.body)
# !pip install monkeylearn
"""# 10. BERT Model"""
!pip install bert-for-tf2
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import os
from sklearn.metrics import roc_curve
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Embedding, Activation, LSTM, SimpleRNN, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import bert
from tqdm import tqdm
from tensorflow.keras import backend as K
import tensorflow as tf
import tensorflow_hub as hub
print("TensorFlow Version:",tf.__version__)
print("Hub version: ",hub.__version__)
# Params for bert model
class BertModel(object):
def __init__(self):
self.max_len = 128
bert_path = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1"
FullTokenizer=bert.bert_tokenization.FullTokenizer
self.bert_module = hub.KerasLayer(bert_path,trainable=True)
self.vocab_file = self.bert_module.resolved_object.vocab_file.asset_path.numpy()
self.do_lower_case = self.bert_module.resolved_object.do_lower_case.numpy()
self.tokenizer = FullTokenizer(self.vocab_file,self.do_lower_case)
def get_masks(self,tokens, max_seq_length):
return [1]*len(tokens) + [0] * (max_seq_length - len(tokens))
def get_segments(self,tokens, max_seq_length):
"""Segments: 0 for the first sequence, 1 for the second"""
segments = []
current_segment_id = 0
for token in tokens:
segments.append(current_segment_id)
if token == "[SEP]":
current_segment_id = 1
return segments + [0] * (max_seq_length - len(tokens))
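# Illustration (hypothetical values): for tokens ["[CLS]", "hi", "[SEP]"] and max_seq_length = 5,
# get_masks returns [1, 1, 1, 0, 0] and get_segments returns [0, 0, 0, 0, 0];
# the segment id only switches to 1 for tokens that come after the first "[SEP]".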
def get_ids(self,tokens, tokenizer, max_seq_length):
"""Token ids from Tokenizer vocab"""
token_ids = tokenizer.convert_tokens_to_ids(tokens,)
input_ids = token_ids + [0] * (max_seq_length-len(token_ids)) | return input_ids
def create_single_input(self,sentence,maxlen):
stokens = self.tokenizer.tokenize(sentence)
stokens = stokens[:maxlen]
stokens = ["[CLS]"] + stokens + ["[SEP]"]
ids = self.get_ids(stokens, self.tokenizer, self.max_len)
masks = self.get_masks | random_line_split |