file_name (large_stringlengths 4–140) | prefix (large_stringlengths 0–39k) | suffix (large_stringlengths 0–36.1k) | middle (large_stringlengths 0–29.4k) | fim_type (large_stringclasses 4 values)
---|---|---|---|---
term_gui.rs | // Copyright (c) The Swiboe development team. All rights reserved.
// Licensed under the Apache License, Version 2.0. See LICENSE.txt
// in the project root for license information.
#[macro_use]
extern crate clap;
extern crate rustbox;
extern crate serde_json;
extern crate subsequence_match;
extern crate swiboe;
extern crate swiboe_gui as gui;
extern crate time;
extern crate serde;
extern crate uuid;
use gui::buffer_views;
use serde::{Serialize, Deserialize};
use gui::keymap_handler;
use rustbox::{Color, RustBox};
use std::cmp;
use std::env;
use std::net;
use std::path;
use std::str::FromStr;
use std::sync::mpsc;
use std::sync::{RwLock, Arc};
use swiboe::client::{self, RpcCaller};
use uuid::Uuid;
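// Clamps *v in place to the inclusive range [min, max].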
fn clamp<T: Copy + cmp::Ord + std::fmt::Debug>(min: T, max: T, v: &mut T) {
let new_value = cmp::min(max, cmp::max(min, *v));
*v = new_value;
}
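// Fuzzy file-finder overlay: streams candidates from the "list_files" RPC and subsequence-matches them against the typed query.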
struct CompleterWidget {
candidates: subsequence_match::CandidateSet,
rpc: Option<client::rpc::client::Context>,
query: String,
results: Vec<subsequence_match::QueryResult>,
selection_index: isize,
}
enum CompleterState {
Running,
Canceled,
Selected(String),
}
impl CompleterWidget {
fn new(client: &mut client::Client) -> swiboe::Result<Self> {
// TODO(sirver): This should use the current working directory of the server, since the server
// might run on a different machine than the client - and certainly in a different
// directory.
let current_dir = env::current_dir().unwrap();
let rpc = try!(client.call("list_files", &swiboe::plugin::list_files::ListFilesRequest {
directory: current_dir.to_string_lossy().into_owned(),
}));
Ok(CompleterWidget {
candidates: subsequence_match::CandidateSet::new(),
rpc: Some(rpc),
query: "".into(),
results: Vec::new(),
selection_index: 0,
})
}
fn on_key(&mut self, key: rustbox::Key) -> CompleterState {
match key {
rustbox::Key::Char(c) => {
self.query.push(c);
self.results.clear();
CompleterState::Running
},
rustbox::Key::Backspace => {
self.query.pop();
self.results.clear();
CompleterState::Running
},
rustbox::Key::Down => {
self.selection_index += 1;
CompleterState::Running
},
rustbox::Key::Up => {
self.selection_index -= 1;
CompleterState::Running
},
rustbox::Key::Esc => {
self.rpc.take().unwrap().cancel().unwrap();
CompleterState::Canceled
},
rustbox::Key::Enter => {
self.rpc.take().unwrap().cancel().unwrap();
if self.results.is_empty() {
CompleterState::Canceled
} else {
clamp(0, self.results.len() as isize - 1, &mut self.selection_index);
CompleterState::Selected(self.results[self.selection_index as usize].text.clone())
}
}
_ => CompleterState::Running,
}
}
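// Drains pending "list_files" updates into the candidate set, re-runs the query when the result list is stale, and renders the query line, match counter, and highlighted results.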
fn draw(&mut self, rustbox: &rustbox::RustBox) {
while let Some(b) = self.rpc.as_mut().unwrap().try_recv().unwrap() {
self.results.clear();
let b: swiboe::plugin::list_files::ListFilesUpdate = serde_json::from_value(b).unwrap();
for file in &b.files {
self.candidates.insert(file);
}
}
if self.results.is_empty() {
let query_to_use: String = self.query.chars().filter(|c| !c.is_whitespace()).collect();
self.candidates.query(&query_to_use, subsequence_match::MatchCase::No, &mut self.results);
}
if !self.results.is_empty() {
clamp(0, self.results.len() as isize - 1, &mut self.selection_index);
}
rustbox.print(0, 0, rustbox::RB_BOLD, Color::Yellow, Color::Default, &self.query);
let len_string = format!("{}/{} matching ({})", self.results.len(), self.candidates.len(),
if self.rpc.as_ref().unwrap().done() { "done" } else { "scanning" } );
rustbox.print(rustbox.width() - len_string.len() - 1, 0, rustbox::RB_BOLD, Color::Blue, Color::Default, &len_string);
let mut row = 1usize;
for result in &self.results {
let mut matching_indices = result.matching_indices.iter().peekable();
for (col, c) in result.text.chars().enumerate() {
let matches = match matching_indices.peek() {
Some(val) if **val == col => true,
_ => false,
};
let mut style = if matches {
matching_indices.next();
rustbox::RB_BOLD
} else {
rustbox::RB_NORMAL
};
if row as isize == self.selection_index + 1 {
style = style | rustbox::RB_REVERSE;
}
rustbox.print_char(col, row, style, Color::Default, Color::Default, c);
}
row += 1;
if row > rustbox.height() {
break;
}
}
}
}
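// Renders the visible slice of a buffer view, painting the cursor cell red.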
struct BufferViewWidget {
view_id: String,
client: client::ThinClient,
cursor_id: String,
}
impl BufferViewWidget {
pub fn new(view_id: String, client: client::ThinClient) -> Self {
BufferViewWidget {
view_id: view_id,
client: client,
cursor_id: String::new(),
}
}
fn draw(&mut self, buffer_view: &buffer_views::BufferView, rustbox: &rustbox::RustBox) {
let mut row = 0;
let top_line_index = buffer_view.top_line_index as usize;
self.cursor_id = buffer_view.cursor.id().to_string();
let mut cursor_drawn = false;
while row < rustbox.height() {
let line_index = top_line_index + row;
if let Some(line) = buffer_view.lines.get(line_index) {
for (col, c) in line.chars().enumerate() {
if col >= rustbox.width() {
break;
}
let bg = if buffer_view.cursor.position.line_index == line_index as isize &&
buffer_view.cursor.position.column_index as usize == col {
cursor_drawn = true;
Color::Red
} else {
Color::Default
};
rustbox.print_char(col, row, rustbox::RB_NORMAL, Color::Default, bg, c);
}
}
row += 1;
}
if !cursor_drawn {
let row = buffer_view.cursor.position.line_index - top_line_index as isize;
rustbox.print_char(buffer_view.cursor.position.column_index as usize,
row as usize, rustbox::RB_NORMAL,
Color::Default, Color::Red, ' ');
}
}
}
#[derive(Debug)]
struct Options {
socket: String,
config_file: path::PathBuf,
}
struct TerminalGui {
config_file_runner: Box<gui::config_file::ConfigFileRunner>,
client: client::Client,
rustbox: rustbox::RustBox,
buffer_views: Arc<RwLock<gui::buffer_views::BufferViews>>,
last_key_down_event: time::PreciseTime,
completer: Option<CompleterWidget>,
buffer_view_widget: Option<BufferViewWidget>,
// NOCOM(#sirver): GuiCommand in namespace gui is very duplicated
gui_commands: mpsc::Receiver<gui::command::GuiCommand>,
}
impl TerminalGui {
fn new(options: &Options) -> swiboe::Result<Self> {
let mut client = match net::SocketAddr::from_str(&options.socket) {
Ok(value) => {
client::Client::connect_tcp(&value).unwrap()
}
Err(_) => {
let socket_path = path::PathBuf::from(&options.socket);
client::Client::connect_unix(&socket_path).unwrap()
}
};
let mut config_file_runner = gui::config_file::ConfigFileRunner::new(
try!(client.clone()));
config_file_runner.run(&options.config_file);
let rustbox = match RustBox::init(rustbox::InitOptions {
input_mode: rustbox::InputMode::Current,
buffer_stderr: true,
}) {
Result::Ok(v) => v,
Result::Err(e) => panic!("{}", e),
};
let gui_id: String = Uuid::new_v4().to_hyphenated_string();
let (gui_commands_tx, gui_commands_rx) = mpsc::channel();
let buffer_views = try!(gui::buffer_views::BufferViews::new(&gui_id, gui_commands_tx, &mut client));
Ok(TerminalGui {
config_file_runner: config_file_runner,
client: client,
rustbox: rustbox,
buffer_views: buffer_views,
last_key_down_event: time::PreciseTime::now(),
completer: None,
buffer_view_widget: None,
gui_commands: gui_commands_rx,
})
}
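// Polls for a key event with a 5 ms timeout, routes it to the completer (if open) or the keymap handler, then drains queued GUI commands; Ok(false) means quit.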
fn handle_events(&mut self) -> swiboe::Result<bool> {
match self.rustbox.peek_event(std::time::Duration::from_millis(5), false) {
Ok(rustbox::Event::KeyEvent(key)) => {
if self.completer.is_some() {
let rv = self.completer.as_mut().unwrap().on_key(key);
match rv {
CompleterState::Running => (),
CompleterState::Canceled => {
self.completer = None;
},
CompleterState::Selected(result) => {
self.completer = None;
let mut rpc = try!(self.client.call("buffer.open", &swiboe::plugin::buffer::open::Request {
uri: format!("file://{}", result),
}));
let response: swiboe::plugin::buffer::open::Response = rpc.wait_for().unwrap();
let mut buffer_views = self.buffer_views.write().unwrap();
let view_id = buffer_views.new_view(response.buffer_index, self.rustbox.width(), self.rustbox.height());
self.buffer_view_widget = Some(BufferViewWidget::new(view_id, try!(self.client.clone())));
},
}
} else {
if !try!(self.handle_key(key)) {
return Ok(false);
}
}
},
Err(e) => panic!("{}", e),
_ => { }
}
while let Ok(command) = self.gui_commands.try_recv() {
match command {
gui::command::GuiCommand::Quit => return Ok(false),
gui::command::GuiCommand::Redraw => (),
}
}
return Ok(true);
}
fn handle_key(&mut self, key: rustbox::Key) -> swiboe::Result<bool> {
let delta_t = {
let now = time::PreciseTime::now();
let delta_t = self.last_key_down_event.to(now);
self.last_key_down_event = now;
delta_t
};
let delta_t_in_seconds = delta_t.num_nanoseconds().unwrap() as f64 / 1e9;
match key {
// NOCOM(#sirver): should be handled through plugins.
rustbox::Key::Char('q') => return Ok(false),
rustbox::Key::Ctrl('t') => {
self.completer = Some(try!(CompleterWidget::new(&mut self.client)))
},
rustbox::Key::Esc => {
self.config_file_runner.keymap_handler.timeout();
},
rustbox::Key::Char(a) => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Char(a));
},
rustbox::Key::Up => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Up);
},
rustbox::Key::Down => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Down);
},
rustbox::Key::Left => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Left);
},
rustbox::Key::Right => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Right);
},
rustbox::Key::Tab => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Tab);
},
rustbox::Key::Ctrl(some_other_key) => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Ctrl);
try!(self.handle_key(rustbox::Key::Char(some_other_key)));
}
_ => (),
}
Ok(true)
}
fn draw(&mut self) {
self.rustbox.clear();
if let Some(ref mut widget) = self.buffer_view_widget {
let buffer_views = self.buffer_views.read().unwrap();
let buffer_view = buffer_views.get(&widget.view_id).unwrap();
widget.draw(&buffer_view, &self.rustbox);
}
if let Some(ref mut completer) = self.completer {
completer.draw(&self.rustbox);
}
self.rustbox.present();
}
}
fn parse_options() -> Options {
let matches = clap::App::new("term_gui")
.about("Terminal client for Swiboe")
.version(&crate_version!()[..])
.arg(clap::Arg::with_name("SOCKET")
.short("s")
.long("socket")
.help("Socket at which the master listens.")
.required(true)
.takes_value(true))
.arg(clap::Arg::with_name("CONFIG_FILE")
.short("c")
.long("config_file")
.help("The config file to run when the GUI starts up.")
.takes_value(true))
.get_matches();
Options {
config_file: path::PathBuf::from(matches.value_of("CONFIG_FILE").unwrap_or("config.lua")),
socket: matches.value_of("SOCKET").unwrap().into(),
}
}
fn main() | {
let options = parse_options();
let mut gui = TerminalGui::new(&options).unwrap();
while gui.handle_events().unwrap() {
gui.draw();
}
} | identifier_body |
|
term_gui.rs | // Copyright (c) The Swiboe development team. All rights reserved.
// Licensed under the Apache License, Version 2.0. See LICENSE.txt
// in the project root for license information.
#[macro_use]
extern crate clap;
extern crate rustbox;
extern crate serde_json;
extern crate subsequence_match;
extern crate swiboe;
extern crate swiboe_gui as gui;
extern crate time;
extern crate serde;
extern crate uuid;
use gui::buffer_views;
use serde::{Serialize, Deserialize};
use gui::keymap_handler;
use rustbox::{Color, RustBox};
use std::cmp;
use std::env;
use std::net;
use std::path;
use std::str::FromStr;
use std::sync::mpsc;
use std::sync::{RwLock, Arc};
use swiboe::client::{self, RpcCaller};
use uuid::Uuid;
fn clamp<T: Copy + cmp::Ord + std::fmt::Debug>(min: T, max: T, v: &mut T) {
let new_value = cmp::min(max, cmp::max(min, *v));
*v = new_value;
}
struct CompleterWidget {
candidates: subsequence_match::CandidateSet,
rpc: Option<client::rpc::client::Context>,
query: String,
results: Vec<subsequence_match::QueryResult>,
selection_index: isize,
}
enum CompleterState {
Running,
Canceled,
Selected(String),
}
impl CompleterWidget {
fn new(client: &mut client::Client) -> swiboe::Result<Self> {
// TODO(sirver): This should use the current working directory of the server, since the server
// might run on a different machine than the client - and certainly in a different
// directory.
let current_dir = env::current_dir().unwrap();
let rpc = try!(client.call("list_files", &swiboe::plugin::list_files::ListFilesRequest {
directory: current_dir.to_string_lossy().into_owned(),
}));
Ok(CompleterWidget {
candidates: subsequence_match::CandidateSet::new(),
rpc: Some(rpc),
query: "".into(),
results: Vec::new(),
selection_index: 0,
})
}
fn | (&mut self, key: rustbox::Key) -> CompleterState {
match key {
rustbox::Key::Char(c) => {
self.query.push(c);
self.results.clear();
CompleterState::Running
},
rustbox::Key::Backspace => {
self.query.pop();
self.results.clear();
CompleterState::Running
},
rustbox::Key::Down => {
self.selection_index += 1;
CompleterState::Running
},
rustbox::Key::Up => {
self.selection_index -= 1;
CompleterState::Running
},
rustbox::Key::Esc => {
self.rpc.take().unwrap().cancel().unwrap();
CompleterState::Canceled
},
rustbox::Key::Enter => {
self.rpc.take().unwrap().cancel().unwrap();
if self.results.is_empty() {
CompleterState::Canceled
} else {
clamp(0, self.results.len() as isize - 1, &mut self.selection_index);
CompleterState::Selected(self.results[self.selection_index as usize].text.clone())
}
}
_ => CompleterState::Running,
}
}
fn draw(&mut self, rustbox: &rustbox::RustBox) {
while let Some(b) = self.rpc.as_mut().unwrap().try_recv().unwrap() {
self.results.clear();
let b: swiboe::plugin::list_files::ListFilesUpdate = serde_json::from_value(b).unwrap();
for file in &b.files {
self.candidates.insert(file);
}
}
if self.results.is_empty() {
let query_to_use: String = self.query.chars().filter(|c| !c.is_whitespace()).collect();
self.candidates.query(&query_to_use, subsequence_match::MatchCase::No, &mut self.results);
}
if !self.results.is_empty() {
clamp(0, self.results.len() as isize - 1, &mut self.selection_index);
}
rustbox.print(0, 0, rustbox::RB_BOLD, Color::Yellow, Color::Default, &self.query);
let len_string = format!("{}/{} matching ({})", self.results.len(), self.candidates.len(),
if self.rpc.as_ref().unwrap().done() { "done" } else { "scanning" } );
rustbox.print(rustbox.width() - len_string.len() - 1, 0, rustbox::RB_BOLD, Color::Blue, Color::Default, &len_string);
let mut row = 1usize;
for result in &self.results {
let mut matching_indices = result.matching_indices.iter().peekable();
for (col, c) in result.text.chars().enumerate() {
let matches = match matching_indices.peek() {
Some(val) if **val == col => true,
_ => false,
};
let mut style = if matches {
matching_indices.next();
rustbox::RB_BOLD
} else {
rustbox::RB_NORMAL
};
if row as isize == self.selection_index + 1 {
style = style | rustbox::RB_REVERSE;
}
rustbox.print_char(col, row, style, Color::Default, Color::Default, c);
}
row += 1;
if row > rustbox.height() {
break;
}
}
}
}
struct BufferViewWidget {
view_id: String,
client: client::ThinClient,
cursor_id: String,
}
impl BufferViewWidget {
pub fn new(view_id: String, client: client::ThinClient) -> Self {
BufferViewWidget {
view_id: view_id,
client: client,
cursor_id: String::new(),
}
}
fn draw(&mut self, buffer_view: &buffer_views::BufferView, rustbox: &rustbox::RustBox) {
let mut row = 0;
let top_line_index = buffer_view.top_line_index as usize;
self.cursor_id = buffer_view.cursor.id().to_string();
let mut cursor_drawn = false;
while row < rustbox.height() {
let line_index = top_line_index + row;
if let Some(line) = buffer_view.lines.get(line_index) {
for (col, c) in line.chars().enumerate() {
if col >= rustbox.width() {
break;
}
let bg = if buffer_view.cursor.position.line_index == line_index as isize &&
buffer_view.cursor.position.column_index as usize == col {
cursor_drawn = true;
Color::Red
} else {
Color::Default
};
rustbox.print_char(col, row, rustbox::RB_NORMAL, Color::Default, bg, c);
}
}
row += 1;
}
if !cursor_drawn {
let row = buffer_view.cursor.position.line_index - top_line_index as isize;
rustbox.print_char(buffer_view.cursor.position.column_index as usize,
row as usize, rustbox::RB_NORMAL,
Color::Default, Color::Red, ' ');
}
}
}
#[derive(Debug)]
struct Options {
socket: String,
config_file: path::PathBuf,
}
struct TerminalGui {
config_file_runner: Box<gui::config_file::ConfigFileRunner>,
client: client::Client,
rustbox: rustbox::RustBox,
buffer_views: Arc<RwLock<gui::buffer_views::BufferViews>>,
last_key_down_event: time::PreciseTime,
completer: Option<CompleterWidget>,
buffer_view_widget: Option<BufferViewWidget>,
// NOCOM(#sirver): GuiCommand in namespace gui is very duplicated
gui_commands: mpsc::Receiver<gui::command::GuiCommand>,
}
impl TerminalGui {
fn new(options: &Options) -> swiboe::Result<Self> {
let mut client = match net::SocketAddr::from_str(&options.socket) {
Ok(value) => {
client::Client::connect_tcp(&value).unwrap()
}
Err(_) => {
let socket_path = path::PathBuf::from(&options.socket);
client::Client::connect_unix(&socket_path).unwrap()
}
};
let mut config_file_runner = gui::config_file::ConfigFileRunner::new(
try!(client.clone()));
config_file_runner.run(&options.config_file);
let rustbox = match RustBox::init(rustbox::InitOptions {
input_mode: rustbox::InputMode::Current,
buffer_stderr: true,
}) {
Result::Ok(v) => v,
Result::Err(e) => panic!("{}", e),
};
let gui_id: String = Uuid::new_v4().to_hyphenated_string();
let (gui_commands_tx, gui_commands_rx) = mpsc::channel();
let buffer_views = try!(gui::buffer_views::BufferViews::new(&gui_id, gui_commands_tx, &mut client));
Ok(TerminalGui {
config_file_runner: config_file_runner,
client: client,
rustbox: rustbox,
buffer_views: buffer_views,
last_key_down_event: time::PreciseTime::now(),
completer: None,
buffer_view_widget: None,
gui_commands: gui_commands_rx,
})
}
fn handle_events(&mut self) -> swiboe::Result<bool> {
match self.rustbox.peek_event(std::time::Duration::from_millis(5), false) {
Ok(rustbox::Event::KeyEvent(key)) => {
if self.completer.is_some() {
let rv = self.completer.as_mut().unwrap().on_key(key);
match rv {
CompleterState::Running => (),
CompleterState::Canceled => {
self.completer = None;
},
CompleterState::Selected(result) => {
self.completer = None;
let mut rpc = try!(self.client.call("buffer.open", &swiboe::plugin::buffer::open::Request {
uri: format!("file://{}", result),
}));
let response: swiboe::plugin::buffer::open::Response = rpc.wait_for().unwrap();
let mut buffer_views = self.buffer_views.write().unwrap();
let view_id = buffer_views.new_view(response.buffer_index, self.rustbox.width(), self.rustbox.height());
self.buffer_view_widget = Some(BufferViewWidget::new(view_id, try!(self.client.clone())));
},
}
} else {
if !try!(self.handle_key(key)) {
return Ok(false);
}
}
},
Err(e) => panic!("{}", e),
_ => { }
}
while let Ok(command) = self.gui_commands.try_recv() {
match command {
gui::command::GuiCommand::Quit => return Ok(false),
gui::command::GuiCommand::Redraw => (),
}
}
return Ok(true);
}
fn handle_key(&mut self, key: rustbox::Key) -> swiboe::Result<bool> {
let delta_t = {
let now = time::PreciseTime::now();
let delta_t = self.last_key_down_event.to(now);
self.last_key_down_event = now;
delta_t
};
let delta_t_in_seconds = delta_t.num_nanoseconds().unwrap() as f64 / 1e9;
match key {
// NOCOM(#sirver): should be handled through plugins.
rustbox::Key::Char('q') => return Ok(false),
rustbox::Key::Ctrl('t') => {
self.completer = Some(try!(CompleterWidget::new(&mut self.client)))
},
rustbox::Key::Esc => {
self.config_file_runner.keymap_handler.timeout();
},
rustbox::Key::Char(a) => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Char(a));
},
rustbox::Key::Up => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Up);
},
rustbox::Key::Down => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Down);
},
rustbox::Key::Left => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Left);
},
rustbox::Key::Right => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Right);
},
rustbox::Key::Tab => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Tab);
},
rustbox::Key::Ctrl(some_other_key) => {
self.config_file_runner.keymap_handler.key_down(
delta_t_in_seconds, keymap_handler::Key::Ctrl);
try!(self.handle_key(rustbox::Key::Char(some_other_key)));
}
_ => (),
}
Ok(true)
}
fn draw(&mut self) {
self.rustbox.clear();
if let Some(ref mut widget) = self.buffer_view_widget {
let buffer_views = self.buffer_views.read().unwrap();
let buffer_view = buffer_views.get(&widget.view_id).unwrap();
widget.draw(&buffer_view, &self.rustbox);
}
if let Some(ref mut completer) = self.completer {
completer.draw(&self.rustbox);
}
self.rustbox.present();
}
}
fn parse_options() -> Options {
let matches = clap::App::new("term_gui")
.about("Terminal client for Swiboe")
.version(&crate_version!()[..])
.arg(clap::Arg::with_name("SOCKET")
.short("s")
.long("socket")
.help("Socket at which the master listens.")
.required(true)
.takes_value(true))
.arg(clap::Arg::with_name("CONFIG_FILE")
.short("c")
.long("config_file")
.help("The config file to run when the GUI starts up.")
.takes_value(true))
.get_matches();
Options {
config_file: path::PathBuf::from(matches.value_of("CONFIG_FILE").unwrap_or("config.lua")),
socket: matches.value_of("SOCKET").unwrap().into(),
}
}
fn main() {
let options = parse_options();
let mut gui = TerminalGui::new(&options).unwrap();
while gui.handle_events().unwrap() {
gui.draw();
}
}
| on_key | identifier_name |
wechat_task.py | # -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/HistoricalArticlesToPdf
import os
import re
import math
import time
import json
import shutil
import psutil
import logging
import requests
from PIL import Image
from queue import Queue
from http import cookiejar
import urllib.parse as urlcode
import libs.sql.user_sql as UserSql
import libs.sql.wechat_sql as WechatSql
from libs.core.html2pdf import HtmlToPdfThreads
log = logging.getLogger(__name__)
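# Crawls the article history of WeChat Official Accounts through the mp.weixin.qq.com backend API, saves each article as local HTML with mirrored images, and queues the pages for PDF conversion.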
class WechatTask(object):
base_url = "https://mp.weixin.qq.com/cgi-bin/"
start_login_url = base_url+"bizlogin?action=startlogin"
getqrcode_url = base_url+"scanloginqrcode?action=getqrcode&random=%s"
ask_url = base_url+"scanloginqrcode?action=ask&token=&lang=zh_CN&f=json&ajax=1"
login_url = base_url+"bizlogin?action=login"
search_biz_url = base_url+"searchbiz"
appmsg_url = base_url+"appmsg"
referer = "https://mp.weixin.qq.com/"
thread_list =[]
img_path_dict = {}
diz_list =[]
def __init__(self,user_name, password, cookie, name, website_url,threads,out_path):
self.user_name = user_name
self.password = password
self.cookie = cookie
self.name = name.replace("\"","").replace(" ","")
self.website_url = website_url
self.task_queue = Queue()
self.threads = threads
self.out_path = out_path
def start(self):
self.__start_data__ = str(time.time()).replace(".","")
self.__create_dir__()
self.__load_cookies__()
self.__start_threads__()
for thread in self.thread_list:
thread.join()
self.__print__()
self.__delete_file__()
def __create_dir__(self):
self.out_qrcode_path = os.path.join(self.out_path,"qrcode")
if not os.path.exists(self.out_qrcode_path):
os.makedirs(self.out_qrcode_path)
self.wx_cookie_path = os.path.join(self.out_path,"wx.info")
def __start_threads__(self):
for thread_id in range(1,self.threads):
thread_name = "Thread - " + str(thread_id)
thread = HtmlToPdfThreads(self.task_queue,thread_id,thread_name)
thread.start()
self.thread_list.append(thread)
def __data__(self,map=None):
data = {"userlang":"zh_CN","redirect_url":"","login_type":"3","token":"","lang":"","f":"json","ajax":"1"}
if map:
for key,value in map.items():
data[key] = value
return data
def __head__(self,heads=None):
head ={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0",
"Referer": self.referer
}
if self.cookie:
head["Cookie"] = self.cookie
if heads:
for key,value in heads.items():
head[key] = value
return head
def __start_login__(self):
data = {"sessionid":str(time.time()).replace(".","")}
session,result = self.__http_request__(url=self.start_login_url,data=self.__data__(data),wait=1)
if result:
self.getqrcode(session)
def getqrcode(self,session):
time_str = str(time.time()).replace(".","")
new_getqrcode_url = self.getqrcode_url.replace("%s",time_str)
qrcode_path = os.path.join(self.out_qrcode_path,time_str + ".png")
self.__http_io_request__(url=new_getqrcode_url,session=session,path=qrcode_path)
log.warn("请使用微信扫描弹出的二维码图片用于登录微信公众号!")
try:
image = Image.open(qrcode_path)
image.show()
except Exception as e:
log.error(e)
raise Exception("获取二维码失败,请重试!")
self.getqrcodeStatus(session)
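# Polls the scan-login endpoint until the QR code is confirmed (status "1") or expires (status "3"), alternating 6 s and 7 s waits between polls.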
def getqrcodeStatus(self,session,t=6):
while True:
session,result = self.__http_request__(method='get',url=self.ask_url,wait=t)
if not result:
return
if result.get("status") == "3":
log.warn("二维码已失效,请重新使用微信进行扫码!")
self.getqrcode(session)
return
if str(result.get("status")) == "1":
self.login(session)
return
if t == 6:
t = 7
else:
t = 6
def login(self,session):
data = {"lang":"zh_CN"}
session,result = self.__http_request__(url=self.login_url,data=self.__data__(data))
if not result:
return
redirect_url = result.get("redirect_url")
if not redirect_url:
return
token_compile = re.compile(r'.*token=(.*).*')
token = token_compile.findall(redirect_url)
if len(token) == 0:
return
token = token[0]
names = self.name.split(",")
self.__save_cookie__(session,token)
for name in names:
self.search_biz(session,token,name)
# Search for the official account by name.
def search_biz(self,session,token,name,no=1,begin=0,count=5,total=0):
data = {
"action":"search_biz",
"begin":begin,
"count":count,
"query":name,
"token":token,
"lang":"zh_CN",
"f":"json",
"ajax":1
}
self.referer = ("https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit_v2&action=edit&isNew=1&type=10&createType=0&token=%s&lang=zh_CN") % (token)
session,result = self.__http_request__(method='get',url=self.search_biz_url,data=data)
if not result:
return
biz_list = result.get("list") # list of matching official accounts
biz_total = result.get("total") # total number of matching official accounts
if len(biz_list) == 0:
return
for biz in biz_list:
fakeid = biz.get("fakeid")
nickname = biz.get("nickname")
alias = biz.get("alias")
if nickname != name:
continue
wi_id = WechatSql.insert_info(fakeid,alias,nickname)
out_dir = os.path.join(self.out_path , nickname)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
begin = WechatSql.select_list_num(wi_id)
app_msg_cnt = self.list_ex(session,fakeid,token,out_dir,wi_id)
diz_dict ={}
if app_msg_cnt != 0:
diz_dict["wi_id"] = wi_id
diz_dict["name"] = name
diz_dict["total"] = app_msg_cnt
diz_dict["current"] = str(app_msg_cnt - begin)
diz_dict["html"] = os.path.join(out_dir,"html")
diz_dict["pdf"] = os.path.join(out_dir,"pdf")
self.diz_list.append(diz_dict)
return
begin = count + begin
if no <= biz_total:
self.search_biz(session,token,name,no,begin,count,biz_total)
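# Walks the article list page by page: the first call fetches the total count and jumps to the last page, after which begin decreases by count so articles are numbered oldest-first; titles already stored are skipped.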
def list_ex(self,session,fakeid,token,out_dir,wi_id,no=0,begin=0,count=5,app_msg_cnt=0):
data ={
"action":"list_ex",
"begin":str(begin),
"count":str(count),
"fakeid":str(fakeid),
"type":"9",
"query":"",
"token":str(token),
"lang":"zh_CN",
"f":"json",
"ajax":"1"
}
if begin < 0: # guard against a negative start index
return app_msg_cnt
if app_msg_cnt == 0: # first call: fetch the total article count
session,result = self.__http_request__(method='get',url=self.appmsg_url,data=data,session=session)
if not result:
return app_msg_cnt
app_msg_cnt = result.get("app_msg_cnt")
nums = str(app_msg_cnt/10).split(".")
if int(nums[1]) >= 5:
start = app_msg_cnt - int(nums[1]) + 5
else:
start = app_msg_cnt - int(nums[1])
self.list_ex(session,fakeid,token,out_dir,wi_id,begin=start, app_msg_cnt = app_msg_cnt) # seed the start index and the total article count
return app_msg_cnt
session,result = self.__http_request__(method='get',url=self.appmsg_url,data=data,session=session)
if not result:
return app_msg_cnt
app_msg_cnt = result.get("app_msg_cnt")
app_msg_list = result.get("app_msg_list")
if len(app_msg_list) == 0:
return app_msg_cnt
for app in list(reversed(app_msg_list)):
link = app.get("link")
title = app.get("title")
digest = app.get("digest")
title_list = WechatSql.select_list_title(wi_id,begin)
if title in title_list:
continue
i_date = str(time.time()).replace(".","")
WechatSql.insert_list(wi_id,no,title,link,digest,i_date)
self.__get_article_details__(no,title,link,out_dir)
no = no + 1
begin = begin - count
self.list_ex(session,fakeid,token,out_dir,wi_id,no,begin,count,app_msg_cnt)
def __get_article_details__(self,no,title,link,out_dir):
filters = {'/','\\','?','*',':','"','<','>','|',' ','?','(',')','!',',','“',"”"}
for filter in filters:
title = title.replace(filter,"")
html_path = os.path.join(out_dir,"html")
pdf_path = os.path.join(out_dir,"pdf")
image_path = os.path.join(html_path,"image")
if not os.path.exist | os.makedirs(image_path)
if not os.path.exists(pdf_path):
os.makedirs(pdf_path)
html_file = os.path.join(html_path,str(no)+ "-" +title+".html")
pdf_file = os.path.join(pdf_path,str(no)+ "-" +title+".pdf")
if os.path.exists(pdf_file): # skip regeneration when the PDF already exists
return
if not os.path.exists(html_file):
content = self.__get_content__(link,image_path)
with open(html_file,"w") as f:
f.write(content)
f.flush()
f.close()
task_info = {"html":html_file,"pdf":pdf_file}
self.task_queue.put(task_info)
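# Fetches the article HTML, mirrors every data-src image locally, and rewrites the references to the local ./image/ copies.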
def __get_content__(self,link,image_path):
self.referer = link
session,content = self.__http_request__(method="get",url=link,flag=True)
if not content:
return
src_compile = re.compile(r'data-src=\"(.*?)\"')
src_urls = src_compile.findall(content)
if len(src_urls) == 0:
return
for img_url in src_urls:
if not (img_url.startswith("http://") or img_url.startswith("https://")):
continue
img_url_compile = re.compile("wx_fmt=(.*)?")
img = img_url_compile.findall(img_url)
suffix = ".png"
if len(img)>0:
suffix = "."+ str(img[0])
img_name = str(time.time()).replace(".","") + suffix
img_file = os.path.join(image_path,img_name)
self.__http_io_request__(url=img_url,path=img_file)
self.img_path_dict[img_url] = "./image/"+img_name
content = content.replace("data-src","src")
for key,value in self.img_path_dict.items():
content = content.replace(key,value)
return content
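# Streams a binary response (QR code or article image) to disk in 1024-byte chunks.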
def __http_io_request__(self,method='get',url=None,data=None,headers=None,session=requests.session(),stream=True,path=None):
if method =='get':
resp = session.get(url=url,params=data,headers=self.__head__(headers),stream=stream)
else:
resp = session.post(url=url,data=data,headers=self.__head__(headers),stream=stream)
if resp.status_code == 200:
with open(path, 'wb+') as f:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
f.close()
return session,True
time.sleep(1)
return session,False
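# JSON request wrapper: on an invalid session or CSRF token it discards the saved credentials and restarts the QR-code login.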
def __http_request__(self,method='post',url=None,data=None,headers=None,session=requests.session(),wait=5,flag=False):
time.sleep(wait)
if method == "get":
resp = session.get(url = url, params = data, headers = self.__head__(headers))
else:
resp = session.post(url = url, data = data, headers = self.__head__(headers))
if resp.status_code != 200:
log.error("网络异常或者错误:"+str(resp.status_code))
return session,None
if flag:
content = resp.text
if not content:
return session,None
return session,content
resp_json = resp.json()
if not resp_json:
return session,None
log.debug(resp_json)
base_resp = resp_json.get("base_resp")
if base_resp:
ret = base_resp.get("ret")
err_msg = base_resp.get("err_msg")
if ret == 0:
return session,resp_json
elif err_msg == "default" or err_msg == "invalid csrf token" or err_msg=="invalid session" :
UserSql.delete_user_info(0)
self.__start_login__()
return
else:
return session,None
def __print__(self):
change = 0
current = 0
for diz in self.diz_list:
titles = WechatSql.select_list_to_diz(diz['wi_id'],self.__start_data__)
for title in titles:
if os.path.exists(os.path.join(diz["pdf"],title+".pdf")):
change = change + 1
if os.path.exists(os.path.join(diz["html"],title+".html")):
current = current + 1
print(("公众号: %s ,共计 %s 篇文章" %(diz["name"],diz["total"])))
print(("==> 本次共计获取 %s 篇文章,成功将 %s 篇文章转换为PDF文件。")%(str(current),str(change)))
print(("==> PDF文件输出目录为: %s")%(diz["pdf"]))
print(("==> HTML文件输出目录为: %s")%(diz["html"]))
def __delete_file__(self):
if os.path.exists(self.out_qrcode_path):
shutil.rmtree(self.out_qrcode_path)
def __save_cookie__(self,session,token):
cookie = ""
cookies = session.cookies.items()
for key,value in cookies:
cookie = cookie + '{0}={1};'.format(key,value)
UserSql.insert_user_info(self.user_name ,self.password,token,cookie,0)
def __load_cookies__(self):
session = requests.session()
user_info = UserSql.select_user_info(0)
if user_info:
token = user_info[0]
self.cookie = user_info[1]
names = self.name.split(",")
for name in names:
self.search_biz(session,token,name)
else:
self.__start_login__()
| s(image_path):
| conditional_block |
wechat_task.py | # -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/HistoricalArticlesToPdf
import os
import re
import math
import time
import json
import shutil
import psutil
import logging
import requests
from PIL import Image
from queue import Queue
from http import cookiejar
import urllib.parse as urlcode
import libs.sql.user_sql as UserSql
import libs.sql.wechat_sql as WechatSql
from libs.core.html2pdf import HtmlToPdfThreads
log = logging.getLogger(__name__)
class WechatTask(object):
base_url = "https://mp.weixin.qq.com/cgi-bin/"
start_login_url = base_url+"bizlogin?action=startlogin"
getqrcode_url = base_url+"scanloginqrcode?action=getqrcode&random=%s"
ask_url = base_url+"scanloginqrcode?action=ask&token=&lang=zh_CN&f=json&ajax=1"
login_url = base_url+"bizlogin?action=login"
search_biz_url = base_url+"searchbiz"
appmsg_url = base_url+"appmsg"
referer = "https://mp.weixin.qq.com/"
thread_list =[]
img_path_dict = {}
diz_list =[]
def __init__(self,user_name, password, cookie, name, website_url,threads,out_path):
self.user_name = user_name
self.password = password
self.cookie = cookie
self.name = name.replace("\"","").replace(" ","")
self.website_url = website_url
self.task_queue = Queue()
self.threads = threads
self.out_path = out_path
def start(self):
self.__start_data__ = str(time.time()).replace(".","")
self.__create_dir__()
self.__load_cookies__()
self.__start_threads__()
for thread in self.thread_list:
thread.join()
self.__print__()
self.__delete_file__()
def __create_dir__(self):
self.out_qrcode_path = os.path.join(self.out_path,"qrcode")
if not os.path.exists(self.out_qrcode_path):
os.makedirs(self.out_qrcode_path)
self.wx_cookie_path = os.path.join(self.out_path,"wx.info")
def __start_threads__(self):
for thread_id in range(1,self.threads):
thread_name = "Thread - " + str(thread_id)
thread = HtmlToPdfThreads(self.task_queue,thread_id,thread_name)
thread.start()
self.thread_list.append(thread)
def __data__(self,map=None):
data = {"userlang":"zh_CN","redirect_url":"","login_type":"3","token":"","lang":"","f":"json","ajax":"1"}
if map:
for key,value in map.items():
data[key] = value
return data
def __head__(self,heads=None):
head ={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0",
"Referer": self.referer
}
if self.cookie:
head["Cookie"] = self.cookie
if heads:
for key,value in heads.items():
head[key] = value
return head
def __start_login__(self):
data = {"sessionid":str(time.time()).replace(".","")}
session,result = self.__http_request__(url=self.start_login_url,data=self.__data__(data),wait=1)
if result:
self.getqrcode(session)
def getqrcode(self,session):
time_str = str(time.time()).replace(".","")
new_getqrcode_url = self.getqrcode_url.replace("%s",time_str)
qrcode_path = os.path.join(self.out_qrcode_path,time_str + ".png")
self.__http_io_request__(url=new_getqrcode_url,session=session,path=qrcode_path)
log.warn("请使用微信扫描弹出的二维码图片用于登录微信公众号!")
try:
image = Image.open(qrcode_path)
image.show()
except Exception as e:
log.error(e)
raise Exception("获取二维码失败,请重试!")
self.getqrcodeStatus(session)
def getqrcodeStatus(self,session,t=6):
while True:
session,result = self.__http_request__(method='get',url=self.ask_url,wait=t)
if not result:
return
if result.get("status") == "3":
log.warn("二维码已失效,请重新使用微信进行扫码!")
self.getqrcode(session)
return
if str(result.get("status")) == "1":
self.login(session)
return
if t == 6:
t = 7
else:
t = 6
def login(self,session):
data = {"lang":"zh_CN"}
session,result = self.__http_request__(url=self.login_url,data=self.__data__(data))
if not result:
return
redirect_url = result.get("redirect_url")
if not redirect_url:
return
token_compile = re.compile(r'.*token=(.*).*')
token = token_compile.findall(redirect_url)
if len(token) == 0:
return
token = token[0]
names = self.name.split(",")
self.__save_cookie__(session,token)
for name in names:
self.search_biz(session,token,name)
# Search for the official account by name.
def search_biz(self,session,token,name,no=1,begin=0,count=5,total=0):
data = {
"action":"search_biz",
"begin":begin,
"count":count,
"query":name,
"token":token,
"lang":"zh_CN",
"f":"json",
"ajax":1
}
self.referer = ("https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit_v2&action=edit&isNew=1&type=10&createType=0&token=%s&lang=zh_CN") % (token)
session,result = self.__http_request__(method='get',url=self.search_biz_url,data=data)
if not result:
return
biz_list = result.get("list") # list of matching official accounts
biz_total = result.get("total") # total number of matching official accounts
if len(biz_list) == 0:
return
for biz in biz_list:
fakeid = biz.get("fakeid")
nickname = biz.get("nickname")
alias = biz.get("alias")
if nickname != name:
continue
wi_id = WechatSql.insert_info(fakeid,alias,nickname)
out_dir = os.path.join(self.out_path , nickname)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
begin = WechatSql.select_list_num(wi_id)
app_msg_cnt = self.list_ex(session,fakeid,token,out_dir,wi_id)
diz_dict ={}
if app_msg_cnt != 0:
diz_dict["wi_id"] = wi_id
diz_dict["name"] = name
diz_dict["total"] = app_msg_cnt
diz_dict["current"] = str(app_msg_cnt - begin)
diz_dict["html"] = os.path.join(out_dir,"html")
diz_dict["pdf"] = os.path.join(out_dir,"pdf")
self.diz_list.append(diz_dict)
return
begin = count + begin
if no <= biz_total:
self.search_biz(session,token,name,no,begin,count,biz_total)
def list_ex(self,session,fakeid,token,out_dir,wi_id,no=0,begin=0,count=5,app_msg_cnt=0):
data ={
"action":"list_ex",
"begin":str(begin),
"count":str(count),
"fakeid":str(fakeid),
"type":"9",
"query":"",
"token":str(token),
"lang":"zh_CN",
"f":"json",
"ajax":"1"
}
if begin < 0: # guard against a negative start index
return app_msg_cnt
if app_msg_cnt == 0: # first call: fetch the total article count
session,result = self.__http_request__(method='get',url=self.appmsg_url,data=data,session=session)
if not result:
return app_msg_cnt
app_msg_cnt = result.get("app_msg_cnt")
nums = str(app_msg_cnt/10).split(".")
if int(nums[1]) >= 5:
start = app_msg_cnt - int(nums[1]) + 5
else:
start = app_msg_cnt - int(nums[1])
self.list_ex(session,fakeid,token,out_dir,wi_id,begin=start, app_msg_cnt = app_msg_cnt) # seed the start index and the total article count
return app_msg_cnt
session,result = self.__http_request__(method='get',url=self.appmsg_url,data=data,session=session)
if not result:
return app_msg_cnt
app_msg_cnt = result.get("app_msg_cnt")
app_msg_list = result.get("app_msg_list")
if len(app_msg_list) == 0:
return app_msg_cnt
for app in list(reversed(app_msg_list)):
link = app.get("link")
title = app.get("title")
digest = app.get("digest")
title_list = WechatSql.select_list_title(wi_id,begin)
if title in title_list:
continue
i_date = str(time.time()).replace(".","")
WechatSql.insert_list(wi_id,no,title,link,digest,i_date)
self.__get_article_details__(no,title,link,out_dir)
no = no + 1
begin = begin - count
self.list_ex(session,fakeid,token,out_dir,wi_id,no,begin,count,app_msg_cnt)
def __get_article_details__(self,no,title,link,out_dir):
filters = {'/','\\','?','*',':','"','<','>','|',' ','?','(',')','!',',','“',"”"}
for filter in filters:
title = title.replace(filter,"")
html_path = os.path.join(out_dir,"html")
pdf_path = os.path.join(out_dir,"pdf")
image_path = os.path.join(html_path,"image")
if not os.path.exists(image_path):
os.makedirs(image_path)
if not os.path.exists(pdf_path):
os.makedirs(pdf_path)
html_file = os.path.join(html_path,str(no)+ "-" +title+".html")
pdf_file = os.path.join(pdf_path,str(no)+ "-" +title+".pdf")
if os.path.exists(pdf_file): # skip regeneration when the PDF already exists
return
if not os.path.exists(html_file):
content = self.__get_content__(link,image_path)
with open(html_file,"w") as f:
f.write(content)
f.flush()
f.close()
task_info = {"html":html_file,"pdf":pdf_file}
self.task_queue.put(task_info)
def __get_content__(self,link,image_path):
self.referer = link
session,content = self.__http_request__(method="get",url=link,flag=True)
if not content:
return
src_compile = re.compile(r'data-src=\"(.*?)\"')
src_urls = src_compile.findall(content)
if len(src_urls) == 0:
return
for img_url in src_urls:
if not (img_url.startswith("http://") or img_url.startswith("https://")):
continue
img_url_compile = re.compile("wx_fmt=(.*)?")
img = img_url_compile.findall(img_url)
suffix = ".png"
if len(img)>0:
suffix = "."+ str(img[0])
img_name = str(time.time()).replace(".","") + suffix
img_file = os.path.join(image_path,img_name)
self.__http_io_request__(url=img_url,path=img_file)
self.img_path_dict[img_url] = "./image/"+img_name
content = content.replace("data-src","src")
for key,value in self.img_path_dict.items():
content = content.replace(key,value)
return content
def __http_io_request__(self,method='get',url=None,data=None,headers=None,session=requests.session(),stream=True,path=None):
if method =='get':
resp = session.get(url=url,params=data,headers=self.__head__(headers),stream=stream)
else:
resp = session.post(url=url,data=data,headers=self.__head__(headers),stream=stream)
if resp.statu | f.__head__(headers))
else:
resp = session.post(url = url, data = data, headers = self.__head__(headers))
if resp.status_code != 200:
log.error("网络异常或者错误:"+str(resp.status_code))
return session,None
if flag:
content = resp.text
if not content:
return session,None
return session,content
resp_json = resp.json()
if not resp_json:
return session,None
log.debug(resp_json)
base_resp = resp_json.get("base_resp")
if base_resp:
ret = base_resp.get("ret")
err_msg = base_resp.get("err_msg")
if ret == 0:
return session,resp_json
elif err_msg == "default" or err_msg == "invalid csrf token" or err_msg=="invalid session" :
UserSql.delete_user_info(0)
self.__start_login__()
return
else:
return session,None
def __print__(self):
change = 0
current = 0
for diz in self.diz_list:
titles = WechatSql.select_list_to_diz(diz['wi_id'],self.__start_data__)
for title in titles:
if os.path.exists(os.path.join(diz["pdf"],title+".pdf")):
change = change + 1
if os.path.exists(os.path.join(diz["html"],title+".html")):
current = current + 1
print(("公众号: %s ,共计 %s 篇文章" %(diz["name"],diz["total"])))
print(("==> 本次共计获取 %s 篇文章,成功将 %s 篇文章转换为PDF文件。")%(str(current),str(change)))
print(("==> PDF文件输出目录为: %s")%(diz["pdf"]))
print(("==> HTML文件输出目录为: %s")%(diz["html"]))
def __delete_file__(self):
if os.path.exists(self.out_qrcode_path):
shutil.rmtree(self.out_qrcode_path)
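# Serializes the session cookies into "key=value;" pairs and persists them with the token for later reuse.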
def __save_cookie__(self,session,token):
cookie = ""
cookies = session.cookies.items()
for key,value in cookies:
cookie = cookie + '{0}={1};'.format(key,value)
UserSql.insert_user_info(self.user_name ,self.password,token,cookie,0)
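# Reuses a previously saved token/cookie pair when available; otherwise falls back to QR-code login.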
def __load_cookies__(self):
session = requests.session()
user_info = UserSql.select_user_info(0)
if user_info:
token = user_info[0]
self.cookie = user_info[1]
names = self.name.split(",")
for name in names:
self.search_biz(session,token,name)
else:
self.__start_login__()
| s_code == 200:
with open(path, 'wb+') as f:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
f.close()
return session,True
time.sleep(1)
return session,False
def __http_request__(self,method='post',url=None,data=None,headers=None,session=requests.session(),wait=5,flag=False):
time.sleep(wait)
if method == "get":
resp = session.get(url = url, params = data, headers = sel | identifier_body |
wechat_task.py | # -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/HistoricalArticlesToPdf
import os
import re
import math
import time
import json
import shutil
import psutil
import logging
import requests
from PIL import Image
from queue import Queue
from http import cookiejar
import urllib.parse as urlcode
import libs.sql.user_sql as UserSql
import libs.sql.wechat_sql as WechatSql
from libs.core.html2pdf import HtmlToPdfThreads
log = logging.getLogger(__name__)
class WechatTask(object):
base_url = "https://mp.weixin.qq.com/cgi-bin/"
start_login_url = base_url+"bizlogin?action=startlogin"
getqrcode_url = base_url+"scanloginqrcode?action=getqrcode&random=%s"
ask_url = base_url+"scanloginqrcode?action=ask&token=&lang=zh_CN&f=json&ajax=1"
login_url = base_url+"bizlogin?action=login"
search_biz_url = base_url+"searchbiz"
appmsg_url = base_url+"appmsg"
referer = "https://mp.weixin.qq.com/"
thread_list =[]
img_path_dict = {}
diz_list =[]
def __init__(self,user_name, password, cookie, name, website_url,threads,out_path):
self.user_name = user_name
self.password = password
self.cookie = cookie
self.name = name.replace("\"","").replace(" ","")
self.website_url = website_url
self.task_queue = Queue()
self.threads = threads
self.out_path = out_path
def start(self):
self.__start_data__ = str(time.time()).replace(".","")
self.__create_dir__()
self.__load_cookies__()
self.__start_threads__()
for thread in self.thread_list:
thread.join()
self.__print__()
self.__delete_file__()
def __create_dir__(self):
self.out_qrcode_path = os.path.join(self.out_path,"qrcode")
if not os.path.exists(self.out_qrcode_path):
os.makedirs(self.out_qrcode_path)
self.wx_cookie_path = os.path.join(self.out_path,"wx.info")
def __start_threads__(self):
for thread_id in range(1,self.threads):
thread_name = "Thread - " + str(thread_id)
thread = HtmlToPdfThreads(self.task_queue,thread_id,thread_name)
thread.start()
self.thread_list.append(thread)
def __data__(self,map=None):
data = {"userlang":"zh_CN","redirect_url":"","login_type":"3","token":"","lang":"","f":"json","ajax":"1"}
if map:
for key,value in map.items():
data[key] = value
return data
def __head__(self,heads=None):
head ={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0",
"Referer": self.referer
}
if self.cookie:
head["Cookie"] = self.cookie
if heads:
for key,value in heads.items():
head[key] = value
return head
def __start_login__(self):
data = {"sessionid":str(time.time()).replace(".","")}
session,result = self.__http_request__(url=self.start_login_url,data=self.__data__(data),wait=1)
if result:
self.getqrcode(session)
def getqrcode(self,session):
time_str = str(time.time()).replace(".","")
new_getqrcode_url = self.getqrcode_url.replace("%s",time_str)
qrcode_path = os.path.join(self.out_qrcode_path,time_str + ".png")
self.__http_io_request__(url=new_getqrcode_url,session=session,path=qrcode_path)
log.warn("请使用微信扫描弹出的二维码图片用于登录微信公众号!")
try:
image = Image.open(qrcode_path)
image.show()
except Exception as e:
log.error(e)
raise Exception("获取二维码失败,请重试!")
self.getqrcodeStatus(session)
def getqrcodeStatus(self,session,t=6):
while True:
session,result = self.__http_request__(method='get',url=self.ask_url,wait=t)
if not result:
return
if result.get("status") == "3":
log.warn("二维码已失效,请重新使用微信进行扫码!")
self.getqrcode(session)
return
if str(result.get("status")) == "1":
self.login(session)
return
if t == 6:
t = 7
else:
t = 6
def login(self,session):
data = {"lang":"zh_CN"}
session,result = self.__http_request__(url=self.login_url,data=self.__data__(data))
if not result:
return
redirect_url = result.get("redirect_url")
if not redirect_url:
return
token_compile = re.compile(r'.*token=(.*).*')
token = token_compile.findall(redirect_url)
if len(token) == 0:
return
token = token[0]
names = self.name.split(",")
self.__save_cookie__(session,token)
for name in names:
self.search_biz(session,token,name)
# Search for the official account by name.
def search_biz(self,session,token,name,no=1,begin=0,count=5,total=0):
data = {
"action":"search_biz",
"begin":begin,
"count":count,
"query":name,
"token":token,
"lang":"zh_CN",
"f":"json",
"ajax":1
}
self.referer = ("https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit_v2&action=edit&isNew=1&type=10&createType=0&token=%s&lang=zh_CN") % (token)
session,result = self.__http_request__(method='get',url=self.search_biz_url,data=data)
if not result:
return
biz_list = result.get("list") # list of matching official accounts
biz_total = result.get("total") # total number of matching official accounts
if len(biz_list) == 0:
return
for biz in biz_list:
fakeid = biz.get("fakeid")
nickname = biz.get("nickname")
alias = biz.get("alias")
if nickname != name:
continue
wi_id = WechatSql.insert_info(fakeid,alias,nickname)
out_dir = os.path.join(self.out_path , nickname)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
begin = WechatSql.select_list_num(wi_id)
app_msg_cnt = self.list_ex(session,fakeid,token,out_dir,wi_id)
diz_dict ={}
if app_msg_cnt != 0:
diz_dict["wi_id"] = wi_id
diz_dict["name"] = name
diz_dict["total"] = app_msg_cnt
diz_dict["current"] = str(app_msg_cnt - begin)
diz_dict["html"] = os.path.join(out_dir,"html")
diz_dict["pdf"] = os.path.join(out_dir,"pdf")
self.diz_list.append(diz_dict)
return
begin = count + begin
if no <= biz_total:
self.search_biz(session,token,name,no,begin,count,biz_total)
def list_ex(self,session,fakeid,token,out_dir,wi_id,no=0,begin=0,count=5,app_msg_cnt=0):
data ={
"action":"list_ex",
"begin":str(begin),
"count":str(count),
"fakeid":str(fakeid),
"type":"9",
"query":"",
"token":str(token),
"lang":"zh_CN",
"f":"json",
"ajax":"1"
}
if begin < 0: # guard against a negative start index
return app_msg_cnt
if app_msg_cnt == 0: # first call: fetch the total article count
session,result = self.__http_request__(method='get',url=self.appmsg_url,data=data,session=session)
if not result:
return app_msg_cnt
app_msg_cnt = result.get("app_msg_cnt")
nums = str(app_msg_cnt/10).split(".")
if int(nums[1]) >= 5:
start = app_msg_cnt - int(nums[1]) + 5
else:
start = app_msg_cnt - int(nums[1])
self.list_ex(session,fakeid,token,out_dir,wi_id,begin=start, app_msg_cnt = app_msg_cnt) # seed the start index and the total article count
return app_msg_cnt
session,result = self.__http_request__(method='get',url=self.appmsg_url,data=data,session=session)
if not result:
return app_msg_cnt
app_msg_cnt = result.get("app_msg_cnt")
app_msg_list = result.get("app_msg_list")
if len(app_msg_list) == 0:
return app_msg_cnt
for app in list(reversed(app_msg_list)):
link = app.get("link")
title = app.get("title")
digest = app.get("digest")
title_list = WechatSql.select_list_title(wi_id,begin)
if title in title_list:
continue
i_date = str(time.time()).replace(".","")
WechatSql.insert_list(wi_id,no,title,link,digest,i_date)
self.__get_article_details__(no,title,link,out_dir)
no = no + 1
begin = begin - count
self.list_ex(session,fakeid,token,out_dir,wi_id,no,begin,count,app_msg_cnt)
def __get_article_details__(self,no,title,link,out_dir):
filters = {'/','\\','?','*',':','"','<','>','|',' ','?','(',')','!',',','“',"”"}
for filter in filters:
title = title.replace(filter,"")
html_path = os.path.join(out_dir,"html")
pdf_path = os.path.join(out_dir,"pdf")
image_path = os.path.join(html_path,"image")
if not os.path.exists(image_path):
os.makedirs(image_path)
if not os.path.exists(pdf_path):
os.makedirs(pdf_path)
html_file = os.path.join(html_path,str(no)+ "-" +title+".html")
pdf_file = os.path.join(pdf_path,str(no)+ "-" +title+".pdf")
if os.path.exists(pdf_file): # skip regeneration when the PDF already exists
return
if not os.path.exists(html_file):
content = self.__get_content__(link,image_path)
with open(html_file,"w") as f:
f.write(content)
f.flush()
f.close()
task_info = {"html":html_file,"pdf":pdf_file}
self.task_queue.put(task_info)
def __get_content__(self,link,image_path):
self.referer = link
session,content = self.__http_request__(method="get",url=link,flag=True)
if not content:
return
src_compile = re.compile(r'data-src=\"(.*?)\"')
src_urls = src_compile.findall(content)
if len(src_urls) == 0:
return
for img_url in src_urls:
if not (img_url.startswith("http://") or img_url.startswith("https://")):
continue
img_url_compile = re.compile("wx_fmt=(.*)?")
img = img_url_compile.findall(img_url)
suffix = ".png"
if len(img)>0:
suffix = "."+ str(img[0])
img_name = str(time.time()).replace(".","") + suffix
img_file = os.path.join(image_path,img_name)
self.__http_io_request__(url=img_url,path=img_file)
self.img_path_dict[img_url] = "./image/"+img_name
content = content.replace("data-src","src")
for key,value in self.img_path_dict.items():
content = content.replace(key,value)
return content
| if method =='get':
resp = session.get(url=url,params=data,headers=self.__head__(headers),stream=stream)
else:
resp = session.post(url=url,data=data,headers=self.__head__(headers),stream=stream)
if resp.status_code == 200:
with open(path, 'wb+') as f:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
f.close()
return session,True
time.sleep(1)
return session,False
def __http_request__(self,method='post',url=None,data=None,headers=None,session=requests.session(),wait=5,flag=False):
time.sleep(wait)
if method == "get":
resp = session.get(url = url, params = data, headers = self.__head__(headers))
else:
resp = session.post(url = url, data = data, headers = self.__head__(headers))
if resp.status_code != 200:
log.error("网络异常或者错误:"+str(resp.status_code))
return session,None
if flag:
content = resp.text
if not content:
return session,None
return session,content
resp_json = resp.json()
if not resp_json:
return session,None
log.debug(resp_json)
base_resp = resp_json.get("base_resp")
if base_resp:
ret = base_resp.get("ret")
err_msg = base_resp.get("err_msg")
if ret == 0:
return session,resp_json
elif err_msg == "default" or err_msg == "invalid csrf token" or err_msg=="invalid session" :
UserSql.delete_user_info(0)
self.__start_login__()
return
else:
return session,None
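# Prints a per-account summary: total articles, how many were fetched this run, how many became PDFs, and the output directories.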
def __print__(self):
change = 0
current = 0
for diz in self.diz_list:
titles = WechatSql.select_list_to_diz(diz['wi_id'],self.__start_data__)
for title in titles:
if os.path.exists(os.path.join(diz["pdf"],title+".pdf")):
change = change + 1
if os.path.exists(os.path.join(diz["html"],title+".html")):
current = current + 1
print(("公众号: %s ,共计 %s 篇文章" %(diz["name"],diz["total"])))
print(("==> 本次共计获取 %s 篇文章,成功将 %s 篇文章转换为PDF文件。")%(str(current),str(change)))
print(("==> PDF文件输出目录为: %s")%(diz["pdf"]))
print(("==> HTML文件输出目录为: %s")%(diz["html"]))
def __delete_file__(self):
if os.path.exists(self.out_qrcode_path):
shutil.rmtree(self.out_qrcode_path)
def __save_cookie__(self,session,token):
cookie = ""
cookies = session.cookies.items()
for key,value in cookies:
cookie = cookie + '{0}={1};'.format(key,value)
UserSql.insert_user_info(self.user_name ,self.password,token,cookie,0)
def __load_cookies__(self):
session = requests.session()
user_info = UserSql.select_user_info(0)
if user_info:
token = user_info[0]
self.cookie = user_info[1]
names = self.name.split(",")
for name in names:
self.search_biz(session,token,name)
else:
self.__start_login__() | def __http_io_request__(self,method='get',url=None,data=None,headers=None,session=requests.session(),stream=True,path=None): | random_line_split |
wechat_task.py | # -*- coding: utf-8 -*-
# Author: kelvinBen
# Github: https://github.com/kelvinBen/HistoricalArticlesToPdf
import os
import re
import math
import time
import json
import shutil
import psutil
import logging
import requests
from PIL import Image
from queue import Queue
from http import cookiejar
import urllib.parse as urlcode
import libs.sql.user_sql as UserSql
import libs.sql.wechat_sql as WechatSql
from libs.core.html2pdf import HtmlToPdfThreads
log = logging.getLogger(__name__)
class WechatTask(object):
base_url = "https://mp.weixin.qq.com/cgi-bin/"
start_login_url = base_url+"bizlogin?action=startlogin"
getqrcode_url = base_url+"scanloginqrcode?action=getqrcode&random=%s"
ask_url = base_url+"scanloginqrcode?action=ask&token=&lang=zh_CN&f=json&ajax=1"
login_url = base_url+"bizlogin?action=login"
search_biz_url = base_url+"searchbiz"
appmsg_url = base_url+"appmsg"
referer = "https://mp.weixin.qq.com/"
thread_list =[]
img_path_dict = {}
diz_list =[]
def __init__(self,user_name, password, cookie, name, website_url,threads,out_path):
self.user_name = user_name
self.password = password
self.cookie = cookie
self.name = name.replace("\"","").replace(" ","")
self.website_url = website_url
self.task_queue = Queue()
self.threads = threads
self.out_path = out_path
def start(self):
self.__start_data__ = str(time.time()).replace(".","")
self.__create_dir__()
self.__load_cookies__()
self.__start_threads__()
for thread in self.thread_list:
thread.join()
self.__print__()
self.__delete_file__()
def __create_dir__(self):
self.out_qrcode_path = os.path.join(self.out_path,"qrcode")
if not os.path.exists(self.out_qrcode_path):
os.makedirs(self.out_qrcode_path)
self.wx_cookie_path = os.path.join(self.out_path,"wx.info")
def __start_threads__(self):
for thread_id in range(1,self.threads):
thread_name = "Thread - " + str(thread_id)
thread = HtmlToPdfThreads(self.task_queue,thread_id,thread_name)
thread.start()
self.thread_list.append(thread)
def __data__(self,map=None):
data = {"userlang":"zh_CN","redirect_url":"","login_type":"3","token":"","lang":"","f":"json","ajax":"1"}
if map:
for key,value in map.items():
data[key] = value
return data
def __head__(self,heads=None):
head ={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0",
"Referer": self.referer
}
if self.cookie:
head["Cookie"] = self.cookie
if heads:
for key,value in heads.items():
head[key] = value
return head
def __start_login__(self):
data = {"sessionid":str(time.time()).replace(".","")}
session,result = self.__http_request__(url=self.start_login_url,data=self.__data__(data),wait=1)
if result:
self.getqrcode(session)
def getqrcode(self,session):
time_str = str(time.time()).replace(".","")
new_getqrcode_url = self.getqrcode_url.replace("%s",time_str)
qrcode_path = os.path.join(self.out_qrcode_path,time_str + ".png")
self.__http_io_request__(url=new_getqrcode_url,session=session,path=qrcode_path)
        log.warning("Please scan the QR code image shown with WeChat to log in to the official account platform!")
try:
image = Image.open(qrcode_path)
image.show()
except Exception as e:
log.error(e)
            raise Exception("Failed to fetch the QR code, please retry!")
self.getqrcodeStatus(session)
def getqrcodeStatus(self,session,t=6):
while True:
session,result = self.__http_request__(method='get',url=self.ask_url,wait=t)
if not result:
return
if result.get("status") == "3":
                log.warning("The QR code has expired, please scan it again with WeChat!")
self.getqrcode(session)
return
if str(result.get("status")) == "1":
self.login(session)
return
if t == 6:
t = 7
else:
t = 6
def login(self,session):
data = {"lang":"zh_CN"}
session,result = self.__http_request__(url=self.login_url,data=self.__data__(data))
if not result:
return
redirect_url = result.get("redirect_url")
if not redirect_url:
return
token_compile = re.compile(r'.*token=(.*).*')
token = token_compile.findall(redirect_url)
        if len(token) == 0:
return
token = token[0]
names = self.name.split(",")
self.__save_cookie__(session,token)
for name in names:
self.search_biz(session,token,name)
    # Search for the official account by name
def search_biz(self,session,token,name,no=1,begin=0,count=5,total=0):
data = {
"action":"search_biz",
"begin":begin,
"count":count,
"query":name,
"token":token,
"lang":"zh_CN",
"f":"json",
"ajax":1
}
self.referer = ("https://mp.weixin.qq.com/cgi-bin/appmsg?t=media/appmsg_edit_v2&action=edit&isNew=1&type=10&createType=0&token=%s&lang=zh_CN") % (token)
session,result = self.__http_request__(method='get',url=self.search_biz_url,data=data)
if not result:
return
        biz_list = result.get("list") # list of matching official accounts
        biz_total = result.get("total") # total number of matches
if len(biz_list) == 0:
return
for biz in biz_list:
fakeid = biz.get("fakeid")
nickname = biz.get("nickname")
alias = biz.get("alias")
if nickname != name:
continue
wi_id = WechatSql.insert_info(fakeid,alias,nickname)
out_dir = os.path.join(self.out_path , nickname)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
begin = WechatSql.select_list_num(wi_id)
app_msg_cnt = self.list_ex(session,fakeid,token,out_dir,wi_id)
diz_dict ={}
if app_msg_cnt != 0:
diz_dict["wi_id"] = wi_id
diz_dict["name"] = name
diz_dict["total"] = app_msg_cnt
diz_dict["current"] = str(app_msg_cnt - begin)
diz_dict["html"] = os.path.join(out_dir,"html")
diz_dict["pdf"] = os.path.join(out_dir,"pdf")
self.diz_list.append(diz_dict)
return
begin = count + begin
if no <= biz_total:
self.search_biz(session,token,name,no,begin,count,biz_total)
def list_ex(self,session,fakeid,token,out_dir,wi_id,no=0,begin=0,count=5,app_msg_cnt=0):
data ={
"action":"list_ex",
"begin":str(begin),
"count":str(count),
"fakeid":str(fakeid),
"type":"9",
"query":"",
"token":str(token),
"lang":"zh_CN",
"f":"json",
"ajax":"1"
}
        if begin < 0: # guard against a negative start index
return app_msg_cnt
        if app_msg_cnt == 0: # first call: fetch the total number of articles
session,result = self.__http_request__(method='get',url=self.appmsg_url,data=data,session=session)
if not result:
return app_msg_cnt
app_msg_cnt = result.get("app_msg_cnt")
nums = str(app_msg_cnt/10).split(".")
if int(nums[1]) >= 5:
start = app_msg_cnt - int(nums[1]) + 5
else:
start = app_msg_cnt - int(nums[1])
            self.list_ex(session,fakeid,token,out_dir,wi_id,begin=start, app_msg_cnt = app_msg_cnt) # recurse with the computed start offset and the total count
return app_msg_cnt
session,result = self.__http_request__(method='get',url=self.appmsg_url,data=data,session=session)
if not result:
return app_msg_cnt
app_msg_cnt = result.get("app_msg_cnt")
app_msg_list = result.get("app_msg_list")
if len(app_msg_list) == 0:
return app_msg_cnt
for app in list(reversed(app_msg_list)):
link = app.get("link")
title = app.get("title")
digest = app.get("digest")
title_list = WechatSql.select_list_title(wi_id,begin)
if title in title_list:
continue
            i_date = str(time.time()).replace(".","")
WechatSql.insert_list(wi_id,no,title,link,digest,i_date)
self.__get_article_details__(no,title,link,out_dir)
no = no + 1
begin = begin - count
self.list_ex(session,fakeid,token,out_dir,wi_id,no,begin,count,app_msg_cnt)
def __get_article_details__(self,no,title,link,out_dir):
filters = {'/','\\','?','*',':','"','<','>','|',' ','?','(',')','!',',','“',"”"}
for filter in filters:
            title = title.replace(filter,"")
        html_path = os.path.join(out_dir,"html")
pdf_path = os.path.join(out_dir,"pdf")
image_path = os.path.join(html_path,"image")
if not os.path.exists(image_path):
os.makedirs(image_path)
if not os.path.exists(pdf_path):
os.makedirs(pdf_path)
html_file = os.path.join(html_path,str(no)+ "-" +title+".html")
pdf_file = os.path.join(pdf_path,str(no)+ "-" +title+".pdf")
        if os.path.exists(pdf_file): # skip regeneration if the PDF already exists; otherwise continue
return
if not os.path.exists(html_file):
content = self.__get_content__(link,image_path)
with open(html_file,"w") as f:
f.write(content)
f.flush()
f.close()
task_info = {"html":html_file,"pdf":pdf_file}
self.task_queue.put(task_info)
def __get_content__(self,link,image_path):
self.referer = link
session,content = self.__http_request__(method="get",url=link,flag=True)
if not content:
return
src_compile = re.compile(r'data-src=\"(.*?)\"')
src_urls = src_compile.findall(content)
        if len(src_urls) == 0:
return
for img_url in src_urls:
if not (img_url.startswith("http://") or img_url.startswith("https://")):
continue
img_url_compile = re.compile("wx_fmt=(.*)?")
img = img_url_compile.findall(img_url)
suffix = ".png"
if len(img)>0:
suffix = "."+ str(img[0])
img_name = str(time.time()).replace(".","") + suffix
img_file = os.path.join(image_path,img_name)
self.__http_io_request__(url=img_url,path=img_file)
self.img_path_dict[img_url] = "./image/"+img_name
content = content.replace("data-src","src")
for key,value in self.img_path_dict.items():
content = content.replace(key,value)
return content
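    # Streaming variant of __http_request__: downloads a binary resource
    # (QR-code image, article image) chunk by chunk into `path`.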
def __http_io_request__(self,method='get',url=None,data=None,headers=None,session=requests.session(),stream=True,path=None):
if method =='get':
resp = session.get(url=url,params=data,headers=self.__head__(headers),stream=stream)
else:
resp = session.post(url=url,data=data,headers=self.__head__(headers),stream=stream)
if resp.status_code == 200:
with open(path, 'wb+') as f:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
f.close()
return session,True
time.sleep(1)
return session,False
def __http_request__(self,method='post',url=None,data=None,headers=None,session=requests.session(),wait=5,flag=False):
time.sleep(wait)
if method == "get":
resp = session.get(url = url, params = data, headers = self.__head__(headers))
else:
resp = session.post(url = url, data = data, headers = self.__head__(headers))
if resp.status_code != 200:
            log.error("Network error or unexpected status code: "+str(resp.status_code))
return session,None
if flag:
content = resp.text
if not content:
return session,None
return session,content
resp_json = resp.json()
if not resp_json:
return session,None
log.debug(resp_json)
base_resp = resp_json.get("base_resp")
if base_resp:
ret = base_resp.get("ret")
err_msg = base_resp.get("err_msg")
if ret == 0:
return session,resp_json
elif err_msg == "default" or err_msg == "invalid csrf token" or err_msg=="invalid session" :
UserSql.delete_user_info(0)
self.__start_login__()
                return session,None
else:
return session,None
def __print__(self):
change = 0
current = 0
for diz in self.diz_list:
titles = WechatSql.select_list_to_diz(diz['wi_id'],self.__start_data__)
for title in titles:
if os.path.exists(os.path.join(diz["pdf"],title+".pdf")):
change = change + 1
if os.path.exists(os.path.join(diz["html"],title+".html")):
current = current + 1
            print(("Official account: %s, %s articles in total" %(diz["name"],diz["total"])))
            print(("==> Fetched %s articles this run; converted %s of them to PDF.")%(str(current),str(change)))
            print(("==> PDF output directory: %s")%(diz["pdf"]))
            print(("==> HTML output directory: %s")%(diz["html"]))
def __delete_file__(self):
if os.path.exists(self.out_qrcode_path):
shutil.rmtree(self.out_qrcode_path)
def __save_cookie__(self,session,token):
cookie = ""
cookies = session.cookies.items()
for key,value in cookies:
cookie = cookie + '{0}={1};'.format(key,value)
UserSql.insert_user_info(self.user_name ,self.password,token,cookie,0)
def __load_cookies__(self):
session = requests.session()
user_info = UserSql.select_user_info(0)
if user_info:
token = user_info[0]
self.cookie = user_info[1]
names = self.name.split(",")
for name in names:
self.search_biz(session,token,name)
else:
            self.__start_login__()
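# Minimal usage sketch (the argument values below are assumptions, not
# project defaults; see __init__ for the expected parameters):
#
#     task = WechatTask(user_name="user", password="secret", cookie="",
#                       name="SomeOfficialAccount", website_url="",
#                       threads=4, out_path="./output")
#     task.start()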
write.rs
use std::{
cmp,
convert::TryInto,
io::{self, Write},
};
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
use cgmath::prelude::*;
use crate::{
prelude::*,
io::{PropKind, Error, ErrorKind},
};
use super::{Encoding, RawTriangle};
// ----------------------------------------------------------------------------
/// The solid name used when the user didn't specify one.
const DEFAULT_SOLID_NAME: &str = "mesh";
// ===============================================================================================
// ===== STL Config
// ===============================================================================================
/// Used to configure and create a [`Writer`].
///
/// This is used to configure basic settings for the file to be written. You
/// can use the [`Config::into_writer`] method to create a [`Writer`] that can
/// be used as streaming sink.
#[derive(Clone, Debug)]
pub struct Config {
solid_name: String,
encoding: Encoding,
}
impl Config {
/// Creates a new builder instance from the given encoding. For
/// convenience, you can use [`Config::binary()`] or [`Config::ascii()`]
/// directly.
pub fn new(encoding: Encoding) -> Self {
Self {
solid_name: DEFAULT_SOLID_NAME.into(),
encoding,
}
}
/// Creates a new builder instance for a binary STL file.
    pub fn binary() -> Self {
Self::new(Encoding::Binary)
}
/// Creates a new builder instance for an ASCII STL file.
///
/// **Note**: please don't use this. STL ASCII files are even more space
/// inefficient than binary STL files. If you can avoid it, never use ASCII
/// STL. In fact, consider not using STL at all.
pub fn ascii() -> Self {
Self::new(Encoding::Ascii)
}
/// Sets the solid name for this file.
///
/// The given name must be an ASCII string (otherwise the function panics).
/// If a binary file is written, only 76 bytes of the string are written to
/// the file.
pub fn with_solid_name(self, name: impl Into<String>) -> Self {
let solid_name = name.into();
assert!(solid_name.is_ascii());
Self {
solid_name,
.. self
}
}
/// Creates a [`Writer`] with `self` as config.
pub fn into_writer<W: io::Write>(self, writer: W) -> Writer<W> {
Writer::new(self, writer)
}
}
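// Example configuration (a minimal sketch: the file name and solid name are
// assumptions, and any `io::Write` sink works):
//
//     let file = std::fs::File::create("out.stl")?;
//     let writer = Config::binary()
//         .with_solid_name("cube")
//         .into_writer(std::io::BufWriter::new(file));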
// ===============================================================================================
// ===== STL Writer
// ===============================================================================================
/// A writer able to write binary and ASCII STL files. Implements
/// [`StreamSink`].
#[derive(Debug)]
pub struct Writer<W: io::Write> {
config: Config,
writer: W,
}
impl<W: io::Write> Writer<W> {
/// Creates a new STL writer with the given STL config which will write to
/// the given `io::Write` instance.
pub fn new(config: Config, writer: W) -> Self {
Self { config, writer }
}
/// Low level function to write STL files.
///
/// You usually don't need to use this function directly and instead use a
/// high level interface. This function is still exposed to give you more
/// or less complete control.
pub fn write_raw(
self,
num_triangles: u32,
triangles: impl IntoIterator<Item = Result<RawTriangle, Error>>,
) -> Result<(), Error> {
if self.config.encoding == Encoding::Ascii {
self.write_raw_ascii(triangles)
} else {
self.write_raw_binary(num_triangles, triangles)
}
}
#[inline(never)]
pub fn write_raw_binary(
self,
num_triangles: u32,
triangles: impl IntoIterator<Item = Result<RawTriangle, Error>>,
) -> Result<(), Error> {
let config = self.config;
let mut w = self.writer;
        // First, an 80 byte header that must not begin with "solid".
// We try to fit the solid name in it.
let name_len = cmp::min(config.solid_name.len(), 76);
let signature = format!("LOX {}", &config.solid_name[..name_len]);
let padding = vec![b' '; 80 - signature.len()];
w.write_all(signature.as_bytes())?;
w.write_all(&padding)?;
// Next, number of triangles
w.write_u32::<LittleEndian>(num_triangles)?;
const TRI_SIZE: usize = 4 * 3 * 4 + 2;
let mut buf = [0; TRI_SIZE];
for triangle in triangles {
let triangle = triangle?;
// Write face normal
LittleEndian::write_f32(&mut buf[00..04], triangle.normal[0]);
LittleEndian::write_f32(&mut buf[04..08], triangle.normal[1]);
LittleEndian::write_f32(&mut buf[08..12], triangle.normal[2]);
LittleEndian::write_f32(&mut buf[12..16], triangle.vertices[0][0]);
LittleEndian::write_f32(&mut buf[16..20], triangle.vertices[0][1]);
LittleEndian::write_f32(&mut buf[20..24], triangle.vertices[0][2]);
LittleEndian::write_f32(&mut buf[24..28], triangle.vertices[1][0]);
LittleEndian::write_f32(&mut buf[28..32], triangle.vertices[1][1]);
LittleEndian::write_f32(&mut buf[32..36], triangle.vertices[1][2]);
LittleEndian::write_f32(&mut buf[36..40], triangle.vertices[2][0]);
LittleEndian::write_f32(&mut buf[40..44], triangle.vertices[2][1]);
LittleEndian::write_f32(&mut buf[44..48], triangle.vertices[2][2]);
LittleEndian::write_u16(&mut buf[48..50], triangle.attribute_byte_count);
w.write_all(&buf)?;
}
Ok(())
}
#[inline(never)]
pub fn write_raw_ascii(
self,
triangles: impl IntoIterator<Item = Result<RawTriangle, Error>>,
) -> Result<(), Error> {
let config = self.config;
let mut w = self.writer;
writeln!(w, "solid {}", config.solid_name)?;
for triangle in triangles {
let triangle = triangle?;
// Write face normal
write!(w, " facet normal ")?;
write_ascii_vector(&mut w, triangle.normal)?;
writeln!(w, "")?;
// Write all vertex positions
writeln!(w, " outer loop")?;
for &vertex_pos in &triangle.vertices {
write!(w, " vertex ")?;
write_ascii_vector(&mut w, vertex_pos)?;
writeln!(w, "")?;
}
writeln!(w, " endloop")?;
writeln!(w, " endfacet")?;
}
writeln!(w, "endsolid {}", config.solid_name)?;
Ok(())
}
}
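// Sketch of the low-level path (the triangle values are made up; `write_raw`
// takes the triangle count plus a fallible iterator, as defined above):
//
//     let tri = RawTriangle {
//         vertices: [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
//         normal: [0.0, 0.0, 1.0],
//         attribute_byte_count: 0,
//     };
//     Config::binary().into_writer(sink).write_raw(1, std::iter::once(Ok(tri)))?;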
impl<W: io::Write> StreamSink for Writer<W> {
#[inline(never)]
fn transfer_from<S: MemSource>(self, src: &S) -> Result<(), Error> {
// Make sure we have positions
if src.vertex_position_type().is_none() {
return Err(Error::new(|| ErrorKind::DataIncomplete {
prop: PropKind::VertexPosition,
msg: "source does not provide vertex positions, but STL requires them".into(),
}));
}
let mesh = src.core_mesh();
let has_normals = src.face_normal_type().is_some();
// The triangle iterator
let triangles = mesh.face_handles().map(|fh| {
let mut it = mesh.vertices_around_face(fh);
let va = it.next().expect("bug: less than 3 vertices around face");
let vb = it.next().expect("bug: less than 3 vertices around face");
let vc = it.next().expect("bug: less than 3 vertices around face");
// Make sure this is a triangle face. Note: we do not check
// `mesh.is_tri_mesh()` in the beginning, as we also want to be
// able to serialize triangle meshes whose type does not implement
// `TriMesh`. We only want to error if there is actually a non-tri
// face.
if it.next().is_some() {
return Err(Error::new(|| ErrorKind::StreamSinkDoesNotSupportPolygonFaces));
}
// Get positions from map and convert them to array
let get_v = |vh| -> Result<[f32; 3], Error> {
src.vertex_position::<f32>(vh)
.and_then(|opt| {
opt.ok_or_else(|| Error::new(|| ErrorKind::DataIncomplete {
prop: PropKind::VertexPosition,
msg: format!("no position for {:?} while writing STL", vh),
}))
})
.map(|p| p.convert()) // to array form
};
let vertices = [get_v(va)?, get_v(vb)?, get_v(vc)?];
let normal = if has_normals {
src.face_normal::<f32>(fh)?
.ok_or_else(|| Error::new(|| ErrorKind::DataIncomplete {
prop: PropKind::FaceNormal,
msg: format!("no normal for {:?} while writing STL", fh),
}))?
.convert() // to array form
} else {
calc_normal(&vertices)
};
Ok(RawTriangle {
vertices,
normal,
// As Wikipedia beautifully put it: "this should be zero
// because most software does not understand anything else."
// Great. Some people abuse this to store color or other
// information. This is terrible, we won't do that.
attribute_byte_count: 0,
})
});
let face_count = mesh.num_faces().try_into().map_err(|_| {
Error::new(|| ErrorKind::SinkIncompatible(
"STL only supports 2^32 triangles, but mesh contains more faces".into()
))
})?;
self.write_raw(face_count, triangles)
}
}
// ===============================================================================================
// ===== Helper functions
// ===============================================================================================
/// Calculates the normal of the face defined by the three vertices in CCW order.
fn calc_normal(positions: &[[f32; 3]; 3]) -> [f32; 3] {
let pos_a = positions[0].to_point3();
let pos_b = positions[1].to_point3();
let pos_c = positions[2].to_point3();
let normal = (pos_b - pos_a).cross(pos_c - pos_a).normalize();
[normal.x, normal.y, normal.z]
}
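// E.g. the CCW unit triangle (0,0,0), (1,0,0), (0,1,0) yields the +z normal
// [0.0, 0.0, 1.0].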
// ===============================================================================================
// ===== Functions for body writing
// ===============================================================================================
/// Writes the three values of the given vector (in STL ASCII encoding,
/// separated by ' ') into the writer.
fn write_ascii_vector(w: &mut impl Write, [x, y, z]: [f32; 3]) -> Result<(), io::Error> {
write_ascii_f32(w, x)?;
write!(w, " ")?;
write_ascii_f32(w, y)?;
write!(w, " ")?;
write_ascii_f32(w, z)?;
Ok(())
}
/// Writes the given `f32` in STL ASCII format into the given writer.
///
/// The STL specification is terribly underspecified. The only information
/// about how to encode floats in ASCII is this:
///
/// > The numerical data in the facet normal and vertex lines are single
/// > precision floats, for example, 1.23456E+789. A facet normal coordinate
/// > may have a leading minus sign; a vertex coordinate may not.
///
/// I don't think the last sentence makes any sense: why forbid negative
/// coordinates? In any case, no one in the real world cares about that: there
/// are plenty of STL files out there with negative vertex coordinates.
///
/// About the actual format: clearly unhelpful. In real world, STL files floats
/// are encoded all over the place. I've seen `1`, `1.2`, `10.2`, `1.02e1`,
/// `1.020000E+001` and more. We just stick to the exact format mentioned in
/// the "specification". This does not necessarily make any sense and wastes
/// memory, but so does ASCII STL. Just don't use the ASCII STL format!
fn write_ascii_f32(w: &mut impl Write, v: f32) -> Result<(), io::Error> {
use std::num::FpCategory;
match v.classify() {
FpCategory::Normal | FpCategory::Subnormal => {
let exponent = v.abs().log10().floor();
let mantissa = v / 10f32.powf(exponent);
write!(w, "{}E{:+}", mantissa, exponent)
}
_ => {
// `v` is either infinite, `NaN` or zero. We want to serialize
// the zeroes as `0.0`.
write!(w, "{:.1}", v)
}
}
}
jh.py
# -*- coding: utf-8 -*-
from . import op
Jh_BlockSize = 64
Jh_StateSize = 32
JH_HX = 8
JH_HY = 4
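# Initial hash state (presumably the JH-512 H^(0) value from the JH
# specification), stored as 32 32-bit words.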
IV512 = [
(0x6fd14b96), (0x3e00aa17), (0x636a2e05), (0x7a15d543),
(0x8a225e8d), (0x0c97ef0b), (0xe9341259), (0xf2b3c361),
(0x891da0c1), (0x536f801e), (0x2aa9056b), (0xea2b6d80),
(0x588eccdb), (0x2075baa6), (0xa90f3a76), (0xbaf83bf7),
(0x0169e605), (0x41e34a69), (0x46b58a8e), (0x2e6fe65a),
(0x1047a7d0), (0xc1843c24), (0x3b6e71b1), (0x2d5ac199),
(0xcf57f6ec), (0x9db1f856), (0xa706887c), (0x5716b156),
(0xe3c2fcdf), (0xe68517fb), (0x545a4678), (0xcc8cdd4b),
]
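# Round constants consumed by Ceven/Codd below: 8 words per round for the
# 42 rounds of the E8 permutation.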
C = [
0xa2ded572, 0x67f815df, 0x0a15847b, 0x571523b7, 0x90d6ab81, 0xf6875a4d,
0xc54f9f4e, 0x402bd1c3, 0xe03a98ea, 0x9cfa455c, 0x99d2c503, 0x9a99b266,
0xb4960266, 0x8a53bbf2, 0x1a1456b5, 0x31a2db88, 0x5c5aa303, 0xdb0e199a,
0x0ab23f40, 0x1044c187, 0x8019051c, 0x1d959e84, 0xadeb336f, 0xdccde75e,
0x9213ba10, 0x416bbf02, 0x156578dc, 0xd027bbf7, 0x39812c0a, 0x5078aa37,
0xd2bf1a3f, 0xd3910041, 0x0d5a2d42, 0x907eccf6, 0x9c9f62dd, 0xce97c092,
0x0ba75c18, 0xac442bc7, 0xd665dfd1, 0x23fcc663, 0x036c6e97, 0x1ab8e09e,
0x7e450521, 0xa8ec6c44, 0xbb03f1ee, 0xfa618e5d, 0xb29796fd, 0x97818394,
0x37858e4a, 0x2f3003db, 0x2d8d672a, 0x956a9ffb, 0x8173fe8a, 0x6c69b8f8,
0x4672c78a, 0x14427fc0, 0x8f15f4c5, 0xc45ec7bd, 0xa76f4475, 0x80bb118f,
0xb775de52, 0xbc88e4ae, 0x1e00b882, 0xf4a3a698, 0x338ff48e, 0x1563a3a9,
0x24565faa, 0x89f9b7d5, 0x20edf1b6, 0xfde05a7c, 0x5ae9ca36, 0x362c4206,
0x433529ce, 0x3d98fe4e, 0x74f93a53, 0xa74b9a73, 0x591ff5d0, 0x86814e6f,
0x81ad9d0e, 0x9f5ad8af, 0x670605a7, 0x6a6234ee, 0xbe280b8b, 0x2717b96e,
0x26077447, 0x3f1080c6, 0x6f7ea0e0, 0x7b487ec6, 0xa50a550d, 0xc0a4f84a,
0x9fe7e391, 0x9ef18e97, 0x81727686, 0xd48d6050, 0x415a9e7e, 0x62b0e5f3,
0xec1f9ffc, 0x7a205440, 0x001ae4e3, 0x84c9f4ce, 0xf594d74f, 0xd895fa9d,
0x117e2e55, 0xa554c324, 0x2872df5b, 0x286efebd, 0xe27ff578, 0xb2c4a50f,
0xef7c8905, 0x2ed349ee, 0x85937e44, 0x7f5928eb, 0x37695f70, 0x4a3124b3,
0xf128865e, 0x65e4d61d, 0x04771bc7, 0xe720b951, 0xe843fe74, 0x8a87d423,
0xa3e8297d, 0xf2947692, 0x097acbdd, 0xc1d9309b, 0xfb301b1d, 0xe01bdc5b,
0x4f4924da, 0xbf829cf2, 0x31bae7a4, 0xffbf70b4, 0x0544320d, 0x48bcf8de,
0x32fcae3b, 0x39d3bb53, 0xc1c39f45, 0xa08b29e0, 0xfd05c9e5, 0x0f09aef7,
0x12347094, 0x34f19042, 0x01b771a2, 0x95ed44e3, 0x368e3be9, 0x4a982f4f,
0x631d4088, 0x15f66ca0, 0x4b44c147, 0xffaf5287, 0xf14abb7e, 0x30c60ae2,
0xc5b67046, 0xe68c6ecc, 0x56a4d5a4, 0x00ca4fbd, 0x4b849dda, 0xae183ec8,
0x45ce5773, 0xadd16430, 0x68cea6e8, 0x67255c14, 0xf28cdaa3, 0x16e10ecb,
0x5806e933, 0x9a99949a, 0x20b2601f, 0x7b846fc2, 0x7facced1, 0x1885d1a0,
0xa15b5932, 0xd319dd8d, 0xc01c9a50, 0x46b4a5aa, 0x67633d9f, 0xba6b04e4,
0xab19caf6, 0x7eee560b, 0xea79b11f, 0x742128a9, 0x35f7bde9, 0xee51363b,
0x5aac571d, 0x76d35075, 0xfec2463a, 0x01707da3, 0xafc135f7, 0x42d8a498,
0x20eced78, 0x79676b9e, 0x15638341, 0xa8db3aea, 0x4d3bc3fa, 0x832c8332,
0x1f3b40a7, 0xf347271c, 0x34f04059, 0x9a762db7, 0x6c4e3ee7, 0xfd4f21d2,
0x398dfdb8, 0xef5957dc, 0x490c9b8d, 0xdaeb492b, 0x49d7a25b, 0x0d70f368,
0xd0ae3b7d, 0x84558d7a, 0xf0e9a5f5, 0x658ef8e4, 0xf4a2b8a0, 0x533b1036,
0x9e07a80c, 0x5aec3e75, 0x92946891, 0x4f88e856, 0x555cb05b, 0x4cbcbaf8,
0x993bbbe3, 0x7b9487f3, 0xd6f4da75, 0x5d1c6b72, 0x28acae64, 0x6db334dc,
0x50a5346c, 0x71db28b8, 0xf2e261f8, 0x2a518d10, 0x3364dbe3, 0xfc75dd59,
0xf1bcac1c, 0xa23fce43, 0x3cd1bb67, 0xb043e802, 0xca5b0a33, 0x75a12988,
0x4d19347f, 0x5c5316b4, 0xc3943b92, 0x1e4d790e, 0xd7757479, 0x3fafeeb6,
0xf7d4a8ea, 0x21391abe, 0x097ef45c, 0x5127234c, 0x5324a326, 0xd23c32ba,
0x4a17a344, 0xadd5a66d, 0xa63e1db5, 0x08c9f2af, 0x983d5983, 0x563c6b91,
0xa17cf84c, 0x4d608672, 0xcc3ee246, 0xf6c76e08, 0xb333982f, 0x5e76bcb1,
0xa566d62b, 0x2ae6c4ef, 0xe8b6f406, 0x36d4c1be, 0x1582ee74, 0x6321efbc,
0x0d4ec1fd, 0x69c953f4, 0xc45a7da7, 0x26585806, 0x1614c17e, 0x16fae006,
0x3daf907e, 0x3f9d6328, 0xe3f2c9d2, 0x0cd29b00, 0x30ceaa5f, 0x300cd4b7,
0x16512a74, 0x9832e0f2, 0xd830eb0d, 0x9af8cee3, 0x7b9ec54b, 0x9279f1b5,
0x6ee651ff, 0xd3688604, 0x574d239b, 0x316796e6, 0xf3a6e6cc, 0x05750a17,
0xd98176b1, 0xce6c3213, 0x8452173c, 0x62a205f8, 0xb3cb2bf4, 0x47154778,
0x825446ff, 0x486a9323, 0x0758df38, 0x65655e4e, 0x897cfcf2, 0x8e5086fc,
0x442e7031, 0x86ca0bd0, 0xa20940f0, 0x4e477830, 0x39eea065, 0x8338f7d1,
0x37e95ef7, 0xbd3a2ce4, 0x26b29721, 0x6ff81301, 0xd1ed44a3, 0xe7de9fef,
0x15dfa08b, 0xd9922576, 0xf6f7853c, 0xbe42dc12, 0x7ceca7d8, 0x7eb027ab,
0xda7d8d53, 0xdea83eaa, 0x93ce25aa, 0xd86902bd, 0xfd43f65a, 0xf908731a,
0xdaef5fc0, 0xa5194a17, 0x33664d97, 0x6a21fd4c, 0x3198b435, 0x701541db,
0xbb0f1eea, 0x9b54cded, 0xa163d09a, 0x72409751, 0xbf9d75f6, 0xe26f4791,
]
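# Bit-sliced S-box layer: the round-constant bit `c` selects which of JH's
# two 4-bit S-boxes acts on each bit position of the four input words.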
def Sb(x, c):
x[3] = ~x[3]
x[0] ^= (c) & ~x[2]
tmp = (c) ^ (x[0] & x[1])
x[0] ^= x[2] & x[3]
x[3] ^= ~x[1] & x[2]
x[1] ^= x[0] & x[2]
x[2] ^= x[0] & ~x[3]
x[0] ^= x[1] | x[3]
x[3] ^= x[1] & x[2]
x[1] ^= tmp & x[0]
x[2] ^= tmp
return x
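# Bit-sliced linear layer (the MDS transform over GF(2^4) from the JH
# submission), mixing two 4-word halves.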
def Lb(x):
x[4] ^= x[1]
x[5] ^= x[2]
x[6] ^= x[3] ^ x[0]
x[7] ^= x[0]
x[0] ^= x[5]
x[1] ^= x[6]
x[2] ^= x[7] ^ x[4]
x[3] ^= x[4]
return x
def Ceven(n, r):
return C[((r) << 3) + 3 - n]
def Codd(n, r):
return C[((r) << 3) + 7 - n]
def S(x0, x1, x2, x3, cb, r):
x = Sb([x0[3], x1[3], x2[3], x3[3]], cb(3, r))
x0[3] = x[0]
x1[3] = x[1]
x2[3] = x[2]
x3[3] = x[3]
x = Sb([x0[2], x1[2], x2[2], x3[2]], cb(2, r))
x0[2] = x[0]
x1[2] = x[1]
x2[2] = x[2]
x3[2] = x[3]
x = Sb([x0[1], x1[1], x2[1], x3[1]], cb(1, r))
x0[1] = x[0]
x1[1] = x[1]
x2[1] = x[2]
x3[1] = x[3]
x = Sb([x0[0], x1[0], x2[0], x3[0]], cb(0, r))
x0[0] = x[0]
x1[0] = x[1]
x2[0] = x[2]
x3[0] = x[3]
def L(x0, x1, x2, x3, x4, x5, x6, x7):
x = Lb([x0[3], x1[3], x2[3], x3[3], x4[3], x5[3], x6[3], x7[3]])
x0[3] = x[0]
x1[3] = x[1]
x2[3] = x[2]
x3[3] = x[3]
x4[3] = x[4]
x5[3] = x[5]
x6[3] = x[6]
x7[3] = x[7]
x = Lb([x0[2], x1[2], x2[2], x3[2], x4[2], x5[2], x6[2], x7[2]])
x0[2] = x[0]
x1[2] = x[1]
x2[2] = x[2]
x3[2] = x[3]
x4[2] = x[4]
x5[2] = x[5]
x6[2] = x[6]
x7[2] = x[7]
x = Lb([x0[1], x1[1], x2[1], x3[1], x4[1], x5[1], x6[1], x7[1]])
x0[1] = x[0]
x1[1] = x[1]
x2[1] = x[2]
x3[1] = x[3]
x4[1] = x[4]
x5[1] = x[5]
x6[1] = x[6]
x7[1] = x[7]
x = Lb([x0[0], x1[0], x2[0], x3[0], x4[0], x5[0], x6[0], x7[0]])
x0[0] = x[0]
x1[0] = x[1]
x2[0] = x[2]
x3[0] = x[3]
x4[0] = x[4]
x5[0] = x[5]
x6[0] = x[6]
x7[0] = x[7]
def Wz(x, c, n):
t = (x[3] & (c)) << (n)
x[3] = ((x[3] >> (n)) & (c)) | t
t = (x[2] & (c)) << (n)
x[2] = ((x[2] >> (n)) & (c)) | t
t = (x[1] & (c)) << (n)
x[1] = ((x[1] >> (n)) & (c)) | t
t = (x[0] & (c)) << (n)
x[0] = ((x[0] >> (n)) & (c)) | t
def W(ro, x):
if ro == 0:
return Wz(x, (0x55555555), 1)
elif ro == 1:
return Wz(x, (0x33333333), 2)
elif ro == 2:
return Wz(x, (0x0F0F0F0F), 4)
elif ro == 3:
return Wz(x, (0x00FF00FF), 8)
elif ro == 4:
return Wz(x, (0x0000FFFF), 16)
elif ro == 5:
t = x[3]
x[3] = x[2]
x[2] = t
t = x[1]
x[1] = x[0]
x[0] = t
return
elif ro == 6:
t = x[3]
x[3] = x[1]
x[1] = t
t = x[2]
x[2] = x[0]
x[0] = t
def | (h, r, ro):
S(h[0], h[2], h[4], h[6], Ceven, r)
S(h[1], h[3], h[5], h[7], Codd, r)
L(h[0], h[2], h[4], h[6], h[1], h[3], h[5], h[7])
W(ro, h[1])
W(ro, h[3])
W(ro, h[5])
W(ro, h[7])
def READ_STATE(h, state):
h[0][3] = state[0]
h[0][2] = state[1]
h[0][1] = state[2]
h[0][0] = state[3]
h[1][3] = state[4]
h[1][2] = state[5]
h[1][1] = state[6]
h[1][0] = state[7]
h[2][3] = state[8]
h[2][2] = state[9]
h[2][1] = state[10]
h[2][0] = state[11]
h[3][3] = state[12]
h[3][2] = state[13]
h[3][1] = state[14]
h[3][0] = state[15]
h[4][3] = state[16]
h[4][2] = state[17]
h[4][1] = state[18]
h[4][0] = state[19]
h[5][3] = state[20]
h[5][2] = state[21]
h[5][1] = state[22]
h[5][0] = state[23]
h[6][3] = state[24]
h[6][2] = state[25]
h[6][1] = state[26]
h[6][0] = state[27]
h[7][3] = state[28]
h[7][2] = state[29]
h[7][1] = state[30]
h[7][0] = state[31]
def WRITE_STATE(h, state):
state[0] = h[0][3]
state[1] = h[0][2]
state[2] = h[0][1]
state[3] = h[0][0]
state[4] = h[1][3]
state[5] = h[1][2]
state[6] = h[1][1]
state[7] = h[1][0]
state[8] = h[2][3]
state[9] = h[2][2]
state[10] = h[2][1]
state[11] = h[2][0]
state[12] = h[3][3]
state[13] = h[3][2]
state[14] = h[3][1]
state[15] = h[3][0]
state[16] = h[4][3]
state[17] = h[4][2]
state[18] = h[4][1]
state[19] = h[4][0]
state[20] = h[5][3]
state[21] = h[5][2]
state[22] = h[5][1]
state[23] = h[5][0]
state[24] = h[6][3]
state[25] = h[6][2]
state[26] = h[6][1]
state[27] = h[6][0]
state[28] = h[7][3]
state[29] = h[7][2]
state[30] = h[7][1]
state[31] = h[7][0]
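# E8 is the core JH permutation: 42 rounds applied in groups of 7 so that each
# swap width ro = 0..6 (1-, 2-, 4-, 8- and 16-bit swaps, plus the two word-level
# swaps in W) is used once per group; each round is S-box layer, linear layer,
# then swap.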
def E8(h):
for r in range(0, 42, 7):
SL(h, r + 0, 0)
SL(h, r + 1, 1)
SL(h, r + 2, 2)
SL(h, r + 3, 3)
SL(h, r + 4, 4)
SL(h, r + 5, 5)
SL(h, r + 6, 6)
def bufferXORInsertBackwards(buf, data, x, y, bufferOffsetX=0, bufferOffsetY=0):
for i in range(x):
for j in range(y):  # column height is y (all call sites here pass x == y == 4)
m = i + bufferOffsetX
n = bufferOffsetY + y - 1 - j
buf[m][n] = buf[m][n] ^ data[i * 4 + j]
def jh_update(ctx, msg, msg_len=None):
buf = ctx['buffer']
buf_len = len(buf)
ptr = ctx['ptr']
if msg_len is None:
msg_len = len(msg)
if msg_len < buf_len - ptr:
op.buffer_insert(buf, ptr, msg, msg_len)
ptr += msg_len
ctx['ptr'] = ptr
return
V = [None] * JH_HX
for i in range(JH_HX):
V[i] = [None] * JH_HY
READ_STATE(V, ctx['state'])
while msg_len > 0:
clen = buf_len - ptr
if clen > msg_len:
clen = msg_len
op.buffer_insert(buf, ptr, msg, clen)
ptr += clen
msg = msg[clen:]
msg_len -= clen
if ptr == buf_len:
buf32 = op.swap32_list(op.bytes_to_i32_list(buf))
bufferXORInsertBackwards(V, buf32, 4, 4)
E8(V)
bufferXORInsertBackwards(V, buf32, 4, 4, 4, 0)
blockCountLow = ctx['blockCountLow']
blockCountLow = op.t32(blockCountLow + 1)
ctx['blockCountLow'] = blockCountLow
if blockCountLow == 0:
ctx['blockCountHigh'] += 1
ptr = 0
WRITE_STATE(V, ctx['state'])
ctx['ptr'] = ptr
def jh_close(ctx):
buf = bytearray(128)
l = [None] * 4
buf[0] = 0x80
ptr = ctx['ptr']
if ptr == 0:
numz = 47
else:
numz = 111 - ptr
buf[1:1+numz] = [0] * numz
blockCountLow = ctx['blockCountLow']
blockCountHigh = ctx['blockCountHigh']
l[0] = op.t32(blockCountLow << 9) + (ptr << 3)
l[1] = op.t32(blockCountLow >> 23) + op.t32(blockCountHigh << 9)
l[2] = op.t32(blockCountHigh >> 23)
l[3] = 0
lBytes = op.bytes_from_i32_list(op.swap32_list(l))
op.buffer_insert(buf, 1 + numz, lBytes[::-1], 16)
jh_update(ctx, buf, numz + 17)
out = [None] * 16
state = ctx['state']
for u in range(16):
out[u] = op.swap32(state[u + 16])
return out
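# Usage sketch for the one-shot helper below; hedged, since it assumes the op
# module's byte/word helpers behave as they are used throughout this file and
# that msg is a bytes-like value:
#
#   digest = jh(bytearray(b"abc"))                  # 64-byte JH-512 digest
#   words = jh(bytearray(b"abc"), out_array=True)   # same digest as 16 words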
def jh(msg, out_array=False, in_array=False):
ctx = {}
ctx['state'] = op.swap32_list(IV512)
ctx['ptr'] = 0
ctx['buffer'] = bytearray(Jh_BlockSize)
ctx['blockCountHigh'] = 0
ctx['blockCountLow'] = 0
if in_array:
msg = op.bytes_from_i32_list(msg)
jh_update(ctx, msg)
res = jh_close(ctx)
if not out_array:
res = op.bytes_from_i32_list(res)
return res
| SL | identifier_name |
jh.py | # -*- coding: utf-8 -*-
from . import op
Jh_BlockSize = 64
Jh_StateSize = 32
JH_HX = 8
JH_HY = 4
IV512 = [
(0x6fd14b96), (0x3e00aa17), (0x636a2e05), (0x7a15d543),
(0x8a225e8d), (0x0c97ef0b), (0xe9341259), (0xf2b3c361),
(0x891da0c1), (0x536f801e), (0x2aa9056b), (0xea2b6d80),
(0x588eccdb), (0x2075baa6), (0xa90f3a76), (0xbaf83bf7),
(0x0169e605), (0x41e34a69), (0x46b58a8e), (0x2e6fe65a),
(0x1047a7d0), (0xc1843c24), (0x3b6e71b1), (0x2d5ac199),
(0xcf57f6ec), (0x9db1f856), (0xa706887c), (0x5716b156),
(0xe3c2fcdf), (0xe68517fb), (0x545a4678), (0xcc8cdd4b),
]
C = [
0xa2ded572, 0x67f815df, 0x0a15847b, 0x571523b7, 0x90d6ab81, 0xf6875a4d,
0xc54f9f4e, 0x402bd1c3, 0xe03a98ea, 0x9cfa455c, 0x99d2c503, 0x9a99b266,
0xb4960266, 0x8a53bbf2, 0x1a1456b5, 0x31a2db88, 0x5c5aa303, 0xdb0e199a,
0x0ab23f40, 0x1044c187, 0x8019051c, 0x1d959e84, 0xadeb336f, 0xdccde75e,
0x9213ba10, 0x416bbf02, 0x156578dc, 0xd027bbf7, 0x39812c0a, 0x5078aa37,
0xd2bf1a3f, 0xd3910041, 0x0d5a2d42, 0x907eccf6, 0x9c9f62dd, 0xce97c092,
0x0ba75c18, 0xac442bc7, 0xd665dfd1, 0x23fcc663, 0x036c6e97, 0x1ab8e09e,
0x7e450521, 0xa8ec6c44, 0xbb03f1ee, 0xfa618e5d, 0xb29796fd, 0x97818394,
0x37858e4a, 0x2f3003db, 0x2d8d672a, 0x956a9ffb, 0x8173fe8a, 0x6c69b8f8,
0x4672c78a, 0x14427fc0, 0x8f15f4c5, 0xc45ec7bd, 0xa76f4475, 0x80bb118f,
0xb775de52, 0xbc88e4ae, 0x1e00b882, 0xf4a3a698, 0x338ff48e, 0x1563a3a9,
0x24565faa, 0x89f9b7d5, 0x20edf1b6, 0xfde05a7c, 0x5ae9ca36, 0x362c4206,
0x433529ce, 0x3d98fe4e, 0x74f93a53, 0xa74b9a73, 0x591ff5d0, 0x86814e6f,
0x81ad9d0e, 0x9f5ad8af, 0x670605a7, 0x6a6234ee, 0xbe280b8b, 0x2717b96e,
0x26077447, 0x3f1080c6, 0x6f7ea0e0, 0x7b487ec6, 0xa50a550d, 0xc0a4f84a,
0x9fe7e391, 0x9ef18e97, 0x81727686, 0xd48d6050, 0x415a9e7e, 0x62b0e5f3,
0xec1f9ffc, 0x7a205440, 0x001ae4e3, 0x84c9f4ce, 0xf594d74f, 0xd895fa9d,
0x117e2e55, 0xa554c324, 0x2872df5b, 0x286efebd, 0xe27ff578, 0xb2c4a50f,
0xef7c8905, 0x2ed349ee, 0x85937e44, 0x7f5928eb, 0x37695f70, 0x4a3124b3,
0xf128865e, 0x65e4d61d, 0x04771bc7, 0xe720b951, 0xe843fe74, 0x8a87d423,
0xa3e8297d, 0xf2947692, 0x097acbdd, 0xc1d9309b, 0xfb301b1d, 0xe01bdc5b,
0x4f4924da, 0xbf829cf2, 0x31bae7a4, 0xffbf70b4, 0x0544320d, 0x48bcf8de,
0x32fcae3b, 0x39d3bb53, 0xc1c39f45, 0xa08b29e0, 0xfd05c9e5, 0x0f09aef7,
0x12347094, 0x34f19042, 0x01b771a2, 0x95ed44e3, 0x368e3be9, 0x4a982f4f,
0x631d4088, 0x15f66ca0, 0x4b44c147, 0xffaf5287, 0xf14abb7e, 0x30c60ae2,
0xc5b67046, 0xe68c6ecc, 0x56a4d5a4, 0x00ca4fbd, 0x4b849dda, 0xae183ec8,
0x45ce5773, 0xadd16430, 0x68cea6e8, 0x67255c14, 0xf28cdaa3, 0x16e10ecb,
0x5806e933, 0x9a99949a, 0x20b2601f, 0x7b846fc2, 0x7facced1, 0x1885d1a0,
0xa15b5932, 0xd319dd8d, 0xc01c9a50, 0x46b4a5aa, 0x67633d9f, 0xba6b04e4,
0xab19caf6, 0x7eee560b, 0xea79b11f, 0x742128a9, 0x35f7bde9, 0xee51363b,
0x5aac571d, 0x76d35075, 0xfec2463a, 0x01707da3, 0xafc135f7, 0x42d8a498,
0x20eced78, 0x79676b9e, 0x15638341, 0xa8db3aea, 0x4d3bc3fa, 0x832c8332,
0x1f3b40a7, 0xf347271c, 0x34f04059, 0x9a762db7, 0x6c4e3ee7, 0xfd4f21d2,
0x398dfdb8, 0xef5957dc, 0x490c9b8d, 0xdaeb492b, 0x49d7a25b, 0x0d70f368,
0xd0ae3b7d, 0x84558d7a, 0xf0e9a5f5, 0x658ef8e4, 0xf4a2b8a0, 0x533b1036,
0x9e07a80c, 0x5aec3e75, 0x92946891, 0x4f88e856, 0x555cb05b, 0x4cbcbaf8,
0x993bbbe3, 0x7b9487f3, 0xd6f4da75, 0x5d1c6b72, 0x28acae64, 0x6db334dc,
0x50a5346c, 0x71db28b8, 0xf2e261f8, 0x2a518d10, 0x3364dbe3, 0xfc75dd59,
0xf1bcac1c, 0xa23fce43, 0x3cd1bb67, 0xb043e802, 0xca5b0a33, 0x75a12988,
0x4d19347f, 0x5c5316b4, 0xc3943b92, 0x1e4d790e, 0xd7757479, 0x3fafeeb6,
0xf7d4a8ea, 0x21391abe, 0x097ef45c, 0x5127234c, 0x5324a326, 0xd23c32ba,
0x4a17a344, 0xadd5a66d, 0xa63e1db5, 0x08c9f2af, 0x983d5983, 0x563c6b91,
0xa17cf84c, 0x4d608672, 0xcc3ee246, 0xf6c76e08, 0xb333982f, 0x5e76bcb1,
0xa566d62b, 0x2ae6c4ef, 0xe8b6f406, 0x36d4c1be, 0x1582ee74, 0x6321efbc,
0x0d4ec1fd, 0x69c953f4, 0xc45a7da7, 0x26585806, 0x1614c17e, 0x16fae006,
0x3daf907e, 0x3f9d6328, 0xe3f2c9d2, 0x0cd29b00, 0x30ceaa5f, 0x300cd4b7,
0x16512a74, 0x9832e0f2, 0xd830eb0d, 0x9af8cee3, 0x7b9ec54b, 0x9279f1b5,
0x6ee651ff, 0xd3688604, 0x574d239b, 0x316796e6, 0xf3a6e6cc, 0x05750a17,
0xd98176b1, 0xce6c3213, 0x8452173c, 0x62a205f8, 0xb3cb2bf4, 0x47154778,
0x825446ff, 0x486a9323, 0x0758df38, 0x65655e4e, 0x897cfcf2, 0x8e5086fc,
0x442e7031, 0x86ca0bd0, 0xa20940f0, 0x4e477830, 0x39eea065, 0x8338f7d1,
0x37e95ef7, 0xbd3a2ce4, 0x26b29721, 0x6ff81301, 0xd1ed44a3, 0xe7de9fef,
0x15dfa08b, 0xd9922576, 0xf6f7853c, 0xbe42dc12, 0x7ceca7d8, 0x7eb027ab,
0xda7d8d53, 0xdea83eaa, 0x93ce25aa, 0xd86902bd, 0xfd43f65a, 0xf908731a,
0xdaef5fc0, 0xa5194a17, 0x33664d97, 0x6a21fd4c, 0x3198b435, 0x701541db,
0xbb0f1eea, 0x9b54cded, 0xa163d09a, 0x72409751, 0xbf9d75f6, 0xe26f4791,
]
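# Sb below is JH's bitsliced S-box layer: one call transforms a single bit-plane
# of four state words, and the round-constant word c selects, bit by bit, which
# of the two 4-bit S-boxes is applied. Lb is the companion linear mixing layer
# over eight words (a best-effort description of the JH reference design).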
def Sb(x, c):
x[3] = ~x[3]
x[0] ^= (c) & ~x[2]
tmp = (c) ^ (x[0] & x[1])
x[0] ^= x[2] & x[3]
x[3] ^= ~x[1] & x[2]
x[1] ^= x[0] & x[2]
x[2] ^= x[0] & ~x[3]
x[0] ^= x[1] | x[3]
x[3] ^= x[1] & x[2]
x[1] ^= tmp & x[0]
x[2] ^= tmp
return x
def Lb(x):
x[4] ^= x[1]
x[5] ^= x[2]
x[6] ^= x[3] ^ x[0]
x[7] ^= x[0]
x[0] ^= x[5]
x[1] ^= x[6]
x[2] ^= x[7] ^ x[4]
x[3] ^= x[4]
return x
def Ceven(n, r):
return C[((r) << 3) + 3 - n]
def Codd(n, r):
return C[((r) << 3) + 7 - n]
def S(x0, x1, x2, x3, cb, r):
x = Sb([x0[3], x1[3], x2[3], x3[3]], cb(3, r))
x0[3] = x[0]
x1[3] = x[1]
x2[3] = x[2]
x3[3] = x[3]
x = Sb([x0[2], x1[2], x2[2], x3[2]], cb(2, r))
x0[2] = x[0]
x1[2] = x[1]
x2[2] = x[2]
x3[2] = x[3]
x = Sb([x0[1], x1[1], x2[1], x3[1]], cb(1, r))
x0[1] = x[0]
x1[1] = x[1]
x2[1] = x[2]
x3[1] = x[3]
x = Sb([x0[0], x1[0], x2[0], x3[0]], cb(0, r))
x0[0] = x[0]
x1[0] = x[1]
x2[0] = x[2]
x3[0] = x[3]
def L(x0, x1, x2, x3, x4, x5, x6, x7):
x = Lb([x0[3], x1[3], x2[3], x3[3], x4[3], x5[3], x6[3], x7[3]])
x0[3] = x[0]
x1[3] = x[1]
x2[3] = x[2]
x3[3] = x[3]
x4[3] = x[4]
x5[3] = x[5]
x6[3] = x[6]
x7[3] = x[7]
x = Lb([x0[2], x1[2], x2[2], x3[2], x4[2], x5[2], x6[2], x7[2]])
x0[2] = x[0]
x1[2] = x[1]
x2[2] = x[2]
x3[2] = x[3]
x4[2] = x[4]
x5[2] = x[5]
x6[2] = x[6]
x7[2] = x[7]
x = Lb([x0[1], x1[1], x2[1], x3[1], x4[1], x5[1], x6[1], x7[1]])
x0[1] = x[0]
x1[1] = x[1]
x2[1] = x[2]
x3[1] = x[3]
x4[1] = x[4]
x5[1] = x[5]
x6[1] = x[6]
x7[1] = x[7]
x = Lb([x0[0], x1[0], x2[0], x3[0], x4[0], x5[0], x6[0], x7[0]])
x0[0] = x[0]
x1[0] = x[1]
x2[0] = x[2]
x3[0] = x[3]
x4[0] = x[4]
x5[0] = x[5]
x6[0] = x[6]
x7[0] = x[7]
def Wz(x, c, n):
t = (x[3] & (c)) << (n)
x[3] = ((x[3] >> (n)) & (c)) | t
t = (x[2] & (c)) << (n)
x[2] = ((x[2] >> (n)) & (c)) | t
t = (x[1] & (c)) << (n)
x[1] = ((x[1] >> (n)) & (c)) | t
t = (x[0] & (c)) << (n)
x[0] = ((x[0] >> (n)) & (c)) | t
def W(ro, x):
if ro == 0:
return Wz(x, (0x55555555), 1)
elif ro == 1:
return Wz(x, (0x33333333), 2)
elif ro == 2:
return Wz(x, (0x0F0F0F0F), 4)
elif ro == 3:
return Wz(x, (0x00FF00FF), 8)
elif ro == 4:
return Wz(x, (0x0000FFFF), 16)
elif ro == 5:
t = x[3]
x[3] = x[2]
x[2] = t
t = x[1]
x[1] = x[0]
x[0] = t
return
elif ro == 6:
t = x[3]
x[3] = x[1]
x[1] = t
t = x[2]
x[2] = x[0]
x[0] = t
def SL(h, r, ro):
S(h[0], h[2], h[4], h[6], Ceven, r)
S(h[1], h[3], h[5], h[7], Codd, r)
L(h[0], h[2], h[4], h[6], h[1], h[3], h[5], h[7])
W(ro, h[1])
W(ro, h[3])
W(ro, h[5])
W(ro, h[7])
def READ_STATE(h, state):
h[0][3] = state[0]
h[0][2] = state[1]
h[0][1] = state[2]
h[0][0] = state[3]
h[1][3] = state[4]
h[1][2] = state[5]
h[1][1] = state[6]
h[1][0] = state[7]
h[2][3] = state[8]
h[2][2] = state[9]
h[2][1] = state[10]
h[2][0] = state[11]
h[3][3] = state[12]
h[3][2] = state[13]
h[3][1] = state[14]
h[3][0] = state[15]
h[4][3] = state[16]
h[4][2] = state[17]
h[4][1] = state[18]
h[4][0] = state[19]
h[5][3] = state[20]
h[5][2] = state[21]
h[5][1] = state[22]
h[5][0] = state[23]
h[6][3] = state[24]
h[6][2] = state[25]
h[6][1] = state[26]
h[6][0] = state[27]
h[7][3] = state[28]
h[7][2] = state[29]
h[7][1] = state[30]
h[7][0] = state[31]
def WRITE_STATE(h, state):
state[0] = h[0][3]
state[1] = h[0][2]
state[2] = h[0][1]
state[3] = h[0][0]
state[4] = h[1][3] | state[9] = h[2][2]
state[10] = h[2][1]
state[11] = h[2][0]
state[12] = h[3][3]
state[13] = h[3][2]
state[14] = h[3][1]
state[15] = h[3][0]
state[16] = h[4][3]
state[17] = h[4][2]
state[18] = h[4][1]
state[19] = h[4][0]
state[20] = h[5][3]
state[21] = h[5][2]
state[22] = h[5][1]
state[23] = h[5][0]
state[24] = h[6][3]
state[25] = h[6][2]
state[26] = h[6][1]
state[27] = h[6][0]
state[28] = h[7][3]
state[29] = h[7][2]
state[30] = h[7][1]
state[31] = h[7][0]
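# E8 is the core JH permutation: 42 rounds applied in groups of 7 so that each
# swap width ro = 0..6 (1-, 2-, 4-, 8- and 16-bit swaps, plus the two word-level
# swaps in W) is used once per group; each round is S-box layer, linear layer,
# then swap.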
def E8(h):
for r in range(0, 42, 7):
SL(h, r + 0, 0)
SL(h, r + 1, 1)
SL(h, r + 2, 2)
SL(h, r + 3, 3)
SL(h, r + 4, 4)
SL(h, r + 5, 5)
SL(h, r + 6, 6)
def bufferXORInsertBackwards(buf, data, x, y, bufferOffsetX=0, bufferOffsetY=0):
for i in range(x):
for j in range(y):  # column height is y (all call sites here pass x == y == 4)
m = i + bufferOffsetX
n = bufferOffsetY + y - 1 - j
buf[m][n] = buf[m][n] ^ data[i * 4 + j]
def jh_update(ctx, msg, msg_len=None):
buf = ctx['buffer']
buf_len = len(buf)
ptr = ctx['ptr']
if msg_len is None:
msg_len = len(msg)
if msg_len < buf_len - ptr:
op.buffer_insert(buf, ptr, msg, msg_len)
ptr += msg_len
ctx['ptr'] = ptr
return
V = [None] * JH_HX
for i in range(JH_HX):
V[i] = [None] * JH_HY
READ_STATE(V, ctx['state'])
while msg_len > 0:
clen = buf_len - ptr
if clen > msg_len:
clen = msg_len
op.buffer_insert(buf, ptr, msg, clen)
ptr += clen
msg = msg[clen:]
msg_len -= clen
if ptr == buf_len:
buf32 = op.swap32_list(op.bytes_to_i32_list(buf))
bufferXORInsertBackwards(V, buf32, 4, 4)
E8(V)
bufferXORInsertBackwards(V, buf32, 4, 4, 4, 0)
blockCountLow = ctx['blockCountLow']
blockCountLow = op.t32(blockCountLow + 1)
ctx['blockCountLow'] = blockCountLow
if blockCountLow == 0:
ctx['blockCountHigh'] += 1
ptr = 0
WRITE_STATE(V, ctx['state'])
ctx['ptr'] = ptr
def jh_close(ctx):
buf = bytearray(128)
l = [None] * 4
buf[0] = 0x80
ptr = ctx['ptr']
if ptr == 0:
numz = 47
else:
numz = 111 - ptr
buf[1:1+numz] = [0] * numz
blockCountLow = ctx['blockCountLow']
blockCountHigh = ctx['blockCountHigh']
l[0] = op.t32(blockCountLow << 9) + (ptr << 3)
l[1] = op.t32(blockCountLow >> 23) + op.t32(blockCountHigh << 9)
l[2] = op.t32(blockCountHigh >> 23)
l[3] = 0
lBytes = op.bytes_from_i32_list(op.swap32_list(l))
op.buffer_insert(buf, 1 + numz, lBytes[::-1], 16)
jh_update(ctx, buf, numz + 17)
out = [None] * 16
state = ctx['state']
for u in range(16):
out[u] = op.swap32(state[u + 16])
return out
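# Usage sketch for the one-shot helper below; hedged, since it assumes the op
# module's byte/word helpers behave as they are used throughout this file and
# that msg is a bytes-like value:
#
#   digest = jh(bytearray(b"abc"))                  # 64-byte JH-512 digest
#   words = jh(bytearray(b"abc"), out_array=True)   # same digest as 16 words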
def jh(msg, out_array=False, in_array=False):
ctx = {}
ctx['state'] = op.swap32_list(IV512)
ctx['ptr'] = 0
ctx['buffer'] = bytearray(Jh_BlockSize)
ctx['blockCountHigh'] = 0
ctx['blockCountLow'] = 0
if in_array:
msg = op.bytes_from_i32_list(msg)
jh_update(ctx, msg)
res = jh_close(ctx)
if not out_array:
res = op.bytes_from_i32_list(res)
return res | state[5] = h[1][2]
state[6] = h[1][1]
state[7] = h[1][0]
state[8] = h[2][3] | random_line_split |
jh.py | # -*- coding: utf-8 -*-
from . import op
Jh_BlockSize = 64
Jh_StateSize = 32
JH_HX = 8
JH_HY = 4
IV512 = [
(0x6fd14b96), (0x3e00aa17), (0x636a2e05), (0x7a15d543),
(0x8a225e8d), (0x0c97ef0b), (0xe9341259), (0xf2b3c361),
(0x891da0c1), (0x536f801e), (0x2aa9056b), (0xea2b6d80),
(0x588eccdb), (0x2075baa6), (0xa90f3a76), (0xbaf83bf7),
(0x0169e605), (0x41e34a69), (0x46b58a8e), (0x2e6fe65a),
(0x1047a7d0), (0xc1843c24), (0x3b6e71b1), (0x2d5ac199),
(0xcf57f6ec), (0x9db1f856), (0xa706887c), (0x5716b156),
(0xe3c2fcdf), (0xe68517fb), (0x545a4678), (0xcc8cdd4b),
]
C = [
0xa2ded572, 0x67f815df, 0x0a15847b, 0x571523b7, 0x90d6ab81, 0xf6875a4d,
0xc54f9f4e, 0x402bd1c3, 0xe03a98ea, 0x9cfa455c, 0x99d2c503, 0x9a99b266,
0xb4960266, 0x8a53bbf2, 0x1a1456b5, 0x31a2db88, 0x5c5aa303, 0xdb0e199a,
0x0ab23f40, 0x1044c187, 0x8019051c, 0x1d959e84, 0xadeb336f, 0xdccde75e,
0x9213ba10, 0x416bbf02, 0x156578dc, 0xd027bbf7, 0x39812c0a, 0x5078aa37,
0xd2bf1a3f, 0xd3910041, 0x0d5a2d42, 0x907eccf6, 0x9c9f62dd, 0xce97c092,
0x0ba75c18, 0xac442bc7, 0xd665dfd1, 0x23fcc663, 0x036c6e97, 0x1ab8e09e,
0x7e450521, 0xa8ec6c44, 0xbb03f1ee, 0xfa618e5d, 0xb29796fd, 0x97818394,
0x37858e4a, 0x2f3003db, 0x2d8d672a, 0x956a9ffb, 0x8173fe8a, 0x6c69b8f8,
0x4672c78a, 0x14427fc0, 0x8f15f4c5, 0xc45ec7bd, 0xa76f4475, 0x80bb118f,
0xb775de52, 0xbc88e4ae, 0x1e00b882, 0xf4a3a698, 0x338ff48e, 0x1563a3a9,
0x24565faa, 0x89f9b7d5, 0x20edf1b6, 0xfde05a7c, 0x5ae9ca36, 0x362c4206,
0x433529ce, 0x3d98fe4e, 0x74f93a53, 0xa74b9a73, 0x591ff5d0, 0x86814e6f,
0x81ad9d0e, 0x9f5ad8af, 0x670605a7, 0x6a6234ee, 0xbe280b8b, 0x2717b96e,
0x26077447, 0x3f1080c6, 0x6f7ea0e0, 0x7b487ec6, 0xa50a550d, 0xc0a4f84a,
0x9fe7e391, 0x9ef18e97, 0x81727686, 0xd48d6050, 0x415a9e7e, 0x62b0e5f3,
0xec1f9ffc, 0x7a205440, 0x001ae4e3, 0x84c9f4ce, 0xf594d74f, 0xd895fa9d,
0x117e2e55, 0xa554c324, 0x2872df5b, 0x286efebd, 0xe27ff578, 0xb2c4a50f,
0xef7c8905, 0x2ed349ee, 0x85937e44, 0x7f5928eb, 0x37695f70, 0x4a3124b3,
0xf128865e, 0x65e4d61d, 0x04771bc7, 0xe720b951, 0xe843fe74, 0x8a87d423,
0xa3e8297d, 0xf2947692, 0x097acbdd, 0xc1d9309b, 0xfb301b1d, 0xe01bdc5b,
0x4f4924da, 0xbf829cf2, 0x31bae7a4, 0xffbf70b4, 0x0544320d, 0x48bcf8de,
0x32fcae3b, 0x39d3bb53, 0xc1c39f45, 0xa08b29e0, 0xfd05c9e5, 0x0f09aef7,
0x12347094, 0x34f19042, 0x01b771a2, 0x95ed44e3, 0x368e3be9, 0x4a982f4f,
0x631d4088, 0x15f66ca0, 0x4b44c147, 0xffaf5287, 0xf14abb7e, 0x30c60ae2,
0xc5b67046, 0xe68c6ecc, 0x56a4d5a4, 0x00ca4fbd, 0x4b849dda, 0xae183ec8,
0x45ce5773, 0xadd16430, 0x68cea6e8, 0x67255c14, 0xf28cdaa3, 0x16e10ecb,
0x5806e933, 0x9a99949a, 0x20b2601f, 0x7b846fc2, 0x7facced1, 0x1885d1a0,
0xa15b5932, 0xd319dd8d, 0xc01c9a50, 0x46b4a5aa, 0x67633d9f, 0xba6b04e4,
0xab19caf6, 0x7eee560b, 0xea79b11f, 0x742128a9, 0x35f7bde9, 0xee51363b,
0x5aac571d, 0x76d35075, 0xfec2463a, 0x01707da3, 0xafc135f7, 0x42d8a498,
0x20eced78, 0x79676b9e, 0x15638341, 0xa8db3aea, 0x4d3bc3fa, 0x832c8332,
0x1f3b40a7, 0xf347271c, 0x34f04059, 0x9a762db7, 0x6c4e3ee7, 0xfd4f21d2,
0x398dfdb8, 0xef5957dc, 0x490c9b8d, 0xdaeb492b, 0x49d7a25b, 0x0d70f368,
0xd0ae3b7d, 0x84558d7a, 0xf0e9a5f5, 0x658ef8e4, 0xf4a2b8a0, 0x533b1036,
0x9e07a80c, 0x5aec3e75, 0x92946891, 0x4f88e856, 0x555cb05b, 0x4cbcbaf8,
0x993bbbe3, 0x7b9487f3, 0xd6f4da75, 0x5d1c6b72, 0x28acae64, 0x6db334dc,
0x50a5346c, 0x71db28b8, 0xf2e261f8, 0x2a518d10, 0x3364dbe3, 0xfc75dd59,
0xf1bcac1c, 0xa23fce43, 0x3cd1bb67, 0xb043e802, 0xca5b0a33, 0x75a12988,
0x4d19347f, 0x5c5316b4, 0xc3943b92, 0x1e4d790e, 0xd7757479, 0x3fafeeb6,
0xf7d4a8ea, 0x21391abe, 0x097ef45c, 0x5127234c, 0x5324a326, 0xd23c32ba,
0x4a17a344, 0xadd5a66d, 0xa63e1db5, 0x08c9f2af, 0x983d5983, 0x563c6b91,
0xa17cf84c, 0x4d608672, 0xcc3ee246, 0xf6c76e08, 0xb333982f, 0x5e76bcb1,
0xa566d62b, 0x2ae6c4ef, 0xe8b6f406, 0x36d4c1be, 0x1582ee74, 0x6321efbc,
0x0d4ec1fd, 0x69c953f4, 0xc45a7da7, 0x26585806, 0x1614c17e, 0x16fae006,
0x3daf907e, 0x3f9d6328, 0xe3f2c9d2, 0x0cd29b00, 0x30ceaa5f, 0x300cd4b7,
0x16512a74, 0x9832e0f2, 0xd830eb0d, 0x9af8cee3, 0x7b9ec54b, 0x9279f1b5,
0x6ee651ff, 0xd3688604, 0x574d239b, 0x316796e6, 0xf3a6e6cc, 0x05750a17,
0xd98176b1, 0xce6c3213, 0x8452173c, 0x62a205f8, 0xb3cb2bf4, 0x47154778,
0x825446ff, 0x486a9323, 0x0758df38, 0x65655e4e, 0x897cfcf2, 0x8e5086fc,
0x442e7031, 0x86ca0bd0, 0xa20940f0, 0x4e477830, 0x39eea065, 0x8338f7d1,
0x37e95ef7, 0xbd3a2ce4, 0x26b29721, 0x6ff81301, 0xd1ed44a3, 0xe7de9fef,
0x15dfa08b, 0xd9922576, 0xf6f7853c, 0xbe42dc12, 0x7ceca7d8, 0x7eb027ab,
0xda7d8d53, 0xdea83eaa, 0x93ce25aa, 0xd86902bd, 0xfd43f65a, 0xf908731a,
0xdaef5fc0, 0xa5194a17, 0x33664d97, 0x6a21fd4c, 0x3198b435, 0x701541db,
0xbb0f1eea, 0x9b54cded, 0xa163d09a, 0x72409751, 0xbf9d75f6, 0xe26f4791,
]
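# Sb below is JH's bitsliced S-box layer: one call transforms a single bit-plane
# of four state words, and the round-constant word c selects, bit by bit, which
# of the two 4-bit S-boxes is applied. Lb is the companion linear mixing layer
# over eight words (a best-effort description of the JH reference design).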
def Sb(x, c):
x[3] = ~x[3]
x[0] ^= (c) & ~x[2]
tmp = (c) ^ (x[0] & x[1])
x[0] ^= x[2] & x[3]
x[3] ^= ~x[1] & x[2]
x[1] ^= x[0] & x[2]
x[2] ^= x[0] & ~x[3]
x[0] ^= x[1] | x[3]
x[3] ^= x[1] & x[2]
x[1] ^= tmp & x[0]
x[2] ^= tmp
return x
def Lb(x):
x[4] ^= x[1]
x[5] ^= x[2]
x[6] ^= x[3] ^ x[0]
x[7] ^= x[0]
x[0] ^= x[5]
x[1] ^= x[6]
x[2] ^= x[7] ^ x[4]
x[3] ^= x[4]
return x
def Ceven(n, r):
return C[((r) << 3) + 3 - n]
def Codd(n, r):
return C[((r) << 3) + 7 - n]
def S(x0, x1, x2, x3, cb, r):
x = Sb([x0[3], x1[3], x2[3], x3[3]], cb(3, r))
x0[3] = x[0]
x1[3] = x[1]
x2[3] = x[2]
x3[3] = x[3]
x = Sb([x0[2], x1[2], x2[2], x3[2]], cb(2, r))
x0[2] = x[0]
x1[2] = x[1]
x2[2] = x[2]
x3[2] = x[3]
x = Sb([x0[1], x1[1], x2[1], x3[1]], cb(1, r))
x0[1] = x[0]
x1[1] = x[1]
x2[1] = x[2]
x3[1] = x[3]
x = Sb([x0[0], x1[0], x2[0], x3[0]], cb(0, r))
x0[0] = x[0]
x1[0] = x[1]
x2[0] = x[2]
x3[0] = x[3]
def L(x0, x1, x2, x3, x4, x5, x6, x7):
x = Lb([x0[3], x1[3], x2[3], x3[3], x4[3], x5[3], x6[3], x7[3]])
x0[3] = x[0]
x1[3] = x[1]
x2[3] = x[2]
x3[3] = x[3]
x4[3] = x[4]
x5[3] = x[5]
x6[3] = x[6]
x7[3] = x[7]
x = Lb([x0[2], x1[2], x2[2], x3[2], x4[2], x5[2], x6[2], x7[2]])
x0[2] = x[0]
x1[2] = x[1]
x2[2] = x[2]
x3[2] = x[3]
x4[2] = x[4]
x5[2] = x[5]
x6[2] = x[6]
x7[2] = x[7]
x = Lb([x0[1], x1[1], x2[1], x3[1], x4[1], x5[1], x6[1], x7[1]])
x0[1] = x[0]
x1[1] = x[1]
x2[1] = x[2]
x3[1] = x[3]
x4[1] = x[4]
x5[1] = x[5]
x6[1] = x[6]
x7[1] = x[7]
x = Lb([x0[0], x1[0], x2[0], x3[0], x4[0], x5[0], x6[0], x7[0]])
x0[0] = x[0]
x1[0] = x[1]
x2[0] = x[2]
x3[0] = x[3]
x4[0] = x[4]
x5[0] = x[5]
x6[0] = x[6]
x7[0] = x[7]
def Wz(x, c, n):
t = (x[3] & (c)) << (n)
x[3] = ((x[3] >> (n)) & (c)) | t
t = (x[2] & (c)) << (n)
x[2] = ((x[2] >> (n)) & (c)) | t
t = (x[1] & (c)) << (n)
x[1] = ((x[1] >> (n)) & (c)) | t
t = (x[0] & (c)) << (n)
x[0] = ((x[0] >> (n)) & (c)) | t
def W(ro, x):
if ro == 0:
return Wz(x, (0x55555555), 1)
elif ro == 1:
return Wz(x, (0x33333333), 2)
elif ro == 2:
return Wz(x, (0x0F0F0F0F), 4)
elif ro == 3:
return Wz(x, (0x00FF00FF), 8)
elif ro == 4:
return Wz(x, (0x0000FFFF), 16)
elif ro == 5:
t = x[3]
x[3] = x[2]
x[2] = t
t = x[1]
x[1] = x[0]
x[0] = t
return
elif ro == 6:
t = x[3]
x[3] = x[1]
x[1] = t
t = x[2]
x[2] = x[0]
x[0] = t
def SL(h, r, ro):
S(h[0], h[2], h[4], h[6], Ceven, r)
S(h[1], h[3], h[5], h[7], Codd, r)
L(h[0], h[2], h[4], h[6], h[1], h[3], h[5], h[7])
W(ro, h[1])
W(ro, h[3])
W(ro, h[5])
W(ro, h[7])
def READ_STATE(h, state):
h[0][3] = state[0]
h[0][2] = state[1]
h[0][1] = state[2]
h[0][0] = state[3]
h[1][3] = state[4]
h[1][2] = state[5]
h[1][1] = state[6]
h[1][0] = state[7]
h[2][3] = state[8]
h[2][2] = state[9]
h[2][1] = state[10]
h[2][0] = state[11]
h[3][3] = state[12]
h[3][2] = state[13]
h[3][1] = state[14]
h[3][0] = state[15]
h[4][3] = state[16]
h[4][2] = state[17]
h[4][1] = state[18]
h[4][0] = state[19]
h[5][3] = state[20]
h[5][2] = state[21]
h[5][1] = state[22]
h[5][0] = state[23]
h[6][3] = state[24]
h[6][2] = state[25]
h[6][1] = state[26]
h[6][0] = state[27]
h[7][3] = state[28]
h[7][2] = state[29]
h[7][1] = state[30]
h[7][0] = state[31]
def WRITE_STATE(h, state):
state[0] = h[0][3]
state[1] = h[0][2]
state[2] = h[0][1]
state[3] = h[0][0]
state[4] = h[1][3]
state[5] = h[1][2]
state[6] = h[1][1]
state[7] = h[1][0]
state[8] = h[2][3]
state[9] = h[2][2]
state[10] = h[2][1]
state[11] = h[2][0]
state[12] = h[3][3]
state[13] = h[3][2]
state[14] = h[3][1]
state[15] = h[3][0]
state[16] = h[4][3]
state[17] = h[4][2]
state[18] = h[4][1]
state[19] = h[4][0]
state[20] = h[5][3]
state[21] = h[5][2]
state[22] = h[5][1]
state[23] = h[5][0]
state[24] = h[6][3]
state[25] = h[6][2]
state[26] = h[6][1]
state[27] = h[6][0]
state[28] = h[7][3]
state[29] = h[7][2]
state[30] = h[7][1]
state[31] = h[7][0]
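# E8 is the core JH permutation: 42 rounds applied in groups of 7 so that each
# swap width ro = 0..6 (1-, 2-, 4-, 8- and 16-bit swaps, plus the two word-level
# swaps in W) is used once per group; each round is S-box layer, linear layer,
# then swap.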
def E8(h):
for r in range(0, 42, 7):
SL(h, r + 0, 0)
SL(h, r + 1, 1)
SL(h, r + 2, 2)
SL(h, r + 3, 3)
SL(h, r + 4, 4)
SL(h, r + 5, 5)
SL(h, r + 6, 6)
def bufferXORInsertBackwards(buf, data, x, y, bufferOffsetX=0, bufferOffsetY=0):
for i in range(x):
for j in range(y):  # column height is y (all call sites here pass x == y == 4)
m = i + bufferOffsetX
n = bufferOffsetY + y - 1 - j
buf[m][n] = buf[m][n] ^ data[i * 4 + j]
def jh_update(ctx, msg, msg_len=None):
buf = ctx['buffer']
buf_len = len(buf)
ptr = ctx['ptr']
if msg_len is None:
msg_len = len(msg)
if msg_len < buf_len - ptr:
op.buffer_insert(buf, ptr, msg, msg_len)
ptr += msg_len
ctx['ptr'] = ptr
return
V = [None] * JH_HX
for i in range(JH_HX):
V[i] = [None] * JH_HY
READ_STATE(V, ctx['state'])
while msg_len > 0:
clen = buf_len - ptr
if clen > msg_len:
clen = msg_len
op.buffer_insert(buf, ptr, msg, clen)
ptr += clen
msg = msg[clen:]
msg_len -= clen
if ptr == buf_len:
|
WRITE_STATE(V, ctx['state'])
ctx['ptr'] = ptr
def jh_close(ctx):
buf = bytearray(128)
l = [None] * 4
buf[0] = 0x80
ptr = ctx['ptr']
if ptr == 0:
numz = 47
else:
numz = 111 - ptr
buf[1:1+numz] = [0] * numz
blockCountLow = ctx['blockCountLow']
blockCountHigh = ctx['blockCountHigh']
l[0] = op.t32(blockCountLow << 9) + (ptr << 3)
l[1] = op.t32(blockCountLow >> 23) + op.t32(blockCountHigh << 9)
l[2] = op.t32(blockCountHigh >> 23)
l[3] = 0
lBytes = op.bytes_from_i32_list(op.swap32_list(l))
op.buffer_insert(buf, 1 + numz, lBytes[::-1], 16)
jh_update(ctx, buf, numz + 17)
out = [None] * 16
state = ctx['state']
for u in range(16):
out[u] = op.swap32(state[u + 16])
return out
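# Usage sketch for the one-shot helper below; hedged, since it assumes the op
# module's byte/word helpers behave as they are used throughout this file and
# that msg is a bytes-like value:
#
#   digest = jh(bytearray(b"abc"))                  # 64-byte JH-512 digest
#   words = jh(bytearray(b"abc"), out_array=True)   # same digest as 16 words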
def jh(msg, out_array=False, in_array=False):
ctx = {}
ctx['state'] = op.swap32_list(IV512)
ctx['ptr'] = 0
ctx['buffer'] = bytearray(Jh_BlockSize)
ctx['blockCountHigh'] = 0
ctx['blockCountLow'] = 0
if in_array:
msg = op.bytes_from_i32_list(msg)
jh_update(ctx, msg)
res = jh_close(ctx)
if not out_array:
res = op.bytes_from_i32_list(res)
return res
| buf32 = op.swap32_list(op.bytes_to_i32_list(buf))
bufferXORInsertBackwards(V, buf32, 4, 4)
E8(V)
bufferXORInsertBackwards(V, buf32, 4, 4, 4, 0)
blockCountLow = ctx['blockCountLow']
blockCountLow = op.t32(blockCountLow + 1)
ctx['blockCountLow'] = blockCountLow
if blockCountLow == 0:
ctx['blockCountHigh'] += 1
ptr = 0 | conditional_block |
jh.py | # -*- coding: utf-8 -*-
from . import op
Jh_BlockSize = 64
Jh_StateSize = 32
JH_HX = 8
JH_HY = 4
IV512 = [
(0x6fd14b96), (0x3e00aa17), (0x636a2e05), (0x7a15d543),
(0x8a225e8d), (0x0c97ef0b), (0xe9341259), (0xf2b3c361),
(0x891da0c1), (0x536f801e), (0x2aa9056b), (0xea2b6d80),
(0x588eccdb), (0x2075baa6), (0xa90f3a76), (0xbaf83bf7),
(0x0169e605), (0x41e34a69), (0x46b58a8e), (0x2e6fe65a),
(0x1047a7d0), (0xc1843c24), (0x3b6e71b1), (0x2d5ac199),
(0xcf57f6ec), (0x9db1f856), (0xa706887c), (0x5716b156),
(0xe3c2fcdf), (0xe68517fb), (0x545a4678), (0xcc8cdd4b),
]
C = [
0xa2ded572, 0x67f815df, 0x0a15847b, 0x571523b7, 0x90d6ab81, 0xf6875a4d,
0xc54f9f4e, 0x402bd1c3, 0xe03a98ea, 0x9cfa455c, 0x99d2c503, 0x9a99b266,
0xb4960266, 0x8a53bbf2, 0x1a1456b5, 0x31a2db88, 0x5c5aa303, 0xdb0e199a,
0x0ab23f40, 0x1044c187, 0x8019051c, 0x1d959e84, 0xadeb336f, 0xdccde75e,
0x9213ba10, 0x416bbf02, 0x156578dc, 0xd027bbf7, 0x39812c0a, 0x5078aa37,
0xd2bf1a3f, 0xd3910041, 0x0d5a2d42, 0x907eccf6, 0x9c9f62dd, 0xce97c092,
0x0ba75c18, 0xac442bc7, 0xd665dfd1, 0x23fcc663, 0x036c6e97, 0x1ab8e09e,
0x7e450521, 0xa8ec6c44, 0xbb03f1ee, 0xfa618e5d, 0xb29796fd, 0x97818394,
0x37858e4a, 0x2f3003db, 0x2d8d672a, 0x956a9ffb, 0x8173fe8a, 0x6c69b8f8,
0x4672c78a, 0x14427fc0, 0x8f15f4c5, 0xc45ec7bd, 0xa76f4475, 0x80bb118f,
0xb775de52, 0xbc88e4ae, 0x1e00b882, 0xf4a3a698, 0x338ff48e, 0x1563a3a9,
0x24565faa, 0x89f9b7d5, 0x20edf1b6, 0xfde05a7c, 0x5ae9ca36, 0x362c4206,
0x433529ce, 0x3d98fe4e, 0x74f93a53, 0xa74b9a73, 0x591ff5d0, 0x86814e6f,
0x81ad9d0e, 0x9f5ad8af, 0x670605a7, 0x6a6234ee, 0xbe280b8b, 0x2717b96e,
0x26077447, 0x3f1080c6, 0x6f7ea0e0, 0x7b487ec6, 0xa50a550d, 0xc0a4f84a,
0x9fe7e391, 0x9ef18e97, 0x81727686, 0xd48d6050, 0x415a9e7e, 0x62b0e5f3,
0xec1f9ffc, 0x7a205440, 0x001ae4e3, 0x84c9f4ce, 0xf594d74f, 0xd895fa9d,
0x117e2e55, 0xa554c324, 0x2872df5b, 0x286efebd, 0xe27ff578, 0xb2c4a50f,
0xef7c8905, 0x2ed349ee, 0x85937e44, 0x7f5928eb, 0x37695f70, 0x4a3124b3,
0xf128865e, 0x65e4d61d, 0x04771bc7, 0xe720b951, 0xe843fe74, 0x8a87d423,
0xa3e8297d, 0xf2947692, 0x097acbdd, 0xc1d9309b, 0xfb301b1d, 0xe01bdc5b,
0x4f4924da, 0xbf829cf2, 0x31bae7a4, 0xffbf70b4, 0x0544320d, 0x48bcf8de,
0x32fcae3b, 0x39d3bb53, 0xc1c39f45, 0xa08b29e0, 0xfd05c9e5, 0x0f09aef7,
0x12347094, 0x34f19042, 0x01b771a2, 0x95ed44e3, 0x368e3be9, 0x4a982f4f,
0x631d4088, 0x15f66ca0, 0x4b44c147, 0xffaf5287, 0xf14abb7e, 0x30c60ae2,
0xc5b67046, 0xe68c6ecc, 0x56a4d5a4, 0x00ca4fbd, 0x4b849dda, 0xae183ec8,
0x45ce5773, 0xadd16430, 0x68cea6e8, 0x67255c14, 0xf28cdaa3, 0x16e10ecb,
0x5806e933, 0x9a99949a, 0x20b2601f, 0x7b846fc2, 0x7facced1, 0x1885d1a0,
0xa15b5932, 0xd319dd8d, 0xc01c9a50, 0x46b4a5aa, 0x67633d9f, 0xba6b04e4,
0xab19caf6, 0x7eee560b, 0xea79b11f, 0x742128a9, 0x35f7bde9, 0xee51363b,
0x5aac571d, 0x76d35075, 0xfec2463a, 0x01707da3, 0xafc135f7, 0x42d8a498,
0x20eced78, 0x79676b9e, 0x15638341, 0xa8db3aea, 0x4d3bc3fa, 0x832c8332,
0x1f3b40a7, 0xf347271c, 0x34f04059, 0x9a762db7, 0x6c4e3ee7, 0xfd4f21d2,
0x398dfdb8, 0xef5957dc, 0x490c9b8d, 0xdaeb492b, 0x49d7a25b, 0x0d70f368,
0xd0ae3b7d, 0x84558d7a, 0xf0e9a5f5, 0x658ef8e4, 0xf4a2b8a0, 0x533b1036,
0x9e07a80c, 0x5aec3e75, 0x92946891, 0x4f88e856, 0x555cb05b, 0x4cbcbaf8,
0x993bbbe3, 0x7b9487f3, 0xd6f4da75, 0x5d1c6b72, 0x28acae64, 0x6db334dc,
0x50a5346c, 0x71db28b8, 0xf2e261f8, 0x2a518d10, 0x3364dbe3, 0xfc75dd59,
0xf1bcac1c, 0xa23fce43, 0x3cd1bb67, 0xb043e802, 0xca5b0a33, 0x75a12988,
0x4d19347f, 0x5c5316b4, 0xc3943b92, 0x1e4d790e, 0xd7757479, 0x3fafeeb6,
0xf7d4a8ea, 0x21391abe, 0x097ef45c, 0x5127234c, 0x5324a326, 0xd23c32ba,
0x4a17a344, 0xadd5a66d, 0xa63e1db5, 0x08c9f2af, 0x983d5983, 0x563c6b91,
0xa17cf84c, 0x4d608672, 0xcc3ee246, 0xf6c76e08, 0xb333982f, 0x5e76bcb1,
0xa566d62b, 0x2ae6c4ef, 0xe8b6f406, 0x36d4c1be, 0x1582ee74, 0x6321efbc,
0x0d4ec1fd, 0x69c953f4, 0xc45a7da7, 0x26585806, 0x1614c17e, 0x16fae006,
0x3daf907e, 0x3f9d6328, 0xe3f2c9d2, 0x0cd29b00, 0x30ceaa5f, 0x300cd4b7,
0x16512a74, 0x9832e0f2, 0xd830eb0d, 0x9af8cee3, 0x7b9ec54b, 0x9279f1b5,
0x6ee651ff, 0xd3688604, 0x574d239b, 0x316796e6, 0xf3a6e6cc, 0x05750a17,
0xd98176b1, 0xce6c3213, 0x8452173c, 0x62a205f8, 0xb3cb2bf4, 0x47154778,
0x825446ff, 0x486a9323, 0x0758df38, 0x65655e4e, 0x897cfcf2, 0x8e5086fc,
0x442e7031, 0x86ca0bd0, 0xa20940f0, 0x4e477830, 0x39eea065, 0x8338f7d1,
0x37e95ef7, 0xbd3a2ce4, 0x26b29721, 0x6ff81301, 0xd1ed44a3, 0xe7de9fef,
0x15dfa08b, 0xd9922576, 0xf6f7853c, 0xbe42dc12, 0x7ceca7d8, 0x7eb027ab,
0xda7d8d53, 0xdea83eaa, 0x93ce25aa, 0xd86902bd, 0xfd43f65a, 0xf908731a,
0xdaef5fc0, 0xa5194a17, 0x33664d97, 0x6a21fd4c, 0x3198b435, 0x701541db,
0xbb0f1eea, 0x9b54cded, 0xa163d09a, 0x72409751, 0xbf9d75f6, 0xe26f4791,
]
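# Sb below is JH's bitsliced S-box layer: one call transforms a single bit-plane
# of four state words, and the round-constant word c selects, bit by bit, which
# of the two 4-bit S-boxes is applied. Lb is the companion linear mixing layer
# over eight words (a best-effort description of the JH reference design).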
def Sb(x, c):
x[3] = ~x[3]
x[0] ^= (c) & ~x[2]
tmp = (c) ^ (x[0] & x[1])
x[0] ^= x[2] & x[3]
x[3] ^= ~x[1] & x[2]
x[1] ^= x[0] & x[2]
x[2] ^= x[0] & ~x[3]
x[0] ^= x[1] | x[3]
x[3] ^= x[1] & x[2]
x[1] ^= tmp & x[0]
x[2] ^= tmp
return x
def Lb(x):
x[4] ^= x[1]
x[5] ^= x[2]
x[6] ^= x[3] ^ x[0]
x[7] ^= x[0]
x[0] ^= x[5]
x[1] ^= x[6]
x[2] ^= x[7] ^ x[4]
x[3] ^= x[4]
return x
def Ceven(n, r):
return C[((r) << 3) + 3 - n]
def Codd(n, r):
return C[((r) << 3) + 7 - n]
def S(x0, x1, x2, x3, cb, r):
x = Sb([x0[3], x1[3], x2[3], x3[3]], cb(3, r))
x0[3] = x[0]
x1[3] = x[1]
x2[3] = x[2]
x3[3] = x[3]
x = Sb([x0[2], x1[2], x2[2], x3[2]], cb(2, r))
x0[2] = x[0]
x1[2] = x[1]
x2[2] = x[2]
x3[2] = x[3]
x = Sb([x0[1], x1[1], x2[1], x3[1]], cb(1, r))
x0[1] = x[0]
x1[1] = x[1]
x2[1] = x[2]
x3[1] = x[3]
x = Sb([x0[0], x1[0], x2[0], x3[0]], cb(0, r))
x0[0] = x[0]
x1[0] = x[1]
x2[0] = x[2]
x3[0] = x[3]
def L(x0, x1, x2, x3, x4, x5, x6, x7):
x = Lb([x0[3], x1[3], x2[3], x3[3], x4[3], x5[3], x6[3], x7[3]])
x0[3] = x[0]
x1[3] = x[1]
x2[3] = x[2]
x3[3] = x[3]
x4[3] = x[4]
x5[3] = x[5]
x6[3] = x[6]
x7[3] = x[7]
x = Lb([x0[2], x1[2], x2[2], x3[2], x4[2], x5[2], x6[2], x7[2]])
x0[2] = x[0]
x1[2] = x[1]
x2[2] = x[2]
x3[2] = x[3]
x4[2] = x[4]
x5[2] = x[5]
x6[2] = x[6]
x7[2] = x[7]
x = Lb([x0[1], x1[1], x2[1], x3[1], x4[1], x5[1], x6[1], x7[1]])
x0[1] = x[0]
x1[1] = x[1]
x2[1] = x[2]
x3[1] = x[3]
x4[1] = x[4]
x5[1] = x[5]
x6[1] = x[6]
x7[1] = x[7]
x = Lb([x0[0], x1[0], x2[0], x3[0], x4[0], x5[0], x6[0], x7[0]])
x0[0] = x[0]
x1[0] = x[1]
x2[0] = x[2]
x3[0] = x[3]
x4[0] = x[4]
x5[0] = x[5]
x6[0] = x[6]
x7[0] = x[7]
def Wz(x, c, n):
|
def W(ro, x):
if ro == 0:
return Wz(x, (0x55555555), 1)
elif ro == 1:
return Wz(x, (0x33333333), 2)
elif ro == 2:
return Wz(x, (0x0F0F0F0F), 4)
elif ro == 3:
return Wz(x, (0x00FF00FF), 8)
elif ro == 4:
return Wz(x, (0x0000FFFF), 16)
elif ro == 5:
t = x[3]
x[3] = x[2]
x[2] = t
t = x[1]
x[1] = x[0]
x[0] = t
return
elif ro == 6:
t = x[3]
x[3] = x[1]
x[1] = t
t = x[2]
x[2] = x[0]
x[0] = t
def SL(h, r, ro):
S(h[0], h[2], h[4], h[6], Ceven, r)
S(h[1], h[3], h[5], h[7], Codd, r)
L(h[0], h[2], h[4], h[6], h[1], h[3], h[5], h[7])
W(ro, h[1])
W(ro, h[3])
W(ro, h[5])
W(ro, h[7])
def READ_STATE(h, state):
h[0][3] = state[0]
h[0][2] = state[1]
h[0][1] = state[2]
h[0][0] = state[3]
h[1][3] = state[4]
h[1][2] = state[5]
h[1][1] = state[6]
h[1][0] = state[7]
h[2][3] = state[8]
h[2][2] = state[9]
h[2][1] = state[10]
h[2][0] = state[11]
h[3][3] = state[12]
h[3][2] = state[13]
h[3][1] = state[14]
h[3][0] = state[15]
h[4][3] = state[16]
h[4][2] = state[17]
h[4][1] = state[18]
h[4][0] = state[19]
h[5][3] = state[20]
h[5][2] = state[21]
h[5][1] = state[22]
h[5][0] = state[23]
h[6][3] = state[24]
h[6][2] = state[25]
h[6][1] = state[26]
h[6][0] = state[27]
h[7][3] = state[28]
h[7][2] = state[29]
h[7][1] = state[30]
h[7][0] = state[31]
def WRITE_STATE(h, state):
state[0] = h[0][3]
state[1] = h[0][2]
state[2] = h[0][1]
state[3] = h[0][0]
state[4] = h[1][3]
state[5] = h[1][2]
state[6] = h[1][1]
state[7] = h[1][0]
state[8] = h[2][3]
state[9] = h[2][2]
state[10] = h[2][1]
state[11] = h[2][0]
state[12] = h[3][3]
state[13] = h[3][2]
state[14] = h[3][1]
state[15] = h[3][0]
state[16] = h[4][3]
state[17] = h[4][2]
state[18] = h[4][1]
state[19] = h[4][0]
state[20] = h[5][3]
state[21] = h[5][2]
state[22] = h[5][1]
state[23] = h[5][0]
state[24] = h[6][3]
state[25] = h[6][2]
state[26] = h[6][1]
state[27] = h[6][0]
state[28] = h[7][3]
state[29] = h[7][2]
state[30] = h[7][1]
state[31] = h[7][0]
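# E8 is the core JH permutation: 42 rounds applied in groups of 7 so that each
# swap width ro = 0..6 (1-, 2-, 4-, 8- and 16-bit swaps, plus the two word-level
# swaps in W) is used once per group; each round is S-box layer, linear layer,
# then swap.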
def E8(h):
for r in range(0, 42, 7):
SL(h, r + 0, 0)
SL(h, r + 1, 1)
SL(h, r + 2, 2)
SL(h, r + 3, 3)
SL(h, r + 4, 4)
SL(h, r + 5, 5)
SL(h, r + 6, 6)
def bufferXORInsertBackwards(buf, data, x, y, bufferOffsetX=0, bufferOffsetY=0):
for i in range(x):
for j in range(y):  # column height is y (all call sites here pass x == y == 4)
m = i + bufferOffsetX
n = bufferOffsetY + y - 1 - j
buf[m][n] = buf[m][n] ^ data[i * 4 + j]
def jh_update(ctx, msg, msg_len=None):
buf = ctx['buffer']
buf_len = len(buf)
ptr = ctx['ptr']
if msg_len is None:
msg_len = len(msg)
if msg_len < buf_len - ptr:
op.buffer_insert(buf, ptr, msg, msg_len)
ptr += msg_len
ctx['ptr'] = ptr
return
V = [None] * JH_HX
for i in range(JH_HX):
V[i] = [None] * JH_HY
READ_STATE(V, ctx['state'])
while msg_len > 0:
clen = buf_len - ptr
if clen > msg_len:
clen = msg_len
op.buffer_insert(buf, ptr, msg, clen)
ptr += clen
msg = msg[clen:]
msg_len -= clen
if ptr == buf_len:
buf32 = op.swap32_list(op.bytes_to_i32_list(buf))
bufferXORInsertBackwards(V, buf32, 4, 4)
E8(V)
bufferXORInsertBackwards(V, buf32, 4, 4, 4, 0)
blockCountLow = ctx['blockCountLow']
blockCountLow = op.t32(blockCountLow + 1)
ctx['blockCountLow'] = blockCountLow
if blockCountLow == 0:
ctx['blockCountHigh'] += 1
ptr = 0
WRITE_STATE(V, ctx['state'])
ctx['ptr'] = ptr
def jh_close(ctx):
buf = bytearray(128)
l = [None] * 4
buf[0] = 0x80
ptr = ctx['ptr']
if ptr == 0:
numz = 47
else:
numz = 111 - ptr
buf[1:1+numz] = [0] * numz
blockCountLow = ctx['blockCountLow']
blockCountHigh = ctx['blockCountHigh']
l[0] = op.t32(blockCountLow << 9) + (ptr << 3)
l[1] = op.t32(blockCountLow >> 23) + op.t32(blockCountHigh << 9)
l[2] = op.t32(blockCountHigh >> 23)
l[3] = 0
lBytes = op.bytes_from_i32_list(op.swap32_list(l))
op.buffer_insert(buf, 1 + numz, lBytes[::-1], 16)
jh_update(ctx, buf, numz + 17)
out = [None] * 16
state = ctx['state']
for u in range(16):
out[u] = op.swap32(state[u + 16])
return out
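# Usage sketch for the one-shot helper below; hedged, since it assumes the op
# module's byte/word helpers behave as they are used throughout this file and
# that msg is a bytes-like value:
#
#   digest = jh(bytearray(b"abc"))                  # 64-byte JH-512 digest
#   words = jh(bytearray(b"abc"), out_array=True)   # same digest as 16 words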
def jh(msg, out_array=False, in_array=False):
ctx = {}
ctx['state'] = op.swap32_list(IV512)
ctx['ptr'] = 0
ctx['buffer'] = bytearray(Jh_BlockSize)
ctx['blockCountHigh'] = 0
ctx['blockCountLow'] = 0
if in_array:
msg = op.bytes_from_i32_list(msg)
jh_update(ctx, msg)
res = jh_close(ctx)
if not out_array:
res = op.bytes_from_i32_list(res)
return res
| t = (x[3] & (c)) << (n)
x[3] = ((x[3] >> (n)) & (c)) | t
t = (x[2] & (c)) << (n)
x[2] = ((x[2] >> (n)) & (c)) | t
t = (x[1] & (c)) << (n)
x[1] = ((x[1] >> (n)) & (c)) | t
t = (x[0] & (c)) << (n)
x[0] = ((x[0] >> (n)) & (c)) | t | identifier_body |
parse.rs | //! This module contains functionality for parsing a regular expression into the intermediate
//! representation in repr.rs (from which it is compiled into a state graph), and optimizing that
//! intermediate representation.
#![allow(dead_code)]
use std::iter::FromIterator;
use std::ops::{Index, Range, RangeFull};
use std::str::FromStr;
use crate::repr::{AnchorLocation, Pattern, Repetition};
/// The entry point for this module: Parse a string into a `Pattern` that can be optimized and/or
/// compiled.
pub fn parse(s: &str) -> Result<Pattern, String> {
let src: Vec<char> = s.chars().collect();
parse_re(ParseState::new(&src)).map(|t| t.0)
}
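// Usage sketch; hedged, since the exact tree shown is illustrative and the
// `Pattern`/`Repetition` variants are those defined in repr.rs:
//
//     let pat = parse("a+").unwrap();
//     assert_eq!(
//         pat,
//         Pattern::Repeated(Box::new(Repetition::OnceOrMore(Pattern::Char('a'))))
//     );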
/// ParseStack contains already parsed elements of a regular expression, and is used for parsing
/// textual regular expressions (as the parsing algorithm is stack-based). It can be converted to
/// a Pattern.
struct ParseStack {
s: Vec<Pattern>,
}
impl ParseStack {
fn new() -> ParseStack {
ParseStack {
s: Vec::with_capacity(4),
}
}
fn push(&mut self, p: Pattern) {
self.s.push(p)
}
fn pop(&mut self) -> Option<Pattern> {
self.s.pop()
}
fn empty(&self) -> bool {
self.s.is_empty()
}
fn to_pattern(mut self) -> Pattern {
if self.s.len() > 1 {
Pattern::Concat(self.s)
} else if self.s.len() == 1 {
self.s.pop().unwrap()
} else {
panic!("empty stack")
}
}
}
/// State of the parser, quite a simple struct. It contains the current substring that a parser
/// function is concerned with as well as the position within the overall parsed string, so that
/// useful positions can be reported to users. In addition, it provides functions to cheaply create
/// "sub-ParseStates" containing a substring of its current string.
///
/// It also supports indexing by ranges and index.
struct ParseState<'a> {
/// The string to parse. This may be a substring of the "overall" matched string.
src: &'a [char],
/// The position within the overall string (for error reporting).
pos: usize,
}
impl<'a> ParseState<'a> {
/// new returns a new ParseState operating on the specified input string.
fn new(s: &'a [char]) -> ParseState<'a> {
ParseState { src: s, pos: 0 }
}
/// from returns a new ParseState operating on the [from..] sub-string of the current
/// ParseState.
fn from(&self, from: usize) -> ParseState<'a> {
self.sub(from, self.len())
}
/// pos returns the overall position within the input regex.
fn pos(&self) -> usize {
self.pos
}
/// sub returns a sub-ParseState containing [from..to] of the current one.
fn sub(&self, from: usize, to: usize) -> ParseState<'a> {
ParseState {
src: &self.src[from..to],
pos: self.pos + from,
}
}
/// len returns how many characters this ParseState contains.
fn len(&self) -> usize {
self.src.len()
}
/// err returns a formatted error string containing the specified message and the overall
/// position within the original input string.
fn err<T>(&self, s: &str, i: usize) -> Result<T, String> {
Err(format!("{} at :{}", s, self.pos + i))
}
}
impl<'a> Index<Range<usize>> for ParseState<'a> {
type Output = [char];
fn index(&self, r: Range<usize>) -> &Self::Output {
&self.src[r]
}
}
impl<'a> Index<RangeFull> for ParseState<'a> {
type Output = [char];
fn index(&self, r: RangeFull) -> &Self::Output {
&self.src[r]
}
}
impl<'a> Index<usize> for ParseState<'a> {
type Output = char;
fn index(&self, i: usize) -> &Self::Output {
&self.src[i]
}
}
impl<'a> Clone for ParseState<'a> {
fn clone(&self) -> ParseState<'a> {
ParseState {
src: self.src,
pos: self.pos,
}
}
}
/// parse_re is the parser entry point; like all parser functions, it returns either a pair of
/// (parsed pattern, new ParseState) or an error string.
fn parse_re<'a>(mut s: ParseState<'a>) -> Result<(Pattern, ParseState<'a>), String> {
// The stack assists us in parsing the linear parts of a regular expression, e.g. non-pattern
// characters, or character sets.
let mut stack = ParseStack::new();
loop {
if s.len() == 0 {
break;
}
match s[0] {
'.' => {
stack.push(Pattern::Any);
s = s.from(1);
}
'$' => {
if s.len() == 1 {
stack.push(Pattern::Anchor(AnchorLocation::End));
} else {
stack.push(Pattern::Char('$'))
}
s = s.from(1);
}
'^' => {
if s.pos() == 0 {
stack.push(Pattern::Anchor(AnchorLocation::Begin));
} else {
stack.push(Pattern::Char('^'));
}
s = s.from(1);
}
r @ '+' | r @ '*' | r @ '?' => {
if let Some(p) = stack.pop() {
let rep = match r {
'+' => Repetition::OnceOrMore(p),
'*' => Repetition::ZeroOrMore(p),
'?' => Repetition::ZeroOrOnce(p),
_ => unimplemented!(),
};
stack.push(Pattern::Repeated(Box::new(rep)));
s = s.from(1);
} else {
return s.err("+ without pattern to repeat", 0);
}
}
// Alternation: Parse the expression on the right of the pipe sign and push an
// alternation between what we've already seen and the expression on the right.
'|' => {
let (rest, newst) = parse_re(s.from(1))?;
let left = stack.to_pattern();
stack = ParseStack::new();
stack.push(Pattern::Alternate(vec![left, rest]));
s = newst;
}
'(' => {
match split_in_parens(s.clone(), ROUND_PARENS) {
Some((parens, newst)) => {
// Parse the sub-regex within parentheses.
let (pat, rest) = parse_re(parens)?;
assert_eq!(rest.len(), 0);
stack.push(Pattern::Submatch(Box::new(pat)));
// Set the current state to contain the string after the parentheses.
s = newst;
}
None => return s.err("unmatched (", s.len()),
}
}
')' => return s.err("unopened ')'", 0),
'[' => match parse_char_set(s) {
Ok((pat, newst)) => {
stack.push(pat);
s = newst;
}
Err(e) => return Err(e),
},
']' => return s.err("unopened ']'", 0),
'{' => {
match split_in_parens(s.clone(), CURLY_BRACKETS) {
Some((rep, newst)) => {
if let Some(p) = stack.pop() {
let rep = parse_specific_repetition(rep, p)?;
stack.push(rep);
s = newst;
} else {
return s.err("repetition {} without pattern to repeat", 0);
}
}
None => return s.err("unmatched {", s.len()),
};
}
c => {
stack.push(Pattern::Char(c));
s = s.from(1);
}
}
}
Ok((stack.to_pattern(), s))
}
/// parse_char_set parses the character set at the start of the input state.
/// Valid sets are [a], [ab], [a-z], [-a-z], [a-z-] and [a-fh-kl].
fn parse_char_set<'a>(s: ParseState<'a>) -> Result<(Pattern, ParseState<'a>), String> {
if let Some((cs, rest)) = split_in_parens(s.clone(), SQUARE_BRACKETS) {
let mut chars: Vec<char> = vec![];
let mut ranges: Vec<Pattern> = vec![];
let mut st = cs;
loop {
// Try to match a range "a-z" by looking for the dash; if no dash, add character to set
// and advance.
if st.len() >= 3 && st[1] == '-' {
ranges.push(Pattern::CharRange(st[0], st[2]));
st = st.from(3);
} else if st.len() > 0 {
chars.push(st[0]);
st = st.from(1);
} else {
break;
}
}
assert_eq!(st.len(), 0);
if chars.len() == 1 {
ranges.push(Pattern::Char(chars.pop().unwrap()));
} else if !chars.is_empty() {
ranges.push(Pattern::CharSet(chars));
}
if ranges.len() == 1 {
Ok((ranges.pop().unwrap(), rest))
} else {
let pat = Pattern::Alternate(ranges);
Ok((pat, rest))
}
} else {
s.err("unmatched [", s.len())
}
}
/// Parse a repetition spec inside curly braces: {1} | {1,} | {,1} | {1,2}
fn parse_specific_repetition<'a>(rep: ParseState<'a>, p: Pattern) -> Result<Pattern, String> {
let mut nparts = 0;
let mut parts: [Option<&[char]>; 2] = Default::default();
for p in rep[..].split(|c| *c == ',') {
parts[nparts] = Some(p);
nparts += 1;
if nparts == 2 {
break;
}
}
if nparts == 0 {
// {}
return rep.err("empty {} spec", 0);
} else if nparts == 1 {
// {1}
if let Ok(n) = u32::from_str(&String::from_iter(parts[0].unwrap().iter())) {
return Ok(Pattern::Repeated(Box::new(Repetition::Specific(
p, n, None,
))));
} else {
return Err(format!(
"invalid repetition '{}'",
String::from_iter(rep[..].iter())
));
}
} else if nparts == 2 {
fn errtostr(r: Result<u32, std::num::ParseIntError>) -> Result<u32, String> {
match r {
Ok(u) => Ok(u),
Err(e) => Err(format!("{}", e)),
}
}
let (p0, p1) = (parts[0].unwrap(), parts[1].unwrap());
// {2,3}
if !p0.is_empty() && !p1.is_empty() {
let min = errtostr(u32::from_str(&String::from_iter(p0.iter())))?;
let max = errtostr(u32::from_str(&String::from_iter(p1.iter())))?;
return Ok(Pattern::Repeated(Box::new(Repetition::Specific(
p,
min,
Some(max),
))));
} else if p0.is_empty() && !p1.is_empty() {
// {,3}
let min = 0;
let max = errtostr(u32::from_str(&String::from_iter(p1.iter())))?;
return Ok(Pattern::Repeated(Box::new(Repetition::Specific(
p,
min,
Some(max),
))));
} else if !p0.is_empty() && p1.is_empty() {
// {3,}
let min = errtostr(u32::from_str(&String::from_iter(p0.iter())))?;
let repetition =
Pattern::Repeated(Box::new(Repetition::Specific(p.clone(), min, None)));
return Ok(Pattern::Concat(vec![
repetition,
Pattern::Repeated(Box::new(Repetition::ZeroOrMore(p))),
]));
}
}
Err(format!("invalid repetition pattern {:?}", &rep[..]))
}
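// For example, "a{2,3}" parses to Repeated(Specific(Char('a'), 2, Some(3))),
// while the open-ended "a{2,}" desugars into
// Concat([Specific(a, 2, None), ZeroOrMore(a)]) as implemented above.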
/// Constants for generalizing parsing of parentheses.
const ROUND_PARENS: (char, char) = ('(', ')');
/// Constants for generalizing parsing of parentheses.
const SQUARE_BRACKETS: (char, char) = ('[', ']');
/// Constants for generalizing parsing of parentheses.
const CURLY_BRACKETS: (char, char) = ('{', '}');
/// split_in_parens returns two new ParseStates; the first one containing the contents of the
/// parenthesized clause starting at s[0], the second one containing the rest.
fn split_in_parens<'a>(
s: ParseState<'a>,
parens: (char, char),
) -> Option<(ParseState<'a>, ParseState<'a>)> {
if let Some(end) = find_closing_paren(s.clone(), parens) {
Some((s.sub(1, end), s.from(end + 1)))
} else {
None
}
}
/// find_closing_paren returns the index of the parenthesis closing the opening parenthesis at the
/// beginning of the state's string.
fn find_closing_paren<'a>(s: ParseState<'a>, parens: (char, char)) -> Option<usize> {
if s[0] != parens.0 {
return None;
}
let mut count = 0;
for i in 0..s.len() {
if s[i] == parens.0 {
count += 1;
} else if s[i] == parens.1 {
count -= 1;
}
if count == 0 {
return Some(i);
}
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use crate::compile::*;
use crate::repr::*;
use crate::state::dot;
#[test]
fn test_find_closing_paren() {
for case in &[
("(abc)de", Some(4)),
("()a", Some(1)),
("(abcd)", Some(5)),
("(abc", None),
] {
let src: Vec<char> = case.0.chars().collect();
assert_eq!(
find_closing_paren(ParseState::new(src.as_ref()), ROUND_PARENS),
case.1
);
}
}
#[test]
fn test_parse_charset() {
for case in &[
("[a]", Pattern::Char('a')),
("[ab]", Pattern::CharSet(vec!['a', 'b'])),
("[ba-]", Pattern::CharSet(vec!['b', 'a', '-'])),
("[a-z]", Pattern::CharRange('a', 'z')),
(
"[a-z-]",
Pattern::Alternate(vec![Pattern::CharRange('a', 'z'), Pattern::Char('-')]),
),
(
"[-a-z-]",
Pattern::Alternate(vec![
Pattern::CharRange('a', 'z'),
Pattern::CharSet(vec!['-', '-']),
]),
),
(
"[a-zA-Z]",
Pattern::Alternate(vec![
Pattern::CharRange('a', 'z'),
Pattern::CharRange('A', 'Z'),
]),
),
(
"[a-zA-Z-]",
Pattern::Alternate(vec![
Pattern::CharRange('a', 'z'),
Pattern::CharRange('A', 'Z'),
Pattern::Char('-'),
]),
),
] {
let src: Vec<char> = case.0.chars().collect();
let st = ParseState::new(&src);
assert_eq!(parse_char_set(st).unwrap().0, case.1);
}
}
#[test]
fn test_parse_subs() |
#[test]
fn test_parse_res() {
let case1 = (
"a(Bcd)e",
Pattern::Concat(vec![
Pattern::Char('a'),
Pattern::Submatch(Box::new(Pattern::Concat(vec![
Pattern::Char('B'),
Pattern::Char('c'),
Pattern::Char('d'),
]))),
Pattern::Char('e'),
]),
);
for c in &[case1] {
assert_eq!(c.1, parse(c.0).unwrap());
}
}
#[test]
fn test_parse_res_errors() {
let case1 = ("ac)d", "unopened ')' at :2");
let case2 = ("(ac)d)", "unopened ')' at :5");
let case3 = ("[ac]d]", "unopened ']' at :5");
let case4 = ("(ac)d]", "unopened ']' at :5");
for c in &[case1, case2, case3, case4] {
assert_eq!(c.1, parse(c.0).unwrap_err());
}
}
#[test]
fn test_parse_repetition_manual() {
println!(
"digraph st {{ {} }}",
dot(&start_compile(&parse("[abc]{1,5}").unwrap()))
);
}
#[test]
fn test_parse_manual() {
let rep = parse("a|[bed]|(c|d|e)|f").unwrap();
println!("{:?}", rep.clone());
let dot = dot(&start_compile(&rep));
println!("digraph st {{ {} }}", dot);
}
#[test]
fn test_parse_manual2() {
println!("{:?}", parse("abcdef"));
}
}
| {
let case1 = (
"a(b)c",
Pattern::Concat(vec![
Pattern::Char('a'),
Pattern::Submatch(Box::new(Pattern::Char('b'))),
Pattern::Char('c'),
]),
);
let case2 = ("(b)", Pattern::Submatch(Box::new(Pattern::Char('b'))));
for c in &[case1, case2] {
assert_eq!(c.1, parse(c.0).unwrap());
}
} | identifier_body |
parse.rs | //! This module contains functionality for parsing a regular expression into the intermediate
//! representation in repr.rs (from which it is compiled into a state graph), and optimizing that
//! intermediate representation.
#![allow(dead_code)]
use std::iter::FromIterator;
use std::ops::{Index, Range, RangeFull};
use std::str::FromStr;
use crate::repr::{AnchorLocation, Pattern, Repetition};
/// The entry point for this module: Parse a string into a `Pattern` that can be optimized and/or
/// compiled.
pub fn parse(s: &str) -> Result<Pattern, String> {
let src: Vec<char> = s.chars().collect();
parse_re(ParseState::new(&src)).map(|t| t.0)
}
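// Usage sketch; hedged, since the exact tree shown is illustrative and the
// `Pattern`/`Repetition` variants are those defined in repr.rs:
//
//     let pat = parse("a+").unwrap();
//     assert_eq!(
//         pat,
//         Pattern::Repeated(Box::new(Repetition::OnceOrMore(Pattern::Char('a'))))
//     );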
/// ParseStack contains already parsed elements of a regular expression, and is used for parsing
/// textual regular expressions (as the parsing algorithm is stack-based). It can be converted to
/// a Pattern.
struct ParseStack {
s: Vec<Pattern>,
}
impl ParseStack {
fn new() -> ParseStack {
ParseStack {
s: Vec::with_capacity(4),
}
}
fn push(&mut self, p: Pattern) {
self.s.push(p)
}
fn pop(&mut self) -> Option<Pattern> {
self.s.pop()
}
fn empty(&self) -> bool {
self.s.is_empty()
}
fn to_pattern(mut self) -> Pattern {
if self.s.len() > 1 {
Pattern::Concat(self.s)
} else if self.s.len() == 1 {
self.s.pop().unwrap()
} else {
panic!("empty stack")
}
}
}
/// State of the parser, quite a simple struct. It contains the current substring that a parser
/// function is concerned with as well as the position within the overall parsed string, so that
/// useful positions can be reported to users. In addition, it provides functions to cheaply create
/// "sub-ParseStates" containing a substring of its current string.
///
/// It also supports indexing by ranges and index.
struct ParseState<'a> {
/// The string to parse. This may be a substring of the "overall" matched string.
src: &'a [char],
/// The position within the overall string (for error reporting).
pos: usize,
}
impl<'a> ParseState<'a> {
/// new returns a new ParseState operating on the specified input string.
fn new(s: &'a [char]) -> ParseState<'a> {
ParseState { src: s, pos: 0 }
}
/// from returns a new ParseState operating on the [from..] sub-string of the current
/// ParseState.
fn from(&self, from: usize) -> ParseState<'a> {
self.sub(from, self.len())
}
/// pos returns the overall position within the input regex.
fn pos(&self) -> usize {
self.pos
}
/// sub returns a sub-ParseState containing [from..to] of the current one.
fn sub(&self, from: usize, to: usize) -> ParseState<'a> {
ParseState {
src: &self.src[from..to],
pos: self.pos + from,
}
}
/// len returns how many characters this ParseState contains.
fn len(&self) -> usize {
self.src.len()
}
/// err returns a formatted error string containing the specified message and the overall
/// position within the original input string.
fn err<T>(&self, s: &str, i: usize) -> Result<T, String> {
Err(format!("{} at :{}", s, self.pos + i))
}
}
impl<'a> Index<Range<usize>> for ParseState<'a> {
type Output = [char];
fn index(&self, r: Range<usize>) -> &Self::Output {
&self.src[r]
}
}
impl<'a> Index<RangeFull> for ParseState<'a> {
type Output = [char];
fn index(&self, r: RangeFull) -> &Self::Output {
&self.src[r]
}
}
impl<'a> Index<usize> for ParseState<'a> {
type Output = char;
fn index(&self, i: usize) -> &Self::Output {
&self.src[i]
}
}
impl<'a> Clone for ParseState<'a> {
fn clone(&self) -> ParseState<'a> {
ParseState {
src: self.src,
pos: self.pos,
}
}
}
/// parse_re is the parser entry point; like all parser functions, it returns either a pair of
/// (parsed pattern, new ParseState) or an error string.
fn parse_re<'a>(mut s: ParseState<'a>) -> Result<(Pattern, ParseState<'a>), String> {
// The stack assists us in parsing the linear parts of a regular expression, e.g. non-pattern
// characters, or character sets.
let mut stack = ParseStack::new();
loop {
if s.len() == 0 {
break;
}
match s[0] {
'.' => {
stack.push(Pattern::Any);
s = s.from(1);
}
'$' => {
if s.len() == 1 {
stack.push(Pattern::Anchor(AnchorLocation::End));
} else {
stack.push(Pattern::Char('$'))
}
s = s.from(1);
}
'^' => {
if s.pos() == 0 {
stack.push(Pattern::Anchor(AnchorLocation::Begin));
} else {
stack.push(Pattern::Char('^'));
}
s = s.from(1);
}
r @ '+' | r @ '*' | r @ '?' => {
if let Some(p) = stack.pop() {
let rep = match r {
'+' => Repetition::OnceOrMore(p),
'*' => Repetition::ZeroOrMore(p),
'?' => Repetition::ZeroOrOnce(p),
_ => unimplemented!(),
};
stack.push(Pattern::Repeated(Box::new(rep)));
s = s.from(1);
} else {
return s.err("+ without pattern to repeat", 0);
}
}
// Alternation: Parse the expression on the right of the pipe sign and push an
// alternation between what we've already seen and the expression on the right.
'|' => {
let (rest, newst) = parse_re(s.from(1))?;
let left = stack.to_pattern();
stack = ParseStack::new();
stack.push(Pattern::Alternate(vec![left, rest]));
s = newst;
}
'(' => {
match split_in_parens(s.clone(), ROUND_PARENS) {
Some((parens, newst)) => {
// Parse the sub-regex within parentheses.
let (pat, rest) = parse_re(parens)?;
assert_eq!(rest.len(), 0);
stack.push(Pattern::Submatch(Box::new(pat)));
// Set the current state to contain the string after the parentheses.
s = newst;
}
None => return s.err("unmatched (", s.len()),
}
}
')' => return s.err("unopened ')'", 0),
'[' => match parse_char_set(s) {
Ok((pat, newst)) => {
stack.push(pat);
s = newst;
}
Err(e) => return Err(e),
},
']' => return s.err("unopened ']'", 0),
'{' => {
match split_in_parens(s.clone(), CURLY_BRACKETS) {
Some((rep, newst)) => { | } else {
return s.err("repetition {} without pattern to repeat", 0);
}
}
None => return s.err("unmatched {", s.len()),
};
}
c => {
stack.push(Pattern::Char(c));
s = s.from(1);
}
}
}
Ok((stack.to_pattern(), s))
}
/// parse_char_set parses the character set at the start of the input state.
/// Valid sets are [a], [ab], [a-z], [-a-z], [a-z-] and [a-fh-kl].
fn parse_char_set<'a>(s: ParseState<'a>) -> Result<(Pattern, ParseState<'a>), String> {
if let Some((cs, rest)) = split_in_parens(s.clone(), SQUARE_BRACKETS) {
let mut chars: Vec<char> = vec![];
let mut ranges: Vec<Pattern> = vec![];
let mut st = cs;
loop {
// Try to match a range "a-z" by looking for the dash; if no dash, add character to set
// and advance.
if st.len() >= 3 && st[1] == '-' {
ranges.push(Pattern::CharRange(st[0], st[2]));
st = st.from(3);
} else if st.len() > 0 {
chars.push(st[0]);
st = st.from(1);
} else {
break;
}
}
assert_eq!(st.len(), 0);
if chars.len() == 1 {
ranges.push(Pattern::Char(chars.pop().unwrap()));
} else if !chars.is_empty() {
ranges.push(Pattern::CharSet(chars));
}
if ranges.len() == 1 {
Ok((ranges.pop().unwrap(), rest))
} else {
let pat = Pattern::Alternate(ranges);
Ok((pat, rest))
}
} else {
s.err("unmatched [", s.len())
}
}
/// Parse a repetition spec inside curly braces: {1} | {1,} | {,1} | {1,2}
fn parse_specific_repetition<'a>(rep: ParseState<'a>, p: Pattern) -> Result<Pattern, String> {
let mut nparts = 0;
let mut parts: [Option<&[char]>; 2] = Default::default();
for p in rep[..].split(|c| *c == ',') {
parts[nparts] = Some(p);
nparts += 1;
if nparts == 2 {
break;
}
}
if nparts == 0 {
// {}
return rep.err("empty {} spec", 0);
} else if nparts == 1 {
// {1}
if let Ok(n) = u32::from_str(&String::from_iter(parts[0].unwrap().iter())) {
return Ok(Pattern::Repeated(Box::new(Repetition::Specific(
p, n, None,
))));
} else {
return Err(format!(
"invalid repetition '{}'",
String::from_iter(rep[..].iter())
));
}
} else if nparts == 2 {
fn errtostr(r: Result<u32, std::num::ParseIntError>) -> Result<u32, String> {
match r {
Ok(u) => Ok(u),
Err(e) => Err(format!("{}", e)),
}
}
let (p0, p1) = (parts[0].unwrap(), parts[1].unwrap());
// {2,3}
if !p0.is_empty() && !p1.is_empty() {
let min = errtostr(u32::from_str(&String::from_iter(p0.iter())))?;
let max = errtostr(u32::from_str(&String::from_iter(p1.iter())))?;
return Ok(Pattern::Repeated(Box::new(Repetition::Specific(
p,
min,
Some(max),
))));
} else if p0.is_empty() && !p1.is_empty() {
// {,3}
let min = 0;
let max = errtostr(u32::from_str(&String::from_iter(p1.iter())))?;
return Ok(Pattern::Repeated(Box::new(Repetition::Specific(
p,
min,
Some(max),
))));
} else if !p0.is_empty() && p1.is_empty() {
// {3,}
let min = errtostr(u32::from_str(&String::from_iter(p0.iter())))?;
let repetition =
Pattern::Repeated(Box::new(Repetition::Specific(p.clone(), min, None)));
return Ok(Pattern::Concat(vec![
repetition,
Pattern::Repeated(Box::new(Repetition::ZeroOrMore(p))),
]));
}
}
Err(format!("invalid repetition pattern {:?}", &rep[..]))
}
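For reference, the conventional semantics of the four brace forms can be sketched in Python. This sketch normalizes {n} to the pair (n, n) and uses None for an open upper bound, whereas the Rust code above encodes {n} as Specific(p, n, None) and expands {n,} into a concatenation (names invented, sketch only):
def min_max(spec):
    # spec is the text between '{' and '}': "3", "2,5", ",5" or "3,"
    parts = spec.split(',', 1)
    if len(parts) == 1:
        return int(parts[0]), int(parts[0])      # {3}  -> exactly 3
    lo = int(parts[0]) if parts[0] else 0        # {,5} -> 0..5
    hi = int(parts[1]) if parts[1] else None     # {3,} -> 3..unbounded
    return lo, hi

assert min_max("3") == (3, 3)
assert min_max("2,5") == (2, 5)
assert min_max(",5") == (0, 5)
assert min_max("3,") == (3, None)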
/// Constants for generalizing parsing of parentheses.
const ROUND_PARENS: (char, char) = ('(', ')');
/// Constants for generalizing parsing of parentheses.
const SQUARE_BRACKETS: (char, char) = ('[', ']');
/// Constants for generalizing parsing of parentheses.
const CURLY_BRACKETS: (char, char) = ('{', '}');
/// split_in_parens returns two new ParseStates; the first one containing the contents of the
/// parenthesized clause starting at s[0], the second one containing the rest.
fn split_in_parens<'a>(
s: ParseState<'a>,
parens: (char, char),
) -> Option<(ParseState<'a>, ParseState<'a>)> {
if let Some(end) = find_closing_paren(s.clone(), parens) {
Some((s.sub(1, end), s.from(end + 1)))
} else {
None
}
}
/// find_closing_paren returns the index of the parenthesis closing the opening parenthesis at the
/// beginning of the state's string.
fn find_closing_paren<'a>(s: ParseState<'a>, parens: (char, char)) -> Option<usize> {
if s[0] != parens.0 {
return None;
}
let mut count = 0;
for i in 0..s.len() {
if s[i] == parens.0 {
count += 1;
} else if s[i] == parens.1 {
count -= 1;
}
if count == 0 {
return Some(i);
}
}
None
}
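The depth counter is the standard balanced-delimiter scan; a Python equivalent for reference (it also guards the empty-string case, which the Rust version above indexes unconditionally):
def find_closing(s, open_ch='(', close_ch=')'):
    if not s or s[0] != open_ch:
        return None
    depth = 0
    for i, c in enumerate(s):
        if c == open_ch:
            depth += 1
        elif c == close_ch:
            depth -= 1
        if depth == 0:
            return i
    return None                  # unbalanced input

assert find_closing("(abc)de") == 4
assert find_closing("(a(b)c)d") == 6
assert find_closing("(abc") is None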
#[cfg(test)]
mod tests {
use super::*;
use crate::compile::*;
use crate::repr::*;
use crate::state::dot;
#[test]
fn test_find_closing_paren() {
for case in &[
("(abc)de", Some(4)),
("()a", Some(1)),
("(abcd)", Some(5)),
("(abc", None),
] {
let src: Vec<char> = case.0.chars().collect();
assert_eq!(
find_closing_paren(ParseState::new(src.as_ref()), ROUND_PARENS),
case.1
);
}
}
#[test]
fn test_parse_charset() {
for case in &[
("[a]", Pattern::Char('a')),
("[ab]", Pattern::CharSet(vec!['a', 'b'])),
("[ba-]", Pattern::CharSet(vec!['b', 'a', '-'])),
("[a-z]", Pattern::CharRange('a', 'z')),
(
"[a-z-]",
Pattern::Alternate(vec![Pattern::CharRange('a', 'z'), Pattern::Char('-')]),
),
(
"[-a-z-]",
Pattern::Alternate(vec![
Pattern::CharRange('a', 'z'),
Pattern::CharSet(vec!['-', '-']),
]),
),
(
"[a-zA-Z]",
Pattern::Alternate(vec![
Pattern::CharRange('a', 'z'),
Pattern::CharRange('A', 'Z'),
]),
),
(
"[a-zA-Z-]",
Pattern::Alternate(vec![
Pattern::CharRange('a', 'z'),
Pattern::CharRange('A', 'Z'),
Pattern::Char('-'),
]),
),
] {
let src: Vec<char> = case.0.chars().collect();
let st = ParseState::new(&src);
assert_eq!(parse_char_set(st).unwrap().0, case.1);
}
}
#[test]
fn test_parse_subs() {
let case1 = (
"a(b)c",
Pattern::Concat(vec![
Pattern::Char('a'),
Pattern::Submatch(Box::new(Pattern::Char('b'))),
Pattern::Char('c'),
]),
);
let case2 = ("(b)", Pattern::Submatch(Box::new(Pattern::Char('b'))));
for c in &[case1, case2] {
assert_eq!(c.1, parse(c.0).unwrap());
}
}
#[test]
fn test_parse_res() {
let case1 = (
"a(Bcd)e",
Pattern::Concat(vec![
Pattern::Char('a'),
Pattern::Submatch(Box::new(Pattern::Concat(vec![
Pattern::Char('B'),
Pattern::Char('c'),
Pattern::Char('d'),
]))),
Pattern::Char('e'),
]),
);
for c in &[case1] {
assert_eq!(c.1, parse(c.0).unwrap());
}
}
#[test]
fn test_parse_res_errors() {
let case1 = ("ac)d", "unopened ')' at :2");
let case2 = ("(ac)d)", "unopened ')' at :5");
let case3 = ("[ac]d]", "unopened ']' at :5");
let case4 = ("(ac)d]", "unopened ']' at :5");
for c in &[case1, case2, case3, case4] {
assert_eq!(c.1, parse(c.0).unwrap_err());
}
}
#[test]
fn test_parse_repetition_manual() {
println!(
"digraph st {{ {} }}",
dot(&start_compile(&parse("[abc]{1,5}").unwrap()))
);
}
#[test]
fn test_parse_manual() {
let rep = parse("a|[bed]|(c|d|e)|f").unwrap();
println!("{:?}", rep.clone());
let dot = dot(&start_compile(&rep));
println!("digraph st {{ {} }}", dot);
}
#[test]
fn test_parse_manual2() {
println!("{:?}", parse("abcdef"));
}
} | if let Some(p) = stack.pop() {
let rep = parse_specific_repetition(rep, p)?;
stack.push(rep);
s = newst; | random_line_split |
parse.rs | //! This module contains functionality for parsing a regular expression into the intermediate
//! representation in repr.rs (from which it is compiled into a state graph), and optimizing that
//! intermediate representation.
#![allow(dead_code)]
use std::iter::FromIterator;
use std::ops::{Index, Range, RangeFull};
use std::str::FromStr;
use crate::repr::{AnchorLocation, Pattern, Repetition};
/// The entry point for this module: Parse a string into a `Pattern` that can be optimized and/or
/// compiled.
pub fn parse(s: &str) -> Result<Pattern, String> {
let src: Vec<char> = s.chars().collect();
parse_re(ParseState::new(&src)).map(|t| t.0)
}
/// ParseStack contains already parsed elements of a regular expression, and is used for parsing
/// textual regular expressions (as the parsing algorithm is stack-based). It can be converted to
/// an Pattern.
struct ParseStack {
s: Vec<Pattern>,
}
impl ParseStack {
fn new() -> ParseStack {
ParseStack {
s: Vec::with_capacity(4),
}
}
fn push(&mut self, p: Pattern) {
self.s.push(p)
}
fn pop(&mut self) -> Option<Pattern> {
self.s.pop()
}
fn empty(&self) -> bool {
self.s.is_empty()
}
fn to_pattern(mut self) -> Pattern {
if self.s.len() > 1 {
Pattern::Concat(self.s)
} else if self.s.len() == 1 {
self.s.pop().unwrap()
} else {
panic!("empty stack")
}
}
}
/// State of the parser, quite a simple struct. It contains the current substring that a parser
/// function is concerned with as well as the position within the overall parsed string, so that
/// useful positions can be reported to users. In addition, it provides functions to cheaply create
/// "sub-ParseStates" containing a substring of its current string.
///
/// It also supports indexing by ranges and index.
struct | <'a> {
/// The string to parse. This may be a substring of the "overall" matched string.
src: &'a [char],
/// The position within the overall string (for error reporting).
pos: usize,
}
impl<'a> ParseState<'a> {
/// new returns a new ParseState operating on the specified input string.
fn new(s: &'a [char]) -> ParseState<'a> {
ParseState { src: s, pos: 0 }
}
/// from returns a new ParseState operating on the [from..] sub-string of the current
/// ParseState.
fn from(&self, from: usize) -> ParseState<'a> {
self.sub(from, self.len())
}
/// pos returns the overall position within the input regex.
fn pos(&self) -> usize {
self.pos
}
/// sub returns a sub-ParseState containing [from..to] of the current one.
fn sub(&self, from: usize, to: usize) -> ParseState<'a> {
ParseState {
src: &self.src[from..to],
pos: self.pos + from,
}
}
/// len returns how many characters this ParseState contains.
fn len(&self) -> usize {
self.src.len()
}
/// err returns a formatted error string containing the specified message and the overall
/// position within the original input string.
fn err<T>(&self, s: &str, i: usize) -> Result<T, String> {
Err(format!("{} at :{}", s, self.pos + i))
}
}
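The idea behind this struct -- carry a slice together with its absolute offset so errors can point into the original input even after repeated narrowing -- is easy to mirror in other languages; a minimal Python sketch (class and method names invented):
class Cursor:
    def __init__(self, src, pos=0):
        self.src, self.pos = src, pos
    def sub(self, lo, hi):
        # Narrow to src[lo:hi] but keep the absolute position.
        return Cursor(self.src[lo:hi], self.pos + lo)
    def err(self, msg, i=0):
        return "{} at :{}".format(msg, self.pos + i)

c = Cursor("a(bc)d").sub(2, 4)       # now looking at "bc"
assert c.err("oops") == "oops at :2"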
impl<'a> Index<Range<usize>> for ParseState<'a> {
type Output = [char];
fn index(&self, r: Range<usize>) -> &Self::Output {
&self.src[r]
}
}
impl<'a> Index<RangeFull> for ParseState<'a> {
type Output = [char];
fn index(&self, r: RangeFull) -> &Self::Output {
&self.src[r]
}
}
impl<'a> Index<usize> for ParseState<'a> {
type Output = char;
fn index(&self, i: usize) -> &Self::Output {
&self.src[i]
}
}
impl<'a> Clone for ParseState<'a> {
fn clone(&self) -> ParseState<'a> {
ParseState {
src: self.src,
pos: self.pos,
}
}
}
/// parse_re is the parser entry point; like all parser functions, it returns either a pair of
/// (parsed pattern, new ParseState) or an error string.
fn parse_re<'a>(mut s: ParseState<'a>) -> Result<(Pattern, ParseState<'a>), String> {
// The stack assists us in parsing the linear parts of a regular expression, e.g. non-pattern
// characters, or character sets.
let mut stack = ParseStack::new();
loop {
if s.len() == 0 {
break;
}
match s[0] {
'.' => {
stack.push(Pattern::Any);
s = s.from(1);
}
'$' => {
if s.len() == 1 {
stack.push(Pattern::Anchor(AnchorLocation::End));
} else {
stack.push(Pattern::Char('$'))
}
s = s.from(1);
}
'^' => {
if s.pos() == 0 {
stack.push(Pattern::Anchor(AnchorLocation::Begin));
} else {
stack.push(Pattern::Char('^'));
}
s = s.from(1);
}
r @ '+' | r @ '*' | r @ '?' => {
if let Some(p) = stack.pop() {
let rep = match r {
'+' => Repetition::OnceOrMore(p),
'*' => Repetition::ZeroOrMore(p),
'?' => Repetition::ZeroOrOnce(p),
_ => unimplemented!(),
};
stack.push(Pattern::Repeated(Box::new(rep)));
s = s.from(1);
} else {
return s.err("+ without pattern to repeat", 0);
}
}
// Alternation: Parse the expression on the right of the pipe sign and push an
// alternation between what we've already seen and the stuff on the right.
'|' => {
let (rest, newst) = parse_re(s.from(1))?;
let left = stack.to_pattern();
stack = ParseStack::new();
stack.push(Pattern::Alternate(vec![left, rest]));
s = newst;
}
'(' => {
match split_in_parens(s.clone(), ROUND_PARENS) {
Some((parens, newst)) => {
// Parse the sub-regex within parentheses.
let (pat, rest) = parse_re(parens)?;
assert!(rest.len() == 0);
stack.push(Pattern::Submatch(Box::new(pat)));
// Set the current state to contain the string after the parentheses.
s = newst;
}
None => return s.err("unmatched (", s.len()),
}
}
')' => return s.err("unopened ')'", 0),
'[' => match parse_char_set(s) {
Ok((pat, newst)) => {
stack.push(pat);
s = newst;
}
Err(e) => return Err(e),
},
']' => return s.err("unopened ']'", 0),
'{' => {
match split_in_parens(s.clone(), CURLY_BRACKETS) {
Some((rep, newst)) => {
if let Some(p) = stack.pop() {
let rep = parse_specific_repetition(rep, p)?;
stack.push(rep);
s = newst;
} else {
return s.err("repetition {} without pattern to repeat", 0);
}
}
None => return s.err("unmatched {", s.len()),
};
}
c => {
stack.push(Pattern::Char(c));
s = s.from(1);
}
}
}
Ok((stack.to_pattern(), s))
}
/// parse_char_set parses the character set at the start of the input state.
/// Valid sets are [a], [ab], [a-z], [-a-z], [a-z-] and [a-fh-kl].
fn parse_char_set<'a>(s: ParseState<'a>) -> Result<(Pattern, ParseState<'a>), String> {
if let Some((cs, rest)) = split_in_parens(s.clone(), SQUARE_BRACKETS) {
let mut chars: Vec<char> = vec![];
let mut ranges: Vec<Pattern> = vec![];
let mut st = cs;
loop {
// Try to match a range "a-z" by looking for the dash; if no dash, add character to set
// and advance.
if st.len() >= 3 && st[1] == '-' {
ranges.push(Pattern::CharRange(st[0], st[2]));
st = st.from(3);
} else if st.len() > 0 {
chars.push(st[0]);
st = st.from(1);
} else {
break;
}
}
assert_eq!(st.len(), 0);
if chars.len() == 1 {
ranges.push(Pattern::Char(chars.pop().unwrap()));
} else if !chars.is_empty() {
ranges.push(Pattern::CharSet(chars));
}
if ranges.len() == 1 {
Ok((ranges.pop().unwrap(), rest))
} else {
let pat = Pattern::Alternate(ranges);
Ok((pat, rest))
}
} else {
s.err("unmatched [", s.len())
}
}
/// Parse a repetition spec inside curly braces: {1} | {1,} | {,1} | {1,2}
fn parse_specific_repetition<'a>(rep: ParseState<'a>, p: Pattern) -> Result<Pattern, String> {
let mut nparts = 0;
let mut parts: [Option<&[char]>; 2] = Default::default();
for p in rep[..].split(|c| *c == ',') {
parts[nparts] = Some(p);
nparts += 1;
if nparts == 2 {
break;
}
}
if nparts == 0 {
// {}
return rep.err("empty {} spec", 0);
} else if nparts == 1 {
// {1}
if let Ok(n) = u32::from_str(&String::from_iter(parts[0].unwrap().iter())) {
return Ok(Pattern::Repeated(Box::new(Repetition::Specific(
p, n, None,
))));
} else {
return Err(format!(
"invalid repetition '{}'",
String::from_iter(rep[..].iter())
));
}
} else if nparts == 2 {
fn errtostr(r: Result<u32, std::num::ParseIntError>) -> Result<u32, String> {
match r {
Ok(u) => Ok(u),
Err(e) => Err(format!("{}", e)),
}
}
let (p0, p1) = (parts[0].unwrap(), parts[1].unwrap());
// {2,3}
if !p0.is_empty() && !p1.is_empty() {
let min = errtostr(u32::from_str(&String::from_iter(p0.iter())))?;
let max = errtostr(u32::from_str(&String::from_iter(p1.iter())))?;
return Ok(Pattern::Repeated(Box::new(Repetition::Specific(
p,
min,
Some(max),
))));
} else if p0.is_empty() && !p1.is_empty() {
// {,3}
let min = 0;
let max = errtostr(u32::from_str(&String::from_iter(p1.iter())))?;
return Ok(Pattern::Repeated(Box::new(Repetition::Specific(
p,
min,
Some(max),
))));
} else if !p0.is_empty() && p1.is_empty() {
// {3,}
let min = errtostr(u32::from_str(&String::from_iter(p0.iter())))?;
let repetition =
Pattern::Repeated(Box::new(Repetition::Specific(p.clone(), min, None)));
return Ok(Pattern::Concat(vec![
repetition,
Pattern::Repeated(Box::new(Repetition::ZeroOrMore(p))),
]));
}
}
Err(format!("invalid repetition pattern {:?}", &rep[..]))
}
/// Constants for generalizing parsing of parentheses.
const ROUND_PARENS: (char, char) = ('(', ')');
/// Constants for generalizing parsing of parentheses.
const SQUARE_BRACKETS: (char, char) = ('[', ']');
/// Constants for generalizing parsing of parentheses.
const CURLY_BRACKETS: (char, char) = ('{', '}');
/// split_in_parens returns two new ParseStates; the first one containing the contents of the
/// parenthesized clause starting at s[0], the second one containing the rest.
fn split_in_parens<'a>(
s: ParseState<'a>,
parens: (char, char),
) -> Option<(ParseState<'a>, ParseState<'a>)> {
if let Some(end) = find_closing_paren(s.clone(), parens) {
Some((s.sub(1, end), s.from(end + 1)))
} else {
None
}
}
/// find_closing_paren returns the index of the parenthesis closing the opening parenthesis at the
/// beginning of the state's string.
fn find_closing_paren<'a>(s: ParseState<'a>, parens: (char, char)) -> Option<usize> {
if s[0] != parens.0 {
return None;
}
let mut count = 0;
for i in 0..s.len() {
if s[i] == parens.0 {
count += 1;
} else if s[i] == parens.1 {
count -= 1;
}
if count == 0 {
return Some(i);
}
}
None
}
#[cfg(test)]
mod tests {
use super::*;
use crate::compile::*;
use crate::repr::*;
use crate::state::dot;
#[test]
fn test_find_closing_paren() {
for case in &[
("(abc)de", Some(4)),
("()a", Some(1)),
("(abcd)", Some(5)),
("(abc", None),
] {
let src: Vec<char> = case.0.chars().collect();
assert_eq!(
find_closing_paren(ParseState::new(src.as_ref()), ROUND_PARENS),
case.1
);
}
}
#[test]
fn test_parse_charset() {
for case in &[
("[a]", Pattern::Char('a')),
("[ab]", Pattern::CharSet(vec!['a', 'b'])),
("[ba-]", Pattern::CharSet(vec!['b', 'a', '-'])),
("[a-z]", Pattern::CharRange('a', 'z')),
(
"[a-z-]",
Pattern::Alternate(vec![Pattern::CharRange('a', 'z'), Pattern::Char('-')]),
),
(
"[-a-z-]",
Pattern::Alternate(vec![
Pattern::CharRange('a', 'z'),
Pattern::CharSet(vec!['-', '-']),
]),
),
(
"[a-zA-Z]",
Pattern::Alternate(vec![
Pattern::CharRange('a', 'z'),
Pattern::CharRange('A', 'Z'),
]),
),
(
"[a-zA-Z-]",
Pattern::Alternate(vec![
Pattern::CharRange('a', 'z'),
Pattern::CharRange('A', 'Z'),
Pattern::Char('-'),
]),
),
] {
let src: Vec<char> = case.0.chars().collect();
let st = ParseState::new(&src);
assert_eq!(parse_char_set(st).unwrap().0, case.1);
}
}
#[test]
fn test_parse_subs() {
let case1 = (
"a(b)c",
Pattern::Concat(vec![
Pattern::Char('a'),
Pattern::Submatch(Box::new(Pattern::Char('b'))),
Pattern::Char('c'),
]),
);
let case2 = ("(b)", Pattern::Submatch(Box::new(Pattern::Char('b'))));
for c in &[case1, case2] {
assert_eq!(c.1, parse(c.0).unwrap());
}
}
#[test]
fn test_parse_res() {
let case1 = (
"a(Bcd)e",
Pattern::Concat(vec![
Pattern::Char('a'),
Pattern::Submatch(Box::new(Pattern::Concat(vec![
Pattern::Char('B'),
Pattern::Char('c'),
Pattern::Char('d'),
]))),
Pattern::Char('e'),
]),
);
for c in &[case1] {
assert_eq!(c.1, parse(c.0).unwrap());
}
}
#[test]
fn test_parse_res_errors() {
let case1 = ("ac)d", "unopened ')' at :2");
let case2 = ("(ac)d)", "unopened ')' at :5");
let case3 = ("[ac]d]", "unopened ']' at :5");
let case4 = ("(ac)d]", "unopened ']' at :5");
for c in &[case1, case2, case3, case4] {
assert_eq!(c.1, parse(c.0).unwrap_err());
}
}
#[test]
fn test_parse_repetition_manual() {
println!(
"digraph st {{ {} }}",
dot(&start_compile(&parse("[abc]{1,5}").unwrap()))
);
}
#[test]
fn test_parse_manual() {
let rep = parse("a|[bed]|(c|d|e)|f").unwrap();
println!("{:?}", rep.clone());
let dot = dot(&start_compile(&rep));
println!("digraph st {{ {} }}", dot);
}
#[test]
fn test_parse_manual2() {
println!("{:?}", parse("abcdef"));
}
}
| ParseState | identifier_name |
regression.py | '''
BoardGameGeek Regression Prediction and Recommendation
CAPP 122 Final Project
Author: Syeda Jaisha
The following script runs a regression using data from BoardGameGeek (BGG) and
uses it to predict the BGG rating and the number of user reviews/ratings received
for a game designed by our user, as well as to make recommendations on possible
ways to improve the game rating and increase its popularity
'''
import pandas as pd
import numpy as np
rating_lst = ['avg_playtime', 'suggested_numplayers', 'averageweight',
'num_mechanics', 'lang_dep2', 'lang_dep3', 'lang_dep4',
'lang_dep5', 'Strategy Game', 'Family Game', 'Party Game',
'Abstract Game', 'Thematic', 'War Game','Customizable',
"Children's Game"]
popularity_lst = ['num_categories', 'num_mechanics', 'lang_dep2',
'lang_dep3', 'lang_dep4', 'lang_dep5', 'Strategy Game',
'Family Game', 'Party Game', 'Abstract Game', 'Thematic',
'War Game','Customizable', "Children's Game"]
django_to_local_cols = {'Average playing time': 'avg_playtime',
'Recommended number of players': 'suggested_numplayers',
'Complexity': 'averageweight',
'Number of categories': 'num_categories',
'Number of mechanics': 'num_mechanics',
'Language dependency' : {'lang_dep2': 2,
'lang_dep3': 3,
'lang_dep4': 4,
'lang_dep5': 5 },
'Type': ['Strategy Game', 'Family Game', 'Party Game',
'Abstract Game', 'Thematic', 'War Game',
'Customizable', "Children's Game"]}
def predict(input_dict, rating_bool):
'''
Main function that runs the regression, produces either the predicted BGG
rating or the predicted number of ratings received, and makes relevant
recommendations to improve it.
Inputs:
rating_bool (bool): If True, run the regression for predicted BGG
rating
If False, run the regression for predicted number
of ratings
input_dict (dict): Dictionary produced by Django UI, containing
required fields for the prediction using regression
Output:
(tuple of lists) Contains a list of column names and a list of columns
output for Django UI
Warning: Predicted values may be negative due to low R2 of models
'''
x = construct_x(input_dict, rating_bool)
X, y, raw_df, dep_var = construct_X_y(rating_bool)
coef = regress(X,y)
beta = coef['beta']
pred_val = apply_beta(beta, x)[0]
accuracy = calculate_R2(X, y, beta)
sorted_df_y = raw_df.sort_values(by=dep_var, ascending = False).\
reset_index(drop=True)
rank = sorted_df_y[sorted_df_y[dep_var] >= pred_val].index[-1] + 2
top_5_games = ''
for i, game in enumerate(sorted_df_y['name'][0:5]):
top_5_games += game
if i != 4:
top_5_games += ', '
decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup = \
recommend(coef, input_dict, X, rating_bool)
if rating_bool:
return (['Your game is likely to get a BGG rating of ____ on BoardGameGeek',
'placing you at a rank of ____ among 4093 games in our dataset',
'with top 5 BGG board games being ____',
'This prediction is only ____ percent accurate.',
'try decreasing ____,'
'to improve score by (for each unit decreased) ____',
'try increasing ____,'
'to improve score by (for each unit increased) ____',
'try changing "Language dependency" to ____,'
'to improve score by ____',
'try dropping "type" _____,'
'try adding "type" _____, to improve score by ____'],
[[str(round(pred_val,5)), rank,
top_5_games, str(round(accuracy,2)), decrease_gain_tup,
increase_gain_tup, lang_dep_gain_tup, game_type_tup]])
else:
return (['Your game is likely to be voted for by ____ users on BoardGameGeek',
'placing you at a ____ rank among 4093 games in our dataset',
'with top 5 BGG board games being _____',
'This prediction is only ____ percent accurate.',
'try decreasing ____,'
'to improve score by (for each unit decreased) ____',
'try increasing ____,'
'to improve score by (for each unit increased) ____',
'try changing "Language dependency" to ____,'
'to improve score by ____',
'try dropping "type" _____, try adding "type" _____,'
'to improve score by ____'],
[[str(round(pred_val,0)), rank,
top_5_games,str(round(accuracy,2)), decrease_gain_tup,
increase_gain_tup, lang_dep_gain_tup, game_type_tup]])
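The rank lookup above depends on the frame being sorted in descending order: it takes the last index whose value still beats the prediction, then adds 2 (one for zero-based indexing, one to place the new game just below it). A toy illustration with invented scores -- note that a prediction above every existing score would leave the selection empty and raise an IndexError:
import pandas as pd

scores = pd.Series([8.6, 8.1, 7.9, 7.2])         # already descending
pred = 7.5
rank = scores[scores >= pred].index[-1] + 2      # three games stay ahead
assert rank == 4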
def construct_x(input_dict, rating_bool):
'''
Construct x vector using user inputs from Django by matching Django
fields to column names in internal data, using field inputs to create
required columns and finally add a 'ones' column for constant of the
regression equation.
Input: (dict) Dictionary produced by Django UI, containing
required fields for the prediction using regression
Output: (pandas Series) Column vector
'''
x_dict = {}
type_lst =[]
for field in input_dict.keys():
if field == 'Language dependency':
for dummy, complexity in django_to_local_cols[field].items():
x_dict[dummy] = 0
if input_dict[field] == complexity:
x_dict[dummy] = 1
elif field in ['Type 1', 'Type 2', 'Type 3']:
type_lst.append(input_dict[field])
else:
col_name = django_to_local_cols[field]
value = input_dict[field]
x_dict[col_name] = value
for type_dummy in django_to_local_cols['Type']:
x_dict[type_dummy] = 0
if type_dummy in type_lst:
x_dict[type_dummy] = 1
x = pd.DataFrame(x_dict, index = ['obs'])
if rating_bool:
pred_vars = rating_lst
else:
pred_vars = popularity_lst
x = x.loc[:,pred_vars]
prepend_ones_col(x)
return x
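For concreteness, a hypothetical input_dict of the shape the Django UI would send for the rating model (field values invented); construct_x turns it into a single-row frame ordered like rating_lst, with the leading 'ones' column for the intercept:
input_dict = {
    'Average playing time': 45,
    'Recommended number of players': 4,
    'Complexity': 2.5,
    'Number of mechanics': 3,
    'Language dependency': 2,        # sets lang_dep2 = 1, other dummies 0
    'Type 1': 'Strategy Game',       # sets the Strategy Game dummy to 1
}
x = construct_x(input_dict, rating_bool=True)    # shape (1, len(rating_lst) + 1)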
def construct_X_y(rating_bool):
'''
Process raw data (data cleaning, data type coercion, creating dummy
variables) pulled from BoardGameGeek API and then use it to construct X
matrix and y vector to be plugged into the regress function.
Input: (bool) Indicates which regression model to run
Outputs:
X: (pandas DataFrame) X matrix containing observations of regressors
y: (pandas Series) column vector containing observations of the dependent
variable
raw_df: (pandas DataFrame) processed dataframe
dep_var: (str) name of the dependent variable
'''
raw_df = pd.read_csv("all_games.csv")
raw_df = raw_df.loc[:,['bgg_id', 'is_boardgame', 'name', 'name_coerced',
'minplaytime', 'maxplaytime', 'suggested_numplayers',
'suggested_language', 'num_ratings',
'Board Game_avg_rating', 'Strategy Game',
'Family Game', 'Party Game', 'Abstract Game', 'Thematic',
'War Game','Customizable', "Children's Game",
'num_categories', 'num_mechanics','averageweight']]
raw_df = raw_df[raw_df['is_boardgame'] == True]
raw_df = raw_df.dropna(subset=['suggested_language'])
create_lang_dummy(raw_df)
raw_df = raw_df.astype({'Strategy Game':'int64', 'Family Game': 'int64',
'Party Game': 'int64', 'Abstract Game': 'int64',
'Thematic': 'int64', 'War Game': 'int64',
'Customizable': 'int64', "Children's Game": 'int64',
'lang_dep2': 'int64', 'lang_dep3': 'int64',
'lang_dep4': 'int64', 'lang_dep5': 'int64'})
raw_df['suggested_numplayers'] = raw_df['suggested_numplayers']\
.astype('string').str.strip('+').astype('int64')
raw_df['avg_playtime'] = (raw_df['minplaytime'] + raw_df['maxplaytime'])/2
raw_df = raw_df[raw_df['suggested_numplayers'] != 0]
raw_df = raw_df[raw_df['avg_playtime'] != 0]
raw_df = raw_df.dropna()
if rating_bool:
pred_vars, dep_var = rating_lst, 'Board Game_avg_rating'
else:
pred_vars, dep_var = popularity_lst, 'num_ratings'
X = raw_df.loc[:,pred_vars]
prepend_ones_col(X)
y = raw_df[dep_var]
return X, y, raw_df, dep_var
def create_lang_dummy(df):
'''
Create and insert (k-1) dummy variables for k Language dependency categories
in the dataframe.
Input: (pandas DataFrame) BGG data
'''
lang_dep = {'No necessary in-game text':1,
'Some necessary text - easily memorized or small crib sheet':2,
'Moderate in-game text - needs crib sheet or paste ups':3,
'Extensive use of text - massive conversion needed to be playable':4,
'Unplayable in another language':5}
categories = pd.unique(df['suggested_language'])
for category in categories:
if lang_dep[category] != 1:
dummy_name = 'lang_dep' + str(lang_dep[category])
df[dummy_name] = df['suggested_language'] == category
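Encoding k categories as k-1 dummies avoids perfect collinearity with the intercept (the dummy-variable trap). The hand-rolled loop above is equivalent in spirit to pandas' drop_first, sketched here on toy data -- with the caveat that get_dummies drops the alphabetically first level rather than the 'no text' baseline the loop keeps:
import pandas as pd

langs = pd.Series(['No necessary in-game text',
                   'Unplayable in another language',
                   'No necessary in-game text'])
dummies = pd.get_dummies(langs, drop_first=True)
assert dummies.shape[1] == langs.nunique() - 1   # one column fewer than levels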
def prepend_ones_col(X):
'''
Add a ones column to the left side of pandas DataFrame.
Input: (pandas DataFrame) X matrix
'''
X.insert(0,'ones', 1)
def regress(X, y):
'''
Regress X matrix on y vector and calculate beta vector.
Inputs:
X (pandas DataFrame): X matrix containing observations of regressors
y (pandas Series): y vector
Outputs:
coef (pandas DataFrame): beta vector containing coefficient estimates
for the regressors
'''
beta = np.linalg.lstsq(X, y, rcond=None)[0]
#Source: /home/syedajaisha/capp30121-aut-20-syedajaisha/pa5/util.py
col_names = list(X.columns)
col_names[0] = 'intercept'
coef = pd.DataFrame({'beta': beta}, index=col_names)
return coef
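np.linalg.lstsq solves the least-squares problem min ||Xb - y||^2 directly, so no explicit normal equations are needed; a tiny numeric check with a line that fits exactly:
import numpy as np

X = np.array([[1, 0], [1, 1], [1, 2]])   # ones column plus one regressor
y = np.array([1.0, 3.0, 5.0])            # exactly y = 1 + 2x
beta = np.linalg.lstsq(X, y, rcond=None)[0]
assert np.allclose(beta, [1.0, 2.0])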
def calculate_R2(X, y, beta):
'''
Calculate R_squared for a regression model
Inputs:
X (pandas DataFrame): X matrix
y (pandas Series): y vector
beta(pandas DataFrame): beta vector
Output: (float) R_squared
'''
yhat = apply_beta(beta, X)
R2 = 1 - (np.sum((y - yhat)**2) / np.sum((y - np.mean(y))**2))
#Source: /home/syedajaisha/capp30121-aut-20-syedajaisha/pa5/regression.py
return R2*100
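The statistic is the usual R^2 = 1 - SS_res/SS_tot, scaled to percent; a perfect fit gives exactly 100:
import numpy as np

y    = np.array([1.0, 3.0, 5.0])
yhat = np.array([1.0, 3.0, 5.0])         # perfect predictions
R2 = 1 - np.sum((y - yhat)**2) / np.sum((y - np.mean(y))**2)
assert R2 * 100 == 100.0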
def apply_beta(beta, X):
'''
Apply beta, the vector generated by regress, to the
specified values to calculate predicted value
Inputs:
beta (pandas Series): beta vector
X (pandas DataFrame): X matrix
Output:
yhat (numpy array): predicted value
'''
yhat = np.dot(X, beta)
return yhat
def recommend(coef, input_dict, X, rating_bool):
| '''
Make recommendations on which parameters the user could increase, decrease,
or switch categories of to raise the predicted BGG rating or number of
ratings, and report the corresponding change in the predicted value
Inputs:
coef (pandas DataFrame): beta vector containing coefficient estimates
input_dict (dict): Dictionary produced by Django UI, containing
required fields for the prediction using regression
X (pandas DataFrame): X matrix
rating_bool (bool): Indicates which regression model to run
Disclaimer: This function doesn't recommend changing everything to arrive at
the optimal result. For example, if a game already has three types, it
won't suggest replacing them all with the ones corresponding to the three
largest coefficients among all game types; it only suggests replacing the
existing type that adds the least value to the regression with the type
corresponding to the highest coefficient among the remaining game types
'''
dummy_var = ['Language dependency', 'Type']
decrease_gain_tup = []
increase_gain_tup =[]
lang_dep_gain_tup = []
game_type_tup= []
if rating_bool:
beta = round(coef['beta'],4)
else:
beta = round(coef['beta'],0).astype('int64')
for field in django_to_local_cols:
if field not in dummy_var:
if field in input_dict:
if beta[django_to_local_cols[field]] < 0:
if input_dict[field] > min(X[django_to_local_cols[field]]):
decrease_gain_tup.append((field, -beta[django_to_local_cols[field]]))
else:
if input_dict[field] < max(X[django_to_local_cols[field]]):
increase_gain_tup.append((field, beta[django_to_local_cols[field]]))
elif field == 'Language dependency':
current_lang_dep = 'lang_dep' + str(input_dict['Language dependency'])
if current_lang_dep == 'lang_dep1':
for lang_dep_dummy in django_to_local_cols['Language dependency'].keys():
if beta[lang_dep_dummy] > 0:
lang_dep_gain_tup.append((django_to_local_cols['Language dependency'][lang_dep_dummy], \
beta[lang_dep_dummy]))
else:
if beta[current_lang_dep] < 0:
lang_dep_gain_tup.append((1, -beta[current_lang_dep]))
for lang_dep_dummy in django_to_local_cols['Language dependency'].keys():
if beta[lang_dep_dummy] > beta[current_lang_dep]:
gain = -beta[current_lang_dep] + beta[lang_dep_dummy]
lang_dep_gain_tup.append((django_to_local_cols['Language dependency'][lang_dep_dummy], gain))
elif field == 'Type':
current_type_coefs = {}
game_type_coefs = {}
for game_type in django_to_local_cols['Type']:
if game_type in input_dict.values():
current_type_coefs[beta[game_type]] = game_type
else:
game_type_coefs[beta[game_type]] = game_type
max_game_type_coef = max(game_type_coefs.keys())
if len(current_type_coefs) == 3:
min_current_type_coef = min(current_type_coefs.keys())
if max_game_type_coef > min_current_type_coef:
game_type_tup.append((current_type_coefs[min_current_type_coef],
game_type_coefs[max_game_type_coef],
max_game_type_coef - min_current_type_coef))
else:
if max_game_type_coef > 0:
game_type_tup.append(('none',
game_type_coefs[max_game_type_coef],
max_game_type_coef))
lst_lst = [decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup]
for lst in lst_lst:
if not lst:
lst.append('already optimal')
return (decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup) | identifier_body |
|
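When a game already has three types, the swap logic in recommend boils down to comparing the smallest coefficient among the current types with the largest among the rest; a sketch with invented coefficients:
beta = {'Strategy Game': 0.30, 'Party Game': -0.10,
        'War Game': 0.05, 'Thematic': 0.12}
current = {'Party Game', 'War Game'}
worst_current = min(current, key=beta.get)                 # 'Party Game'
best_other = max(set(beta) - current, key=beta.get)        # 'Strategy Game'
if beta[best_other] > beta[worst_current]:
    gain = beta[best_other] - beta[worst_current]          # 0.40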
regression.py | '''
BoardGameGeek Regression Prediction and Recommendation
CAPP 122 Final Project
Author: Syeda Jaisha
The following script runs a regression using data from BoardGameGeek (BGG) and
uses it to predict the BGG rating and the number of user reviews/ratings received
for a game designed by our user, as well as to make recommendations on possible
ways to improve the game rating and increase its popularity
'''
import pandas as pd
import numpy as np
rating_lst = ['avg_playtime', 'suggested_numplayers', 'averageweight',
'num_mechanics', 'lang_dep2', 'lang_dep3', 'lang_dep4',
'lang_dep5', 'Strategy Game', 'Family Game', 'Party Game',
'Abstract Game', 'Thematic', 'War Game','Customizable',
"Children's Game"]
popularity_lst = ['num_categories', 'num_mechanics', 'lang_dep2',
'lang_dep3', 'lang_dep4', 'lang_dep5', 'Strategy Game',
'Family Game', 'Party Game', 'Abstract Game', 'Thematic',
'War Game','Customizable', "Children's Game"]
django_to_local_cols = {'Average playing time': 'avg_playtime',
'Recommended number of players': 'suggested_numplayers',
'Complexity': 'averageweight',
'Number of categories': 'num_categories',
'Number of mechanics': 'num_mechanics',
'Language dependency' : {'lang_dep2': 2,
'lang_dep3': 3,
'lang_dep4': 4,
'lang_dep5': 5 },
'Type': ['Strategy Game', 'Family Game', 'Party Game',
'Abstract Game', 'Thematic', 'War Game',
'Customizable', "Children's Game"]}
def predict(input_dict, rating_bool):
'''
Main function that runs the regression, produces either the predicted BGG
rating or the predicted number of ratings received, and makes relevant
recommendations to improve it.
Inputs:
rating_bool (bool): If True, run the regression for predicted BGG
rating
If False, run the regression for predicted number
of ratings
input_dict (dict): Dictionary produced by Django UI, containing
required fields for the prediction using regression
Output:
(tuple of lists) Contains a list of column names and a list of columns
output for Django UI
Warning: Predicted values may be negative due to low R2 of models
'''
x = construct_x(input_dict, rating_bool)
X, y, raw_df, dep_var = construct_X_y(rating_bool)
coef = regress(X,y)
beta = coef['beta']
pred_val = apply_beta(beta, x)[0]
accuracy = calculate_R2(X, y, beta)
sorted_df_y = raw_df.sort_values(by=dep_var, ascending = False).\
reset_index(drop=True)
rank = sorted_df_y[sorted_df_y[dep_var] >= pred_val].index[-1] + 2
top_5_games = ''
for i, game in enumerate(sorted_df_y['name'][0:5]):
top_5_games += game
if i != 4:
top_5_games += ', '
decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup = \
recommend(coef, input_dict, X, rating_bool)
if rating_bool:
return (['Your game is likely to get a BGG rating of ____ on BoardGameGeek',
'placing you at a rank of ____ among 4093 games in our dataset',
'with top 5 BGG board games being ____',
'This prediction is only ____ percent accurate.',
'try decreasing ____,'
'to improve score by (for each unit decreased) ____',
'try increasing ____,'
'to improve score by (for each unit increased) ____',
'try changing "Language dependency" to ____,'
'to improve score by ____',
'try dropping "type" _____,'
'try adding "type" _____, to improve score by ____'],
[[str(round(pred_val,5)), rank,
top_5_games, str(round(accuracy,2)), decrease_gain_tup,
increase_gain_tup, lang_dep_gain_tup, game_type_tup]])
else:
return (['Your game is likely to be voted for by ____ users on BoardGameGeek',
'placing you at a ____ rank among 4093 games in our dataset',
'with top 5 BGG board games being _____',
'This prediction is only ____ percent accurate.',
'try decreasing ____,'
'to improve score by (for each unit decreased) ____',
'try increasing ____,'
'to improve score by (for each unit increased) ____',
'try changing "Language dependency" to ____,'
'to improve score by ____',
'try dropping "type" _____, try adding "type" _____,'
'to improve score by ____'],
[[str(round(pred_val,0)), rank,
top_5_games,str(round(accuracy,2)), decrease_gain_tup,
increase_gain_tup, lang_dep_gain_tup, game_type_tup]])
def construct_x(input_dict, rating_bool):
'''
Construct x vector using user inputs from Django by matching Django
fields to column names in internal data, using field inputs to create
required columns and finally add a 'ones' column for constant of the
regression equation.
Input: (dict) Dictionary produced by Django UI, containing
required fields for the prediction using regression
Output: (pandas Series) Column vector
'''
x_dict = {}
type_lst =[]
for field in input_dict.keys():
if field == 'Language dependency':
for dummy, complexity in django_to_local_cols[field].items():
x_dict[dummy] = 0
if input_dict[field] == complexity:
x_dict[dummy] = 1
elif field in ['Type 1', 'Type 2', 'Type 3']:
type_lst.append(input_dict[field])
else:
col_name = django_to_local_cols[field]
value = input_dict[field]
x_dict[col_name] = value
for type_dummy in django_to_local_cols['Type']:
x_dict[type_dummy] = 0
if type_dummy in type_lst:
x_dict[type_dummy] = 1
x = pd.DataFrame(x_dict, index = ['obs'])
if rating_bool:
pred_vars = rating_lst
else:
pred_vars = popularity_lst
x = x.loc[:,pred_vars]
prepend_ones_col(x)
return x
def construct_X_y(rating_bool):
'''
Process raw data (data cleaning, data type coercion, creating dummy
variables) pulled from BoardGameGeek API and then use it to construct X
matrix and y vector to be plugged into the regress function.
Input: (bool) Indicates which regression model to run
Outputs:
X: (pandas DataFrame) X matrix containing observations of regressors
y: (pandas Series) column vector containing observations of the dependent
variable
raw_df: (pandas DataFrame) processed dataframe
dep_var: (str) name of the dependent variable
'''
raw_df = pd.read_csv("all_games.csv")
raw_df = raw_df.loc[:,['bgg_id', 'is_boardgame', 'name', 'name_coerced',
'minplaytime', 'maxplaytime', 'suggested_numplayers',
'suggested_language', 'num_ratings',
'Board Game_avg_rating', 'Strategy Game',
'Family Game', 'Party Game', 'Abstract Game', 'Thematic',
'War Game','Customizable', "Children's Game",
'num_categories', 'num_mechanics','averageweight']]
raw_df = raw_df[raw_df['is_boardgame'] == True]
raw_df = raw_df.dropna(subset=['suggested_language'])
create_lang_dummy(raw_df)
raw_df = raw_df.astype({'Strategy Game':'int64', 'Family Game': 'int64',
'Party Game': 'int64', 'Abstract Game': 'int64',
'Thematic': 'int64', 'War Game': 'int64',
'Customizable': 'int64', "Children's Game": 'int64',
'lang_dep2': 'int64', 'lang_dep3': 'int64',
'lang_dep4': 'int64', 'lang_dep5': 'int64'})
raw_df['suggested_numplayers'] = raw_df['suggested_numplayers']\
.astype('string').str.strip('+').astype('int64')
raw_df['avg_playtime'] = (raw_df['minplaytime'] + raw_df['maxplaytime'])/2
raw_df = raw_df[raw_df['suggested_numplayers'] != 0]
raw_df = raw_df[raw_df['avg_playtime'] != 0]
raw_df = raw_df.dropna()
if rating_bool:
pred_vars, dep_var = rating_lst, 'Board Game_avg_rating'
else:
pred_vars, dep_var = popularity_lst, 'num_ratings'
X = raw_df.loc[:,pred_vars]
prepend_ones_col(X)
y = raw_df[dep_var]
return X, y, raw_df, dep_var
def create_lang_dummy(df):
'''
Create and insert (k-1) dummy variables for k Language dependency categories
in the dataframe.
Input: (pandas DataFrame) BGG data
'''
lang_dep = {'No necessary in-game text':1,
'Some necessary text - easily memorized or small crib sheet':2,
'Moderate in-game text - needs crib sheet or paste ups':3,
'Extensive use of text - massive conversion needed to be playable':4,
'Unplayable in another language':5}
categories = pd.unique(df['suggested_language'])
for category in categories:
if lang_dep[category] != 1:
dummy_name = 'lang_dep' + str(lang_dep[category])
df[dummy_name] = df['suggested_language'] == category
def prepend_ones_col(X):
'''
Add a ones column to the left side of pandas DataFrame.
Input: (pandas DataFrame) X matrix
'''
X.insert(0,'ones', 1)
def regress(X, y):
'''
Regress X matrix on y vector and calculate beta vector.
Inputs:
X (pandas DataFrame): X matrix containing observations of regressors
y (pandas Series): y vector
Outputs:
coef (pandas DataFrame): beta vector containing coefficient estimates
for the regressors
| #Source: /home/syedajaisha/capp30121-aut-20-syedajaisha/pa5/util.py
col_names = list(X.columns)
col_names[0] = 'intercept'
coef = pd.DataFrame({'beta': beta}, index=col_names)
return coef
def calculate_R2(X, y, beta):
'''
Calculate R_squared for a regression model
Inputs:
X (pandas DataFrame): X matrix
y (pandas Series): y vector
beta(pandas DataFrame): beta vector
Output: (float) R_squared
'''
yhat = apply_beta(beta, X)
R2 = 1 - (np.sum((y - yhat)**2) / np.sum((y - np.mean(y))**2))
#Source: /home/syedajaisha/capp30121-aut-20-syedajaisha/pa5/regression.py
return R2*100
def apply_beta(beta, X):
'''
Apply beta, the vector generated by regress, to the
specified values to calculate predicted value
Inputs:
beta (pandas Series): beta vector
X (pandas DataFrame): X matrix
Output:
yhat (numpy array): predicted value
'''
yhat = np.dot(X, beta)
return yhat
def recommend(coef, input_dict, X, rating_bool):
'''
Make recommendations on which parameters the user could increase, decrease,
or switch categories of to raise the predicted BGG rating or number of
ratings, and report the corresponding change in the predicted value
Inputs:
coef (pandas DataFrame): beta vector containing coefficient estimates
input_dict (dict): Dictionary produced by Django UI, containing
required fields for the prediction using regression
X (pandas DataFrame): X matrix
rating_bool (bool): Indicates which regression model to run
Disclaimer: This function doesn't recommend changing everything to arrive at
the optimal result. For example, if a game already has three types, it
won't suggest replacing them all with the ones corresponding to the three
largest coefficients among all game types; it only suggests replacing the
existing type that adds the least value to the regression with the type
corresponding to the highest coefficient among the remaining game types
'''
dummy_var = ['Language dependency', 'Type']
decrease_gain_tup = []
increase_gain_tup =[]
lang_dep_gain_tup = []
game_type_tup= []
if rating_bool:
beta = round(coef['beta'],4)
else:
beta = round(coef['beta'],0).astype('int64')
for field in django_to_local_cols:
if field not in dummy_var:
if field in input_dict:
if beta[django_to_local_cols[field]] < 0:
if input_dict[field] > min(X[django_to_local_cols[field]]):
decrease_gain_tup.append((field, -beta[django_to_local_cols[field]]))
else:
if input_dict[field] < max(X[django_to_local_cols[field]]):
increase_gain_tup.append((field, beta[django_to_local_cols[field]]))
elif field == 'Language dependency':
current_lang_dep = 'lang_dep' + str(input_dict['Language dependency'])
if current_lang_dep == 'lang_dep1':
for lang_dep_dummy in django_to_local_cols['Language dependency'].keys():
if beta[lang_dep_dummy] > 0:
lang_dep_gain_tup.append((django_to_local_cols['Language dependency'][lang_dep_dummy], \
beta[lang_dep_dummy]))
else:
if beta[current_lang_dep] < 0:
lang_dep_gain_tup.append((1, -beta[current_lang_dep]))
for lang_dep_dummy in django_to_local_cols['Language dependency'].keys():
if beta[lang_dep_dummy] > beta[current_lang_dep]:
gain = -beta[current_lang_dep] + beta[lang_dep_dummy]
lang_dep_gain_tup.append((django_to_local_cols['Language dependency'][lang_dep_dummy], gain))
elif field == 'Type':
current_type_coefs = {}
game_type_coefs = {}
for game_type in django_to_local_cols['Type']:
if game_type in input_dict.values():
current_type_coefs[beta[game_type]] = game_type
else:
game_type_coefs[beta[game_type]] = game_type
max_game_type_coef = max(game_type_coefs.keys())
if len(current_type_coefs) == 3:
min_current_type_coef = min(current_type_coefs.keys())
if max_game_type_coef > min_current_type_coef:
game_type_tup.append((current_type_coefs[min_current_type_coef],
game_type_coefs[max_game_type_coef],
max_game_type_coef - min_current_type_coef))
else:
if max_game_type_coef > 0:
game_type_tup.append(('none',
game_type_coefs[max_game_type_coef],
max_game_type_coef))
lst_lst = [decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup]
for lst in lst_lst:
if not lst:
lst.append('already optimal')
return (decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup) | '''
beta = np.linalg.lstsq(X, y, rcond=None)[0] | random_line_split |
regression.py | '''
BoardGameGeek Regression Prediction and Recommendation
CAPP 122 Final Project
Author: Syeda Jaisha
The following script runs a regression using data from BoardGameGeek (BGG) and
uses it to predict the BGG rating and the number of user reviews/ratings received
for a game designed by our user, as well as to make recommendations on possible
ways to improve the game rating and increase its popularity
'''
import pandas as pd
import numpy as np
rating_lst = ['avg_playtime', 'suggested_numplayers', 'averageweight',
'num_mechanics', 'lang_dep2', 'lang_dep3', 'lang_dep4',
'lang_dep5', 'Strategy Game', 'Family Game', 'Party Game',
'Abstract Game', 'Thematic', 'War Game','Customizable',
"Children's Game"]
popularity_lst = ['num_categories', 'num_mechanics', 'lang_dep2',
'lang_dep3', 'lang_dep4', 'lang_dep5', 'Strategy Game',
'Family Game', 'Party Game', 'Abstract Game', 'Thematic',
'War Game','Customizable', "Children's Game"]
django_to_local_cols = {'Average playing time': 'avg_playtime',
'Recommended number of players': 'suggested_numplayers',
'Complexity': 'averageweight',
'Number of categories': 'num_categories',
'Number of mechanics': 'num_mechanics',
'Language dependency' : {'lang_dep2': 2,
'lang_dep3': 3,
'lang_dep4': 4,
'lang_dep5': 5 },
'Type': ['Strategy Game', 'Family Game', 'Party Game',
'Abstract Game', 'Thematic', 'War Game',
'Customizable', "Children's Game"]}
def predict(input_dict, rating_bool):
'''
Main function that runs the regression, produces either the predicted BGG
rating or the predicted number of ratings received, and makes relevant
recommendations to improve it.
Inputs:
rating_bool (bool): If True, run the regression for predicted BGG
rating
If False, run the regression for predicted number
of ratings
input_dict (dict): Dictionary produced by Django UI, containing
required fields for the prediction using regression
Output:
(tuple of lists) Contains a list of column names and a list of columns
output for Django UI
Warning: Predicted values may be negative due to low R2 of models
'''
x = construct_x(input_dict, rating_bool)
X, y, raw_df, dep_var = construct_X_y(rating_bool)
coef = regress(X,y)
beta = coef['beta']
pred_val = apply_beta(beta, x)[0]
accuracy = calculate_R2(X, y, beta)
sorted_df_y = raw_df.sort_values(by=dep_var, ascending = False).\
reset_index(drop=True)
rank = sorted_df_y[sorted_df_y[dep_var] >= pred_val].index[-1] + 2
top_5_games = ''
for i, game in enumerate(sorted_df_y['name'][0:5]):
top_5_games += game
if i != 4:
top_5_games += ', '
decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup = \
recommend(coef, input_dict, X, rating_bool)
if rating_bool:
return (['Your game is likely to get a BGG rating of ____ on BoardGameGeek',
'placing you at a rank of ____ among 4093 games in our dataset',
'with top 5 BGG board games being ____',
'This prediction is only ____ percent accurate.',
'try decreasing ____,'
'to improve score by (for each unit decreased) ____',
'try increasing ____,'
'to improve score by (for each unit increased) ____',
'try changing "Language dependency" to ____,'
'to improve score by ____',
'try dropping "type" _____,'
'try adding "type" _____, to improve score by ____'],
[[str(round(pred_val,5)), rank,
top_5_games, str(round(accuracy,2)), decrease_gain_tup,
increase_gain_tup, lang_dep_gain_tup, game_type_tup]])
else:
return (['Your game is likely to be voted for by ____ users on BoardGameGeek',
'placing you at a ____ rank among 4093 games in our dataset',
'with top 5 BGG board games being _____',
'This prediction is only ____ percent accurate.',
'try decreasing ____,'
'to improve score by (for each unit decreased) ____',
'try increasing ____,'
'to improve score by (for each unit increased) ____',
'try changing "Language dependency" to ____,'
'to improve score by ____',
'try dropping "type" _____, try adding "type" _____,'
'to improve score by ____'],
[[str(round(pred_val,0)), rank,
top_5_games,str(round(accuracy,2)), decrease_gain_tup,
increase_gain_tup, lang_dep_gain_tup, game_type_tup]])
def construct_x(input_dict, rating_bool):
'''
Construct x vector using user inputs from Django by matching Django
fields to column names in internal data, using field inputs to create
required columns and finally add a 'ones' column for constant of the
regression equation.
Input: (dict) Dictionary produced by Django UI, containing
required fields for the prediction using regression
Output: (pandas Series) Column vector
'''
x_dict = {}
type_lst =[]
for field in input_dict.keys():
if field == 'Language dependency':
for dummy, complexity in django_to_local_cols[field].items():
x_dict[dummy] = 0
if input_dict[field] == complexity:
x_dict[dummy] = 1
elif field in ['Type 1', 'Type 2', 'Type 3']:
type_lst.append(input_dict[field])
else:
col_name = django_to_local_cols[field]
value = input_dict[field]
x_dict[col_name] = value
for type_dummy in django_to_local_cols['Type']:
x_dict[type_dummy] = 0
if type_dummy in type_lst:
x_dict[type_dummy] = 1
x = pd.DataFrame(x_dict, index = ['obs'])
if rating_bool:
pred_vars = rating_lst
else:
pred_vars = popularity_lst
x = x.loc[:,pred_vars]
prepend_ones_col(x)
return x
def construct_X_y(rating_bool):
'''
Process raw data (data cleaning, data type coercion, creating dummy
variables) pulled from BoardGameGeek API and then use it to construct X
matrix and y vector to be plugged into the regress function.
Input: (bool) Indicates which regression model to run
Outputs:
X: (pandas DataFrame) X matrix containing observations of regressors
y: (pandas Series) column vector containing observations of the dependent
variable
raw_df: (pandas DataFrame) processed dataframe
dep_var: (str) name of the dependent variable
'''
raw_df = pd.read_csv("all_games.csv")
raw_df = raw_df.loc[:,['bgg_id', 'is_boardgame', 'name', 'name_coerced',
'minplaytime', 'maxplaytime', 'suggested_numplayers',
'suggested_language', 'num_ratings',
'Board Game_avg_rating', 'Strategy Game',
'Family Game', 'Party Game', 'Abstract Game', 'Thematic',
'War Game','Customizable', "Children's Game",
'num_categories', 'num_mechanics','averageweight']]
raw_df = raw_df[raw_df['is_boardgame'] == True]
raw_df = raw_df.dropna(subset=['suggested_language'])
create_lang_dummy(raw_df)
raw_df = raw_df.astype({'Strategy Game':'int64', 'Family Game': 'int64',
'Party Game': 'int64', 'Abstract Game': 'int64',
'Thematic': 'int64', 'War Game': 'int64',
'Customizable': 'int64', "Children's Game": 'int64',
'lang_dep2': 'int64', 'lang_dep3': 'int64',
'lang_dep4': 'int64', 'lang_dep5': 'int64'})
raw_df['suggested_numplayers'] = raw_df['suggested_numplayers']\
.astype('string').str.strip('+').astype('int64')
raw_df['avg_playtime'] = (raw_df['minplaytime'] + raw_df['maxplaytime'])/2
raw_df = raw_df[raw_df['suggested_numplayers'] != 0]
raw_df = raw_df[raw_df['avg_playtime'] != 0]
raw_df = raw_df.dropna()
if rating_bool:
pred_vars, dep_var = rating_lst, 'Board Game_avg_rating'
else:
pred_vars, dep_var = popularity_lst, 'num_ratings'
X = raw_df.loc[:,pred_vars]
prepend_ones_col(X)
y = raw_df[dep_var]
return X, y, raw_df, dep_var
def create_lang_dummy(df):
'''
Create and insert (k-1) dummy variables for k Language dependency categories
in the dataframe.
Input: (pandas DataFrame) BGG data
'''
lang_dep = {'No necessary in-game text':1,
'Some necessary text - easily memorized or small crib sheet':2,
'Moderate in-game text - needs crib sheet or paste ups':3,
'Extensive use of text - massive conversion needed to be playable':4,
'Unplayable in another language':5}
categories = pd.unique(df['suggested_language'])
for category in categories:
if lang_dep[category] != 1:
dummy_name = 'lang_dep' + str(lang_dep[category])
df[dummy_name] = df['suggested_language'] == category
def prepend_ones_col(X):
'''
Add a ones column to the left side of pandas DataFrame.
Input: (pandas DataFrame) X matrix
'''
X.insert(0,'ones', 1)
def regress(X, y):
'''
Regress X matrix on y vector and calculate beta vector.
Inputs:
X (pandas DataFrame): X matrix containing observations of regressors
y (pandas Series): y vector
Outputs:
coef (pandas DataFrame): beta vector containing coefficient estimates
for the regressors
'''
beta = np.linalg.lstsq(X, y, rcond=None)[0]
#Source: /home/syedajaisha/capp30121-aut-20-syedajaisha/pa5/util.py
col_names = list(X.columns)
col_names[0] = 'intercept'
coef = pd.DataFrame({'beta': beta}, index=col_names)
return coef
def calculate_R2(X, y, beta):
'''
Calculate R_squared for a regression model
Inputs:
X (pandas DataFrame): X matrix
y (pandas Series): y vector
beta(pandas DataFrame): beta vector
Output: (float) R_squared
'''
yhat = apply_beta(beta, X)
R2 = 1 - (np.sum((y - yhat)**2) / np.sum((y - np.mean(y))**2))
#Source: /home/syedajaisha/capp30121-aut-20-syedajaisha/pa5/regression.py
return R2*100
def apply_beta(beta, X):
'''
Apply beta, the vector generated by regress, to the
specified values to calculate predicted value
Inputs:
beta (pandas Series): beta vector
X (pandas DataFrame): X matrix
Output:
yhat (numpy array): predicted value
'''
yhat = np.dot(X, beta)
return yhat
def recommend(coef, input_dict, X, rating_bool):
'''
Make recommendations on which parameters the user could increase, decrease,
or switch categories of to raise the predicted BGG rating or number of
ratings, and report the corresponding change in the predicted value
Inputs:
coef (pandas DataFrame): beta vector containing coefficient estimates
input_dict (dict): Dictionary produced by Django UI, containing
required fields for the prediction using regression
X (pandas DataFrame): X matrix
rating_bool (bool): Indicates which regression model to run
Disclaimer: This function doesn't recommend changing everything to arrive at
the optimal result. For example, if a game already has three types, it
won't suggest replacing them all with the ones corresponding to the three
largest coefficients among all game types; it only suggests replacing the
existing type that adds the least value to the regression with the type
corresponding to the highest coefficient among the remaining game types
'''
dummy_var = ['Language dependency', 'Type']
decrease_gain_tup = []
increase_gain_tup =[]
lang_dep_gain_tup = []
game_type_tup= []
if rating_bool:
beta = round(coef['beta'],4)
else:
beta = round(coef['beta'],0).astype('int64')
for field in django_to_local_cols:
if field not in dummy_var:
if field in input_dict:
if beta[django_to_local_cols[field]] < 0:
if input_dict[field] > min(X[django_to_local_cols[field]]):
decrease_gain_tup.append((field, -beta[django_to_local_cols[field]]))
else:
if input_dict[field] < max(X[django_to_local_cols[field]]):
increase_gain_tup.append((field, beta[django_to_local_cols[field]]))
elif field == 'Language dependency':
current_lang_dep = 'lang_dep' + str(input_dict['Language dependency'])
if current_lang_dep == 'lang_dep1':
for lang_dep_dummy in django_to_local_cols['Language dependency'].keys():
if beta[lang_dep_dummy] > 0:
lang_dep_gain_tup.append((django_to_local_cols['Language dependency'][lang_dep_dummy], \
beta[lang_dep_dummy]))
else:
|
elif field == 'Type':
current_type_coefs = {}
game_type_coefs = {}
for game_type in django_to_local_cols['Type']:
if game_type in input_dict.values():
current_type_coefs[beta[game_type]] = game_type
else:
game_type_coefs[beta[game_type]] = game_type
max_game_type_coef = max(game_type_coefs.keys())
if len(current_type_coefs) == 3:
min_current_type_coef = min(current_type_coefs.keys())
if max_game_type_coef > min_current_type_coef:
game_type_tup.append((current_type_coefs[min_current_type_coef],
game_type_coefs[max_game_type_coef],
max_game_type_coef - min_current_type_coef))
else:
if max_game_type_coef > 0:
game_type_tup.append(('none',
game_type_coefs[max_game_type_coef],
max_game_type_coef))
lst_lst = [decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup]
for lst in lst_lst:
if not lst:
lst.append('already optimal')
return (decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup)
| if beta[current_lang_dep] < 0:
lang_dep_gain_tup.append((1, -beta[current_lang_dep]))
for lang_dep_dummy in django_to_local_cols['Language dependency'].keys():
if beta[lang_dep_dummy] > beta[current_lang_dep]:
gain = -beta[current_lang_dep] + beta[lang_dep_dummy]
lang_dep_gain_tup.append((django_to_local_cols['Language dependency'][lang_dep_dummy], gain)) | conditional_block |
regression.py | '''
BoardGameGeek Regression Prediction and Recommendation
CAPP 122 Final Project
Author: Syeda Jaisha
The following script runs a regression using data from BoardGameGeek (BGG) and
uses it to predict the BGG rating and the number of user reviews/ratings received
for a game designed by our user, as well as to make recommendations on possible
ways to improve the game rating and increase its popularity
'''
import pandas as pd
import numpy as np
rating_lst = ['avg_playtime', 'suggested_numplayers', 'averageweight',
'num_mechanics', 'lang_dep2', 'lang_dep3', 'lang_dep4',
'lang_dep5', 'Strategy Game', 'Family Game', 'Party Game',
'Abstract Game', 'Thematic', 'War Game','Customizable',
"Children's Game"]
popularity_lst = ['num_categories', 'num_mechanics', 'lang_dep2',
'lang_dep3', 'lang_dep4', 'lang_dep5', 'Strategy Game',
'Family Game', 'Party Game', 'Abstract Game', 'Thematic',
'War Game','Customizable', "Children's Game"]
django_to_local_cols = {'Average playing time': 'avg_playtime',
'Recommended number of players': 'suggested_numplayers',
'Complexity': 'averageweight',
'Number of categories': 'num_categories',
'Number of mechanics': 'num_mechanics',
'Language dependency' : {'lang_dep2': 2,
'lang_dep3': 3,
'lang_dep4': 4,
'lang_dep5': 5 },
'Type': ['Strategy Game', 'Family Game', 'Party Game',
'Abstract Game', 'Thematic', 'War Game',
'Customizable', "Children's Game"]}
def predict(input_dict, rating_bool):
'''
Main function that runs the regression, produces either the predicted BGG
rating or the predicted number of ratings received, and makes relevant
recommendations to improve it.
Inputs:
rating_bool (bool): If True, run the regression for predicted BGG
rating
If False, run the regression for predicted number
of ratings
input_dict (dict): Dictionary produced by Django UI, containing
required fields for the prediction using regression
Output:
(tuple of lists) Contains a list of column names and a list of columns
output for Django UI
Warning: Predicted values may be negative due to low R2 of models
'''
x = construct_x(input_dict, rating_bool)
X, y, raw_df, dep_var = construct_X_y(rating_bool)
coef = regress(X,y)
beta = coef['beta']
pred_val = apply_beta(beta, x)[0]
accuracy = calculate_R2(X, y, beta)
sorted_df_y = raw_df.sort_values(by=dep_var, ascending = False).\
reset_index(drop=True)
rank = sorted_df_y[sorted_df_y[dep_var] >= pred_val].index[-1] + 2
top_5_games = ''
for i, game in enumerate(sorted_df_y['name'][0:5]):
top_5_games += game
if i != 4:
top_5_games += ', '
decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup = \
recommend(coef, input_dict, X, rating_bool)
if rating_bool:
return (['Your game is likely to get a BGG rating of ____ on BoardGameGeek',
'placing you at a rank of ____ among 4093 games in our dataset',
'with top 5 BGG board games being ____',
'This prediction is only ____ percent accurate.',
                'try decreasing ____, '
                'to improve score by (for each unit decreased) ____',
                'try increasing ____, '
                'to improve score by (for each unit increased) ____',
                'try changing "Language dependency" to ____, '
                'to improve score by ____',
                'try dropping "type" _____, '
                'try adding "type" _____, to improve score by ____'],
[[str(round(pred_val,5)), rank,
top_5_games, str(round(accuracy,2)), decrease_gain_tup,
increase_gain_tup, lang_dep_gain_tup, game_type_tup]])
else:
return (['Your game is likely to be voted for by ____ users on BoardGameGeek',
'placing you at a ____ rank among 4093 games in our dataset',
'with top 5 BGG board games being _____',
'This prediction is only ____ percent accurate.',
                'try decreasing ____, '
                'to improve score by (for each unit decreased) ____',
                'try increasing ____, '
                'to improve score by (for each unit increased) ____',
                'try changing "Language dependency" to ____, '
                'to improve score by ____',
                'try dropping "type" _____, try adding "type" _____, '
                'to improve score by ____'],
[[str(round(pred_val,0)), rank,
top_5_games,str(round(accuracy,2)), decrease_gain_tup,
increase_gain_tup, lang_dep_gain_tup, game_type_tup]])
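# A minimal usage sketch (illustrative; assumes "all_games.csv" is present and
# SAMPLE_INPUT as defined above):
#
# col_names, rows = predict(SAMPLE_INPUT, rating_bool=True)
# print(rows[0][0])   # predicted BGG rating as a string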
def construct_x(input_dict, rating_bool):
'''
Construct x vector using user inputs from Django by matching Django
fields to column names in internal data, using field inputs to create
required columns and finally add a 'ones' column for constant of the
regression equation.
Input: (dict) Dictionary produced by Django UI, containing
required fields for the prediction using regression
Output: (pandas Series) Column vector
'''
x_dict = {}
type_lst =[]
for field in input_dict.keys():
if field == 'Language dependency':
for dummy, complexity in django_to_local_cols[field].items():
x_dict[dummy] = 0
if input_dict[field] == complexity:
x_dict[dummy] = 1
elif field in ['Type 1', 'Type 2', 'Type 3']:
type_lst.append(input_dict[field])
else:
col_name = django_to_local_cols[field]
value = input_dict[field]
x_dict[col_name] = value
for type_dummy in django_to_local_cols['Type']:
x_dict[type_dummy] = 0
if type_dummy in type_lst:
x_dict[type_dummy] = 1
x = pd.DataFrame(x_dict, index = ['obs'])
if rating_bool:
pred_vars = rating_lst
else:
pred_vars = popularity_lst
x = x.loc[:,pred_vars]
prepend_ones_col(x)
return x
def construct_X_y(rating_bool):
'''
Process raw data (data cleaning, data type coercion, creating dummy
variables) pulled from BoardGameGeek API and then use it to construct X
matrix and y vector to be plugged into the regress function.
Input: (bool) Indicates which regression model to run
Outputs:
X: (pandas DataFrame) X matrix containing observations of regressors
        y: (pandas Series) column vector containing observations of the dependent
variable
raw_df: (pandas DataFrame) processed dataframe
        dep_var: (str) name of the dependent variable
'''
raw_df = pd.read_csv("all_games.csv")
raw_df = raw_df.loc[:,['bgg_id', 'is_boardgame', 'name', 'name_coerced',
'minplaytime', 'maxplaytime', 'suggested_numplayers',
'suggested_language', 'num_ratings',
'Board Game_avg_rating', 'Strategy Game',
'Family Game', 'Party Game', 'Abstract Game', 'Thematic',
'War Game','Customizable', "Children's Game",
'num_categories', 'num_mechanics','averageweight']]
raw_df = raw_df[raw_df['is_boardgame'] == True]
raw_df = raw_df.dropna(subset=['suggested_language'])
create_lang_dummy(raw_df)
raw_df = raw_df.astype({'Strategy Game':'int64', 'Family Game': 'int64',
'Party Game': 'int64', 'Abstract Game': 'int64',
'Thematic': 'int64', 'War Game': 'int64',
'Customizable': 'int64', "Children's Game": 'int64',
'lang_dep2': 'int64', 'lang_dep3': 'int64',
'lang_dep4': 'int64', 'lang_dep5': 'int64'})
raw_df['suggested_numplayers'] = raw_df['suggested_numplayers']\
.astype('string').str.strip('+').astype('int64')
raw_df['avg_playtime'] = (raw_df['minplaytime'] + raw_df['maxplaytime'])/2
raw_df = raw_df[raw_df['suggested_numplayers'] != 0]
raw_df = raw_df[raw_df['avg_playtime'] != 0]
raw_df = raw_df.dropna()
if rating_bool:
pred_vars, dep_var = rating_lst, 'Board Game_avg_rating'
else:
pred_vars, dep_var = popularity_lst, 'num_ratings'
X = raw_df.loc[:,pred_vars]
prepend_ones_col(X)
y = raw_df[dep_var]
return X, y, raw_df, dep_var
def create_lang_dummy(df):
'''
Create and insert (k-1) dummy variables for k Language dependency categories
in the dataframe.
Input: (pandas DataFrame) BGG data
'''
lang_dep = {'No necessary in-game text':1,
'Some necessary text - easily memorized or small crib sheet':2,
'Moderate in-game text - needs crib sheet or paste ups':3,
'Extensive use of text - massive conversion needed to be playable':4,
'Unplayable in another language':5}
categories = pd.unique(df['suggested_language'])
for category in categories:
if lang_dep[category] != 1:
dummy_name = 'lang_dep' + str(lang_dep[category])
df[dummy_name] = df['suggested_language'] == category
def prepend_ones_col(X):
'''
Add a ones column to the left side of pandas DataFrame.
Input: (pandas DataFrame) X matrix
'''
X.insert(0,'ones', 1)
def regress(X, y):
'''
Regress X matrix on y vector and calculate beta vector.
Inputs:
X (pandas DataFrame): X matrix containing observations of regressors
y (pandas Series): y vector
    Outputs:
coef (pandas DataFrame): beta vector containing coefficient estimates
for the regressors
'''
beta = np.linalg.lstsq(X, y, rcond=None)[0]
#Source: /home/syedajaisha/capp30121-aut-20-syedajaisha/pa5/util.py
col_names = list(X.columns)
col_names[0] = 'intercept'
coef = pd.DataFrame({'beta': beta}, index=col_names)
return coef
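# Note: np.linalg.lstsq solves min_b ||Xb - y||_2 (ordinary least squares)
# without forming (X'X)^{-1} explicitly. A toy sanity check (illustrative only):
#
# X_toy = pd.DataFrame({'x': [0.0, 1.0, 2.0]})
# prepend_ones_col(X_toy)
# regress(X_toy, pd.Series([1.0, 3.0, 5.0]))['beta']   # approx [1.0, 2.0]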
def calculate_R2(X, y, beta):
'''
    Calculate R_squared (in percent) for a regression model
Inputs:
X (pandas DataFrame): X matrix
y (pandas Series): y vector
beta(pandas DataFrame): beta vector
    Output: (float) R_squared, scaled to percent
'''
yhat = apply_beta(beta, X)
R2 = 1 - (np.sum((y - yhat)**2) / np.sum((y - np.mean(y))**2))
#Source: /home/syedajaisha/capp30121-aut-20-syedajaisha/pa5/regression.py
return R2*100
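# Illustrative check: a perfect fit (yhat == y) gives R2 = 1 before scaling,
# i.e. 100.0 here; a model no better than predicting the mean gives ~0.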
def apply_beta(beta, X):
'''
Apply beta, the vector generated by regress, to the
specified values to calculate predicted value
Inputs:
beta (pandas Series): beta vector
X (pandas DataFrame): X matrix
Output:
yhat (numpy array): predicted value
'''
yhat = np.dot(X, beta)
return yhat
def recommend(coef, input_dict, X, rating_bool):
'''
    Make recommendations on which parameters the user could increase, decrease,
    or switch categories of to raise the predicted BGG rating or number of
    ratings, and report the corresponding change in the predicted value.
Inputs:
coef (pandas DataFrame): beta vector containing coefficient estimates
input_dict (dict): Dictionary produced by Django UI, containing
required fields for the prediction using regression
X (pandas DataFrame): X matrix
rating_bool (bool): Indicates which regression model to run
    Disclaimer: This function doesn't recommend changing everything to arrive at
    the optimal result. For example, if a game already has three types, it won't
    suggest replacing them all with the types carrying the three largest
    coefficients among all game types; it only suggests replacing the existing
    type that adds the least value to the regression with the type carrying the
    highest coefficient among the remaining game types.
'''
dummy_var = ['Language dependency', 'Type']
decrease_gain_tup = []
increase_gain_tup =[]
lang_dep_gain_tup = []
game_type_tup= []
if rating_bool:
beta = round(coef['beta'],4)
else:
beta = round(coef['beta'],0).astype('int64')
for field in django_to_local_cols:
if field not in dummy_var:
if field in input_dict:
if beta[django_to_local_cols[field]] < 0:
if input_dict[field] > min(X[django_to_local_cols[field]]):
decrease_gain_tup.append((field, -beta[django_to_local_cols[field]]))
else:
if input_dict[field] < max(X[django_to_local_cols[field]]):
increase_gain_tup.append((field, beta[django_to_local_cols[field]]))
elif field == 'Language dependency':
current_lang_dep = 'lang_dep' + str(input_dict['Language dependency'])
if current_lang_dep == 'lang_dep1':
for lang_dep_dummy in django_to_local_cols['Language dependency'].keys():
if beta[lang_dep_dummy] > 0:
lang_dep_gain_tup.append((django_to_local_cols['Language dependency'][lang_dep_dummy], \
beta[lang_dep_dummy]))
else:
if beta[current_lang_dep] < 0:
lang_dep_gain_tup.append((1, -beta[current_lang_dep]))
for lang_dep_dummy in django_to_local_cols['Language dependency'].keys():
if beta[lang_dep_dummy] > beta[current_lang_dep]:
gain = -beta[current_lang_dep] + beta[lang_dep_dummy]
lang_dep_gain_tup.append((django_to_local_cols['Language dependency'][lang_dep_dummy], gain))
elif field == 'Type':
current_type_coefs = {}
game_type_coefs = {}
for game_type in django_to_local_cols['Type']:
if game_type in input_dict.values():
current_type_coefs[beta[game_type]] = game_type
else:
game_type_coefs[beta[game_type]] = game_type
max_game_type_coef = max(game_type_coefs.keys())
if len(current_type_coefs) == 3:
min_current_type_coef = min(current_type_coefs.keys())
if max_game_type_coef > min_current_type_coef:
game_type_tup.append((current_type_coefs[min_current_type_coef],
game_type_coefs[max_game_type_coef],
max_game_type_coef - min_current_type_coef))
else:
if max_game_type_coef > 0:
game_type_tup.append(('none',
game_type_coefs[max_game_type_coef],
max_game_type_coef))
lst_lst = [decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup]
for lst in lst_lst:
if not lst:
lst.append('already optimal')
return (decrease_gain_tup, increase_gain_tup, lang_dep_gain_tup, game_type_tup)
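# Illustrative shape of the returned tuples (values made up):
# ([('Complexity', 0.1234)],               # decrease these fields
#  [('Average playing time', 0.0021)],     # increase these fields
#  [(1, 0.0456)],                          # switch "Language dependency" to level 1
#  [('Party Game', 'War Game', 0.0789)])   # (drop type, add type, gain)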
iggo.go
package main
import (
"encoding/json"
"fmt"
"github.com/dustin/go-humanize"
"github.com/gorilla/feeds"
"github.com/jacoduplessis/simplejson"
"html/template"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"time"
)
var templateFuncs = template.FuncMap{
"sizemax": sizemax,
"linkify": linkify,
}
var templateMap = map[string]*template.Template{}
var client = &http.Client{
Timeout: time.Second * 10,
}
type Size struct {
URL string
Width int
Height int
}
type User struct {
ID string
Name string
Username string
Bio string
Followers int
Following int
ProfilePic string
Posts []*Post
}
type Post struct {
ID string
Timestamp int64
Time string
URL string
Width int
Height int
Shortcode string
Likes int
Sizes []Size
Thumbnail string
Text string
Owner *PostOwner
Likers []*PostLiker
}
type PostLiker struct {
ProfilePic string
Username string
}
type PostOwner struct {
ID string
ProfilePic string
Username string
Name string
}
type SearchResult struct {
Query string
Users []struct {
User struct {
Username string `json:"username"`
Name string `json:"full_name"`
ProfilePic string `json:"profile_pic_url"`
Followers int `json:"follower_count"`
Byline string `json:"byline"`
}
}
Tags []struct {
Tag struct {
Name string `json:"name"`
MediaCount int `json:"media_count"`
} `json:"hashtag"`
} `json:"hashtags"`
}
type Tag struct {
Name string
Posts []*Post
}
func GetPost(r *http.Request) (*Post, error) {
shortcode := strings.TrimRight(r.URL.Path[len("/post/"):], "/")
if shortcode == "" {
return nil, nil
}
resp, err := client.Get(fmt.Sprintf("https://www.instagram.com/p/%s/", shortcode))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return GetPostFromMarkup(resp.Body)
}
func GetPostFromMarkup(body io.Reader) (*Post, error) {
sd := sharedData(body)
container, err := simplejson.NewJson(sd)
if err != nil {
return nil, err
}
base := container.GetPath("entry_data", "PostPage").GetIndex(0).GetPath("graphql", "shortcode_media")
timestamp := base.Get("taken_at_timestamp").GetInt64()
likers := []*PostLiker{}
for _, edge := range base.GetPath("edge_media_preview_like", "edges").GetArray() {
n := edge.Get("node")
likers = append(likers, &PostLiker{
ProfilePic: n.Get("profile_pic_url").GetString(),
Username: n.Get("username").GetString(),
})
}
data := &Post{
Shortcode: base.Get("shortcode").GetString(),
ID: base.Get("id").GetString(),
URL: base.Get("display_url").GetString(),
Text: getText(base),
Timestamp: timestamp,
Time: humanize.Time(time.Unix(timestamp, 0)),
Likes: base.Get("edge_media_preview_like").Get("count").GetInt(),
Likers: likers,
Owner: &PostOwner{
ID: base.GetPath("owner", "id").GetString(),
ProfilePic: base.GetPath("owner", "profile_pic_url").GetString(),
Username: base.GetPath("owner", "username").GetString(),
Name: base.GetPath("owner", "full_name").GetString(),
},
}
return data, nil
}
func getText(j *simplejson.Json) string {
return j.GetPath("edge_media_to_caption", "edges").GetIndex(0).GetPath("node", "text").GetString()
}
func getPosts(j *simplejson.Json) []*Post {
var posts []*Post
for _, edge := range j.Get("edges").GetArray() {
n := edge.Get("node")
var sizes []Size
for _, s := range n.Get("thumbnail_resources").GetArray() {
sizes = append(sizes, Size{
URL: s.Get("src").GetString(),
Width: s.Get("config_width").GetInt(),
				Height: s.Get("config_height").GetInt(),
})
}
timestamp := n.Get("taken_at_timestamp").GetInt64()
posts = append(posts, &Post{
ID: n.Get("id").GetString(),
Shortcode: n.Get("shortcode").GetString(),
URL: n.Get("display_url").GetString(),
Timestamp: timestamp,
Time: humanize.Time(time.Unix(timestamp, 0)),
Likes: n.GetPath("edge_liked_by", "count").GetInt(),
Sizes: sizes,
Text: getText(n),
Height: n.GetPath("dimensions", "height").GetInt(),
Width: n.GetPath("dimensions", "width").GetInt(),
Thumbnail: n.Get("thumbnail_src").GetString(),
})
}
return posts
}
func GetUserFromMarkup(body io.Reader) (*User, error) {
sd := sharedData(body)
container, err := simplejson.NewJson(sd)
if err != nil {
return nil, err
}
base := container.GetPath("entry_data", "ProfilePage").GetIndex(0).GetPath("graphql", "user")
data := &User{
ID: base.Get("id").GetString(),
Name: base.Get("full_name").GetString(),
Username: base.Get("username").GetString(),
Bio: base.Get("biography").GetString(),
Followers: base.GetPath("edge_followed_by", "count").GetInt(),
Following: base.GetPath("edge_follow", "count").GetInt(),
ProfilePic: base.Get("profile_pic_url_hd").GetString(),
Posts: getPosts(base.Get("edge_owner_to_timeline_media")),
}
return data, nil
}
func GetTagFromMarkup(body io.Reader) (*Tag, error) {
sd := sharedData(body)
container, err := simplejson.NewJson(sd)
if err != nil {
return nil, err
}
base := container.GetPath("entry_data", "TagPage").GetIndex(0).GetPath("graphql", "hashtag")
data := &Tag{
Name: base.Get("name").GetString(),
Posts: getPosts(base.Get("edge_hashtag_to_media")),
}
return data, nil
}
// GetUserFromUsername takes a username, makes a request
// and parses the response into a User struct, returning a pointer
func GetUserFromUsername(username string) (*User, error) {
if username == "" {
return nil, nil
}
resp, err := client.Get(fmt.Sprintf("https://www.instagram.com/%s/", username))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return GetUserFromMarkup(resp.Body)
}
func GetUser(r *http.Request) (*User, error) {
username := strings.TrimRight(r.URL.Path[len("/user/"):], "/")
return GetUserFromUsername(username)
}
func GetTag(r *http.Request) (*Tag, error) {
slug := strings.TrimRight(r.URL.Path[len("/tag/"):], "/")
if slug == "" {
return nil, nil
}
resp, err := client.Get(fmt.Sprintf("https://www.instagram.com/explore/tags/%s/", slug))
if err != nil {
return nil, err
}
defer resp.Body.Close()
return GetTagFromMarkup(resp.Body)
}
func sizemax(p *Post, w int) Size {
ix := 0
for i, s := range p.Sizes {
if s.Width <= w {
ix = i
} else {
break
}
}
return p.Sizes[ix]
}
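// Illustrative: with sizes of width 150/240/480 and w = 320, sizemax returns
// the 240px entry (the largest width <= w); if every size is wider than w, it
// falls back to the first (smallest) entry.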
func linkify(s string) template.HTML {
t := regexp.MustCompile(`(?i)#([\p{L}\w]+)`)
s = t.ReplaceAllString(s, `<a href="/tag/$1">#$1</a>`)
u := regexp.MustCompile(`(?i)@([\p{L}\w.]+)`)
s = u.ReplaceAllString(s, `<a href="/user/$1">@$1</a>`)
return template.HTML(s)
}
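// Illustrative: linkify("sunset #nofilter by @jaco") yields
// `sunset <a href="/tag/nofilter">#nofilter</a> by <a href="/user/jaco">@jaco</a>`.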
func setupTemplates() {
base := template.Must(template.ParseFiles("templates/base.html")).Funcs(templateFuncs)
if _, err := base.ParseFiles("templates/custom.html"); err != nil {
base.New("custom.html").Parse("")
}
keys := []string{"index", "post", "search", "tag", "user"}
for _, key := range keys {
clone := template.Must(base.Clone())
tmpl := template.Must(clone.ParseFiles("templates/" + key + ".html"))
templateMap[key] = tmpl
}
}
func renderTemplate(w http.ResponseWriter, key string, data interface{}) *appError {
tmpl, ok := templateMap[key]
if !ok {
return &appError{"Template error", 500, fmt.Errorf(`template "%s" not found`, key)}
}
err := tmpl.ExecuteTemplate(w, "base.html", data)
	if err != nil {
		return &appError{"Template error", 500, err}
	}
return nil
}
func sharedData(r io.Reader) []byte {
re := regexp.MustCompile(`window._sharedData\s?=\s?(.*);</script>`)
b, err := ioutil.ReadAll(r)
if err != nil {
return nil
}
matches := re.FindSubmatch(b)
if len(matches) < 2 {
return nil
}
return matches[1]
}
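// Illustrative: sharedData scans markup such as
// `<script>window._sharedData = {"entry_data": ...};</script>`
// and returns the raw JSON bytes captured between "=" and ";</script>".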
func getSearchResult(q string) (*SearchResult, error) {
sr := &SearchResult{}
qs := &url.Values{}
qs.Add("context", "blended")
qs.Add("query", q)
r, err := client.Get("https://www.instagram.com/web/search/topsearch/?" + qs.Encode())
if err != nil {
return sr, err
}
defer r.Body.Close()
err = json.NewDecoder(r.Body).Decode(sr)
return sr, err
}
func renderJSON(w http.ResponseWriter, data interface{}) *appError {
w.Header().Set("Content-Type", "application/json")
err := json.NewEncoder(w).Encode(data)
if err != nil {
return &appError{"Could not write response", 500, err}
}
return nil
}
func makeFeedHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
username := strings.TrimRight(r.URL.Path[len("/feed/"):], "/")
user, err := GetUserFromUsername(username)
if err != nil {
log.Printf("Error fetching user (%s) data for feed: %s", username, err)
w.Write([]byte("Error"))
return
}
now := time.Now()
feed := &feeds.Feed{
Title: fmt.Sprintf("Instagram Posts by %s", username),
Link: &feeds.Link{Href: fmt.Sprintf("https://www.instagram.com/%s", username)},
Description: fmt.Sprintf("Recent photos posted by %s on Instagram", username),
Created: now,
}
for _, post := range user.Posts {
item := feeds.Item{
Id: post.Shortcode,
Title: post.Text,
Link: &feeds.Link{Href: fmt.Sprintf("https://www.instagram.com/p/%s", post.Shortcode)},
Author: &feeds.Author{Name: username},
Created: time.Unix(post.Timestamp, 0),
Content: sizemax(post, 800).URL,
}
feed.Add(&item)
}
err = feed.WriteRss(w)
if err != nil {
log.Printf("Error writing feed: %s", err)
}
})
}
func makeIndex() appHandler {
return func(w http.ResponseWriter, r *http.Request) *appError {
q := r.FormValue("q")
if q != "" {
sr, _ := getSearchResult(q)
sr.Query = q
if r.URL.Query().Get("format") == "json" {
return renderJSON(w, &sr)
}
return renderTemplate(w, "search", sr)
}
return renderTemplate(w, "index", nil)
}
}
type appError struct {
Message string
Code int
Error error
}
type appHandler func(w http.ResponseWriter, r *http.Request) *appError
func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
if apperr := fn(w, r); apperr != nil {
http.Error(w, apperr.Message, apperr.Code)
log.Println(apperr.Error.Error())
}
}
func makeHandler(f func(*http.Request) (interface{}, error), templateKey string) appHandler {
return func(w http.ResponseWriter, r *http.Request) *appError {
data, err := f(r)
if err != nil || data == nil {
return &appError{"Could not load data", 404, err}
}
if r.URL.Query().Get("format") == "json" {
return renderJSON(w, &data)
}
return renderTemplate(w, templateKey, data)
}
}
func getListenAddr() string {
if port := os.Getenv("PORT"); port != "" {
return ":" + port
}
if addr := os.Getenv("LISTEN_ADDR"); addr != "" {
return addr
}
return "127.0.0.1:8000"
}
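// Illustrative: `PORT=8080 ./iggo` binds ":8080"; LISTEN_ADDR is consulted only
// when PORT is unset; with neither set, the server binds 127.0.0.1:8000.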
func userFetcher(r *http.Request) (interface{}, error) {
return GetUser(r)
}
func postFetcher(r *http.Request) (interface{}, error) {
return GetPost(r)
}
func tagFetcher(r *http.Request) (interface{}, error) {
return GetTag(r)
}
func main() {
setupTemplates()
http.Handle("/user/", makeHandler(userFetcher, "user"))
http.Handle("/post/", makeHandler(postFetcher, "post"))
http.Handle("/tag/", makeHandler(tagFetcher, "tag"))
http.Handle("/static/", http.StripPrefix("/static", http.FileServer(http.Dir("./static"))))
http.Handle("/feed/", makeFeedHandler())
http.Handle("/", makeIndex())
addr := getListenAddr()
fmt.Println("Listening on ", addr)
log.Fatal(http.ListenAndServe(addr, nil))
}
fib.rs
//! Fibonacci Heap (decent impl)
//!
use std::{
borrow::Borrow,
cmp::Ordering::*,
collections::{hash_map::Entry::*, HashMap},
fmt::{Debug, Display},
hash::Hash, mem::replace,
};
use common::hashmap;
use coll::*;
////////////////////////////////////////////////////////////////////////////////
//// Macro
def_attr_macro!(clone|
left, right, child, paren, rank, marked, idx
);
def_attr_macro!(ref|
(val, T)
);
////////////////////////////////////////
//// Node wrapper
macro_rules! node {
($i:expr, $k:expr) => {
node!($i, $k, 0, false)
};
($i:expr, $k:expr, $rank:expr, $marked:expr) => {{
aux_node!({
idx: $i,
val: $k,
rank: $rank,
left: WeakNode::none(),
right: Node::none(),
paren: WeakNode::none(),
child: Node::none(),
marked: $marked
})
}};
}
////////////////////////////////////////////////////////////////////////////////
//// Structure
/// [Fibonacci Heap](https://en.wikipedia.org/wiki/Fibonacci_heap):
/// an indexed min-heap based on linked lists.
///
/// For any node x of rank d, size(x) >= F(d+2), where F is the Fibonacci sequence.
///
/// The index type I should be cheap to clone.
pub struct FibHeap<I, T> {
len: usize,
/// roots count
rcnt: usize,
min: Node<I, T>,
/// index of nodes
nodes: HashMap<I, Node<I, T>>,
}
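// A minimal usage sketch (illustrative; relies on the public API defined below):
//
// let mut heap: FibHeap<&str, i32> = FibHeap::new();
// heap.push("b", 3);
// heap.push("a", 1);
// assert_eq!(heap.pop_item(), Some(("a", 1)));
// assert_eq!(heap.top(), Some(&3));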
#[derive(Clone)]
struct Node_<I, T> {
idx: I,
val: T,
    rank: usize, // number of children
/// rev ref
left: WeakNode<I, T>,
right: Node<I, T>,
/// rev ref
paren: WeakNode<I, T>,
child: Node<I, T>,
/// Indicate that it has lost a child
marked: bool,
}
////////////////////////////////////////////////////////////////////////////////
//// Implementation
impl<I: Debug, T: Debug> Debug for Node_<I, T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{:?}[{:?}]{}",
self.idx,
self.val,
if self.marked { " X" } else { "" }
)
}
}
impl_node!();
impl<I, T> Node<I, T> {
fn children(&self) -> Vec<Self> {
let mut child = child!(self);
let mut res = vec![];
while child.is_some() {
res.push(child.clone());
child = right!(child);
}
res
}
/// remove paren, left and right
fn purge_as_root(&self) {
paren!(self, WeakNode::none());
left!(self, WeakNode::none());
right!(self, Node::none());
}
fn cut_child(&self, x: Node<I, T>) {
if !left!(x).is_none() {
right!(left!(x).upgrade(), right!(x));
} else {
debug_assert!(child!(self).rc_eq(&x));
child!(self, right!(x));
}
if !right!(x).is_none() {
left!(right!(x), left!(x));
}
rank!(self, rank!(self) - 1);
x.purge_as_root();
}
/// replace with new val, return old val
fn replace_key(&self, val: T) -> T
where
I: Debug,
T: Debug
{
replace(val_mut!(self), val)
}
fn replace(&mut self, x: Self) -> Self {
let old = Self(self.0.clone());
self.0 = x.0;
old
}
#[cfg(test)]
#[allow(unused)]
fn validate_ref(&self)
where
I: Clone,
{
assert!(self.is_some());
let _self_idx = idx!(self);
/* validate right sibling */
let rh = right!(self);
if rh.is_some() {
let _rh_idx = idx!(rh);
let rhlf = left!(rh).upgrade();
assert!(rhlf.rc_eq(self));
assert!(rhlf.is_some());
rh.validate_ref();
}
/* validate children */
let child = child!(self);
if child.is_some() {
let _child_idx = idx!(child);
let cpw = paren!(child);
assert!(!cpw.is_none());
let cp = cpw.upgrade();
assert!(cp.rc_eq(self));
assert!(cp.is_some());
child.validate_ref();
}
}
}
impl<I: Debug, T: Debug> Debug for Node<I, T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.is_none() {
write!(f, "None")
} else {
write!(f, "{:?}", self.0.as_ref().unwrap().as_ref().borrow())
}
}
}
impl<I: Debug, T: Debug> Display for Node<I, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, "R({:?}) ", self)?;
let mut curq = vec![(self.clone(), self.children())];
loop {
let mut nxtq = vec![];
for (p, children) in curq {
if children.is_empty() {
break;
}
write!(f, "P({:?}) ", p)?;
let childlen = children.len();
for (i, child) in children.into_iter().enumerate() {
write!(f, "{:?}", child)?;
if i < childlen - 1 {
write!(f, ", ")?;
}
nxtq.push((child.clone(), child.children()));
}
write!(f, "; ")?;
}
if !nxtq.is_empty() {
writeln!(f)?;
curq = nxtq;
} else {
break;
}
}
Ok(())
}
}
impl<I, T> FibHeap<I, T>
where
I: Eq + Hash + Clone + Debug,
T: Ord + Debug
{
////////////////////////////////////////////////////////////////////////////
//// Public method
pub fn new() -> Self {
Self {
len: 0,
rcnt: 0,
min: Node::none(),
nodes: HashMap::new(),
}
}
pub fn len(&self) -> usize {
self.len
}
/// A push with an existing index overrides the old node.
pub fn push(&mut self, i: I, v: T)
{
let node = node!(i.clone(), v);
self.nodes.insert(i, node.clone());
self.push_into_roots(node.clone());
if val!(node) < val!(self.min) {
self.min = node;
}
self.len += 1;
}
/// Delete-min. Amortized cost O(rank(H)).
///
/// After consolidation, trees(H') <= rank(H) + 1, since no two trees
/// share a rank.
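///
/// Because size(x) >= F(rank(x) + 2), the maximum rank is O(log n), so
/// delete-min costs O(log n) amortized.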
pub fn pop_item(&mut self) -> Option<(I, T)>
{
if self.min.is_none() {
return None;
}
self.len -= 1;
/* push children of oldmin into roots */
for child in self.min.children() {
self.push_into_roots(child.clone());
}
/* update min */
let newmin = self.roots()[1..]
.into_iter()
.min_by_key(|&sib| val!(sib))
.cloned()
.unwrap_or_default();
/* just del old min */
self.remove_from_roots(self.min.clone());
let oldmin = self.min.replace(newmin);
self.consolidate();
Some((
self.remove_from_index(&oldmin),
unwrap_into!(oldmin).val
))
}
/// Repeatedly merge trees of the same rank until all root ranks are distinct.
pub fn consolidate(&mut self) {
let mut rank: HashMap<usize, Node<I, T>> = hashmap!();
for mut sib in self.roots() {
while let Some(x) = rank.remove(&rank!(sib)) {
sib = self.merge_same_rank_root(x, sib);
}
rank.insert(rank!(sib), sib);
}
}
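// Worked sketch (hypothetical ranks): roots of ranks [0, 0, 1] first merge
// the two rank-0 trees into one rank-1 tree, which then merges with the
// existing rank-1 tree, leaving a single rank-2 root.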
/// Return oldval, alias of ReplaceOrPush
///
/// Exec push if the val doesn't exist.
///
pub fn insert(&mut self, i: I, v: T) -> Option<T>
where
I: Eq + Hash + Clone,
T: Ord + Debug
{
match self.nodes.entry(i.clone()) {
Occupied(ent) => {
let x = ent.get().clone();
let oldv = x.replace_key(v);
match val!(x).cmp(&oldv) {
Less => self.decrease_key_(x),
Equal => (),
Greater => self.increase_key_(x),
}
Some(oldv)
}
Vacant(_ent) => {
self.push(i, v);
None
}
}
}
pub fn union(&mut self, _other: Self) {
unimplemented!("link roots, but not O(1) for link index reference")
}
pub fn delete<Q: AsRef<I>>(&mut self, _i: Q) -> Option<T> {
unimplemented!("1. decrease-val to -infi, 2. pop");
}
////////////////////////////////////////////////////////////////////////////
//// Extra functional method
/// Return oldval
///
pub fn decrease_key(&mut self, i: I, v: T) -> Option<T>
where
I: Eq + Hash + Clone,
T: Debug
{
let x;
match self.nodes.entry(i.clone()) {
Occupied(ent) => {
x = ent.get().clone();
let oldv = x.replace_key(v);
self.decrease_key_(x);
Some(oldv)
}
Vacant(_ent) => None,
}
}
pub fn top_item(&self) -> Option<(I, &T)>
where
I: Eq + Clone
{
if self.min.is_some() {
Some((idx!(self.min), val!(self.min)))
} else {
None
}
}
pub fn top(&self) -> Option<&T> {
self.top_item().map(|x| x.1)
}
pub fn pop(&mut self) -> Option<T> {
self.pop_item().map(|x| x.1)
}
pub fn get<Q>(&self, i: &Q) -> Option<&T>
where
I: Borrow<Q>,
Q: Ord + Hash + ?Sized,
{
self.nodes.get(i).map(|node| val!(node))
}
pub fn indexes(&self) -> impl Iterator<Item = &I> {
self.nodes.keys()
}
////////////////////////////////////////////////////////////////////////////
//// Assistant method
fn decrease_key_(&mut self, x: Node<I, T>) {
let ent;
let p = paren!(x);
if !p.is_none() && val!(x) < val!(p.upgrade()) {
// Pretend x itself is a parent that qualifies for a cascading cut.
marked!(x, true);
ent = x.downgrade();
} else {
ent = WeakNode::none();
}
self.cut_meld_unmark_to_roots(ent);
if val!(x) < val!(self.min) {
debug_assert!(paren!(x).is_none());
self.min = x;
}
}
/// WARNING: O(rank) = O(n)
fn increase_key_(&mut self, x: Node<I, T>) {
let ent;
let mut children_lost = if marked!(x) { 1 } else { 0 };
for child in x.children() {
if val!(child) < val!(x) {
x.cut_child(child.clone());
self.push_into_roots(child.clone());
marked!(child, false);
children_lost += 1;
}
}
match children_lost.cmp(&1) {
Less => ent = WeakNode::none(),
Equal => {
marked!(x, true);
ent = paren!(x);
}
Greater => {
marked!(x, true);
ent = x.downgrade();
}
}
self.cut_meld_unmark_to_roots(ent);
// WARNING: O(#roots) scan over all roots to restore self.min
if x.rc_eq(&self.min) {
let min_node =
self.roots().into_iter().min_by_key(|x| val!(x)).unwrap();
self.min = min_node;
}
}
fn cut_meld_unmark_to_roots(&mut self, ent: WeakNode<I, T>) {
if ent.is_none() {
return;
}
let mut x = ent.upgrade();
let mut p = paren!(x);
while marked!(x) && !p.is_none() {
let strongp = p.upgrade();
strongp.cut_child(x.clone());
self.push_into_roots(x.clone());
marked!(x, false);
x = strongp;
p = paren!(x);
}
// By definition roots are not marked, but that should be harmless here;
// the mark is more precise for the tree shape after a pop-triggered consolidation.
marked!(x, true);
}
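// Cascading cut, in short: cutting a child of a marked parent moves that
// parent to the roots as well, repeating upward until an unmarked ancestor
// is reached; that final ancestor is then marked.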
fn remove_from_index(&mut self, x: &Node<I, T>) -> I
where
I: Eq + Hash + Clone
{
let k = idx!(x);
self.nodes.remove(&k);
k
}
/// Insert x as a sibling of self.min, purging x's old links first.
fn push_into_roots(&mut self, x: Node<I, T>) {
debug_assert!(!self.min.rc_eq(&x));
self.rcnt += 1;
x.purge_as_root();
if self.min.is_none() {
self.min = x;
left!(self.min, self.min.downgrade());
right!(self.min, self.min.clone());
} else {
debug_assert!(right!(self.min).is_some());
right!(x, right!(self.min));
left!(x, self.min.downgrade());
right!(self.min, x.clone());
left!(right!(x), x.downgrade());
}
}
/// Walk all roots, starting from self.min.
fn roots(&self) -> Vec<Node<I, T>> {
let mut sibs = vec![];
if self.min.is_none() {
return sibs;
} else {
sibs.push(self.min.clone());
}
let mut sib = right!(self.min);
while !sib.rc_eq(&self.min) {
sibs.push(sib.clone());
sib = right!(sib);
}
sibs
}
fn remove_from_roots(&mut self, x: Node<I, T>) {
self.rcnt -= 1;
if self.rcnt > 0 {
right!(left!(x).upgrade(), right!(x));
left!(right!(x), left!(x));
}
x.purge_as_root();
}
/// Merge two equal-rank roots; y is removed from the root list (updating
/// self.rcnt) and linked under x.
fn merge_same_rank_root(
&mut self,
mut x: Node<I, T>,
mut y: Node<I, T>,
) -> Node<I, T> {
debug_assert_eq!(rank!(x), rank!(y));
// let x be parent
if val!(y) < val!(x) || (val!(y) == val!(x) && y.rc_eq(&self.min)) {
(x, y) = (y, x);
}
// remove y from roots
self.remove_from_roots(y.clone());
// link y to x child
right!(y, child!(x));
if child!(x).is_some() {
left!(child!(x), y.downgrade());
}
// link y to x
paren!(y, x.downgrade());
child!(x, y.clone());
rank!(x, rank!(x) + 1);
x
}
////////////////////////////////////////////////////////////////////////////
//// Validation method
/// Validate that nodes are not None and do not fail to upgrade to Rc.
#[cfg(test)]
#[allow(unused)]
pub(crate) fn validate_ref(&self) {
if self.len() == 0 {
return;
}
/* validate roots */
for root in self.roots() {
assert!(root.is_some());
let rh = right!(root);
assert!(rh.is_some());
let wlf = left!(root);
assert!(!wlf.is_none());
let left = wlf.upgrade();
assert!(left.is_some());
let child = child!(root);
if child.is_some() {
child.validate_ref();
}
}
}
}
impl<I: Eq + Hash + Clone, T: Clone> FibHeap<I, T> {
fn overall_clone(
&self,
nodes: &mut HashMap<I, Node<I, T>>,
x: Node<I, T>,
) -> Node<I, T> {
if x.is_none() {
return Node::none();
}
// overall clone node body
let newx = node!(idx!(x), val!(x).clone(), rank!(x), marked!(x));
// update index reference
nodes.insert(idx!(x), newx.clone());
// recursively clone the children
let mut children_iter = x.children().into_iter();
if let Some(child) = children_iter.next() {
let newchild = self.overall_clone(nodes, child);
child!(newx, newchild.clone());
paren!(newchild, newx.downgrade());
let mut cur = newchild;
for child in children_iter {
let newchild = self.overall_clone(nodes, child);
right!(cur, newchild.clone());
left!(newchild, cur.downgrade());
cur = newchild;
}
}
newx
}
}
impl<I, T> Drop for FibHeap<I, T> {
fn drop(&mut self) {
if self.len > 0 {
// break the circular sibling links so the Rc nodes can be dropped
let tail = left!(self.min).upgrade();
right!(tail, Node::none());
self.nodes.clear();
}
}
}
impl<I: Debug, T: Debug> Display for FibHeap<I, T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut sib = self.min.clone();
for i in 1..=self.rcnt {
writeln!(f, "{} ({i:03}) {}", "-".repeat(28), "-".repeat(28))?;
// writeln!(f)?;
if sib.rc_eq(&self.min) {
write!(f, "M=>")?;
}
writeln!(f, "{}", sib)?;
debug_assert!(sib.is_some());
sib = right!(sib);
}
writeln!(f, "{}>> end <<{}", "-".repeat(28), "-".repeat(28))?;
Ok(())
}
}
impl<I: Ord + Hash + Clone + Debug, T: Ord + Clone + Debug> Clone for FibHeap<I, T> {
fn clone(&self) -> Self {
let len = self.len;
let rcnt = self.rcnt;
let mut nodes = HashMap::new();
let min;
let mut roots_iter = self.roots().into_iter();
if let Some(_min) = roots_iter.next() {
min = self.overall_clone(&mut nodes, _min.clone());
let mut cur = min.clone();
for root in roots_iter {
let newroot = self.overall_clone(&mut nodes, root);
right!(cur, newroot.clone());
left!(newroot, cur.downgrade());
cur = newroot;
}
right!(cur, min.clone());
left!(min, cur.downgrade());
} else {
min = Node::none();
}
Self {
len,
rcnt,
min,
nodes,
}
}
}
#[cfg(test)]
mod tests {
use super::{ FibHeap, super::* };
use common::random;
#[ignore = "for debug"]
#[test]
fn debug_fib_heap() {}
#[test]
fn test_fibheap_fixeddata() {
let mut heap = FibHeap::<usize, usize>::new();
let mut auto = common::gen();
heap.insert(auto(), 2);
heap.insert(auto(), 4);
heap.insert(auto(), 1);
assert_eq!(heap.pop().unwrap(), 1);
assert_eq!(heap.pop().unwrap(), 2);
assert_eq!(heap.pop().unwrap(), 4);
assert_eq!(heap.pop(), None);
}
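// A sketch of a decrease-key round trip (the values are illustrative, not
// from any fixture): decrease_key returns the value it replaced and the
// heap order reflects the new key immediately.
#[test]
fn test_fibheap_decrease_key() {
let mut heap = FibHeap::<usize, usize>::new();
heap.push(0, 10);
heap.push(1, 20);
assert_eq!(heap.top(), Some(&10));
assert_eq!(heap.decrease_key(1, 5), Some(20));
assert_eq!(heap.pop(), Some(5));
assert_eq!(heap.pop(), Some(10));
assert_eq!(heap.pop(), None);
}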
#[test]
fn test_fibheap_randomdata() {
test_heap!(FibHeap::new(), MIN);
test_heap_update!(FibHeap::new(), MIN);
}
#[test]
fn test_fibheap_randomdata_extra() {
let get_one = || random::<usize>() % 1000;
let validate = |heap: &FibHeap<i32, usize>, non_dec: bool| {
let mut heap = (*heap).clone();
let mut storage = vec![];
while let Some(e) = heap.pop() {
storage.push(e);
}
if !non_dec {
storage.reverse();
}
let mut iter = storage.into_iter().enumerate();
let mut prev = iter.next().unwrap().1;
for (_i, e) in iter {
assert!(prev <= e, "prev: {prev:?}, e: {e:?}");
prev = e;
}
};
let non_dec = true;
for _ in 0..1 {
let mut heap = FibHeap::<i32, usize>::new();
// seed the heap with an initial batch of random keys
for i in 0..300 {
let e = get_one();
heap.push(i, e); // push
}
for _ in 0..100 {
let newkey = get_one();
let i = random::<usize>() % heap.len;
heap.insert(i as i32, newkey.clone());
validate(&heap, non_dec);
}
}
}
}

// main.rs
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
const COLLECTED_ORE: u64 = 1000000000000;
#[derive(Debug, Eq, PartialEq)]
struct Reaction {
output: (String, u64),
ingredients: Vec<(String, u64)>,
}
type ReactionMap = HashMap<String, Reaction>;
fn calc_ore(reactions: &ReactionMap) -> u64 {
calc_ore_for_fuel(1, reactions)
}
fn calc_ore_for_fuel(fuel: u64, reactions: &ReactionMap) -> u64 {
let mut ore = 0;
let mut spare_chemicals = HashMap::new();
let mut requirements = Vec::new();
requirements.push((String::from("FUEL"), fuel));
let ore_name = String::from("ORE");
while !requirements.is_empty() {
let cur_requirements = requirements.clone();
requirements.clear();
for (req_chem, req_amount) in cur_requirements {
// Check whether we have any spare of this ingredient from
// other reactions.
let mut adj_req_amount = req_amount;
if let Some(spare) = spare_chemicals.get_mut(&req_chem) {
if *spare >= req_amount {
// We have enough spare to completely fulfill this
// requirement, no need to go further.
*spare -= req_amount;
continue;
} else {
// Reduce the required amount by the amount we have
// spare;
adj_req_amount = req_amount - *spare;
*spare = 0;
}
}
// Find the reaction that produces this ingredient.
let reaction = reactions
.get(&req_chem)
.expect(format!("Couldn't find reaction for {}", req_chem).as_ref());
// Find out how many times we need to run this reaction,
// and how much will be spare.
let output_amount = reaction.output.1;
let reaction_count = (adj_req_amount - 1) / output_amount + 1;
let spare = output_amount * reaction_count - adj_req_amount;
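// Worked example (illustrative numbers): needing 7 of a chemical that is
// produced 5 per reaction gives reaction_count = (7 - 1) / 5 + 1 = 2 and
// spare = 2 * 5 - 7 = 3; the expression is integer ceiling division.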
// Update the spare count for this ingredient.
if let Some(existing_spare) = spare_chemicals.get_mut(&req_chem) {
*existing_spare += spare;
} else {
spare_chemicals.insert(req_chem, spare);
}
// Update the required ingredients list with the ingredients
// needed to make this chemical.
for ingredient in reaction.ingredients.clone() {
let ingredient_name = ingredient.0;
let ingredient_count = reaction_count * ingredient.1;
if ingredient_name == ore_name {
ore += ingredient_count;
} else {
requirements.push((ingredient_name, ingredient_count));
}
}
}
}
ore
}
fn calc_fuel_for_ore(ore: u64, reactions: &ReactionMap) -> u64 {
let mut lower = 1;
let mut current;
let mut upper = 1;
// Find an upper bound to use for binary search.
loop {
let used_ore = calc_ore_for_fuel(upper, reactions);
if used_ore < ore {
lower = upper;
upper *= 2;
} else {
break;
}
}
// Binary search for the largest fuel amount that costs at most `ore`.
while lower < upper {
current = (lower + upper + 1) / 2;
if calc_ore_for_fuel(current, reactions) <= ore {
lower = current;
} else {
upper = current - 1;
}
}
lower
}
fn parse_chemical(chemical: &str) -> (String, u64) {
let mut iter = chemical.split_whitespace();
let count = iter.next().unwrap().parse::<u64>().unwrap();
let chem = iter.next().unwrap();
(String::from(chem), count)
}
fn parse_reactions(strs: &[String]) -> ReactionMap {
let mut reactions = HashMap::new();
for reaction in strs {
let mut iter = reaction.split(" => ");
let ingredients_str = iter.next().unwrap();
let output_str = iter.next().unwrap();
let mut ingredients = Vec::new();
for ingredient in ingredients_str.split(", ") {
ingredients.push(parse_chemical(ingredient));
}
let output = parse_chemical(output_str);
reactions.insert(
output.0.clone(),
Reaction {
output,
ingredients,
},
);
}
reactions
}
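// For example, the line "7 A, 1 E => 1 FUEL" becomes a Reaction keyed by
// "FUEL", with output ("FUEL", 1) and ingredients [("A", 7), ("E", 1)].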
fn parse_input(filename: &str) -> ReactionMap {
let file = File::open(filename).expect("Failed to open file");
let reader = BufReader::new(file);
let reactions: Vec<String> = reader
.lines()
.map(|l| l.expect("Failed to read line"))
.map(|l| String::from(l.trim()))
.collect();
parse_reactions(reactions.as_slice())
}
fn main() {
let reactions = parse_input("input");
// Part 1
let ore = calc_ore(&reactions);
println!("Require {} ore for 1 fuel", ore);
// Part 2
let fuel = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
println!("Produce {} fuel from {} ore", fuel, COLLECTED_ORE);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse() {
let input = vec![String::from("7 A, 1 E => 1 FUEL")];
let reactions = parse_reactions(input.as_slice());
let result = reactions.get(&String::from("FUEL"));
assert!(result.is_some());
let reaction = result.unwrap();
assert_eq!(
*reaction,
Reaction {
output: (String::from("FUEL"), 1),
ingredients: vec![(String::from("A"), 7), (String::from("E"), 1),],
},
);
}
#[test]
fn example1() {
let input = vec![
String::from("10 ORE => 10 A"),
String::from("1 ORE => 1 B"),
String::from("7 A, 1 B => 1 C"),
String::from("7 A, 1 C => 1 D"),
String::from("7 A, 1 D => 1 E"),
String::from("7 A, 1 E => 1 FUEL"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 31);
}
#[test]
fn example2() {
let input = vec![
String::from("9 ORE => 2 A"),
String::from("8 ORE => 3 B"),
String::from("7 ORE => 5 C"),
String::from("3 A, 4 B => 1 AB"),
String::from("5 B, 7 C => 1 BC"),
String::from("4 C, 1 A => 1 CA"),
String::from("2 AB, 3 BC, 4 CA => 1 FUEL"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 165);
}
#[test]
fn example3() {
let input = vec![
String::from("157 ORE => 5 NZVS"),
String::from("165 ORE => 6 DCFZ"),
String::from("44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL"),
String::from("12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ"),
String::from("179 ORE => 7 PSHF"),
String::from("177 ORE => 5 HKGWZ"),
String::from("7 DCFZ, 7 PSHF => 2 XJWVT"),
String::from("165 ORE => 2 GPVTF"),
String::from("3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 13312);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 82892753);
}
#[test]
fn example4() {
let input = vec![
String::from("2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG"),
String::from("17 NVRVD, 3 JNWZP => 8 VPVL"),
String::from("53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL"),
String::from("22 VJHF, 37 MNCFX => 5 FWMGM"),
String::from("139 ORE => 4 NVRVD"),
String::from("144 ORE => 7 JNWZP"),
String::from("5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC"),
String::from("5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV"),
String::from("145 ORE => 6 MNCFX"),
String::from("1 NVRVD => 8 CXFTF"),
String::from("1 VJHF, 6 MNCFX => 4 RFSQX"),
String::from("176 ORE => 6 VJHF"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 180697);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 5586022);
}
#[test]
fn example5() {
let input = vec![
String::from("171 ORE => 8 CNZTR"),
String::from("7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL"),
String::from("114 ORE => 4 BHXH"),
String::from("14 VRPVC => 6 BMBT"),
String::from("6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL"),
String::from("6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT"),
String::from("15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW"),
String::from("13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW"),
String::from("5 BMBT => 4 WPTQ"),
String::from("189 ORE => 9 KTJDG"),
String::from("1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP"),
String::from("12 VRPVC, 27 CNZTR => 2 XDBXC"),
String::from("15 KTJDG, 12 BHXH => 5 XCVML"),
String::from("3 BHXH, 2 VRPVC => 7 MZWV"),
String::from("121 ORE => 7 VRPVC"),
String::from("7 XCVML => 6 RJRHP"),
String::from("5 BHXH, 4 VRPVC => 5 LTCX"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 2210736);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 460664);
}
}
| {
let mut lower = 1;
let mut current;
let mut upper = 1;
// Find an upper bound to use for binary search.
loop {
let used_ore = calc_ore_for_fuel(upper, reactions);
if used_ore <= ore {
upper *= 2;
} else {
break;
}
}
// Binary search to find the highest amount of fuel we can
// produce without exceeding the available ore.
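// Invariant (assuming one fuel is affordable at all): `lower` stays
// affordable, `upper` does not, and the loop ends when they are adjacent.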
loop {
current = (upper - lower) / 2 + lower;
let used_ore = calc_ore_for_fuel(current, reactions);
if used_ore <= ore {
lower = current;
} else {
upper = current;
}
if upper - 1 == lower {
return lower;
}
}
} | identifier_body |
main.rs | use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
const COLLECTED_ORE: u64 = 1000000000000;
#[derive(Debug, Eq, PartialEq)]
struct Reaction {
output: (String, u64),
ingredients: Vec<(String, u64)>,
}
type ReactionMap = HashMap<String, Reaction>;
fn calc_ore(reactions: &ReactionMap) -> u64 {
calc_ore_for_fuel(1, reactions)
}
fn calc_ore_for_fuel(fuel: u64, reactions: &ReactionMap) -> u64 {
let mut ore = 0;
let mut spare_chemicals = HashMap::new();
let mut requirements = Vec::new();
requirements.push((String::from("FUEL"), fuel));
let ore_name = String::from("ORE");
while !requirements.is_empty() {
let cur_requirements = requirements.clone();
requirements.clear();
for (req_chem, req_amount) in cur_requirements {
// Check whether we have any spare of this ingredient from
// other reactions.
let mut adj_req_amount = req_amount;
if let Some(spare) = spare_chemicals.get_mut(&req_chem) {
if *spare >= req_amount {
// We have enough spare to completely fulfill this
// requirement, no need to go further.
*spare -= req_amount;
continue;
} else {
// Reduce the required amount by the amount we have
// spare.
adj_req_amount = req_amount - *spare;
*spare = 0;
}
}
// Find the reaction that produces this ingredient.
let reaction = reactions
.get(&req_chem)
.unwrap_or_else(|| panic!("Couldn't find reaction for {}", req_chem));
// Find out how many times we need to run this reaction,
// and how much will be spare.
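// Worked example (illustrative numbers): needing 7 units from a
// reaction that yields 3 per run takes (7 - 1) / 3 + 1 = 3 runs,
// leaving 3 * 3 - 7 = 2 spare.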
let output_amount = reaction.output.1;
let reaction_count = (adj_req_amount - 1) / output_amount + 1;
let spare = output_amount * reaction_count - adj_req_amount;
// Update the spare count for this ingredient.
if let Some(existing_spare) = spare_chemicals.get_mut(&req_chem) {
*existing_spare += spare;
} else {
spare_chemicals.insert(req_chem, spare);
}
// Update the required ingredients list with the ingredients
// needed to make this chemical.
for ingredient in reaction.ingredients.clone() {
let ingredient_name = ingredient.0;
let ingredient_count = reaction_count * ingredient.1;
if ingredient_name == ore_name {
ore += ingredient_count;
} else {
requirements.push((ingredient_name, ingredient_count));
}
}
}
}
ore
}
fn calc_fuel_for_ore(ore: u64, reactions: &ReactionMap) -> u64 {
let mut lower = 1;
let mut current;
let mut upper = 1;
// Find an upper bound to use for binary search.
loop {
let used_ore = calc_ore_for_fuel(upper, reactions);
if used_ore <= ore {
upper *= 2;
} else {
break;
}
}
// Binary search to find the highest amount of fuel we can
// produce without exceeding the available ore.
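// Invariant (assuming one fuel is affordable at all): `lower` stays
// affordable, `upper` does not, and the loop ends when they are adjacent.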
loop {
current = (upper - lower) / 2 + lower;
let used_ore = calc_ore_for_fuel(current, reactions);
if used_ore <= ore {
lower = current;
} else {
upper = current;
}
if upper - 1 == lower {
return lower;
}
}
}
fn parse_chemical(chemical: &str) -> (String, u64) {
let mut iter = chemical.split_whitespace();
let count = iter.next().unwrap().parse::<u64>().unwrap();
let chem = iter.next().unwrap();
(String::from(chem), count)
}
fn parse_reactions(strs: &[String]) -> ReactionMap {
let mut reactions = HashMap::new();
for reaction in strs {
let mut iter = reaction.split(" => ");
let ingredients_str = iter.next().unwrap();
let output_str = iter.next().unwrap();
let mut ingredients = Vec::new();
for ingredient in ingredients_str.split(", ") {
ingredients.push(parse_chemical(ingredient));
}
let output = parse_chemical(output_str);
reactions.insert(
output.0.clone(),
Reaction {
output,
ingredients,
},
);
}
reactions
}
fn parse_input(filename: &str) -> ReactionMap {
let file = File::open(filename).expect("Failed to open file");
let reader = BufReader::new(file);
let reactions: Vec<String> = reader
.lines()
.map(|l| l.expect("Failed to read line"))
.map(|l| String::from(l.trim()))
.collect();
parse_reactions(reactions.as_slice())
}
fn main() {
let reactions = parse_input("input");
// Part 1
let ore = calc_ore(&reactions);
println!("Require {} ore for 1 fuel", ore);
// Part 2
let fuel = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
println!("Produce {} fuel from {} ore", fuel, COLLECTED_ORE);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn | () {
let input = vec![String::from("7 A, 1 E => 1 FUEL")];
let reactions = parse_reactions(input.as_slice());
let result = reactions.get(&String::from("FUEL"));
assert!(result.is_some());
let reaction = result.unwrap();
assert_eq!(
*reaction,
Reaction {
output: (String::from("FUEL"), 1),
ingredients: vec![(String::from("A"), 7), (String::from("E"), 1),],
},
);
}
#[test]
fn example1() {
let input = vec![
String::from("10 ORE => 10 A"),
String::from("1 ORE => 1 B"),
String::from("7 A, 1 B => 1 C"),
String::from("7 A, 1 C => 1 D"),
String::from("7 A, 1 D => 1 E"),
String::from("7 A, 1 E => 1 FUEL"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 31);
}
#[test]
fn example2() {
let input = vec![
String::from("9 ORE => 2 A"),
String::from("8 ORE => 3 B"),
String::from("7 ORE => 5 C"),
String::from("3 A, 4 B => 1 AB"),
String::from("5 B, 7 C => 1 BC"),
String::from("4 C, 1 A => 1 CA"),
String::from("2 AB, 3 BC, 4 CA => 1 FUEL"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 165);
}
#[test]
fn example3() {
let input = vec![
String::from("157 ORE => 5 NZVS"),
String::from("165 ORE => 6 DCFZ"),
String::from("44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL"),
String::from("12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ"),
String::from("179 ORE => 7 PSHF"),
String::from("177 ORE => 5 HKGWZ"),
String::from("7 DCFZ, 7 PSHF => 2 XJWVT"),
String::from("165 ORE => 2 GPVTF"),
String::from("3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 13312);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 82892753);
}
#[test]
fn example4() {
let input = vec![
String::from("2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG"),
String::from("17 NVRVD, 3 JNWZP => 8 VPVL"),
String::from("53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL"),
String::from("22 VJHF, 37 MNCFX => 5 FWMGM"),
String::from("139 ORE => 4 NVRVD"),
String::from("144 ORE => 7 JNWZP"),
String::from("5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC"),
String::from("5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV"),
String::from("145 ORE => 6 MNCFX"),
String::from("1 NVRVD => 8 CXFTF"),
String::from("1 VJHF, 6 MNCFX => 4 RFSQX"),
String::from("176 ORE => 6 VJHF"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 180697);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 5586022);
}
#[test]
fn example5() {
let input = vec![
String::from("171 ORE => 8 CNZTR"),
String::from("7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL"),
String::from("114 ORE => 4 BHXH"),
String::from("14 VRPVC => 6 BMBT"),
String::from("6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL"),
String::from("6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT"),
String::from("15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW"),
String::from("13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW"),
String::from("5 BMBT => 4 WPTQ"),
String::from("189 ORE => 9 KTJDG"),
String::from("1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP"),
String::from("12 VRPVC, 27 CNZTR => 2 XDBXC"),
String::from("15 KTJDG, 12 BHXH => 5 XCVML"),
String::from("3 BHXH, 2 VRPVC => 7 MZWV"),
String::from("121 ORE => 7 VRPVC"),
String::from("7 XCVML => 6 RJRHP"),
String::from("5 BHXH, 4 VRPVC => 5 LTCX"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 2210736);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 460664);
}
}
| test_parse | identifier_name |
main.rs | use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
const COLLECTED_ORE: u64 = 1000000000000;
#[derive(Debug, Eq, PartialEq)]
struct Reaction {
output: (String, u64),
ingredients: Vec<(String, u64)>,
}
type ReactionMap = HashMap<String, Reaction>;
fn calc_ore(reactions: &ReactionMap) -> u64 {
calc_ore_for_fuel(1, reactions)
}
fn calc_ore_for_fuel(fuel: u64, reactions: &ReactionMap) -> u64 {
let mut ore = 0;
let mut spare_chemicals = HashMap::new();
let mut requirements = Vec::new();
requirements.push((String::from("FUEL"), fuel));
let ore_name = String::from("ORE");
while !requirements.is_empty() {
let cur_requirements = requirements.clone();
requirements.clear();
for (req_chem, req_amount) in cur_requirements {
// Check whether we have any spare of this ingredient from
// other reactions.
let mut adj_req_amount = req_amount;
if let Some(spare) = spare_chemicals.get_mut(&req_chem) {
if *spare >= req_amount {
// We have enough spare to completely fulfill this
// requirement, no need to go further.
*spare -= req_amount;
continue;
} else {
// Reduce the required amount by the amount we have
// spare.
adj_req_amount = req_amount - *spare;
*spare = 0;
}
}
// Find the reaction that produces this ingredient.
let reaction = reactions
.get(&req_chem)
.unwrap_or_else(|| panic!("Couldn't find reaction for {}", req_chem));
// Find out how many times we need to run this reaction,
// and how much will be spare.
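// Worked example (illustrative numbers): needing 7 units from a
// reaction that yields 3 per run takes (7 - 1) / 3 + 1 = 3 runs,
// leaving 3 * 3 - 7 = 2 spare.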
let output_amount = reaction.output.1;
let reaction_count = (adj_req_amount - 1) / output_amount + 1;
let spare = output_amount * reaction_count - adj_req_amount;
// Update the spare count for this ingredient.
if let Some(existing_spare) = spare_chemicals.get_mut(&req_chem) {
*existing_spare += spare;
} else {
spare_chemicals.insert(req_chem, spare);
}
// Update the required ingredients list with the ingredients
// needed to make this chemical.
for ingredient in reaction.ingredients.clone() {
let ingredient_name = ingredient.0;
let ingredient_count = reaction_count * ingredient.1;
if ingredient_name == ore_name {
ore += ingredient_count;
} else {
requirements.push((ingredient_name, ingredient_count));
}
}
}
}
ore
}
fn calc_fuel_for_ore(ore: u64, reactions: &ReactionMap) -> u64 {
let mut lower = 1;
let mut current;
let mut upper = 1;
// Find an upper bound to use for binary search.
loop {
let used_ore = calc_ore_for_fuel(upper, reactions);
if used_ore <= ore {
upper *= 2;
} else {
break;
}
}
// Binary search to find the highest amount of fuel we can
// produce without exceeding the available ore.
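// Invariant (assuming one fuel is affordable at all): `lower` stays
// affordable, `upper` does not, and the loop ends when they are adjacent.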
loop {
current = (upper - lower) / 2 + lower;
let used_ore = calc_ore_for_fuel(current, reactions);
if used_ore <= ore {
lower = current;
} else {
upper = current;
}
if upper - 1 == lower {
return lower;
}
}
}
fn parse_chemical(chemical: &str) -> (String, u64) {
let mut iter = chemical.split_whitespace();
let count = iter.next().unwrap().parse::<u64>().unwrap();
let chem = iter.next().unwrap();
(String::from(chem), count)
}
fn parse_reactions(strs: &[String]) -> ReactionMap {
let mut reactions = HashMap::new();
for reaction in strs {
let mut iter = reaction.split(" => ");
let ingredients_str = iter.next().unwrap();
let output_str = iter.next().unwrap();
let mut ingredients = Vec::new();
for ingredient in ingredients_str.split(", ") {
ingredients.push(parse_chemical(ingredient));
}
let output = parse_chemical(output_str);
reactions.insert(
output.0.clone(),
Reaction {
output,
ingredients,
},
); | }
fn parse_input(filename: &str) -> ReactionMap {
let file = File::open(filename).expect("Failed to open file");
let reader = BufReader::new(file);
let reactions: Vec<String> = reader
.lines()
.map(|l| l.expect("Failed to read line"))
.map(|l| String::from(l.trim()))
.collect();
parse_reactions(reactions.as_slice())
}
fn main() {
let reactions = parse_input("input");
// Part 1
let ore = calc_ore(&reactions);
println!("Require {} ore for 1 fuel", ore);
// Part 2
let fuel = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
println!("Produce {} fuel from {} ore", fuel, COLLECTED_ORE);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse() {
let input = vec![String::from("7 A, 1 E => 1 FUEL")];
let reactions = parse_reactions(input.as_slice());
let result = reactions.get(&String::from("FUEL"));
assert!(result.is_some());
let reaction = result.unwrap();
assert_eq!(
*reaction,
Reaction {
output: (String::from("FUEL"), 1),
ingredients: vec![(String::from("A"), 7), (String::from("E"), 1),],
},
);
}
#[test]
fn example1() {
let input = vec![
String::from("10 ORE => 10 A"),
String::from("1 ORE => 1 B"),
String::from("7 A, 1 B => 1 C"),
String::from("7 A, 1 C => 1 D"),
String::from("7 A, 1 D => 1 E"),
String::from("7 A, 1 E => 1 FUEL"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 31);
}
#[test]
fn example2() {
let input = vec![
String::from("9 ORE => 2 A"),
String::from("8 ORE => 3 B"),
String::from("7 ORE => 5 C"),
String::from("3 A, 4 B => 1 AB"),
String::from("5 B, 7 C => 1 BC"),
String::from("4 C, 1 A => 1 CA"),
String::from("2 AB, 3 BC, 4 CA => 1 FUEL"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 165);
}
#[test]
fn example3() {
let input = vec![
String::from("157 ORE => 5 NZVS"),
String::from("165 ORE => 6 DCFZ"),
String::from("44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL"),
String::from("12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ"),
String::from("179 ORE => 7 PSHF"),
String::from("177 ORE => 5 HKGWZ"),
String::from("7 DCFZ, 7 PSHF => 2 XJWVT"),
String::from("165 ORE => 2 GPVTF"),
String::from("3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 13312);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 82892753);
}
#[test]
fn example4() {
let input = vec![
String::from("2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG"),
String::from("17 NVRVD, 3 JNWZP => 8 VPVL"),
String::from("53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL"),
String::from("22 VJHF, 37 MNCFX => 5 FWMGM"),
String::from("139 ORE => 4 NVRVD"),
String::from("144 ORE => 7 JNWZP"),
String::from("5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC"),
String::from("5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV"),
String::from("145 ORE => 6 MNCFX"),
String::from("1 NVRVD => 8 CXFTF"),
String::from("1 VJHF, 6 MNCFX => 4 RFSQX"),
String::from("176 ORE => 6 VJHF"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 180697);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 5586022);
}
#[test]
fn example5() {
let input = vec![
String::from("171 ORE => 8 CNZTR"),
String::from("7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL"),
String::from("114 ORE => 4 BHXH"),
String::from("14 VRPVC => 6 BMBT"),
String::from("6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL"),
String::from("6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT"),
String::from("15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW"),
String::from("13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW"),
String::from("5 BMBT => 4 WPTQ"),
String::from("189 ORE => 9 KTJDG"),
String::from("1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP"),
String::from("12 VRPVC, 27 CNZTR => 2 XDBXC"),
String::from("15 KTJDG, 12 BHXH => 5 XCVML"),
String::from("3 BHXH, 2 VRPVC => 7 MZWV"),
String::from("121 ORE => 7 VRPVC"),
String::from("7 XCVML => 6 RJRHP"),
String::from("5 BHXH, 4 VRPVC => 5 LTCX"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 2210736);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 460664);
}
} | }
reactions | random_line_split |
main.rs | use std::collections::HashMap;
use std::fs::File;
use std::io::{BufRead, BufReader};
const COLLECTED_ORE: u64 = 1000000000000;
#[derive(Debug, Eq, PartialEq)]
struct Reaction {
output: (String, u64),
ingredients: Vec<(String, u64)>,
}
type ReactionMap = HashMap<String, Reaction>;
fn calc_ore(reactions: &ReactionMap) -> u64 {
calc_ore_for_fuel(1, reactions)
}
fn calc_ore_for_fuel(fuel: u64, reactions: &ReactionMap) -> u64 {
let mut ore = 0;
let mut spare_chemicals = HashMap::new();
let mut requirements = Vec::new();
requirements.push((String::from("FUEL"), fuel));
let ore_name = String::from("ORE");
while !requirements.is_empty() {
let cur_requirements = requirements.clone();
requirements.clear();
for (req_chem, req_amount) in cur_requirements {
// Check whether we have any spare of this ingredient from
// other reactions.
let mut adj_req_amount = req_amount;
if let Some(spare) = spare_chemicals.get_mut(&req_chem) {
if *spare >= req_amount {
// We have enough spare to completely fulfill this
// requirement, no need to go further.
*spare -= req_amount;
continue;
} else {
// Reduce the required amount by the amount we have
// spare.
adj_req_amount = req_amount - *spare;
*spare = 0;
}
}
// Find the reaction that produces this ingredient.
let reaction = reactions
.get(&req_chem)
.unwrap_or_else(|| panic!("Couldn't find reaction for {}", req_chem));
// Find out how many times we need to run this reaction,
// and how much will be spare.
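// Worked example (illustrative numbers): needing 7 units from a
// reaction that yields 3 per run takes (7 - 1) / 3 + 1 = 3 runs,
// leaving 3 * 3 - 7 = 2 spare.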
let output_amount = reaction.output.1;
let reaction_count = (adj_req_amount - 1) / output_amount + 1;
let spare = output_amount * reaction_count - adj_req_amount;
// Update the spare count for this ingredient.
if let Some(existing_spare) = spare_chemicals.get_mut(&req_chem) {
*existing_spare += spare;
} else {
spare_chemicals.insert(req_chem, spare);
}
// Update the required ingredients list with the ingredients
// needed to make this chemical.
for ingredient in reaction.ingredients.clone() {
let ingredient_name = ingredient.0;
let ingredient_count = reaction_count * ingredient.1;
if ingredient_name == ore_name {
ore += ingredient_count;
} else {
requirements.push((ingredient_name, ingredient_count));
}
}
}
}
ore
}
fn calc_fuel_for_ore(ore: u64, reactions: &ReactionMap) -> u64 {
let mut lower = 1;
let mut current;
let mut upper = 1;
// Find an upper bound to use for binary search.
loop {
let used_ore = calc_ore_for_fuel(upper, reactions);
if used_ore <= ore {
upper *= 2;
} else |
}
// Binary search to find the highest amount of fuel we can
// produce without exceeding the available ore.
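// Invariant (assuming one fuel is affordable at all): `lower` stays
// affordable, `upper` does not, and the loop ends when they are adjacent.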
loop {
current = (upper - lower) / 2 + lower;
let used_ore = calc_ore_for_fuel(current, reactions);
if used_ore <= ore {
lower = current;
} else {
upper = current;
}
if upper - 1 == lower {
return lower;
}
}
}
fn parse_chemical(chemical: &str) -> (String, u64) {
let mut iter = chemical.split_whitespace();
let count = iter.next().unwrap().parse::<u64>().unwrap();
let chem = iter.next().unwrap();
(String::from(chem), count)
}
fn parse_reactions(strs: &[String]) -> ReactionMap {
let mut reactions = HashMap::new();
for reaction in strs {
let mut iter = reaction.split(" => ");
let ingredients_str = iter.next().unwrap();
let output_str = iter.next().unwrap();
let mut ingredients = Vec::new();
for ingredient in ingredients_str.split(", ") {
ingredients.push(parse_chemical(ingredient));
}
let output = parse_chemical(output_str);
reactions.insert(
output.0.clone(),
Reaction {
output,
ingredients,
},
);
}
reactions
}
fn parse_input(filename: &str) -> ReactionMap {
let file = File::open(filename).expect("Failed to open file");
let reader = BufReader::new(file);
let reactions: Vec<String> = reader
.lines()
.map(|l| l.expect("Failed to read line"))
.map(|l| String::from(l.trim()))
.collect();
parse_reactions(reactions.as_slice())
}
fn main() {
let reactions = parse_input("input");
// Part 1
let ore = calc_ore(&reactions);
println!("Require {} ore for 1 fuel", ore);
// Part 2
let fuel = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
println!("Produce {} fuel from {} ore", fuel, COLLECTED_ORE);
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse() {
let input = vec![String::from("7 A, 1 E => 1 FUEL")];
let reactions = parse_reactions(input.as_slice());
let result = reactions.get(&String::from("FUEL"));
assert!(result.is_some());
let reaction = result.unwrap();
assert_eq!(
*reaction,
Reaction {
output: (String::from("FUEL"), 1),
ingredients: vec![(String::from("A"), 7), (String::from("E"), 1),],
},
);
}
#[test]
fn example1() {
let input = vec![
String::from("10 ORE => 10 A"),
String::from("1 ORE => 1 B"),
String::from("7 A, 1 B => 1 C"),
String::from("7 A, 1 C => 1 D"),
String::from("7 A, 1 D => 1 E"),
String::from("7 A, 1 E => 1 FUEL"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 31);
}
#[test]
fn example2() {
let input = vec![
String::from("9 ORE => 2 A"),
String::from("8 ORE => 3 B"),
String::from("7 ORE => 5 C"),
String::from("3 A, 4 B => 1 AB"),
String::from("5 B, 7 C => 1 BC"),
String::from("4 C, 1 A => 1 CA"),
String::from("2 AB, 3 BC, 4 CA => 1 FUEL"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 165);
}
#[test]
fn example3() {
let input = vec![
String::from("157 ORE => 5 NZVS"),
String::from("165 ORE => 6 DCFZ"),
String::from("44 XJWVT, 5 KHKGT, 1 QDVJ, 29 NZVS, 9 GPVTF, 48 HKGWZ => 1 FUEL"),
String::from("12 HKGWZ, 1 GPVTF, 8 PSHF => 9 QDVJ"),
String::from("179 ORE => 7 PSHF"),
String::from("177 ORE => 5 HKGWZ"),
String::from("7 DCFZ, 7 PSHF => 2 XJWVT"),
String::from("165 ORE => 2 GPVTF"),
String::from("3 DCFZ, 7 NZVS, 5 HKGWZ, 10 PSHF => 8 KHKGT"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 13312);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 82892753);
}
#[test]
fn example4() {
let input = vec![
String::from("2 VPVL, 7 FWMGM, 2 CXFTF, 11 MNCFX => 1 STKFG"),
String::from("17 NVRVD, 3 JNWZP => 8 VPVL"),
String::from("53 STKFG, 6 MNCFX, 46 VJHF, 81 HVMC, 68 CXFTF, 25 GNMV => 1 FUEL"),
String::from("22 VJHF, 37 MNCFX => 5 FWMGM"),
String::from("139 ORE => 4 NVRVD"),
String::from("144 ORE => 7 JNWZP"),
String::from("5 MNCFX, 7 RFSQX, 2 FWMGM, 2 VPVL, 19 CXFTF => 3 HVMC"),
String::from("5 VJHF, 7 MNCFX, 9 VPVL, 37 CXFTF => 6 GNMV"),
String::from("145 ORE => 6 MNCFX"),
String::from("1 NVRVD => 8 CXFTF"),
String::from("1 VJHF, 6 MNCFX => 4 RFSQX"),
String::from("176 ORE => 6 VJHF"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 180697);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 5586022);
}
#[test]
fn example5() {
let input = vec![
String::from("171 ORE => 8 CNZTR"),
String::from("7 ZLQW, 3 BMBT, 9 XCVML, 26 XMNCP, 1 WPTQ, 2 MZWV, 1 RJRHP => 4 PLWSL"),
String::from("114 ORE => 4 BHXH"),
String::from("14 VRPVC => 6 BMBT"),
String::from("6 BHXH, 18 KTJDG, 12 WPTQ, 7 PLWSL, 31 FHTLT, 37 ZDVW => 1 FUEL"),
String::from("6 WPTQ, 2 BMBT, 8 ZLQW, 18 KTJDG, 1 XMNCP, 6 MZWV, 1 RJRHP => 6 FHTLT"),
String::from("15 XDBXC, 2 LTCX, 1 VRPVC => 6 ZLQW"),
String::from("13 WPTQ, 10 LTCX, 3 RJRHP, 14 XMNCP, 2 MZWV, 1 ZLQW => 1 ZDVW"),
String::from("5 BMBT => 4 WPTQ"),
String::from("189 ORE => 9 KTJDG"),
String::from("1 MZWV, 17 XDBXC, 3 XCVML => 2 XMNCP"),
String::from("12 VRPVC, 27 CNZTR => 2 XDBXC"),
String::from("15 KTJDG, 12 BHXH => 5 XCVML"),
String::from("3 BHXH, 2 VRPVC => 7 MZWV"),
String::from("121 ORE => 7 VRPVC"),
String::from("7 XCVML => 6 RJRHP"),
String::from("5 BHXH, 4 VRPVC => 5 LTCX"),
];
let reactions = parse_reactions(input.as_slice());
let result = calc_ore(&reactions);
assert_eq!(result, 2210736);
let result = calc_fuel_for_ore(COLLECTED_ORE, &reactions);
assert_eq!(result, 460664);
}
}
| {
break;
} | conditional_block |
$time.js | /**
* $time.js
* @require loot
*/
(function() {
// date/time -------------------------------------------------------
function $now() {
return new Date().getTime();
}
// $timeAgo
/*
* Javascript Humane Dates
* Copyright (c) 2008 Dean Landolt (deanlandolt.com)
* Re-write by Zach Leatherman (zachleat.com)
* RE-RE-write by andrew luetgers
* to accept timestamps and remove init work from each call
*
* Adopted from John Resig's pretty.js
* at http://ejohn.org/blog/javascript-pretty-date
* and henrah's proposed modification
* at http://ejohn.org/blog/javascript-pretty-date/#comment-297458
*
* Licensed under the MIT license.
*/
var $timeAgo = (function() {
var minusRe = /-/g,
tzRe = /[TZ]/g,
margin = 0.1;
function getFormats(lang) {
return [
[60, lang.now],
[3600, lang.minute, lang.minutes, 60], // 60 minutes, 1 minute
[86400, lang.hour, lang.hours, 3600], // 24 hours, 1 hour
[604800, lang.day, lang.days, 86400], // 7 days, 1 day
[2628000, lang.week, lang.weeks, 604800], // ~1 month, 1 week
[31536000, lang.month, lang.months, 2628000], // 1 year, ~1 month
[Infinity, lang.year, lang.years, 31536000] // Infinity, 1 year
];
}
/*
* 0 seconds && < 60 seconds Now
* 60 seconds 1 Minute
* > 60 seconds && < 60 minutes X Minutes
* 60 minutes 1 Hour
* > 60 minutes && < 24 hours X Hours
* 24 hours 1 Day
* > 24 hours && < 7 days X Days
* 7 days 1 Week
* > 7 days && < ~ 1 Month X Weeks
* ~ 1 Month 1 Month
* > ~ 1 Month && < 1 Year X Months
* 1 Year 1 Year
* > 1 Year X Years
*
* Single units are +10%. 1 Year shows first at 1 Year + 10%
*/
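// Illustrative values: 63 seconds sits inside the 10% margin, so it
// normalizes to 60 and reads "1 Minute Ago"; 90 seconds is past the
// margin and reads "2 Minutes Ago".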
function normalize(val, single) {
if(val >= single && val <= single * (1+margin)) {
return single;
}
return val;
}
function normalizeDateInput(date) {
switch (typeof date) {
case "string":
date = new Date(('' + date).replace(minusRe, "/").replace(tzRe, " "));
break;
case "number":
date = new Date(date);
break;
}
return date;
}
var timeAgo = function(date, compareTo, langCode) {
var isString = (typeof date === "string"); // checked before the conversion below
date = normalizeDateInput(date || $now());
compareTo = normalizeDateInput(compareTo || new Date);
langCode = langCode || timeAgo.defaultLang;
var lang = timeAgo.lang[langCode],
formats = timeAgo.formats[langCode];
var token,
seconds = (compareTo - date +
(compareTo.getTimezoneOffset() -
// if we received a GMT time from a string, doesn't include time zone bias
// if we got a date object, the time zone is built in, we need to remove it.
(isString ? 0 : date.getTimezoneOffset())
) * 60000
) / 1000;
if (seconds < 0) {
seconds = Math.abs(seconds);
token = '';
} else {
token = ' ' + lang.ago;
}
for(var i = 0, format = formats[0]; formats[i]; format = formats[++i]) {
if(seconds < format[0]) {
if(i === 0) {
// Now
return format[1];
}
var val = Math.ceil(normalize(seconds, format[3]) / (format[3]));
return val +
' ' +
(val != 1 ? format[2] : format[1]) +
(i > 0 ? token : '');
}
}
};
timeAgo.lang = {};
timeAgo.formats = {};
timeAgo.setLang = function(code, newLang) {
this.defaultLang = code;
this.lang[code] = newLang;
this.formats[code] = getFormats(newLang);
};
timeAgo.setLang("en", {
ago: 'Ago',
now: 'Just Now',
minute: 'Minute',
minutes: 'Minutes',
hour: 'Hour',
hours: 'Hours',
day: 'Day',
days: 'Days',
week: 'Week',
weeks: 'Weeks',
month: 'Month',
months: 'Months',
year: 'Year',
years: 'Years'
});
return timeAgo;
}());
var $timer = (function() {
var epoch = new Date(1970, 0, 1, 0, 0, 0, 0).valueOf(); // local midnight; only the time-of-day fields are used
var timerApi = {
parent: null,
interval: null,
started: 0,
elapsed: 0,
start: function() {
var that = this;
this.started = $now();
this.interval = setInterval(function() {
that.update();
}, 1000);
},
stop: function() {
clearInterval(this.interval);
this.reset();
},
pause: function() {
clearInterval(this.interval);
},
reset: function() {
this.started = $now();
this.update();
},
update: function() {
this.elapsed = $now() - this.started;
this.parent.innerHTML = this.format(this.elapsed);
},
format: function(ms) {
// console.log(ms, $now() - ms, new Date(ms - $now()).toString());
var d = new Date(ms + epoch).toString().replace(/.*(\d{2}:\d{2}:\d{2}).*/, '$1');
var x = (ms % 1000) + "";
while (x.length < 3) {
x = "0" + x;
}
d += "." + x;
return d.substr(0, d.length - 4);
}
};
return function(parent) {
var timer = $new(timerApi);
timer.parent = parent;
return timer;
}
}());
/*
* Date Format 1.2.3
* (c) 2007-2009 Steven Levithan <stevenlevithan.com>
* MIT license
*
* Includes enhancements by Scott Trenda <scott.trenda.net>
* and Kris Kowal <cixar.com/~kris.kowal/>
*
* Accepts a date, a mask, or a date and a mask.
* Returns a formatted version of the given date.
* The date defaults to the current date/time.
* The mask defaults to dateFormat.masks.default.
* see http://blog.stevenlevithan.com/archives/date-time-format
*/
/* Mask Description
d Day of the month as digits; no leading zero for single-digit days.
dd Day of the month as digits; leading zero for single-digit days.
ddd Day of the week as a three-letter abbreviation.
dddd Day of the week as its full name.
m Month as digits; no leading zero for single-digit months.
mm Month as digits; leading zero for single-digit months.
mmm Month as a three-letter abbreviation.
mmmm Month as its full name.
yy Year as last two digits; leading zero for years less than 10.
yyyy Year represented by four digits.
h Hours; no leading zero for single-digit hours (12-hour clock).
hh Hours; leading zero for single-digit hours (12-hour clock).
H Hours; no leading zero for single-digit hours (24-hour clock).
HH Hours; leading zero for single-digit hours (24-hour clock).
M Minutes; no leading zero for single-digit minutes.
MM Minutes; leading zero for single-digit minutes.
s Seconds; no leading zero for single-digit seconds.
ss Seconds; leading zero for single-digit seconds.
l or L Milliseconds. l gives 3 digits. L gives 2 digits.
t Lowercase, single-character time marker string: a or p.
tt Lowercase, two-character time marker string: am or pm.
T Uppercase, single-character time marker string: A or P.
TT Uppercase, two-character time marker string: AM or PM.
Z US timezone abbreviation, e.g. EST or MDT. With non-US timezones or in the Opera browser, the GMT/UTC offset is returned, e.g. GMT-0500
o GMT/UTC timezone offset, e.g. -0500 or +0230.
S The date's ordinal suffix (st, nd, rd, or th). Works well with d.
'…' or "…" Literal character sequence. Surrounding quotes are removed.
UTC: Must be the first four characters of the mask. Converts the date from local time to UTC/GMT/Zulu time before applying the mask. The "UTC:" prefix is removed.
*/
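// Example with a hypothetical date: $dateFormat(new Date(2009, 5, 9), "mmmm dS, yyyy")
// gives "June 9th, 2009"; named masks work too, e.g. the "isoDate" mask
// gives "2009-06-09".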
var $dateFormat = (function () {
var token = /d{1,4}|m{1,4}|yy(?:yy)?|([HhMsTt])\1?|[LloSZ]|"[^"]*"|'[^']*'/g,
timezone = /\b(?:[PMCEA][SDP]T|(?:Pacific|Mountain|Central|Eastern|Atlantic) (?:Standard|Daylight|Prevailing) Time|(?:GMT|UTC)(?:[-+]\d{4})?)\b/g,
timezoneClip = /[^-+\dA-Z]/g,
pad = function(val, len) {
val = String(val);
len = len || 2;
while (val.length < len) val = "0" + val;
return val;
};
// Regexes and supporting functions are cached through closure
return function(date, mask, utc, langCode) {
if (!date) {
return date + "";
}
var dF = $dateFormat;
langCode = langCode || dF.defaultLang;
var lang = dF.lang[langCode];
// You can't provide utc if you skip other args (use the "UTC:" mask prefix)
if (arguments.length == 1 && Object.prototype.toString.call(date) == "[object String]" && !/\d/.test(date)) {
mask = date;
date = undefined;
}
// Passing date through Date applies Date.parse, if necessary
date = date ? new Date(date) : new Date;
if (!$isDate(date)) throw SyntaxError("invalid date");
mask = String(dF.masks[mask] || mask || dF.masks["default"]);
// Allow setting the utc argument via the mask
if (mask.slice(0, 4) == "UTC:") {
mask = mask.slice(4);
utc = true;
}
var _ = utc ? "getUTC" : "get",
d = date[_ + "Date"](),
D = date[_ + "Day"](),
m = date[_ + "Month"](),
y = date[_ + "FullYear"](),
H = date[_ + "Hours"](),
M = date[_ + "Minutes"](),
s = date[_ + "Seconds"](),
L = date[_ + "Milliseconds"](),
o = utc ? 0 : date.getTimezoneOffset(),
flags = {
d: d,
dd: pad(d),
ddd: lang.dayNames[D],
dddd: lang.dayNames[D + 7],
m: m + 1,
mm: pad(m + 1),
mmm: lang.monthNames[m],
mmmm: lang.monthNames[m + 12],
yy: String(y).slice(2),
yyyy: y,
h: H % 12 || 12,
hh: pad(H % 12 || 12),
H: H,
HH: pad(H),
M: M,
MM: pad(M),
s: s,
ss: pad(s),
l: pad(L, 3),
L: pad(L > 99 ? Math.round(L / 10) : L),
t: H < 12 ? "a" : "p",
tt: H < 12 ? "am" : "pm",
T: H < 12 ? "A" : "P",
TT: H < 12 ? "AM" : "PM",
Z: utc ? "UTC" : (String(date).match(timezone) || [""]).pop().replace(timezoneClip, ""),
o: (o > 0 ? "-" : "+") + pad(Math.floor(Math.abs(o) / 60) * 100 + Math.abs(o) % 60, 4),
S: ["th", "st", "nd", "rd"][d % 10 > 3 ? 0 : (d % 100 - d % 10 != 10) * d % 10]
};
return mask.replace(token, function ($0) {
return $0 in flags ? flags[$0] : $0.slice(1, $0.length - 1);
});
};
}());
// Some common format strings
$dateFormat.masks = {
"default": "ddd mmm dd yyyy HH:MM:ss",
shortDate: "m/d/yy",
mediumDate: "mmm d, yyyy",
longDate: "mmmm d, yyyy",
fullDate: "dddd, mmmm d, yyyy",
shortTime: "h:MM TT",
mediumTime: "h:MM:ss TT",
longTime: "h:MM:ss TT Z",
isoDate: "yyyy-mm-dd",
isoTime: "HH:MM:ss",
isoDateTime: "yyyy-mm-dd'T'HH:MM:ss",
isoUtcDateTime: "UTC:yyyy-mm-dd'T'HH:MM:ss'Z'"
};
// Internationalization strings
$dateFormat.defaultLang = "en";
$dateFormat.lang = {
en: {
dayNames: [
"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat",
"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"
],
monthNames: [
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"
]
}
};
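// Illustrative call: $secondsToTime(3725.5) returns h: 1, m: 2, s: 5,
// ms: 500, "hh:mm": "01:02", "hh:mm:ss": "01:02:05" and
// formatted: "1 hour 2 minutes".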
function $secondsToTime(_s) {
var h, m, s, ms, pad, f1, f2, f3;
ms = Math.round((parseFloat(_s) % 1)*1000);
s = parseInt(_s, 10);
h = Math.floor( s / ( 60 * 60 ) );
s -= h * ( 60 * 60 );
m = Math.floor( s / 60 );
s -= m * 60;
pad = function(v) {return (v > 9) ? v : "0"+v;};
f1 = $map([h, m], pad).join(":");
f2 = $map([h, m, s], pad).join(":");
// create x hours x minutes string
// if no hours it will be x minutes
// if no hours or minutes will be x seconds
// plurality of units is handled
var hStr = h ? h + " hour" + (h>1 ? "s " : " ") : "",
mStr = (h || m) ? m + " minute" + (m>1 ? "s" : "") : "",
sStr = (!m && s) ? s + " second" + (s>1 ? "s" : "") : "";
f3 = hStr + mStr + sStr;
return {h: h, m: m, s: s, ms: ms, "hh:mm": f1, "hh:mm:ss": f2, formatted: f3};
}
function $millisToTime(ms) {
| oot.extend({
$now: $now,
$date: $dateFormat,
$timeAgo: $timeAgo,
$timer: $timer,
$secondsToTime: $secondsToTime,
$millisToTime: $millisToTime
});
}()); | return $secondsToTime(parseInt(ms, 10)/1000);
}
l | identifier_body |
$time.js | /**
* $time.js
* @require loot
*/
(function() {
// date/time -------------------------------------------------------
function $now() {
return new Date().getTime();
}
// $timeAgo
/*
* Javascript Humane Dates
* Copyright (c) 2008 Dean Landolt (deanlandolt.com)
* Re-write by Zach Leatherman (zachleat.com)
* RE-RE-write by andrew luetgers
* to accept timestamps and remove init work from each call
*
* Adopted from John Resig's pretty.js
* at http://ejohn.org/blog/javascript-pretty-date
* and henrah's proposed modification
* at http://ejohn.org/blog/javascript-pretty-date/#comment-297458
*
* Licensed under the MIT license.
*/
var $timeAgo = (function() {
var minusRe = /-/g,
tzRe = /[TZ]/g,
margin = 0.1;
function getFormats(lang) {
return [
[60, lang.now],
[3600, lang.minute, lang.minutes, 60], // 60 minutes, 1 minute
[86400, lang.hour, lang.hours, 3600], // 24 hours, 1 hour
[604800, lang.day, lang.days, 86400], // 7 days, 1 day
[2628000, lang.week, lang.weeks, 604800], // ~1 month, 1 week
[31536000, lang.month, lang.months, 2628000], // 1 year, ~1 month
[Infinity, lang.year, lang.years, 31536000] // Infinity, 1 year
];
}
/*
* 0 seconds && < 60 seconds Now
* 60 seconds 1 Minute
* > 60 seconds && < 60 minutes X Minutes
* 60 minutes 1 Hour
* > 60 minutes && < 24 hours X Hours
* 24 hours 1 Day
* > 24 hours && < 7 days X Days
* 7 days 1 Week
* > 7 days && < ~ 1 Month X Weeks
* ~ 1 Month 1 Month
* > ~ 1 Month && < 1 Year X Months
* 1 Year 1 Year
* > 1 Year X Years
*
* Single units are +10%. 1 Year shows first at 1 Year + 10%
*/
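// Illustrative values: 63 seconds sits inside the 10% margin, so it
// normalizes to 60 and reads "1 Minute Ago"; 90 seconds is past the
// margin and reads "2 Minutes Ago".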
function | (val, single) {
if(val >= single && val <= single * (1+margin)) {
return single;
}
return val;
}
function normalizeDateInput(date) {
switch (typeof date) {
case "string":
date = new Date(('' + date).replace(minusRe, "/").replace(tzRe, " "));
break;
case "number":
date = new Date(date);
break;
}
return date;
}
var timeAgo = function(date, compareTo, langCode) {
var isString = (typeof date === "string"); // checked before the conversion below
date = normalizeDateInput(date || $now());
compareTo = normalizeDateInput(compareTo || new Date);
langCode = langCode || timeAgo.defaultLang;
var lang = timeAgo.lang[langCode],
formats = timeAgo.formats[langCode];
var token,
seconds = (compareTo - date +
(compareTo.getTimezoneOffset() -
// if we received a GMT time from a string, doesn't include time zone bias
// if we got a date object, the time zone is built in, we need to remove it.
(isString ? 0 : date.getTimezoneOffset())
) * 60000
) / 1000;
if (seconds < 0) {
seconds = Math.abs(seconds);
token = '';
} else {
token = ' ' + lang.ago;
}
for(var i = 0, format = formats[0]; formats[i]; format = formats[++i]) {
if(seconds < format[0]) {
if(i === 0) {
// Now
return format[1];
}
var val = Math.ceil(normalize(seconds, format[3]) / (format[3]));
return val +
' ' +
(val != 1 ? format[2] : format[1]) +
(i > 0 ? token : '');
}
}
};
timeAgo.lang = {};
timeAgo.formats = {};
timeAgo.setLang = function(code, newLang) {
this.defaultLang = code;
this.lang[code] = newLang;
this.formats[code] = getFormats(newLang);
};
timeAgo.setLang("en", {
ago: 'Ago',
now: 'Just Now',
minute: 'Minute',
minutes: 'Minutes',
hour: 'Hour',
hours: 'Hours',
day: 'Day',
days: 'Days',
week: 'Week',
weeks: 'Weeks',
month: 'Month',
months: 'Months',
year: 'Year',
years: 'Years'
});
return timeAgo;
}());
var $timer = (function() {
var epoch = new Date(1970, 0, 1, 0, 0, 0, 0).valueOf(); // local midnight; only the time-of-day fields are used
var timerApi = {
parent: null,
interval: null,
started: 0,
elapsed: 0,
start: function() {
var that = this;
this.started = $now();
this.interval = setInterval(function() {
that.update();
}, 1000);
},
stop: function() {
clearInterval(this.interval);
this.reset();
},
pause: function() {
clearInterval(this.interval);
},
reset: function() {
this.started = $now();
this.update();
},
update: function() {
this.elapsed = $now() - this.started;
this.parent.innerHTML = this.format(this.elapsed);
},
format: function(ms) {
// console.log(ms, $now() - ms, new Date(ms - $now()).toString());
var d = new Date(ms + epoch).toString().replace(/.*(\d{2}:\d{2}:\d{2}).*/, '$1');
var x = (ms % 1000) + "";
while (x.length < 3) {
x = "0" + x;
}
d += "." + x;
return d.substr(0, d.length - 4);
}
};
return function(parent) {
var timer = $new(timerApi);
timer.parent = parent;
return timer;
}
}());
/*
* Date Format 1.2.3
* (c) 2007-2009 Steven Levithan <stevenlevithan.com>
* MIT license
*
* Includes enhancements by Scott Trenda <scott.trenda.net>
* and Kris Kowal <cixar.com/~kris.kowal/>
*
* Accepts a date, a mask, or a date and a mask.
* Returns a formatted version of the given date.
* The date defaults to the current date/time.
* The mask defaults to dateFormat.masks.default.
* see http://blog.stevenlevithan.com/archives/date-time-format
*/
/* Mask Description
d Day of the month as digits; no leading zero for single-digit days.
dd Day of the month as digits; leading zero for single-digit days.
ddd Day of the week as a three-letter abbreviation.
dddd Day of the week as its full name.
m Month as digits; no leading zero for single-digit months.
mm Month as digits; leading zero for single-digit months.
mmm Month as a three-letter abbreviation.
mmmm Month as its full name.
yy Year as last two digits; leading zero for years less than 10.
yyyy Year represented by four digits.
h Hours; no leading zero for single-digit hours (12-hour clock).
hh Hours; leading zero for single-digit hours (12-hour clock).
H Hours; no leading zero for single-digit hours (24-hour clock).
HH Hours; leading zero for single-digit hours (24-hour clock).
M Minutes; no leading zero for single-digit minutes.
MM Minutes; leading zero for single-digit minutes.
s Seconds; no leading zero for single-digit seconds.
ss Seconds; leading zero for single-digit seconds.
l or L Milliseconds. l gives 3 digits. L gives 2 digits.
t Lowercase, single-character time marker string: a or p.
tt Lowercase, two-character time marker string: am or pm.
T Uppercase, single-character time marker string: A or P.
TT Uppercase, two-character time marker string: AM or PM.
Z US timezone abbreviation, e.g. EST or MDT. With non-US timezones or in the Opera browser, the GMT/UTC offset is returned, e.g. GMT-0500
o GMT/UTC timezone offset, e.g. -0500 or +0230.
S The date's ordinal suffix (st, nd, rd, or th). Works well with d.
'…' or "…" Literal character sequence. Surrounding quotes are removed.
UTC: Must be the first four characters of the mask. Converts the date from local time to UTC/GMT/Zulu time before applying the mask. The "UTC:" prefix is removed.
*/
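// Example with a hypothetical date: $dateFormat(new Date(2009, 5, 9), "mmmm dS, yyyy")
// gives "June 9th, 2009"; named masks work too, e.g. the "isoDate" mask
// gives "2009-06-09".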
var $dateFormat = (function () {
var token = /d{1,4}|m{1,4}|yy(?:yy)?|([HhMsTt])\1?|[LloSZ]|"[^"]*"|'[^']*'/g,
timezone = /\b(?:[PMCEA][SDP]T|(?:Pacific|Mountain|Central|Eastern|Atlantic) (?:Standard|Daylight|Prevailing) Time|(?:GMT|UTC)(?:[-+]\d{4})?)\b/g,
timezoneClip = /[^-+\dA-Z]/g,
pad = function(val, len) {
val = String(val);
len = len || 2;
while (val.length < len) val = "0" + val;
return val;
};
// Regexes and supporting functions are cached through closure
return function(date, mask, utc, langCode) {
if (!date) {
return date + "";
}
var dF = $dateFormat;
langCode = langCode || dF.defaultLang;
var lang = dF.lang[langCode];
// You can't provide utc if you skip other args (use the "UTC:" mask prefix)
if (arguments.length == 1 && Object.prototype.toString.call(date) == "[object String]" && !/\d/.test(date)) {
mask = date;
date = undefined;
}
// Passing date through Date applies Date.parse, if necessary
date = date ? new Date(date) : new Date;
if (!$isDate(date)) throw SyntaxError("invalid date");
mask = String(dF.masks[mask] || mask || dF.masks["default"]);
// Allow setting the utc argument via the mask
if (mask.slice(0, 4) == "UTC:") {
mask = mask.slice(4);
utc = true;
}
var _ = utc ? "getUTC" : "get",
d = date[_ + "Date"](),
D = date[_ + "Day"](),
m = date[_ + "Month"](),
y = date[_ + "FullYear"](),
H = date[_ + "Hours"](),
M = date[_ + "Minutes"](),
s = date[_ + "Seconds"](),
L = date[_ + "Milliseconds"](),
o = utc ? 0 : date.getTimezoneOffset(),
flags = {
d: d,
dd: pad(d),
ddd: lang.dayNames[D],
dddd: lang.dayNames[D + 7],
m: m + 1,
mm: pad(m + 1),
mmm: lang.monthNames[m],
mmmm: lang.monthNames[m + 12],
yy: String(y).slice(2),
yyyy: y,
h: H % 12 || 12,
hh: pad(H % 12 || 12),
H: H,
HH: pad(H),
M: M,
MM: pad(M),
s: s,
ss: pad(s),
l: pad(L, 3),
L: pad(L > 99 ? Math.round(L / 10) : L),
t: H < 12 ? "a" : "p",
tt: H < 12 ? "am" : "pm",
T: H < 12 ? "A" : "P",
TT: H < 12 ? "AM" : "PM",
Z: utc ? "UTC" : (String(date).match(timezone) || [""]).pop().replace(timezoneClip, ""),
o: (o > 0 ? "-" : "+") + pad(Math.floor(Math.abs(o) / 60) * 100 + Math.abs(o) % 60, 4),
S: ["th", "st", "nd", "rd"][d % 10 > 3 ? 0 : (d % 100 - d % 10 != 10) * d % 10]
};
return mask.replace(token, function ($0) {
return $0 in flags ? flags[$0] : $0.slice(1, $0.length - 1);
});
};
}());
// Some common format strings
$dateFormat.masks = {
"default": "ddd mmm dd yyyy HH:MM:ss",
shortDate: "m/d/yy",
mediumDate: "mmm d, yyyy",
longDate: "mmmm d, yyyy",
fullDate: "dddd, mmmm d, yyyy",
shortTime: "h:MM TT",
mediumTime: "h:MM:ss TT",
longTime: "h:MM:ss TT Z",
isoDate: "yyyy-mm-dd",
isoTime: "HH:MM:ss",
isoDateTime: "yyyy-mm-dd'T'HH:MM:ss",
isoUtcDateTime: "UTC:yyyy-mm-dd'T'HH:MM:ss'Z'"
};
// Internationalization strings
$dateFormat.defaultLang = "en";
$dateFormat.lang = {
en: {
dayNames: [
"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat",
"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"
],
monthNames: [
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"
]
}
};
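// Illustrative call: $secondsToTime(3725.5) returns h: 1, m: 2, s: 5,
// ms: 500, "hh:mm": "01:02", "hh:mm:ss": "01:02:05" and
// formatted: "1 hour 2 minutes".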
function $secondsToTime(_s) {
var h, m, s, ms, pad, f1, f2, f3;
ms = Math.round((parseFloat(_s) % 1)*1000);
s = parseInt(_s, 10);
h = Math.floor( s / ( 60 * 60 ) );
s -= h * ( 60 * 60 );
m = Math.floor( s / 60 );
s -= m * 60;
pad = function(v) {return (v > 9) ? v : "0"+v;};
f1 = $map([h, m], pad).join(":");
f2 = $map([h, m, s], pad).join(":");
// create x hours x minutes string
// if no hours it will be x minutes
// if no hours or minutes will be x seconds
// plurality of units is handled
var hStr = h ? h + " hour" + (h>1 ? "s " : " ") : "",
mStr = (h || m) ? m + " minute" + (m>1 ? "s" : "") : "",
sStr = (!m && s) ? s + " second" + (s>1 ? "s" : "") : "";
f3 = hStr + mStr + sStr;
return {h: h, m: m, s: s, ms: ms, "hh:mm": f1, "hh:mm:ss": f2, formatted: f3};
}
function $millisToTime(ms) {
return $secondsToTime(parseInt(ms, 10)/1000);
}
loot.extend({
$now: $now,
$date: $dateFormat,
$timeAgo: $timeAgo,
$timer: $timer,
$secondsToTime: $secondsToTime,
$millisToTime: $millisToTime
});
}()); | normalize | identifier_name |
$time.js | /**
* $time.js
* @require loot
*/
(function() {
// date/time -------------------------------------------------------
function $now() {
return new Date().getTime();
}
// $timeAgo
/*
* Javascript Humane Dates
* Copyright (c) 2008 Dean Landolt (deanlandolt.com)
* Re-write by Zach Leatherman (zachleat.com)
* RE-RE-write by andrew luetgers
* to accept timestamps and remove init work from each call
*
* Adopted from John Resig's pretty.js
* at http://ejohn.org/blog/javascript-pretty-date
* and henrah's proposed modification
* at http://ejohn.org/blog/javascript-pretty-date/#comment-297458
*
* Licensed under the MIT license.
*/
var $timeAgo = (function() {
var minusRe = /-/g,
tzRe = /[TZ]/g,
margin = 0.1;
function getFormats(lang) {
return [
[60, lang.now],
[3600, lang.minute, lang.minutes, 60], // 60 minutes, 1 minute
[86400, lang.hour, lang.hours, 3600], // 24 hours, 1 hour
[604800, lang.day, lang.days, 86400], // 7 days, 1 day
[2628000, lang.week, lang.weeks, 604800], // ~1 month, 1 week
[31536000, lang.month, lang.months, 2628000], // 1 year, ~1 month
[Infinity, lang.year, lang.years, 31536000] // Infinity, 1 year
];
}
/*
* 0 seconds && < 60 seconds Now
* 60 seconds 1 Minute
* > 60 seconds && < 60 minutes X Minutes
* 60 minutes 1 Hour
* > 60 minutes && < 24 hours X Hours
* 24 hours 1 Day
* > 24 hours && < 7 days X Days
* 7 days 1 Week
* > 7 days && < ~ 1 Month X Weeks
* ~ 1 Month 1 Month
* > ~ 1 Month && < 1 Year X Months
* 1 Year 1 Year
* > 1 Year X Years
*
* Single units are +10%. 1 Year shows first at 1 Year + 10%
*/
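// Illustrative values: 63 seconds sits inside the 10% margin, so it
// normalizes to 60 and reads "1 Minute Ago"; 90 seconds is past the
// margin and reads "2 Minutes Ago".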
function normalize(val, single) {
if(val >= single && val <= single * (1+margin)) {
return single;
}
return val;
}
function normalizeDateInput(date) {
switch (typeof date) {
case "string":
date = new Date(('' + date).replace(minusRe, "/").replace(tzRe, " "));
break;
case "number":
date = new Date(date);
break;
}
return date;
}
var timeAgo = function(date, compareTo, langCode) {
var isString = (typeof date === "string"); // checked before the conversion below
date = normalizeDateInput(date || $now());
compareTo = normalizeDateInput(compareTo || new Date);
langCode = langCode || timeAgo.defaultLang;
var lang = timeAgo.lang[langCode],
formats = timeAgo.formats[langCode];
var token,
seconds = (compareTo - date +
(compareTo.getTimezoneOffset() -
// if we received a GMT time from a string, doesn't include time zone bias
// if we got a date object, the time zone is built in, we need to remove it.
(isString ? 0 : date.getTimezoneOffset())
) * 60000
) / 1000;
if (seconds < 0) {
seconds = Math.abs(seconds);
token = '';
} else {
token = ' ' + lang.ago;
}
for(var i = 0, format = formats[0]; formats[i]; format = formats[++i]) {
if(seconds < format[0]) {
if(i === 0) {
// Now
return format[1];
}
var val = Math.ceil(normalize(seconds, format[3]) / (format[3]));
return val +
' ' +
(val != 1 ? format[2] : format[1]) +
(i > 0 ? token : '');
}
}
};
timeAgo.lang = {};
timeAgo.formats = {};
timeAgo.setLang = function(code, newLang) {
this.defaultLang = code;
this.lang[code] = newLang;
this.formats[code] = getFormats(newLang);
};
timeAgo.setLang("en", {
ago: 'Ago',
now: 'Just Now',
minute: 'Minute',
minutes: 'Minutes',
hour: 'Hour',
hours: 'Hours',
day: 'Day',
days: 'Days',
week: 'Week',
weeks: 'Weeks',
month: 'Month',
months: 'Months',
year: 'Year',
years: 'Years'
});
return timeAgo;
}());
var $timer = (function() {
var epoch = new Date(1970, 0, 1, 0, 0, 0, 0).valueOf(); // local midnight; only the time-of-day fields are used
var timerApi = {
parent: null,
interval: null,
started: 0,
elapsed: 0,
start: function() {
var that = this;
this.started = $now();
this.interval = setInterval(function() {
that.update();
}, 1000);
},
stop: function() {
clearInterval(this.interval);
this.reset();
},
pause: function() {
clearInterval(this.interval);
},
reset: function() {
this.started = $now();
this.update();
},
update: function() {
this.elapsed = $now() - this.started;
this.parent.innerHTML = this.format(this.elapsed);
},
format: function(ms) {
// console.log(ms, $now() - ms, new Date(ms - $now()).toString());
var d = new Date(ms + epoch).toString().replace(/.*(\d{2}:\d{2}:\d{2}).*/, '$1');
var x = (ms % 1000) + "";
while (x.length < 3) {
x = "0" + x;
}
d += "." + x;
return d.substr(0, d.length - 4);
}
};
return function(parent) {
var timer = $new(timerApi);
timer.parent = parent;
return timer;
}
}());
/*
* Date Format 1.2.3
* (c) 2007-2009 Steven Levithan <stevenlevithan.com>
* MIT license
*
* Includes enhancements by Scott Trenda <scott.trenda.net>
* and Kris Kowal <cixar.com/~kris.kowal/>
*
* Accepts a date, a mask, or a date and a mask.
* Returns a formatted version of the given date.
* The date defaults to the current date/time.
* The mask defaults to dateFormat.masks.default.
* see http://blog.stevenlevithan.com/archives/date-time-format
*/
/* Mask Description
d Day of the month as digits; no leading zero for single-digit days.
dd Day of the month as digits; leading zero for single-digit days.
ddd Day of the week as a three-letter abbreviation.
dddd Day of the week as its full name.
m Month as digits; no leading zero for single-digit months.
mm Month as digits; leading zero for single-digit months.
mmm Month as a three-letter abbreviation.
mmmm Month as its full name.
yy Year as last two digits; leading zero for years less than 10.
yyyy Year represented by four digits.
h Hours; no leading zero for single-digit hours (12-hour clock).
hh Hours; leading zero for single-digit hours (12-hour clock).
H Hours; no leading zero for single-digit hours (24-hour clock).
HH Hours; leading zero for single-digit hours (24-hour clock).
M Minutes; no leading zero for single-digit minutes.
MM Minutes; leading zero for single-digit minutes.
s Seconds; no leading zero for single-digit seconds.
ss Seconds; leading zero for single-digit seconds.
l or L Milliseconds. l gives 3 digits. L gives 2 digits.
t Lowercase, single-character time marker string: a or p.
tt Lowercase, two-character time marker string: am or pm.
T Uppercase, single-character time marker string: A or P.
TT Uppercase, two-character time marker string: AM or PM.
Z US timezone abbreviation, e.g. EST or MDT. With non-US timezones or in the Opera browser, the GMT/UTC offset is returned, e.g. GMT-0500
o GMT/UTC timezone offset, e.g. -0500 or +0230.
S The date's ordinal suffix (st, nd, rd, or th). Works well with d.
'…' or "…" Literal character sequence. Surrounding quotes are removed.
UTC: Must be the first four characters of the mask. Converts the date from local time to UTC/GMT/Zulu time before applying the mask. The "UTC:" prefix is removed.
*/
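// Example with a hypothetical date: $dateFormat(new Date(2009, 5, 9), "mmmm dS, yyyy")
// gives "June 9th, 2009"; named masks work too, e.g. the "isoDate" mask
// gives "2009-06-09".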
var $dateFormat = (function () {
var token = /d{1,4}|m{1,4}|yy(?:yy)?|([HhMsTt])\1?|[LloSZ]|"[^"]*"|'[^']*'/g,
timezone = /\b(?:[PMCEA][SDP]T|(?:Pacific|Mountain|Central|Eastern|Atlantic) (?:Standard|Daylight|Prevailing) Time|(?:GMT|UTC)(?:[-+]\d{4})?)\b/g,
timezoneClip = /[^-+\dA-Z]/g,
pad = function(val, len) {
val = String(val);
len = len || 2;
while (val.length < len) val = "0" + val;
return val;
};
// Regexes and supporting functions are cached through closure
return function(date, mask, utc, langCode) {
if (!date) {
return date + "";
}
var dF = $dateFormat;
langCode = langCode || dF.defaultLang;
var lang = dF.lang[langCode];
// You can't provide utc if you skip other args (use the "UTC:" mask prefix)
if (arguments.length == 1 && Object.prototype.toString.call(date) == "[object String]" && !/\d/.test(date)) {
mask = date;
date = undefined;
}
// Passing date through Date applies Date.parse, if necessary
date = date ? new Date(date) : new Date;
if (!$isDate(date)) throw SyntaxError("invalid date");
mask = String(dF.masks[mask] || mask || dF.masks["default"]);
// Allow setting the utc argument via the mask
if (mask.slice(0, 4) == "UTC:") {
mask = mask.slice(4);
utc = true;
}
var _ = utc ? "getUTC" : "get",
d = date[_ + "Date"](),
D = date[_ + "Day"](),
m = date[_ + "Month"](),
y = date[_ + "FullYear"](),
H = date[_ + "Hours"](),
M = date[_ + "Minutes"](),
s = date[_ + "Seconds"](),
L = date[_ + "Milliseconds"](),
o = utc ? 0 : date.getTimezoneOffset(),
flags = {
d: d,
dd: pad(d),
ddd: lang.dayNames[D],
dddd: lang.dayNames[D + 7],
m: m + 1,
mm: pad(m + 1),
mmm: lang.monthNames[m],
mmmm: lang.monthNames[m + 12],
yy: String(y).slice(2),
yyyy: y,
h: H % 12 || 12,
hh: pad(H % 12 || 12),
H: H,
HH: pad(H), | l: pad(L, 3),
L: pad(L > 99 ? Math.round(L / 10) : L),
t: H < 12 ? "a" : "p",
tt: H < 12 ? "am" : "pm",
T: H < 12 ? "A" : "P",
TT: H < 12 ? "AM" : "PM",
Z: utc ? "UTC" : (String(date).match(timezone) || [""]).pop().replace(timezoneClip, ""),
o: (o > 0 ? "-" : "+") + pad(Math.floor(Math.abs(o) / 60) * 100 + Math.abs(o) % 60, 4),
S: ["th", "st", "nd", "rd"][d % 10 > 3 ? 0 : (d % 100 - d % 10 != 10) * d % 10]
};
return mask.replace(token, function ($0) {
return $0 in flags ? flags[$0] : $0.slice(1, $0.length - 1);
});
};
}());
// Some common format strings
$dateFormat.masks = {
"default": "ddd mmm dd yyyy HH:MM:ss",
shortDate: "m/d/yy",
mediumDate: "mmm d, yyyy",
longDate: "mmmm d, yyyy",
fullDate: "dddd, mmmm d, yyyy",
shortTime: "h:MM TT",
mediumTime: "h:MM:ss TT",
longTime: "h:MM:ss TT Z",
isoDate: "yyyy-mm-dd",
isoTime: "HH:MM:ss",
isoDateTime: "yyyy-mm-dd'T'HH:MM:ss",
isoUtcDateTime: "UTC:yyyy-mm-dd'T'HH:MM:ss'Z'"
};
// Internationalization strings
$dateFormat.defaultLang = "en";
$dateFormat.lang = {
en: {
dayNames: [
"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat",
"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"
],
monthNames: [
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"
]
}
};
function $secondsToTime(_s) {
var h, m, s, ms, pad, f1, f2, f3;
ms = Math.round((parseFloat(_s) % 1)*1000);
s = parseInt(_s, 10);
h = Math.floor( s / ( 60 * 60 ) );
s -= h * ( 60 * 60 );
m = Math.floor( s / 60 );
s -= m * 60;
pad = function(v) {return (v > 9) ? v : "0"+v;};
f1 = $map([h, m], pad).join(":");
f2 = $map([h, m, s], pad).join(":");
// create x hours x minutes string
// if no hours it will be x minutes
// if no hours or minutes will be x seconds
// plurality of units is handled
var hStr = h ? h + " hour" + (h>1 ? "s " : " ") : "",
mStr = (h || m) ? m + " minute" + (m>1 ? "s" : "") : "",
sStr = (!m && s) ? s + " second" + (s>1 ? "s" : "") : "";
f3 = hStr + mStr + sStr;
return {h: h, m: m, s: s, ms: ms, "hh:mm": f1, "hh:mm:ss": f2, formatted: f3};
}
function $millisToTime(ms) {
return $secondsToTime(parseInt(ms, 10)/1000);
}
loot.extend({
$now: $now,
$date: $dateFormat,
$timeAgo: $timeAgo,
$timer: $timer,
$secondsToTime: $secondsToTime,
$millisToTime: $millisToTime
});
}()); | M: M,
MM: pad(M),
s: s,
ss: pad(s), | random_line_split |
/**
* $time.js
* @require loot
*/
(function() {
// date/time -------------------------------------------------------
function $now() {
return new Date().getTime();
}
/* $timeAgo
 *
* Javascript Humane Dates
* Copyright (c) 2008 Dean Landolt (deanlandolt.com)
* Re-write by Zach Leatherman (zachleat.com)
* RE-RE-write by andrew luetgers
* to accept timestamps and remove init work from each call
*
* Adopted from the John Resig's pretty.js
* at http://ejohn.org/blog/javascript-pretty-date
* and henrah's proposed modification
* at http://ejohn.org/blog/javascript-pretty-date/#comment-297458
*
* Licensed under the MIT license.
*/
var $timeAgo = (function() {
var minusRe = /-/g,
tzRe = /[TZ]/g,
margin = 0.1;
function getFormats(lang) {
return [
[60, lang.now],
[3600, lang.minute, lang.minutes, 60], // 60 minutes, 1 minute
[86400, lang.hour, lang.hours, 3600], // 24 hours, 1 hour
[604800, lang.day, lang.days, 86400], // 7 days, 1 day
[2628000, lang.week, lang.weeks, 604800], // ~1 month, 1 week
[31536000, lang.month, lang.months, 2628000], // 1 year, ~1 month
[Infinity, lang.year, lang.years, 31536000] // Infinity, 1 year
];
}
/*
* 0 seconds && < 60 seconds Now
* 60 seconds 1 Minute
* > 60 seconds && < 60 minutes X Minutes
* 60 minutes 1 Hour
* > 60 minutes && < 24 hours X Hours
* 24 hours 1 Day
* > 24 hours && < 7 days X Days
* 7 days 1 Week
* > 7 days && < ~ 1 Month X Weeks
* ~ 1 Month 1 Month
* > ~ 1 Month && < 1 Year X Months
* 1 Year 1 Year
* > 1 Year X Years
*
* Single units are +10%. 1 Year shows first at 1 Year + 10%
*/
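// Worked example: 63s is within the +10% margin of one minute, normalizes to
// the single unit, and reads "1 Minute"; 90s is past the margin and reads "2 Minutes".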
function normalize(val, single) {
if(val >= single && val <= single * (1+margin)) {
return single;
}
return val;
}
function normalizeDateInput(date) {
switch (typeof date) {
case "string":
date = new Date(('' + date).replace(minusRe, "/").replace(tzRe, " "));
break;
case "number":
date = new Date(date);
break;
}
return date;
}
var timeAgo = function(date, compareTo, langCode) {
	// capture this before normalization turns string inputs into Date objects
	var isString = (typeof date === "string");
	date = normalizeDateInput(date || $now());
	compareTo = normalizeDateInput(compareTo || new Date);
	langCode = langCode || timeAgo.defaultLang;
	var lang = timeAgo.lang[langCode],
		formats = timeAgo.formats[langCode];
	var token,
		seconds = (compareTo - date +
			(compareTo.getTimezoneOffset() -
				// a GMT time parsed from a string carries no time zone bias;
				// a Date object has the local zone built in, so we remove it
				(isString ? 0 : date.getTimezoneOffset())
			) * 60000
		) / 1000;
if (seconds < 0) {
seconds = Math.abs(seconds);
token = '';
} else {
token = ' ' + lang.ago;
}
for(var i = 0, format = formats[0]; formats[i]; format = formats[++i]) {
if(seconds < format[0]) {
if(i === 0) {
	// Now
	return format[1];
}
var val = Math.ceil(normalize(seconds, format[3]) / (format[3]));
return val +
' ' +
(val != 1 ? format[2] : format[1]) +
(i > 0 ? token : '');
}
}
};
timeAgo.lang = {};
timeAgo.formats = {};
timeAgo.setLang = function(code, newLang) {
this.defaultLang = code;
this.lang[code] = newLang;
this.formats[code] = getFormats(newLang);
};
timeAgo.setLang("en", {
ago: 'Ago',
now: 'Just Now',
minute: 'Minute',
minutes: 'Minutes',
hour: 'Hour',
hours: 'Hours',
day: 'Day',
days: 'Days',
week: 'Week',
weeks: 'Weeks',
month: 'Month',
months: 'Months',
year: 'Year',
years: 'Years'
});
return timeAgo;
}());
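// Usage sketch (illustrative values; uses the default "en" strings registered above):
//   $timeAgo($now() - 90 * 1000);           // -> "2 Minutes Ago"
//   $timeAgo("2011-04-10", "2011-04-11");   // -> "1 Day Ago"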
var $timer = (function() {
var epoch = new Date(1970, 0, 1, 0, 0, 0, 0).valueOf(); // months are zero-based: 0 = January
var timerApi = {
parent: null,
interval: null,
started: 0,
elapsed: 0,
start: function() {
var that = this;
this.started = $now();
this.interval = setInterval(function() {
that.update();
}, 1000);
},
stop: function() {
clearInterval(this.interval);
this.reset();
},
pause: function() {
clearInterval(this.interval);
},
reset: function() {
this.started = $now();
this.update();
},
update: function() {
this.elapsed = $now() - this.started;
this.parent.innerHTML = this.format(this.elapsed); // elapsed already holds $now() - this.started
},
format: function(ms) {
// console.log(ms, $now() - ms, new Date(ms - $now()).toString());
var d = new Date(ms + epoch).toString().replace(/.*(\d{2}:\d{2}:\d{2}).*/, '$1');
var x = (ms % 1000) + "";
while (x.length < 3) {
x = "0" + x;
}
d += "." + x;
return d.substr(0, d.length - 4);
}
};
return function(parent) {
var timer = $new(timerApi);
timer.parent = parent;
return timer;
}
}());
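// Usage sketch (hypothetical element; the timer renders an hh:mm:ss readout):
//   var t = $timer(document.getElementById("clock")); // assumes a #clock element exists
//   t.start();  // repaints once per second
//   t.pause();  // stops repainting, keeps the start time
//   t.stop();   // clears the interval and resets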
/*
* Date Format 1.2.3
* (c) 2007-2009 Steven Levithan <stevenlevithan.com>
* MIT license
*
* Includes enhancements by Scott Trenda <scott.trenda.net>
* and Kris Kowal <cixar.com/~kris.kowal/>
*
* Accepts a date, a mask, or a date and a mask.
* Returns a formatted version of the given date.
* The date defaults to the current date/time.
* The mask defaults to dateFormat.masks.default.
* see http://blog.stevenlevithan.com/archives/date-time-format
*/
/* Mask Description
d Day of the month as digits; no leading zero for single-digit days.
dd Day of the month as digits; leading zero for single-digit days.
ddd Day of the week as a three-letter abbreviation.
dddd Day of the week as its full name.
m Month as digits; no leading zero for single-digit months.
mm Month as digits; leading zero for single-digit months.
mmm Month as a three-letter abbreviation.
mmmm Month as its full name.
yy Year as last two digits; leading zero for years less than 10.
yyyy Year represented by four digits.
h Hours; no leading zero for single-digit hours (12-hour clock).
hh Hours; leading zero for single-digit hours (12-hour clock).
H Hours; no leading zero for single-digit hours (24-hour clock).
HH Hours; leading zero for single-digit hours (24-hour clock).
M Minutes; no leading zero for single-digit minutes.
MM Minutes; leading zero for single-digit minutes.
s Seconds; no leading zero for single-digit seconds.
ss Seconds; leading zero for single-digit seconds.
l or L Milliseconds. l gives 3 digits. L gives 2 digits.
t Lowercase, single-character time marker string: a or p.
tt Lowercase, two-character time marker string: am or pm.
T Uppercase, single-character time marker string: A or P.
TT Uppercase, two-character time marker string: AM or PM.
Z US timezone abbreviation, e.g. EST or MDT. With non-US timezones or in the Opera browser, the GMT/UTC offset is returned, e.g. GMT-0500
o GMT/UTC timezone offset, e.g. -0500 or +0230.
S The date's ordinal suffix (st, nd, rd, or th). Works well with d.
'…' or "…" Literal character sequence. Surrounding quotes are removed.
UTC: Must be the first four characters of the mask. Converts the date from local time to UTC/GMT/Zulu time before applying the mask. The "UTC:" prefix is removed.
*/
var $dateFormat = (function () {
var token = /d{1,4}|m{1,4}|yy(?:yy)?|([HhMsTt])\1?|[LloSZ]|"[^"]*"|'[^']*'/g,
timezone = /\b(?:[PMCEA][SDP]T|(?:Pacific|Mountain|Central|Eastern|Atlantic) (?:Standard|Daylight|Prevailing) Time|(?:GMT|UTC)(?:[-+]\d{4})?)\b/g,
timezoneClip = /[^-+\dA-Z]/g,
pad = function(val, len) {
val = String(val);
len = len || 2;
while (val.length < len) val = "0" + val;
return val;
};
// Regexes and supporting functions are cached through closure
return function(date, mask, utc, langCode) {
if (!date) {
return date + "";
}
var dF = $dateFormat;
langCode = langCode || dF.defaultLang;
var lang = dF.lang[langCode];
// You can't provide utc if you skip other args (use the "UTC:" mask prefix)
if (arguments.length == 1 && Object.prototype.toString.call(date) == "[object String]" && !/\d/.test(date)) {
mask = date;
date = undefined;
}
// Passing date through Date applies Date.parse, if necessary
date = date ? new Date(date) : new Date;
if (!$isDate(date)) throw SyntaxError("invalid date");
mask = String(dF.masks[mask] || mask || dF.masks["default"]);
// Allow setting the utc argument via the mask
if (mask.slice(0, 4) == "UTC:") {
mask = mask.slice(4);
utc = true;
}
var _ = utc ? "getUTC" : "get",
d = date[_ + "Date"](),
D = date[_ + "Day"](),
m = date[_ + "Month"](),
y = date[_ + "FullYear"](),
H = date[_ + "Hours"](),
M = date[_ + "Minutes"](),
s = date[_ + "Seconds"](),
L = date[_ + "Milliseconds"](),
o = utc ? 0 : date.getTimezoneOffset(),
flags = {
d: d,
dd: pad(d),
ddd: lang.dayNames[D],
dddd: lang.dayNames[D + 7],
m: m + 1,
mm: pad(m + 1),
mmm: lang.monthNames[m],
mmmm: lang.monthNames[m + 12],
yy: String(y).slice(2),
yyyy: y,
h: H % 12 || 12,
hh: pad(H % 12 || 12),
H: H,
HH: pad(H),
M: M,
MM: pad(M),
s: s,
ss: pad(s),
l: pad(L, 3),
L: pad(L > 99 ? Math.round(L / 10) : L),
t: H < 12 ? "a" : "p",
tt: H < 12 ? "am" : "pm",
T: H < 12 ? "A" : "P",
TT: H < 12 ? "AM" : "PM",
Z: utc ? "UTC" : (String(date).match(timezone) || [""]).pop().replace(timezoneClip, ""),
o: (o > 0 ? "-" : "+") + pad(Math.floor(Math.abs(o) / 60) * 100 + Math.abs(o) % 60, 4),
S: ["th", "st", "nd", "rd"][d % 10 > 3 ? 0 : (d % 100 - d % 10 != 10) * d % 10]
};
return mask.replace(token, function ($0) {
return $0 in flags ? flags[$0] : $0.slice(1, $0.length - 1);
});
};
}());
// Some common format strings
$dateFormat.masks = {
"default": "ddd mmm dd yyyy HH:MM:ss",
shortDate: "m/d/yy",
mediumDate: "mmm d, yyyy",
longDate: "mmmm d, yyyy",
fullDate: "dddd, mmmm d, yyyy",
shortTime: "h:MM TT",
mediumTime: "h:MM:ss TT",
longTime: "h:MM:ss TT Z",
isoDate: "yyyy-mm-dd",
isoTime: "HH:MM:ss",
isoDateTime: "yyyy-mm-dd'T'HH:MM:ss",
isoUtcDateTime: "UTC:yyyy-mm-dd'T'HH:MM:ss'Z'"
};
// Internationalization strings
$dateFormat.defaultLang = "en";
$dateFormat.lang = {
en: {
dayNames: [
"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat",
"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"
],
monthNames: [
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
"January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"
]
}
};
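// Usage sketch (illustrative):
//   $dateFormat(new Date(2009, 5, 9, 13, 5), "dddd, mmmm dS, yyyy, h:MM TT");
//   // -> "Tuesday, June 9th, 2009, 1:05 PM"
//   $dateFormat($now(), "isoUtcDateTime"); // named masks work too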
function $secondsToTime(_s) {
var h, m, s, ms, pad, f1, f2, f3;
ms = Math.round((parseFloat(_s) % 1)*1000);
s = parseInt(_s, 10);
h = Math.floor( s / ( 60 * 60 ) );
s -= h * ( 60 * 60 );
m = Math.floor( s / 60 );
s -= m * 60;
pad = function(v) {return (v > 9) ? v : "0"+v;};
f1 = $map([h, m], pad).join(":");
f2 = $map([h, m, s], pad).join(":");
// create x hours x minutes string
// if no hours it will be x minutes
// if no hours or minutes will be x seconds
// plurality of units is handled
var hStr = h ? h + " hour" + (h>1 ? "s " : " ") : "",
mStr = (h || m) ? m + " minute" + (m>1 ? "s" : "") : "",
sStr = (!m && s) ? s + " second" + (s>1 ? "s" : "") : "";
f3 = hStr + mStr + sStr;
return {h: h, m: m, s: s, ms: ms, "hh:mm": f1, "hh:mm:ss": f2, formatted: f3};
}
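// Worked example: $secondsToTime(3725.25) ->
//   {h: 1, m: 2, s: 5, ms: 250, "hh:mm": "01:02", "hh:mm:ss": "01:02:05",
//    formatted: "1 hour 2 minutes"}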
function $millisToTime(ms) {
return $secondsToTime(parseInt(ms, 10)/1000);
}
loot.extend({
$now: $now,
$date: $dateFormat,
$timeAgo: $timeAgo,
$timer: $timer,
$secondsToTime: $secondsToTime,
$millisToTime: $millisToTime
});
}());
// ecvrf.rs
// Copyright (c) The XPeer Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! This module implements an instantiation of a verifiable random function known as
//! [ECVRF-ED25519-SHA512-TAI](https://tools.ietf.org/html/draft-irtf-cfrg-vrf-04).
//!
//! # Examples
//!
//! ```
//! use nextgen_crypto::{traits::Uniform, vrf::ecvrf::*};
//! use rand::{rngs::StdRng, SeedableRng};
//!
//! let message = b"Test message";
//! let mut rng: StdRng = SeedableRng::from_seed([0; 32]);
//! let private_key = VRFPrivateKey::generate_for_testing(&mut rng);
//! let public_key: VRFPublicKey = (&private_key).into();
//! ```
//! **Note**: The above example generates a private key using a private function intended only for
//! testing purposes. Production code should find an alternate means for secure key generation.
//!
//! Produce a proof for a message from a `VRFPrivateKey`, and verify the proof and message
//! using a `VRFPublicKey`:
//!
//! ```
//! # use nextgen_crypto::{traits::Uniform, vrf::ecvrf::*};
//! # use rand::{rngs::StdRng, SeedableRng};
//! # let message = b"Test message";
//! # let mut rng: StdRng = SeedableRng::from_seed([0; 32]);
//! # let private_key = VRFPrivateKey::generate_for_testing(&mut rng);
//! # let public_key: VRFPublicKey = (&private_key).into();
//! let proof = private_key.prove(message);
//! assert!(public_key.verify(&proof, message).is_ok());
//! ```
//!
//! Produce a pseudorandom output from a `Proof`:
//!
//! ```
//! # use nextgen_crypto::{traits::Uniform, vrf::ecvrf::*};
//! # use rand::{rngs::StdRng, SeedableRng};
//! # let message = b"Test message";
//! # let mut rng: StdRng = SeedableRng::from_seed([0; 32]);
//! # let private_key = VRFPrivateKey::generate_for_testing(&mut rng);
//! # let public_key: VRFPublicKey = (&private_key).into();
//! # let proof = private_key.prove(message);
//! let output: Output = (&proof).into();
//! ```
use crate::traits::*;
use core::convert::TryFrom;
use curve25519_dalek::{
constants::ED25519_BASEPOINT_POINT,
edwards::{CompressedEdwardsY, EdwardsPoint},
scalar::Scalar as ed25519_Scalar,
};
use derive_deref::Deref;
use ed25519_dalek::{
self, Digest, PublicKey as ed25519_PublicKey, SecretKey as ed25519_PrivateKey, Sha512,
};
use failure::prelude::*;
use serde::{Deserialize, Serialize};
const SUITE: u8 = 0x03;
const ONE: u8 = 0x01;
const TWO: u8 = 0x02;
const THREE: u8 = 0x03;
/// The number of bytes of [`Output`]
pub const OUTPUT_LENGTH: usize = 64;
/// The number of bytes of [`Proof`]
pub const PROOF_LENGTH: usize = 80;
/// An ECVRF private key
#[derive(Serialize, Deserialize, Deref, Debug)]
pub struct VRFPrivateKey(ed25519_PrivateKey);
/// An ECVRF public key
#[derive(Serialize, Deserialize, Deref, Debug, PartialEq, Eq)]
pub struct VRFPublicKey(ed25519_PublicKey);
/// A longer private key which is slightly optimized for proof generation.
///
/// This is similar in structure to ed25519_dalek::ExpandedSecretKey. It can be produced from
/// a VRFPrivateKey.
pub struct VRFExpandedPrivateKey {
pub(super) key: ed25519_Scalar,
pub(super) nonce: [u8; 32],
}
impl VRFPrivateKey {
/// Produces a proof for an input (using the private key)
pub fn prove(&self, alpha: &[u8]) -> Proof {
VRFExpandedPrivateKey::from(self).prove(&VRFPublicKey((&self.0).into()), alpha)
}
}
impl VRFExpandedPrivateKey {
/// Produces a proof for an input (using the expanded private key)
pub fn prove(&self, pk: &VRFPublicKey, alpha: &[u8]) -> Proof {
let h_point = pk.hash_to_curve(alpha);
let k_scalar =
ed25519_Scalar::from_bytes_mod_order_wide(&nonce_generation_bytes(self.nonce, h_point));
let gamma = h_point * self.key;
let c_scalar = hash_points(&[
h_point,
gamma,
ED25519_BASEPOINT_POINT * k_scalar,
h_point * k_scalar,
]);
Proof {
gamma,
c: c_scalar,
s: k_scalar + c_scalar * self.key,
}
}
}
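// Why verification works, sketched: with secret scalar x, public key PK = x*B
// and gamma = x*H, the prover sends (gamma, c, s) where s = k + c*x. A verifier
// can recompute s*B - c*PK = k*B and s*H - c*gamma = k*H, so hashing
// (H, gamma, k*B, k*H) reproduces c exactly when the proof is honest.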
impl Uniform for VRFPrivateKey {
fn generate_for_testing<R>(rng: &mut R) -> Self
where
        R: SeedableCryptoRng,
    {
        VRFPrivateKey(ed25519_PrivateKey::generate(rng))
    }
}
impl TryFrom<&[u8]> for VRFPrivateKey {
type Error = CryptoMaterialError;
fn try_from(bytes: &[u8]) -> std::result::Result<VRFPrivateKey, CryptoMaterialError> {
        // Surface malformed input as an error instead of panicking on unwrap.
        ed25519_PrivateKey::from_bytes(bytes)
            .map(VRFPrivateKey)
            .map_err(|_| CryptoMaterialError::DeserializationError)
}
}
impl TryFrom<&[u8]> for VRFPublicKey {
type Error = CryptoMaterialError;
fn try_from(bytes: &[u8]) -> std::result::Result<VRFPublicKey, CryptoMaterialError> {
if bytes.len() != ed25519_dalek::PUBLIC_KEY_LENGTH {
return Err(CryptoMaterialError::WrongLengthError);
}
let mut bits: [u8; 32] = [0u8; 32];
bits.copy_from_slice(&bytes[..32]);
let compressed = curve25519_dalek::edwards::CompressedEdwardsY(bits);
let point = compressed
.decompress()
.ok_or(CryptoMaterialError::DeserializationError)?;
// Check if the point lies on a small subgroup. This is required
// when using curves with a small cofactor (in ed25519, cofactor = 8).
if point.is_small_order() {
return Err(CryptoMaterialError::SmallSubgroupError);
}
Ok(VRFPublicKey(ed25519_PublicKey::from_bytes(bytes).unwrap()))
}
}
impl VRFPublicKey {
/// Given a [`Proof`] and an input, returns whether or not the proof is valid for the input
/// and public key
pub fn verify(&self, proof: &Proof, alpha: &[u8]) -> Result<()> {
let h_point = self.hash_to_curve(alpha);
let pk_point = CompressedEdwardsY::from_slice(self.as_bytes())
.decompress()
.unwrap();
let cprime = hash_points(&[
h_point,
proof.gamma,
ED25519_BASEPOINT_POINT * proof.s - pk_point * proof.c,
h_point * proof.s - proof.gamma * proof.c,
]);
if proof.c == cprime {
Ok(())
} else {
bail!("The proof failed to verify for this public key")
}
}
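    // hash_to_curve below uses try-and-increment: hash (SUITE || ONE || pk ||
    // alpha || counter), bumping the counter until the digest decompresses to
    // a valid point, then clear the cofactor to land in the prime-order subgroup.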
pub(super) fn hash_to_curve(&self, alpha: &[u8]) -> EdwardsPoint {
let mut result = [0u8; 32];
let mut counter = 0;
let mut wrapped_point: Option<EdwardsPoint> = None;
while wrapped_point.is_none() {
result.copy_from_slice(
&Sha512::new()
.chain(&[SUITE, ONE])
.chain(self.as_bytes())
.chain(&alpha)
.chain(&[counter])
.result()[..32],
);
wrapped_point = CompressedEdwardsY::from_slice(&result).decompress();
counter += 1;
}
wrapped_point.unwrap().mul_by_cofactor()
}
}
impl<'a> From<&'a VRFPrivateKey> for VRFPublicKey {
fn from(private_key: &'a VRFPrivateKey) -> Self {
let secret: &ed25519_PrivateKey = private_key;
let public: ed25519_PublicKey = secret.into();
VRFPublicKey(public)
}
}
impl<'a> From<&'a VRFPrivateKey> for VRFExpandedPrivateKey {
fn from(private_key: &'a VRFPrivateKey) -> Self {
let mut h: Sha512 = Sha512::default();
let mut hash: [u8; 64] = [0u8; 64];
let mut lower: [u8; 32] = [0u8; 32];
let mut upper: [u8; 32] = [0u8; 32];
h.input(private_key.to_bytes());
hash.copy_from_slice(h.result().as_slice());
        lower.copy_from_slice(&hash[0..32]);
upper.copy_from_slice(&hash[32..64]);
lower[0] &= 248;
lower[31] &= 63;
lower[31] |= 64;
VRFExpandedPrivateKey {
key: ed25519_Scalar::from_bits(lower),
nonce: upper,
}
}
}
/// A VRF proof that can be used to validate an input with a public key
pub struct Proof {
gamma: EdwardsPoint,
c: ed25519_Scalar,
s: ed25519_Scalar,
}
impl Proof {
/// Produces a new Proof struct from its fields
pub fn new(gamma: EdwardsPoint, c: ed25519_Scalar, s: ed25519_Scalar) -> Proof {
Proof { gamma, c, s }
}
/// Converts a Proof into bytes
pub fn to_bytes(&self) -> [u8; PROOF_LENGTH] {
let mut ret = [0u8; PROOF_LENGTH];
ret[..32].copy_from_slice(&self.gamma.compress().to_bytes()[..]);
ret[32..48].copy_from_slice(&self.c.to_bytes()[..16]);
ret[48..].copy_from_slice(&self.s.to_bytes()[..]);
ret
}
}
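// Wire layout: gamma (32 bytes) || c (16 bytes, the truncated challenge) ||
// s (32 bytes), totalling PROOF_LENGTH. A hypothetical round trip:
//
//     let restored = Proof::try_from(&proof.to_bytes()[..]);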
impl TryFrom<&[u8]> for Proof {
type Error = CryptoMaterialError;
fn try_from(bytes: &[u8]) -> std::result::Result<Proof, CryptoMaterialError> {
let mut c_buf = [0u8; 32];
c_buf[..16].copy_from_slice(&bytes[32..48]);
let mut s_buf = [0u8; 32];
s_buf.copy_from_slice(&bytes[48..]);
Ok(Proof {
gamma: CompressedEdwardsY::from_slice(&bytes[..32])
.decompress()
.unwrap(),
c: ed25519_Scalar::from_bits(c_buf),
s: ed25519_Scalar::from_bits(s_buf),
})
}
}
/// The ECVRF output produced from the proof
pub struct Output([u8; OUTPUT_LENGTH]);
impl Output {
/// Converts an Output into bytes
#[inline]
pub fn to_bytes(&self) -> [u8; OUTPUT_LENGTH] {
self.0
}
}
impl<'a> From<&'a Proof> for Output {
fn from(proof: &'a Proof) -> Output {
let mut output = [0u8; OUTPUT_LENGTH];
output.copy_from_slice(
&Sha512::new()
.chain(&[SUITE, THREE])
.chain(&proof.gamma.mul_by_cofactor().compress().to_bytes()[..])
.result()[..],
);
Output(output)
}
}
pub(super) fn nonce_generation_bytes(nonce: [u8; 32], h_point: EdwardsPoint) -> [u8; 64] {
let mut k_buf = [0u8; 64];
k_buf.copy_from_slice(
&Sha512::new()
.chain(nonce)
.chain(h_point.compress().as_bytes())
.result()[..],
);
k_buf
}
pub(super) fn hash_points(points: &[EdwardsPoint]) -> ed25519_Scalar {
let mut result = [0u8; 32];
let mut hash = Sha512::new().chain(&[SUITE, TWO]);
for point in points.iter() {
hash = hash.chain(point.compress().to_bytes());
}
result[..16].copy_from_slice(&hash.result()[..16]);
ed25519_Scalar::from_bits(result)
}
// interactor.go
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package git
import (
"bufio"
"bytes"
"errors"
"fmt"
"os"
"strings"
"github.com/sirupsen/logrus"
)
// Interactor knows how to operate on a git repository cloned from GitHub
// using a local cache.
type Interactor interface {
// Directory exposes the directory in which the repository has been cloned
Directory() string
// Clean removes the repository. It is up to the user to call this once they are done
Clean() error
// ResetHard runs `git reset --hard`
ResetHard(commitlike string) error
// IsDirty checks whether the repo is dirty or not
IsDirty() (bool, error)
// Checkout runs `git checkout`
Checkout(commitlike string) error
// RevParse runs `git rev-parse`
RevParse(commitlike string) (string, error)
// BranchExists determines if a branch with the name exists
BranchExists(branch string) bool
// ObjectExists determines if the Git object exists locally
ObjectExists(sha string) (bool, error)
// CheckoutNewBranch creates a new branch from HEAD and checks it out
CheckoutNewBranch(branch string) error
// Merge merges the commitlike into the current HEAD
Merge(commitlike string) (bool, error)
// MergeWithStrategy merges the commitlike into the current HEAD with the strategy
MergeWithStrategy(commitlike, mergeStrategy string, opts ...MergeOpt) (bool, error)
// MergeAndCheckout merges all commitlikes into the current HEAD with the appropriate strategy
MergeAndCheckout(baseSHA string, mergeStrategy string, headSHAs ...string) error
// Am calls `git am`
Am(path string) error
// Fetch calls `git fetch arg...`
Fetch(arg ...string) error
// FetchRef fetches the refspec
FetchRef(refspec string) error
// FetchFromRemote fetches the branch of the given remote
FetchFromRemote(remote RemoteResolver, branch string) error
// CheckoutPullRequest fetches and checks out the synthetic refspec from GitHub for a pull request HEAD
CheckoutPullRequest(number int) error
// Config runs `git config`
Config(args ...string) error
// Diff runs `git diff`
Diff(head, sha string) (changes []string, err error)
// MergeCommitsExistBetween determines if merge commits exist between target and HEAD
MergeCommitsExistBetween(target, head string) (bool, error)
// ShowRef returns the commit for a commitlike. Unlike rev-parse it does not require a checkout.
ShowRef(commitlike string) (string, error)
}
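// A typical lifecycle, sketched with hypothetical values (the concrete
// factory that hands out an Interactor lives elsewhere in this package):
//
//	repo, _ := factory.ClientFor("org", "repo") // assumed constructor
//	defer repo.Clean()
//	_ = repo.Checkout("main")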
// cacher knows how to cache and update repositories in a central cache
type cacher interface {
// MirrorClone sets up a mirror of the source repository.
MirrorClone() error
// RemoteUpdate fetches all updates from the remote.
RemoteUpdate() error
}
// cloner knows how to clone repositories from a central cache
type cloner interface {
// Clone clones the repository from a local path.
Clone(from string) error
CloneWithRepoOpts(from string, repoOpts RepoOpts) error
// FetchCommits fetches only the given commits.
FetchCommits(bool, []string) error
}
// MergeOpt holds options for git merge operations.
// Currently only commit message option is supported.
type MergeOpt struct {
CommitMessage string
}
type interactor struct {
executor executor
remote RemoteResolver
dir string
logger *logrus.Entry
}
// Directory exposes the directory in which this repository has been cloned
func (i *interactor) Directory() string {
return i.dir
}
// Clean cleans up the repository from the on-disk cache
func (i *interactor) Clean() error {
return os.RemoveAll(i.dir)
}
// ResetHard runs `git reset --hard`
func (i *interactor) ResetHard(commitlike string) error {
// `git reset --hard` doesn't clean up untracked files, so clean them first
i.logger.Info("Clean untracked files and dirs.")
if out, err := i.executor.Run("clean", "-df"); err != nil {
return fmt.Errorf("error clean -df: %v. output: %s", err, string(out))
}
i.logger.WithField("commitlike", commitlike).Info("Reset hard.")
if out, err := i.executor.Run("reset", "--hard", commitlike); err != nil {
return fmt.Errorf("error reset hard %s: %v. output: %s", commitlike, err, string(out))
}
return nil
}
// IsDirty checks whether the repo is dirty or not
func (i *interactor) IsDirty() (bool, error) {
i.logger.Info("Checking is dirty.")
b, err := i.executor.Run("status", "--porcelain")
if err != nil {
return false, fmt.Errorf("error add -A: %v. output: %s", err, string(b))
}
return len(b) > 0, nil
}
// Clone clones the repository from a local path.
func (i *interactor) Clone(from string) error {
return i.CloneWithRepoOpts(from, RepoOpts{})
}
// CloneWithRepoOpts clones the repository from a local path, but additionally
// use any repository options (RepoOpts) to customize the clone behavior.
func (i *interactor) CloneWithRepoOpts(from string, repoOpts RepoOpts) error {
i.logger.Infof("Creating a clone of the repo at %s from %s", i.dir, from)
cloneArgs := []string{"clone"}
if repoOpts.ShareObjectsWithSourceRepo {
cloneArgs = append(cloneArgs, "--shared")
}
// Handle sparse checkouts.
if repoOpts.SparseCheckoutDirs != nil {
cloneArgs = append(cloneArgs, "--sparse")
}
cloneArgs = append(cloneArgs, []string{from, i.dir}...)
if out, err := i.executor.Run(cloneArgs...); err != nil {
return fmt.Errorf("error creating a clone: %w %v", err, string(out))
}
// For sparse checkouts, we have to do some additional housekeeping after
// the clone is completed. We use Git's global "-C <directory>" flag to
// switch to that directory before running the "sparse-checkout" command,
// because otherwise the command will fail (because it will try to run the
// command in the $PWD, which is not the same as the just-created clone
// directory (i.dir)).
if repoOpts.SparseCheckoutDirs != nil {
if len(repoOpts.SparseCheckoutDirs) == 0 {
return nil
}
sparseCheckoutArgs := []string{"-C", i.dir, "sparse-checkout", "set"}
sparseCheckoutArgs = append(sparseCheckoutArgs, repoOpts.SparseCheckoutDirs...)
if out, err := i.executor.Run(sparseCheckoutArgs...); err != nil {
return fmt.Errorf("error setting it to a sparse checkout: %w %v", err, string(out))
}
}
return nil
}
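// As a sketch, CloneWithRepoOpts(from, RepoOpts{ShareObjectsWithSourceRepo: true,
// SparseCheckoutDirs: []string{"docs"}}) boils down to roughly:
//
//	git clone --shared --sparse <from> <dir>
//	git -C <dir> sparse-checkout set docs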
// MirrorClone sets up a mirror of the source repository.
func (i *interactor) MirrorClone() error {
i.logger.Infof("Creating a mirror of the repo at %s", i.dir)
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for cloning: %w", err)
}
if out, err := i.executor.Run("clone", "--mirror", remote, i.dir); err != nil {
return fmt.Errorf("error creating a mirror clone: %w %v", err, string(out))
}
return nil
}
// Checkout runs git checkout.
func (i *interactor) Checkout(commitlike string) error {
i.logger.Infof("Checking out %q", commitlike)
if out, err := i.executor.Run("checkout", commitlike); err != nil {
return fmt.Errorf("error checking out %q: %w %v", commitlike, err, string(out))
}
return nil
}
// RevParse runs git rev-parse.
func (i *interactor) RevParse(commitlike string) (string, error) {
i.logger.Infof("Parsing revision %q", commitlike)
out, err := i.executor.Run("rev-parse", commitlike)
if err != nil {
return "", fmt.Errorf("error parsing %q: %w %v", commitlike, err, string(out))
}
return string(out), nil
}
// BranchExists returns true if branch exists in heads.
func (i *interactor) BranchExists(branch string) bool {
i.logger.Infof("Checking if branch %q exists", branch)
_, err := i.executor.Run("ls-remote", "--exit-code", "--heads", "origin", branch)
return err == nil
}
func (i *interactor) ObjectExists(sha string) (bool, error) {
i.logger.WithField("SHA", sha).Info("Checking if Git object exists")
output, err := i.executor.Run("cat-file", "-e", sha)
// If the object does not exist, cat-file will exit with a non-zero exit
// code. This will make err non-nil. However, this is known behavior, so
// we just log it.
//
// We still have the error type as a return value because the v1 git client
// adapter needs to know that this operation is not supported there.
if err != nil {
i.logger.WithError(err).WithField("SHA", sha).Debugf("error from 'git cat-file -e': %s", string(output))
return false, nil
}
return true, nil
}
// CheckoutNewBranch creates a new branch and checks it out.
func (i *interactor) CheckoutNewBranch(branch string) error {
i.logger.Infof("Checking out new branch %q", branch)
if out, err := i.executor.Run("checkout", "-b", branch); err != nil {
return fmt.Errorf("error checking out new branch %q: %w %v", branch, err, string(out))
}
return nil
}
// Merge attempts to merge commitlike into the current branch. It returns true
// if the merge completes. It returns an error if the abort fails.
func (i *interactor) Merge(commitlike string) (bool, error) {
return i.MergeWithStrategy(commitlike, "merge")
}
// MergeWithStrategy attempts to merge commitlike into the current branch using the given merge
// strategy. It returns true if the merge completes. If the merge does not complete successfully,
// we try to abort it and return an error if the abort fails.
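// For example (illustrative sketch), a caller wanting a squash merge of some
// head SHA might write:
//
//	ok, err := i.MergeWithStrategy(headSHA, "squash")
//
// and treat ok == false as a merge conflict that was aborted cleanly.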
func (i *interactor) MergeWithStrategy(commitlike, mergeStrategy string, opts ...MergeOpt) (bool, error) {
i.logger.Infof("Merging %q using the %q strategy", commitlike, mergeStrategy)
switch mergeStrategy {
case "merge":
return i.mergeMerge(commitlike, opts...)
case "squash":
return i.squashMerge(commitlike)
case "rebase":
return i.mergeRebase(commitlike)
case "ifNecessary":
return i.mergeIfNecessary(commitlike, opts...)
default:
return false, fmt.Errorf("merge strategy %q is not supported", mergeStrategy)
}
}
func (i *interactor) mergeHelper(args []string, commitlike string, opts ...MergeOpt) (bool, error) {
if len(opts) == 0 {
args = append(args, []string{"-m", "merge"}...)
} else {
for _, opt := range opts {
args = append(args, []string{"-m", opt.CommitMessage}...)
}
}
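// Git concatenates repeated -m flags into separate paragraphs of a single
// commit message, so each MergeOpt contributes one paragraph.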
args = append(args, commitlike)
out, err := i.executor.Run(args...)
if err == nil {
return true, nil
}
i.logger.WithError(err).Infof("Error merging %q: %s", commitlike, string(out))
if out, err := i.executor.Run("merge", "--abort"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
func (i *interactor) mergeMerge(commitlike string, opts ...MergeOpt) (bool, error) {
args := []string{"merge", "--no-ff", "--no-stat"}
return i.mergeHelper(args, commitlike, opts...)
}
func (i *interactor) mergeIfNecessary(commitlike string, opts ...MergeOpt) (bool, error) {
args := []string{"merge", "--ff", "--no-stat"}
return i.mergeHelper(args, commitlike, opts...)
}
func (i *interactor) squashMerge(commitlike string) (bool, error) {
out, err := i.executor.Run("merge", "--squash", "--no-stat", commitlike)
if err != nil {
i.logger.WithError(err).Warnf("Error staging merge for %q: %s", commitlike, string(out))
if out, err := i.executor.Run("reset", "--hard", "HEAD"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
out, err = i.executor.Run("commit", "--no-stat", "-m", "merge")
if err != nil {
i.logger.WithError(err).Warnf("Error committing merge for %q: %s", commitlike, string(out))
if out, err := i.executor.Run("reset", "--hard", "HEAD"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
return true, nil
}
func (i *interactor) mergeRebase(commitlike string) (bool, error) {
if commitlike == "" {
return false, errors.New("branch must be set")
}
headRev, err := i.revParse("HEAD")
if err != nil {
i.logger.WithError(err).Infof("Failed to parse HEAD revision")
return false, err
}
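// revParse returns git's raw output, which ends with a newline that must be
// stripped before the SHA is used as an argument.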
headRev = strings.TrimSuffix(headRev, "\n")
b, err := i.executor.Run("rebase", "--no-stat", headRev, commitlike)
if err != nil {
i.logger.WithField("out", string(b)).WithError(err).Infof("Rebase failed.")
if b, err := i.executor.Run("rebase", "--abort"); err != nil {
return false, fmt.Errorf("error aborting after failed rebase for commitlike %s: %v. output: %s", commitlike, err, string(b))
}
return false, nil
}
return true, nil
}
func (i *interactor) revParse(args ...string) (string, error) {
fullArgs := append([]string{"rev-parse"}, args...)
b, err := i.executor.Run(fullArgs...)
if err != nil {
return "", errors.New(string(b))
}
return string(b), nil
}
// MergeAndCheckout checks out baseSHA and merges each headSHA onto it in
// order using the given strategy. Only the `merge` and `squash` strategies
// are supported.
func (i *interactor) MergeAndCheckout(baseSHA string, mergeStrategy string, headSHAs ...string) error {
if baseSHA == "" {
return errors.New("baseSHA must be set")
}
if err := i.Checkout(baseSHA); err != nil {
return err
}
for _, headSHA := range headSHAs {
ok, err := i.MergeWithStrategy(headSHA, mergeStrategy)
if err != nil {
return err
} else if !ok {
return fmt.Errorf("failed to merge %q", headSHA)
}
}
return nil
}
// Am tries to apply the patch at the given path onto the current branch
// by performing a three-way merge (similar to git cherry-pick). It returns
// an error if the patch cannot be applied.
func (i *interactor) Am(path string) error {
i.logger.Infof("Applying patch at %s", path)
out, err := i.executor.Run("am", "--3way", path)
if err == nil {
return nil
}
i.logger.WithError(err).Infof("Patch apply failed with output: %s", string(out))
if abortOut, abortErr := i.executor.Run("am", "--abort"); abortErr != nil {
i.logger.WithError(abortErr).Warningf("Aborting patch apply failed with output: %s", string(abortOut))
}
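// git am may prefix its output with a fixed hint pointing at the saved copy
// of the failed patch; strip it so the returned error carries only the cause.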
return errors.New(string(bytes.TrimPrefix(out, []byte("The copy of the patch that failed is found in: .git/rebase-apply/patch"))))
}
// FetchCommits fetches only the given commits, and only if they are not
// already present locally.
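// For example (illustrative), FetchCommits(true, []string{sha}) fetches the
// commit named by sha only if it is absent locally, skipping tags.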
func (i *interactor) FetchCommits(noFetchTags bool, commitSHAs []string) error {
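// --no-write-fetch-head keeps these targeted fetches from clobbering
// FETCH_HEAD (the flag is understood by Git 2.29 and newer).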
fetchArgs := []string{"--no-write-fetch-head"}
if noFetchTags {
fetchArgs = append(fetchArgs, "--no-tags")
}
// For each commit SHA, check if it already exists. If so, don't bother
// fetching it.
var missingCommits bool
for _, commitSHA := range commitSHAs {
if exists, _ := i.ObjectExists(commitSHA); exists {
continue
}
fetchArgs = append(fetchArgs, commitSHA)
missingCommits = true
}
// Skip the fetch operation altogether if nothing is missing (we already
// fetched everything previously at some point).
if !missingCommits {
return nil
}
if err := i.Fetch(fetchArgs...); err != nil {
return fmt.Errorf("failed to fetch %s: %v", fetchArgs, err)
}
return nil
}
// RemoteUpdate fetches all updates from the remote.
func (i *interactor) RemoteUpdate() error {
i.logger.Info("Updating from remote")
if out, err := i.executor.Run("remote", "update", "--prune"); err != nil {
return fmt.Errorf("error updating: %w %v", err, string(out))
}
return nil
}
// Fetch fetches all updates from the remote.
func (i *interactor) Fetch(arg ...string) error {
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for fetching: %w", err)
}
arg = append([]string{"fetch", remote}, arg...)
i.logger.Infof("Fetching from %s", remote)
if out, err := i.executor.Run(arg...); err != nil {
return fmt.Errorf("error fetching: %w %v", err, string(out))
}
return nil
}
// FetchRef fetches a refspec from the remote and leaves it as FETCH_HEAD.
func (i *interactor) FetchRef(refspec string) error {
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for fetching: %w", err)
}
i.logger.Infof("Fetching %q from %s", refspec, remote)
if out, err := i.executor.Run("fetch", remote, refspec); err != nil {
return fmt.Errorf("error fetching %q: %w %v", refspec, err, string(out))
}
return nil
}
// FetchFromRemote fetches updates for the given branch from a specific remote and leaves it as FETCH_HEAD.
func (i *interactor) FetchFromRemote(remote RemoteResolver, branch string) error {
r, err := remote()
if err != nil {
return fmt.Errorf("couldn't get remote: %w", err)
}
i.logger.Infof("Fetching %s from %s", branch, r)
if out, err := i.executor.Run("fetch", r, branch); err != nil {
return fmt.Errorf("error fetching %s from %s: %w %v", branch, r, err, string(out))
}
return nil
}
// CheckoutPullRequest fetches the HEAD of a pull request using a synthetic refspec
// available on GitHub remotes and creates a branch at that commit.
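// For example, PR 123 is fetched via the synthetic refspec "pull/123/head"
// and ends up checked out on a local branch named "pull123".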
func (i *interactor) CheckoutPullRequest(number int) error {
i.logger.Infof("Checking out pull request %d", number)
if err := i.FetchRef(fmt.Sprintf("pull/%d/head", number)); err != nil {
return err
}
if err := i.Checkout("FETCH_HEAD"); err != nil {
return err
}
if err := i.CheckoutNewBranch(fmt.Sprintf("pull%d", number)); err != nil {
return err
}
return nil
}
// Config runs git config.
func (i *interactor) Config(args ...string) error {
i.logger.WithField("args", args).Info("Configuring.")
if out, err := i.executor.Run(append([]string{"config"}, args...)...); err != nil {
return fmt.Errorf("error configuring %v: %w %v", args, err, string(out))
}
return nil
}
// Diff lists the difference between the two references, returning the output
// line by line.
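// Only file names are reported (--name-only), one path per line, which is
// what the line scanner below collects.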
func (i *interactor) Diff(head, sha string) ([]string, error) {
i.logger.Infof("Finding the differences between %q and %q", head, sha)
out, err := i.executor.Run("diff", head, sha, "--name-only")
if err != nil {
return nil, err
}
var changes []string
scan := bufio.NewScanner(bytes.NewReader(out))
scan.Split(bufio.ScanLines)
for scan.Scan() {
changes = append(changes, scan.Text())
}
return changes, nil
}
// MergeCommitsExistBetween runs 'git log <target>..<head> --oneline --merges' to verify
// if merge commits exist between "target" and "head".
func (i *interactor) MergeCommitsExistBetween(target, head string) (bool, error) {
i.logger.Infof("Determining if merge commits exist between %q and %q", target, head)
out, err := i.executor.Run("log", fmt.Sprintf("%s..%s", target, head), "--oneline", "--merges")
if err != nil {
return false, fmt.Errorf("error verifying if merge commits exist between %q and %q: %v %s", target, head, err, string(out))
}
return len(out) != 0, nil
}
func (i *interactor) ShowRef(commitlike string) (string, error) {
i.logger.Infof("Getting the commit sha for commitlike %s", commitlike)
out, err := i.executor.Run("show-ref", "-s", commitlike)
if err != nil {
return "", fmt.Errorf("failed to get commit sha for commitlike %s: %w", commitlike, err)
}
return strings.TrimSpace(string(out)), nil
}
| {
return err
} | conditional_block |
interactor.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package git
import (
"bufio"
"bytes"
"errors"
"fmt"
"os"
"strings"
"github.com/sirupsen/logrus"
)
// Interactor knows how to operate on a git repository cloned from GitHub
// using a local cache.
type Interactor interface {
// Directory exposes the directory in which the repository has been cloned
Directory() string
// Clean removes the repository. It is up to the user to call this once they are done
Clean() error
// ResetHard runs `git reset --hard`
ResetHard(commitlike string) error
// IsDirty checks whether the repo is dirty or not
IsDirty() (bool, error)
// Checkout runs `git checkout`
Checkout(commitlike string) error
// RevParse runs `git rev-parse`
RevParse(commitlike string) (string, error)
// BranchExists determines if a branch with the name exists
BranchExists(branch string) bool
// ObjectExists determines if the Git object exists locally
ObjectExists(sha string) (bool, error)
// CheckoutNewBranch creates a new branch from HEAD and checks it out
CheckoutNewBranch(branch string) error
// Merge merges the commitlike into the current HEAD
Merge(commitlike string) (bool, error)
// MergeWithStrategy merges the commitlike into the current HEAD with the strategy
MergeWithStrategy(commitlike, mergeStrategy string, opts ...MergeOpt) (bool, error)
// MergeAndCheckout merges all commitlikes into the current HEAD with the appropriate strategy
MergeAndCheckout(baseSHA string, mergeStrategy string, headSHAs ...string) error
// Am calls `git am`
Am(path string) error
// Fetch calls `git fetch arg...`
Fetch(arg ...string) error
// FetchRef fetches the refspec
FetchRef(refspec string) error
// FetchFromRemote fetches the branch of the given remote
FetchFromRemote(remote RemoteResolver, branch string) error
// CheckoutPullRequest fetches and checks out the synthetic refspec from GitHub for a pull request HEAD
CheckoutPullRequest(number int) error
// Config runs `git config`
Config(args ...string) error
// Diff runs `git diff`
Diff(head, sha string) (changes []string, err error)
// MergeCommitsExistBetween determines if merge commits exist between target and HEAD
MergeCommitsExistBetween(target, head string) (bool, error)
// ShowRef returns the commit for a commitlike. Unlike rev-parse it does not require a checkout.
ShowRef(commitlike string) (string, error)
}
// cacher knows how to cache and update repositories in a central cache
type cacher interface {
// MirrorClone sets up a mirror of the source repository.
MirrorClone() error
// RemoteUpdate fetches all updates from the remote.
RemoteUpdate() error
}
// cloner knows how to clone repositories from a central cache
type cloner interface {
// Clone clones the repository from a local path.
Clone(from string) error
CloneWithRepoOpts(from string, repoOpts RepoOpts) error
// FetchCommits fetches only the given commits.
FetchCommits(bool, []string) error
}
// MergeOpt holds options for git merge operations.
// Currently only commit message option is supported.
type MergeOpt struct {
CommitMessage string
}
type interactor struct {
executor executor
remote RemoteResolver
dir string
logger *logrus.Entry
}
// Directory exposes the directory in which this repository has been cloned
func (i *interactor) Directory() string {
return i.dir
}
// Clean cleans up the repository from the on-disk cache
func (i *interactor) Clean() error {
return os.RemoveAll(i.dir)
}
// ResetHard runs `git reset --hard`
func (i *interactor) ResetHard(commitlike string) error {
// `git reset --hard` doesn't cleanup untracked file
i.logger.Info("Clean untracked files and dirs.")
if out, err := i.executor.Run("clean", "-df"); err != nil {
return fmt.Errorf("error clean -df: %v. output: %s", err, string(out))
}
i.logger.WithField("commitlike", commitlike).Info("Reset hard.")
if out, err := i.executor.Run("reset", "--hard", commitlike); err != nil {
return fmt.Errorf("error reset hard %s: %v. output: %s", commitlike, err, string(out))
}
return nil
}
// IsDirty checks whether the repo is dirty or not
func (i *interactor) IsDirty() (bool, error) {
i.logger.Info("Checking is dirty.")
b, err := i.executor.Run("status", "--porcelain")
if err != nil {
return false, fmt.Errorf("error add -A: %v. output: %s", err, string(b))
}
return len(b) > 0, nil
}
// Clone clones the repository from a local path.
func (i *interactor) Clone(from string) error {
return i.CloneWithRepoOpts(from, RepoOpts{})
}
// CloneWithRepoOpts clones the repository from a local path, but additionally
// use any repository options (RepoOpts) to customize the clone behavior.
func (i *interactor) CloneWithRepoOpts(from string, repoOpts RepoOpts) error {
i.logger.Infof("Creating a clone of the repo at %s from %s", i.dir, from)
cloneArgs := []string{"clone"}
if repoOpts.ShareObjectsWithSourceRepo {
cloneArgs = append(cloneArgs, "--shared")
}
// Handle sparse checkouts.
if repoOpts.SparseCheckoutDirs != nil {
cloneArgs = append(cloneArgs, "--sparse")
}
cloneArgs = append(cloneArgs, []string{from, i.dir}...)
if out, err := i.executor.Run(cloneArgs...); err != nil {
return fmt.Errorf("error creating a clone: %w %v", err, string(out))
}
// For sparse checkouts, we have to do some additional housekeeping after
// the clone is completed. We use Git's global "-C <directory>" flag to
// switch to that directory before running the "sparse-checkout" command,
// because otherwise the command will fail (because it will try to run the
// command in the $PWD, which is not the same as the just-created clone
// directory (i.dir)).
if repoOpts.SparseCheckoutDirs != nil {
if len(repoOpts.SparseCheckoutDirs) == 0 {
return nil
}
sparseCheckoutArgs := []string{"-C", i.dir, "sparse-checkout", "set"}
sparseCheckoutArgs = append(sparseCheckoutArgs, repoOpts.SparseCheckoutDirs...)
if out, err := i.executor.Run(sparseCheckoutArgs...); err != nil {
return fmt.Errorf("error setting it to a sparse checkout: %w %v", err, string(out))
}
}
return nil
}
// MirrorClone sets up a mirror of the source repository.
func (i *interactor) MirrorClone() error {
i.logger.Infof("Creating a mirror of the repo at %s", i.dir)
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for cloning: %w", err)
}
if out, err := i.executor.Run("clone", "--mirror", remote, i.dir); err != nil {
return fmt.Errorf("error creating a mirror clone: %w %v", err, string(out))
}
return nil
}
// Checkout runs git checkout.
func (i *interactor) Checkout(commitlike string) error {
i.logger.Infof("Checking out %q", commitlike)
if out, err := i.executor.Run("checkout", commitlike); err != nil {
return fmt.Errorf("error checking out %q: %w %v", commitlike, err, string(out))
}
return nil
}
// RevParse runs git rev-parse.
func (i *interactor) RevParse(commitlike string) (string, error) {
i.logger.Infof("Parsing revision %q", commitlike)
out, err := i.executor.Run("rev-parse", commitlike)
if err != nil {
return "", fmt.Errorf("error parsing %q: %w %v", commitlike, err, string(out))
}
return string(out), nil
}
// BranchExists returns true if branch exists in heads.
func (i *interactor) BranchExists(branch string) bool {
i.logger.Infof("Checking if branch %q exists", branch)
_, err := i.executor.Run("ls-remote", "--exit-code", "--heads", "origin", branch)
return err == nil
}
func (i *interactor) ObjectExists(sha string) (bool, error) {
i.logger.WithField("SHA", sha).Info("Checking if Git object exists")
output, err := i.executor.Run("cat-file", "-e", sha)
// If the object does not exist, cat-file will exit with a non-zero exit
// code. This will make err non-nil. However this is a known behavior, so
// we just log it.
//
// We still have the error type as a return value because the v1 git client
// adapter needs to know that this operation is not supported there.
if err != nil {
i.logger.WithError(err).WithField("SHA", sha).Debugf("error from 'git cat-file -e': %s", string(output))
return false, nil
}
return true, nil
}
// CheckoutNewBranch creates a new branch and checks it out.
func (i *interactor) CheckoutNewBranch(branch string) error {
i.logger.Infof("Checking out new branch %q", branch)
if out, err := i.executor.Run("checkout", "-b", branch); err != nil {
return fmt.Errorf("error checking out new branch %q: %w %v", branch, err, string(out))
}
return nil
}
// Merge attempts to merge commitlike into the current branch. It returns true
// if the merge completes. It returns an error if the abort fails.
func (i *interactor) Merge(commitlike string) (bool, error) {
return i.MergeWithStrategy(commitlike, "merge")
}
// MergeWithStrategy attempts to merge commitlike into the current branch given the merge strategy.
// It returns true if the merge completes. if the merge does not complete successfully, we try to
// abort it and return an error if the abort fails.
func (i *interactor) MergeWithStrategy(commitlike, mergeStrategy string, opts ...MergeOpt) (bool, error) {
i.logger.Infof("Merging %q using the %q strategy", commitlike, mergeStrategy)
switch mergeStrategy {
case "merge":
return i.mergeMerge(commitlike, opts...)
case "squash":
return i.squashMerge(commitlike)
case "rebase":
return i.mergeRebase(commitlike)
case "ifNecessary":
return i.mergeIfNecessary(commitlike, opts...)
default:
return false, fmt.Errorf("merge strategy %q is not supported", mergeStrategy)
}
}
func (i *interactor) mergeHelper(args []string, commitlike string, opts ...MergeOpt) (bool, error) {
if len(opts) == 0 {
args = append(args, []string{"-m", "merge"}...)
} else {
for _, opt := range opts {
args = append(args, []string{"-m", opt.CommitMessage}...)
}
}
args = append(args, commitlike)
out, err := i.executor.Run(args...)
if err == nil {
return true, nil
}
i.logger.WithError(err).Infof("Error merging %q: %s", commitlike, string(out))
if out, err := i.executor.Run("merge", "--abort"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
func (i *interactor) mergeMerge(commitlike string, opts ...MergeOpt) (bool, error) {
args := []string{"merge", "--no-ff", "--no-stat"}
return i.mergeHelper(args, commitlike, opts...)
}
func (i *interactor) mergeIfNecessary(commitlike string, opts ...MergeOpt) (bool, error) {
args := []string{"merge", "--ff", "--no-stat"}
return i.mergeHelper(args, commitlike, opts...)
}
func (i *interactor) squashMerge(commitlike string) (bool, error) {
out, err := i.executor.Run("merge", "--squash", "--no-stat", commitlike)
if err != nil {
i.logger.WithError(err).Warnf("Error staging merge for %q: %s", commitlike, string(out))
if out, err := i.executor.Run("reset", "--hard", "HEAD"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
out, err = i.executor.Run("commit", "--no-stat", "-m", "merge")
if err != nil {
i.logger.WithError(err).Warnf("Error committing merge for %q: %s", commitlike, string(out))
if out, err := i.executor.Run("reset", "--hard", "HEAD"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
return true, nil
}
func (i *interactor) mergeRebase(commitlike string) (bool, error) {
if commitlike == "" {
return false, errors.New("branch must be set")
}
headRev, err := i.revParse("HEAD")
if err != nil {
i.logger.WithError(err).Infof("Failed to parse HEAD revision")
return false, err
}
headRev = strings.TrimSuffix(headRev, "\n")
b, err := i.executor.Run("rebase", "--no-stat", headRev, commitlike)
if err != nil {
i.logger.WithField("out", string(b)).WithError(err).Infof("Rebase failed.")
if b, err := i.executor.Run("rebase", "--abort"); err != nil {
return false, fmt.Errorf("error aborting after failed rebase for commitlike %s: %v. output: %s", commitlike, err, string(b))
}
return false, nil
}
return true, nil
}
func (i *interactor) revParse(args ...string) (string, error) {
fullArgs := append([]string{"rev-parse"}, args...)
b, err := i.executor.Run(fullArgs...)
if err != nil {
return "", errors.New(string(b))
}
return string(b), nil
}
// Only the `merge` and `squash` strategies are supported.
func (i *interactor) MergeAndCheckout(baseSHA string, mergeStrategy string, headSHAs ...string) error {
if baseSHA == "" {
return errors.New("baseSHA must be set")
}
if err := i.Checkout(baseSHA); err != nil {
return err
}
for _, headSHA := range headSHAs {
ok, err := i.MergeWithStrategy(headSHA, mergeStrategy)
if err != nil {
return err
} else if !ok {
return fmt.Errorf("failed to merge %q", headSHA)
}
}
return nil
}
// Am tries to apply the patch in the given path into the current branch
// by performing a three-way merge (similar to git cherry-pick). It returns
// an error if the patch cannot be applied.
func (i *interactor) Am(path string) error {
i.logger.Infof("Applying patch at %s", path)
out, err := i.executor.Run("am", "--3way", path)
if err == nil {
return nil
}
i.logger.WithError(err).Infof("Patch apply failed with output: %s", string(out))
if abortOut, abortErr := i.executor.Run("am", "--abort"); err != nil {
i.logger.WithError(abortErr).Warningf("Aborting patch apply failed with output: %s", string(abortOut))
}
return errors.New(string(bytes.TrimPrefix(out, []byte("The copy of the patch that failed is found in: .git/rebase-apply/patch"))))
}
// FetchCommits only fetches those commits which we want, and only if they are
// missing.
func (i *interactor) FetchCommits(noFetchTags bool, commitSHAs []string) error |
// RemoteUpdate fetches all updates from the remote.
func (i *interactor) RemoteUpdate() error {
i.logger.Info("Updating from remote")
if out, err := i.executor.Run("remote", "update", "--prune"); err != nil {
return fmt.Errorf("error updating: %w %v", err, string(out))
}
return nil
}
// Fetch fetches all updates from the remote.
func (i *interactor) Fetch(arg ...string) error {
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for fetching: %w", err)
}
arg = append([]string{"fetch", remote}, arg...)
i.logger.Infof("Fetching from %s", remote)
if out, err := i.executor.Run(arg...); err != nil {
return fmt.Errorf("error fetching: %w %v", err, string(out))
}
return nil
}
// FetchRef fetches a refspec from the remote and leaves it as FETCH_HEAD.
func (i *interactor) FetchRef(refspec string) error {
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for fetching: %w", err)
}
i.logger.Infof("Fetching %q from %s", refspec, remote)
if out, err := i.executor.Run("fetch", remote, refspec); err != nil {
return fmt.Errorf("error fetching %q: %w %v", refspec, err, string(out))
}
return nil
}
// FetchFromRemote fetches all update from a specific remote and branch and leaves it as FETCH_HEAD.
func (i *interactor) FetchFromRemote(remote RemoteResolver, branch string) error {
r, err := remote()
if err != nil {
return fmt.Errorf("couldn't get remote: %w", err)
}
i.logger.Infof("Fetching %s from %s", branch, r)
if out, err := i.executor.Run("fetch", r, branch); err != nil {
return fmt.Errorf("error fetching %s from %s: %w %v", branch, r, err, string(out))
}
return nil
}
// CheckoutPullRequest fetches the HEAD of a pull request using a synthetic refspec
// available on GitHub remotes and creates a branch at that commit.
func (i *interactor) CheckoutPullRequest(number int) error {
i.logger.Infof("Checking out pull request %d", number)
if err := i.FetchRef(fmt.Sprintf("pull/%d/head", number)); err != nil {
return err
}
if err := i.Checkout("FETCH_HEAD"); err != nil {
return err
}
if err := i.CheckoutNewBranch(fmt.Sprintf("pull%d", number)); err != nil {
return err
}
return nil
}
// Config runs git config.
func (i *interactor) Config(args ...string) error {
i.logger.WithField("args", args).Info("Configuring.")
if out, err := i.executor.Run(append([]string{"config"}, args...)...); err != nil {
return fmt.Errorf("error configuring %v: %w %v", args, err, string(out))
}
return nil
}
// Diff lists the difference between the two references, returning the output
// line by line.
func (i *interactor) Diff(head, sha string) ([]string, error) {
i.logger.Infof("Finding the differences between %q and %q", head, sha)
out, err := i.executor.Run("diff", head, sha, "--name-only")
if err != nil {
return nil, err
}
var changes []string
scan := bufio.NewScanner(bytes.NewReader(out))
scan.Split(bufio.ScanLines)
for scan.Scan() {
changes = append(changes, scan.Text())
}
return changes, nil
}
// MergeCommitsExistBetween runs 'git log <target>..<head> --merged' to verify
// if merge commits exist between "target" and "head".
func (i *interactor) MergeCommitsExistBetween(target, head string) (bool, error) {
i.logger.Infof("Determining if merge commits exist between %q and %q", target, head)
out, err := i.executor.Run("log", fmt.Sprintf("%s..%s", target, head), "--oneline", "--merges")
if err != nil {
return false, fmt.Errorf("error verifying if merge commits exist between %q and %q: %v %s", target, head, err, string(out))
}
return len(out) != 0, nil
}
func (i *interactor) ShowRef(commitlike string) (string, error) {
i.logger.Infof("Getting the commit sha for commitlike %s", commitlike)
out, err := i.executor.Run("show-ref", "-s", commitlike)
if err != nil {
return "", fmt.Errorf("failed to get commit sha for commitlike %s: %w", commitlike, err)
}
return strings.TrimSpace(string(out)), nil
}
| {
fetchArgs := []string{"--no-write-fetch-head"}
if noFetchTags {
fetchArgs = append(fetchArgs, "--no-tags")
}
// For each commit SHA, check if it already exists. If so, don't bother
// fetching it.
var missingCommits bool
for _, commitSHA := range commitSHAs {
if exists, _ := i.ObjectExists(commitSHA); exists {
continue
}
fetchArgs = append(fetchArgs, commitSHA)
missingCommits = true
}
// Skip the fetch operation altogether if nothing is missing (we already
// fetched everything previously at some point).
if !missingCommits {
return nil
}
if err := i.Fetch(fetchArgs...); err != nil {
return fmt.Errorf("failed to fetch %s: %v", fetchArgs, err)
}
return nil
} | identifier_body |
interactor.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package git
import (
"bufio"
"bytes"
"errors"
"fmt"
"os"
"strings"
"github.com/sirupsen/logrus"
)
// Interactor knows how to operate on a git repository cloned from GitHub
// using a local cache.
type Interactor interface {
// Directory exposes the directory in which the repository has been cloned
Directory() string
// Clean removes the repository. It is up to the user to call this once they are done
Clean() error
// ResetHard runs `git reset --hard`
ResetHard(commitlike string) error
// IsDirty checks whether the repo is dirty or not
IsDirty() (bool, error)
// Checkout runs `git checkout`
Checkout(commitlike string) error
// RevParse runs `git rev-parse`
RevParse(commitlike string) (string, error)
// BranchExists determines if a branch with the name exists
BranchExists(branch string) bool
// ObjectExists determines if the Git object exists locally
ObjectExists(sha string) (bool, error)
// CheckoutNewBranch creates a new branch from HEAD and checks it out
CheckoutNewBranch(branch string) error
// Merge merges the commitlike into the current HEAD
Merge(commitlike string) (bool, error)
// MergeWithStrategy merges the commitlike into the current HEAD with the strategy
MergeWithStrategy(commitlike, mergeStrategy string, opts ...MergeOpt) (bool, error)
// MergeAndCheckout merges all commitlikes into the current HEAD with the appropriate strategy
MergeAndCheckout(baseSHA string, mergeStrategy string, headSHAs ...string) error
// Am calls `git am`
Am(path string) error
// Fetch calls `git fetch arg...`
Fetch(arg ...string) error
// FetchRef fetches the refspec
FetchRef(refspec string) error
// FetchFromRemote fetches the branch of the given remote
FetchFromRemote(remote RemoteResolver, branch string) error
// CheckoutPullRequest fetches and checks out the synthetic refspec from GitHub for a pull request HEAD
CheckoutPullRequest(number int) error
// Config runs `git config`
Config(args ...string) error
// Diff runs `git diff`
Diff(head, sha string) (changes []string, err error)
// MergeCommitsExistBetween determines if merge commits exist between target and HEAD
MergeCommitsExistBetween(target, head string) (bool, error)
// ShowRef returns the commit for a commitlike. Unlike rev-parse it does not require a checkout.
ShowRef(commitlike string) (string, error)
}
// cacher knows how to cache and update repositories in a central cache
type cacher interface {
// MirrorClone sets up a mirror of the source repository.
MirrorClone() error
// RemoteUpdate fetches all updates from the remote.
RemoteUpdate() error
}
// cloner knows how to clone repositories from a central cache
type cloner interface {
// Clone clones the repository from a local path.
Clone(from string) error
CloneWithRepoOpts(from string, repoOpts RepoOpts) error
// FetchCommits fetches only the given commits.
FetchCommits(bool, []string) error
}
// MergeOpt holds options for git merge operations.
// Currently only commit message option is supported.
type MergeOpt struct {
CommitMessage string
}
type interactor struct {
executor executor
remote RemoteResolver
dir string
logger *logrus.Entry
}
// Directory exposes the directory in which this repository has been cloned
func (i *interactor) Directory() string {
return i.dir
}
// Clean cleans up the repository from the on-disk cache
func (i *interactor) Clean() error {
return os.RemoveAll(i.dir)
}
// ResetHard runs `git reset --hard`
func (i *interactor) ResetHard(commitlike string) error {
// `git reset --hard` doesn't cleanup untracked file
i.logger.Info("Clean untracked files and dirs.")
if out, err := i.executor.Run("clean", "-df"); err != nil {
return fmt.Errorf("error clean -df: %v. output: %s", err, string(out))
}
i.logger.WithField("commitlike", commitlike).Info("Reset hard.")
if out, err := i.executor.Run("reset", "--hard", commitlike); err != nil {
return fmt.Errorf("error reset hard %s: %v. output: %s", commitlike, err, string(out))
}
return nil
}
// IsDirty checks whether the repo is dirty or not
func (i *interactor) IsDirty() (bool, error) {
i.logger.Info("Checking is dirty.")
b, err := i.executor.Run("status", "--porcelain")
if err != nil {
return false, fmt.Errorf("error add -A: %v. output: %s", err, string(b))
}
return len(b) > 0, nil
}
// Clone clones the repository from a local path.
func (i *interactor) Clone(from string) error {
return i.CloneWithRepoOpts(from, RepoOpts{})
}
// CloneWithRepoOpts clones the repository from a local path, but additionally
// use any repository options (RepoOpts) to customize the clone behavior.
func (i *interactor) CloneWithRepoOpts(from string, repoOpts RepoOpts) error {
i.logger.Infof("Creating a clone of the repo at %s from %s", i.dir, from)
cloneArgs := []string{"clone"}
if repoOpts.ShareObjectsWithSourceRepo {
cloneArgs = append(cloneArgs, "--shared")
}
// Handle sparse checkouts.
if repoOpts.SparseCheckoutDirs != nil {
cloneArgs = append(cloneArgs, "--sparse")
}
cloneArgs = append(cloneArgs, []string{from, i.dir}...)
if out, err := i.executor.Run(cloneArgs...); err != nil {
return fmt.Errorf("error creating a clone: %w %v", err, string(out))
}
// For sparse checkouts, we have to do some additional housekeeping after
// the clone is completed. We use Git's global "-C <directory>" flag to
// switch to that directory before running the "sparse-checkout" command,
// because otherwise the command will fail (because it will try to run the
// command in the $PWD, which is not the same as the just-created clone
// directory (i.dir)).
if repoOpts.SparseCheckoutDirs != nil {
if len(repoOpts.SparseCheckoutDirs) == 0 {
return nil
}
sparseCheckoutArgs := []string{"-C", i.dir, "sparse-checkout", "set"}
sparseCheckoutArgs = append(sparseCheckoutArgs, repoOpts.SparseCheckoutDirs...)
if out, err := i.executor.Run(sparseCheckoutArgs...); err != nil {
return fmt.Errorf("error setting it to a sparse checkout: %w %v", err, string(out))
}
}
return nil
}
// MirrorClone sets up a mirror of the source repository.
func (i *interactor) MirrorClone() error {
i.logger.Infof("Creating a mirror of the repo at %s", i.dir)
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for cloning: %w", err)
}
if out, err := i.executor.Run("clone", "--mirror", remote, i.dir); err != nil {
return fmt.Errorf("error creating a mirror clone: %w %v", err, string(out))
}
return nil
}
// Checkout runs git checkout.
func (i *interactor) Checkout(commitlike string) error {
i.logger.Infof("Checking out %q", commitlike)
if out, err := i.executor.Run("checkout", commitlike); err != nil {
return fmt.Errorf("error checking out %q: %w %v", commitlike, err, string(out))
}
return nil
}
// RevParse runs git rev-parse.
func (i *interactor) RevParse(commitlike string) (string, error) {
i.logger.Infof("Parsing revision %q", commitlike)
out, err := i.executor.Run("rev-parse", commitlike)
if err != nil {
return "", fmt.Errorf("error parsing %q: %w %v", commitlike, err, string(out))
}
return string(out), nil
}
// BranchExists returns true if branch exists in heads.
func (i *interactor) BranchExists(branch string) bool {
i.logger.Infof("Checking if branch %q exists", branch)
_, err := i.executor.Run("ls-remote", "--exit-code", "--heads", "origin", branch)
return err == nil
}
func (i *interactor) ObjectExists(sha string) (bool, error) {
i.logger.WithField("SHA", sha).Info("Checking if Git object exists")
output, err := i.executor.Run("cat-file", "-e", sha)
// If the object does not exist, cat-file will exit with a non-zero exit
// code. This will make err non-nil. However this is a known behavior, so
// we just log it.
//
// We still have the error type as a return value because the v1 git client
// adapter needs to know that this operation is not supported there.
if err != nil {
i.logger.WithError(err).WithField("SHA", sha).Debugf("error from 'git cat-file -e': %s", string(output))
return false, nil
}
return true, nil
}
// CheckoutNewBranch creates a new branch and checks it out.
func (i *interactor) CheckoutNewBranch(branch string) error {
i.logger.Infof("Checking out new branch %q", branch)
if out, err := i.executor.Run("checkout", "-b", branch); err != nil {
return fmt.Errorf("error checking out new branch %q: %w %v", branch, err, string(out))
}
return nil
}
// Merge attempts to merge commitlike into the current branch. It returns true
// if the merge completes. It returns an error if the abort fails.
func (i *interactor) Merge(commitlike string) (bool, error) {
return i.MergeWithStrategy(commitlike, "merge")
}
// MergeWithStrategy attempts to merge commitlike into the current branch given the merge strategy.
// It returns true if the merge completes. if the merge does not complete successfully, we try to
// abort it and return an error if the abort fails.
func (i *interactor) MergeWithStrategy(commitlike, mergeStrategy string, opts ...MergeOpt) (bool, error) {
i.logger.Infof("Merging %q using the %q strategy", commitlike, mergeStrategy)
switch mergeStrategy {
case "merge":
return i.mergeMerge(commitlike, opts...)
case "squash":
return i.squashMerge(commitlike)
case "rebase":
return i.mergeRebase(commitlike)
case "ifNecessary":
return i.mergeIfNecessary(commitlike, opts...)
default:
return false, fmt.Errorf("merge strategy %q is not supported", mergeStrategy)
}
}
func (i *interactor) mergeHelper(args []string, commitlike string, opts ...MergeOpt) (bool, error) {
if len(opts) == 0 {
args = append(args, []string{"-m", "merge"}...)
} else {
for _, opt := range opts {
args = append(args, []string{"-m", opt.CommitMessage}...)
}
}
args = append(args, commitlike)
out, err := i.executor.Run(args...)
if err == nil {
return true, nil
}
i.logger.WithError(err).Infof("Error merging %q: %s", commitlike, string(out))
if out, err := i.executor.Run("merge", "--abort"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
func (i *interactor) mergeMerge(commitlike string, opts ...MergeOpt) (bool, error) {
args := []string{"merge", "--no-ff", "--no-stat"}
return i.mergeHelper(args, commitlike, opts...)
}
func (i *interactor) mergeIfNecessary(commitlike string, opts ...MergeOpt) (bool, error) {
args := []string{"merge", "--ff", "--no-stat"}
return i.mergeHelper(args, commitlike, opts...)
}
func (i *interactor) squashMerge(commitlike string) (bool, error) {
out, err := i.executor.Run("merge", "--squash", "--no-stat", commitlike)
if err != nil {
i.logger.WithError(err).Warnf("Error staging merge for %q: %s", commitlike, string(out))
if out, err := i.executor.Run("reset", "--hard", "HEAD"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
out, err = i.executor.Run("commit", "--no-stat", "-m", "merge")
if err != nil {
i.logger.WithError(err).Warnf("Error committing merge for %q: %s", commitlike, string(out))
if out, err := i.executor.Run("reset", "--hard", "HEAD"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
return true, nil
}
func (i *interactor) mergeRebase(commitlike string) (bool, error) {
if commitlike == "" {
return false, errors.New("branch must be set")
}
headRev, err := i.revParse("HEAD")
if err != nil {
i.logger.WithError(err).Infof("Failed to parse HEAD revision")
return false, err
}
headRev = strings.TrimSuffix(headRev, "\n")
b, err := i.executor.Run("rebase", "--no-stat", headRev, commitlike)
if err != nil {
i.logger.WithField("out", string(b)).WithError(err).Infof("Rebase failed.")
if b, err := i.executor.Run("rebase", "--abort"); err != nil {
return false, fmt.Errorf("error aborting after failed rebase for commitlike %s: %v. output: %s", commitlike, err, string(b))
}
return false, nil
}
return true, nil
}
func (i *interactor) revParse(args ...string) (string, error) {
fullArgs := append([]string{"rev-parse"}, args...)
b, err := i.executor.Run(fullArgs...)
if err != nil {
return "", errors.New(string(b))
}
return string(b), nil
}
// Only the `merge` and `squash` strategies are supported.
func (i *interactor) MergeAndCheckout(baseSHA string, mergeStrategy string, headSHAs ...string) error {
if baseSHA == "" {
return errors.New("baseSHA must be set")
}
if err := i.Checkout(baseSHA); err != nil {
return err
}
for _, headSHA := range headSHAs {
ok, err := i.MergeWithStrategy(headSHA, mergeStrategy)
if err != nil {
return err
} else if !ok {
return fmt.Errorf("failed to merge %q", headSHA)
}
}
return nil
}
// Am tries to apply the patch in the given path into the current branch
// by performing a three-way merge (similar to git cherry-pick). It returns
// an error if the patch cannot be applied.
func (i *interactor) Am(path string) error {
i.logger.Infof("Applying patch at %s", path)
out, err := i.executor.Run("am", "--3way", path)
if err == nil {
return nil
}
i.logger.WithError(err).Infof("Patch apply failed with output: %s", string(out))
if abortOut, abortErr := i.executor.Run("am", "--abort"); err != nil {
i.logger.WithError(abortErr).Warningf("Aborting patch apply failed with output: %s", string(abortOut))
}
return errors.New(string(bytes.TrimPrefix(out, []byte("The copy of the patch that failed is found in: .git/rebase-apply/patch"))))
}
// FetchCommits only fetches those commits which we want, and only if they are
// missing.
func (i *interactor) FetchCommits(noFetchTags bool, commitSHAs []string) error {
fetchArgs := []string{"--no-write-fetch-head"}
if noFetchTags {
fetchArgs = append(fetchArgs, "--no-tags")
}
// For each commit SHA, check if it already exists. If so, don't bother
// fetching it.
var missingCommits bool
for _, commitSHA := range commitSHAs {
if exists, _ := i.ObjectExists(commitSHA); exists {
continue
}
fetchArgs = append(fetchArgs, commitSHA)
missingCommits = true
}
// Skip the fetch operation altogether if nothing is missing (we already
// fetched everything previously at some point).
if !missingCommits {
return nil
}
if err := i.Fetch(fetchArgs...); err != nil {
return fmt.Errorf("failed to fetch %s: %v", fetchArgs, err)
}
return nil
}
// RemoteUpdate fetches all updates from the remote.
func (i *interactor) RemoteUpdate() error {
i.logger.Info("Updating from remote")
if out, err := i.executor.Run("remote", "update", "--prune"); err != nil {
return fmt.Errorf("error updating: %w %v", err, string(out))
}
return nil
}
// Fetch fetches all updates from the remote.
func (i *interactor) Fetch(arg ...string) error {
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for fetching: %w", err)
}
arg = append([]string{"fetch", remote}, arg...)
i.logger.Infof("Fetching from %s", remote)
if out, err := i.executor.Run(arg...); err != nil {
return fmt.Errorf("error fetching: %w %v", err, string(out))
}
return nil
}
// FetchRef fetches a refspec from the remote and leaves it as FETCH_HEAD.
func (i *interactor) FetchRef(refspec string) error {
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for fetching: %w", err)
}
i.logger.Infof("Fetching %q from %s", refspec, remote)
if out, err := i.executor.Run("fetch", remote, refspec); err != nil {
return fmt.Errorf("error fetching %q: %w %v", refspec, err, string(out))
}
return nil
}
// FetchFromRemote fetches all update from a specific remote and branch and leaves it as FETCH_HEAD.
func (i *interactor) FetchFromRemote(remote RemoteResolver, branch string) error {
r, err := remote()
if err != nil {
return fmt.Errorf("couldn't get remote: %w", err)
}
i.logger.Infof("Fetching %s from %s", branch, r)
if out, err := i.executor.Run("fetch", r, branch); err != nil {
return fmt.Errorf("error fetching %s from %s: %w %v", branch, r, err, string(out))
}
return nil
}
// CheckoutPullRequest fetches the HEAD of a pull request using a synthetic refspec
// available on GitHub remotes and creates a branch at that commit.
func (i *interactor) CheckoutPullRequest(number int) error {
i.logger.Infof("Checking out pull request %d", number)
if err := i.FetchRef(fmt.Sprintf("pull/%d/head", number)); err != nil {
return err
}
if err := i.Checkout("FETCH_HEAD"); err != nil {
return err
}
if err := i.CheckoutNewBranch(fmt.Sprintf("pull%d", number)); err != nil {
return err
}
return nil
}
// Config runs git config.
func (i *interactor) Config(args ...string) error {
i.logger.WithField("args", args).Info("Configuring.")
if out, err := i.executor.Run(append([]string{"config"}, args...)...); err != nil {
return fmt.Errorf("error configuring %v: %w %v", args, err, string(out))
}
return nil
}
// Diff lists the difference between the two references, returning the output
// line by line.
func (i *interactor) Diff(head, sha string) ([]string, error) {
i.logger.Infof("Finding the differences between %q and %q", head, sha)
out, err := i.executor.Run("diff", head, sha, "--name-only")
if err != nil {
return nil, err
}
var changes []string
scan := bufio.NewScanner(bytes.NewReader(out))
scan.Split(bufio.ScanLines)
for scan.Scan() {
changes = append(changes, scan.Text())
}
return changes, nil
}
// MergeCommitsExistBetween runs 'git log <target>..<head> --merged' to verify
// if merge commits exist between "target" and "head".
func (i *interactor) MergeCommitsExistBetween(target, head string) (bool, error) {
i.logger.Infof("Determining if merge commits exist between %q and %q", target, head)
out, err := i.executor.Run("log", fmt.Sprintf("%s..%s", target, head), "--oneline", "--merges")
if err != nil {
return false, fmt.Errorf("error verifying if merge commits exist between %q and %q: %v %s", target, head, err, string(out))
}
return len(out) != 0, nil
}
func (i *interactor) | (commitlike string) (string, error) {
i.logger.Infof("Getting the commit sha for commitlike %s", commitlike)
out, err := i.executor.Run("show-ref", "-s", commitlike)
if err != nil {
return "", fmt.Errorf("failed to get commit sha for commitlike %s: %w", commitlike, err)
}
return strings.TrimSpace(string(out)), nil
}
| ShowRef | identifier_name |
interactor.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package git
import (
"bufio"
"bytes"
"errors"
"fmt"
"os"
"strings"
"github.com/sirupsen/logrus"
)
// Interactor knows how to operate on a git repository cloned from GitHub
// using a local cache.
type Interactor interface {
// Directory exposes the directory in which the repository has been cloned
Directory() string
// Clean removes the repository. It is up to the user to call this once they are done
Clean() error
// ResetHard runs `git reset --hard`
ResetHard(commitlike string) error
// IsDirty checks whether the repo is dirty or not
IsDirty() (bool, error)
// Checkout runs `git checkout`
Checkout(commitlike string) error
// RevParse runs `git rev-parse`
RevParse(commitlike string) (string, error)
// BranchExists determines if a branch with the name exists
BranchExists(branch string) bool
// ObjectExists determines if the Git object exists locally
ObjectExists(sha string) (bool, error)
// CheckoutNewBranch creates a new branch from HEAD and checks it out
CheckoutNewBranch(branch string) error
// Merge merges the commitlike into the current HEAD
Merge(commitlike string) (bool, error)
// MergeWithStrategy merges the commitlike into the current HEAD with the strategy
MergeWithStrategy(commitlike, mergeStrategy string, opts ...MergeOpt) (bool, error)
// MergeAndCheckout merges all commitlikes into the current HEAD with the appropriate strategy
MergeAndCheckout(baseSHA string, mergeStrategy string, headSHAs ...string) error
// Am calls `git am`
Am(path string) error
// Fetch calls `git fetch arg...`
Fetch(arg ...string) error
// FetchRef fetches the refspec
FetchRef(refspec string) error
// FetchFromRemote fetches the branch of the given remote
FetchFromRemote(remote RemoteResolver, branch string) error
// CheckoutPullRequest fetches and checks out the synthetic refspec from GitHub for a pull request HEAD
CheckoutPullRequest(number int) error
// Config runs `git config`
Config(args ...string) error
// Diff runs `git diff`
Diff(head, sha string) (changes []string, err error)
// MergeCommitsExistBetween determines if merge commits exist between target and HEAD
MergeCommitsExistBetween(target, head string) (bool, error)
// ShowRef returns the commit for a commitlike. Unlike rev-parse it does not require a checkout.
ShowRef(commitlike string) (string, error)
}
// cacher knows how to cache and update repositories in a central cache
type cacher interface {
// MirrorClone sets up a mirror of the source repository.
MirrorClone() error
// RemoteUpdate fetches all updates from the remote.
RemoteUpdate() error
}
// cloner knows how to clone repositories from a central cache
type cloner interface {
// Clone clones the repository from a local path.
Clone(from string) error
CloneWithRepoOpts(from string, repoOpts RepoOpts) error
// FetchCommits fetches only the given commits.
FetchCommits(bool, []string) error
}
// MergeOpt holds options for git merge operations.
// Currently only commit message option is supported.
type MergeOpt struct {
CommitMessage string
}
type interactor struct {
executor executor
remote RemoteResolver
dir string
logger *logrus.Entry
}
// Directory exposes the directory in which this repository has been cloned
func (i *interactor) Directory() string {
return i.dir
}
// Clean cleans up the repository from the on-disk cache
func (i *interactor) Clean() error {
return os.RemoveAll(i.dir)
}
// ResetHard runs `git reset --hard`
func (i *interactor) ResetHard(commitlike string) error {
// `git reset --hard` doesn't cleanup untracked file
i.logger.Info("Clean untracked files and dirs.")
if out, err := i.executor.Run("clean", "-df"); err != nil {
return fmt.Errorf("error clean -df: %v. output: %s", err, string(out))
}
i.logger.WithField("commitlike", commitlike).Info("Reset hard.")
if out, err := i.executor.Run("reset", "--hard", commitlike); err != nil {
return fmt.Errorf("error reset hard %s: %v. output: %s", commitlike, err, string(out))
}
return nil
}
// IsDirty checks whether the repo is dirty or not
func (i *interactor) IsDirty() (bool, error) {
i.logger.Info("Checking is dirty.")
b, err := i.executor.Run("status", "--porcelain")
if err != nil {
return false, fmt.Errorf("error add -A: %v. output: %s", err, string(b))
}
return len(b) > 0, nil
}
// Clone clones the repository from a local path.
func (i *interactor) Clone(from string) error {
return i.CloneWithRepoOpts(from, RepoOpts{})
}
// CloneWithRepoOpts clones the repository from a local path, but additionally
// use any repository options (RepoOpts) to customize the clone behavior.
func (i *interactor) CloneWithRepoOpts(from string, repoOpts RepoOpts) error {
i.logger.Infof("Creating a clone of the repo at %s from %s", i.dir, from)
cloneArgs := []string{"clone"}
if repoOpts.ShareObjectsWithSourceRepo {
cloneArgs = append(cloneArgs, "--shared")
}
// Handle sparse checkouts.
if repoOpts.SparseCheckoutDirs != nil {
cloneArgs = append(cloneArgs, "--sparse")
}
cloneArgs = append(cloneArgs, []string{from, i.dir}...)
if out, err := i.executor.Run(cloneArgs...); err != nil {
return fmt.Errorf("error creating a clone: %w %v", err, string(out))
}
// For sparse checkouts, we have to do some additional housekeeping after
// the clone is completed. We use Git's global "-C <directory>" flag to
// switch to that directory before running the "sparse-checkout" command,
// because otherwise the command will fail (because it will try to run the
// command in the $PWD, which is not the same as the just-created clone
// directory (i.dir)).
if repoOpts.SparseCheckoutDirs != nil {
if len(repoOpts.SparseCheckoutDirs) == 0 {
return nil
}
sparseCheckoutArgs := []string{"-C", i.dir, "sparse-checkout", "set"}
sparseCheckoutArgs = append(sparseCheckoutArgs, repoOpts.SparseCheckoutDirs...)
if out, err := i.executor.Run(sparseCheckoutArgs...); err != nil {
return fmt.Errorf("error setting it to a sparse checkout: %w %v", err, string(out))
}
}
return nil
}
// MirrorClone sets up a mirror of the source repository.
func (i *interactor) MirrorClone() error {
i.logger.Infof("Creating a mirror of the repo at %s", i.dir)
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for cloning: %w", err)
}
if out, err := i.executor.Run("clone", "--mirror", remote, i.dir); err != nil {
return fmt.Errorf("error creating a mirror clone: %w %v", err, string(out))
}
return nil
}
// Checkout runs git checkout.
func (i *interactor) Checkout(commitlike string) error {
i.logger.Infof("Checking out %q", commitlike)
if out, err := i.executor.Run("checkout", commitlike); err != nil {
return fmt.Errorf("error checking out %q: %w %v", commitlike, err, string(out))
}
return nil
}
// RevParse runs git rev-parse.
func (i *interactor) RevParse(commitlike string) (string, error) {
i.logger.Infof("Parsing revision %q", commitlike)
out, err := i.executor.Run("rev-parse", commitlike)
if err != nil {
return "", fmt.Errorf("error parsing %q: %w %v", commitlike, err, string(out))
}
return string(out), nil
}
// BranchExists returns true if branch exists in heads.
func (i *interactor) BranchExists(branch string) bool {
i.logger.Infof("Checking if branch %q exists", branch)
_, err := i.executor.Run("ls-remote", "--exit-code", "--heads", "origin", branch)
return err == nil
}
func (i *interactor) ObjectExists(sha string) (bool, error) {
i.logger.WithField("SHA", sha).Info("Checking if Git object exists")
output, err := i.executor.Run("cat-file", "-e", sha)
// If the object does not exist, cat-file will exit with a non-zero exit
// code. This will make err non-nil. However this is a known behavior, so
// we just log it.
//
// We still have the error type as a return value because the v1 git client
// adapter needs to know that this operation is not supported there.
if err != nil {
i.logger.WithError(err).WithField("SHA", sha).Debugf("error from 'git cat-file -e': %s", string(output))
return false, nil
}
return true, nil
}
// CheckoutNewBranch creates a new branch and checks it out.
func (i *interactor) CheckoutNewBranch(branch string) error {
i.logger.Infof("Checking out new branch %q", branch)
if out, err := i.executor.Run("checkout", "-b", branch); err != nil {
return fmt.Errorf("error checking out new branch %q: %w %v", branch, err, string(out))
}
return nil
}
// Merge attempts to merge commitlike into the current branch. It returns true
// if the merge completes. It returns an error if the abort fails.
func (i *interactor) Merge(commitlike string) (bool, error) {
return i.MergeWithStrategy(commitlike, "merge")
}
// MergeWithStrategy attempts to merge commitlike into the current branch given the merge strategy.
// It returns true if the merge completes. If the merge does not complete successfully, we try to
// abort it and return an error if the abort fails.
func (i *interactor) MergeWithStrategy(commitlike, mergeStrategy string, opts ...MergeOpt) (bool, error) {
i.logger.Infof("Merging %q using the %q strategy", commitlike, mergeStrategy)
switch mergeStrategy {
case "merge":
return i.mergeMerge(commitlike, opts...)
case "squash":
return i.squashMerge(commitlike)
case "rebase":
return i.mergeRebase(commitlike)
case "ifNecessary":
return i.mergeIfNecessary(commitlike, opts...)
default:
return false, fmt.Errorf("merge strategy %q is not supported", mergeStrategy)
}
}
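// Usage sketch (assumed call site): ok, err := i.MergeWithStrategy(sha, "squash")
// stages a squash merge of sha; the "ifNecessary" strategy merges with --ff,
// so it fast-forwards when possible.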
func (i *interactor) mergeHelper(args []string, commitlike string, opts ...MergeOpt) (bool, error) {
if len(opts) == 0 {
args = append(args, []string{"-m", "merge"}...)
} else {
for _, opt := range opts {
args = append(args, []string{"-m", opt.CommitMessage}...)
}
}
args = append(args, commitlike)
out, err := i.executor.Run(args...)
if err == nil {
return true, nil
}
i.logger.WithError(err).Infof("Error merging %q: %s", commitlike, string(out))
if out, err := i.executor.Run("merge", "--abort"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
func (i *interactor) mergeMerge(commitlike string, opts ...MergeOpt) (bool, error) {
args := []string{"merge", "--no-ff", "--no-stat"}
return i.mergeHelper(args, commitlike, opts...)
}
func (i *interactor) mergeIfNecessary(commitlike string, opts ...MergeOpt) (bool, error) {
args := []string{"merge", "--ff", "--no-stat"}
return i.mergeHelper(args, commitlike, opts...)
}
func (i *interactor) squashMerge(commitlike string) (bool, error) {
out, err := i.executor.Run("merge", "--squash", "--no-stat", commitlike)
if err != nil {
i.logger.WithError(err).Warnf("Error staging merge for %q: %s", commitlike, string(out))
if out, err := i.executor.Run("reset", "--hard", "HEAD"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
out, err = i.executor.Run("commit", "--no-stat", "-m", "merge")
if err != nil {
i.logger.WithError(err).Warnf("Error committing merge for %q: %s", commitlike, string(out))
if out, err := i.executor.Run("reset", "--hard", "HEAD"); err != nil {
return false, fmt.Errorf("error aborting merge of %q: %w %v", commitlike, err, string(out))
}
return false, nil
}
return true, nil
}
func (i *interactor) mergeRebase(commitlike string) (bool, error) {
if commitlike == "" {
return false, errors.New("branch must be set")
}
headRev, err := i.revParse("HEAD")
if err != nil {
i.logger.WithError(err).Infof("Failed to parse HEAD revision")
return false, err
}
headRev = strings.TrimSuffix(headRev, "\n")
b, err := i.executor.Run("rebase", "--no-stat", headRev, commitlike)
if err != nil {
i.logger.WithField("out", string(b)).WithError(err).Infof("Rebase failed.")
if b, err := i.executor.Run("rebase", "--abort"); err != nil {
return false, fmt.Errorf("error aborting after failed rebase for commitlike %s: %v. output: %s", commitlike, err, string(b))
}
return false, nil
}
return true, nil
}
func (i *interactor) revParse(args ...string) (string, error) {
fullArgs := append([]string{"rev-parse"}, args...)
b, err := i.executor.Run(fullArgs...)
if err != nil {
return "", errors.New(string(b))
}
return string(b), nil
}
// MergeAndCheckout checks out the given baseSHA and merges each of the
// headSHAs into it using the given merge strategy. Only the `merge` and
// `squash` strategies are supported.
func (i *interactor) MergeAndCheckout(baseSHA string, mergeStrategy string, headSHAs ...string) error {
if baseSHA == "" {
return errors.New("baseSHA must be set")
}
if err := i.Checkout(baseSHA); err != nil {
return err
}
for _, headSHA := range headSHAs {
ok, err := i.MergeWithStrategy(headSHA, mergeStrategy)
if err != nil {
return err
} else if !ok {
return fmt.Errorf("failed to merge %q", headSHA)
}
}
return nil
}
// Am tries to apply the patch in the given path into the current branch
// by performing a three-way merge (similar to git cherry-pick). It returns
// an error if the patch cannot be applied.
func (i *interactor) Am(path string) error {
i.logger.Infof("Applying patch at %s", path)
out, err := i.executor.Run("am", "--3way", path)
if err == nil {
return nil
}
i.logger.WithError(err).Infof("Patch apply failed with output: %s", string(out))
if abortOut, abortErr := i.executor.Run("am", "--abort"); abortErr != nil {
i.logger.WithError(abortErr).Warningf("Aborting patch apply failed with output: %s", string(abortOut))
}
return errors.New(string(bytes.TrimPrefix(out, []byte("The copy of the patch that failed is found in: .git/rebase-apply/patch"))))
}
// FetchCommits only fetches those commits which we want, and only if they are
// missing.
func (i *interactor) FetchCommits(noFetchTags bool, commitSHAs []string) error {
fetchArgs := []string{"--no-write-fetch-head"}
if noFetchTags {
fetchArgs = append(fetchArgs, "--no-tags")
}
// For each commit SHA, check if it already exists. If so, don't bother
// fetching it.
var missingCommits bool
for _, commitSHA := range commitSHAs {
if exists, _ := i.ObjectExists(commitSHA); exists {
continue
}
fetchArgs = append(fetchArgs, commitSHA)
missingCommits = true
}
// Skip the fetch operation altogether if nothing is missing (we already
// fetched everything previously at some point).
if !missingCommits {
return nil
}
if err := i.Fetch(fetchArgs...); err != nil {
return fmt.Errorf("failed to fetch %s: %v", fetchArgs, err)
}
return nil
}
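// Usage sketch (hypothetical SHAs): FetchCommits(true, []string{"abc123", "def456"})
// fetches only the SHAs that are not already present locally, passing
// --no-tags and --no-write-fetch-head to git fetch.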
// RemoteUpdate fetches all updates from the remote.
func (i *interactor) RemoteUpdate() error {
i.logger.Info("Updating from remote")
if out, err := i.executor.Run("remote", "update", "--prune"); err != nil {
return fmt.Errorf("error updating: %w %v", err, string(out))
}
return nil
}
// Fetch fetches all updates from the remote.
func (i *interactor) Fetch(arg ...string) error {
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for fetching: %w", err)
}
arg = append([]string{"fetch", remote}, arg...)
i.logger.Infof("Fetching from %s", remote)
if out, err := i.executor.Run(arg...); err != nil {
return fmt.Errorf("error fetching: %w %v", err, string(out))
}
return nil
}
// FetchRef fetches a refspec from the remote and leaves it as FETCH_HEAD.
func (i *interactor) FetchRef(refspec string) error {
remote, err := i.remote()
if err != nil {
return fmt.Errorf("could not resolve remote for fetching: %w", err)
}
i.logger.Infof("Fetching %q from %s", refspec, remote)
if out, err := i.executor.Run("fetch", remote, refspec); err != nil {
return fmt.Errorf("error fetching %q: %w %v", refspec, err, string(out))
}
return nil
}
// FetchFromRemote fetches all update from a specific remote and branch and leaves it as FETCH_HEAD.
func (i *interactor) FetchFromRemote(remote RemoteResolver, branch string) error {
r, err := remote()
if err != nil {
return fmt.Errorf("couldn't get remote: %w", err)
}
i.logger.Infof("Fetching %s from %s", branch, r)
if out, err := i.executor.Run("fetch", r, branch); err != nil {
return fmt.Errorf("error fetching %s from %s: %w %v", branch, r, err, string(out))
}
return nil
}
// CheckoutPullRequest fetches the HEAD of a pull request using a synthetic refspec
// available on GitHub remotes and creates a branch at that commit.
func (i *interactor) CheckoutPullRequest(number int) error {
i.logger.Infof("Checking out pull request %d", number)
if err := i.FetchRef(fmt.Sprintf("pull/%d/head", number)); err != nil {
return err
}
if err := i.Checkout("FETCH_HEAD"); err != nil {
return err
}
if err := i.CheckoutNewBranch(fmt.Sprintf("pull%d", number)); err != nil {
return err
}
return nil
}
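// Usage sketch (hypothetical PR number): CheckoutPullRequest(123) fetches
// "pull/123/head", checks out FETCH_HEAD and creates a branch named "pull123".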
// Config runs git config.
func (i *interactor) Config(args ...string) error {
i.logger.WithField("args", args).Info("Configuring.")
if out, err := i.executor.Run(append([]string{"config"}, args...)...); err != nil {
return fmt.Errorf("error configuring %v: %w %v", args, err, string(out))
}
return nil
}
// Diff lists the difference between the two references, returning the output
// line by line.
func (i *interactor) Diff(head, sha string) ([]string, error) {
i.logger.Infof("Finding the differences between %q and %q", head, sha)
out, err := i.executor.Run("diff", head, sha, "--name-only")
if err != nil {
return nil, err
}
var changes []string
scan := bufio.NewScanner(bytes.NewReader(out))
scan.Split(bufio.ScanLines)
for scan.Scan() {
changes = append(changes, scan.Text())
}
return changes, nil
}
// MergeCommitsExistBetween runs 'git log <target>..<head> --oneline --merges' to verify
// if merge commits exist between "target" and "head".
func (i *interactor) MergeCommitsExistBetween(target, head string) (bool, error) {
i.logger.Infof("Determining if merge commits exist between %q and %q", target, head)
out, err := i.executor.Run("log", fmt.Sprintf("%s..%s", target, head), "--oneline", "--merges")
if err != nil {
return false, fmt.Errorf("error verifying if merge commits exist between %q and %q: %v %s", target, head, err, string(out))
}
return len(out) != 0, nil
}
func (i *interactor) ShowRef(commitlike string) (string, error) {
i.logger.Infof("Getting the commit sha for commitlike %s", commitlike)
out, err := i.executor.Run("show-ref", "-s", commitlike)
if err != nil {
return "", fmt.Errorf("failed to get commit sha for commitlike %s: %w", commitlike, err)
}
return strings.TrimSpace(string(out)), nil
}
eulerlib.py
import math
import time
import quadratic
import random
def time_it(f, args=None):
t0 = time.time()
print('--- Timed execution for {} ----------------'.format(f.__name__))
print('Running...')
result = f(*args) if args is not None else f()
print('Solution is {}'.format(result))
t1 = time.time()
print('Executed in {} seconds'.format(round(t1 - t0, 6)))
def distinct(x):
"""
Returns a list of unique elements.
:param x: List of elements.
:return: List of unique elements.
"""
return list(set(x))
def is_number(n):
"""
Returns true if the number is an instance of an int.
or a float.
:param n: The number n to be tested.
:return: True if n is int or float.
"""
return isinstance(n, (int, float))
def is_unique_string(s):
"""
Determines if a given string only consists of unique
characters.
:param s: The string to test.
:return: True if the string only contains unique characters.
"""
return len(s) == len(set(s))
def divisors(x):
"""
Returns all the divisors for a number x, including x.
e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]
:param x: number >= 1.
:return: the divisors including 1 and x.
"""
x = abs(x)
result = []
upper_bound = int(math.sqrt(x))
for i in range(1, upper_bound + 1):
if x % i == 0:
if x / i == i:
result.append(i)
else:
result.append(i)
result.append(x//i)
return sorted(distinct(result))
def sum_of_proper_divisors_sieve(n):
"""
Generates an array with the sum of the divisors
for that index of the array. To find the sum of
divisors for 12: sieve[12].
:param n: Upper limit of numbers.
:return: List with sum of divisors.
"""
sieve = [1] * (n + 1)
for i in range(2, n // 2 + 1):
for j in range(i + i, n, i):
sieve[j] += i
return sieve
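# Worked example (sketch, not part of the original module):
#   >>> sum_of_proper_divisors_sieve(100)[12]
#   16
# since the proper divisors of 12 are 1, 2, 3, 4 and 6.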
def prime_sieve(n):
"""
Generates an array which determines if the index
of the array is a prime number. To see if 997 is
a prime number: sieve[997] == True.
:param n: Upper limit of numbers.
:return: List with boolean values.
"""
upper_bound = int(math.sqrt(n))
primes = [True] * (n + 1)
primes[0] = primes[1] = False
for i in range(upper_bound + 1):
if not primes[i]:
continue
for j in range(i * i, n + 1, i):
primes[j] = False
return primes
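# Usage sketch: with primes = prime_sieve(100), primes[97] is True and
# primes[91] is False (91 = 7 * 13).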
def sieve_to_list(sieve):
"""
Returns the sieve as a list where the index is the number
where it was True.
:param sieve:
:return:
"""
return [i for i, v in enumerate(sieve) if v]
def triangle_number(n):
"""
Calculate the nth triangle number.
:param n: Fn
:return: Triangle number for n.
"""
return n * (n + 1) // 2
def is_triangle_number(n):
"""
Tests if a number is a triangle number. Solved with the
inverse of n(n+1)/2, and testing if that solution
is integer.
:param n: Number to test.
:return: True if it is a triangle number.
"""
_, x = quadratic.solve(1, 1, -2*n)
return is_number(x) and x.is_integer()
def triangle_number_sieve(n):
"""
Generates a sieve which can be used to tell if a number
is a triangle number.
:param n: Up to which n.
:return: Sieve with boolean values, sieve[3] = True.
"""
triangle_numbers = [False] * (n + 1)
tn = i = 1
while tn <= n:
triangle_numbers[triangle_number(i)] = True
i += 1
tn = triangle_number(i)
return triangle_numbers
def hexagonal_number(n):
"""
Calculate the nth hexagonal number.
:param n: Hn
:return: Hexagonal number
"""
return n * (2 * n - 1)
def is_hexagonal_number(n):
"""
Determines if n is a hexagonal number.
:param n: Hn
:return: Hexagonal number
"""
_, x = quadratic.solve(2, -1, -n)
return is_number(x) and x.is_integer()
def pentagonal_number(n):
return n * (3 * n - 1) / 2
def is_pentagonal_number(n):
"""
Determines if n is a pentagonal number.
:param n:
:return: True if pentagonal.
"""
_, x = quadratic.solve(3, -1, -2 * n)
return is_number(x) and x.is_integer()
def proper_divisors(x):
"""
Returns all the proper divisors for a number x, excluding x.
e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]
:param x: number >= 1.
:return: the divisors excluding itself.
"""
return divisors(x)[:-1]
def restricted_divisors(x):
"""
Returns all the restricted divisors for a number x, excluding 1 and x.
e.g divisors(1001) = [7, 11, 13, 77, 91, 143]
:param x: number >= 1.
:return: the divisors excluding 1 and itself.
"""
return divisors(x)[1:-1]
def is_perfect_number(x):
"""
Test if a number is a perfect number. A number is perfect
if the sum of the proper divisors is equal to itself.
:param x: number to test.
:return: True if it is a perfect number.
"""
return sum(proper_divisors(x)) == x
def is_abundant_number(x):
"""
Test if a number is an abundant number. A number is abundant
if the sum of the proper divisors is greater than the number
itself.
:param x: number to test.
:return: True if it is an abundant number.
"""
return sum(proper_divisors(x)) > x
def is_deficient_number(x):
"""
Test if a number is a deficient number. A number is deficient
if the sum of the proper divisors is less than the number
itself.
:param x: number to test.
:return: True if it is a deficient number.
"""
return sum(proper_divisors(x)) < x
def digits(x):
"""
Returns the digits of a number in a list.
:param x: The number to sum the digits of.
:return: Sum of the number x.
"""
return [int(d) for d in str(x)]
def digits_to_int(x):
"""
Concatenate a list of digits to an integer.
:param x:
:return:
"""
if x is None:
return ""
return int(''.join([str(i) for i in x]))
def is_fibonacci_number(x):
"""
Test if x is a Fibonacci number.
:param x: Number to test.
:return: True if it is a Fibonacci number.
"""
a = math.sqrt(5 * x ** 2 + 4)
b = math.sqrt(5 * x ** 2 - 4)
return a.is_integer() or b.is_integer()
def fibonacci_n(n):
"""
Calculate the nth Fibonacci number (Fn).
:param n: which number to calculate.
:return: The nth Fibonacci number.
"""
sqrt5 = math.sqrt(5)
phi = (1 + sqrt5) / 2
psi = (1 - sqrt5) / 2
return round((phi**n - psi**n) / sqrt5)
def fibonacci_n_inv(x):
"""
Calculate the n for Fn for a Fibonacci number.
:param x: Fibonacci number.
:return: The position of the Fibonacci number (Fn)
"""
if x < 2:
raise ValueError('Function approximation is wrong when x < 2.')
sqrt5 = math.sqrt(5)
phi = (1 + sqrt5) / 2
rad = 5 * x**2
p = math.sqrt(5*x**2 + 4)
n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) \
if p.is_integer() \
else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)
return round(n)
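# Worked example (sketch): fibonacci_n_inv(55) == 10, since F10 = 55 and
# 5 * 55**2 + 4 = 15129 = 123**2 is a perfect square.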
def gcd(a, b):
"""
Determines the greatest common divisor for a and b
with the Euclidean Algorithm.
:param a: First number.
:param b: Second number.
:return: Greatest common divisor for a and b.
"""
a = abs(a)
b = abs(b)
if a == b:
return a
if b > a:
a, b = b, a
q = a // b
r = a - b * q
while r != 0:
a = b
b = r
q = a // b
r = a - b * q
return b
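# Worked example (sketch): gcd(1071, 462) == 21, via the Euclidean steps
# 1071 = 2*462 + 147, 462 = 3*147 + 21, 147 = 7*21.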
def lcm(a, b):
"""
Calculate the least common multiple (LCM) with the GCD
algorithm using: LCM(a,b) = (a*b)/GCD(a,b).
:param a:
:param b:
:return:
"""
return a * b // gcd(a, b)
def lcm3(a, b, c):
"""
Calculating the LCM for multiple digits is done with
LCM(a,b,c) = LCM(LCM(a,b),c)
:param a:
:param b:
:param c:
:return:
"""
return lcm(lcm(a, b), c)
def primitive_pythagorean_triplet_generator(n=math.inf):
"""
Generates n primitive pythagorean triplets.
:param n:
:return:
"""
v = 2
u = 1
while n > 0:
if not(is_odd(v) and is_odd(u)) and gcd(u, v) == 1:
a = v*v - u*u
b = 2*v*u
c = u*u + v*v
if a > b:
a, b = b, a
n -= 1
yield (a, b, c)
u += 1
if u >= v:
v += 1
u = 1
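# Usage sketch (relies only on itertools from the standard library):
#   >>> from itertools import islice
#   >>> list(islice(primitive_pythagorean_triplet_generator(), 2))
#   [(3, 4, 5), (5, 12, 13)]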
def prime_counting_function(n):
"""
Return the number of primes below a given number.
This is calculated with the proportionality which
states that π(n) ~ n / log(n).
:param n: Upper bound.
:return: Estimate of the number of primes below the
bound.
"""
return n / math.log(n)
def lambertw(x):
"""
Lambert W function with Newton's Method.
:param x:
:return:
"""
eps = 1e-8
w = x
while True:
ew = math.exp(w)
w_new = w - (w * ew - x) / (w * ew + ew)
if abs(w - w_new) <= eps:
break
w = w_new
return w
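# Worked example (sketch): lambertw(math.e) converges to roughly 1.0,
# since W(e) = 1 (1 * e**1 == e).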
def prime_counting_function_inv(y):
"""
Returns the upper bound for a given number of primes.
:param y: How many primes you want.
:return: Upper bound.
"""
x = 2
while x / math.log(x) < y:
x += 1
return x
def product(numbers):
"""
Returns the product of a list of numbers.
:param numbers:
:return:
"""
p = 1
for x in numbers:
p *= x
return p
def factorial(n):
"""
Returns the factorial n! of a number.
:param n:
:return:
"""
return product(range(1, n + 1))
def is_even(n):
"""
Returns true if a number is even.
:param n:
:return:
"""
return not n & 1
def is_odd(n):
"""
Returns true if a number is odd.
:param n:
:return:
"""
return n & 1
def permutations(a):
"""
Generates all the permutations for a set.
:param a:
:return:
"""
n = len(a)
return _heap_perm_(n, a)
def _heap_perm_(n, a):
"""
Heap's permutation algorithm.
https://stackoverflow.com/a/29044942
:param n:
:param a:
:return:
"""
if n == 1:
yield a
else:
for i in range(n-1):
for hp in _heap_perm_(n-1, a):
yield list(hp)
j = 0 if (n % 2) == 1 else i
a[j], a[n - 1] = a[n - 1], a[j]
for hp in _heap_perm_(n-1, a):
yield list(hp)
def shift(a, n=1):
"""
Shift all the elements in the list by n.
:param a:
:param n:
:return:
"""
return a[n:] + a[:n]
def is_palindrome(x):
"""
Returns true if a number or a string is a palindrome.
:param x:
:return:
"""
strx = str(x)
return strx == strx[::-1]
# chars = [c for c in x] if not is_number(x) else digits(x)
# for i in range(len(chars) // 2):
# if chars[i] != chars[len(chars) - i - 1]:
# return False
# return True
def is_pandigital_to_n(x, n, zero_based=False):
"""
Returns true if a list of numbers is pandigital from 1 up to n.
:param x:
:param n:
:param zero_based:
:return:
"""
return sorted(x) == list(range(0 if zero_based else 1, n + 1))
def to_binary_string(x):
"""
Useful to convert a number into a binary number.
:param x:
:return:
"""
return "{0:b}".format(x)
def _palindrome_number_generator():
"""
https://stackoverflow.com/a/16344628
:return:
"""
yield 0
lower = 1
while True:
higher = lower*10
for i in range(lower, higher):
s = str(i)
yield int(s+s[-2::-1])
for i in range(lower, higher):
s = str(i)
yield int(s+s[::-1])
lower = higher
def palindrome_generator(lower, upper):
"""
Generates all palindromes in the half-open range [lower, upper).
https://stackoverflow.com/a/16344628
:param lower:
:param upper:
:return:
"""
all_palindrome_numbers = _palindrome_number_generator()
for p in all_palindrome_numbers:
if p >= lower:
break
palindrome_list = [p]
for p in all_palindrome_numbers:
# Because we use the same generator object,
# p continues where the previous loop halted.
if p >= upper:
break
palindrome_list.append(p)
return palindrome_list
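# Usage sketch: palindrome_generator(100, 200) returns
# [101, 111, 121, 131, 141, 151, 161, 171, 181, 191].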
def string_split_2d(data, field_delimiter=',', line_delimiter='\n'):
"""
Split a string of 2D data into lists. Example of the data
1,2
3,4
5,6
to:
[[1,2],[3,4],[5,6]]
:param data:
:param field_delimiter: delimiter used between separate fields, default: ,
:param line_delimiter: delimiter used between lines, default: \n
:return: 2D list
"""
return [line.split(field_delimiter) for line in data.split(line_delimiter)]
def simplify_fraction(a, b):
"""
Simplifies a fraction to the lowest common form.
:param a:
:param b:
:return:
"""
c = gcd(a, b)
return a // c, b // c
def modpow(a, n, p):
"""
Use Fermat's little theorem to calculate a^n mod p, which
can handle very large exponents. Calculates in O(log n) time.
:param a: base
:param n: exponent
:param p: mod
:return: (a^n) mod p
"""
res = 1
a = a % p
while n > 0:
# if n is odd
if n & 1:
res = (res * a) % p
n = n >> 1 # n = n / 2
a = (a*a) % p
return res
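# Worked example (sketch):
#   >>> modpow(2, 10, 1000)
#   24
# since 2**10 = 1024 and 1024 % 1000 == 24.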
def is_prime(n, k):
"""
Probabilistic Fermat primality test with k rounds.
:param n: The number to be tested.
:param k: The number of test rounds.
:return:
"""
if n <= 1 or n == 4:
return False
if n <= 3:
return True
if is_even(n):
return False
while k > 0:
# Take a random int in [2, n-2]
a = random.randint(2, n - 2)
# Check if a and n are co-prime.
if gcd(n, a) != 1:
return False
# Fermat's little theorem
if modpow(a, n-1, n) != 1:
return False
k -= 1
return True
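# Usage sketch: is_prime(97, 5) is True on every run (97 is prime), while
# composites such as 100 are rejected, e.g. is_prime(100, 5) is False.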
def _first_index_with_bigger_neighbour(P):
"""
Find the first index from the right whose element is larger
than his neighbour.
:param P:
:return:
"""
i = len(P) - 1
while i > 0 and P[i-1] >= P[i]:
i -= 1
return i
def _first_index_with_smaller_neighbour(P):
"""
Find the first index from the right whose element is smaller
than his neighbour.
:param P:
:return:
"""
i = len(P) - 1
while i > 0 and P[i-1] <= P[i]:
i -= 1
return i
def next_permutation(P):
"""
For any given permutation P, give the next permutation.
If there is no next permutation, P will be returned.
:param P:
:return:
"""
n = len(P)
# Find the first index with the bigger neighbour.
i = _first_index_with_bigger_neighbour(P)
# If this is the first, where i=0, then there is no next permutation.
if i == 0:
return P
# From the right, find a value in P that is smaller than
# the previous found value.
j = n - 1
while P[j] <= P[i-1]:
j -= 1
# Swap the values
P[i-1], P[j] = P[j], P[i-1]
# Restore the tail of the permutation.
j = n - 1
while i < j:
P[i], P[j] = P[j], P[i]
i += 1
j -= 1
return P
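# Worked example (sketch): next_permutation([1, 2, 3]) == [1, 3, 2], while
# next_permutation([3, 2, 1]) returns [3, 2, 1] unchanged (no next permutation).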
def previous_permutation(P):
"""
For any given permutation P, give the previous permutation.
If there is no pervious permutation, P will be returned.
:param P:
:return:
"""
n = len(P)
# Find the first index with the smaller neighbour.
i = _first_index_with_smaller_neighbour(P)
# If this is the first, where i=0, then there is no next permutation.
if i == 0:
return P
# From the right, find a value in P that is bigger than
# the previous found value.
j = n - 1
while P[j] >= P[i-1]:
j -= 1
# Swap the values
P[i-1], P[j] = P[j], P[i-1]
# Restore the tail of the permutation.
j = n - 1
while i < j:
P[i], P[j] = P[j], P[i]
i += 1
j -= 1
return P
def prime_factorization(x, sieve=None):
"""
Factorizes a number into the prime factorization.
Requires a sieve to be quick, if sieve is not specified
it will generate one itself.
:param x:
:param sieve:
:return:
"""
if x == 0:
return []
if x in [1, 2]:
return [x]
if sieve is None:
sieve = prime_sieve(x + 1)
factors = []
if sieve[x]:
return [x]
for i in range(2, int(math.sqrt(x) + 1)):
if sieve[x]:
break
if not sieve[i]:
continue
if x % i == 0:
factors.append(i)
x //= i
return factors + prime_factorization(x, sieve)
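# Usage sketch: sorted(prime_factorization(360)) == [2, 2, 2, 3, 3, 5]; the
# factors come back in recursion order, not necessarily sorted.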
def is_permutation(A, B):
"""
Returns true if A and B are permutations of each other.
:param A:
:param B:
:return:
"""
return sorted(A) == sorted(B)
def is_permutation3(A, B, C):
"""
Returns true if A, B and C are permutations of each other.
:param A:
:param B:
:param C:
:return:
"""
return sorted(A) == sorted(B) == sorted(C)
def equal_sets(S):
"""
Returns true if all the sets s in S are equal
to each other.
:param S:
:return:
"""
s0 = S[0]
res = True
for i in range(1, len(S)):
res = res and s0 == S[i]
return res
def union_sets(S):
"""
Returns the union of all sets in S.
:param S:
:return:
"""
res = set()
for s in S:
res |= s
return res
def intersect_sets(S):
"""
Returns the intersection of all sets in S.
:param S:
:return:
"""
res = S[0]
for s in S:
res &= s
return res
def cumsum(L):
"""
Returns a list with the cumulative sum of a list L.
:param L:
:return:
"""
for i in range(1, len(L)):
L[i] += L[i-1]
return L
eulerlib.py | import math
import time
import quadratic
import random
def time_it(f, args=None):
t0 = time.time()
print('--- Timed execution for {} ----------------'.format(f.__name__))
print('Running...')
result = f(*args) if args is not None else f()
print('Solution is {}'.format(result))
t1 = time.time()
print('Executed in {} seconds'.format(round(t1 - t0, 6)))
def distinct(x):
"""
Returns a list of unique elements.
:param x: List of elements.
:return: List of unique elements.
"""
return list(set(x))
def is_number(n):
"""
Returns true if the number is an instance of an int.
or a float.
:param n: The number n to be tested.
:return: True if n is int or float.
"""
return isinstance(n, (int, float))
def is_unique_string(s):
"""
Determines if a given string only consists of unique
characters.
:param s: The string to test.
:return: True if the string only contains unique characters.
"""
return len(s) == len(set(s))
def divisors(x):
"""
Returns all the divisors for a number x, including x.
e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]
:param x: number >= 1.
:return: the divisors including 1 and x.
"""
x = abs(x)
result = []
upper_bound = int(math.sqrt(x))
for i in range(1, upper_bound + 1):
if x % i == 0:
if x / i == i:
result.append(i)
else:
result.append(i)
result.append(x//i)
return sorted(distinct(result))
def sum_of_proper_divisors_sieve(n):
"""
Generates an array with the sum of the divisors
for that index of the array. To find the sum of
divisors for 12: sieve[12].
:param n: Upper limit of numbers.
:return: List with sum of divisors.
"""
sieve = [1] * (n + 1)
for i in range(2, n // 2 + 1):
for j in range(i + i, n, i):
sieve[j] += i
return sieve
def prime_sieve(n):
"""
Generates an array which determines if the index
of the array is a prime number. To see if 997 is
a prime number: sieve[997] == True.
:param n: Upper limit of numbers.
:return: List with boolean values.
"""
upper_bound = int(math.sqrt(n))
primes = [True] * (n + 1)
primes[0] = primes[1] = False
for i in range(upper_bound + 1):
if not primes[i]:
continue
for j in range(2, n // i + 1):
if i*j < n:
primes[i*j] = False
return primes
def sieve_to_list(sieve):
"""
Returns the sieve as a list where the index is the number
where it was True.
:param sieve:
:return:
"""
return [i for i, v in enumerate(sieve) if v]
def triangle_number(n):
"""
Calculate the nth triangle number.
:param n: Fn
:return: Triangle number for n.
"""
return n * (n + 1) // 2
def is_triangle_number(n):
"""
Tests if a number is a triangle number. Solved with the
inverse of n(n+1)/2, and testing if that solution
is integer.
:param n: Number to test.
:return: True if it is a triangle number.
"""
_, x = quadratic.solve(1, 1, -2*n)
return is_number(x) and x.is_integer()
def triangle_number_sieve(n):
"""
Generates a sieve which can be used to tell if a number
is a triangle number.
:param n: Up to which n.
:return: Sieve with boolean values, sieve[3] = True.
"""
triangle_numbers = [False] * (n + 1)
tn = i = 1
while tn < n:
triangle_numbers[triangle_number(i)] = True
i += 1
tn = triangle_number(i)
return triangle_numbers
def hexagonal_number(n):
"""
Calculate the nth hexagonal number.
:param n: Hn
:return: Hexagonal number
"""
return n * (2 * n - 1)
def is_hexagonal_number(n):
"""
Determines if n is a hexagonal number.
:param n: Hn
:return: Hexagonal number
"""
_, x = quadratic.solve(2, -1, -n)
return is_number(x) and x.is_integer()
def pentagonal_number(n):
return n * (3 * n - 1) / 2
def is_pentagonal_number(n):
"""
Determines if n is a pentagonal number.
:param n:
:return: True if pentagonal.
"""
_, x = quadratic.solve(3, -1, -2 * n)
return is_number(x) and x.is_integer()
def proper_divisors(x):
"""
Returns all the proper divisors for a number x, excluding x.
e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]
:param x: number >= 1.
:return: the divisors excluding itself.
"""
return divisors(x)[:-1]
def restricted_divisors(x):
"""
Returns all the restricted divisors for a number x, excluding 1 and x.
e.g divisors(1001) = [7, 11, 13, 77, 91, 143]
:param x: number >= 1.
:return: the divisors excluding 1 and itself.
"""
return divisors(x)[1:-1]
def is_perfect_number(x):
"""
Test if a number is a perfect number. A number is perfect
if the sum of the proper divisors is equal to itself.
:param x: number to test.
:return: True if it is a perfect number.
"""
return sum(proper_divisors(x)) == x
def is_abundant_number(x):
"""
Test if a number is an abundant number. A number is abundant
if the sum of the proper divisors is greater than the number
itself.
:param x: number to test.
:return: True if it is an abundant number.
"""
return sum(proper_divisors(x)) > x
def is_deficient_number(x):
"""
Test if a number is a deficient number. A number is deficient
if the sum of the proper divisors is less than the number
itself.
:param x: number to test.
:return: True if it is a deficient number.
"""
return sum(proper_divisors(x)) < x
def digits(x):
"""
Returns the digits of a number in a list.
:param x: The number to sum the digits of.
:return: Sum of the number x.
"""
return [int(d) for d in str(x)]
def digits_to_int(x):
"""
Concatenate a list of digits to an integer.
:param x:
:return:
"""
if x is None:
return ""
return int(''.join([str(i) for i in x]))
def is_fibonacci_number(x):
"""
Test if x is a Fibonacci number.
:param x: Number to test.
:return: True if it is a Fibonacci number.
"""
a = math.sqrt(5 * x ** 2 + 4)
b = math.sqrt(5 * x ** 2 - 4)
return a.is_integer() or b.is_integer()
def fibonacci_n(n):
"""
Calculate the nth Fibonacci number (Fn).
:param n: which number to calculate.
:return: The nth Fibonacci number.
"""
sqrt5 = math.sqrt(5)
phi = (1 + sqrt5) / 2
psi = (1 - sqrt5) / 2
return (phi**n - psi**n) // sqrt5
def fibonacci_n_inv(x):
"""
Calculate the n for Fn for a Fibonacci number.
:param x: Fibonacci number.
:return: The position of the Fibonacci number (Fn)
"""
if x < 2:
raise ValueError('Function approximation is wrong when x < 2.')
sqrt5 = math.sqrt(5)
phi = (1 + sqrt5) / 2
rad = 5 * x**2
p = math.sqrt(5*x**2 + 4)
n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) \
if p.is_integer() \
else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)
return round(n)
def gcd(a, b):
"""
Determines the greatest common divisor for a and b
with the Euclidean Algorithm.
:param a: First number.
:param b: Second number.
:return: Greatest common divisor for a and b.
"""
a = abs(a)
b = abs(b)
if a == b:
return a
if b > a:
a, b = b, a
q = a // b
r = a - b * q
while r != 0:
a = b
b = r
q = a // b
r = a - b * q
return b
def lcm(a, b):
"""
Calculate the least common multiple (LCM) with the GCD
algorithm using: LCM(a,b) = (a*b)/GCD(a,b).
:param a:
:param b:
:return:
"""
return a * b // gcd(a, b)
def lcm3(a, b, c):
"""
Calculating the LCM for multiple digits is done with
LCM(a,b,c) = LCM(LCM(a,b),c)
:param a:
:param b:
:param c:
:return:
"""
return lcm(lcm(a, b), c)
def primitive_pythagorean_triplet_generator(n=math.inf):
"""
Generates n primitive pythagorean triplets.
:param n:
:return:
"""
v = 2
u = 1
while n > 0:
if not(is_odd(v) and is_odd(u)) and gcd(u, v) == 1:
a = v*v - u*u
b = 2*v*u
c = u*u + v*v
if a > b:
a, b = b, a
n -= 1
yield (a, b, c)
u += 1
if u >= v:
v += 1
u = 1
def prime_counting_function(n):
"""
Return the number of primes below a given number.
This is calculated with the proportionality which
states that π(n) ~ n / log(n).
:param n: Upper bound.
:return: Estimate of the number of primes below the
bound.
"""
return n / math.log(n)
def lambertw(x):
"""
Lambert W function with Newton's Method.
:param x:
:return:
"""
eps = 1e-8
w = x
while True:
ew = math.exp(w)
w_new = w - (w * ew - x) / (w * ew + ew)
if abs(w - w_new) <= eps:
break
w = w_new
return w
def prime_counting_function_inv(y):
"""
Returns the upper bound for a given number of primes.
:param y: How many primes you want.
:return: Upper bound.
"""
x = 2
while x / math.log(x) < y:
x += 1
return x
def product(numbers):
"""
Returns the product of a list of numbers.
:param numbers:
:return:
"""
p = 1
for x in numbers:
p *= x
return p
def factorial(n):
"""
Returns the factorial n! of a number.
:param n:
:return:
"""
return product(range(1, n + 1))
def is_even(n):
"""
Returns true if a number is even.
:param n:
:return:
"""
return not n & 1
def is_odd(n):
"""
Returns true if a number is odd.
:param n:
:return:
"""
return n & 1
def permutations(a):
"""
Generates all the permutations for a set.
:param a:
:return:
"""
n = len(a)
return _heap_perm_(n, a)
def _heap_perm_(n, a):
"""
Heap's permutation algorithm.
https://stackoverflow.com/a/29044942
:param n:
:param a:
:return:
"""
if n == 1:
yield a
else:
for i in range(n-1):
for hp in _heap_perm_(n-1, a):
yield list(hp)
j = 0 if (n % 2) == 1 else i
a[j], a[n - 1] = a[n - 1], a[j]
for hp in _heap_perm_(n-1, a):
yield list(hp)
def shift(a, n=1):
"""
Shift all the elements in the list by n.
:param a:
:param n:
:return:
"""
return a[n:] + a[:n]
def is_palindrome(x):
"""
Returns true if a number or a string is a palindrome.
:param x:
:return:
"""
strx = str(x)
return strx == strx[::-1]
# chars = [c for c in x] if not is_number(x) else digits(x)
# for i in range(len(chars) // 2):
# if chars[i] != chars[len(chars) - i - 1]:
# return False
# return True
def is_pandigital_to_n(x, n, zero_based=False):
"""
Returns true if a list of numbers is pandigital from 1 up to n.
:param x:
:param n:
:param zero_based:
:return:
"""
return set(x) == set(range(0 if zero_based else 1, n + 1))
def to_binary_string(x):
"""
Useful to convert a number into a binary number.
:param x:
:return:
"""
return "{0:b}".format(x)
def _palindrome_number_generator():
"""
https://stackoverflow.com/a/16344628
:return:
"""
yield 0
lower = 1
while True:
higher = lower*10
for i in range(lower, higher):
s = str(i)
yield int(s+s[-2::-1])
for i in range(lower, higher):
s = str(i)
yield int(s+s[::-1])
lower = higher
def palindrome_generator(lower, upper):
"""
Generates all palindromes between [lower, upper].
https://stackoverflow.com/a/16344628
:param lower:
:param upper:
:return:
"""
all_palindrome_numbers = _palindrome_number_generator()
for p in all_palindrome_numbers:
if p >= lower:
break
palindrome_list = [p]
for p in all_palindrome_numbers:
# Because we use the same generator object,
# p continues where the previous loop halted.
if p >= upper:
break
palindrome_list.append(p)
return palindrome_list
def string_split_2d(data, field_delimiter=',', line_delimiter='\n'):
"""
Split a string of 2D data into lists. Example of the data
1,2
3,4
5,6
to:
[[1,2],[3,4],[5,6]]
:param data:
:param field_delimiter: delimiter used between seperate fields, default: ,
:param line_delimiter: delimiter used between lines, default: \n
:return: 2D list
"""
return [line.split(field_delimiter) for line in data.split(line_delimiter)]
def simplify_fraction(a, b):
"""
Simplifies a fraction to the lowest common form.
:param a:
:param b:
:return:
"""
c = gcd(a, b)
return a // c, b // c
def modpow(a, n, p):
"""
Use Fermat's little theorem to calculate a^n mod p, which
can handle very large exponents. Calculates in O(log n) time.
:param a: base
:param n: exponent
:param p: mod
:return: (a^n) mod p
"""
res = 1
a = a % p
while n > 0:
# if n is odd
if n & 1:
res = (res * a) % p
n = n >> 1 # n = n / 2
a = (a*a) % p
return res
def is_prime(n, k):
"""
Test if a number n is prime k-times.
:param n: The prime number to be tested.
:param k: The number of tests.
:return:
"""
if n <= 1 or n == 4:
return False
if n <= 3:
return True
if is_even(n):
return False
while k > 0:
# Take random int in [2, n-2]
a = random.randint(2, n-1)
# Check if a and n are co-prime.
if gcd(n, a) != 1:
return False
# Fermat's little theorem
if modpow(a, n-1, n) != 1:
return False
k -= 1
return True
def _first_index_with_bigger_neighbour(P):
"""
Find the first index from the right whose element is larger
than his neighbour.
:param P:
:return:
"""
i = len(P) - 1
while i > 0 and P[i-1] >= P[i]:
i -= 1
return i
def _first_index_with_smaller_neighbour(P):
"""
Find the first index from the right whose element is smaller
than his neighbour.
:param P:
:return:
"""
i = len(P) - 1
while i > 0 and P[i-1] <= P[i]:
i | return i
def next_permutation(P):
"""
For any given permutation P, give the next permutation.
If there is no next permutation, P will be returned.
:param P:
:return:
"""
n = len(P)
# Find the first index with the bigger neighbour.
i = _first_index_with_bigger_neighbour(P)
# If this is the first, where i=0, then there is no next permutation.
if i == 0:
return P
# From the right, find a value in P that is smaller than
# the previous found value.
j = n - 1
while P[j] <= P[i-1]:
j -= 1
# Swap the values
P[i-1], P[j] = P[j], P[i-1]
# Restore the tail of the permutation.
j = n - 1
while i < j:
P[i], P[j] = P[j], P[i]
i += 1
j -= 1
return P
def previous_permutation(P):
"""
For any given permutation P, give the previous permutation.
If there is no pervious permutation, P will be returned.
:param P:
:return:
"""
n = len(P)
# Find the first index with the smaller neighbour.
i = _first_index_with_smaller_neighbour(P)
# If this is the first, where i=0, then there is no next permutation.
if i == 0:
return P
# From the right, find a value in P that is bigger than
# the previous found value.
j = n - 1
while P[j] >= P[i-1]:
j -= 1
# Swap the values
P[i-1], P[j] = P[j], P[i-1]
# Restore the tail of the permutation.
j = n - 1
while i < j:
P[i], P[j] = P[j], P[i]
i += 1
j -= 1
return P
def prime_factorization(x, sieve=None):
"""
Factorizes a number into the prime factorization.
Requires a sieve to be quick, if sieve is not specified
it will generate one itself.
:param x:
:param sieve:
:return:
"""
if x == 0:
return []
if x in [1, 2]:
return [x]
if sieve is None:
sieve = prime_sieve(x + 1)
factors = []
if sieve[x]:
return [x]
for i in range(2, int(math.sqrt(x) + 1)):
if sieve[x]:
break
if not sieve[i]:
continue
if x % i == 0:
factors.append(i)
x //= i
return factors + prime_factorization(x, sieve)
def is_permutation(A, B):
"""
Returns true if A and B are permutations of each other.
:param A:
:param B:
:return:
"""
return set(A) == set(B)
def is_permutation3(A, B, C):
"""
Returns true if A, B and C are permutations of each other.
:param A:
:param B:
:param C:
:return:
"""
return set(A) == set(B) == set(C)
def equal_sets(S):
"""
Returns true if all the sets s in S are equal
to each other.
:param S:
:return:
"""
s0 = S[0]
res = True
for i in range(1, len(S)):
res = res and s0 == S[i]
return res
def union_sets(S):
"""
Returns the union of all sets in S.
:param S:
:return:
"""
res = set()
for s in S:
res |= s
return res
def intersect_sets(S):
"""
Returns the intersection of all sets in S.
:param S:
:return:
"""
res = S[0]
for s in S:
res &= s
return res
def cumsum(L):
"""
Returns a list with the cumulative sum of a list L.
:param S:
:return:
"""
for i in range(1, len(L)):
L[i] += L[i-1]
return L
| -= 1
| conditional_block |
eulerlib.py | import math
import time
import quadratic
import random
def time_it(f, args=None):
t0 = time.time()
print('--- Timed execution for {} ----------------'.format(f.__name__))
print('Running...')
result = f(*args) if args is not None else f()
print('Solution is {}'.format(result))
t1 = time.time()
print('Executed in {} seconds'.format(round(t1 - t0, 6)))
def distinct(x):
"""
Returns a list of unique elements.
:param x: List of elements.
:return: List of unique elements.
"""
return list(set(x))
def is_number(n):
"""
Returns true if the number is an instance of an int.
or a float.
:param n: The number n to be tested.
:return: True if n is int or float.
"""
return isinstance(n, (int, float))
def is_unique_string(s):
"""
Determines if a given string only consists of unique
characters.
:param s: The string to test.
:return: True if the string only contains unique characters.
"""
return len(s) == len(set(s))
def divisors(x):
"""
Returns all the divisors for a number x, including x.
e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]
:param x: number >= 1.
:return: the divisors including 1 and x.
"""
x = abs(x)
result = []
upper_bound = int(math.sqrt(x))
for i in range(1, upper_bound + 1):
if x % i == 0:
if x / i == i:
result.append(i)
else:
result.append(i)
result.append(x//i)
return sorted(distinct(result))
def sum_of_proper_divisors_sieve(n):
"""
Generates an array with the sum of the divisors
for that index of the array. To find the sum of
divisors for 12: sieve[12].
:param n: Upper limit of numbers.
:return: List with sum of divisors.
"""
sieve = [1] * (n + 1)
for i in range(2, n // 2 + 1):
for j in range(i + i, n, i):
sieve[j] += i
return sieve
def prime_sieve(n):
"""
Generates an array which determines if the index
of the array is a prime number. To see if 997 is
a prime number: sieve[997] == True.
:param n: Upper limit of numbers.
:return: List with boolean values.
"""
upper_bound = int(math.sqrt(n))
primes = [True] * (n + 1)
primes[0] = primes[1] = False
for i in range(upper_bound + 1):
if not primes[i]:
continue
for j in range(2, n // i + 1):
if i*j < n:
primes[i*j] = False
return primes
def sieve_to_list(sieve):
"""
Returns the sieve as a list where the index is the number
where it was True.
:param sieve:
:return:
"""
return [i for i, v in enumerate(sieve) if v]
def triangle_number(n):
"""
Calculate the nth triangle number.
:param n: Fn
:return: Triangle number for n.
"""
return n * (n + 1) // 2
def is_triangle_number(n):
"""
Tests if a number is a triangle number. Solved with the
inverse of n(n+1)/2, and testing if that solution
is integer.
:param n: Number to test.
:return: True if it is a triangle number.
"""
_, x = quadratic.solve(1, 1, -2*n)
return is_number(x) and x.is_integer()
def triangle_number_sieve(n):
"""
Generates a sieve which can be used to tell if a number
is a triangle number.
:param n: Up to which n.
:return: Sieve with boolean values, sieve[3] = True.
"""
triangle_numbers = [False] * (n + 1)
tn = i = 1
while tn < n:
triangle_numbers[triangle_number(i)] = True
i += 1
tn = triangle_number(i)
return triangle_numbers
def hexagonal_number(n):
"""
Calculate the nth hexagonal number.
:param n: Hn
:return: Hexagonal number
"""
return n * (2 * n - 1)
def is_hexagonal_number(n):
"""
Determines if n is a hexagonal number.
:param n: Hn
:return: Hexagonal number
"""
_, x = quadratic.solve(2, -1, -n)
return is_number(x) and x.is_integer()
def pentagonal_number(n):
return n * (3 * n - 1) / 2
def is_pentagonal_number(n):
"""
Determines if n is a pentagonal number.
:param n:
:return: True if pentagonal.
"""
_, x = quadratic.solve(3, -1, -2 * n)
return is_number(x) and x.is_integer()
def proper_divisors(x):
"""
Returns all the proper divisors for a number x, excluding x.
e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143]
:param x: number >= 1.
:return: the divisors excluding itself.
"""
return divisors(x)[:-1]
def restricted_divisors(x):
"""
Returns all the restricted divisors for a number x, excluding 1 and x.
e.g divisors(1001) = [7, 11, 13, 77, 91, 143]
:param x: number >= 1.
:return: the divisors excluding 1 and itself.
"""
return divisors(x)[1:-1]
def is_perfect_number(x):
"""
Test if a number is a perfect number. A number is perfect
if the sum of the proper divisors is equal to itself.
:param x: number to test.
:return: True if it is a perfect number.
"""
return sum(proper_divisors(x)) == x
def is_abundant_number(x):
"""
Test if a number is an abundant number. A number is abundant
if the sum of the proper divisors is greater than the number
itself.
:param x: number to test.
:return: True if it is an abundant number.
"""
return sum(proper_divisors(x)) > x
def is_deficient_number(x):
"""
Test if a number is a deficient number. A number is deficient
if the sum of the proper divisors is less than the number
itself.
:param x: number to test.
:return: True if it is a deficient number.
"""
return sum(proper_divisors(x)) < x
def digits(x):
"""
Returns the digits of a number in a list.
:param x: The number to sum the digits of.
:return: Sum of the number x.
"""
return [int(d) for d in str(x)]
def digits_to_int(x):
"""
Concatenate a list of digits to an integer.
:param x:
:return:
"""
if x is None:
return ""
return int(''.join([str(i) for i in x]))
def is_fibonacci_number(x):
"""
Test if x is a Fibonacci number.
:param x: Number to test.
:return: True if it is a Fibonacci number.
"""
a = math.sqrt(5 * x ** 2 + 4)
b = math.sqrt(5 * x ** 2 - 4)
return a.is_integer() or b.is_integer()
def fibonacci_n(n):
"""
Calculate the nth Fibonacci number (Fn).
:param n: which number to calculate.
:return: The nth Fibonacci number.
"""
sqrt5 = math.sqrt(5)
phi = (1 + sqrt5) / 2
psi = (1 - sqrt5) / 2
return (phi**n - psi**n) // sqrt5
def fibonacci_n_inv(x):
"""
Calculate the n for Fn for a Fibonacci number.
:param x: Fibonacci number.
:return: The position of the Fibonacci number (Fn)
"""
if x < 2:
raise ValueError('Function approximation is wrong when x < 2.')
sqrt5 = math.sqrt(5)
phi = (1 + sqrt5) / 2
rad = 5 * x**2
p = math.sqrt(5*x**2 + 4)
n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) \
if p.is_integer() \
else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)
return round(n)
def gcd(a, b):
"""
Determines the greatest common divisor for a and b
with the Euclidean Algorithm.
:param a: First number.
:param b: Second number.
:return: Greatest common divisor for a and b.
"""
a = abs(a)
b = abs(b)
if a == b:
return a
if b > a:
a, b = b, a
q = a // b
r = a - b * q
while r != 0:
a = b
b = r
q = a // b
r = a - b * q
return b
def lcm(a, b):
"""
Calculate the least common multiple (LCM) with the GCD
algorithm using: LCM(a,b) = (a*b)/GCD(a,b).
:param a:
:param b:
:return:
"""
return a * b // gcd(a, b)
def lcm3(a, b, c):
"""
Calculating the LCM for multiple digits is done with
LCM(a,b,c) = LCM(LCM(a,b),c)
:param a:
:param b:
:param c:
:return:
"""
return lcm(lcm(a, b), c)
def primitive_pythagorean_triplet_generator(n=math.inf):
"""
Generates n primitive pythagorean triplets.
:param n:
:return:
"""
v = 2
u = 1
while n > 0:
if not(is_odd(v) and is_odd(u)) and gcd(u, v) == 1:
a = v*v - u*u
b = 2*v*u
c = u*u + v*v
if a > b:
a, b = b, a
n -= 1
yield (a, b, c)
u += 1
if u >= v:
v += 1
u = 1
def prime_counting_function(n):
"""
Return the number of primes below a given number.
This is calculated with the proportionality which
states that π(n) ~ n / log(n).
:param n: Upper bound.
:return: Estimate of the number of primes below the
bound.
"""
return n / math.log(n)
def lambertw(x):
"""
Lambert W function with Newton's Method.
:param x:
:return:
"""
eps = 1e-8
w = x
while True:
ew = math.exp(w)
w_new = w - (w * ew - x) / (w * ew + ew)
if abs(w - w_new) <= eps:
break
w = w_new
return w
def prime_counting_function_inv(y):
"""
Returns the upper bound for a given number of primes.
:param y: How many primes you want.
:return: Upper bound.
"""
x = 2
while x / math.log(x) < y:
x += 1
return x
def product(numbers):
"""
Returns the product of a list of numbers.
:param numbers:
:return:
"""
p = 1
for x in numbers:
p *= x
return p
def factorial(n):
"""
Returns the factorial n! of a number.
:param n:
:return:
"""
return product(range(1, n + 1))
def is_even(n):
"""
Returns true if a number is even.
:param n:
:return:
"""
return not n & 1
def is_odd(n):
"""
Returns true if a number is odd.
:param n:
:return:
"""
return n & 1
def permutations(a):
"""
Generates all the permutations for a set.
:param a:
:return:
"""
n = len(a)
return _heap_perm_(n, a)
def _heap_perm_(n, a):
"""
Heap's permutation algorithm.
https://stackoverflow.com/a/29044942
:param n:
:param a:
:return:
"""
if n == 1:
yield a
else:
for i in range(n-1):
for hp in _heap_perm_(n-1, a):
yield list(hp)
j = 0 if (n % 2) == 1 else i
a[j], a[n - 1] = a[n - 1], a[j]
for hp in _heap_perm_(n-1, a):
yield list(hp)
def shift(a, n=1):
"""
Shift all the elements in the list by n.
:param a:
:param n:
:return:
"""
return a[n:] + a[:n]
def is_palindrome(x):
"""
Returns true if a number or a string is a palindrome.
:param x:
:return:
"""
strx = str(x)
return strx == strx[::-1]
# chars = [c for c in x] if not is_number(x) else digits(x)
# for i in range(len(chars) // 2):
# if chars[i] != chars[len(chars) - i - 1]:
# return False
# return True
def is_pandigital_to_n(x, n, zero_based=False):
"""
Returns true if a list of numbers is pandigital from 1 up to n.
:param x:
:param n:
:param zero_based:
:return:
"""
return set(x) == set(range(0 if zero_based else 1, n + 1))
def to_binary_string(x):
"""
Useful to convert a number into a binary number.
:param x:
:return:
"""
return "{0:b}".format(x)
def _palindrome_number_generator():
"""
https://stackoverflow.com/a/16344628
:return:
"""
yield 0
lower = 1
while True:
higher = lower*10
for i in range(lower, higher):
s = str(i)
yield int(s+s[-2::-1])
for i in range(lower, higher):
s = str(i)
yield int(s+s[::-1])
lower = higher
def palindrome_generator(lower, upper):
"""
Generates all palindromes between [lower, upper].
https://stackoverflow.com/a/16344628
:param lower:
:param upper:
:return:
"""
all_palindrome_numbers = _palindrome_number_generator()
for p in all_palindrome_numbers:
if p >= lower:
break
palindrome_list = [p]
for p in all_palindrome_numbers:
# Because we use the same generator object,
# p continues where the previous loop halted.
if p >= upper:
break
palindrome_list.append(p)
return palindrome_list
def string_split_2d(data, field_delimiter=',', line_delimiter='\n'):
"""
Split a string of 2D data into lists. Example of the data
1,2
3,4
5,6
to:
[[1,2],[3,4],[5,6]]
:param data:
:param field_delimiter: delimiter used between seperate fields, default: ,
:param line_delimiter: delimiter used between lines, default: \n
:return: 2D list
"""
return [line.split(field_delimiter) for line in data.split(line_delimiter)]
def simplify_fraction(a, b):
"""
Simplifies a fraction to the lowest common form.
:param a:
:param b:
:return:
"""
c = gcd(a, b)
return a // c, b // c
def modpow(a, n, p):
"""
Use Fermat's little theorem to calculate a^n mod p, which
can handle very large exponents. Calculates in O(log n) time.
:param a: base
:param n: exponent
:param p: mod
:return: (a^n) mod p
"""
res = 1
a = a % p
while n > 0:
# if n is odd
if n & 1:
res = (res * a) % p
n = n >> 1 # n = n / 2
a = (a*a) % p
return res
def is_prime(n, k):
"""
Test if a number n is prime k-times.
:param n: The prime number to be tested.
:param k: The number of tests.
:return:
"""
if n <= 1 or n == 4:
return False
if n <= 3:
return True
if is_even(n):
return False
while k > 0:
# Take random int in [2, n-2]
a = random.randint(2, n-1)
# Check if a and n are co-prime.
if gcd(n, a) != 1:
return False
# Fermat's little theorem
if modpow(a, n-1, n) != 1:
return False
k -= 1
return True
def _first_index_with_bigger_neighbour(P):
"""
Find the first index from the right whose element is larger
than his neighbour.
:param P:
:return:
"""
i = len(P) - 1
while i > 0 and P[i-1] >= P[i]:
i -= 1
return i
def _first_index_with_smaller_neighbour(P):
"""
Find the first index from the right whose element is smaller
than his neighbour.
:param P:
:return:
"""
i = len(P) - 1
while i > 0 and P[i-1] <= P[i]:
i -= 1
return i
def next_permutation(P):
"""
For any given permutation P, give the next permutation.
If there is no next permutation, P will be returned.
:param P:
:return:
"""
n = len(P)
# Find the first index with the bigger neighbour.
i = _first_index_with_bigger_neighbour(P)
# If this is the first, where i=0, then there is no next permutation.
if i == 0:
return P
# From the right, find a value in P that is smaller than
# the previous found value.
j = n - 1
while P[j] <= P[i-1]:
j -= 1
# Swap the values
P[i-1], P[j] = P[j], P[i-1]
# Restore the tail of the permutation.
j = n - 1
while i < j:
P[i], P[j] = P[j], P[i]
i += 1
j -= 1
return P
def previous_permutation(P):
"""
For any given permutation P, give the previous permutation.
If there is no pervious permutation, P will be returned.
:param P:
:return:
"""
n = len(P)
# Find the first index with the smaller neighbour.
i = _first_index_with_smaller_neighbour(P)
# If this is the first, where i=0, then there is no next permutation.
if i == 0:
return P
# From the right, find a value in P that is bigger than
# the previous found value.
j = n - 1
while P[j] >= P[i-1]:
j -= 1
# Swap the values
P[i-1], P[j] = P[j], P[i-1]
# Restore the tail of the permutation.
j = n - 1
while i < j:
P[i], P[j] = P[j], P[i]
i += 1
j -= 1
return P
def prime_factorization(x, sieve=None):
"""
Factorizes a number into the prime factorization.
Requires a sieve to be quick, if sieve is not specified
it will generate one itself.
:param x:
:param sieve:
:return:
"""
if x == 0:
return []
if x in [1, 2]:
return [x]
if sieve is None:
sieve = prime_sieve(x + 1)
factors = []
if sieve[x]:
return [x]
for i in range(2, int(math.sqrt(x) + 1)):
if sieve[x]:
break
if not sieve[i]:
continue
if x % i == 0:
factors.append(i)
x //= i
return factors + prime_factorization(x, sieve)
def is_permutation(A, B):
"""
Returns true if A and B are permutations of each other.
:param A:
:param B:
:return:
"""
return set(A) == set(B)
def is_permutation3(A, B, C):
"""
Returns true if A, B and C are permutations of each other.
:param A:
:param B:
:param C:
:return:
"""
return set(A) == set(B) == set(C)
def equal_sets(S):
"""
Returns true if all the sets s in S are equal
to each other.
:param S:
:return:
"""
s0 = S[0]
res = True
for i in range(1, len(S)):
res = res and s0 == S[i]
return res
| Returns the union of all sets in S.
:param S:
:return:
"""
res = set()
for s in S:
res |= s
return res
def intersect_sets(S):
"""
Returns the intersection of all sets in S.
:param S:
:return:
"""
res = S[0]
for s in S:
res &= s
return res
def cumsum(L):
"""
Returns a list with the cumulative sum of a list L.
:param S:
:return:
"""
for i in range(1, len(L)):
L[i] += L[i-1]
return L | def union_sets(S):
""" | random_line_split |
eulerlib.py | import math
import time
import quadratic
import random
def time_it(f, args=None):
t0 = time.time()
print('--- Timed execution for {} ----------------'.format(f.__name__))
print('Running...')
result = f(*args) if args is not None else f()
print('Solution is {}'.format(result))
t1 = time.time()
print('Executed in {} seconds'.format(round(t1 - t0, 6)))
def distinct(x):
"""
Returns a list of unique elements.
:param x: List of elements.
:return: List of unique elements.
"""
return list(set(x))
def is_number(n):
"""
Returns true if the number is an instance of an int.
or a float.
:param n: The number n to be tested.
:return: True if n is int or float.
"""
return isinstance(n, (int, float))
def is_unique_string(s):
"""
Determines if a given string only consists of unique
characters.
:param s: The string to test.
:return: True if the string only contains unique characters.
"""
return len(s) == len(set(s))
def divisors(x):
"""
Returns all the divisors for a number x, including x.
e.g divisors(1001) = [1, 7, 11, 13, 77, 91, 143, 1001]
:param x: number >= 1.
:return: the divisors including 1 and x.
"""
x = abs(x)
result = []
upper_bound = int(math.sqrt(x))
for i in range(1, upper_bound + 1):
if x % i == 0:
if x / i == i:
result.append(i)
else:
result.append(i)
result.append(x//i)
return sorted(distinct(result))
def sum_of_proper_divisors_sieve(n):
"""
Generates an array with the sum of the divisors
for that index of the array. To find the sum of
divisors for 12: sieve[12].
:param n: Upper limit of numbers.
:return: List with sum of divisors.
"""
sieve = [1] * (n + 1)
for i in range(2, n // 2 + 1):
for j in range(i + i, n, i):
sieve[j] += i
return sieve
def prime_sieve(n):
"""
Generates an array which determines if the index
of the array is a prime number. To see if 997 is
a prime number: sieve[997] == True.
:param n: Upper limit of numbers.
:return: List with boolean values.
"""
upper_bound = int(math.sqrt(n))
primes = [True] * (n + 1)
primes[0] = primes[1] = False
for i in range(upper_bound + 1):
if not primes[i]:
continue
for j in range(2, n // i + 1):
if i*j < n:
primes[i*j] = False
return primes
def sieve_to_list(sieve):
"""
Returns the sieve as a list where the index is the number
where it was True.
:param sieve:
:return:
"""
return [i for i, v in enumerate(sieve) if v]
def triangle_number(n):
"""
Calculate the nth triangle number.
:param n: Fn
:return: Triangle number for n.
"""
return n * (n + 1) // 2
def is_triangle_number(n):
"""
Tests if a number is a triangle number. Solved with the
inverse of n(n+1)/2, and testing if that solution
is integer.
:param n: Number to test.
:return: True if it is a triangle number.
"""
_, x = quadratic.solve(1, 1, -2*n)
return is_number(x) and x.is_integer()
def triangle_number_sieve(n):
"""
Generates a sieve which can be used to tell if a number
is a triangle number.
:param n: Up to which n.
:return: Sieve with boolean values, sieve[3] = True.
"""
triangle_numbers = [False] * (n + 1)
tn = i = 1
while tn < n:
triangle_numbers[triangle_number(i)] = True
i += 1
tn = triangle_number(i)
return triangle_numbers
def hexagonal_number(n):
|
def is_hexagonal_number(n):
"""
Determines if n is a hexagonal number.
:param n: Hn
:return: Hexagonal number
"""
_, x = quadratic.solve(2, -1, -n)
return is_number(x) and x.is_integer()
def pentagonal_number(n):
return n * (3 * n - 1) / 2
def is_pentagonal_number(n):
"""
Determines if n is a pentagonal number.
:param n:
:return: True if pentagonal.
"""
_, x = quadratic.solve(3, -1, -2 * n)
return is_number(x) and x.is_integer()
def proper_divisors(x):
"""
Returns all the proper divisors for a number x, excluding x.
e.g. proper_divisors(1001) = [1, 7, 11, 13, 77, 91, 143]
:param x: number >= 1.
:return: the divisors excluding itself.
"""
return divisors(x)[:-1]
def restricted_divisors(x):
"""
Returns all the restricted divisors for a number x, excluding 1 and x.
e.g. restricted_divisors(1001) = [7, 11, 13, 77, 91, 143]
:param x: number >= 1.
:return: the divisors excluding 1 and itself.
"""
return divisors(x)[1:-1]
def is_perfect_number(x):
"""
Test if a number is a perfect number. A number is perfect
if the sum of the proper divisors is equal to itself.
:param x: number to test.
:return: True if it is a perfect number.
"""
return sum(proper_divisors(x)) == x
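# Editor's note: a small illustrative check, not part of the original module.
# 28 is the classic perfect number: 1 + 2 + 4 + 7 + 14 == 28.
if __name__ == "__main__":
    assert is_perfect_number(28)
    assert not is_perfect_number(27)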
def is_abundant_number(x):
"""
Test if a number is an abundant number. A number is abundant
if the sum of the proper divisors is greater than the number
itself.
:param x: number to test.
:return: True if it is an abundant number.
"""
return sum(proper_divisors(x)) > x
def is_deficient_number(x):
"""
Test if a number is a deficient number. A number is deficient
if the sum of the proper divisors is less than the number
itself.
:param x: number to test.
:return: True if it is a deficient number.
"""
return sum(proper_divisors(x)) < x
def digits(x):
"""
Returns the digits of a number in a list.
:param x: The number to split into digits.
:return: List of the digits of x.
"""
return [int(d) for d in str(x)]
def digits_to_int(x):
"""
Concatenate a list of digits to an integer.
:param x:
:return:
"""
if x is None:
return ""
return int(''.join([str(i) for i in x]))
def is_fibonacci_number(x):
"""
Test if x is a Fibonacci number.
:param x: Number to test.
:return: True if it is a Fibonacci number.
"""
a = math.sqrt(5 * x ** 2 + 4)
b = math.sqrt(5 * x ** 2 - 4)
return a.is_integer() or b.is_integer()
def fibonacci_n(n):
"""
Calculate the nth Fibonacci number (Fn).
:param n: which number to calculate.
:return: The nth Fibonacci number.
"""
sqrt5 = math.sqrt(5)
phi = (1 + sqrt5) / 2
psi = (1 - sqrt5) / 2
return round((phi**n - psi**n) / sqrt5)
def fibonacci_n_inv(x):
"""
Calculate n such that Fn equals the given Fibonacci number.
:param x: Fibonacci number.
:return: The position of the Fibonacci number (Fn)
"""
if x < 2:
raise ValueError('Function approximation is wrong when x < 2.')
sqrt5 = math.sqrt(5)
phi = (1 + sqrt5) / 2
rad = 5 * x**2
p = math.sqrt(5*x**2 + 4)
n = math.log((x * sqrt5 + math.sqrt(rad + 4)) / 2, phi) \
if p.is_integer() \
else math.log((x * sqrt5 + math.sqrt(rad - 4)) / 2, phi)
return round(n)
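# Editor's note: illustrative checks of Binet's formula and its inverse;
# not part of the original module.
if __name__ == "__main__":
    assert fibonacci_n(10) == 55       # F10 = 55
    assert fibonacci_n_inv(55) == 10
    assert fibonacci_n_inv(89) == 11   # F11 = 89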
def gcd(a, b):
"""
Determines the greatest common divisor for a and b
with the Euclidean Algorithm.
:param a: First number.
:param b: Second number.
:return: Greatest common divisor for a and b.
"""
a = abs(a)
b = abs(b)
if a == b:
return a
if b > a:
a, b = b, a
q = a // b
r = a - b * q
while r != 0:
a = b
b = r
q = a // b
r = a - b * q
return b
def lcm(a, b):
"""
Calculate the least common multiple (LCM) with the GCD
algorithm using: LCM(a,b) = (a*b)/GCD(a,b).
:param a:
:param b:
:return:
"""
return a * b // gcd(a, b)
def lcm3(a, b, c):
"""
Calculating the LCM for multiple numbers is done with
LCM(a,b,c) = LCM(LCM(a,b),c)
:param a:
:param b:
:param c:
:return:
"""
return lcm(lcm(a, b), c)
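# Editor's note: quick illustrative checks of the gcd/lcm helpers; not part
# of the original module.
if __name__ == "__main__":
    assert gcd(1071, 462) == 21
    assert lcm(4, 6) == 12
    assert lcm3(4, 6, 10) == 60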
def primitive_pythagorean_triplet_generator(n=math.inf):
"""
Generates n primitive pythagorean triplets.
:param n:
:return:
"""
v = 2
u = 1
while n > 0:
if not(is_odd(v) and is_odd(u)) and gcd(u, v) == 1:
a = v*v - u*u
b = 2*v*u
c = u*u + v*v
if a > b:
a, b = b, a
n -= 1
yield (a, b, c)
u += 1
if u >= v:
v += 1
u = 1
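# Editor's note (illustrative, not part of the original module): once is_odd
# is defined further below, the generator yields the familiar primitive
# triples in order:
#     list(primitive_pythagorean_triplet_generator(3))
#     -> [(3, 4, 5), (5, 12, 13), (8, 15, 17)]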
def prime_counting_function(n):
"""
Return an estimate of the number of primes below a given
number, using the prime number theorem approximation
π(n) ~ n / log(n).
:param n: Upper bound.
:return: Estimate of the number of primes below the
bound.
"""
return n / math.log(n)
def lambertw(x):
"""
Lambert W function with Newton's Method.
:param x:
:return:
"""
eps = 1e-8
w = x
while True:
ew = math.exp(w)
w_new = w - (w * ew - x) / (w * ew + ew)
if abs(w - w_new) <= eps:
break
w = w_new
return w
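# Editor's note: illustrative check; not part of the original module.
# W(1) is the omega constant (~0.5671), the solution of w * e**w = 1.
if __name__ == "__main__":
    w = lambertw(1)
    assert abs(w * math.exp(w) - 1) < 1e-6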
def prime_counting_function_inv(y):
"""
Returns the upper bound for a given number of primes.
:param y: How many primes you want.
:return: Upper bound.
"""
x = 2
while x / math.log(x) < y:
x += 1
return x
def product(numbers):
"""
Returns the product of a list of numbers.
:param numbers:
:return:
"""
p = 1
for x in numbers:
p *= x
return p
def factorial(n):
"""
Returns the factorial n! of a number.
:param n:
:return:
"""
return product(range(1, n + 1))
def is_even(n):
"""
Returns true if a number is even.
:param n:
:return:
"""
return not n & 1
def is_odd(n):
"""
Returns true if a number is odd.
:param n:
:return:
"""
return n & 1
def permutations(a):
"""
Generates all the permutations for a set.
:param a:
:return:
"""
n = len(a)
return _heap_perm_(n, a)
def _heap_perm_(n, a):
"""
Heap's permutation algorithm.
https://stackoverflow.com/a/29044942
:param n:
:param a:
:return:
"""
if n == 1:
yield a
else:
for i in range(n-1):
for hp in _heap_perm_(n-1, a):
yield list(hp)
j = 0 if (n % 2) == 1 else i
a[j], a[n - 1] = a[n - 1], a[j]
for hp in _heap_perm_(n-1, a):
yield list(hp)
def shift(a, n=1):
"""
Cyclically shift (rotate) the elements of the list left by n.
:param a:
:param n:
:return:
"""
return a[n:] + a[:n]
def is_palindrome(x):
"""
Returns true if a number or a string is a palindrome.
:param x:
:return:
"""
strx = str(x)
return strx == strx[::-1]
# chars = [c for c in x] if not is_number(x) else digits(x)
# for i in range(len(chars) // 2):
# if chars[i] != chars[len(chars) - i - 1]:
# return False
# return True
def is_pandigital_to_n(x, n, zero_based=False):
"""
Returns true if a list of numbers is pandigital from 1 up to n.
:param x:
:param n:
:param zero_based:
:return:
"""
return set(x) == set(range(0 if zero_based else 1, n + 1))
def to_binary_string(x):
"""
Convert a number to its binary string representation.
:param x:
:return:
"""
return "{0:b}".format(x)
def _palindrome_number_generator():
"""
https://stackoverflow.com/a/16344628
:return:
"""
yield 0
lower = 1
while True:
higher = lower*10
for i in range(lower, higher):
s = str(i)
yield int(s+s[-2::-1])
for i in range(lower, higher):
s = str(i)
yield int(s+s[::-1])
lower = higher
def palindrome_generator(lower, upper):
"""
Generates all palindromes between [lower, upper].
https://stackoverflow.com/a/16344628
:param lower:
:param upper:
:return:
"""
all_palindrome_numbers = _palindrome_number_generator()
for p in all_palindrome_numbers:
if p >= lower:
break
palindrome_list = [p]
for p in all_palindrome_numbers:
# Because we use the same generator object,
# p continues where the previous loop halted.
if p >= upper:
break
palindrome_list.append(p)
return palindrome_list
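# Editor's note: illustrative usage; not part of the original module.
if __name__ == "__main__":
    assert palindrome_generator(100, 200) == [
        101, 111, 121, 131, 141, 151, 161, 171, 181, 191]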
def string_split_2d(data, field_delimiter=',', line_delimiter='\n'):
"""
Split a string of 2D data into lists. Example of the data
1,2
3,4
5,6
to:
[[1,2],[3,4],[5,6]]
:param data:
:param field_delimiter: delimiter used between separate fields, default: ,
:param line_delimiter: delimiter used between lines, default: \n
:return: 2D list
"""
return [line.split(field_delimiter) for line in data.split(line_delimiter)]
def simplify_fraction(a, b):
"""
Simplifies a fraction to the lowest common form.
:param a:
:param b:
:return:
"""
c = gcd(a, b)
return a // c, b // c
def modpow(a, n, p):
"""
Use Fermat's little theorem to calculate a^n mod p, which
can handle very large exponents. Calculates in O(log n) time.
:param a: base
:param n: exponent
:param p: mod
:return: (a^n) mod p
"""
res = 1
a = a % p
while n > 0:
# if n is odd
if n & 1:
res = (res * a) % p
n = n >> 1 # n = n / 2
a = (a*a) % p
return res
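# Editor's note: illustrative check against Python's built-in three-argument
# pow; not part of the original module.
if __name__ == "__main__":
    assert modpow(3, 200, 50) == pow(3, 200, 50)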
def is_prime(n, k):
"""
Probabilistically test whether n is prime, using k rounds of the Fermat test.
:param n: The prime number to be tested.
:param k: The number of tests.
:return:
"""
if n <= 1 or n == 4:
return False
if n <= 3:
return True
if is_even(n):
return False
while k > 0:
# Take a random int in [2, n-2]
a = random.randint(2, n - 2)
# Check if a and n are co-prime.
if gcd(n, a) != 1:
return False
# Fermat's little theorem
if modpow(a, n-1, n) != 1:
return False
k -= 1
return True
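# Editor's note: illustrative only; not part of the original module. For a
# prime input the Fermat test always passes; for a composite it fails with
# high probability (the test is probabilistic, so no hard assert for that).
if __name__ == "__main__":
    assert is_prime(997, 5)  # 997 is prime
    # is_prime(221, 10) is False with overwhelming probability (221 = 13 * 17)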
def _first_index_with_bigger_neighbour(P):
"""
Find the first index from the right whose element is larger
than its neighbour.
:param P:
:return:
"""
i = len(P) - 1
while i > 0 and P[i-1] >= P[i]:
i -= 1
return i
def _first_index_with_smaller_neighbour(P):
"""
Find the first index from the right whose element is smaller
than its neighbour.
:param P:
:return:
"""
i = len(P) - 1
while i > 0 and P[i-1] <= P[i]:
i -= 1
return i
def next_permutation(P):
"""
For any given permutation P, give the next permutation.
If there is no next permutation, P will be returned.
:param P:
:return:
"""
n = len(P)
# Find the first index with the bigger neighbour.
i = _first_index_with_bigger_neighbour(P)
# If this is the first, where i=0, then there is no next permutation.
if i == 0:
return P
# From the right, find a value in P that is smaller than
# the previous found value.
j = n - 1
while P[j] <= P[i-1]:
j -= 1
# Swap the values
P[i-1], P[j] = P[j], P[i-1]
# Restore the tail of the permutation.
j = n - 1
while i < j:
P[i], P[j] = P[j], P[i]
i += 1
j -= 1
return P
def previous_permutation(P):
"""
For any given permutation P, give the previous permutation.
If there is no previous permutation, P will be returned.
:param P:
:return:
"""
n = len(P)
# Find the first index with the smaller neighbour.
i = _first_index_with_smaller_neighbour(P)
# If this is the first, where i=0, then there is no previous permutation.
if i == 0:
return P
# From the right, find a value in P that is bigger than
# the previous found value.
j = n - 1
while P[j] >= P[i-1]:
j -= 1
# Swap the values
P[i-1], P[j] = P[j], P[i-1]
# Restore the tail of the permutation.
j = n - 1
while i < j:
P[i], P[j] = P[j], P[i]
i += 1
j -= 1
return P
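# Editor's note: illustrative round trip; not part of the original module.
# Both functions mutate and return the same list object.
if __name__ == "__main__":
    assert next_permutation([1, 2, 3]) == [1, 3, 2]
    assert previous_permutation([1, 3, 2]) == [1, 2, 3]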
def prime_factorization(x, sieve=None):
"""
Factorizes a number into the prime factorization.
Requires a sieve to be quick, if sieve is not specified
it will generate one itself.
:param x:
:param sieve:
:return:
"""
if x == 0:
return []
if x in [1, 2]:
return [x]
if sieve is None:
sieve = prime_sieve(x + 1)
factors = []
if sieve[x]:
return [x]
for i in range(2, int(math.sqrt(x) + 1)):
if sieve[x]:
break
if not sieve[i]:
continue
if x % i == 0:
factors.append(i)
x //= i
return factors + prime_factorization(x, sieve)
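# Editor's note: illustrative check; not part of the original module. The
# factors come out of the recursion unsorted, hence the sorted() call.
if __name__ == "__main__":
    assert sorted(prime_factorization(360)) == [2, 2, 2, 3, 3, 5]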
def is_permutation(A, B):
"""
Returns true if A and B are permutations of each other.
:param A:
:param B:
:return:
"""
return sorted(A) == sorted(B)
def is_permutation3(A, B, C):
"""
Returns true if A, B and C are permutations of each other.
:param A:
:param B:
:param C:
:return:
"""
return sorted(A) == sorted(B) == sorted(C)
def equal_sets(S):
"""
Returns true if all the sets s in S are equal
to each other.
:param S:
:return:
"""
s0 = S[0]
res = True
for i in range(1, len(S)):
res = res and s0 == S[i]
return res
def union_sets(S):
"""
Returns the union of all sets in S.
:param S:
:return:
"""
res = set()
for s in S:
res |= s
return res
def intersect_sets(S):
"""
Returns the intersection of all sets in S.
:param S:
:return:
"""
res = S[0]
for s in S:
res &= s
return res
def cumsum(L):
"""
Returns a list with the cumulative sum of a list L.
:param L:
:return:
"""
for i in range(1, len(L)):
L[i] += L[i-1]
return L
| """
Calculate the nth hexagonal number.
:param n: Hn
:return: Hexagonal number
"""
return n * (2 * n - 1) | identifier_body |
constant_folding.py | """Constant folding optimisation for bytecode.
This optimisation adds a new pseudo-opcode, LOAD_FOLDED_CONST, which encodes the
type of a complex literal constant in its `arg` field, in a "typestruct" format
described below. There is a corresponding function, build_folded_type, which
constructs a vm type from the encoded typestruct.
The type structure stored in LOAD_FOLDED_CONST is an immutable (for easy
hashing) tree with the following elements:
('prim', <python type>) : a primitive type, e.g. ('prim', str)
(tag, types) : a collection type; 'types' represent the type params
frozenset(types): a union of types
tag = prim | tuple | list | map | set
the types python supports for a literal constant
types = a tuple of type | frozenset(types)
where the size of the tuple depends on the tag, e.g. ('map', (k, v))
For ease of testing and debugging there is also a simplified literal syntax to
construct and examine these typestructs, see constant_folding_test for examples.
This is less uniform, and therefore not recommended to use other than for
input/output.
"""
from typing import Any, Dict, FrozenSet, Tuple
import attrs
from pytype.pyc import loadmarshal
from pytype.pyc import opcodes
from pytype.pyc import pyc
# Copied from typegraph/cfg.py
# If we have more than 64 elements in a map/list, the type variable accumulates
# too many bindings and falls back to Any. So if we find a constant with too
# many elements, we go directly to constructing an abstract type, and do not
# attempt to track keys/element positions.
MAX_VAR_SIZE = 64
class ConstantError(Exception):
"""Errors raised during constant folding."""
def __init__(self, message, op):
super().__init__(message)
self.lineno = op.line
self.message = message
# We track constants at three levels:
# typ: A typestruct representing the abstract type of the constant
# elements: A list or map of top-level types
# value: The concrete python value
#
# 'elements' is an intermediate structure that tracks individual folded
# constants for every element in a map or list. So e.g. for the constant
# {'x': [1, 2], 'y': 3}
# we would have
# typ = ('map', ({str}, {('list', {int}), int}))
# value = {'x': [1, 2], 'y': 3}
# elements = {'x': <<[1, 2]>>, 'y': <<3>>}
# where <<x>> is the folded constant corresponding to x. This lets us
# short-circuit pyval tracking at any level in the structure and fall back to
# abstract types.
#
# Note that while we could in theory just track the python value, and then
# construct 'typ' and 'elements' at the end, that would mean recursively
# unfolding a structure that we have just folded; the code is simpler if we
# track elements and types at every stage.
@attrs.define
class _Constant:
"""A folded python constant."""
typ: Tuple[str, Any]
value: Any
elements: Any
op: opcodes.Opcode
@property
def tag(self):
return self.typ[0]
@attrs.define
class _Collection:
"""A linear collection (e.g. list, tuple, set)."""
types: FrozenSet[Any]
values: Tuple[Any, ...]
elements: Tuple[Any, ...]
@attrs.define
class _Map:
"""A dictionary."""
key_types: FrozenSet[Any]
keys: Tuple[Any, ...]
value_types: FrozenSet[Any]
values: Tuple[Any, ...]
elements: Dict[Any, Any]
class _CollectionBuilder:
"""Build up a collection of constants."""
def __init__(self):
self.types = set()
self.values = []
self.elements = []
def add(self, constant):
self.types.add(constant.typ)
self.elements.append(constant)
self.values.append(constant.value)
def build(self):
return _Collection(
types=frozenset(self.types),
values=tuple(reversed(self.values)),
elements=tuple(reversed(self.elements)))
class _MapBuilder:
"""Build up a map of constants."""
def __init__(self):
self.key_types = set()
self.value_types = set()
self.keys = []
self.values = []
self.elements = {}
def add(self, key, value):
self.key_types.add(key.typ)
self.value_types.add(value.typ)
self.keys.append(key.value)
self.values.append(value.value)
self.elements[key.value] = value
def build(self):
return _Map(
key_types=frozenset(self.key_types),
keys=tuple(reversed(self.keys)),
value_types=frozenset(self.value_types),
values=tuple(reversed(self.values)),
elements=self.elements)
class _Stack:
"""A simple opcode stack."""
def __init__(self):
self.stack = []
self.consts = {}
def __iter__(self):
return self.stack.__iter__()
def push(self, val):
self.stack.append(val)
def pop(self):
return self.stack.pop()
def _preserve_constant(self, c):
if c and (
not isinstance(c.op, opcodes.LOAD_CONST) or
isinstance(c.op, opcodes.BUILD_STRING)):
self.consts[id(c.op)] = c
def clear(self):
# Preserve any constants in the stack before clearing it.
for c in self.stack:
self._preserve_constant(c)
self.stack = []
def _pop_args(self, n):
"""Try to get n args off the stack for a BUILD call."""
if len(self.stack) < n:
# We have started a new block in the middle of constructing a literal
# (e.g. due to an inline function call). Clear the stack, since the
# literal is not constant.
self.clear()
return None
elif n and any(x is None for x in self.stack[-n:]):
# We have something other than constants in the arg list. Pop all the args
# for this op off the stack, preserving constants.
for _ in range(n):
self._preserve_constant(self.pop())
return None
else:
return [self.pop() for _ in range(n)]
def fold_args(self, n, op):
"""Collect the arguments to a build call."""
ret = _CollectionBuilder()
args = self._pop_args(n)
if args is None:
self.push(None)
return None
for elt in args:
ret.add(elt)
elt.op.folded = op
return ret.build()
def fold_map_args(self, n, op):
"""Collect the arguments to a BUILD_MAP call."""
ret = _MapBuilder()
args = self._pop_args(2 * n)
if args is None:
self.push(None)
return None
for i in range(0, 2 * n, 2):
v_elt, k_elt = args[i], args[i + 1]
ret.add(k_elt, v_elt)
k_elt.op.folded = op
v_elt.op.folded = op
return ret.build()
def build_str(self, n, op):
ret = self.fold_args(n, op)
if ret:
self.push(_Constant(('prim', str), '', None, op))
else:
self.push(None)
return ret
def build(self, python_type, op):
"""Build a folded type."""
collection = self.fold_args(op.arg, op)
if collection:
typename = python_type.__name__
typ = (typename, collection.types)
try:
value = python_type(collection.values)
except TypeError as e:
raise ConstantError(f'TypeError: {e.args[0]}', op) from e
elements = collection.elements
self.push(_Constant(typ, value, elements, op))
class _FoldedOps:
"""Mapping from a folded opcode to the top level constant that replaces it."""
def __init__(self):
self.folds = {}
def add(self, op):
self.folds[id(op)] = op.folded
def resolve(self, op):
f = op
while id(f) in self.folds:
f = self.folds[id(f)]
return f
class _FoldConstants:
"""Fold constant literals in pyc code."""
def visit_code(self, code):
"""Visit code, folding literals."""
def build_tuple(tup):
out = []
for e in tup:
if isinstance(e, tuple):
out.append(build_tuple(e))
else:
out.append(('prim', type(e)))
return ('tuple', tuple(out))
folds = _FoldedOps()
for block in code.order:
stack = _Stack()
for op in block:
if isinstance(op, opcodes.LOAD_CONST):
elt = code.consts[op.arg]
if isinstance(elt, tuple):
typ = build_tuple(elt)
stack.push(_Constant(typ, elt, typ[1], op))
else:
stack.push(_Constant(('prim', type(elt)), elt, None, op))
elif isinstance(op, opcodes.BUILD_LIST):
stack.build(list, op)
elif isinstance(op, opcodes.BUILD_SET):
stack.build(set, op)
elif isinstance(op, opcodes.FORMAT_VALUE):
if op.arg & loadmarshal.FVS_MASK:
stack.build_str(2, op)
else:
stack.build_str(1, op)
elif isinstance(op, opcodes.BUILD_STRING):
stack.build_str(op.arg, op)
elif isinstance(op, opcodes.BUILD_MAP):
map_ = stack.fold_map_args(op.arg, op)
if map_:
typ = ('map', (map_.key_types, map_.value_types))
val = dict(zip(map_.keys, map_.values))
stack.push(_Constant(typ, val, map_.elements, op))
elif isinstance(op, opcodes.BUILD_CONST_KEY_MAP):
keys = stack.pop()
vals = stack.fold_args(op.arg, op)
if vals:
keys.op.folded = op
_, t = keys.typ
typ = ('map', (frozenset(t), vals.types))
val = dict(zip(keys.value, vals.values))
elements = dict(zip(keys.value, vals.elements))
stack.push(_Constant(typ, val, elements, op))
elif isinstance(op, opcodes.LIST_APPEND):
elements = stack.fold_args(2, op)
if elements:
lst, element = elements.elements
tag, et = lst.typ
assert tag == 'list'
typ = (tag, et | {element.typ})
value = lst.value + [element.value]
elements = lst.elements + (element,)
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.LIST_EXTEND):
elements = stack.fold_args(2, op)
if elements:
lst, other = elements.elements
tag, et = lst.typ
assert tag == 'list'
other_tag, other_et = other.typ
if other_tag == 'tuple':
# Deconstruct the tuple built in opcodes.LOAD_CONST above
other_elts = tuple(_Constant(('prim', e), v, None, other.op)
for (_, e), v in zip(other_et, other.value))
elif other_tag == 'prim':
assert other_et == str
other_et = {other.typ}
other_elts = tuple(_Constant(('prim', str), v, None, other.op)
for v in other.value)
else:
other_elts = other.elements
typ = (tag, et | set(other_et))
value = lst.value + list(other.value)
elements = lst.elements + other_elts
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.MAP_ADD):
elements = stack.fold_args(3, op)
if elements:
map_, key, val = elements.elements
tag, (kt, vt) = map_.typ
assert tag == 'map'
typ = (tag, (kt | {key.typ}, vt | {val.typ}))
value = {**map_.value, **{key.value: val.value}}
elements = {**map_.elements, **{key.value: val}}
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.DICT_UPDATE):
elements = stack.fold_args(2, op)
if elements:
map1, map2 = elements.elements
tag1, (kt1, vt1) = map1.typ
tag2, (kt2, vt2) = map2.typ
assert tag1 == tag2 == 'map'
typ = (tag1, (kt1 | kt2, vt1 | vt2))
value = {**map1.value, **map2.value}
elements = {**map1.elements, **map2.elements}
stack.push(_Constant(typ, value, elements, op))
else:
# If we hit any other bytecode, we are no longer building a literal
# constant. Push None as a sentinel so that the next BUILD op sees a
# non-constant argument and does not fold itself.
stack.push(None)
# Clear the stack to save any folded constants before exiting the block
stack.clear()
# Now rewrite the block to replace folded opcodes with a single
# LOAD_FOLDED_CONST opcode.
out = []
for op in block:
if id(op) in stack.consts:
t = stack.consts[id(op)]
arg = t
pretty_arg = t
o = opcodes.LOAD_FOLDED_CONST(op.index, op.line, arg, pretty_arg)
o.next = op.next
o.target = op.target
o.block_target = op.block_target
o.code = op.code
op.folded = o
folds.add(op)
out.append(o)
elif op.folded:
folds.add(op)
else:
out.append(op)
block.code = out
# Adjust 'next' and 'target' pointers to account for folding.
for op in code.code_iter:
if op.next:
op.next = folds.resolve(op.next)
if op.target:
op.target = folds.resolve(op.target)
return code
def to_literal(typ, always_tuple=False):
"""Convert a typestruct item to a simplified form for ease of use."""
def expand(params):
return (to_literal(x) for x in params)
def union(params):
ret = tuple(sorted(expand(params), key=str))
if len(ret) == 1 and not always_tuple:
ret, = ret # pylint: disable=self-assigning-variable
return ret
tag, params = typ
if tag == 'prim':
return params
elif tag == 'tuple':
vals = tuple(expand(params))
return (tag, *vals)
elif tag == 'map':
k, v = params
return (tag, union(k), union(v))
else:
return (tag, union(params))
def from_literal(tup):
"""Convert from simple literal form to the more uniform typestruct."""
def expand(vals):
return [from_literal(x) for x in vals]
def union(vals):
if not isinstance(vals, tuple):
vals = (vals,)
v = expand(vals)
return frozenset(v)
if not isinstance(tup, tuple):
return ('prim', tup)
elif isinstance(tup[0], str):
tag, *vals = tup
if tag == 'prim':
return tup
elif tag == 'tuple':
params = tuple(expand(vals))
return (tag, params)
elif tag == 'map':
k, v = vals
return (tag, (union(k), union(v)))
else:
vals, = vals # pylint: disable=self-assigning-variable
return (tag, union(vals))
else:
return tuple(expand(tup))
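# Editor's note: a small, hedged illustration of the round trip between the
# simplified literal form and the uniform typestruct; not in the original.
if __name__ == "__main__":
    t = from_literal(('list', int))
    assert t == ('list', frozenset({('prim', int)}))
    assert to_literal(t) == ('list', int)
    assert from_literal(('map', str, int)) == (
        'map', (frozenset({('prim', str)}), frozenset({('prim', int)})))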
def optimize(code):
"""Fold all constant literals in the bytecode into LOAD_FOLDED_CONST ops."""
return pyc.visit(code, _FoldConstants())
def build_folded_type(ctx, state, const):
"""Convert a typestruct to a vm type."""
def typeconst(t):
"""Create a constant purely to hold types for a recursive call."""
return _Constant(t, None, None, const.op)
def build_pyval(state, const):
if const.value is not None and const.tag in ('prim', 'tuple'):
return state, ctx.convert.constant_to_var(const.value)
else:
return build_folded_type(ctx, state, const)
def expand(state, elements):
vs = []
for e in elements:
state, v = build_pyval(state, e)
vs.append(v)
return state, vs
def join_types(state, ts):
xs = [typeconst(t) for t in ts]
state, vs = expand(state, xs)
val = ctx.convert.build_content(vs)
return state, val
def collect(state, convert_type, params):
state, t = join_types(state, params)
ret = ctx.convert.build_collection_of_type(state.node, convert_type, t)
return state, ret
def collect_tuple(state, elements):
state, vs = expand(state, elements)
return state, ctx.convert.build_tuple(state.node, vs)
def collect_list(state, params, elements):
if elements is None:
return collect(state, ctx.convert.list_type, params)
elif len(elements) < MAX_VAR_SIZE:
state, vs = expand(state, elements)
return state, ctx.convert.build_list(state.node, vs)
else:
# Without constant folding we construct a variable wrapping every element
# in the list and store it; however, we cannot retrieve them all. So as an
# optimisation, we will add the first few elements as pyvals, then add one
# element for every contained type, and rely on the fact that the tail
# elements will contribute to the overall list type, but will not be
# retrievable as pyvals.
# TODO(b/175443170): We should use a smaller MAX_SUBSCRIPT cutoff; this
# behaviour is unrelated to MAX_VAR_SIZE (which limits the number of
# distinct bindings for the overall typevar).
n = MAX_VAR_SIZE - len(params) - 1
elts = elements[:n] + tuple(typeconst(t) for t in params)
state, vs = expand(state, elts)
return state, ctx.convert.build_list(state.node, vs)
def collect_map(state, params, elements):
m_var = ctx.convert.build_map(state.node)
m = m_var.data[0]
# Do not forward the state while creating dict literals.
node = state.node
# We want a single string type to store in the Dict.K type param.
# Calling set_str_item on every k/v pair will lead to a type param with a
# lot of literal strings as bindings, causing potentially severe performance
# issues down the line.
str_key = ctx.convert.str_type.instantiate(node)
if elements is not None and len(elements) < MAX_VAR_SIZE:
for (k, v) in elements.items():
_, v = build_pyval(state, v)
k_var = ctx.convert.constant_to_var(k)
m.setitem(node, k_var, v)
if isinstance(k, str):
m.merge_instance_type_params(node, str_key, v)
else:
m.merge_instance_type_params(node, k_var, v)
else:
# Treat a too-large dictionary as {Union[keys] : Union[vals]}. We could
# store a subset of the k/v pairs, as with collect_list, but for
# dictionaries it is less obvious which subset we should be storing.
# Perhaps we could create one variable per unique value type, and then
# store every key in the pyval but reuse the value variables.
k_types, v_types = params
_, v = join_types(state, v_types)
for t in k_types:
_, k = build_folded_type(ctx, state, typeconst(t))
m.setitem(node, k, v)
m.merge_instance_type_params(node, k, v)
return state, m_var
tag, params = const.typ
if tag == 'prim':
if const.value:
return state, ctx.convert.constant_to_var(const.value)
else:
val = ctx.convert.primitive_class_instances[params]
return state, val.to_variable(state.node)
elif tag == 'list':
return collect_list(state, params, const.elements)
elif tag == 'set':
return collect(state, ctx.convert.set_type, params)
elif tag == 'tuple':
# If we get a tuple without const.elements, construct it from the type.
# (e.g. this happens with a large dict with tuple keys)
if not const.elements: | elts = const.elements
return collect_tuple(state, elts)
elif tag == 'map':
return collect_map(state, params, const.elements)
else:
assert False, ('Unexpected type tag:', const.typ) | elts = tuple(typeconst(t) for t in params)
else: | random_line_split |
constant_folding.py | """Constant folding optimisation for bytecode.
This optimisation adds a new pseudo-opcode, LOAD_FOLDED_CONST, which encodes the
type of a complex literal constant in its `arg` field, in a "typestruct" format
described below. There is a corresponding function, build_folded_type, which
constructs a vm type from the encoded typestruct.
The type structure stored in LOAD_FOLDED_CONST is an immutable (for easy
hashing) tree with the following elements:
('prim', <python type>) : a primitive type, e.g. ('prim', str)
(tag, types) : a collection type; 'types' represent the type params
frozenset(types): a union of types
tag = prim | tuple | list | map | set
the types python supports for a literal constant
types = a tuple of type | frozenset(types)
where the size of the tuple depends on the tag, e.g. ('map', (k, v))
For ease of testing and debugging there is also a simplified literal syntax to
construct and examine these typestructs, see constant_folding_test for examples.
This is less uniform, and therefore not recommended to use other than for
input/output.
"""
from typing import Any, Dict, FrozenSet, Tuple
import attrs
from pytype.pyc import loadmarshal
from pytype.pyc import opcodes
from pytype.pyc import pyc
# Copied from typegraph/cfg.py
# If we have more than 64 elements in a map/list, the type variable accumulates
# too many bindings and falls back to Any. So if we find a constant with too
# many elements, we go directly to constructing an abstract type, and do not
# attempt to track keys/element positions.
MAX_VAR_SIZE = 64
class ConstantError(Exception):
"""Errors raised during constant folding."""
def __init__(self, message, op):
super().__init__(message)
self.lineno = op.line
self.message = message
# We track constants at three levels:
# typ: A typestruct representing the abstract type of the constant
# elements: A list or map of top-level types
# value: The concrete python value
#
# 'elements' is an intermediate structure that tracks individual folded
# constants for every element in a map or list. So e.g. for the constant
# {'x': [1, 2], 'y': 3}
# we would have
# typ = ('map', ({str}, {('list', {int}), int}))
# value = {'x': [1, 2], 'y': 3}
# elements = {'x': <<[1, 2]>>, 'y': <<3>>}
# where <<x>> is the folded constant corresponding to x. This lets us
# short-circuit pyval tracking at any level in the structure and fall back to
# abstract types.
#
# Note that while we could in theory just track the python value, and then
# construct 'typ' and 'elements' at the end, that would mean recursively
# unfolding a structure that we have just folded; the code is simpler if we
# track elements and types at every stage.
@attrs.define
class _Constant:
"""A folded python constant."""
typ: Tuple[str, Any]
value: Any
elements: Any
op: opcodes.Opcode
@property
def tag(self):
return self.typ[0]
@attrs.define
class _Collection:
"""A linear collection (e.g. list, tuple, set)."""
types: FrozenSet[Any]
values: Tuple[Any, ...]
elements: Tuple[Any, ...]
@attrs.define
class _Map:
"""A dictionary."""
key_types: FrozenSet[Any]
keys: Tuple[Any, ...]
value_types: FrozenSet[Any]
values: Tuple[Any, ...]
elements: Dict[Any, Any]
class _CollectionBuilder:
"""Build up a collection of constants."""
def __init__(self):
self.types = set()
self.values = []
self.elements = []
def add(self, constant):
self.types.add(constant.typ)
self.elements.append(constant)
self.values.append(constant.value)
def build(self):
return _Collection(
types=frozenset(self.types),
values=tuple(reversed(self.values)),
elements=tuple(reversed(self.elements)))
class _MapBuilder:
"""Build up a map of constants."""
def __init__(self):
self.key_types = set()
self.value_types = set()
self.keys = []
self.values = []
self.elements = {}
def add(self, key, value):
self.key_types.add(key.typ)
self.value_types.add(value.typ)
self.keys.append(key.value)
self.values.append(value.value)
self.elements[key.value] = value
def build(self):
return _Map(
key_types=frozenset(self.key_types),
keys=tuple(reversed(self.keys)),
value_types=frozenset(self.value_types),
values=tuple(reversed(self.values)),
elements=self.elements)
class _Stack:
"""A simple opcode stack."""
def __init__(self):
self.stack = []
self.consts = {}
def __iter__(self):
return self.stack.__iter__()
def push(self, val):
self.stack.append(val)
def pop(self):
|
def _preserve_constant(self, c):
if c and (
not isinstance(c.op, opcodes.LOAD_CONST) or
isinstance(c.op, opcodes.BUILD_STRING)):
self.consts[id(c.op)] = c
def clear(self):
# Preserve any constants in the stack before clearing it.
for c in self.stack:
self._preserve_constant(c)
self.stack = []
def _pop_args(self, n):
"""Try to get n args off the stack for a BUILD call."""
if len(self.stack) < n:
# We have started a new block in the middle of constructing a literal
# (e.g. due to an inline function call). Clear the stack, since the
# literal is not constant.
self.clear()
return None
elif n and any(x is None for x in self.stack[-n:]):
# We have something other than constants in the arg list. Pop all the args
# for this op off the stack, preserving constants.
for _ in range(n):
self._preserve_constant(self.pop())
return None
else:
return [self.pop() for _ in range(n)]
def fold_args(self, n, op):
"""Collect the arguments to a build call."""
ret = _CollectionBuilder()
args = self._pop_args(n)
if args is None:
self.push(None)
return None
for elt in args:
ret.add(elt)
elt.op.folded = op
return ret.build()
def fold_map_args(self, n, op):
"""Collect the arguments to a BUILD_MAP call."""
ret = _MapBuilder()
args = self._pop_args(2 * n)
if args is None:
self.push(None)
return None
for i in range(0, 2 * n, 2):
v_elt, k_elt = args[i], args[i + 1]
ret.add(k_elt, v_elt)
k_elt.op.folded = op
v_elt.op.folded = op
return ret.build()
def build_str(self, n, op):
ret = self.fold_args(n, op)
if ret:
self.push(_Constant(('prim', str), '', None, op))
else:
self.push(None)
return ret
def build(self, python_type, op):
"""Build a folded type."""
collection = self.fold_args(op.arg, op)
if collection:
typename = python_type.__name__
typ = (typename, collection.types)
try:
value = python_type(collection.values)
except TypeError as e:
raise ConstantError(f'TypeError: {e.args[0]}', op) from e
elements = collection.elements
self.push(_Constant(typ, value, elements, op))
class _FoldedOps:
"""Mapping from a folded opcode to the top level constant that replaces it."""
def __init__(self):
self.folds = {}
def add(self, op):
self.folds[id(op)] = op.folded
def resolve(self, op):
f = op
while id(f) in self.folds:
f = self.folds[id(f)]
return f
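# Editor's note: an illustrative sketch (not in the original) of how resolve()
# follows fold chains: if op a was folded into b, and b into c, resolve(a)
# returns c. The _Op class below is a stand-in for a real opcode object.
if __name__ == "__main__":
    class _Op:
        folded = None
    a, b, c = _Op(), _Op(), _Op()
    a.folded, b.folded = b, c
    folds = _FoldedOps()
    folds.add(a)
    folds.add(b)
    assert folds.resolve(a) is c
    assert folds.resolve(c) is c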
class _FoldConstants:
"""Fold constant literals in pyc code."""
def visit_code(self, code):
"""Visit code, folding literals."""
def build_tuple(tup):
out = []
for e in tup:
if isinstance(e, tuple):
out.append(build_tuple(e))
else:
out.append(('prim', type(e)))
return ('tuple', tuple(out))
folds = _FoldedOps()
for block in code.order:
stack = _Stack()
for op in block:
if isinstance(op, opcodes.LOAD_CONST):
elt = code.consts[op.arg]
if isinstance(elt, tuple):
typ = build_tuple(elt)
stack.push(_Constant(typ, elt, typ[1], op))
else:
stack.push(_Constant(('prim', type(elt)), elt, None, op))
elif isinstance(op, opcodes.BUILD_LIST):
stack.build(list, op)
elif isinstance(op, opcodes.BUILD_SET):
stack.build(set, op)
elif isinstance(op, opcodes.FORMAT_VALUE):
if op.arg & loadmarshal.FVS_MASK:
stack.build_str(2, op)
else:
stack.build_str(1, op)
elif isinstance(op, opcodes.BUILD_STRING):
stack.build_str(op.arg, op)
elif isinstance(op, opcodes.BUILD_MAP):
map_ = stack.fold_map_args(op.arg, op)
if map_:
typ = ('map', (map_.key_types, map_.value_types))
val = dict(zip(map_.keys, map_.values))
stack.push(_Constant(typ, val, map_.elements, op))
elif isinstance(op, opcodes.BUILD_CONST_KEY_MAP):
keys = stack.pop()
vals = stack.fold_args(op.arg, op)
if vals:
keys.op.folded = op
_, t = keys.typ
typ = ('map', (frozenset(t), vals.types))
val = dict(zip(keys.value, vals.values))
elements = dict(zip(keys.value, vals.elements))
stack.push(_Constant(typ, val, elements, op))
elif isinstance(op, opcodes.LIST_APPEND):
elements = stack.fold_args(2, op)
if elements:
lst, element = elements.elements
tag, et = lst.typ
assert tag == 'list'
typ = (tag, et | {element.typ})
value = lst.value + [element.value]
elements = lst.elements + (element,)
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.LIST_EXTEND):
elements = stack.fold_args(2, op)
if elements:
lst, other = elements.elements
tag, et = lst.typ
assert tag == 'list'
other_tag, other_et = other.typ
if other_tag == 'tuple':
# Deconstruct the tuple built in opcodes.LOAD_CONST above
other_elts = tuple(_Constant(('prim', e), v, None, other.op)
for (_, e), v in zip(other_et, other.value))
elif other_tag == 'prim':
assert other_et == str
other_et = {other.typ}
other_elts = tuple(_Constant(('prim', str), v, None, other.op)
for v in other.value)
else:
other_elts = other.elements
typ = (tag, et | set(other_et))
value = lst.value + list(other.value)
elements = lst.elements + other_elts
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.MAP_ADD):
elements = stack.fold_args(3, op)
if elements:
map_, key, val = elements.elements
tag, (kt, vt) = map_.typ
assert tag == 'map'
typ = (tag, (kt | {key.typ}, vt | {val.typ}))
value = {**map_.value, **{key.value: val.value}}
elements = {**map_.elements, **{key.value: val}}
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.DICT_UPDATE):
elements = stack.fold_args(2, op)
if elements:
map1, map2 = elements.elements
tag1, (kt1, vt1) = map1.typ
tag2, (kt2, vt2) = map2.typ
assert tag1 == tag2 == 'map'
typ = (tag1, (kt1 | kt2, vt1 | vt2))
value = {**map1.value, **map2.value}
elements = {**map1.elements, **map2.elements}
stack.push(_Constant(typ, value, elements, op))
else:
# If we hit any other bytecode, we are no longer building a literal
# constant. Push None as a sentinel so that the next BUILD op sees a
# non-constant argument and does not fold itself.
stack.push(None)
# Clear the stack to save any folded constants before exiting the block
stack.clear()
# Now rewrite the block to replace folded opcodes with a single
# LOAD_FOLDED_CONST opcode.
out = []
for op in block:
if id(op) in stack.consts:
t = stack.consts[id(op)]
arg = t
pretty_arg = t
o = opcodes.LOAD_FOLDED_CONST(op.index, op.line, arg, pretty_arg)
o.next = op.next
o.target = op.target
o.block_target = op.block_target
o.code = op.code
op.folded = o
folds.add(op)
out.append(o)
elif op.folded:
folds.add(op)
else:
out.append(op)
block.code = out
# Adjust 'next' and 'target' pointers to account for folding.
for op in code.code_iter:
if op.next:
op.next = folds.resolve(op.next)
if op.target:
op.target = folds.resolve(op.target)
return code
def to_literal(typ, always_tuple=False):
"""Convert a typestruct item to a simplified form for ease of use."""
def expand(params):
return (to_literal(x) for x in params)
def union(params):
ret = tuple(sorted(expand(params), key=str))
if len(ret) == 1 and not always_tuple:
ret, = ret # pylint: disable=self-assigning-variable
return ret
tag, params = typ
if tag == 'prim':
return params
elif tag == 'tuple':
vals = tuple(expand(params))
return (tag, *vals)
elif tag == 'map':
k, v = params
return (tag, union(k), union(v))
else:
return (tag, union(params))
def from_literal(tup):
"""Convert from simple literal form to the more uniform typestruct."""
def expand(vals):
return [from_literal(x) for x in vals]
def union(vals):
if not isinstance(vals, tuple):
vals = (vals,)
v = expand(vals)
return frozenset(v)
if not isinstance(tup, tuple):
return ('prim', tup)
elif isinstance(tup[0], str):
tag, *vals = tup
if tag == 'prim':
return tup
elif tag == 'tuple':
params = tuple(expand(vals))
return (tag, params)
elif tag == 'map':
k, v = vals
return (tag, (union(k), union(v)))
else:
vals, = vals # pylint: disable=self-assigning-variable
return (tag, union(vals))
else:
return tuple(expand(tup))
def optimize(code):
"""Fold all constant literals in the bytecode into LOAD_FOLDED_CONST ops."""
return pyc.visit(code, _FoldConstants())
def build_folded_type(ctx, state, const):
"""Convert a typestruct to a vm type."""
def typeconst(t):
"""Create a constant purely to hold types for a recursive call."""
return _Constant(t, None, None, const.op)
def build_pyval(state, const):
if const.value is not None and const.tag in ('prim', 'tuple'):
return state, ctx.convert.constant_to_var(const.value)
else:
return build_folded_type(ctx, state, const)
def expand(state, elements):
vs = []
for e in elements:
state, v = build_pyval(state, e)
vs.append(v)
return state, vs
def join_types(state, ts):
xs = [typeconst(t) for t in ts]
state, vs = expand(state, xs)
val = ctx.convert.build_content(vs)
return state, val
def collect(state, convert_type, params):
state, t = join_types(state, params)
ret = ctx.convert.build_collection_of_type(state.node, convert_type, t)
return state, ret
def collect_tuple(state, elements):
state, vs = expand(state, elements)
return state, ctx.convert.build_tuple(state.node, vs)
def collect_list(state, params, elements):
if elements is None:
return collect(state, ctx.convert.list_type, params)
elif len(elements) < MAX_VAR_SIZE:
state, vs = expand(state, elements)
return state, ctx.convert.build_list(state.node, vs)
else:
# Without constant folding we construct a variable wrapping every element
# in the list and store it; however, we cannot retrieve them all. So as an
# optimisation, we will add the first few elements as pyvals, then add one
# element for every contained type, and rely on the fact that the tail
# elements will contribute to the overall list type, but will not be
# retrievable as pyvals.
# TODO(b/175443170): We should use a smaller MAX_SUBSCRIPT cutoff; this
# behaviour is unrelated to MAX_VAR_SIZE (which limits the number of
# distinct bindings for the overall typevar).
n = MAX_VAR_SIZE - len(params) - 1
elts = elements[:n] + tuple(typeconst(t) for t in params)
state, vs = expand(state, elts)
return state, ctx.convert.build_list(state.node, vs)
def collect_map(state, params, elements):
m_var = ctx.convert.build_map(state.node)
m = m_var.data[0]
# Do not forward the state while creating dict literals.
node = state.node
# We want a single string type to store in the Dict.K type param.
# Calling set_str_item on every k/v pair will lead to a type param with a
# lot of literal strings as bindings, causing potentially severe performance
# issues down the line.
str_key = ctx.convert.str_type.instantiate(node)
if elements is not None and len(elements) < MAX_VAR_SIZE:
for (k, v) in elements.items():
_, v = build_pyval(state, v)
k_var = ctx.convert.constant_to_var(k)
m.setitem(node, k_var, v)
if isinstance(k, str):
m.merge_instance_type_params(node, str_key, v)
else:
m.merge_instance_type_params(node, k_var, v)
else:
# Treat a too-large dictionary as {Union[keys] : Union[vals]}. We could
# store a subset of the k/v pairs, as with collect_list, but for
# dictionaries it is less obvious which subset we should be storing.
# Perhaps we could create one variable per unique value type, and then
# store every key in the pyval but reuse the value variables.
k_types, v_types = params
_, v = join_types(state, v_types)
for t in k_types:
_, k = build_folded_type(ctx, state, typeconst(t))
m.setitem(node, k, v)
m.merge_instance_type_params(node, k, v)
return state, m_var
tag, params = const.typ
if tag == 'prim':
if const.value:
return state, ctx.convert.constant_to_var(const.value)
else:
val = ctx.convert.primitive_class_instances[params]
return state, val.to_variable(state.node)
elif tag == 'list':
return collect_list(state, params, const.elements)
elif tag == 'set':
return collect(state, ctx.convert.set_type, params)
elif tag == 'tuple':
# If we get a tuple without const.elements, construct it from the type.
# (e.g. this happens with a large dict with tuple keys)
if not const.elements:
elts = tuple(typeconst(t) for t in params)
else:
elts = const.elements
return collect_tuple(state, elts)
elif tag == 'map':
return collect_map(state, params, const.elements)
else:
assert False, ('Unexpected type tag:', const.typ)
| return self.stack.pop() | identifier_body |
constant_folding.py | """Constant folding optimisation for bytecode.
This optimisation adds a new pseudo-opcode, LOAD_FOLDED_CONST, which encodes the
type of a complex literal constant in its `arg` field, in a "typestruct" format
described below. There is a corresponding function, build_folded_type, which
constructs a vm type from the encoded typestruct.
The type structure stored in LOAD_FOLDED_CONST is an immutable (for easy
hashing) tree with the following elements:
('prim', <python type>) : a primitive type, e.g. ('prim', str)
(tag, types) : a collection type; 'types' represent the type params
frozenset(types): a union of types
tag = prim | tuple | list | map | set
the types python supports for a literal constant
types = a tuple of type | frozenset(types)
where the size of the tuple depends on the tag, e.g. ('map', (k, v))
For ease of testing and debugging there is also a simplified literal syntax to
construct and examine these typestructs, see constant_folding_test for examples.
This is less uniform, and therefore not recommended to use other than for
input/output.
"""
from typing import Any, Dict, FrozenSet, Tuple
import attrs
from pytype.pyc import loadmarshal
from pytype.pyc import opcodes
from pytype.pyc import pyc
# Copied from typegraph/cfg.py
# If we have more than 64 elements in a map/list, the type variable accumulates
# too many bindings and falls back to Any. So if we find a constant with too
# many elements, we go directly to constructing an abstract type, and do not
# attempt to track keys/element positions.
MAX_VAR_SIZE = 64
class ConstantError(Exception):
"""Errors raised during constant folding."""
def __init__(self, message, op):
super().__init__(message)
self.lineno = op.line
self.message = message
# We track constants at three levels:
# typ: A typestruct representing the abstract type of the constant
# elements: A list or map of top-level types
# value: The concrete python value
#
# 'elements' is an intermediate structure that tracks individual folded
# constants for every element in a map or list. So e.g. for the constant
# {'x': [1, 2], 'y': 3}
# we would have
# typ = ('map', ({str}, {('list', {int}), int}))
# value = {'x': [1, 2], 'y': 3}
# elements = {'x': <<[1, 2]>>, 'y': <<3>>}
# where <<x>> is the folded constant corresponding to x. This lets us
# short-circuit pyval tracking at any level in the structure and fall back to
# abstract types.
#
# Note that while we could in theory just track the python value, and then
# construct 'typ' and 'elements' at the end, that would mean recursively
# unfolding a structure that we have just folded; the code is simpler if we
# track elements and types at every stage.
@attrs.define
class _Constant:
"""A folded python constant."""
typ: Tuple[str, Any]
value: Any
elements: Any
op: opcodes.Opcode
@property
def tag(self):
return self.typ[0]
@attrs.define
class _Collection:
"""A linear collection (e.g. list, tuple, set)."""
types: FrozenSet[Any]
values: Tuple[Any, ...]
elements: Tuple[Any, ...]
@attrs.define
class _Map:
"""A dictionary."""
key_types: FrozenSet[Any]
keys: Tuple[Any, ...]
value_types: FrozenSet[Any]
values: Tuple[Any, ...]
elements: Dict[Any, Any]
class _CollectionBuilder:
"""Build up a collection of constants."""
def __init__(self):
self.types = set()
self.values = []
self.elements = []
def add(self, constant):
self.types.add(constant.typ)
self.elements.append(constant)
self.values.append(constant.value)
def build(self):
return _Collection(
types=frozenset(self.types),
values=tuple(reversed(self.values)),
elements=tuple(reversed(self.elements)))
class _MapBuilder:
"""Build up a map of constants."""
def __init__(self):
self.key_types = set()
self.value_types = set()
self.keys = []
self.values = []
self.elements = {}
def add(self, key, value):
self.key_types.add(key.typ)
self.value_types.add(value.typ)
self.keys.append(key.value)
self.values.append(value.value)
self.elements[key.value] = value
def build(self):
return _Map(
key_types=frozenset(self.key_types),
keys=tuple(reversed(self.keys)),
value_types=frozenset(self.value_types),
values=tuple(reversed(self.values)),
elements=self.elements)
class _Stack:
"""A simple opcode stack."""
def __init__(self):
self.stack = []
self.consts = {}
def __iter__(self):
return self.stack.__iter__()
def push(self, val):
self.stack.append(val)
def pop(self):
return self.stack.pop()
def _preserve_constant(self, c):
if c and (
not isinstance(c.op, opcodes.LOAD_CONST) or
isinstance(c.op, opcodes.BUILD_STRING)):
self.consts[id(c.op)] = c
def clear(self):
# Preserve any constants in the stack before clearing it.
for c in self.stack:
self._preserve_constant(c)
self.stack = []
def _pop_args(self, n):
"""Try to get n args off the stack for a BUILD call."""
if len(self.stack) < n:
# We have started a new block in the middle of constructing a literal
# (e.g. due to an inline function call). Clear the stack, since the
# literal is not constant.
self.clear()
return None
elif n and any(x is None for x in self.stack[-n:]):
# We have something other than constants in the arg list. Pop all the args
# for this op off the stack, preserving constants.
for _ in range(n):
self._preserve_constant(self.pop())
return None
else:
return [self.pop() for _ in range(n)]
def fold_args(self, n, op):
"""Collect the arguments to a build call."""
ret = _CollectionBuilder()
args = self._pop_args(n)
if args is None:
self.push(None)
return None
for elt in args:
ret.add(elt)
elt.op.folded = op
return ret.build()
def fold_map_args(self, n, op):
"""Collect the arguments to a BUILD_MAP call."""
ret = _MapBuilder()
args = self._pop_args(2 * n)
if args is None:
self.push(None)
return None
for i in range(0, 2 * n, 2):
v_elt, k_elt = args[i], args[i + 1]
ret.add(k_elt, v_elt)
k_elt.op.folded = op
v_elt.op.folded = op
return ret.build()
def build_str(self, n, op):
ret = self.fold_args(n, op)
if ret:
self.push(_Constant(('prim', str), '', None, op))
else:
self.push(None)
return ret
def build(self, python_type, op):
"""Build a folded type."""
collection = self.fold_args(op.arg, op)
if collection:
typename = python_type.__name__
typ = (typename, collection.types)
try:
value = python_type(collection.values)
except TypeError as e:
raise ConstantError(f'TypeError: {e.args[0]}', op) from e
elements = collection.elements
self.push(_Constant(typ, value, elements, op))
class _FoldedOps:
"""Mapping from a folded opcode to the top level constant that replaces it."""
def __init__(self):
self.folds = {}
def add(self, op):
self.folds[id(op)] = op.folded
def resolve(self, op):
f = op
while id(f) in self.folds:
f = self.folds[id(f)]
return f
class _FoldConstants:
"""Fold constant literals in pyc code."""
def visit_code(self, code):
"""Visit code, folding literals."""
def build_tuple(tup):
out = []
for e in tup:
if isinstance(e, tuple):
out.append(build_tuple(e))
else:
out.append(('prim', type(e)))
return ('tuple', tuple(out))
folds = _FoldedOps()
for block in code.order:
stack = _Stack()
for op in block:
if isinstance(op, opcodes.LOAD_CONST):
elt = code.consts[op.arg]
if isinstance(elt, tuple):
typ = build_tuple(elt)
stack.push(_Constant(typ, elt, typ[1], op))
else:
stack.push(_Constant(('prim', type(elt)), elt, None, op))
elif isinstance(op, opcodes.BUILD_LIST):
stack.build(list, op)
elif isinstance(op, opcodes.BUILD_SET):
stack.build(set, op)
elif isinstance(op, opcodes.FORMAT_VALUE):
if op.arg & loadmarshal.FVS_MASK:
stack.build_str(2, op)
else:
stack.build_str(1, op)
elif isinstance(op, opcodes.BUILD_STRING):
stack.build_str(op.arg, op)
elif isinstance(op, opcodes.BUILD_MAP):
map_ = stack.fold_map_args(op.arg, op)
if map_:
typ = ('map', (map_.key_types, map_.value_types))
val = dict(zip(map_.keys, map_.values))
stack.push(_Constant(typ, val, map_.elements, op))
elif isinstance(op, opcodes.BUILD_CONST_KEY_MAP):
keys = stack.pop()
vals = stack.fold_args(op.arg, op)
if vals:
keys.op.folded = op
_, t = keys.typ
typ = ('map', (frozenset(t), vals.types))
val = dict(zip(keys.value, vals.values))
elements = dict(zip(keys.value, vals.elements))
stack.push(_Constant(typ, val, elements, op))
elif isinstance(op, opcodes.LIST_APPEND):
elements = stack.fold_args(2, op)
if elements:
lst, element = elements.elements
tag, et = lst.typ
assert tag == 'list'
typ = (tag, et | {element.typ})
value = lst.value + [element.value]
elements = lst.elements + (element,)
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.LIST_EXTEND):
elements = stack.fold_args(2, op)
if elements:
lst, other = elements.elements
tag, et = lst.typ
assert tag == 'list'
other_tag, other_et = other.typ
if other_tag == 'tuple':
# Deconstruct the tuple built in opcodes.LOAD_CONST above
other_elts = tuple(_Constant(('prim', e), v, None, other.op)
for (_, e), v in zip(other_et, other.value))
elif other_tag == 'prim':
assert other_et == str
other_et = {other.typ}
other_elts = tuple(_Constant(('prim', str), v, None, other.op)
for v in other.value)
else:
other_elts = other.elements
typ = (tag, et | set(other_et))
value = lst.value + list(other.value)
elements = lst.elements + other_elts
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.MAP_ADD):
elements = stack.fold_args(3, op)
if elements:
map_, key, val = elements.elements
tag, (kt, vt) = map_.typ
assert tag == 'map'
typ = (tag, (kt | {key.typ}, vt | {val.typ}))
value = {**map_.value, **{key.value: val.value}}
elements = {**map_.elements, **{key.value: val}}
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.DICT_UPDATE):
elements = stack.fold_args(2, op)
if elements:
map1, map2 = elements.elements
tag1, (kt1, vt1) = map1.typ
tag2, (kt2, vt2) = map2.typ
assert tag1 == tag2 == 'map'
typ = (tag1, (kt1 | kt2, vt1 | vt2))
value = {**map1.value, **map2.value}
elements = {**map1.elements, **map2.elements}
stack.push(_Constant(typ, value, elements, op))
else:
# If we hit any other bytecode, we are no longer building a literal
# constant. Push None as a sentinel so that the next BUILD op sees a
# non-constant argument and does not fold itself.
stack.push(None)
# Clear the stack to save any folded constants before exiting the block
stack.clear()
# Now rewrite the block to replace folded opcodes with a single
# LOAD_FOLDED_CONST opcode.
out = []
for op in block:
if id(op) in stack.consts:
t = stack.consts[id(op)]
arg = t
pretty_arg = t
o = opcodes.LOAD_FOLDED_CONST(op.index, op.line, arg, pretty_arg)
o.next = op.next
o.target = op.target
o.block_target = op.block_target
o.code = op.code
op.folded = o
folds.add(op)
out.append(o)
elif op.folded:
folds.add(op)
else:
out.append(op)
block.code = out
# Adjust 'next' and 'target' pointers to account for folding.
for op in code.code_iter:
if op.next:
op.next = folds.resolve(op.next)
if op.target:
op.target = folds.resolve(op.target)
return code
def to_literal(typ, always_tuple=False):
"""Convert a typestruct item to a simplified form for ease of use."""
def expand(params):
return (to_literal(x) for x in params)
def union(params):
ret = tuple(sorted(expand(params), key=str))
if len(ret) == 1 and not always_tuple:
ret, = ret # pylint: disable=self-assigning-variable
return ret
tag, params = typ
if tag == 'prim':
return params
elif tag == 'tuple':
vals = tuple(expand(params))
return (tag, *vals)
elif tag == 'map':
k, v = params
return (tag, union(k), union(v))
else:
return (tag, union(params))
def from_literal(tup):
"""Convert from simple literal form to the more uniform typestruct."""
def expand(vals):
return [from_literal(x) for x in vals]
def union(vals):
if not isinstance(vals, tuple):
vals = (vals,)
v = expand(vals)
return frozenset(v)
if not isinstance(tup, tuple):
return ('prim', tup)
elif isinstance(tup[0], str):
tag, *vals = tup
if tag == 'prim':
return tup
elif tag == 'tuple':
|
elif tag == 'map':
k, v = vals
return (tag, (union(k), union(v)))
else:
vals, = vals # pylint: disable=self-assigning-variable
return (tag, union(vals))
else:
return tuple(expand(tup))
def optimize(code):
"""Fold all constant literals in the bytecode into LOAD_FOLDED_CONST ops."""
return pyc.visit(code, _FoldConstants())
def build_folded_type(ctx, state, const):
"""Convert a typestruct to a vm type."""
def typeconst(t):
"""Create a constant purely to hold types for a recursive call."""
return _Constant(t, None, None, const.op)
def build_pyval(state, const):
if const.value is not None and const.tag in ('prim', 'tuple'):
return state, ctx.convert.constant_to_var(const.value)
else:
return build_folded_type(ctx, state, const)
def expand(state, elements):
vs = []
for e in elements:
state, v = build_pyval(state, e)
vs.append(v)
return state, vs
def join_types(state, ts):
xs = [typeconst(t) for t in ts]
state, vs = expand(state, xs)
val = ctx.convert.build_content(vs)
return state, val
def collect(state, convert_type, params):
state, t = join_types(state, params)
ret = ctx.convert.build_collection_of_type(state.node, convert_type, t)
return state, ret
def collect_tuple(state, elements):
state, vs = expand(state, elements)
return state, ctx.convert.build_tuple(state.node, vs)
def collect_list(state, params, elements):
if elements is None:
return collect(state, ctx.convert.list_type, params)
elif len(elements) < MAX_VAR_SIZE:
state, vs = expand(state, elements)
return state, ctx.convert.build_list(state.node, vs)
else:
# Without constant folding we construct a variable wrapping every element
# in the list and store it; however, we cannot retrieve them all. So as an
# optimisation, we will add the first few elements as pyvals, then add one
# element for every contained type, and rely on the fact that the tail
# elements will contribute to the overall list type, but will not be
# retrievable as pyvals.
# TODO(b/175443170): We should use a smaller MAX_SUBSCRIPT cutoff; this
# behaviour is unrelated to MAX_VAR_SIZE (which limits the number of
# distinct bindings for the overall typevar).
n = MAX_VAR_SIZE - len(params) - 1
elts = elements[:n] + tuple(typeconst(t) for t in params)
state, vs = expand(state, elts)
return state, ctx.convert.build_list(state.node, vs)
def collect_map(state, params, elements):
m_var = ctx.convert.build_map(state.node)
m = m_var.data[0]
# Do not forward the state while creating dict literals.
node = state.node
# We want a single string type to store in the Dict.K type param.
# Calling set_str_item on every k/v pair will lead to a type param with a
# lot of literal strings as bindings, causing potentially severe performance
# issues down the line.
str_key = ctx.convert.str_type.instantiate(node)
if elements is not None and len(elements) < MAX_VAR_SIZE:
for (k, v) in elements.items():
_, v = build_pyval(state, v)
k_var = ctx.convert.constant_to_var(k)
m.setitem(node, k_var, v)
if isinstance(k, str):
m.merge_instance_type_params(node, str_key, v)
else:
m.merge_instance_type_params(node, k_var, v)
else:
# Treat a too-large dictionary as {Union[keys] : Union[vals]}. We could
# store a subset of the k/v pairs, as with collect_list, but for
# dictionaries it is less obvious which subset we should be storing.
# Perhaps we could create one variable per unique value type, and then
# store every key in the pyval but reuse the value variables.
k_types, v_types = params
_, v = join_types(state, v_types)
for t in k_types:
_, k = build_folded_type(ctx, state, typeconst(t))
m.setitem(node, k, v)
m.merge_instance_type_params(node, k, v)
return state, m_var
tag, params = const.typ
if tag == 'prim':
if const.value:
return state, ctx.convert.constant_to_var(const.value)
else:
val = ctx.convert.primitive_class_instances[params]
return state, val.to_variable(state.node)
elif tag == 'list':
return collect_list(state, params, const.elements)
elif tag == 'set':
return collect(state, ctx.convert.set_type, params)
elif tag == 'tuple':
# If we get a tuple without const.elements, construct it from the type.
# (e.g. this happens with a large dict with tuple keys)
if not const.elements:
elts = tuple(typeconst(t) for t in params)
else:
elts = const.elements
return collect_tuple(state, elts)
elif tag == 'map':
return collect_map(state, params, const.elements)
else:
assert False, ('Unexpected type tag:', const.typ)
| params = tuple(expand(vals))
return (tag, params) | conditional_block |
constant_folding.py | """Constant folding optimisation for bytecode.
This optimisation adds a new pseudo-opcode, LOAD_FOLDED_CONST, which encodes the
type of a complex literal constant in its `arg` field, in a "typestruct" format
described below. There is a corresponding function, build_folded_type, which
constructs a vm type from the encoded typestruct.
The type structure stored in LOAD_FOLDED_CONST is an immutable (for easy
hashing) tree with the following elements:
('prim', <python type>) : a primitive type, e.g. ('prim', str)
(tag, types) : a collection type; 'types' represents the type params
frozenset(types): a union of types
tag = prim | tuple | list | map | set
the types python supports for a literal constant
types = a tuple of type | frozenset(types)
where the size of the tuple depends on the tag, e.g. ('map', (k, v))
For ease of testing and debugging there is also a simplified literal syntax
used to construct and examine these typestructs; see constant_folding_test
for examples. This form is less uniform, and is therefore not recommended
for anything other than input/output.
"""
from typing import Any, Dict, FrozenSet, Tuple
import attrs
from pytype.pyc import loadmarshal
from pytype.pyc import opcodes
from pytype.pyc import pyc
# Copied from typegraph/cfg.py
# If we have more than 64 elements in a map/list, the type variable accumulates
# too many bindings and falls back to Any. So if we find a constant with too
# many elements, we go directly to constructing an abstract type, and do not
# attempt to track keys/element positions.
MAX_VAR_SIZE = 64
class ConstantError(Exception):
"""Errors raised during constant folding."""
def __init__(self, message, op):
super().__init__(message)
self.lineno = op.line
self.message = message
# We track constants at three levels:
# typ: A typestruct representing the abstract type of the constant
# elements: A list or map of top-level types
# value: The concrete python value
#
# 'elements' is an intermediate structure that tracks individual folded
# constants for every element in a map or list. So e.g. for the constant
# {'x': [1, 2], 'y': 3}
# we would have
# typ = ('map', {str}, {('list', {int}), int})
# value = {'x': [1, 2], 'y': 3}
# elements = {'x': <<[1, 2]>>, 'y': <<3>>}
# where <<x>> is the folded constant corresponding to x. This lets us
# short-circuit pyval tracking at any level in the structure and fall back to
# abstract types.
#
# Note that while we could in theory just track the python value, and then
# construct 'typ' and 'elements' at the end, that would mean recursively
# unfolding a structure that we have just folded; the code is simpler if we
# track elements and types at every stage.
@attrs.define
class _Constant:
"""A folded python constant."""
typ: Tuple[str, Any]
value: Any
elements: Any
op: opcodes.Opcode
@property
def tag(self):
return self.typ[0]
@attrs.define
class _Collection:
"""A linear collection (e.g. list, tuple, set)."""
types: FrozenSet[Any]
values: Tuple[Any, ...]
elements: Tuple[Any, ...]
@attrs.define
class _Map:
"""A dictionary."""
key_types: FrozenSet[Any]
keys: Tuple[Any, ...]
value_types: FrozenSet[Any]
values: Tuple[Any, ...]
elements: Dict[Any, Any]
class _CollectionBuilder:
"""Build up a collection of constants."""
def __init__(self):
self.types = set()
self.values = []
self.elements = []
def add(self, constant):
self.types.add(constant.typ)
self.elements.append(constant)
self.values.append(constant.value)
def build(self):
return _Collection(
types=frozenset(self.types),
values=tuple(reversed(self.values)),
elements=tuple(reversed(self.elements)))
class _MapBuilder:
"""Build up a map of constants."""
def __init__(self):
self.key_types = set()
self.value_types = set()
self.keys = []
self.values = []
self.elements = {}
def add(self, key, value):
self.key_types.add(key.typ)
self.value_types.add(value.typ)
self.keys.append(key.value)
self.values.append(value.value)
self.elements[key.value] = value
def build(self):
return _Map(
key_types=frozenset(self.key_types),
keys=tuple(reversed(self.keys)),
value_types=frozenset(self.value_types),
values=tuple(reversed(self.values)),
elements=self.elements)
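# Hedged usage sketch: the builders consume stack pops, which arrive in
# reverse order; hence the tuple(reversed(...)) calls above. Folding the
# literal [1, 'a'] pops 'a' first, then 1 (here op stands in for the
# BUILD_LIST opcode driving the fold):
#   b = _CollectionBuilder()
#   b.add(_Constant(('prim', str), 'a', None, op))
#   b.add(_Constant(('prim', int), 1, None, op))
#   b.build().values == (1, 'a')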
class _Stack:
"""A simple opcode stack."""
def __init__(self):
self.stack = []
self.consts = {}
def __iter__(self):
return self.stack.__iter__()
def push(self, val):
self.stack.append(val)
def pop(self):
return self.stack.pop()
def _preserve_constant(self, c):
if c and (
not isinstance(c.op, opcodes.LOAD_CONST) or
isinstance(c.op, opcodes.BUILD_STRING)):
self.consts[id(c.op)] = c
def clear(self):
# Preserve any constants in the stack before clearing it.
for c in self.stack:
self._preserve_constant(c)
self.stack = []
def _pop_args(self, n):
"""Try to get n args off the stack for a BUILD call."""
if len(self.stack) < n:
# We have started a new block in the middle of constructing a literal
# (e.g. due to an inline function call). Clear the stack, since the
# literal is not constant.
self.clear()
return None
elif n and any(x is None for x in self.stack[-n:]):
# We have something other than constants in the arg list. Pop all the args
# for this op off the stack, preserving constants.
for _ in range(n):
self._preserve_constant(self.pop())
return None
else:
return [self.pop() for _ in range(n)]
def fold_args(self, n, op):
"""Collect the arguments to a build call."""
ret = _CollectionBuilder()
args = self._pop_args(n)
if args is None:
self.push(None)
return None
for elt in args:
ret.add(elt)
elt.op.folded = op
return ret.build()
def fold_map_args(self, n, op):
"""Collect the arguments to a BUILD_MAP call."""
ret = _MapBuilder()
args = self._pop_args(2 * n)
if args is None:
self.push(None)
return None
for i in range(0, 2 * n, 2):
v_elt, k_elt = args[i], args[i + 1]
ret.add(k_elt, v_elt)
k_elt.op.folded = op
v_elt.op.folded = op
return ret.build()
def build_str(self, n, op):
ret = self.fold_args(n, op)
if ret:
self.push(_Constant(('prim', str), '', None, op))
else:
self.push(None)
return ret
def | (self, python_type, op):
"""Build a folded type."""
collection = self.fold_args(op.arg, op)
if collection:
typename = python_type.__name__
typ = (typename, collection.types)
try:
value = python_type(collection.values)
except TypeError as e:
raise ConstantError(f'TypeError: {e.args[0]}', op) from e
elements = collection.elements
self.push(_Constant(typ, value, elements, op))
class _FoldedOps:
"""Mapping from a folded opcode to the top level constant that replaces it."""
def __init__(self):
self.folds = {}
def add(self, op):
self.folds[id(op)] = op.folded
def resolve(self, op):
f = op
while id(f) in self.folds:
f = self.folds[id(f)]
return f
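# Sketch of the fold chain: if LOAD_CONST ops a and b fold into a BUILD_LIST
# c, and c in turn folds into an enclosing BUILD_MAP d, then resolve(a)
# follows a -> c -> d and returns the top-level constant d.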
class _FoldConstants:
"""Fold constant literals in pyc code."""
def visit_code(self, code):
"""Visit code, folding literals."""
def build_tuple(tup):
out = []
for e in tup:
if isinstance(e, tuple):
out.append(build_tuple(e))
else:
out.append(('prim', type(e)))
return ('tuple', tuple(out))
folds = _FoldedOps()
for block in code.order:
stack = _Stack()
for op in block:
if isinstance(op, opcodes.LOAD_CONST):
elt = code.consts[op.arg]
if isinstance(elt, tuple):
typ = build_tuple(elt)
stack.push(_Constant(typ, elt, typ[1], op))
else:
stack.push(_Constant(('prim', type(elt)), elt, None, op))
elif isinstance(op, opcodes.BUILD_LIST):
stack.build(list, op)
elif isinstance(op, opcodes.BUILD_SET):
stack.build(set, op)
elif isinstance(op, opcodes.FORMAT_VALUE):
if op.arg & loadmarshal.FVS_MASK:
stack.build_str(2, op)
else:
stack.build_str(1, op)
elif isinstance(op, opcodes.BUILD_STRING):
stack.build_str(op.arg, op)
elif isinstance(op, opcodes.BUILD_MAP):
map_ = stack.fold_map_args(op.arg, op)
if map_:
typ = ('map', (map_.key_types, map_.value_types))
val = dict(zip(map_.keys, map_.values))
stack.push(_Constant(typ, val, map_.elements, op))
elif isinstance(op, opcodes.BUILD_CONST_KEY_MAP):
keys = stack.pop()
vals = stack.fold_args(op.arg, op)
if vals:
keys.op.folded = op
_, t = keys.typ
typ = ('map', (frozenset(t), vals.types))
val = dict(zip(keys.value, vals.values))
elements = dict(zip(keys.value, vals.elements))
stack.push(_Constant(typ, val, elements, op))
elif isinstance(op, opcodes.LIST_APPEND):
elements = stack.fold_args(2, op)
if elements:
lst, element = elements.elements
tag, et = lst.typ
assert tag == 'list'
typ = (tag, et | {element.typ})
value = lst.value + [element.value]
elements = lst.elements + (element,)
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.LIST_EXTEND):
elements = stack.fold_args(2, op)
if elements:
lst, other = elements.elements
tag, et = lst.typ
assert tag == 'list'
other_tag, other_et = other.typ
if other_tag == 'tuple':
# Deconstruct the tuple built in opcodes.LOAD_CONST above
other_elts = tuple(_Constant(('prim', e), v, None, other.op)
for (_, e), v in zip(other_et, other.value))
elif other_tag == 'prim':
assert other_et == str
other_et = {other.typ}
other_elts = tuple(_Constant(('prim', str), v, None, other.op)
for v in other.value)
else:
other_elts = other.elements
typ = (tag, et | set(other_et))
value = lst.value + list(other.value)
elements = lst.elements + other_elts
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.MAP_ADD):
elements = stack.fold_args(3, op)
if elements:
map_, key, val = elements.elements
tag, (kt, vt) = map_.typ
assert tag == 'map'
typ = (tag, (kt | {key.typ}, vt | {val.typ}))
value = {**map_.value, **{key.value: val.value}}
elements = {**map_.elements, **{key.value: val}}
stack.push(_Constant(typ, value, elements, op))
elif isinstance(op, opcodes.DICT_UPDATE):
elements = stack.fold_args(2, op)
if elements:
map1, map2 = elements.elements
tag1, (kt1, vt1) = map1.typ
tag2, (kt2, vt2) = map2.typ
assert tag1 == tag2 == 'map'
typ = (tag1, (kt1 | kt2, vt1 | vt2))
value = {**map1.value, **map2.value}
elements = {**map1.elements, **map2.elements}
stack.push(_Constant(typ, value, elements, op))
else:
# If we hit any other bytecode, we are no longer building a literal
# constant. Insert a None as a sentinel telling the next BUILD op
# not to fold itself.
stack.push(None)
# Clear the stack to save any folded constants before exiting the block
stack.clear()
# Now rewrite the block to replace folded opcodes with a single
# LOAD_FOLDED_CONSTANT opcode.
out = []
for op in block:
if id(op) in stack.consts:
t = stack.consts[id(op)]
arg = t
pretty_arg = t
o = opcodes.LOAD_FOLDED_CONST(op.index, op.line, arg, pretty_arg)
o.next = op.next
o.target = op.target
o.block_target = op.block_target
o.code = op.code
op.folded = o
folds.add(op)
out.append(o)
elif op.folded:
folds.add(op)
else:
out.append(op)
block.code = out
# Adjust 'next' and 'target' pointers to account for folding.
for op in code.code_iter:
if op.next:
op.next = folds.resolve(op.next)
if op.target:
op.target = folds.resolve(op.target)
return code
def to_literal(typ, always_tuple=False):
"""Convert a typestruct item to a simplified form for ease of use."""
def expand(params):
return (to_literal(x) for x in params)
def union(params):
ret = tuple(sorted(expand(params), key=str))
if len(ret) == 1 and not always_tuple:
ret, = ret # pylint: disable=self-assigning-variable
return ret
tag, params = typ
if tag == 'prim':
return params
elif tag == 'tuple':
vals = tuple(expand(params))
return (tag, *vals)
elif tag == 'map':
k, v = params
return (tag, union(k), union(v))
else:
return (tag, union(params))
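# Worked example (illustrative):
# to_literal(('map', (frozenset({('prim', str)}), frozenset({('prim', int)}))))
#   == ('map', str, int)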
def from_literal(tup):
"""Convert from simple literal form to the more uniform typestruct."""
def expand(vals):
return [from_literal(x) for x in vals]
def union(vals):
if not isinstance(vals, tuple):
vals = (vals,)
v = expand(vals)
return frozenset(v)
if not isinstance(tup, tuple):
return ('prim', tup)
elif isinstance(tup[0], str):
tag, *vals = tup
if tag == 'prim':
return tup
elif tag == 'tuple':
params = tuple(expand(vals))
return (tag, params)
elif tag == 'map':
k, v = vals
return (tag, (union(k), union(v)))
else:
vals, = vals # pylint: disable=self-assigning-variable
return (tag, union(vals))
else:
return tuple(expand(tup))
def optimize(code):
"""Fold all constant literals in the bytecode into LOAD_FOLDED_CONST ops."""
return pyc.visit(code, _FoldConstants())
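# Hedged usage sketch: given a loaded pyc code object,
#   code = optimize(code)
# rewrites every foldable LOAD_CONST/BUILD_* sequence into a
# LOAD_FOLDED_CONST op, which build_folded_type below turns into a vm type.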
def build_folded_type(ctx, state, const):
"""Convert a typestruct to a vm type."""
def typeconst(t):
"""Create a constant purely to hold types for a recursive call."""
return _Constant(t, None, None, const.op)
def build_pyval(state, const):
if const.value is not None and const.tag in ('prim', 'tuple'):
return state, ctx.convert.constant_to_var(const.value)
else:
return build_folded_type(ctx, state, const)
def expand(state, elements):
vs = []
for e in elements:
state, v = build_pyval(state, e)
vs.append(v)
return state, vs
def join_types(state, ts):
xs = [typeconst(t) for t in ts]
state, vs = expand(state, xs)
val = ctx.convert.build_content(vs)
return state, val
def collect(state, convert_type, params):
state, t = join_types(state, params)
ret = ctx.convert.build_collection_of_type(state.node, convert_type, t)
return state, ret
def collect_tuple(state, elements):
state, vs = expand(state, elements)
return state, ctx.convert.build_tuple(state.node, vs)
def collect_list(state, params, elements):
if elements is None:
return collect(state, ctx.convert.list_type, params)
elif len(elements) < MAX_VAR_SIZE:
state, vs = expand(state, elements)
return state, ctx.convert.build_list(state.node, vs)
else:
# Without constant folding we construct a variable wrapping every element
# in the list and store it; however, we cannot retrieve them all. So as an
# optimisation, we will add the first few elements as pyvals, then add one
# element for every contained type, and rely on the fact that the tail
# elements will contribute to the overall list type, but will not be
# retrievable as pyvals.
# TODO(b/175443170): We should use a smaller MAX_SUBSCRIPT cutoff; this
# behaviour is unrelated to MAX_VAR_SIZE (which limits the number of
# distinct bindings for the overall typevar).
n = MAX_VAR_SIZE - len(params) - 1
elts = elements[:n] + tuple(typeconst(t) for t in params)
state, vs = expand(state, elts)
return state, ctx.convert.build_list(state.node, vs)
def collect_map(state, params, elements):
m_var = ctx.convert.build_map(state.node)
m = m_var.data[0]
# Do not forward the state while creating dict literals.
node = state.node
# We want a single string type to store in the Dict.K type param.
# Calling set_str_item on every k/v pair will lead to a type param with a
# lot of literal strings as bindings, causing potentially severe performance
# issues down the line.
str_key = ctx.convert.str_type.instantiate(node)
if elements is not None and len(elements) < MAX_VAR_SIZE:
for (k, v) in elements.items():
_, v = build_pyval(state, v)
k_var = ctx.convert.constant_to_var(k)
m.setitem(node, k_var, v)
if isinstance(k, str):
m.merge_instance_type_params(node, str_key, v)
else:
m.merge_instance_type_params(node, k_var, v)
else:
# Treat a too-large dictionary as {Union[keys] : Union[vals]}. We could
# store a subset of the k/v pairs, as with collect_list, but for
# dictionaries it is less obvious which subset we should be storing.
# Perhaps we could create one variable per unique value type, and then
# store every key in the pyval but reuse the value variables.
k_types, v_types = params
_, v = join_types(state, v_types)
for t in k_types:
_, k = build_folded_type(ctx, state, typeconst(t))
m.setitem(node, k, v)
m.merge_instance_type_params(node, k, v)
return state, m_var
tag, params = const.typ
if tag == 'prim':
if const.value:
return state, ctx.convert.constant_to_var(const.value)
else:
val = ctx.convert.primitive_class_instances[params]
return state, val.to_variable(state.node)
elif tag == 'list':
return collect_list(state, params, const.elements)
elif tag == 'set':
return collect(state, ctx.convert.set_type, params)
elif tag == 'tuple':
# If we get a tuple without const.elements, construct it from the type.
# (e.g. this happens with a large dict with tuple keys)
if not const.elements:
elts = tuple(typeconst(t) for t in params)
else:
elts = const.elements
return collect_tuple(state, elts)
elif tag == 'map':
return collect_map(state, params, const.elements)
else:
assert False, ('Unexpected type tag:', const.typ)
| build | identifier_name |
parser.rs | use crate::Result;
use pom::char_class::{alpha, alphanum, multispace};
use pom::parser::*;
use std::str::FromStr;
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
#[derive(Debug, Clone, PartialEq)]
pub struct StateMachine {
pub name: String,
pub states: Vec<State>,
pub accept_states: Vec<AcceptState>
}
#[derive(Debug, Clone, PartialEq)]
pub struct AcceptState(StateId, StateId);
#[derive(Debug, Clone, PartialEq)]
pub struct StateId(String);
#[derive(Debug, Clone, PartialEq)]
pub struct State {
pub id: StateId,
pub is_starting_state: bool,
pub description: Option<String>
}
impl AcceptState {
pub fn source(&self) -> &StateId {
&self.0
}
pub fn target(&self) -> &StateId {
&self.1
}
}
impl Display for StateId {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.0)
}
}
/// space, tab, etc
fn ws<'a>() -> Parser<'a, u8, ()> {
is_a(multispace).discard()
}
/// whitespace and comments
fn space<'a>() -> Parser<'a, u8, ()> {
(ws() | comment()).repeat(0..).discard()
}
fn semi<'a>() -> Parser<'a, u8, ()> {
keyword(b";").name("semi")
}
fn to_eol<'a>() -> Parser<'a, u8, String> {
fn anything_else(term: u8) -> bool {
!is_cr(term) && !is_lf(term)
}
is_a(anything_else)
.repeat(0..)
.map(|u8s| String::from_utf8(u8s).expect("can only parse utf"))
}
fn line_comment<'a>() -> Parser<'a, u8, ()> {
(seq(b"//") * to_eol() - eol())
.discard()
.name("line comment")
}
fn eol<'a>() -> Parser<'a, u8, ()> {
((is_a(is_cr) * is_a(is_lf)) | is_a(is_lf) | is_a(is_cr)).discard()
}
fn keyword<'a>(keyword: &'static [u8]) -> Parser<'a, u8, ()> {
literal(keyword).discard().name("keyword")
}
fn literal<'a>(literal: &'static [u8]) -> Parser<'a, u8, String> {
spaced(seq(literal))
.map(|u8s| String::from_utf8(u8s.to_vec()).expect("can only parse utf"))
.name("literal")
}
fn star_comment<'a>() -> Parser<'a, u8, ()> {
fn anything_else(term: u8) -> bool {
term != b'*'
}
(seq(b"/*") * is_a(anything_else).repeat(0..) - seq(b"*/")).discard()
}
fn comment<'a>() -> Parser<'a, u8, ()> {
line_comment() | star_comment()
}
/// a parser wrapped in whitespace
fn spaced<'a, T>(parser: Parser<'a, u8, T>) -> Parser<'a, u8, T>
where
T: 'a,
{
space() * parser - space()
}
fn is_cr(term: u8) -> bool {
term == b'\r'
}
fn is_lf(term: u8) -> bool {
term == b'\n'
}
fn is_underscore(term: u8) -> bool {
term == b'_'
}
fn state_id<'a>() -> Parser<'a, u8, StateId> {
(identifier())
.map(|ident| StateId(ident))
}
fn identifier<'a>() -> Parser<'a, u8, String> {
let it = ((is_a(alpha) | is_a(is_underscore))
+ (is_a(alphanum) | is_a(is_underscore)).repeat(0..))
.map(|(first, rest)| format!("{}{}", first as char, String::from_utf8(rest).unwrap()));
spaced(it).name("name")
}
fn string<'a>() -> Parser<'a, u8, String> {
let special_char = sym(b'\\')
| sym(b'/')
| sym(b'"')
| sym(b'b').map(|_| b'\x08')
| sym(b'f').map(|_| b'\x0C')
| sym(b'n').map(|_| b'\n')
| sym(b'r').map(|_| b'\r')
| sym(b't').map(|_| b'\t');
let escape_sequence = sym(b'\\') * special_char;
let string = sym(b'"') * (none_of(b"\\\"") | escape_sequence).repeat(0..) - sym(b'"');
string.convert(String::from_utf8)
}
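// Hedged sketch (assuming pom's Parser::parse over a byte slice): the escape
// handling mirrors JSON, e.g.
//   string().parse(br#""a\tb""#) == Ok("a\tb".to_string())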
fn state<'a>() -> Parser<'a, u8, State> {
let raw = keyword(b"state") * identifier() + string().opt()
- semi();
raw.map(move |(identifier, description)| State {
id: StateId(identifier),
is_starting_state: false,
description
})
}
fn | <'a>() -> Parser<'a, u8, Vec<State>> {
fn tag_starting_state(idx: usize, state: State) -> State {
State {
is_starting_state: idx == 0,
..state
}
};
state().repeat(0..).map(|states| states.into_iter().enumerate().map(|(idx, state)| tag_starting_state(idx, state)).collect())
}
fn accept_states_list<'a>() -> Parser<'a, u8, Vec<AcceptState>> {
accept_states_chain()
.repeat(0..)
.map(|chains| chains.into_iter().flatten().collect())
}
fn accept_states_chain<'a>() -> Parser<'a, u8, Vec<AcceptState>> {
let raw = spaced(list(spaced(state_id()), keyword(b"->"))) - semi();
raw.map(move |state_ids| {
if state_ids.len() < 2 {
return vec![];
}
let mut result = vec![];
for i in 0..state_ids.len() - 1 {
let left = state_ids[i].clone();
let right = state_ids[i+1].clone();
let accept = AcceptState(left, right);
result.push(accept);
}
return result;
})
}
pub fn state_machine<'a>() -> Parser<'a, u8, StateMachine> {
let header = keyword(b"machine") * identifier() - semi();
let raw = header
+ state_list()
+ accept_states_list();
raw.map(move |((name, states), accept_states)| StateMachine {
name,
states,
accept_states
})
}
#[cfg(test)]
mod test {
use super::*;
use std::cmp::min;
use std::path::{Path, PathBuf};
use std::{fs, io};
macro_rules! assert_consumes_all {
( $ parser: expr, $input: expr ) => {
let terminating_parser = $parser - space() - end();
let res = terminating_parser.parse($input);
if let Err(_) = res {
panic!("parser failed to match and consume everything")
}
};
( $ parser: expr, $input: expr, $expected: expr) => {
let terminating_parser = $parser - space() - end();
let res = terminating_parser.parse($input);
match res {
Ok(answer) => {
// it parsed, but was it right?
assert_eq!(answer, $expected)
}
Err(_) => {
//
panic!("parser failed to match and consume everything")
}
}
};
}
#[test]
fn parse_keywords() -> Result<()> {
assert_consumes_all![eol(), b"\r"];
assert_consumes_all![eol(), b"\r\n"];
assert_consumes_all![eol(), b"\n"];
assert_consumes_all![space(), b""];
assert_consumes_all![space(), b" "];
assert_consumes_all![space(), b" \t \n \r "];
assert_consumes_all![line_comment(), b"//\r"];
assert_consumes_all![line_comment(), b"//\n"];
assert_consumes_all![line_comment(), b"//\r\n"];
assert_consumes_all![line_comment(), b"// xyz \r\n"];
assert_consumes_all![star_comment(), b"/* thing */"];
assert_consumes_all![star_comment(), b"/* thing \r\n thing */"];
assert_consumes_all!(
identifier(),
b"foo"
);
assert_consumes_all!(
state_id(),
b"foo"
);
assert_consumes_all!(
accept_states_chain(),
b"foo-> bar -> baz;",
vec![
AcceptState(StateId("foo".into()), StateId("bar".into())),
AcceptState(StateId("bar".into()), StateId("baz".into())),
]
);
assert_consumes_all!(
accept_states_list(),
b"foo-> bar -> baz; baz -> quux;",
vec![
AcceptState(StateId("foo".into()), StateId("bar".into())),
AcceptState(StateId("bar".into()), StateId("baz".into())),
AcceptState(StateId("baz".into()), StateId("quux".into())),
]
);
Ok(())
}
#[test]
fn parse_state_machines() -> Result<()> {
let emptymachine = StateMachine {
name: "foo".into(),
states: Default::default(),
accept_states: vec![]
};
assert_consumes_all!(
state_machine(),
b"machine foo;",
emptymachine
);
assert_consumes_all!(
state_machine(),
b"
machine foo;
state bar \"it's a bar thing\";
state baz;
bar -> baz;
",
StateMachine {
name: "foo".into(),
states: vec![
State {
id: StateId("bar".into()),
is_starting_state: true,
description: Some("it's a bar thing".into())
},
State {
id: StateId("baz".into()),
is_starting_state: false,
description: None
},
],
accept_states: vec![
AcceptState(StateId("bar".into()), StateId("baz".into()))
]
}
);
Ok(())
}
fn count_lines(byte_slice: &[u8]) -> usize {
let line_parser = (to_eol() - eol()).repeat(0..);
let parse_result = line_parser.parse(byte_slice).unwrap();
parse_result.len()
}
#[test]
fn line_counter_works() {
let file_path_str = "assets/fsml/simple-state-machine.fsml";
let byte_vec: Vec<u8> = std::fs::read(file_path_str).unwrap();
let actual = count_lines(&byte_vec);
assert_eq!(12, actual);
}
#[test]
fn parse_state_machine_file() {
let file_path_str = "assets/fsml/simple-state-machine.fsml";
assert_parse_file(PathBuf::from_str(file_path_str).unwrap().as_path());
}
#[test]
fn parse_all_files() -> Result<()> {
let mut entries = fs::read_dir("assets/fsml")?
.map(|res| res.map(|e| e.path()))
//.filter(|f| )
.collect::<std::result::Result<Vec<_>, io::Error>>()?;
entries.sort();
for file_path_str in entries {
println!("");
println!("{}", file_path_str.to_str().unwrap());
println!("");
assert_parse_file(file_path_str.as_path());
}
Ok(())
}
fn assert_parse_file(file_path_str: &Path) {
let byte_vec: Vec<u8> = std::fs::read(file_path_str).unwrap();
let file_content =
String::from_utf8(byte_vec.clone()).expect("should be able to read the file");
let byte_slice: &[u8] = &byte_vec;
let parser = state_machine();
let parse_result = match parser.parse(byte_slice) {
Ok(parse_result) => parse_result,
Err(pom::Error::Mismatch { message, position }) => {
let start_str = &byte_vec[0..position];
let line = count_lines(start_str) + 1;
let end = min(position + 50, file_content.len() - 1);
let extract = &file_content[position..end];
let extract = extract
.to_string()
.replace("\n", "\\n")
.replace("\r", "\\r")
.replace("\t", "\\t");
let err_location = format!("{}:{}:{}", file_path_str.to_str().unwrap(), line, 1);
// thread 'idl_parser::test::parse_full_html5_file' panicked at 'whoops', src/idl_parser.rs:428:9
let better_message = format!(
"thread 'idl_parser::test::parse_full_html5_file' panicked at 'parsing', {}\n\n{}",
err_location, extract
);
println!("{}", better_message);
panic!(message)
}
Err(e) => panic!("{}", e),
};
println!("{:?}", parse_result);
}
}
| state_list | identifier_name |
parser.rs | use crate::Result;
use pom::char_class::{alpha, alphanum, multispace};
use pom::parser::*;
use std::str::FromStr;
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
#[derive(Debug, Clone, PartialEq)]
pub struct StateMachine {
pub name: String,
pub states: Vec<State>,
pub accept_states: Vec<AcceptState>
}
#[derive(Debug, Clone, PartialEq)]
pub struct AcceptState(StateId, StateId);
#[derive(Debug, Clone, PartialEq)]
pub struct StateId(String);
#[derive(Debug, Clone, PartialEq)]
pub struct State {
pub id: StateId,
pub is_starting_state: bool,
pub description: Option<String>
}
impl AcceptState {
pub fn source(&self) -> &StateId {
&self.0
}
pub fn target(&self) -> &StateId {
&self.1
}
}
impl Display for StateId {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.0)
}
}
/// space, tab, etc
fn ws<'a>() -> Parser<'a, u8, ()> {
is_a(multispace).discard()
}
/// whitespace and comments
fn space<'a>() -> Parser<'a, u8, ()> {
(ws() | comment()).repeat(0..).discard()
}
fn semi<'a>() -> Parser<'a, u8, ()> {
keyword(b";").name("semi")
}
fn to_eol<'a>() -> Parser<'a, u8, String> {
fn anything_else(term: u8) -> bool {
!is_cr(term) && !is_lf(term)
}
is_a(anything_else)
.repeat(0..)
.map(|u8s| String::from_utf8(u8s).expect("can only parse utf"))
}
fn line_comment<'a>() -> Parser<'a, u8, ()> {
(seq(b"//") * to_eol() - eol())
.discard()
.name("line comment")
}
fn eol<'a>() -> Parser<'a, u8, ()> {
((is_a(is_cr) * is_a(is_lf)) | is_a(is_lf) | is_a(is_cr)).discard()
}
fn keyword<'a>(keyword: &'static [u8]) -> Parser<'a, u8, ()> {
literal(keyword).discard().name("keyword")
}
fn literal<'a>(literal: &'static [u8]) -> Parser<'a, u8, String> {
spaced(seq(literal))
.map(|u8s| String::from_utf8(u8s.to_vec()).expect("can only parse utf"))
.name("literal")
}
fn star_comment<'a>() -> Parser<'a, u8, ()> {
fn anything_else(term: u8) -> bool {
term != b'*'
}
(seq(b"/*") * is_a(anything_else).repeat(0..) - seq(b"*/")).discard()
}
fn comment<'a>() -> Parser<'a, u8, ()> {
line_comment() | star_comment()
}
/// a parser wrapped in whitespace
fn spaced<'a, T>(parser: Parser<'a, u8, T>) -> Parser<'a, u8, T>
where
T: 'a,
{
space() * parser - space()
}
fn is_cr(term: u8) -> bool {
term == b'\r'
}
fn is_lf(term: u8) -> bool {
term == b'\n'
}
fn is_underscore(term: u8) -> bool {
term == b'_'
}
fn state_id<'a>() -> Parser<'a, u8, StateId> {
(identifier())
.map(|ident| StateId(ident))
}
fn identifier<'a>() -> Parser<'a, u8, String> {
let it = ((is_a(alpha) | is_a(is_underscore))
+ (is_a(alphanum) | is_a(is_underscore)).repeat(0..))
.map(|(first, rest)| format!("{}{}", first as char, String::from_utf8(rest).unwrap()));
spaced(it).name("name")
}
fn string<'a>() -> Parser<'a, u8, String> {
let special_char = sym(b'\\')
| sym(b'/')
| sym(b'"')
| sym(b'b').map(|_| b'\x08')
| sym(b'f').map(|_| b'\x0C')
| sym(b'n').map(|_| b'\n')
| sym(b'r').map(|_| b'\r')
| sym(b't').map(|_| b'\t');
let escape_sequence = sym(b'\\') * special_char;
let string = sym(b'"') * (none_of(b"\\\"") | escape_sequence).repeat(0..) - sym(b'"');
string.convert(String::from_utf8)
}
fn state<'a>() -> Parser<'a, u8, State> {
let raw = keyword(b"state") * identifier() + string().opt()
- semi();
raw.map(move |(identifier, description)| State {
id: StateId(identifier),
is_starting_state: false,
description
})
}
fn state_list<'a>() -> Parser<'a, u8, Vec<State>> {
fn tag_starting_state(idx: usize, state: State) -> State {
State {
is_starting_state: idx == 0,
..state
}
};
state().repeat(0..).map(|states| states.into_iter().enumerate().map(|(idx, state)| tag_starting_state(idx, state)).collect())
}
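// Sketch: the first parsed state is tagged as the starting state, so
// "state a; state b;" yields [a (is_starting_state: true), b (false)].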
fn accept_states_list<'a>() -> Parser<'a, u8, Vec<AcceptState>> {
accept_states_chain()
.repeat(0..)
.map(|chains| chains.into_iter().flatten().collect())
}
fn accept_states_chain<'a>() -> Parser<'a, u8, Vec<AcceptState>> {
let raw = spaced(list(spaced(state_id()), keyword(b"->"))) - semi();
raw.map(move |state_ids| {
if state_ids.len() < 2 {
return vec![];
}
let mut result = vec![];
for i in 0..state_ids.len() - 1 {
let left = state_ids[i].clone();
let right = state_ids[i+1].clone();
let accept = AcceptState(left, right);
result.push(accept);
}
return result;
})
}
pub fn state_machine<'a>() -> Parser<'a, u8, StateMachine> {
let header = keyword(b"machine") * identifier() - semi();
let raw = header
+ state_list()
+ accept_states_list();
raw.map(move |((name, states), accept_states)| StateMachine {
name,
states,
accept_states
})
}
#[cfg(test)]
mod test {
use super::*;
use std::cmp::min;
use std::path::{Path, PathBuf};
use std::{fs, io};
macro_rules! assert_consumes_all {
( $ parser: expr, $input: expr ) => {
let terminating_parser = $parser - space() - end();
let res = terminating_parser.parse($input);
if let Err(_) = res {
panic!("parser failed to match and consume everything")
}
};
( $ parser: expr, $input: expr, $expected: expr) => {
let terminating_parser = $parser - space() - end();
let res = terminating_parser.parse($input);
match res {
Ok(answer) => {
// it parsed, but was it right?
assert_eq!(answer, $expected)
}
Err(_) => {
//
panic!("parser failed to match and consume everything")
}
}
};
}
#[test]
fn parse_keywords() -> Result<()> {
assert_consumes_all![eol(), b"\r"];
assert_consumes_all![eol(), b"\r\n"];
assert_consumes_all![eol(), b"\n"];
assert_consumes_all![space(), b""];
assert_consumes_all![space(), b" "];
assert_consumes_all![space(), b" \t \n \r "];
assert_consumes_all![line_comment(), b"//\r"];
assert_consumes_all![line_comment(), b"//\n"];
assert_consumes_all![line_comment(), b"//\r\n"];
assert_consumes_all![line_comment(), b"// xyz \r\n"];
assert_consumes_all![star_comment(), b"/* thing */"];
assert_consumes_all![star_comment(), b"/* thing \r\n thing */"];
assert_consumes_all!(
identifier(),
b"foo"
);
assert_consumes_all!(
state_id(),
b"foo"
);
assert_consumes_all!(
accept_states_chain(),
b"foo-> bar -> baz;",
vec![
AcceptState(StateId("foo".into()), StateId("bar".into())),
AcceptState(StateId("bar".into()), StateId("baz".into())),
]
);
assert_consumes_all!(
accept_states_list(),
b"foo-> bar -> baz; baz -> quux;",
vec![
AcceptState(StateId("foo".into()), StateId("bar".into())),
AcceptState(StateId("bar".into()), StateId("baz".into())),
AcceptState(StateId("baz".into()), StateId("quux".into())),
]
);
Ok(())
}
#[test]
fn parse_state_machines() -> Result<()> {
let emptymachine = StateMachine {
name: "foo".into(),
states: Default::default(),
accept_states: vec![]
};
assert_consumes_all!(
state_machine(),
b"machine foo;",
emptymachine
);
assert_consumes_all!(
state_machine(),
b"
machine foo;
state bar \"it's a bar thing\";
state baz;
bar -> baz;
",
StateMachine {
name: "foo".into(),
states: vec![
State {
id: StateId("bar".into()),
is_starting_state: true,
description: Some("it's a bar thing".into())
},
State {
id: StateId("baz".into()),
is_starting_state: false,
description: None
},
],
accept_states: vec![
AcceptState(StateId("bar".into()), StateId("baz".into()))
]
}
);
Ok(())
}
fn count_lines(byte_slice: &[u8]) -> usize |
#[test]
fn line_counter_works() {
let file_path_str = "assets/fsml/simple-state-machine.fsml";
let byte_vec: Vec<u8> = std::fs::read(file_path_str).unwrap();
let actual = count_lines(&byte_vec);
assert_eq!(12, actual);
}
#[test]
fn parse_state_machine_file() {
let file_path_str = "assets/fsml/simple-state-machine.fsml";
assert_parse_file(PathBuf::from_str(file_path_str).unwrap().as_path());
}
#[test]
fn parse_all_files() -> Result<()> {
let mut entries = fs::read_dir("assets/fsml")?
.map(|res| res.map(|e| e.path()))
//.filter(|f| )
.collect::<std::result::Result<Vec<_>, io::Error>>()?;
entries.sort();
for file_path_str in entries {
println!("");
println!("{}", file_path_str.to_str().unwrap());
println!("");
assert_parse_file(file_path_str.as_path());
}
Ok(())
}
fn assert_parse_file(file_path_str: &Path) {
let byte_vec: Vec<u8> = std::fs::read(file_path_str).unwrap();
let file_content =
String::from_utf8(byte_vec.clone()).expect("should be able to read the file");
let byte_slice: &[u8] = &byte_vec;
let parser = state_machine();
let parse_result = match parser.parse(byte_slice) {
Ok(parse_result) => parse_result,
Err(pom::Error::Mismatch { message, position }) => {
let start_str = &byte_vec[0..position];
let line = count_lines(start_str) + 1;
let end = min(position + 50, file_content.len() - 1);
let extract = &file_content[position..end];
let extract = extract
.to_string()
.replace("\n", "\\n")
.replace("\r", "\\r")
.replace("\t", "\\t");
let err_location = format!("{}:{}:{}", file_path_str.to_str().unwrap(), line, 1);
// thread 'idl_parser::test::parse_full_html5_file' panicked at 'whoops', src/idl_parser.rs:428:9
let better_message = format!(
"thread 'idl_parser::test::parse_full_html5_file' panicked at 'parsing', {}\n\n{}",
err_location, extract
);
println!("{}", better_message);
panic!(message)
}
Err(e) => panic!("{}", e),
};
println!("{:?}", parse_result);
}
}
| {
let line_parser = (to_eol() - eol()).repeat(0..);
let parse_result = line_parser.parse(byte_slice).unwrap();
parse_result.len()
} | identifier_body |
parser.rs | use crate::Result;
use pom::char_class::{alpha, alphanum, multispace};
use pom::parser::*;
use std::str::FromStr;
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
#[derive(Debug, Clone, PartialEq)]
pub struct StateMachine {
pub name: String,
pub states: Vec<State>,
pub accept_states: Vec<AcceptState>
}
#[derive(Debug, Clone, PartialEq)]
pub struct AcceptState(StateId, StateId);
#[derive(Debug, Clone, PartialEq)]
pub struct StateId(String);
#[derive(Debug, Clone, PartialEq)]
pub struct State {
pub id: StateId,
pub is_starting_state: bool,
pub description: Option<String>
}
impl AcceptState {
pub fn source(&self) -> &StateId {
&self.0
}
pub fn target(&self) -> &StateId {
&self.1
}
}
impl Display for StateId {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.0)
}
}
/// space, tab, etc
fn ws<'a>() -> Parser<'a, u8, ()> {
is_a(multispace).discard()
}
/// whitespace and comments
fn space<'a>() -> Parser<'a, u8, ()> {
(ws() | comment()).repeat(0..).discard()
}
fn semi<'a>() -> Parser<'a, u8, ()> {
keyword(b";").name("semi")
}
fn to_eol<'a>() -> Parser<'a, u8, String> {
fn anything_else(term: u8) -> bool {
!is_cr(term) && !is_lf(term)
}
is_a(anything_else)
.repeat(0..)
.map(|u8s| String::from_utf8(u8s).expect("can only parse utf"))
}
fn line_comment<'a>() -> Parser<'a, u8, ()> {
(seq(b"//") * to_eol() - eol())
.discard()
.name("line comment")
}
fn eol<'a>() -> Parser<'a, u8, ()> {
((is_a(is_cr) * is_a(is_lf)) | is_a(is_lf) | is_a(is_cr)).discard()
}
fn keyword<'a>(keyword: &'static [u8]) -> Parser<'a, u8, ()> {
literal(keyword).discard().name("keyword")
}
fn literal<'a>(literal: &'static [u8]) -> Parser<'a, u8, String> {
spaced(seq(literal))
.map(|u8s| String::from_utf8(u8s.to_vec()).expect("can only parse utf"))
.name("literal")
}
fn star_comment<'a>() -> Parser<'a, u8, ()> {
fn anything_else(term: u8) -> bool {
term != b'*'
}
(seq(b"/*") * is_a(anything_else).repeat(0..) - seq(b"*/")).discard()
}
fn comment<'a>() -> Parser<'a, u8, ()> {
line_comment() | star_comment()
}
/// a parser wrapped in whitespace
fn spaced<'a, T>(parser: Parser<'a, u8, T>) -> Parser<'a, u8, T>
where
T: 'a,
{
space() * parser - space()
}
fn is_cr(term: u8) -> bool {
term == b'\r'
}
fn is_lf(term: u8) -> bool {
term == b'\n'
}
fn is_underscore(term: u8) -> bool {
term == b'_'
}
fn state_id<'a>() -> Parser<'a, u8, StateId> {
(identifier())
.map(|ident| StateId(ident))
}
fn identifier<'a>() -> Parser<'a, u8, String> {
let it = ((is_a(alpha) | is_a(is_underscore))
+ (is_a(alphanum) | is_a(is_underscore)).repeat(0..))
.map(|(first, rest)| format!("{}{}", first as char, String::from_utf8(rest).unwrap()));
spaced(it).name("name")
}
fn string<'a>() -> Parser<'a, u8, String> {
let special_char = sym(b'\\')
| sym(b'/')
| sym(b'"')
| sym(b'b').map(|_| b'\x08')
| sym(b'f').map(|_| b'\x0C')
| sym(b'n').map(|_| b'\n')
| sym(b'r').map(|_| b'\r')
| sym(b't').map(|_| b'\t');
let escape_sequence = sym(b'\\') * special_char;
let string = sym(b'"') * (none_of(b"\\\"") | escape_sequence).repeat(0..) - sym(b'"');
string.convert(String::from_utf8)
}
fn state<'a>() -> Parser<'a, u8, State> {
let raw = keyword(b"state") * identifier() + string().opt()
- semi();
raw.map(move |(identifier, description)| State {
id: StateId(identifier),
is_starting_state: false,
description
})
}
fn state_list<'a>() -> Parser<'a, u8, Vec<State>> {
fn tag_starting_state(idx: usize, state: State) -> State {
State {
is_starting_state: idx == 0,
..state
}
};
state().repeat(0..).map(|states| states.into_iter().enumerate().map(|(idx, state)| tag_starting_state(idx, state)).collect())
}
fn accept_states_list<'a>() -> Parser<'a, u8, Vec<AcceptState>> {
accept_states_chain()
.repeat(0..)
.map(|chains| chains.into_iter().flatten().collect())
}
fn accept_states_chain<'a>() -> Parser<'a, u8, Vec<AcceptState>> {
let raw = spaced(list(spaced(state_id()), keyword(b"->"))) - semi();
raw.map(move |state_ids| {
if state_ids.len() < 2 {
return vec![];
}
let mut result = vec![];
for i in 0..state_ids.len() - 1 {
let left = state_ids[i].clone();
let right = state_ids[i+1].clone();
let accept = AcceptState(left, right);
result.push(accept);
}
return result;
})
}
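// Sketch: "a -> b -> c;" expands into AcceptState(a, b) and
// AcceptState(b, c); a chain with fewer than two ids contributes nothing.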
pub fn state_machine<'a>() -> Parser<'a, u8, StateMachine> {
let header = keyword(b"machine") * identifier() - semi();
let raw = header
+ state_list()
+ accept_states_list();
raw.map(move |((name, states), accept_states)| StateMachine {
name,
states,
accept_states
})
}
#[cfg(test)]
mod test {
use super::*;
use std::cmp::min;
use std::path::{Path, PathBuf};
use std::{fs, io};
macro_rules! assert_consumes_all {
( $ parser: expr, $input: expr ) => {
let terminating_parser = $parser - space() - end();
let res = terminating_parser.parse($input);
if let Err(_) = res {
panic!("parser failed to match and consume everything")
}
};
( $ parser: expr, $input: expr, $expected: expr) => {
let terminating_parser = $parser - space() - end();
let res = terminating_parser.parse($input);
match res {
Ok(answer) => {
// it parsed, but was it right?
assert_eq!(answer, $expected)
}
Err(_) => {
//
panic!("parser failed to match and consume everything")
}
}
};
}
#[test]
fn parse_keywords() -> Result<()> {
assert_consumes_all![eol(), b"\r"];
assert_consumes_all![eol(), b"\r\n"];
assert_consumes_all![eol(), b"\n"];
assert_consumes_all![space(), b""];
assert_consumes_all![space(), b" "];
assert_consumes_all![space(), b" \t \n \r "];
assert_consumes_all![line_comment(), b"//\r"];
assert_consumes_all![line_comment(), b"//\n"];
assert_consumes_all![line_comment(), b"//\r\n"];
assert_consumes_all![line_comment(), b"// xyz \r\n"];
assert_consumes_all![star_comment(), b"/* thing */"];
assert_consumes_all![star_comment(), b"/* thing \r\n thing */"];
assert_consumes_all!(
identifier(),
b"foo"
);
assert_consumes_all!(
state_id(),
b"foo"
);
assert_consumes_all!(
accept_states_chain(),
b"foo-> bar -> baz;",
vec![
AcceptState(StateId("foo".into()), StateId("bar".into())),
AcceptState(StateId("bar".into()), StateId("baz".into())),
]
);
assert_consumes_all!(
accept_states_list(),
b"foo-> bar -> baz; baz -> quux;",
vec![
AcceptState(StateId("foo".into()), StateId("bar".into())),
AcceptState(StateId("bar".into()), StateId("baz".into())),
AcceptState(StateId("baz".into()), StateId("quux".into())),
]
);
Ok(())
}
#[test]
fn parse_state_machines() -> Result<()> {
let emptymachine = StateMachine {
name: "foo".into(),
states: Default::default(),
accept_states: vec![]
};
assert_consumes_all!(
state_machine(),
b"machine foo;",
emptymachine
);
assert_consumes_all!(
state_machine(),
b"
machine foo;
state bar \"it's a bar thing\";
state baz;
bar -> baz;
",
StateMachine {
name: "foo".into(),
states: vec![
State {
id: StateId("bar".into()),
is_starting_state: true,
description: Some("it's a bar thing".into())
},
State {
id: StateId("baz".into()),
is_starting_state: false,
description: None
},
],
accept_states: vec![
AcceptState(StateId("bar".into()), StateId("baz".into()))
]
}
);
Ok(())
}
fn count_lines(byte_slice: &[u8]) -> usize {
let line_parser = (to_eol() - eol()).repeat(0..);
let parse_result = line_parser.parse(byte_slice).unwrap();
parse_result.len()
}
#[test]
fn line_counter_works() {
let file_path_str = "assets/fsml/simple-state-machine.fsml";
let byte_vec: Vec<u8> = std::fs::read(file_path_str).unwrap();
let actual = count_lines(&byte_vec);
assert_eq!(12, actual);
}
#[test]
fn parse_state_machine_file() {
let file_path_str = "assets/fsml/simple-state-machine.fsml";
assert_parse_file(PathBuf::from_str(file_path_str).unwrap().as_path());
}
#[test]
fn parse_all_files() -> Result<()> {
let mut entries = fs::read_dir("assets/fsml")?
.map(|res| res.map(|e| e.path()))
//.filter(|f| )
.collect::<std::result::Result<Vec<_>, io::Error>>()?;
entries.sort();
for file_path_str in entries {
println!("");
println!("{}", file_path_str.to_str().unwrap());
println!("");
assert_parse_file(file_path_str.as_path());
}
Ok(())
}
fn assert_parse_file(file_path_str: &Path) {
let byte_vec: Vec<u8> = std::fs::read(file_path_str).unwrap();
let file_content =
String::from_utf8(byte_vec.clone()).expect("should be able to read the file");
let byte_slice: &[u8] = &byte_vec;
let parser = state_machine();
let parse_result = match parser.parse(byte_slice) {
Ok(parse_result) => parse_result,
Err(pom::Error::Mismatch { message, position }) => |
Err(e) => panic!("{}", e),
};
println!("{:?}", parse_result);
}
}
| {
let start_str = &byte_vec[0..position];
let line = count_lines(start_str) + 1;
let end = min(position + 50, file_content.len() - 1);
let extract = &file_content[position..end];
let extract = extract
.to_string()
.replace("\n", "\\n")
.replace("\r", "\\r")
.replace("\t", "\\t");
let err_location = format!("{}:{}:{}", file_path_str.to_str().unwrap(), line, 1);
// thread 'idl_parser::test::parse_full_html5_file' panicked at 'whoops', src/idl_parser.rs:428:9
let better_message = format!(
"thread 'idl_parser::test::parse_full_html5_file' panicked at 'parsing', {}\n\n{}",
err_location, extract
);
println!("{}", better_message);
panic!(message)
} | conditional_block |
parser.rs | use crate::Result;
use pom::char_class::{alpha, alphanum, multispace};
use pom::parser::*;
use std::str::FromStr;
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
#[derive(Debug, Clone, PartialEq)]
pub struct StateMachine {
pub name: String,
pub states: Vec<State>,
pub accept_states: Vec<AcceptState>
}
#[derive(Debug, Clone, PartialEq)]
pub struct AcceptState(StateId, StateId);
#[derive(Debug, Clone, PartialEq)]
pub struct StateId(String);
#[derive(Debug, Clone, PartialEq)]
pub struct State {
pub id: StateId,
pub is_starting_state: bool,
pub description: Option<String>
}
impl AcceptState {
pub fn source(&self) -> &StateId {
&self.0
}
pub fn target(&self) -> &StateId {
&self.1
}
}
impl Display for StateId {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.0)
}
}
/// space, tab, etc
fn ws<'a>() -> Parser<'a, u8, ()> {
is_a(multispace).discard()
}
/// whitespace and comments
fn space<'a>() -> Parser<'a, u8, ()> {
(ws() | comment()).repeat(0..).discard()
}
fn semi<'a>() -> Parser<'a, u8, ()> {
keyword(b";").name("semi")
}
fn to_eol<'a>() -> Parser<'a, u8, String> {
fn anything_else(term: u8) -> bool {
!is_cr(term) && !is_lf(term)
}
is_a(anything_else)
.repeat(0..)
.map(|u8s| String::from_utf8(u8s).expect("can only parse utf"))
}
fn line_comment<'a>() -> Parser<'a, u8, ()> {
(seq(b"//") * to_eol() - eol())
.discard()
.name("line comment")
}
fn eol<'a>() -> Parser<'a, u8, ()> {
((is_a(is_cr) * is_a(is_lf)) | is_a(is_lf) | is_a(is_cr)).discard()
}
fn keyword<'a>(keyword: &'static [u8]) -> Parser<'a, u8, ()> {
literal(keyword).discard().name("keyword")
}
fn literal<'a>(literal: &'static [u8]) -> Parser<'a, u8, String> {
spaced(seq(literal))
.map(|u8s| String::from_utf8(u8s.to_vec()).expect("can only parse utf"))
.name("literal")
}
fn star_comment<'a>() -> Parser<'a, u8, ()> {
fn anything_else(term: u8) -> bool {
term != b'*'
}
(seq(b"/*") * is_a(anything_else).repeat(0..) - seq(b"*/")).discard()
}
fn comment<'a>() -> Parser<'a, u8, ()> {
line_comment() | star_comment()
}
/// a parser wrapped in whitespace
fn spaced<'a, T>(parser: Parser<'a, u8, T>) -> Parser<'a, u8, T>
where
T: 'a,
{
space() * parser - space()
}
fn is_cr(term: u8) -> bool {
term == b'\r'
}
fn is_lf(term: u8) -> bool {
term == b'\n'
}
fn is_underscore(term: u8) -> bool {
term == b'_'
}
fn state_id<'a>() -> Parser<'a, u8, StateId> {
(identifier())
.map(|ident| StateId(ident))
}
fn identifier<'a>() -> Parser<'a, u8, String> {
let it = ((is_a(alpha) | is_a(is_underscore))
+ (is_a(alphanum) | is_a(is_underscore)).repeat(0..))
.map(|(first, rest)| format!("{}{}", first as char, String::from_utf8(rest).unwrap()));
spaced(it).name("name")
}
fn string<'a>() -> Parser<'a, u8, String> {
let special_char = sym(b'\\')
| sym(b'/')
| sym(b'"')
| sym(b'b').map(|_| b'\x08')
| sym(b'f').map(|_| b'\x0C')
| sym(b'n').map(|_| b'\n')
| sym(b'r').map(|_| b'\r')
| sym(b't').map(|_| b'\t');
let escape_sequence = sym(b'\\') * special_char;
let string = sym(b'"') * (none_of(b"\\\"") | escape_sequence).repeat(0..) - sym(b'"');
string.convert(String::from_utf8)
}
fn state<'a>() -> Parser<'a, u8, State> {
let raw = keyword(b"state") * identifier() + string().opt()
- semi();
raw.map(move |(identifier, description)| State {
id: StateId(identifier),
is_starting_state: false,
description
})
}
fn state_list<'a>() -> Parser<'a, u8, Vec<State>> {
fn tag_starting_state(idx: usize, state: State) -> State {
State {
is_starting_state: idx == 0,
..state | fn accept_states_list<'a>() -> Parser<'a, u8, Vec<AcceptState>> {
accept_states_chain()
.repeat(0..)
.map(|chains| chains.into_iter().flatten().collect())
}
fn accept_states_chain<'a>() -> Parser<'a, u8, Vec<AcceptState>> {
let raw = spaced(list(spaced(state_id()), keyword(b"->"))) - semi();
raw.map(move |state_ids| {
if state_ids.len() < 2 {
return vec![];
}
let mut result = vec![];
for i in 0..state_ids.len() - 1 {
let left = state_ids[i].clone();
let right = state_ids[i+1].clone();
let accept = AcceptState(left, right);
result.push(accept);
}
return result;
})
}
pub fn state_machine<'a>() -> Parser<'a, u8, StateMachine> {
let header = keyword(b"machine") * identifier() - semi();
let raw = header
+ state_list()
+ accept_states_list();
raw.map(move |((name, states), accept_states)| StateMachine {
name,
states,
accept_states
})
}
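// Sketch of the grammar this parser encodes (informal EBNF):
//   machine := "machine" ident ";" state* chain*
//   state   := "state" ident string? ";"
//   chain   := ident ("->" ident)* ";"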
#[cfg(test)]
mod test {
use super::*;
use std::cmp::min;
use std::path::{Path, PathBuf};
use std::{fs, io};
macro_rules! assert_consumes_all {
( $ parser: expr, $input: expr ) => {
let terminating_parser = $parser - space() - end();
let res = terminating_parser.parse($input);
if let Err(_) = res {
panic!("parser failed to match and consume everything")
}
};
( $ parser: expr, $input: expr, $expected: expr) => {
let terminating_parser = $parser - space() - end();
let res = terminating_parser.parse($input);
match res {
Ok(answer) => {
// it parsed, but was it right?
assert_eq!(answer, $expected)
}
Err(_) => {
//
panic!("parser failed to match and consume everything")
}
}
};
}
#[test]
fn parse_keywords() -> Result<()> {
assert_consumes_all![eol(), b"\r"];
assert_consumes_all![eol(), b"\r\n"];
assert_consumes_all![eol(), b"\n"];
assert_consumes_all![space(), b""];
assert_consumes_all![space(), b" "];
assert_consumes_all![space(), b" \t \n \r "];
assert_consumes_all![line_comment(), b"//\r"];
assert_consumes_all![line_comment(), b"//\n"];
assert_consumes_all![line_comment(), b"//\r\n"];
assert_consumes_all![line_comment(), b"// xyz \r\n"];
assert_consumes_all![star_comment(), b"/* thing */"];
assert_consumes_all![star_comment(), b"/* thing \r\n thing */"];
assert_consumes_all!(
identifier(),
b"foo"
);
assert_consumes_all!(
state_id(),
b"foo"
);
assert_consumes_all!(
accept_states_chain(),
b"foo-> bar -> baz;",
vec![
AcceptState(StateId("foo".into()), StateId("bar".into())),
AcceptState(StateId("bar".into()), StateId("baz".into())),
]
);
assert_consumes_all!(
accept_states_list(),
b"foo-> bar -> baz; baz -> quux;",
vec![
AcceptState(StateId("foo".into()), StateId("bar".into())),
AcceptState(StateId("bar".into()), StateId("baz".into())),
AcceptState(StateId("baz".into()), StateId("quux".into())),
]
);
Ok(())
}
#[test]
fn parse_state_machines() -> Result<()> {
let emptymachine = StateMachine {
name: "foo".into(),
states: Default::default(),
accept_states: vec![]
};
assert_consumes_all!(
state_machine(),
b"machine foo;",
emptymachine
);
assert_consumes_all!(
state_machine(),
b"
machine foo;
state bar \"it's a bar thing\";
state baz;
bar -> baz;
",
StateMachine {
name: "foo".into(),
states: vec![
State {
id: StateId("bar".into()),
is_starting_state: true,
description: Some("it's a bar thing".into())
},
State {
id: StateId("baz".into()),
is_starting_state: false,
description: None
},
],
accept_states: vec![
AcceptState(StateId("bar".into()), StateId("baz".into()))
]
}
);
Ok(())
}
fn count_lines(byte_slice: &[u8]) -> usize {
let line_parser = (to_eol() - eol()).repeat(0..);
let parse_result = line_parser.parse(byte_slice).unwrap();
parse_result.len()
}
#[test]
fn line_counter_works() {
let file_path_str = "assets/fsml/simple-state-machine.fsml";
let byte_vec: Vec<u8> = std::fs::read(file_path_str).unwrap();
let actual = count_lines(&byte_vec);
assert_eq!(12, actual);
}
#[test]
fn parse_state_machine_file() {
let file_path_str = "assets/fsml/simple-state-machine.fsml";
assert_parse_file(PathBuf::from_str(file_path_str).unwrap().as_path());
}
#[test]
fn parse_all_files() -> Result<()> {
let mut entries = fs::read_dir("assets/fsml")?
.map(|res| res.map(|e| e.path()))
//.filter(|f| )
.collect::<std::result::Result<Vec<_>, io::Error>>()?;
entries.sort();
for file_path_str in entries {
println!("");
println!("{}", file_path_str.to_str().unwrap());
println!("");
assert_parse_file(file_path_str.as_path());
}
Ok(())
}
fn assert_parse_file(file_path_str: &Path) {
let byte_vec: Vec<u8> = std::fs::read(file_path_str).unwrap();
let file_content =
String::from_utf8(byte_vec.clone()).expect("should be able to read the file");
let byte_slice: &[u8] = &byte_vec;
let parser = state_machine();
let parse_result = match parser.parse(byte_slice) {
Ok(parse_result) => parse_result,
Err(pom::Error::Mismatch { message, position }) => {
let start_str = &byte_vec[0..position];
let line = count_lines(start_str) + 1;
let end = min(position + 50, file_content.len() - 1);
let extract = &file_content[position..end];
let extract = extract
.to_string()
.replace("\n", "\\n")
.replace("\r", "\\r")
.replace("\t", "\\t");
let err_location = format!("{}:{}:{}", file_path_str.to_str().unwrap(), line, 1);
// thread 'idl_parser::test::parse_full_html5_file' panicked at 'whoops', src/idl_parser.rs:428:9
let better_message = format!(
"thread 'idl_parser::test::parse_full_html5_file' panicked at 'parsing', {}\n\n{}",
err_location, extract
);
println!("{}", better_message);
panic!(message)
}
Err(e) => panic!("{}", e),
};
println!("{:?}", parse_result);
}
} | }
};
state().repeat(0..).map(|states| states.into_iter().enumerate().map(|(idx, state)| tag_starting_state(idx, state)).collect())
}
| random_line_split |
lib.rs | // (A,X,E,R,S)
//
// Lizzie Borden took an axe
// And gave her mother forty whacks.
// When she saw what she had done,
// She gave her father forty-one.
//
#![feature(struct_variant)]
#![allow(dead_code)]
#![allow(uppercase_variables)]
#![allow(unused_variable)]
#![allow(unused_imports)]
//#![allow(visible_private_types)]
use std::collections::hashmap::HashMap;
mod r0;
mod r1;
// scheme kinda source-language
#[deriving(Clone)]
pub enum CoreLanguage {
// <core> → <object>
// <core> → <variable>
// <core> → (quote <object>)
// <core> → (lambda (<variable> ... ) <core>)
// <core> → (if <core> <core> <core>)
// <core> → (set! <variable> <core>)
// <core> → (call/cc <core>)
// <core> → (<core> <core> ... )
Object(Obj),
Variable(String),
Quote(Core),
Lambda(Vec<String>, Core),
If(Core, Core, Core),
Set(String, Core),
CallCC(Core),
List(Vec<Core>)
}
pub type Core = Box<CoreLanguage>;
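// Illustrative sketch (era syntax, hence `box`): hand-building the AST for
// `(set! x 42)`. The variable name and the literal are placeholders, not from
// the original sources.
fn example_set_ast() -> Core {
    box Set("x".to_string(), box Object(OInt(42)))
}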
#[deriving(Clone)]
pub enum Obj {
ONil,
OBool(bool),
OInt(i32),
OFloat(f32),
OStr(String),
OClosure(Closure)
}
//(define compile
// (lambda (x next)
// (cond
// [(symbol? x)
// (list ’refer x next)]
// [(pair? x)
// (record-case x
// [quote (obj)
// (list ’constant obj next)]
// [lambda (vars body)
// (list ’close vars (compile body ’(return)) next)]
// [if (test then else)
// (let ([thenc (compile then next)]
// [elsec (compile else next)])
// (compile test (list ’test thenc elsec)))]
// [set! (var x)
// (compile x (list ’assign var next))]
// [call/cc (x)
// (let ([c (list ’conti
// (list ’argument
// (compile x ’(apply))))])
// (if (tail? next)
// c
// (list ’frame next c)))]
// [else
// (recur loop ([args (cdr x)]
// [c (compile (car x) ’(apply))])
// (if (null? args)
// (if (tail? next)
// c
// (list ’frame next c))
// (loop (cdr args)
// (compile (car args)
// (list ’argument c)))))])]
// [else
// (list ’constant x next)])))
pub fn compile(x: CoreLanguage, next: Code) -> Code {
match x {
Variable(str) => {
box REFER{var:str, k:next}
},
Quote(obj) => {
box CONSTANT{obj:ONil, k:next}
},
Lambda(vars, body) => {
box CLOSE{ vars:vars, body:compile(*body, box RETURN{unused:true}), k:next }
},
If(test, seq, alt) => {
let thenc = compile(*seq, next.clone());
let elsec = compile(*alt, next.clone());
compile(*test, box TEST{kthen:thenc, kelse:elsec})
},
Set(var, x) => {
compile(*x, box ASSIGN{var:var, k:next} )
},
CallCC(x) => {
let c = box CONTI{
k: box ARGUMENT{ k:compile(*x, box APPLY{unused:true}) }
};
if is_tail(&next) { c } else { box FRAME{k:next, ret:c} }
},
List(x) => {
let args = x.slice_from(1);
let mut c = compile((*x[0]).clone(), box APPLY{unused:true});
for arg in args.iter() {
c = compile((**arg).clone(), box ARGUMENT{k:c});
}
if is_tail(&next) { c } else { box FRAME{k:next, ret:c} }
}
_ =>
{ box CONSTANT{obj:ONil /*x*/, k:next} }
}
}
fn is_tail(x: &Code) -> bool {
match **x {
RETURN{..} => true,
_ => false
}
}
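// Hedged usage sketch of `compile`: a bare variable reference terminated by
// HALT lowers to REFER{var:"x", k:HALT}, mirroring the scheme listing above.
fn example_compile_refer() -> Code {
    compile(Variable("x".to_string()), box HALT{unused:true})
}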
///////////////////////////////////////////////////////////////////////////////
// Opcode
// these are a dozen primitive instructions that implement scheme-like
// semantics. This is applicative-order lambda calculus with lexically-scoped
// environments: everything reduces to function calls where arguments are
// evaluated before application of function to arguments; variables are
// bound in their static (lexical) scope like Scheme, not in their dynamic
// (runtime) scope like earlier Lisps.
// Execution model is heap-based and there is support for call-with-current-continuation
// so exception semantics can be implemented easily in terms of call/cc.
#[deriving(Clone)]
pub enum Opcode {
HALT {unused:bool},
REFER {var: String, k: Code},
CONSTANT {obj: Obj, k: Code},
CLOSE {vars: Vec<String>, body: Code, k: Code},
TEST {kthen: Code, kelse: Code},
ASSIGN {var: String, k: Code},
CONTI {k: Code},
NUATE {s: Frame, var: String},
FRAME {k: Code, ret: Code},
ARGUMENT {k: Code},
APPLY {unused:bool},
INVOKE {method: String, k: Code},
RETURN {unused:bool},
}
pub type Code = Box<Opcode>;
/// Scope is a dynamic environment: a set of bindings, implemented
/// as a map from variable names (as Str, representing symbols)
/// to runtime value (Obj? if not typing; or AxonVal derivatives)
#[deriving(Clone)]
struct Scope {
parent: Option<Box<Scope>>, // link to enclosing scope
//local: HashMap<String, Obj>// local vars (conceptually includes fn params)
vars: Vec<String>,
vals: Vec<Obj>
}
impl Scope
{
fn new(parent:Option<Box<Scope>>) -> Scope {
Scope { parent:parent, vars:vec!(), vals:vec!() }
}
fn get(&self, var: &String) -> Option<Obj> {
let ix_opt = self.vars.iter().position(|v| { v == var });
match ix_opt {
Some(ix) => Some(self.vals[ix].clone()),
None => match self.parent { Some(ref p) => p.get(var), None => None }
}
}
fn set(&mut self, var: &String, val: Obj) {
| let ix_opt = self.vars.iter().position(|v| { v == var });
match ix_opt {
Some(ix) => { *self.vals.get_mut(ix) = val },
None => { self.vars.push(var.clone()); self.vals.push(val); }
};
}
fn extend(&self, vars: Vec<String>, vals: Vec<Obj>) -> Scope {
Scope{
parent: Some(box self.clone()),
vars: vars,
vals: vals
}
}
}
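// Sketch of lexical lookup: `extend` chains a child scope onto its parent, and
// with the parent fallback in `get` above, misses walk outward. The names and
// values are illustrative only.
fn example_scope_lookup() -> Option<Obj> {
    let globals = Scope::new(None).extend(vec!("x".to_string()), vec!(OInt(1)));
    let inner = globals.extend(vec!("y".to_string()), vec!(OInt(2)));
    inner.get(&"x".to_string()) // Some(OInt(1)), found via the parent link
}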
/// Frame is the dynamic (runtime) representation of a function
/// execution. It captures the caller-frame and return-address,
/// so the complete dynamic context can be traced by walking back
/// thru the caller links; a bindings context (which pushes and
/// pops local scopes for variable definitions); the AST for the
/// function's code; and the instruction-pointer which indicates
/// the current point of execution in the code.
#[deriving(Clone)]
struct Frame {
// *X* when this frame returns, exec. resumes from caller.code[ret] (ie. ret is index into code of caller)
ret: Code,
// *E* parms,locals
bindings: Scope,
// *R* accumulator of arg vals, to be combined w/ param names in extending env
valueRib: Vec<Obj>,
// *S* previous frame
caller: Option<Box<Frame>>,
//code: Code //belongs in Frame (there's a frame for every lambda definition)
}
impl Frame {
fn make(env:Scope, rib: Vec<Obj>, ret: Code, caller: Option<Box<Frame>>)
-> Frame
{
Frame { bindings:env, valueRib:rib, ret:ret, caller:caller }
}
}
/// closure captures the environment where it was created; when called,
/// it binds its params to actual-arg values (in left-to-right listed order)
/// and extends its environment with those bindings, and executes its
/// body with that extended environment.
#[deriving(Clone)]
pub struct Closure {
// names of parameters to be applied to closure
params: Vec<String>,
// static environment (lexical scope, captures scopes enclosing definition)
env: Scope,
// code implementing body of closure.
body: Code
}
impl Closure {
fn make(params: Vec<String>, env: Scope, body: Code) -> Closure {
Closure { params:params, env:env, body:body }
}
}
/// The VM below is fundamentally a state machine, of course, and
/// the five registers capture the entire current-state of that machine.
struct VMState
{
/////////////////////////////////////////////////////////////////////
// Machine Registers
// accumulator (most-recently-evaluated-expression value)
A: Obj,
// next instruction to be executed (source is compiled into a directed-graph of Opcode)
X: Code,
// current (lexical) environment (bindings map, context, ...)
E: Scope,
// value rib (accumulator for values of arguments to a fn application)
R: Vec<Obj>,
// control stack (ptr to top call frame; frames have link to prev frame)
S: Frame
}
impl VMState {
fn make(a:Obj, x:Code, e:Scope, r:Vec<Obj>, s:Frame) -> VMState {
VMState { A:a, X:x, E:e, R:r, S:s }
}
fn accumulator(&self) -> &Obj { &self.A }
fn program(&self) -> &Code { &self.X }
fn environment(&self) -> &Scope { &self.E }
fn arguments(&self) -> &Vec<Obj> { &self.R }
fn stackframe(&self) -> &Frame { &self.S }
}
///////////////////////////////////////////////////////////////////////////////
// axon machine: definition and implementation of virtual machine for
// scheme-like semantics
//
// let code be an in-memory graph (DAG) of instructions, where the entry-point
// to a sub-program is a single instruction.
// let instruction be a composition of:
// - opcode, an enumeration identifying its type
// - operands, compile-time constant arguments to the instruction
// - links, 0, 1, or 2 links to successor-instructions.
// note the single exception: the 'nuate instruction takes a Frame
// argument. This means that (as written), compiled code that
// includes call/cc won't be serializable, because the live control-stack
// frames aren't serializable. This only matters if we start thinking
// about serializing execution-in-process code and moving it to a
// different machine for resumption.
// ...
// ...
// A VM with 5 registers, 12 primitive instructions, and
// 3 basic data structures:
// - Frame captures a call-frame and maintains a dynamic control stack
// - Scope manages bindings of variables to values in lexically nested scopes
// - Closure binds parameters to actual args and executes code
struct Machine {
state: VMState
}
impl Machine
{
fn init(state: VMState) -> Machine { Machine { state:state } }
fn step(&mut self) -> Option<Obj> {
let (mut A,X,mut E,mut R,mut S) = (
self.state.A.clone(),
self.state.X.clone(),
self.state.E.clone(),
self.state.R.clone(),
self.state.S.clone()
);
let x = match *X {
// case HALT : return // and return A
HALT {..} => {
box HALT {unused:true}
},
// case REFER : I: REFER ; A = E[I.var]; X = I.next
REFER {var:ref var, k:ref k} => {
A = E.get(var).expect("yowza");
k.clone()
},
// case CONSTANT: I: CONSTANT; A = I.obj; X = I.next
CONSTANT {obj:ref obj, k:ref k} => {
A = obj.clone();
k.clone()
},
// case CLOSE : I: CLOSE ; A = Closure(I.vars, E, I.body); X = I.next
CLOSE {vars:ref vars, body:ref body, k:ref k} => {
let a = Closure { params:vars.clone(), env:E.clone(), body:body.clone() };
A = OClosure(a);
k.clone()
},
// case TEST : I: TEST ; X = (A == true) ? I.thenc : I.elsec
TEST {kthen:ref kthen, kelse:ref kelse} => {
let k = //if A == true { kthen } else { kelse };
match A {
OBool(true) => { kthen },
//OBool(false) => { kelse },
_ => { kelse }
};
k.clone()
},
// case ASSIGN : I: ASSIGN ; E[I.var] = A; X = I.next
ASSIGN {var:ref var, k:ref k} => {
E.set(var, A.clone());
k.clone()
},
// case CONTI : I: CONTI ; A = capture_cc(S); X = I.next
CONTI {k:ref k} => {
let a = Machine::capture_cc(&S);
A = OClosure(a);
k.clone()
},
// case NUATE : I: NUATE ; A = E[I.var]; S = I.s; X = RETURN;
NUATE {s:ref s, var:ref var} => {
A = E.get(var).expect("yup");
S = s.clone(); // restore the captured control stack, per the capture_cc docs below
box RETURN {unused:true}
},
// case FRAME : I: FRAME ; S = Frame(E, R, I.ret, S); R = [,]; X = I.next
FRAME {k:ref k, ret:ref ret} => {
let s = Frame {
ret: ret.clone(),
bindings: E.clone(),
valueRib: R.clone(),
caller:Some(box S.clone())
};
S = s;
R = vec!();
k.clone()
},
// case ARGUMENT: I: ARGUMENT; R.add(A); X = I.next
ARGUMENT {k:ref k} => {
R.push(A.clone());
k.clone()
},
// case APPLY : I: APPLY ; closure := (AxonClosure) A
// vals := R
// vars := closure.params
// E = closure.env.extend(vars, vals)
// R = [,]
// X = closure.body
APPLY {..} => {
let closure = match A {
OClosure(ref clo) => { clo.clone() },
_ => fail!("yo! no clo")
};
let vals = R;
R = vec!();
let vars = closure.params.clone();
E = closure.env.extend(vars, vals);
closure.body
},
// case INVOKE : I: INVOKE ; obj := A
// // meth := obj.typeof.slot[I.method]
// args := (Obj?[]) R
// // A = meth.invoke(obj, args)
// R = [,]
// X = I.next
INVOKE {method:ref method, k:ref code} => {
let f = match A {
OClosure(ref clo) => { clo.clone() },
_ => fail!("no clo no mo")
};
let args = R;
R = vec!();
//TODO: A = (f)(args);
code.clone()
},
// case RETURN : I: RETURN ; X = S.ret; E = S.bindings; R = S.valueRib; S = S.caller
RETURN {..} => {
let x = S.ret;
E = S.bindings;
R = S.valueRib;
S = *S.caller.expect("DCM,ICU");
x
},
};
let retval = A.clone();
self.state = VMState {
A:A,
X:x,
E:E,
R:R,
S:S
};
//notifyObservers
Some(retval)
}
fn done(&self) -> bool {
match *self.state.X {
HALT {..} => { true },
_ => { false }
}
}
fn run(&mut self) -> Option<Obj> {
loop {
let retval = self.step();
if self.done() {
return retval;
}
}
}
/// a continuation is a closure that in addition has access to the frame
/// in which it was created (where call/cc was called).
/// the body of a continuation closure, when executed, restores the
/// saved frame (which includes its calling frames) (pg. 50)
///
/// a continuation generates a closure that captures
/// the current control stack; the body of the generated
/// closure is an instruction that will restore the
/// captured stack.
fn capture_cc(s: &Frame) -> Closure {
let v = "__V__";
let body = box NUATE{ s:s.clone(), var:v.to_string() };
let env = Scope::new(None);
let vars = vec!(v.to_string());
Closure { params:vars, env:env, body:body }
}
} | identifier_name |
|
lib.rs | // (A,X,E,R,S)
//
// Lizzie Borden took an axe
// And gave her mother forty whacks.
// When she saw what she had done,
// She gave her father forty-one.
//
#![feature(struct_variant)]
#![allow(dead_code)]
#![allow(uppercase_variables)]
#![allow(unused_variable)]
#![allow(unused_imports)]
//#![allow(visible_private_types)]
use std::collections::hashmap::HashMap;
mod r0;
mod r1;
// scheme kinda source-language
#[deriving(Clone)]
pub enum CoreLanguage {
// <core> → <object>
// <core> → <variable>
// <core> → (quote <object>)
// <core> → (lambda (<variable> ... ) <core>)
// <core> → (if <core> <core> <core>)
// <core> → (set! <variable> <core>)
// <core> → (call/cc <core>)
// <core> → (<core> <core> ... )
Object(Obj),
Variable(String),
Quote(Core),
Lambda(Vec<String>, Core),
If(Core, Core, Core),
Set(String, Core),
CallCC(Core),
List(Vec<Core>)
}
pub type Core = Box<CoreLanguage>;
#[deriving(Clone)]
pub enum Obj {
ONil,
OBool(bool),
OInt(i32),
OFloat(f32),
OStr(String),
OClosure(Closure)
}
//(define compile
// (lambda (x next)
// (cond
// [(symbol? x)
// (list ’refer x next)]
// [(pair? x)
// (record-case x
// [quote (obj)
// (list ’constant obj next)]
// [lambda (vars body)
// (list ’close vars (compile body ’(return)) next)]
// [if (test then else)
// (let ([thenc (compile then next)]
// [elsec (compile else next)])
// (compile test (list ’test thenc elsec)))]
// [set! (var x)
// (compile x (list ’assign var next))]
// [call/cc (x)
// (let ([c (list ’conti
// (list ’argument
// (compile x ’(apply))))])
// (if (tail? next)
// c
// (list ’frame next c)))]
// [else
// (recur loop ([args (cdr x)]
// [c (compile (car x) ’(apply))])
// (if (null? args)
// (if (tail? next)
// c
// (list ’frame next c))
// (loop (cdr args)
// (compile (car args)
// (list ’argument c)))))])]
// [else
// (list ’constant x next)])))
pub fn compile(x: CoreLanguage, next: Code) -> Code {
match x {
Variable(str) => {
box REFER{var:str, k:next}
},
Quote(obj) => {
box CONSTANT{obj:ONil, k:next}
},
Lambda(vars, body) => {
box CLOSE{ vars:vars, body:compile(*body, box RETURN{unused:true}), k:next }
},
If(test, seq, alt) => {
let thenc = compile(*seq, next.clone());
let elsec = compile(*alt, next.clone());
compile(*test, box TEST{kthen:thenc, kelse:elsec})
},
Set(var, x) => {
compile(*x, box ASSIGN{var:var, k:next} )
},
CallCC(x) => {
let c = box CONTI{
k: box ARGUMENT{ k:compile(*x, box APPLY{unused:true}) }
};
if is_tail(&next) { c } else { box FRAME{k:next, ret:c} }
},
List(x) => {
let args = x.slice_from(1);
let mut c = compile((*x[0]).clone(), box APPLY{unused:true});
for arg in args.iter() {
c = compile((**arg).clone(), box ARGUMENT{k:c});
}
if is_tail(&next) { c } else { box FRAME{k:next, ret:c} }
}
_ =>
{ box CONSTANT{obj:ONil /*x*/, k:next} }
}
}
fn is_tail(x: &Code) -> bool {
match **x {
RETURN{..} => true,
_ => false
}
}
///////////////////////////////////////////////////////////////////////////////
// Opcode
// these are a dozen primitive instructions that implement scheme-like
// semantics. This is applicative-order lambda calculus with lexically-scoped
// environments: everything reduces to function calls where arguments are
// evaluated before application of function to arguments; variables are
// bound in their static (lexical) scope like Scheme, not in their dynamic
// (runtime) scope like earlier Lisps.
// Execution model is heap-based and there is support for call-with-current-continuation
// so exception semantics can be implemented easily in terms of call/cc.
#[deriving(Clone)]
pub enum Opcode {
HALT {unused:bool},
REFER {var: String, k: Code},
CONSTANT {obj: Obj, k: Code},
CLOSE {vars: Vec<String>, body: Code, k: Code},
TEST {kthen: Code, kelse: Code},
ASSIGN {var: String, k: Code},
CONTI {k: Code},
NUATE {s: Frame, var: String},
FRAME {k: Code, ret: Code},
ARGUMENT {k: Code},
APPLY {unused:bool},
INVOKE {method: String, k: Code},
RETURN {unused:bool},
}
pub type Code = Box<Opcode>;
/// Scope is a dynamic environment: a set of bindings, implemented
/// as a map from variable names (as Str, representing symbols)
/// to runtime value (Obj? if not typing; or AxonVal derivatives)
#[deriving(Clone)]
struct Scope {
parent: Option<Box<Scope>>, // link to enclosing scope
//local: HashMap<String, Obj>// local vars (conceptually includes fn params)
vars: Vec<String>,
vals: Vec<Obj>
}
impl Scope
{
fn new(parent:Option<Box<Scope>>) -> Scope {
Scope { parent:parent, vars:vec!(), vals:vec!() }
}
fn get(&self, var: &String) -> Option<Obj> {
let ix_opt = self.vars.iter().position(|v| { v == var });
match ix_opt {
Some(ix) => Some(self.vals[ix].clone()),
None => match self.parent { Some(ref p) => p.get(var), None => None }
}
}
fn set(&mut self, var: &String, val: Obj) {
let ix_opt = self.vars.iter().position(|v| { v == var });
match ix_opt {
Some(ix) => { *self.vals.get_mut(ix) = val },
None => { self.vars.push(var.clone()); self.vals.push(val); }
};
}
fn extend(&self, vars: Vec<String>, vals: Vec<Obj>) -> Scope {
Scope{
parent: Some(box self.clone()),
vars: vars,
vals: vals
}
}
}
/// Frame is the dynamic (runtime) representation of a function
/// execution. It captures the caller-frame and return-address,
/// so the complete dynamic context can be traced by walking back
/// thru the caller links; a bindings context (which pushes and
/// pops local scopes for variable definitions); the AST for the
/// function's code; and the instruction-pointer which indicates
/// the current point of execution in the code.
#[deriving(Clone)]
struct Frame {
// *X* when this frame returns, exec. resumes from caller.code[ret] (ie. ret is index into code of caller)
ret: Code,
// *E* parms,locals
bindings: Scope,
// *R* accumulator of arg vals, to be combined w/ param names in extending env
valueRib: Vec<Obj>,
// *S* previous frame
caller: Option<Box<Frame>>,
//code: Code //belongs in Frame (there's a frame for every lambda definition)
}
impl Frame {
fn make(env:Scope, rib: Vec<Obj>, ret: Code, caller: Option<Box<Frame>>)
-> Frame
{
Frame { bindings:env, valueRib:rib, ret:ret, caller:caller }
}
}
/// closure captures the environment where it was created; when called,
/// it binds its params to actual-arg values (in left-to-right listed order)
/// and extends its environment with those bindings, and executes its
/// body with that extended environment.
#[deriving(Clone)]
pub struct Closure {
// names of parameters to be applied to closure
params: Vec<String>,
// static environment (lexical scope, captures scopes enclosing definition)
env: Scope,
// code implementing body of closure.
body: Code
}
impl Closure {
fn make(params: Vec<String>, env: Scope, body: Code) -> Closure {
Closure { params:params, env:env, body:body }
}
}
/// The VM below is fundamentally a state machine, of course, and
/// the five registers capture the entire current-state of that machine.
struct VMState
{
/////////////////////////////////////////////////////////////////////
// Machine Registers
// accumulator (most-recently-evaluated-expression value)
A: Obj,
// next instruction to be executed (source is compiled into a directed-graph of Opcode)
X: Code,
// current (lexical) environment (bindings map, context, ...)
E: Scope,
// value rib (accumulator for values of arguments to a fn application)
R: Vec<Obj>,
// control stack (ptr to top call frame; frames have link to prev frame)
S: Frame
}
impl VMState {
fn make(a:Obj, x:Code, e:Scope, r:Vec<Obj>, s:Frame) -> VMState {
VMState { A:a, X:x, E:e, R:r, S:s }
}
fn accumulator(&self) -> &Obj { &self.A }
fn program(&self) -> &Code { &self.X }
fn environment(&self) -> &Scope { &self.E }
fn arguments(&self) -> &Vec<Obj> { &self.R }
fn stackframe(&self) -> &Frame { &self.S }
}
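// Hedged bootstrap sketch: wiring the five registers for a fresh run. The
// dummy root frame returning into HALT is an assumption; the original sources
// do not show machine construction.
fn example_boot(program: Code) -> Machine {
    let root = Frame::make(Scope::new(None), vec!(), box HALT{unused:true}, None);
    Machine::init(VMState::make(ONil, program, Scope::new(None), vec!(), root))
}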
///////////////////////////////////////////////////////////////////////////////
// axon machine: definition and implementation of virtual machine for
// scheme-like semantics
//
// let code be an in-memory graph (DAG) of instructions, where the entry-point
// to a sub-program is a single instruction.
// let instruction be a composition of:
// - opcode, an enumeration identifying its type
// - operands, compile-time constant arguments to the instruction
// - links, 0, 1, or 2 links to successor-instructions.
// note the single exception: the 'nuate instruction takes a Frame
// argument. This means that (as written), compiled code that
// includes call/cc won't be serializable, because the live control-stack
// frames aren't serializable. This only matters if we start thinking
// about serializing execution-in-process code and moving it to a
// different machine for resumption.
// ...
// ...
// A VM with 5 registers, 12 primitive instructions, and
// 3 basic data structures:
// - Frame captures a call-frame and maintains a dynamic control stack
// - Scope manages bindings of variables to values in lexically nested scopes
// - Closure binds parameters to actual args and executes code
struct Machine {
state: VMState
}
impl Machine
{
fn init(state: VMState) -> Machine { Machine { state:state } }
fn step(&mut self) -> Option<Obj> {
let (mut A,X,mut E,mut R,mut S) = (
self.state.A.clone(),
self.state.X.clone(),
self.state.E.clone(),
self.state.R.clone(),
self.state.S.clone()
);
let x = match *X {
// case HALT : return // and return A
HALT {..} => {
box HALT {unused:true}
},
// case REFER : I: REFER ; A = E[I.var]; X = I.next
REFER {var:ref var, k:ref k} => {
A = E.get(var).expect("yowza");
k.clone()
},
// case CONSTANT: I: CONSTANT; A = I.obj; X = I.next
CONSTANT {obj:ref obj, k:ref k} => {
A = obj.clone();
k.clone()
},
// | e(I.vars, E, I.body); X = I.next
CLOSE {vars:ref vars, body:ref body, k:ref k} => {
let a = Closure { params:vars.clone(), env:E.clone(), body:body.clone() };
A = OClosure(a);
k.clone()
},
// case TEST : I: TEST ; X = (A == true) ? I.thenc : I.elsec
TEST {kthen:ref kthen, kelse:ref kelse} => {
let k = //if A == true { kthen } else { kelse };
match A {
OBool(true) => { kthen },
//OBool(false) => { kelse },
_ => { kelse }
};
k.clone()
},
// case ASSIGN : I: ASSIGN ; E[I.var] = A; X = I.next
ASSIGN {var:ref var, k:ref k} => {
E.set(var, A.clone());
k.clone()
},
// case CONTI : I: CONTI ; A = capture_cc(S); X = I.next
CONTI {k:ref k} => {
let a = Machine::capture_cc(&S);
A = OClosure(a);
k.clone()
},
// case NUATE : I: NUATE ; A = E[I.var]; S = I.s; X = RETURN;
NUATE {s:ref s, var:ref var} => {
A = E.get(var).expect("yup");
S = s.clone(); // restore the captured control stack, per the capture_cc docs below
box RETURN {unused:true}
},
// case FRAME : I: FRAME ; S = Frame(E, R, I.ret, S); R = [,]; X = I.next
FRAME {k:ref k, ret:ref ret} => {
let s = Frame {
ret: ret.clone(),
bindings: E.clone(),
valueRib: R.clone(),
caller:Some(box S.clone())
};
S = s;
R = vec!();
k.clone()
},
// case ARGUMENT: I: ARGUMENT; R.add(A); X = I.next
ARGUMENT {k:ref k} => {
R.push(A.clone());
k.clone()
},
// case APPLY : I: APPLY ; closure := (AxonClosure) A
// vals := R
// vars := closure.params
// E = closure.env.extend(vars, vals)
// R = [,]
// X = closure.body
APPLY {..} => {
let closure = match A {
OClosure(ref clo) => { clo.clone() },
_ => fail!("yo! no clo")
};
let vals = R;
R = vec!();
let vars = closure.params.clone();
E = closure.env.extend(vars, vals);
closure.body
},
// case INVOKE : I: INVOKE ; obj := A
// // meth := obj.typeof.slot[I.method]
// args := (Obj?[]) R
// // A = meth.invoke(obj, args)
// R = [,]
// X = I.next
INVOKE {method:ref method, k:ref code} => {
let f = match A {
OClosure(ref clo) => { clo.clone() },
_ => fail!("no clo no mo")
};
let args = R;
R = vec!();
//TODO: A = (f)(args);
code.clone()
},
// case RETURN : I: RETURN ; X = S.ret; E = S.bindings; R = S.valueRib; S = S.caller
RETURN {..} => {
let x = S.ret;
E = S.bindings;
R = S.valueRib;
S = *S.caller.expect("DCM,ICU");
x
},
};
let retval = A.clone();
self.state = VMState {
A:A,
X:x,
E:E,
R:R,
S:S
};
//notifyObservers
Some(retval)
}
fn done(&self) -> bool {
match *self.state.X {
HALT {..} => { true },
_ => { false }
}
}
fn run(&mut self) -> Option<Obj> {
loop {
let retval = self.step();
if self.done() {
return retval;
}
}
}
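// Hedged end-to-end sketch: compile and run `(set! x 42)` on a machine wired
// like `example_boot` above. Note that with the placeholder `_` arm in
// `compile`, the Object literal currently lowers to ONil, so the accumulator
// ends up ONil rather than 42.
fn example_eval() -> Option<Obj> {
    let prog = compile(Set("x".to_string(), box Object(OInt(42))), box HALT{unused:true});
    let root = Frame::make(Scope::new(None), vec!(), box HALT{unused:true}, None);
    let mut m = Machine::init(VMState::make(ONil, prog, Scope::new(None), vec!(), root));
    m.run()
}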
/// a continuation is a closure that in addition has access to the frame
/// in which it was created (where call/cc was called).
/// the body of a continuation closure, when executed, restores the
/// saved frame (which includes its calling frames) (pg. 50)
///
/// a continuation generates a closure that captures
/// the current control stack; the body of the generated
/// closure is an instruction that will restore the
/// captured stack.
fn capture_cc(s: &Frame) -> Closure {
let v = "__V__";
let body = box NUATE{ s:s.clone(), var:v.to_string() };
let env = Scope::new(None);
let vars = vec!(v.to_string());
Closure { params:vars, env:env, body:body }
}
} | case CLOSE : I: CLOSE ; A = Closur | conditional_block |
lib.rs | // (A,X,E,R,S)
//
// Lizzie Borden took an axe
// And gave her mother forty whacks.
// When she saw what she had done,
// She gave her father forty-one.
//
#![feature(struct_variant)]
#![allow(dead_code)]
#![allow(uppercase_variables)]
#![allow(unused_variable)]
#![allow(unused_imports)]
//#![allow(visible_private_types)]
use std::collections::hashmap::HashMap;
mod r0;
mod r1;
// scheme kinda source-language
#[deriving(Clone)]
pub enum CoreLanguage {
// <core> → <object>
// <core> → <variable>
// <core> → (quote <object>)
// <core> → (lambda (<variable> ... ) <core>)
// <core> → (if <core> <core> <core>)
// <core> → (set! <variable> <core>)
// <core> → (call/cc <core>)
// <core> → (<core> <core> ... )
Object(Obj),
Variable(String),
Quote(Core),
Lambda(Vec<String>, Core),
If(Core, Core, Core),
Set(String, Core),
CallCC(Core),
List(Vec<Core>)
}
pub type Core = Box<CoreLanguage>;
#[deriving(Clone)]
pub enum Obj {
ONil,
OBool(bool),
OInt(i32),
OFloat(f32),
OStr(String),
OClosure(Closure)
}
//(define compile
// (lambda (x next)
// (cond
// [(symbol? x)
// (list ’refer x next)]
// [(pair? x)
// (record-case x
// [quote (obj)
// (list ’constant obj next)]
// [lambda (vars body)
// (list ’close vars (compile body ’(return)) next)]
// [if (test then else)
// (let ([thenc (compile then next)]
// [elsec (compile else next)])
// (compile test (list ’test thenc elsec)))]
// [set! (var x)
// (compile x (list ’assign var next))]
// [call/cc (x)
// (let ([c (list ’conti
// (list ’argument
// (compile x ’(apply))))])
// (if (tail? next)
// c
// (list ’frame next c)))]
// [else
// (recur loop ([args (cdr x)]
// [c (compile (car x) ’(apply))])
// (if (null? args)
// (if (tail? next)
// c
// (list ’frame next c))
// (loop (cdr args)
// (compile (car args)
// (list ’argument c)))))])]
// [else
// (list ’constant x next)])))
pub fn compile(x: CoreLanguage, next: Code) -> Code {
match x {
Variable(str) => {
box REFER{var:str, k:next}
},
Quote(obj) => {
box CONSTANT{obj:ONil, k:next}
},
Lambda(vars, body) => {
box CLOSE{ vars:vars, body:compile(*body, box RETURN{unused:true}), k:next }
},
If(test, seq, alt) => {
let thenc = compile(*seq, next.clone());
let elsec = compile(*alt, next.clone());
compile(*test, box TEST{kthen:thenc, kelse:elsec})
},
Set(var, x) => {
compile(*x, box ASSIGN{var:var, k:next} )
},
CallCC(x) => {
let c = box CONTI{
k: box ARGUMENT{ k:compile(*x, box APPLY{unused:true}) }
};
if is_tail(&next) { c } else { box FRAME{k:next, ret:c} }
},
List(x) => {
let args = x.slice_from(1);
let mut c = compile((*x[0]).clone(), box APPLY{unused:true});
for arg in args.iter() {
c = compile((**arg).clone(), box ARGUMENT{k:c});
}
if is_tail(&next) { c } else { box FRAME{k:next, ret:c} }
}
_ =>
{ box CONSTANT{obj:ONil /*x*/, k:next} }
}
}
fn is_tail(x: &Code) -> bool {
match **x {
RETURN{..} => true,
_ => false
}
}
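// Companion sketch to the If arm above: `(if x y z)` lowers to a TEST whose
// then/else branches each continue into (a clone of) the same `next`
// instruction, HALT here.
fn example_compile_if() -> Code {
    let ast = If(box Variable("x".to_string()),
                 box Variable("y".to_string()),
                 box Variable("z".to_string()));
    compile(ast, box HALT{unused:true})
}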
///////////////////////////////////////////////////////////////////////////////
// Opcode
// these are a dozen primitive instructions that implement scheme-like
// semantics. This is applicative-order lambda calculus with lexically-scoped
// environments: everything reduces to function calls where arguments are
// evaluated before application of function to arguments; variables are
// bound in their static (lexical) scope like Scheme, not in their dynamic
// (runtime) scope like earlier Lisps.
// Execution model is heap-based and there is support for call-with-current-continuation
// so exception semantics can be implemented easily in terms of call/cc.
#[deriving(Clone)]
pub enum Opcode {
HALT {unused:bool},
REFER {var: String, k: Code},
CONSTANT {obj: Obj, k: Code},
CLOSE {vars: Vec<String>, body: Code, k: Code},
TEST {kthen: Code, kelse: Code},
ASSIGN {var: String, k: Code},
CONTI {k: Code},
NUATE {s: Frame, var: String},
FRAME {k: Code, ret: Code},
ARGUMENT {k: Code},
APPLY {unused:bool},
INVOKE {method: String, k: Code},
RETURN {unused:bool},
}
pub type Code = Box<Opcode>;
/// Scope is a dynamic environment: a set of bindings, implemented
/// as a map from variable names (as Str, representing symbols)
/// to runtime value (Obj? if not typing; or AxonVal derivatives)
#[deriving(Clone)]
struct Scope {
parent: Option<Box<Scope>>, // link to enclosing scope
//local: HashMap<String, Obj>// local vars (conceptually includes fn params)
vars: Vec<String>,
vals: Vec<Obj>
}
impl Scope
{
fn new(parent:Option<Box<Scope>>) -> Scope {
Scope { parent:parent, vars:vec!(), vals:vec!() }
}
fn get(&self, var: &String) -> Option<Obj> {
let ix_opt = self.vars.iter().position(|v| { v == var });
match ix_opt {
Some(ix) => Some(self.vals[ix].clone()),
None => match self.parent { Some(ref p) => p.get(var), None => None }
}
}
fn set(&mut self, var: &String, val: Obj) {
let ix_opt = self.vars.iter().position(|v| { v == var });
match ix_opt {
Some(ix) => { *self.vals.get_mut(ix) = val },
None => { self.vars.push(var.clone()); self.vals.push(val); }
};
}
fn extend(&self, vars: Vec<String>, vals: Vec<Obj>) -> Scope {
Scope{
parent: Some(box self.clone()),
vars: vars,
vals: vals
}
}
}
/// Frame is the dynamic (runtime) representation of a function
/// execution. It captures the caller-frame and return-address,
/// so the complete dynamic context can be traced by walking back
/// thru the caller links; a bindings context (which pushes and
/// pops local scopes for variable definitions); the AST for the
/// function's code; and the instruction-pointer which indicates
/// the current point of execution in the code.
#[deriving(Clone)]
struct Frame {
// *X* when this frame returns, exec. resumes from caller.code[ret] (ie. ret is index into code of caller)
ret: Code,
// *E* parms,locals
bindings: Scope,
// *R* accumulator of arg vals, to be combined w/ param names in extending env
valueRib: Vec<Obj>,
// *S* previous frame
caller: Option<Box<Frame>>,
//code: Code //belongs in Frame (there's a frame for every lambda definition)
}
impl Frame {
fn make(env:Scope, rib: Vec<Obj>, ret: Code, caller: Option<Box<Frame>>)
-> Frame
{
Frame { bindings:env, valueRib:rib, ret:ret, caller:caller }
}
}
/// closure captures the environment where it was created; when called,
/// it binds its params to actual-arg values (in left-to-right listed order)
/// and extends its environment with those bindings, and executes its
/// body with that extended environment.
#[deriving(Clone)]
pub struct Closure {
// names of parameters to be applied to closure
params: Vec<String>,
// static environment (lexical scope, captures scopes enclosing definition)
env: Scope,
// code implementing body of closure.
body: Code
}
impl Closure {
fn make(params: Vec<String>, env: Scope, body: Code) -> Closure {
Closure { params:params, env:env, body:body }
}
}
/// The VM below is fundamentally a state machine, of course, and
/// the five registers capture the entire current-state of that machine.
struct VMState
{
/////////////////////////////////////////////////////////////////////
// Machine Registers
// accumulator (most-recently-evaluated-expression value)
A: Obj,
// next instruction to be executed (source is compiled into a directed-graph of Opcode)
X: Code,
// current (lexical) environment (bindings map, context, ...)
E: Scope,
// value rib (accumulator for values of arguments to a fn application)
R: Vec<Obj>, | S: Frame
}
impl VMState {
fn make(a:Obj, x:Code, e:Scope, r:Vec<Obj>, s:Frame) -> VMState {
VMState { A:a, X:x, E:e, R:r, S:s }
}
fn accumulator(&self) -> &Obj { &self.A }
fn program(&self) -> &Code { &self.X }
fn environment(&self) -> &Scope { &self.E }
fn arguments(&self) -> &Vec<Obj> { &self.R }
fn stackframe(&self) -> &Frame { &self.S }
}
///////////////////////////////////////////////////////////////////////////////
// axon machine: definition and implementation of virtual machine for
// scheme-like semantics
//
// let code be an in-memory graph (DAG) of instructions, where the entry-point
// to a sub-program is a single instruction.
// let instruction be a composition of:
// - opcode, an enumeration identifying its type
// - operands, compile-time constant arguments to the instruction
// - links, 0, 1, or 2 links to successor-instructions.
// note the single exception: the 'nuate instruction takes a Frame
// argument. This means that (as written), compiled code that
// includes call/cc won't be serializable, because the live control-stack
// frames aren't serializable. This only matters if we start thinking
// about serializing execution-in-process code and moving it to a
// different machine for resumption.
// ...
// ...
// A VM with 5 registers, 12 primitive instructions, and
// 3 basic data structures:
// - Frame captures a call-frame and maintains a dynamic control stack
// - Scope manages bindings of variables to values in lexically nested scopes
// - Closure binds parameters to actual args and executes code
struct Machine {
state: VMState
}
impl Machine
{
fn init(state: VMState) -> Machine { Machine { state:state } }
fn step(&mut self) -> Option<Obj> {
let (mut A,X,mut E,mut R,mut S) = (
self.state.A.clone(),
self.state.X.clone(),
self.state.E.clone(),
self.state.R.clone(),
self.state.S.clone()
);
let x = match *X {
// case HALT : return // and return A
HALT {..} => {
box HALT {unused:true}
},
// case REFER : I: REFER ; A = E[I.var]; X = I.next
REFER {var:ref var, k:ref k} => {
A = E.get(var).expect("yowza");
k.clone()
},
// case CONSTANT: I: CONSTANT; A = I.obj; X = I.next
CONSTANT {obj:ref obj, k:ref k} => {
A = obj.clone();
k.clone()
},
// case CLOSE : I: CLOSE ; A = Closure(I.vars, E, I.body); X = I.next
CLOSE {vars:ref vars, body:ref body, k:ref k} => {
let a = Closure { params:vars.clone(), env:E.clone(), body:body.clone() };
A = OClosure(a);
k.clone()
},
// case TEST : I: TEST ; X = (A == true) ? I.thenc : I.elsec
TEST {kthen:ref kthen, kelse:ref kelse} => {
let k = //if A == true { kthen } else { kelse };
match A {
OBool(true) => { kthen },
//OBool(false) => { kelse },
_ => { kelse }
};
k.clone()
},
// case ASSIGN : I: ASSIGN ; E[I.var] = A; X = I.next
ASSIGN {var:ref var, k:ref k} => {
E.set(var, A.clone());
k.clone()
},
// case CONTI : I: CONTI ; A = capture_cc(S); X = I.next
CONTI {k:ref k} => {
let a = Machine::capture_cc(&S);
A = OClosure(a);
k.clone()
},
// case NUATE : I: NUATE ; A = E[I.var]; S = I.s; X = RETURN;
NUATE {s:ref s, var:ref var} => {
A = E.get(var).expect("yup");
S = s.clone(); // restore the captured control stack, per the capture_cc docs below
box RETURN {unused:true}
},
// case FRAME : I: FRAME ; S = Frame(E, R, I.ret, S); R = [,]; X = I.next
FRAME {k:ref k, ret:ref ret} => {
let s = Frame {
ret: ret.clone(),
bindings: E.clone(),
valueRib: R.clone(),
caller:Some(box S.clone())
};
S = s;
R = vec!();
k.clone()
},
// case ARGUMENT: I: ARGUMENT; R.add(A); X = I.next
ARGUMENT {k:ref k} => {
R.push(A.clone());
k.clone()
},
// case APPLY : I: APPLY ; closure := (AxonClosure) A
// vals := R
// vars := closure.params
// E = closure.env.extend(vars, vals)
// R = [,]
// X = closure.body
APPLY {..} => {
let closure = match A {
OClosure(ref clo) => { clo.clone() },
_ => fail!("yo! no clo")
};
let vals = R;
R = vec!();
let vars = closure.params.clone();
E = closure.env.extend(vars, vals);
closure.body
},
// case INVOKE : I: INVOKE ; obj := A
// // meth := obj.typeof.slot[I.method]
// args := (Obj?[]) R
// // A = meth.invoke(obj, args)
// R = [,]
// X = I.next
INVOKE {method:ref method, k:ref code} => {
let f = match A {
OClosure(ref clo) => { clo.clone() },
_ => fail!("no clo no mo")
};
let args = R;
R = vec!();
//TODO: A = (f)(args);
code.clone()
},
// case RETURN : I: RETURN ; X = S.ret; E = S.bindings; R = S.valueRib; S = S.caller
RETURN {..} => {
let x = S.ret;
E = S.bindings;
R = S.valueRib;
S = *S.caller.expect("DCM,ICU");
x
},
};
let retval = A.clone();
self.state = VMState {
A:A,
X:x,
E:E,
R:R,
S:S
};
//notifyObservers
Some(retval)
}
fn done(&self) -> bool {
match *self.state.X {
HALT {..} => { true },
_ => { false }
}
}
fn run(&mut self) -> Option<Obj> {
loop {
let retval = self.step();
if self.done() {
return retval;
}
}
}
/// a continuation is a closure that in addition has access to the frame
/// in which it was created (where call/cc was called).
/// the body of a continuation closure, when executed, restores the
/// saved frame (which includes its calling frames) (pg. 50)
///
/// a continuation generates a closure that captures
/// the current control stack; the body of the generated
/// closure is an instruction that will restore the
/// captured stack.
fn capture_cc(s: &Frame) -> Closure {
let v = "__V__";
let body = box NUATE{ s:s.clone(), var:v.to_string() };
let env = Scope::new(None);
let vars = vec!(v.to_string());
Closure { params:vars, env:env, body:body }
}
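// Illustrative check of the shape produced above: the captured continuation is
// a one-parameter closure (the "__V__" placeholder) whose body is a NUATE over
// the saved frame.
fn example_capture(frame: &Frame) -> Closure {
    Machine::capture_cc(frame)
}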
} |
// control stack (ptr to top call frame; frames have link to prev frame) | random_line_split |
nvg.rs | //! NanoVG is a small antialiased vector graphics rendering library with a lean
//! API modeled after the HTML5 Canvas API. It can be used to draw gauge
//! instruments in MSFS. See `Gauge::create_nanovg`.
use crate::sys;
type Result = std::result::Result<(), Box<dyn std::error::Error>>;
/// A NanoVG render context.
pub struct Context {
ctx: *mut sys::NVGcontext,
}
impl Context {
/// Create a NanoVG render context from an `FsContext`.
pub fn create(fs_ctx: sys::FsContext) -> Option<Self> {
let uninit = std::mem::MaybeUninit::<sys::NVGparams>::zeroed();
let mut params = unsafe { uninit.assume_init() };
params.userPtr = fs_ctx;
params.edgeAntiAlias = 1;
let ctx = unsafe { sys::nvgCreateInternal(&mut params) };
if ctx.is_null() {
None
} else {
Some(Self { ctx })
}
}
/// Draw a frame.
pub fn draw_frame<F: Fn(&Frame) -> Result>(&self, width: usize, height: usize, f: F) {
unsafe {
sys::nvgBeginFrame(self.ctx, width as f32, height as f32, 1.0);
}
let frame = Frame { ctx: self.ctx };
match f(&frame) {
Ok(()) => unsafe {
sys::nvgEndFrame(self.ctx);
},
Err(_) => unsafe {
sys::nvgCancelFrame(self.ctx);
},
}
}
/// NanoVG allows you to load .ttf files and use the font to render text.
///
/// The appearance of the text can be defined by setting the current text style
/// and by specifying the fill color. Common text and font settings such as
/// font size, letter spacing and text align are supported. Font blur allows you
/// to create simple text effects such as drop shadows.
///
/// At render time the font face can be set based on the font handles or name.
///
/// Font measure functions return values in local space, the calculations are
/// carried out in the same resolution as the final rendering. This is done because
/// the text glyph positions are snapped to the nearest pixel for sharp rendering.
///
/// The local space means that values are not rotated or scaled as per the current
/// transformation. For example if you set font size to 12, which would mean that
/// line height is 16, then regardless of the current scaling and rotation, the
/// returned line height is always 16. Some measures may vary because of the scaling
/// since aforementioned pixel snapping.
///
/// While this may sound a little odd, the setup allows you to always render the
/// same way regardless of scaling.
///
/// Note: currently only solid color fill is supported for text.
pub fn create_font(
&self,
name: &str,
filename: &str,
) -> std::result::Result<Font, Box<dyn std::error::Error>> {
let name = std::ffi::CString::new(name).unwrap();
let filename = std::ffi::CString::new(filename).unwrap();
let handle = unsafe { sys::nvgCreateFont(self.ctx, name.as_ptr(), filename.as_ptr()) };
match handle {
-1 => Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
"unable to load font",
))),
_ => Ok(Font { handle }),
}
}
/// NanoVG allows you to load jpg, png, psd, tga, pic and gif files to be used for rendering.
/// In addition you can upload your own image. The image loading is provided by stb_image.
pub fn create_image(
&self,
filename: &str,
) -> std::result::Result<Image, Box<dyn std::error::Error>> {
let filename = std::ffi::CString::new(filename).unwrap();
let handle = unsafe { sys::nvgCreateImage(self.ctx, filename.as_ptr(), 0) };
match handle {
-1 => Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
"unable to load image",
))),
_ => Ok(Image {
ctx: self.ctx,
handle,
}),
}
}
}
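// Hedged usage sketch: a typical per-frame callback. The 640x480 size, the
// color, and the rectangle are placeholder values, not from the original.
fn example_draw(ctx: &Context) {
    ctx.draw_frame(640, 480, |frame| {
        let style = Style::default().fill(Color::from_rgb(0xff, 0x00, 0x00));
        frame.draw_path(&style, |path| {
            path.rect(10.0, 10.0, 100.0, 50.0); // one filled rectangle
            Ok(())
        })
    });
}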
impl Drop for Context {
fn drop(&mut self) {
unsafe {
sys::nvgDeleteInternal(self.ctx);
}
}
}
/// Methods to draw on a frame. See `Context::draw_frame`.
pub struct Frame {
ctx: *mut sys::NVGcontext,
}
impl Frame {
/// Draw a path.
pub fn draw_path<F: Fn(&Path) -> Result>(&self, style: &Style, f: F) -> Result {
unsafe {
// sys::nvgSave(self.ctx);
// sys::nvgReset(self.ctx);
sys::nvgBeginPath(self.ctx);
}
if let Some(stroke) = &style.stroke {
match stroke {
PaintOrColor::Paint(p) => unsafe {
sys::nvgStrokePaint(self.ctx, &p.0);
},
PaintOrColor::Color(c) => unsafe {
sys::nvgStrokeColor(self.ctx, &c.0);
},
}
}
if let Some(fill) = &style.fill {
match fill {
PaintOrColor::Paint(p) => unsafe {
sys::nvgFillPaint(self.ctx, &p.0);
},
PaintOrColor::Color(c) => unsafe {
sys::nvgFillColor(self.ctx, &c.0);
},
}
}
let path = Path { ctx: self.ctx };
let r = f(&path);
if style.stroke.is_some() {
unsafe {
sys::nvgStroke(self.ctx);
}
}
if style.fill.is_some() {
unsafe {
sys::nvgFill(self.ctx);
}
}
/*
unsafe {
sys::nvgRestore(self.ctx);
}
*/
r
}
}
/// A path.
pub struct Path {
ctx: *mut sys::NVGcontext,
}
impl Path {
/// Starts new sub-path with specified point as first point.
pub fn move_to(&self, x: f32, y: f32) {
unsafe {
sys::nvgMoveTo(self.ctx, x, y);
}
}
/// Adds line segment from the last point in the path to the specified point.
pub fn line_to(&self, x: f32, y: f32) {
unsafe {
sys::nvgLineTo(self.ctx, x, y);
}
}
/// Adds cubic bezier segment from last point in the path via two control points to the specified point.
pub fn bezier_to(&self, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32) {
unsafe {
sys::nvgBezierTo(self.ctx, c1x, c1y, c2x, c2y, x, y);
}
}
/// Adds quadratic bezier segment from last point in the path via a control point to the
/// specified point.
pub fn quad_to(&self, cx: f32, cy: f32, x: f32, y: f32) {
unsafe {
sys::nvgQuadTo(self.ctx, cx, cy, x, y);
}
}
/// Adds an arc segment at the corner defined by the last path point, and two specified points.
pub fn arc_to(&self, x1: f32, y1: f32, x2: f32, y2: f32, radius: f32) {
unsafe {
sys::nvgArcTo(self.ctx, x1, y1, x2, y2, radius);
}
}
/// Closes current sub-path with a line segment.
pub fn close_path(&self) {
unsafe {
sys::nvgClosePath(self.ctx);
}
}
/// Creates a new circle arc shaped sub-path. The arc center is at (`cx`,`cy`), the arc radius
/// is `r`, and the arc is drawn from angle `a0` to `a1`, and swept in direction `dir`.
/// Angles are in radians.
pub fn arc(&self, cx: f32, cy: f32, r: f32, a0: f32, a1: f32, dir: Direction) {
unsafe {
sys::nvgArc(self.ctx, cx, cy, r, a0, a1, dir.to_sys() as _);
}
}
/// Creates a new oval arc shaped sub-path. The arc center is at (`cx`, `cy`), the arc radius
/// is (`rx`, `ry`), and the arc is drawn from angle `a0` to `a1`, and swept in direction `dir`.
#[allow(clippy::too_many_arguments)]
pub fn elliptical_arc(
&self,
cx: f32,
cy: f32,
rx: f32,
ry: f32,
a0: f32,
a1: f32,
dir: Direction,
) {
unsafe {
sys::nvgEllipticalArc(self.ctx, cx, cy, rx, ry, a0, a1, dir.to_sys() as _);
}
}
/// Creates new rectangle shaped sub-path.
pub fn rect(&self, x: f32, y: f32, w: f32, h: f32) {
unsafe {
sys::nvgRect(self.ctx, x, y, w, h);
}
}
/// Creates a new rounded rectangle sub-path with rounded corners
#[allow(clippy::many_single_char_names)]
pub fn rounded_rect(&self, x: f32, y: f32, w: f32, h: f32, r: f32) {
unsafe {
sys::nvgRoundedRect(self.ctx, x, y, w, h, r);
}
}
/// Creates new rounded rectangle shaped sub-path with varying radii for each corner.
#[allow(clippy::too_many_arguments)]
#[allow(clippy::many_single_char_names)]
pub fn rounded_rect_varying(
&self,
x: f32,
y: f32,
w: f32,
h: f32,
rad_top_left: f32,
rad_top_right: f32,
rad_bottom_right: f32,
rad_bottom_left: f32,
) {
unsafe {
sys::nvgRoundedRectVarying(
self.ctx,
x,
y,
w,
h,
rad_top_left,
rad_top_right,
rad_bottom_right,
rad_bottom_left,
);
}
}
/// Creates a new ellipse shaped sub-path.
pub fn ellipse(&self, cx: f32, cy: f32, rx: f32, ry: f32) {
unsafe {
sys::nvgEllipse(self.ctx, cx, cy, rx, ry);
}
}
/// Creates a new circle shaped path.
pub fn circle(&self, cx: f32, cy: f32, r: f32) {
unsafe {
sys::nvgCircle(self.ctx, cx, cy, r);
}
}
// TODO: fill
}
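// Sketch pairing `arc` with a stroke-only style: a gauge-style sweep from the
// lower-left to the lower-right of a dial. The center, radius and angles are
// illustrative values.
fn example_gauge_arc(frame: &Frame) -> Result {
    let style = Style::default().stroke(Color::from_rgbf(0.9, 0.9, 0.9));
    frame.draw_path(&style, |path| {
        path.arc(120.0, 120.0, 90.0,
                 0.75 * std::f32::consts::PI, 0.25 * std::f32::consts::PI,
                 Direction::Clockwise);
        Ok(())
    })
}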
/// Winding direction
#[derive(Debug, Clone, Copy)]
pub enum Direction {
/// Winding for holes.
Clockwise,
/// Winding for solid shapes.
CounterClockwise,
}
impl Direction {
fn | (self) -> sys::NVGwinding {
match self {
Direction::Clockwise => sys::NVGwinding_NVG_CW,
Direction::CounterClockwise => sys::NVGwinding_NVG_CCW,
}
}
}
#[derive(Debug)]
#[doc(hidden)]
pub enum PaintOrColor {
Paint(Paint),
Color(Color),
}
impl From<Paint> for PaintOrColor {
fn from(p: Paint) -> PaintOrColor {
PaintOrColor::Paint(p)
}
}
impl From<Color> for PaintOrColor {
fn from(c: Color) -> PaintOrColor {
PaintOrColor::Color(c)
}
}
/// The stroke and/or fill which will be applied to a path.
#[derive(Debug, Default)]
pub struct Style {
stroke: Option<PaintOrColor>,
fill: Option<PaintOrColor>,
}
impl Style {
/// Set the stroke of this style.
pub fn stroke<T: Into<PaintOrColor>>(mut self, stroke: T) -> Self {
self.stroke = Some(stroke.into());
self
}
/// Set the fill of this style.
pub fn fill<T: Into<PaintOrColor>>(mut self, fill: T) -> Self {
self.fill = Some(fill.into());
self
}
}
/// Colors in NanoVG are stored as unsigned ints in ABGR format.
#[derive(Debug)]
pub struct Color(sys::NVGcolor);
impl Color {
/// Returns a color value from red, green, blue values. Alpha will be set to 255 (1.0).
pub fn from_rgb(r: u8, g: u8, b: u8) -> Self {
Self(unsafe { sys::nvgRGB(r, g, b) })
}
/// Returns a color value from red, green, blue values. Alpha will be set to 1.0f.
pub fn from_rgbf(r: f32, g: f32, b: f32) -> Self {
Self(unsafe { sys::nvgRGBf(r, g, b) })
}
/// Returns a color value from red, green, blue and alpha values.
pub fn from_rgba(r: u8, g: u8, b: u8, a: u8) -> Self {
Self(unsafe { sys::nvgRGBA(r, g, b, a) })
}
/// Returns a color value from red, green, blue and alpha values given as floats.
pub fn from_rgbaf(r: f32, g: f32, b: f32, a: f32) -> Self {
Self(unsafe { sys::nvgRGBAf(r, g, b, a) })
}
/// Returns color value specified by hue, saturation and lightness.
/// HSL values are all in range [0..1], alpha will be set to 255.
pub fn from_hsv(h: f32, s: f32, l: f32) -> Self {
Self(unsafe { sys::nvgHSL(h, s, l) })
}
/// Returns color value specified by hue, saturation and lightness.
/// HSL values are all in range [0..1]; alpha is taken from the `a` parameter.
pub fn from_hsva(h: f32, s: f32, l: f32, a: u8) -> Self {
Self(unsafe { sys::nvgHSLA(h, s, l, a) })
}
}
/// NanoVG supports four types of paints: linear gradient, box gradient, radial gradient and image pattern.
/// These can be used as paints for strokes and fills.
#[derive(Debug)]
pub struct Paint(sys::NVGpaint);
impl Paint {
/// Creates and returns an image pattern. Parameters (`x`, `y`) specify the left-top location of the image pattern,
/// (`w`, `h`) is the size of the image, `angle` is the rotation around the top-left corner, and `image` is the image
/// to render.
pub fn from_image(
image: &Image,
x: f32,
y: f32,
w: f32,
h: f32,
angle: f32,
alpha: f32,
) -> Paint {
Paint(unsafe { sys::nvgImagePattern(image.ctx, x, y, w, h, angle, image.handle, alpha) })
}
}
/// A font handle.
pub struct Font {
handle: std::os::raw::c_int,
}
/// An image handle.
pub struct Image {
ctx: *mut sys::NVGcontext,
handle: std::os::raw::c_int,
}
impl Image {
/// Returns the dimensions of a created image.
pub fn size(&self) -> (usize, usize) {
let mut w = 0;
let mut h = 0;
unsafe {
sys::nvgImageSize(self.ctx, self.handle, &mut w, &mut h);
}
(w as usize, h as usize)
}
}
impl Drop for Image {
fn drop(&mut self) {
unsafe {
sys::nvgDeleteImage(self.ctx, self.handle);
}
}
}
| to_sys | identifier_name |
nvg.rs | //! NanoVG is a small antialiased vector graphics rendering library with a lean
//! API modeled after the HTML5 Canvas API. It can be used to draw gauge
//! instruments in MSFS. See `Gauge::create_nanovg`.
use crate::sys;
type Result = std::result::Result<(), Box<dyn std::error::Error>>;
/// A NanoVG render context.
pub struct Context {
ctx: *mut sys::NVGcontext,
}
impl Context {
/// Create a NanoVG render context from an `FsContext`.
pub fn create(fs_ctx: sys::FsContext) -> Option<Self> {
let uninit = std::mem::MaybeUninit::<sys::NVGparams>::zeroed();
let mut params = unsafe { uninit.assume_init() };
params.userPtr = fs_ctx;
params.edgeAntiAlias = 1;
let ctx = unsafe { sys::nvgCreateInternal(&mut params) };
if ctx.is_null() {
None
} else {
Some(Self { ctx })
}
}
/// Draw a frame.
pub fn draw_frame<F: Fn(&Frame) -> Result>(&self, width: usize, height: usize, f: F) {
unsafe {
sys::nvgBeginFrame(self.ctx, width as f32, height as f32, 1.0);
}
let frame = Frame { ctx: self.ctx };
match f(&frame) {
Ok(()) => unsafe {
sys::nvgEndFrame(self.ctx);
},
Err(_) => unsafe {
sys::nvgCancelFrame(self.ctx);
},
}
}
/// NanoVG allows you to load .ttf files and use the font to render text.
///
/// The appearance of the text can be defined by setting the current text style
/// and by specifying the fill color. Common text and font settings such as
/// font size, letter spacing and text align are supported. Font blur allows you
/// to create simple text effects such as drop shadows.
///
/// At render time the font face can be set based on the font handles or name.
///
/// Font measure functions return values in local space, the calculations are
/// carried out in the same resolution as the final rendering. This is done because
/// the text glyph positions are snapped to the nearest pixel for sharp rendering.
///
/// The local space means that values are not rotated or scaled as per the current
/// transformation. For example if you set font size to 12, which would mean that
/// line height is 16, then regardless of the current scaling and rotation, the
/// returned line height is always 16. Some measures may vary because of the scaling
/// since aforementioned pixel snapping.
///
/// While this may sound a little odd, the setup allows you to always render the
/// same way regardless of scaling.
///
/// Note: currently only solid color fill is supported for text.
pub fn create_font(
&self,
name: &str,
filename: &str,
) -> std::result::Result<Font, Box<dyn std::error::Error>> {
let name = std::ffi::CString::new(name).unwrap();
let filename = std::ffi::CString::new(filename).unwrap();
let handle = unsafe { sys::nvgCreateFont(self.ctx, name.as_ptr(), filename.as_ptr()) };
match handle {
-1 => Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
"unable to load font",
))),
_ => Ok(Font { handle }),
}
}
/// NanoVG allows you to load jpg, png, psd, tga, pic and gif files to be used for rendering.
/// In addition you can upload your own image. The image loading is provided by stb_image.
pub fn create_image(
&self,
filename: &str,
) -> std::result::Result<Image, Box<dyn std::error::Error>> {
let filename = std::ffi::CString::new(filename).unwrap();
let handle = unsafe { sys::nvgCreateImage(self.ctx, filename.as_ptr(), 0) };
match handle {
-1 => Err(Box::new(std::io::Error::new(
std::io::ErrorKind::Other,
"unable to load image",
))),
_ => Ok(Image {
ctx: self.ctx,
handle,
}),
}
}
}
impl Drop for Context {
fn drop(&mut self) {
unsafe {
sys::nvgDeleteInternal(self.ctx);
}
}
}
/// Methods to draw on a frame. See `Context::draw_frame`.
pub struct Frame {
ctx: *mut sys::NVGcontext,
}
impl Frame {
/// Draw a path.
pub fn draw_path<F: Fn(&Path) -> Result>(&self, style: &Style, f: F) -> Result {
unsafe {
// sys::nvgSave(self.ctx);
// sys::nvgReset(self.ctx);
sys::nvgBeginPath(self.ctx);
}
if let Some(stroke) = &style.stroke {
match stroke {
PaintOrColor::Paint(p) => unsafe {
sys::nvgStrokePaint(self.ctx, &p.0);
},
PaintOrColor::Color(c) => unsafe {
sys::nvgStrokeColor(self.ctx, &c.0);
},
}
}
if let Some(fill) = &style.fill {
match fill {
PaintOrColor::Paint(p) => unsafe {
sys::nvgFillPaint(self.ctx, &p.0);
},
PaintOrColor::Color(c) => unsafe {
sys::nvgFillColor(self.ctx, &c.0);
},
}
}
let path = Path { ctx: self.ctx };
let r = f(&path);
if style.stroke.is_some() {
unsafe {
sys::nvgStroke(self.ctx);
}
}
if style.fill.is_some() {
unsafe {
sys::nvgFill(self.ctx);
}
}
/*
unsafe {
sys::nvgRestore(self.ctx);
}
*/
r
}
}
/// A path.
pub struct Path {
ctx: *mut sys::NVGcontext,
}
impl Path {
/// Starts new sub-path with specified point as first point.
pub fn move_to(&self, x: f32, y: f32) {
unsafe {
sys::nvgMoveTo(self.ctx, x, y);
}
}
/// Adds line segment from the last point in the path to the specified point.
pub fn line_to(&self, x: f32, y: f32) {
unsafe {
sys::nvgLineTo(self.ctx, x, y);
}
}
/// Adds cubic bezier segment from last point in the path via two control points to the specified point.
pub fn bezier_to(&self, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32) {
unsafe {
sys::nvgBezierTo(self.ctx, c1x, c1y, c2x, c2y, x, y);
}
}
/// Adds quadratic bezier segment from last point in the path via a control point to the
/// specified point.
pub fn quad_to(&self, cx: f32, cy: f32, x: f32, y: f32) {
unsafe {
sys::nvgQuadTo(self.ctx, cx, cy, x, y);
}
}
/// Adds an arc segment at the corner defined by the last path point, and two specified points.
pub fn arc_to(&self, x1: f32, y1: f32, x2: f32, y2: f32, radius: f32) {
unsafe {
sys::nvgArcTo(self.ctx, x1, y1, x2, y2, radius);
}
}
/// Closes current sub-path with a line segment.
pub fn close_path(&self) {
unsafe {
sys::nvgClosePath(self.ctx);
}
}
/// Creates a new circle arc shaped sub-path. The arc center is at (`cx`,`cy`), the arc radius
/// is `r`, and the arc is drawn from angle `a0` to `a1`, and swept in direction `dir`.
/// Angles are in radians.
pub fn arc(&self, cx: f32, cy: f32, r: f32, a0: f32, a1: f32, dir: Direction) {
unsafe {
sys::nvgArc(self.ctx, cx, cy, r, a0, a1, dir.to_sys() as _);
}
}
/// Creates a new oval arc shaped sub-path. The arc center is at (`cx`, `cy`), the arc radius
/// is (`rx`, `ry`), and the arc is drawn from angle `a0` to `a1`, and swept in direction `dir`.
#[allow(clippy::too_many_arguments)]
pub fn elliptical_arc(
&self,
cx: f32,
cy: f32,
rx: f32,
ry: f32,
a0: f32,
a1: f32,
dir: Direction,
) {
unsafe {
sys::nvgEllipticalArc(self.ctx, cx, cy, rx, ry, a0, a1, dir.to_sys() as _);
}
}
/// Creates new rectangle shaped sub-path.
pub fn rect(&self, x: f32, y: f32, w: f32, h: f32) {
unsafe {
sys::nvgRect(self.ctx, x, y, w, h);
}
}
/// Creates a new rounded rectangle sub-path with rounded corners
#[allow(clippy::many_single_char_names)]
pub fn rounded_rect(&self, x: f32, y: f32, w: f32, h: f32, r: f32) {
unsafe {
sys::nvgRoundedRect(self.ctx, x, y, w, h, r);
}
}
/// Creates new rounded rectangle shaped sub-path with varying radii for each corner.
#[allow(clippy::too_many_arguments)]
#[allow(clippy::many_single_char_names)]
pub fn rounded_rect_varying(
&self,
x: f32,
y: f32,
w: f32,
h: f32,
rad_top_left: f32,
rad_top_right: f32,
rad_bottom_right: f32,
rad_bottom_left: f32,
) {
unsafe {
sys::nvgRoundedRectVarying(
self.ctx,
x,
y,
w,
h,
rad_top_left,
rad_top_right,
rad_bottom_right,
rad_bottom_left,
);
}
}
/// Creates a new ellipse shaped sub-path.
pub fn ellipse(&self, cx: f32, cy: f32, rx: f32, ry: f32) {
unsafe {
sys::nvgEllipse(self.ctx, cx, cy, rx, ry);
}
}
/// Creates a new circle shaped path.
pub fn circle(&self, cx: f32, cy: f32, r: f32) {
unsafe {
sys::nvgCircle(self.ctx, cx, cy, r);
}
}
// TODO: fill
}
/// Winding direction
#[derive(Debug, Clone, Copy)]
pub enum Direction {
/// Winding for holes.
Clockwise,
/// Winding for solid shapes.
CounterClockwise,
}
impl Direction {
fn to_sys(self) -> sys::NVGwinding {
match self {
Direction::Clockwise => sys::NVGwinding_NVG_CW,
Direction::CounterClockwise => sys::NVGwinding_NVG_CCW,
}
}
}
#[derive(Debug)]
#[doc(hidden)]
pub enum PaintOrColor {
Paint(Paint),
Color(Color),
}
impl From<Paint> for PaintOrColor {
fn from(p: Paint) -> PaintOrColor {
PaintOrColor::Paint(p)
}
}
impl From<Color> for PaintOrColor {
fn from(c: Color) -> PaintOrColor {
PaintOrColor::Color(c)
}
}
/// The stroke and/or fill which will be applied to a path.
#[derive(Debug, Default)]
pub struct Style {
stroke: Option<PaintOrColor>,
fill: Option<PaintOrColor>, | pub fn stroke<T: Into<PaintOrColor>>(mut self, stroke: T) -> Self {
self.stroke = Some(stroke.into());
self
}
/// Set the fill of this style.
pub fn fill<T: Into<PaintOrColor>>(mut self, fill: T) -> Self {
self.fill = Some(fill.into());
self
}
}
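// Example (sketch): `fill`/`stroke` accept anything convertible into `PaintOrColor`,
// so a plain `Color` works directly through the `From` impls above.
#[allow(dead_code)]
fn example_style() -> Style {
    Style::default()
        .fill(Color::from_rgbf(0.2, 0.4, 0.8))
        .stroke(Color::from_rgb(255, 255, 255))
}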
/// Colors in NanoVG are stored as unsigned ints in ABGR format.
#[derive(Debug)]
pub struct Color(sys::NVGcolor);
impl Color {
/// Returns a color value from red, green, blue values. Alpha will be set to 255 (1.0).
pub fn from_rgb(r: u8, g: u8, b: u8) -> Self {
Self(unsafe { sys::nvgRGB(r, g, b) })
}
/// Returns a color value from red, green, blue values. Alpha will be set to 1.0f.
pub fn from_rgbf(r: f32, g: f32, b: f32) -> Self {
Self(unsafe { sys::nvgRGBf(r, g, b) })
}
/// Returns a color value from red, green, blue and alpha values.
pub fn from_rgba(r: u8, g: u8, b: u8, a: u8) -> Self {
Self(unsafe { sys::nvgRGBA(r, g, b, a) })
}
/// Returns a color value from red, green, blue and alpha values.
pub fn from_rgbaf(r: f32, g: f32, b: f32, a: f32) -> Self {
Self(unsafe { sys::nvgRGBAf(r, g, b, a) })
}
/// Returns a color value specified by hue, saturation and lightness (this wraps `nvgHSL`,
/// so the parameters are HSL despite the `hsv` in the name).
/// HSL values are all in range [0..1]; alpha will be set to 255.
pub fn from_hsv(h: f32, s: f32, l: f32) -> Self {
Self(unsafe { sys::nvgHSL(h, s, l) })
}
/// Returns a color value specified by hue, saturation, lightness and alpha (wraps `nvgHSLA`).
/// HSL values are all in range [0..1]; alpha is given in range [0..255].
pub fn from_hsva(h: f32, s: f32, l: f32, a: u8) -> Self {
Self(unsafe { sys::nvgHSLA(h, s, l, a) })
}
}
/// NanoVG supports four types of paints: linear gradient, box gradient, radial gradient and image pattern.
/// These can be used as paints for strokes and fills.
#[derive(Debug)]
pub struct Paint(sys::NVGpaint);
impl Paint {
/// Creates and returns an image pattern. Parameters (`x`, `y`) specify the left-top location of the image pattern,
/// (`w`, `h`) is the size of the image, `angle` is the rotation around the top-left corner, `image` is the image
/// to render, and `alpha` controls the transparency of the pattern.
pub fn from_image(
image: &Image,
x: f32,
y: f32,
w: f32,
h: f32,
angle: f32,
alpha: f32,
) -> Paint {
Paint(unsafe { sys::nvgImagePattern(image.ctx, x, y, w, h, angle, image.handle, alpha) })
}
}
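// Example (sketch): an image-backed paint. `load_image` is hypothetical here — the
// crate's real image-loading API lives outside this excerpt; only `from_image` is real.
// let img: Image = load_image("pattern.png");
// let tile = Paint::from_image(&img, 0.0, 0.0, 64.0, 64.0, 0.0, 1.0);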
/// A font handle.
pub struct Font {
handle: std::os::raw::c_int,
}
/// An image handle.
pub struct Image {
ctx: *mut sys::NVGcontext,
handle: std::os::raw::c_int,
}
impl Image {
/// Returns the dimensions of a created image.
pub fn size(&self) -> (usize, usize) {
let mut w = 0;
let mut h = 0;
unsafe {
sys::nvgImageSize(self.ctx, self.handle, &mut w, &mut h);
}
(w as usize, h as usize)
}
}
impl Drop for Image {
fn drop(&mut self) {
unsafe {
sys::nvgDeleteImage(self.ctx, self.handle);
}
}
}
deployment-center-state-manager.ts

import { ReplaySubject } from 'rxjs/ReplaySubject';
import { FormGroup, FormControl } from '@angular/forms';
import { WizardForm, SourceSettings } from './deployment-center-setup-models';
import { Observable } from 'rxjs/Observable';
import { CacheService } from '../../../../shared/services/cache.service';
import { ArmSiteDescriptor } from '../../../../shared/resourceDescriptors';
import { Injectable, OnDestroy } from '@angular/core';
import { Subject } from 'rxjs/Subject';
import { UserService } from '../../../../shared/services/user.service';
import {
ARMApiVersions,
ScenarioIds,
Kinds,
RuntimeStacks,
Constants,
JavaVersions,
JavaContainers,
DeploymentCenterConstants,
LogCategories,
} from '../../../../shared/models/constants';
import { TranslateService } from '@ngx-translate/core';
import { PortalResources } from '../../../../shared/models/portal-resources';
import { ArmObj } from '../../../../shared/models/arm/arm-obj';
import { Site } from '../../../../shared/models/arm/site';
import { SiteService } from '../../../../shared/services/site.service';
import { forkJoin } from 'rxjs/observable/forkJoin';
import { ScenarioService } from '../../../../shared/services/scenario/scenario.service';
import { VSOAccount } from '../../Models/vso-repo';
import { AzureDevOpsService } from './azure-devops.service';
import { GithubService } from './github.service';
import { GitHubActionWorkflowRequestContent, GitHubCommit } from '../../Models/github';
import { Guid } from 'app/shared/Utilities/Guid';
import { SubscriptionService } from 'app/shared/services/subscription.service';
import { SiteConfig } from 'app/shared/models/arm/site-config';
import { WorkflowOptions } from '../../Models/deployment-enums';
import { BehaviorSubject } from 'rxjs';
import { LogService } from '../../../../shared/services/log.service';
import { PublishingCredentials } from '../../../../shared/models/publishing-credentials';
import { HttpResult } from '../../../../shared/models/http-result';
@Injectable()
export class DeploymentCenterStateManager implements OnDestroy {
public resourceIdStream$ = new ReplaySubject<string>(1);
public wizardForm: FormGroup = new FormGroup({});
private _resourceId = '';
private _ngUnsubscribe$ = new Subject();
private _token: string;
public siteArm: ArmObj<Site>;
public siteArmObj$ = new ReplaySubject<ArmObj<Site>>();
public updateSourceProviderConfig$ = new Subject();
public selectedVstsRepoId = '';
public subscriptionName = '';
public canCreateNewSite = true;
public hideBuild = false;
public hideVstsBuildConfigure = false;
public isLinuxApp = false;
public isFunctionApp = false;
public vstsKuduOnly = false;
public vsoAccounts: VSOAccount[] = [];
public hideConfigureStepContinueButton = false;
public siteName = '';
public slotName = '';
public gitHubPublishProfileSecretGuid = '';
public isGithubActionWorkflowScopeAvailable = false;
public stack = '';
public stackVersion = '';
public gitHubTokenUpdated$ = new ReplaySubject<boolean>();
public oneDriveToken$ = new BehaviorSubject<string>('');
public dropBoxToken$ = new BehaviorSubject<string>('');
public bitBucketToken$ = new BehaviorSubject<string>('');
public gitHubToken$ = new BehaviorSubject<string>('');
public replacementPublishUrl = '';
constructor(
private _cacheService: CacheService,
private _azureDevOpsService: AzureDevOpsService,
private _translateService: TranslateService,
private _scenarioService: ScenarioService,
private _githubService: GithubService,
private _logService: LogService,
private _siteService: SiteService,
userService: UserService,
subscriptionService: SubscriptionService
) {
this.resourceIdStream$
.switchMap(r => {
this._resourceId = r;
const siteDescriptor = new ArmSiteDescriptor(this._resourceId);
this.siteName = siteDescriptor.site;
this.slotName = siteDescriptor.slot;
// TODO (michinoy): Figure out a way to only generate this guid IF github actions build provider
// is selected. This might require refactoring a ton of stuff in step-complete component to understand
// what build provider is selected.
this.gitHubPublishProfileSecretGuid = Guid.newGuid()
.toLowerCase()
.replace(/[-]/g, '');
return forkJoin(
this._siteService.getSite(this._resourceId),
this._siteService.getSiteConfig(this._resourceId),
this._siteService.getAppSettings(this._resourceId),
this._siteService.fetchSiteConfigMetadata(this._resourceId),
this._siteService.getPublishingCredentials(this._resourceId),
subscriptionService.getSubscription(siteDescriptor.subscription)
);
})
.switchMap(result => {
const [site, config, appSettings, configMetadata, publishingCredentials, sub] = result;
this.siteArm = site.result;
this.isLinuxApp = this.siteArm.kind.toLowerCase().includes(Kinds.linux);
this.isFunctionApp = this.siteArm.kind.toLowerCase().includes(Kinds.functionApp);
this.subscriptionName = sub.result.displayName;
// NOTE(michinoy): temporary fix, while the backend reinstates the scm url in the publish url property.
this.replacementPublishUrl = this.isLinuxApp ? this._getScmUri(publishingCredentials) : null;
if (config.isSuccessful && appSettings.isSuccessful && configMetadata.isSuccessful) {
this._setStackAndVersion(config.result.properties, appSettings.result.properties, configMetadata.result.properties);
}
this.siteArmObj$.next(this.siteArm);
return this._scenarioService.checkScenarioAsync(ScenarioIds.vstsDeploymentHide, { site: this.siteArm });
})
.subscribe(vstsScenarioCheck => {
this.hideBuild = vstsScenarioCheck.status === 'disabled';
});
userService
.getStartupInfo()
.takeUntil(this._ngUnsubscribe$)
.subscribe(r => {
this._token = r.token;
});
}
public get wizardValues(): WizardForm {
return this.wizardForm.value;
}
public set wizardValues(values: WizardForm) {
this.wizardForm.patchValue(values);
}
public get sourceSettings(): FormGroup {
return (this.wizardForm && (this.wizardForm.controls.sourceSettings as FormGroup)) || null;
}
public get buildSettings(): FormGroup {
return (this.wizardForm && (this.wizardForm.controls.buildSettings as FormGroup)) || null;
}
public deploy(): Observable<{ status: string; statusMessage: string; result: any }> {
switch (this.wizardValues.buildProvider) {
case 'github':
// NOTE(michinoy): Only initiate writing a workflow configuration file if the branch does not already have it OR
// the user opted to overwrite it.
if (
!this.wizardValues.sourceSettings.githubActionWorkflowOption ||
this.wizardValues.sourceSettings.githubActionWorkflowOption === WorkflowOptions.Overwrite
) {
return this._deployGithubActions().map(result => ({ status: 'succeeded', statusMessage: null, result }));
} else {
return this._deployKudu().map(result => ({ status: 'succeeded', statusMessage: null, result }));
}
default:
return this._deployKudu().map(result => ({ status: 'succeeded', statusMessage: null, result }));
}
}
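// Example (sketch): a consuming component would typically trigger deployment like this;
// `wizardService` is an injected instance of this class, the rest is illustrative.
// wizardService.deploy().subscribe(r => {
//   if (r.status === 'succeeded') { /* advance the wizard */ }
// });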
public fetchVSTSProfile() {
// if the first get fails, it's likely because the user doesn't have an account in vsts yet
// the fix for this is to do an empty post call on the same url and then get it
return this._cacheService
.get(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false))
.catch(() => {
return this._cacheService
.post(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false))
.switchMap(() => {
return this._cacheService.get(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false));
});
});
}
private _setStackAndVersion(
siteConfig: SiteConfig,
siteAppSettings: { [key: string]: string },
configMetadata: { [key: string]: string }
) {
if (this.isLinuxApp) {
this._setStackAndVersionForLinux(siteConfig);
} else {
this._setStackAndVersionForWindows(siteConfig, siteAppSettings, configMetadata);
}
}
private _setStackAndVersionForWindows(
siteConfig: SiteConfig,
siteAppSettings: { [key: string]: string },
configMetadata: { [key: string]: string }
) {
if (configMetadata['CURRENT_STACK']) {
const metadataStack = configMetadata['CURRENT_STACK'].toLowerCase();
// NOTE(michinoy): Java is special, so need to handle it carefully. Also in this case, use
// the string 'java' rather than any of the constants defined as it is not related to any of the
// defined constants.
if (metadataStack === 'java') {
this.stack = siteConfig.javaVersion === JavaVersions.WindowsVersion8 ? RuntimeStacks.java8 : RuntimeStacks.java11;
} else if (metadataStack === 'dotnet') {
this.stack = RuntimeStacks.aspnet;
} else {
this.stack = metadataStack;
}
}
if (this.stack === RuntimeStacks.node) {
this.stackVersion = siteAppSettings[Constants.nodeVersionAppSettingName];
} else if (this.stack === RuntimeStacks.python) {
this.stackVersion = siteConfig.pythonVersion;
} else if (this.stack === RuntimeStacks.java8 || this.stack === RuntimeStacks.java11) {
this.stackVersion = `${siteConfig.javaVersion}|${siteConfig.javaContainer}|${siteConfig.javaContainerVersion}`;
} else if (this.stack === RuntimeStacks.aspnet && !!siteConfig.netFrameworkVersion) {
this.stackVersion = siteConfig.netFrameworkVersion;
} else if (this.stack === '') {
this.stackVersion = '';
}
}
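// Worked example (illustrative values, assuming JavaVersions.WindowsVersion8 is '1.8'):
// CURRENT_STACK = 'java' with javaVersion = '1.8' yields stack = RuntimeStacks.java8,
// and stackVersion becomes 'javaVersion|javaContainer|javaContainerVersion',
// e.g. '1.8|TOMCAT|9.0'.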
private _setStackAndVersionForLinux(siteConfig: SiteConfig) {
const linuxFxVersionParts = siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion.split('|') : [];
const runtimeStack = linuxFxVersionParts.length > 0 ? linuxFxVersionParts[0].toLocaleLowerCase() : '';
// NOTE(michinoy): Java is special, so need to handle it carefully.
if (runtimeStack === JavaContainers.JavaSE || runtimeStack === JavaContainers.Tomcat) {
const fxVersionParts = !!siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion.split('-') : [];
const fxStack = fxVersionParts.length === 2 ? fxVersionParts[1].toLocaleLowerCase() : '';
if (fxStack === JavaVersions.LinuxVersion8 || fxStack === JavaVersions.LinuxVersion11) {
this.stack = fxStack === JavaVersions.LinuxVersion8 ? RuntimeStacks.java8 : RuntimeStacks.java11;
} else {
this.stack = '';
}
} else {
// NOTE(michinoy): So it seems that in the stack API the stack value is 'aspnet', whereas from site config, the stack identifier is
// 'dotnetcore'. Due to this mismatch, we need to hard code the conversion on the client side.
this.stack = siteConfig.linuxFxVersion.toLocaleLowerCase() === 'dotnetcore|5.0' ? RuntimeStacks.aspnet : runtimeStack;
}
this.stackVersion = !!siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion : '';
}
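// Worked example (illustrative values, assuming JavaContainers.Tomcat is 'tomcat' and
// JavaVersions.LinuxVersion11 is 'java11'): linuxFxVersion = 'TOMCAT|8.5-java11'
// -> runtimeStack = 'tomcat' (split on '|'), fxStack = 'java11' (split on '-'),
// so stack = RuntimeStacks.java11 and stackVersion = 'TOMCAT|8.5-java11'.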
private _deployGithubActions() {
const repo = this.wizardValues.sourceSettings.repoUrl.replace(`${DeploymentCenterConstants.githubUri}/`, '');
const branch = this.wizardValues.sourceSettings.branch || 'master';
const workflowInformation = this._githubService.getWorkflowInformation(
this.wizardValues.buildSettings,
this.wizardValues.sourceSettings,
this.isLinuxApp,
this.gitHubPublishProfileSecretGuid,
this.siteName,
this.slotName
);
const commitInfo: GitHubCommit = {
repoName: repo,
branchName: branch,
filePath: `.github/workflows/${workflowInformation.fileName}`,
message: this._translateService.instant(PortalResources.githubActionWorkflowCommitMessage),
contentBase64Encoded: btoa(workflowInformation.content),
committer: {
name: 'Azure App Service',
email: '[email protected]',
},
};
return this._githubService
.fetchWorkflowConfiguration(this.gitHubToken$.getValue(), this.wizardValues.sourceSettings.repoUrl, repo, branch, commitInfo.filePath)
.switchMap(fileContentResponse => {
if (fileContentResponse) {
commitInfo.sha = fileContentResponse.sha;
}
const requestContent: GitHubActionWorkflowRequestContent = {
resourceId: this._resourceId,
secretName: workflowInformation.secretName,
commit: commitInfo,
};
return this._githubService.createOrUpdateActionWorkflow(
this.getToken(),
this.gitHubToken$.getValue(),
requestContent,
this.replacementPublishUrl
);
})
.switchMap(_ => {
return this._deployKudu();
});
}
private _deployKudu() {
const payload = this.wizardValues.sourceSettings;
payload.isGitHubAction = this.wizardValues.buildProvider === 'github';
payload.isManualIntegration = this.wizardValues.sourceProvider === 'external';
if (this.wizardValues.sourceProvider === 'localgit') {
return this._cacheService
.patchArm(`${this._resourceId}/config/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: {
scmType: 'LocalGit',
},
})
.map(r => r.json());
} else {
return this._cacheService
.putArm(`${this._resourceId}/sourcecontrols/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: payload,
})
.map(r => r.json())
.catch((err, _) => {
if (payload.isGitHubAction && this._isApiSyncError(err.json())) {
// NOTE(michinoy): If the save operation was being done for GitHub Action, and
// we are experiencing the API sync error, populate the source controls properties
// manually.
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround', { resourceId: this._resourceId });
return this._updateGitHubActionSourceControlPropertiesManually(payload);
} else {
return Observable.throw(err);
}
});
}
}
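// For reference, the PUT body sent to '.../sourcecontrols/web' has roughly this shape
// (values illustrative, not taken from a real request):
// {
//   properties: {
//     repoUrl: 'https://github.com/contoso/app',
//     branch: 'main',
//     isGitHubAction: true,
//     isManualIntegration: false,
//   },
// }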
private _updateGitHubActionSourceControlPropertiesManually(sourceSettingsPayload: SourceSettings) {
return this._fetchMetadata()
.switchMap(r => {
if (r && r.result && r.result.properties) {
return this._updateMetadata(r.result.properties, sourceSettingsPayload);
} else {
return Observable.throw(r);
}
})
.switchMap(r => {
if (r && r.status === 200) {
return this._patchSiteConfigForGitHubAction();
} else {
return Observable.throw(r);
}
})
.catch(r => Observable.throw(r))
.map(r => r.json());
}
private _updateMetadata(properties: { [key: string]: string }, sourceSettingsPayload: SourceSettings) {
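// Clear any stale source-control entries first, then write back the current repo URL and branch.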
delete properties['RepoUrl'];
delete properties['ScmUri'];
delete properties['CloneUri'];
delete properties['branch'];
properties['RepoUrl'] = sourceSettingsPayload.repoUrl;
properties['branch'] = sourceSettingsPayload.branch;
return this._cacheService
.putArm(`${this._resourceId}/config/metadata`, ARMApiVersions.antaresApiVersion20181101, { properties })
.catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-update-metadata-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
private _fetchMetadata() {
return this._siteService.fetchSiteConfigMetadata(this._resourceId).catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-fetch-metadata-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
private _patchSiteConfigForGitHubAction() {
return this._cacheService
.patchArm(`${this._resourceId}/config/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: {
scmType: 'GitHubAction',
},
})
.catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-siteConfig-patch-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
// Detect the specific error which is indicative of Ant89 Geo/Stamp sync issues.
private _isApiSyncError(error: any): boolean {
return (
error.Message &&
error.Message.indexOf &&
error.Message.indexOf('500 (InternalServerError)') > -1 &&
error.Message.indexOf('GeoRegionServiceClient') > -1
);
}
private _getScmUri(publishingCredentialsResponse: HttpResult<ArmObj<PublishingCredentials>>): string {
if (
publishingCredentialsResponse.isSuccessful &&
publishingCredentialsResponse.result &&
publishingCredentialsResponse.result.properties.scmUri
) {
const scmUriParts = publishingCredentialsResponse.result.properties.scmUri.split('@');
if (scmUriParts.length > 1) {
return scmUriParts[1];
}
}
return null;
}
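// Example: an scmUri of 'https://$app:secret@app.scm.azurewebsites.net' (illustrative)
// splits on '@' into credentials and host, and 'app.scm.azurewebsites.net' is returned.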
public getToken(): string {
return `Bearer ${this._token}`;
}
ngOnDestroy(): void {
this._ngUnsubscribe$.next();
}
resetSection(formGroup: FormGroup) {
formGroup.reset();
}
markSectionAsTouched(formGroup: FormGroup) {
Object.keys(formGroup.controls).forEach(field => {
const control = formGroup.get(field);
if (control instanceof FormControl && !control.touched && !control.dirty) {
control.markAsTouched();
control.updateValueAndValidity({ onlySelf: true, emitEvent: false });
} else if (control instanceof FormGroup) {
this.markSectionAsTouched(control);
}
});
}
}
deployment-center-state-manager.ts | import { ReplaySubject } from 'rxjs/ReplaySubject';
import { FormGroup, FormControl } from '@angular/forms';
import { WizardForm, SourceSettings } from './deployment-center-setup-models';
import { Observable } from 'rxjs/Observable';
import { CacheService } from '../../../../shared/services/cache.service';
import { ArmSiteDescriptor } from '../../../../shared/resourceDescriptors';
import { Injectable, OnDestroy } from '@angular/core';
import { Subject } from 'rxjs/Subject';
import { UserService } from '../../../../shared/services/user.service';
import {
ARMApiVersions,
ScenarioIds,
Kinds,
RuntimeStacks,
Constants,
JavaVersions,
JavaContainers,
DeploymentCenterConstants,
LogCategories,
} from '../../../../shared/models/constants';
import { TranslateService } from '@ngx-translate/core';
import { PortalResources } from '../../../../shared/models/portal-resources';
import { ArmObj } from '../../../../shared/models/arm/arm-obj';
import { Site } from '../../../../shared/models/arm/site';
import { SiteService } from '../../../../shared/services/site.service';
import { forkJoin } from 'rxjs/observable/forkJoin';
import { ScenarioService } from '../../../../shared/services/scenario/scenario.service';
import { VSOAccount } from '../../Models/vso-repo';
import { AzureDevOpsService } from './azure-devops.service';
import { GithubService } from './github.service';
import { GitHubActionWorkflowRequestContent, GitHubCommit } from '../../Models/github';
import { Guid } from 'app/shared/Utilities/Guid';
import { SubscriptionService } from 'app/shared/services/subscription.service';
import { SiteConfig } from 'app/shared/models/arm/site-config';
import { WorkflowOptions } from '../../Models/deployment-enums';
import { BehaviorSubject } from 'rxjs';
import { LogService } from '../../../../shared/services/log.service';
import { PublishingCredentials } from '../../../../shared/models/publishing-credentials';
import { HttpResult } from '../../../../shared/models/http-result';
@Injectable()
export class DeploymentCenterStateManager implements OnDestroy {
public resourceIdStream$ = new ReplaySubject<string>(1);
public wizardForm: FormGroup = new FormGroup({});
private _resourceId = '';
private _ngUnsubscribe$ = new Subject();
private _token: string;
public siteArm: ArmObj<Site>;
public siteArmObj$ = new ReplaySubject<ArmObj<Site>>();
public updateSourceProviderConfig$ = new Subject();
public selectedVstsRepoId = '';
public subscriptionName = '';
public canCreateNewSite = true;
public hideBuild = false;
public hideVstsBuildConfigure = false;
public isLinuxApp = false;
public isFunctionApp = false;
public vstsKuduOnly = false;
public vsoAccounts: VSOAccount[] = [];
public hideConfigureStepContinueButton = false;
public siteName = '';
public slotName = '';
public gitHubPublishProfileSecretGuid = '';
public isGithubActionWorkflowScopeAvailable = false;
public stack = '';
public stackVersion = '';
public gitHubTokenUpdated$ = new ReplaySubject<boolean>();
public oneDriveToken$ = new BehaviorSubject<string>('');
public dropBoxToken$ = new BehaviorSubject<string>('');
public bitBucketToken$ = new BehaviorSubject<string>('');
public gitHubToken$ = new BehaviorSubject<string>('');
public replacementPublishUrl = '';
constructor(
private _cacheService: CacheService,
private _azureDevOpsService: AzureDevOpsService,
private _translateService: TranslateService,
private _scenarioService: ScenarioService,
private _githubService: GithubService,
private _logService: LogService,
private _siteService: SiteService,
userService: UserService,
subscriptionService: SubscriptionService
) {
this.resourceIdStream$
.switchMap(r => {
this._resourceId = r;
const siteDescriptor = new ArmSiteDescriptor(this._resourceId);
this.siteName = siteDescriptor.site;
this.slotName = siteDescriptor.slot;
// TODO (michinoy): Figure out a way to only generate this guid IF github actions build provider
// is selected. This might require refactoring a ton of stuff in step-complete component to understand
// what build provider is selected.
this.gitHubPublishProfileSecretGuid = Guid.newGuid()
.toLowerCase()
.replace(/[-]/g, '');
return forkJoin(
this._siteService.getSite(this._resourceId),
this._siteService.getSiteConfig(this._resourceId),
this._siteService.getAppSettings(this._resourceId),
this._siteService.fetchSiteConfigMetadata(this._resourceId),
this._siteService.getPublishingCredentials(this._resourceId),
subscriptionService.getSubscription(siteDescriptor.subscription)
);
})
.switchMap(result => {
const [site, config, appSettings, configMetadata, publishingCredentials, sub] = result;
this.siteArm = site.result;
this.isLinuxApp = this.siteArm.kind.toLowerCase().includes(Kinds.linux);
this.isFunctionApp = this.siteArm.kind.toLowerCase().includes(Kinds.functionApp);
this.subscriptionName = sub.result.displayName;
// NOTE(michinoy): temporary fix, while the backend reinstates the scm url in the publish url property.
this.replacementPublishUrl = this.isLinuxApp ? this._getScmUri(publishingCredentials) : null;
if (config.isSuccessful && appSettings.isSuccessful && configMetadata.isSuccessful) {
this._setStackAndVersion(config.result.properties, appSettings.result.properties, configMetadata.result.properties);
}
this.siteArmObj$.next(this.siteArm);
return this._scenarioService.checkScenarioAsync(ScenarioIds.vstsDeploymentHide, { site: this.siteArm });
})
.subscribe(vstsScenarioCheck => {
this.hideBuild = vstsScenarioCheck.status === 'disabled';
});
userService
.getStartupInfo()
.takeUntil(this._ngUnsubscribe$)
.subscribe(r => {
this._token = r.token;
});
}
public get wizardValues(): WizardForm {
return this.wizardForm.value;
}
public set wizardValues(values: WizardForm) {
this.wizardForm.patchValue(values);
}
public get sourceSettings(): FormGroup {
return (this.wizardForm && (this.wizardForm.controls.sourceSettings as FormGroup)) || null;
}
public get buildSettings(): FormGroup {
return (this.wizardForm && (this.wizardForm.controls.buildSettings as FormGroup)) || null;
}
public deploy(): Observable<{ status: string; statusMessage: string; result: any }> {
switch (this.wizardValues.buildProvider) {
case 'github':
// NOTE(michinoy): Only initiate writing a workflow configuration file if the branch does not already have it OR
// the user opted to overwrite it.
if (
!this.wizardValues.sourceSettings.githubActionWorkflowOption ||
this.wizardValues.sourceSettings.githubActionWorkflowOption === WorkflowOptions.Overwrite
) {
return this._deployGithubActions().map(result => ({ status: 'succeeded', statusMessage: null, result }));
} else {
return this._deployKudu().map(result => ({ status: 'succeeded', statusMessage: null, result }));
}
default:
return this._deployKudu().map(result => ({ status: 'succeeded', statusMessage: null, result }));
}
}
public fetchVSTSProfile() {
// if the first get fails, it's likely because the user doesn't have an account in vsts yet
// the fix for this is to do an empty post call on the same url and then get it
return this._cacheService
.get(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false))
.catch(() => {
return this._cacheService
.post(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false))
.switchMap(() => {
return this._cacheService.get(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false));
});
});
}
private _setStackAndVersion(
siteConfig: SiteConfig,
siteAppSettings: { [key: string]: string },
configMetadata: { [key: string]: string }
) {
if (this.isLinuxApp) {
this._setStackAndVersionForLinux(siteConfig);
} else {
this._setStackAndVersionForWindows(siteConfig, siteAppSettings, configMetadata);
}
}
private _setStackAndVersionForWindows(
siteConfig: SiteConfig,
siteAppSettings: { [key: string]: string },
configMetadata: { [key: string]: string }
) {
if (configMetadata['CURRENT_STACK']) {
const metadataStack = configMetadata['CURRENT_STACK'].toLowerCase();
// NOTE(michinoy): Java is special, so need to handle it carefully. Also in this case, use
// the string 'java' rather than any of the constants defined as it is not related to any of the
// defined constants.
if (metadataStack === 'java') {
this.stack = siteConfig.javaVersion === JavaVersions.WindowsVersion8 ? RuntimeStacks.java8 : RuntimeStacks.java11;
} else if (metadataStack === 'dotnet') {
this.stack = RuntimeStacks.aspnet;
} else {
this.stack = metadataStack;
}
}
if (this.stack === RuntimeStacks.node) {
this.stackVersion = siteAppSettings[Constants.nodeVersionAppSettingName];
} else if (this.stack === RuntimeStacks.python) {
this.stackVersion = siteConfig.pythonVersion;
} else if (this.stack === RuntimeStacks.java8 || this.stack === RuntimeStacks.java11) {
this.stackVersion = `${siteConfig.javaVersion}|${siteConfig.javaContainer}|${siteConfig.javaContainerVersion}`;
} else if (this.stack === RuntimeStacks.aspnet && !!siteConfig.netFrameworkVersion) {
this.stackVersion == siteConfig.netFrameworkVersion;
} else if (this.stack === '') {
this.stackVersion = '';
}
}
private _setStackAndVersionForLinux(siteConfig: SiteConfig) {
const linuxFxVersionParts = siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion.split('|') : [];
const runtimeStack = linuxFxVersionParts.length > 0 ? linuxFxVersionParts[0].toLocaleLowerCase() : '';
// NOTE(michinoy): Java is special, so need to handle it carefully.
if (runtimeStack === JavaContainers.JavaSE || runtimeStack === JavaContainers.Tomcat) {
const fxVersionParts = !!siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion.split('-') : [];
const fxStack = fxVersionParts.length === 2 ? fxVersionParts[1].toLocaleLowerCase() : '';
if (fxStack === JavaVersions.LinuxVersion8 || fxStack === JavaVersions.LinuxVersion11) {
this.stack = fxStack === JavaVersions.LinuxVersion8 ? RuntimeStacks.java8 : RuntimeStacks.java11;
} else {
this.stack = '';
}
} else {
// NOTE(michinoy): So it seems that in the stack API the stack value is 'aspnet', whereas from site config, the stack identifier is
// 'dotnetcore'. Due to this mismatch, we need to hard code the conversion on the client side.
this.stack = siteConfig.linuxFxVersion.toLocaleLowerCase() === 'dotnetcore|5.0' ? RuntimeStacks.aspnet : runtimeStack;
}
this.stackVersion = !!siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion : '';
}
private _deployGithubActions() {
const repo = this.wizardValues.sourceSettings.repoUrl.replace(`${DeploymentCenterConstants.githubUri}/`, '');
const branch = this.wizardValues.sourceSettings.branch || 'master';
const workflowInformation = this._githubService.getWorkflowInformation(
this.wizardValues.buildSettings,
this.wizardValues.sourceSettings,
this.isLinuxApp,
this.gitHubPublishProfileSecretGuid,
this.siteName,
this.slotName
);
const commitInfo: GitHubCommit = {
repoName: repo,
branchName: branch,
filePath: `.github/workflows/${workflowInformation.fileName}`,
message: this._translateService.instant(PortalResources.githubActionWorkflowCommitMessage),
contentBase64Encoded: btoa(workflowInformation.content),
committer: {
name: 'Azure App Service',
email: '[email protected]',
},
};
return this._githubService
.fetchWorkflowConfiguration(this.gitHubToken$.getValue(), this.wizardValues.sourceSettings.repoUrl, repo, branch, commitInfo.filePath)
.switchMap(fileContentResponse => {
if (fileContentResponse) {
commitInfo.sha = fileContentResponse.sha;
}
const requestContent: GitHubActionWorkflowRequestContent = {
resourceId: this._resourceId,
secretName: workflowInformation.secretName,
commit: commitInfo,
};
return this._githubService.createOrUpdateActionWorkflow(
this.getToken(),
this.gitHubToken$.getValue(),
requestContent,
this.replacementPublishUrl
);
})
.switchMap(_ => {
return this._deployKudu();
});
}
private _deployKudu() {
const payload = this.wizardValues.sourceSettings;
payload.isGitHubAction = this.wizardValues.buildProvider === 'github';
payload.isManualIntegration = this.wizardValues.sourceProvider === 'external';
if (this.wizardValues.sourceProvider === 'localgit') {
return this._cacheService
.patchArm(`${this._resourceId}/config/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: {
scmType: 'LocalGit',
},
})
.map(r => r.json());
} else {
return this._cacheService
.putArm(`${this._resourceId}/sourcecontrols/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: payload,
})
.map(r => r.json())
.catch((err, _) => {
if (payload.isGitHubAction && this._isApiSyncError(err.json())) {
// NOTE(michinoy): If the save operation was being done for GitHub Action, and
// we are experiencing the API sync error, populate the source controls properties
// manually.
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround', { resourceId: this._resourceId });
return this._updateGitHubActionSourceControlPropertiesManually(payload);
} else {
return Observable.throw(err);
}
});
}
}
private _updateGitHubActionSourceControlPropertiesManually(sourceSettingsPayload: SourceSettings) {
return this._fetchMetadata()
.switchMap(r => {
if (r && r.result && r.result.properties) | else {
return Observable.throw(r);
}
})
.switchMap(r => {
if (r && r.status === 200) {
return this._patchSiteConfigForGitHubAction();
} else {
return Observable.throw(r);
}
})
.catch(r => Observable.throw(r))
.map(r => r.json());
}
private _updateMetadata(properties: { [key: string]: string }, sourceSettingsPayload: SourceSettings) {
delete properties['RepoUrl'];
delete properties['ScmUri'];
delete properties['CloneUri'];
delete properties['branch'];
properties['RepoUrl'] = sourceSettingsPayload.repoUrl;
properties['branch'] = sourceSettingsPayload.branch;
return this._cacheService
.putArm(`${this._resourceId}/config/metadata`, ARMApiVersions.antaresApiVersion20181101, { properties })
.catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-update-metadata-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
private _fetchMetadata() {
return this._siteService.fetchSiteConfigMetadata(this._resourceId).catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-fetch-metadata-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
private _patchSiteConfigForGitHubAction() {
return this._cacheService
.patchArm(`${this._resourceId}/config/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: {
scmType: 'GitHubAction',
},
})
.catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-sitConfig-patch-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
// Detect the specific error which is indicative of Ant89 Geo/Stamp sync issues.
private _isApiSyncError(error: any): boolean {
return (
error.Message &&
error.Message.indexOf &&
error.Message.indexOf('500 (InternalServerError)') > -1 &&
error.Message.indexOf('GeoRegionServiceClient') > -1
);
}
private _getScmUri(publishingCredentialsResponse: HttpResult<ArmObj<PublishingCredentials>>): string {
if (
publishingCredentialsResponse.isSuccessful &&
publishingCredentialsResponse.result &&
publishingCredentialsResponse.result.properties.scmUri
) {
const scmUriParts = publishingCredentialsResponse.result.properties.scmUri.split('@');
if (scmUriParts.length > 1) {
return scmUriParts[1];
}
}
return null;
}
public getToken(): string {
return `Bearer ${this._token}`;
}
ngOnDestroy(): void {
this._ngUnsubscribe$.next();
}
resetSection(formGroup: FormGroup) {
formGroup.reset();
}
markSectionAsTouched(formGroup: FormGroup) {
Object.keys(formGroup.controls).forEach(field => {
const control = formGroup.get(field);
if (control instanceof FormControl && !control.touched && !control.dirty) {
control.markAsTouched();
control.updateValueAndValidity({ onlySelf: true, emitEvent: false });
} else if (control instanceof FormGroup) {
this.markSectionAsTouched(control);
}
});
}
}
| {
return this._updateMetadata(r.result.properties, sourceSettingsPayload);
} | conditional_block |
deployment-center-state-manager.ts | import { ReplaySubject } from 'rxjs/ReplaySubject';
import { FormGroup, FormControl } from '@angular/forms';
import { WizardForm, SourceSettings } from './deployment-center-setup-models';
import { Observable } from 'rxjs/Observable';
import { CacheService } from '../../../../shared/services/cache.service';
import { ArmSiteDescriptor } from '../../../../shared/resourceDescriptors';
import { Injectable, OnDestroy } from '@angular/core';
import { Subject } from 'rxjs/Subject';
import { UserService } from '../../../../shared/services/user.service';
import {
ARMApiVersions,
ScenarioIds,
Kinds,
RuntimeStacks,
Constants,
JavaVersions,
JavaContainers,
DeploymentCenterConstants,
LogCategories,
} from '../../../../shared/models/constants';
import { TranslateService } from '@ngx-translate/core';
import { PortalResources } from '../../../../shared/models/portal-resources';
import { ArmObj } from '../../../../shared/models/arm/arm-obj';
import { Site } from '../../../../shared/models/arm/site';
import { SiteService } from '../../../../shared/services/site.service';
import { forkJoin } from 'rxjs/observable/forkJoin';
import { ScenarioService } from '../../../../shared/services/scenario/scenario.service';
import { VSOAccount } from '../../Models/vso-repo';
import { AzureDevOpsService } from './azure-devops.service';
import { GithubService } from './github.service';
import { GitHubActionWorkflowRequestContent, GitHubCommit } from '../../Models/github';
import { Guid } from 'app/shared/Utilities/Guid';
import { SubscriptionService } from 'app/shared/services/subscription.service';
import { SiteConfig } from 'app/shared/models/arm/site-config';
import { WorkflowOptions } from '../../Models/deployment-enums';
import { BehaviorSubject } from 'rxjs';
import { LogService } from '../../../../shared/services/log.service';
import { PublishingCredentials } from '../../../../shared/models/publishing-credentials';
import { HttpResult } from '../../../../shared/models/http-result';
@Injectable()
export class DeploymentCenterStateManager implements OnDestroy {
public resourceIdStream$ = new ReplaySubject<string>(1);
public wizardForm: FormGroup = new FormGroup({});
private _resourceId = '';
private _ngUnsubscribe$ = new Subject();
private _token: string;
public siteArm: ArmObj<Site>;
public siteArmObj$ = new ReplaySubject<ArmObj<Site>>();
public updateSourceProviderConfig$ = new Subject();
public selectedVstsRepoId = '';
public subscriptionName = '';
public canCreateNewSite = true;
public hideBuild = false;
public hideVstsBuildConfigure = false;
public isLinuxApp = false;
public isFunctionApp = false;
public vstsKuduOnly = false;
public vsoAccounts: VSOAccount[] = [];
public hideConfigureStepContinueButton = false;
public siteName = '';
public slotName = '';
public gitHubPublishProfileSecretGuid = '';
public isGithubActionWorkflowScopeAvailable = false;
public stack = '';
public stackVersion = '';
public gitHubTokenUpdated$ = new ReplaySubject<boolean>();
public oneDriveToken$ = new BehaviorSubject<string>('');
public dropBoxToken$ = new BehaviorSubject<string>('');
public bitBucketToken$ = new BehaviorSubject<string>('');
public gitHubToken$ = new BehaviorSubject<string>('');
public replacementPublishUrl = '';
constructor(
private _cacheService: CacheService,
private _azureDevOpsService: AzureDevOpsService,
private _translateService: TranslateService,
private _scenarioService: ScenarioService,
private _githubService: GithubService,
private _logService: LogService,
private _siteService: SiteService,
userService: UserService,
subscriptionService: SubscriptionService
) {
this.resourceIdStream$
.switchMap(r => {
this._resourceId = r;
const siteDescriptor = new ArmSiteDescriptor(this._resourceId);
this.siteName = siteDescriptor.site;
this.slotName = siteDescriptor.slot;
// TODO (michinoy): Figure out a way to only generate this guid IF github actions build provider
// is selected. This might require refactoring a ton of stuff in step-complete component to understand
// what build provider is selected.
this.gitHubPublishProfileSecretGuid = Guid.newGuid()
.toLowerCase()
.replace(/[-]/g, '');
return forkJoin(
this._siteService.getSite(this._resourceId),
this._siteService.getSiteConfig(this._resourceId),
this._siteService.getAppSettings(this._resourceId),
this._siteService.fetchSiteConfigMetadata(this._resourceId),
this._siteService.getPublishingCredentials(this._resourceId),
subscriptionService.getSubscription(siteDescriptor.subscription)
);
})
.switchMap(result => {
const [site, config, appSettings, configMetadata, publishingCredentials, sub] = result;
this.siteArm = site.result;
this.isLinuxApp = this.siteArm.kind.toLowerCase().includes(Kinds.linux);
this.isFunctionApp = this.siteArm.kind.toLowerCase().includes(Kinds.functionApp);
this.subscriptionName = sub.result.displayName;
// NOTE(michinoy): temporary fix, while the backend reinstates the scm url in the publish url property.
this.replacementPublishUrl = this.isLinuxApp ? this._getScmUri(publishingCredentials) : null;
if (config.isSuccessful && appSettings.isSuccessful && configMetadata.isSuccessful) {
this._setStackAndVersion(config.result.properties, appSettings.result.properties, configMetadata.result.properties);
}
this.siteArmObj$.next(this.siteArm);
return this._scenarioService.checkScenarioAsync(ScenarioIds.vstsDeploymentHide, { site: this.siteArm });
})
.subscribe(vstsScenarioCheck => {
this.hideBuild = vstsScenarioCheck.status === 'disabled';
});
userService
.getStartupInfo()
.takeUntil(this._ngUnsubscribe$)
.subscribe(r => {
this._token = r.token;
});
}
public get wizardValues(): WizardForm {
return this.wizardForm.value;
}
public set wizardValues(values: WizardForm) {
this.wizardForm.patchValue(values);
}
public get sourceSettings(): FormGroup {
return (this.wizardForm && (this.wizardForm.controls.sourceSettings as FormGroup)) || null;
}
public get buildSettings(): FormGroup {
return (this.wizardForm && (this.wizardForm.controls.buildSettings as FormGroup)) || null;
}
public deploy(): Observable<{ status: string; statusMessage: string; result: any }> {
switch (this.wizardValues.buildProvider) {
case 'github':
// NOTE(michinoy): Only initiate writing a workflow configuration file if the branch does not already have it OR
// the user opted to overwrite it.
if (
!this.wizardValues.sourceSettings.githubActionWorkflowOption ||
this.wizardValues.sourceSettings.githubActionWorkflowOption === WorkflowOptions.Overwrite
) {
return this._deployGithubActions().map(result => ({ status: 'succeeded', statusMessage: null, result }));
} else {
return this._deployKudu().map(result => ({ status: 'succeeded', statusMessage: null, result }));
}
default:
return this._deployKudu().map(result => ({ status: 'succeeded', statusMessage: null, result }));
}
}
public fetchVSTSProfile() {
// if the first get fails, it's likely because the user doesn't have an account in vsts yet
// the fix for this is to do an empty post call on the same url and then get it
return this._cacheService
.get(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false))
.catch(() => {
return this._cacheService
.post(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false))
.switchMap(() => {
return this._cacheService.get(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false));
});
});
}
private _setStackAndVersion(
siteConfig: SiteConfig,
siteAppSettings: { [key: string]: string },
configMetadata: { [key: string]: string }
) {
if (this.isLinuxApp) {
this._setStackAndVersionForLinux(siteConfig);
} else {
this._setStackAndVersionForWindows(siteConfig, siteAppSettings, configMetadata);
}
}
private _setStackAndVersionForWindows(
siteConfig: SiteConfig,
siteAppSettings: { [key: string]: string },
configMetadata: { [key: string]: string }
) {
if (configMetadata['CURRENT_STACK']) {
const metadataStack = configMetadata['CURRENT_STACK'].toLowerCase();
// NOTE(michinoy): Java is special, so need to handle it carefully. Also in this case, use
// the string 'java' rather than any of the constants defined as it is not related to any of the
// defined constants.
if (metadataStack === 'java') {
this.stack = siteConfig.javaVersion === JavaVersions.WindowsVersion8 ? RuntimeStacks.java8 : RuntimeStacks.java11;
} else if (metadataStack === 'dotnet') {
this.stack = RuntimeStacks.aspnet;
} else {
this.stack = metadataStack;
}
}
if (this.stack === RuntimeStacks.node) {
this.stackVersion = siteAppSettings[Constants.nodeVersionAppSettingName];
} else if (this.stack === RuntimeStacks.python) {
this.stackVersion = siteConfig.pythonVersion;
} else if (this.stack === RuntimeStacks.java8 || this.stack === RuntimeStacks.java11) {
this.stackVersion = `${siteConfig.javaVersion}|${siteConfig.javaContainer}|${siteConfig.javaContainerVersion}`;
} else if (this.stack === RuntimeStacks.aspnet && !!siteConfig.netFrameworkVersion) {
this.stackVersion == siteConfig.netFrameworkVersion;
} else if (this.stack === '') {
this.stackVersion = '';
}
}
private _setStackAndVersionForLinux(siteConfig: SiteConfig) {
const linuxFxVersionParts = siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion.split('|') : [];
const runtimeStack = linuxFxVersionParts.length > 0 ? linuxFxVersionParts[0].toLocaleLowerCase() : '';
// NOTE(michinoy): Java is special, so need to handle it carefully.
if (runtimeStack === JavaContainers.JavaSE || runtimeStack === JavaContainers.Tomcat) {
const fxVersionParts = !!siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion.split('-') : [];
const fxStack = fxVersionParts.length === 2 ? fxVersionParts[1].toLocaleLowerCase() : '';
if (fxStack === JavaVersions.LinuxVersion8 || fxStack === JavaVersions.LinuxVersion11) {
this.stack = fxStack === JavaVersions.LinuxVersion8 ? RuntimeStacks.java8 : RuntimeStacks.java11;
} else {
this.stack = '';
}
} else {
// NOTE(michinoy): So it seems that in the stack API the stack value is 'aspnet', whereas from site config, the stack identifier is
// 'dotnetcore'. Due to this mismatch, we need to hard code the conversion on the client side.
this.stack = siteConfig.linuxFxVersion.toLocaleLowerCase() === 'dotnetcore|5.0' ? RuntimeStacks.aspnet : runtimeStack;
}
this.stackVersion = !!siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion : '';
}
private _deployGithubActions() {
const repo = this.wizardValues.sourceSettings.repoUrl.replace(`${DeploymentCenterConstants.githubUri}/`, '');
const branch = this.wizardValues.sourceSettings.branch || 'master';
const workflowInformation = this._githubService.getWorkflowInformation(
this.wizardValues.buildSettings,
this.wizardValues.sourceSettings,
this.isLinuxApp,
this.gitHubPublishProfileSecretGuid,
this.siteName,
this.slotName
);
const commitInfo: GitHubCommit = {
repoName: repo,
branchName: branch,
filePath: `.github/workflows/${workflowInformation.fileName}`,
message: this._translateService.instant(PortalResources.githubActionWorkflowCommitMessage),
contentBase64Encoded: btoa(workflowInformation.content),
committer: {
name: 'Azure App Service',
email: '[email protected]',
},
};
return this._githubService
.fetchWorkflowConfiguration(this.gitHubToken$.getValue(), this.wizardValues.sourceSettings.repoUrl, repo, branch, commitInfo.filePath)
.switchMap(fileContentResponse => {
if (fileContentResponse) {
commitInfo.sha = fileContentResponse.sha;
}
const requestContent: GitHubActionWorkflowRequestContent = {
resourceId: this._resourceId,
secretName: workflowInformation.secretName,
commit: commitInfo,
};
return this._githubService.createOrUpdateActionWorkflow(
this.getToken(),
this.gitHubToken$.getValue(),
requestContent,
this.replacementPublishUrl
);
})
.switchMap(_ => {
return this._deployKudu();
});
}
private _deployKudu() {
const payload = this.wizardValues.sourceSettings;
payload.isGitHubAction = this.wizardValues.buildProvider === 'github';
payload.isManualIntegration = this.wizardValues.sourceProvider === 'external';
if (this.wizardValues.sourceProvider === 'localgit') {
return this._cacheService
.patchArm(`${this._resourceId}/config/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: {
scmType: 'LocalGit',
},
})
.map(r => r.json());
} else {
return this._cacheService
.putArm(`${this._resourceId}/sourcecontrols/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: payload,
})
.map(r => r.json())
.catch((err, _) => {
if (payload.isGitHubAction && this._isApiSyncError(err.json())) {
// NOTE(michinoy): If the save operation was being done for GitHub Action, and
// we are experiencing the API sync error, populate the source controls properties
// manually.
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround', { resourceId: this._resourceId });
return this._updateGitHubActionSourceControlPropertiesManually(payload);
} else {
return Observable.throw(err);
}
});
}
}
private _updateGitHubActionSourceControlPropertiesManually(sourceSettingsPayload: SourceSettings) {
return this._fetchMetadata()
.switchMap(r => {
if (r && r.result && r.result.properties) {
return this._updateMetadata(r.result.properties, sourceSettingsPayload);
} else {
return Observable.throw(r); | }
})
.switchMap(r => {
if (r && r.status === 200) {
return this._patchSiteConfigForGitHubAction();
} else {
return Observable.throw(r);
}
})
.catch(r => Observable.throw(r))
.map(r => r.json());
}
private _updateMetadata(properties: { [key: string]: string }, sourceSettingsPayload: SourceSettings) {
delete properties['RepoUrl'];
delete properties['ScmUri'];
delete properties['CloneUri'];
delete properties['branch'];
properties['RepoUrl'] = sourceSettingsPayload.repoUrl;
properties['branch'] = sourceSettingsPayload.branch;
return this._cacheService
.putArm(`${this._resourceId}/config/metadata`, ARMApiVersions.antaresApiVersion20181101, { properties })
.catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-update-metadata-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
private _fetchMetadata() {
return this._siteService.fetchSiteConfigMetadata(this._resourceId).catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-fetch-metadata-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
private _patchSiteConfigForGitHubAction() {
return this._cacheService
.patchArm(`${this._resourceId}/config/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: {
scmType: 'GitHubAction',
},
})
.catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-sitConfig-patch-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
// Detect the specific error which is indicative of Ant89 Geo/Stamp sync issues.
private _isApiSyncError(error: any): boolean {
return (
error.Message &&
error.Message.indexOf &&
error.Message.indexOf('500 (InternalServerError)') > -1 &&
error.Message.indexOf('GeoRegionServiceClient') > -1
);
}
private _getScmUri(publishingCredentialsResponse: HttpResult<ArmObj<PublishingCredentials>>): string {
if (
publishingCredentialsResponse.isSuccessful &&
publishingCredentialsResponse.result &&
publishingCredentialsResponse.result.properties.scmUri
) {
const scmUriParts = publishingCredentialsResponse.result.properties.scmUri.split('@');
if (scmUriParts.length > 1) {
return scmUriParts[1];
}
}
return null;
}
public getToken(): string {
return `Bearer ${this._token}`;
}
ngOnDestroy(): void {
this._ngUnsubscribe$.next();
}
resetSection(formGroup: FormGroup) {
formGroup.reset();
}
markSectionAsTouched(formGroup: FormGroup) {
Object.keys(formGroup.controls).forEach(field => {
const control = formGroup.get(field);
if (control instanceof FormControl && !control.touched && !control.dirty) {
control.markAsTouched();
control.updateValueAndValidity({ onlySelf: true, emitEvent: false });
} else if (control instanceof FormGroup) {
this.markSectionAsTouched(control);
}
});
}
} | random_line_split |
|
deployment-center-state-manager.ts | import { ReplaySubject } from 'rxjs/ReplaySubject';
import { FormGroup, FormControl } from '@angular/forms';
import { WizardForm, SourceSettings } from './deployment-center-setup-models';
import { Observable } from 'rxjs/Observable';
import { CacheService } from '../../../../shared/services/cache.service';
import { ArmSiteDescriptor } from '../../../../shared/resourceDescriptors';
import { Injectable, OnDestroy } from '@angular/core';
import { Subject } from 'rxjs/Subject';
import { UserService } from '../../../../shared/services/user.service';
import {
ARMApiVersions,
ScenarioIds,
Kinds,
RuntimeStacks,
Constants,
JavaVersions,
JavaContainers,
DeploymentCenterConstants,
LogCategories,
} from '../../../../shared/models/constants';
import { TranslateService } from '@ngx-translate/core';
import { PortalResources } from '../../../../shared/models/portal-resources';
import { ArmObj } from '../../../../shared/models/arm/arm-obj';
import { Site } from '../../../../shared/models/arm/site';
import { SiteService } from '../../../../shared/services/site.service';
import { forkJoin } from 'rxjs/observable/forkJoin';
import { ScenarioService } from '../../../../shared/services/scenario/scenario.service';
import { VSOAccount } from '../../Models/vso-repo';
import { AzureDevOpsService } from './azure-devops.service';
import { GithubService } from './github.service';
import { GitHubActionWorkflowRequestContent, GitHubCommit } from '../../Models/github';
import { Guid } from 'app/shared/Utilities/Guid';
import { SubscriptionService } from 'app/shared/services/subscription.service';
import { SiteConfig } from 'app/shared/models/arm/site-config';
import { WorkflowOptions } from '../../Models/deployment-enums';
import { BehaviorSubject } from 'rxjs';
import { LogService } from '../../../../shared/services/log.service';
import { PublishingCredentials } from '../../../../shared/models/publishing-credentials';
import { HttpResult } from '../../../../shared/models/http-result';
@Injectable()
export class DeploymentCenterStateManager implements OnDestroy {
public resourceIdStream$ = new ReplaySubject<string>(1);
public wizardForm: FormGroup = new FormGroup({});
private _resourceId = '';
private _ngUnsubscribe$ = new Subject();
private _token: string;
public siteArm: ArmObj<Site>;
public siteArmObj$ = new ReplaySubject<ArmObj<Site>>();
public updateSourceProviderConfig$ = new Subject();
public selectedVstsRepoId = '';
public subscriptionName = '';
public canCreateNewSite = true;
public hideBuild = false;
public hideVstsBuildConfigure = false;
public isLinuxApp = false;
public isFunctionApp = false;
public vstsKuduOnly = false;
public vsoAccounts: VSOAccount[] = [];
public hideConfigureStepContinueButton = false;
public siteName = '';
public slotName = '';
public gitHubPublishProfileSecretGuid = '';
public isGithubActionWorkflowScopeAvailable = false;
public stack = '';
public stackVersion = '';
public gitHubTokenUpdated$ = new ReplaySubject<boolean>();
public oneDriveToken$ = new BehaviorSubject<string>('');
public dropBoxToken$ = new BehaviorSubject<string>('');
public bitBucketToken$ = new BehaviorSubject<string>('');
public gitHubToken$ = new BehaviorSubject<string>('');
public replacementPublishUrl = '';
constructor(
private _cacheService: CacheService,
private _azureDevOpsService: AzureDevOpsService,
private _translateService: TranslateService,
private _scenarioService: ScenarioService,
private _githubService: GithubService,
private _logService: LogService,
private _siteService: SiteService,
userService: UserService,
subscriptionService: SubscriptionService
) {
this.resourceIdStream$
.switchMap(r => {
this._resourceId = r;
const siteDescriptor = new ArmSiteDescriptor(this._resourceId);
this.siteName = siteDescriptor.site;
this.slotName = siteDescriptor.slot;
// TODO (michinoy): Figure out a way to only generate this guid IF github actions build provider
// is selected. This might require refactoring a ton of stuff in step-complete component to understand
// what build provider is selected.
this.gitHubPublishProfileSecretGuid = Guid.newGuid()
.toLowerCase()
.replace(/[-]/g, '');
return forkJoin(
this._siteService.getSite(this._resourceId),
this._siteService.getSiteConfig(this._resourceId),
this._siteService.getAppSettings(this._resourceId),
this._siteService.fetchSiteConfigMetadata(this._resourceId),
this._siteService.getPublishingCredentials(this._resourceId),
subscriptionService.getSubscription(siteDescriptor.subscription)
);
})
.switchMap(result => {
const [site, config, appSettings, configMetadata, publishingCredentials, sub] = result;
this.siteArm = site.result;
this.isLinuxApp = this.siteArm.kind.toLowerCase().includes(Kinds.linux);
this.isFunctionApp = this.siteArm.kind.toLowerCase().includes(Kinds.functionApp);
this.subscriptionName = sub.result.displayName;
// NOTE(michinoy): temporary fix, while the backend reinstates the scm url in the publish url property.
this.replacementPublishUrl = this.isLinuxApp ? this._getScmUri(publishingCredentials) : null;
if (config.isSuccessful && appSettings.isSuccessful && configMetadata.isSuccessful) {
this._setStackAndVersion(config.result.properties, appSettings.result.properties, configMetadata.result.properties);
}
this.siteArmObj$.next(this.siteArm);
return this._scenarioService.checkScenarioAsync(ScenarioIds.vstsDeploymentHide, { site: this.siteArm });
})
.subscribe(vstsScenarioCheck => {
this.hideBuild = vstsScenarioCheck.status === 'disabled';
});
userService
.getStartupInfo()
.takeUntil(this._ngUnsubscribe$)
.subscribe(r => {
this._token = r.token;
});
}
public get wizardValues(): WizardForm {
return this.wizardForm.value;
}
public set wizardValues(values: WizardForm) {
this.wizardForm.patchValue(values);
}
public get sourceSettings(): FormGroup {
return (this.wizardForm && (this.wizardForm.controls.sourceSettings as FormGroup)) || null;
}
public get buildSettings(): FormGroup {
return (this.wizardForm && (this.wizardForm.controls.buildSettings as FormGroup)) || null;
}
public deploy(): Observable<{ status: string; statusMessage: string; result: any }> {
switch (this.wizardValues.buildProvider) {
case 'github':
// NOTE(michinoy): Only initiate writing a workflow configuration file if the branch does not already have it OR
// the user opted to overwrite it.
if (
!this.wizardValues.sourceSettings.githubActionWorkflowOption ||
this.wizardValues.sourceSettings.githubActionWorkflowOption === WorkflowOptions.Overwrite
) {
return this._deployGithubActions().map(result => ({ status: 'succeeded', statusMessage: null, result }));
} else {
return this._deployKudu().map(result => ({ status: 'succeeded', statusMessage: null, result }));
}
default:
return this._deployKudu().map(result => ({ status: 'succeeded', statusMessage: null, result }));
}
}
public fetchVSTSProfile() {
// If the first GET fails, it's likely because the user doesn't have a VSTS account yet.
// The fix is to issue an empty POST to the same URL and then retry the GET.
return this._cacheService
.get(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false))
.catch(() => {
return this._cacheService
.post(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false))
.switchMap(() => {
return this._cacheService.get(AzureDevOpsService.AzDevProfileUri, true, this._azureDevOpsService.getAzDevDirectHeaders(false));
});
});
}
private _setStackAndVersion(
siteConfig: SiteConfig,
siteAppSettings: { [key: string]: string },
configMetadata: { [key: string]: string }
) {
if (this.isLinuxApp) {
this._setStackAndVersionForLinux(siteConfig);
} else {
this._setStackAndVersionForWindows(siteConfig, siteAppSettings, configMetadata);
}
}
private _setStackAndVersionForWindows(
siteConfig: SiteConfig,
siteAppSettings: { [key: string]: string },
configMetadata: { [key: string]: string }
) {
if (configMetadata['CURRENT_STACK']) {
const metadataStack = configMetadata['CURRENT_STACK'].toLowerCase();
// NOTE(michinoy): Java is special, so it needs careful handling. Also, use the literal string
// 'java' here rather than one of the defined constants, since it does not correspond to any
// of them.
if (metadataStack === 'java') {
this.stack = siteConfig.javaVersion === JavaVersions.WindowsVersion8 ? RuntimeStacks.java8 : RuntimeStacks.java11;
} else if (metadataStack === 'dotnet') {
this.stack = RuntimeStacks.aspnet;
} else {
this.stack = metadataStack;
}
}
if (this.stack === RuntimeStacks.node) {
this.stackVersion = siteAppSettings[Constants.nodeVersionAppSettingName];
} else if (this.stack === RuntimeStacks.python) {
this.stackVersion = siteConfig.pythonVersion;
} else if (this.stack === RuntimeStacks.java8 || this.stack === RuntimeStacks.java11) {
this.stackVersion = `${siteConfig.javaVersion}|${siteConfig.javaContainer}|${siteConfig.javaContainerVersion}`;
} else if (this.stack === RuntimeStacks.aspnet && !!siteConfig.netFrameworkVersion) {
this.stackVersion = siteConfig.netFrameworkVersion;
} else if (this.stack === '') {
this.stackVersion = '';
}
}
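// Illustrative example (values are hypothetical, not from the original code): with
// configMetadata CURRENT_STACK = 'dotnet', the stack resolves to RuntimeStacks.aspnet;
// if siteConfig.netFrameworkVersion is then 'v4.0', stackVersion becomes 'v4.0'.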
private _setStackAndVersionForLinux(siteConfig: SiteConfig) {
const linuxFxVersionParts = siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion.split('|') : [];
const runtimeStack = linuxFxVersionParts.length > 0 ? linuxFxVersionParts[0].toLocaleLowerCase() : '';
// NOTE(michinoy): Java is special, so it needs careful handling.
if (runtimeStack === JavaContainers.JavaSE || runtimeStack === JavaContainers.Tomcat) {
const fxVersionParts = !!siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion.split('-') : [];
const fxStack = fxVersionParts.length === 2 ? fxVersionParts[1].toLocaleLowerCase() : '';
if (fxStack === JavaVersions.LinuxVersion8 || fxStack === JavaVersions.LinuxVersion11) {
this.stack = fxStack === JavaVersions.LinuxVersion8 ? RuntimeStacks.java8 : RuntimeStacks.java11;
} else {
this.stack = '';
}
} else {
// NOTE(michinoy): The stack API reports the stack value as 'aspnet', whereas the site config
// identifies it as 'dotnetcore'. Due to this mismatch, we hard-code the conversion on the client side.
this.stack = siteConfig.linuxFxVersion.toLocaleLowerCase() === 'dotnetcore|5.0' ? RuntimeStacks.aspnet : runtimeStack;
}
this.stackVersion = !!siteConfig.linuxFxVersion ? siteConfig.linuxFxVersion : '';
}
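// Illustrative example: a linuxFxVersion of 'DOTNETCORE|5.0' maps to the
// RuntimeStacks.aspnet stack (the hard-coded conversion above), while e.g.
// 'NODE|14-lts' (hypothetical value) yields the stack 'node'; in both cases
// stackVersion keeps the full linuxFxVersion string.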
private _deployGithubActions() {
const repo = this.wizardValues.sourceSettings.repoUrl.replace(`${DeploymentCenterConstants.githubUri}/`, '');
const branch = this.wizardValues.sourceSettings.branch || 'master';
const workflowInformation = this._githubService.getWorkflowInformation(
this.wizardValues.buildSettings,
this.wizardValues.sourceSettings,
this.isLinuxApp,
this.gitHubPublishProfileSecretGuid,
this.siteName,
this.slotName
);
const commitInfo: GitHubCommit = {
repoName: repo,
branchName: branch,
filePath: `.github/workflows/${workflowInformation.fileName}`,
message: this._translateService.instant(PortalResources.githubActionWorkflowCommitMessage),
contentBase64Encoded: btoa(workflowInformation.content),
committer: {
name: 'Azure App Service',
email: '[email protected]',
},
};
return this._githubService
.fetchWorkflowConfiguration(this.gitHubToken$.getValue(), this.wizardValues.sourceSettings.repoUrl, repo, branch, commitInfo.filePath)
.switchMap(fileContentResponse => {
if (fileContentResponse) {
commitInfo.sha = fileContentResponse.sha;
}
const requestContent: GitHubActionWorkflowRequestContent = {
resourceId: this._resourceId,
secretName: workflowInformation.secretName,
commit: commitInfo,
};
return this._githubService.createOrUpdateActionWorkflow(
this.getToken(),
this.gitHubToken$.getValue(),
requestContent,
this.replacementPublishUrl
);
})
.switchMap(_ => {
return this._deployKudu();
});
}
private _deployKudu() {
const payload = this.wizardValues.sourceSettings;
payload.isGitHubAction = this.wizardValues.buildProvider === 'github';
payload.isManualIntegration = this.wizardValues.sourceProvider === 'external';
if (this.wizardValues.sourceProvider === 'localgit') {
return this._cacheService
.patchArm(`${this._resourceId}/config/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: {
scmType: 'LocalGit',
},
})
.map(r => r.json());
} else {
return this._cacheService
.putArm(`${this._resourceId}/sourcecontrols/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: payload,
})
.map(r => r.json())
.catch((err, _) => {
if (payload.isGitHubAction && this._isApiSyncError(err.json())) {
// NOTE(michinoy): If the save operation was for GitHub Actions and we are
// experiencing the API sync error, populate the source control properties
// manually.
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround', { resourceId: this._resourceId });
return this._updateGitHubActionSourceControlPropertiesManually(payload);
} else {
return Observable.throw(err);
}
});
}
}
private _updateGitHubActionSourceControlPropertiesManually(sourceSettingsPayload: SourceSettings) {
return this._fetchMetadata()
.switchMap(r => {
if (r && r.result && r.result.properties) {
return this._updateMetadata(r.result.properties, sourceSettingsPayload);
} else {
return Observable.throw(r);
}
})
.switchMap(r => {
if (r && r.status === 200) {
return this._patchSiteConfigForGitHubAction();
} else {
return Observable.throw(r);
}
})
.catch(r => Observable.throw(r))
.map(r => r.json());
}
private | (properties: { [key: string]: string }, sourceSettingsPayload: SourceSettings) {
delete properties['RepoUrl'];
delete properties['ScmUri'];
delete properties['CloneUri'];
delete properties['branch'];
properties['RepoUrl'] = sourceSettingsPayload.repoUrl;
properties['branch'] = sourceSettingsPayload.branch;
return this._cacheService
.putArm(`${this._resourceId}/config/metadata`, ARMApiVersions.antaresApiVersion20181101, { properties })
.catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-update-metadata-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
private _fetchMetadata() {
return this._siteService.fetchSiteConfigMetadata(this._resourceId).catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-fetch-metadata-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
private _patchSiteConfigForGitHubAction() {
return this._cacheService
.patchArm(`${this._resourceId}/config/web`, ARMApiVersions.antaresApiVersion20181101, {
properties: {
scmType: 'GitHubAction',
},
})
.catch(err => {
this._logService.error(LogCategories.cicd, 'apiSyncErrorWorkaround-siteConfig-patch-failure', {
resourceId: this._resourceId,
error: err,
});
return Observable.throw(err);
});
}
// Detects the specific error that is indicative of Ant89 Geo/Stamp sync issues.
private _isApiSyncError(error: any): boolean {
return (
error.Message &&
error.Message.indexOf &&
error.Message.indexOf('500 (InternalServerError)') > -1 &&
error.Message.indexOf('GeoRegionServiceClient') > -1
);
}
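// A hypothetical message that would satisfy this check (both substrings must be
// present): "Operation returned 500 (InternalServerError): GeoRegionServiceClient
// failed to sync". Messages containing only one of the markers do not match.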
private _getScmUri(publishingCredentialsResponse: HttpResult<ArmObj<PublishingCredentials>>): string {
if (
publishingCredentialsResponse.isSuccessful &&
publishingCredentialsResponse.result &&
publishingCredentialsResponse.result.properties.scmUri
) {
const scmUriParts = publishingCredentialsResponse.result.properties.scmUri.split('@');
if (scmUriParts.length > 1) {
return scmUriParts[1];
}
}
return null;
}
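// Example with hypothetical credentials: an scmUri of
// 'https://$mysite:secret@mysite.scm.azurewebsites.net' yields
// 'mysite.scm.azurewebsites.net', i.e. everything after the '@'.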
public getToken(): string {
return `Bearer ${this._token}`;
}
ngOnDestroy(): void {
this._ngUnsubscribe$.next();
}
resetSection(formGroup: FormGroup) {
formGroup.reset();
}
markSectionAsTouched(formGroup: FormGroup) {
Object.keys(formGroup.controls).forEach(field => {
const control = formGroup.get(field);
if (control instanceof FormControl && !control.touched && !control.dirty) {
control.markAsTouched();
control.updateValueAndValidity({ onlySelf: true, emitEvent: false });
} else if (control instanceof FormGroup) {
this.markSectionAsTouched(control);
}
});
}
}
| _updateMetadata | identifier_name |
actix.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Actix-web API backend.
//!
//! [Actix-web](https://github.com/actix/actix-web) is an asynchronous backend
//! for HTTP API, based on the [Actix](https://github.com/actix/actix) framework.
pub use actix_web::middleware::cors::Cors;
use actix::{Actor, System};
use actix_web::{
error::ResponseError, http::header, AsyncResponder, FromRequest, HttpMessage, HttpResponse,
Query,
};
use failure::{bail, ensure, format_err, Error};
use futures::{future::Either, sync::mpsc, Future, IntoFuture, Stream};
use log::trace;
use serde::{
de::{self, DeserializeOwned},
ser, Serialize,
};
use std::{
fmt,
net::SocketAddr,
result,
str::FromStr,
sync::Arc,
thread::{self, JoinHandle},
};
use crate::api::{
self,
manager::{ApiManager, UpdateEndpoints},
Actuality, ApiAccess, ApiAggregator, ApiBackend, ApiScope, EndpointMutability,
ExtendApiBackend, FutureResult, NamedWith,
};
/// Type alias for the concrete `actix-web` HTTP response.
pub type FutureResponse = actix_web::FutureResponse<HttpResponse, actix_web::Error>;
/// Type alias for the concrete `actix-web` HTTP request.
pub type HttpRequest = actix_web::HttpRequest<()>;
/// Type alias for the inner `actix-web` HTTP requests handler.
pub type RawHandler = dyn Fn(HttpRequest) -> FutureResponse + 'static + Send + Sync;
/// Type alias for the `actix-web::App`.
pub type App = actix_web::App<()>;
/// Type alias for the `actix-web::App` configuration.
pub type AppConfig = Arc<dyn Fn(App) -> App + 'static + Send + Sync>;
/// Raw `actix-web` backend requests handler.
#[derive(Clone)]
pub struct RequestHandler {
/// Endpoint name.
pub name: String,
/// Endpoint HTTP method.
pub method: actix_web::http::Method,
/// Inner handler.
pub inner: Arc<RawHandler>,
}
impl fmt::Debug for RequestHandler {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RequestHandler")
.field("name", &self.name)
.field("method", &self.method)
.finish()
}
}
/// API builder for the `actix-web` backend.
#[derive(Debug, Clone, Default)]
pub struct ApiBuilder {
handlers: Vec<RequestHandler>,
}
impl ApiBuilder {
/// Constructs a new backend builder instance.
pub fn new() -> Self {
Self::default()
}
}
impl ApiBackend for ApiBuilder {
type Handler = RequestHandler;
type Backend = actix_web::Scope<()>;
fn raw_handler(&mut self, handler: Self::Handler) -> &mut Self {
self.handlers.push(handler);
self
}
fn wire(&self, mut output: Self::Backend) -> Self::Backend {
for handler in self.handlers.clone() {
let inner = handler.inner;
output = output.route(&handler.name, handler.method.clone(), move |request| {
inner(request)
});
}
output
}
}
impl ExtendApiBackend for actix_web::Scope<()> {
fn extend<'a, I>(mut self, items: I) -> Self
where
I: IntoIterator<Item = (&'a str, &'a ApiScope)>,
{
for item in items {
self = self.nested(&item.0, move |scope| item.1.actix_backend.wire(scope))
}
self
}
}
impl ResponseError for api::Error {
fn error_response(&self) -> HttpResponse {
match self {
api::Error::BadRequest(err) => HttpResponse::BadRequest().body(err.to_string()),
api::Error::InternalError(err) => {
HttpResponse::InternalServerError().body(err.to_string())
}
api::Error::Io(err) => HttpResponse::InternalServerError().body(err.to_string()),
api::Error::Storage(err) => HttpResponse::InternalServerError().body(err.to_string()),
api::Error::Gone => HttpResponse::Gone().finish(),
api::Error::MovedPermanently(new_location) => HttpResponse::MovedPermanently()
.header(header::LOCATION, new_location.clone())
.finish(),
api::Error::NotFound(err) => HttpResponse::NotFound().body(err.to_string()),
api::Error::Unauthorized => HttpResponse::Unauthorized().finish(),
}
}
}
/// Creates a `HttpResponse` object from the provided JSON value.
/// Depending on the `actuality` parameter, a warning about the endpoint
/// being deprecated may be added.
fn json_response<T: Serialize>(actuality: Actuality, json_value: T) -> HttpResponse {
let mut response = HttpResponse::Ok();
if let Actuality::Deprecated {
ref discontinued_on,
ref description,
} = actuality
{
// There is a proposal for a dedicated deprecation header within HTTP,
// but currently it's only a draft. So the conventional way to notify an API user
// about endpoint deprecation is to set the `Warning` header.
let expiration_note = match discontinued_on {
// Date is formatted according to HTTP-date format.
Some(date) => format!(
"The old API is maintained until {}.",
date.format("%a, %d %b %Y %T GMT")
),
None => "Currently there is no specific date for disabling this endpoint.".into(),
};
let mut warning_text = format!(
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
{}",
expiration_note
);
if let Some(description) = description {
warning_text = format!("{} Additional information: {}.", warning_text, description);
}
let warning_string = create_warning_header(&warning_text);
response.header(header::WARNING, warning_string);
}
response.json(json_value)
}
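// Illustrative sketch (not part of the original file): for a deprecated endpoint
// the response carries a header roughly of the form
//     Warning: 299 - "Deprecated API: This endpoint is deprecated, ..."
// while an `Actuality::Actual` endpoint returns the plain JSON body with no warning.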
/// Formats a warning string according to the following format:
/// "<warn-code> <warn-agent> \"<warn-text>\" [<warn-date>]"
/// <warn-code> in our case is 299, which means a miscellaneous persistent warning.
/// <warn-agent> is optional, so we set it to "-".
/// <warn-text> is the warning description, taken as the only argument.
/// <warn-date> is not required.
/// For details, see RFC 7234, Section 5.5 (Warning).
fn create_warning_header(warning_text: &str) -> String {
format!("299 - \"{}\"", warning_text)
}
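// For example, `create_warning_header("Use v2 instead")` evaluates to the string
// `299 - "Use v2 instead"`, which is then used as the `Warning` header value above.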
impl From<EndpointMutability> for actix_web::http::Method {
fn from(mutability: EndpointMutability) -> Self {
match mutability {
EndpointMutability::Immutable => actix_web::http::Method::GET,
EndpointMutability::Mutable => actix_web::http::Method::POST,
}
}
}
impl<Q, I, F> From<NamedWith<Q, I, api::Result<I>, F>> for RequestHandler
where
F: Fn(Q) -> api::Result<I> + 'static + Send + Sync + Clone,
Q: DeserializeOwned + 'static,
I: Serialize + 'static,
{
fn from(f: NamedWith<Q, I, api::Result<I>, F>) -> Self {
// Convert a handler that returns a `Result` into a handler that returns a `FutureResult`.
let handler = f.inner.handler;
let future_endpoint = move |query| -> Box<dyn Future<Item = I, Error = api::Error>> {
let future = handler(query).into_future();
Box::new(future)
};
let named_with_future = NamedWith::new(f.name, future_endpoint, f.mutability);
// Then we can create a `RequestHandler` with the `From` specialization for future result.
RequestHandler::from(named_with_future)
}
}
/// Takes an `HttpRequest` as a parameter and extracts the query:
/// - for an immutable endpoint, the query is parsed from the query string;
/// - for a mutable endpoint, the query is parsed from the request body as JSON.
fn extract_query<Q>(
request: HttpRequest,
mutability: EndpointMutability,
) -> impl Future<Item = Q, Error = actix_web::error::Error>
where
Q: DeserializeOwned + 'static,
{
match mutability {
EndpointMutability::Immutable => {
let future = Query::from_request(&request, &Default::default())
.map(Query::into_inner)
.map_err(From::from)
.into_future();
Either::A(future)
}
EndpointMutability::Mutable => {
let future = request.json().from_err();
Either::B(future)
}
}
}
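// Sketch of the two extraction paths (the endpoint name is hypothetical): an
// immutable endpoint is called as `GET /api/v1/info?height=5`, so `Q` is parsed
// from the query string; a mutable endpoint is called as `POST /api/v1/info`
// with body `{"height": 5}`, so the same `Q` is parsed from the JSON body.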
impl<Q, I, F> From<NamedWith<Q, I, FutureResult<I>, F>> for RequestHandler
where
F: Fn(Q) -> FutureResult<I> + 'static + Clone + Send + Sync,
Q: DeserializeOwned + 'static,
I: Serialize + 'static,
{
fn from(f: NamedWith<Q, I, FutureResult<I>, F>) -> Self |
}
/// Creates `actix_web::App` for the given aggregator and runtime configuration.
pub(crate) fn create_app(aggregator: &ApiAggregator, runtime_config: ApiRuntimeConfig) -> App {
let app_config = runtime_config.app_config;
let access = runtime_config.access;
let mut app = App::new();
app = app.scope("api", |scope| aggregator.extend_backend(access, scope));
if let Some(app_config) = app_config {
app = app_config(app);
}
app
}
/// Configuration parameters for the `App` runtime.
#[derive(Clone)]
pub struct ApiRuntimeConfig {
/// The socket address to bind.
pub listen_address: SocketAddr,
/// API access level.
pub access: ApiAccess,
/// Optional App configuration.
pub app_config: Option<AppConfig>,
}
impl ApiRuntimeConfig {
/// Creates API runtime configuration for the given address and access level.
pub fn new(listen_address: SocketAddr, access: ApiAccess) -> Self {
Self {
listen_address,
access,
app_config: Default::default(),
}
}
}
impl fmt::Debug for ApiRuntimeConfig {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ApiRuntimeConfig")
.field("listen_address", &self.listen_address)
.field("access", &self.access)
.field("app_config", &self.app_config.as_ref().map(drop))
.finish()
}
}
/// Configuration parameters for the actix system runtime.
#[derive(Debug, Clone)]
pub struct SystemRuntimeConfig {
/// Active API runtimes.
pub api_runtimes: Vec<ApiRuntimeConfig>,
/// API aggregator.
pub api_aggregator: ApiAggregator,
/// The interval in milliseconds between attempts to restart the HTTP server
/// in case a restart fails.
pub server_restart_retry_timeout: u64,
/// The maximum number of attempts to restart the HTTP server in case a restart fails.
pub server_restart_max_retries: u16,
}
/// Actix system runtime handle.
pub struct SystemRuntime {
system_thread: JoinHandle<result::Result<(), Error>>,
system: System,
}
impl SystemRuntimeConfig {
/// Starts actix system runtime along with all web runtimes.
pub fn start(
self,
endpoints_rx: mpsc::Receiver<UpdateEndpoints>,
) -> result::Result<SystemRuntime, Error> {
// Creates a system thread.
let (system_tx, system_rx) = mpsc::unbounded();
let system_thread = thread::spawn(move || -> result::Result<(), Error> {
let system = System::new("http-server");
system_tx.unbounded_send(System::current())?;
ApiManager::new(self, endpoints_rx).start();
// Starts actix-web runtime.
let code = system.run();
trace!("Actix runtime finished with code {}", code);
ensure!(
code == 0,
"Actix runtime finished with the non zero error code: {}",
code
);
Ok(())
});
// Receives the actix system handle from the spawned thread.
let system = system_rx
.wait()
.next()
.ok_or_else(|| format_err!("Unable to receive actix system handle"))?
.map_err(|()| format_err!("Unable to receive actix system handle"))?;
Ok(SystemRuntime {
system_thread,
system,
})
}
}
impl SystemRuntime {
/// Stops the actix system runtime along with all web runtimes.
pub fn stop(self) -> result::Result<(), Error> {
// Stop actix system runtime.
self.system.stop();
self.system_thread.join().map_err(|e| {
format_err!(
"Unable to join actix web api thread, an error occurred: {:?}",
e
)
})?
}
}
impl fmt::Debug for SystemRuntime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SystemRuntime").finish()
}
}
/// CORS header specification.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AllowOrigin {
/// Allows access from any host.
Any,
/// Allows access only from the specified hosts.
Whitelist(Vec<String>),
}
impl ser::Serialize for AllowOrigin {
fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
match *self {
AllowOrigin::Any => "*".serialize(serializer),
AllowOrigin::Whitelist(ref hosts) => {
if hosts.len() == 1 {
hosts[0].serialize(serializer)
} else {
hosts.serialize(serializer)
}
}
}
}
}
impl<'de> de::Deserialize<'de> for AllowOrigin {
fn deserialize<D>(d: D) -> result::Result<Self, D::Error>
where
D: de::Deserializer<'de>,
{
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = AllowOrigin;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a list of hosts or \"*\"")
}
fn visit_str<E>(self, value: &str) -> result::Result<AllowOrigin, E>
where
E: de::Error,
{
match value {
"*" => Ok(AllowOrigin::Any),
_ => Ok(AllowOrigin::Whitelist(vec![value.to_string()])),
}
}
fn visit_seq<A>(self, seq: A) -> result::Result<AllowOrigin, A::Error>
where
A: de::SeqAccess<'de>,
{
let hosts =
de::Deserialize::deserialize(de::value::SeqAccessDeserializer::new(seq))?;
Ok(AllowOrigin::Whitelist(hosts))
}
}
d.deserialize_any(Visitor)
}
}
impl FromStr for AllowOrigin {
type Err = Error;
fn from_str(s: &str) -> result::Result<Self, Self::Err> {
if s == "*" {
return Ok(AllowOrigin::Any);
}
let v: Vec<_> = s
.split(',')
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.collect();
if v.is_empty() {
bail!("Invalid AllowOrigin::Whitelist value");
}
Ok(AllowOrigin::Whitelist(v))
}
}
impl<'a> From<&'a AllowOrigin> for Cors {
fn from(origin: &'a AllowOrigin) -> Self {
match *origin {
AllowOrigin::Any => Self::build().finish(),
AllowOrigin::Whitelist(ref hosts) => {
let mut builder = Self::build();
for host in hosts {
builder.allowed_origin(host);
}
builder.finish()
}
}
}
}
impl From<AllowOrigin> for Cors {
fn from(origin: AllowOrigin) -> Self {
Self::from(&origin)
}
}
#[cfg(test)]
mod tests {
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn allow_origin_from_str() {
fn check(text: &str, expected: AllowOrigin) {
let from_str = AllowOrigin::from_str(text).unwrap();
assert_eq!(from_str, expected);
}
check(r#"*"#, AllowOrigin::Any);
check(
r#"http://example.com"#,
AllowOrigin::Whitelist(vec!["http://example.com".to_string()]),
);
check(
r#"http://a.org, http://b.org"#,
AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
);
check(
r#"http://a.org, http://b.org, "#,
AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
);
check(
r#"http://a.org,http://b.org"#,
AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
);
}
fn assert_responses_eq(left: HttpResponse, right: HttpResponse) {
assert_eq!(left.status(), right.status());
assert_eq!(left.headers(), right.headers());
assert_eq!(left.body(), right.body());
}
#[test]
fn test_create_warning_header() {
assert_eq!(
&create_warning_header("Description"),
"299 - \"Description\""
);
}
#[test]
fn json_responses() {
use chrono::TimeZone;
let actual_response = json_response(Actuality::Actual, 123);
assert_responses_eq(actual_response, HttpResponse::Ok().json(123));
let deprecated_response_no_deadline = json_response(
Actuality::Deprecated {
discontinued_on: None,
description: None,
},
123,
);
let expected_warning_text =
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
Currently there is no specific date for disabling this endpoint.";
let expected_warning = create_warning_header(expected_warning_text);
assert_responses_eq(
deprecated_response_no_deadline,
HttpResponse::Ok()
.header(header::WARNING, expected_warning)
.json(123),
);
let description = "Docs can be found on docs.rs".to_owned();
let deprecated_response_with_description = json_response(
Actuality::Deprecated {
discontinued_on: None,
description: Some(description),
},
123,
);
let expected_warning_text =
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
Currently there is no specific date for disabling this endpoint. \
Additional information: Docs can be found on docs.rs.";
let expected_warning = create_warning_header(expected_warning_text);
assert_responses_eq(
deprecated_response_with_description,
HttpResponse::Ok()
.header(header::WARNING, expected_warning)
.json(123),
);
let deadline = chrono::Utc.ymd(2020, 12, 31).and_hms(23, 59, 59);
let deprecated_response_deadline = json_response(
Actuality::Deprecated {
discontinued_on: Some(deadline),
description: None,
},
123,
);
let expected_warning_text =
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
The old API is maintained until Thu, 31 Dec 2020 23:59:59 GMT.";
let expected_warning = create_warning_header(expected_warning_text);
assert_responses_eq(
deprecated_response_deadline,
HttpResponse::Ok()
.header(header::WARNING, expected_warning)
.json(123),
);
}
}
| {
let handler = f.inner.handler;
let actuality = f.inner.actuality;
let mutability = f.mutability;
let index = move |request: HttpRequest| -> FutureResponse {
let handler = handler.clone();
let actuality = actuality.clone();
extract_query(request, mutability)
.and_then(move |query| {
handler(query)
.map(|value| json_response(actuality, value))
.map_err(From::from)
})
.responder()
};
Self {
name: f.name,
method: f.mutability.into(),
inner: Arc::from(index) as Arc<RawHandler>,
}
} | identifier_body |
actix.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Actix-web API backend.
//!
//! [Actix-web](https://github.com/actix/actix-web) is an asynchronous backend
//! for HTTP API, based on the [Actix](https://github.com/actix/actix) framework.
pub use actix_web::middleware::cors::Cors;
use actix::{Actor, System};
use actix_web::{
error::ResponseError, http::header, AsyncResponder, FromRequest, HttpMessage, HttpResponse,
Query,
};
use failure::{bail, ensure, format_err, Error};
use futures::{future::Either, sync::mpsc, Future, IntoFuture, Stream};
use log::trace;
use serde::{
de::{self, DeserializeOwned},
ser, Serialize,
};
use std::{
fmt,
net::SocketAddr,
result,
str::FromStr,
sync::Arc,
thread::{self, JoinHandle},
};
use crate::api::{
self,
manager::{ApiManager, UpdateEndpoints},
Actuality, ApiAccess, ApiAggregator, ApiBackend, ApiScope, EndpointMutability,
ExtendApiBackend, FutureResult, NamedWith,
};
/// Type alias for the concrete `actix-web` HTTP response.
pub type FutureResponse = actix_web::FutureResponse<HttpResponse, actix_web::Error>;
/// Type alias for the concrete `actix-web` HTTP request.
pub type HttpRequest = actix_web::HttpRequest<()>;
/// Type alias for the inner `actix-web` HTTP requests handler.
pub type RawHandler = dyn Fn(HttpRequest) -> FutureResponse + 'static + Send + Sync;
/// Type alias for the `actix-web::App`.
pub type App = actix_web::App<()>;
/// Type alias for the `actix-web::App` configuration.
pub type AppConfig = Arc<dyn Fn(App) -> App + 'static + Send + Sync>;
/// Raw `actix-web` backend requests handler.
#[derive(Clone)]
pub struct RequestHandler {
/// Endpoint name.
pub name: String,
/// Endpoint HTTP method.
pub method: actix_web::http::Method,
/// Inner handler.
pub inner: Arc<RawHandler>,
}
impl fmt::Debug for RequestHandler {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RequestHandler")
.field("name", &self.name)
.field("method", &self.method)
.finish()
}
}
/// API builder for the `actix-web` backend.
#[derive(Debug, Clone, Default)]
pub struct ApiBuilder {
handlers: Vec<RequestHandler>,
}
impl ApiBuilder {
/// Constructs a new backend builder instance.
pub fn new() -> Self {
Self::default()
}
}
impl ApiBackend for ApiBuilder {
type Handler = RequestHandler;
type Backend = actix_web::Scope<()>;
fn raw_handler(&mut self, handler: Self::Handler) -> &mut Self {
self.handlers.push(handler);
self
}
fn wire(&self, mut output: Self::Backend) -> Self::Backend {
for handler in self.handlers.clone() {
let inner = handler.inner;
output = output.route(&handler.name, handler.method.clone(), move |request| {
inner(request)
});
}
output
}
}
impl ExtendApiBackend for actix_web::Scope<()> {
fn extend<'a, I>(mut self, items: I) -> Self
where
I: IntoIterator<Item = (&'a str, &'a ApiScope)>,
{
for item in items {
self = self.nested(&item.0, move |scope| item.1.actix_backend.wire(scope))
}
self
}
}
impl ResponseError for api::Error {
fn error_response(&self) -> HttpResponse {
match self {
api::Error::BadRequest(err) => HttpResponse::BadRequest().body(err.to_string()),
api::Error::InternalError(err) => {
HttpResponse::InternalServerError().body(err.to_string())
}
api::Error::Io(err) => HttpResponse::InternalServerError().body(err.to_string()),
api::Error::Storage(err) => HttpResponse::InternalServerError().body(err.to_string()),
api::Error::Gone => HttpResponse::Gone().finish(),
api::Error::MovedPermanently(new_location) => HttpResponse::MovedPermanently()
.header(header::LOCATION, new_location.clone())
.finish(),
api::Error::NotFound(err) => HttpResponse::NotFound().body(err.to_string()),
api::Error::Unauthorized => HttpResponse::Unauthorized().finish(),
}
}
}
/// Creates a `HttpResponse` object from the provided JSON value.
/// Depending on the `actuality` parameter, a warning about the endpoint
/// being deprecated may be added.
fn json_response<T: Serialize>(actuality: Actuality, json_value: T) -> HttpResponse {
let mut response = HttpResponse::Ok();
if let Actuality::Deprecated {
ref discontinued_on,
ref description,
} = actuality
{
// There is a proposal for a dedicated deprecation header within HTTP,
// but currently it's only a draft. So the conventional way to notify an API user
// about endpoint deprecation is to set the `Warning` header.
let expiration_note = match discontinued_on {
// Date is formatted according to HTTP-date format.
Some(date) => format!(
"The old API is maintained until {}.",
date.format("%a, %d %b %Y %T GMT")
),
None => "Currently there is no specific date for disabling this endpoint.".into(),
};
let mut warning_text = format!(
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
{}",
expiration_note
);
if let Some(description) = description {
warning_text = format!("{} Additional information: {}.", warning_text, description);
}
let warning_string = create_warning_header(&warning_text);
response.header(header::WARNING, warning_string);
}
response.json(json_value)
}
/// Formats a warning string according to the following format:
/// "<warn-code> <warn-agent> \"<warn-text>\" [<warn-date>]"
/// <warn-code> in our case is 299, which means a miscellaneous persistent warning.
/// <warn-agent> is optional, so we set it to "-".
/// <warn-text> is the warning description, taken as the only argument.
/// <warn-date> is not required.
/// For details, see RFC 7234, Section 5.5 (Warning).
fn create_warning_header(warning_text: &str) -> String {
format!("299 - \"{}\"", warning_text)
}
impl From<EndpointMutability> for actix_web::http::Method {
fn from(mutability: EndpointMutability) -> Self {
match mutability {
EndpointMutability::Immutable => actix_web::http::Method::GET,
EndpointMutability::Mutable => actix_web::http::Method::POST,
}
}
}
impl<Q, I, F> From<NamedWith<Q, I, api::Result<I>, F>> for RequestHandler
where
F: Fn(Q) -> api::Result<I> + 'static + Send + Sync + Clone,
Q: DeserializeOwned + 'static,
I: Serialize + 'static,
{
fn from(f: NamedWith<Q, I, api::Result<I>, F>) -> Self {
// Convert a handler that returns a `Result` into a handler that returns a `FutureResult`.
let handler = f.inner.handler;
let future_endpoint = move |query| -> Box<dyn Future<Item = I, Error = api::Error>> {
let future = handler(query).into_future();
Box::new(future)
};
let named_with_future = NamedWith::new(f.name, future_endpoint, f.mutability);
// Then we can create a `RequestHandler` with the `From` specialization for future result.
RequestHandler::from(named_with_future)
}
}
/// Takes an `HttpRequest` as a parameter and extracts the query:
/// - for an immutable endpoint, the query is parsed from the query string;
/// - for a mutable endpoint, the query is parsed from the request body as JSON.
fn extract_query<Q>(
request: HttpRequest,
mutability: EndpointMutability,
) -> impl Future<Item = Q, Error = actix_web::error::Error>
where
Q: DeserializeOwned + 'static,
{
match mutability {
EndpointMutability::Immutable => {
let future = Query::from_request(&request, &Default::default())
.map(Query::into_inner)
.map_err(From::from)
.into_future();
Either::A(future)
}
EndpointMutability::Mutable => {
let future = request.json().from_err();
Either::B(future)
}
}
}
impl<Q, I, F> From<NamedWith<Q, I, FutureResult<I>, F>> for RequestHandler
where
F: Fn(Q) -> FutureResult<I> + 'static + Clone + Send + Sync,
Q: DeserializeOwned + 'static,
I: Serialize + 'static,
{
fn from(f: NamedWith<Q, I, FutureResult<I>, F>) -> Self {
let handler = f.inner.handler;
let actuality = f.inner.actuality;
let mutability = f.mutability;
let index = move |request: HttpRequest| -> FutureResponse {
let handler = handler.clone();
let actuality = actuality.clone();
extract_query(request, mutability)
.and_then(move |query| {
handler(query)
.map(|value| json_response(actuality, value))
.map_err(From::from)
})
.responder()
};
Self {
name: f.name,
method: f.mutability.into(),
inner: Arc::from(index) as Arc<RawHandler>,
}
}
}
/// Creates `actix_web::App` for the given aggregator and runtime configuration.
pub(crate) fn create_app(aggregator: &ApiAggregator, runtime_config: ApiRuntimeConfig) -> App {
let app_config = runtime_config.app_config;
let access = runtime_config.access;
let mut app = App::new();
app = app.scope("api", |scope| aggregator.extend_backend(access, scope));
if let Some(app_config) = app_config {
app = app_config(app);
}
app
}
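// Note: because every endpoint is wired under the "api" scope, a handler
// registered as, say, "v1/status" (hypothetical name) ends up served at
// `/api/v1/status` by the resulting `App`.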
/// Configuration parameters for the `App` runtime.
#[derive(Clone)]
pub struct ApiRuntimeConfig {
/// The socket address to bind.
pub listen_address: SocketAddr,
/// API access level.
pub access: ApiAccess,
/// Optional App configuration.
pub app_config: Option<AppConfig>,
}
impl ApiRuntimeConfig {
/// Creates API runtime configuration for the given address and access level.
pub fn new(listen_address: SocketAddr, access: ApiAccess) -> Self {
Self {
listen_address,
access,
app_config: Default::default(),
}
}
}
impl fmt::Debug for ApiRuntimeConfig {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ApiRuntimeConfig")
.field("listen_address", &self.listen_address)
.field("access", &self.access)
.field("app_config", &self.app_config.as_ref().map(drop))
.finish()
}
}
/// Configuration parameters for the actix system runtime.
#[derive(Debug, Clone)]
pub struct SystemRuntimeConfig {
/// Active API runtimes.
pub api_runtimes: Vec<ApiRuntimeConfig>,
/// API aggregator.
pub api_aggregator: ApiAggregator,
/// The interval in milliseconds between attempts to restart the HTTP server
/// in case a restart fails.
pub server_restart_retry_timeout: u64,
/// The maximum number of attempts to restart the HTTP server in case a restart fails.
pub server_restart_max_retries: u16,
}
/// Actix system runtime handle.
pub struct SystemRuntime {
system_thread: JoinHandle<result::Result<(), Error>>,
system: System,
}
impl SystemRuntimeConfig {
/// Starts actix system runtime along with all web runtimes.
pub fn start(
self,
endpoints_rx: mpsc::Receiver<UpdateEndpoints>,
) -> result::Result<SystemRuntime, Error> {
// Creates a system thread.
let (system_tx, system_rx) = mpsc::unbounded();
let system_thread = thread::spawn(move || -> result::Result<(), Error> {
let system = System::new("http-server");
system_tx.unbounded_send(System::current())?;
ApiManager::new(self, endpoints_rx).start();
// Starts actix-web runtime.
let code = system.run();
trace!("Actix runtime finished with code {}", code);
ensure!(
code == 0,
"Actix runtime finished with the non zero error code: {}",
code
);
Ok(())
});
// Receives the actix system handle from the spawned thread.
let system = system_rx
.wait()
.next()
.ok_or_else(|| format_err!("Unable to receive actix system handle"))?
.map_err(|()| format_err!("Unable to receive actix system handle"))?;
Ok(SystemRuntime {
system_thread,
system,
})
}
}
impl SystemRuntime {
/// Stops the actix system runtime along with all web runtimes.
pub fn stop(self) -> result::Result<(), Error> {
// Stop actix system runtime.
self.system.stop();
self.system_thread.join().map_err(|e| {
format_err!(
"Unable to join actix web api thread, an error occurred: {:?}",
e
)
})?
}
}
impl fmt::Debug for SystemRuntime {
fn | (&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SystemRuntime").finish()
}
}
/// CORS header specification.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AllowOrigin {
/// Allows access from any host.
Any,
/// Allows access only from the specified hosts.
Whitelist(Vec<String>),
}
impl ser::Serialize for AllowOrigin {
fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
match *self {
AllowOrigin::Any => "*".serialize(serializer),
AllowOrigin::Whitelist(ref hosts) => {
if hosts.len() == 1 {
hosts[0].serialize(serializer)
} else {
hosts.serialize(serializer)
}
}
}
}
}
impl<'de> de::Deserialize<'de> for AllowOrigin {
fn deserialize<D>(d: D) -> result::Result<Self, D::Error>
where
D: de::Deserializer<'de>,
{
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = AllowOrigin;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a list of hosts or \"*\"")
}
fn visit_str<E>(self, value: &str) -> result::Result<AllowOrigin, E>
where
E: de::Error,
{
match value {
"*" => Ok(AllowOrigin::Any),
_ => Ok(AllowOrigin::Whitelist(vec![value.to_string()])),
}
}
fn visit_seq<A>(self, seq: A) -> result::Result<AllowOrigin, A::Error>
where
A: de::SeqAccess<'de>,
{
let hosts =
de::Deserialize::deserialize(de::value::SeqAccessDeserializer::new(seq))?;
Ok(AllowOrigin::Whitelist(hosts))
}
}
d.deserialize_any(Visitor)
}
}
impl FromStr for AllowOrigin {
type Err = Error;
fn from_str(s: &str) -> result::Result<Self, Self::Err> {
if s == "*" {
return Ok(AllowOrigin::Any);
}
let v: Vec<_> = s
.split(',')
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.collect();
if v.is_empty() {
bail!("Invalid AllowOrigin::Whitelist value");
}
Ok(AllowOrigin::Whitelist(v))
}
}
impl<'a> From<&'a AllowOrigin> for Cors {
fn from(origin: &'a AllowOrigin) -> Self {
match *origin {
AllowOrigin::Any => Self::build().finish(),
AllowOrigin::Whitelist(ref hosts) => {
let mut builder = Self::build();
for host in hosts {
builder.allowed_origin(host);
}
builder.finish()
}
}
}
}
impl From<AllowOrigin> for Cors {
fn from(origin: AllowOrigin) -> Self {
Self::from(&origin)
}
}
#[cfg(test)]
mod tests {
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn allow_origin_from_str() {
fn check(text: &str, expected: AllowOrigin) {
let from_str = AllowOrigin::from_str(text).unwrap();
assert_eq!(from_str, expected);
}
check(r#"*"#, AllowOrigin::Any);
check(
r#"http://example.com"#,
AllowOrigin::Whitelist(vec!["http://example.com".to_string()]),
);
check(
r#"http://a.org, http://b.org"#,
AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
);
check(
r#"http://a.org, http://b.org, "#,
AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
);
check(
r#"http://a.org,http://b.org"#,
AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
);
}
fn assert_responses_eq(left: HttpResponse, right: HttpResponse) {
assert_eq!(left.status(), right.status());
assert_eq!(left.headers(), right.headers());
assert_eq!(left.body(), right.body());
}
#[test]
fn test_create_warning_header() {
assert_eq!(
&create_warning_header("Description"),
"299 - \"Description\""
);
}
#[test]
fn json_responses() {
use chrono::TimeZone;
let actual_response = json_response(Actuality::Actual, 123);
assert_responses_eq(actual_response, HttpResponse::Ok().json(123));
let deprecated_response_no_deadline = json_response(
Actuality::Deprecated {
discontinued_on: None,
description: None,
},
123,
);
let expected_warning_text =
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
Currently there is no specific date for disabling this endpoint.";
let expected_warning = create_warning_header(expected_warning_text);
assert_responses_eq(
deprecated_response_no_deadline,
HttpResponse::Ok()
.header(header::WARNING, expected_warning)
.json(123),
);
let description = "Docs can be found on docs.rs".to_owned();
let deprecated_response_with_description = json_response(
Actuality::Deprecated {
discontinued_on: None,
description: Some(description),
},
123,
);
let expected_warning_text =
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
Currently there is no specific date for disabling this endpoint. \
Additional information: Docs can be found on docs.rs.";
let expected_warning = create_warning_header(expected_warning_text);
assert_responses_eq(
deprecated_response_with_description,
HttpResponse::Ok()
.header(header::WARNING, expected_warning)
.json(123),
);
let deadline = chrono::Utc.ymd(2020, 12, 31).and_hms(23, 59, 59);
let deprecated_response_deadline = json_response(
Actuality::Deprecated {
discontinued_on: Some(deadline),
description: None,
},
123,
);
let expected_warning_text =
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
The old API is maintained until Thu, 31 Dec 2020 23:59:59 GMT.";
let expected_warning = create_warning_header(expected_warning_text);
assert_responses_eq(
deprecated_response_deadline,
HttpResponse::Ok()
.header(header::WARNING, expected_warning)
.json(123),
);
}
}
| fmt | identifier_name |
actix.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Actix-web API backend.
//!
//! [Actix-web](https://github.com/actix/actix-web) is an asynchronous backend
//! for HTTP API, based on the [Actix](https://github.com/actix/actix) framework.
pub use actix_web::middleware::cors::Cors;
use actix::{Actor, System};
use actix_web::{
error::ResponseError, http::header, AsyncResponder, FromRequest, HttpMessage, HttpResponse,
Query,
};
use failure::{bail, ensure, format_err, Error};
use futures::{future::Either, sync::mpsc, Future, IntoFuture, Stream};
use log::trace;
use serde::{
de::{self, DeserializeOwned},
ser, Serialize,
};
use std::{
fmt,
net::SocketAddr,
result,
str::FromStr,
sync::Arc,
thread::{self, JoinHandle},
};
use crate::api::{
self,
manager::{ApiManager, UpdateEndpoints},
Actuality, ApiAccess, ApiAggregator, ApiBackend, ApiScope, EndpointMutability,
ExtendApiBackend, FutureResult, NamedWith,
};
/// Type alias for the concrete `actix-web` HTTP response.
pub type FutureResponse = actix_web::FutureResponse<HttpResponse, actix_web::Error>;
/// Type alias for the concrete `actix-web` HTTP request.
pub type HttpRequest = actix_web::HttpRequest<()>;
/// Type alias for the inner `actix-web` HTTP requests handler.
pub type RawHandler = dyn Fn(HttpRequest) -> FutureResponse + 'static + Send + Sync;
/// Type alias for the `actix-web::App`.
pub type App = actix_web::App<()>;
/// Type alias for the `actix-web::App` configuration.
pub type AppConfig = Arc<dyn Fn(App) -> App + 'static + Send + Sync>;
/// Raw `actix-web` backend requests handler.
#[derive(Clone)]
pub struct RequestHandler {
/// Endpoint name.
pub name: String,
/// Endpoint HTTP method.
pub method: actix_web::http::Method,
/// Inner handler.
pub inner: Arc<RawHandler>,
}
impl fmt::Debug for RequestHandler {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RequestHandler")
.field("name", &self.name)
.field("method", &self.method)
.finish()
}
}
/// API builder for the `actix-web` backend.
#[derive(Debug, Clone, Default)]
pub struct ApiBuilder {
handlers: Vec<RequestHandler>,
}
impl ApiBuilder {
/// Constructs a new backend builder instance.
pub fn new() -> Self {
Self::default()
}
}
impl ApiBackend for ApiBuilder {
type Handler = RequestHandler;
type Backend = actix_web::Scope<()>;
fn raw_handler(&mut self, handler: Self::Handler) -> &mut Self {
self.handlers.push(handler);
self
}
fn wire(&self, mut output: Self::Backend) -> Self::Backend {
for handler in self.handlers.clone() {
let inner = handler.inner;
output = output.route(&handler.name, handler.method.clone(), move |request| {
inner(request)
});
}
output
}
}
impl ExtendApiBackend for actix_web::Scope<()> {
fn extend<'a, I>(mut self, items: I) -> Self
where
I: IntoIterator<Item = (&'a str, &'a ApiScope)>,
{
for item in items {
self = self.nested(&item.0, move |scope| item.1.actix_backend.wire(scope))
}
self
}
}
impl ResponseError for api::Error {
fn error_response(&self) -> HttpResponse {
match self {
api::Error::BadRequest(err) => HttpResponse::BadRequest().body(err.to_string()),
api::Error::InternalError(err) => {
HttpResponse::InternalServerError().body(err.to_string())
}
api::Error::Io(err) => HttpResponse::InternalServerError().body(err.to_string()),
api::Error::Storage(err) => HttpResponse::InternalServerError().body(err.to_string()),
api::Error::Gone => HttpResponse::Gone().finish(),
api::Error::MovedPermanently(new_location) => HttpResponse::MovedPermanently()
.header(header::LOCATION, new_location.clone())
.finish(),
api::Error::NotFound(err) => HttpResponse::NotFound().body(err.to_string()),
api::Error::Unauthorized => HttpResponse::Unauthorized().finish(),
}
}
}
/// Creates a `HttpResponse` object from the provided JSON value.
/// Depending on the `actuality` parameter, a warning about the endpoint
/// being deprecated may be added.
fn json_response<T: Serialize>(actuality: Actuality, json_value: T) -> HttpResponse {
let mut response = HttpResponse::Ok();
if let Actuality::Deprecated {
ref discontinued_on,
ref description,
} = actuality
{
// There is a proposal for a dedicated deprecation header within HTTP,
// but currently it's only a draft. So the conventional way to notify an API user
// about endpoint deprecation is to set the `Warning` header.
let expiration_note = match discontinued_on {
// Date is formatted according to HTTP-date format.
Some(date) => format!(
"The old API is maintained until {}.",
date.format("%a, %d %b %Y %T GMT")
),
None => "Currently there is no specific date for disabling this endpoint.".into(),
};
let mut warning_text = format!(
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
{}",
expiration_note
);
if let Some(description) = description {
warning_text = format!("{} Additional information: {}.", warning_text, description);
}
let warning_string = create_warning_header(&warning_text);
response.header(header::WARNING, warning_string);
}
response.json(json_value)
}
/// Formats a warning string according to the following format:
/// "<warn-code> <warn-agent> \"<warn-text>\" [<warn-date>]"
/// <warn-code> in our case is 299, which means a miscellaneous persistent warning.
/// <warn-agent> is optional, so we set it to "-".
/// <warn-text> is the warning description, taken as the only argument.
/// <warn-date> is not required.
/// For details, see RFC 7234, Section 5.5 (Warning).
fn create_warning_header(warning_text: &str) -> String {
format!("299 - \"{}\"", warning_text)
}
impl From<EndpointMutability> for actix_web::http::Method {
fn from(mutability: EndpointMutability) -> Self {
match mutability {
EndpointMutability::Immutable => actix_web::http::Method::GET,
EndpointMutability::Mutable => actix_web::http::Method::POST,
}
}
}
impl<Q, I, F> From<NamedWith<Q, I, api::Result<I>, F>> for RequestHandler
where
F: Fn(Q) -> api::Result<I> + 'static + Send + Sync + Clone,
Q: DeserializeOwned + 'static,
I: Serialize + 'static,
{
fn from(f: NamedWith<Q, I, api::Result<I>, F>) -> Self {
// Convert a handler that returns a `Result` into a handler that returns a `FutureResult`.
let handler = f.inner.handler;
let future_endpoint = move |query| -> Box<dyn Future<Item = I, Error = api::Error>> {
let future = handler(query).into_future();
Box::new(future)
};
let named_with_future = NamedWith::new(f.name, future_endpoint, f.mutability);
// Then we can create a `RequestHandler` with the `From` specialization for future result.
RequestHandler::from(named_with_future)
}
}
/// Takes an `HttpRequest` as a parameter and extracts the query:
/// - for an immutable endpoint, the query is parsed from the query string;
/// - for a mutable endpoint, the query is parsed from the request body as JSON.
fn extract_query<Q>(
request: HttpRequest,
mutability: EndpointMutability,
) -> impl Future<Item = Q, Error = actix_web::error::Error>
where
Q: DeserializeOwned + 'static,
{
match mutability {
EndpointMutability::Immutable => {
let future = Query::from_request(&request, &Default::default())
.map(Query::into_inner)
.map_err(From::from)
.into_future();
Either::A(future)
}
EndpointMutability::Mutable => {
let future = request.json().from_err();
Either::B(future)
}
}
}
impl<Q, I, F> From<NamedWith<Q, I, FutureResult<I>, F>> for RequestHandler
where
F: Fn(Q) -> FutureResult<I> + 'static + Clone + Send + Sync,
Q: DeserializeOwned + 'static,
I: Serialize + 'static,
{
fn from(f: NamedWith<Q, I, FutureResult<I>, F>) -> Self {
let handler = f.inner.handler;
let actuality = f.inner.actuality;
let mutability = f.mutability;
let index = move |request: HttpRequest| -> FutureResponse {
let handler = handler.clone();
let actuality = actuality.clone();
extract_query(request, mutability)
.and_then(move |query| {
handler(query)
.map(|value| json_response(actuality, value))
.map_err(From::from)
})
.responder()
};
Self {
name: f.name,
method: f.mutability.into(),
inner: Arc::from(index) as Arc<RawHandler>,
}
}
}
/// Creates `actix_web::App` for the given aggregator and runtime configuration.
pub(crate) fn create_app(aggregator: &ApiAggregator, runtime_config: ApiRuntimeConfig) -> App {
let app_config = runtime_config.app_config;
let access = runtime_config.access;
let mut app = App::new();
app = app.scope("api", |scope| aggregator.extend_backend(access, scope));
if let Some(app_config) = app_config {
app = app_config(app);
}
app
}
/// Configuration parameters for the `App` runtime.
#[derive(Clone)]
pub struct ApiRuntimeConfig {
/// The socket address to bind.
pub listen_address: SocketAddr,
/// API access level.
pub access: ApiAccess,
/// Optional App configuration.
pub app_config: Option<AppConfig>,
}
impl ApiRuntimeConfig {
/// Creates API runtime configuration for the given address and access level.
pub fn new(listen_address: SocketAddr, access: ApiAccess) -> Self {
Self {
listen_address,
access,
app_config: Default::default(),
}
}
}
impl fmt::Debug for ApiRuntimeConfig {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ApiRuntimeConfig")
.field("listen_address", &self.listen_address)
.field("access", &self.access)
.field("app_config", &self.app_config.as_ref().map(drop))
.finish()
}
}
/// Configuration parameters for the actix system runtime.
#[derive(Debug, Clone)]
pub struct SystemRuntimeConfig {
/// Active API runtimes.
pub api_runtimes: Vec<ApiRuntimeConfig>,
/// API aggregator.
pub api_aggregator: ApiAggregator,
/// The interval in milliseconds between attempts to restart the HTTP server
/// in case a restart fails.
pub server_restart_retry_timeout: u64,
/// The maximum number of attempts to restart the HTTP server in case a restart fails.
pub server_restart_max_retries: u16,
}
/// Actix system runtime handle.
pub struct SystemRuntime {
system_thread: JoinHandle<result::Result<(), Error>>,
system: System,
}
impl SystemRuntimeConfig {
/// Starts actix system runtime along with all web runtimes.
pub fn start(
self,
endpoints_rx: mpsc::Receiver<UpdateEndpoints>,
) -> result::Result<SystemRuntime, Error> {
// Creates a system thread.
let (system_tx, system_rx) = mpsc::unbounded();
let system_thread = thread::spawn(move || -> result::Result<(), Error> {
let system = System::new("http-server");
system_tx.unbounded_send(System::current())?;
ApiManager::new(self, endpoints_rx).start();
// Starts actix-web runtime.
let code = system.run();
trace!("Actix runtime finished with code {}", code);
ensure!(
code == 0,
"Actix runtime finished with the non zero error code: {}",
code
);
Ok(())
});
// Receives the actix system handle from the spawned thread.
let system = system_rx
.wait()
.next()
.ok_or_else(|| format_err!("Unable to receive actix system handle"))?
.map_err(|()| format_err!("Unable to receive actix system handle"))?;
Ok(SystemRuntime {
system_thread,
system,
})
}
}
impl SystemRuntime {
/// Stops the actix system runtime along with all web runtimes.
pub fn stop(self) -> result::Result<(), Error> {
// Stop actix system runtime.
self.system.stop();
self.system_thread.join().map_err(|e| {
format_err!(
"Unable to join actix web api thread, an error occurred: {:?}",
e
)
})?
}
}
impl fmt::Debug for SystemRuntime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SystemRuntime").finish()
}
}
/// CORS header specification.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AllowOrigin {
/// Allows access from any host.
Any,
/// Allows access only from the specified hosts.
Whitelist(Vec<String>),
}
impl ser::Serialize for AllowOrigin {
fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error>
where
S: ser::Serializer,
{
match *self {
AllowOrigin::Any => "*".serialize(serializer),
AllowOrigin::Whitelist(ref hosts) => {
if hosts.len() == 1 {
hosts[0].serialize(serializer)
} else {
hosts.serialize(serializer)
}
}
}
}
}
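// Serialization sketch: `AllowOrigin::Any` serializes as "*", a single-host
// whitelist as the bare host string (e.g. "http://a.org"), and a multi-host
// whitelist as an array of hosts, mirroring the formats accepted by the
// `Deserialize` implementation below.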
impl<'de> de::Deserialize<'de> for AllowOrigin {
fn deserialize<D>(d: D) -> result::Result<Self, D::Error>
where
D: de::Deserializer<'de>,
{
struct Visitor;
impl<'de> de::Visitor<'de> for Visitor {
type Value = AllowOrigin;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a list of hosts or \"*\"")
}
fn visit_str<E>(self, value: &str) -> result::Result<AllowOrigin, E>
where
E: de::Error,
{ | "*" => Ok(AllowOrigin::Any),
_ => Ok(AllowOrigin::Whitelist(vec![value.to_string()])),
}
}
fn visit_seq<A>(self, seq: A) -> result::Result<AllowOrigin, A::Error>
where
A: de::SeqAccess<'de>,
{
let hosts =
de::Deserialize::deserialize(de::value::SeqAccessDeserializer::new(seq))?;
Ok(AllowOrigin::Whitelist(hosts))
}
}
d.deserialize_any(Visitor)
}
}
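// Parses "*" or a comma-separated host list; the `allow_origin_from_str` test below
// covers the accepted inputs.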
impl FromStr for AllowOrigin {
type Err = Error;
fn from_str(s: &str) -> result::Result<Self, Self::Err> {
if s == "*" {
return Ok(AllowOrigin::Any);
}
let v: Vec<_> = s
.split(',')
.map(|s| s.trim().to_string())
.filter(|s| !s.is_empty())
.collect();
if v.is_empty() {
bail!("Invalid AllowOrigin::Whitelist value");
}
Ok(AllowOrigin::Whitelist(v))
}
}
impl<'a> From<&'a AllowOrigin> for Cors {
fn from(origin: &'a AllowOrigin) -> Self {
match *origin {
AllowOrigin::Any => Self::build().finish(),
AllowOrigin::Whitelist(ref hosts) => {
let mut builder = Self::build();
for host in hosts {
builder.allowed_origin(host);
}
builder.finish()
}
}
}
}
impl From<AllowOrigin> for Cors {
fn from(origin: AllowOrigin) -> Self {
Self::from(&origin)
}
}
#[cfg(test)]
mod tests {
use pretty_assertions::assert_eq;
use super::*;
#[test]
fn allow_origin_from_str() {
fn check(text: &str, expected: AllowOrigin) {
let from_str = AllowOrigin::from_str(text).unwrap();
assert_eq!(from_str, expected);
}
check(r#"*"#, AllowOrigin::Any);
check(
r#"http://example.com"#,
AllowOrigin::Whitelist(vec!["http://example.com".to_string()]),
);
check(
r#"http://a.org, http://b.org"#,
AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
);
check(
r#"http://a.org, http://b.org, "#,
AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
);
check(
r#"http://a.org,http://b.org"#,
AllowOrigin::Whitelist(vec!["http://a.org".to_string(), "http://b.org".to_string()]),
);
}
fn assert_responses_eq(left: HttpResponse, right: HttpResponse) {
assert_eq!(left.status(), right.status());
assert_eq!(left.headers(), right.headers());
assert_eq!(left.body(), right.body());
}
#[test]
fn test_create_warning_header() {
assert_eq!(
&create_warning_header("Description"),
"299 - \"Description\""
);
}
#[test]
fn json_responses() {
use chrono::TimeZone;
let actual_response = json_response(Actuality::Actual, 123);
assert_responses_eq(actual_response, HttpResponse::Ok().json(123));
let deprecated_response_no_deadline = json_response(
Actuality::Deprecated {
discontinued_on: None,
description: None,
},
123,
);
let expected_warning_text =
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
Currently there is no specific date for disabling this endpoint.";
let expected_warning = create_warning_header(expected_warning_text);
assert_responses_eq(
deprecated_response_no_deadline,
HttpResponse::Ok()
.header(header::WARNING, expected_warning)
.json(123),
);
let description = "Docs can be found on docs.rs".to_owned();
let deprecated_response_with_description = json_response(
Actuality::Deprecated {
discontinued_on: None,
description: Some(description),
},
123,
);
let expected_warning_text =
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
Currently there is no specific date for disabling this endpoint. \
Additional information: Docs can be found on docs.rs.";
let expected_warning = create_warning_header(expected_warning_text);
assert_responses_eq(
deprecated_response_with_description,
HttpResponse::Ok()
.header(header::WARNING, expected_warning)
.json(123),
);
let deadline = chrono::Utc.ymd(2020, 12, 31).and_hms(23, 59, 59);
let deprecated_response_deadline = json_response(
Actuality::Deprecated {
discontinued_on: Some(deadline),
description: None,
},
123,
);
let expected_warning_text =
"Deprecated API: This endpoint is deprecated, \
see the service documentation to find an alternative. \
The old API is maintained until Thu, 31 Dec 2020 23:59:59 GMT.";
let expected_warning = create_warning_header(expected_warning_text);
assert_responses_eq(
deprecated_response_deadline,
HttpResponse::Ok()
.header(header::WARNING, expected_warning)
.json(123),
);
}
} | match value { | random_line_split |
seq2seq.py | from functools import partial
import tensorflow as tf
layers = tf.keras.layers
class _Seq2SeqBase(object):
@staticmethod
def gru():
return layers.CuDNNGRU if tf.test.is_gpu_available() else layers.GRU
@staticmethod
def lstm():
return layers.CuDNNLSTM if tf.test.is_gpu_available() else layers.LSTM
class RNNEncoder(_Seq2SeqBase):
def __init__(self, units, bidirectional=False, merge_mode=None):
rnn_model = partial(self.gru(), units=units, return_sequences=True, return_state=True, unroll=True)
self.forward_rnn = rnn_model(go_backwards=False, name='enc_forward_rnn')
self.backward_rnn = rnn_model(go_backwards=True, name='enc_backward_rnn') if bidirectional else None
self.merge_mode = merge_mode
def __call__(self, inputs):
forward_results = self.forward_rnn(inputs)
if self.backward_rnn:
backward_results = self.backward_rnn(inputs)
if not self.merge_mode:
# follow Bahdanau's paper
backward_results[0] = layers.Concatenate()([forward_results[0], backward_results[0]])
final_results = backward_results
else:
merge_func = layers.Concatenate() if self.merge_mode == 'concat' else layers.Add()
final_results = [merge_func([i, j]) for i, j in zip(forward_results, backward_results)]
else:
final_results = forward_results
output, hidden = final_results[0], final_results[1:]
hidden = [layers.Dense(units=self.forward_rnn.units, activation='tanh')(x) for x in hidden]
return output, hidden
class RNNWithAttentionDecoder(_Seq2SeqBase):
def __init__(self, units, n_classes, dec_max_time_steps, eos_token=0,
attn_method='concat', attn_before_rnn=True, **kwargs):
self.rnn = self.gru()(units=units, return_state=True)
self.attn_score = self.build_attn_score_func(units, attn_method, **kwargs)
self.attn_combine = layers.Dense(units=units, activation='tanh', name='dec_attn_combine')
self.attn_before_rnn = attn_before_rnn
self.output_fc = layers.Dense(units=n_classes, name='dec_output_fc')
self.dec_max_time_steps = dec_max_time_steps
self.eos_token = eos_token # todo: early stopping
@staticmethod
def build_attn_score_func(units, attn_method, **kwargs): # todo: share?
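        # Returns a scoring function f(dec_input, dec_hidden, enc_outputs) producing
        # un-normalized attention scores of shape (batch, enc_steps, 1):
        #   'concat'   - additive score over [enc_outputs; tiled hidden] (Bahdanau-style)
        #   'location' - dense layer over dec_input and hidden only (needs enc_max_time_steps)
        #   'dot'      - dot product of enc_outputs with the hidden state (Luong-style)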
if attn_method == 'concat':
fcs = [
tf.layers.Dense(units=units, activation='tanh', name='w'),
tf.layers.Dense(units=1, name='r')
]
def f(*args):
_, h, e = args
h = tf.expand_dims(h, axis=1) # ?*1*N
h = tf.tile(h, multiples=[1, e.shape[1], 1]) # ?*20*N
x = tf.concat([e, h], axis=-1)
for layer in fcs:
x = layer(x)
return x # ?*20*1
return f
elif attn_method == 'location':
|
elif attn_method == 'dot':
def f(*args):
_, h, e = args
h = tf.expand_dims(h, axis=-1) # ?*32*1
return tf.matmul(e, h) # ?*20*1
return f
else:
            raise NotImplementedError('unsupported attn_method: %s' % attn_method)
def __call__(self, inputs, encoder_output, encoder_state, teacher_forcing, **kwargs):
hidden_state = encoder_state
outputs = []
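        # At each decoding step, feed the gold token embedding under teacher forcing,
        # or the embedding of the previous step's argmax prediction otherwise.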
def without_teacher_forcing():
embed = kwargs.get('embed', None)
assert embed
return embed(tf.argmax(pred, axis=1))
for step in range(self.dec_max_time_steps):
if step == 0:
x = inputs[:, 0, :]
else:
x = tf.cond(teacher_forcing, true_fn=lambda: inputs[:, step, :],
false_fn=without_teacher_forcing, name='dec_switch_teacher_forcing')
'''calculate attention'''
h_state = hidden_state[0]
atten_scores = self.attn_score(x, h_state, encoder_output)
            atten_weights = tf.nn.softmax(atten_scores, axis=1)
atten_context = tf.multiply(encoder_output, atten_weights) # ?*20*32 ?*20*1
atten_context = tf.reduce_sum(atten_context, axis=1)
'''across rnn'''
if self.attn_before_rnn:
x = tf.expand_dims(tf.concat([atten_context, x], axis=-1), axis=1) # todo: delete x?
results = self.rnn(x, initial_state=hidden_state)
output, hidden_state = results[0], results[1:]
else:
                # follows Luong's paper, with a slight difference
x = tf.expand_dims(x, axis=1)
results = self.rnn(x, initial_state=hidden_state)
output, hidden_state = results[0], results[1:]
x = tf.concat([atten_context, output], axis=-1)
output = self.attn_combine(x)
pred = self.output_fc(output) # logits
outputs.append(pred)
outputs = tf.stack(outputs, axis=1)
return outputs
def _default_batchify_fn(data):
if isinstance(data[0], np.ndarray):
return np.stack(data)
elif isinstance(data[0], tuple):
data = zip(*data)
return [_default_batchify_fn(i) for i in data]
else:
data = np.asarray(data)
return data
class _MMetric(object):
def __init__(self):
self.num = 0
self.total = 0
def update(self, num, total):
self.num += num
self.total += total
def get(self):
return self.num / self.total
def reset(self):
self.num = 0
self.total = 0
if __name__ == '__main__':
import warnings
import os
import numpy as np
import pandas as pd
from mxnet.gluon.data import ArrayDataset, DataLoader
from sklearn.model_selection import train_test_split
from tqdm import tqdm
warnings.filterwarnings('ignore')
os.environ["CUDA_VISIBLE_DEVICES"] = ""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
hidden_size = 32
sos_token = 10
use_teacher_forcing_ratio = 0.5
'''build encoder'''
encoder_input = tf.placeholder(tf.int32, shape=(None, 20))
encoder_embedding = layers.Embedding(input_dim=11, output_dim=8, trainable=True)
encoder = RNNEncoder(units=hidden_size, bidirectional=True, merge_mode='sum')
encoder_output, encoder_state = encoder(inputs=encoder_embedding(encoder_input))
'''build decoder'''
decoder_input = tf.placeholder(tf.int32, shape=(None, None))
teacher_forcing = tf.placeholder_with_default(False, shape=None)
decoder = RNNWithAttentionDecoder(
units=hidden_size,
n_classes=10,
enc_max_time_steps=20,
dec_max_time_steps=20,
attn_method='dot',
attn_before_rnn=False
)
decoder_output = decoder(inputs=encoder_embedding(decoder_input), encoder_output=encoder_output,
encoder_state=encoder_state, teacher_forcing=teacher_forcing,
embed=encoder_embedding)
softmax_label = tf.placeholder(tf.int64, shape=(None, 20))
'''build loss'''
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=decoder_output, labels=softmax_label)
loss = tf.reduce_mean(loss)
'''build optimizer'''
opt = tf.train.AdamOptimizer(learning_rate=0.02).minimize(loss)
'''build metric'''
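    # Sequence-level exact-match accuracy: a sample counts as correct only if the
    # prediction matches the label at every time step.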
pred_label = tf.argmax(decoder_output, axis=-1)
n_true = tf.reduce_all(tf.equal(pred_label, softmax_label), axis=1)
n_true = tf.cast(n_true, dtype=tf.int32)
n_true = tf.reduce_sum(n_true)
'''load data'''
def load_data(path):
return pd.read_csv(path, header=None).values
X_train = load_data('./dataset/task8_train_input.csv')
y_train = load_data('./dataset/task8_train_output.csv')
X_test = load_data('./dataset/task8_test_input.csv')
y_test = load_data('./dataset/task8_test_output.csv')
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, train_size=0.9, random_state=0)
print('TrainSet Shape:{}'.format(X_train.shape))
print('TestSet Shape:{}'.format(X_test.shape))
build_dataloader = partial(DataLoader, batch_size=32, shuffle=False, last_batch='keep',
batchify_fn=_default_batchify_fn)
train_dataloader = build_dataloader(dataset=ArrayDataset(X_train, y_train))
test_dataloader = build_dataloader(dataset=ArrayDataset(X_test, y_test))
val_dataloader = build_dataloader(dataset=ArrayDataset(X_val, y_val))
'''start training'''
sess.run(tf.global_variables_initializer())
train_loss, train_acc = _MMetric(), _MMetric()
print_freq = 50
for step, (x, y) in enumerate(tqdm(train_dataloader, desc='Training', position=0)):
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
t = np.random.rand() < use_teacher_forcing_ratio
d = sos_input if not t else np.concatenate((sos_input, y[:, 1:]), axis=1)
feed_dict = {encoder_input: x, decoder_input: d, softmax_label: y, teacher_forcing: t}
_, loss_value, n_true_value = sess.run([opt, loss, n_true], feed_dict=feed_dict)
train_loss.update(loss_value, 1)
train_acc.update(n_true_value, len(x))
if step != 0 and step % print_freq == 0:
'''Evaluate on validation set'''
val_loss, val_acc = _MMetric(), _MMetric()
for x, y in val_dataloader:
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
feed_dict = {encoder_input: x, decoder_input: sos_input, softmax_label: y, teacher_forcing: False}
loss_value, n_true_value = sess.run([loss, n_true], feed_dict=feed_dict)
val_loss.update(loss_value, 1)
val_acc.update(n_true_value, len(x))
tqdm.write(
'[Step {}/{}] train-loss: {}, train-acc: {} val-loss: {}, val-acc: {}'.format(
step, len(train_dataloader), train_loss.get(), train_acc.get(), val_loss.get(), val_acc.get()
)
)
train_loss.reset()
train_acc.reset()
'''start testing'''
test_loss, test_acc = _MMetric(), _MMetric()
for step, (x, y) in enumerate(tqdm(test_dataloader, desc='Testing', position=0)):
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
feed_dict = {encoder_input: x, decoder_input: sos_input, softmax_label: y, teacher_forcing: False}
loss_value, n_true_value = sess.run([loss, n_true], feed_dict=feed_dict)
test_loss.update(loss_value, 1)
test_acc.update(n_true_value, len(x))
if step != 0 and step % print_freq == 0:
tqdm.write('[Step {}/{}] test-loss: {}, test-acc: {}'
.format(step, len(test_dataloader), test_loss.get(), test_acc.get()))
tqdm.write('[final] test-loss: {}, test-acc: {}'.format(test_loss.get(), test_acc.get()))
| enc_max_time_steps = kwargs.get('enc_max_time_steps', None)
assert enc_max_time_steps
fc = tf.layers.Dense(units=enc_max_time_steps)
def f(*args):
x = fc(tf.concat(args[:-1], axis=-1)) # ?*20
return tf.expand_dims(x, axis=-1) # ?*20*1
return f | conditional_block |
seq2seq.py | from functools import partial
import tensorflow as tf
layers = tf.keras.layers
class _Seq2SeqBase(object):
@staticmethod
def gru():
return layers.CuDNNGRU if tf.test.is_gpu_available() else layers.GRU
@staticmethod
def lstm():
return layers.CuDNNLSTM if tf.test.is_gpu_available() else layers.LSTM
class RNNEncoder(_Seq2SeqBase):
def | (self, units, bidirectional=False, merge_mode=None):
rnn_model = partial(self.gru(), units=units, return_sequences=True, return_state=True, unroll=True)
self.forward_rnn = rnn_model(go_backwards=False, name='enc_forward_rnn')
self.backward_rnn = rnn_model(go_backwards=True, name='enc_backward_rnn') if bidirectional else None
self.merge_mode = merge_mode
def __call__(self, inputs):
forward_results = self.forward_rnn(inputs)
if self.backward_rnn:
backward_results = self.backward_rnn(inputs)
if not self.merge_mode:
# follow Bahdanau's paper
backward_results[0] = layers.Concatenate()([forward_results[0], backward_results[0]])
final_results = backward_results
else:
merge_func = layers.Concatenate() if self.merge_mode == 'concat' else layers.Add()
final_results = [merge_func([i, j]) for i, j in zip(forward_results, backward_results)]
else:
final_results = forward_results
output, hidden = final_results[0], final_results[1:]
hidden = [layers.Dense(units=self.forward_rnn.units, activation='tanh')(x) for x in hidden]
return output, hidden
class RNNWithAttentionDecoder(_Seq2SeqBase):
def __init__(self, units, n_classes, dec_max_time_steps, eos_token=0,
attn_method='concat', attn_before_rnn=True, **kwargs):
self.rnn = self.gru()(units=units, return_state=True)
self.attn_score = self.build_attn_score_func(units, attn_method, **kwargs)
self.attn_combine = layers.Dense(units=units, activation='tanh', name='dec_attn_combine')
self.attn_before_rnn = attn_before_rnn
self.output_fc = layers.Dense(units=n_classes, name='dec_output_fc')
self.dec_max_time_steps = dec_max_time_steps
self.eos_token = eos_token # todo: early stopping
@staticmethod
def build_attn_score_func(units, attn_method, **kwargs): # todo: share?
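        # Returns a scoring function f(dec_input, dec_hidden, enc_outputs) producing
        # un-normalized attention scores of shape (batch, enc_steps, 1):
        #   'concat'   - additive score over [enc_outputs; tiled hidden] (Bahdanau-style)
        #   'location' - dense layer over dec_input and hidden only (needs enc_max_time_steps)
        #   'dot'      - dot product of enc_outputs with the hidden state (Luong-style)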
if attn_method == 'concat':
fcs = [
tf.layers.Dense(units=units, activation='tanh', name='w'),
tf.layers.Dense(units=1, name='r')
]
def f(*args):
_, h, e = args
h = tf.expand_dims(h, axis=1) # ?*1*N
h = tf.tile(h, multiples=[1, e.shape[1], 1]) # ?*20*N
x = tf.concat([e, h], axis=-1)
for layer in fcs:
x = layer(x)
return x # ?*20*1
return f
elif attn_method == 'location':
enc_max_time_steps = kwargs.get('enc_max_time_steps', None)
assert enc_max_time_steps
fc = tf.layers.Dense(units=enc_max_time_steps)
def f(*args):
x = fc(tf.concat(args[:-1], axis=-1)) # ?*20
return tf.expand_dims(x, axis=-1) # ?*20*1
return f
elif attn_method == 'dot':
def f(*args):
_, h, e = args
h = tf.expand_dims(h, axis=-1) # ?*32*1
return tf.matmul(e, h) # ?*20*1
return f
else:
            raise NotImplementedError('unsupported attn_method: %s' % attn_method)
def __call__(self, inputs, encoder_output, encoder_state, teacher_forcing, **kwargs):
hidden_state = encoder_state
outputs = []
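        # At each decoding step, feed the gold token embedding under teacher forcing,
        # or the embedding of the previous step's argmax prediction otherwise.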
def without_teacher_forcing():
embed = kwargs.get('embed', None)
assert embed
return embed(tf.argmax(pred, axis=1))
for step in range(self.dec_max_time_steps):
if step == 0:
x = inputs[:, 0, :]
else:
x = tf.cond(teacher_forcing, true_fn=lambda: inputs[:, step, :],
false_fn=without_teacher_forcing, name='dec_switch_teacher_forcing')
'''calculate attention'''
h_state = hidden_state[0]
atten_scores = self.attn_score(x, h_state, encoder_output)
            atten_weights = tf.nn.softmax(atten_scores, axis=1)
atten_context = tf.multiply(encoder_output, atten_weights) # ?*20*32 ?*20*1
atten_context = tf.reduce_sum(atten_context, axis=1)
'''across rnn'''
if self.attn_before_rnn:
x = tf.expand_dims(tf.concat([atten_context, x], axis=-1), axis=1) # todo: delete x?
results = self.rnn(x, initial_state=hidden_state)
output, hidden_state = results[0], results[1:]
else:
                # follows Luong's paper, with a slight difference
x = tf.expand_dims(x, axis=1)
results = self.rnn(x, initial_state=hidden_state)
output, hidden_state = results[0], results[1:]
x = tf.concat([atten_context, output], axis=-1)
output = self.attn_combine(x)
pred = self.output_fc(output) # logits
outputs.append(pred)
outputs = tf.stack(outputs, axis=1)
return outputs
def _default_batchify_fn(data):
if isinstance(data[0], np.ndarray):
return np.stack(data)
elif isinstance(data[0], tuple):
data = zip(*data)
return [_default_batchify_fn(i) for i in data]
else:
data = np.asarray(data)
return data
class _MMetric(object):
def __init__(self):
self.num = 0
self.total = 0
def update(self, num, total):
self.num += num
self.total += total
def get(self):
return self.num / self.total
def reset(self):
self.num = 0
self.total = 0
if __name__ == '__main__':
import warnings
import os
import numpy as np
import pandas as pd
from mxnet.gluon.data import ArrayDataset, DataLoader
from sklearn.model_selection import train_test_split
from tqdm import tqdm
warnings.filterwarnings('ignore')
os.environ["CUDA_VISIBLE_DEVICES"] = ""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
hidden_size = 32
sos_token = 10
use_teacher_forcing_ratio = 0.5
'''build encoder'''
encoder_input = tf.placeholder(tf.int32, shape=(None, 20))
encoder_embedding = layers.Embedding(input_dim=11, output_dim=8, trainable=True)
encoder = RNNEncoder(units=hidden_size, bidirectional=True, merge_mode='sum')
encoder_output, encoder_state = encoder(inputs=encoder_embedding(encoder_input))
'''build decoder'''
decoder_input = tf.placeholder(tf.int32, shape=(None, None))
teacher_forcing = tf.placeholder_with_default(False, shape=None)
decoder = RNNWithAttentionDecoder(
units=hidden_size,
n_classes=10,
enc_max_time_steps=20,
dec_max_time_steps=20,
attn_method='dot',
attn_before_rnn=False
)
decoder_output = decoder(inputs=encoder_embedding(decoder_input), encoder_output=encoder_output,
encoder_state=encoder_state, teacher_forcing=teacher_forcing,
embed=encoder_embedding)
softmax_label = tf.placeholder(tf.int64, shape=(None, 20))
'''build loss'''
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=decoder_output, labels=softmax_label)
loss = tf.reduce_mean(loss)
'''build optimizer'''
opt = tf.train.AdamOptimizer(learning_rate=0.02).minimize(loss)
'''build metric'''
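    # Sequence-level exact-match accuracy: a sample counts as correct only if the
    # prediction matches the label at every time step.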
pred_label = tf.argmax(decoder_output, axis=-1)
n_true = tf.reduce_all(tf.equal(pred_label, softmax_label), axis=1)
n_true = tf.cast(n_true, dtype=tf.int32)
n_true = tf.reduce_sum(n_true)
'''load data'''
def load_data(path):
return pd.read_csv(path, header=None).values
X_train = load_data('./dataset/task8_train_input.csv')
y_train = load_data('./dataset/task8_train_output.csv')
X_test = load_data('./dataset/task8_test_input.csv')
y_test = load_data('./dataset/task8_test_output.csv')
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, train_size=0.9, random_state=0)
print('TrainSet Shape:{}'.format(X_train.shape))
print('TestSet Shape:{}'.format(X_test.shape))
build_dataloader = partial(DataLoader, batch_size=32, shuffle=False, last_batch='keep',
batchify_fn=_default_batchify_fn)
train_dataloader = build_dataloader(dataset=ArrayDataset(X_train, y_train))
test_dataloader = build_dataloader(dataset=ArrayDataset(X_test, y_test))
val_dataloader = build_dataloader(dataset=ArrayDataset(X_val, y_val))
'''start training'''
sess.run(tf.global_variables_initializer())
train_loss, train_acc = _MMetric(), _MMetric()
print_freq = 50
for step, (x, y) in enumerate(tqdm(train_dataloader, desc='Training', position=0)):
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
t = np.random.rand() < use_teacher_forcing_ratio
d = sos_input if not t else np.concatenate((sos_input, y[:, 1:]), axis=1)
feed_dict = {encoder_input: x, decoder_input: d, softmax_label: y, teacher_forcing: t}
_, loss_value, n_true_value = sess.run([opt, loss, n_true], feed_dict=feed_dict)
train_loss.update(loss_value, 1)
train_acc.update(n_true_value, len(x))
if step != 0 and step % print_freq == 0:
'''Evaluate on validation set'''
val_loss, val_acc = _MMetric(), _MMetric()
for x, y in val_dataloader:
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
feed_dict = {encoder_input: x, decoder_input: sos_input, softmax_label: y, teacher_forcing: False}
loss_value, n_true_value = sess.run([loss, n_true], feed_dict=feed_dict)
val_loss.update(loss_value, 1)
val_acc.update(n_true_value, len(x))
tqdm.write(
'[Step {}/{}] train-loss: {}, train-acc: {} val-loss: {}, val-acc: {}'.format(
step, len(train_dataloader), train_loss.get(), train_acc.get(), val_loss.get(), val_acc.get()
)
)
train_loss.reset()
train_acc.reset()
'''start testing'''
test_loss, test_acc = _MMetric(), _MMetric()
for step, (x, y) in enumerate(tqdm(test_dataloader, desc='Testing', position=0)):
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
feed_dict = {encoder_input: x, decoder_input: sos_input, softmax_label: y, teacher_forcing: False}
loss_value, n_true_value = sess.run([loss, n_true], feed_dict=feed_dict)
test_loss.update(loss_value, 1)
test_acc.update(n_true_value, len(x))
if step != 0 and step % print_freq == 0:
tqdm.write('[Step {}/{}] test-loss: {}, test-acc: {}'
.format(step, len(test_dataloader), test_loss.get(), test_acc.get()))
tqdm.write('[final] test-loss: {}, test-acc: {}'.format(test_loss.get(), test_acc.get()))
| __init__ | identifier_name |
seq2seq.py | from functools import partial
import tensorflow as tf
layers = tf.keras.layers
class _Seq2SeqBase(object):
@staticmethod
def gru():
return layers.CuDNNGRU if tf.test.is_gpu_available() else layers.GRU
@staticmethod
def lstm():
return layers.CuDNNLSTM if tf.test.is_gpu_available() else layers.LSTM
class RNNEncoder(_Seq2SeqBase):
def __init__(self, units, bidirectional=False, merge_mode=None):
rnn_model = partial(self.gru(), units=units, return_sequences=True, return_state=True, unroll=True)
self.forward_rnn = rnn_model(go_backwards=False, name='enc_forward_rnn')
self.backward_rnn = rnn_model(go_backwards=True, name='enc_backward_rnn') if bidirectional else None
self.merge_mode = merge_mode
def __call__(self, inputs):
forward_results = self.forward_rnn(inputs)
if self.backward_rnn:
backward_results = self.backward_rnn(inputs)
if not self.merge_mode:
# follow Bahdanau's paper
backward_results[0] = layers.Concatenate()([forward_results[0], backward_results[0]])
final_results = backward_results
else:
merge_func = layers.Concatenate() if self.merge_mode == 'concat' else layers.Add()
final_results = [merge_func([i, j]) for i, j in zip(forward_results, backward_results)]
else:
final_results = forward_results
output, hidden = final_results[0], final_results[1:]
hidden = [layers.Dense(units=self.forward_rnn.units, activation='tanh')(x) for x in hidden]
return output, hidden
class RNNWithAttentionDecoder(_Seq2SeqBase):
def __init__(self, units, n_classes, dec_max_time_steps, eos_token=0,
attn_method='concat', attn_before_rnn=True, **kwargs):
|
@staticmethod
def build_attn_score_func(units, attn_method, **kwargs): # todo: share?
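        # Returns a scoring function f(dec_input, dec_hidden, enc_outputs) producing
        # un-normalized attention scores of shape (batch, enc_steps, 1):
        #   'concat'   - additive score over [enc_outputs; tiled hidden] (Bahdanau-style)
        #   'location' - dense layer over dec_input and hidden only (needs enc_max_time_steps)
        #   'dot'      - dot product of enc_outputs with the hidden state (Luong-style)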
if attn_method == 'concat':
fcs = [
tf.layers.Dense(units=units, activation='tanh', name='w'),
tf.layers.Dense(units=1, name='r')
]
def f(*args):
_, h, e = args
h = tf.expand_dims(h, axis=1) # ?*1*N
h = tf.tile(h, multiples=[1, e.shape[1], 1]) # ?*20*N
x = tf.concat([e, h], axis=-1)
for layer in fcs:
x = layer(x)
return x # ?*20*1
return f
elif attn_method == 'location':
enc_max_time_steps = kwargs.get('enc_max_time_steps', None)
assert enc_max_time_steps
fc = tf.layers.Dense(units=enc_max_time_steps)
def f(*args):
x = fc(tf.concat(args[:-1], axis=-1)) # ?*20
return tf.expand_dims(x, axis=-1) # ?*20*1
return f
elif attn_method == 'dot':
def f(*args):
_, h, e = args
h = tf.expand_dims(h, axis=-1) # ?*32*1
return tf.matmul(e, h) # ?*20*1
return f
else:
            raise NotImplementedError('unsupported attn_method: %s' % attn_method)
def __call__(self, inputs, encoder_output, encoder_state, teacher_forcing, **kwargs):
hidden_state = encoder_state
outputs = []
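        # At each decoding step, feed the gold token embedding under teacher forcing,
        # or the embedding of the previous step's argmax prediction otherwise.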
def without_teacher_forcing():
embed = kwargs.get('embed', None)
assert embed
return embed(tf.argmax(pred, axis=1))
for step in range(self.dec_max_time_steps):
if step == 0:
x = inputs[:, 0, :]
else:
x = tf.cond(teacher_forcing, true_fn=lambda: inputs[:, step, :],
false_fn=without_teacher_forcing, name='dec_switch_teacher_forcing')
'''calculate attention'''
h_state = hidden_state[0]
atten_scores = self.attn_score(x, h_state, encoder_output)
            atten_weights = tf.nn.softmax(atten_scores, axis=1)
atten_context = tf.multiply(encoder_output, atten_weights) # ?*20*32 ?*20*1
atten_context = tf.reduce_sum(atten_context, axis=1)
'''across rnn'''
if self.attn_before_rnn:
x = tf.expand_dims(tf.concat([atten_context, x], axis=-1), axis=1) # todo: delete x?
results = self.rnn(x, initial_state=hidden_state)
output, hidden_state = results[0], results[1:]
else:
                # follows Luong's paper, with a slight difference
x = tf.expand_dims(x, axis=1)
results = self.rnn(x, initial_state=hidden_state)
output, hidden_state = results[0], results[1:]
x = tf.concat([atten_context, output], axis=-1)
output = self.attn_combine(x)
pred = self.output_fc(output) # logits
outputs.append(pred)
outputs = tf.stack(outputs, axis=1)
return outputs
def _default_batchify_fn(data):
if isinstance(data[0], np.ndarray):
return np.stack(data)
elif isinstance(data[0], tuple):
data = zip(*data)
return [_default_batchify_fn(i) for i in data]
else:
data = np.asarray(data)
return data
class _MMetric(object):
def __init__(self):
self.num = 0
self.total = 0
def update(self, num, total):
self.num += num
self.total += total
def get(self):
return self.num / self.total
def reset(self):
self.num = 0
self.total = 0
if __name__ == '__main__':
import warnings
import os
import numpy as np
import pandas as pd
from mxnet.gluon.data import ArrayDataset, DataLoader
from sklearn.model_selection import train_test_split
from tqdm import tqdm
warnings.filterwarnings('ignore')
os.environ["CUDA_VISIBLE_DEVICES"] = ""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
hidden_size = 32
sos_token = 10
use_teacher_forcing_ratio = 0.5
'''build encoder'''
encoder_input = tf.placeholder(tf.int32, shape=(None, 20))
encoder_embedding = layers.Embedding(input_dim=11, output_dim=8, trainable=True)
encoder = RNNEncoder(units=hidden_size, bidirectional=True, merge_mode='sum')
encoder_output, encoder_state = encoder(inputs=encoder_embedding(encoder_input))
'''build decoder'''
decoder_input = tf.placeholder(tf.int32, shape=(None, None))
teacher_forcing = tf.placeholder_with_default(False, shape=None)
decoder = RNNWithAttentionDecoder(
units=hidden_size,
n_classes=10,
enc_max_time_steps=20,
dec_max_time_steps=20,
attn_method='dot',
attn_before_rnn=False
)
decoder_output = decoder(inputs=encoder_embedding(decoder_input), encoder_output=encoder_output,
encoder_state=encoder_state, teacher_forcing=teacher_forcing,
embed=encoder_embedding)
softmax_label = tf.placeholder(tf.int64, shape=(None, 20))
'''build loss'''
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=decoder_output, labels=softmax_label)
loss = tf.reduce_mean(loss)
'''build optimizer'''
opt = tf.train.AdamOptimizer(learning_rate=0.02).minimize(loss)
'''build metric'''
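    # Sequence-level exact-match accuracy: a sample counts as correct only if the
    # prediction matches the label at every time step.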
pred_label = tf.argmax(decoder_output, axis=-1)
n_true = tf.reduce_all(tf.equal(pred_label, softmax_label), axis=1)
n_true = tf.cast(n_true, dtype=tf.int32)
n_true = tf.reduce_sum(n_true)
'''load data'''
def load_data(path):
return pd.read_csv(path, header=None).values
X_train = load_data('./dataset/task8_train_input.csv')
y_train = load_data('./dataset/task8_train_output.csv')
X_test = load_data('./dataset/task8_test_input.csv')
y_test = load_data('./dataset/task8_test_output.csv')
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, train_size=0.9, random_state=0)
print('TrainSet Shape:{}'.format(X_train.shape))
print('TestSet Shape:{}'.format(X_test.shape))
build_dataloader = partial(DataLoader, batch_size=32, shuffle=False, last_batch='keep',
batchify_fn=_default_batchify_fn)
train_dataloader = build_dataloader(dataset=ArrayDataset(X_train, y_train))
test_dataloader = build_dataloader(dataset=ArrayDataset(X_test, y_test))
val_dataloader = build_dataloader(dataset=ArrayDataset(X_val, y_val))
'''start training'''
sess.run(tf.global_variables_initializer())
train_loss, train_acc = _MMetric(), _MMetric()
print_freq = 50
for step, (x, y) in enumerate(tqdm(train_dataloader, desc='Training', position=0)):
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
t = np.random.rand() < use_teacher_forcing_ratio
d = sos_input if not t else np.concatenate((sos_input, y[:, 1:]), axis=1)
feed_dict = {encoder_input: x, decoder_input: d, softmax_label: y, teacher_forcing: t}
_, loss_value, n_true_value = sess.run([opt, loss, n_true], feed_dict=feed_dict)
train_loss.update(loss_value, 1)
train_acc.update(n_true_value, len(x))
if step != 0 and step % print_freq == 0:
'''Evaluate on validation set'''
val_loss, val_acc = _MMetric(), _MMetric()
for x, y in val_dataloader:
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
feed_dict = {encoder_input: x, decoder_input: sos_input, softmax_label: y, teacher_forcing: False}
loss_value, n_true_value = sess.run([loss, n_true], feed_dict=feed_dict)
val_loss.update(loss_value, 1)
val_acc.update(n_true_value, len(x))
tqdm.write(
'[Step {}/{}] train-loss: {}, train-acc: {} val-loss: {}, val-acc: {}'.format(
step, len(train_dataloader), train_loss.get(), train_acc.get(), val_loss.get(), val_acc.get()
)
)
train_loss.reset()
train_acc.reset()
'''start testing'''
test_loss, test_acc = _MMetric(), _MMetric()
for step, (x, y) in enumerate(tqdm(test_dataloader, desc='Testing', position=0)):
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
feed_dict = {encoder_input: x, decoder_input: sos_input, softmax_label: y, teacher_forcing: False}
loss_value, n_true_value = sess.run([loss, n_true], feed_dict=feed_dict)
test_loss.update(loss_value, 1)
test_acc.update(n_true_value, len(x))
if step != 0 and step % print_freq == 0:
tqdm.write('[Step {}/{}] test-loss: {}, test-acc: {}'
.format(step, len(test_dataloader), test_loss.get(), test_acc.get()))
tqdm.write('[final] test-loss: {}, test-acc: {}'.format(test_loss.get(), test_acc.get()))
| self.rnn = self.gru()(units=units, return_state=True)
self.attn_score = self.build_attn_score_func(units, attn_method, **kwargs)
self.attn_combine = layers.Dense(units=units, activation='tanh', name='dec_attn_combine')
self.attn_before_rnn = attn_before_rnn
self.output_fc = layers.Dense(units=n_classes, name='dec_output_fc')
self.dec_max_time_steps = dec_max_time_steps
self.eos_token = eos_token # todo: early stopping | identifier_body |
seq2seq.py | from functools import partial
import tensorflow as tf
layers = tf.keras.layers
class _Seq2SeqBase(object):
@staticmethod
def gru():
return layers.CuDNNGRU if tf.test.is_gpu_available() else layers.GRU
@staticmethod
def lstm():
return layers.CuDNNLSTM if tf.test.is_gpu_available() else layers.LSTM
class RNNEncoder(_Seq2SeqBase):
def __init__(self, units, bidirectional=False, merge_mode=None):
rnn_model = partial(self.gru(), units=units, return_sequences=True, return_state=True, unroll=True)
self.forward_rnn = rnn_model(go_backwards=False, name='enc_forward_rnn')
self.backward_rnn = rnn_model(go_backwards=True, name='enc_backward_rnn') if bidirectional else None
self.merge_mode = merge_mode
def __call__(self, inputs):
forward_results = self.forward_rnn(inputs)
if self.backward_rnn:
backward_results = self.backward_rnn(inputs)
if not self.merge_mode:
# follow Bahdanau's paper
backward_results[0] = layers.Concatenate()([forward_results[0], backward_results[0]])
final_results = backward_results
else:
merge_func = layers.Concatenate() if self.merge_mode == 'concat' else layers.Add()
final_results = [merge_func([i, j]) for i, j in zip(forward_results, backward_results)]
else:
final_results = forward_results
output, hidden = final_results[0], final_results[1:]
hidden = [layers.Dense(units=self.forward_rnn.units, activation='tanh')(x) for x in hidden]
return output, hidden
class RNNWithAttentionDecoder(_Seq2SeqBase):
def __init__(self, units, n_classes, dec_max_time_steps, eos_token=0,
attn_method='concat', attn_before_rnn=True, **kwargs):
self.rnn = self.gru()(units=units, return_state=True)
self.attn_score = self.build_attn_score_func(units, attn_method, **kwargs)
self.attn_combine = layers.Dense(units=units, activation='tanh', name='dec_attn_combine')
self.attn_before_rnn = attn_before_rnn
self.output_fc = layers.Dense(units=n_classes, name='dec_output_fc')
self.dec_max_time_steps = dec_max_time_steps
self.eos_token = eos_token # todo: early stopping
@staticmethod
def build_attn_score_func(units, attn_method, **kwargs): # todo: share?
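        # Returns a scoring function f(dec_input, dec_hidden, enc_outputs) producing
        # un-normalized attention scores of shape (batch, enc_steps, 1):
        #   'concat'   - additive score over [enc_outputs; tiled hidden] (Bahdanau-style)
        #   'location' - dense layer over dec_input and hidden only (needs enc_max_time_steps)
        #   'dot'      - dot product of enc_outputs with the hidden state (Luong-style)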
if attn_method == 'concat':
fcs = [
tf.layers.Dense(units=units, activation='tanh', name='w'),
tf.layers.Dense(units=1, name='r')
]
def f(*args):
_, h, e = args
h = tf.expand_dims(h, axis=1) # ?*1*N
h = tf.tile(h, multiples=[1, e.shape[1], 1]) # ?*20*N
x = tf.concat([e, h], axis=-1)
for layer in fcs:
x = layer(x)
return x # ?*20*1
return f
elif attn_method == 'location':
enc_max_time_steps = kwargs.get('enc_max_time_steps', None)
assert enc_max_time_steps
fc = tf.layers.Dense(units=enc_max_time_steps)
def f(*args):
x = fc(tf.concat(args[:-1], axis=-1)) # ?*20
return tf.expand_dims(x, axis=-1) # ?*20*1
return f
elif attn_method == 'dot':
def f(*args):
_, h, e = args
h = tf.expand_dims(h, axis=-1) # ?*32*1
return tf.matmul(e, h) # ?*20*1
return f
else:
            raise NotImplementedError('unsupported attn_method: %s' % attn_method)
def __call__(self, inputs, encoder_output, encoder_state, teacher_forcing, **kwargs):
hidden_state = encoder_state
outputs = []
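        # At each decoding step, feed the gold token embedding under teacher forcing,
        # or the embedding of the previous step's argmax prediction otherwise.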
def without_teacher_forcing():
embed = kwargs.get('embed', None)
assert embed
return embed(tf.argmax(pred, axis=1))
for step in range(self.dec_max_time_steps):
if step == 0:
x = inputs[:, 0, :]
else:
x = tf.cond(teacher_forcing, true_fn=lambda: inputs[:, step, :],
false_fn=without_teacher_forcing, name='dec_switch_teacher_forcing')
'''calculate attention'''
h_state = hidden_state[0]
atten_scores = self.attn_score(x, h_state, encoder_output)
            atten_weights = tf.nn.softmax(atten_scores, axis=1)
atten_context = tf.multiply(encoder_output, atten_weights) # ?*20*32 ?*20*1
atten_context = tf.reduce_sum(atten_context, axis=1)
'''across rnn'''
if self.attn_before_rnn:
x = tf.expand_dims(tf.concat([atten_context, x], axis=-1), axis=1) # todo: delete x?
results = self.rnn(x, initial_state=hidden_state)
output, hidden_state = results[0], results[1:]
else:
                # follows Luong's paper, with a slight difference
x = tf.expand_dims(x, axis=1)
results = self.rnn(x, initial_state=hidden_state)
output, hidden_state = results[0], results[1:]
x = tf.concat([atten_context, output], axis=-1)
output = self.attn_combine(x)
pred = self.output_fc(output) # logits
outputs.append(pred)
outputs = tf.stack(outputs, axis=1)
return outputs
def _default_batchify_fn(data):
if isinstance(data[0], np.ndarray):
return np.stack(data)
elif isinstance(data[0], tuple):
data = zip(*data)
return [_default_batchify_fn(i) for i in data]
else:
data = np.asarray(data)
return data
class _MMetric(object):
def __init__(self):
self.num = 0
self.total = 0
def update(self, num, total):
self.num += num
self.total += total
def get(self):
return self.num / self.total
def reset(self):
self.num = 0
self.total = 0
if __name__ == '__main__':
import warnings
import os
import numpy as np
import pandas as pd
from mxnet.gluon.data import ArrayDataset, DataLoader
from sklearn.model_selection import train_test_split
from tqdm import tqdm
warnings.filterwarnings('ignore')
os.environ["CUDA_VISIBLE_DEVICES"] = ""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
hidden_size = 32
sos_token = 10
use_teacher_forcing_ratio = 0.5
'''build encoder'''
encoder_input = tf.placeholder(tf.int32, shape=(None, 20))
encoder_embedding = layers.Embedding(input_dim=11, output_dim=8, trainable=True)
encoder = RNNEncoder(units=hidden_size, bidirectional=True, merge_mode='sum')
encoder_output, encoder_state = encoder(inputs=encoder_embedding(encoder_input))
'''build decoder'''
decoder_input = tf.placeholder(tf.int32, shape=(None, None))
teacher_forcing = tf.placeholder_with_default(False, shape=None)
decoder = RNNWithAttentionDecoder(
units=hidden_size,
n_classes=10,
enc_max_time_steps=20,
dec_max_time_steps=20,
attn_method='dot',
attn_before_rnn=False
)
decoder_output = decoder(inputs=encoder_embedding(decoder_input), encoder_output=encoder_output,
encoder_state=encoder_state, teacher_forcing=teacher_forcing,
embed=encoder_embedding)
softmax_label = tf.placeholder(tf.int64, shape=(None, 20))
'''build loss'''
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=decoder_output, labels=softmax_label)
loss = tf.reduce_mean(loss)
'''build optimizer'''
opt = tf.train.AdamOptimizer(learning_rate=0.02).minimize(loss)
'''build metric'''
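    # Sequence-level exact-match accuracy: a sample counts as correct only if the
    # prediction matches the label at every time step.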
pred_label = tf.argmax(decoder_output, axis=-1)
n_true = tf.reduce_all(tf.equal(pred_label, softmax_label), axis=1)
n_true = tf.cast(n_true, dtype=tf.int32)
n_true = tf.reduce_sum(n_true)
'''load data'''
def load_data(path):
return pd.read_csv(path, header=None).values
X_train = load_data('./dataset/task8_train_input.csv')
y_train = load_data('./dataset/task8_train_output.csv') | print('TestSet Shape:{}'.format(X_test.shape))
build_dataloader = partial(DataLoader, batch_size=32, shuffle=False, last_batch='keep',
batchify_fn=_default_batchify_fn)
train_dataloader = build_dataloader(dataset=ArrayDataset(X_train, y_train))
test_dataloader = build_dataloader(dataset=ArrayDataset(X_test, y_test))
val_dataloader = build_dataloader(dataset=ArrayDataset(X_val, y_val))
'''start training'''
sess.run(tf.global_variables_initializer())
train_loss, train_acc = _MMetric(), _MMetric()
print_freq = 50
for step, (x, y) in enumerate(tqdm(train_dataloader, desc='Training', position=0)):
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
t = np.random.rand() < use_teacher_forcing_ratio
d = sos_input if not t else np.concatenate((sos_input, y[:, 1:]), axis=1)
feed_dict = {encoder_input: x, decoder_input: d, softmax_label: y, teacher_forcing: t}
_, loss_value, n_true_value = sess.run([opt, loss, n_true], feed_dict=feed_dict)
train_loss.update(loss_value, 1)
train_acc.update(n_true_value, len(x))
if step != 0 and step % print_freq == 0:
'''Evaluate on validation set'''
val_loss, val_acc = _MMetric(), _MMetric()
for x, y in val_dataloader:
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
feed_dict = {encoder_input: x, decoder_input: sos_input, softmax_label: y, teacher_forcing: False}
loss_value, n_true_value = sess.run([loss, n_true], feed_dict=feed_dict)
val_loss.update(loss_value, 1)
val_acc.update(n_true_value, len(x))
tqdm.write(
'[Step {}/{}] train-loss: {}, train-acc: {} val-loss: {}, val-acc: {}'.format(
step, len(train_dataloader), train_loss.get(), train_acc.get(), val_loss.get(), val_acc.get()
)
)
train_loss.reset()
train_acc.reset()
'''start testing'''
test_loss, test_acc = _MMetric(), _MMetric()
for step, (x, y) in enumerate(tqdm(test_dataloader, desc='Testing', position=0)):
sos_input = np.ones(shape=(len(y), 1), dtype=np.int32) * sos_token
feed_dict = {encoder_input: x, decoder_input: sos_input, softmax_label: y, teacher_forcing: False}
loss_value, n_true_value = sess.run([loss, n_true], feed_dict=feed_dict)
test_loss.update(loss_value, 1)
test_acc.update(n_true_value, len(x))
if step != 0 and step % print_freq == 0:
tqdm.write('[Step {}/{}] test-loss: {}, test-acc: {}'
.format(step, len(test_dataloader), test_loss.get(), test_acc.get()))
tqdm.write('[final] test-loss: {}, test-acc: {}'.format(test_loss.get(), test_acc.get())) | X_test = load_data('./dataset/task8_test_input.csv')
y_test = load_data('./dataset/task8_test_output.csv')
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, train_size=0.9, random_state=0)
print('TrainSet Shape:{}'.format(X_train.shape)) | random_line_split |
mock.rs | // This file is part of the SORA network and Polkaswap app.
// Copyright (c) 2020, 2021, Polka Biome Ltd. All rights reserved.
// SPDX-License-Identifier: BSD-4-Clause
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// Redistributions of source code must retain the above copyright notice, this list
// of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// All advertising materials mentioning features or use of this software must display
// the following acknowledgement: This product includes software developed by Polka Biome
// Ltd., SORA, and Polkaswap.
//
// Neither the name of the Polka Biome Ltd. nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY Polka Biome Ltd. AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Polka Biome Ltd. BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Creating mock Runtime here
use crate::{AssetConfig, Config, NetworkConfig};
use codec::{Codec, Decode, Encode};
use common::mock::ExistentialDeposits;
use common::prelude::Balance;
use common::{
Amount, AssetId32, AssetName, AssetSymbol, PredefinedAssetId, DEFAULT_BALANCE_PRECISION, VAL,
};
use currencies::BasicCurrencyAdapter;
use frame_support::dispatch::{DispatchInfo, GetDispatchInfo};
use frame_support::sp_io::TestExternalities;
use frame_support::sp_runtime::app_crypto::sp_core;
use frame_support::sp_runtime::app_crypto::sp_core::crypto::AccountId32;
use frame_support::sp_runtime::app_crypto::sp_core::offchain::{OffchainExt, TransactionPoolExt};
use frame_support::sp_runtime::app_crypto::sp_core::{ecdsa, sr25519, Pair, Public};
use frame_support::sp_runtime::offchain::testing::{
OffchainState, PoolState, TestOffchainExt, TestTransactionPoolExt,
};
use frame_support::sp_runtime::serde::{Serialize, Serializer};
use frame_support::sp_runtime::testing::Header;
use frame_support::sp_runtime::traits::{
self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, IdentifyAccount,
IdentityLookup, PostDispatchInfoOf, SignedExtension, ValidateUnsigned, Verify,
};
use frame_support::sp_runtime::transaction_validity::{
TransactionSource, TransactionValidity, TransactionValidityError,
};
use frame_support::sp_runtime::{
self, ApplyExtrinsicResultWithInfo, MultiSignature, MultiSigner, Perbill, Percent,
};
use frame_support::traits::GenesisBuild;
use frame_support::weights::{Pays, Weight};
use frame_support::{construct_runtime, parameter_types};
use frame_system::offchain::{Account, SigningTypes};
use parking_lot::RwLock;
use sp_core::H256;
use sp_keystore::testing::KeyStore;
use sp_keystore::KeystoreExt;
use sp_std::collections::btree_set::BTreeSet;
use sp_std::fmt::Debug;
use sp_std::str::FromStr;
use sp_std::sync::Arc;
use std::collections::HashMap;
use {crate as eth_bridge, frame_system};
pub const PSWAP: PredefinedAssetId = PredefinedAssetId::PSWAP;
pub const XOR: PredefinedAssetId = PredefinedAssetId::XOR;
/// An index to a block.
pub type BlockNumber = u64;
pub type Signature = MultiSignature;
/// Some way of identifying an account on the chain. We intentionally make it equivalent
/// to the public key of our transaction signing scheme.
pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
parameter_types! {
pub const GetBaseAssetId: AssetId32<PredefinedAssetId> = AssetId32::from_asset_id(XOR);
pub const DepositBase: u64 = 1;
pub const DepositFactor: u64 = 1;
pub const MaxSignatories: u16 = 4;
pub const UnsignedPriority: u64 = 100;
pub const EthNetworkId: <Runtime as Config>::NetworkId = 0;
}
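/// A minimal extrinsic type for tests: an optional (signer, extension) pair plus the
/// call. `Checkable::check` is a no-op, so no real signature verification happens.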
#[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)]
pub struct MyTestXt<Call, Extra> {
/// Signature of the extrinsic.
pub signature: Option<(AccountId, Extra)>,
/// Call of the extrinsic.
pub call: Call,
}
parity_util_mem::malloc_size_of_is_0!(any: MyTestXt<Call, Extra>);
impl<Call: Codec + Sync + Send, Context, Extra> Checkable<Context> for MyTestXt<Call, Extra> {
type Checked = Self;
fn check(self, _c: &Context) -> Result<Self::Checked, TransactionValidityError> {
Ok(self)
}
}
impl<Call: Codec + Sync + Send, Extra> traits::Extrinsic for MyTestXt<Call, Extra> {
type Call = Call;
type SignaturePayload = (AccountId, Extra);
fn is_signed(&self) -> Option<bool> {
Some(self.signature.is_some())
}
fn new(c: Call, sig: Option<Self::SignaturePayload>) -> Option<Self> {
Some(MyTestXt {
signature: sig,
call: c,
})
}
}
impl SignedExtension for MyExtra {
type AccountId = AccountId;
type Call = Call;
type AdditionalSigned = ();
type Pre = ();
const IDENTIFIER: &'static str = "testextension";
fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
Ok(())
}
}
impl<Origin, Call, Extra> Applyable for MyTestXt<Call, Extra>
where
Call:
'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable<Origin = Origin>,
Extra: SignedExtension<AccountId = AccountId, Call = Call>,
Origin: From<Option<AccountId32>>,
{
type Call = Call;
/// Checks to see if this is a valid *transaction*. It returns information on it if so.
fn validate<U: ValidateUnsigned<Call = Self::Call>>(
&self,
_source: TransactionSource,
_info: &DispatchInfoOf<Self::Call>,
_len: usize,
) -> TransactionValidity {
Ok(Default::default())
}
/// Executes all necessary logic needed prior to dispatch and deconstructs into function call,
/// index and sender.
fn apply<U: ValidateUnsigned<Call = Self::Call>>(
self,
info: &DispatchInfoOf<Self::Call>,
len: usize,
) -> ApplyExtrinsicResultWithInfo<PostDispatchInfoOf<Self::Call>> {
let maybe_who = if let Some((who, extra)) = self.signature | else {
Extra::pre_dispatch_unsigned(&self.call, info, len)?;
None
};
Ok(self.call.dispatch(maybe_who.into()))
}
}
impl<Call, Extra> Serialize for MyTestXt<Call, Extra>
where
MyTestXt<Call, Extra>: Encode,
{
fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.using_encoded(|bytes| seq.serialize_bytes(bytes))
}
}
impl<Call: Encode, Extra: Encode> GetDispatchInfo for MyTestXt<Call, Extra> {
fn get_dispatch_info(&self) -> DispatchInfo {
// for testing: weight == size.
DispatchInfo {
weight: self.encode().len() as _,
pays_fee: Pays::No,
..Default::default()
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)]
pub struct MyExtra;
pub type TestExtrinsic = MyTestXt<Call, MyExtra>;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
pub const ExistentialDeposit: u128 = 0;
}
impl frame_system::Config for Runtime {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Call = Call;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type PalletInfo = PalletInfo;
type SS58Prefix = ();
}
impl<T: SigningTypes> frame_system::offchain::SignMessage<T> for Runtime {
type SignatureData = ();
fn sign_message(&self, _message: &[u8]) -> Self::SignatureData {
unimplemented!()
}
fn sign<TPayload, F>(&self, _f: F) -> Self::SignatureData
where
F: Fn(&Account<T>) -> TPayload,
TPayload: frame_system::offchain::SignedPayload<T>,
{
unimplemented!()
}
}
impl<LocalCall> frame_system::offchain::CreateSignedTransaction<LocalCall> for Runtime
where
Call: From<LocalCall>,
{
fn create_transaction<C: frame_system::offchain::AppCrypto<Self::Public, Self::Signature>>(
call: Call,
_public: <Signature as Verify>::Signer,
account: <Runtime as frame_system::Config>::AccountId,
_index: <Runtime as frame_system::Config>::Index,
) -> Option<(
Call,
<TestExtrinsic as sp_runtime::traits::Extrinsic>::SignaturePayload,
)> {
Some((call, (account, MyExtra {})))
}
}
impl frame_system::offchain::SigningTypes for Runtime {
type Public = <Signature as Verify>::Signer;
type Signature = Signature;
}
impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime
where
Call: From<C>,
{
type OverarchingCall = Call;
type Extrinsic = TestExtrinsic;
}
impl pallet_balances::Config for Runtime {
/// The type for recording an account's balance.
type Balance = Balance;
/// The ubiquitous event type.
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
type MaxLocks = ();
}
impl tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = <Runtime as assets::Config>::AssetId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
}
impl currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency = BasicCurrencyAdapter<Runtime, Balances, Amount, BlockNumber>;
type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId;
type WeightInfo = ();
}
impl assets::Config for Runtime {
type Event = Event;
type ExtraAccountId = [u8; 32];
type ExtraAssetRecordArg =
common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>;
type AssetId = common::AssetId32<PredefinedAssetId>;
type GetBaseAssetId = GetBaseAssetId;
type Currency = currencies::Module<Runtime>;
type WeightInfo = ();
}
impl common::Config for Runtime {
type DEXId = common::DEXId;
type LstId = common::LiquiditySourceType;
}
impl permissions::Config for Runtime {
type Event = Event;
}
impl bridge_multisig::Config for Runtime {
type Call = Call;
type Event = Event;
type Currency = Balances;
type DepositBase = DepositBase;
type DepositFactor = DepositFactor;
type MaxSignatories = MaxSignatories;
type WeightInfo = ();
}
impl pallet_sudo::Config for Runtime {
type Call = Call;
type Event = Event;
}
impl crate::Config for Runtime {
type PeerId = crate::crypto::TestAuthId;
type Call = Call;
type Event = Event;
type NetworkId = u32;
type GetEthNetworkId = EthNetworkId;
type WeightInfo = ();
}
impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic {
const VERSION: u8 = 1;
type SignedExtensions = ();
}
construct_runtime!(
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
Multisig: bridge_multisig::{Module, Call, Storage, Config<T>, Event<T>},
Tokens: tokens::{Module, Call, Storage, Config<T>, Event<T>},
Currencies: currencies::{Module, Call, Storage, Event<T>},
Assets: assets::{Module, Call, Storage, Config<T>, Event<T>},
Permissions: permissions::{Module, Call, Storage, Config<T>, Event<T>},
Sudo: pallet_sudo::{Module, Call, Storage, Config<T>, Event<T>},
EthBridge: eth_bridge::{Module, Call, Storage, Config<T>, Event<T>},
}
);
pub type SubstrateAccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
pub struct State {
pub networks: HashMap<u32, ExtendedNetworkConfig>,
pub authority_account_id: AccountId32,
pub pool_state: Arc<RwLock<PoolState>>,
pub offchain_state: Arc<RwLock<OffchainState>>,
}
#[derive(Clone, Debug)]
pub struct ExtendedNetworkConfig {
pub ocw_keypairs: Vec<(MultiSigner, AccountId32, [u8; 32])>,
pub config: NetworkConfig<Runtime>,
}
pub struct ExtBuilder {
pub networks: HashMap<u32, ExtendedNetworkConfig>,
last_network_id: u32,
root_account_id: AccountId32,
}
impl Default for ExtBuilder {
fn default() -> Self {
let mut builder = Self {
networks: Default::default(),
last_network_id: Default::default(),
root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"),
};
builder.add_network(
vec![
AssetConfig::Thischain { id: PSWAP.into() },
AssetConfig::Sidechain {
id: XOR.into(),
sidechain_id: sp_core::H160::from_str(
"40fd72257597aa14c7231a7b1aaa29fce868f677",
)
.unwrap(),
owned: true,
precision: DEFAULT_BALANCE_PRECISION,
},
AssetConfig::Sidechain {
id: VAL.into(),
sidechain_id: sp_core::H160::from_str(
"3f9feac97e5feb15d8bf98042a9a01b515da3dfb",
)
.unwrap(),
owned: true,
precision: DEFAULT_BALANCE_PRECISION,
},
],
Some(vec![
(XOR.into(), common::balance!(350000)),
(VAL.into(), common::balance!(33900000)),
]),
Some(4),
);
builder
}
}
impl ExtBuilder {
pub fn new() -> Self {
Self {
networks: Default::default(),
last_network_id: Default::default(),
root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"),
}
}
pub fn add_currency(
&mut self,
network_id: u32,
currency: AssetConfig<AssetId32<PredefinedAssetId>>,
) {
self.networks
.get_mut(&network_id)
.unwrap()
.config
.assets
.push(currency);
}
pub fn add_network(
&mut self,
assets: Vec<AssetConfig<AssetId32<PredefinedAssetId>>>,
reserves: Option<Vec<(AssetId32<PredefinedAssetId>, Balance)>>,
peers_num: Option<usize>,
) -> u32 {
let net_id = self.last_network_id;
let multisig_account_id = bridge_multisig::Module::<Runtime>::multi_account_id(
&self.root_account_id,
1,
net_id as u64 + 10,
);
let peers_keys = gen_peers_keys(&format!("OCW{}", net_id), peers_num.unwrap_or(4));
self.networks.insert(
net_id,
ExtendedNetworkConfig {
config: NetworkConfig {
initial_peers: peers_keys.iter().map(|(_, id, _)| id).cloned().collect(),
bridge_account_id: multisig_account_id.clone(),
assets,
bridge_contract_address: Default::default(),
reserves: reserves.unwrap_or_default(),
},
ocw_keypairs: peers_keys,
},
);
self.last_network_id += 1;
net_id
}
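    /// Assembles the test externalities: endows bridge and peer accounts, registers
    /// assets, seeds the pallet genesis configs, and wires up the offchain worker,
    /// transaction pool, and keystore mocks.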
pub fn build(self) -> (TestExternalities, State) {
let (offchain, offchain_state) = TestOffchainExt::new();
let (pool, pool_state) = TestTransactionPoolExt::new();
let authority_account_id =
bridge_multisig::Module::<Runtime>::multi_account_id(&self.root_account_id, 1, 0);
let mut bridge_accounts = Vec::new();
let mut bridge_network_configs = Vec::new();
let mut endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new();
let mut networks: Vec<_> = self.networks.clone().into_iter().collect();
networks.sort_by(|(x, _), (y, _)| x.cmp(y));
for (_net_id, ext_network) in networks {
bridge_network_configs.push(ext_network.config.clone());
endowed_accounts.extend(ext_network.config.assets.iter().cloned().map(
|asset_config| {
(
ext_network.config.bridge_account_id.clone(),
asset_config.asset_id().clone(),
0,
)
},
));
endowed_accounts.extend(ext_network.config.reserves.iter().cloned().map(
|(asset_id, _balance)| (ext_network.config.bridge_account_id.clone(), asset_id, 0),
));
bridge_accounts.push((
ext_network.config.bridge_account_id.clone(),
bridge_multisig::MultisigAccount::new(
ext_network
.ocw_keypairs
.iter()
.map(|x| x.1.clone())
.collect(),
Percent::from_parts(67),
),
));
}
// pallet_balances and orml_tokens no longer accept duplicate elements.
let mut unique_endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new();
for acc in endowed_accounts {
if let Some(unique_acc) = unique_endowed_accounts.iter_mut().find(|a| a.1 == acc.1) {
unique_acc.2 += acc.2;
} else {
unique_endowed_accounts.push(acc);
}
}
let endowed_accounts = unique_endowed_accounts;
let endowed_assets: BTreeSet<_> = endowed_accounts
.iter()
.map(|x| {
(
x.1,
self.root_account_id.clone(),
AssetSymbol(b"".to_vec()),
AssetName(b"".to_vec()),
18,
Balance::from(0u32),
true,
)
})
.collect();
let mut storage = frame_system::GenesisConfig::default()
.build_storage::<Runtime>()
.unwrap();
let mut balances: Vec<_> = endowed_accounts
.iter()
.map(|(acc, ..)| acc)
.chain(vec![&self.root_account_id, &authority_account_id])
.map(|x| (x.clone(), Balance::from(0u32)))
.collect();
balances.extend(bridge_accounts.iter().map(|(acc, _)| (acc.clone(), 0)));
for (_net_id, ext_network) in &self.networks {
balances.extend(ext_network.ocw_keypairs.iter().map(|x| (x.1.clone(), 0)));
}
balances.sort_by_key(|x| x.0.clone());
balances.dedup_by_key(|x| x.0.clone());
BalancesConfig { balances }
.assimilate_storage(&mut storage)
.unwrap();
if !endowed_accounts.is_empty() {
SudoConfig {
key: endowed_accounts[0].0.clone(),
}
.assimilate_storage(&mut storage)
.unwrap();
}
MultisigConfig {
accounts: bridge_accounts,
}
.assimilate_storage(&mut storage)
.unwrap();
PermissionsConfig {
initial_permission_owners: vec![],
initial_permissions: Vec::new(),
}
.assimilate_storage(&mut storage)
.unwrap();
TokensConfig {
endowed_accounts: endowed_accounts.clone(),
}
.assimilate_storage(&mut storage)
.unwrap();
AssetsConfig {
endowed_assets: endowed_assets.into_iter().collect(),
}
.assimilate_storage(&mut storage)
.unwrap();
EthBridgeConfig {
networks: bridge_network_configs,
authority_account: authority_account_id.clone(),
val_master_contract_address: sp_core::H160::from_str(
"47e229aa491763038f6a505b4f85d8eb463f0962",
)
.unwrap(),
xor_master_contract_address: sp_core::H160::from_str(
"12c6a709925783f49fcca0b398d13b0d597e6e1c",
)
.unwrap(),
}
.assimilate_storage(&mut storage)
.unwrap();
let mut t = TestExternalities::from(storage);
t.register_extension(OffchainExt::new(offchain));
t.register_extension(TransactionPoolExt::new(pool));
t.register_extension(KeystoreExt(Arc::new(KeyStore::new())));
t.execute_with(|| System::set_block_number(1));
let state = State {
networks: self.networks,
authority_account_id,
pool_state,
offchain_state,
};
(t, state)
}
}
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
pub type AccountPublic = <Signature as Verify>::Signer;
/// Helper function to generate an account ID from seed
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> SubstrateAccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
pub fn gen_peers_keys(
prefix: &str,
peers_num: usize,
) -> Vec<(AccountPublic, AccountId32, [u8; 32])> {
(0..peers_num)
.map(|i| {
let kp = ecdsa::Pair::from_string(&format!("//{}{}", prefix, i), None).unwrap();
let signer = AccountPublic::from(kp.public());
(signer.clone(), signer.into_account(), kp.seed())
})
.collect()
}
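// A minimal sketch of how a test can drive the builder above; the module,
// test name, and assertion are illustrative assumptions, not original code.
#[cfg(test)]
mod ext_builder_usage_sketch {
use super::*;
#[test]
fn default_builder_registers_one_network() {
// `ExtBuilder::default()` adds exactly one preconfigured network.
let (mut ext, state) = ExtBuilder::default().build();
ext.execute_with(|| {
assert_eq!(state.networks.len(), 1);
});
}
}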
mock.rs
// This file is part of the SORA network and Polkaswap app.
// Copyright (c) 2020, 2021, Polka Biome Ltd. All rights reserved.
// SPDX-License-Identifier: BSD-4-Clause
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// Redistributions of source code must retain the above copyright notice, this list
// of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// All advertising materials mentioning features or use of this software must display
// the following acknowledgement: This product includes software developed by Polka Biome
// Ltd., SORA, and Polkaswap.
//
// Neither the name of the Polka Biome Ltd. nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY Polka Biome Ltd. AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Polka Biome Ltd. BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Creating mock Runtime here
use crate::{AssetConfig, Config, NetworkConfig};
use codec::{Codec, Decode, Encode};
use common::mock::ExistentialDeposits;
use common::prelude::Balance;
use common::{
Amount, AssetId32, AssetName, AssetSymbol, PredefinedAssetId, DEFAULT_BALANCE_PRECISION, VAL,
};
use currencies::BasicCurrencyAdapter;
use frame_support::dispatch::{DispatchInfo, GetDispatchInfo};
use frame_support::sp_io::TestExternalities;
use frame_support::sp_runtime::app_crypto::sp_core;
use frame_support::sp_runtime::app_crypto::sp_core::crypto::AccountId32;
use frame_support::sp_runtime::app_crypto::sp_core::offchain::{OffchainExt, TransactionPoolExt};
use frame_support::sp_runtime::app_crypto::sp_core::{ecdsa, sr25519, Pair, Public};
use frame_support::sp_runtime::offchain::testing::{
OffchainState, PoolState, TestOffchainExt, TestTransactionPoolExt,
};
use frame_support::sp_runtime::serde::{Serialize, Serializer};
use frame_support::sp_runtime::testing::Header;
use frame_support::sp_runtime::traits::{
self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, IdentifyAccount,
IdentityLookup, PostDispatchInfoOf, SignedExtension, ValidateUnsigned, Verify,
};
use frame_support::sp_runtime::transaction_validity::{
TransactionSource, TransactionValidity, TransactionValidityError,
};
use frame_support::sp_runtime::{
self, ApplyExtrinsicResultWithInfo, MultiSignature, MultiSigner, Perbill, Percent,
};
use frame_support::traits::GenesisBuild;
use frame_support::weights::{Pays, Weight};
use frame_support::{construct_runtime, parameter_types};
use frame_system::offchain::{Account, SigningTypes};
use parking_lot::RwLock;
use sp_core::H256;
use sp_keystore::testing::KeyStore;
use sp_keystore::KeystoreExt;
use sp_std::collections::btree_set::BTreeSet;
use sp_std::fmt::Debug;
use sp_std::str::FromStr;
use sp_std::sync::Arc;
use std::collections::HashMap;
use {crate as eth_bridge, frame_system};
pub const PSWAP: PredefinedAssetId = PredefinedAssetId::PSWAP;
pub const XOR: PredefinedAssetId = PredefinedAssetId::XOR;
/// An index to a block.
pub type BlockNumber = u64;
pub type Signature = MultiSignature;
/// Some way of identifying an account on the chain. We intentionally make it equivalent
/// to the public key of our transaction signing scheme.
pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
parameter_types! {
pub const GetBaseAssetId: AssetId32<PredefinedAssetId> = AssetId32::from_asset_id(XOR);
pub const DepositBase: u64 = 1;
pub const DepositFactor: u64 = 1;
pub const MaxSignatories: u16 = 4;
pub const UnsignedPriority: u64 = 100;
pub const EthNetworkId: <Runtime as Config>::NetworkId = 0;
}
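// Test-only extrinsic: a call plus an optional (account, extension) pair in
// place of a real signature. The `Checkable` and `Applyable` impls below
// accept it unconditionally, so transactions submitted by the off-chain
// worker can be applied directly in tests.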
#[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)]
pub struct MyTestXt<Call, Extra> {
/// Signature of the extrinsic.
pub signature: Option<(AccountId, Extra)>,
/// Call of the extrinsic.
pub call: Call,
}
parity_util_mem::malloc_size_of_is_0!(any: MyTestXt<Call, Extra>);
impl<Call: Codec + Sync + Send, Context, Extra> Checkable<Context> for MyTestXt<Call, Extra> {
type Checked = Self;
fn check(self, _c: &Context) -> Result<Self::Checked, TransactionValidityError> {
Ok(self)
}
}
impl<Call: Codec + Sync + Send, Extra> traits::Extrinsic for MyTestXt<Call, Extra> {
type Call = Call;
type SignaturePayload = (AccountId, Extra);
fn is_signed(&self) -> Option<bool> {
Some(self.signature.is_some())
}
fn new(c: Call, sig: Option<Self::SignaturePayload>) -> Option<Self> {
Some(MyTestXt {
signature: sig,
call: c,
})
}
}
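// `MyExtra` (declared further down) is a no-op signed extension: it carries
// no additional signed payload and performs no pre-dispatch checks.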
impl SignedExtension for MyExtra {
type AccountId = AccountId;
type Call = Call;
type AdditionalSigned = ();
type Pre = ();
const IDENTIFIER: &'static str = "testextension";
fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
Ok(())
}
}
impl<Origin, Call, Extra> Applyable for MyTestXt<Call, Extra>
where
Call:
'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable<Origin = Origin>,
Extra: SignedExtension<AccountId = AccountId, Call = Call>,
Origin: From<Option<AccountId32>>,
{
type Call = Call;
/// Checks to see if this is a valid *transaction*. It returns information on it if so.
fn validate<U: ValidateUnsigned<Call = Self::Call>>(
&self,
_source: TransactionSource,
_info: &DispatchInfoOf<Self::Call>,
_len: usize,
) -> TransactionValidity {
Ok(Default::default())
}
/// Executes all necessary logic needed prior to dispatch and deconstructs into function call,
/// index and sender.
fn apply<U: ValidateUnsigned<Call = Self::Call>>(
self,
info: &DispatchInfoOf<Self::Call>,
len: usize,
) -> ApplyExtrinsicResultWithInfo<PostDispatchInfoOf<Self::Call>> {
let maybe_who = if let Some((who, extra)) = self.signature {
Extra::pre_dispatch(extra, &who, &self.call, info, len)?;
Some(who)
} else {
Extra::pre_dispatch_unsigned(&self.call, info, len)?;
None
};
Ok(self.call.dispatch(maybe_who.into()))
}
}
impl<Call, Extra> Serialize for MyTestXt<Call, Extra>
where
MyTestXt<Call, Extra>: Encode,
{
fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.using_encoded(|bytes| seq.serialize_bytes(bytes))
}
}
impl<Call: Encode, Extra: Encode> GetDispatchInfo for MyTestXt<Call, Extra> {
fn get_dispatch_info(&self) -> DispatchInfo {
// for testing: weight == size.
DispatchInfo {
weight: self.encode().len() as _,
pays_fee: Pays::No,
..Default::default()
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)]
pub struct MyExtra;
pub type TestExtrinsic = MyTestXt<Call, MyExtra>;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
pub const ExistentialDeposit: u128 = 0;
}
impl frame_system::Config for Runtime {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Call = Call;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type PalletInfo = PalletInfo;
type SS58Prefix = ();
}
impl<T: SigningTypes> frame_system::offchain::SignMessage<T> for Runtime {
type SignatureData = ();
fn sign_message(&self, _message: &[u8]) -> Self::SignatureData {
unimplemented!()
}
fn sign<TPayload, F>(&self, _f: F) -> Self::SignatureData
where
F: Fn(&Account<T>) -> TPayload,
TPayload: frame_system::offchain::SignedPayload<T>,
{
unimplemented!()
}
}
impl<LocalCall> frame_system::offchain::CreateSignedTransaction<LocalCall> for Runtime
where
Call: From<LocalCall>,
{
fn create_transaction<C: frame_system::offchain::AppCrypto<Self::Public, Self::Signature>>(
call: Call,
_public: <Signature as Verify>::Signer,
account: <Runtime as frame_system::Config>::AccountId,
_index: <Runtime as frame_system::Config>::Index,
) -> Option<(
Call,
<TestExtrinsic as sp_runtime::traits::Extrinsic>::SignaturePayload,
)> {
Some((call, (account, MyExtra {})))
}
}
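// Note: `create_transaction` above attaches no real signature; the payload
// is just the submitting account plus the no-op `MyExtra`, which is all
// `MyTestXt` needs to report itself as signed.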
impl frame_system::offchain::SigningTypes for Runtime {
type Public = <Signature as Verify>::Signer;
type Signature = Signature;
}
impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime
where
Call: From<C>,
{
type OverarchingCall = Call;
type Extrinsic = TestExtrinsic;
}
impl pallet_balances::Config for Runtime {
/// The type for recording an account's balance.
type Balance = Balance;
/// The ubiquitous event type.
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
type MaxLocks = ();
}
impl tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = <Runtime as assets::Config>::AssetId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
}
impl currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency = BasicCurrencyAdapter<Runtime, Balances, Amount, BlockNumber>;
type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId;
type WeightInfo = ();
}
impl assets::Config for Runtime {
type Event = Event;
type ExtraAccountId = [u8; 32];
type ExtraAssetRecordArg =
common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>;
type AssetId = common::AssetId32<PredefinedAssetId>;
type GetBaseAssetId = GetBaseAssetId;
type Currency = currencies::Module<Runtime>;
type WeightInfo = ();
}
impl common::Config for Runtime {
type DEXId = common::DEXId;
type LstId = common::LiquiditySourceType;
}
impl permissions::Config for Runtime {
type Event = Event;
}
impl bridge_multisig::Config for Runtime {
type Call = Call;
type Event = Event;
type Currency = Balances;
type DepositBase = DepositBase;
type DepositFactor = DepositFactor;
type MaxSignatories = MaxSignatories;
type WeightInfo = ();
}
impl pallet_sudo::Config for Runtime {
type Call = Call;
type Event = Event;
}
impl crate::Config for Runtime {
type PeerId = crate::crypto::TestAuthId;
type Call = Call;
type Event = Event;
type NetworkId = u32;
type GetEthNetworkId = EthNetworkId;
type WeightInfo = ();
}
impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic {
const VERSION: u8 = 1;
type SignedExtensions = ();
}
construct_runtime!(
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
Multisig: bridge_multisig::{Module, Call, Storage, Config<T>, Event<T>},
Tokens: tokens::{Module, Call, Storage, Config<T>, Event<T>},
Currencies: currencies::{Module, Call, Storage, Event<T>},
Assets: assets::{Module, Call, Storage, Config<T>, Event<T>},
Permissions: permissions::{Module, Call, Storage, Config<T>, Event<T>},
Sudo: pallet_sudo::{Module, Call, Storage, Config<T>, Event<T>},
EthBridge: eth_bridge::{Module, Call, Storage, Config<T>, Event<T>},
}
);
pub type SubstrateAccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
pub struct State {
pub networks: HashMap<u32, ExtendedNetworkConfig>,
pub authority_account_id: AccountId32,
pub pool_state: Arc<RwLock<PoolState>>,
pub offchain_state: Arc<RwLock<OffchainState>>,
}
#[derive(Clone, Debug)]
pub struct ExtendedNetworkConfig {
pub ocw_keypairs: Vec<(MultiSigner, AccountId32, [u8; 32])>,
pub config: NetworkConfig<Runtime>,
}
pub struct ExtBuilder {
pub networks: HashMap<u32, ExtendedNetworkConfig>,
last_network_id: u32,
root_account_id: AccountId32,
}
impl Default for ExtBuilder {
fn default() -> Self {
let mut builder = Self {
networks: Default::default(),
last_network_id: Default::default(),
root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"),
};
builder.add_network(
vec![
AssetConfig::Thischain { id: PSWAP.into() },
AssetConfig::Sidechain {
id: XOR.into(),
sidechain_id: sp_core::H160::from_str(
"40fd72257597aa14c7231a7b1aaa29fce868f677",
)
.unwrap(),
owned: true,
precision: DEFAULT_BALANCE_PRECISION,
},
AssetConfig::Sidechain {
id: VAL.into(),
sidechain_id: sp_core::H160::from_str(
"3f9feac97e5feb15d8bf98042a9a01b515da3dfb",
)
.unwrap(),
owned: true,
precision: DEFAULT_BALANCE_PRECISION,
},
],
Some(vec![
(XOR.into(), common::balance!(350000)),
(VAL.into(), common::balance!(33900000)),
]),
Some(4),
);
builder
}
}
impl ExtBuilder {
pub fn new() -> Self {
Self {
networks: Default::default(),
last_network_id: Default::default(),
root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"),
}
}
pub fn add_currency(
&mut self,
network_id: u32,
currency: AssetConfig<AssetId32<PredefinedAssetId>>,
) {
self.networks
.get_mut(&network_id)
.unwrap()
.config
.assets
.push(currency);
}
pub fn add_network(
&mut self,
assets: Vec<AssetConfig<AssetId32<PredefinedAssetId>>>,
reserves: Option<Vec<(AssetId32<PredefinedAssetId>, Balance)>>,
peers_num: Option<usize>,
) -> u32 {
let net_id = self.last_network_id;
let multisig_account_id = bridge_multisig::Module::<Runtime>::multi_account_id(
&self.root_account_id,
1,
net_id as u64 + 10,
);
let peers_keys = gen_peers_keys(&format!("OCW{}", net_id), peers_num.unwrap_or(4));
self.networks.insert(
net_id,
ExtendedNetworkConfig {
config: NetworkConfig {
initial_peers: peers_keys.iter().map(|(_, id, _)| id).cloned().collect(),
bridge_account_id: multisig_account_id.clone(),
assets,
bridge_contract_address: Default::default(),
reserves: reserves.unwrap_or_default(),
},
ocw_keypairs: peers_keys,
},
);
self.last_network_id += 1;
net_id
}
pub fn build(self) -> (TestExternalities, State) {
let (offchain, offchain_state) = TestOffchainExt::new();
let (pool, pool_state) = TestTransactionPoolExt::new();
let authority_account_id =
bridge_multisig::Module::<Runtime>::multi_account_id(&self.root_account_id, 1, 0);
let mut bridge_accounts = Vec::new();
let mut bridge_network_configs = Vec::new();
let mut endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new();
let mut networks: Vec<_> = self.networks.clone().into_iter().collect();
networks.sort_by(|(x, _), (y, _)| x.cmp(y));
for (_net_id, ext_network) in networks {
bridge_network_configs.push(ext_network.config.clone());
endowed_accounts.extend(ext_network.config.assets.iter().cloned().map(
|asset_config| {
(
ext_network.config.bridge_account_id.clone(),
asset_config.asset_id().clone(),
0,
)
},
));
endowed_accounts.extend(ext_network.config.reserves.iter().cloned().map(
|(asset_id, _balance)| (ext_network.config.bridge_account_id.clone(), asset_id, 0),
));
bridge_accounts.push((
ext_network.config.bridge_account_id.clone(),
bridge_multisig::MultisigAccount::new(
ext_network
.ocw_keypairs
.iter()
.map(|x| x.1.clone())
.collect(),
Percent::from_parts(67),
),
));
}
// pallet_balances and orml_tokens no longer accept duplicate elements.
let mut unique_endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new();
for acc in endowed_accounts {
if let Some(unique_acc) = unique_endowed_accounts.iter_mut().find(|a| a.1 == acc.1) {
unique_acc.2 += acc.2;
} else {
unique_endowed_accounts.push(acc);
}
}
let endowed_accounts = unique_endowed_accounts;
let endowed_assets: BTreeSet<_> = endowed_accounts
.iter()
.map(|x| {
(
x.1,
self.root_account_id.clone(),
AssetSymbol(b"".to_vec()),
AssetName(b"".to_vec()),
18,
Balance::from(0u32),
true,
)
})
.collect();
let mut storage = frame_system::GenesisConfig::default()
.build_storage::<Runtime>()
.unwrap();
let mut balances: Vec<_> = endowed_accounts
.iter()
.map(|(acc, ..)| acc)
.chain(vec![&self.root_account_id, &authority_account_id])
.map(|x| (x.clone(), Balance::from(0u32)))
.collect();
balances.extend(bridge_accounts.iter().map(|(acc, _)| (acc.clone(), 0)));
for (_net_id, ext_network) in &self.networks {
balances.extend(ext_network.ocw_keypairs.iter().map(|x| (x.1.clone(), 0)));
}
balances.sort_by_key(|x| x.0.clone());
balances.dedup_by_key(|x| x.0.clone());
BalancesConfig { balances }
.assimilate_storage(&mut storage)
.unwrap();
if !endowed_accounts.is_empty() {
SudoConfig {
key: endowed_accounts[0].0.clone(),
}
.assimilate_storage(&mut storage)
.unwrap();
}
MultisigConfig {
accounts: bridge_accounts,
}
.assimilate_storage(&mut storage)
.unwrap();
PermissionsConfig {
initial_permission_owners: vec![],
initial_permissions: Vec::new(),
}
.assimilate_storage(&mut storage)
.unwrap();
TokensConfig {
endowed_accounts: endowed_accounts.clone(),
}
.assimilate_storage(&mut storage)
.unwrap();
AssetsConfig {
endowed_assets: endowed_assets.into_iter().collect(),
}
.assimilate_storage(&mut storage)
.unwrap();
EthBridgeConfig {
networks: bridge_network_configs,
authority_account: authority_account_id.clone(),
val_master_contract_address: sp_core::H160::from_str(
"47e229aa491763038f6a505b4f85d8eb463f0962",
)
.unwrap(),
xor_master_contract_address: sp_core::H160::from_str(
"12c6a709925783f49fcca0b398d13b0d597e6e1c",
)
.unwrap(),
}
.assimilate_storage(&mut storage)
.unwrap();
let mut t = TestExternalities::from(storage);
t.register_extension(OffchainExt::new(offchain));
t.register_extension(TransactionPoolExt::new(pool));
t.register_extension(KeystoreExt(Arc::new(KeyStore::new())));
t.execute_with(|| System::set_block_number(1));
let state = State {
networks: self.networks,
authority_account_id,
pool_state,
offchain_state,
};
(t, state)
}
}
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
pub type AccountPublic = <Signature as Verify>::Signer;
/// Helper function to generate an account ID from seed
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> SubstrateAccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
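/// Derives `peers_num` deterministic ECDSA keypairs from `//{prefix}{i}`
/// and returns, per peer: the public signer, the account id derived from
/// it, and the raw 32-byte seed.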
pub fn gen_peers_keys(
prefix: &str,
peers_num: usize,
) -> Vec<(AccountPublic, AccountId32, [u8; 32])> {
(0..peers_num)
.map(|i| {
let kp = ecdsa::Pair::from_string(&format!("//{}{}", prefix, i), None).unwrap();
let signer = AccountPublic::from(kp.public());
(signer.clone(), signer.into_account(), kp.seed())
})
.collect()
}
mock.rs
// This file is part of the SORA network and Polkaswap app.
// Copyright (c) 2020, 2021, Polka Biome Ltd. All rights reserved.
// SPDX-License-Identifier: BSD-4-Clause
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// Redistributions of source code must retain the above copyright notice, this list
// of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// All advertising materials mentioning features or use of this software must display
// the following acknowledgement: This product includes software developed by Polka Biome
// Ltd., SORA, and Polkaswap.
//
// Neither the name of the Polka Biome Ltd. nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY Polka Biome Ltd. AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Polka Biome Ltd. BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Creating mock Runtime here
use crate::{AssetConfig, Config, NetworkConfig};
use codec::{Codec, Decode, Encode};
use common::mock::ExistentialDeposits;
use common::prelude::Balance;
use common::{
Amount, AssetId32, AssetName, AssetSymbol, PredefinedAssetId, DEFAULT_BALANCE_PRECISION, VAL,
};
use currencies::BasicCurrencyAdapter;
use frame_support::dispatch::{DispatchInfo, GetDispatchInfo};
use frame_support::sp_io::TestExternalities;
use frame_support::sp_runtime::app_crypto::sp_core;
use frame_support::sp_runtime::app_crypto::sp_core::crypto::AccountId32;
use frame_support::sp_runtime::app_crypto::sp_core::offchain::{OffchainExt, TransactionPoolExt};
use frame_support::sp_runtime::app_crypto::sp_core::{ecdsa, sr25519, Pair, Public};
use frame_support::sp_runtime::offchain::testing::{
OffchainState, PoolState, TestOffchainExt, TestTransactionPoolExt,
};
use frame_support::sp_runtime::serde::{Serialize, Serializer};
use frame_support::sp_runtime::testing::Header;
use frame_support::sp_runtime::traits::{
self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, IdentifyAccount,
IdentityLookup, PostDispatchInfoOf, SignedExtension, ValidateUnsigned, Verify,
};
use frame_support::sp_runtime::transaction_validity::{
TransactionSource, TransactionValidity, TransactionValidityError,
};
use frame_support::sp_runtime::{
self, ApplyExtrinsicResultWithInfo, MultiSignature, MultiSigner, Perbill, Percent,
};
use frame_support::traits::GenesisBuild;
use frame_support::weights::{Pays, Weight};
use frame_support::{construct_runtime, parameter_types};
use frame_system::offchain::{Account, SigningTypes};
use parking_lot::RwLock;
use sp_core::H256;
use sp_keystore::testing::KeyStore;
use sp_keystore::KeystoreExt;
use sp_std::collections::btree_set::BTreeSet;
use sp_std::fmt::Debug;
use sp_std::str::FromStr;
use sp_std::sync::Arc;
use std::collections::HashMap;
use {crate as eth_bridge, frame_system};
pub const PSWAP: PredefinedAssetId = PredefinedAssetId::PSWAP;
pub const XOR: PredefinedAssetId = PredefinedAssetId::XOR;
/// An index to a block.
pub type BlockNumber = u64;
pub type Signature = MultiSignature;
/// Some way of identifying an account on the chain. We intentionally make it equivalent
/// to the public key of our transaction signing scheme.
pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
parameter_types! {
pub const GetBaseAssetId: AssetId32<PredefinedAssetId> = AssetId32::from_asset_id(XOR);
pub const DepositBase: u64 = 1;
pub const DepositFactor: u64 = 1;
pub const MaxSignatories: u16 = 4;
pub const UnsignedPriority: u64 = 100;
pub const EthNetworkId: <Runtime as Config>::NetworkId = 0;
}
#[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)]
pub struct MyTestXt<Call, Extra> {
/// Signature of the extrinsic.
pub signature: Option<(AccountId, Extra)>,
/// Call of the extrinsic.
pub call: Call,
}
parity_util_mem::malloc_size_of_is_0!(any: MyTestXt<Call, Extra>);
impl<Call: Codec + Sync + Send, Context, Extra> Checkable<Context> for MyTestXt<Call, Extra> {
type Checked = Self;
fn check(self, _c: &Context) -> Result<Self::Checked, TransactionValidityError> {
Ok(self)
}
}
impl<Call: Codec + Sync + Send, Extra> traits::Extrinsic for MyTestXt<Call, Extra> {
type Call = Call;
type SignaturePayload = (AccountId, Extra);
fn is_signed(&self) -> Option<bool> {
Some(self.signature.is_some())
}
fn new(c: Call, sig: Option<Self::SignaturePayload>) -> Option<Self> {
Some(MyTestXt {
signature: sig,
call: c,
})
}
}
impl SignedExtension for MyExtra {
type AccountId = AccountId;
type Call = Call;
type AdditionalSigned = ();
type Pre = ();
const IDENTIFIER: &'static str = "testextension";
fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
Ok(())
}
}
impl<Origin, Call, Extra> Applyable for MyTestXt<Call, Extra>
where
Call:
'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable<Origin = Origin>,
Extra: SignedExtension<AccountId = AccountId, Call = Call>,
Origin: From<Option<AccountId32>>,
{
type Call = Call;
/// Checks to see if this is a valid *transaction*. It returns information on it if so.
fn validate<U: ValidateUnsigned<Call = Self::Call>>(
&self,
_source: TransactionSource,
_info: &DispatchInfoOf<Self::Call>,
_len: usize,
) -> TransactionValidity {
Ok(Default::default())
}
/// Executes all necessary logic needed prior to dispatch and deconstructs into function call,
/// index and sender.
fn apply<U: ValidateUnsigned<Call = Self::Call>>(
self,
info: &DispatchInfoOf<Self::Call>,
len: usize,
) -> ApplyExtrinsicResultWithInfo<PostDispatchInfoOf<Self::Call>> {
let maybe_who = if let Some((who, extra)) = self.signature {
Extra::pre_dispatch(extra, &who, &self.call, info, len)?;
Some(who)
} else {
Extra::pre_dispatch_unsigned(&self.call, info, len)?;
None
};
Ok(self.call.dispatch(maybe_who.into()))
}
}
impl<Call, Extra> Serialize for MyTestXt<Call, Extra>
where
MyTestXt<Call, Extra>: Encode,
{
fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.using_encoded(|bytes| seq.serialize_bytes(bytes))
}
}
impl<Call: Encode, Extra: Encode> GetDispatchInfo for MyTestXt<Call, Extra> {
fn get_dispatch_info(&self) -> DispatchInfo {
// for testing: weight == size.
DispatchInfo {
weight: self.encode().len() as _,
pays_fee: Pays::No,
..Default::default()
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)]
pub struct MyExtra;
pub type TestExtrinsic = MyTestXt<Call, MyExtra>;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
pub const ExistentialDeposit: u128 = 0;
}
impl frame_system::Config for Runtime {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Call = Call;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type PalletInfo = PalletInfo;
type SS58Prefix = ();
}
impl<T: SigningTypes> frame_system::offchain::SignMessage<T> for Runtime {
type SignatureData = ();
fn sign_message(&self, _message: &[u8]) -> Self::SignatureData {
unimplemented!()
}
fn sign<TPayload, F>(&self, _f: F) -> Self::SignatureData
where
F: Fn(&Account<T>) -> TPayload,
TPayload: frame_system::offchain::SignedPayload<T>,
{
unimplemented!()
}
}
impl<LocalCall> frame_system::offchain::CreateSignedTransaction<LocalCall> for Runtime
where
Call: From<LocalCall>,
{
fn create_transaction<C: frame_system::offchain::AppCrypto<Self::Public, Self::Signature>>(
call: Call,
_public: <Signature as Verify>::Signer,
account: <Runtime as frame_system::Config>::AccountId,
_index: <Runtime as frame_system::Config>::Index,
) -> Option<(
Call,
<TestExtrinsic as sp_runtime::traits::Extrinsic>::SignaturePayload,
)> {
Some((call, (account, MyExtra {})))
}
}
impl frame_system::offchain::SigningTypes for Runtime {
type Public = <Signature as Verify>::Signer;
type Signature = Signature;
}
impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime
where
Call: From<C>,
{
type OverarchingCall = Call;
type Extrinsic = TestExtrinsic;
}
impl pallet_balances::Config for Runtime {
/// The type for recording an account's balance.
type Balance = Balance;
/// The ubiquitous event type.
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
type MaxLocks = ();
}
impl tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = <Runtime as assets::Config>::AssetId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
}
impl currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency = BasicCurrencyAdapter<Runtime, Balances, Amount, BlockNumber>;
type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId;
type WeightInfo = ();
}
impl assets::Config for Runtime {
type Event = Event;
type ExtraAccountId = [u8; 32];
type ExtraAssetRecordArg =
common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>;
type AssetId = common::AssetId32<PredefinedAssetId>;
type GetBaseAssetId = GetBaseAssetId;
type Currency = currencies::Module<Runtime>;
type WeightInfo = ();
}
impl common::Config for Runtime {
type DEXId = common::DEXId;
type LstId = common::LiquiditySourceType;
}
impl permissions::Config for Runtime {
type Event = Event;
}
impl bridge_multisig::Config for Runtime {
type Call = Call;
type Event = Event;
type Currency = Balances;
type DepositBase = DepositBase;
type DepositFactor = DepositFactor;
type MaxSignatories = MaxSignatories;
type WeightInfo = ();
}
impl pallet_sudo::Config for Runtime {
type Call = Call;
type Event = Event;
}
impl crate::Config for Runtime {
type PeerId = crate::crypto::TestAuthId;
type Call = Call;
type Event = Event;
type NetworkId = u32;
type GetEthNetworkId = EthNetworkId;
type WeightInfo = ();
}
impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic {
const VERSION: u8 = 1;
type SignedExtensions = ();
}
construct_runtime!(
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
Multisig: bridge_multisig::{Module, Call, Storage, Config<T>, Event<T>},
Tokens: tokens::{Module, Call, Storage, Config<T>, Event<T>},
Currencies: currencies::{Module, Call, Storage, Event<T>},
Assets: assets::{Module, Call, Storage, Config<T>, Event<T>},
Permissions: permissions::{Module, Call, Storage, Config<T>, Event<T>},
Sudo: pallet_sudo::{Module, Call, Storage, Config<T>, Event<T>},
EthBridge: eth_bridge::{Module, Call, Storage, Config<T>, Event<T>},
}
);
pub type SubstrateAccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
pub struct State {
pub networks: HashMap<u32, ExtendedNetworkConfig>,
pub authority_account_id: AccountId32,
pub pool_state: Arc<RwLock<PoolState>>,
pub offchain_state: Arc<RwLock<OffchainState>>,
}
#[derive(Clone, Debug)]
pub struct ExtendedNetworkConfig {
pub ocw_keypairs: Vec<(MultiSigner, AccountId32, [u8; 32])>,
pub config: NetworkConfig<Runtime>,
}
pub struct ExtBuilder {
pub networks: HashMap<u32, ExtendedNetworkConfig>,
last_network_id: u32,
root_account_id: AccountId32,
}
impl Default for ExtBuilder {
fn default() -> Self {
let mut builder = Self {
networks: Default::default(),
last_network_id: Default::default(),
root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"),
};
builder.add_network(
vec![
AssetConfig::Thischain { id: PSWAP.into() },
AssetConfig::Sidechain {
id: XOR.into(),
sidechain_id: sp_core::H160::from_str(
"40fd72257597aa14c7231a7b1aaa29fce868f677",
)
.unwrap(),
owned: true,
precision: DEFAULT_BALANCE_PRECISION,
},
AssetConfig::Sidechain {
id: VAL.into(),
sidechain_id: sp_core::H160::from_str(
"3f9feac97e5feb15d8bf98042a9a01b515da3dfb",
)
.unwrap(),
owned: true,
precision: DEFAULT_BALANCE_PRECISION,
},
],
Some(vec![
(XOR.into(), common::balance!(350000)),
(VAL.into(), common::balance!(33900000)),
]),
Some(4),
);
builder
}
}
impl ExtBuilder {
pub fn new() -> Self {
Self {
networks: Default::default(),
last_network_id: Default::default(),
root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"),
}
}
pub fn add_currency(
&mut self,
network_id: u32,
currency: AssetConfig<AssetId32<PredefinedAssetId>>,
) {
self.networks
.get_mut(&network_id)
.unwrap()
.config
.assets
.push(currency);
}
pub fn add_network(
&mut self,
assets: Vec<AssetConfig<AssetId32<PredefinedAssetId>>>,
reserves: Option<Vec<(AssetId32<PredefinedAssetId>, Balance)>>,
peers_num: Option<usize>,
) -> u32 {
let net_id = self.last_network_id;
let multisig_account_id = bridge_multisig::Module::<Runtime>::multi_account_id(
&self.root_account_id,
1,
net_id as u64 + 10,
);
let peers_keys = gen_peers_keys(&format!("OCW{}", net_id), peers_num.unwrap_or(4));
self.networks.insert(
net_id,
ExtendedNetworkConfig {
config: NetworkConfig {
initial_peers: peers_keys.iter().map(|(_, id, _)| id).cloned().collect(),
bridge_account_id: multisig_account_id.clone(),
assets,
bridge_contract_address: Default::default(),
reserves: reserves.unwrap_or_default(),
},
ocw_keypairs: peers_keys,
},
);
self.last_network_id += 1;
net_id
}
pub fn build(self) -> (TestExternalities, State) {
let (offchain, offchain_state) = TestOffchainExt::new();
let (pool, pool_state) = TestTransactionPoolExt::new();
let authority_account_id =
bridge_multisig::Module::<Runtime>::multi_account_id(&self.root_account_id, 1, 0);
let mut bridge_accounts = Vec::new();
let mut bridge_network_configs = Vec::new();
let mut endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new();
let mut networks: Vec<_> = self.networks.clone().into_iter().collect();
networks.sort_by(|(x, _), (y, _)| x.cmp(y));
for (_net_id, ext_network) in networks {
bridge_network_configs.push(ext_network.config.clone());
endowed_accounts.extend(ext_network.config.assets.iter().cloned().map(
|asset_config| {
(
ext_network.config.bridge_account_id.clone(),
asset_config.asset_id().clone(),
0,
)
},
));
endowed_accounts.extend(ext_network.config.reserves.iter().cloned().map(
|(asset_id, _balance)| (ext_network.config.bridge_account_id.clone(), asset_id, 0),
));
bridge_accounts.push((
ext_network.config.bridge_account_id.clone(),
bridge_multisig::MultisigAccount::new(
ext_network
.ocw_keypairs
.iter()
.map(|x| x.1.clone())
.collect(),
Percent::from_parts(67),
),
));
}
// pallet_balances and orml_tokens no longer accept duplicate elements.
let mut unique_endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new();
for acc in endowed_accounts {
if let Some(unique_acc) = unique_endowed_accounts.iter_mut().find(|a| a.1 == acc.1) {
unique_acc.2 += acc.2;
} else {
unique_endowed_accounts.push(acc);
}
}
let endowed_accounts = unique_endowed_accounts;
let endowed_assets: BTreeSet<_> = endowed_accounts
.iter()
.map(|x| {
(
x.1,
self.root_account_id.clone(),
AssetSymbol(b"".to_vec()),
AssetName(b"".to_vec()),
18,
Balance::from(0u32),
true,
)
})
.collect();
let mut storage = frame_system::GenesisConfig::default()
.build_storage::<Runtime>()
.unwrap();
let mut balances: Vec<_> = endowed_accounts
.iter()
.map(|(acc, ..)| acc)
.chain(vec![&self.root_account_id, &authority_account_id])
.map(|x| (x.clone(), Balance::from(0u32)))
.collect();
balances.extend(bridge_accounts.iter().map(|(acc, _)| (acc.clone(), 0)));
for (_net_id, ext_network) in &self.networks {
balances.extend(ext_network.ocw_keypairs.iter().map(|x| (x.1.clone(), 0)));
}
balances.sort_by_key(|x| x.0.clone());
balances.dedup_by_key(|x| x.0.clone());
BalancesConfig { balances }
.assimilate_storage(&mut storage)
.unwrap();
if !endowed_accounts.is_empty() {
SudoConfig {
key: endowed_accounts[0].0.clone(),
}
.assimilate_storage(&mut storage)
.unwrap();
}
MultisigConfig {
accounts: bridge_accounts,
}
.assimilate_storage(&mut storage)
.unwrap();
PermissionsConfig {
initial_permission_owners: vec![],
initial_permissions: Vec::new(),
}
.assimilate_storage(&mut storage)
.unwrap();
TokensConfig {
endowed_accounts: endowed_accounts.clone(),
}
.assimilate_storage(&mut storage)
.unwrap();
AssetsConfig {
endowed_assets: endowed_assets.into_iter().collect(),
}
.assimilate_storage(&mut storage)
.unwrap();
EthBridgeConfig {
networks: bridge_network_configs,
authority_account: authority_account_id.clone(),
val_master_contract_address: sp_core::H160::from_str(
"47e229aa491763038f6a505b4f85d8eb463f0962",
)
.unwrap(),
xor_master_contract_address: sp_core::H160::from_str(
"12c6a709925783f49fcca0b398d13b0d597e6e1c",
)
.unwrap(),
}
.assimilate_storage(&mut storage)
.unwrap();
let mut t = TestExternalities::from(storage);
t.register_extension(OffchainExt::new(offchain));
t.register_extension(TransactionPoolExt::new(pool));
t.register_extension(KeystoreExt(Arc::new(KeyStore::new())));
t.execute_with(|| System::set_block_number(1));
let state = State {
networks: self.networks,
authority_account_id,
pool_state,
offchain_state,
};
(t, state)
}
}
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
pub type AccountPublic = <Signature as Verify>::Signer;
/// Helper function to generate an account ID from seed
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> SubstrateAccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
pub fn gen_peers_keys(
prefix: &str,
peers_num: usize,
) -> Vec<(AccountPublic, AccountId32, [u8; 32])> {
(0..peers_num)
.map(|i| {
let kp = ecdsa::Pair::from_string(&format!("//{}{}", prefix, i), None).unwrap();
let signer = AccountPublic::from(kp.public());
(signer.clone(), signer.into_account(), kp.seed())
})
.collect()
}
mock.rs
// This file is part of the SORA network and Polkaswap app.
// Copyright (c) 2020, 2021, Polka Biome Ltd. All rights reserved.
// SPDX-License-Identifier: BSD-4-Clause
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
// Redistributions of source code must retain the above copyright notice, this list
// of conditions and the following disclaimer.
// Redistributions in binary form must reproduce the above copyright notice, this
// list of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// All advertising materials mentioning features or use of this software must display
// the following acknowledgement: This product includes software developed by Polka Biome
// Ltd., SORA, and Polkaswap.
//
// Neither the name of the Polka Biome Ltd. nor the names of its contributors may be used
// to endorse or promote products derived from this software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY Polka Biome Ltd. AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Polka Biome Ltd. BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Creating mock Runtime here
use crate::{AssetConfig, Config, NetworkConfig};
use codec::{Codec, Decode, Encode};
use common::mock::ExistentialDeposits;
use common::prelude::Balance;
use common::{
Amount, AssetId32, AssetName, AssetSymbol, PredefinedAssetId, DEFAULT_BALANCE_PRECISION, VAL,
};
use currencies::BasicCurrencyAdapter;
use frame_support::dispatch::{DispatchInfo, GetDispatchInfo};
use frame_support::sp_io::TestExternalities;
use frame_support::sp_runtime::app_crypto::sp_core;
use frame_support::sp_runtime::app_crypto::sp_core::crypto::AccountId32;
use frame_support::sp_runtime::app_crypto::sp_core::offchain::{OffchainExt, TransactionPoolExt};
use frame_support::sp_runtime::app_crypto::sp_core::{ecdsa, sr25519, Pair, Public};
use frame_support::sp_runtime::offchain::testing::{
OffchainState, PoolState, TestOffchainExt, TestTransactionPoolExt,
};
use frame_support::sp_runtime::serde::{Serialize, Serializer};
use frame_support::sp_runtime::testing::Header;
use frame_support::sp_runtime::traits::{
self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, IdentifyAccount,
IdentityLookup, PostDispatchInfoOf, SignedExtension, ValidateUnsigned, Verify,
};
use frame_support::sp_runtime::transaction_validity::{
TransactionSource, TransactionValidity, TransactionValidityError,
};
use frame_support::sp_runtime::{
self, ApplyExtrinsicResultWithInfo, MultiSignature, MultiSigner, Perbill, Percent,
};
use frame_support::traits::GenesisBuild;
use frame_support::weights::{Pays, Weight};
use frame_support::{construct_runtime, parameter_types};
use frame_system::offchain::{Account, SigningTypes};
use parking_lot::RwLock;
use sp_core::H256;
use sp_keystore::testing::KeyStore;
use sp_keystore::KeystoreExt;
use sp_std::collections::btree_set::BTreeSet;
use sp_std::fmt::Debug;
use sp_std::str::FromStr;
use sp_std::sync::Arc;
use std::collections::HashMap;
use {crate as eth_bridge, frame_system};
pub const PSWAP: PredefinedAssetId = PredefinedAssetId::PSWAP;
pub const XOR: PredefinedAssetId = PredefinedAssetId::XOR;
/// An index to a block.
pub type BlockNumber = u64;
pub type Signature = MultiSignature;
/// Some way of identifying an account on the chain. We intentionally make it equivalent
/// to the public key of our transaction signing scheme.
pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Runtime>;
type Block = frame_system::mocking::MockBlock<Runtime>;
parameter_types! {
pub const GetBaseAssetId: AssetId32<PredefinedAssetId> = AssetId32::from_asset_id(XOR);
pub const DepositBase: u64 = 1;
pub const DepositFactor: u64 = 1;
pub const MaxSignatories: u16 = 4;
pub const UnsignedPriority: u64 = 100;
pub const EthNetworkId: <Runtime as Config>::NetworkId = 0;
}
#[derive(PartialEq, Eq, Clone, Encode, Decode, Debug)]
pub struct MyTestXt<Call, Extra> {
/// Signature of the extrinsic.
pub signature: Option<(AccountId, Extra)>,
/// Call of the extrinsic.
pub call: Call,
}
parity_util_mem::malloc_size_of_is_0!(any: MyTestXt<Call, Extra>);
impl<Call: Codec + Sync + Send, Context, Extra> Checkable<Context> for MyTestXt<Call, Extra> {
type Checked = Self;
fn check(self, _c: &Context) -> Result<Self::Checked, TransactionValidityError> {
Ok(self)
}
}
impl<Call: Codec + Sync + Send, Extra> traits::Extrinsic for MyTestXt<Call, Extra> {
type Call = Call;
type SignaturePayload = (AccountId, Extra);
fn is_signed(&self) -> Option<bool> {
Some(self.signature.is_some())
}
fn new(c: Call, sig: Option<Self::SignaturePayload>) -> Option<Self> {
Some(MyTestXt {
signature: sig,
call: c,
})
}
}
impl SignedExtension for MyExtra {
type AccountId = AccountId;
type Call = Call;
type AdditionalSigned = ();
type Pre = ();
const IDENTIFIER: &'static str = "testextension";
fn additional_signed(&self) -> Result<Self::AdditionalSigned, TransactionValidityError> {
Ok(())
}
}
impl<Origin, Call, Extra> Applyable for MyTestXt<Call, Extra>
where
Call:
'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable<Origin = Origin>,
Extra: SignedExtension<AccountId = AccountId, Call = Call>,
Origin: From<Option<AccountId32>>,
{
type Call = Call;
/// Checks to see if this is a valid *transaction*. It returns information on it if so.
fn validate<U: ValidateUnsigned<Call = Self::Call>>(
&self,
_source: TransactionSource,
_info: &DispatchInfoOf<Self::Call>,
_len: usize,
) -> TransactionValidity {
Ok(Default::default())
}
/// Executes all necessary logic needed prior to dispatch and deconstructs into function call,
/// index and sender.
fn apply<U: ValidateUnsigned<Call = Self::Call>>(
self,
info: &DispatchInfoOf<Self::Call>,
len: usize,
) -> ApplyExtrinsicResultWithInfo<PostDispatchInfoOf<Self::Call>> {
let maybe_who = if let Some((who, extra)) = self.signature {
Extra::pre_dispatch(extra, &who, &self.call, info, len)?;
Some(who)
} else {
Extra::pre_dispatch_unsigned(&self.call, info, len)?;
None
};
Ok(self.call.dispatch(maybe_who.into()))
}
}
impl<Call, Extra> Serialize for MyTestXt<Call, Extra>
where
MyTestXt<Call, Extra>: Encode,
{
fn serialize<S>(&self, seq: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.using_encoded(|bytes| seq.serialize_bytes(bytes))
}
}
impl<Call: Encode, Extra: Encode> GetDispatchInfo for MyTestXt<Call, Extra> {
fn get_dispatch_info(&self) -> DispatchInfo {
// for testing: weight == size.
DispatchInfo {
weight: self.encode().len() as _,
pays_fee: Pays::No,
..Default::default()
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Encode, Decode)]
pub struct MyExtra;
pub type TestExtrinsic = MyTestXt<Call, MyExtra>;
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = 1024;
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
pub const ExistentialDeposit: u128 = 0;
}
impl frame_system::Config for Runtime {
type BaseCallFilter = ();
type BlockWeights = ();
type BlockLength = ();
type Origin = Origin;
type Call = Call;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type Event = Event;
type BlockHashCount = BlockHashCount;
type DbWeight = ();
type Version = ();
type AccountData = pallet_balances::AccountData<Balance>;
type OnNewAccount = ();
type OnKilledAccount = ();
type SystemWeightInfo = ();
type PalletInfo = PalletInfo;
type SS58Prefix = ();
}
impl<T: SigningTypes> frame_system::offchain::SignMessage<T> for Runtime {
type SignatureData = ();
fn sign_message(&self, _message: &[u8]) -> Self::SignatureData {
unimplemented!()
}
fn sign<TPayload, F>(&self, _f: F) -> Self::SignatureData
where
F: Fn(&Account<T>) -> TPayload,
TPayload: frame_system::offchain::SignedPayload<T>,
{
unimplemented!()
}
}
impl<LocalCall> frame_system::offchain::CreateSignedTransaction<LocalCall> for Runtime
where
Call: From<LocalCall>,
{
fn create_transaction<C: frame_system::offchain::AppCrypto<Self::Public, Self::Signature>>(
call: Call,
_public: <Signature as Verify>::Signer,
account: <Runtime as frame_system::Config>::AccountId,
_index: <Runtime as frame_system::Config>::Index,
) -> Option<(
Call,
<TestExtrinsic as sp_runtime::traits::Extrinsic>::SignaturePayload,
)> {
Some((call, (account, MyExtra {})))
}
}
impl frame_system::offchain::SigningTypes for Runtime {
type Public = <Signature as Verify>::Signer;
type Signature = Signature;
}
impl<C> frame_system::offchain::SendTransactionTypes<C> for Runtime
where
Call: From<C>,
{
type OverarchingCall = Call;
type Extrinsic = TestExtrinsic;
}
impl pallet_balances::Config for Runtime {
/// The type for recording an account's balance.
type Balance = Balance;
/// The ubiquitous event type.
type Event = Event;
type DustRemoval = ();
type ExistentialDeposit = ExistentialDeposit;
type AccountStore = System;
type WeightInfo = ();
type MaxLocks = ();
}
impl tokens::Config for Runtime {
type Event = Event;
type Balance = Balance;
type Amount = Amount;
type CurrencyId = <Runtime as assets::Config>::AssetId;
type WeightInfo = ();
type ExistentialDeposits = ExistentialDeposits;
type OnDust = ();
}
impl currencies::Config for Runtime {
type Event = Event;
type MultiCurrency = Tokens;
type NativeCurrency = BasicCurrencyAdapter<Runtime, Balances, Amount, BlockNumber>;
type GetNativeCurrencyId = <Runtime as assets::Config>::GetBaseAssetId;
type WeightInfo = ();
}
impl assets::Config for Runtime {
type Event = Event;
type ExtraAccountId = [u8; 32];
type ExtraAssetRecordArg =
common::AssetIdExtraAssetRecordArg<common::DEXId, common::LiquiditySourceType, [u8; 32]>;
type AssetId = common::AssetId32<PredefinedAssetId>;
type GetBaseAssetId = GetBaseAssetId;
type Currency = currencies::Module<Runtime>;
type WeightInfo = ();
}
impl common::Config for Runtime {
type DEXId = common::DEXId;
type LstId = common::LiquiditySourceType;
}
impl permissions::Config for Runtime {
type Event = Event;
}
impl bridge_multisig::Config for Runtime {
type Call = Call;
type Event = Event;
type Currency = Balances;
type DepositBase = DepositBase;
type DepositFactor = DepositFactor;
type MaxSignatories = MaxSignatories;
type WeightInfo = ();
}
impl pallet_sudo::Config for Runtime {
type Call = Call;
type Event = Event;
}
impl crate::Config for Runtime {
type PeerId = crate::crypto::TestAuthId;
type Call = Call;
type Event = Event;
type NetworkId = u32;
type GetEthNetworkId = EthNetworkId;
type WeightInfo = ();
}
impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic {
const VERSION: u8 = 1;
type SignedExtensions = ();
}
construct_runtime!(
pub enum Runtime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic
{
System: frame_system::{Module, Call, Config, Storage, Event<T>},
Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
Multisig: bridge_multisig::{Module, Call, Storage, Config<T>, Event<T>},
Tokens: tokens::{Module, Call, Storage, Config<T>, Event<T>},
Currencies: currencies::{Module, Call, Storage, Event<T>},
Assets: assets::{Module, Call, Storage, Config<T>, Event<T>},
Permissions: permissions::{Module, Call, Storage, Config<T>, Event<T>},
Sudo: pallet_sudo::{Module, Call, Storage, Config<T>, Event<T>},
EthBridge: eth_bridge::{Module, Call, Storage, Config<T>, Event<T>},
}
);
pub type SubstrateAccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
pub struct State {
pub networks: HashMap<u32, ExtendedNetworkConfig>,
pub authority_account_id: AccountId32,
pub pool_state: Arc<RwLock<PoolState>>,
pub offchain_state: Arc<RwLock<OffchainState>>,
}
#[derive(Clone, Debug)]
pub struct ExtendedNetworkConfig {
pub ocw_keypairs: Vec<(MultiSigner, AccountId32, [u8; 32])>,
pub config: NetworkConfig<Runtime>,
}
pub struct ExtBuilder {
pub networks: HashMap<u32, ExtendedNetworkConfig>,
last_network_id: u32,
root_account_id: AccountId32,
}
impl Default for ExtBuilder {
fn default() -> Self {
let mut builder = Self {
networks: Default::default(),
last_network_id: Default::default(),
root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"),
};
builder.add_network(
vec![
AssetConfig::Thischain { id: PSWAP.into() },
AssetConfig::Sidechain {
id: XOR.into(),
sidechain_id: sp_core::H160::from_str(
"40fd72257597aa14c7231a7b1aaa29fce868f677",
)
.unwrap(),
owned: true,
precision: DEFAULT_BALANCE_PRECISION,
},
AssetConfig::Sidechain {
id: VAL.into(),
sidechain_id: sp_core::H160::from_str(
"3f9feac97e5feb15d8bf98042a9a01b515da3dfb",
)
.unwrap(),
owned: true,
precision: DEFAULT_BALANCE_PRECISION,
},
],
Some(vec![
(XOR.into(), common::balance!(350000)),
(VAL.into(), common::balance!(33900000)),
]),
Some(4),
);
builder
}
}
impl ExtBuilder {
pub fn new() -> Self {
Self {
networks: Default::default(),
last_network_id: Default::default(),
root_account_id: get_account_id_from_seed::<sr25519::Public>("Alice"),
}
}
pub fn add_currency(
&mut self,
network_id: u32,
currency: AssetConfig<AssetId32<PredefinedAssetId>>,
) {
self.networks
.get_mut(&network_id)
.unwrap()
.config
.assets
.push(currency);
}
pub fn add_network(
&mut self,
assets: Vec<AssetConfig<AssetId32<PredefinedAssetId>>>,
reserves: Option<Vec<(AssetId32<PredefinedAssetId>, Balance)>>,
peers_num: Option<usize>,
) -> u32 {
let net_id = self.last_network_id;
let multisig_account_id = bridge_multisig::Module::<Runtime>::multi_account_id(
&self.root_account_id,
1,
net_id as u64 + 10,
);
let peers_keys = gen_peers_keys(&format!("OCW{}", net_id), peers_num.unwrap_or(4));
self.networks.insert(
net_id,
ExtendedNetworkConfig {
config: NetworkConfig {
initial_peers: peers_keys.iter().map(|(_, id, _)| id).cloned().collect(),
bridge_account_id: multisig_account_id.clone(),
assets,
bridge_contract_address: Default::default(),
reserves: reserves.unwrap_or_default(),
},
ocw_keypairs: peers_keys,
},
);
self.last_network_id += 1;
net_id
}
pub fn build(self) -> (TestExternalities, State) {
let (offchain, offchain_state) = TestOffchainExt::new();
let (pool, pool_state) = TestTransactionPoolExt::new();
let authority_account_id =
bridge_multisig::Module::<Runtime>::multi_account_id(&self.root_account_id, 1, 0);
let mut bridge_accounts = Vec::new();
let mut bridge_network_configs = Vec::new();
let mut endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new();
let mut networks: Vec<_> = self.networks.clone().into_iter().collect();
networks.sort_by(|(x, _), (y, _)| x.cmp(y));
for (_net_id, ext_network) in networks {
bridge_network_configs.push(ext_network.config.clone());
endowed_accounts.extend(ext_network.config.assets.iter().cloned().map(
|asset_config| {
(
ext_network.config.bridge_account_id.clone(),
asset_config.asset_id().clone(),
0,
)
},
));
endowed_accounts.extend(ext_network.config.reserves.iter().cloned().map(
|(asset_id, _balance)| (ext_network.config.bridge_account_id.clone(), asset_id, 0),
));
bridge_accounts.push((
ext_network.config.bridge_account_id.clone(),
bridge_multisig::MultisigAccount::new(
ext_network
.ocw_keypairs
.iter()
.map(|x| x.1.clone())
.collect(),
Percent::from_parts(67),
),
));
}
// pallet_balances and orml_tokens no longer accept duplicate elements.
let mut unique_endowed_accounts: Vec<(_, AssetId32<PredefinedAssetId>, _)> = Vec::new();
for acc in endowed_accounts {
if let Some(unique_acc) = unique_endowed_accounts.iter_mut().find(|a| a.1 == acc.1) {
unique_acc.2 += acc.2;
} else {
unique_endowed_accounts.push(acc);
}
}
let endowed_accounts = unique_endowed_accounts;
let endowed_assets: BTreeSet<_> = endowed_accounts
.iter()
.map(|x| {
(
x.1,
self.root_account_id.clone(),
AssetSymbol(b"".to_vec()),
AssetName(b"".to_vec()),
18,
Balance::from(0u32),
true,
)
})
.collect();
let mut storage = frame_system::GenesisConfig::default()
.build_storage::<Runtime>()
.unwrap();
let mut balances: Vec<_> = endowed_accounts
.iter()
.map(|(acc, ..)| acc)
.chain(vec![&self.root_account_id, &authority_account_id])
.map(|x| (x.clone(), Balance::from(0u32)))
.collect();
balances.extend(bridge_accounts.iter().map(|(acc, _)| (acc.clone(), 0)));
for (_net_id, ext_network) in &self.networks {
balances.extend(ext_network.ocw_keypairs.iter().map(|x| (x.1.clone(), 0)));
}
balances.sort_by_key(|x| x.0.clone());
balances.dedup_by_key(|x| x.0.clone());
BalancesConfig { balances }
.assimilate_storage(&mut storage)
.unwrap();
if !endowed_accounts.is_empty() {
SudoConfig {
key: endowed_accounts[0].0.clone(),
}
.assimilate_storage(&mut storage)
.unwrap();
}
MultisigConfig {
accounts: bridge_accounts,
}
.assimilate_storage(&mut storage)
.unwrap();
PermissionsConfig {
initial_permission_owners: vec![],
initial_permissions: Vec::new(),
}
.assimilate_storage(&mut storage)
.unwrap();
TokensConfig {
endowed_accounts: endowed_accounts.clone(),
}
.assimilate_storage(&mut storage)
.unwrap();
AssetsConfig {
endowed_assets: endowed_assets.into_iter().collect(),
}
.assimilate_storage(&mut storage)
.unwrap();
EthBridgeConfig {
networks: bridge_network_configs,
authority_account: authority_account_id.clone(),
val_master_contract_address: sp_core::H160::from_str(
"47e229aa491763038f6a505b4f85d8eb463f0962",
)
.unwrap(),
xor_master_contract_address: sp_core::H160::from_str(
"12c6a709925783f49fcca0b398d13b0d597e6e1c",
)
.unwrap(),
}
.assimilate_storage(&mut storage)
.unwrap();
let mut t = TestExternalities::from(storage);
t.register_extension(OffchainExt::new(offchain));
t.register_extension(TransactionPoolExt::new(pool));
t.register_extension(KeystoreExt(Arc::new(KeyStore::new())));
t.execute_with(|| System::set_block_number(1));
let state = State {
networks: self.networks,
authority_account_id,
pool_state,
offchain_state,
};
(t, state)
}
}
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
pub type AccountPublic = <Signature as Verify>::Signer;
/// Helper function to generate an account ID from seed
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> SubstrateAccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
pub fn gen_peers_keys(
prefix: &str,
peers_num: usize,
) -> Vec<(AccountPublic, AccountId32, [u8; 32])> {
(0..peers_num)
.map(|i| {
let kp = ecdsa::Pair::from_string(&format!("//{}{}", prefix, i), None).unwrap();
let signer = AccountPublic::from(kp.public());
(signer.clone(), signer.into_account(), kp.seed())
})
.collect()
}
lofsigrank.py | #!/usr/bin/env python
"""Identify significantly mutated genes in a set of many WES samples.
Prints a table of each gene's observed and expected loss-of-function (LOF)
mutation burdens and estimated false discovery rate (FDR) for predicted tumor
suppressors.
"""
from __future__ import print_function, division
import collections
import itertools
import random
import sys
import pandas
import numpy
def main(args):
"""Run the LOF SigRank procedure using command-line arguments."""
genes = read_list(args.genes)
samples = read_list(args.samples)
data_table = pandas.read_table(args.data_table, na_filter=False)
summary_function = {'sumcap': lambda x: min(2, sum(x)),
'mean': numpy.mean,
'max': max}[args.function]
# Step_1: Calculate gene-level mutational statistics
lof_table = make_lof_table(data_table, genes, samples, summary_function)
print("Processed", len(lof_table.values), "genes in data table",
file=sys.stderr)
# Step_2: Rank genes by burden of LOF mutations
gene_scores = sorted(lof_sig_scores(lof_table, samples),
key=lambda pair: pair[1])
# Step_3: Compare gene LOF scores to a simulated "background" distribution
if args.permutations:
# Calculate gene score percentiles
orig_pctiles = numpy.arange(1, 0, -1. / len(gene_scores))
# Calculate percentiles for simulated "background" scores
perm_scores = simulate_lof_scores(data_table, args.permutations,
genes, samples, summary_function)
# Calculate FDR for each gene
table_header = ["Gene", "Obs.Score", "Obs.Pctile", "Sim.Score",
"Sim.Pctile", "FDR"]
table_rows = []
perm_pctiles = numpy.arange(1, 0, -1. / len(perm_scores))
perm_pctiles_rev = perm_pctiles[::-1]
for (gene, obs_score), obs_pctile in zip(gene_scores, orig_pctiles):
score_rank = perm_scores.searchsorted(obs_score)
if score_rank == len(perm_scores):
exp_pctile = 0
fdr = 0.0
else:
exp_pctile = perm_pctiles[score_rank]
# FDR: % false positives / % true positives
fdr = min(1.0, exp_pctile / obs_pctile)
exp_score = perm_scores[len(perm_scores) - 1 -
perm_pctiles_rev.searchsorted(obs_pctile)]
table_rows.append((gene, obs_score, obs_pctile, exp_score,
exp_pctile, fdr))
out_table = pandas.DataFrame.from_records(table_rows,
columns=table_header)
else:
out_table = pandas.DataFrame.from_records(gene_scores,
columns=["Gene", "Score"])
# Output as a table to file or screen
if args.output:
out_table.to_csv(args.output, index=False)
else:
print(out_table.to_string(index=False))
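# Worked FDR example (hypothetical numbers, for illustration only): a gene whose
# observed score falls in the top 1% of real genes (obs_pctile = 0.01) while only
# 0.2% of permuted scores are at least as high (exp_pctile = 0.002) gets
# FDR = min(1.0, 0.002 / 0.01) = 0.2, i.e. an estimated 20% of the calls accepted
# at this threshold would be background artefacts.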
def read_list(fname):
"""Parse a "list" file of one string per line."""
with open(fname) as handle:
items = [line.strip() for line in handle]
return items
# _____________________________________________________________________________
# Step_1: Calculate gene-level mutational statistics
def make_lof_table(data_table, my_genes, my_samples, summary_func):
    """Calculate gene-level mutational statistics from a table of mutations.
    Input: nested dict of genes -> samples -> list of mut. type, NMAF, Polyphen
    Output: table stratifying the mutational status of a gene in each sample.
    The output table has a row for each gene and a column for each sample, in
    which there is a number ranging from 0-2 that corresponds to the estimated
    number of alleles lost in the sample. This value is calculated by summing
    the normalized mutant allele frequencies (NMAF) of all non-synonymous
    mutations striking the gene in this sample, capped at 2. In addition, the
    final 9 columns of output are the counts of each mutation type (not weighted
    by MAF).
    This output is used as input to Step 2 to calculate the LOF burden.
    """
    table_header = ["Gene"] + my_samples + [
        "Missense:Benign", "Missense:Possibly", "Missense:Probably",
        "MissenseNA", "Indel", "Nonsense", "Frameshift", "Splice-site",
        "Synonymous"]
    table_records = []
    gs_lookup = group_data_by_gs(data_table)
    for gene in my_genes:
        synonymous = missense_benign = missense_possibly = missense_probably = \
            missense_na = frameshift = nonsense = splice = indel = 0
        out_row = [gene]
        for sample in my_samples:
            normalized = [0]
            # Count mutations of each type for this gene and sample
            for entry in gs_lookup[gene][sample]:
                if entry['muttype'] == 'Silent':
                    synonymous += 1
                    continue
                if entry['muttype'] == 'Intron':
                    # Shouldn't be here; ignore
                    continue
                if entry['muttype'] == 'Missense_Mutation':
                    if entry['consequence'] == 'benign':
                        missense_benign += 1
                    elif entry['consequence'] == 'possibly':
                        missense_possibly += 1
                    elif entry['consequence'] == 'probably':
                        missense_probably += 1
                    elif entry['consequence'] == 'NA':
                        missense_na += 1
                    else:
                        print("Unhandled missense consequence level:",
                              entry['consequence'], file=sys.stderr)
                elif entry['muttype'] == 'Nonsense_Mutation':
                    nonsense += 1
                elif entry['muttype'] == 'Splice_Site':
                    splice += 1
                elif entry['muttype'] in ('Frame_Shift_Ins', 'Frame_Shift_Del'):
                    frameshift += 1
                elif entry['muttype'] in ('In_Frame_Ins', 'In_Frame_Del'):
                    indel += 1
                else:
                    print("Unhandled mutation type:", entry['muttype'],
                          file=sys.stderr)
                    continue
                normalized.append(entry['normalized'])
            # Summarize the normalized mutation counts for this gene and sample
            out_row.append(summary_func(normalized))
        out_row.extend((missense_benign, missense_possibly, missense_probably,
                        missense_na, indel, nonsense, frameshift, splice,
                        synonymous))
        table_records.append(out_row)
    return pandas.DataFrame.from_records(table_records, columns=table_header)
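# Illustration, not part of the original pipeline: how the three --function
# choices condense one sample's NMAF values into an allele-loss estimate
# (the NMAF values below are made up for demonstration).
def _demo_summary_functions():
    nmafs = [0, 0.8, 0.9, 0.7]  # leading 0 mirrors the `normalized = [0]` seed above
    assert min(2, sum(nmafs)) == 2              # 'sumcap': summed NMAF, capped at 2 alleles
    assert max(nmafs) == 0.9                    # 'max': strongest single mutation
    assert abs(numpy.mean(nmafs) - 0.6) < 1e-9  # 'mean': average over all entries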
def group_data_by_gs(data_table):
"""Group relevant fields in a data table by gene and sample."""
gene_data = collections.defaultdict(lambda: collections.defaultdict(list))
for _idx, row in data_table.iterrows():
samp = row['sample']
gene = row['gene']
gene_data[gene][samp].append({
'muttype': row['type'].strip(),
'normalized': row['Normalized'], # NMAF in the manuscript
'consequence': row['MissenseConsequence'].strip(),
})
return gene_data
# _____________________________________________________________________________
# Step_2: Rank genes by burden of LOF mutations
def lof_sig_scores(table, samples, verbose=True):
"""Calculate LOF mutation burden scores for genes in the processed table."""
mut_probdam = 'Missense:Probably'
mut_syn = 'Synonymous'
mut_trunc = ['Nonsense', 'Frameshift', 'Splice-site']
mut_other = ['Missense:Benign', 'Missense:Possibly', 'MissenseNA', 'Indel']
mut_all = [mut_probdam, mut_syn] + mut_trunc + mut_other
# Calculate the global nonsynonymous:synonymous ratio ---------------------
# Within each mutation category, sum counts (across all genes)
tot_count_probdam = sum(table[mut_probdam])
tot_count_syn = sum(table[mut_syn])
tot_count_trunc = sum(itertools.chain(*(list(table[col])
for col in mut_trunc)))
tot_count_other = sum(itertools.chain(*(list(table[col])
for col in mut_other)))
# Global mutation count across all categories and genes (= 3504)
tot_count_all = sum((tot_count_probdam, tot_count_syn, tot_count_trunc,
tot_count_other))
if verbose:
print("Counted", tot_count_all, "mutations across", len(table), "genes",
"and", len(samples), "samples", file=sys.stderr)
# Fraction of global mutations in each category of interest
tot_frac_probdam = tot_count_probdam / tot_count_all
tot_frac_syn = tot_count_syn / tot_count_all
tot_frac_trunc = tot_count_trunc / tot_count_all
# Global nonsynonymous:synonymous ratio = (1-syn)/syn (= 2.13697)
tot_ns_s_ratio = (1 - tot_frac_syn) / tot_frac_syn
# Calculate each gene's mutation score ------------------------------------
for _idx, row in table.iterrows():
gene_count_all = sum([row[col] for col in mut_all])
if not gene_count_all:
# Gene is not mutated at all --> zero score
yield (row['Gene'], 0.0)
continue
        # Initial score is the sum of the 'Normalized' values across all samples
raw_score = sum(row[sid] for sid in samples)
# Adjust for NS:S ratio
gene_count_syn = row[mut_syn]
syn_factor = max(1 - tot_ns_s_ratio * gene_count_syn / gene_count_all,
0)
new_score = raw_score * syn_factor
# Adjust for "probably damaging" missense and truncating mutations
gene_frac_probdam = row[mut_probdam] / gene_count_all
probdam_factor = 1 + gene_frac_probdam - tot_frac_probdam
gene_frac_trunc = sum([row[col] for col in mut_trunc]) / gene_count_all
trunc_factor = gene_frac_trunc / tot_frac_trunc
final_score = new_score * probdam_factor * trunc_factor
yield (row['Gene'], final_score)
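# Worked example of the scoring above (all numbers hypothetical): a gene with 10
# classified mutations, of which 1 synonymous, 2 probably damaging and 3 truncating,
# a raw per-sample NMAF sum of 4.0, and global fractions tot_frac_probdam = 0.10,
# tot_frac_trunc = 0.20 and tot_ns_s_ratio = 2.14 would score
#     syn_factor     = max(1 - 2.14 * 1 / 10, 0) = 0.786
#     probdam_factor = 1 + 2 / 10 - 0.10         = 1.10
#     trunc_factor   = (3 / 10) / 0.20           = 1.50
#     final_score    = 4.0 * 0.786 * 1.10 * 1.50 = 5.19 (rounded)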
# _____________________________________________________________________________
# Step_3: False Discovery Rate (FDR) calculation
def simulate_lof_scores(table, n_permutations, genes, samples, summary_func):
"""Generate a background distribution of LOF scores via permutation."""
perm_scores = []
print("Permuting mutation data", n_permutations, "times:", end=' ',
file=sys.stderr)
for idx in range(n_permutations):
print(idx + 1, end=' ', file=sys.stderr)
permute_table(table)
ptable = make_lof_table(table, genes, samples, summary_func)
perm_scores.extend(s for g, s in
lof_sig_scores(ptable, samples, False))
perm_scores = numpy.asfarray(sorted(perm_scores))
print("\nMax permutation score:", perm_scores[-1], file=sys.stderr)
return perm_scores
def permute_table(dtable):
"""Permute a mutation data table's gene, sample and NMAF columns."""
shuffle_field(dtable, 'gene')
shuffle_field(dtable, 'sample')
shuffle_field(dtable, 'Normalized')
if 'Filler' in dtable:
del dtable['Filler']
def shuffle_field(dframe, field):
"""Shuffle a column of a pandas DataFrame in-place."""
column = list(dframe[field])
random.shuffle(column)
dframe[field] = column
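# Quick sanity check (toy data, not from the study) that the permutation keeps each
# column's values intact while breaking the gene/sample/NMAF pairing between rows:
def _demo_permutation():
    toy = pandas.DataFrame({'gene': ['A', 'B'], 'sample': ['s1', 's2'],
                            'Normalized': [0.5, 1.0]})
    genes_before = sorted(toy['gene'])
    permute_table(toy)
    assert sorted(toy['gene']) == genes_before  # same values, rows possibly re-paired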
# _____________________________________________________________________________
# Command-line arguments
if __name__ == '__main__':
import argparse
AP = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
AP.add_argument('data_table',
help="""Mutation data table with NMAF values and Polyphen-2
predictions. (e.g. Data.txt)""")
AP.add_argument('-g', '--genes', default="Genes.txt",
help="List of gene names, one per line.")
AP.add_argument('-s', '--samples', default="Samples.txt",
help="List of sample names, one per line.")
AP.add_argument('-p', '--permutations', type=int, default=20,
help="""Number of times to permute the input data to
simulate the background mutation frequencies.""")
AP.add_argument('-f', '--function', default='sumcap',
choices=['sumcap', 'max', 'mean'],
help="Summary function for gene-level NMAF counts.")
AP.add_argument('-o', '--output', help="Output file name (*.csv).")
    main(AP.parse_args())
| """Calculate gene-level mutational statistics from a table of mutations.
Input: nested dict of genes -> samples -> list of mut. type, NMAF, Polyphen
Output: table stratifying the mutational status of a gene in each sample.
The output table has a row for each gene and a column for each sample, in
which there is a number ranging from 0-2 that corresponds to the estimated
number of alleles lost in the sample. This value is calculated by summing
the normalized mutant alleles frequencies (NMAF) of all non-synonymous
mutations striking the gene in this sample, capped at 2. In addition, the
final 9 columns of output are the counts of each mutation type (not weighted
by MAF).
This output is used as input to Step 2 to calculate the LOF burden.
"""
table_header = ["Gene"] + my_samples + [
"Missense:Benign", "Missense:Possibly", "Missense:Probably",
"MissenseNA", "Indel", "Nonsense", "Frameshift", "Splice-site",
"Synonymous"]
table_records = []
gs_lookup = group_data_by_gs(data_table)
for gene in my_genes:
synonymous = missense_benign = missense_possibly = missense_probably = \
missense_na = frameshift = nonsense = splice = indel = 0
out_row = [gene]
for sample in my_samples:
normalized = [0]
# Count mutations of each type for this gene and sample
for entry in gs_lookup[gene][sample]:
if entry['muttype'] == 'Silent':
synonymous += 1
continue
if entry['muttype'] == 'Intron':
# Shouldn't be here; ignore
continue
if entry['muttype'] == 'Missense_Mutation':
if entry['consequence'] == 'benign':
missense_benign += 1
elif entry['consequence'] == 'possibly':
missense_possibly += 1
elif entry['consequence'] == 'probably':
missense_probably += 1
elif entry['consequence'] == 'NA':
missense_na += 1
else:
print("Unhandled missense consequence level:",
entry['consequence'], file=sys.stderr)
elif entry['muttype'] == 'Nonsense_Mutation':
nonsense += 1
elif entry['muttype'] == 'Splice_Site':
splice += 1
elif entry['muttype'] in ('Frame_Shift_Ins', 'Frame_Shift_Del'):
frameshift += 1
elif entry['muttype'] in ('In_Frame_Ins', 'In_Frame_Del'):
indel += 1
else:
print("Unhandled mutation type:", entry['muttype'],
file=sys.stderr)
continue
normalized.append(entry['normalized'])
# Summarize the normalized mutation counts for this gene and sample
out_row.append(summary_func(normalized))
out_row.extend((missense_benign, missense_possibly, missense_probably,
missense_na, indel, nonsense, frameshift, splice,
synonymous))
table_records.append(out_row)
return pandas.DataFrame.from_records(table_records, columns=table_header) | identifier_body |
lofsigrank.py | #!/usr/bin/env python
"""Identify significantly mutated genes in a set of many WES samples.
Prints a table of each gene's observed and expected loss-of-function (LOF)
mutation burdens and estimated false discovery rate (FDR) for predicted tumor
suppressors.
"""
from __future__ import print_function, division
import collections
import itertools
import random
import sys
import pandas
import numpy
def main(args):
"""Run the LOF SigRank procedure using command-line arguments."""
genes = read_list(args.genes)
samples = read_list(args.samples)
data_table = pandas.read_table(args.data_table, na_filter=False)
summary_function = {'sumcap': lambda x: min(2, sum(x)),
'mean': numpy.mean,
'max': max}[args.function]
# Step_1: Calculate gene-level mutational statistics
lof_table = make_lof_table(data_table, genes, samples, summary_function)
print("Processed", len(lof_table.values), "genes in data table",
file=sys.stderr)
# Step_2: Rank genes by burden of LOF mutations
gene_scores = sorted(lof_sig_scores(lof_table, samples),
key=lambda pair: pair[1])
# Step_3: Compare gene LOF scores to a simulated "background" distribution
if args.permutations:
# Calculate gene score percentiles
orig_pctiles = numpy.arange(1, 0, -1. / len(gene_scores))
# Calculate percentiles for simulated "background" scores
perm_scores = simulate_lof_scores(data_table, args.permutations,
genes, samples, summary_function)
# Calculate FDR for each gene
table_header = ["Gene", "Obs.Score", "Obs.Pctile", "Sim.Score",
"Sim.Pctile", "FDR"]
table_rows = []
perm_pctiles = numpy.arange(1, 0, -1. / len(perm_scores))
perm_pctiles_rev = perm_pctiles[::-1]
for (gene, obs_score), obs_pctile in zip(gene_scores, orig_pctiles):
score_rank = perm_scores.searchsorted(obs_score)
if score_rank == len(perm_scores):
exp_pctile = 0
fdr = 0.0
else:
exp_pctile = perm_pctiles[score_rank]
# FDR: % false positives / % true positives
fdr = min(1.0, exp_pctile / obs_pctile)
exp_score = perm_scores[len(perm_scores) - 1 -
perm_pctiles_rev.searchsorted(obs_pctile)]
table_rows.append((gene, obs_score, obs_pctile, exp_score,
exp_pctile, fdr))
out_table = pandas.DataFrame.from_records(table_rows,
columns=table_header)
else:
out_table = pandas.DataFrame.from_records(gene_scores,
columns=["Gene", "Score"])
# Output as a table to file or screen
if args.output:
out_table.to_csv(args.output, index=False)
else:
print(out_table.to_string(index=False))
def read_list(fname):
"""Parse a "list" file of one string per line."""
with open(fname) as handle:
items = [line.strip() for line in handle]
return items
# _____________________________________________________________________________
# Step_1: Calculate gene-level mutational statistics
def make_lof_table(data_table, my_genes, my_samples, summary_func):
"""Calculate gene-level mutational statistics from a table of mutations.
Input: nested dict of genes -> samples -> list of mut. type, NMAF, Polyphen
Output: table stratifying the mutational status of a gene in each sample.
The output table has a row for each gene and a column for each sample, in
which there is a number ranging from 0-2 that corresponds to the estimated
number of alleles lost in the sample. This value is calculated by summing
the normalized mutant alleles frequencies (NMAF) of all non-synonymous
mutations striking the gene in this sample, capped at 2. In addition, the
final 9 columns of output are the counts of each mutation type (not weighted
by MAF).
This output is used as input to Step 2 to calculate the LOF burden.
"""
table_header = ["Gene"] + my_samples + [
"Missense:Benign", "Missense:Possibly", "Missense:Probably",
"MissenseNA", "Indel", "Nonsense", "Frameshift", "Splice-site",
"Synonymous"]
table_records = []
gs_lookup = group_data_by_gs(data_table)
for gene in my_genes:
synonymous = missense_benign = missense_possibly = missense_probably = \
missense_na = frameshift = nonsense = splice = indel = 0
out_row = [gene]
for sample in my_samples:
normalized = [0]
# Count mutations of each type for this gene and sample
for entry in gs_lookup[gene][sample]:
if entry['muttype'] == 'Silent':
synonymous += 1
continue
if entry['muttype'] == 'Intron':
# Shouldn't be here; ignore
continue
if entry['muttype'] == 'Missense_Mutation':
if entry['consequence'] == 'benign':
missense_benign += 1
elif entry['consequence'] == 'possibly':
missense_possibly += 1
elif entry['consequence'] == 'probably':
missense_probably += 1
elif entry['consequence'] == 'NA':
missense_na += 1
else:
print("Unhandled missense consequence level:",
entry['consequence'], file=sys.stderr)
elif entry['muttype'] == 'Nonsense_Mutation':
nonsense += 1
elif entry['muttype'] == 'Splice_Site':
splice += 1
elif entry['muttype'] in ('Frame_Shift_Ins', 'Frame_Shift_Del'):
frameshift += 1
elif entry['muttype'] in ('In_Frame_Ins', 'In_Frame_Del'):
indel += 1
else:
print("Unhandled mutation type:", entry['muttype'],
file=sys.stderr)
continue
normalized.append(entry['normalized'])
# Summarize the normalized mutation counts for this gene and sample
out_row.append(summary_func(normalized))
out_row.extend((missense_benign, missense_possibly, missense_probably,
missense_na, indel, nonsense, frameshift, splice,
synonymous))
table_records.append(out_row)
return pandas.DataFrame.from_records(table_records, columns=table_header)
def group_data_by_gs(data_table):
"""Group relevant fields in a data table by gene and sample."""
gene_data = collections.defaultdict(lambda: collections.defaultdict(list))
for _idx, row in data_table.iterrows():
|
return gene_data
# _____________________________________________________________________________
# Step_2: Rank genes by burden of LOF mutations
def lof_sig_scores(table, samples, verbose=True):
"""Calculate LOF mutation burden scores for genes in the processed table."""
mut_probdam = 'Missense:Probably'
mut_syn = 'Synonymous'
mut_trunc = ['Nonsense', 'Frameshift', 'Splice-site']
mut_other = ['Missense:Benign', 'Missense:Possibly', 'MissenseNA', 'Indel']
mut_all = [mut_probdam, mut_syn] + mut_trunc + mut_other
# Calculate the global nonsynonymous:synonymous ratio ---------------------
# Within each mutation category, sum counts (across all genes)
tot_count_probdam = sum(table[mut_probdam])
tot_count_syn = sum(table[mut_syn])
tot_count_trunc = sum(itertools.chain(*(list(table[col])
for col in mut_trunc)))
tot_count_other = sum(itertools.chain(*(list(table[col])
for col in mut_other)))
# Global mutation count across all categories and genes (= 3504)
tot_count_all = sum((tot_count_probdam, tot_count_syn, tot_count_trunc,
tot_count_other))
if verbose:
print("Counted", tot_count_all, "mutations across", len(table), "genes",
"and", len(samples), "samples", file=sys.stderr)
# Fraction of global mutations in each category of interest
tot_frac_probdam = tot_count_probdam / tot_count_all
tot_frac_syn = tot_count_syn / tot_count_all
tot_frac_trunc = tot_count_trunc / tot_count_all
# Global nonsynonymous:synonymous ratio = (1-syn)/syn (= 2.13697)
tot_ns_s_ratio = (1 - tot_frac_syn) / tot_frac_syn
# Calculate each gene's mutation score ------------------------------------
for _idx, row in table.iterrows():
gene_count_all = sum([row[col] for col in mut_all])
if not gene_count_all:
# Gene is not mutated at all --> zero score
yield (row['Gene'], 0.0)
continue
# Initial score is the sum the 'Normalized' values across all samples
raw_score = sum(row[sid] for sid in samples)
# Adjust for NS:S ratio
gene_count_syn = row[mut_syn]
syn_factor = max(1 - tot_ns_s_ratio * gene_count_syn / gene_count_all,
0)
new_score = raw_score * syn_factor
# Adjust for "probably damaging" missense and truncating mutations
gene_frac_probdam = row[mut_probdam] / gene_count_all
probdam_factor = 1 + gene_frac_probdam - tot_frac_probdam
gene_frac_trunc = sum([row[col] for col in mut_trunc]) / gene_count_all
trunc_factor = gene_frac_trunc / tot_frac_trunc
final_score = new_score * probdam_factor * trunc_factor
yield (row['Gene'], final_score)
# _____________________________________________________________________________
# Step_3: False Discovery Rate (FDR) calculation
def simulate_lof_scores(table, n_permutations, genes, samples, summary_func):
"""Generate a background distribution of LOF scores via permutation."""
perm_scores = []
print("Permuting mutation data", n_permutations, "times:", end=' ',
file=sys.stderr)
for idx in range(n_permutations):
print(idx + 1, end=' ', file=sys.stderr)
permute_table(table)
ptable = make_lof_table(table, genes, samples, summary_func)
perm_scores.extend(s for g, s in
lof_sig_scores(ptable, samples, False))
perm_scores = numpy.asfarray(sorted(perm_scores))
print("\nMax permutation score:", perm_scores[-1], file=sys.stderr)
return perm_scores
def permute_table(dtable):
"""Permute a mutation data table's gene, sample and NMAF columns."""
shuffle_field(dtable, 'gene')
shuffle_field(dtable, 'sample')
shuffle_field(dtable, 'Normalized')
if 'Filler' in dtable:
del dtable['Filler']
def shuffle_field(dframe, field):
"""Shuffle a column of a pandas DataFrame in-place."""
column = list(dframe[field])
random.shuffle(column)
dframe[field] = column
# _____________________________________________________________________________
# Command-line arguments
if __name__ == '__main__':
import argparse
AP = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
AP.add_argument('data_table',
help="""Mutation data table with NMAF values and Polyphen-2
predictions. (e.g. Data.txt)""")
AP.add_argument('-g', '--genes', default="Genes.txt",
help="List of gene names, one per line.")
AP.add_argument('-s', '--samples', default="Samples.txt",
help="List of sample names, one per line.")
AP.add_argument('-p', '--permutations', type=int, default=20,
help="""Number of times to permute the input data to
simulate the background mutation frequencies.""")
AP.add_argument('-f', '--function', default='sumcap',
choices=['sumcap', 'max', 'mean'],
help="Summary function for gene-level NMAF counts.")
AP.add_argument('-o', '--output', help="Output file name (*.csv).")
main(AP.parse_args())
| samp = row['sample']
gene = row['gene']
gene_data[gene][samp].append({
'muttype': row['type'].strip(),
'normalized': row['Normalized'], # NMAF in the manuscript
'consequence': row['MissenseConsequence'].strip(),
}) | conditional_block |
lofsigrank.py | #!/usr/bin/env python
"""Identify significantly mutated genes in a set of many WES samples.
Prints a table of each gene's observed and expected loss-of-function (LOF)
mutation burdens and estimated false discovery rate (FDR) for predicted tumor
suppressors.
"""
from __future__ import print_function, division
import collections
import itertools
import random
import sys
import pandas
import numpy
def main(args):
"""Run the LOF SigRank procedure using command-line arguments."""
genes = read_list(args.genes)
samples = read_list(args.samples)
data_table = pandas.read_table(args.data_table, na_filter=False)
summary_function = {'sumcap': lambda x: min(2, sum(x)),
'mean': numpy.mean,
'max': max}[args.function]
# Step_1: Calculate gene-level mutational statistics
lof_table = make_lof_table(data_table, genes, samples, summary_function)
print("Processed", len(lof_table.values), "genes in data table",
file=sys.stderr)
# Step_2: Rank genes by burden of LOF mutations
gene_scores = sorted(lof_sig_scores(lof_table, samples),
key=lambda pair: pair[1])
# Step_3: Compare gene LOF scores to a simulated "background" distribution
if args.permutations:
# Calculate gene score percentiles
orig_pctiles = numpy.arange(1, 0, -1. / len(gene_scores))
# Calculate percentiles for simulated "background" scores
perm_scores = simulate_lof_scores(data_table, args.permutations,
genes, samples, summary_function)
# Calculate FDR for each gene
table_header = ["Gene", "Obs.Score", "Obs.Pctile", "Sim.Score",
"Sim.Pctile", "FDR"]
table_rows = []
perm_pctiles = numpy.arange(1, 0, -1. / len(perm_scores))
perm_pctiles_rev = perm_pctiles[::-1]
for (gene, obs_score), obs_pctile in zip(gene_scores, orig_pctiles):
score_rank = perm_scores.searchsorted(obs_score)
if score_rank == len(perm_scores):
exp_pctile = 0
fdr = 0.0
else:
exp_pctile = perm_pctiles[score_rank]
# FDR: % false positives / % true positives
fdr = min(1.0, exp_pctile / obs_pctile)
exp_score = perm_scores[len(perm_scores) - 1 -
perm_pctiles_rev.searchsorted(obs_pctile)]
table_rows.append((gene, obs_score, obs_pctile, exp_score,
exp_pctile, fdr))
out_table = pandas.DataFrame.from_records(table_rows,
columns=table_header)
else:
out_table = pandas.DataFrame.from_records(gene_scores,
columns=["Gene", "Score"])
# Output as a table to file or screen
if args.output:
out_table.to_csv(args.output, index=False)
else:
print(out_table.to_string(index=False))
def read_list(fname):
"""Parse a "list" file of one string per line."""
with open(fname) as handle:
items = [line.strip() for line in handle]
return items
# _____________________________________________________________________________
# Step_1: Calculate gene-level mutational statistics
def make_lof_table(data_table, my_genes, my_samples, summary_func):
"""Calculate gene-level mutational statistics from a table of mutations.
Input: nested dict of genes -> samples -> list of mut. type, NMAF, Polyphen
Output: table stratifying the mutational status of a gene in each sample.
The output table has a row for each gene and a column for each sample, in
which there is a number ranging from 0-2 that corresponds to the estimated
number of alleles lost in the sample. This value is calculated by summing
the normalized mutant alleles frequencies (NMAF) of all non-synonymous
mutations striking the gene in this sample, capped at 2. In addition, the
final 9 columns of output are the counts of each mutation type (not weighted
by MAF).
This output is used as input to Step 2 to calculate the LOF burden.
"""
table_header = ["Gene"] + my_samples + [
"Missense:Benign", "Missense:Possibly", "Missense:Probably",
"MissenseNA", "Indel", "Nonsense", "Frameshift", "Splice-site",
"Synonymous"]
table_records = []
gs_lookup = group_data_by_gs(data_table)
for gene in my_genes:
synonymous = missense_benign = missense_possibly = missense_probably = \
missense_na = frameshift = nonsense = splice = indel = 0
out_row = [gene] | synonymous += 1
continue
if entry['muttype'] == 'Intron':
# Shouldn't be here; ignore
continue
if entry['muttype'] == 'Missense_Mutation':
if entry['consequence'] == 'benign':
missense_benign += 1
elif entry['consequence'] == 'possibly':
missense_possibly += 1
elif entry['consequence'] == 'probably':
missense_probably += 1
elif entry['consequence'] == 'NA':
missense_na += 1
else:
print("Unhandled missense consequence level:",
entry['consequence'], file=sys.stderr)
elif entry['muttype'] == 'Nonsense_Mutation':
nonsense += 1
elif entry['muttype'] == 'Splice_Site':
splice += 1
elif entry['muttype'] in ('Frame_Shift_Ins', 'Frame_Shift_Del'):
frameshift += 1
elif entry['muttype'] in ('In_Frame_Ins', 'In_Frame_Del'):
indel += 1
else:
print("Unhandled mutation type:", entry['muttype'],
file=sys.stderr)
continue
normalized.append(entry['normalized'])
# Summarize the normalized mutation counts for this gene and sample
out_row.append(summary_func(normalized))
out_row.extend((missense_benign, missense_possibly, missense_probably,
missense_na, indel, nonsense, frameshift, splice,
synonymous))
table_records.append(out_row)
return pandas.DataFrame.from_records(table_records, columns=table_header)
def group_data_by_gs(data_table):
"""Group relevant fields in a data table by gene and sample."""
gene_data = collections.defaultdict(lambda: collections.defaultdict(list))
for _idx, row in data_table.iterrows():
samp = row['sample']
gene = row['gene']
gene_data[gene][samp].append({
'muttype': row['type'].strip(),
'normalized': row['Normalized'], # NMAF in the manuscript
'consequence': row['MissenseConsequence'].strip(),
})
return gene_data
# _____________________________________________________________________________
# Step_2: Rank genes by burden of LOF mutations
def lof_sig_scores(table, samples, verbose=True):
"""Calculate LOF mutation burden scores for genes in the processed table."""
mut_probdam = 'Missense:Probably'
mut_syn = 'Synonymous'
mut_trunc = ['Nonsense', 'Frameshift', 'Splice-site']
mut_other = ['Missense:Benign', 'Missense:Possibly', 'MissenseNA', 'Indel']
mut_all = [mut_probdam, mut_syn] + mut_trunc + mut_other
# Calculate the global nonsynonymous:synonymous ratio ---------------------
# Within each mutation category, sum counts (across all genes)
tot_count_probdam = sum(table[mut_probdam])
tot_count_syn = sum(table[mut_syn])
tot_count_trunc = sum(itertools.chain(*(list(table[col])
for col in mut_trunc)))
tot_count_other = sum(itertools.chain(*(list(table[col])
for col in mut_other)))
# Global mutation count across all categories and genes (= 3504)
tot_count_all = sum((tot_count_probdam, tot_count_syn, tot_count_trunc,
tot_count_other))
if verbose:
print("Counted", tot_count_all, "mutations across", len(table), "genes",
"and", len(samples), "samples", file=sys.stderr)
# Fraction of global mutations in each category of interest
tot_frac_probdam = tot_count_probdam / tot_count_all
tot_frac_syn = tot_count_syn / tot_count_all
tot_frac_trunc = tot_count_trunc / tot_count_all
# Global nonsynonymous:synonymous ratio = (1-syn)/syn (= 2.13697)
tot_ns_s_ratio = (1 - tot_frac_syn) / tot_frac_syn
# Calculate each gene's mutation score ------------------------------------
for _idx, row in table.iterrows():
gene_count_all = sum([row[col] for col in mut_all])
if not gene_count_all:
# Gene is not mutated at all --> zero score
yield (row['Gene'], 0.0)
continue
# Initial score is the sum the 'Normalized' values across all samples
raw_score = sum(row[sid] for sid in samples)
# Adjust for NS:S ratio
gene_count_syn = row[mut_syn]
syn_factor = max(1 - tot_ns_s_ratio * gene_count_syn / gene_count_all,
0)
new_score = raw_score * syn_factor
# Adjust for "probably damaging" missense and truncating mutations
gene_frac_probdam = row[mut_probdam] / gene_count_all
probdam_factor = 1 + gene_frac_probdam - tot_frac_probdam
gene_frac_trunc = sum([row[col] for col in mut_trunc]) / gene_count_all
trunc_factor = gene_frac_trunc / tot_frac_trunc
final_score = new_score * probdam_factor * trunc_factor
yield (row['Gene'], final_score)
# _____________________________________________________________________________
# Step_3: False Discovery Rate (FDR) calculation
def simulate_lof_scores(table, n_permutations, genes, samples, summary_func):
"""Generate a background distribution of LOF scores via permutation."""
perm_scores = []
print("Permuting mutation data", n_permutations, "times:", end=' ',
file=sys.stderr)
for idx in range(n_permutations):
print(idx + 1, end=' ', file=sys.stderr)
permute_table(table)
ptable = make_lof_table(table, genes, samples, summary_func)
perm_scores.extend(s for g, s in
lof_sig_scores(ptable, samples, False))
perm_scores = numpy.asfarray(sorted(perm_scores))
print("\nMax permutation score:", perm_scores[-1], file=sys.stderr)
return perm_scores
def permute_table(dtable):
"""Permute a mutation data table's gene, sample and NMAF columns."""
shuffle_field(dtable, 'gene')
shuffle_field(dtable, 'sample')
shuffle_field(dtable, 'Normalized')
if 'Filler' in dtable:
del dtable['Filler']
def shuffle_field(dframe, field):
"""Shuffle a column of a pandas DataFrame in-place."""
column = list(dframe[field])
random.shuffle(column)
dframe[field] = column
# _____________________________________________________________________________
# Command-line arguments
if __name__ == '__main__':
import argparse
AP = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
AP.add_argument('data_table',
help="""Mutation data table with NMAF values and Polyphen-2
predictions. (e.g. Data.txt)""")
AP.add_argument('-g', '--genes', default="Genes.txt",
help="List of gene names, one per line.")
AP.add_argument('-s', '--samples', default="Samples.txt",
help="List of sample names, one per line.")
AP.add_argument('-p', '--permutations', type=int, default=20,
help="""Number of times to permute the input data to
simulate the background mutation frequencies.""")
AP.add_argument('-f', '--function', default='sumcap',
choices=['sumcap', 'max', 'mean'],
help="Summary function for gene-level NMAF counts.")
AP.add_argument('-o', '--output', help="Output file name (*.csv).")
main(AP.parse_args()) | for sample in my_samples:
normalized = [0]
# Count mutations of each type for this gene and sample
for entry in gs_lookup[gene][sample]:
if entry['muttype'] == 'Silent': | random_line_split |
lofsigrank.py | #!/usr/bin/env python
"""Identify significantly mutated genes in a set of many WES samples.
Prints a table of each gene's observed and expected loss-of-function (LOF)
mutation burdens and estimated false discovery rate (FDR) for predicted tumor
suppressors.
"""
from __future__ import print_function, division
import collections
import itertools
import random
import sys
import pandas
import numpy
def main(args):
"""Run the LOF SigRank procedure using command-line arguments."""
genes = read_list(args.genes)
samples = read_list(args.samples)
data_table = pandas.read_table(args.data_table, na_filter=False)
summary_function = {'sumcap': lambda x: min(2, sum(x)),
'mean': numpy.mean,
'max': max}[args.function]
# Step_1: Calculate gene-level mutational statistics
lof_table = make_lof_table(data_table, genes, samples, summary_function)
print("Processed", len(lof_table.values), "genes in data table",
file=sys.stderr)
# Step_2: Rank genes by burden of LOF mutations
gene_scores = sorted(lof_sig_scores(lof_table, samples),
key=lambda pair: pair[1])
# Step_3: Compare gene LOF scores to a simulated "background" distribution
if args.permutations:
# Calculate gene score percentiles
orig_pctiles = numpy.arange(1, 0, -1. / len(gene_scores))
# Calculate percentiles for simulated "background" scores
perm_scores = simulate_lof_scores(data_table, args.permutations,
genes, samples, summary_function)
# Calculate FDR for each gene
table_header = ["Gene", "Obs.Score", "Obs.Pctile", "Sim.Score",
"Sim.Pctile", "FDR"]
table_rows = []
perm_pctiles = numpy.arange(1, 0, -1. / len(perm_scores))
perm_pctiles_rev = perm_pctiles[::-1]
for (gene, obs_score), obs_pctile in zip(gene_scores, orig_pctiles):
score_rank = perm_scores.searchsorted(obs_score)
if score_rank == len(perm_scores):
exp_pctile = 0
fdr = 0.0
else:
exp_pctile = perm_pctiles[score_rank]
# FDR: % false positives / % true positives
fdr = min(1.0, exp_pctile / obs_pctile)
exp_score = perm_scores[len(perm_scores) - 1 -
perm_pctiles_rev.searchsorted(obs_pctile)]
table_rows.append((gene, obs_score, obs_pctile, exp_score,
exp_pctile, fdr))
out_table = pandas.DataFrame.from_records(table_rows,
columns=table_header)
else:
out_table = pandas.DataFrame.from_records(gene_scores,
columns=["Gene", "Score"])
# Output as a table to file or screen
if args.output:
out_table.to_csv(args.output, index=False)
else:
print(out_table.to_string(index=False))
def read_list(fname):
"""Parse a "list" file of one string per line."""
with open(fname) as handle:
items = [line.strip() for line in handle]
return items
# _____________________________________________________________________________
# Step_1: Calculate gene-level mutational statistics
def make_lof_table(data_table, my_genes, my_samples, summary_func):
"""Calculate gene-level mutational statistics from a table of mutations.
Input: nested dict of genes -> samples -> list of mut. type, NMAF, Polyphen
Output: table stratifying the mutational status of a gene in each sample.
The output table has a row for each gene and a column for each sample, in
which there is a number ranging from 0-2 that corresponds to the estimated
number of alleles lost in the sample. This value is calculated by summing
the normalized mutant alleles frequencies (NMAF) of all non-synonymous
mutations striking the gene in this sample, capped at 2. In addition, the
final 9 columns of output are the counts of each mutation type (not weighted
by MAF).
This output is used as input to Step 2 to calculate the LOF burden.
"""
table_header = ["Gene"] + my_samples + [
"Missense:Benign", "Missense:Possibly", "Missense:Probably",
"MissenseNA", "Indel", "Nonsense", "Frameshift", "Splice-site",
"Synonymous"]
table_records = []
gs_lookup = group_data_by_gs(data_table)
for gene in my_genes:
synonymous = missense_benign = missense_possibly = missense_probably = \
missense_na = frameshift = nonsense = splice = indel = 0
out_row = [gene]
for sample in my_samples:
normalized = [0]
# Count mutations of each type for this gene and sample
for entry in gs_lookup[gene][sample]:
if entry['muttype'] == 'Silent':
synonymous += 1
continue
if entry['muttype'] == 'Intron':
# Shouldn't be here; ignore
continue
if entry['muttype'] == 'Missense_Mutation':
if entry['consequence'] == 'benign':
missense_benign += 1
elif entry['consequence'] == 'possibly':
missense_possibly += 1
elif entry['consequence'] == 'probably':
missense_probably += 1
elif entry['consequence'] == 'NA':
missense_na += 1
else:
print("Unhandled missense consequence level:",
entry['consequence'], file=sys.stderr)
elif entry['muttype'] == 'Nonsense_Mutation':
nonsense += 1
elif entry['muttype'] == 'Splice_Site':
splice += 1
elif entry['muttype'] in ('Frame_Shift_Ins', 'Frame_Shift_Del'):
frameshift += 1
elif entry['muttype'] in ('In_Frame_Ins', 'In_Frame_Del'):
indel += 1
else:
print("Unhandled mutation type:", entry['muttype'],
file=sys.stderr)
continue
normalized.append(entry['normalized'])
# Summarize the normalized mutation counts for this gene and sample
out_row.append(summary_func(normalized))
out_row.extend((missense_benign, missense_possibly, missense_probably,
missense_na, indel, nonsense, frameshift, splice,
synonymous))
table_records.append(out_row)
return pandas.DataFrame.from_records(table_records, columns=table_header)
def | (data_table):
"""Group relevant fields in a data table by gene and sample."""
gene_data = collections.defaultdict(lambda: collections.defaultdict(list))
for _idx, row in data_table.iterrows():
samp = row['sample']
gene = row['gene']
gene_data[gene][samp].append({
'muttype': row['type'].strip(),
'normalized': row['Normalized'], # NMAF in the manuscript
'consequence': row['MissenseConsequence'].strip(),
})
return gene_data
# _____________________________________________________________________________
# Step_2: Rank genes by burden of LOF mutations
def lof_sig_scores(table, samples, verbose=True):
"""Calculate LOF mutation burden scores for genes in the processed table."""
mut_probdam = 'Missense:Probably'
mut_syn = 'Synonymous'
mut_trunc = ['Nonsense', 'Frameshift', 'Splice-site']
mut_other = ['Missense:Benign', 'Missense:Possibly', 'MissenseNA', 'Indel']
mut_all = [mut_probdam, mut_syn] + mut_trunc + mut_other
# Calculate the global nonsynonymous:synonymous ratio ---------------------
# Within each mutation category, sum counts (across all genes)
tot_count_probdam = sum(table[mut_probdam])
tot_count_syn = sum(table[mut_syn])
tot_count_trunc = sum(itertools.chain(*(list(table[col])
for col in mut_trunc)))
tot_count_other = sum(itertools.chain(*(list(table[col])
for col in mut_other)))
# Global mutation count across all categories and genes (= 3504)
tot_count_all = sum((tot_count_probdam, tot_count_syn, tot_count_trunc,
tot_count_other))
if verbose:
print("Counted", tot_count_all, "mutations across", len(table), "genes",
"and", len(samples), "samples", file=sys.stderr)
# Fraction of global mutations in each category of interest
tot_frac_probdam = tot_count_probdam / tot_count_all
tot_frac_syn = tot_count_syn / tot_count_all
tot_frac_trunc = tot_count_trunc / tot_count_all
# Global nonsynonymous:synonymous ratio = (1-syn)/syn (= 2.13697)
tot_ns_s_ratio = (1 - tot_frac_syn) / tot_frac_syn
# Calculate each gene's mutation score ------------------------------------
for _idx, row in table.iterrows():
gene_count_all = sum([row[col] for col in mut_all])
if not gene_count_all:
# Gene is not mutated at all --> zero score
yield (row['Gene'], 0.0)
continue
# Initial score is the sum the 'Normalized' values across all samples
raw_score = sum(row[sid] for sid in samples)
# Adjust for NS:S ratio
gene_count_syn = row[mut_syn]
syn_factor = max(1 - tot_ns_s_ratio * gene_count_syn / gene_count_all,
0)
new_score = raw_score * syn_factor
# Adjust for "probably damaging" missense and truncating mutations
gene_frac_probdam = row[mut_probdam] / gene_count_all
probdam_factor = 1 + gene_frac_probdam - tot_frac_probdam
gene_frac_trunc = sum([row[col] for col in mut_trunc]) / gene_count_all
trunc_factor = gene_frac_trunc / tot_frac_trunc
final_score = new_score * probdam_factor * trunc_factor
yield (row['Gene'], final_score)
# _____________________________________________________________________________
# Step_3: False Discovery Rate (FDR) calculation
def simulate_lof_scores(table, n_permutations, genes, samples, summary_func):
"""Generate a background distribution of LOF scores via permutation."""
perm_scores = []
print("Permuting mutation data", n_permutations, "times:", end=' ',
file=sys.stderr)
for idx in range(n_permutations):
print(idx + 1, end=' ', file=sys.stderr)
permute_table(table)
ptable = make_lof_table(table, genes, samples, summary_func)
perm_scores.extend(s for g, s in
lof_sig_scores(ptable, samples, False))
perm_scores = numpy.asfarray(sorted(perm_scores))
print("\nMax permutation score:", perm_scores[-1], file=sys.stderr)
return perm_scores
def permute_table(dtable):
"""Permute a mutation data table's gene, sample and NMAF columns."""
shuffle_field(dtable, 'gene')
shuffle_field(dtable, 'sample')
shuffle_field(dtable, 'Normalized')
if 'Filler' in dtable:
del dtable['Filler']
def shuffle_field(dframe, field):
"""Shuffle a column of a pandas DataFrame in-place."""
column = list(dframe[field])
random.shuffle(column)
dframe[field] = column
# _____________________________________________________________________________
# Command-line arguments
if __name__ == '__main__':
import argparse
AP = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
AP.add_argument('data_table',
help="""Mutation data table with NMAF values and Polyphen-2
predictions. (e.g. Data.txt)""")
AP.add_argument('-g', '--genes', default="Genes.txt",
help="List of gene names, one per line.")
AP.add_argument('-s', '--samples', default="Samples.txt",
help="List of sample names, one per line.")
AP.add_argument('-p', '--permutations', type=int, default=20,
help="""Number of times to permute the input data to
simulate the background mutation frequencies.""")
AP.add_argument('-f', '--function', default='sumcap',
choices=['sumcap', 'max', 'mean'],
help="Summary function for gene-level NMAF counts.")
AP.add_argument('-o', '--output', help="Output file name (*.csv).")
main(AP.parse_args())
| group_data_by_gs | identifier_name |
helpers.py | """Processing for MyProjects Web Application
"""
import datetime
from django.http import Http404, HttpResponse
from django.core.exceptions import ObjectDoesNotExist, FieldError
from django.utils.translation import gettext as _
import docs.models as my
from myprojects.settings import MEDIA_ROOT, SITES
RELTXT = '<br/><a href="/docs/{0}/{1}/{2}/">{3}</a>'
BTNTXT = '<a href="/docs/{0}/{1}/{2}/{3}/{4}/"><input type="button" value="{5}" /></a>'
ADD_TEXT, REMOVE_TEXT = _("leg relatie"), _("verwijder relatie")
# note: obj.model._meta does not exist (anymore), but obj._meta.model does
# in get_related, however, I replaced model._meta with _meta
# there, rel.to._meta.model_name was also replaced by related_model
# and comparing against _meta.model_name was replaced by comparing against _meta.model
# here we compare against a name, so we must keep using _meta.model_name
# in this comparison we therefore only replace rel.to with related_model
def get_related(this_obj, other_obj, m2m=False):
"""geeft het resultaat van een reversed relatie terug
eerst wordt in het gerelateerde model de related_name opgezocht
dan wordt hiermee het betreffende attribuut van het huidige object bepaald
"""
# is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type
# verwezen wordt vanuit het andere type? Of is dat om de vorige/volgende te kunnen bepalen?
# als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object
# maar een relatie (uit de fields verzameling)
if m2m:
fields = [x for x in other_obj._meta.many_to_many]
else:
fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and
x.get_internal_type() == 'ForeignKey']
for fld in fields:
if fld.related_model == this_obj._meta.model:
related_name = fld.related_query_name()
break
else:
return None # not found
try:
return this_obj.__getattribute__(related_name).all()
except UnboundLocalError:
return None
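# Behavioral note (added, not from the original author): the reverse access via
# __getattribute__(related_name) uses related_query_name(), which only matches
# Django's reverse accessor when the ForeignKey/M2M declares an explicit
# related_name; without one the accessor would be '<model>_set' and the lookup
# would raise AttributeError. The except-clause above only covers the case where
# no matching field was found at all (related_name never got bound).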
# could this also be replaced by a call to get_relation, using the name it returns
# to fetch the related objects and determine the previous and next ones right away?
# (that would of course have consequences for the calling code)
# originally it appears to have worked that way: the function was called get_relation
# and the part that still carries that name was hard-coded inside it
# this function is only called from a few methods of the GetRelations class included
# below, namely to determine the names of relations from other objects to the
# current one.
# using get_relation as it is now, that distinction (from- versus to-relations)
# could be handled by the same function
def get_relation(srt, soort):
"""Geeft veldnaam en cardinaliteit terug voor een relatie van srt naar soort
"""
result, multiple = None, None
if srt != soort or soort in ('funcproc', 'techproc'):
for relobj in my.rectypes[srt]._meta.get_fields():
if relobj.related_model and corr_naam(relobj.related_model._meta.model_name) == soort:
result = relobj.name
                multiple = relobj.get_internal_type() != 'ForeignKey'
break
return result, multiple
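# A short usage sketch (document types taken from this module): relating a
# 'userwijz' document to its 'project' would go through
#     attr_name, multiple = get_relation('userwijz', 'project')
# which yields the FK/M2M attribute name plus whether it is multi-valued;
# set_relation below then either add()s to the related manager (multiple) or
# assigns the attribute and saves (single), and remove_relation mirrors that.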
def set_relation(o, soort, r, srt):
    "link object `r` of type srt to object `o` of type soort"
    attr_name, multiple = get_relation(soort, srt)
if multiple:
o.__getattribute__(attr_name).add(r)
else:
o.__setattr__(attr_name, r)
o.save()
def remove_relation(o, soort, r, srt):
    "unlink object `r` of type srt from object `o` of type soort"
    attr_name, multiple = get_relation(soort, srt)
if multiple:
o.__getattribute__(attr_name).remove(r)
else:
o.__setattr__(attr_name, None)
o.save()
def corr_naam(name):
"""convert name used in program to model name and back
Note: all names must be unique!
"""
names = (("techtaak", 'techtask'), ("programma", 'procproc'))
for name1, name2 in names:
if name == name1:
return name2
if name == name2:
return name1
return name
def get_field_attr(name):
"""leidt veldnaam, type en lengte af uit de definities in models.py
"""
# de variant met een repeating group (entiteit, dataitem) levert hier nog een probleem op.
# is dat omdat er twee entiteiten in 1 scherm staan?
fields = []
opts = my.rectypes[name]._meta
    for x in opts.get_fields():
fldname = x.name
fldtype = x.get_internal_type()
if fldname == 'id' or fldtype in ('ForeignKey', 'ManyToManyField'):
# if fldname == 'id' or any((x.many2one, x.many2many, x.one2many))
continue
try:
length = x.max_length
except AttributeError:
length = -1
fields.append((fldname, fldtype[:-5], length))
return fields
def get_relation_fields(name):
"""deze functie is van de vorige afgesplitst (afwijkend pad als tweede argument alles = True)
enig gemeenschappelijke is loopen over get_fields
deze werd bovendien nergens gebruikt
"""
fields = []
opts = my.rectypes[name]._meta
for rel in opts.get_fields():
# print(rel, rel.one_to_many or rel.many_to_many)
if rel.one_to_many or rel.many_to_many:
try:
fields.append((rel.name, rel.get_internal_type(), rel.max_length))
except AttributeError:
fields.append((rel.name, rel.get_internal_type(), -1))
return fields
def get_new_numberkey_for_soort(owner_proj, soort):
"""generate new id for certain document types
"""
if soort == 'userwijz':
sel = owner_proj.rfcs
elif soort == 'userprob':
sel = owner_proj.probs
elif soort == 'bevinding':
sel = owner_proj.tbev
else:
return ''
ny = str(datetime.date.today().year)
h = ''
try:
last_id = sel.latest("datum_in").nummer
except ObjectDoesNotExist:
pass
else:
yr, nr = last_id.split('-')
if yr == ny:
h = '-'.join((yr, '%04i' % (int(nr) + 1)))
if h == '':
h = '-'.join((ny, '0001'))
return h
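# Examples of the generated keys: the first 'userwijz' of 2024 gets "2024-0001";
# with "2024-0012" as the latest key the next one becomes "2024-0013", and the
# sequence restarts at "<year>-0001" once the year rolls over.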
def get_stats_texts(proj, action_type):
"""get certain texts for certain document types (also registered in actiereg)
"""
first = _("(nog) geen")
if action_type == 'userwijz':
all_objects = my.Userwijz.objects.filter(project=proj)
second = _('ingediend')
hlp = _("gerealiseerd"), _('in behandeling via')
elif action_type == 'probleem':
all_objects = my.Userprob.objects.filter(project=proj)
second = _("gemeld")
hlp = _('opgelost'), _('doorgekoppeld naar')
elif action_type == 'bevinding':
all_objects = my.Bevinding.objects.filter(project=proj)
second = _("opgevoerd")
hlp = _('opgelost'), _('doorgekoppeld naar')
else:
return '', ''
solved = all_objects.filter(gereed=True).count()
working = all_objects.filter(gereed=False).filter(actie__isnull=False).count()
if all_objects.count() != 0:
first = all_objects.count()
second = str(_("waarvan {} {} en {} {} Actiereg").format(solved, hlp[0], working, hlp[1]))
return first, second
def get_names_for_type(typename):
"get verbose names from model definition"
return (my.rectypes[typename]._meta.verbose_name,
my.rectypes[typename]._meta.verbose_name_plural,
my.rectypes[typename].section)
def get_projectlist():
"return list of all the projects"
return my.Project.objects.all().order_by('naam')
def get_ordered_objectlist(proj, soort):
"return ordered list of objects of the given type for the given project"
    # the guard against unknown types, the empty-project branch (with a select_related()
    # call that never worked) and the hard-coded list of number-keyed types were dead code;
    # an empty project is routed to get_projectlist instead, so they have been removed
    lijst = my.rectypes[soort].objects.filter(project=proj)
    if 'naam' in [x[0] for x in get_field_attr(soort)]:
        return lijst.order_by('naam')
    return lijst.order_by('nummer')
def get_object(soort, id, new=False):
"return specified document object"
if soort not in my.rectypes:
raise Http404('Onbekend type `{}`'.format(soort))
if new:
o = my.rectypes[soort]()
else:
try:
o = my.rectypes[soort].objects.get(pk=id)
except ObjectDoesNotExist:
raise Http404(str(id).join((soort + ' ', _(' bestaat niet'))))
return o
def determine_adjacent(all_items, o):
"return keys for previous and next object"
prev = next = 0
nog_een = False
for x in all_items:
if nog_een:
next = x.id
nog_een = False
break
if x == o:
nog_een = True
else:
prev = x.id
return prev, next
def get_list_title_attrs(proj, soort, srt, id, rel):
"return title, name (single and plural) and section for object type"
soortnm_ev, soortnm_mv, sect = get_names_for_type(soort)
if srt:
srtnm_ev, srtnm_mv = get_names_for_type(srt)[:2]
if proj:
pr = my.Project.objects.get(pk=proj)
title = _(' bij project ').join((soortnm_mv.capitalize(), pr.naam))
else:
pr = None
title = _('Lijst ') + str(soortnm_mv)
if rel:
document = my.rectypes[srt].objects.get(pk=id)
if srt in ('userwijz', 'userprob', 'bevinding'):
docid = document.nummer
else:
docid = document.naam
itemoms = '{} "{}"'.format(srtnm_ev, docid)
relstr = str(_('{} relateren aan {}'))
if rel == 'from':
title = relstr.format(itemoms, soortnm_ev)
else:
title = relstr.format(soortnm_ev, itemoms)
    if pr:  # isn't this duplicated? yes, definitely
title = "Project {0} - {1}".format(pr.naam, title)
return title, soortnm_ev, soortnm_mv, sect
def init_infodict_for_detail(proj, soort, edit, meld):
    "return the initial context dictionary for the detail view"
return {'start': '', 'soort': soort, 'prev': '', 'notnw': 'new', 'next': '', "sites": SITES,
'proj': '' if proj == 'proj' else proj, 'sect': '', 'meld': meld,
'projecten': get_projectlist(),
'mode': 'edit' if edit else 'view',
'new': 'nieuw' if edit == 'new' else ''}
def get_update_url(proj, edit, soort='', id='', srt='', verw=''):
"return url to view that does the actual update"
if edit == 'new': # form action for new document
if soort:
ref = '{}/{}/'.format(srt, verw) if srt else ''
return "/{}/{}/mut/{}".format(proj, soort, ref)
return "/proj/mut/"
elif edit: # form action for existing
if soort:
return "/{}/{}/{}/mut/".format(proj, soort, id)
return "/{}/mut/".format(proj)
return ''
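# Illustration of the URLs produced (project id '5' is a made-up example):
#     get_update_url('5', 'new', 'userwijz')                -> "/5/userwijz/mut/"
#     get_update_url('5', 'new', 'userwijz', srt='project', verw='1')
#                                                           -> "/5/userwijz/mut/project/1/"
#     get_update_url('5', 'edit', 'userwijz', id='3')       -> "/5/userwijz/3/mut/"
#     get_update_url('5', '')                               -> ""  (view mode, no action url)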
def get_fieldlengths(soort):
"return dictionary of maxlength per field"
return {x: z for x, y, z in get_field_attr(soort)}
def get_margins_for_type(typename):
"geeft voor een aantal soorten afwijkende marges terug"
left_margin = {"project": 140,
"userspec": 230,
"funcdoc": 160,
"gebrtaak": 240,
"funcproc": 160,
"entiteit": 140,
"techtaak": 200,
"techproc": 140,
"testplan": 140,
"bevinding": 140} .get(typename, 120)
leftw = "{0}px".format(left_margin)
rightw = "{0}px".format(910 - left_margin)
rightm = "{0}px".format(left_margin + 5)
return leftw, rightw, rightm
def get_detail_title(soort, edit, obj):
"""geeft titel zonder "DocTool!" terug"""
naam_ev = get_names_for_type(soort)[0]
if edit == 'new':
return _('Nieuw(e) ') + str(naam_ev)
try:
title = " ".join((naam_ev.capitalize(), obj.naam))
except AttributeError:
title = " ".join((naam_ev.capitalize(), obj.nummer))
return title
def get_relation_buttons(proj, soort, id, button_lijst):
"build buttons to create related documents"
    # in the document, each relatable document type first gets a "create relation" button,
    # then, if relations exist, the references each with a "remove relation" button,
    # and finally this set of buttons, which might as well go with the "create relation" ones
buttons = []
for s in button_lijst:
buttons.append(BTNTXT.format(proj, s, "new", soort, id, _("Opvoeren ") +
str(my.rectypes[s]._meta.verbose_name)))
return buttons
def execute_update(soort, obj, postdict, files=None):
if soort in ('userwijz', 'userprob', 'bevinding'):
gereed = obj.gereed
for x, y, z in get_field_attr(soort): # naam,type,lengte
if x == 'datum_gereed':
if postdict['gereed'] == '1' and not gereed:
obj.datum_gereed = datetime.datetime.today()
elif x == "gereed":
obj.gereed = True if postdict[x] == "1" else False
elif x == 'link':
if 'link_file' in files:
uploaded = files['link_file']
pad = [y.upload_to for y in my.rectypes[soort]._meta.fields if y.name == 'link'][0]
save_name = "/".join((pad, uploaded.name))
with open(MEDIA_ROOT + save_name, 'wb+') as destination:
for chunk in uploaded.chunks():
destination.write(chunk)
obj.__dict__[x] = save_name
elif x != 'datum_in':
obj.__dict__[x] = postdict[x]
obj.save()
def execute_update_for_link(soort, obj, postdict, files):
model = models.get_model('myprojects', soort.capitalize())
manipulator = my.rectypes[soort].AddManipulator()
new_data = postdict.copy()
new_data.update({'project': proj})
for x,y,z in getfields(soort): # naam,type,lengte
if x == 'link' and y == 'File':
new_data.update(files)
continue
# return HttpResponse(str(new_data))
errors = manipulator.get_validation_errors(new_data)
manipulator.do_html2python(new_data)
if errors:
return 'errors', HttpResponse('\n'.join((str(errors),str(new_data))))
new_object = manipulator.save(new_data)
return 'ok', my.rectypes[soort].objects.get(pk=new_object.id)
def update_link_from_actiereg(obj, arid, arnum):
obj.actie = int(arid) # int(data.get("id","0"))
obj.actienummer = arnum # data.get("actie","")
obj.save()
def update_status_from_actiereg(obj, arstat):
obj.gereed = {"arch": True, "herl": False}[arstat]
obj.save()
def update_subitem(srt1, obj1, srt2, obj2, new, data):
if new:
obj2.hoort_bij = obj1
obj2.naam = data["naam"]
if (srt1, srt2) == ('entiteit', 'attribuut'):
obj2.type = data["type"]
obj2.bereik = data["bereik"]
obj2.primarykey = data["key"] if data["key"] in ('1', '2', '3', '4', '5') else '0'
elif (srt1, srt2) == ('dataitem', 'element'):
obj2.soort = data["type"]
obj2.omschrijving = data["oms"]
obj2.sleutel = data["sleutel"] if data["sleutel"] in ('1', '2', '3', '4', '5') else '0'
if "rel" in data:
if data["rel"] in [x.naam for x in my.Entiteit.objects.filter(project=obj1.project)]:
# try:
obj2.relatie = my.rectypes[srt1].objects.get(naam=data["rel"])
# except ObjectDoesNotExist:
# pass
obj2.save()
def update_related(soort, obj, related, relobj):
"bijwerken van de eventueel meegegeven relatie"
if related not in my.rectypes:
raise Http404('Onbekend type `{}` voor relatie'.format(related))
data = my.rectypes[related].objects.get(pk=relobj)
set_relation(obj, soort, data, related)
class GetRelations:
"zoek relaties bij gegeven object"
def __init__(self, obj, soort): # , related_soort):
self.obj = obj
self.soort = soort
# self.srt = related_soort
self.opts = my.rectypes[soort]._meta
# dit opnieuw opzetten met één lus over opts.get_fields(show_hidden=True)
# in plaats van de vier lussen die ik nu heb
# maar eerst nog even met 4 functies
def get_foreignkeys_to(self):
fkeys_to = []
for fld in self.opts.fields:
if fld.name == "project":
continue
if fld.get_internal_type() == 'ForeignKey':
srt = corr_naam(fld.related_model._meta.model_name)
result = self.obj.__getattribute__(fld.name)
rel = {'text': ' '.join((str(my.rectypes[self.soort].to_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'links': []}
if result:
rel['links'].append(RELTXT.format(self.obj.project.id, srt, result.id, result) +
" " +
BTNTXT.format(self.obj.project.id, self.soort, self.obj.id,
"unrel/van/" + srt, result.id, REMOVE_TEXT))
else:
rel['btn'] = BTNTXT.format(self.obj.project.id, srt, "rel", self.soort,
self.obj.id, ADD_TEXT)
fkeys_to.append(rel)
return fkeys_to
def get_many2many_to(self):
m2ms_to = []
for x in self.opts.many_to_many:
srt = corr_naam(x.related_model._meta.model_name)
y = {'text': ' '.join((str(my.rectypes[self.soort].to_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, srt, "rel", self.soort, self.obj.id,
ADD_TEXT),
'links': []}
result = self.obj.__getattribute__(x.name)
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) + " " +
BTNTXT.format(self.obj.project.id, self.soort, self.obj.id,
"unrel/van/" + srt, item.id, REMOVE_TEXT))
m2ms_to.append(y)
return m2ms_to
def get_foreignkeys_from(self):
button_lijst, fkeys_from, andere, attrs = [], [], [], []
# for relobj in opts.get_all_related_objects():
for relobj in [x for x in self.opts.get_fields()
if (x.one_to_many or x.one_to_one) and x.auto_created and not x.concrete]:
# print(self.obj, relobj, self.soort)
srt = corr_naam(relobj.related_model._meta.model_name)
if (self.soort, srt) == ('entiteit', 'attribuut'):
andere = [x.naam for x in my.Entiteit.objects.filter(project=self.obj.project)
if x != self.obj]
attrs = self.obj.attrs.all()
elif (self.soort, srt) == ('dataitem', 'dataelement'):
andere = [x.naam for x in my.Dataitem.objects.filter(project=self.obj.project)
if x != self.obj]
attrs = self.obj.elems.all()
else:
button_lijst.append(srt)
y = {'text': ' '.join((str(my.rectypes[self.soort].from_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, self.soort, self.obj.id, "rel", srt,
ADD_TEXT),
'links': []}
#result = get_related(self.obj, relobj)
result = self.obj.__getattribute__(relobj.related_name).all()
if result:
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) +
" " +
BTNTXT.format(self.obj.project.id, srt, item.id,
"unrel/naar/" + self.soort, self.obj.id,
REMOVE_TEXT))
fkeys_from.append(y)
return button_lijst, fkeys_from, andere, attrs
def get_many2many_from(self):
button_lijst, m2ms_from = [], []
# for x in opts.get_all_related_many_to_many_objects():
for x in [y for y in self.opts.get_fields() # include_hidden=True)
if y.many_to_many and y.auto_created]:
srt = corr_naam(x.related_model._meta.model_name)
button_lijst.append(srt)
y = {'text': ' '.join((str(my.rectypes[self.soort].from_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, self.soort, self.obj.id, "rel", srt,
ADD_TEXT),
'links': []}
# result = get_related(self.obj, x, m2m=True)
result = self.obj.__getattribute__(x.related_name).all()
if result:
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) + " " +
BTNTXT.format(self.obj.project.id, srt, item.id,
"unrel/naar/" + self.soort, self.obj.id,
REMOVE_TEXT))
m2ms_from.append(y)
return button_lijst, m2ms_from | random_line_split |
|
helpers.py
"""Processing for MyProjects Web Application
"""
import datetime
from django.http import Http404, HttpResponse
from django.core.exceptions import ObjectDoesNotExist, FieldError # , DoesNotExist
from django.utils.translation import gettext as _
import docs.models as my
from myprojects.settings import MEDIA_ROOT, SITES
RELTXT = '<br/><a href="/docs/{0}/{1}/{2}/">{3}</a>'
BTNTXT = '<a href="/docs/{0}/{1}/{2}/{3}/{4}/"><input type="button" value="{5}" /></a>'
ADD_TEXT, REMOVE_TEXT = _("leg relatie"), _("verwijder relatie")
# note: obj.model._meta no longer exists, but obj._meta.model does
# in get_related, however, I replaced model._meta by plain _meta
# there rel.to._meta.model_name was also replaced by related_model
# and comparing against _meta.model_name was replaced by comparing against _meta.model
# here we compare against a name, so we have to keep using _meta.model_name
# in this comparison we therefore only replace rel.to by related_model
def get_related(this_obj, other_obj, m2m=False):
"""geeft het resultaat van een reversed relatie terug
eerst wordt in het gerelateerde model de related_name opgezocht
dan wordt hiermee het betreffende attribuut van het huidige object bepaald
"""
# is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type
# verwezen wordt vanuit het andere type? Of is dat om de vorige/volgende te kunnen bepalen?
# als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object
# maar een relatie (uit de fields verzameling)
if m2m:
fields = [x for x in other_obj._meta.many_to_many]
else:
fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and
x.get_internal_type() == 'ForeignKey']
for fld in fields:
if fld.related_model == this_obj._meta.model:
related_name = fld.related_query_name()
break
else:
return None # not found
try:
return this_obj.__getattribute__(related_name).all()
except UnboundLocalError:
return None
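# hedged usage sketch (the object names and the assumed ForeignKey between the two
# models are illustrations, not taken from the real model definitions): given a
# document `doc` and an instance `other` whose model holds a ForeignKey - other than
# 'project' - to doc's model, the call returns the queryset of `other`-type rows
# pointing at `doc`; without such a ForeignKey it returns None
#
#     doc = get_object('funcdoc', 1)       # hypothetical type and id
#     other = my.rectypes['userwijz']()
#     linked = get_related(doc, other)     # queryset of related rows, or None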
# could this be replaced by a call to get_relation, then using the retrieved name to
# fetch the related objects and determine the previous and next one right away?
# (that naturally has consequences for the calling code)
# originally it appears to have worked that way: this function was called get_relation
# and the part that still bears that name was hardcoded inside it
# this function is only called from a few methods of the GetRelations class below,
# namely to determine the names of relations from other objects to the current one.
# Using get_relation as it stands, that distinction (relations from versus relations
# to) could be handled by a single function
def get_relation(srt, soort):
"""Geeft veldnaam en cardinaliteit terug voor een relatie van srt naar soort
"""
result, multiple = None, None
if srt != soort or soort in ('funcproc', 'techproc'):
for relobj in my.rectypes[srt]._meta.get_fields():
if relobj.related_model and corr_naam(relobj.related_model._meta.model_name) == soort:
result = relobj.name
                multiple = relobj.get_internal_type() != 'ForeignKey'
break
return result, multiple
def set_relation(o, soort, r, srt):
    attr_name, multiple = get_relation(soort, srt)
    if multiple:
        o.__getattribute__(attr_name).add(r)
    else:
        o.__setattr__(attr_name, r)
    o.save()
def remove_relation(o, soort, r, srt):
attr_name, multiple = get_relation(soort, srt)
if multiple:
o.__getattribute__(attr_name).remove(r)
else:
o.__setattr__(attr_name, None)
o.save()
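# a minimal sketch of the trio above, assuming a 'gebrtaak' model that carries a
# relation field to 'funcproc' (that assumption is mine, not read from models.py);
# get_relation decides whether add()/remove() or plain attribute assignment applies
#
#     taak = get_object('gebrtaak', 1)                     # hypothetical ids
#     proc = get_object('funcproc', 2)
#     set_relation(taak, 'gebrtaak', proc, 'funcproc')     # link and save
#     remove_relation(taak, 'gebrtaak', proc, 'funcproc')  # unlink again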
def corr_naam(name):
"""convert name used in program to model name and back
Note: all names must be unique!
"""
names = (("techtaak", 'techtask'), ("programma", 'procproc'))
for name1, name2 in names:
if name == name1:
return name2
if name == name2:
return name1
return name
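# illustration of corr_naam: the mapping is symmetric and any name outside the
# translation table is returned unchanged
# >>> corr_naam('techtaak')
# 'techtask'
# >>> corr_naam('procproc')
# 'programma'
# >>> corr_naam('entiteit')
# 'entiteit'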
def get_field_attr(name):
"""leidt veldnaam, type en lengte af uit de definities in models.py
"""
# de variant met een repeating group (entiteit, dataitem) levert hier nog een probleem op.
# is dat omdat er twee entiteiten in 1 scherm staan?
fields = []
opts = my.rectypes[name]._meta
for x in opts.get_fields(): # fields:
fldname = x.name
fldtype = x.get_internal_type()
if fldname == 'id' or fldtype in ('ForeignKey', 'ManyToManyField'):
# if fldname == 'id' or any((x.many2one, x.many2many, x.one2many))
continue
try:
length = x.max_length
except AttributeError:
length = -1
fields.append((fldname, fldtype[:-5], length))
return fields
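# sketch of the result shape: one (name, internal type minus the 'Field' suffix,
# max_length or -1) triple per regular field, with id/FK/M2M fields skipped; the
# concrete fields below are assumed for illustration, not read from models.py
# >>> get_field_attr('userwijz')
# [('nummer', 'Char', 10), ('datum_in', 'Date', -1), ('gereed', 'Boolean', -1), ...]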
def get_relation_fields(name):
"""deze functie is van de vorige afgesplitst (afwijkend pad als tweede argument alles = True)
enig gemeenschappelijke is loopen over get_fields
deze werd bovendien nergens gebruikt
"""
fields = []
opts = my.rectypes[name]._meta
for rel in opts.get_fields():
# print(rel, rel.one_to_many or rel.many_to_many)
if rel.one_to_many or rel.many_to_many:
try:
fields.append((rel.name, rel.get_internal_type(), rel.max_length))
except AttributeError:
fields.append((rel.name, rel.get_internal_type(), -1))
return fields
def get_new_numberkey_for_soort(owner_proj, soort):
"""generate new id for certain document types
"""
if soort == 'userwijz':
sel = owner_proj.rfcs
elif soort == 'userprob':
sel = owner_proj.probs
elif soort == 'bevinding':
sel = owner_proj.tbev
else:
return ''
ny = str(datetime.date.today().year)
h = ''
try:
last_id = sel.latest("datum_in").nummer
except ObjectDoesNotExist:
pass
else:
yr, nr = last_id.split('-')
if yr == ny:
h = '-'.join((yr, '%04i' % (int(nr) + 1)))
if h == '':
h = '-'.join((ny, '0001'))
return h
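# worked example (hypothetical data): keys have the form "<year>-<number>" and restart
# at 0001 every year; if the most recent 'userwijz' nummer is "2024-0007" and it is
# still 2024, the next key is "2024-0008"; in 2025 it would become "2025-0001"
# >>> get_new_numberkey_for_soort(some_project, 'userwijz')   # some_project: assumed
# '2024-0008'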
def get_stats_texts(proj, action_type):
"""get certain texts for certain document types (also registered in actiereg)
"""
first = _("(nog) geen")
if action_type == 'userwijz':
all_objects = my.Userwijz.objects.filter(project=proj)
second = _('ingediend')
hlp = _("gerealiseerd"), _('in behandeling via')
elif action_type == 'probleem':
all_objects = my.Userprob.objects.filter(project=proj)
second = _("gemeld")
hlp = _('opgelost'), _('doorgekoppeld naar')
elif action_type == 'bevinding':
all_objects = my.Bevinding.objects.filter(project=proj)
second = _("opgevoerd")
hlp = _('opgelost'), _('doorgekoppeld naar')
else:
return '', ''
solved = all_objects.filter(gereed=True).count()
working = all_objects.filter(gereed=False).filter(actie__isnull=False).count()
if all_objects.count() != 0:
first = all_objects.count()
second = str(_("waarvan {} {} en {} {} Actiereg").format(solved, hlp[0], working, hlp[1]))
return first, second
def get_names_for_type(typename):
"get verbose names from model definition"
return (my.rectypes[typename]._meta.verbose_name,
my.rectypes[typename]._meta.verbose_name_plural,
my.rectypes[typename].section)
def get_projectlist():
"return list of all the projects"
return my.Project.objects.all().order_by('naam')
def get_ordered_objectlist(proj, soort):
"return ordered list of objects of the given type for the given project"
    # if soort in my.rectypes: -- redundant test, I think
    #     return None
    # if proj:
    lijst = my.rectypes[soort].objects.filter(project=proj)
    # else:
    #     lijst = my.rectypes[soort].objects.select_related()
    # I don't think the commented branch above ever worked: from the very start it
    # said select.related, which never existed, and I don't think you ever get here
    # with an empty project (in that case you go to get_projectlist) - so remove it
    # if soort in ('userwijz', 'userprob', 'bevinding'):
# if soort in ('userwijz', 'userprob', 'bevinding'):
if 'naam' in [x[0] for x in get_field_attr(soort)]:
return lijst.order_by('naam')
return lijst.order_by('nummer')
def get_object(soort, id, new=False):
"return specified document object"
if soort not in my.rectypes:
raise Http404('Onbekend type `{}`'.format(soort))
if new:
o = my.rectypes[soort]()
else:
try:
o = my.rectypes[soort].objects.get(pk=id)
except ObjectDoesNotExist:
raise Http404(str(id).join((soort + ' ', _(' bestaat niet'))))
return o
def determine_adjacent(all_items, o):
    "return keys for previous and next object"
    prev = next_ = 0  # next_ avoids shadowing the builtin next
    nog_een = False
    for x in all_items:
        if nog_een:
            next_ = x.id
            nog_een = False
            break
        if x == o:
            nog_een = True
        else:
            prev = x.id
    return prev, next_
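# minimal illustration: for objects with ids [3, 5, 8] and o the object with id 5 the
# result is (3, 8); at the edges the missing neighbour keeps the initial value 0
# >>> determine_adjacent(items_with_ids_3_5_8, object_with_id_5)   # hypothetical args
# (3, 8)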
def get_list_title_attrs(proj, soort, srt, id, rel):
"return title, name (single and plural) and section for object type"
soortnm_ev, soortnm_mv, sect = get_names_for_type(soort)
if srt:
srtnm_ev, srtnm_mv = get_names_for_type(srt)[:2]
if proj:
pr = my.Project.objects.get(pk=proj)
title = _(' bij project ').join((soortnm_mv.capitalize(), pr.naam))
else:
pr = None
title = _('Lijst ') + str(soortnm_mv)
if rel:
document = my.rectypes[srt].objects.get(pk=id)
if srt in ('userwijz', 'userprob', 'bevinding'):
docid = document.nummer
else:
docid = document.naam
itemoms = '{} "{}"'.format(srtnm_ev, docid)
relstr = str(_('{} relateren aan {}'))
if rel == 'from':
title = relstr.format(itemoms, soortnm_ev)
else:
title = relstr.format(soortnm_ev, itemoms)
    if pr:  # isn't this duplicated? It certainly is
title = "Project {0} - {1}".format(pr.naam, title)
return title, soortnm_ev, soortnm_mv, sect
def init_infodict_for_detail(proj, soort, edit, meld):
return {'start': '', 'soort': soort, 'prev': '', 'notnw': 'new', 'next': '', "sites": SITES,
'proj': '' if proj == 'proj' else proj, 'sect': '', 'meld': meld,
'projecten': get_projectlist(),
# 'edit': 'view' if edit else '',
# 'view': 'edit' if not edit else '',
'mode': 'edit' if edit else 'view',
'new': 'nieuw' if edit == 'new' else ''}
def get_update_url(proj, edit, soort='', id='', srt='', verw=''):
"return url to view that does the actual update"
if edit == 'new': # form action for new document
if soort:
ref = '{}/{}/'.format(srt, verw) if srt else ''
return "/{}/{}/mut/{}".format(proj, soort, ref)
return "/proj/mut/"
elif edit: # form action for existing
if soort:
return "/{}/{}/{}/mut/".format(proj, soort, id)
return "/{}/mut/".format(proj)
return ''
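# sketch of the produced form-action urls (paths only; the url routing itself lives
# elsewhere):
# >>> get_update_url(1, 'new', soort='userwijz')
# '/1/userwijz/mut/'
# >>> get_update_url(1, True, soort='userwijz', id=5)
# '/1/userwijz/5/mut/'
# >>> get_update_url(1, '')
# ''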
def get_fieldlengths(soort):
"return dictionary of maxlength per field"
return {x: z for x, y, z in get_field_attr(soort)}
def get_margins_for_type(typename):
"geeft voor een aantal soorten afwijkende marges terug"
left_margin = {"project": 140,
"userspec": 230,
"funcdoc": 160,
"gebrtaak": 240,
"funcproc": 160,
"entiteit": 140,
"techtaak": 200,
"techproc": 140,
"testplan": 140,
"bevinding": 140} .get(typename, 120)
leftw = "{0}px".format(left_margin)
rightw = "{0}px".format(910 - left_margin)
rightm = "{0}px".format(left_margin + 5)
return leftw, rightw, rightm
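# worked example: for "userspec" the left margin is 230, so the result is
# ('230px', '680px', '235px') - 910 - 230 = 680 and 230 + 5 = 235; an unknown type
# falls back to the default of 120:
# >>> get_margins_for_type('onbekend')
# ('120px', '790px', '125px')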
def get_detail_title(soort, edit, obj):
"""geeft titel zonder "DocTool!" terug"""
naam_ev = get_names_for_type(soort)[0]
if edit == 'new':
return _('Nieuw(e) ') + str(naam_ev)
try:
title = " ".join((naam_ev.capitalize(), obj.naam))
except AttributeError:
title = " ".join((naam_ev.capitalize(), obj.nummer))
return title
def get_relation_buttons(proj, soort, id, button_lijst):
"build buttons to create related documents"
    # in the document, each type of document that can be related first gets a
    # "leg relatie" (create relation) button, then, if relations exist, the references
    # with a "verwijder relatie" (remove relation) button, and finally this set of
    # buttons, which as far as I'm concerned could sit next to the create buttons
buttons = []
for s in button_lijst:
buttons.append(BTNTXT.format(proj, s, "new", soort, id, _("Opvoeren ") +
str(my.rectypes[s]._meta.verbose_name)))
return buttons
def execute_update(soort, obj, postdict, files=None):
    "copy the posted form data onto the document object and save it"
    gereed = obj.gereed if soort in ('userwijz', 'userprob', 'bevinding') else False
    for x, y, z in get_field_attr(soort):  # name, type, length
        if x == 'datum_gereed':
            if postdict['gereed'] == '1' and not gereed:
                obj.datum_gereed = datetime.datetime.today()
        elif x == "gereed":
            obj.gereed = postdict[x] == "1"
        elif x == 'link':
            if files and 'link_file' in files:  # guard: files defaults to None
                uploaded = files['link_file']
                pad = [fld.upload_to for fld in my.rectypes[soort]._meta.fields
                       if fld.name == 'link'][0]
                save_name = "/".join((pad, uploaded.name))
                with open(MEDIA_ROOT + save_name, 'wb+') as destination:
                    for chunk in uploaded.chunks():
                        destination.write(chunk)
                obj.__dict__[x] = save_name
        elif x != 'datum_in':
            obj.__dict__[x] = postdict[x]
    obj.save()
def execute_update_for_link(soort, obj, postdict, files):
    # NB: legacy code built on the pre-1.0 Django "manipulator" API (AddManipulator,
    # get_validation_errors, do_html2python); it cannot run on a current Django.
    # The unused `models.get_model(...)` lookup was dropped (`models` was never
    # imported) and `getfields` was repaired to get_field_attr; `proj` is still
    # undefined in this scope and is left as-is, flagged below.
    manipulator = my.rectypes[soort].AddManipulator()
    new_data = postdict.copy()
    new_data.update({'project': proj})  # FIXME: proj is not defined here
    for x, y, z in get_field_attr(soort):  # name, type, length
        if x == 'link' and y == 'File':
            new_data.update(files)
            continue
    # return HttpResponse(str(new_data))
    errors = manipulator.get_validation_errors(new_data)
    manipulator.do_html2python(new_data)
    if errors:
        return 'errors', HttpResponse('\n'.join((str(errors), str(new_data))))
    new_object = manipulator.save(new_data)
    return 'ok', my.rectypes[soort].objects.get(pk=new_object.id)
def update_link_from_actiereg(obj, arid, arnum):
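    "store the actiereg id and action number received from actiereg on the object"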
obj.actie = int(arid) # int(data.get("id","0"))
obj.actienummer = arnum # data.get("actie","")
obj.save()
def update_status_from_actiereg(obj, arstat):
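    "translate an actiereg status ('arch'/'herl') into the gereed flag"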
obj.gereed = {"arch": True, "herl": False}[arstat]
obj.save()
def update_subitem(srt1, obj1, srt2, obj2, new, data):
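    "update a subordinate item (attribuut or element) with the posted data"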
if new:
obj2.hoort_bij = obj1
obj2.naam = data["naam"]
if (srt1, srt2) == ('entiteit', 'attribuut'):
obj2.type = data["type"]
obj2.bereik = data["bereik"]
obj2.primarykey = data["key"] if data["key"] in ('1', '2', '3', '4', '5') else '0'
elif (srt1, srt2) == ('dataitem', 'element'):
obj2.soort = data["type"]
obj2.omschrijving = data["oms"]
obj2.sleutel = data["sleutel"] if data["sleutel"] in ('1', '2', '3', '4', '5') else '0'
if "rel" in data:
if data["rel"] in [x.naam for x in my.Entiteit.objects.filter(project=obj1.project)]:
# try:
obj2.relatie = my.rectypes[srt1].objects.get(naam=data["rel"])
# except ObjectDoesNotExist:
# pass
obj2.save()
def update_related(soort, obj, related, relobj):
"bijwerken van de eventueel meegegeven relatie"
if related not in my.rectypes:
raise Http404('Onbekend type `{}` voor relatie'.format(related))
data = my.rectypes[related].objects.get(pk=relobj)
set_relation(obj, soort, data, related)
class GetRelations:
"zoek relaties bij gegeven object"
def __init__(self, obj, soort): # , related_soort):
self.obj = obj
self.soort = soort
# self.srt = related_soort
self.opts = my.rectypes[soort]._meta
    # redo this with a single loop over opts.get_fields(show_hidden=True)
    # instead of the four loops I have now
    # but keep the 4 separate functions for the time being
def get_foreignkeys_to(self):
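        "relations via a ForeignKey from this object to another document"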
fkeys_to = []
for fld in self.opts.fields:
if fld.name == "project":
continue
if fld.get_internal_type() == 'ForeignKey':
srt = corr_naam(fld.related_model._meta.model_name)
result = self.obj.__getattribute__(fld.name)
rel = {'text': ' '.join((str(my.rectypes[self.soort].to_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'links': []}
if result:
rel['links'].append(RELTXT.format(self.obj.project.id, srt, result.id, result) +
" " +
BTNTXT.format(self.obj.project.id, self.soort, self.obj.id,
"unrel/van/" + srt, result.id, REMOVE_TEXT))
else:
rel['btn'] = BTNTXT.format(self.obj.project.id, srt, "rel", self.soort,
self.obj.id, ADD_TEXT)
fkeys_to.append(rel)
return fkeys_to
def get_many2many_to(self):
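        "relations via a ManyToManyField from this object to another document"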
m2ms_to = []
for x in self.opts.many_to_many:
srt = corr_naam(x.related_model._meta.model_name)
y = {'text': ' '.join((str(my.rectypes[self.soort].to_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, srt, "rel", self.soort, self.obj.id,
ADD_TEXT),
'links': []}
result = self.obj.__getattribute__(x.name)
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) + " " +
BTNTXT.format(self.obj.project.id, self.soort, self.obj.id,
"unrel/van/" + srt, item.id, REMOVE_TEXT))
m2ms_to.append(y)
return m2ms_to
def get_foreignkeys_from(self):
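        "reverse relations: other documents pointing here via a ForeignKey"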
button_lijst, fkeys_from, andere, attrs = [], [], [], []
# for relobj in opts.get_all_related_objects():
for relobj in [x for x in self.opts.get_fields()
if (x.one_to_many or x.one_to_one) and x.auto_created and not x.concrete]:
# print(self.obj, relobj, self.soort)
srt = corr_naam(relobj.related_model._meta.model_name)
if (self.soort, srt) == ('entiteit', 'attribuut'):
andere = [x.naam for x in my.Entiteit.objects.filter(project=self.obj.project)
if x != self.obj]
attrs = self.obj.attrs.all()
elif (self.soort, srt) == ('dataitem', 'dataelement'):
andere = [x.naam for x in my.Dataitem.objects.filter(project=self.obj.project)
if x != self.obj]
attrs = self.obj.elems.all()
else:
button_lijst.append(srt)
y = {'text': ' '.join((str(my.rectypes[self.soort].from_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, self.soort, self.obj.id, "rel", srt,
ADD_TEXT),
'links': []}
#result = get_related(self.obj, relobj)
result = self.obj.__getattribute__(relobj.related_name).all()
if result:
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) +
" " +
BTNTXT.format(self.obj.project.id, srt, item.id,
"unrel/naar/" + self.soort, self.obj.id,
REMOVE_TEXT))
fkeys_from.append(y)
return button_lijst, fkeys_from, andere, attrs
def get_many2many_from(self):
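        "reverse relations: other documents pointing here via a ManyToManyField"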
button_lijst, m2ms_from = [], []
# for x in opts.get_all_related_many_to_many_objects():
for x in [y for y in self.opts.get_fields() # include_hidden=True)
if y.many_to_many and y.auto_created]:
srt = corr_naam(x.related_model._meta.model_name)
button_lijst.append(srt)
y = {'text': ' '.join((str(my.rectypes[self.soort].from_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, self.soort, self.obj.id, "rel", srt,
ADD_TEXT),
'links': []}
# result = get_related(self.obj, x, m2m=True)
result = self.obj.__getattribute__(x.related_name).all()
if result:
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) + " " +
BTNTXT.format(self.obj.project.id, srt, item.id,
"unrel/naar/" + self.soort, self.obj.id,
REMOVE_TEXT))
m2ms_from.append(y)
return button_lijst, m2ms_from
| attr_name, multiple = get_relation(soort, srt)
if multiple:
o.__getattribute__(attr_name).add(r)
else:
o.__setattr__(attr_name, r)
o.save() | identifier_body |
helpers.py | """Processing for MyProjects Web Application
"""
import datetime
from django.http import Http404
from django.core.exceptions import ObjectDoesNotExist, FieldError # , DoesNotExist
from django.utils.translation import gettext as _
import docs.models as my
from myprojects.settings import MEDIA_ROOT, SITES
RELTXT = '<br/><a href="/docs/{0}/{1}/{2}/">{3}</a>'
BTNTXT = '<a href="/docs/{0}/{1}/{2}/{3}/{4}/"><input type="button" value="{5}" /></a>'
ADD_TEXT, REMOVE_TEXT = _("leg relatie"), _("verwijder relatie")
# let op: obj.model._meta bestaat niet (meer), obj,_meta.model wel
# maar in get_related heb ik model._meta nou juist vervangen door _meta
# ook is daar rel.to._meta.model_name vervangen door related_model
# maar ook is dat vergelijken met _meta.model_name vervangen door vergelijken met_meta.model
# in dit geval vergelijken we met een naam dus moeten we _meta.model_name blijven gebruiken
# in deze vergelijking vervangen we dus alleen rel.to door related_model
def get_related(this_obj, other_obj, m2m=False):
"""geeft het resultaat van een reversed relatie terug
eerst wordt in het gerelateerde model de related_name opgezocht
dan wordt hiermee het betreffende attribuut van het huidige object bepaald
"""
# is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type
# verwezen wordt vanuit het andere type? Of is dat om de vorige/volgende te kunnen bepalen?
# als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object
# maar een relatie (uit de fields verzameling)
if m2m:
fields = [x for x in other_obj._meta.many_to_many]
else:
fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and
x.get_internal_type() == 'ForeignKey']
for fld in fields:
if fld.related_model == this_obj._meta.model:
related_name = fld.related_query_name()
break
else:
return None # not found
try:
return this_obj.__getattribute__(related_name).all()
except UnboundLocalError:
return None
# zou je deze ook kunnen vervangen door een aanroep van get_relation en dan met de opgehaalde
# naam de gerelateerde objecten ophalen en meteen de vorige en de volgende bepalen?
# (heeft uiteraard konsekwenties voor de aanroepende code)
# oorspronkelijk lijkt dat ook zo geweest te zijn, de functie heette toen get_relation en het
# gedeelte dat nu nog zo heet was daarin hardgecodeerd
# deze functie wordt alleen aangeroepen in een paar methoden van de hieronder opgenomen klasse
# GetRelations, namelijk om de namen van relaties uit andere objecten naar het huidige te kunnen
# bepalen.
# Als je get_relation zoals die nu is gebruikt zou je dat onderscheid (van versus naar relaties)
# met dezelfde functie kunnen afhandelen
def get_relation(srt, soort):
"""Geeft veldnaam en cardinaliteit terug voor een relatie van srt naar soort
"""
result, multiple = None, None
if srt != soort or soort in ('funcproc', 'techproc'):
for relobj in my.rectypes[srt]._meta.get_fields():
if relobj.related_model and corr_naam(relobj.related_model._meta.model_name) == soort:
result = relobj.name
multiple = False if relobj.get_internal_type() == 'ForeignKey' else True
break
return result, multiple
def set_relation(o, soort, r, srt):
attr_name, multiple = get_relation(soort, srt)
if multiple:
o.__getattribute__(attr_name).add(r)
else:
o.__setattr__(attr_name, r)
o.save()
def remove_relation(o, soort, r, srt):
attr_name, multiple = get_relation(soort, srt)
if multiple:
o.__getattribute__(attr_name).remove(r)
else:
o.__setattr__(attr_name, None)
o.save()
def corr_naam(name):
"""convert name used in program to model name and back
Note: all names must be unique!
"""
names = (("techtaak", 'techtask'), ("programma", 'procproc'))
for name1, name2 in names:
if name == name1:
return name2
if name == name2:
return name1
return name
def get_field_attr(name):
"""leidt veldnaam, type en lengte af uit de definities in models.py
"""
# de variant met een repeating group (entiteit, dataitem) levert hier nog een probleem op.
# is dat omdat er twee entiteiten in 1 scherm staan?
fields = []
opts = my.rectypes[name]._meta
for x in opts.get_fields(): # fields:
fldname = x.name
fldtype = x.get_internal_type()
if fldname == 'id' or fldtype in ('ForeignKey', 'ManyToManyField'):
# if fldname == 'id' or any((x.many2one, x.many2many, x.one2many))
continue
try:
length = x.max_length
except AttributeError:
length = -1
fields.append((fldname, fldtype[:-5], length))
return fields
def get_relation_fields(name):
"""deze functie is van de vorige afgesplitst (afwijkend pad als tweede argument alles = True)
enig gemeenschappelijke is loopen over get_fields
deze werd bovendien nergens gebruikt
"""
fields = []
opts = my.rectypes[name]._meta
for rel in opts.get_fields():
# print(rel, rel.one_to_many or rel.many_to_many)
if rel.one_to_many or rel.many_to_many:
try:
fields.append((rel.name, rel.get_internal_type(), rel.max_length))
except AttributeError:
fields.append((rel.name, rel.get_internal_type(), -1))
return fields
def get_new_numberkey_for_soort(owner_proj, soort):
"""generate new id for certain document types
"""
if soort == 'userwijz':
sel = owner_proj.rfcs
elif soort == 'userprob':
sel = owner_proj.probs
elif soort == 'bevinding':
sel = owner_proj.tbev
else:
return ''
ny = str(datetime.date.today().year)
h = ''
try:
last_id = sel.latest("datum_in").nummer
except ObjectDoesNotExist:
pass
else:
yr, nr = last_id.split('-')
if yr == ny:
h = '-'.join((yr, '%04i' % (int(nr) + 1)))
if h == '':
h = '-'.join((ny, '0001'))
return h
def get_stats_texts(proj, action_type):
"""get certain texts for certain document types (also registered in actiereg)
"""
first = _("(nog) geen")
if action_type == 'userwijz':
all_objects = my.Userwijz.objects.filter(project=proj)
second = _('ingediend')
hlp = _("gerealiseerd"), _('in behandeling via')
elif action_type == 'probleem':
all_objects = my.Userprob.objects.filter(project=proj)
second = _("gemeld")
hlp = _('opgelost'), _('doorgekoppeld naar')
elif action_type == 'bevinding':
all_objects = my.Bevinding.objects.filter(project=proj)
second = _("opgevoerd")
hlp = _('opgelost'), _('doorgekoppeld naar')
else:
return '', ''
solved = all_objects.filter(gereed=True).count()
working = all_objects.filter(gereed=False).filter(actie__isnull=False).count()
if all_objects.count() != 0:
|
return first, second
def get_names_for_type(typename):
"get verbose names from model definition"
return (my.rectypes[typename]._meta.verbose_name,
my.rectypes[typename]._meta.verbose_name_plural,
my.rectypes[typename].section)
def get_projectlist():
"return list of all the projects"
return my.Project.objects.all().order_by('naam')
def get_ordered_objectlist(proj, soort):
"return ordered list of objects of the given type for the given project"
# if soort in my.rectypes: -- overbodige test volgens mij
# return None
# if proj:
lijst = my.rectypes[soort].objects.filter(project=proj)
# else:
# lijst = my.rectypes[soort].objects.select_related()
# ik denk dat het voorgaande nooit gewerkt heeft. Om te beginnen omdat het vanaf het begin af aan
# select.related heeft gestaan en dat heeft noit bestaan, dus ik denk dat je hier nooit komt met een
# leeg project (want dan ga je naar get_projectlist) - dus maar weghalen:w
# if soort in ('userwijz', 'userprob', 'bevinding'):
if 'naam' in [x[0] for x in get_field_attr(soort)]:
return lijst.order_by('naam')
return lijst.order_by('nummer')
def get_object(soort, id, new=False):
"return specified document object"
if soort not in my.rectypes:
raise Http404('Onbekend type `{}`'.format(soort))
if new:
o = my.rectypes[soort]()
else:
try:
o = my.rectypes[soort].objects.get(pk=id)
except ObjectDoesNotExist:
raise Http404(str(id).join((soort + ' ', _(' bestaat niet'))))
return o
def determine_adjacent(all_items, o):
"return keys for previous and next object"
prev = next = 0
nog_een = False
for x in all_items:
if nog_een:
next = x.id
nog_een = False
break
if x == o:
nog_een = True
else:
prev = x.id
return prev, next
def get_list_title_attrs(proj, soort, srt, id, rel):
"return title, name (single and plural) and section for object type"
soortnm_ev, soortnm_mv, sect = get_names_for_type(soort)
if srt:
srtnm_ev, srtnm_mv = get_names_for_type(srt)[:2]
if proj:
pr = my.Project.objects.get(pk=proj)
title = _(' bij project ').join((soortnm_mv.capitalize(), pr.naam))
else:
pr = None
title = _('Lijst ') + str(soortnm_mv)
if rel:
document = my.rectypes[srt].objects.get(pk=id)
if srt in ('userwijz', 'userprob', 'bevinding'):
docid = document.nummer
else:
docid = document.naam
itemoms = '{} "{}"'.format(srtnm_ev, docid)
relstr = str(_('{} relateren aan {}'))
if rel == 'from':
title = relstr.format(itemoms, soortnm_ev)
else:
title = relstr.format(soortnm_ev, itemoms)
if pr: # is dit niet dubbel? Ja zeker
title = "Project {0} - {1}".format(pr.naam, title)
return title, soortnm_ev, soortnm_mv, sect
def init_infodict_for_detail(proj, soort, edit, meld):
return {'start': '', 'soort': soort, 'prev': '', 'notnw': 'new', 'next': '', "sites": SITES,
'proj': '' if proj == 'proj' else proj, 'sect': '', 'meld': meld,
'projecten': get_projectlist(),
# 'edit': 'view' if edit else '',
# 'view': 'edit' if not edit else '',
'mode': 'edit' if edit else 'view',
'new': 'nieuw' if edit == 'new' else ''}
def get_update_url(proj, edit, soort='', id='', srt='', verw=''):
"return url to view that does the actual update"
if edit == 'new': # form action for new document
if soort:
ref = '{}/{}/'.format(srt, verw) if srt else ''
return "/{}/{}/mut/{}".format(proj, soort, ref)
return "/proj/mut/"
elif edit: # form action for existing
if soort:
return "/{}/{}/{}/mut/".format(proj, soort, id)
return "/{}/mut/".format(proj)
return ''
def get_fieldlengths(soort):
"return dictionary of maxlength per field"
return {x: z for x, y, z in get_field_attr(soort)}
def get_margins_for_type(typename):
"geeft voor een aantal soorten afwijkende marges terug"
left_margin = {"project": 140,
"userspec": 230,
"funcdoc": 160,
"gebrtaak": 240,
"funcproc": 160,
"entiteit": 140,
"techtaak": 200,
"techproc": 140,
"testplan": 140,
"bevinding": 140} .get(typename, 120)
leftw = "{0}px".format(left_margin)
rightw = "{0}px".format(910 - left_margin)
rightm = "{0}px".format(left_margin + 5)
return leftw, rightw, rightm
def get_detail_title(soort, edit, obj):
"""geeft titel zonder "DocTool!" terug"""
naam_ev = get_names_for_type(soort)[0]
if edit == 'new':
return _('Nieuw(e) ') + str(naam_ev)
try:
title = " ".join((naam_ev.capitalize(), obj.naam))
except AttributeError:
title = " ".join((naam_ev.capitalize(), obj.nummer))
return title
def get_relation_buttons(proj, soort, id, button_lijst):
"build buttons to create related documents"
# in het document krijg ik per soort te relateren document eerst een "leg relatie" knop
# daarna als er relaties zijn de verwijzingen met een knop "verwijder relatie"
# en tenslotte dit setje knoppen, dat van mij ook wel bij de "leg relatie" knoppen mag
buttons = []
for s in button_lijst:
buttons.append(BTNTXT.format(proj, s, "new", soort, id, _("Opvoeren ") +
str(my.rectypes[s]._meta.verbose_name)))
return buttons
def execute_update(soort, obj, postdict, files=None):
if soort in ('userwijz', 'userprob', 'bevinding'):
gereed = obj.gereed
for x, y, z in get_field_attr(soort): # naam,type,lengte
if x == 'datum_gereed':
if postdict['gereed'] == '1' and not gereed:
obj.datum_gereed = datetime.datetime.today()
elif x == "gereed":
obj.gereed = True if postdict[x] == "1" else False
elif x == 'link':
if 'link_file' in files:
uploaded = files['link_file']
pad = [y.upload_to for y in my.rectypes[soort]._meta.fields if y.name == 'link'][0]
save_name = "/".join((pad, uploaded.name))
with open(MEDIA_ROOT + save_name, 'wb+') as destination:
for chunk in uploaded.chunks():
destination.write(chunk)
obj.__dict__[x] = save_name
elif x != 'datum_in':
obj.__dict__[x] = postdict[x]
obj.save()
def execute_update_for_link(soort, obj, postdict, files):
model = models.get_model('myprojects', soort.capitalize())
manipulator = my.rectypes[soort].AddManipulator()
new_data = postdict.copy()
new_data.update({'project': proj})
for x,y,z in getfields(soort): # naam,type,lengte
if x == 'link' and y == 'File':
new_data.update(files)
continue
# return HttpResponse(str(new_data))
errors = manipulator.get_validation_errors(new_data)
manipulator.do_html2python(new_data)
if errors:
return 'errors', HttpResponse('\n'.join((str(errors),str(new_data))))
new_object = manipulator.save(new_data)
return 'ok', my.rectypes[soort].objects.get(pk=new_object.id)
def update_link_from_actiereg(obj, arid, arnum):
obj.actie = int(arid) # int(data.get("id","0"))
obj.actienummer = arnum # data.get("actie","")
obj.save()
def update_status_from_actiereg(obj, arstat):
obj.gereed = {"arch": True, "herl": False}[arstat]
obj.save()
def update_subitem(srt1, obj1, srt2, obj2, new, data):
if new:
obj2.hoort_bij = obj1
obj2.naam = data["naam"]
if (srt1, srt2) == ('entiteit', 'attribuut'):
obj2.type = data["type"]
obj2.bereik = data["bereik"]
obj2.primarykey = data["key"] if data["key"] in ('1', '2', '3', '4', '5') else '0'
elif (srt1, srt2) == ('dataitem', 'element'):
obj2.soort = data["type"]
obj2.omschrijving = data["oms"]
obj2.sleutel = data["sleutel"] if data["sleutel"] in ('1', '2', '3', '4', '5') else '0'
if "rel" in data:
if data["rel"] in [x.naam for x in my.Entiteit.objects.filter(project=obj1.project)]:
# try:
obj2.relatie = my.rectypes[srt1].objects.get(naam=data["rel"])
# except ObjectDoesNotExist:
# pass
obj2.save()
def update_related(soort, obj, related, relobj):
"bijwerken van de eventueel meegegeven relatie"
if related not in my.rectypes:
raise Http404('Onbekend type `{}` voor relatie'.format(related))
data = my.rectypes[related].objects.get(pk=relobj)
set_relation(obj, soort, data, related)
class GetRelations:
"zoek relaties bij gegeven object"
def __init__(self, obj, soort): # , related_soort):
self.obj = obj
self.soort = soort
# self.srt = related_soort
self.opts = my.rectypes[soort]._meta
# dit opnieuw opzetten met één lus over opts.get_fields(show_hidden=True)
# in plaats van de vier lussen die ik nu heb
# maar eerst nog even met 4 functies
def get_foreignkeys_to(self):
fkeys_to = []
for fld in self.opts.fields:
if fld.name == "project":
continue
if fld.get_internal_type() == 'ForeignKey':
srt = corr_naam(fld.related_model._meta.model_name)
result = self.obj.__getattribute__(fld.name)
rel = {'text': ' '.join((str(my.rectypes[self.soort].to_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'links': []}
if result:
rel['links'].append(RELTXT.format(self.obj.project.id, srt, result.id, result) +
" " +
BTNTXT.format(self.obj.project.id, self.soort, self.obj.id,
"unrel/van/" + srt, result.id, REMOVE_TEXT))
else:
rel['btn'] = BTNTXT.format(self.obj.project.id, srt, "rel", self.soort,
self.obj.id, ADD_TEXT)
fkeys_to.append(rel)
return fkeys_to
def get_many2many_to(self):
m2ms_to = []
for x in self.opts.many_to_many:
srt = corr_naam(x.related_model._meta.model_name)
y = {'text': ' '.join((str(my.rectypes[self.soort].to_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, srt, "rel", self.soort, self.obj.id,
ADD_TEXT),
'links': []}
result = self.obj.__getattribute__(x.name)
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) + " " +
BTNTXT.format(self.obj.project.id, self.soort, self.obj.id,
"unrel/van/" + srt, item.id, REMOVE_TEXT))
m2ms_to.append(y)
return m2ms_to
def get_foreignkeys_from(self):
button_lijst, fkeys_from, andere, attrs = [], [], [], []
# for relobj in opts.get_all_related_objects():
for relobj in [x for x in self.opts.get_fields()
if (x.one_to_many or x.one_to_one) and x.auto_created and not x.concrete]:
# print(self.obj, relobj, self.soort)
srt = corr_naam(relobj.related_model._meta.model_name)
if (self.soort, srt) == ('entiteit', 'attribuut'):
andere = [x.naam for x in my.Entiteit.objects.filter(project=self.obj.project)
if x != self.obj]
attrs = self.obj.attrs.all()
elif (self.soort, srt) == ('dataitem', 'dataelement'):
andere = [x.naam for x in my.Dataitem.objects.filter(project=self.obj.project)
if x != self.obj]
attrs = self.obj.elems.all()
else:
button_lijst.append(srt)
y = {'text': ' '.join((str(my.rectypes[self.soort].from_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, self.soort, self.obj.id, "rel", srt,
ADD_TEXT),
'links': []}
#result = get_related(self.obj, relobj)
result = self.obj.__getattribute__(relobj.related_name).all()
if result:
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) +
" " +
BTNTXT.format(self.obj.project.id, srt, item.id,
"unrel/naar/" + self.soort, self.obj.id,
REMOVE_TEXT))
fkeys_from.append(y)
return button_lijst, fkeys_from, andere, attrs
def get_many2many_from(self):
button_lijst, m2ms_from = [], []
# for x in opts.get_all_related_many_to_many_objects():
for x in [y for y in self.opts.get_fields() # include_hidden=True)
if y.many_to_many and y.auto_created]:
srt = corr_naam(x.related_model._meta.model_name)
button_lijst.append(srt)
y = {'text': ' '.join((str(my.rectypes[self.soort].from_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, self.soort, self.obj.id, "rel", srt,
ADD_TEXT),
'links': []}
# result = get_related(self.obj, x, m2m=True)
result = self.obj.__getattribute__(x.related_name).all()
if result:
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) + " " +
BTNTXT.format(self.obj.project.id, srt, item.id,
"unrel/naar/" + self.soort, self.obj.id,
REMOVE_TEXT))
m2ms_from.append(y)
return button_lijst, m2ms_from
| first = all_objects.count()
second = str(_("waarvan {} {} en {} {} Actiereg").format(solved, hlp[0], working, hlp[1])) | conditional_block |
helpers.py | """Processing for MyProjects Web Application
"""
import datetime
from django.http import Http404
from django.core.exceptions import ObjectDoesNotExist, FieldError # , DoesNotExist
from django.utils.translation import gettext as _
import docs.models as my
from myprojects.settings import MEDIA_ROOT, SITES
RELTXT = '<br/><a href="/docs/{0}/{1}/{2}/">{3}</a>'
BTNTXT = '<a href="/docs/{0}/{1}/{2}/{3}/{4}/"><input type="button" value="{5}" /></a>'
ADD_TEXT, REMOVE_TEXT = _("leg relatie"), _("verwijder relatie")
# let op: obj.model._meta bestaat niet (meer), obj,_meta.model wel
# maar in get_related heb ik model._meta nou juist vervangen door _meta
# ook is daar rel.to._meta.model_name vervangen door related_model
# maar ook is dat vergelijken met _meta.model_name vervangen door vergelijken met_meta.model
# in dit geval vergelijken we met een naam dus moeten we _meta.model_name blijven gebruiken
# in deze vergelijking vervangen we dus alleen rel.to door related_model
def get_related(this_obj, other_obj, m2m=False):
"""geeft het resultaat van een reversed relatie terug
eerst wordt in het gerelateerde model de related_name opgezocht
dan wordt hiermee het betreffende attribuut van het huidige object bepaald
"""
# is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type
# verwezen wordt vanuit het andere type? Of is dat om de vorige/volgende te kunnen bepalen?
# als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object
# maar een relatie (uit de fields verzameling)
if m2m:
fields = [x for x in other_obj._meta.many_to_many]
else:
fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and
x.get_internal_type() == 'ForeignKey']
for fld in fields:
if fld.related_model == this_obj._meta.model:
related_name = fld.related_query_name()
break
else:
return None # not found
try:
return this_obj.__getattribute__(related_name).all()
except UnboundLocalError:
return None
# zou je deze ook kunnen vervangen door een aanroep van get_relation en dan met de opgehaalde
# naam de gerelateerde objecten ophalen en meteen de vorige en de volgende bepalen?
# (heeft uiteraard konsekwenties voor de aanroepende code)
# oorspronkelijk lijkt dat ook zo geweest te zijn, de functie heette toen get_relation en het
# gedeelte dat nu nog zo heet was daarin hardgecodeerd
# deze functie wordt alleen aangeroepen in een paar methoden van de hieronder opgenomen klasse
# GetRelations, namelijk om de namen van relaties uit andere objecten naar het huidige te kunnen
# bepalen.
# Als je get_relation zoals die nu is gebruikt zou je dat onderscheid (van versus naar relaties)
# met dezelfde functie kunnen afhandelen
def get_relation(srt, soort):
"""Geeft veldnaam en cardinaliteit terug voor een relatie van srt naar soort
"""
result, multiple = None, None
if srt != soort or soort in ('funcproc', 'techproc'):
for relobj in my.rectypes[srt]._meta.get_fields():
if relobj.related_model and corr_naam(relobj.related_model._meta.model_name) == soort:
result = relobj.name
multiple = False if relobj.get_internal_type() == 'ForeignKey' else True
break
return result, multiple
def set_relation(o, soort, r, srt):
attr_name, multiple = get_relation(soort, srt)
if multiple:
o.__getattribute__(attr_name).add(r)
else:
o.__setattr__(attr_name, r)
o.save()
def remove_relation(o, soort, r, srt):
attr_name, multiple = get_relation(soort, srt)
if multiple:
o.__getattribute__(attr_name).remove(r)
else:
o.__setattr__(attr_name, None)
o.save()
def corr_naam(name):
"""convert name used in program to model name and back
Note: all names must be unique!
"""
names = (("techtaak", 'techtask'), ("programma", 'procproc'))
for name1, name2 in names:
if name == name1:
return name2
if name == name2:
return name1
return name
def get_field_attr(name):
"""leidt veldnaam, type en lengte af uit de definities in models.py
"""
# de variant met een repeating group (entiteit, dataitem) levert hier nog een probleem op.
# is dat omdat er twee entiteiten in 1 scherm staan?
fields = []
opts = my.rectypes[name]._meta
for x in opts.get_fields(): # fields:
fldname = x.name
fldtype = x.get_internal_type()
if fldname == 'id' or fldtype in ('ForeignKey', 'ManyToManyField'):
# if fldname == 'id' or any((x.many2one, x.many2many, x.one2many))
continue
try:
length = x.max_length
except AttributeError:
length = -1
fields.append((fldname, fldtype[:-5], length))
return fields
def get_relation_fields(name):
"""deze functie is van de vorige afgesplitst (afwijkend pad als tweede argument alles = True)
enig gemeenschappelijke is loopen over get_fields
deze werd bovendien nergens gebruikt
"""
fields = []
opts = my.rectypes[name]._meta
for rel in opts.get_fields():
# print(rel, rel.one_to_many or rel.many_to_many)
if rel.one_to_many or rel.many_to_many:
try:
fields.append((rel.name, rel.get_internal_type(), rel.max_length))
except AttributeError:
fields.append((rel.name, rel.get_internal_type(), -1))
return fields
def get_new_numberkey_for_soort(owner_proj, soort):
"""generate new id for certain document types
"""
if soort == 'userwijz':
sel = owner_proj.rfcs
elif soort == 'userprob':
sel = owner_proj.probs
elif soort == 'bevinding':
sel = owner_proj.tbev
else:
return ''
ny = str(datetime.date.today().year)
h = ''
try:
last_id = sel.latest("datum_in").nummer
except ObjectDoesNotExist:
pass
else:
yr, nr = last_id.split('-')
if yr == ny:
h = '-'.join((yr, '%04i' % (int(nr) + 1)))
if h == '':
h = '-'.join((ny, '0001'))
return h
def get_stats_texts(proj, action_type):
"""get certain texts for certain document types (also registered in actiereg)
"""
first = _("(nog) geen")
if action_type == 'userwijz':
all_objects = my.Userwijz.objects.filter(project=proj)
second = _('ingediend')
hlp = _("gerealiseerd"), _('in behandeling via')
elif action_type == 'probleem':
all_objects = my.Userprob.objects.filter(project=proj)
second = _("gemeld")
hlp = _('opgelost'), _('doorgekoppeld naar')
elif action_type == 'bevinding':
all_objects = my.Bevinding.objects.filter(project=proj)
second = _("opgevoerd")
hlp = _('opgelost'), _('doorgekoppeld naar')
else:
return '', ''
solved = all_objects.filter(gereed=True).count()
working = all_objects.filter(gereed=False).filter(actie__isnull=False).count()
if all_objects.count() != 0:
first = all_objects.count()
second = str(_("waarvan {} {} en {} {} Actiereg").format(solved, hlp[0], working, hlp[1]))
return first, second
def get_names_for_type(typename):
"get verbose names from model definition"
return (my.rectypes[typename]._meta.verbose_name,
my.rectypes[typename]._meta.verbose_name_plural,
my.rectypes[typename].section)
def get_projectlist():
"return list of all the projects"
return my.Project.objects.all().order_by('naam')
def get_ordered_objectlist(proj, soort):
"return ordered list of objects of the given type for the given project"
# if soort in my.rectypes: -- overbodige test volgens mij
# return None
# if proj:
lijst = my.rectypes[soort].objects.filter(project=proj)
# else:
# lijst = my.rectypes[soort].objects.select_related()
# ik denk dat het voorgaande nooit gewerkt heeft. Om te beginnen omdat het vanaf het begin af aan
# select.related heeft gestaan en dat heeft noit bestaan, dus ik denk dat je hier nooit komt met een
# leeg project (want dan ga je naar get_projectlist) - dus maar weghalen:w
# if soort in ('userwijz', 'userprob', 'bevinding'):
if 'naam' in [x[0] for x in get_field_attr(soort)]:
return lijst.order_by('naam')
return lijst.order_by('nummer')
def get_object(soort, id, new=False):
"return specified document object"
if soort not in my.rectypes:
raise Http404('Onbekend type `{}`'.format(soort))
if new:
o = my.rectypes[soort]()
else:
try:
o = my.rectypes[soort].objects.get(pk=id)
except ObjectDoesNotExist:
raise Http404(str(id).join((soort + ' ', _(' bestaat niet'))))
return o
def determine_adjacent(all_items, o):
"return keys for previous and next object"
prev = next = 0
nog_een = False
for x in all_items:
if nog_een:
next = x.id
nog_een = False
break
if x == o:
nog_een = True
else:
prev = x.id
return prev, next
def get_list_title_attrs(proj, soort, srt, id, rel):
"return title, name (single and plural) and section for object type"
soortnm_ev, soortnm_mv, sect = get_names_for_type(soort)
if srt:
srtnm_ev, srtnm_mv = get_names_for_type(srt)[:2]
if proj:
pr = my.Project.objects.get(pk=proj)
title = _(' bij project ').join((soortnm_mv.capitalize(), pr.naam))
else:
pr = None
title = _('Lijst ') + str(soortnm_mv)
if rel:
document = my.rectypes[srt].objects.get(pk=id)
if srt in ('userwijz', 'userprob', 'bevinding'):
docid = document.nummer
else:
docid = document.naam
itemoms = '{} "{}"'.format(srtnm_ev, docid)
relstr = str(_('{} relateren aan {}'))
if rel == 'from':
title = relstr.format(itemoms, soortnm_ev)
else:
title = relstr.format(soortnm_ev, itemoms)
if pr: # is dit niet dubbel? Ja zeker
title = "Project {0} - {1}".format(pr.naam, title)
return title, soortnm_ev, soortnm_mv, sect
def init_infodict_for_detail(proj, soort, edit, meld):
return {'start': '', 'soort': soort, 'prev': '', 'notnw': 'new', 'next': '', "sites": SITES,
'proj': '' if proj == 'proj' else proj, 'sect': '', 'meld': meld,
'projecten': get_projectlist(),
# 'edit': 'view' if edit else '',
# 'view': 'edit' if not edit else '',
'mode': 'edit' if edit else 'view',
'new': 'nieuw' if edit == 'new' else ''}
def get_update_url(proj, edit, soort='', id='', srt='', verw=''):
"return url to view that does the actual update"
if edit == 'new': # form action for new document
if soort:
ref = '{}/{}/'.format(srt, verw) if srt else ''
return "/{}/{}/mut/{}".format(proj, soort, ref)
return "/proj/mut/"
elif edit: # form action for existing
if soort:
return "/{}/{}/{}/mut/".format(proj, soort, id)
return "/{}/mut/".format(proj)
return ''
def get_fieldlengths(soort):
"return dictionary of maxlength per field"
return {x: z for x, y, z in get_field_attr(soort)}
def get_margins_for_type(typename):
"geeft voor een aantal soorten afwijkende marges terug"
left_margin = {"project": 140,
"userspec": 230,
"funcdoc": 160,
"gebrtaak": 240,
"funcproc": 160,
"entiteit": 140,
"techtaak": 200,
"techproc": 140,
"testplan": 140,
"bevinding": 140} .get(typename, 120)
leftw = "{0}px".format(left_margin)
rightw = "{0}px".format(910 - left_margin)
rightm = "{0}px".format(left_margin + 5)
return leftw, rightw, rightm
def get_detail_title(soort, edit, obj):
"""geeft titel zonder "DocTool!" terug"""
naam_ev = get_names_for_type(soort)[0]
if edit == 'new':
return _('Nieuw(e) ') + str(naam_ev)
try:
title = " ".join((naam_ev.capitalize(), obj.naam))
except AttributeError:
title = " ".join((naam_ev.capitalize(), obj.nummer))
return title
def | (proj, soort, id, button_lijst):
"build buttons to create related documents"
# in het document krijg ik per soort te relateren document eerst een "leg relatie" knop
# daarna als er relaties zijn de verwijzingen met een knop "verwijder relatie"
# en tenslotte dit setje knoppen, dat van mij ook wel bij de "leg relatie" knoppen mag
buttons = []
for s in button_lijst:
buttons.append(BTNTXT.format(proj, s, "new", soort, id, _("Opvoeren ") +
str(my.rectypes[s]._meta.verbose_name)))
return buttons
def execute_update(soort, obj, postdict, files=None):
if soort in ('userwijz', 'userprob', 'bevinding'):
gereed = obj.gereed
for x, y, z in get_field_attr(soort): # naam,type,lengte
if x == 'datum_gereed':
if postdict['gereed'] == '1' and not gereed:
obj.datum_gereed = datetime.datetime.today()
elif x == "gereed":
obj.gereed = True if postdict[x] == "1" else False
elif x == 'link':
if 'link_file' in files:
uploaded = files['link_file']
pad = [y.upload_to for y in my.rectypes[soort]._meta.fields if y.name == 'link'][0]
save_name = "/".join((pad, uploaded.name))
with open(MEDIA_ROOT + save_name, 'wb+') as destination:
for chunk in uploaded.chunks():
destination.write(chunk)
obj.__dict__[x] = save_name
elif x != 'datum_in':
obj.__dict__[x] = postdict[x]
obj.save()
def execute_update_for_link(soort, obj, postdict, files):
model = models.get_model('myprojects', soort.capitalize())
manipulator = my.rectypes[soort].AddManipulator()
new_data = postdict.copy()
new_data.update({'project': proj})
for x,y,z in getfields(soort): # naam,type,lengte
if x == 'link' and y == 'File':
new_data.update(files)
continue
# return HttpResponse(str(new_data))
errors = manipulator.get_validation_errors(new_data)
manipulator.do_html2python(new_data)
if errors:
return 'errors', HttpResponse('\n'.join((str(errors),str(new_data))))
new_object = manipulator.save(new_data)
return 'ok', my.rectypes[soort].objects.get(pk=new_object.id)
def update_link_from_actiereg(obj, arid, arnum):
obj.actie = int(arid) # int(data.get("id","0"))
obj.actienummer = arnum # data.get("actie","")
obj.save()
def update_status_from_actiereg(obj, arstat):
obj.gereed = {"arch": True, "herl": False}[arstat]
obj.save()
def update_subitem(srt1, obj1, srt2, obj2, new, data):
if new:
obj2.hoort_bij = obj1
obj2.naam = data["naam"]
if (srt1, srt2) == ('entiteit', 'attribuut'):
obj2.type = data["type"]
obj2.bereik = data["bereik"]
obj2.primarykey = data["key"] if data["key"] in ('1', '2', '3', '4', '5') else '0'
elif (srt1, srt2) == ('dataitem', 'element'):
obj2.soort = data["type"]
obj2.omschrijving = data["oms"]
obj2.sleutel = data["sleutel"] if data["sleutel"] in ('1', '2', '3', '4', '5') else '0'
if "rel" in data:
if data["rel"] in [x.naam for x in my.Entiteit.objects.filter(project=obj1.project)]:
# try:
obj2.relatie = my.rectypes[srt1].objects.get(naam=data["rel"])
# except ObjectDoesNotExist:
# pass
obj2.save()
def update_related(soort, obj, related, relobj):
"bijwerken van de eventueel meegegeven relatie"
if related not in my.rectypes:
raise Http404('Onbekend type `{}` voor relatie'.format(related))
data = my.rectypes[related].objects.get(pk=relobj)
set_relation(obj, soort, data, related)
class GetRelations:
"zoek relaties bij gegeven object"
def __init__(self, obj, soort): # , related_soort):
self.obj = obj
self.soort = soort
# self.srt = related_soort
self.opts = my.rectypes[soort]._meta
# dit opnieuw opzetten met één lus over opts.get_fields(show_hidden=True)
# in plaats van de vier lussen die ik nu heb
# maar eerst nog even met 4 functies
def get_foreignkeys_to(self):
fkeys_to = []
for fld in self.opts.fields:
if fld.name == "project":
continue
if fld.get_internal_type() == 'ForeignKey':
srt = corr_naam(fld.related_model._meta.model_name)
result = self.obj.__getattribute__(fld.name)
rel = {'text': ' '.join((str(my.rectypes[self.soort].to_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'links': []}
if result:
rel['links'].append(RELTXT.format(self.obj.project.id, srt, result.id, result) +
" " +
BTNTXT.format(self.obj.project.id, self.soort, self.obj.id,
"unrel/van/" + srt, result.id, REMOVE_TEXT))
else:
rel['btn'] = BTNTXT.format(self.obj.project.id, srt, "rel", self.soort,
self.obj.id, ADD_TEXT)
fkeys_to.append(rel)
return fkeys_to
def get_many2many_to(self):
m2ms_to = []
for x in self.opts.many_to_many:
srt = corr_naam(x.related_model._meta.model_name)
y = {'text': ' '.join((str(my.rectypes[self.soort].to_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, srt, "rel", self.soort, self.obj.id,
ADD_TEXT),
'links': []}
result = self.obj.__getattribute__(x.name)
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) + " " +
BTNTXT.format(self.obj.project.id, self.soort, self.obj.id,
"unrel/van/" + srt, item.id, REMOVE_TEXT))
m2ms_to.append(y)
return m2ms_to
def get_foreignkeys_from(self):
button_lijst, fkeys_from, andere, attrs = [], [], [], []
# for relobj in opts.get_all_related_objects():
for relobj in [x for x in self.opts.get_fields()
if (x.one_to_many or x.one_to_one) and x.auto_created and not x.concrete]:
# print(self.obj, relobj, self.soort)
srt = corr_naam(relobj.related_model._meta.model_name)
if (self.soort, srt) == ('entiteit', 'attribuut'):
andere = [x.naam for x in my.Entiteit.objects.filter(project=self.obj.project)
if x != self.obj]
attrs = self.obj.attrs.all()
elif (self.soort, srt) == ('dataitem', 'dataelement'):
andere = [x.naam for x in my.Dataitem.objects.filter(project=self.obj.project)
if x != self.obj]
attrs = self.obj.elems.all()
else:
button_lijst.append(srt)
y = {'text': ' '.join((str(my.rectypes[self.soort].from_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, self.soort, self.obj.id, "rel", srt,
ADD_TEXT),
'links': []}
#result = get_related(self.obj, relobj)
result = self.obj.__getattribute__(relobj.related_name).all()
if result:
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) +
" " +
BTNTXT.format(self.obj.project.id, srt, item.id,
"unrel/naar/" + self.soort, self.obj.id,
REMOVE_TEXT))
fkeys_from.append(y)
return button_lijst, fkeys_from, andere, attrs
def get_many2many_from(self):
button_lijst, m2ms_from = [], []
# for x in opts.get_all_related_many_to_many_objects():
for x in [y for y in self.opts.get_fields() # include_hidden=True)
if y.many_to_many and y.auto_created]:
srt = corr_naam(x.related_model._meta.model_name)
button_lijst.append(srt)
y = {'text': ' '.join((str(my.rectypes[self.soort].from_titles[srt]),
str(my.rectypes[srt]._meta.verbose_name))),
'btn': BTNTXT.format(self.obj.project.id, self.soort, self.obj.id, "rel", srt,
ADD_TEXT),
'links': []}
# result = get_related(self.obj, x, m2m=True)
result = self.obj.__getattribute__(x.related_name).all()
if result:
for item in result.all():
y['links'].append(RELTXT.format(self.obj.project.id, srt, item.id, item) + " " +
BTNTXT.format(self.obj.project.id, srt, item.id,
"unrel/naar/" + self.soort, self.obj.id,
REMOVE_TEXT))
m2ms_from.append(y)
return button_lijst, m2ms_from
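# Sketch of how the class above would be used (names assumed, not from the
# original source): collect relation links in both directions for one object.
# rels = GetRelations(obj, 'userwijz')
# context = {'fkeys_to': rels.get_foreignkeys_to(),
#            'm2ms_to': rels.get_many2many_to()}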
| get_relation_buttons | identifier_name |
js_lua_state.rs | use std::sync::Arc;
use std::{fs, thread};
use crate::js_traits::{FromJs, ToJs};
use crate::lua_execution;
use crate::value::Value;
use mlua::{Lua, StdLib};
use neon::context::Context;
use neon::handle::Handle;
use neon::prelude::*;
use neon::declare_types;
fn lua_version() -> &'static str {
if cfg!(feature = "lua54") {
"lua54"
} else if cfg!(feature = "lua53") {
"lua53"
} else if cfg!(feature = "lua52") {
"lua52"
} else if cfg!(feature = "lua51") {
"lua51"
} else if cfg!(feature = "luajit") {
"luajit"
} else {
panic!("No version specified")
}
}
/// LuaState Class wrapper. Holds on to the lua context reference,
/// as well as the set of active lua libraries, and (eventually) the registered functions
pub struct LuaState {
libraries: StdLib,
lua: Arc<Lua>,
}
impl LuaState {
fn reset(&mut self) {
// By creating a new lua state, we remove all references allowing the js runtime
// to exit if we've attached any event emitters. Without this, the program won't
// close. Is there a more explicit way to close event listeners, or is relying on
// the GC a normal/reasonable approach?
let lua = unsafe { Lua::unsafe_new_with(self.libraries) };
self.lua = Arc::new(lua)
}
}
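// JS-side sketch of why reset()/close() matters (binding and method names
// assumed; this script is not part of the crate). Once close() drops the Lua
// state, nothing holds the registered callback, so the Node event loop can exit:
//
//     const lua = new LuaState();
//     lua.registerFunction('emit', (args) => console.log(args));
//     lua.doStringSync('emit("hello")');
//     lua.close();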
impl Default for LuaState {
fn default() -> Self |
}
fn flag_into_std_lib(flag: u32) -> Option<StdLib> {
const ALL_SAFE: u32 = u32::MAX - 1;
match flag {
#[cfg(any(feature = "lua54", feature = "lua53", feature = "lua52"))]
0x1 => Some(StdLib::COROUTINE),
0x2 => Some(StdLib::TABLE),
0x4 => Some(StdLib::IO),
0x8 => Some(StdLib::OS),
0x10 => Some(StdLib::STRING),
#[cfg(any(feature = "lua54", feature = "lua53"))]
0x20 => Some(StdLib::UTF8),
#[cfg(any(feature = "lua52", feature = "luajit"))]
0x40 => Some(StdLib::BIT),
0x80 => Some(StdLib::MATH),
0x100 => Some(StdLib::PACKAGE),
#[cfg(any(feature = "luajit"))]
0x200 => Some(StdLib::JIT),
#[cfg(any(feature = "luajit"))]
0x4000_0000 => Some(StdLib::FFI),
0x8000_0000 => Some(StdLib::DEBUG),
u32::MAX => Some(StdLib::ALL),
ALL_SAFE => Some(StdLib::ALL_SAFE),
_ => None,
}
}
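// Worked example (illustrative, not called anywhere): combining the TABLE (0x2)
// and MATH (0x80) flags the same way build_libraries_option does below.
#[allow(dead_code)]
fn example_combine_flags() -> Option<StdLib> {
    // XOR a flag with itself to start from an empty set
    let mut libset = StdLib::TABLE ^ StdLib::TABLE;
    for flag in [0x2u32, 0x80].iter().copied() {
        libset |= flag_into_std_lib(flag)?;
    }
    Some(libset) // == StdLib::TABLE | StdLib::MATH
}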
/// These correspond to our JS Enum. Used for a clearer error notification when including them in
/// incompatible versions.
fn flag_to_string(flag: u32) -> String {
const ALL_SAFE: u32 = u32::MAX - 1;
match flag {
0x1 => String::from("Coroutine"),
0x2 => String::from("Table"),
0x4 => String::from("Io"),
0x8 => String::from("Os"),
0x10 => String::from("String"),
0x20 => String::from("Utf8"),
0x40 => String::from("Bit"),
0x80 => String::from("Math"),
0x100 => String::from("Package"),
0x200 => String::from("Jit"),
0x4000_0000 => String::from("Ffi"),
0x8000_0000 => String::from("Debug"),
u32::MAX => String::from("All"),
ALL_SAFE => String::from("AllSafe"),
_ => flag.to_string(),
}
}
fn build_libraries_option(
mut cx: CallContext<JsUndefined>,
libs: Handle<JsValue>,
) -> NeonResult<StdLib> {
if libs.is_a::<JsArray>() {
let libflags: Vec<Handle<JsValue>> = libs
.downcast_or_throw::<JsArray, CallContext<JsUndefined>>(&mut cx)?
.to_vec(&mut cx)?;
// Hack to get an empty set (StdLib(0)): XOR a flag with itself
let mut libset = StdLib::TABLE ^ StdLib::TABLE;
for value in libflags.into_iter() {
let flag = value
.downcast_or_throw::<JsNumber, CallContext<JsUndefined>>(&mut cx)?
.value() as u32;
if let Some(lib) = flag_into_std_lib(flag) {
libset |= lib;
} else {
return cx.throw_error(format!(
"unrecognized Library flag \"{}\" for {}",
flag_to_string(flag),
lua_version()
));
}
}
Ok(libset)
} else if libs.is_a::<JsUndefined>() {
Ok(StdLib::ALL_SAFE)
} else {
cx.throw_error("Expected 'libraries' to be an an array")
}
}
fn init(mut cx: CallContext<JsUndefined>) -> NeonResult<LuaState> {
let opt_options = cx.argument_opt(0);
if opt_options.is_none() {
return Ok(LuaState::default());
};
let options: Handle<JsObject> = opt_options.unwrap().downcast_or_throw(&mut cx)?;
let libraries_key = cx.string("libraries");
let libs = options.get(&mut cx, libraries_key)?;
let libraries = build_libraries_option(cx, libs)?;
// Because we're allowing the end user to dynamically choose their libraries,
// we're using the unsafe call in case they include `debug`. We need to notify
// the end user in the documentation about the caveats of `debug`.
let lua = unsafe {
let lua = Lua::unsafe_new_with(libraries);
Arc::new(lua)
};
Ok(LuaState { lua, libraries })
}
fn do_string_sync(
mut cx: MethodContext<JsLuaState>,
code: String,
name: Option<String>,
) -> JsResult<JsValue> {
let this = cx.this();
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
match lua_execution::do_string_sync(lua, code, name) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn do_file_sync(
mut cx: MethodContext<JsLuaState>,
filename: String,
chunk_name: Option<String>,
) -> JsResult<JsValue> {
match fs::read_to_string(filename) {
Ok(contents) => do_string_sync(cx, contents, chunk_name),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn call_chunk<'a>(
mut cx: MethodContext<'a, JsLuaState>,
code: String,
chunk_name: Option<String>,
js_args: Handle<'a, JsArray>,
) -> JsResult<'a, JsValue> {
let this = cx.this();
let mut args: Vec<Value> = vec![];
let js_args = js_args.to_vec(&mut cx)?;
for arg in js_args.iter() {
let value = Value::from_js(*arg, &mut cx)?;
args.push(value);
}
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
match lua_execution::call_chunk(&lua, code, chunk_name, args) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn register_function<'a>(
mut cx: MethodContext<'a, JsLuaState>,
name: String,
cb: Handle<JsFunction>,
) -> JsResult<'a, JsValue> {
let this = cx.this();
let handler = EventHandler::new(&cx, this, cb);
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
let callback = move |values: Vec<Value>| {
let handler = handler.clone();
thread::spawn(move || {
handler.schedule_with(move |event_ctx, this, callback| {
let arr = JsArray::new(event_ctx, values.len() as u32);
// TODO remove unwraps, handle errors, and pass to callback if needed.
for (i, value) in values.into_iter().enumerate() {
let js_val = value.to_js(event_ctx).unwrap();
arr.set(event_ctx, i as u32, js_val).unwrap();
}
// TODO How to pass an error via on('error') vs the current setup?
let args: Vec<Handle<JsValue>> = vec![arr.upcast()];
let _result = callback.call(event_ctx, this, args);
});
});
};
match lua_execution::register_function(lua, name, callback) {
Ok(_) => Ok(cx.undefined().upcast()),
Err(e) => cx.throw_error(e.to_string()),
}
}
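// Hypothetical JS-side counterpart (names assumed): Lua can call the registered
// global, and the callback is scheduled back onto the JS event loop with an
// array of the Lua arguments:
//
//     lua.registerFunction('log', (args) => console.log(...args));
//     lua.doStringSync('log("level", 42)');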
fn set_global<'a>(
mut cx: MethodContext<'a, JsLuaState>,
name: String,
handle: Handle<'a, JsValue>,
) -> JsResult<'a, JsValue> {
let this: Handle<JsLuaState> = cx.this();
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
let set_value = Value::from_js(handle, &mut cx)?;
match lua_execution::set_global(lua, name, set_value) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn get_global(mut cx: MethodContext<JsLuaState>, name: String) -> JsResult<JsValue> {
let this: Handle<JsLuaState> = cx.this();
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
match lua_execution::get_global(lua, name) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
declare_types! {
pub class JsLuaState for LuaState {
init(cx) {
init(cx)
}
method registerFunction(mut cx) {
let name = cx.argument::<JsString>(0)?.value();
let cb = cx.argument::<JsFunction>(1)?;
register_function(cx, name, cb)
}
method reset(mut cx) {
let mut this = cx.this();
{
let guard = cx.lock();
let mut state = this.borrow_mut(&guard);
state.reset();
}
Ok(cx.undefined().upcast())
}
method close(mut cx) {
let mut this = cx.this();
{
let guard = cx.lock();
let mut state = this.borrow_mut(&guard);
state.reset();
}
Ok(cx.undefined().upcast())
}
method doStringSync(mut cx) {
let code = cx.argument::<JsString>(0)?.value();
let chunk_name = match cx.argument_opt(1) {
Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()),
None => None
};
do_string_sync(cx, code, chunk_name)
}
method doFileSync(mut cx) {
let filename = cx.argument::<JsString>(0)?.value();
// TODO chop the filename on error a bit so it's legible.
// currently the `root/stuff/...` is at the end vs `.../stuff/things.lua`
let chunk_name = match cx.argument_opt(1) {
Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()),
None => Some(filename.clone())
};
do_file_sync(cx, filename, chunk_name)
}
method callChunk(mut cx) {
let code = cx.argument::<JsString>(0)?.value();
let (chunk_name, args) = match cx.len() {
2 => {
let args = cx.argument::<JsArray>(1)?;
Ok((None, args))
},
3 => {
let chunk_name = cx.argument::<JsString>(1)?.value();
let args = cx.argument::<JsArray>(2)?;
Ok((Some(chunk_name), args))
},
_ => {
let e = cx.string(format!("expected 2 or 3 arguments. Found: {}", cx.len()));
cx.throw(e)
}
}?;
call_chunk(cx, code, chunk_name, args)
}
method setGlobal(mut cx) {
let name = cx.argument::<JsString>(0)?.value();
let value = cx.argument::<JsValue>(1)?;
set_global(cx, name, value)
}
method getGlobal(mut cx) {
let name = cx.argument::<JsString>(0)?.value();
get_global(cx, name)
}
}
}
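// Usage sketch for the methods declared above (JS, illustrative only; the exact
// Lua varargs idiom is an assumption):
//
//     lua.doStringSync('x = 1 + 1');
//     lua.callChunk('local a, b = ...; return a + b', 'adder', [2, 3]); // => 5
//     lua.setGlobal('answer', 42);
//     lua.getGlobal('answer'); // => 42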
| {
LuaState {
libraries: StdLib::ALL_SAFE,
lua: Arc::new(Lua::new_with(StdLib::ALL_SAFE).unwrap()),
}
} | identifier_body |
js_lua_state.rs | use std::sync::Arc;
use std::{fs, thread};
use crate::js_traits::{FromJs, ToJs};
use crate::lua_execution;
use crate::value::Value;
use mlua::{Lua, StdLib};
use neon::context::Context;
use neon::handle::Handle;
use neon::prelude::*;
use neon::declare_types;
fn lua_version() -> &'static str {
if cfg!(feature = "lua54") {
"lua54"
} else if cfg!(feature = "lua53") {
"lua53"
} else if cfg!(feature = "lua52") {
"lua52"
} else if cfg!(feature = "lua51") {
"lua51"
} else if cfg!(feature = "luajit") {
"luajit"
} else {
panic!("No version specified")
}
}
/// LuaState Class wrapper. Holds on to the lua context reference,
/// as well as the set of active lua libraries, and (eventually) the registered functions
pub struct LuaState {
libraries: StdLib,
lua: Arc<Lua>,
}
impl LuaState {
fn reset(&mut self) {
// By creating a new lua state, we remove all references allowing the js runtime
// to exit if we've attached any event emitters. Without this, the program won't
// close. Is there a more explicit way to close event listeners, or is relying on
// the GC a normal/reasonable approach?
let lua = unsafe { Lua::unsafe_new_with(self.libraries) };
self.lua = Arc::new(lua)
}
}
impl Default for LuaState {
fn default() -> Self {
LuaState {
libraries: StdLib::ALL_SAFE,
lua: Arc::new(Lua::new_with(StdLib::ALL_SAFE).unwrap()),
}
}
}
fn flag_into_std_lib(flag: u32) -> Option<StdLib> {
const ALL_SAFE: u32 = u32::MAX - 1;
match flag {
#[cfg(any(feature = "lua54", feature = "lua53", feature = "lua52"))]
0x1 => Some(StdLib::COROUTINE),
0x2 => Some(StdLib::TABLE),
0x4 => Some(StdLib::IO),
0x8 => Some(StdLib::OS),
0x10 => Some(StdLib::STRING),
#[cfg(any(feature = "lua54", feature = "lua53"))]
0x20 => Some(StdLib::UTF8),
#[cfg(any(feature = "lua52", feature = "luajit"))]
0x40 => Some(StdLib::BIT),
0x80 => Some(StdLib::MATH),
0x100 => Some(StdLib::PACKAGE),
#[cfg(any(feature = "luajit"))]
0x200 => Some(StdLib::JIT),
#[cfg(any(feature = "luajit"))]
0x4000_0000 => Some(StdLib::FFI),
0x8000_0000 => Some(StdLib::DEBUG),
u32::MAX => Some(StdLib::ALL),
ALL_SAFE => Some(StdLib::ALL_SAFE),
_ => None,
}
}
/// These correspond to our JS Enum. Used for a clearer error notification when including them in
/// incompatible versions.
fn flag_to_string(flag: u32) -> String {
const ALL_SAFE: u32 = u32::MAX - 1;
match flag {
0x1 => String::from("Coroutine"),
0x2 => String::from("Table"),
0x4 => String::from("Io"),
0x8 => String::from("Os"),
0x10 => String::from("String"),
0x20 => String::from("Utf8"),
0x40 => String::from("Bit"),
0x80 => String::from("Math"),
0x100 => String::from("Package"),
0x200 => String::from("Jit"),
0x4000_0000 => String::from("Ffi"),
0x8000_0000 => String::from("Debug"),
u32::MAX => String::from("All"),
ALL_SAFE => String::from("AllSafe"),
_ => flag.to_string(),
}
}
fn build_libraries_option(
mut cx: CallContext<JsUndefined>,
libs: Handle<JsValue>,
) -> NeonResult<StdLib> {
if libs.is_a::<JsArray>() {
let libflags: Vec<Handle<JsValue>> = libs
.downcast_or_throw::<JsArray, CallContext<JsUndefined>>(&mut cx)?
.to_vec(&mut cx)?;
// Hack to get an empty set (StdLib(0)): XOR a flag with itself
let mut libset = StdLib::TABLE ^ StdLib::TABLE;
for value in libflags.into_iter() {
let flag = value
.downcast_or_throw::<JsNumber, CallContext<JsUndefined>>(&mut cx)?
.value() as u32;
if let Some(lib) = flag_into_std_lib(flag) {
libset |= lib;
} else {
return cx.throw_error(format!(
"unrecognized Library flag \"{}\" for {}",
flag_to_string(flag),
lua_version()
));
}
}
Ok(libset)
} else if libs.is_a::<JsUndefined>() {
Ok(StdLib::ALL_SAFE)
} else {
cx.throw_error("Expected 'libraries' to be an an array")
}
}
fn init(mut cx: CallContext<JsUndefined>) -> NeonResult<LuaState> {
let opt_options = cx.argument_opt(0);
if opt_options.is_none() {
return Ok(LuaState::default());
};
let options: Handle<JsObject> = opt_options.unwrap().downcast_or_throw(&mut cx)?;
let libraries_key = cx.string("libraries"); | let libraries = build_libraries_option(cx, libs)?;
// Because we're allowing the end user to dynamically choose their libraries,
// we're using the unsafe call in case they include `debug`. We need to notify
// the end user in the documentation about the caveats of `debug`.
let lua = unsafe {
let lua = Lua::unsafe_new_with(libraries);
Arc::new(lua)
};
Ok(LuaState { lua, libraries })
}
fn do_string_sync(
mut cx: MethodContext<JsLuaState>,
code: String,
name: Option<String>,
) -> JsResult<JsValue> {
let this = cx.this();
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
match lua_execution::do_string_sync(lua, code, name) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn do_file_sync(
mut cx: MethodContext<JsLuaState>,
filename: String,
chunk_name: Option<String>,
) -> JsResult<JsValue> {
match fs::read_to_string(filename) {
Ok(contents) => do_string_sync(cx, contents, chunk_name),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn call_chunk<'a>(
mut cx: MethodContext<'a, JsLuaState>,
code: String,
chunk_name: Option<String>,
js_args: Handle<'a, JsArray>,
) -> JsResult<'a, JsValue> {
let this = cx.this();
let mut args: Vec<Value> = vec![];
let js_args = js_args.to_vec(&mut cx)?;
for arg in js_args.iter() {
let value = Value::from_js(*arg, &mut cx)?;
args.push(value);
}
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
match lua_execution::call_chunk(&lua, code, chunk_name, args) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn register_function<'a>(
mut cx: MethodContext<'a, JsLuaState>,
name: String,
cb: Handle<JsFunction>,
) -> JsResult<'a, JsValue> {
let this = cx.this();
let handler = EventHandler::new(&cx, this, cb);
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
let callback = move |values: Vec<Value>| {
let handler = handler.clone();
thread::spawn(move || {
handler.schedule_with(move |event_ctx, this, callback| {
let arr = JsArray::new(event_ctx, values.len() as u32);
// TODO remove unwraps, handle errors, and pass to callback if needed.
for (i, value) in values.into_iter().enumerate() {
let js_val = value.to_js(event_ctx).unwrap();
arr.set(event_ctx, i as u32, js_val).unwrap();
}
// TODO How to pass an error via on('error') vs the current setup?
let args: Vec<Handle<JsValue>> = vec![arr.upcast()];
let _result = callback.call(event_ctx, this, args);
});
});
};
match lua_execution::register_function(lua, name, callback) {
Ok(_) => Ok(cx.undefined().upcast()),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn set_global<'a>(
mut cx: MethodContext<'a, JsLuaState>,
name: String,
handle: Handle<'a, JsValue>,
) -> JsResult<'a, JsValue> {
let this: Handle<JsLuaState> = cx.this();
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
let set_value = Value::from_js(handle, &mut cx)?;
match lua_execution::set_global(lua, name, set_value) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn get_global(mut cx: MethodContext<JsLuaState>, name: String) -> JsResult<JsValue> {
let this: Handle<JsLuaState> = cx.this();
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
match lua_execution::get_global(lua, name) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
declare_types! {
pub class JsLuaState for LuaState {
init(cx) {
init(cx)
}
method registerFunction(mut cx) {
let name = cx.argument::<JsString>(0)?.value();
let cb = cx.argument::<JsFunction>(1)?;
register_function(cx, name, cb)
}
method reset(mut cx) {
let mut this = cx.this();
{
let guard = cx.lock();
let mut state = this.borrow_mut(&guard);
state.reset();
}
Ok(cx.undefined().upcast())
}
method close(mut cx) {
let mut this = cx.this();
{
let guard = cx.lock();
let mut state = this.borrow_mut(&guard);
state.reset();
}
Ok(cx.undefined().upcast())
}
method doStringSync(mut cx) {
let code = cx.argument::<JsString>(0)?.value();
let chunk_name = match cx.argument_opt(1) {
Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()),
None => None
};
do_string_sync(cx, code, chunk_name)
}
method doFileSync(mut cx) {
let filename = cx.argument::<JsString>(0)?.value();
// TODO chop the filename on error a bit so it's legible.
// currently the `root/stuff/...` is at the end vs `.../stuff/things.lua`
let chunk_name = match cx.argument_opt(1) {
Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()),
None => Some(filename.clone())
};
do_file_sync(cx, filename, chunk_name)
}
method callChunk(mut cx) {
let code = cx.argument::<JsString>(0)?.value();
let (chunk_name, args) = match cx.len() {
2 => {
let args = cx.argument::<JsArray>(1)?;
Ok((None, args))
},
3 => {
let chunk_name = cx.argument::<JsString>(1)?.value();
let args = cx.argument::<JsArray>(2)?;
Ok((Some(chunk_name), args))
},
_ => {
let e = cx.string(format!("expected 2 or 3 arguments. Found: {}", cx.len()));
cx.throw(e)
}
}?;
call_chunk(cx, code, chunk_name, args)
}
method setGlobal(mut cx) {
let name = cx.argument::<JsString>(0)?.value();
let value = cx.argument::<JsValue>(1)?;
set_global(cx, name, value)
}
method getGlobal(mut cx) {
let name = cx.argument::<JsString>(0)?.value();
get_global(cx, name)
}
}
} | let libs = options.get(&mut cx, libraries_key)?; | random_line_split |
js_lua_state.rs | use std::sync::Arc;
use std::{fs, thread};
use crate::js_traits::{FromJs, ToJs};
use crate::lua_execution;
use crate::value::Value;
use mlua::{Lua, StdLib};
use neon::context::Context;
use neon::handle::Handle;
use neon::prelude::*;
use neon::declare_types;
fn lua_version() -> &'static str {
if cfg!(feature = "lua54") {
"lua54"
} else if cfg!(feature = "lua53") {
"lua53"
} else if cfg!(feature = "lua52") {
"lua52"
} else if cfg!(feature = "lua51") {
"lua51"
} else if cfg!(feature = "luajit") {
"luajit"
} else {
panic!("No version specified")
}
}
/// LuaState Class wrapper. Holds on to the lua context reference,
/// as well as the set of active lua libraries, and (eventually) the registered functions
pub struct LuaState {
libraries: StdLib,
lua: Arc<Lua>,
}
impl LuaState {
fn reset(&mut self) {
// By creating a new lua state, we remove all references allowing the js runtime
// to exit if we've attached any event emitters. Without this, the program won't
// close. Is there a more explicit way to close event listeners, or is relying on
// the GC a normal/reasonable approach?
let lua = unsafe { Lua::unsafe_new_with(self.libraries) };
self.lua = Arc::new(lua)
}
}
impl Default for LuaState {
fn default() -> Self {
LuaState {
libraries: StdLib::ALL_SAFE,
lua: Arc::new(Lua::new_with(StdLib::ALL_SAFE).unwrap()),
}
}
}
fn flag_into_std_lib(flag: u32) -> Option<StdLib> {
const ALL_SAFE: u32 = u32::MAX - 1;
match flag {
#[cfg(any(feature = "lua54", feature = "lua53", feature = "lua52"))]
0x1 => Some(StdLib::COROUTINE),
0x2 => Some(StdLib::TABLE),
0x4 => Some(StdLib::IO),
0x8 => Some(StdLib::OS),
0x10 => Some(StdLib::STRING),
#[cfg(any(feature = "lua54", feature = "lua53"))]
0x20 => Some(StdLib::UTF8),
#[cfg(any(feature = "lua52", feature = "luajit"))]
0x40 => Some(StdLib::BIT),
0x80 => Some(StdLib::MATH),
0x100 => Some(StdLib::PACKAGE),
#[cfg(any(feature = "luajit"))]
0x200 => Some(StdLib::JIT),
#[cfg(any(feature = "luajit"))]
0x4000_0000 => Some(StdLib::FFI),
0x8000_0000 => Some(StdLib::DEBUG),
u32::MAX => Some(StdLib::ALL),
ALL_SAFE => Some(StdLib::ALL_SAFE),
_ => None,
}
}
/// These correspond to our JS Enum. Used for a clearer error notification when including them in
/// incompatible versions.
fn flag_to_string(flag: u32) -> String {
const ALL_SAFE: u32 = u32::MAX - 1;
match flag {
0x1 => String::from("Coroutine"),
0x2 => String::from("Table"),
0x4 => String::from("Io"),
0x8 => String::from("Os"),
0x10 => String::from("String"),
0x20 => String::from("Utf8"),
0x40 => String::from("Bit"),
0x80 => String::from("Math"),
0x100 => String::from("Package"),
0x200 => String::from("Jit"),
0x4000_0000 => String::from("Ffi"),
0x8000_0000 => String::from("Debug"),
u32::MAX => String::from("All"),
ALL_SAFE => String::from("AllSafe"),
_ => flag.to_string(),
}
}
fn build_libraries_option(
mut cx: CallContext<JsUndefined>,
libs: Handle<JsValue>,
) -> NeonResult<StdLib> {
if libs.is_a::<JsArray>() {
let libflags: Vec<Handle<JsValue>> = libs
.downcast_or_throw::<JsArray, CallContext<JsUndefined>>(&mut cx)?
.to_vec(&mut cx)?;
// Hack to get an empty set (StdLib(0)): XOR a flag with itself
let mut libset = StdLib::TABLE ^ StdLib::TABLE;
for value in libflags.into_iter() {
let flag = value
.downcast_or_throw::<JsNumber, CallContext<JsUndefined>>(&mut cx)?
.value() as u32;
if let Some(lib) = flag_into_std_lib(flag) {
libset |= lib;
} else {
return cx.throw_error(format!(
"unrecognized Library flag \"{}\" for {}",
flag_to_string(flag),
lua_version()
));
}
}
Ok(libset)
} else if libs.is_a::<JsUndefined>() {
Ok(StdLib::ALL_SAFE)
} else {
cx.throw_error("Expected 'libraries' to be an an array")
}
}
fn init(mut cx: CallContext<JsUndefined>) -> NeonResult<LuaState> {
let opt_options = cx.argument_opt(0);
if opt_options.is_none() {
return Ok(LuaState::default());
};
let options: Handle<JsObject> = opt_options.unwrap().downcast_or_throw(&mut cx)?;
let libraries_key = cx.string("libraries");
let libs = options.get(&mut cx, libraries_key)?;
let libraries = build_libraries_option(cx, libs)?;
// Because we're allowing the end user to dynamically choose their libraries,
// we're using the unsafe call in case they include `debug`. We need to notify
// the end user in the documentation about the caveats of `debug`.
let lua = unsafe {
let lua = Lua::unsafe_new_with(libraries);
Arc::new(lua)
};
Ok(LuaState { lua, libraries })
}
fn | (
mut cx: MethodContext<JsLuaState>,
code: String,
name: Option<String>,
) -> JsResult<JsValue> {
let this = cx.this();
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
match lua_execution::do_string_sync(lua, code, name) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn do_file_sync(
mut cx: MethodContext<JsLuaState>,
filename: String,
chunk_name: Option<String>,
) -> JsResult<JsValue> {
match fs::read_to_string(filename) {
Ok(contents) => do_string_sync(cx, contents, chunk_name),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn call_chunk<'a>(
mut cx: MethodContext<'a, JsLuaState>,
code: String,
chunk_name: Option<String>,
js_args: Handle<'a, JsArray>,
) -> JsResult<'a, JsValue> {
let this = cx.this();
let mut args: Vec<Value> = vec![];
let js_args = js_args.to_vec(&mut cx)?;
for arg in js_args.iter() {
let value = Value::from_js(*arg, &mut cx)?;
args.push(value);
}
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
match lua_execution::call_chunk(&lua, code, chunk_name, args) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn register_function<'a>(
mut cx: MethodContext<'a, JsLuaState>,
name: String,
cb: Handle<JsFunction>,
) -> JsResult<'a, JsValue> {
let this = cx.this();
let handler = EventHandler::new(&cx, this, cb);
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
let callback = move |values: Vec<Value>| {
let handler = handler.clone();
thread::spawn(move || {
handler.schedule_with(move |event_ctx, this, callback| {
let arr = JsArray::new(event_ctx, values.len() as u32);
// TODO remove unwraps, handle errors, and pass to callback if needed.
for (i, value) in values.into_iter().enumerate() {
let js_val = value.to_js(event_ctx).unwrap();
arr.set(event_ctx, i as u32, js_val).unwrap();
}
// TODO How to pass an error via on('error') vs the current setup?
let args: Vec<Handle<JsValue>> = vec![arr.upcast()];
let _result = callback.call(event_ctx, this, args);
});
});
};
match lua_execution::register_function(lua, name, callback) {
Ok(_) => Ok(cx.undefined().upcast()),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn set_global<'a>(
mut cx: MethodContext<'a, JsLuaState>,
name: String,
handle: Handle<'a, JsValue>,
) -> JsResult<'a, JsValue> {
let this: Handle<JsLuaState> = cx.this();
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
let set_value = Value::from_js(handle, &mut cx)?;
match lua_execution::set_global(lua, name, set_value) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
fn get_global(mut cx: MethodContext<JsLuaState>, name: String) -> JsResult<JsValue> {
let this: Handle<JsLuaState> = cx.this();
let lua: &Lua = {
let guard = cx.lock();
let state = this.borrow(&guard);
&state.lua.clone()
};
match lua_execution::get_global(lua, name) {
Ok(v) => v.to_js(&mut cx),
Err(e) => cx.throw_error(e.to_string()),
}
}
declare_types! {
pub class JsLuaState for LuaState {
init(cx) {
init(cx)
}
method registerFunction(mut cx) {
let name = cx.argument::<JsString>(0)?.value();
let cb = cx.argument::<JsFunction>(1)?;
register_function(cx, name, cb)
}
method reset(mut cx) {
let mut this = cx.this();
{
let guard = cx.lock();
let mut state = this.borrow_mut(&guard);
state.reset();
}
Ok(cx.undefined().upcast())
}
method close(mut cx) {
let mut this = cx.this();
{
let guard = cx.lock();
let mut state = this.borrow_mut(&guard);
state.reset();
}
Ok(cx.undefined().upcast())
}
method doStringSync(mut cx) {
let code = cx.argument::<JsString>(0)?.value();
let chunk_name = match cx.argument_opt(1) {
Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()),
None => None
};
do_string_sync(cx, code, chunk_name)
}
method doFileSync(mut cx) {
let filename = cx.argument::<JsString>(0)?.value();
// TODO chop the filename on error a bit so it's legible.
// currently the `root/stuff/...` is at the end vs `.../stuff/things.lua`
let chunk_name = match cx.argument_opt(1) {
Some(arg) => Some(arg.downcast::<JsString>().or_throw(&mut cx)?.value()),
None => Some(filename.clone())
};
do_file_sync(cx, filename, chunk_name)
}
method callChunk(mut cx) {
let code = cx.argument::<JsString>(0)?.value();
let (chunk_name, args) = match cx.len() {
2 => {
let args = cx.argument::<JsArray>(1)?;
Ok((None, args))
},
3 => {
let chunk_name = cx.argument::<JsString>(1)?.value();
let args = cx.argument::<JsArray>(2)?;
Ok((Some(chunk_name), args))
},
_ => {
let e = cx.string(format!("expected 2 or 3 arguments. Found: {}", cx.len()));
cx.throw(e)
}
}?;
call_chunk(cx, code, chunk_name, args)
}
method setGlobal(mut cx) {
let name = cx.argument::<JsString>(0)?.value();
let value = cx.argument::<JsValue>(1)?;
set_global(cx, name, value)
}
method getGlobal(mut cx) {
let name = cx.argument::<JsString>(0)?.value();
get_global(cx, name)
}
}
}
| do_string_sync | identifier_name |
controllerserver.go | package hostpath
import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"github.com/google/uuid"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"strconv"
)
const (
deviceID = "deviceID"
maxStorageCapacity = tib
)
type accessType int
const (
mountAccess accessType = iota
blockAccess
)
type controllerServer struct {
caps []*csi.ControllerServiceCapability
nodeID string
}
func (cs controllerServer) | (ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := cs.validateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err
}
// Check arguments
if len(req.GetName()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Name missing in request")
}
caps := req.GetVolumeCapabilities()
if caps == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capabilities missing in request")
}
// Keep a record of the requested access types.
var accessTypeMount, accessTypeBlock bool
for _, ca := range caps {
if ca.GetBlock() != nil {
accessTypeBlock = true
}
if ca.GetMount() != nil {
accessTypeMount = true
}
}
// A real driver would also need to check that the other
// fields in VolumeCapabilities are sane. The check above is
// just enough to pass the "[Testpattern: Dynamic PV (block
// volmode)] volumeMode should fail in binding dynamic
// provisioned PV to PVC" storage E2E test.
if accessTypeBlock && accessTypeMount {
return nil, status.Error(codes.InvalidArgument, "cannot have both block and mount access type")
}
var requestedAccessType accessType
if accessTypeBlock {
requestedAccessType = blockAccess
} else {
// Default to mount.
requestedAccessType = mountAccess
}
// Check for maximum available capacity
capacity := int64(req.GetCapacityRange().GetRequiredBytes())
if capacity >= maxStorageCapacity {
return nil, status.Errorf(codes.OutOfRange, "Requested capacity %d exceeds maximum allowed %d", capacity, maxStorageCapacity)
}
topologies := []*csi.Topology{
&csi.Topology{
Segments: map[string]string{TopologyKeyNode: cs.nodeID},
},
}
// Need to check for already existing volume name, and if found
// check for the requested capacity and already allocated capacity
if exVol, err := getVolumeByName(req.GetName()); err == nil {
// Since err is nil, it means the volume with the same name already exists
// need to check if the size of existing volume is the same as in new
// request
if exVol.VolSize < capacity {
return nil, status.Errorf(codes.AlreadyExists, "Volume with the same name %s but a different size already exists", req.GetName())
}
if req.GetVolumeContentSource() != nil {
volumeSource := req.VolumeContentSource
switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot:
if volumeSource.GetSnapshot() != nil && exVol.ParentSnapID != "" && exVol.ParentSnapID != volumeSource.GetSnapshot().GetSnapshotId() {
return nil, status.Error(codes.AlreadyExists, "existing volume source snapshot id not matching")
}
case *csi.VolumeContentSource_Volume:
if volumeSource.GetVolume() != nil && exVol.ParentVolID != volumeSource.GetVolume().GetVolumeId() {
return nil, status.Error(codes.AlreadyExists, "existing volume source volume id not matching")
}
default:
return nil, status.Errorf(codes.InvalidArgument, "%v not a proper volume source", volumeSource)
}
}
// TODO (sbezverk) Do I need to make sure that volume still exists?
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: exVol.VolID,
CapacityBytes: int64(exVol.VolSize),
VolumeContext: req.GetParameters(),
ContentSource: req.GetVolumeContentSource(),
AccessibleTopology: topologies,
},
}, nil
}
volumeID := uuid.New().String()
vol, err := createHostpathVolume(volumeID, req.GetName(), capacity, requestedAccessType, false /* ephemeral */)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to create volume %v: %v", volumeID, err)
}
glog.V(4).Infof("created volume %s at path %s", vol.VolID, vol.VolPath)
if req.GetVolumeContentSource() != nil {
path := getVolumePath(volumeID)
volumeSource := req.VolumeContentSource
switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot:
if snapshot := volumeSource.GetSnapshot(); snapshot != nil {
err = loadFromSnapshot(capacity, snapshot.GetSnapshotId(), path, requestedAccessType)
vol.ParentSnapID = snapshot.GetSnapshotId()
}
case *csi.VolumeContentSource_Volume:
if srcVolume := volumeSource.GetVolume(); srcVolume != nil {
err = loadFromVolume(capacity, srcVolume.GetVolumeId(), path, requestedAccessType)
vol.ParentVolID = srcVolume.GetVolumeId()
}
default:
err = status.Errorf(codes.InvalidArgument, "%v not a proper volume source", volumeSource)
}
if err != nil {
glog.V(4).Infof("VolumeSource error: %v", err)
if delErr := deleteHostpathVolume(volumeID); delErr != nil {
glog.V(2).Infof("deleting hostpath volume %v failed: %v", volumeID, delErr)
}
return nil, err
}
glog.V(4).Infof("successfully populated volume %s", vol.VolID)
}
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
VolumeContext: req.GetParameters(),
ContentSource: req.GetVolumeContentSource(),
AccessibleTopology: topologies,
},
}, nil
}
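// Illustrative request shape accepted by CreateVolume above (field values are
// made up). Calling it twice with the same Name and size is idempotent and
// returns the already-provisioned volume:
//
//	req := &csi.CreateVolumeRequest{
//		Name:          "pvc-1234",
//		CapacityRange: &csi.CapacityRange{RequiredBytes: 1 << 20},
//		VolumeCapabilities: []*csi.VolumeCapability{{
//			AccessType: &csi.VolumeCapability_Mount{Mount: &csi.VolumeCapability_MountVolume{}},
//		}},
//	}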
func (cs controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := cs.validateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid delete volume req: %v", req)
return nil, err
}
volId := req.GetVolumeId()
if err := deleteHostpathVolume(volId); err != nil {
return nil, status.Errorf(codes.Internal, "failed to delete volume %v: %v", volId, err)
}
glog.V(4).Infof("volume %v successfully deleted", volId)
return &csi.DeleteVolumeResponse{}, nil
}
func (cs controllerServer) ControllerPublishVolume(ctx context.Context, request *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerUnpublishVolume(ctx context.Context, request *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ValidateVolumeCapabilities(ctx context.Context, request *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
panic("implement me")
}
func (cs controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
volumeRes := &csi.ListVolumesResponse{
Entries: []*csi.ListVolumesResponse_Entry{},
}
var (
startIdx, volumesLength, maxLength int64
hpVolume hostPathVolume
)
volumeIds := getSortedVolumeIDs()
if req.StartingToken == "" {
req.StartingToken = "1"
}
startIdx, err := strconv.ParseInt(req.StartingToken, 10, 32)
if err != nil {
return nil, status.Error(codes.Aborted, "startingToken must be an integer")
}
volumesLength = int64(len(volumeIds))
maxLength = int64(req.MaxEntries)
if maxLength > volumesLength || maxLength <= 0 {
maxLength = volumesLength
}
for index := startIdx - 1; index < volumesLength && index-(startIdx-1) < maxLength; index++ { // emit at most maxLength entries starting at startIdx
hpVolume = hostPathVolumes[volumeIds[index]]
healthy, msg := doHealthCheckInControllerSide(volumeIds[index])
glog.V(3).Infof("Healthy state: %s Volume: %t", hpVolume.VolName, healthy)
volumeRes.Entries = append(volumeRes.Entries, &csi.ListVolumesResponse_Entry{
Volume: &csi.Volume{
VolumeId: hpVolume.VolID,
CapacityBytes: hpVolume.VolSize,
},
Status: &csi.ListVolumesResponse_VolumeStatus{
PublishedNodeIds: []string{hpVolume.NodeID},
VolumeCondition: &csi.VolumeCondition{
Abnormal: !healthy,
Message: msg,
},
},
})
}
glog.V(5).Infof("Volumes are: %+v", *volumeRes)
return volumeRes, nil
}
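// Paging sketch (illustrative): StartingToken is a 1-based index into the
// sorted volume IDs, so a client walks the list in MaxEntries-sized steps:
//
//	page1, _ := cs.ListVolumes(ctx, &csi.ListVolumesRequest{MaxEntries: 10, StartingToken: "1"})
//	page2, _ := cs.ListVolumes(ctx, &csi.ListVolumesRequest{MaxEntries: 10, StartingToken: "11"})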
func (cs controllerServer) GetCapacity(ctx context.Context, request *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerGetCapabilities(ctx context.Context, request *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
panic("implement me")
}
func (cs controllerServer) CreateSnapshot(ctx context.Context, request *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
panic("implement me")
}
func (cs controllerServer) DeleteSnapshot(ctx context.Context, request *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
panic("implement me")
}
func (cs controllerServer) ListSnapshots(ctx context.Context, request *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerExpandVolume(ctx context.Context, request *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerGetVolume(ctx context.Context, request *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) {
panic("implement me")
}
func NewControllerServer(ephemeral bool, nodeID string) *controllerServer {
if ephemeral {
return &controllerServer{caps: getControllerServiceCapabilities(nil), nodeID: nodeID}
}
return &controllerServer{
caps: getControllerServiceCapabilities(
[]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_GET_VOLUME,
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
csi.ControllerServiceCapability_RPC_VOLUME_CONDITION,
}),
nodeID: nodeID,
}
}
func getControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) []*csi.ControllerServiceCapability {
var csc []*csi.ControllerServiceCapability
for _, ca := range cl {
glog.Infof("Enabling controller service capability: %v", ca.String())
csc = append(csc, &csi.ControllerServiceCapability{
Type: &csi.ControllerServiceCapability_Rpc{
Rpc: &csi.ControllerServiceCapability_RPC{
Type: ca,
},
},
})
}
return csc
}
func (cs *controllerServer) validateControllerServiceRequest(c csi.ControllerServiceCapability_RPC_Type) error {
if c == csi.ControllerServiceCapability_RPC_UNKNOWN {
return nil
}
for _, ca := range cs.caps {
if c == ca.GetRpc().GetType() {
return nil
}
}
return status.Errorf(codes.InvalidArgument, "unsupported capability %s", c)
}
| CreateVolume | identifier_name |
controllerserver.go | package hostpath
import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"github.com/google/uuid"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"strconv"
)
const (
deviceID = "deviceID"
maxStorageCapacity = tib
)
type accessType int
const (
mountAccess accessType = iota
blockAccess
)
type controllerServer struct {
caps []*csi.ControllerServiceCapability
nodeID string
}
func (cs controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := cs.validateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err
}
// Check arguments
if len(req.GetName()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Name missing in request")
}
caps := req.GetVolumeCapabilities()
if caps == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capabilities missing in request")
}
// Keep a record of the requested access types.
var accessTypeMount, accessTypeBlock bool
for _, ca := range caps {
if ca.GetBlock() != nil {
accessTypeBlock = true
}
if ca.GetMount() != nil {
accessTypeMount = true
}
}
// A real driver would also need to check that the other
// fields in VolumeCapabilities are sane. The check above is
// just enough to pass the "[Testpattern: Dynamic PV (block
// volmode)] volumeMode should fail in binding dynamic
// provisioned PV to PVC" storage E2E test.
if accessTypeBlock && accessTypeMount {
return nil, status.Error(codes.InvalidArgument, "cannot have both block and mount access type")
}
var requestedAccessType accessType
if accessTypeBlock {
requestedAccessType = blockAccess
} else {
// Default to mount.
requestedAccessType = mountAccess
}
// Check for maximum available capacity
capacity := int64(req.GetCapacityRange().GetRequiredBytes())
if capacity >= maxStorageCapacity {
return nil, status.Errorf(codes.OutOfRange, "Requested capacity %d exceeds maximum allowed %d", capacity, maxStorageCapacity)
}
topologies := []*csi.Topology{
&csi.Topology{
Segments: map[string]string{TopologyKeyNode: cs.nodeID},
},
}
// Need to check for already existing volume name, and if found
// check for the requested capacity and already allocated capacity
if exVol, err := getVolumeByName(req.GetName()); err == nil {
// Since err is nil, it means the volume with the same name already exists
// need to check if the size of existing volume is the same as in new
// request
if exVol.VolSize < capacity {
return nil, status.Errorf(codes.AlreadyExists, "Volume with the same name %s but a different size already exists", req.GetName())
}
if req.GetVolumeContentSource() != nil {
volumeSource := req.VolumeContentSource
switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot:
if volumeSource.GetSnapshot() != nil && exVol.ParentSnapID != "" && exVol.ParentSnapID != volumeSource.GetSnapshot().GetSnapshotId() {
return nil, status.Error(codes.AlreadyExists, "existing volume source snapshot id not matching")
}
case *csi.VolumeContentSource_Volume:
if volumeSource.GetVolume() != nil && exVol.ParentVolID != volumeSource.GetVolume().GetVolumeId() {
return nil, status.Error(codes.AlreadyExists, "existing volume source volume id not matching")
}
default:
return nil, status.Errorf(codes.InvalidArgument, "%v not a proper volume source", volumeSource)
}
}
// TODO (sbezverk) Do I need to make sure that volume still exists?
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: exVol.VolID,
CapacityBytes: int64(exVol.VolSize),
VolumeContext: req.GetParameters(),
ContentSource: req.GetVolumeContentSource(),
AccessibleTopology: topologies,
},
}, nil
}
volumeID := uuid.New().String()
vol, err := createHostpathVolume(volumeID, req.GetName(), capacity, requestedAccessType, false /* ephemeral */)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to create volume %v: %v", volumeID, err)
}
glog.V(4).Infof("created volume %s at path %s", vol.VolID, vol.VolPath)
if req.GetVolumeContentSource() != nil {
path := getVolumePath(volumeID)
volumeSource := req.VolumeContentSource
switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot:
if snapshot := volumeSource.GetSnapshot(); snapshot != nil {
err = loadFromSnapshot(capacity, snapshot.GetSnapshotId(), path, requestedAccessType)
vol.ParentSnapID = snapshot.GetSnapshotId()
}
case *csi.VolumeContentSource_Volume:
if srcVolume := volumeSource.GetVolume(); srcVolume != nil {
err = loadFromVolume(capacity, srcVolume.GetVolumeId(), path, requestedAccessType)
vol.ParentVolID = srcVolume.GetVolumeId()
}
default:
err = status.Errorf(codes.InvalidArgument, "%v not a proper volume source", volumeSource)
}
if err != nil {
glog.V(4).Infof("VolumeSource error: %v", err)
if delErr := deleteHostpathVolume(volumeID); delErr != nil {
glog.V(2).Infof("deleting hostpath volume %v failed: %v", volumeID, delErr)
}
return nil, err
}
glog.V(4).Infof("successfully populated volume %s", vol.VolID)
}
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
VolumeContext: req.GetParameters(),
ContentSource: req.GetVolumeContentSource(),
AccessibleTopology: topologies,
},
}, nil
}
func (cs controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := cs.validateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid delete volume req: %v", req)
return nil, err
}
volId := req.GetVolumeId()
if err := deleteHostpathVolume(volId); err != nil {
return nil, status.Errorf(codes.Internal, "failed to delete volume %v: %v", volId, err)
}
glog.V(4).Infof("volume %v successfully deleted", volId)
return &csi.DeleteVolumeResponse{}, nil
}
func (cs controllerServer) ControllerPublishVolume(ctx context.Context, request *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerUnpublishVolume(ctx context.Context, request *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ValidateVolumeCapabilities(ctx context.Context, request *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
panic("implement me")
}
func (cs controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
volumeRes := &csi.ListVolumesResponse{
Entries: []*csi.ListVolumesResponse_Entry{},
}
var (
startIdx, volumesLength, maxLength int64
hpVolume hostPathVolume
)
volumeIds := getSortedVolumeIDs()
if req.StartingToken == "" {
req.StartingToken = "1"
}
startIdx, err := strconv.ParseInt(req.StartingToken, 10, 32)
if err != nil {
return nil, status.Error(codes.Aborted, "startingToken must be an integer")
}
volumesLength = int64(len(volumeIds))
maxLength = int64(req.MaxEntries)
if maxLength > volumesLength || maxLength <= 0 {
maxLength = volumesLength
}
for index := startIdx - 1; index < volumesLength && index-(startIdx-1) < maxLength; index++ { // emit at most maxLength entries starting at startIdx
hpVolume = hostPathVolumes[volumeIds[index]]
healthy, msg := doHealthCheckInControllerSide(volumeIds[index])
glog.V(3).Infof("Healthy state: %s Volume: %t", hpVolume.VolName, healthy)
volumeRes.Entries = append(volumeRes.Entries, &csi.ListVolumesResponse_Entry{
Volume: &csi.Volume{
VolumeId: hpVolume.VolID,
CapacityBytes: hpVolume.VolSize,
},
Status: &csi.ListVolumesResponse_VolumeStatus{
PublishedNodeIds: []string{hpVolume.NodeID},
VolumeCondition: &csi.VolumeCondition{
Abnormal: !healthy,
Message: msg,
},
},
})
}
glog.V(5).Infof("Volumes are: %+v", *volumeRes)
return volumeRes, nil
}
func (cs controllerServer) GetCapacity(ctx context.Context, request *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerGetCapabilities(ctx context.Context, request *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
panic("implement me")
}
func (cs controllerServer) CreateSnapshot(ctx context.Context, request *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
panic("implement me")
}
func (cs controllerServer) DeleteSnapshot(ctx context.Context, request *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
panic("implement me")
}
func (cs controllerServer) ListSnapshots(ctx context.Context, request *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerExpandVolume(ctx context.Context, request *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerGetVolume(ctx context.Context, request *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) {
panic("implement me")
}
func NewControllerServer(ephemeral bool, nodeID string) *controllerServer {
if ephemeral {
return &controllerServer{caps: getControllerServiceCapabilities(nil), nodeID: nodeID}
}
return &controllerServer{
caps: getControllerServiceCapabilities(
[]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_GET_VOLUME,
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, | }),
nodeID: nodeID,
}
}
func getControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) []*csi.ControllerServiceCapability {
var csc []*csi.ControllerServiceCapability
for _, ca := range cl {
glog.Infof("Enabling controller service capability: %v", ca.String())
csc = append(csc, &csi.ControllerServiceCapability{
Type: &csi.ControllerServiceCapability_Rpc{
Rpc: &csi.ControllerServiceCapability_RPC{
Type: ca,
},
},
})
}
return csc
}
func (cs *controllerServer) validateControllerServiceRequest(c csi.ControllerServiceCapability_RPC_Type) error {
if c == csi.ControllerServiceCapability_RPC_UNKNOWN {
return nil
}
for _, ca := range cs.caps {
if c == ca.GetRpc().GetType() {
return nil
}
}
return status.Errorf(codes.InvalidArgument, "unsupported capability %s", c)
} | csi.ControllerServiceCapability_RPC_VOLUME_CONDITION, | random_line_split |
controllerserver.go | package hostpath
import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"github.com/google/uuid"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"strconv"
)
const (
deviceID = "deviceID"
maxStorageCapacity = tib
)
type accessType int
const (
mountAccess accessType = iota
blockAccess
)
type controllerServer struct {
caps []*csi.ControllerServiceCapability
nodeID string
}
func (cs controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := cs.validateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err
}
// Check arguments
if len(req.GetName()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Name missing in request")
}
caps := req.GetVolumeCapabilities()
if caps == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capabilities missing in request")
}
// Keep a record of the requested access types.
var accessTypeMount, accessTypeBlock bool
for _, ca := range caps {
if ca.GetBlock() != nil {
accessTypeBlock = true
}
if ca.GetMount() != nil {
accessTypeMount = true
}
}
// A real driver would also need to check that the other
// fields in VolumeCapabilities are sane. The check above is
// just enough to pass the "[Testpattern: Dynamic PV (block
// volmode)] volumeMode should fail in binding dynamic
// provisioned PV to PVC" storage E2E test.
if accessTypeBlock && accessTypeMount {
return nil, status.Error(codes.InvalidArgument, "cannot have both block and mount access type")
}
var requestedAccessType accessType
if accessTypeBlock {
requestedAccessType = blockAccess
} else {
// Default to mount.
requestedAccessType = mountAccess
}
// Check for maximum available capacity
capacity := int64(req.GetCapacityRange().GetRequiredBytes())
if capacity >= maxStorageCapacity {
return nil, status.Errorf(codes.OutOfRange, "Requested capacity %d exceeds maximum allowed %d", capacity, maxStorageCapacity)
}
topologies := []*csi.Topology{
&csi.Topology{
Segments: map[string]string{TopologyKeyNode: cs.nodeID},
},
}
// Need to check for already existing volume name, and if found
// check for the requested capacity and already allocated capacity
if exVol, err := getVolumeByName(req.GetName()); err == nil {
// Since err is nil, it means the volume with the same name already exists
// need to check if the size of existing volume is the same as in new
// request
if exVol.VolSize < capacity {
return nil, status.Errorf(codes.AlreadyExists, "Volume with the same name %s but a different size already exists", req.GetName())
}
if req.GetVolumeContentSource() != nil {
volumeSource := req.VolumeContentSource
switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot:
if volumeSource.GetSnapshot() != nil && exVol.ParentSnapID != "" && exVol.ParentSnapID != volumeSource.GetSnapshot().GetSnapshotId() {
return nil, status.Error(codes.AlreadyExists, "existing volume source snapshot id not matching")
}
case *csi.VolumeContentSource_Volume:
if volumeSource.GetVolume() != nil && exVol.ParentVolID != volumeSource.GetVolume().GetVolumeId() {
return nil, status.Error(codes.AlreadyExists, "existing volume source volume id not matching")
}
default:
return nil, status.Errorf(codes.InvalidArgument, "%v not a proper volume source", volumeSource)
}
}
// TODO (sbezverk) Do I need to make sure that volume still exists?
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: exVol.VolID,
CapacityBytes: int64(exVol.VolSize),
VolumeContext: req.GetParameters(),
ContentSource: req.GetVolumeContentSource(),
AccessibleTopology: topologies,
},
}, nil
}
volumeID := uuid.New().String()
vol, err := createHostpathVolume(volumeID, req.GetName(), capacity, requestedAccessType, false /* ephemeral */)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to create volume %v: %v", volumeID, err)
}
glog.V(4).Infof("created volume %s at path %s", vol.VolID, vol.VolPath)
if req.GetVolumeContentSource() != nil {
path := getVolumePath(volumeID)
volumeSource := req.VolumeContentSource
switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot:
if snapshot := volumeSource.GetSnapshot(); snapshot != nil {
err = loadFromSnapshot(capacity, snapshot.GetSnapshotId(), path, requestedAccessType)
vol.ParentSnapID = snapshot.GetSnapshotId()
}
case *csi.VolumeContentSource_Volume:
if srcVolume := volumeSource.GetVolume(); srcVolume != nil {
err = loadFromVolume(capacity, srcVolume.GetVolumeId(), path, requestedAccessType)
vol.ParentVolID = srcVolume.GetVolumeId()
}
default:
err = status.Errorf(codes.InvalidArgument, "%v not a proper volume source", volumeSource)
}
if err != nil {
glog.V(4).Infof("VolumeSource error: %v", err)
if delErr := deleteHostpathVolume(volumeID); delErr != nil {
glog.V(2).Infof("deleting hostpath volume %v failed: %v", volumeID, delErr)
}
return nil, err
}
glog.V(4).Infof("successfully populated volume %s", vol.VolID)
}
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
VolumeContext: req.GetParameters(),
ContentSource: req.GetVolumeContentSource(),
AccessibleTopology: topologies,
},
}, nil
}
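// Illustrative caller sketch for CreateVolume (field values are hypothetical,
// error handling elided):
//
//	resp, err := cs.CreateVolume(ctx, &csi.CreateVolumeRequest{
//		Name:          "pvc-1234",
//		CapacityRange: &csi.CapacityRange{RequiredBytes: 1 << 30},
//		VolumeCapabilities: []*csi.VolumeCapability{
//			{AccessType: &csi.VolumeCapability_Mount{Mount: &csi.VolumeCapability_MountVolume{}}},
//		},
//	})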
func (cs controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := cs.validateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid delete volume req: %v", req)
return nil, err
}
volId := req.GetVolumeId()
if err := deleteHostpathVolume(volId); err != nil {
return nil, status.Errorf(codes.Internal, "failed to delete volume %v: %v", volId, err)
}
glog.V(4).Infof("volume %v successfully deleted", volId)
return &csi.DeleteVolumeResponse{}, nil
}
func (cs controllerServer) ControllerPublishVolume(ctx context.Context, request *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerUnpublishVolume(ctx context.Context, request *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ValidateVolumeCapabilities(ctx context.Context, request *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
panic("implement me")
}
func (cs controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
volumeRes := &csi.ListVolumesResponse{
Entries: []*csi.ListVolumesResponse_Entry{},
}
var (
startIdx, volumesLength, maxLength int64
hpVolume hostPathVolume
)
volumeIds := getSortedVolumeIDs()
if req.StartingToken == "" {
req.StartingToken = "1"
}
startIdx, err := strconv.ParseInt(req.StartingToken, 10, 32)
if err != nil {
return nil, status.Error(codes.Aborted, "startingToken must be an integer")
}
volumesLength = int64(len(volumeIds))
maxLength = int64(req.MaxEntries)
if maxLength > volumesLength || maxLength <= 0 {
maxLength = volumesLength
}
for index := startIdx - 1; index < volumesLength && index < startIdx-1+maxLength; index++ {
hpVolume = hostPathVolumes[volumeIds[index]]
healthy, msg := doHealthCheckInControllerSide(volumeIds[index])
glog.V(3).Infof("Healthy state: %s Volume: %t", hpVolume.VolName, healthy)
volumeRes.Entries = append(volumeRes.Entries, &csi.ListVolumesResponse_Entry{
Volume: &csi.Volume{
VolumeId: hpVolume.VolID,
CapacityBytes: hpVolume.VolSize,
},
Status: &csi.ListVolumesResponse_VolumeStatus{
PublishedNodeIds: []string{hpVolume.NodeID},
VolumeCondition: &csi.VolumeCondition{
Abnormal: !healthy,
Message: msg,
},
},
})
}
glog.V(5).Infof("Volumes are: %+v", *volumeRes)
return volumeRes, nil
}
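// Pagination sketch for ListVolumes (hypothetical values): StartingToken is a
// 1-based index encoded as a string, MaxEntries caps the page size.
//
//	resp, err := cs.ListVolumes(ctx, &csi.ListVolumesRequest{StartingToken: "1", MaxEntries: 50})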
func (cs controllerServer) GetCapacity(ctx context.Context, request *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerGetCapabilities(ctx context.Context, request *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
panic("implement me")
}
func (cs controllerServer) CreateSnapshot(ctx context.Context, request *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
panic("implement me")
}
func (cs controllerServer) DeleteSnapshot(ctx context.Context, request *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
panic("implement me")
}
func (cs controllerServer) ListSnapshots(ctx context.Context, request *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerExpandVolume(ctx context.Context, request *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) |
func (cs controllerServer) ControllerGetVolume(ctx context.Context, request *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) {
panic("implement me")
}
func NewControllerServer(ephemeral bool, nodeID string) *controllerServer {
if ephemeral {
return &controllerServer{caps: getControllerServiceCapabilities(nil), nodeID: nodeID}
}
return &controllerServer{
caps: getControllerServiceCapabilities(
[]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_GET_VOLUME,
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
csi.ControllerServiceCapability_RPC_VOLUME_CONDITION,
}),
nodeID: nodeID,
}
}
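// Construction sketch: a non-ephemeral server advertises the full capability set,
// an ephemeral one advertises none (the nodeID value is hypothetical):
//
//	cs := NewControllerServer(false /* ephemeral */, "node-1")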
func getControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) []*csi.ControllerServiceCapability {
var csc []*csi.ControllerServiceCapability
for _, ca := range cl {
glog.Infof("Enabling controller service capability: %v", ca.String())
csc = append(csc, &csi.ControllerServiceCapability{
Type: &csi.ControllerServiceCapability_Rpc{
Rpc: &csi.ControllerServiceCapability_RPC{
Type: ca,
},
},
})
}
return csc
}
func (cs *controllerServer) validateControllerServiceRequest(c csi.ControllerServiceCapability_RPC_Type) error {
if c == csi.ControllerServiceCapability_RPC_UNKNOWN {
return nil
}
for _, ca := range cs.caps {
if c == ca.GetRpc().GetType() {
return nil
}
}
return status.Errorf(codes.InvalidArgument, "unsupported capability %s", c)
}
| {
panic("implement me")
} | identifier_body |
controllerserver.go | package hostpath
import (
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/glog"
"github.com/google/uuid"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"strconv"
)
const (
deviceID = "deviceID"
maxStorageCapacity = tib
)
type accessType int
const (
mountAccess accessType = iota
blockAccess
)
type controllerServer struct {
caps []*csi.ControllerServiceCapability
nodeID string
}
func (cs controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := cs.validateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err
}
// Check arguments
if len(req.GetName()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Name missing in request")
}
caps := req.GetVolumeCapabilities()
if caps == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capabilities missing in request")
}
// Keep a record of the requested access types.
var accessTypeMount, accessTypeBlock bool
for _, ca := range caps {
if ca.GetBlock() != nil {
accessTypeBlock = true
}
if ca.GetMount() != nil |
}
// A real driver would also need to check that the other
// fields in VolumeCapabilities are sane. The check above is
// just enough to pass the "[Testpattern: Dynamic PV (block
// volmode)] volumeMode should fail in binding dynamic
// provisioned PV to PVC" storage E2E test.
if accessTypeBlock && accessTypeMount {
return nil, status.Error(codes.InvalidArgument, "cannot have both block and mount access type")
}
var requestedAccessType accessType
if accessTypeBlock {
requestedAccessType = blockAccess
} else {
// Default to mount.
requestedAccessType = mountAccess
}
// Check for maximum available capacity
capacity := int64(req.GetCapacityRange().GetRequiredBytes())
if capacity >= maxStorageCapacity {
return nil, status.Errorf(codes.OutOfRange, "Requested capacity %d exceeds maximum allowed %d", capacity, maxStorageCapacity)
}
topologies := []*csi.Topology{
{
Segments: map[string]string{TopologyKeyNode: cs.nodeID},
},
}
// Need to check for already existing volume name, and if found
// check for the requested capacity and already allocated capacity
if exVol, err := getVolumeByName(req.GetName()); err == nil {
// Since err is nil, it means the volume with the same name already exists
// need to check if the size of existing volume is the same as in new
// request
if exVol.VolSize < capacity {
return nil, status.Errorf(codes.AlreadyExists, "Volume with the same name: %s but with different size already exist", req.GetName())
}
if req.GetVolumeContentSource() != nil {
volumeSource := req.VolumeContentSource
switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot:
if volumeSource.GetSnapshot() != nil && exVol.ParentSnapID != "" && exVol.ParentSnapID != volumeSource.GetSnapshot().GetSnapshotId() {
return nil, status.Error(codes.AlreadyExists, "existing volume source snapshot id not matching")
}
case *csi.VolumeContentSource_Volume:
if volumeSource.GetVolume() != nil && exVol.ParentVolID != volumeSource.GetVolume().GetVolumeId() {
return nil, status.Error(codes.AlreadyExists, "existing volume source volume id not matching")
}
default:
return nil, status.Errorf(codes.InvalidArgument, "%v not a proper volume source", volumeSource)
}
}
// TODO (sbezverk) Do I need to make sure that volume still exists?
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: exVol.VolID,
CapacityBytes: int64(exVol.VolSize),
VolumeContext: req.GetParameters(),
ContentSource: req.GetVolumeContentSource(),
AccessibleTopology: topologies,
},
}, nil
}
volumeID := uuid.New().String()
vol, err := createHostpathVolume(volumeID, req.GetName(), capacity, requestedAccessType, false /* ephemeral */)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to create volume %v: %v", volumeID, err)
}
glog.V(4).Infof("created volume %s at path %s", vol.VolID, vol.VolPath)
if req.GetVolumeContentSource() != nil {
path := getVolumePath(volumeID)
volumeSource := req.VolumeContentSource
switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot:
if snapshot := volumeSource.GetSnapshot(); snapshot != nil {
err = loadFromSnapshot(capacity, snapshot.GetSnapshotId(), path, requestedAccessType)
vol.ParentSnapID = snapshot.GetSnapshotId()
}
case *csi.VolumeContentSource_Volume:
if srcVolume := volumeSource.GetVolume(); srcVolume != nil {
err = loadFromVolume(capacity, srcVolume.GetVolumeId(), path, requestedAccessType)
vol.ParentVolID = srcVolume.GetVolumeId()
}
default:
err = status.Errorf(codes.InvalidArgument, "%v not a proper volume source", volumeSource)
}
if err != nil {
glog.V(4).Infof("VolumeSource error: %v", err)
if delErr := deleteHostpathVolume(volumeID); delErr != nil {
glog.V(2).Infof("deleting hostpath volume %v failed: %v", volumeID, delErr)
}
return nil, err
}
glog.V(4).Infof("successfully populated volume %s", vol.VolID)
}
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: volumeID,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
VolumeContext: req.GetParameters(),
ContentSource: req.GetVolumeContentSource(),
AccessibleTopology: topologies,
},
}, nil
}
func (cs controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if err := cs.validateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid delete volume req: %v", req)
return nil, err
}
volId := req.GetVolumeId()
if err := deleteHostpathVolume(volId); err != nil {
return nil, status.Errorf(codes.Internal, "failed to delete volume %v: %v", volId, err)
}
glog.V(4).Infof("volume %v successfully deleted", volId)
return &csi.DeleteVolumeResponse{}, nil
}
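// Deletion sketch (hypothetical volume id):
//
//	_, err := cs.DeleteVolume(ctx, &csi.DeleteVolumeRequest{VolumeId: "vol-1234"})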
func (cs controllerServer) ControllerPublishVolume(ctx context.Context, request *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerUnpublishVolume(ctx context.Context, request *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ValidateVolumeCapabilities(ctx context.Context, request *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
panic("implement me")
}
func (cs controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) {
volumeRes := &csi.ListVolumesResponse{
Entries: []*csi.ListVolumesResponse_Entry{},
}
var (
startIdx, volumesLength, maxLength int64
hpVolume hostPathVolume
)
volumeIds := getSortedVolumeIDs()
if req.StartingToken == "" {
req.StartingToken = "1"
}
startIdx, err := strconv.ParseInt(req.StartingToken, 10, 32)
if err != nil {
return nil, status.Error(codes.Aborted, "startingToken must be an integer")
}
volumesLength = int64(len(volumeIds))
maxLength = int64(req.MaxEntries)
if maxLength > volumesLength || maxLength <= 0 {
maxLength = volumesLength
}
for index := startIdx - 1; index < volumesLength && index < startIdx-1+maxLength; index++ {
hpVolume = hostPathVolumes[volumeIds[index]]
healthy, msg := doHealthCheckInControllerSide(volumeIds[index])
glog.V(3).Infof("Healthy state: %s Volume: %t", hpVolume.VolName, healthy)
volumeRes.Entries = append(volumeRes.Entries, &csi.ListVolumesResponse_Entry{
Volume: &csi.Volume{
VolumeId: hpVolume.VolID,
CapacityBytes: hpVolume.VolSize,
},
Status: &csi.ListVolumesResponse_VolumeStatus{
PublishedNodeIds: []string{hpVolume.NodeID},
VolumeCondition: &csi.VolumeCondition{
Abnormal: !healthy,
Message: msg,
},
},
})
}
glog.V(5).Infof("Volumes are: %+v", *volumeRes)
return volumeRes, nil
}
func (cs controllerServer) GetCapacity(ctx context.Context, request *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerGetCapabilities(ctx context.Context, request *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
panic("implement me")
}
func (cs controllerServer) CreateSnapshot(ctx context.Context, request *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
panic("implement me")
}
func (cs controllerServer) DeleteSnapshot(ctx context.Context, request *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
panic("implement me")
}
func (cs controllerServer) ListSnapshots(ctx context.Context, request *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerExpandVolume(ctx context.Context, request *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
panic("implement me")
}
func (cs controllerServer) ControllerGetVolume(ctx context.Context, request *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) {
panic("implement me")
}
func NewControllerServer(ephemeral bool, nodeID string) *controllerServer {
if ephemeral {
return &controllerServer{caps: getControllerServiceCapabilities(nil), nodeID: nodeID}
}
return &controllerServer{
caps: getControllerServiceCapabilities(
[]csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
csi.ControllerServiceCapability_RPC_GET_VOLUME,
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS,
csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
csi.ControllerServiceCapability_RPC_VOLUME_CONDITION,
}),
nodeID: nodeID,
}
}
func getControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) []*csi.ControllerServiceCapability {
var csc []*csi.ControllerServiceCapability
for _, ca := range cl {
glog.Infof("Enabling controller service capability: %v", ca.String())
csc = append(csc, &csi.ControllerServiceCapability{
Type: &csi.ControllerServiceCapability_Rpc{
Rpc: &csi.ControllerServiceCapability_RPC{
Type: ca,
},
},
})
}
return csc
}
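// For example (sketch), passing a single RPC type yields one wrapped capability:
//
//	caps := getControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
//		csi.ControllerServiceCapability_RPC_LIST_VOLUMES,
//	}) // len(caps) == 1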
func (cs *controllerServer) validateControllerServiceRequest(c csi.ControllerServiceCapability_RPC_Type) error {
if c == csi.ControllerServiceCapability_RPC_UNKNOWN {
return nil
}
for _, ca := range cs.caps {
if c == ca.GetRpc().GetType() {
return nil
}
}
return status.Errorf(codes.InvalidArgument, "unsupported capability %s", c)
}
| {
accessTypeMount = true
} | conditional_block |
AwardEditModal.js | import React from "react"
import { Modal ,Form, Input,message, Select,Upload,Icon,InputNumber } from "antd"
import { connect } from 'dva'
import HzInput from '@/components/HzInput'
import { ACTIVIT_TYPE } from '../../../services/lottery_activity'
const FormItem = Form.Item
const Option = Select.Option
const imgMap = {
1:'https://image.51zan.com/2019/05/09/FkTuoJ2HaE4pkwNzDeB6ZcHrZkRE.png',
3:'https://image.51zan.com/2019/05/09/Fgky804m1N5W0R4oARqIRrtmQJ3O.png',
4:'https://image.51zan.com/2019/05/09/FjYQF6N2SVJFgHmquuu77kGuukBa.png'
}
@Form.create()
export default class extends React.Component {
state = {
fileList :[],
type:4
}
getFile = (fileList) => {
this.setState({
fileList
})
}
onCancel =()=>{
this.onProbabilityChange(this.props.row.probability)
this.props.form.resetFields()
this.props.onCancel && this.props.onCancel()
}
onClick = ()=>{
this.props.form.validateFields((err,values)=>{
if(!err){
values.img_path = this.state.fileList[0] && this.state.fileList[0].url
let row = this.props.row
row = {...this.props.row,...values}
row.probability = Number(row.probability)
this.props.onOk && this.props.onOk(row)
this.props.form.resetFields()
this.props.onCancel && this.props.onCancel()
}
})
}
componentDidUpdate(prevProps){
if(this.props.visible && !prevProps.visible){
let row = { ...this.props.row }
if(!row.type){
row.type = 4
row.name = '谢谢参与'
}
this.setState({
type:row.type
},()=>{
this.props.form.setFieldsValue(row)
})
let {img_path} = this.props.row
if(!img_path) img_path = imgMap[row.type]
if(img_path){
this.setState({
fileList:[{
uid: '-1',
name: img_path,
status: 'done',
url: img_path
}]
})
}else{
this.setState({
fileList:[]
})
}
}
}
validatorByProbability= (rule,value,callback) =>{
if(this.props.probability < 0){
callback(`中奖概率之和不能大于100`)
}else{
callback()
}
}
onProbabilityChange = (value) =>{
let {row,probabilityChange} = this.props
let o = {...row}
o.probability = value
probabilityChange && probabilityChange(o)
}
typeChange=(type)=>{
this.props.form.resetFields()
this.onProbabilityChange(this.props.row.probability)
this.setState({
type
},()=>{
let name = ''
if(type === 4) name= '谢谢参与'
this.props.form.setFieldsValue({name})
let img_path = imgMap[type]
let fileList = []
if(img_path){
fileList = [{
uid: '-1',
name: img_path,
status: 'done',
url: img_path
}]
}
this.setState({fileList})
})
}
prizeChange = (value) => {
const {type} = this.state
const {setFieldsValue} = this.props.form
let name = ''
switch(type){
case 1:
name = `${value}积分`
break
case 3:
name = `${value}元红包`
break
default:
return
}
setFieldsValue({name})
}
render() {
const formItemLayout = {
labelCol: {span: 5},
wrapperCol: {span: 18},
}
const { visible , probability, from} = this.props
const { getFieldDecorator } = this.props.form
const {type} = this.state
let TYPES = ACTIVIT_TYPE.filter(i=>{
if(i.value === 4) return true
return from.prize_type.indexOf(i.value) > -1
})
return <Modal
visible={visible}
title="编辑"
okText="确定"
cancelText="取消"
destroyOnClose
onCancel={this.onCancel}
onOk={this.onClick}
width={480}
>
<Form>
<FormItem label="奖品类型" {...formItemLayout}>
{getFieldDecorator('type', {
rules:[
{required:true,message:'请选择奖品类型'}
],
initialValue:4
})(
<Select
placeholder='请选择奖品类型'
onChange={this.typeChange}
getPopupContainer={triggerNode => triggerNode.parentNode}
>
{
TYPES.map((item) => {
return <Option key={item.value} value={item.value}>{item.label}</Option>
})
}
</Select>
)}
</FormItem>
{type === 1||type === 3 ? <FormItem label="奖品面额" {...formItemLayout}>
{getFieldDecorator('prize_value', {
rules:[
{required:true,message:'请输入奖品面额'}
]
})(
<InputNumber
min={1}
max={type===1?99999:200}
onChange={this.prizeChange}
step={1}
precision={type===1?0:2}
style={{width:'100%'}}
placeholder={`${type===1?'单位:积分':'1.00~200元'}`} /> |
<FormItem label="奖品名称" {...formItemLayout}>
{getFieldDecorator('name', {
rules:[
{required:true,message:'请输入奖品名称'}
]
})(
<HzInput maxLength={type===1?7:6} placeholder='请输入奖品名称' />
)}
</FormItem>
{type !== 4 ? <FormItem label="奖品数量" {...formItemLayout}>
{getFieldDecorator('number', {
rules:[
{required:true,message:'请输入奖品数量'}
]
})(
<InputNumber
min={1}
max={99999}
step={1}
precision={0}
style={{width:'100%'}}
placeholder='大于0正整数' />
)}
</FormItem>:null}
<FormItem label="中奖概率" {...formItemLayout}>
{getFieldDecorator('probability', {
validateTrigger:'onBlur',
rules:[
{required:true,message:'请输入中奖概率'},
{validator:this.validatorByProbability}
]
})(
<InputNumber
min={0}
step={1}
precision={0}
onChange={this.onProbabilityChange}
style={{width:'100%'}}
placeholder='请输入中奖概率' />
)}
<span style={{fontSize:12,color:'#9EA8B1',display:'block',marginTop: '-6px'}}>还剩{probability}%的中奖概率 </span>
</FormItem>
<UploadContainer {...this.props} getFile={this.getFile} fileList={this.state.fileList} />
</Form>
</Modal>
}
}
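// Usage sketch for this modal (the import name AwardEditModal and the handler
// implementations are hypothetical; prop names follow how this.props is read above):
// <AwardEditModal
//   visible={visible}
//   row={editingRow}
//   from={{ prize_type: [1, 3] }}
//   probability={remainingProbability}
//   probabilityChange={row => recompute(row)}
//   onOk={row => save(row)}
//   onCancel={() => close()}
// />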
@connect(({shop_fitment}) => ({ shop_fitment }))
class UploadContainer extends React.Component {
state = {
fileList: [],
showUploadIcon: true,
previewVisible: false,
previewImage: ''
}
componentDidMount(){
this.props.dispatch({
type:'shop_fitment/getToken',
payload: {
type: 'image',
}
})
}
static getDerivedStateFromProps(nextProps, prevState) {
return {
fileList: nextProps.fileList,
showUploadIcon: nextProps.fileList.length === 0 || (nextProps.fileList[0] && nextProps.fileList[0].status !== 'done'),
previewImage:nextProps.fileList[0] && nextProps.fileList[0].url
}
}
handleCancel = () => {
this.setState({
previewVisible: false,
previewImage: ''
})
}
setShowUploadIcon = (status) => {
setTimeout(_ => {
this.setState({
showUploadIcon: status
})
}, 400)
}
handlePreview = (fileList) => {
if (fileList && fileList[0]) {
this.setState({
previewVisible: true,
previewImage: fileList[0].url
})
}
}
beforeUpload = (file, fileList) => {
const isJPG = file.type === 'image/jpeg' || file.type === 'image/png'
if (!isJPG) {
message.error('只能上传jpg、jpeg和png格式的图片!')
}
const isLt2M = file.size / 1024 <= 100
if (!isLt2M) {
message.error('图片大小不能超过100KB!')
}
const maxPic = this.state.fileList.length + fileList.length <= 1
if (!maxPic) {
message.error('最多只能上传1张图片!')
}
return isJPG && isLt2M && maxPic
}
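// Size-rule sketch: a 150 KB file fails the check above because 150 * 1024 / 1024 = 150 > 100,
// e.g. beforeUpload({ type: 'image/png', size: 150 * 1024 }, []) returns false when no files are queued.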
handleChange = (info) => {
const { fileList } = info
const photoPrefix = this.props.shop_fitment.photoPrefix
if (info.file.status === 'uploading') {
this.props.getFile && this.props.getFile(fileList)
}
if (info.file.status === 'done') {
fileList.map((file) => {
if (file.response) {
file.url = `https://${photoPrefix}/${file.response.key}`
file.key = file.response.key
}
return file
})
this.props.getFile && this.props.getFile(fileList)
// this.setState({ fileList }, () => {
// this.setShowUploadIcon(fileList.length === 0)
// })
}
}
handleRemove = (file) => {
const { fileList } = this.state
for (let [i, v] of fileList.entries()) {
if (v.uid === file.uid) {
fileList.splice(i, 1)
this.props.getFile && this.props.getFile([])
// this.setState({ fileList, showUploadIcon: fileList.length === 0 }, () => {
this.props.form.validateFields(['images'], { force: true })
// })
return
}
}
}
validatorByImg = (rule, value, callback) =>{
const {fileList} = this.state
if(fileList.length && fileList[0].url){
callback()
}else{
callback('请上传图片')
}
}
render() {
const fileList = this.state.fileList
const photoToken = this.props.shop_fitment.photoToken
const formItemLayout = {
labelCol: {span: 5},
wrapperCol: {span: 18},
}
const uploadProps = {
name: 'file',
action: '//upload.qiniup.com/',
accept: ".jpg,.jpeg,.png",
headers: {},
data: {
token: photoToken,
},
listType: "picture-card",
multiple: true,
onPreview: () => this.handlePreview(fileList),
beforeUpload: this.beforeUpload,
onChange: this.handleChange,
onRemove: this.handleRemove,
fileList: fileList,
className: "avatar-uploader"
}
const { getFieldDecorator } = this.props.form
return <Form.Item label="奖励图片:" {...formItemLayout}
extra={<div style={{fontSize:12}}>支持.jpg/.jpeg/.png,建议上传尺寸120x120,大小控制在100KB以内</div>}
>
{getFieldDecorator("img_path", {
rules: [
{ required: true,validator: this.validatorByImg },
]
})(
<Upload {...uploadProps}>
{
this.state.showUploadIcon ? <div>
<Icon type='plus' style={{ fontSize: 32, color: '#9EA8B1' }} />
<div className="ant-upload-text">上传图片</div>
</div> : null
}
</Upload>
)}
<Modal visible={this.state.previewVisible} footer={null} onCancel={this.handleCancel}>
<img alt="" style={{ width: '100%' }} src={this.state.previewImage} />
</Modal>
</Form.Item>
}
} | )}
</FormItem>: null} | random_line_split |
AwardEditModal.js | import React from "react"
import { Modal ,Form, Input,message, Select,Upload,Icon,InputNumber } from "antd"
import { connect } from 'dva'
import HzInput from '@/components/HzInput'
import { ACTIVIT_TYPE } from '../../../services/lottery_activity'
const FormItem = Form.Item
const Option = Select.Option
const imgMap = {
1:'https://image.51zan.com/2019/05/09/FkTuoJ2HaE4pkwNzDeB6ZcHrZkRE.png',
3:'https://image.51zan.com/2019/05/09/Fgky804m1N5W0R4oARqIRrtmQJ3O.png',
4:'https://image.51zan.com/2019/05/09/FjYQF6N2SVJFgHmquuu77kGuukBa.png'
}
@Form.create()
export default class extends React.Component {
state = {
fileList :[],
type:4
}
getFile = (fileList) => {
this.setState({
fileList
})
}
onCancel =()=>{
this.onProbabilityChange(this.props.row.probability)
this.props.form.resetFields()
this.props.onCancel && this.props.onCancel()
}
onClick = ()=>{
this.props.form.validateFields((err,values)=>{
if(!err){
values.img_path = this.state.fileList[0] && this.state.fileList[0].url
let row = this.props.row
row = {...this.props.row,...values}
row.probability = Number(row.probability)
this.props.onOk && this.props.onOk(row)
this.props.form.resetFields()
this.props.onCancel && this.props.onCancel()
}
})
}
componentDidUpdate(prevProps){
if(this.props.visible && !prevProps.visible){
let row = { ...this.props.row }
if(!row.type){
row.type = 4
row.name = '谢谢参与'
}
this.setState({
type:row.type
},()=>{
this.props.form.setFieldsValue(row)
})
let {img_path} = this.props.row
if(!img_path) img_path = imgMap[row.type]
if(img_path){
this.setState({
fileList:[{
uid: '-1',
name: img_path,
status: 'done',
url: img_path
}]
})
}else{
this.setState({
fileList:[]
})
}
}
}
validatorByProbability= (rule,value,callback) =>{
if(this.props.probability < 0){
callback(`中奖概率之和不能大于100`)
}else{
callback()
}
}
onProbabilityChange = (value) =>{
let {row,probabilityChange} = this.props
let o = {...row}
o.probability = value
probabilityChange && probabilityChange(o)
}
typeChange=(type)=>{
this.props.form.resetFields()
this.onProbabilityChange(this.props.row.probability)
this.setState({
type
},()=>{
let name = ''
if(type === 4) name= '谢谢参与'
this.props.form.setFieldsValue({name})
let img_path = imgMap[type]
let fileList = []
if(img_path){
fileList = [{
uid: '-1',
name: img_path,
status: 'done',
url: img_path
}]
}
this.setState({fileList})
})
}
prizeChange = (value) => {
const {type} = this.state
const {setFieldsValue} = this.props.form
let name = ''
switch(type){
case 1:
name = `${value}积分`
break
case 3:
name = `${value}元红包`
break
default:
return
}
setFieldsValue({name})
}
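// Naming sketch: with type === 1, prizeChange(5) sets name to "5积分"; with
// type === 3 it sets "5元红包"; any other type returns before setFieldsValue runs.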
render() {
const formItemLayout = {
labelCol: {span: 5},
wrapperCol: {span: 18},
}
const { visible , probability, from} = this.props
const { getFieldDecorator } = this.props.form
const {type} = this.state
let TYPES = ACTIVIT_TYPE.filter(i=>{
if(i.value === 4) return true
return from.prize_type.indexOf(i.value) > -1
})
return <Modal
visible={visible}
title="编辑"
okText="确定"
cancelText="取消"
destroyOnClose
onCancel={this.onCancel}
onOk={this.onClick}
width={480}
>
<Form>
<FormItem label="奖品类型" {...formItemLayout}>
{getFieldDecorator('type', {
rules:[
{required:true,message:'请选择奖品类型'}
],
initialValue:4
})(
<Select
placeholder='请选择奖品类型'
onChange={this.typeChange}
getPopupContainer={triggerNode => triggerNode.parentNode}
>
{
TYPES.map((item) => {
return <Option key={item.value} value={item.value}>{item.label}</Option>
})
}
</Select>
)}
</FormItem>
{type === 1||type === 3 ? <FormItem label="奖品面额" {...formItemLayout}>
{getFieldDecorator('prize_value', {
rules:[
{required:true,message:'请输入奖品面额'}
]
})(
<InputNumber
min={1}
max={type===1?99999:200}
onChange={this.prizeChange}
step={1}
precision={type===1?0:2}
style={{width:'100%'}}
placeholder={`${type===1?'单位:积分':'1.00~200元'}`} />
)}
</FormItem>: null}
<FormItem label="奖品名称" {...formItemLayout}>
{getFieldDecorator('name', {
rules:[
{required:true,message:'请输入奖品名称'}
]
})(
<HzInput maxLength={type===1?7:6} placeholder='请输入奖品名称' />
)}
</FormItem>
{type !== 4 ? <FormItem label="奖品数量" {...formItemLayout}>
{getFieldDecorator('number', {
rules:[
{required:true,message:'请输入奖品数量'}
]
})(
<InputNumber
min={1}
max={99999}
step={1}
precision={0}
style={{width:'100%'}}
placeholder='大于0正整数' />
)}
</FormItem>:null}
<FormItem label="中奖概率" {...formItemLayout}>
{getFieldDecorator('probability', {
validateTrigger:'onBlur',
rules:[
{required:true,message:'请输入中奖概率'},
{validator:this.validatorByProbability}
]
})(
<InputNumber
min={0}
step={1}
precision={0}
onChange={this.onProbabilityChange}
style={{width:'100%'}}
placeholder='请输入中奖概率' />
)}
<span style={{fontSize:12,color:'#9EA8B1',display:'block',marginTop: '-6px'}}>还剩{probability}%的中奖概率 </span>
</FormItem>
<UploadContainer {...this.props} getFile={this.getFile} fileList={this.state.fileList} />
</Form>
</Modal>
}
}
@connect(({shop_fitment}) => ({ shop_fitment }))
class UploadContainer extends React.Component {
state = {
fileList: [],
showUploadIcon: true,
previewVisible: false,
previewImage: ''
}
componentDidMount(){
this.props.dispatch({
type:'shop_fitment/getToken',
payload: {
type: 'image',
}
})
}
static getDerivedStateFromProps(nextProps, prevState) {
return {
fileList: nextProps.fileList,
showUploadIcon: nextProps.fileList.length === 0 || (nextProps.fileList[0] && nextProps.fileList[0].status !== 'done'),
previewImage:nextProps.fileList[0] && nextProps.fileList[0].url
}
}
handleCancel = () => {
this.setState({
previewVisible: false,
previewImage: ''
})
}
setShowUploadIcon = (status) => {
setTimeout(_ => {
this.setState({
showUploadIcon: status
})
}, 400)
}
handlePreview = (fileList) => {
if (fileList && fileList[0]) {
this.setState({
previewVisible: true,
previewImage: fileList[0].url
})
}
}
beforeUpload = (file, fileList) => {
const isJPG = file.type === 'image/jpeg' || file.type === 'image/png'
if (!isJPG) {
message.error('只能上传jpg、jpeg和png格式的图片!')
}
const isLt2M = file.size / 1024 <= 100
if (!isLt2M) {
message.error('图片大小不能超过100KB!')
}
const maxPic = this.state.fileList.length + fileList.length <= 1
if (!maxPic) {
message.error('最多只能上传1张图片!')
}
return isJPG && isLt2M && maxPic
}
handleChange = (info) => {
const { fileList } = info
const photoPrefix = this.props.shop_fitment.photoPrefix
if (info.file.status === 'uploading') {
this.props.getFile && this.props.getFile(fileList)
}
if (info.file.status === 'done') {
fileList.map((file) => {
if (file.response) {
file.url = `https://${photoPrefix}/${file.response.key}`
file.key = file.response.key
}
return file
})
this.props.getFile && this.props.getFile(fileList)
// this.setState({ fileList }, () => {
// this.setShowUploadIcon(fileList.length === 0)
// })
}
}
handleRemove = (file) => {
const { fileList } = this.state
for (let [i, v] of fileList.entries()) {
if (v.uid === file.uid) {
fileList.splice(i, 1)
this.props.getFile && this.props.getFile([])
// this.setState({ fileList, showUploadIcon: fileList.length === 0 }, () => {
this.props.form.validateFields(['images'], { force: true })
// })
return
}
}
}
validatorByImg = (rule, value, callback) =>{
const {fileList} = this.state
if(fileList.length && fileList[0].url){
callback()
}else{
callback('请上传图片')
}
}
render() {
const fileList = this.state.fileList
const photoToken = this.props.shop_fitment.photoToken
const formItemLayout = {
labelCol: {span: 5},
wrapperCol: {span: 18},
}
const uploadProps = {
name: 'file',
action: '//upload.qi | om/',
accept: ".jpg,.jpeg,.png",
headers: {},
data: {
token: photoToken,
},
listType: "picture-card",
multiple: true,
onPreview: () => this.handlePreview(fileList),
beforeUpload: this.beforeUpload,
onChange: this.handleChange,
onRemove: this.handleRemove,
fileList: fileList,
className: "avatar-uploader"
}
const { getFieldDecorator } = this.props.form
return <Form.Item label="奖励图片:" {...formItemLayout}
extra={<div style={{fontSize:12}}>支持.jpg/.jpeg/.png,建议上传尺寸120x120,大小控制在100KB以内</div>}
>
{getFieldDecorator("img_path", {
rules: [
{ required: true,validator: this.validatorByImg },
]
})(
<Upload {...uploadProps}>
{
this.state.showUploadIcon ? <div>
<Icon type='plus' style={{ fontSize: 32, color: '#9EA8B1' }} />
<div className="ant-upload-text">上传图片</div>
</div> : null
}
</Upload>
)}
<Modal visible={this.state.previewVisible} footer={null} onCancel={this.handleCancel}>
<img alt="" style={{ width: '100%' }} src={this.state.previewImage} />
</Modal>
</Form.Item>
}
}
| niup.c | identifier_name |
AwardEditModal.js | import React from "react"
import { Modal ,Form, Input,message, Select,Upload,Icon,InputNumber } from "antd"
import { connect } from 'dva'
import HzInput from '@/components/HzInput'
import { ACTIVIT_TYPE } from '../../../services/lottery_activity'
const FormItem = Form.Item
const Option = Select.Option
const imgMap = {
1:'https://image.51zan.com/2019/05/09/FkTuoJ2HaE4pkwNzDeB6ZcHrZkRE.png',
3:'https://image.51zan.com/2019/05/09/Fgky804m1N5W0R4oARqIRrtmQJ3O.png',
4:'https://image.51zan.com/2019/05/09/FjYQF6N2SVJFgHmquuu77kGuukBa.png'
}
@Form.create()
export default class extends React.Component {
state = {
fileList :[],
type:4
}
getFile = (fileList) => {
this.setState({
fileList
})
}
onCancel =()=>{
this.onProbabilityChange(this.props.row.probability)
this.props.form.resetFields()
this.props.onCancel && this.props.onCancel()
}
onClick = ()=>{
this.props.form.validateFields((err,values)=>{
if(!err){
values.img_path = this.state.fileList[0] && this.state.fileList[0].url
let row = this.props.row
row = {...this.props.row,...values}
row.probability = Number(row.probability)
this.props.onOk && this.props.onOk(row)
this.props.form.resetFields()
this.props.onCancel && this.props.onCancel()
}
})
}
componentDidUpdate(prevProps){
if(this.props.visible && !prevProps.visible){
let row = { ...this.props.row }
if(!row.type){
row.type = 4
row.name = '谢谢参与'
}
this.setState({
type:row.type
},()=>{
this.props.form.setFieldsValue(row)
})
let {img_path} = this.props.row
if(!img_path) img_path = imgMap[row.type]
if(img_path){
this.setState({
fileList:[{
uid: '-1',
name: img_path,
status: 'done',
url: img_path
}]
})
}else{
this.setState({
fileList:[]
})
}
}
}
validatorByProbability= (rule,value,callback) =>{
if(this.props.probability < 0){
|
}
}
onProbabilityChange = (value) =>{
let {row,probabilityChange} = this.props
let o = {...row}
o.probability = value
probabilityChange && probabilityChange(o)
}
typeChange=(type)=>{
this.props.form.resetFields()
this.onProbabilityChange(this.props.row.probability)
this.setState({
type
},()=>{
let name = ''
if(type === 4) name= '谢谢参与'
this.props.form.setFieldsValue({name})
let img_path = imgMap[type]
let fileList = []
if(img_path){
fileList = [{
uid: '-1',
name: img_path,
status: 'done',
url: img_path
}]
}
this.setState({fileList})
})
}
prizeChange = (value) => {
const {type} = this.state
const {setFieldsValue} = this.props.form
let name = ''
switch(type){
case 1:
name = `${value}积分`
break
case 3:
name = `${value}元红包`
break
default:
return
}
setFieldsValue({name})
}
render() {
const formItemLayout = {
labelCol: {span: 5},
wrapperCol: {span: 18},
}
const { visible , probability, from} = this.props
const { getFieldDecorator } = this.props.form
const {type} = this.state
let TYPES = ACTIVIT_TYPE.filter(i=>{
if(i.value === 4) return true
return from.prize_type.indexOf(i.value) > -1
})
return <Modal
visible={visible}
title="编辑"
okText="确定"
cancelText="取消"
destroyOnClose
onCancel={this.onCancel}
onOk={this.onClick}
width={480}
>
<Form>
<FormItem label="奖品类型" {...formItemLayout}>
{getFieldDecorator('type', {
rules:[
{required:true,message:'请选择奖品类型'}
],
initialValue:4
})(
<Select
placeholder='请选择奖品类型'
onChange={this.typeChange}
getPopupContainer={triggerNode => triggerNode.parentNode}
>
{
TYPES.map((item) => {
return <Option key={item.value} value={item.value}>{item.label}</Option>
})
}
</Select>
)}
</FormItem>
{type === 1||type === 3 ? <FormItem label="奖品面额" {...formItemLayout}>
{getFieldDecorator('prize_value', {
rules:[
{required:true,message:'请输入奖品面额'}
]
})(
<InputNumber
min={1}
max={type===1?99999:200}
onChange={this.prizeChange}
step={1}
precision={type===1?0:2}
style={{width:'100%'}}
placeholder={`${type===1?'单位:积分':'1.00~200元'}`} />
)}
</FormItem>: null}
<FormItem label="奖品名称" {...formItemLayout}>
{getFieldDecorator('name', {
rules:[
{required:true,message:'请输入奖品名称'}
]
})(
<HzInput maxLength={type===1?7:6} placeholder='请输入奖品名称' />
)}
</FormItem>
{type !== 4 ? <FormItem label="奖品数量" {...formItemLayout}>
{getFieldDecorator('number', {
rules:[
{required:true,message:'请输入奖品数量'}
]
})(
<InputNumber
min={1}
max={99999}
step={1}
precision={0}
style={{width:'100%'}}
placeholder='大于0正整数' />
)}
</FormItem>:null}
<FormItem label="中奖概率" {...formItemLayout}>
{getFieldDecorator('probability', {
validateTrigger:'onBlur',
rules:[
{required:true,message:'请输入中奖概率'},
{validator:this.validatorByProbability}
]
})(
<InputNumber
min={0}
step={1}
precision={0}
onChange={this.onProbabilityChange}
style={{width:'100%'}}
placeholder='请输入中奖概率' />
)}
<span style={{fontSize:12,color:'#9EA8B1',display:'block',marginTop: '-6px'}}>还剩{probability}%的中奖概率 </span>
</FormItem>
<UploadContainer {...this.props} getFile={this.getFile} fileList={this.state.fileList} />
</Form>
</Modal>
}
}
@connect(({shop_fitment}) => ({ shop_fitment }))
class UploadContainer extends React.Component {
state = {
fileList: [],
showUploadIcon: true,
previewVisible: false,
previewImage: ''
}
componentDidMount(){
this.props.dispatch({
type:'shop_fitment/getToken',
payload: {
type: 'image',
}
})
}
static getDerivedStateFromProps(nextProps, prevState) {
return {
fileList: nextProps.fileList,
showUploadIcon: nextProps.fileList.length === 0 || (nextProps.fileList[0] && nextProps.fileList[0].status !== 'done'),
previewImage:nextProps.fileList[0] && nextProps.fileList[0].url
}
}
handleCancel = () => {
this.setState({
previewVisible: false,
previewImage: ''
})
}
setShowUploadIcon = (status) => {
setTimeout(_ => {
this.setState({
showUploadIcon: status
})
}, 400)
}
handlePreview = (fileList) => {
if (fileList && fileList[0]) {
this.setState({
previewVisible: true,
previewImage: fileList[0].url
})
}
}
beforeUpload = (file, fileList) => {
const isJPG = file.type === 'image/jpeg' || file.type === 'image/png'
if (!isJPG) {
message.error('只能上传jpg、jpeg和png格式的图片!')
}
const isLt2M = file.size / 1024 <= 100
if (!isLt2M) {
message.error('图片大小不能超过100KB!')
}
const maxPic = this.state.fileList.length + fileList.length <= 1
if (!maxPic) {
message.error('最多只能上传1张图片!')
}
return isJPG && isLt2M && maxPic
}
handleChange = (info) => {
const { fileList } = info
const photoPrefix = this.props.shop_fitment.photoPrefix
if (info.file.status === 'uploading') {
this.props.getFile && this.props.getFile(fileList)
}
if (info.file.status === 'done') {
fileList.map((file) => {
if (file.response) {
file.url = `https://${photoPrefix}/${file.response.key}`
file.key = file.response.key
}
return file
})
this.props.getFile && this.props.getFile(fileList)
// this.setState({ fileList }, () => {
// this.setShowUploadIcon(fileList.length === 0)
// })
}
}
handleRemove = (file) => {
const { fileList } = this.state
for (let [i, v] of fileList.entries()) {
if (v.uid === file.uid) {
fileList.splice(i, 1)
this.props.getFile && this.props.getFile([])
// this.setState({ fileList, showUploadIcon: fileList.length === 0 }, () => {
this.props.form.validateFields(['images'], { force: true })
// })
return
}
}
}
validatorByImg = (rule, value, callback) =>{
const {fileList} = this.state
if(fileList.length && fileList[0].url){
callback()
}else{
callback('请上传图片')
}
}
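// Validation sketch: the rule passes only when an uploaded file has a resolved URL,
// e.g. this.state.fileList = [{ uid: '-1', status: 'done', url: 'https://example.com/a.png' }].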
render() {
const fileList = this.state.fileList
const photoToken = this.props.shop_fitment.photoToken
const formItemLayout = {
labelCol: {span: 5},
wrapperCol: {span: 18},
}
const uploadProps = {
name: 'file',
action: '//upload.qiniup.com/',
accept: ".jpg,.jpeg,.png",
headers: {},
data: {
token: photoToken,
},
listType: "picture-card",
multiple: true,
onPreview: () => this.handlePreview(fileList),
beforeUpload: this.beforeUpload,
onChange: this.handleChange,
onRemove: this.handleRemove,
fileList: fileList,
className: "avatar-uploader"
}
const { getFieldDecorator } = this.props.form
return <Form.Item label="奖励图片:" {...formItemLayout}
extra={<div style={{fontSize:12}}>支持.jpg/.jpeg/.png,建议上传尺寸120x120,大小控制在100KB以内</div>}
>
{getFieldDecorator("img_path", {
rules: [
{ required: true,validator: this.validatorByImg },
]
})(
<Upload {...uploadProps}>
{
this.state.showUploadIcon ? <div>
<Icon type='plus' style={{ fontSize: 32, color: '#9EA8B1' }} />
<div className="ant-upload-text">上传图片</div>
</div> : null
}
</Upload>
)}
<Modal visible={this.state.previewVisible} footer={null} onCancel={this.handleCancel}>
<img alt="" style={{ width: '100%' }} src={this.state.previewImage} />
</Modal>
</Form.Item>
}
}
| callback(`中奖概率之和不能大于100`)
}else{
callback() | conditional_block |
PublicFunction.go | package public
import (
"crypto/md5"
"encoding/json"
"fmt"
"io"
"log"
"mime/multipart"
"net"
"net/http"
"os"
"reflect"
"sort"
"strconv"
"github.com/otiai10/copy"
//"strconv"
//"net/http/cookiejar"
"io/ioutil"
//"log"
//"path/filepath"
//"path"
"os/exec"
"path/filepath"
"strings"
"time"
//"github.com/kardianos/osext"
"archive/zip"
"bytes"
"encoding/binary"
//"github.com/tomasen/realip"
//"github.com/satori/go.uuid"
//"github.com/op/go-logging"
)
var IsShowLog = false
func GetRandom() string {
return GetUUIDS()
}
// Substr returns a substring of str: start is the starting rune index (negative counts back from the end), length is the number of runes to take
func Substr(str string, start int, length int) string {
rs := []rune(str)
rl := len(rs)
end := 0
if start < 0 {
start = rl - 1 + start
}
end = start + length
if start > end {
start, end = end, start
}
if start < 0 {
start = 0
}
if start > rl {
start = rl
}
if end < 0 {
end = 0
}
if end > rl {
end = rl
}
return string(rs[start:end])
}
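// exampleSubstr is an illustrative sketch (not part of the original API):
// Substr works on runes, so a multibyte character counts as one position.
func exampleSubstr() {
_ = Substr("hello世界", 5, 2) // "世界": 2 runes starting at rune index 5
}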
func GetMd5(str string) string {
data := []byte(str)
has := md5.Sum(data)
md5str1 := fmt.Sprintf("%x", has) //将[]byte转成16进制
//Log("sign=" + md5str1)
return strings.ToUpper(md5str1)
}
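// exampleGetMd5 is an illustrative sketch: the digest is returned as uppercase hex
// ("abc" is the standard MD5 test vector).
func exampleGetMd5() {
_ = GetMd5("abc") // "900150983CD24FB0D6963F7D28E17F72"
}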
func Unzip(src_zip string) string {
// Derive the destination directory from the archive name
dest := strings.Split(src_zip, ".")[0]
// Open the zip archive
unzip_file, err := zip.OpenReader(src_zip)
if err != nil {
return "压缩包损坏"
}
// Create the destination directory
os.MkdirAll(dest, 0755)
// Extract each entry in the archive
for _, f := range unzip_file.File {
rc, err := f.Open()
if err != nil {
return "压缩包中文件损坏"
}
path := filepath.Join(dest, f.Name)
// Check whether the entry is a directory or a regular file
if f.FileInfo().IsDir() {
os.MkdirAll(path, f.Mode())
} else {
// Create the extracted file
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return "创建本地文件失败"
}
// Write the contents to disk
_, err = io.Copy(f, rc)
if err != nil {
if err != io.EOF {
return "写入本地失败"
}
}
f.Close()
}
}
unzip_file.Close()
return "OK"
}
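// Usage sketch (hypothetical path): Unzip("/tmp/bundle.zip") extracts into
// /tmp/bundle and returns "OK" on success, otherwise a short error message.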
func UnzipToest(src, dest string) error {
r, err := zip.OpenReader(src)
if err != nil {
return err
}
defer func() {
if err := r.Close(); err != nil {
Log(err)
}
}()
os.MkdirAll(dest, 0755)
// Closure to address file descriptors issue with all the deferred .Close() methods
extractAndWriteFile := func(f *zip.File) error {
rc, err := f.Open()
if err != nil {
return err
}
defer func() {
if err := rc.Close(); err != nil {
Log(err)
}
}()
path := filepath.Join(dest, f.Name)
if f.FileInfo().IsDir() {
os.MkdirAll(path, f.Mode())
} else {
os.MkdirAll(filepath.Dir(path), f.Mode())
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil {
Log(err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return err
}
}
return nil
}
for _, f := range r.File {
err := extractAndWriteFile(f)
if err != nil {
return err
}
}
return nil
}
func GetCurDir() string {
dir, _ := GetCurrentPath()
return dir
}
func GetCurrentPath() (dir string, err error) {
//path, err := filepath.Abs(filepath.Dir(os.Args[0]))
path, err := exec.LookPath(os.Args[0])
if err != nil {
Log("exec.LookPath(%s), err: %s\n", os.Args[0], err)
return "", err
}
absPath, err := filepath.Abs(path)
if err != nil {
Log("filepath.Abs(%s), err: %s\n", path, err)
return "", err
}
dir = filepath.Dir(absPath)
return dir, nil
}
func GetCurRunPath() string {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return ""
}
return dir
}
func ExistsPath(fullpath string) bool {
//dir, _ := GetCurrentPath() //os.Getwd() // current directory
//fullpath := dir + "/" + path
_, err := os.Stat(fullpath)
//Log("fullpath==" + fullpath)
return err == nil || os.IsExist(err)
}
func CreatePath(fullpath string) {
//dir, _ := GetCurrentPath() //os.Getwd() // current directory
//fullpath := dir + "/" + newPath
//fullpath = strings.Replace(fullpath, "/", "\\", -1)
//fullpath = strings.Replace(fullpath, " ", "", -1)
//newPath = strings.Replace(newPath, " ", "", -1)
_, errr := os.Stat(fullpath)
if errr != nil && os.IsNotExist(errr) {
//Log(ff, fullpath+"文件不存在 创建") //为什么打印nil 是这样的如果file不存在 返回f文件的指针是nil的 所以我们不能使用defer f.Close()会报错的
/*
var path string
if os.IsPathSeparator('\\') { //前边的判断是否是系统的分隔符
path = "\\"
} else {
path = "/"
}
*/
if err := os.MkdirAll(fullpath, 0777); err != nil {
if os.IsPermission(err) {
Log("你不够权限创建文件")
}
} else {
//Log("创建目录" + fullpath + "成功")
}
//err := os.Mkdir(fullpath, os.ModePerm) //在当前目录下生成md目录
//if err != nil {
// Log(err)
//}
} else {
//Log(ff, fullpath+"文件存在 ")
}
}
func SetCookie(r *http.Request, name string, value string) {
COOKIE_MAX_MAX_AGE := time.Hour * 24 / time.Second // unit: seconds
maxAge := int(COOKIE_MAX_MAX_AGE)
uid_cookie := &http.Cookie{
Name: name,
Value: value,
Path: "/",
HttpOnly: false,
MaxAge: maxAge}
r.AddCookie(uid_cookie)
}
func GetTotal(price string, num string) string {
fPrice, err1 := strconv.ParseFloat(price, 64)
fnum, err2 := strconv.ParseFloat(num, 64)
if err1 == nil && err2 == nil {
return fmt.Sprintf("%1.2f", fPrice*fnum)
}
return ""
}
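// exampleGetTotal is an illustrative sketch: the product is formatted with two decimals.
func exampleGetTotal() {
_ = GetTotal("19.90", "3") // "59.70"; unparsable input yields ""
}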
func RemovePath(path string) bool {
//Log("upload picture Task is running...")
//curdir := GetCurDir()
//fullPath := curdir + "/" + path + "/"
if ExistsPath(path) {
err := os.RemoveAll(path)
if err != nil {
Log("remove fail " + path)
return false
} else {
// deletion succeeded (file remove OK)
return true
}
} else {
return false
}
}
func RemoveFile(path string) bool {
//Log("upload picture Task is running...")
//curdir := GetCurDir()
//fullPath := curdir + "/" + path + "/"
if ExistsPath(path) {
err := os.Remove(path) // delete the file
if err != nil {
Log("remove fail " + path)
return false
} else {
// deletion succeeded (file remove OK)
return true
}
} else {
return false
}
}
func SavePictureTask(res http.ResponseWriter, req *http.Request, path stri | e()会报错的
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
filename := hdr.Filename
if strings.Contains(strings.ToLower(filename), ".mp3") || strings.Contains(strings.ToLower(filename), ".mov") {
// Audio/video files are saved directly to the Picture folder rather than the temp folder
path = "Picture/" + userid + "/" + typeid
CreatePath(curdir + "/" + path + "/")
}
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// 32K buffer copy
//var written int64
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
//CreatePath(curdir + "/" + path + "/thumbnial")
//ImageFile_resize(infile, curdir+"/"+path+"/thumbnial/"+hdr.Filename, 200, 200)
fileNames += "," + hdr.Filename
//outfile.Close()
//res.Write([]byte("uploaded file:" + hdr.Filename + ";length:" + strconv.Itoa(int(written))))
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func SaveConfigTask(res http.ResponseWriter, req *http.Request, path string, filename string) string {
//Log("upload picture Task is running...")
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
Log(ff, ""+path+"文件不存在,创建") //为什么打印nil 是这样的如果file不存在 返回f文件的指针是nil的 所以我们不能使用defer f.Close()会报错的
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
//filename := hdr.Filename
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// 32K buffer copy
//var written int64
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
//CreatePath(curdir + "/" + path + "/thumbnial")
//ImageFile_resize(infile, curdir+"/"+path+"/thumbnial/"+hdr.Filename, 200, 200)
fileNames += "," + hdr.Filename
//outfile.Close()
//res.Write([]byte("uploaded file:" + hdr.Filename + ";length:" + strconv.Itoa(int(written))))
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func SaveUploadPictureTask(res http.ResponseWriter, req *http.Request, path string) string {
//Log("upload picture Task is running...")
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
defer func() {
if err := recover(); err != nil {
Log("SaveUploadPictureTask")
Log(err)
}
}()
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
Log(ff, ""+path+"文件不存在,创建") //为什么打印nil 是这样的如果file不存在 返回f文件的指针是nil的 所以我们不能使用defer f.Close()会报错的
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
filename := hdr.Filename
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
// If a file with this name already exists, use a random file name instead
if ExistsPath(savePath) {
filename = GetRandomFileName(hdr.Filename)
savePath = curdir + "/" + path + "/" + filename
}
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// 32K buffer copy
//var written int64
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
//CreatePath(curdir + "/" + path + "/thumbnial")
//ImageFile_resize(infile, curdir+"/"+path+"/thumbnial/"+hdr.Filename, 200, 200)
fileNames += "," + filename
//outfile.Close()
//res.Write([]byte("uploaded file:" + hdr.Filename + ";length:" + strconv.Itoa(int(written))))
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func GetRandomFileName(name string) string {
//name := hdr.Filename
arr := strings.Split(name, ".")
extent := arr[len(arr)-1]
return GetRandom() + "." + extent
}
func CopyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
Log(err.Error())
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
Log(err.Error())
return err
}
defer out.Close()
_, err = io.Copy(out, in)
if err != nil {
Log(err.Error())
return err
}
if ExistsPath(dst) {
//Log("copy success" + dst)
}
return out.Close()
}
// CopyFiles copies the source path to the dest path using "github.com/otiai10/copy"
func CopyFiles(source, dest string) bool {
if source == "" || dest == "" {
Log("source or dest is null")
return false
}
err := copy.Copy(source, dest)
if err == nil {
return true
} else {
return false
}
}
func NetWorkStatus() bool {
cmd := exec.Command("ping", "baidu.com", "-c", "1", "-W", "5")
fmt.Println("NetWorkStatus Start:", time.Now().Unix())
err := cmd.Run()
fmt.Println("NetWorkStatus End :", time.Now().Unix())
if err != nil {
fmt.Println(err.Error())
return false
} else {
fmt.Println("Net Status , OK")
}
return true
}
func GetMapByJsonStr(jsonstr string) map[string]interface{} {
if !strings.Contains(jsonstr, "{") {
Log("bad json=" + jsonstr)
return nil
}
jsonstr = strings.Replace(jsonstr, "\x00", "", -1)
if len(jsonstr) > 4 {
var d map[string]interface{}
err := json.Unmarshal([]byte(jsonstr), &d)
if err != nil {
log.Printf("error decoding sakura response: %v", err)
if e, ok := err.(*json.SyntaxError); ok {
log.Printf("syntax error at byte offset %d", e.Offset)
}
//log.Printf("sakura response: %q", resBody)
Log("bad json" + jsonstr)
Log(err)
//panic("bad json")
return nil
}
return d
}
return nil
}
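// exampleGetMapByJsonStr is an illustrative sketch: encoding/json decodes JSON
// numbers into float64 when the destination is interface{}.
func exampleGetMapByJsonStr() {
m := GetMapByJsonStr(`{"age": 25}`)
_ = m["age"].(float64) // 25
}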
func GetMessageMapByJsonKey(jsonstr string, keystr string) map[string]interface{} {
//var jsonstr:='{\"data\": { \"mes\": [ {\"fromuserid\": \"25\", \"touserid\": \"56\",\"message\": \"hhhhhaaaaaa\",\"time\": \"2017-12-12 12:11:11\"}]}}';
index := strings.IndexRune(jsonstr, '{')
jsonstr = jsonstr[index : len(jsonstr)-index]
if len(jsonstr) > 4 && strings.Index(jsonstr, "{") > -1 && strings.Index(jsonstr, "}") > -1 {
mapp := GetMapByJsonStr(jsonstr)
//Log(mapp)
mappp := mapp[keystr]
//Log(mappp)
//kll := mapp.(map[string]interface{})[keystr]
//Log(kll)
mymap := mappp.(map[string]interface{})
//Log(mymap["Fromuserid"])
return mymap
}
return nil
}
func GetMessageMapByJson(jsonstr string) map[string]interface{} {
//var jsonstr:='{\"data\": { \"mes\": [ {\"fromuserid\": \"25\", \"touserid\": \"56\",\"message\": \"hhhhhaaaaaa\",\"time\": \"2017-12-12 12:11:11\"}]}}';
index := strings.IndexRune(jsonstr, '{')
jsonstr = jsonstr[index : len(jsonstr)-index]
if len(jsonstr) > 4 && strings.Index(jsonstr, "{") > -1 && strings.Index(jsonstr, "}") > -1 {
mapp := GetMapByJsonStr(jsonstr)
//Log(mapp)
mappp := mapp["data"]
//Log(mappp)
kll := mappp.(map[string]interface{})["mes"]
//Log(kll)
mymap := kll.(map[string]interface{})
//Log(mymap["fromuserid"])
return mymap
}
return nil
}
func GetJsonStrByMap(MapList map[int]map[string]string) string {
var str string = "##"
sorted_keys := make([]int, 0)
for k, _ := range MapList {
sorted_keys = append(sorted_keys, k)
}
// sort 'string' key in increasing order
sort.Ints(sorted_keys)
for _, k := range sorted_keys {
//fmt.Printf("k=%v, v=%v\n", k, MapList[k])
jsonStr, err := json.Marshal(MapList[k])
if err != nil {
Log(err)
}
//Log("map to json", string(str))
str += "," + string(jsonStr)
}
str = strings.Replace(str, "##,", "", -1)
str = strings.Replace(str, "##", "", -1)
return str
}
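// Hedged usage sketch: GetJsonStrByMap emits one JSON object per map entry,
// ordered by the integer key. The literal below is a hypothetical input.
func exampleJsonStrByMap() {
	rows := map[int]map[string]string{2: {"id": "2"}, 1: {"id": "1"}}
	fmt.Println(GetJsonStrByMap(rows)) // {"id":"1"},{"id":"2"}
}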
func ConverToStr(v interface{}) string {
if v == nil {
return ""
}
	// Note: string(v.(int)) yields the Unicode code point for that value
	// (e.g. string(65) == "A"), not its decimal digits, hence strconv here.
	var str string = ""
	switch reflect.TypeOf(v).Kind() {
	case reflect.String:
		str = v.(string)
	case reflect.Int:
		str = strconv.Itoa(v.(int))
	case reflect.Int8:
		str = strconv.FormatInt(int64(v.(int8)), 10)
	case reflect.Int16:
		str = strconv.FormatInt(int64(v.(int16)), 10)
	case reflect.Int32:
		str = strconv.FormatInt(int64(v.(int32)), 10)
	case reflect.Int64:
		str = strconv.FormatInt(v.(int64), 10)
	case reflect.Float32, reflect.Float64:
		str = fmt.Sprintf("%f", v)
	default:
		// avoid the unchecked v.(string) assertion, which panicked for other kinds
		str = fmt.Sprintf("%v", v)
	}
return strings.Replace(str, ".000000", "", -1)
}
func GetCurDateTime() string {
return time.Now().Format("2006-01-02 15:04:05")
}
func GetCurDay() string {
return time.Now().Format("2006-01-02")
}
func GetNameSinceNow(after int) string {
day := time.Now().AddDate(0, 0, after).Format("2006-01-02")
day = strings.Replace(day, "-", "", -1)
return day
}
func GetDaySinceNow(after int) string {
return time.Now().AddDate(0, 0, after).Format("2006-01-02")
}
func ReplaceStr(str string) string {
	//naive anti-SQL-injection filter: strips quotes and "exec"; parameterized queries are the real protection
str = strings.Replace(str, "'", "", -1)
//str = strings.Replace(str, "-", "\\-", -1)
str = strings.Replace(str, "exec", "exe.c", -1)
	return str //.Replace(str, ",", "", -1).Replace(str, "-", "\-", -1) // -1 means replace all occurrences
}
var logfile *os.File
var oldFileName string
func Log(a ...interface{}) (n int, err error) {
//log.SetFlags(log.LstdFlags | log.Lshortfile)
log.Println(a...)
return 1, nil
}
// GetLocalIP returns the non loopback local IP of the host
func GetLocalIP22() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, address := range addrs {
	// check the address type; if it is not a loopback address, consider it
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
ipstr := ipnet.IP.String()
index := strings.Index(ipstr, "127.0")
if index > -1 {
continue
}
index = strings.Index(ipstr, "192.168.")
if index > -1 {
return ipstr
}
index = strings.Index(ipstr, "169.254.")
if index > -1 {
continue
}
return ipstr
}
}
}
return ""
}
func GetLocalIP() string {
conn, err := net.Dial("udp", "8.8.8.8:80")
if err == nil {
defer conn.Close()
localAddr := conn.LocalAddr().(*net.UDPAddr)
return localAddr.IP.String()
} else {
return GetLocalIPP()
}
}
func GetLocalIPP() string {
//GetIpList()
var ipstr string = ""
	//Windows: resolve IPs from the hostname
host, _ := os.Hostname()
addrss, err := net.LookupIP(host)
if err != nil {
Log("error", err.Error())
//return ""
}
var ipArray []string
for _, addr := range addrss {
if ipv4 := addr.To4(); ipv4 != nil {
Log("ippppp=: ", ipv4)
ipstr = ipv4.String()
if !strings.HasPrefix(ipstr, "127.0") && !strings.HasPrefix(ipstr, "169.254") && !strings.HasPrefix(ipstr, "172.16") {
ipArray = append(ipArray, ipstr)
}
}
}
	//prefer a public IP
//var pubIpArray []string
for i := 0; i < len(ipArray); i++ {
//Log("pubip===" + ipArray[i])
if !strings.HasPrefix(ipArray[i], "10.") && !strings.HasPrefix(ipArray[i], "192.168") && !strings.HasPrefix(ipArray[i], "172.") {
return ipArray[i]
//pubIpArray = append(pubIpArray, ipstr)
}
}
	//no public IP found, fall back to a LAN IP
if len(ipArray) > 0 {
return ipArray[0]
}
	//Linux: walk the network interfaces
if ipstr == "" {
ifaces, errr := net.Interfaces()
// handle err
if errr != nil {
Log("error", errr.Error())
return ""
}
for _, i := range ifaces {
addrs, _ := i.Addrs()
// handle err
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
// process IP address
//Log("ip=", ip)
ipstr = fmt.Sprintf("%s", ip)
Log("ipstr=", ipstr)
index := strings.Index(ipstr, "127.0")
if index > -1 {
continue
}
index = strings.Index(ipstr, "192.168.")
if index > -1 {
return ipstr
}
index = strings.Index(ipstr, "169.254.")
if index > -1 {
continue
}
if len(ipstr) > 6 {
array := strings.Split(ipstr, ".")
if len(array) == 4 {
return ipstr
}
}
}
}
}
return ""
}
func HttpPost(url string, paras string) string {
//Log("url=" + url + " paras=" + paras)
client := &http.Client{}
req, err := http.NewRequest("POST",
url,
strings.NewReader(paras))
if err != nil {
// handle error
return ""
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
//req.Header.Set("Cookie", "name=anny")
	resp, err := client.Do(req)
	if err != nil {
		// on error resp is nil, so the old unconditional defer resp.Body.Close() panicked
		return ""
	}
	defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
// handle error
return ""
}
//Log(string(body))
return string(body)
}
func HttpGet(url string) string {
//Log("get =" + url)
resp, err := http.Get(url)
if err != nil {
// handle error
Log(err.Error())
return ""
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
// handle error
Log(err.Error())
return ""
}
//Log("response =" + string(body))
return string(body)
}
func HttpDownloadFile(url string, toPath string) {
//Log("get =" + url)
res, err := http.Get(url)
if err != nil {
Log(err)
return
}
	defer res.Body.Close()
	f, err := os.Create(toPath)
	if err != nil {
		Log(err)
		return
	}
	defer f.Close() // deferred only after the error check: f is nil on failure
io.Copy(f, res.Body)
//Log("size =" + size)
}
//IntToBytes converts an int to a 4-byte big-endian slice
func IntToBytes(n int) []byte {
tmp := int32(n)
bytesBuffer := bytes.NewBuffer([]byte{})
binary.Write(bytesBuffer, binary.BigEndian, tmp)
return bytesBuffer.Bytes()
}
//BytesToInt converts a 4-byte big-endian slice back to an int
func BytesToInt(b []byte) int {
bytesBuffer := bytes.NewBuffer(b)
var tmp int32
binary.Read(bytesBuffer, binary.BigEndian, &tmp)
return int(tmp)
}
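// Minimal round-trip check for the two converters above; 2024 is an arbitrary
// value. Ints beyond the int32 range would be truncated, since both directions
// go through int32.
func exampleIntBytesRoundTrip() {
	b := IntToBytes(2024)
	fmt.Println(len(b), BytesToInt(b)) // 4 2024
}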
func RealIPHand(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
if rip := RealIP(r); rip != "" {
r.RemoteAddr = rip
}
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
var xForwardedFor2 = http.CanonicalHeaderKey("x-forwarded-for")
var xRealIP = http.CanonicalHeaderKey("X-Real-IP")
var xRealIP2 = http.CanonicalHeaderKey("x-real-ip")
var xRealIP3 = http.CanonicalHeaderKey("x-real-client-ip")
var ProxyClientIP = http.CanonicalHeaderKey("Proxy-Client-IP")
var WLProxyClientIP = http.CanonicalHeaderKey("WL-Proxy-Client-IP")
var HTTPXFORWARDEDFOR = http.CanonicalHeaderKey("HTTP_X_FORWARDED_FOR")
func RealIP(r *http.Request) string {
PrintHead(r)
var ip string
//clientIP := realip.FromRequest(r)
//log.Println("GET / from", clientIP)
if xff := r.Header.Get(xForwardedFor); xff != "" {
//Log(xff)
i := strings.Index(xff, ", ")
if i == -1 {
i = len(xff)
}
ip = xff[:i]
} else if xff := r.Header.Get(xForwardedFor2); xff != "" {
//Log(xff)
i := strings.Index(xff, ", ")
if i == -1 {
i = len(xff)
}
ip = xff[:i]
} else if xrip := r.Header.Get(xRealIP); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(xRealIP2); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(xRealIP3); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(ProxyClientIP); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(WLProxyClientIP); xrip != "" {
ip = xrip
} else {
ip = r.RemoteAddr
}
return ip
//return realip.FromRequest(r)
}
func PrintHead(r *http.Request) {
realip := r.Header.Get(xForwardedFor)
if len(realip) == 0 {
realip = r.Header.Get("http_client_ip")
}
if len(realip) == 0 {
//Log(xRealIP)
realip = r.Header.Get(xRealIP)
}
if len(realip) == 0 {
//Log(ProxyClientIP)
realip = r.Header.Get(ProxyClientIP)
}
if len(realip) == 0 {
//Log(WLProxyClientIP)
realip = r.Header.Get(WLProxyClientIP)
}
if len(realip) == 0 {
//Log(HTTPXFORWARDEDFOR)
realip = r.Header.Get(HTTPXFORWARDEDFOR)
}
if len(realip) == 0 {
realip = r.RemoteAddr
}
//Log("ip=" + r.RemoteAddr)
//Log("realip=" + realip)
}
| ng, userid string, typeid string) string {
//Log("upload picture Task is running...")
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
Log(ff, ""+path+"文件不存在,创建") //为什么打印nil 是这样的如果file不存在 返回f文件的指针是nil的 所以我们不能使用defer f.Clos | identifier_body |
PublicFunction.go | package public
import (
"crypto/md5"
"encoding/json"
"fmt"
"io"
"log"
"mime/multipart"
"net"
"net/http"
"os"
"reflect"
"sort"
"strconv"
"github.com/otiai10/copy"
//"strconv"
//"net/http/cookiejar"
"io/ioutil"
//"log"
//"path/filepath"
//"path"
"os/exec"
"path/filepath"
"strings"
"time"
//"github.com/kardianos/osext"
"archive/zip"
"bytes"
"encoding/binary"
//"github.com/tomasen/realip"
//"github.com/satori/go.uuid"
//"github.com/op/go-logging"
)
var IsShowLog = false
func GetRandom() string {
return GetUUIDS()
}
//Substr extracts a rune-based substring: start is the starting index (negative counts back from the end), length is the rune count
func Substr(str string, start int, length int) string {
rs := []rune(str)
rl := len(rs)
end := 0
if start < 0 {
start = rl - 1 + start
}
end = start + length
if start > end {
start, end = end, start
}
if start < 0 {
start = 0
}
if start > rl {
start = rl
}
if end < 0 {
end = 0
}
if end > rl {
end = rl
}
return string(rs[start:end])
}
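// Quick sketch of Substr's rune-based clamping (illustrative, not in the
// original): indices count runes, so multi-byte characters are safe, and a
// negative start counts back from the last rune.
func exampleSubstr() {
	fmt.Println(Substr("hello,世界", 0, 7)) // "hello,世"
	fmt.Println(Substr("hello", -2, 2))    // "ll": start becomes 5-1-2 = 2
}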
func GetMd5(str string) string {
data := []byte(str)
has := md5.Sum(data)
	md5str1 := fmt.Sprintf("%x", has) // format the digest as a hex string
//Log("sign=" + md5str1)
return strings.ToUpper(md5str1)
}
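// Sanity sketch against the well-known MD5 test vector for "abc"; GetMd5
// upper-cases the hex digest.
func exampleGetMd5() {
	fmt.Println(GetMd5("abc")) // 900150983CD24FB0D6963F7D28E17F72
}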
func Unzip(src_zip string) string {
	// derive the destination directory from the archive name
	dest := strings.Split(src_zip, ".")[0]
	// open the zip archive
	unzip_file, err := zip.OpenReader(src_zip)
	if err != nil {
		return "archive is corrupted"
	}
	// create the destination directory
	os.MkdirAll(dest, 0755)
	// extract every entry in the archive
	for _, f := range unzip_file.File {
		rc, err := f.Open()
		if err != nil {
			return "a file inside the archive is corrupted"
		}
		path := filepath.Join(dest, f.Name)
		// directory entry or regular file?
		if f.FileInfo().IsDir() {
			os.MkdirAll(path, f.Mode())
		} else {
			// create the extracted file
			f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
			if err != nil {
				return "failed to create local file"
			}
			// write the contents to disk
			_, err = io.Copy(f, rc)
			if err != nil {
				if err != io.EOF {
					return "failed to write local file"
				}
			}
			f.Close()
		}
	}
	unzip_file.Close()
	return "OK"
}
func UnzipToest(src, dest string) error {
r, err := zip.OpenReader(src)
if err != nil {
return err
}
defer func() {
if err := r.Close(); err != nil {
Log(err)
}
}()
os.MkdirAll(dest, 0755)
// Closure to address file descriptors issue with all the deferred .Close() methods
extractAndWriteFile := func(f *zip.File) error {
rc, err := f.Open()
if err != nil {
return err
}
defer func() {
if err := rc.Close(); err != nil {
Log(err)
}
}()
path := filepath.Join(dest, f.Name)
if f.FileInfo().IsDir() {
os.MkdirAll(path, f.Mode())
} else {
os.MkdirAll(filepath.Dir(path), f.Mode())
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil {
Log(err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return err
}
}
return nil
}
for _, f := range r.File {
err := extractAndWriteFile(f)
if err != nil {
return err
}
}
return nil
}
func GetCurDir() string {
dir, _ := GetCurrentPath()
return dir
}
func GetCurrentPath() (dir string, err error) {
//path, err := filepath.Abs(filepath.Dir(os.Args[0]))
path, err := exec.LookPath(os.Args[0])
if err != nil {
		Log("exec.LookPath failed for", os.Args[0], ":", err) // Log wraps Println, so printf verbs were printed literally
return "", err
}
absPath, err := filepath.Abs(path)
if err != nil {
		Log("filepath.Abs failed for", path, ":", err)
return "", err
}
dir = filepath.Dir(absPath)
return dir, nil
}
func GetCurRunPath() string {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return ""
}
return dir
}
func ExistsPath(fullpath string) bool {
	//dir, _ := GetCurrentPath() //os.Getwd() // current directory
//fullpath := dir + "/" + path
_, err := os.Stat(fullpath)
//Log("fullpath==" + fullpath)
return err == nil || os.IsExist(err)
}
func CreatePath(fullpath string) {
	//dir, _ := GetCurrentPath() //os.Getwd() // current directory
//fullpath := dir + "/" + newPath
//fullpath = strings.Replace(fullpath, "/", "\\", -1)
//fullpath = strings.Replace(fullpath, " ", "", -1)
//newPath = strings.Replace(newPath, " ", "", -1)
_, errr := os.Stat(fullpath)
	if errr != nil && os.IsNotExist(errr) {
		//Log(ff, fullpath+" does not exist, creating it") // the returned pointer is nil when the path is missing, so a deferred f.Close() would panic
		/*
			var path string
			if os.IsPathSeparator('\\') { // does the platform use '\\' as its separator?
				path = "\\"
			} else {
				path = "/"
			}
		*/
		if err := os.MkdirAll(fullpath, 0777); err != nil {
			if os.IsPermission(err) {
				Log("insufficient permission to create the directory")
			}
		} else {
			//Log("created directory " + fullpath)
		}
		//err := os.Mkdir(fullpath, os.ModePerm) // create the directory under the current path
		//if err != nil {
		//	Log(err)
		//}
	} else {
		//Log(ff, fullpath+" already exists")
	}
}
func SetCookie(r *http.Request, name string, value string) {
	COOKIE_MAX_MAX_AGE := time.Hour * 24 / time.Second // unit: seconds
maxAge := int(COOKIE_MAX_MAX_AGE)
uid_cookie := &http.Cookie{
Name: name,
Value: value,
Path: "/",
HttpOnly: false,
MaxAge: maxAge}
r.AddCookie(uid_cookie)
}
func GetTotal(price string, num string) string {
fPrice, err1 := strconv.ParseFloat(price, 64)
fnum, err2 := strconv.ParseFloat(num, 64)
if err1 == nil && err2 == nil {
return fmt.Sprintf("%1.2f", fPrice*fnum)
}
return ""
}
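// GetTotal multiplies a price by a quantity, both passed as strings; the
// numbers below are hypothetical.
func exampleGetTotal() {
	fmt.Println(GetTotal("19.99", "3")) // "59.97"
	fmt.Println(GetTotal("oops", "3"))  // "" on any parse error
}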
func RemovePath(path string) bool {
//Log("upload picture Task is running...")
//curdir := GetCurDir()
//fullPath := curdir + "/" + path + "/"
if ExistsPath(path) {
err := os.RemoveAll(path)
if err != nil {
Log("remove fail " + path)
return false
} else {
			//removal succeeded
return true
}
} else {
return false
}
}
func RemoveFile(path string) bool {
//Log("upload picture Task is running...")
//curdir := GetCurDir()
//fullPath := curdir + "/" + path + "/"
if ExistsPath(path) {
		err := os.Remove(path) // delete the file
if err != nil {
Log("remove fail " + path)
return false
} else {
			//removal succeeded
return true
}
} else {
return false
}
}
func SavePictureTask(res http.ResponseWriter, req *http.Request, path string, userid string, typeid string) string {
//Log("upload picture Task is running...")
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
			Log(ff, path+" does not exist, creating it") // ff is nil when the path is missing, so never defer ff.Close() here
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
filename := hdr.Filename
if strings.Contains(strings.ToLower(filename), ".mp3") || strings.Contains(strings.ToLower(filename), ".mov") {
					//audio/video files are stored directly under Picture instead of the temp folder
path = "Picture/" + userid + "/" + typeid
CreatePath(curdir + "/" + path + "/")
}
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// 32K buffer copy
//var written int64
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
//CreatePath(curdir + "/" + path + "/thumbnial")
//ImageFile_resize(infile, curdir+"/"+path+"/thumbnial/"+hdr.Filename, 200, 200)
fileNames += "," + hdr.Filename
//outfile.Close()
//res.Write([]byte("uploaded file:" + hdr.Filename + ";length:" + strconv.Itoa(int(written))))
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func SaveConfigTask(res http.ResponseWriter, req *http.Request, path string, filename string) string {
//Log("upload picture Task is running...")
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
			Log(ff, path+" does not exist, creating it") // ff is nil when the path is missing, so never defer ff.Close() here
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
//filename := hdr.Filename
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// 32K buffer copy
//var written int64
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
//CreatePath(curdir + "/" + path + "/thumbnial")
//ImageFile_resize(infile, curdir+"/"+path+"/thumbnial/"+hdr.Filename, 200, 200)
fileNames += "," + hdr.Filename
//outfile.Close()
//res.Write([]byte("uploaded file:" + hdr.Filename + ";length:" + strconv.Itoa(int(written))))
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func SaveUploadPictureTask(res http.ResponseWriter, req *http.Request, path string) string {
//Log("upload picture Task is running...")
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
defer func() {
if err := recover(); err != nil {
Log("SaveUploadPictureTask")
Log(err)
}
}()
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
			Log(ff, path+" does not exist, creating it") // ff is nil when the path is missing, so never defer ff.Close() here
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
filename := hdr.Filename
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
				//if the file already exists, fall back to a random name
if ExistsPath(savePath) {
filename = GetRandomFileName(hdr.Filename)
savePath = curdir + "/" + path + "/" + filename
}
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// 32K buffer copy
//var written int64
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
//CreatePath(curdir + "/" + path + "/thumbnial")
//ImageFile_resize(infile, curdir+"/"+path+"/thumbnial/"+hdr.Filename, 200, 200)
fileNames += "," + filename
//outfile.Close()
//res.Write([]byte("uploaded file:" + hdr.Filename + ";length:" + strconv.Itoa(int(written))))
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func GetRandomFileName(name string) string {
//name := hdr.Filename
arr := strings.Split(name, ".")
extent := arr[len(arr)-1]
return GetRandom() + "." + extent
}
func CopyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
Log(err.Error())
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
Log(err.Error())
return err
}
defer out.Close()
_, err = io.Copy(out, in)
if err != nil {
Log(err.Error())
return err
}
if ExistsPath(dst) {
//Log("copy success" + dst)
}
return out.Close()
}
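// Usage sketch; the paths are hypothetical. Note CopyFile both defers
// out.Close() and returns out.Close(): the explicit call reports the flush
// error, and the deferred one then becomes a redundant second close.
func exampleCopyFile() {
	if err := CopyFile("a.txt", "b.txt"); err != nil {
		Log("copy failed:", err)
	}
}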
//CopyFiles copies a file or directory tree from source to dest via "github.com/otiai10/copy"
func CopyFiles(source, dest string) bool {
if source == "" || dest == "" {
Log("source or dest is null")
return false
}
err := copy.Copy(source, dest)
if err == nil {
return true
} else {
return false
}
}
func NetWorkStatus() bool {
	cmd := exec.Command("ping", "baidu.com", "-c", "1", "-W", "5") // -c/-W are Linux ping flags; Windows uses -n/-w
fmt.Println("NetWorkStatus Start:", time.Now().Unix())
err := cmd.Run()
fmt.Println("NetWorkStatus End :", time.Now().Unix())
if err != nil {
fmt.Println(err.Error())
return false
} else {
fmt.Println("Net Status , OK")
}
return true
}
func GetMapByJsonStr(jsonstr string) map[string]interface{} {
if !strings.Contains(jsonstr, "{") {
Log("bad json=" + jsonstr)
return nil
}
jsonstr = strings.Replace(jsonstr, "\x00", "", -1)
if len(jsonstr) > 4 {
var d map[string]interface{}
err := json.Unmarshal([]byte(jsonstr), &d)
if err != nil {
log.Printf("error decoding sakura response: %v", err)
if e, ok := err.(*json.SyntaxError); ok {
log.Printf("syntax error at byte offset %d", e.Offset)
}
//log.Printf("sakura response: %q", resBody)
Log("bad json" + jsonstr)
Log(err)
//panic("bad json")
return nil
}
return d
}
return nil
}
func GetMessageMapByJsonKey(jsonstr string, keystr string) map[string]interface{} {
//var jsonstr:='{\"data\": { \"mes\": [ {\"fromuserid\": \"25\", \"touserid\": \"56\",\"message\": \"hhhhhaaaaaa\",\"time\": \"2017-12-12 12:11:11\"}]}}';
	index := strings.IndexRune(jsonstr, '{')
	if index < 0 {
		return nil // no JSON object present; slicing with index -1 would panic
	}
	jsonstr = jsonstr[index:] // trim leading garbage; the old code also cut the tail by the same offset, corrupting valid JSON
if len(jsonstr) > 4 && strings.Index(jsonstr, "{") > -1 && strings.Index(jsonstr, "}") > -1 {
mapp := GetMapByJsonStr(jsonstr)
//Log(mapp)
mappp := mapp[keystr]
//Log(mappp)
//kll := mapp.(map[string]interface{})[keystr]
//Log(kll)
	mymap, _ := mappp.(map[string]interface{}) // comma-ok form: returns nil instead of panicking when keystr is absent or not an object
//Log(mymap["Fromuserid"])
return mymap
}
return nil
}
func GetMessageMapByJson(jsonstr string) map[string]interface{} {
//var jsonstr:='{\"data\": { \"mes\": [ {\"fromuserid\": \"25\", \"touserid\": \"56\",\"message\": \"hhhhhaaaaaa\",\"time\": \"2017-12-12 12:11:11\"}]}}';
	index := strings.IndexRune(jsonstr, '{')
	if index < 0 {
		return nil // no JSON object present; slicing with index -1 would panic
	}
	jsonstr = jsonstr[index:] // trim leading garbage; the old code also cut the tail by the same offset, corrupting valid JSON
if len(jsonstr) > 4 && strings.Index(jsonstr, "{") > -1 && strings.Index(jsonstr, "}") > -1 {
mapp := GetMapByJsonStr(jsonstr)
//Log(mapp)
mappp := mapp["data"]
//Log(mappp)
		kll := mappp.(map[string]interface{})["mes"]
		//Log(kll)
		// "mes" is a JSON array in the sample above, so asserting it straight to a
		// map panicked; take the first element instead (a behavioral judgment call)
		arr, ok := kll.([]interface{})
		if !ok || len(arr) == 0 {
			return nil
		}
		mymap, _ := arr[0].(map[string]interface{})
		//Log(mymap["fromuserid"])
		return mymap
}
return nil
}
func GetJsonStrByMap(MapList map[int]map[string]string) string {
var str string = "##"
sorted_keys := make([]int, 0)
for k, _ := range MapList {
sorted_keys = append(sorted_keys, k)
}
// sort 'string' key in increasing order
sort.Ints(sorted_keys)
for _, k := range sorted_keys {
//fmt.Printf("k=%v, v=%v\n", k, MapList[k])
jsonStr, err := json.Marshal(MapList[k])
if err != nil {
Log(err)
}
//Log("map to json", string(str))
str += "," + string(jsonStr)
}
str = strings.Replace(str, "##,", "", -1)
str = strings.Replace(str, "##", "", -1)
return str
}
func ConverToStr(v interface{}) string {
if v == nil {
return ""
}
	// Note: string(v.(int)) yields the Unicode code point for that value
	// (e.g. string(65) == "A"), not its decimal digits, hence strconv here.
	var str string = ""
	switch reflect.TypeOf(v).Kind() {
	case reflect.String:
		str = v.(string)
	case reflect.Int:
		str = strconv.Itoa(v.(int))
	case reflect.Int8:
		str = strconv.FormatInt(int64(v.(int8)), 10)
	case reflect.Int16:
		str = strconv.FormatInt(int64(v.(int16)), 10)
	case reflect.Int32:
		str = strconv.FormatInt(int64(v.(int32)), 10)
	case reflect.Int64:
		str = strconv.FormatInt(v.(int64), 10)
	case reflect.Float32, reflect.Float64:
		str = fmt.Sprintf("%f", v)
	default:
		// avoid the unchecked v.(string) assertion, which panicked for other kinds
		str = fmt.Sprintf("%v", v)
	}
return strings.Replace(str, ".000000", "", -1)
}
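// Behavior sketch for ConverToStr after the strconv fix above; a whole float
// collapses to its integer form because ".000000" is stripped.
func exampleConverToStr() {
	fmt.Println(ConverToStr(42))  // "42"
	fmt.Println(ConverToStr(3.0)) // "3" (".000000" stripped)
	fmt.Println(ConverToStr(nil)) // ""
}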
func GetCurDateTime() string {
return time.Now().Format("2006-01-02 15:04:05")
}
func GetCurDay() string {
return time.Now().Format("2006-01-02")
}
func GetNameSinceNow(after int) string {
day := time.Now().AddDate(0, 0, after).Format("2006-01-02")
day = strings.Replace(day, "-", "", -1)
return day
}
func GetDaySinceNow(after int) string {
return time.Now().AddDate(0, 0, after).Format("2006-01-02")
}
func ReplaceStr(str string) string {
	//naive anti-SQL-injection filter: strips quotes and "exec"; parameterized queries are the real protection
str = strings.Replace(str, "'", "", -1)
//str = strings.Replace(str, "-", "\\-", -1)
str = strings.Replace(str, "exec", "exe.c", -1)
	return str //.Replace(str, ",", "", -1).Replace(str, "-", "\-", -1) // -1 means replace all occurrences
}
var logfile *os.File
var oldFileName string
func Log(a ...interface{}) (n int, err error) {
//log.SetFlags(log.LstdFlags | log.Lshortfile)
log.Println(a...)
return 1, nil
}
// GetLocalIP returns the non loopback local IP of the host
func GetLocalIP22() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, address := range addrs {
	// check the address type; if it is not a loopback address, consider it
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
ipstr := ipnet.IP.String()
index := strings.Index(ipstr, "127.0")
if index > -1 {
continue
}
index = strings.Index(ipstr, "192.168.")
if index > -1 {
return ipstr
}
index = strings.Index(ipstr, "169.254.")
if index > -1 {
continue
}
return ipstr
}
}
}
return ""
}
func GetLocalIP() string {
conn, err := net.Dial("udp", "8.8.8.8:80")
if err == nil {
defer conn.Close()
localAddr := conn.LocalAddr().(*net.UDPAddr)
return localAddr.IP.String()
} else {
return GetLocalIPP()
}
}
func GetLocalIPP() string {
//GetIpList()
var ipstr string = ""
	//Windows: resolve IPs from the hostname
host, _ := os.Hostname()
addrss, err := net.LookupIP(host)
if err != nil {
Log("error", err.Error())
//return ""
}
var ipArray []string
for _, addr := range addrss {
if ipv4 := addr.To4(); ipv4 != nil {
Log("ippppp=: ", ipv4)
ipstr = ipv4.String()
if !strings.HasPrefix(ipstr, "127.0") && !strings.HasPrefix(ipstr, "169.254") && !strings.HasPrefix(ipstr, "172.16") {
ipArray = append(ipArray, ipstr)
}
}
}
	//prefer a public IP
//var pubIpArray []string
for i := 0; i < len(ipArray); i++ {
//Log("pubip===" + ipArray[i])
if !strings.HasPrefix(ipArray[i], "10.") && !strings.HasPrefix(ipArray[i], "192.168") && !strings.HasPrefix(ipArray[i], "172.") {
return ipArray[i]
//pubIpArray = append(pubIpArray, ipstr)
}
}
	//no public IP found, fall back to a LAN IP
if len(ipArray) > 0 {
return ipArray[0]
}
	//Linux: walk the network interfaces
if ipstr == "" {
ifaces, errr := net.Interfaces()
// handle err
if errr != nil {
Log("error", errr.Error())
return ""
}
for _, i := range ifaces {
addrs, _ := i.Addrs()
// handle err
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
// process IP address
//Log("ip=", ip)
ipstr = fmt.Sprintf("%s", ip)
Log("ipstr=", ipstr)
index := strings.Index(ipstr, "127.0")
if index > -1 {
continue
}
index = strings.Index(ipstr, "192.168.")
if index > -1 {
return ipstr
}
index = strings.Index(ipstr, "169.254.")
if index > -1 {
continue
}
if len(ipstr) > 6 {
array := strings.Split(ipstr, ".")
if len(array) == 4 {
return ipstr
}
}
}
}
}
return ""
}
func HttpPost(url string, paras string) string {
//Log("url=" + url + " paras=" + paras)
client := &http.Client{}
req, err := http.NewRequest("POST",
url,
strings.NewReader(paras))
if err != nil {
// handle error
return ""
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
//req.Header.Set("Cookie", "name=anny")
	resp, err := client.Do(req)
	if err != nil {
		// on error resp is nil, so the old unconditional defer resp.Body.Close() panicked
		return ""
	}
	defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
// handle error
return ""
}
//Log(string(body))
return string(body)
}
func HttpGet(url string) string {
//Log("get =" + url)
resp, err := http.Get(url)
if err != nil {
// handle error
Log(err.Error())
return ""
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
// handle error
Log(err.Error())
return ""
}
//Log("response =" + string(body))
return string(body)
}
func HttpDownloadFile(url string, toPath string) {
//Log("get =" + url)
res, err := http.Get(url)
if err != nil {
Log(err)
return
}
	defer res.Body.Close()
	f, err := os.Create(toPath)
	if err != nil {
		Log(err)
		return
	}
	defer f.Close() // deferred only after the error check: f is nil on failure
io.Copy(f, res.Body)
//Log("size =" + size)
}
//IntToBytes converts an int to a 4-byte big-endian slice
func IntToBytes(n int) []byte {
tmp := int32(n)
bytesBuffer := bytes.NewBuffer([]byte{})
binary.Write(bytesBuffer, binary.BigEndian, tmp)
return bytesBuffer.Bytes()
}
//BytesToInt converts a 4-byte big-endian slice back to an int
func BytesToInt(b []byte) int {
bytesBuffer := bytes.NewBuffer(b)
var tmp int32
binary.Read(bytesBuffer, binary.BigEndian, &tmp)
return int(tmp)
}
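// Round-trip sketch: the encoding is 4-byte big-endian, so ints beyond the
// int32 range would be truncated. 65536 is an arbitrary test value.
func exampleBytesRoundTrip() {
	fmt.Println(BytesToInt(IntToBytes(65536))) // 65536
}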
func RealIPHand(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
if rip := RealIP(r); rip != "" {
r.RemoteAddr = rip
}
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
var xForwardedFor2 = http.CanonicalHeaderKey("x-forwarded-for")
var xRealIP = http.CanonicalHeaderKey("X-Real-IP")
var xRealIP2 = http.CanonicalHeaderKey("x-real-ip")
var xRealIP3 = http.CanonicalHeaderKey("x-real-client-ip")
var ProxyClientIP = http.CanonicalHeaderKey("Proxy-Client-IP")
var WLProxyClientIP = http.CanonicalHeaderKey("WL-Proxy-Client-IP")
var HTTPXFORWARDEDFOR = http.CanonicalHeaderKey("HTTP_X_FORWARDED_FOR")
func RealIP(r *http.Request) string {
PrintHead(r)
var ip string
//clientIP := realip.FromRequest(r)
//log.Println("GET / from", clientIP)
if xff := r.Header.Get(xForwardedFor); xff != "" {
//Log(xff)
i := strings.Index(xff, ", ")
if i == -1 {
i = len(xff)
}
ip = xff[:i]
} else if xff := r.Header.Get(xForwardedFor2); xff != "" {
//Log(xff)
i := strings.Index(xff, ", ")
if i == -1 {
i = len(xff)
}
ip = xff[:i]
} else if xrip := r.Header.Get(xRealIP); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(xRealIP2); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(xRealIP3); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(ProxyClientIP); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(WLProxyClientIP); xrip != "" {
ip = xrip
} else {
ip = r.RemoteAddr
}
return ip
//return realip.FromRequest(r)
}
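// Hedged sketch of header precedence in RealIP: X-Forwarded-For wins and only
// the first (client) hop is kept. The request below is synthetic; 203.0.113.7
// is a TEST-NET documentation address.
func exampleRealIP() {
	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	req.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")
	fmt.Println(RealIP(req)) // "203.0.113.7"
}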
func PrintHead(r *http.Request) {
realip := r.Header.Get(xForwardedFor)
if len(realip) == 0 {
realip = r.Header.Get("http_client_ip")
}
if len(realip) == 0 {
//Log(xRealIP)
realip = r.Header.Get(xRealIP)
}
if len(realip) == 0 {
//Log(ProxyClientIP)
realip = r.Header.Get(ProxyClientIP)
}
if len(realip) == 0 {
//Log(WLProxyClientIP)
realip = r.Header.Get(WLProxyClientIP)
}
if len(realip) == 0 {
//Log(HTTPXFORWARDEDFOR)
realip = r.Header.Get(HTTPXFORWARDEDFOR)
}
if len(realip) == 0 {
realip = r.RemoteAddr
}
//Log("ip=" + r.RemoteAddr)
// | realip)
}
| Log("realip=" + | conditional_block |
PublicFunction.go | package public
import (
"crypto/md5"
"encoding/json"
"fmt"
"io"
"log"
"mime/multipart"
"net"
"net/http"
"os"
"reflect"
"sort"
"strconv"
"github.com/otiai10/copy"
//"strconv"
//"net/http/cookiejar"
"io/ioutil"
//"log"
//"path/filepath"
//"path"
"os/exec"
"path/filepath"
"strings"
"time"
//"github.com/kardianos/osext"
"archive/zip"
"bytes"
"encoding/binary"
//"github.com/tomasen/realip"
//"github.com/satori/go.uuid"
//"github.com/op/go-logging"
)
var IsShowLog = false
func GetRandom() string {
return GetUUIDS()
}
//Substr extracts a rune-based substring: start is the starting index (negative counts back from the end), length is the rune count
func Substr(str string, start int, length int) string {
rs := []rune(str)
rl := len(rs)
end := 0
if start < 0 {
start = rl - 1 + start
}
end = start + length
if start > end {
start, end = end, start
}
if start < 0 {
start = 0
}
if start > rl {
start = rl
}
if end < 0 {
end = 0
}
if end > rl {
end = rl
}
return string(rs[start:end])
}
func GetMd5(str string) string {
data := []byte(str)
has := md5.Sum(data)
	md5str1 := fmt.Sprintf("%x", has) // format the digest as a hex string
//Log("sign=" + md5str1)
return strings.ToUpper(md5str1)
}
func Unzip(src_zip string) string {
	// derive the destination directory from the archive name
	dest := strings.Split(src_zip, ".")[0]
	// open the zip archive
	unzip_file, err := zip.OpenReader(src_zip)
	if err != nil {
		return "archive is corrupted"
	}
	// create the destination directory
	os.MkdirAll(dest, 0755)
	// extract every entry in the archive
	for _, f := range unzip_file.File {
		rc, err := f.Open()
		if err != nil {
			return "a file inside the archive is corrupted"
		}
		path := filepath.Join(dest, f.Name)
		// directory entry or regular file?
		if f.FileInfo().IsDir() {
			os.MkdirAll(path, f.Mode())
		} else {
			// create the extracted file
			f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
			if err != nil {
				return "failed to create local file"
			}
			// write the contents to disk
			_, err = io.Copy(f, rc)
			if err != nil {
				if err != io.EOF {
					return "failed to write local file"
				}
			}
			f.Close()
		}
	}
	unzip_file.Close()
	return "OK"
}
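// Usage sketch: Unzip reports problems as strings rather than errors, so
// callers compare against "OK". "bundle.zip" is hypothetical; it is extracted
// into "bundle". Note the naive strings.Split on "." means paths with extra
// dots (e.g. "./bundle.zip") pick the wrong destination.
func exampleUnzip() {
	if msg := Unzip("bundle.zip"); msg != "OK" {
		Log("unzip failed:", msg)
	}
}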
func UnzipToest(src, dest string) error {
r, err := zip.OpenReader(src)
if err != nil {
return err
}
defer func() {
if err := r.Close(); err != nil {
Log(err)
}
}()
os.MkdirAll(dest, 0755)
// Closure to address file descriptors issue with all the deferred .Close() methods
extractAndWriteFile := func(f *zip.File) error {
rc, err := f.Open()
if err != nil {
return err
}
defer func() {
if err := rc.Close(); err != nil {
Log(err)
}
}()
path := filepath.Join(dest, f.Name)
if f.FileInfo().IsDir() {
os.MkdirAll(path, f.Mode())
} else {
os.MkdirAll(filepath.Dir(path), f.Mode())
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil {
Log(err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return err
}
}
return nil
}
for _, f := range r.File {
err := extractAndWriteFile(f)
if err != nil {
return err
}
}
return nil
}
func GetCurDir() string {
dir, _ := GetCurrentPath()
return dir
}
func GetCurrentPath() (dir string, err error) {
//path, err := filepath.Abs(filepath.Dir(os.Args[0]))
path, err := exec.LookPath(os.Args[0])
if err != nil {
		Log("exec.LookPath failed for", os.Args[0], ":", err) // Log wraps Println, so printf verbs were printed literally
return "", err
}
absPath, err := filepath.Abs(path)
if err != nil {
		Log("filepath.Abs failed for", path, ":", err)
return "", err
}
dir = filepath.Dir(absPath)
return dir, nil
}
func GetCurRunPath() string {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return ""
}
return dir
}
func ExistsPath(fullpath string) bool {
	//dir, _ := GetCurrentPath() //os.Getwd() // current directory
//fullpath := dir + "/" + path
_, err := os.Stat(fullpath)
//Log("fullpath==" + fullpath)
return err == nil || os.IsExist(err)
}
func CreatePath(fullpath string) {
	//dir, _ := GetCurrentPath() //os.Getwd() // current directory
//fullpath := dir + "/" + newPath
//fullpath = strings.Replace(fullpath, "/", "\\", -1)
//fullpath = strings.Replace(fullpath, " ", "", -1)
//newPath = strings.Replace(newPath, " ", "", -1)
_, errr := os.Stat(fullpath)
	if errr != nil && os.IsNotExist(errr) {
		//Log(ff, fullpath+" does not exist, creating it") // the returned pointer is nil when the path is missing, so a deferred f.Close() would panic
		/*
			var path string
			if os.IsPathSeparator('\\') { // does the platform use '\\' as its separator?
				path = "\\"
			} else {
				path = "/"
			}
		*/
		if err := os.MkdirAll(fullpath, 0777); err != nil {
			if os.IsPermission(err) {
				Log("insufficient permission to create the directory")
			}
		} else {
			//Log("created directory " + fullpath)
		}
		//err := os.Mkdir(fullpath, os.ModePerm) // create the directory under the current path
		//if err != nil {
		//	Log(err)
		//}
	} else {
		//Log(ff, fullpath+" already exists")
	}
}
func SetCookie(r *http.Request, name string, value string) {
	COOKIE_MAX_MAX_AGE := time.Hour * 24 / time.Second // unit: seconds
maxAge := int(COOKIE_MAX_MAX_AGE)
uid_cookie := &http.Cookie{
Name: name,
Value: value,
Path: "/",
HttpOnly: false,
MaxAge: maxAge}
r.AddCookie(uid_cookie)
}
func GetTotal(price string, num string) string {
fPrice, err1 := strconv.ParseFloat(price, 64)
fnum, err2 := strconv.ParseFloat(num, 64)
if err1 == nil && err2 == nil {
return fmt.Sprintf("%1.2f", fPrice*fnum)
}
return ""
}
func RemovePath(path string) bool {
//Log("upload picture Task is running...")
//curdir := GetCurDir()
//fullPath := curdir + "/" + path + "/"
if ExistsPath(path) {
err := os.RemoveAll(path)
if err != nil {
Log("remove fail " + path)
return false
} else {
			//removal succeeded
return true
}
} else {
return false
}
}
func RemoveFile(path string) bool {
//Log("upload picture Task is running...")
//curdir := GetCurDir()
//fullPath := curdir + "/" + path + "/"
if ExistsPath(path) {
		err := os.Remove(path) // delete the file
if err != nil {
Log("remove fail " + path)
return false
} else {
			//removal succeeded
return true
}
} else {
return false
}
}
func SavePictureTask(res http.ResponseWriter, req *http.Request, path string, userid string, typeid string) string {
//Log("upload picture Task is running...")
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
			Log(ff, path+" does not exist, creating it") // ff is nil when the path is missing, so never defer ff.Close() here
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
filename := hdr.Filename
if strings.Contains(strings.ToLower(filename), ".mp3") || strings.Contains(strings.ToLower(filename), ".mov") {
					//audio/video files are stored directly under Picture instead of the temp folder
path = "Picture/" + userid + "/" + typeid
CreatePath(curdir + "/" + path + "/")
}
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// 32K buffer copy
//var written int64
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
//CreatePath(curdir + "/" + path + "/thumbnial")
//ImageFile_resize(infile, curdir+"/"+path+"/thumbnial/"+hdr.Filename, 200, 200)
fileNames += "," + hdr.Filename
//outfile.Close()
//res.Write([]byte("uploaded file:" + hdr.Filename + ";length:" + strconv.Itoa(int(written))))
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func SaveConfigTask(res http.ResponseWriter, req *http.Request, path string, filename string) string {
//Log("upload picture Task is running...")
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
			Log(ff, path+" does not exist, creating it") // ff is nil when the path is missing, so never defer ff.Close() here
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
//filename := hdr.Filename
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// 32K buffer copy
//var written int64
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
//CreatePath(curdir + "/" + path + "/thumbnial")
//ImageFile_resize(infile, curdir+"/"+path+"/thumbnial/"+hdr.Filename, 200, 200)
fileNames += "," + hdr.Filename
//outfile.Close()
//res.Write([]byte("uploaded file:" + hdr.Filename + ";length:" + strconv.Itoa(int(written))))
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func SaveUploadPictureTask(res http.ResponseWriter, req *http.Request, path string) string {
//Log("upload picture Task is running...")
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
defer func() {
if err := recover(); err != nil {
Log("SaveUploadPictureTask")
Log(err)
}
}()
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
			Log(ff, path+" does not exist, creating it") // ff is nil when the path is missing, so never defer ff.Close() here
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
filename := hdr.Filename
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
				//if the file already exists, fall back to a random name
if ExistsPath(savePath) {
filename = GetRandomFileName(hdr.Filename)
savePath = curdir + "/" + path + "/" + filename
}
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// 32K buffer copy
//var written int64
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
//CreatePath(curdir + "/" + path + "/thumbnial")
//ImageFile_resize(infile, curdir+"/"+path+"/thumbnial/"+hdr.Filename, 200, 200)
fileNames += "," + filename
//outfile.Close()
//res.Write([]byte("uploaded file:" + hdr.Filename + ";length:" + strconv.Itoa(int(written))))
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func GetRandomFileName(name string) string {
//name := hdr.Filename
arr := strings.Split(name, ".")
extent := arr[len(arr)-1]
return GetRandom() + "." + extent
}
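// Sketch: only the extension survives; the base name is replaced by
// GetRandom() (assumed unique). "report.final.pdf" is a hypothetical input;
// the last dot segment is treated as the extension.
func exampleRandomName() {
	fmt.Println(GetRandomFileName("report.final.pdf")) // "<random>.pdf"
}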
func CopyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
Log(err.Error())
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
Log(err.Error())
return err
}
defer out.Close()
_, err = io.Copy(out, in)
if err != nil {
Log(err.Error())
return err
}
if ExistsPath(dst) {
//Log("copy success" + dst)
}
return out.Close()
}
//CopyFiles copies a file or directory tree from source to dest via "github.com/otiai10/copy"
func CopyFiles(source, dest string) bool {
if source == "" || dest == "" {
Log("source or dest is null")
return false
}
err := copy.Copy(source, dest)
if err == nil {
return true
} else {
return false
}
}
func NetWorkStatus() bool {
	cmd := exec.Command("ping", "baidu.com", "-c", "1", "-W", "5") // -c/-W are Linux ping flags; Windows uses -n/-w
fmt.Println("NetWorkStatus Start:", time.Now().Unix())
err := cmd.Run()
fmt.Println("NetWorkStatus End :", time.Now().Unix())
if err != nil {
fmt.Println(err.Error())
return false
} else {
fmt.Println("Net Status , OK")
}
return true
}
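// Hedged usage sketch: NetWorkStatus shells out to ping with Linux-style
// flags, so treat a false result as "no network or no ping binary available",
// not a definitive outage.
func exampleNetCheck() {
	if !NetWorkStatus() {
		Log("network unreachable (or ping unavailable)")
	}
}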
func GetMapByJsonStr(jsonstr string) map[string]interface{} {
if !strings.Contains(jsonstr, "{") {
Log("bad json=" + jsonstr)
return nil
}
jsonstr = strings.Replace(jsonstr, "\x00", "", -1)
if len(jsonstr) > 4 {
var d map[string]interface{}
err := json.Unmarshal([]byte(jsonstr), &d)
if err != nil {
log.Printf("error decoding sakura response: %v", err)
if e, ok := err.(*json.SyntaxError); ok {
log.Printf("syntax error at byte offset %d", e.Offset)
}
//log.Printf("sakura response: %q", resBody)
Log("bad json" + jsonstr)
Log(err)
//panic("bad json")
return nil
}
return d
}
return nil
}
func GetMessageMapByJsonKey(jsonstr string, keystr string) map[string]interface{} {
//var jsonstr:='{\"data\": { \"mes\": [ {\"fromuserid\": \"25\", \"touserid\": \"56\",\"message\": \"hhhhhaaaaaa\",\"time\": \"2017-12-12 12:11:11\"}]}}';
	index := strings.IndexRune(jsonstr, '{')
	if index < 0 {
		return nil // no JSON object present; slicing with index -1 would panic
	}
	jsonstr = jsonstr[index:] // trim leading garbage; the old code also cut the tail by the same offset, corrupting valid JSON
if len(jsonstr) > 4 && strings.Index(jsonstr, "{") > -1 && strings.Index(jsonstr, "}") > -1 {
mapp := GetMapByJsonStr(jsonstr)
//Log(mapp)
mappp := mapp[keystr]
//Log(mappp)
//kll := mapp.(map[string]interface{})[keystr]
//Log(kll)
	mymap, _ := mappp.(map[string]interface{}) // comma-ok form: returns nil instead of panicking when keystr is absent or not an object
//Log(mymap["Fromuserid"])
return mymap
}
return nil
}
func GetMessageMapByJson(jsonstr string) map[string]interface{} {
//var jsonstr:='{\"data\": { \"mes\": [ {\"fromuserid\": \"25\", \"touserid\": \"56\",\"message\": \"hhhhhaaaaaa\",\"time\": \"2017-12-12 12:11:11\"}]}}';
	index := strings.IndexRune(jsonstr, '{')
	if index < 0 {
		return nil // no JSON object present; slicing with index -1 would panic
	}
	jsonstr = jsonstr[index:] // trim leading garbage; the old code also cut the tail by the same offset, corrupting valid JSON
if len(jsonstr) > 4 && strings.Index(jsonstr, "{") > -1 && strings.Index(jsonstr, "}") > -1 {
mapp := GetMapByJsonStr(jsonstr)
//Log(mapp)
mappp := mapp["data"]
//Log(mappp)
		kll := mappp.(map[string]interface{})["mes"]
		//Log(kll)
		// "mes" is a JSON array in the sample above, so asserting it straight to a
		// map panicked; take the first element instead (a behavioral judgment call)
		arr, ok := kll.([]interface{})
		if !ok || len(arr) == 0 {
			return nil
		}
		mymap, _ := arr[0].(map[string]interface{})
		//Log(mymap["fromuserid"])
		return mymap
}
return nil
}
func GetJsonStrByMap(MapList map[int]map[string]string) string {
var str string = "##"
sorted_keys := make([]int, 0)
for k, _ := range MapList {
sorted_keys = append(sorted_keys, k)
}
// sort 'string' key in increasing order
sort.Ints(sorted_keys)
for _, k := range sorted_keys {
//fmt.Printf("k=%v, v=%v\n", k, MapList[k])
jsonStr, err := json.Marshal(MapList[k])
if err != nil {
Log(err)
}
//Log("map to json", string(str))
str += "," + string(jsonStr)
}
str = strings.Replace(str, "##,", "", -1)
str = strings.Replace(str, "##", "", -1)
return str
}
func ConverToStr(v interface{}) string {
if v == nil {
return ""
}
	// Note: string(v.(int)) yields the Unicode code point for that value
	// (e.g. string(65) == "A"), not its decimal digits, hence strconv here.
	var str string = ""
	switch reflect.TypeOf(v).Kind() {
	case reflect.String:
		str = v.(string)
	case reflect.Int:
		str = strconv.Itoa(v.(int))
	case reflect.Int8:
		str = strconv.FormatInt(int64(v.(int8)), 10)
	case reflect.Int16:
		str = strconv.FormatInt(int64(v.(int16)), 10)
	case reflect.Int32:
		str = strconv.FormatInt(int64(v.(int32)), 10)
	case reflect.Int64:
		str = strconv.FormatInt(v.(int64), 10)
	case reflect.Float32, reflect.Float64:
		str = fmt.Sprintf("%f", v)
	default:
		// avoid the unchecked v.(string) assertion, which panicked for other kinds
		str = fmt.Sprintf("%v", v)
	}
return strings.Replace(str, ".000000", "", -1)
}
func GetCurDateTime() string {
return time.Now().Format("2006-01-02 15:04:05")
}
func GetCurDay() string {
return time.Now().Format("2006-01-02")
}
func GetNameSinceNow(after int) string {
day := time.Now().AddDate(0, 0, after).Format("2006-01-02")
day = strings.Replace(day, "-", "", -1)
return day
}
func GetDaySinceNow(after int) string {
return time.Now().AddDate(0, 0, after).Format("2006-01-02")
}
func ReplaceStr(str string) string {
	//naive anti-SQL-injection filter: strips quotes and "exec"; parameterized queries are the real protection
str = strings.Replace(str, "'", "", -1)
//str = strings.Replace(str, "-", "\\-", -1)
str = strings.Replace(str, "exec", "exe.c", -1)
	return str //.Replace(str, ",", "", -1).Replace(str, "-", "\-", -1) // -1 means replace all occurrences
}
var logfile *os.File
var oldFileName string
func Log(a ...interface{}) (n int, err error) {
//log.SetFlags(log.LstdFlags | log.Lshortfile)
log.Println(a...)
return 1, nil
}
// GetLocalIP returns the non loopback local IP of the host
func GetLocalIP22() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, address := range addrs {
	// check the address type; if it is not a loopback address, consider it
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
ipstr := ipnet.IP.String()
index := strings.Index(ipstr, "127.0")
if index > -1 {
continue
}
index = strings.Index(ipstr, "192.168.")
if index > -1 {
return ipstr
}
index = strings.Index(ipstr, "169.254.")
if index > -1 {
continue
}
return ipstr
}
}
}
return ""
}
func GetLocalIP() string {
conn, err := net.Dial("udp", "8.8.8.8:80")
if err == nil {
defer conn.Close()
localAddr := conn.LocalAddr().(*net.UDPAddr)
return localAddr.IP.String()
} else {
return GetLocalIPP()
}
}
func GetLocalIPP() string {
//GetIpList()
var ipstr string = ""
	//Windows: resolve IPs from the hostname
host, _ := os.Hostname()
addrss, err := net.LookupIP(host)
if err != nil {
Log("error", err.Error())
//return ""
}
var ipArray []string
for _, addr := range addrss {
if ipv4 := addr.To4(); ipv4 != nil {
Log("ippppp=: ", ipv4)
ipstr = ipv4.String()
if !strings.HasPrefix(ipstr, "127.0") && !strings.HasPrefix(ipstr, "169.254") && !strings.HasPrefix(ipstr, "172.16") {
ipArray = append(ipArray, ipstr)
}
}
}
	//prefer a public IP
//var pubIpArray []string
for i := 0; i < len(ipArray); i++ {
//Log("pubip===" + ipArray[i])
if !strings.HasPrefix(ipArray[i], "10.") && !strings.HasPrefix(ipArray[i], "192.168") && !strings.HasPrefix(ipArray[i], "172.") {
return ipArray[i]
//pubIpArray = append(pubIpArray, ipstr)
}
}
	//no public IP found, fall back to a LAN IP
if len(ipArray) > 0 {
return ipArray[0]
}
	//Linux: walk the network interfaces
if ipstr == "" {
ifaces, errr := net.Interfaces()
// handle err
if errr != nil {
Log("error", errr.Error())
return ""
}
for _, i := range ifaces {
addrs, _ := i.Addrs()
// handle err
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
// process IP address
//Log("ip=", ip)
ipstr = fmt.Sprintf("%s", ip)
Log("ipstr=", ipstr)
index := strings.Index(ipstr, "127.0")
if index > -1 {
continue
}
index = strings.Index(ipstr, "192.168.")
if index > -1 {
return ipstr
}
index = strings.Index(ipstr, "169.254.")
if index > -1 {
continue
}
if len(ipstr) > 6 {
array := strings.Split(ipstr, ".")
if len(array) == 4 {
return ipstr
}
}
}
}
}
return ""
}
func HttpPost(url string, paras string) string {
//Log("url=" + url + " paras=" + paras)
client := &http.Client{}
req, err := http.NewRequest("POST",
url,
strings.NewReader(paras))
if err != nil {
// handle error
return ""
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
//req.Header.Set("Cookie", "name=anny")
	resp, err := client.Do(req)
	if err != nil {
		// on error resp is nil, so the old unconditional defer resp.Body.Close() panicked
		return ""
	}
	defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
// handle error
return ""
}
//Log(string(body))
return string(body)
}
func HttpGet(url string) string {
//Log("get =" + url)
resp, err := http.Get(url)
if err != nil {
// handle error
Log(err.Error())
return ""
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
// handle error
Log(err.Error())
return ""
}
//Log("response =" + string(body))
return string(body)
}
func HttpDownloadFile(url string, toPath string) {
//Log("get =" + url)
res, err := http.Get(url)
if err != nil {
Log(err)
return
}
	defer res.Body.Close()
	f, err := os.Create(toPath)
	if err != nil {
		Log(err)
		return
	}
	defer f.Close() // deferred only after the error check: f is nil on failure
io.Copy(f, res.Body)
//Log("size =" + size)
}
//IntToBytes converts an int to a 4-byte big-endian slice
func IntToBytes(n int) []byte {
tmp := int32(n)
bytesBuffer := bytes.NewBuffer([]byte{})
binary.Write(bytesBuffer, binary.BigEndian, tmp)
return bytesBuffer.Bytes()
}
//BytesToInt converts a 4-byte big-endian slice back to an int
func BytesToInt(b []byte) int {
bytesBuffer := bytes.NewBuffer(b) |
func RealIPHand(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
if rip := RealIP(r); rip != "" {
r.RemoteAddr = rip
}
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
var xForwardedFor2 = http.CanonicalHeaderKey("x-forwarded-for")
var xRealIP = http.CanonicalHeaderKey("X-Real-IP")
var xRealIP2 = http.CanonicalHeaderKey("x-real-ip")
var xRealIP3 = http.CanonicalHeaderKey("x-real-client-ip")
var ProxyClientIP = http.CanonicalHeaderKey("Proxy-Client-IP")
var WLProxyClientIP = http.CanonicalHeaderKey("WL-Proxy-Client-IP")
var HTTPXFORWARDEDFOR = http.CanonicalHeaderKey("HTTP_X_FORWARDED_FOR")
func RealIP(r *http.Request) string {
PrintHead(r)
var ip string
//clientIP := realip.FromRequest(r)
//log.Println("GET / from", clientIP)
if xff := r.Header.Get(xForwardedFor); xff != "" {
//Log(xff)
i := strings.Index(xff, ", ")
if i == -1 {
i = len(xff)
}
ip = xff[:i]
} else if xff := r.Header.Get(xForwardedFor2); xff != "" {
//Log(xff)
i := strings.Index(xff, ", ")
if i == -1 {
i = len(xff)
}
ip = xff[:i]
} else if xrip := r.Header.Get(xRealIP); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(xRealIP2); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(xRealIP3); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(ProxyClientIP); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(WLProxyClientIP); xrip != "" {
ip = xrip
} else {
ip = r.RemoteAddr
}
return ip
//return realip.FromRequest(r)
}
func PrintHead(r *http.Request) {
realip := r.Header.Get(xForwardedFor)
if len(realip) == 0 {
realip = r.Header.Get("http_client_ip")
}
if len(realip) == 0 {
//Log(xRealIP)
realip = r.Header.Get(xRealIP)
}
if len(realip) == 0 {
//Log(ProxyClientIP)
realip = r.Header.Get(ProxyClientIP)
}
if len(realip) == 0 {
//Log(WLProxyClientIP)
realip = r.Header.Get(WLProxyClientIP)
}
if len(realip) == 0 {
//Log(HTTPXFORWARDEDFOR)
realip = r.Header.Get(HTTPXFORWARDEDFOR)
}
if len(realip) == 0 {
realip = r.RemoteAddr
}
//Log("ip=" + r.RemoteAddr)
//Log("realip=" + realip)
} | var tmp int32
binary.Read(bytesBuffer, binary.BigEndian, &tmp)
return int(tmp)
} | random_line_split |
PublicFunction.go | package public
import (
"crypto/md5"
"encoding/json"
"fmt"
"io"
"log"
"mime/multipart"
"net"
"net/http"
"os"
"reflect"
"sort"
"strconv"
"github.com/otiai10/copy"
//"strconv"
//"net/http/cookiejar"
"io/ioutil"
//"log"
//"path/filepath"
//"path"
"os/exec"
"path/filepath"
"strings"
"time"
//"github.com/kardianos/osext"
"archive/zip"
"bytes"
"encoding/binary"
//"github.com/tomasen/realip"
//"github.com/satori/go.uuid"
//"github.com/op/go-logging"
)
var IsShowLog = false
func GetRandom() string {
return GetUUIDS()
}
//Substr extracts a rune-based substring: start is the starting index (negative counts back from the end), length is the rune count
func Substr(str string, start int, length int) string {
rs := []rune(str)
rl := len(rs)
end := 0
if start < 0 {
start = rl - 1 + start
}
end = start + length
if start > end {
start, end = end, start
}
if start < 0 {
start = 0
}
if start > rl {
start = rl
}
if end < 0 {
end = 0
}
if end > rl {
end = rl
}
return string(rs[start:end])
}
func GetMd5(str string) string {
data := []byte(str)
has := md5.Sum(data)
	md5str1 := fmt.Sprintf("%x", has) // format the digest as a hex string
//Log("sign=" + md5str1)
return strings.ToUpper(md5str1)
}
func Unzip(src_zip string) string {
	// derive the destination directory from the archive name
	dest := strings.Split(src_zip, ".")[0]
	// open the zip archive
	unzip_file, err := zip.OpenReader(src_zip)
	if err != nil {
		return "archive is corrupted"
	}
	// create the destination directory
	os.MkdirAll(dest, 0755)
	// extract every entry in the archive
	for _, f := range unzip_file.File {
		rc, err := f.Open()
		if err != nil {
			return "a file inside the archive is corrupted"
		}
		path := filepath.Join(dest, f.Name)
		// directory entry or regular file?
		if f.FileInfo().IsDir() {
			os.MkdirAll(path, f.Mode())
		} else {
			// create the extracted file
			f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
			if err != nil {
				return "failed to create local file"
			}
			// write the contents to disk
			_, err = io.Copy(f, rc)
			if err != nil {
				if err != io.EOF {
					return "failed to write local file"
				}
			}
			f.Close()
		}
	}
	unzip_file.Close()
	return "OK"
}
func UnzipToest(src, dest string) error {
r, err := zip.OpenReader(src)
if err != nil {
return err
}
defer func() {
if err := r.Close(); err != nil {
Log(err)
}
}()
os.MkdirAll(dest, 0755)
// Closure to address file descriptors issue with all the deferred .Close() methods
extractAndWriteFile := func(f *zip.File) error {
rc, err := f.Open()
if err != nil {
return err
}
defer func() {
if err := rc.Close(); err != nil {
Log(err)
}
}()
path := filepath.Join(dest, f.Name)
if f.FileInfo().IsDir() {
os.MkdirAll(path, f.Mode())
} else {
os.MkdirAll(filepath.Dir(path), f.Mode())
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
}
defer func() {
if err := f.Close(); err != nil {
Log(err)
}
}()
_, err = io.Copy(f, rc)
if err != nil {
return err
}
}
return nil
}
for _, f := range r.File {
err := extractAndWriteFile(f)
if err != nil {
return err
}
}
return nil
}
func GetCurDir() string {
dir, _ := GetCurrentPath()
return dir
}
func GetCurrentPath() (dir string, err error) {
//path, err := filepath.Abs(filepath.Dir(os.Args[0]))
path, err := exec.LookPath(os.Args[0])
if err != nil {
		Log("exec.LookPath failed for", os.Args[0], ":", err) // Log wraps Println, so printf verbs were printed literally
return "", err
}
absPath, err := filepath.Abs(path)
if err != nil {
		Log("filepath.Abs failed for", path, ":", err)
return "", err
}
dir = filepath.Dir(absPath)
return dir, nil
}
func GetCurRunPath() string {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return ""
}
return dir
}
func ExistsPath(fullpath string) bool {
	//dir, _ := GetCurrentPath() //os.Getwd() // current directory
//fullpath := dir + "/" + path
_, err := os.Stat(fullpath)
//Log("fullpath==" + fullpath)
return err == nil || os.IsExist(err)
}
func CreatePath(fullpath string) {
	//dir, _ := GetCurrentPath() //os.Getwd() // current directory
//fullpath := dir + "/" + newPath
//fullpath = strings.Replace(fullpath, "/", "\\", -1)
//fullpath = strings.Replace(fullpath, " ", "", -1)
//newPath = strings.Replace(newPath, " ", "", -1)
_, errr := os.Stat(fullpath)
	if errr != nil && os.IsNotExist(errr) {
		//Log(ff, fullpath+" does not exist, creating it") // the returned pointer is nil when the path is missing, so a deferred f.Close() would panic
		/*
			var path string
			if os.IsPathSeparator('\\') { // does the platform use '\\' as its separator?
				path = "\\"
			} else {
				path = "/"
			}
		*/
		if err := os.MkdirAll(fullpath, 0777); err != nil {
			if os.IsPermission(err) {
				Log("insufficient permission to create the directory")
			}
		} else {
			//Log("created directory " + fullpath)
		}
		//err := os.Mkdir(fullpath, os.ModePerm) // create the directory under the current path
		//if err != nil {
		//	Log(err)
		//}
	} else {
		//Log(ff, fullpath+" already exists")
	}
}
func SetCookie(r *http.Request, name string, value string) {
	COOKIE_MAX_MAX_AGE := time.Hour * 24 / time.Second // unit: seconds
maxAge := int(COOKIE_MAX_MAX_AGE)
uid_cookie := &http.Cookie{
Name: name,
Value: value,
Path: "/",
HttpOnly: false,
MaxAge: maxAge}
r.AddCookie(uid_cookie)
}
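// A response-side counterpart (a minimal sketch, not called elsewhere in
// this file): http.SetCookie writes a Set-Cookie header on the response,
// which is usually what an HTTP handler wants.
func SetResponseCookie(w http.ResponseWriter, name string, value string) {
http.SetCookie(w, &http.Cookie{
Name: name,
Value: value,
Path: "/",
MaxAge: int(time.Hour * 24 / time.Second), // one day, in seconds
})
}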
func GetTotal(price string, num string) string {
fPrice, err1 := strconv.ParseFloat(price, 64)
fnum, err2 := strconv.ParseFloat(num, 64)
if err1 == nil && err2 == nil {
return fmt.Sprintf("%1.2f", fPrice*fnum)
}
return ""
}
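// Example: GetTotal("19.99", "3") returns "59.97"; non-numeric input such
// as GetTotal("abc", "3") returns "".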
func RemovePath(path string) bool {
if ExistsPath(path) {
err := os.RemoveAll(path)
if err != nil {
Log("remove fail " + path)
return false
}
// Removal succeeded.
return true
}
return false
}
func RemoveFile(path string) bool {
if ExistsPath(path) {
err := os.Remove(path) // delete the single file
if err != nil {
Log("remove fail " + path)
return false
}
// Removal succeeded.
return true
}
return false
}
func SavePictureTask(res http.ResponseWriter, req *http.Request, path string, userid string, typeid string) string {
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
// os.Open returns a nil *os.File when the path does not exist, so do
// not defer ff.Close() here; just create the directory.
Log(ff, path+" does not exist, creating it")
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request (the limit is 24 MiB, despite the constant's name)
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
filename := hdr.Filename
if strings.Contains(strings.ToLower(filename), ".mp3") || strings.Contains(strings.ToLower(filename), ".mov") {
// Audio/video files are stored directly under Picture/, not in
// the temp folder.
path = "Picture/" + userid + "/" + typeid
CreatePath(curdir + "/" + path + "/")
}
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// io.Copy streams with an internal 32K buffer
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
fileNames += "," + hdr.Filename
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func SaveConfigTask(res http.ResponseWriter, req *http.Request, path string, filename string) string {
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
// os.Open returns a nil *os.File when the path does not exist, so do
// not defer ff.Close() here; just create the directory.
Log(ff, path+" does not exist, creating it")
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request (the limit is 24 MiB, despite the constant's name)
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
// open destination; every upload is written to the fixed config file
// name passed in, not to hdr.Filename
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// io.Copy streams with an internal 32K buffer
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
fileNames += "," + hdr.Filename
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func SaveUploadPictureTask(res http.ResponseWriter, req *http.Request, path string) string {
curdir := GetCurDir()
var fileNames string = "#"
if req.Method == "GET" {
} else {
defer func() {
if err := recover(); err != nil {
Log("SaveUploadPictureTask")
Log(err)
}
}()
ff, errr := os.Open(curdir + "/" + path + "/")
if errr != nil && os.IsNotExist(errr) {
// os.Open returns a nil *os.File when the path does not exist, so do
// not defer ff.Close() here; just create the directory.
Log(ff, path+" does not exist, creating it")
CreatePath(curdir + "/" + path + "/")
}
var (
status int
err error
)
defer func() {
if nil != err {
http.Error(res, err.Error(), status)
}
}()
// parse request (the limit is 24 MiB, despite the constant's name)
const _24K = (1 << 20) * 24
if err = req.ParseMultipartForm(_24K); nil != err {
status = http.StatusInternalServerError
return ""
}
for _, fheaders := range req.MultipartForm.File {
for _, hdr := range fheaders {
// open uploaded
var infile multipart.File
if infile, err = hdr.Open(); nil != err {
status = http.StatusInternalServerError
return ""
}
filename := hdr.Filename
// open destination
var outfile *os.File
savePath := curdir + "/" + path + "/" + filename
// If the file already exists, fall back to a random file name.
if ExistsPath(savePath) {
filename = GetRandomFileName(hdr.Filename)
savePath = curdir + "/" + path + "/" + filename
}
if outfile, err = os.Create(savePath); nil != err {
status = http.StatusInternalServerError
return ""
}
// io.Copy streams with an internal 32K buffer
if _, err = io.Copy(outfile, infile); nil != err {
status = http.StatusInternalServerError
return ""
}
infile.Close()
outfile.Close()
fileNames += "," + filename
}
}
}
fileNames = strings.Replace(fileNames, "#,", "", -1)
fileNames = strings.Replace(fileNames, "#", "", -1)
return fileNames
}
func GetRandomFileName(name string) string {
//name := hdr.Filename
arr := strings.Split(name, ".")
extent := arr[len(arr)-1]
return GetRandom() + "." + extent
}
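// GetRandom is assumed to be defined elsewhere in this package. The
// function above keeps only the final extension, so "photo.jpg" becomes
// "<random>.jpg"; a name with no dot becomes "<random>.<name>".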
func CopyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
Log(err.Error())
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
Log(err.Error())
return err
}
if _, err = io.Copy(out, in); err != nil {
Log(err.Error())
out.Close()
return err
}
// Close flushes the destination file; report its error to the caller
// rather than also deferring it, which would double-close the file.
return out.Close()
}
// CopyFiles copies a file or directory tree from source to dest, using
// "github.com/otiai10/copy".
func CopyFiles(source, dest string) bool {
if source == "" || dest == "" {
Log("source or dest is null")
return false
}
err := copy.Copy(source, dest)
return err == nil
}
func NetWorkStatus() bool {
cmd := exec.Command("ping", "baidu.com", "-c", "1", "-W", "5")
fmt.Println("NetWorkStatus Start:", time.Now().Unix())
err := cmd.Run()
fmt.Println("NetWorkStatus End :", time.Now().Unix())
if err != nil {
fmt.Println(err.Error())
return false
} else {
fmt.Println("Net Status , OK")
}
return true
}
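// A portable alternative (a minimal sketch): probing with a TCP dial
// avoids per-OS ping flag differences ("-c"/"-W" above are Linux-style;
// Windows uses "-n"/"-w").
func NetWorkStatusDial() bool {
conn, err := net.DialTimeout("tcp", "baidu.com:80", 5*time.Second)
if err != nil {
return false
}
conn.Close()
return true
}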
func GetMapByJsonStr(jsonstr string) map[string]interface{} {
if !strings.Contains(jsonstr, "{") {
Log("bad json=" + jsonstr)
return nil
}
// Strip NUL bytes, which json.Unmarshal rejects.
jsonstr = strings.Replace(jsonstr, "\x00", "", -1)
if len(jsonstr) > 4 {
var d map[string]interface{}
err := json.Unmarshal([]byte(jsonstr), &d)
if err != nil {
log.Printf("error decoding json response: %v", err)
if e, ok := err.(*json.SyntaxError); ok {
log.Printf("syntax error at byte offset %d", e.Offset)
}
Log("bad json" + jsonstr)
Log(err)
return nil
}
return d
}
return nil
}
func GetMessageMapByJsonKey(jsonstr string, keystr string) map[string]interface{} {
//var jsonstr:='{\"data\": { \"mes\": [ {\"fromuserid\": \"25\", \"touserid\": \"56\",\"message\": \"hhhhhaaaaaa\",\"time\": \"2017-12-12 12:11:11\"}]}}';
index := strings.IndexRune(jsonstr, '{')
jsonstr = jsonstr[index : len(jsonstr)-index]
if len(jsonstr) > 4 && strings.Index(jsonstr, "{") > -1 && strings.Index(jsonstr, "}") > -1 {
mapp := GetMapByJsonStr(jsonstr)
if mapp == nil {
return nil
}
// Use the two-value assertion so a missing or mistyped key returns nil
// instead of panicking.
mymap, ok := mapp[keystr].(map[string]interface{})
if !ok {
return nil
}
return mymap
}
return nil
}
func GetMessageMapByJson(jsonstr string) map[string]interface{} {
//var jsonstr:='{\"data\": { \"mes\": [ {\"fromuserid\": \"25\", \"touserid\": \"56\",\"message\": \"hhhhhaaaaaa\",\"time\": \"2017-12-12 12:11:11\"}]}}';
index := strings.IndexRune(jsonstr, '{')
jsonstr = jsonstr[index : len(jsonstr)-index]
if len(jsonstr) > 4 && strings.Index(jsonstr, "{") > -1 && strings.Index(jsonstr, "}") > -1 {
mapp := GetMapByJsonStr(jsonstr)
if mapp == nil {
return nil
}
mappp, ok := mapp["data"].(map[string]interface{})
if !ok {
return nil
}
// Note: if "mes" holds an array (as in the sample above), this
// assertion fails and nil is returned.
mymap, ok := mappp["mes"].(map[string]interface{})
if !ok {
return nil
}
return mymap
}
return nil
}
func GetJsonStrByMap(MapList map[int]map[string]string) string {
var str string = "##"
sorted_keys := make([]int, 0)
for k := range MapList {
sorted_keys = append(sorted_keys, k)
}
// sort the int keys in increasing order
sort.Ints(sorted_keys)
for _, k := range sorted_keys {
//fmt.Printf("k=%v, v=%v\n", k, MapList[k])
jsonStr, err := json.Marshal(MapList[k])
if err != nil {
Log(err)
}
//Log("map to json", string(str))
str += "," + string(jsonStr)
}
str = strings.Replace(str, "##,", "", -1)
str = strings.Replace(str, "##", "", -1)
return str
}
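// Example: for map[int]map[string]string{2: {"b": "2"}, 1: {"a": "1"}} the
// result is `{"a":"1"},{"b":"2"}`, with entries ordered by the int key.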
func ConverToStr(v interface{}) string {
if v == nil {
return ""
}
var str string = ""
switch reflect.TypeOf(v).Kind() {
case reflect.String:
str = v.(string)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
// string(n) would produce the rune for code point n, not the decimal
// representation, so format the value instead.
str = fmt.Sprintf("%d", v)
case reflect.Float32, reflect.Float64:
str = fmt.Sprintf("%f", v)
default:
str = v.(string)
}
return strings.Replace(str, ".000000", "", -1)
}
func GetCurDateTime() string {
return time.Now().Format("2006-01-02 15:04:05")
}
func GetCurDay() string {
return time.Now().Format("2006-01-02")
}
func GetNameSinceNow(after int) string {
day := time.Now().AddDate(0, 0, after).Format("2006-01-02")
day = strings.Replace(day, "-", "", -1)
return day
}
func GetDaySinceNow(after int) string {
return time.Now().AddDate(0, 0, after).Format("2006-01-02")
}
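// Reminder: Go layout strings are patterns built from the reference time
// "2006-01-02 15:04:05" (Mon Jan 2 15:04:05 MST 2006), not literal dates.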
func ReplaceStr(str string) string {
// Light filtering to reduce the risk of SQL injection; this is no
// substitute for parameterized queries.
str = strings.Replace(str, "'", "", -1)
//str = strings.Replace(str, "-", "\\-", -1)
str = strings.Replace(str, "exec", "exe.c", -1)
return str // -1 means replace all occurrences
}
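// Parameterized queries remain the robust defense (a minimal sketch,
// assuming a *sql.DB named db from database/sql is available):
//
//	row := db.QueryRow("SELECT id FROM users WHERE name = ?", name)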
var logfile *os.File
var oldFileName string
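// Log writes its arguments via log.Println; it does not interpret
// printf-style format verbs, so callers that need formatting should
// pre-format with fmt.Sprintf first.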
func Log(a ...interface{}) (n int, err error) {
//log.SetFlags(log.LstdFlags | log.Lshortfile)
log.Println(a...)
return 1, nil
}
// GetLocalIP returns the non loopback local IP of the host
func GetLocalIP22() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, address := range addrs {
// check the address type; if it is not a loopback address, use it
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
ipstr := ipnet.IP.String()
index := strings.Index(ipstr, "127.0")
if index > -1 {
continue
}
index = strings.Index(ipstr, "192.168.")
if index > -1 {
return ipstr
}
index = strings.Index(ipstr, "169.254.")
if index > -1 {
continue
}
return ipstr
}
}
}
return ""
}
func GetLocalIP() string {
conn, err := net.Dial("udp", "8.8.8.8:80")
if err == nil {
defer conn.Close()
localAddr := conn.LocalAddr().(*net.UDPAddr)
return localAddr.IP.String()
} else {
return GetLocalIPP()
}
}
func GetLocalIPP() string {
//GetIpList()
var ipstr string = ""
// On Windows, obtain the IP via a hostname lookup.
host, _ := os.Hostname()
addrss, err := net.LookupIP(host)
if err != nil {
Log("error", err.Error())
//return ""
}
var ipArray []string
for _, addr := range addrss {
if ipv4 := addr.To4(); ipv4 != nil {
Log("ippppp=: ", ipv4)
ipstr = ipv4.String()
if !strings.HasPrefix(ipstr, "127.0") && !strings.HasPrefix(ipstr, "169.254") && !strings.HasPrefix(ipstr, "172.16") {
ipArray = append(ipArray, ipstr)
}
}
}
// Prefer a public IP if one exists.
//var pubIpArray []string
for i := 0; i < len(ipArray); i++ {
//Log("pubip===" + ipArray[i])
if !strings.HasPrefix(ipArray[i], "10.") && !strings.HasPrefix(ipArray[i], "192.168") && !strings.HasPrefix(ipArray[i], "172.") {
return ipArray[i]
//pubIpArray = append(pubIpArray, ipstr)
}
}
// If there is no public IP, return a local one instead.
if len(ipArray) > 0 {
return ipArray[0]
}
// On Linux, fall back to enumerating the network interfaces.
if ipstr == "" {
ifaces, errr := net.Interfaces()
// handle err
if errr != nil {
Log("error", errr.Error())
return ""
}
for _, i := range ifaces {
addrs, _ := i.Addrs()
// handle err
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
// process IP address
//Log("ip=", ip)
ipstr = fmt.Sprintf("%s", ip)
Log("ipstr=", ipstr)
index := strings.Index(ipstr, "127.0")
if index > -1 {
continue
}
index = strings.Index(ipstr, "192.168.")
if index > -1 {
return ipstr
}
index = strings.Index(ipstr, "169.254.")
if index > -1 {
continue
}
if len(ipstr) > 6 {
array := strings.Split(ipstr, ".")
if len(array) == 4 {
return ipstr
}
}
}
}
}
return ""
}
func HttpPost(url string, paras string) string {
//Log("url=" + url + " paras=" + paras)
client := &http.Client{}
req, err := http.NewRequest("POST",
url,
strings.NewReader(paras))
if err != nil {
// handle error
return ""
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
//req.Header.Set("Cookie", "name=anny")
resp, err := client.Do(req)
if err != nil {
// resp is nil on error, so check before deferring Close.
return ""
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
// handle error
return ""
}
//Log(string(body))
return string(body)
}
func HttpGet(url string) string {
//Log("get =" + url)
resp, err := http.Get(url)
if err != nil {
// handle error
Log(err.Error())
return ""
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
// handle error
Log(err.Error())
return ""
}
//Log("response =" + string(body))
return string(body)
}
func HttpDownloadFile(url string, toPath string) {
//Log("get =" + url)
res, err := http.Get(url)
if err != nil {
Log(err)
return
}
defer res.Body.Close()
f, err := os.Create(toPath)
if err != nil {
Log(err)
return
}
// Defer the Close only after Create has succeeded; on error f is nil.
defer f.Close()
io.Copy(f, res.Body)
}
// IntToBytes converts an int to its 4-byte big-endian representation.
func IntToBytes(n int) []byte {
tmp := int32(n)
bytesBuffer := bytes.NewBuffer([]byte{})
binary.Write(bytesBuffer, binary.BigEndian, tmp)
return bytesBuffer.Bytes()
}
// BytesToInt converts a 4-byte big-endian slice back to an int.
func BytesToInt(b []byte) int {
bytesBuffer := bytes.NewBuffer(b)
var tmp int32
binary.Read(bytesBuffer, binary.BigEndian, &tmp)
return int(tmp)
}
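// Round trip: BytesToInt(IntToBytes(258)) == 258, and IntToBytes(258)
// yields the big-endian bytes [0 0 1 2]. Values outside the int32 range
// are truncated by the int32 conversion above.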
func RealIPHand(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
if rip := RealIP(r); rip != "" {
r.RemoteAddr = rip
}
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
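// Typical wiring (a minimal sketch; the handler name is hypothetical):
//
//	http.ListenAndServe(":8080", RealIPHand(http.HandlerFunc(myHandler)))

// Header keys consulted to discover the client's real IP. Note that
// http.CanonicalHeaderKey normalizes case, so xForwardedFor2 and xRealIP2
// canonicalize to the same keys as xForwardedFor and xRealIP; the
// duplicate branches in RealIP below are redundant but harmless.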
var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
var xForwardedFor2 = http.CanonicalHeaderKey("x-forwarded-for")
var xRealIP = http.CanonicalHeaderKey("X-Real-IP")
var xRealIP2 = http.CanonicalHeaderKey("x-real-ip")
var xRealIP3 = http.CanonicalHeaderKey("x-real-client-ip")
var ProxyClientIP = http.CanonicalHeaderKey("Proxy-Client-IP")
var WLProxyClientIP = http.CanonicalHeaderKey("WL-Proxy-Client-IP")
var HTTPXFORWARDEDFOR = http.CanonicalHeaderKey("HTTP_X_FORWARDED_FOR")
func RealIP(r *http.Request) string {
PrintHead(r)
var ip string
//clientIP := realip.FromRequest(r)
//log.Println("GET / from", clientIP)
if xff := r.Header.Get(xForwardedFor); xff != "" {
//Log(xff)
i := strings.Index(xff, ", ")
if i == -1 {
i = len(xff)
}
ip = xff[:i]
} else if xff := r.Header.Get(xForwardedFor2); xff != "" {
//Log(xff)
i := strings.Index(xff, ", ")
if i == -1 {
i = len(xff)
}
ip = xff[:i]
} else if xrip := r.Header.Get(xRealIP); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(xRealIP2); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(xRealIP3); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(ProxyClientIP); xrip != "" {
ip = xrip
} else if xrip := r.Header.Get(WLProxyClientIP); xrip != "" {
ip = xrip
} else {
ip = r.RemoteAddr
}
return ip
//return realip.FromRequest(r)
}
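// Precedence in RealIP: the first X-Forwarded-For entry wins, then the
// X-Real-IP and proxy-client headers, and finally r.RemoteAddr. These
// headers are client-controlled unless a trusted proxy overwrites them,
// so the result should not be used for authorization decisions.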
func PrintHead(r *http.Request) {
realip := r.Header.Get(xForwardedFor)
if len(realip) == 0 {
realip = r.Header.Get("http_client_ip")
}
if len(realip) == 0 {
//Log(xRealIP)
realip = r.Header.Get(xRealIP)
}
if len(realip) == 0 {
//Log(ProxyClientIP)
realip = r.Header.Get(ProxyClientIP)
}
if len(realip) == 0 {
//Log(WLProxyClientIP)
realip = r.Header.Get(WLProxyClientIP)
}
if len(realip) == 0 {
//Log(HTTPXFORWARDEDFOR)
realip = r.Header.Get(HTTPXFORWARDEDFOR)
}
if len(realip) == 0 {
realip = r.RemoteAddr
}
//Log("ip=" + r.RemoteAddr)
//Log("realip=" + realip)
}
bot.js | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// bot.js is your main bot dialog entry point for handling activity types
// Import required Bot Builder
const { ActionTypes, ActivityTypes, CardFactory } = require('botbuilder');
const { LuisRecognizer } = require('botbuilder-ai');
const { DialogSet, WaterfallDialog } = require('botbuilder-dialogs');
const { OAuthHelpers, LOGIN_PROMPT } = require('./oauth-helpers');
const CONNECTION_SETTING_NAME = '<MS Graph API Connection Name>';
/**
* Demonstrates the following concepts:
* Displaying a Welcome Card, using Adaptive Card technology
* Use LUIS to model Greetings, Help, and Cancel interactions
* Use a Waterfall dialog to model multi-turn conversation flow
* Use custom prompts to validate user input
* Store conversation and user state
* Handle conversation interruptions
*/
let luisResult = null;
class BasicBot {
/**
* Constructs the four pieces necessary for this bot to operate:
* 1. StatePropertyAccessor for conversation state
* 2. StatePropertyAccess for user state
* 3. LUIS client
* 4. DialogSet to handle our GreetingDialog
*
* @param {ConversationState} conversationState property accessor
* @param {application} LUISApplication property accessor
* @param {luisPredictionOptions} PredictionOptions property accessor
* @param {includeApiResults} APIResults Application property accessor
*/
constructor(conversationState, application, luisPredictionOptions, includeApiResults) {
this.luisRecognizer = new LuisRecognizer(application, luisPredictionOptions, true);
this.conversationState = conversationState;
// DialogState property accessor. Used to persist DialogState when using DialogSet.
this.dialogState = conversationState.createProperty('dialogState');
this.commandState = conversationState.createProperty('commandState');
// Instructions for the user with information about commands that this bot may handle.
this.helpMessage = `You can type "send <recipient_email>" to send an email, "recent" to view recent unread mail,` +
` "me" to see information about yourself, or "help" to view these commands` +
` again. For anything else, LUIS displays the top scoring intent with its score.`;
// Create a DialogSet that contains the OAuthPrompt.
this.dialogs = new DialogSet(this.dialogState);
// Add an OAuthPrompt with the connection name as specified on the Bot's settings blade in Azure.
this.dialogs.add(OAuthHelpers.prompt(CONNECTION_SETTING_NAME));
this._graphDialogId = 'graphDialog';
// Logs in the user and calls proceeding dialogs, if login is successful.
this.dialogs.add(new WaterfallDialog(this._graphDialogId, [
this.promptStep.bind(this),
this.processStep.bind(this)
]));
}
/**
* Driver code that does one of the following:
* 1. Display a welcome card upon receiving ConversationUpdate activity
* 2. Use LUIS to recognize intents for incoming user message
* 3. Start a greeting dialog
* 4. Optionally handle Cancel or Help interruptions
*
* @param {Context} turnContext turn context from the adapter
*/
async onTurn(turnContext) {
const dc = await this.dialogs.createContext(turnContext);
const results = await this.luisRecognizer.recognize(turnContext);
switch (turnContext._activity.type) {
case ActivityTypes.Message:
this.luisResult = results;
await this.processInput(dc);
break;
case ActivityTypes.Event:
case ActivityTypes.Invoke:
if (turnContext._activity.type === ActivityTypes.Invoke && turnContext._activity.channelId !== 'msteams') {
throw new Error('The Invoke type is only valid on the MS Teams channel.');
};
await dc.continueDialog();
if (!turnContext.responded) {
await dc.beginDialog(this._graphDialogId);
};
break;
case ActivityTypes.ConversationUpdate:
await this.sendWelcomeMessage(turnContext);
break;
default:
await turnContext.sendActivity(`[${ turnContext._activity.type }]-type activity detected.`);
}
await this.conversationState.saveChanges(turnContext);
}
async sendWelcomeMessage(turnContext) {
const activity = turnContext.activity;
if (activity && activity.membersAdded) {
const heroCard = CardFactory.heroCard(
'Welcome to LUIS with MSGraph API Authentication BOT!',
CardFactory.images(['https://botframeworksamples.blob.core.windows.net/samples/aadlogo.png']),
CardFactory.actions([
{
type: ActionTypes.ImBack,
title: 'Log me in',
value: 'login'
},
{
type: ActionTypes.ImBack,
title: 'Me',
value: 'me'
},
{
type: ActionTypes.ImBack,
title: 'Recent',
value: 'recent'
},
{
type: ActionTypes.ImBack,
title: 'View Token',
value: 'viewToken'
},
{
type: ActionTypes.ImBack,
title: 'Help',
value: 'help'
},
{
type: ActionTypes.ImBack,
title: 'Signout',
value: 'signout'
}
])
);
for (const idx in activity.membersAdded) {
if (activity.membersAdded[idx].id !== activity.recipient.id) {
await turnContext.sendActivity({ attachments: [heroCard] });
}
}
}
}
async processInput(dc, luisResult) {
//console.log(dc);
switch (dc.context.activity.text.toLowerCase()) {
case 'signout':
case 'logout':
case 'signoff':
case 'logoff':
// The bot adapter encapsulates the authentication processes and sends
// activities to and from the Bot Connector Service.
const botAdapter = dc.context.adapter;
await botAdapter.signOutUser(dc.context, CONNECTION_SETTING_NAME);
// Let the user know they are signed out.
await dc.context.sendActivity('You are now signed out.');
break;
case 'help':
await dc.context.sendActivity(this.helpMessage);
break;
default:
// The user has input a command that has not been handled yet,
// begin the waterfall dialog to handle the input.
await dc.continueDialog();
if (!dc.context.responded) {
await dc.beginDialog(this._graphDialogId);
}
}
};
async promptStep(step) {
const activity = step.context.activity;
if (activity.type === ActivityTypes.Message && !(/\d{6}/).test(activity.text)) {
await this.commandState.set(step.context, activity.text);
await this.conversationState.saveChanges(step.context);
}
return await step.beginDialog(LOGIN_PROMPT);
}
async processStep(step) {
//console.log(step);
// We do not need to store the token in the bot. When we need the token we can
// send another prompt. If the token is valid the user will not need to log back in.
// The token will be available in the Result property of the task.
const tokenResponse = step.result;
// If the user is authenticated the bot can use the token to make API calls.
if (tokenResponse !== undefined) {
let parts = await this.commandState.get(step.context);
if (!parts) {
parts = step.context.activity.text;
}
const command = parts.split(' ')[0].toLowerCase();
console.log(command);
if(command === 'login' || command === 'signin'){
await step.context.sendActivity(`You are already logged in!`);
}
else if (command === 'me') {
await OAuthHelpers.listMe(step.context, tokenResponse);
} else if (command === 'send') {
await OAuthHelpers.sendMail(step.context, tokenResponse, parts.split(' ')[1].toLowerCase());
} else if (command === 'recent') {
await OAuthHelpers.listRecentMail(step.context, tokenResponse);
} else if(command.toLowerCase() === 'viewtoken'){
await step.context.sendActivity(`Your token is: ${ tokenResponse.token }`);
}else{
console.log(this.luisResult);
const topIntent = this.luisResult.luisResult.topScoringIntent;
if(topIntent && topIntent.intent !== 'None'){
await step.context.sendActivity(`LUIS Top Scoring Intent: ${ topIntent.intent }, Score: ${ topIntent.score }`);
}else{
await step.context.sendActivity(`Please try something else!`);
// If the top scoring intent was "None" tell the user no valid intents were found and provide help.
// await step.context.sendActivity(`No LUIS intents were found.
// \nThis sample is about identifying two user intents:
// \n - 'Calendar.Add'
// \n - 'Calendar.Find'
// \nTry typing 'Add Event' or 'Show me tomorrow'.`);
}
}
} else {
// Ask the user to try logging in later as they are not logged in.
await step.context.sendActivity(`We couldn't log you in. Please try again later.`);
}
return await step.endDialog();
};
};
exports.BasicBot = BasicBot;
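// Typical wiring (a minimal sketch; the adapter and LUIS setup are assumed
// to live in index.js, as in the Bot Framework v4 samples):
//
//   const { BasicBot } = require('./bot');
//   const bot = new BasicBot(conversationState, luisApplication, luisPredictionOptions);
//   adapter.processActivity(req, res, async (turnContext) => {
//       await bot.onTurn(turnContext);
//   });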